Merge branch 'master' into add-logical-backup
commit 0e5ed5ee8f
README.md | 49
@@ -8,48 +8,33 @@

<img src="docs/diagrams/logo.png" width="200">

# Google Summer of Code

The Postgres Operator made it to the [Google Summer of Code 2019](https://summerofcode.withgoogle.com/)! As a brand new mentoring organization, we are now looking for our first mentees. Check [our ideas](https://github.com/zalando/postgres-operator/blob/master/docs/gsoc-2019/ideas.md#google-summer-of-code-2019) and start a discussion in [the issue tracker](https://github.com/zalando/postgres-operator/issues). And don't forget to spread the word about our GSoC participation to attract even more students.

## Introduction
## Introduction to the Postgres Operator

The Postgres [operator](https://coreos.com/blog/introducing-operators.html)
manages PostgreSQL clusters on Kubernetes:

1. The operator watches additions, updates, and deletions of PostgreSQL cluster
manifests and changes the running clusters accordingly. For example, when a
user submits a new manifest, the operator fetches that manifest and spawns a
new Postgres cluster along with all necessary entities such as Kubernetes
StatefulSets and Postgres roles. See this
user submits a new manifest, the operator spawns a new Postgres cluster with
necessary entities such as StatefulSets, Services, and also Postgres roles. See this
[Postgres cluster manifest](manifests/complete-postgres-manifest.yaml)
for settings that a manifest may contain (a minimal example is sketched right after this list).

2. The operator also watches updates to [its own configuration](manifests/configmap.yaml)
and alters running Postgres clusters if necessary. For instance, if a pod
docker image is changed, the operator carries out the rolling update. That
Docker image is changed, the operator carries out the rolling update. That
is, the operator re-spawns one-by-one pods of each StatefulSet it manages
with the new Docker image.

3. Finally, the operator periodically synchronizes the actual state of each
Postgres cluster with the desired state defined in the cluster's manifest.

Here is a diagram that summarizes what would be created by the operator when a
new Postgres cluster CRD was submitted:
4. The operator aims to be hands-free; configuration happens only via manifests and its own config.
This enables easy integration in automated deploy pipelines with no access to Kubernetes directly.
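
To make the first point concrete, a cluster manifest is just a small custom resource. The snippet below is only a sketch modelled on the minimal and complete example manifests referenced above; the concrete names and values are illustrative assumptions, not defaults taken from this repository:

```yaml
# hypothetical minimal "postgresql" resource; names and values are placeholders
apiVersion: "acid.zalan.do/v1"
kind: postgresql
metadata:
  name: acid-minimal-cluster
spec:
  teamId: "acid"           # clusters belong to a team
  numberOfInstances: 2     # one master plus one replica pod
  volume:
    size: 1Gi              # persistent volume requested for each pod
  users:
    zalando:               # database role the operator creates
      - superuser
      - createdb
  postgresql:
    version: "10"
```

Submitting such a manifest (for example with `kubectl create -f`) is what triggers the operator to create the StatefulSet, Services, secrets, and roles described above.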



This picture is not complete without an overview of what is inside a pod, so
let's zoom in:



These two diagrams should help you to understand the basics of what kind of
functionality the operator provides. Below we discuss everything in more
detail.

There is a browser-friendly version of this documentation at [postgres-operator.readthedocs.io](https://postgres-operator.readthedocs.io)
# Google Summer of Code

The Postgres Operator made it to the [Google Summer of Code 2019](https://summerofcode.withgoogle.com/)! As a brand new mentoring organization, we are now looking for our first mentees. Check [our ideas](https://github.com/zalando/postgres-operator/blob/master/docs/gsoc-2019/ideas.md#google-summer-of-code-2019) and start a discussion in [the issue tracker](https://github.com/zalando/postgres-operator/issues). And don't forget to spread the word about our GSoC participation to attract even more students.

## Table of contents
@@ -61,8 +46,24 @@ There is a browser-friendly version of this documentation at [postgres-operator.
* [cluster manifest reference](docs/reference/cluster_manifest.md)
* [command-line options and environment variables](docs/reference/command_line_and_environment.md)

the rest of the document is a tutorial to get you up and running with the operator on Minikube.
The rest of this document is a tutorial to get you up and running locally with the operator on Minikube.

## Overview of involved entities

Here is a diagram that summarizes what would be created by the operator when a
new Postgres cluster CRD is submitted:



This picture is not complete without an overview of what is inside a single cluster pod, so
let's zoom in:



These two diagrams should help you to understand the basics of what kind of
functionality the operator provides.

There is a browser-friendly version of this documentation at [postgres-operator.readthedocs.io](https://postgres-operator.readthedocs.io)

## Community
@@ -0,0 +1,21 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj
@@ -0,0 +1,15 @@
apiVersion: v1
name: postgres-operator
version: 0.1.0
appVersion: 1.1.0
home: https://github.com/zalando/postgres-operator
description: Postgres operator creates and manages PostgreSQL clusters running in Kubernetes
keywords:
- postgres
- operator
maintainers:
- name: kimxogus
  email: kgyoo8232@gmail.com
sources:
- https://github.com/zalando-incubator/postgres-operator
engine: gotpl
@@ -0,0 +1,3 @@
To verify that postgres-operator has started, run:

  kubectl --namespace={{ .Release.Namespace }} get pods -l "app.kubernetes.io/name={{ template "postgres-operator.name" . }}"
@@ -0,0 +1,32 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "postgres-operator.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}

{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "postgres-operator.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}

{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "postgres-operator.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
@@ -0,0 +1,141 @@
{{ if .Values.rbac.create }}
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
  name: {{ template "postgres-operator.fullname" . }}
  labels:
    app.kubernetes.io/name: {{ template "postgres-operator.name" . }}
    helm.sh/chart: {{ template "postgres-operator.chart" . }}
    app.kubernetes.io/managed-by: {{ .Release.Service }}
    app.kubernetes.io/instance: {{ .Release.Name }}
rules:
- apiGroups:
  - acid.zalan.do
  resources:
  - postgresqls
  - operatorconfigurations
  verbs:
  - "*"
- apiGroups:
  - apiextensions.k8s.io
  resources:
  - customresourcedefinitions
  verbs:
  - create
  - get
- apiGroups:
  - ""
  resources:
  - configmaps
  verbs:
  - get
- apiGroups:
  - ""
  resources:
  - endpoints
  verbs:
  - create
  - delete
  - get
  - list
  - patch
  - watch # needed if zalando-postgres-operator account is used for pods as well
- apiGroups:
  - ""
  resources:
  - secrets
  verbs:
  - create
  - update
  - delete
  - get
- apiGroups:
  - ""
  resources:
  - nodes
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - persistentvolumeclaims
  verbs:
  - delete
  - get
  - list
- apiGroups:
  - ""
  resources:
  - persistentvolumes
  verbs:
  - get
  - list
  - update # only for resizing AWS volumes
- apiGroups:
  - ""
  resources:
  - pods
  verbs:
  - delete
  - get
  - list
  - watch
  - patch
- apiGroups:
  - ""
  resources:
  - services
  verbs:
  - create
  - delete
  - get
  - patch
- apiGroups:
  - apps
  resources:
  - statefulsets
  verbs:
  - create
  - delete
  - get
  - list
  - patch
- apiGroups:
  - ""
  resources:
  - namespaces
  verbs:
  - get
- apiGroups:
  - policy
  resources:
  - poddisruptionbudgets
  verbs:
  - create
  - delete
  - get
- apiGroups:
  - ""
  resources:
  - serviceaccounts
  verbs:
  - get
  - create
- apiGroups:
  - "rbac.authorization.k8s.io"
  resources:
  - rolebindings
  verbs:
  - get
  - create
- apiGroups:
  - "rbac.authorization.k8s.io"
  resources:
  - clusterroles
  verbs:
  - bind
  resourceNames:
  - {{ template "postgres-operator.fullname" . }}
{{ end }}
@@ -0,0 +1,21 @@
{{ if .Values.rbac.create }}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: {{ template "postgres-operator.fullname" . }}
  labels:
    app.kubernetes.io/name: {{ template "postgres-operator.name" . }}
    helm.sh/chart: {{ template "postgres-operator.chart" . }}
    app.kubernetes.io/managed-by: {{ .Release.Service }}
    app.kubernetes.io/instance: {{ .Release.Name }}
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: {{ template "postgres-operator.fullname" . }}
subjects:
- kind: ServiceAccount
  # note: the cluster role binding needs to be defined
  # for every namespace the operator service account lives in.
  name: {{ template "postgres-operator.fullname" . }}
  namespace: {{ .Release.Namespace }}
{{ end }}
@@ -0,0 +1,12 @@
apiVersion: v1
kind: ConfigMap
metadata:
  name: {{ template "postgres-operator.fullname" . }}
  labels:
    app.kubernetes.io/name: {{ template "postgres-operator.name" . }}
    helm.sh/chart: {{ template "postgres-operator.chart" . }}
    app.kubernetes.io/managed-by: {{ .Release.Service }}
    app.kubernetes.io/instance: {{ .Release.Name }}
data:
  pod_service_account_name: {{ template "postgres-operator.fullname" . }}
{{ toYaml .Values.config | indent 2 }}
@@ -0,0 +1,52 @@
apiVersion: apps/v1beta2
kind: Deployment
metadata:
  labels:
    app.kubernetes.io/name: {{ template "postgres-operator.name" . }}
    helm.sh/chart: {{ template "postgres-operator.chart" . }}
    app.kubernetes.io/managed-by: {{ .Release.Service }}
    app.kubernetes.io/instance: {{ .Release.Name }}
  name: {{ template "postgres-operator.fullname" . }}
spec:
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/name: {{ template "postgres-operator.name" . }}
      app.kubernetes.io/instance: {{ .Release.Name }}
  template:
    metadata:
      annotations:
        checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }}
{{- if .Values.podAnnotations }}
{{ toYaml .Values.podAnnotations | indent 8 }}
{{- end }}
      labels:
        app.kubernetes.io/name: {{ template "postgres-operator.name" . }}
        app.kubernetes.io/instance: {{ .Release.Name }}
{{- if .Values.podLabels }}
{{ toYaml .Values.podLabels | indent 8 }}
{{- end }}
    spec:
      serviceAccountName: {{ template "postgres-operator.fullname" . }}
      containers:
      - name: {{ .Chart.Name }}
        image: "{{ .Values.image.registry }}/{{ .Values.image.repository }}:{{ .Values.image.tag }}"
        imagePullPolicy: {{ .Values.image.pullPolicy }}
        env:
        - name: CONFIG_MAP_NAME
          value: {{ template "postgres-operator.fullname" . }}
        resources:
{{ toYaml .Values.resources | indent 10 }}
{{- if .Values.imagePullSecrets }}
      imagePullSecrets:
{{ toYaml .Values.imagePullSecrets | indent 8 }}
{{- end }}
      affinity:
{{ toYaml .Values.affinity | indent 8 }}
      nodeSelector:
{{ toYaml .Values.nodeSelector | indent 8 }}
      tolerations:
{{ toYaml .Values.tolerations | indent 8 }}
{{- if .Values.priorityClassName }}
      priorityClassName: {{ .Values.priorityClassName }}
{{- end }}
@@ -0,0 +1,11 @@
{{ if .Values.serviceAccount.create }}
apiVersion: v1
kind: ServiceAccount
metadata:
  name: {{ template "postgres-operator.fullname" . }}
  labels:
    app.kubernetes.io/name: {{ template "postgres-operator.name" . }}
    helm.sh/chart: {{ template "postgres-operator.chart" . }}
    app.kubernetes.io/managed-by: {{ .Release.Service }}
    app.kubernetes.io/instance: {{ .Release.Name }}
{{ end }}
@@ -0,0 +1,92 @@
image:
  registry: registry.opensource.zalan.do
  repository: acid/postgres-operator
  tag: v1.1.0
  pullPolicy: "IfNotPresent"

# Optionally specify an array of imagePullSecrets.
# Secrets must be manually created in the namespace.
# ref: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod
# imagePullSecrets:
#   - name: myRegistryKeySecretName

podAnnotations: {}
podLabels: {}

config:
  watched_namespace: "*" # listen to all namespaces
  cluster_labels: application:spilo
  cluster_name_label: version
  pod_role_label: spilo-role

  debug_logging: "true"
  workers: "4"
  docker_image: registry.opensource.zalan.do/acid/spilo-cdp-10:1.5-p35
  secret_name_template: '{username}.{cluster}.credentials'
  super_username: postgres
  enable_teams_api: "false"
  # set_memory_request_to_limit: "true"
  # postgres_superuser_teams: "postgres_superusers"
  # enable_team_superuser: "false"
  # team_admin_role: "admin"
  # teams_api_url: http://fake-teams-api.default.svc.cluster.local
  # team_api_role_configuration: "log_statement:all"
  # infrastructure_roles_secret_name: postgresql-infrastructure-roles
  # oauth_token_secret_name: postgresql-operator
  # pam_role_name: zalandos
  # pam_configuration: |
  #   https://info.example.com/oauth2/tokeninfo?access_token= uid realm=/employees
  aws_region: eu-central-1
  db_hosted_zone: db.example.com
  master_dns_name_format: '{cluster}.{team}.staging.{hostedzone}'
  replica_dns_name_format: '{cluster}-repl.{team}.staging.{hostedzone}'
  enable_master_load_balancer: "true"
  enable_replica_load_balancer: "false"

  pdb_name_format: "postgres-{cluster}-pdb"

  api_port: "8080"
  ring_log_lines: "100"
  cluster_history_entries: "1000"
  pod_terminate_grace_period: 5m
  pod_deletion_wait_timeout: 10m
  pod_label_wait_timeout: 10m
  ready_wait_interval: 3s
  ready_wait_timeout: 30s
  replication_username: standby
  resource_check_interval: 3s
  resource_check_timeout: 10m
  resync_period: 5m

rbac:
  # Specifies whether RBAC resources should be created
  create: true

serviceAccount:
  # Specifies whether a ServiceAccount should be created
  create: true
  # The name of the ServiceAccount to use.
  # If not set and create is true, a name is generated using the fullname template
  name:

priorityClassName: ""

resources: {}
  # limits:
  #   cpu: 100m
  #   memory: 300Mi
  # requests:
  #   cpu: 100m
  #   memory: 300Mi

# Affinity for pod assignment
# Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
affinity: {}

# Tolerations for pod assignment
# Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
tolerations: []

# Node labels for pod assignment
# Ref: https://kubernetes.io/docs/user-guide/node-selection/
nodeSelector: {}
@@ -29,6 +29,16 @@ ConfigMap is used to store the configuration of the operator

## Deploying the operator

### - Helm chart

You can install the postgres-operator with the Helm chart.

```bash
$ helm install --name my-release ./charts/postgres-operator
```
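
Chart defaults can be overridden at install time with a values file. The following sketch is not part of the chart itself; the keys mirror `charts/postgres-operator/values.yaml`, while the concrete values are only illustrative:

```yaml
# my-values.yaml (hypothetical), passed with:
#   helm install --name my-release -f my-values.yaml ./charts/postgres-operator
image:
  tag: v1.1.0              # pin the operator image version
config:
  workers: "8"             # more sync workers than the default "4"
  debug_logging: "false"   # quieter logs outside of development
resources:
  requests:
    cpu: 100m
    memory: 300Mi
```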

### - Kubernetes manifest

First you need to install the service account definition in your Minikube cluster.

```bash
@@ -18,11 +18,30 @@ cd postgres-operator

minikube start

# start the operator; may take a few seconds
# start the operator using either the helm chart or the yaml manifests

# - install postgres-operator with the helm chart
# 1) initialize helm
helm init
# 2) install the postgres-operator chart
helm install --name postgres-operator ./charts/postgres-operator

# - install postgres-operator with the yaml manifests
kubectl create -f manifests/configmap.yaml # configuration
kubectl create -f manifests/operator-service-account-rbac.yaml # identity and permissions
kubectl create -f manifests/postgres-operator.yaml # deployment


# starting the operator may take a few seconds
# check if the operator pod is running

# - if you've created the operator using the helm chart
kubectl get po -l app.kubernetes.io/name=postgres-operator

# - if you've created the operator using the yaml manifests
kubectl get po -l name=postgres-operator


# create a Postgres cluster
kubectl create -f manifests/minimal-postgres-manifest.yaml
@@ -226,6 +226,11 @@ configuration they are grouped under the `kubernetes` key.
[topology key](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#interlude-built-in-node-labels)
for pod anti affinity. The default is `kubernetes.io/hostname`.

* **pod_management_policy**
specify the
[pod management policy](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#pod-management-policies)
of stateful sets of PG clusters. The default is `ordered_ready`, the second possible value is `parallel`.
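
For example, switching a cluster's pods to parallel management is a single key in the operator configuration. The snippet below is a sketch: only the `pod_management_policy` key comes from this setting, the ConfigMap name and the rest of the skeleton are assumed:

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: postgres-operator   # assumed name; use the ConfigMap your deployment references
data:
  # start and replace pods of a cluster's StatefulSet in parallel instead of one by one
  pod_management_policy: "parallel"
```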

## Kubernetes resource requests

This group allows you to configure resource requests for the Postgres pods.
@@ -44,6 +44,7 @@ data:
  pod_terminate_grace_period: 5m
  pod_deletion_wait_timeout: 10m
  pod_label_wait_timeout: 10m
  pod_management_policy: "ordered_ready"
  ready_wait_interval: 3s
  ready_wait_timeout: 30s
  # master_pod_move_timeout: 10m
@@ -327,14 +327,10 @@ func (c *Cluster) compareStatefulSetWith(statefulSet *v1beta1.StatefulSet) *comp
        match = false
        reasons = append(reasons, "new statefulset's annotations doesn't match the current one")
    }
    if len(c.Statefulset.Spec.Template.Spec.Containers) != len(statefulSet.Spec.Template.Spec.Containers) {
        needsRollUpdate = true
        reasons = append(reasons, "new statefulset's container specification doesn't match the current one")
    } else {
        var containerReasons []string
        needsRollUpdate, containerReasons = c.compareContainers(c.Statefulset, statefulSet)
        reasons = append(reasons, containerReasons...)
    }

    needsRollUpdate, reasons = c.compareContainers("initContainers", c.Statefulset.Spec.Template.Spec.InitContainers, statefulSet.Spec.Template.Spec.InitContainers, needsRollUpdate, reasons)
    needsRollUpdate, reasons = c.compareContainers("containers", c.Statefulset.Spec.Template.Spec.Containers, statefulSet.Spec.Template.Spec.Containers, needsRollUpdate, reasons)

    if len(c.Statefulset.Spec.Template.Spec.Containers) == 0 {
        c.logger.Warningf("statefulset %q has no container", util.NameFromMeta(c.Statefulset.ObjectMeta))
        return &compareStatefulsetResult{}
@@ -425,34 +421,37 @@ func newCheck(msg string, cond containerCondition) containerCheck {
    return containerCheck{reason: msg, condition: cond}
}

// compareContainers: compare containers from two stateful sets
// compareContainers: compare two list of Containers
// and return:
// * whether or not a rolling update is needed
// * a list of reasons in a human readable format
func (c *Cluster) compareContainers(setA, setB *v1beta1.StatefulSet) (bool, []string) {
    reasons := make([]string, 0)
    needsRollUpdate := false

func (c *Cluster) compareContainers(description string, setA, setB []v1.Container, needsRollUpdate bool, reasons []string) (bool, []string) {
    if len(setA) != len(setB) {
        return true, append(reasons, fmt.Sprintf("new statefulset %s's length does not match the current ones", description))
    }

    checks := []containerCheck{
        newCheck("new statefulset's container %s (index %d) name doesn't match the current one",
        newCheck("new statefulset %s's %s (index %d) name doesn't match the current one",
            func(a, b v1.Container) bool { return a.Name != b.Name }),
        newCheck("new statefulset's container %s (index %d) image doesn't match the current one",
        newCheck("new statefulset %s's %s (index %d) image doesn't match the current one",
            func(a, b v1.Container) bool { return a.Image != b.Image }),
        newCheck("new statefulset's container %s (index %d) ports don't match the current one",
        newCheck("new statefulset %s's %s (index %d) ports don't match the current one",
            func(a, b v1.Container) bool { return !reflect.DeepEqual(a.Ports, b.Ports) }),
        newCheck("new statefulset's container %s (index %d) resources don't match the current ones",
        newCheck("new statefulset %s's %s (index %d) resources don't match the current ones",
            func(a, b v1.Container) bool { return !compareResources(&a.Resources, &b.Resources) }),
        newCheck("new statefulset's container %s (index %d) environment doesn't match the current one",
        newCheck("new statefulset %s's %s (index %d) environment doesn't match the current one",
            func(a, b v1.Container) bool { return !reflect.DeepEqual(a.Env, b.Env) }),
        newCheck("new statefulset's container %s (index %d) environment sources don't match the current one",
        newCheck("new statefulset %s's %s (index %d) environment sources don't match the current one",
            func(a, b v1.Container) bool { return !reflect.DeepEqual(a.EnvFrom, b.EnvFrom) }),
    }

    for index, containerA := range setA.Spec.Template.Spec.Containers {
        containerB := setB.Spec.Template.Spec.Containers[index]
    for index, containerA := range setA {
        containerB := setB[index]
        for _, check := range checks {
            if check.condition(containerA, containerB) {
                needsRollUpdate = true
                reasons = append(reasons, fmt.Sprintf(check.reason, containerA.Name, index))
                reasons = append(reasons, fmt.Sprintf(check.reason, description, containerA.Name, index))
            }
        }
    }
@@ -861,6 +861,20 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*v1beta1.State

    numberOfInstances := c.getNumberOfInstances(spec)

    // the operator has domain-specific logic on how to do rolling updates of PG clusters
    // so we do not use default rolling updates implemented by stateful sets
    // that leaves the legacy "OnDelete" update strategy as the only option
    updateStrategy := v1beta1.StatefulSetUpdateStrategy{Type: v1beta1.OnDeleteStatefulSetStrategyType}

    var podManagementPolicy v1beta1.PodManagementPolicyType
    if c.OpConfig.PodManagementPolicy == "ordered_ready" {
        podManagementPolicy = v1beta1.OrderedReadyPodManagement
    } else if c.OpConfig.PodManagementPolicy == "parallel" {
        podManagementPolicy = v1beta1.ParallelPodManagement
    } else {
        return nil, fmt.Errorf("could not set the pod management policy to the unknown value: %v", c.OpConfig.PodManagementPolicy)
    }

    statefulSet := &v1beta1.StatefulSet{
        ObjectMeta: metav1.ObjectMeta{
            Name: c.statefulSetName(),
@@ -874,6 +888,8 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*v1beta1.State
            ServiceName:          c.serviceName(Master),
            Template:             *podTemplate,
            VolumeClaimTemplates: []v1.PersistentVolumeClaim{*volumeClaimTemplate},
            UpdateStrategy:       updateStrategy,
            PodManagementPolicy:  podManagementPolicy,
        },
    }

@@ -184,7 +184,7 @@ func (c *Controller) moveMasterPodsOffNode(node *v1.Node) {
    )

    if err != nil {
        c.logger.Warning("failed to move master pods from the node %q: timeout of %v minutes expired", node.Name, c.opConfig.MasterPodMoveTimeout)
        c.logger.Warningf("failed to move master pods from the node %q: timeout of %v minutes expired", node.Name, c.opConfig.MasterPodMoveTimeout)
    }

}
@@ -118,6 +118,7 @@ type Config struct {
    ClusterHistoryEntries    int               `name:"cluster_history_entries" default:"1000"`
    TeamAPIRoleConfiguration map[string]string `name:"team_api_role_configuration" default:"log_statement:all"`
    PodTerminateGracePeriod  time.Duration     `name:"pod_terminate_grace_period" default:"5m"`
    PodManagementPolicy      string            `name:"pod_management_policy" default:"ordered_ready"`
    ProtectedRoles           []string          `name:"protected_role_names" default:"admin"`
    PostgresSuperuserTeams   []string          `name:"postgres_superuser_teams" default:""`
    SetMemoryRequestToLimit  bool              `name:"set_memory_request_to_limit" defaults:"false"`