Merge branch 'master' into improve-tolerations-doc

This commit is contained in:
Sergey Dudoladov 2019-02-19 15:14:39 +01:00 committed by GitHub
commit 618d4cd19f
21 changed files with 405 additions and 26 deletions

.gitignore vendored

@ -30,3 +30,7 @@ _testmain.go
.idea
scm-source.json
# diagrams
*.aux
*.log

README.md

@ -6,6 +6,9 @@
[![GoDoc](https://godoc.org/github.com/zalando-incubator/postgres-operator?status.svg)](https://godoc.org/github.com/zalando-incubator/postgres-operator)
[![golangci](https://golangci.com/badges/github.com/zalando-incubator/postgres-operator.svg)](https://golangci.com/r/github.com/zalando-incubator/postgres-operator)
<img src="docs/diagrams/logo.png" width="200">
## Introduction
The Postgres [operator](https://coreos.com/blog/introducing-operators.html)
@ -33,8 +36,17 @@ new Postgres cluster CRD was submitted:
![postgresql-operator](docs/diagrams/operator.png "K8S resources, created by operator")
This picture is not complete without an overview of what is inside a pod, so
let's zoom in:
![pod](docs/diagrams/pod.png "Database pod components")
These two diagrams should help you understand the basics of the functionality
the operator provides. Below we discuss everything in more detail.
There is a browser-friendly version of this documentation at [postgres-operator.readthedocs.io](https://postgres-operator.readthedocs.io)
## Table of contents
@ -48,6 +60,13 @@ There is a browser-friendly version of this documentation at
the rest of the document is a tutorial to get you up and running with the operator on Minikube.
## Community
There are two places to get in touch with the community:
1. The [GitHub issue tracker](https://github.com/zalando-incubator/postgres-operator/issues)
2. The #postgres-operator slack channel under [Postgres Slack](https://postgres-slack.herokuapp.com)
## Quickstart
Prerequisites:


@ -151,6 +151,58 @@ Postgres pods by default receive tolerations for `unreachable` and `noExecute` t
Depending on your setup, you may want to adjust these parameters to prevent master pods from being evicted by the Kubernetes runtime.
To prevent eviction completely, specify the toleration without the `tolerationSeconds` value (similar to how Kubernetes' own DaemonSets are configured).
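For example, a minimal sketch of such a manifest (the cluster name is illustrative; the keys follow the standard Kubernetes toleration spec):
```yaml
apiVersion: "acid.zalan.do/v1"
kind: postgresql
metadata:
  name: acid-demo-cluster
spec:
  tolerations:
  # no tolerationSeconds given, so the pods tolerate the taint indefinitely
  - key: node.kubernetes.io/unreachable
    operator: Exists
    effect: NoExecute
```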
### Add cluster-specific labels
In some cases, you might want to add `labels` that are specific to a given
postgres cluster, in order to identify its child objects.
The typical use case is to add labels that identify the `Pods` created by the
operator, in order to implement fine-grained `NetworkPolicies`.
**OperatorConfiguration**
```yaml
apiVersion: "acid.zalan.do/v1"
kind: OperatorConfiguration
metadata:
  name: postgresql-operator-configuration
configuration:
  kubernetes:
    inherited_labels:
    - application
    - environment
...
```
**cluster manifest**
```yaml
apiVersion: "acid.zalan.do/v1"
kind: postgresql
metadata:
  name: demo-cluster
  labels:
    application: my-app
    environment: demo
spec:
  ...
```
**network policy**
```yaml
kind: NetworkPolicy
apiVersion: networking.k8s.io/v1
metadata:
  name: netpol-example
spec:
  podSelector:
    matchLabels:
      application: my-app
      environment: demo
  ...
```
Here the `NetworkPolicy` selects the database pods through the very labels that the operator inherited from the cluster manifest.
## Custom Pod Environment Variables
It is possible to configure a ConfigMap which is used by the Postgres pods as

docs/diagrams/Makefile Normal file

@ -0,0 +1,11 @@
OBJ=$(patsubst %.tex, %.png, $(wildcard *.tex))

.PHONY: all
all: $(OBJ)

%.pdf: %.tex
	lualatex -shell-escape $<

%.png: %.pdf
	convert -flatten -density 300 $< -quality 90 $@

docs/diagrams/logo.png Normal file (binary, 7.6 KiB, not shown)

(modified binary file, 253 KiB before and after, not shown)

docs/diagrams/operator.tex Normal file

@ -0,0 +1,101 @@
\documentclass{article}
\usepackage{tikz}
\usepackage[graphics,tightpage,active]{preview}
\usetikzlibrary{arrows, shadows.blur, positioning, fit, calc, backgrounds}
\usepackage{lscape}
\pagenumbering{gobble}
\PreviewEnvironment{tikzpicture}
\PreviewEnvironment{equation}
\PreviewEnvironment{equation*}
\newlength{\imagewidth}
\newlength{\imagescale}
\pagestyle{empty}
\thispagestyle{empty}
\begin{document}
\begin{center}
\begin{tikzpicture}[
scale=0.5,transform shape,
font=\sffamily,
every matrix/.style={ampersand replacement=\&,column sep=2cm,row sep=2cm},
operator/.style={draw,solid,thick,circle,fill=red!20,inner sep=.3cm, blur shadow={shadow blur steps=5,shadow blur extra rounding=1.3pt}},
component/.style={draw,solid,thick,rounded corners,fill=yellow!20,inner sep=.3cm, blur shadow={shadow blur steps=5,shadow blur extra rounding=1.3pt}},
border/.style={draw,dashed,rounded corners,fill=gray!20,inner sep=.3cm, blur shadow={shadow blur steps=5,shadow blur extra rounding=1.3pt}},
pod/.style={draw,solid,thick,rounded corners,fill=blue!20, inner sep=.3cm, blur shadow={shadow blur steps=5,shadow blur extra rounding=1.3pt}},
service/.style={draw,solid,thick,rounded corners,fill=blue!20, inner sep=.3cm, blur shadow={shadow blur steps=5,shadow blur extra rounding=1.3pt}},
endpoint/.style={draw,solid,thick,rounded corners,fill=blue!20, blur shadow={shadow blur steps=5,shadow blur extra rounding=1.3pt}},
secret/.style={draw,solid,thick,rounded corners,fill=blue!20, blur shadow={shadow blur steps=5,shadow blur extra rounding=1.3pt}},
pvc/.style={draw,solid,thick,rounded corners,fill=blue!20, inner sep=.3cm, blur shadow={shadow blur steps=5,shadow blur extra rounding=1.3pt}},
label/.style={rectangle,inner sep=0,outer sep=0},
to/.style={->,>=stealth',shorten >=1pt,semithick,font=\sffamily\footnotesize},
every node/.style={align=center}]
% Position the nodes using a matrix layout
\matrix{
\& \node[component] (crd) {CRD}; \\
\& \node[operator] (operator) {Operator}; \\
\path
node[service] (service-master) {Master}
node[label, right of=service-master] (service-middle) {}
node[label, below of=service-middle] (services-label) {Services}
node[service, right=.5cm of service-master] (service-replica) {Replica}
node[border, behind path,
fit=(service-master)(service-replica)(services-label)
] (services) {};
\&
\node[component] (sts) {Statefulset}; \& \node[component] (pdb) {Pod Disruption Budget}; \\
\path
node[service] (master-endpoint) {Master}
node[service, right=.5cm of master-endpoint] (replica-endpoint) {Replica}
node[label, right of=master-endpoint] (endpoint-middle) {}
node[label, below of=endpoint-middle] (endpoint-label) {Endpoints}
node[border, behind path,
fit=(master-endpoint)(replica-endpoint)(endpoint-label)
] (endpoints) {}; \&
\node[component] (pod-template) {Pod Template}; \&
\node[border] (secrets) {
\begin{tikzpicture}[]
\node[secret] (users-secret) at (0, 0) {Users};
\node[secret] (robots-secret) at (2, 0) {Robots};
\node[secret] (standby-secret) at (4, 0) {Standby};
\end{tikzpicture} \\
Secrets
}; \\ \&
\path
node[pod] (replica1-pod) {Replica}
node[pod, left=.5cm of replica1-pod] (master-pod) {Master}
node[pod, right=.5cm of replica1-pod] (replica2-pod) {Replica}
node[label, below of=replica1-pod] (pod-label) {Pods}
node[border, behind path,
fit=(master-pod)(replica1-pod)(replica2-pod)(pod-label)
] (pods) {}; \\ \&
\path
node[pvc] (replica1-pvc) {Replica}
node[pvc, left=.5cm of replica1-pvc] (master-pvc) {Master}
node[pvc, right=.5cm of replica1-pvc] (replica2-pvc) {Replica}
node[label, below of=replica1-pvc] (pvc-label) {Persistent Volume Claims}
node[border, behind path,
fit=(master-pvc)(replica1-pvc)(replica2-pvc)(pvc-label)
] (pvcs) {}; \&
\\ \& \\
};
% Draw the arrows between the nodes and label them.
\draw[to] (crd) -- node[midway,above] {} node[midway,below] {} (operator);
\draw[to] (operator) -- node[midway,above] {} node[midway,below] {} (sts);
\draw[to] (operator) -- node[midway,above] {} node[midway,below] {} (secrets);
\draw[to] (operator) -| node[midway,above] {} node[midway,below] {} (pdb);
\draw[to] (service-master) -- node[midway,above] {} node[midway,below] {} (master-endpoint);
\draw[to] (service-replica) -- node[midway,above] {} node[midway,below] {} (replica-endpoint);
\draw[to] (master-pod) -- node[midway,above] {} node[midway,below] {} (master-pvc);
\draw[to] (replica1-pod) -- node[midway,above] {} node[midway,below] {} (replica1-pvc);
\draw[to] (replica2-pod) -- node[midway,above] {} node[midway,below] {} (replica2-pvc);
\draw[to] (operator) -| node[midway,above] {} node[midway,below] {} (services);
\draw[to] (sts) -- node[midway,above] {} node[midway,below] {} (pod-template);
\draw[to] (pod-template) -- node[midway,above] {} node[midway,below] {} (pods);
\end{tikzpicture}
\end{center}
\end{document}

docs/diagrams/pod.png Normal file (binary, 236 KiB, not shown)

docs/diagrams/pod.tex Normal file

@ -0,0 +1,92 @@
\documentclass{article}
\usepackage{tikz}
\usepackage[graphics,tightpage,active]{preview}
\usetikzlibrary{arrows, shadows.blur, positioning, fit, calc, backgrounds}
\usepackage{lscape}
\pagenumbering{gobble}
\PreviewEnvironment{tikzpicture}
\PreviewEnvironment{equation}
\PreviewEnvironment{equation*}
\newlength{\imagewidth}
\newlength{\imagescale}
\pagestyle{empty}
\thispagestyle{empty}
\begin{document}
\begin{center}
\begin{tikzpicture}[
scale=0.5,transform shape,
font=\sffamily,
every matrix/.style={ampersand replacement=\&,column sep=2cm,row sep=2cm},
pod/.style={draw,solid,thick,circle,fill=red!20,inner sep=.3cm, blur shadow={shadow blur steps=5,shadow blur extra rounding=1.3pt}},
component/.style={draw,solid,thick,rounded corners,fill=yellow!20,inner sep=.3cm, blur shadow={shadow blur steps=5,shadow blur extra rounding=1.3pt}},
border/.style={draw,dashed,rounded corners,fill=gray!20,inner sep=.3cm, blur shadow={shadow blur steps=5,shadow blur extra rounding=1.3pt}},
volume/.style={draw,solid,thick,rounded corners,fill=blue!20, inner sep=.3cm, blur shadow={shadow blur steps=5,shadow blur extra rounding=1.3pt}},
sidecar/.style={draw,solid,thick,rounded corners,fill=blue!20, inner sep=.3cm, blur shadow={shadow blur steps=5,shadow blur extra rounding=1.3pt}},
k8s-label/.style={draw,solid,thick,rounded corners,fill=blue!20, minimum width=1.5cm, inner sep=.3cm, blur shadow={shadow blur steps=5,shadow blur extra rounding=1.3pt}},
affinity/.style={draw,solid,thick,rounded corners,fill=blue!20, minimum width=2cm, inner sep=.3cm, blur shadow={shadow blur steps=5,shadow blur extra rounding=1.3pt}},
label/.style={rectangle,inner sep=0,outer sep=0},
to/.style={->,>=stealth',shorten >=1pt,semithick,font=\sffamily\footnotesize},
every node/.style={align=center}]
% Position the nodes using a matrix layout
\matrix{
\path
node[k8s-label] (app-label) {App}
node[k8s-label, right=.25cm of app-label] (role-label) {Role}
node[k8s-label, right=.25cm of role-label] (custom-label) {Custom}
node[label, below of=role-label] (k8s-label-label) {K8S Labels}
node[border, behind path,
fit=(app-label)(role-label)(custom-label)(k8s-label-label)
] (k8s-labels) {}; \& \&
\path
node[affinity] (affinity) {Affinity}
node[label, right=.25cm of affinity] (affinity-middle) {}
node[affinity, right=.25cm of affinity-middle] (anti-affinity) {Anti-affinity}
node[label, below of=affinity-middle] (affinity-label) {Assigning to nodes}
node[border, behind path,
fit=(affinity)(anti-affinity)(affinity-label)
] (affinity) {}; \\
\& \node[pod] (pod) {Pod}; \& \\
\path
node[volume, minimum width={width("shm-volume")}] (data-volume) {Data}
node[volume, right=.25cm of data-volume, minimum width={width("shm-volume")}] (tokens-volume) {Tokens}
node[volume, right=.25cm of tokens-volume] (shm-volume) {/dev/shm}
node[label, below of=tokens-volume] (volumes-label) {Volumes}
node[border, behind path,
fit=(data-volume)(shm-volume)(tokens-volume)(volumes-label)
] (volumes) {}; \&
\node[component] (spilo) {Spilo}; \&
\node[sidecar] (scalyr) {Scalyr}; \& \\ \&
\path
node[component] (patroni) {Patroni}
node[component, below=.25cm of patroni] (postgres) {PostgreSQL}
node[border, behind path,
fit=(postgres)(patroni)
] (spilo-components) {}; \&
\path
node[sidecar] (custom-sidecar1) {User defined}
node[label, right=.25cm of custom-sidecar1] (sidecars-middle) {}
node[sidecar, right=.25cm of sidecars-middle] (custom-sidecar2) {User defined}
node[label, below of=sidecars-middle] (sidecars-label) {Custom sidecars}
node[border, behind path,
fit=(custom-sidecar1)(custom-sidecar2)(sidecars-label)
] (sidecars) {};
\\ \& \\
};
% Draw the arrows between the nodes and label them.
\draw[to] (pod) to [bend left=25] (volumes);
\draw[to] (pod) to [bend left=25] (k8s-labels);
\draw[to] (pod) to [bend right=25] (affinity);
\draw[to] (pod) to [bend right=25] (scalyr);
\draw[to] (pod) to [bend right=25] (sidecars);
\draw[to] (pod) -- node[midway,above] {} node[midway,below] {} (spilo);
\draw[to] (spilo) -- node[midway,above] {} node[midway,below] {} (spilo-components);
\end{tikzpicture}
\end{center}
\end{document}

docs/gsoc-2019/ideas.md Normal file

@ -0,0 +1,64 @@
# Google Summer of Code 2019
## Application steps
1. Please carefully read the official [Google Summer of Code Student Guide](https://google.github.io/gsocguides/student/)
2. Join the #postgres-operator slack channel under [Postgres Slack](https://postgres-slack.herokuapp.com) to introduce yourself to the community and get quick feedback on your application.
3. Select a project from the list of ideas below or propose your own.
4. Write a proposal draft. Please open an issue with the label `gsoc2019_application` in the [operator repository](https://github.com/zalando-incubator/postgres-operator/issues) so that the community members can publicly review it. See proposal instructions below for details.
5. Submit the proposal and proof of enrollment before April 9, 2019 18:00 UTC through the program's web site.
## Project ideas
### Place database pods into the "Guaranteed" Quality-of-Service class
* **Description**: the Kubernetes runtime does not kill pods in this class as long as they stay within their resource limits, which is desirable for DB pods serving production workloads. To be assigned to that class, a pod's resource requests must equal its limits. The task is to add an option such as `enableGuaranteedQoSClass` to the Postgres manifest and the operator configmap that forcibly re-writes pod resources to match the limits (see the sketch after this idea).
* **Recommended skills**: golang, basic Kubernetes abstractions
* **Difficulty**: moderate
* **Mentor(s)**: Felix Kunde [@FxKu](https://github.com/fxku), Sergey Dudoladov [@sdudoladov](https://github.com/sdudoladov)
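For orientation, a minimal sketch of the resource stanza such an option would have to enforce; a container is placed in the "Guaranteed" class only when its requests equal its limits for both CPU and memory (values below are illustrative):
```yaml
resources:
  requests:
    cpu: "1"
    memory: 1Gi
  limits:
    cpu: "1"     # equal to the request
    memory: 1Gi  # equal to the request
```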
### Implement the kubectl plugin for the Postgres CustomResourceDefinition
* **Description**: [kubectl plugins](https://kubernetes.io/docs/tasks/extend-kubectl/kubectl-plugins/) enable extending the Kubernetes command-line client `kubectl` with commands to manage custom resources. The task is to design and implement a plugin for the `kubectl postgres` command,
that can enable, for example, correct deletion or major version upgrade of Postgres clusters.
* **Recommended skills**: golang, shell scripting, operational experience with Kubernetes
* **Difficulty**: moderate to medium, depending on the plugin design
* **Mentor(s)**: Felix Kunde [@FxKu](https://github.com/fxku), Sergey Dudoladov [@sdudoladov](https://github.com/sdudoladov)
### Implement the openAPIV3Schema for the Postgres CRD
* **Description**: at present the operator validates a database manifest on its own.
It would be helpful to reject erroneous manifests before they reach the operator, using the [native Kubernetes CRD validation](https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/#validation) (see the sketch after this idea). It is up to the student to decide whether to write the schema manually or to adopt the existing [schema generator developed for the Prometheus project](https://github.com/ant31/crd-validation).
* **Recommended skills**: golang, JSON schema
* **Difficulty**: medium
* **Mentor(s)**: Sergey Dudoladov [@sdudoladov](https://github.com/sdudoladov)
* **Issue**: [#388](https://github.com/zalando-incubator/postgres-operator/issues/388)
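For orientation, a minimal sketch of what such a schema could look like, using the CRD validation syntax available at the time of writing; the `numberOfInstances` constraint is merely illustrative:
```yaml
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: postgresqls.acid.zalan.do
spec:
  group: acid.zalan.do
  version: v1
  names:
    kind: postgresql
    plural: postgresqls
  scope: Namespaced
  validation:
    openAPIV3Schema:
      properties:
        spec:
          properties:
            # reject manifests that request a negative number of pods
            numberOfInstances:
              type: integer
              minimum: 0
```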
### Design a solution for the local testing of the operator
* **Description**: The current way of testing is to run minikube, either manually or with some tooling around it like `/run-operator_locally.sh` or Vagrant. This has at least three problems:
First, minikube is a single node cluster, so it is unsuitable for testing vital functions such as pod migration between nodes. Second, minikube starts slowly; that prolongs local testing.
Third, every contributor needs to come up with their own solution for local testing. The task is to come up with a better option which will enable us to conveniently and uniformly run e2e tests locally / potentially in Travis CI.
A promising option is Kubernetes' own [kind](https://github.com/kubernetes-sigs/kind).
* **Recommended skills**: Docker, shell scripting, basic Kubernetes abstractions
* **Difficulty**: medium to hard, depending on the selected design
* **Mentor(s)**: Dmitry Dolgov [@erthalion](https://github.com/erthalion), Sergey Dudoladov [@sdudoladov](https://github.com/sdudoladov)
* **Issue**: [#475](https://github.com/zalando-incubator/postgres-operator/issues/475)
### Detach a Postgres cluster from the operator for maintenance
* **Description**: sometimes a Postgres cluster requires manual maintenance. During such maintenance the operator should ignore all the changes manually applied to the cluster.
Currently the only way to achieve this behavior is to shut down the operator altogether, for instance by scaling down the operator's own deployment to zero pods. That approach evidently affects all Postgres databases under the operator's control and is thus highly undesirable in production Kubernetes clusters. It would be much better to detach only the desired Postgres cluster from the operator for the time being and re-attach it after maintenance (see the sketch after this idea).
* **Recommended skills**: golang, architecture of a Kubernetes operator
* **Difficulty**: hard - requires significant modification of the operator's internals and careful consideration of the corner cases.
* **Mentor(s)**: Dmitry Dolgov [@erthalion](https://github.com/erthalion), Sergey Dudoladov [@sdudoladov](https://github.com/sdudoladov)
* **Issue**: [#421](https://github.com/zalando-incubator/postgres-operator/issues/421)
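Purely as a discussion starter, one conceivable shape for this feature is an annotation on the cluster manifest that the operator would check before syncing; the annotation below is hypothetical and does not exist in the operator today:
```yaml
apiVersion: "acid.zalan.do/v1"
kind: postgresql
metadata:
  name: demo-cluster
  annotations:
    # hypothetical flag: while set to "true", the operator would leave
    # this cluster untouched until the annotation is removed
    acid.zalan.do/detached: "true"
```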
### Propose your own idea
Feel free to come up with your own ideas. For inspiration,
see [our bug tracker](https://github.com/zalando-incubator/postgres-operator/issues),
the [official `CustomResourceDefinition` docs](https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/)
and [other operators](https://github.com/operator-framework/awesome-operators).


@ -51,7 +51,7 @@ Please, report any issues discovered to https://github.com/zalando-incubator/pos
## Talks
1. "PostgreSQL and Kubernetes: DBaaS without a vendor-lock" talk by Oleksii Kliukin, PostgreSQL Sessions 2018: [slides](https://speakerdeck.com/alexeyklyukin/postgresql-and-kubernetes-dbaas-without-a-vendor-lock)
1. "PostgreSQL and Kubernetes: DBaaS without a vendor-lock" talk by Oleksii Kliukin, PostgreSQL Sessions 2018: [video](https://www.youtube.com/watch?v=q26U2rQcqMw) | [slides](https://speakerdeck.com/alexeyklyukin/postgresql-and-kubernetes-dbaas-without-a-vendor-lock)
2. "PostgreSQL High Availability on Kubernetes with Patroni" talk by Oleksii Kliukin, Atmosphere 2018: [video](https://www.youtube.com/watch?v=cFlwQOPPkeg) | [slides](https://speakerdeck.com/alexeyklyukin/postgresql-high-availability-on-kubernetes-with-patroni)


@ -33,7 +33,15 @@ Those parameters are grouped under the `metadata` top-level key.
services, secrets) for the cluster. Changing it after the cluster creation
results in deploying or updating a completely separate cluster in the target
namespace. Optional (if present, should match the namespace where the
manifest is applied).
* **labels**
if labels match one of the `inherited_labels` [configured in the
operator parameters](operator_parameters.md#kubernetes-resources),
they are automatically added to all the objects (StatefulSet, Service,
Endpoints, etc.) that the operator creates.
Labels that are set here but not listed as `inherited_labels` in the operator
parameters are ignored.
## Top-level parameters
@ -89,7 +97,7 @@ Those are parameters grouped directly under the `spec` key in the manifest.
examples](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/)
for details on tolerations and possible values of those keys. When set, this
value overrides the `pod_toleration` setting from the operator. Optional.
* **podPriorityClassName**
a name of the [priority
class](https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass)
@ -135,7 +143,7 @@ explanation of `ttl` and `loop_wait` parameters.
a map of key-value pairs describing initdb parameters. For `data-checksum`,
`debug`, `no-locale`, `noclean`, `nosync` and `sync-only` parameters use
`true` as the value if you want to set them. Changes to this option do not
affect the already initialized clusters. Optional.
* **pg_hba**
list of custom `pg_hba` lines to replace default ones. Note that the default
@ -215,7 +223,7 @@ under the `clone` top-level key and do not affect the already running cluster.
different namespaces), the operator uses UID in the S3 bucket name in order
to guarantee uniqueness. Has no effect when cloning from the running
clusters. Optional.
* **timestamp**
the timestamp up to which the recovery should proceed. The operator always
configures non-inclusive recovery target, stopping right before the given
@ -235,7 +243,7 @@ properties of the persistent storage that stores postgres data.
the name of the Kubernetes storage class to draw the persistent volume from.
See [Kubernetes
documentation](https://kubernetes.io/docs/concepts/storage/storage-classes/)
for the details on storage classes. Optional.
### Sidecar definitions


@ -12,12 +12,12 @@ configuration.
* CRD-based configuration. The configuration is stored in a custom YAML
manifest. The manifest is an instance of the custom resource definition (CRD) called
`OperatorConfiguration`. The operator registers this CRD during startup and
uses it for configuration if the [operator deployment manifest](https://github.com/zalando-incubator/postgres-operator/blob/master/manifests/postgres-operator.yaml#L21) sets the `POSTGRES_OPERATOR_CONFIGURATION_OBJECT` env variable to a non-empty value. The variable should point to the
`postgresql-operator-configuration` object in the operator's namespace.
The CRD-based configuration is a regular YAML
document; non-scalar keys are simply represented in the usual YAML way.
There are no default values built into the operator; each parameter that is
not supplied in the configuration receives an empty value. In order to
create your own configuration just copy the [default
@ -172,6 +172,14 @@ configuration they are grouped under the `kubernetes` key.
list of `name:value` pairs for additional labels assigned to the cluster
objects. The default is `application:spilo`.
* **inherited_labels**
list of labels that can be inherited from the cluster manifest and added to
each child object (`StatefulSet`, `Pod`, `Service` and `Endpoints`) created by
the operator.
The typical use case is to dynamically pass labels that are specific to a
given Postgres cluster, in order to implement a `NetworkPolicy`.
The default is empty.
* **cluster_name_label**
name of the label assigned to Kubernetes objects created by the operator that
indicates which cluster a given object belongs to. The default is
@ -198,13 +206,13 @@ configuration they are grouped under the `kubernetes` key.
All variables from that ConfigMap are injected into the pod's environment; on
conflicts they are overridden by the environment variables generated by the
operator. The default is empty.
* **pod_priority_class_name**
a name of the [priority
class](https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass)
that should be assigned to the Postgres pods. The priority class itself must be defined in advance.
Default is empty (use the default priority class).
## Kubernetes resource requests
@ -350,7 +358,7 @@ Options to aid debugging of the operator itself. Grouped under the `debug` key.
boolean parameter that toggles the functionality of the operator that requires
access to the postgres database, i.e. creating databases and users. The default
is `true`.
## Automatic creation of human users in the database
Options to automate creation of human users with the aid of the teams API
@ -448,4 +456,4 @@ scalyr sidecar. In the CRD-based configuration they are grouped under the
Memory limit value for the Scalyr sidecar. The default is `1Gi`.
For the configmap operator configuration, the [default parameter values](https://github.com/zalando-incubator/postgres-operator/blob/master/pkg/util/config/config.go#L14) mentioned here are likely to be overwritten in your local operator installation via your local version of the operator configmap. In case you use the operator CRD, all the CRD defaults are provided in the [operator's default configuration manifest](https://github.com/zalando-incubator/postgres-operator/blob/master/manifests/postgresql-operator-default-configuration.yaml).


@ -25,8 +25,11 @@ configuration:
    pod_role_label: spilo-role
    cluster_labels:
      application: spilo
    # inherited_labels:
    # - application
    # - app
    cluster_name_label: cluster-name
    # watched_namespace: ""
    # node_readiness_label: ""
    # toleration: {}
    # infrastructure_roles_secret_name: ""
@ -53,7 +56,7 @@ configuration:
    replica_dns_name_format: "{cluster}-repl.{team}.{hostedzone}"
  aws_or_gcp:
    # db_hosted_zone: ""
    # wal_s3_bucket: ""
    # log_s3_bucket: ""
    # kube_iam_role: ""
    aws_region: eu-central-1
@ -62,13 +65,13 @@ configuration:
    enable_database_access: true
  teams_api:
    enable_teams_api: false
    team_api_role_configuration:
      log_statement: all
    enable_team_superuser: false
    team_admin_role: admin
    pam_role_name: zalandos
    # pam_configuration: ""
    protected_role_names:
    - admin
    # teams_api_url: ""
    # postgres_superuser_teams: "postgres_superusers"


@ -52,6 +52,7 @@ type KubernetesMetaConfiguration struct {
InfrastructureRolesSecretName spec.NamespacedName `json:"infrastructure_roles_secret_name,omitempty"`
PodRoleLabel string `json:"pod_role_label,omitempty"`
ClusterLabels map[string]string `json:"cluster_labels,omitempty"`
InheritedLabels []string `json:"inherited_labels,omitempty"`
ClusterNameLabel string `json:"cluster_name_label,omitempty"`
NodeReadinessLabel map[string]string `json:"node_readiness_label,omitempty"`
// TODO: use a proper toleration structure?


@ -494,7 +494,7 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error {
defer func() {
if updateFailed {
c.setStatus(acidv1.ClusterStatusUpdateFailed)
} else if c.Status != acidv1.ClusterStatusRunning {
} else {
c.setStatus(acidv1.ClusterStatusRunning)
}
}()


@ -1073,7 +1073,7 @@ func (c *Cluster) generateService(role PostgresRole, spec *acidv1.PostgresSpec)
}
if role == Replica {
serviceSpec.Selector = c.roleLabelsSet(role)
serviceSpec.Selector = c.roleLabelsSet(false, role)
}
var annotations map[string]string
@ -1113,7 +1113,7 @@ func (c *Cluster) generateService(role PostgresRole, spec *acidv1.PostgresSpec)
ObjectMeta: metav1.ObjectMeta{
Name: c.serviceName(role),
Namespace: c.Namespace,
Labels: c.roleLabelsSet(role),
Labels: c.roleLabelsSet(true, role),
Annotations: annotations,
},
Spec: serviceSpec,
@ -1127,7 +1127,7 @@ func (c *Cluster) generateEndpoint(role PostgresRole, subsets []v1.EndpointSubse
ObjectMeta: metav1.ObjectMeta{
Name: c.endpointName(role),
Namespace: c.Namespace,
Labels: c.roleLabelsSet(role),
Labels: c.roleLabelsSet(true, role),
},
}
if len(subsets) > 0 {
@ -1191,7 +1191,7 @@ func (c *Cluster) generatePodDisruptionBudget() *policybeta1.PodDisruptionBudget
Spec: policybeta1.PodDisruptionBudgetSpec{
MinAvailable: &minAvailable,
Selector: &metav1.LabelSelector{
MatchLabels: c.roleLabelsSet(Master),
MatchLabels: c.roleLabelsSet(false, Master),
},
},
}


@ -27,7 +27,7 @@ func (c *Cluster) listPods() ([]v1.Pod, error) {
func (c *Cluster) getRolePods(role PostgresRole) ([]v1.Pod, error) {
listOptions := metav1.ListOptions{
LabelSelector: c.roleLabelsSet(role).String(),
LabelSelector: c.roleLabelsSet(false, role).String(),
}
pods, err := c.KubeClient.Pods(c.Namespace).List(listOptions)


@ -389,6 +389,19 @@ func (c *Cluster) labelsSet(shouldAddExtraLabels bool) labels.Set {
	if shouldAddExtraLabels {
		// enables filtering resources owned by a team
		lbls["team"] = c.Postgresql.Spec.TeamID
		// allow to inherit certain labels from the 'postgres' object
		if spec, err := c.GetSpec(); err == nil {
			for k, v := range spec.ObjectMeta.Labels {
				for _, match := range c.OpConfig.InheritedLabels {
					if k == match {
						lbls[k] = v
					}
				}
			}
		} else {
			c.logger.Warningf("could not get the list of InheritedLabels for cluster %q: %v", c.Name, err)
		}
	}
	return labels.Set(lbls)
@ -398,8 +411,8 @@ func (c *Cluster) labelsSelector() *metav1.LabelSelector {
return &metav1.LabelSelector{MatchLabels: c.labelsSet(false), MatchExpressions: nil}
}
func (c *Cluster) roleLabelsSet(role PostgresRole) labels.Set {
lbls := c.labelsSet(false)
func (c *Cluster) roleLabelsSet(shouldAddExtraLabels bool, role PostgresRole) labels.Set {
lbls := c.labelsSet(shouldAddExtraLabels)
lbls[c.OpConfig.PodRoleLabel] = string(role)
return lbls
}


@ -39,6 +39,7 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur
result.PodServiceAccountName = fromCRD.Kubernetes.PodServiceAccountName
result.PodServiceAccountDefinition = fromCRD.Kubernetes.PodServiceAccountDefinition
result.PodServiceAccountRoleBindingDefinition = fromCRD.Kubernetes.PodServiceAccountRoleBindingDefinition
result.PodEnvironmentConfigMap = fromCRD.Kubernetes.PodEnvironmentConfigMap
result.PodTerminateGracePeriod = time.Duration(fromCRD.Kubernetes.PodTerminateGracePeriod)
result.WatchedNamespace = fromCRD.Kubernetes.WatchedNamespace
result.PDBNameFormat = fromCRD.Kubernetes.PDBNameFormat
@ -47,6 +48,7 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur
result.InfrastructureRolesSecretName = fromCRD.Kubernetes.InfrastructureRolesSecretName
result.PodRoleLabel = fromCRD.Kubernetes.PodRoleLabel
result.ClusterLabels = fromCRD.Kubernetes.ClusterLabels
result.InheritedLabels = fromCRD.Kubernetes.InheritedLabels
result.ClusterNameLabel = fromCRD.Kubernetes.ClusterNameLabel
result.NodeReadinessLabel = fromCRD.Kubernetes.NodeReadinessLabel
result.PodPriorityClassName = fromCRD.Kubernetes.PodPriorityClassName


@ -27,6 +27,7 @@ type Resources struct {
PodTerminateGracePeriod time.Duration `name:"pod_terminate_grace_period" default:"5m"`
PodPriorityClassName string `name:"pod_priority_class_name"`
ClusterLabels map[string]string `name:"cluster_labels" default:"application:spilo"`
InheritedLabels []string `name:"inherited_labels" default:""`
ClusterNameLabel string `name:"cluster_name_label" default:"cluster-name"`
PodRoleLabel string `name:"pod_role_label" default:"spilo-role"`
PodToleration map[string]string `name:"toleration" default:""`