merge with master and resolve conflict

Felix Kunde 2020-02-20 10:09:13 +01:00
commit cf85db5d7a
43 changed files with 612 additions and 286 deletions

View File

@ -1,7 +1,7 @@
apiVersion: v1
name: postgres-operator-ui
version: 0.1.0
-appVersion: 1.2.0
+appVersion: 1.3.0
home: https://github.com/zalando/postgres-operator
description: Postgres Operator UI provides a graphical interface for a convenient database-as-a-service user experience
keywords:
@ -12,6 +12,8 @@ keywords:
- patroni
- spilo
maintainers:
+- name: Zalando
+email: opensource@zalando.de
- name: siku4
email: sk@sik-net.de
sources:

View File

@ -24,6 +24,13 @@ If release name contains chart name it will be used as a full name.
{{- end -}}
{{- end -}}
+{{/*
+Create a service account name.
+*/}}
+{{- define "postgres-operator-ui.serviceAccountName" -}}
+{{ default (include "postgres-operator-ui.fullname" .) .Values.serviceAccount.name }}
+{{- end -}}
{{/*
Create chart name and version as used by the chart label.
*/}}

View File

@ -0,0 +1,52 @@
{{ if .Values.rbac.create }}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: {{ include "postgres-operator-ui.serviceAccountName" . }}
labels:
app.kubernetes.io/name: {{ template "postgres-operator-ui.name" . }}
helm.sh/chart: {{ template "postgres-operator-ui.chart" . }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
rules:
- apiGroups:
- acid.zalan.do
resources:
- postgresqls
verbs:
- create
- delete
- get
- list
- patch
- update
- apiGroups:
- ""
resources:
- pods
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- services
verbs:
- get
- list
- apiGroups:
- apps
resources:
- statefulsets
verbs:
- get
- list
- apiGroups:
- ""
resources:
- namespaces
verbs:
- get
- list
{{ end }}

View File

@ -0,0 +1,19 @@
{{ if .Values.rbac.create }}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: {{ include "postgres-operator-ui.serviceAccountName" . }}
labels:
app.kubernetes.io/name: {{ template "postgres-operator-ui.name" . }}
helm.sh/chart: {{ template "postgres-operator-ui.chart" . }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ include "postgres-operator-ui.serviceAccountName" . }}
subjects:
- kind: ServiceAccount
name: {{ include "postgres-operator-ui.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}
{{ end }}

View File

@ -20,7 +20,7 @@ spec:
app.kubernetes.io/instance: {{ .Release.Name }}
team: "acid" # Parameterize?
spec:
-serviceAccountName: {{ template "postgres-operator-ui.name" . }}
+serviceAccountName: {{ include "postgres-operator-ui.serviceAccountName" . }}
containers:
- name: "service"
image: "{{ .Values.image.registry }}/{{ .Values.image.repository }}:{{ .Values.image.tag }}"

View File

@ -1,81 +1,11 @@
+{{ if .Values.serviceAccount.create }}
apiVersion: v1
kind: ServiceAccount
metadata:
-name: {{ template "postgres-operator-ui.name" . }}
+name: {{ include "postgres-operator-ui.serviceAccountName" . }}
labels:
app.kubernetes.io/name: {{ template "postgres-operator-ui.name" . }}
helm.sh/chart: {{ template "postgres-operator-ui.chart" . }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
+{{ end }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: {{ template "postgres-operator-ui.name" . }}
labels:
app.kubernetes.io/name: {{ template "postgres-operator-ui.name" . }}
helm.sh/chart: {{ template "postgres-operator-ui.chart" . }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
rules:
- apiGroups:
- acid.zalan.do
resources:
- postgresqls
verbs:
- create
- delete
- get
- list
- patch
- update
- apiGroups:
- ""
resources:
- pods
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- services
verbs:
- get
- list
- apiGroups:
- apps
resources:
- statefulsets
verbs:
- get
- list
- apiGroups:
- ""
resources:
- namespaces
verbs:
- get
- list
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: {{ template "postgres-operator-ui.name" . }}
labels:
app.kubernetes.io/name: {{ template "postgres-operator-ui.name" . }}
helm.sh/chart: {{ template "postgres-operator-ui.chart" . }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ template "postgres-operator-ui.name" . }}
subjects:
- kind: ServiceAccount
# note: the cluster role binding needs to be defined
# for every namespace the operator-ui service account lives in.
name: {{ template "postgres-operator-ui.name" . }}
namespace: {{ .Release.Namespace }}

View File

@ -11,6 +11,17 @@ image:
tag: v1.2.0
pullPolicy: "IfNotPresent"
+rbac:
+# Specifies whether RBAC resources should be created
+create: true
+serviceAccount:
+# Specifies whether a ServiceAccount should be created
+create: true
+# The name of the ServiceAccount to use.
+# If not set and create is true, a name is generated using the fullname template
+name:
# configure UI pod resources
resources:
limits:

View File

@ -0,0 +1,53 @@
{{ if .Values.rbac.create }}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: postgres-pod
labels:
app.kubernetes.io/name: {{ template "postgres-operator.name" . }}
helm.sh/chart: {{ template "postgres-operator.chart" . }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
rules:
# Patroni needs to watch and manage endpoints
- apiGroups:
- ""
resources:
- endpoints
verbs:
- create
- delete
- deletecollection
- get
- list
- patch
- update
- watch
# Patroni needs to watch pods
- apiGroups:
- ""
resources:
- pods
verbs:
- get
- list
- patch
- update
- watch
# to let Patroni create a headless service
- apiGroups:
- ""
resources:
- services
verbs:
- create
# to run privileged pods
- apiGroups:
- extensions
resources:
- podsecuritypolicies
resourceNames:
- privileged
verbs:
- use
{{ end }}

View File

@ -9,6 +9,7 @@ metadata:
app.kubernetes.io/managed-by: {{ .Release.Service }} app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }} app.kubernetes.io/instance: {{ .Release.Name }}
rules: rules:
# all verbs allowed for custom operator resources
- apiGroups: - apiGroups:
- acid.zalan.do - acid.zalan.do
resources: resources:
@ -16,7 +17,15 @@ rules:
- postgresqls/status
- operatorconfigurations
verbs:
-- "*"
+- create
+- delete
+- deletecollection
+- get
+- list
+- patch
+- update
+- watch
+# to create or get/update CRDs when starting up
- apiGroups:
- apiextensions.k8s.io
resources:
@ -26,12 +35,14 @@ rules:
- get - get
- patch - patch
- update - update
# to read configuration from ConfigMaps
- apiGroups: - apiGroups:
- "" - ""
resources: resources:
- configmaps - configmaps
verbs: verbs:
- get - get
# to manage endpoints which are also used by Patroni
- apiGroups: - apiGroups:
- "" - ""
resources: resources:
@ -43,7 +54,9 @@ rules:
- get
- list
- patch
-- watch # needed if zalando-postgres-operator account is used for pods as well
+- update
+- watch
+# to CRUD secrets for database access
- apiGroups:
- ""
resources:
@ -53,6 +66,7 @@ rules:
- update - update
- delete - delete
- get - get
# to check nodes for node readiness label
- apiGroups: - apiGroups:
- "" - ""
resources: resources:
@ -61,6 +75,7 @@ rules:
- get - get
- list - list
- watch - watch
# to read or delete existing PVCs. Creation via StatefulSet
- apiGroups: - apiGroups:
- "" - ""
resources: resources:
@ -69,6 +84,7 @@ rules:
- delete - delete
- get - get
- list - list
# to read existing PVs. Creation should be done via dynamic provisioning
- apiGroups: - apiGroups:
- "" - ""
resources: resources:
@ -77,6 +93,7 @@ rules:
- get - get
- list - list
- update # only for resizing AWS volumes - update # only for resizing AWS volumes
# to watch Spilo pods and do rolling updates. Creation via StatefulSet
- apiGroups: - apiGroups:
- "" - ""
resources: resources:
@ -86,13 +103,16 @@ rules:
- get - get
- list - list
- watch - watch
- update
- patch - patch
# to resize the filesystem in Spilo pods when increasing volume size
- apiGroups: - apiGroups:
- "" - ""
resources: resources:
- pods/exec - pods/exec
verbs: verbs:
- create - create
# to CRUD services to point to Postgres cluster instances
- apiGroups: - apiGroups:
- "" - ""
resources: resources:
@ -102,6 +122,8 @@ rules:
- delete - delete
- get - get
- patch - patch
- update
# to CRUD the StatefulSet which controls the Postgres cluster instances
- apiGroups: - apiGroups:
- apps - apps
resources: resources:
@ -113,12 +135,26 @@ rules:
- get - get
- list - list
- patch - patch
# to CRUD cron jobs for logical backups
- apiGroups:
- batch
resources:
- cronjobs
verbs:
- create
- delete
- get
- list
- patch
- update
# to get namespaces operator resources can run in
- apiGroups: - apiGroups:
- "" - ""
resources: resources:
- namespaces - namespaces
verbs: verbs:
- get - get
# to define PDBs. Update happens via delete/create
- apiGroups: - apiGroups:
- policy - policy
resources: resources:
@ -127,6 +163,7 @@ rules:
- create - create
- delete - delete
- get - get
# to create ServiceAccounts in each namespace the operator watches
- apiGroups: - apiGroups:
- "" - ""
resources: resources:
@ -134,30 +171,21 @@ rules:
verbs:
- get
- create
+# to create role bindings to the postgres-pod service account
- apiGroups:
-- "rbac.authorization.k8s.io"
+- rbac.authorization.k8s.io
resources:
- rolebindings
verbs:
- get
- create
+# to grant privilege to run privileged pods
- apiGroups:
-- "rbac.authorization.k8s.io"
+- extensions
resources:
-- clusterroles
+- podsecuritypolicies
-verbs:
-- bind
resourceNames:
-- {{ include "postgres-operator.serviceAccountName" . }}
+- privileged
-- apiGroups:
-- batch
-resources:
-- cronjobs # enables logical backups
verbs:
-- create
+- use
-- delete
-- get
-- list
-- patch
-- update
{{ end }}

View File

@ -14,8 +14,6 @@ roleRef:
name: {{ include "postgres-operator.serviceAccountName" . }}
subjects:
- kind: ServiceAccount
-# note: the cluster role binding needs to be defined
-# for every namespace the operator service account lives in.
name: {{ include "postgres-operator.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}
{{ end }}

View File

@ -9,7 +9,6 @@ metadata:
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
data:
-pod_service_account_name: {{ include "postgres-operator.serviceAccountName" . }}
{{ toYaml .Values.configGeneral | indent 2 }}
{{ toYaml .Values.configUsers | indent 2 }}
{{ toYaml .Values.configKubernetes | indent 2 }}

View File

@ -14,7 +14,6 @@ configuration:
{{ toYaml .Values.configUsers | indent 4 }}
kubernetes:
oauth_token_secret_name: {{ template "postgres-operator.fullname" . }}
-pod_service_account_name: {{ include "postgres-operator.serviceAccountName" . }}
{{ toYaml .Values.configKubernetes | indent 4 }}
postgres_pod_resources:
{{ toYaml .Values.configPostgresPodResources | indent 4 }}

View File

@ -8,6 +8,7 @@ metadata:
app.kubernetes.io/instance: {{ .Release.Name }}
name: {{ template "postgres-operator.fullname" . }}
spec:
+type: ClusterIP
ports:
- port: 8080
protocol: TCP
@ -15,7 +16,3 @@ spec:
selector:
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/name: {{ template "postgres-operator.name" . }}
-sessionAffinity: None
-type: ClusterIP
-status:
-loadBalancer: {}

View File

@ -100,6 +100,8 @@ configKubernetes:
pod_management_policy: "ordered_ready"
# label assigned to the Postgres pods (and services/endpoints)
pod_role_label: spilo-role
+# name of service account to be used by postgres cluster pods
+pod_service_account_name: "postgres-pod"
# Postgres pods are terminated forcefully after this timeout
pod_terminate_grace_period: 5m
# template for database user secrets generated by the operator

View File

@ -55,7 +55,7 @@ configKubernetes:
# additional labels assigned to the cluster objects
cluster_labels: application:spilo
# label assigned to Kubernetes objects created by the operator
-cluster_name_label: version
+cluster_name_label: cluster-name
# annotations attached to each database pod
# custom_pod_annotations: "keya:valuea,keyb:valueb"
@ -93,6 +93,8 @@ configKubernetes:
pod_management_policy: "ordered_ready"
# label assigned to the Postgres pods (and services/endpoints)
pod_role_label: spilo-role
+# name of service account to be used by postgres cluster pods
+pod_service_account_name: "postgres-pod"
# Postgres pods are terminated forcefully after this timeout
pod_terminate_grace_period: 5m
# template for database user secrets generated by the operator

View File

@ -47,6 +47,12 @@ patching the CRD manifest:
zk8 patch crd postgresqls.acid.zalan.do -p '{"spec":{"validation": null}}'
```
+## Non-default cluster domain
+If your cluster uses a DNS domain other than the default `cluster.local`, this
+needs to be set in the operator configuration (`cluster_domain` variable). This
+is used by the operator to connect to the clusters after creation.
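For illustration only (not taken from the diff): the setting lives in the operator ConfigMap when ConfigMap-based configuration is used; the domain value below is a made-up example and the ConfigMap name may differ in your setup.

```yaml
# excerpt of an operator ConfigMap; "example.internal" is an illustrative value
apiVersion: v1
kind: ConfigMap
metadata:
  name: postgres-operator
data:
  cluster_domain: example.internal
```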
## Namespaces
### Select the namespace to deploy to
@ -89,36 +95,13 @@ lacks access rights to any of them (except K8s system namespaces like
'list pods' execute at the cluster scope and fail at the first violation of
access rights.
-The watched namespace also needs to have a (possibly different) service account
-in the case database pods need to talk to the K8s API (e.g. when using
-K8s-native configuration of Patroni). The operator checks that the
-`pod_service_account_name` exists in the target namespace, and, if not, deploys
-there the `pod_service_account_definition` from the operator
-[`Config`](../pkg/util/config/config.go) with the default value of:
-```yaml
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-name: operator
-```
-In this definition, the operator overwrites the account's name to match
-`pod_service_account_name` and the `default` namespace to match the target
-namespace. The operator performs **no** further syncing of this account.
-## Non-default cluster domain
-If your cluster uses a DNS domain other than the default `cluster.local`, this
-needs to be set in the operator configuration (`cluster_domain` variable). This
-is used by the operator to connect to the clusters after creation.
## Role-based access control for the operator
The manifest [`operator-service-account-rbac.yaml`](../manifests/operator-service-account-rbac.yaml)
defines the service account, cluster roles and bindings needed for the operator
-to function under access control restrictions. To deploy the operator with this
-RBAC policy use:
+to function under access control restrictions. The file also includes a cluster
+role `postgres-pod` with privileges for Patroni to watch and manage pods and
+endpoints. To deploy the operator with these RBAC policies use:
```bash
kubectl create -f manifests/configmap.yaml
@ -127,14 +110,14 @@ kubectl create -f manifests/postgres-operator.yaml
kubectl create -f manifests/minimal-postgres-manifest.yaml
```
-### Service account and cluster roles
+### Namespaced service account and role binding
-Note that the service account is named `zalando-postgres-operator`. You may have
-to change the `service_account_name` in the operator ConfigMap and
-`serviceAccountName` in the `postgres-operator` deployment appropriately. This
-is done intentionally to avoid breaking those setups that already work with the
-default `operator` account. In the future the operator should ideally be run
-under the `zalando-postgres-operator` service account.
+For each namespace the operator watches it creates (or reads) a service account
+and role binding to be used by the Postgres Pods. The service account is bound
+to the `postgres-pod` cluster role. The name and definitions of these resources
+can be [configured](reference/operator_parameters.md#kubernetes-resources).
+Note that the operator performs **no** further syncing of namespaced service
+accounts and role bindings.
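To make the described behavior concrete, here is a sketch of the kind of namespaced binding the operator maintains, assuming the default `postgres-pod` names; the namespace and resource names are illustrative, and the actual object is generated from the operator configuration.

```yaml
# sketch only: the operator generates an equivalent binding per watched namespace
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: postgres-pod               # assumed default name
  namespace: my-watched-namespace  # illustrative namespace
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: postgres-pod
subjects:
- kind: ServiceAccount
  name: postgres-pod
  namespace: my-watched-namespace
```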
### Give K8s users access to create/list `postgresqls`
@ -497,37 +480,71 @@ A secret can be pre-provisioned in different ways:
## Setting up the Postgres Operator UI
-With the v1.2 release the Postgres Operator is shipped with a browser-based
+Since the v1.2 release the Postgres Operator is shipped with a browser-based
configuration user interface (UI) that simplifies managing Postgres clusters
-with the operator. The UI runs with Node.js and comes with it's own Docker
-image.
-Run NPM to continuously compile `tags/js` code. Basically, it creates an
-`app.js` file in: `static/build/app.js`
-```
-(cd ui/app && npm start)
-```
-To build the Docker image open a shell and change to the `ui` folder. Then run:
+with the operator.
+### Building the UI image
+The UI runs with Node.js and comes with its own Docker
+image. However, installing Node.js to build the operator UI is not required. It
+is handled via Docker containers when running:
```bash
-docker build -t registry.opensource.zalan.do/acid/postgres-operator-ui:v1.2.0 .
+make docker
```
-Apply all manifests for the `ui/manifests` folder to deploy the Postgres
-Operator UI on K8s. For local tests you don't need the Ingress resource.
+### Configure endpoints and options
+The UI talks to the K8s API server as well as the Postgres Operator [REST API](developer.md#debugging-the-operator).
+K8s API server URLs are loaded from the machine's kubeconfig environment by
+default. Alternatively, a list can also be passed when starting the Python
+application with the `--cluster` option.
+The Operator API endpoint can be configured via the `OPERATOR_API_URL`
+environment variable in the [deployment manifest](../ui/manifests/deployment.yaml#L40).
+You can also expose the operator API through a [service](../manifests/api-service.yaml).
+Some displayed options can be disabled from the UI using simple flags under the
+`OPERATOR_UI_CONFIG` field in the deployment.
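As an illustration, a minimal sketch of the relevant container environment, using the values that appear in the UI deployment manifest touched later in this commit; treat them as defaults to adapt rather than a prescription, and note the JSON flags shown are only a subset.

```yaml
# illustrative env excerpt for the UI container (ui/manifests/deployment.yaml style)
env:
- name: "OPERATOR_API_URL"
  value: "http://postgres-operator:8080"   # operator API service from this commit
- name: "OPERATOR_UI_CONFIG"
  value: |-
    {
      "resources_visible": true,
      "users_visible": true
    }
```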
+### Deploy the UI on K8s
+Now, apply all manifests from the `ui/manifests` folder to deploy the Postgres
+Operator UI on K8s. Replace the image tag in the deployment manifest if you
+want to test the image you've built with `make docker`. Make sure the pods for
+the operator and the UI are both running.
```bash
-kubectl apply -f ui/manifests
+sed -e "s/\(image\:.*\:\).*$/\1$TAG/" manifests/deployment.yaml | kubectl apply -f manifests/
+kubectl get all -l application=postgres-operator-ui
```
-Make sure the pods for the operator and the UI are both running. For local
-testing you need to apply proxying and port forwarding so that the UI can talk
-to the K8s and Postgres Operator REST API. You can use the provided
-`run_local.sh` script for this. Make sure it uses the correct URL to your K8s
-API server, e.g. for minikube it would be `https://192.168.99.100:8443`.
+### Local testing
+For local testing you need to apply K8s proxying and operator pod port
+forwarding so that the UI can talk to the K8s and Postgres Operator REST API.
+The Ingress resource is not needed. You can use the provided `run_local.sh`
+script for this. Make sure that:
+* Python dependencies are installed on your machine
+* the K8s API server URL is set for kubectl commands, e.g. for minikube it would usually be `https://192.168.99.100:8443`.
+* the pod label selectors for port forwarding are correct
+When testing with minikube you have to build the image in its docker environment
+(running `make docker` doesn't do it for you). From the `ui` directory execute:
```bash
+# compile and build operator UI
+make docker
+# build the image in the minikube docker env
+eval $(minikube docker-env)
+docker build -t registry.opensource.zalan.do/acid/postgres-operator-ui:v1.3.0 .
+# apply UI manifests next to a running Postgres Operator
+kubectl apply -f manifests/
+# install python dependencies to run UI locally
+pip3 install -r requirements
./run_local.sh
```

View File

@ -31,9 +31,13 @@ status page.
![pgui-waiting-for-master](diagrams/pgui-waiting-for-master.png "Waiting for master pod") ![pgui-waiting-for-master](diagrams/pgui-waiting-for-master.png "Waiting for master pod")
Usually, the startup should only take up to 1 minute. If you feel the process
-got stuck click on the "Logs" button to inspect the operator logs. From the
-"Status" field in the top menu you can also retrieve the logs and queue of each
-worker the operator is using. The number of concurrent workers can be
+got stuck click on the "Logs" button to inspect the operator logs. If the logs
+look fine, but the UI seems to be stuck, check if you have configured the
+same [cluster name label](../ui/manifests/deployment.yaml#L45) as for the
+[operator](../manifests/configmap.yaml#L13).
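For reference, the two places that must agree are sketched below with the values used elsewhere in this commit; adjust the label key if your installation deviates from the defaults.

```yaml
# ui/manifests/deployment.yaml -- env entry of the UI container
- name: "OPERATOR_CLUSTER_NAME_LABEL"
  value: "cluster-name"
# manifests/configmap.yaml -- must use the same label key
# cluster_name_label: cluster-name
```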
From the "Status" field in the top menu you can also retrieve the logs and queue
of each worker the operator is using. The number of concurrent workers can be
[configured](reference/operator_parameters.md#general). [configured](reference/operator_parameters.md#general).
![pgui-operator-logs](diagrams/pgui-operator-logs.png "Checking operator logs") ![pgui-operator-logs](diagrams/pgui-operator-logs.png "Checking operator logs")

View File

@ -52,6 +52,7 @@ cd postgres-operator
kubectl create -f manifests/configmap.yaml # configuration
kubectl create -f manifests/operator-service-account-rbac.yaml # identity and permissions
kubectl create -f manifests/postgres-operator.yaml # deployment
+kubectl create -f manifests/api-service.yaml # operator API to be used by UI
```
There is a [Kustomization](https://github.com/kubernetes-sigs/kustomize)
@ -104,7 +105,7 @@ kubectl create -f https://operatorhub.io/install/postgres-operator.yaml
This installs the operator in the `operators` namespace. More information can be
found on [operatorhub.io](https://operatorhub.io/operator/postgres-operator).
-## Create a Postgres cluster
+## Check if Postgres Operator is running
Starting the operator may take a few seconds. Check if the operator pod is
running before applying a Postgres cluster manifest.
@ -115,7 +116,61 @@ kubectl get pod -l name=postgres-operator
# if you've created the operator using helm chart
kubectl get pod -l app.kubernetes.io/name=postgres-operator
```
If the operator doesn't get into `Running` state, either check the latest K8s
events of the deployment or pod with `kubectl describe` or inspect the operator
logs:
```bash
kubectl logs "$(kubectl get pod -l name=postgres-operator --output='name')"
```
## Deploy the operator UI
In the following paragraphs we describe how to access and manage PostgreSQL
clusters from the command line with kubectl. But it can also be done from the
browser-based [Postgres Operator UI](operator-ui.md). Before deploying the UI
make sure the operator is running and its REST API is reachable through a
[K8s service](../manifests/api-service.yaml). The URL to this API must be
configured in the [deployment manifest](../ui/manifests/deployment.yaml#L43)
of the UI.
To deploy the UI simply apply all its manifest files or use the UI helm chart:
```bash
# manual deployment
kubectl apply -f ui/manifests/
# or helm chart
helm install postgres-operator-ui ./charts/postgres-operator-ui
```
Like with the operator, check if the UI pod gets into `Running` state:
```bash
# if you've created the operator using yaml manifests
kubectl get pod -l name=postgres-operator-ui
# if you've created the operator using helm chart
kubectl get pod -l app.kubernetes.io/name=postgres-operator-ui
```
You can now access the web interface by port forwarding the UI pod (mind the
label selector) and enter `localhost:8081` in your browser:
```bash
kubectl port-forward "$(kubectl get pod -l name=postgres-operator-ui --output='name')" 8081
```
Available options are explained in detail in the [UI docs](operator-ui.md).
## Create a Postgres cluster
If the operator pod is running it listens to new events regarding `postgresql`
resources. Now, it's time to submit your first Postgres cluster manifest.
```bash
# create a Postgres cluster
kubectl create -f manifests/minimal-postgres-manifest.yaml
```

View File

@ -152,21 +152,22 @@ configuration they are grouped under the `kubernetes` key.
service account used by Patroni running on individual Pods to communicate
with the operator. Required even if native Kubernetes support in Patroni is
not used, because Patroni keeps pod labels in sync with the instance role.
-The default is `operator`.
+The default is `postgres-pod`.
* **pod_service_account_definition**
-The operator tries to create the pod Service Account in the namespace that
-doesn't define such an account using the YAML definition provided by this
-option. If not defined, a simple definition that contains only the name will
-be used. The default is empty.
+On Postgres cluster creation the operator tries to create the service account
+for the Postgres pods if it does not exist in the namespace. The internal
+default service account definition (defines only the name) can be overwritten
+with this parameter. Make sure to provide a valid YAML or JSON string. The
+default is empty.
* **pod_service_account_role_binding_definition**
-This definition must bind pod service account to a role with permission
+This definition must bind the pod service account to a role with permission
sufficient for the pods to start and for Patroni to access K8s endpoints;
service account on its own lacks any such rights starting with K8s v1.8. If
not explicitly defined by the user, a simple definition that binds the
-account to the operator's own 'zalando-postgres-operator' cluster role will
-be used. The default is empty.
+account to the 'postgres-pod' [cluster role](../../manifests/operator-service-account-rbac.yaml#L198)
+will be used. The default is empty.
* **pod_terminate_grace_period**
Postgres pods are [terminated forcefully](https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods)
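As a hedged sketch of the `pod_service_account_definition` override described above: the parameter takes a YAML or JSON string inside the operator ConfigMap, and the account name `my-postgres-pod` below is a made-up example (no override is needed for the defaults).

```yaml
# illustrative ConfigMap excerpt overriding the pod service account definition
data:
  pod_service_account_name: "my-postgres-pod"
  pod_service_account_definition: |
    {
      "apiVersion": "v1",
      "kind": "ServiceAccount",
      "metadata": {
        "name": "my-postgres-pod"
      }
    }
```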

View File

@ -65,7 +65,7 @@ our test cluster.
```bash
# get name of master pod of acid-minimal-cluster
-export PGMASTER=$(kubectl get pods -o jsonpath={.items..metadata.name} -l application=spilo,version=acid-minimal-cluster,spilo-role=master)
+export PGMASTER=$(kubectl get pods -o jsonpath={.items..metadata.name} -l application=spilo,cluster-name=acid-minimal-cluster,spilo-role=master)
# set up port forward
kubectl port-forward $PGMASTER 6432:5432

View File

@ -71,7 +71,7 @@ class EndToEndTestCase(unittest.TestCase):
'''
k8s = self.k8s
-cluster_label = 'version=acid-minimal-cluster'
+cluster_label = 'cluster-name=acid-minimal-cluster'
# enable load balancer services
pg_patch_enable_lbs = {
@ -119,7 +119,7 @@ class EndToEndTestCase(unittest.TestCase):
Lower resource limits below configured minimum and let operator fix it
'''
k8s = self.k8s
-cluster_label = 'version=acid-minimal-cluster'
+cluster_label = 'cluster-name=acid-minimal-cluster'
_, failover_targets = k8s.get_pg_nodes(cluster_label)
# configure minimum boundaries for CPU and memory limits
@ -178,7 +178,7 @@ class EndToEndTestCase(unittest.TestCase):
k8s.create_with_kubectl("manifests/complete-postgres-manifest.yaml")
k8s.wait_for_pod_start("spilo-role=master", self.namespace)
-self.assert_master_is_unique(self.namespace, version="acid-test-cluster")
+self.assert_master_is_unique(self.namespace, "acid-test-cluster")
@timeout_decorator.timeout(TEST_TIMEOUT_SEC)
def test_scaling(self):
@ -186,7 +186,7 @@ class EndToEndTestCase(unittest.TestCase):
Scale up from 2 to 3 and back to 2 pods by updating the Postgres manifest at runtime.
'''
k8s = self.k8s
-labels = "version=acid-minimal-cluster"
+labels = "cluster-name=acid-minimal-cluster"
k8s.wait_for_pg_to_scale(3)
self.assertEqual(3, k8s.count_pods_with_label(labels))
@ -202,7 +202,7 @@ class EndToEndTestCase(unittest.TestCase):
Add taint "postgres=:NoExecute" to node with master. This must cause a failover.
'''
k8s = self.k8s
-cluster_label = 'version=acid-minimal-cluster'
+cluster_label = 'cluster-name=acid-minimal-cluster'
# get nodes of master and replica(s) (expected target of new master)
current_master_node, failover_targets = k8s.get_pg_nodes(cluster_label)
@ -340,9 +340,9 @@ class EndToEndTestCase(unittest.TestCase):
"foo": "bar",
}
self.assertTrue(k8s.check_service_annotations(
-"version=acid-service-annotations,spilo-role=master", annotations))
+"cluster-name=acid-service-annotations,spilo-role=master", annotations))
self.assertTrue(k8s.check_service_annotations(
-"version=acid-service-annotations,spilo-role=replica", annotations))
+"cluster-name=acid-service-annotations,spilo-role=replica", annotations))
# clean up
unpatch_custom_service_annotations = {
@ -352,14 +352,14 @@ class EndToEndTestCase(unittest.TestCase):
}
k8s.update_config(unpatch_custom_service_annotations)
-def assert_master_is_unique(self, namespace='default', version="acid-minimal-cluster"):
+def assert_master_is_unique(self, namespace='default', clusterName="acid-minimal-cluster"):
'''
Check that there is a single pod in the k8s cluster with the label "spilo-role=master"
To be called manually after operations that affect pods
'''
k8s = self.k8s
-labels = 'spilo-role=master,version=' + version
+labels = 'spilo-role=master,cluster-name=' + clusterName
num_of_master_pods = k8s.count_pods_with_label(labels, namespace)
self.assertEqual(num_of_master_pods, 1, "Expected 1 master pod, found {}".format(num_of_master_pods))
@ -471,7 +471,7 @@ class K8s:
_ = self.api.custom_objects_api.patch_namespaced_custom_object(
"acid.zalan.do", "v1", namespace, "postgresqls", "acid-minimal-cluster", body)
-labels = 'version=acid-minimal-cluster'
+labels = 'cluster-name=acid-minimal-cluster'
while self.count_pods_with_label(labels) != number_of_instances:
time.sleep(self.RETRY_TIMEOUT_SEC)
@ -481,7 +481,7 @@ class K8s:
def wait_for_master_failover(self, expected_master_nodes, namespace='default'):
pod_phase = 'Failing over'
new_master_node = ''
-labels = 'spilo-role=master,version=acid-minimal-cluster'
+labels = 'spilo-role=master,cluster-name=acid-minimal-cluster'
while (pod_phase != 'Running') or (new_master_node not in expected_master_nodes):
pods = self.api.core_v1.list_namespaced_pod(namespace, label_selector=labels).items

View File

@ -0,0 +1,12 @@
apiVersion: v1
kind: Service
metadata:
name: postgres-operator
spec:
type: ClusterIP
ports:
- port: 8080
protocol: TCP
targetPort: 8080
selector:
name: postgres-operator

View File

@ -10,7 +10,7 @@ data:
cluster_domain: cluster.local
cluster_history_entries: "1000"
cluster_labels: application:spilo
-cluster_name_label: version
+cluster_name_label: cluster-name
# connection_pool_default_cpu_limit: "1"
# connection_pool_default_cpu_request: "1"
# connection_pool_default_memory_limit: 100m
@ -72,7 +72,7 @@ data:
pod_label_wait_timeout: 10m
pod_management_policy: "ordered_ready"
pod_role_label: spilo-role
-pod_service_account_name: "zalando-postgres-operator"
+pod_service_account_name: "postgres-pod"
pod_terminate_grace_period: 5m
# postgres_superuser_teams: "postgres_superusers"
# protected_role_names: "admin"

View File

@ -4,3 +4,4 @@ resources:
- configmap.yaml
- operator-service-account-rbac.yaml
- postgres-operator.yaml
+- api-service.yaml

View File

@ -1,14 +1,14 @@
apiVersion: v1
kind: ServiceAccount
metadata:
-name: zalando-postgres-operator
+name: postgres-operator
namespace: default
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
-name: zalando-postgres-operator
+name: postgres-operator
rules:
# all verbs allowed for custom operator resources
- apiGroups:
@ -18,7 +18,14 @@ rules:
- postgresqls/status
- operatorconfigurations
verbs:
-- "*"
+- create
+- delete
+- deletecollection
+- get
+- list
+- patch
+- update
+- watch
# to create or get/update CRDs when starting up
- apiGroups:
- apiextensions.k8s.io
@ -48,7 +55,8 @@ rules:
- get
- list
- patch
-- watch # needed if zalando-postgres-operator account is used for pods as well
+- update
+- watch
# to CRUD secrets for database access
- apiGroups:
- ""
@ -96,6 +104,7 @@ rules:
- get - get
- list - list
- watch - watch
- update
- patch - patch
# to resize the filesystem in Spilo pods when increasing volume size # to resize the filesystem in Spilo pods when increasing volume size
- apiGroups: - apiGroups:
@ -127,6 +136,18 @@ rules:
- get - get
- list - list
- patch - patch
# to CRUD cron jobs for logical backups
- apiGroups:
- batch
resources:
- cronjobs
verbs:
- create
- delete
- get
- list
- patch
- update
# to get namespaces operator resources can run in # to get namespaces operator resources can run in
- apiGroups: - apiGroups:
- "" - ""
@ -151,39 +172,82 @@ rules:
verbs:
- get
- create
-# to create role bindings to the operator service account
+# to create role bindings to the postgres-pod service account
- apiGroups:
-- "rbac.authorization.k8s.io"
+- rbac.authorization.k8s.io
resources:
- rolebindings
verbs:
- get
- create
-# to CRUD cron jobs for logical backups
+# to grant privilege to run privileged pods
- apiGroups:
-- batch
+- extensions
resources:
-- cronjobs
+- podsecuritypolicies
+resourceNames:
+- privileged
verbs:
-- create
+- use
-- delete
-- get
-- list
-- patch
-- update
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
-name: zalando-postgres-operator
+name: postgres-operator
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
-name: zalando-postgres-operator
+name: postgres-operator
subjects:
- kind: ServiceAccount
-# note: the cluster role binding needs to be defined
-# for every namespace the operator service account lives in.
-name: zalando-postgres-operator
+name: postgres-operator
namespace: default
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: postgres-pod
rules:
# Patroni needs to watch and manage endpoints
- apiGroups:
- ""
resources:
- endpoints
verbs:
- create
- delete
- deletecollection
- get
- list
- patch
- update
- watch
# Patroni needs to watch pods
- apiGroups:
- ""
resources:
- pods
verbs:
- get
- list
- patch
- update
- watch
# to let Patroni create a headless service
- apiGroups:
- ""
resources:
- services
verbs:
- create
# to run privileged pods
- apiGroups:
- extensions
resources:
- podsecuritypolicies
resourceNames:
- privileged
verbs:
- use

View File

@ -12,7 +12,7 @@ spec:
labels:
name: postgres-operator
spec:
-serviceAccountName: zalando-postgres-operator
+serviceAccountName: postgres-operator
containers:
- name: postgres-operator
image: registry.opensource.zalan.do/acid/postgres-operator:v1.3.1

View File

@ -45,7 +45,7 @@ configuration:
# pod_priority_class_name: ""
pod_role_label: spilo-role
# pod_service_account_definition: ""
-pod_service_account_name: zalando-postgres-operator
+pod_service_account_name: postgres-pod
# pod_service_account_role_binding_definition: ""
pod_terminate_grace_period: 5m
secret_name_template: "{username}.{cluster}.credentials.{tprkind}.{tprgroup}"

View File

@ -11,7 +11,14 @@ rules:
- postgresqls
- postgresqls/status
verbs:
-- "*"
+- create
+- delete
+- deletecollection
+- get
+- list
+- patch
+- update
+- watch
---
apiVersion: rbac.authorization.k8s.io/v1
@ -48,4 +55,3 @@ rules:
- get
- list
- watch

View File

@ -1559,8 +1559,8 @@ func (c *Cluster) generateLogicalBackupJob() (*batchv1beta1.CronJob, error) {
)
labels := map[string]string{
-"version": c.Name,
+c.OpConfig.ClusterNameLabel: c.Name,
"application": "spilo-logical-backup",
}
podAffinityTerm := v1.PodAffinityTerm{
LabelSelector: &metav1.LabelSelector{

View File

@ -161,11 +161,12 @@ func (c *Controller) initPodServiceAccount() {
if c.opConfig.PodServiceAccountDefinition == "" {
c.opConfig.PodServiceAccountDefinition = `
-{ "apiVersion": "v1",
-"kind": "ServiceAccount",
-"metadata": {
-"name": "operator"
-}
+{
+"apiVersion": "v1",
+"kind": "ServiceAccount",
+"metadata": {
+"name": "postgres-pod"
+}
}`
}
@ -175,13 +176,13 @@ func (c *Controller) initPodServiceAccount() {
switch {
case err != nil:
-panic(fmt.Errorf("Unable to parse pod service account definition from the operator config map: %v", err))
+panic(fmt.Errorf("Unable to parse pod service account definition from the operator configuration: %v", err))
case groupVersionKind.Kind != "ServiceAccount":
-panic(fmt.Errorf("pod service account definition in the operator config map defines another type of resource: %v", groupVersionKind.Kind))
+panic(fmt.Errorf("pod service account definition in the operator configuration defines another type of resource: %v", groupVersionKind.Kind))
default:
c.PodServiceAccount = obj.(*v1.ServiceAccount)
if c.PodServiceAccount.Name != c.opConfig.PodServiceAccountName {
-c.logger.Warnf("in the operator config map, the pod service account name %v does not match the name %v given in the account definition; using the former for consistency", c.opConfig.PodServiceAccountName, c.PodServiceAccount.Name)
+c.logger.Warnf("in the operator configuration, the pod service account name %v does not match the name %v given in the account definition; using the former for consistency", c.opConfig.PodServiceAccountName, c.PodServiceAccount.Name)
c.PodServiceAccount.Name = c.opConfig.PodServiceAccountName
}
c.PodServiceAccount.Namespace = ""
@ -223,9 +224,9 @@ func (c *Controller) initRoleBinding() {
switch {
case err != nil:
-panic(fmt.Errorf("Unable to parse the definition of the role binding for the pod service account definition from the operator config map: %v", err))
+panic(fmt.Errorf("unable to parse the definition of the role binding for the pod service account definition from the operator configuration: %v", err))
case groupVersionKind.Kind != "RoleBinding":
-panic(fmt.Errorf("role binding definition in the operator config map defines another type of resource: %v", groupVersionKind.Kind))
+panic(fmt.Errorf("role binding definition in the operator configuration defines another type of resource: %v", groupVersionKind.Kind))
default:
c.PodServiceAccountRoleBinding = obj.(*rbacv1.RoleBinding)
c.PodServiceAccountRoleBinding.Namespace = ""
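For readers skimming the diff: the embedded JSON default above corresponds to the following ServiceAccount (a sketch of the equivalent manifest; the operator strips the namespace and reconciles the name against `pod_service_account_name` before creating it in each watched namespace).

```yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: postgres-pod   # kept in sync with pod_service_account_name
```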

View File

@ -110,7 +110,7 @@ type Config struct {
DockerImage string `name:"docker_image" default:"registry.opensource.zalan.do/acid/spilo-cdp-12:1.6-p16"`
Sidecars map[string]string `name:"sidecar_docker_images"`
// default name `operator` enables backward compatibility with the older ServiceAccountName field
-PodServiceAccountName string `name:"pod_service_account_name" default:"operator"`
+PodServiceAccountName string `name:"pod_service_account_name" default:"postgres-pod"`
// value of this string must be valid JSON or YAML; see initPodServiceAccount
PodServiceAccountDefinition string `name:"pod_service_account_definition" default:""`
PodServiceAccountRoleBindingDefinition string `name:"pod_service_account_role_binding_definition" default:""`

View File

@ -20,7 +20,7 @@ func newsService(ann map[string]string, svcT v1.ServiceType, lbSr []string) *v1.
return svc
}
-func TestServiceAnnotations(t *testing.T) {
+func TestSameService(t *testing.T) {
tests := []struct {
about string
current *v1.Service
@ -267,8 +267,9 @@ func TestServiceAnnotations(t *testing.T) {
},
v1.ServiceTypeLoadBalancer,
[]string{"128.141.0.0/16", "137.138.0.0/16"}),
match: false,
-reason: `new service's annotations doesn't match the current one: Removed 'foo'. Added 'bar' with value 'foo'. 'zalan' changed from 'do' to 'do.com'`,
+// Test just the prefix to avoid flakiness and map sorting
+reason: `new service's annotations doesn't match the current one: Removed 'foo'.`,
},
{
about: "service add annotations",
@ -301,7 +302,7 @@ func TestServiceAnnotations(t *testing.T) {
}
if !match && !tt.match {
if !strings.HasPrefix(reason, tt.reason) {
-t.Errorf("expected reason '%s', found '%s'", tt.reason, reason)
+t.Errorf("expected reason prefix '%s', found '%s'", tt.reason, reason)
return
}
}

View File

@ -1,17 +1,6 @@
.PHONY: clean test appjs docker push mock
-BINARY ?= postgres-operator-ui
-BUILD_FLAGS ?= -v
-CGO_ENABLED ?= 0
-ifeq ($(RACE),1)
-BUILD_FLAGS += -race -a
-CGO_ENABLED=1
-endif
-LOCAL_BUILD_FLAGS ?= $(BUILD_FLAGS)
-LDFLAGS ?= -X=main.version=$(VERSION)
-IMAGE ?= registry.opensource.zalan.do/acid/$(BINARY)
+IMAGE ?= registry.opensource.zalan.do/acid/postgres-operator-ui
VERSION ?= $(shell git describe --tags --always --dirty)
TAG ?= $(VERSION)
GITHEAD = $(shell git rev-parse --short HEAD)
@ -32,8 +21,11 @@ appjs:
docker run $(TTYFLAGS) -u $$(id -u) -v $$(pwd):/workdir -w /workdir/app node:10.1.0-alpine npm run build
docker: appjs
-docker build --build-arg "VERSION=$(VERSION)" -t "$(IMAGE):$(TAG)" .
-@echo 'Docker image $(IMAGE):$(TAG) can now be used.'
+echo `(env)`
+echo "Tag ${TAG}"
+echo "Version ${VERSION}"
+echo "git describe $(shell git describe --tags --always --dirty)"
+docker build --rm -t "$(IMAGE):$(TAG)" -f Dockerfile .
push: docker
docker push "$(IMAGE):$(TAG)"

View File

@ -1,6 +1,6 @@
{
"name": "postgres-operator-ui",
-"version": "1.0.0",
+"version": "1.3.0",
"description": "PostgreSQL Operator UI",
"main": "src/app.js",
"config": {

View File

@ -408,7 +408,7 @@ new
ref='cpuLimit'
type='number'
placeholder='{ cpu.state.limit.initialValue }'
-min='1'
+min='250'
required
value='{ cpu.state.limit.state }'
onchange='{ cpu.state.limit.edit }'
@ -434,7 +434,7 @@ new
onkeyup='{ memory.state.request.edit }'
)
.input-group-addon
-.input-units Gi
+.input-units Mi
.input-group
.input-group-addon.resource-type Limit
@ -442,14 +442,14 @@ new
ref='memoryLimit'
type='number'
placeholder='{ memory.state.limit.initialValue }'
-min='1'
+min='250'
required
value='{ memory.state.limit.state }'
onchange='{ memory.state.limit.edit }'
onkeyup='{ memory.state.limit.edit }'
)
.input-group-addon
-.input-units Gi
+.input-units Mi
.col-lg-3
help-general(config='{ opts.config }')
@ -519,10 +519,10 @@ new
resources:
requests:
cpu: {{ cpu.state.request.state }}m
-memory: {{ memory.state.request.state }}Gi
+memory: {{ memory.state.request.state }}Mi
limits:
cpu: {{ cpu.state.limit.state }}m
-memory: {{ memory.state.limit.state }}Gi{{#if restoring}}
+memory: {{ memory.state.limit.state }}Mi{{#if restoring}}
clone:
cluster: "{{ backup.state.name.state }}"
@ -786,8 +786,8 @@ new
return instance
}
-this.cpu = DynamicResource({ request: 100, limit: 1000 })
-this.memory = DynamicResource({ request: 1, limit: 1 })
+this.cpu = DynamicResource({ request: 100, limit: 500 })
+this.memory = DynamicResource({ request: 100, limit: 500 })
this.backup = DynamicSet({
type: () => 'empty',

View File

@ -76,6 +76,9 @@ postgresql
.alert.alert-danger(if='{ progress.requestStatus !== "OK" }') Create request failed
.alert.alert-success(if='{ progress.requestStatus === "OK" }') Create request successful ({ new Date(progress.createdTimestamp).toLocaleString() })
+.alert.alert-info(if='{ !progress.postgresql }') PostgreSQL cluster manifest pending
+.alert.alert-success(if='{ progress.postgresql }') PostgreSQL cluster manifest created
.alert.alert-info(if='{ !progress.statefulSet }') StatefulSet pending
.alert.alert-success(if='{ progress.statefulSet }') StatefulSet created

View File

@ -45,12 +45,14 @@ postgresqls
thead thead
tr tr
th(style='width: 120px') Team th(style='width: 120px') Team
th(style='width: 130px') Namespace
th Name
th(style='width: 50px') Pods th(style='width: 50px') Pods
th(style='width: 140px') CPU th(style='width: 140px') CPU
th(style='width: 130px') Memory th(style='width: 130px') Memory
th(style='width: 100px') Size th(style='width: 100px') Size
th(style='width: 130px') Namespace th(style='width: 120px') Cost/Month
th Name th(stlye='width: 120px')
tbody tbody
tr( tr(
@ -58,19 +60,21 @@ postgresqls
hidden='{ !namespaced_name.toLowerCase().includes(filter.state.toLowerCase()) }' hidden='{ !namespaced_name.toLowerCase().includes(filter.state.toLowerCase()) }'
) )
td { team } td { team }
td { nodes }
td { cpu } / { cpu_limit }
td { memory } / { memory_limit }
td { volume_size }
td(style='white-space: pre') td(style='white-space: pre')
| { namespace } | { namespace }
td td
a( a(
href='/#/status/{ cluster_path(this) }' href='/#/status/{ cluster_path(this) }'
) )
| { name } | { name }
td { nodes }
td { cpu } / { cpu_limit }
td { memory } / { memory_limit }
td { volume_size }
td { calcCosts(nodes, cpu, memory, volume_size) }$
td
.btn-group.pull-right( .btn-group.pull-right(
aria-label='Cluster { qname } actions' aria-label='Cluster { qname } actions'
@ -124,12 +128,14 @@ postgresqls
thead thead
tr tr
th(style='width: 120px') Team th(style='width: 120px') Team
th(style='width: 130px') Namespace
th Name
th(style='width: 50px') Pods th(style='width: 50px') Pods
th(style='width: 140px') CPU th(style='width: 140px') CPU
th(style='width: 130px') Memory th(style='width: 130px') Memory
th(style='width: 100px') Size th(style='width: 100px') Size
th(style='width: 130px') Namespace th(style='width: 120px') Cost/Month
th Name th(stlye='width: 120px')
tbody tbody
tr( tr(
@ -137,20 +143,20 @@ postgresqls
hidden='{ !namespaced_name.toLowerCase().includes(filter.state.toLowerCase()) }' hidden='{ !namespaced_name.toLowerCase().includes(filter.state.toLowerCase()) }'
) )
td { team } td { team }
td { nodes }
td { cpu } / { cpu_limit }
td { memory } / { memory_limit }
td { volume_size }
td(style='white-space: pre') td(style='white-space: pre')
| { namespace } | { namespace }
td td
a( a(
href='/#/status/{ cluster_path(this) }' href='/#/status/{ cluster_path(this) }'
) )
| { name } | { name }
td { nodes }
td { cpu } / { cpu_limit }
td { memory } / { memory_limit }
td { volume_size }
td { calcCosts(nodes, cpu, memory, volume_size) }$
td
.btn-group.pull-right( .btn-group.pull-right(
aria-label='Cluster { qname } actions' aria-label='Cluster { qname } actions'
@ -223,6 +229,45 @@ postgresqls
+ '/' + encodeURI(cluster.name) + '/' + encodeURI(cluster.name)
) )
const calcCosts = this.calcCosts = (nodes, cpu, memory, disk) => {
  // estimated monthly price: pods * (cores + memory + volume), each term
  // weighted with the per-unit rates delivered via the UI config
  const costs = nodes * (toCores(cpu) * opts.config.cost_core + toMemory(memory) * opts.config.cost_memory + toDisk(disk) * opts.config.cost_ebs)
  return costs.toFixed(2)
}

// volume size: strip the "Gi" suffix and return plain gigabytes
const toDisk = this.toDisk = value => {
  if(value.endsWith("Gi")) {
    value = value.substring(0, value.length-2)
    value = Number(value)
    return value
  }
  return value
}

// memory: normalize "Mi"/"Gi" quantities to gigabytes (1000 Mi ~ 1 Gi)
const toMemory = this.toMemory = value => {
  if (value.endsWith("Mi")) {
    value = value.substring(0, value.length-2)
    value = Number(value) / 1000.
    return value
  }
  else if(value.endsWith("Gi")) {
    value = value.substring(0, value.length-2)
    value = Number(value)
    return value
  }
  return value
}

// CPU: convert millicore values like "500m" to cores
const toCores = this.toCores = value => {
  if (value.endsWith("m")) {
    value = value.substring(0, value.length-1)
    value = Number(value) / 1000.
    return value
  }
  return value
}
this.on('mount', () => this.on('mount', () =>
jQuery jQuery
.get('/postgresqls') .get('/postgresqls')

View File

@ -4,23 +4,23 @@ metadata:
name: "postgres-operator-ui" name: "postgres-operator-ui"
namespace: "default" namespace: "default"
labels: labels:
application: "postgres-operator-ui" name: "postgres-operator-ui"
team: "acid" team: "acid"
spec: spec:
replicas: 1 replicas: 1
selector: selector:
matchLabels: matchLabels:
application: "postgres-operator-ui" name: "postgres-operator-ui"
template: template:
metadata: metadata:
labels: labels:
application: "postgres-operator-ui" name: "postgres-operator-ui"
team: "acid" team: "acid"
spec: spec:
serviceAccountName: postgres-operator-ui serviceAccountName: postgres-operator-ui
containers: containers:
- name: "service" - name: "service"
image: registry.opensource.zalan.do/acid/postgres-operator-ui:v1.2.0 image: registry.opensource.zalan.do/acid/postgres-operator-ui:v1.3.0
ports: ports:
- containerPort: 8081 - containerPort: 8081
protocol: "TCP" protocol: "TCP"
@ -32,8 +32,8 @@ spec:
timeoutSeconds: 1 timeoutSeconds: 1
resources: resources:
limits: limits:
cpu: "300m" cpu: "200m"
memory: "3000Mi" memory: "200Mi"
requests: requests:
cpu: "100m" cpu: "100m"
memory: "100Mi" memory: "100Mi"
@ -41,7 +41,9 @@ spec:
- name: "APP_URL" - name: "APP_URL"
value: "http://localhost:8081" value: "http://localhost:8081"
- name: "OPERATOR_API_URL" - name: "OPERATOR_API_URL"
value: "http://localhost:8080" value: "http://postgres-operator:8080"
- name: "OPERATOR_CLUSTER_NAME_LABEL"
value: "cluster-name"
- name: "TARGET_NAMESPACE" - name: "TARGET_NAMESPACE"
value: "default" value: "default"
- name: "TEAMS" - name: "TEAMS"
@ -60,9 +62,14 @@ spec:
"replica_load_balancer_visible": true, "replica_load_balancer_visible": true,
"resources_visible": true, "resources_visible": true,
"users_visible": true, "users_visible": true,
"cost_ebs": 0.119,
"cost_core": 0.0575,
"cost_memory": 0.014375,
"postgresql_versions": [ "postgresql_versions": [
"12",
"11", "11",
"10", "10",
"9.6" "9.6",
"9.5"
] ]
} }

View File

@ -61,7 +61,5 @@ roleRef:
name: postgres-operator-ui name: postgres-operator-ui
subjects: subjects:
- kind: ServiceAccount - kind: ServiceAccount
# note: the cluster role binding needs to be defined
# for every namespace the operator-ui service account lives in.
name: postgres-operator-ui name: postgres-operator-ui
namespace: default namespace: default

View File

@ -76,6 +76,7 @@ ACCESS_TOKEN_URL = getenv('ACCESS_TOKEN_URL')
TOKENINFO_URL = getenv('OAUTH2_TOKEN_INFO_URL') TOKENINFO_URL = getenv('OAUTH2_TOKEN_INFO_URL')
OPERATOR_API_URL = getenv('OPERATOR_API_URL', 'http://postgres-operator') OPERATOR_API_URL = getenv('OPERATOR_API_URL', 'http://postgres-operator')
OPERATOR_CLUSTER_NAME_LABEL = getenv('OPERATOR_CLUSTER_NAME_LABEL', 'cluster-name')
OPERATOR_UI_CONFIG = getenv('OPERATOR_UI_CONFIG', '{}') OPERATOR_UI_CONFIG = getenv('OPERATOR_UI_CONFIG', '{}')
OPERATOR_UI_MAINTENANCE_CHECK = getenv('OPERATOR_UI_MAINTENANCE_CHECK', '{}') OPERATOR_UI_MAINTENANCE_CHECK = getenv('OPERATOR_UI_MAINTENANCE_CHECK', '{}')
READ_ONLY_MODE = getenv('READ_ONLY_MODE', False) in [True, 'true'] READ_ONLY_MODE = getenv('READ_ONLY_MODE', False) in [True, 'true']
@ -84,6 +85,13 @@ SUPERUSER_TEAM = getenv('SUPERUSER_TEAM', 'acid')
TARGET_NAMESPACE = getenv('TARGET_NAMESPACE') TARGET_NAMESPACE = getenv('TARGET_NAMESPACE')
GOOGLE_ANALYTICS = getenv('GOOGLE_ANALYTICS', False) GOOGLE_ANALYTICS = getenv('GOOGLE_ANALYTICS', False)
# storage pricing, e.g. https://aws.amazon.com/ebs/pricing/
COST_EBS = float(getenv('COST_EBS', 0.119))  # USD per GiB-month
# compute pricing, e.g. https://www.ec2instances.info/?region=eu-central-1&selected=m5.2xlarge
COST_CORE = 30.5 * 24 * float(getenv('COST_CORE', 0.0575))  # USD per core-hour (m5.2xlarge / 8 cores), scaled to one month
COST_MEMORY = 30.5 * 24 * float(getenv('COST_MEMORY', 0.014375))  # USD per GiB-hour (m5.2xlarge / 32 GiB), scaled to one month
WALE_S3_ENDPOINT = getenv( WALE_S3_ENDPOINT = getenv(
'WALE_S3_ENDPOINT', 'WALE_S3_ENDPOINT',
'https+path://s3-eu-central-1.amazonaws.com:443', 'https+path://s3-eu-central-1.amazonaws.com:443',
@ -293,6 +301,9 @@ DEFAULT_UI_CONFIG = {
'dns_format_string': '{0}.{1}.{2}', 'dns_format_string': '{0}.{1}.{2}',
'pgui_link': '', 'pgui_link': '',
'static_network_whitelist': {}, 'static_network_whitelist': {},
'cost_ebs': COST_EBS,
'cost_core': COST_CORE,
'cost_memory': COST_MEMORY
} }
@ -1003,6 +1014,7 @@ def main(port, secret_key, debug, clusters: list):
logger.info(f'App URL: {APP_URL}') logger.info(f'App URL: {APP_URL}')
logger.info(f'Authorize URL: {AUTHORIZE_URL}') logger.info(f'Authorize URL: {AUTHORIZE_URL}')
logger.info(f'Operator API URL: {OPERATOR_API_URL}') logger.info(f'Operator API URL: {OPERATOR_API_URL}')
logger.info(f'Operator cluster name label: {OPERATOR_CLUSTER_NAME_LABEL}')
logger.info(f'Readonly mode: {"enabled" if READ_ONLY_MODE else "disabled"}') # noqa logger.info(f'Readonly mode: {"enabled" if READ_ONLY_MODE else "disabled"}') # noqa
logger.info(f'Spilo S3 backup bucket: {SPILO_S3_BACKUP_BUCKET}') logger.info(f'Spilo S3 backup bucket: {SPILO_S3_BACKUP_BUCKET}')
logger.info(f'Spilo S3 backup prefix: {SPILO_S3_BACKUP_PREFIX}') logger.info(f'Spilo S3 backup prefix: {SPILO_S3_BACKUP_PREFIX}')
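
The pricing constants above scale hourly compute prices to an average month of 30.5 days (732 hours); the EBS price is already quoted per GiB-month. A short sketch of how the defaults resolve and how they could be overridden through the environment; the override numbers in the comment are placeholders, not recommendations:

from os import getenv

HOURS_PER_MONTH = 30.5 * 24  # 732 hours

# defaults as shipped: m5.2xlarge on-demand price split across 8 cores / 32 GiB
cost_core = HOURS_PER_MONTH * float(getenv('COST_CORE', 0.0575))        # ~42.09 $/core-month
cost_memory = HOURS_PER_MONTH * float(getenv('COST_MEMORY', 0.014375))  # ~10.52 $/GiB-month
cost_ebs = float(getenv('COST_EBS', 0.119))                             # 0.119 $/GiB-month

# e.g. export COST_CORE=0.048 COST_MEMORY=0.012 COST_EBS=0.10 before starting
# the UI to plug in prices for a different region (placeholder numbers)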

View File

@ -3,7 +3,7 @@ from datetime import datetime, timezone
from furl import furl from furl import furl
from json import dumps from json import dumps
from logging import getLogger from logging import getLogger
from os import environ from os import environ, getenv
from requests import Session from requests import Session
from urllib.parse import urljoin from urllib.parse import urljoin
from uuid import UUID from uuid import UUID
@ -16,6 +16,8 @@ logger = getLogger(__name__)
session = Session() session = Session()
OPERATOR_CLUSTER_NAME_LABEL = getenv('OPERATOR_CLUSTER_NAME_LABEL', 'cluster-name')
def request(cluster, path, **kwargs): def request(cluster, path, **kwargs):
if 'timeout' not in kwargs: if 'timeout' not in kwargs:
@ -137,7 +139,7 @@ def read_pods(cluster, namespace, spilo_cluster):
cluster=cluster, cluster=cluster,
resource_type='pods', resource_type='pods',
namespace=namespace, namespace=namespace,
label_selector={'version': spilo_cluster}, label_selector={OPERATOR_CLUSTER_NAME_LABEL: spilo_cluster},
) )
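
The pod lookup now filters on a configurable label (default cluster-name) rather than the hard-coded version label, so the UI keeps working with operator versions that tag pods differently. For comparison, a minimal sketch of the same selector using the official Kubernetes Python client; the UI itself goes through its own request helper above, so this is only an illustration:

from os import getenv
from kubernetes import client, config

OPERATOR_CLUSTER_NAME_LABEL = getenv('OPERATOR_CLUSTER_NAME_LABEL', 'cluster-name')

def read_pods(namespace: str, spilo_cluster: str):
    # inside a pod, use config.load_incluster_config() instead
    config.load_kube_config()
    v1 = client.CoreV1Api()
    selector = f'{OPERATOR_CLUSTER_NAME_LABEL}={spilo_cluster}'
    return v1.list_namespaced_pod(namespace, label_selector=selector).items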

View File

@ -1,14 +1,15 @@
Flask-OAuthlib==0.9.5 Flask-OAuthlib==0.9.5
Flask==1.0.2 Flask==1.1.1
backoff==1.5.0 backoff==1.8.1
boto3==1.5.14 boto3==1.10.4
boto==2.48.0 boto==2.49.0
click==6.7 click==6.7
furl==1.0.1 furl==1.0.2
gevent==1.2.2 gevent==1.2.2
jq==0.1.6 jq==0.1.6
json_delta>=2.0 json_delta>=2.0
kubernetes==3.0.0 kubernetes==3.0.0
requests==2.20.1 requests==2.22.0
stups-tokens>=1.1.19 stups-tokens>=1.1.19
wal_e==1.1.0 wal_e==1.1.0
werkzeug==0.16.1

View File

@ -19,10 +19,15 @@ default_operator_ui_config='{
"nat_gateways_visible": false, "nat_gateways_visible": false,
"resources_visible": true, "resources_visible": true,
"users_visible": true, "users_visible": true,
"cost_ebs": 0.119,
"cost_core": 0.0575,
"cost_memory": 0.014375,
"postgresql_versions": [ "postgresql_versions": [
"12",
"11", "11",
"10", "10",
"9.6" "9.6",
"9.5"
], ],
"static_network_whitelist": { "static_network_whitelist": {
"localhost": ["172.0.0.1/32"] "localhost": ["172.0.0.1/32"]