merge with master and resolve conflicts
commit b38d72d9a2
@@ -1,7 +1,7 @@
apiVersion: v1
name: postgres-operator-ui
version: 0.1.0
appVersion: 1.2.0
version: 1.4.0
appVersion: 1.4.0
home: https://github.com/zalando/postgres-operator
description: Postgres Operator UI provides a graphical interface for a convenient database-as-a-service user experience
keywords:

@@ -12,6 +12,8 @@ keywords:
- patroni
- spilo
maintainers:
- name: Zalando
  email: opensource@zalando.de
- name: siku4
  email: sk@sik-net.de
sources:
@@ -0,0 +1,29 @@
apiVersion: v1
entries:
  postgres-operator-ui:
  - apiVersion: v1
    appVersion: 1.4.0
    created: "2020-02-24T15:32:47.610967635+01:00"
    description: Postgres Operator UI provides a graphical interface for a convenient
      database-as-a-service user experience
    digest: 00e0eff7056d56467cd5c975657fbb76c8d01accd25a4b7aca81bc42aeac961d
    home: https://github.com/zalando/postgres-operator
    keywords:
    - postgres
    - operator
    - ui
    - cloud-native
    - patroni
    - spilo
    maintainers:
    - email: opensource@zalando.de
      name: Zalando
    - email: sk@sik-net.de
      name: siku4
    name: postgres-operator-ui
    sources:
    - https://github.com/zalando/postgres-operator
    urls:
    - postgres-operator-ui-1.4.0.tgz
    version: 1.4.0
generated: "2020-02-24T15:32:47.610348278+01:00"

Binary file not shown.
@@ -8,7 +8,7 @@ replicaCount: 1
image:
  registry: registry.opensource.zalan.do
  repository: acid/postgres-operator-ui
  tag: v1.2.0
  tag: v1.4.0
  pullPolicy: "IfNotPresent"

rbac:
@@ -1,7 +1,7 @@
apiVersion: v1
name: postgres-operator
version: 1.3.0
appVersion: 1.3.0
version: 1.4.0
appVersion: 1.4.0
home: https://github.com/zalando/postgres-operator
description: Postgres Operator creates and manages PostgreSQL clusters running in Kubernetes
keywords:
@@ -94,7 +94,7 @@ spec:
            s3_secret_access_key:
              type: string
            s3_force_path_style:
              type: string
              type: boolean
            s3_wal_path:
              type: string
            timestamp:
@@ -1,9 +1,31 @@
apiVersion: v1
entries:
  postgres-operator:
  - apiVersion: v1
    appVersion: 1.4.0
    created: "2020-02-20T17:39:25.443276193+01:00"
    description: Postgres Operator creates and manages PostgreSQL clusters running
      in Kubernetes
    digest: b93ccde5581deb8ed0857136b8ce74ca3f1b7240438fa4415f705764a1300bed
    home: https://github.com/zalando/postgres-operator
    keywords:
    - postgres
    - operator
    - cloud-native
    - patroni
    - spilo
    maintainers:
    - email: opensource@zalando.de
      name: Zalando
    name: postgres-operator
    sources:
    - https://github.com/zalando/postgres-operator
    urls:
    - postgres-operator-1.4.0.tgz
    version: 1.4.0
  - apiVersion: v1
    appVersion: 1.3.0
    created: "2019-12-17T12:58:49.477140129+01:00"
    created: "2020-02-20T17:39:25.441532163+01:00"
    description: Postgres Operator creates and manages PostgreSQL clusters running
      in Kubernetes
    digest: 7e788fd37daec76a01f6d6f9fe5be5b54f5035e4eba0041e80a760d656537325

@@ -25,7 +47,7 @@ entries:
    version: 1.3.0
  - apiVersion: v1
    appVersion: 1.2.0
    created: "2019-12-17T12:58:49.475844233+01:00"
    created: "2020-02-20T17:39:25.440278302+01:00"
    description: Postgres Operator creates and manages PostgreSQL clusters running
      in Kubernetes
    digest: d10710c7cf19f4e266e7704f5d1e98dcfc61bee3919522326c35c22ca7d2f2bf

@@ -47,4 +69,4 @@ entries:
    urls:
    - postgres-operator-1.2.0.tgz
    version: 1.2.0
generated: "2019-12-17T12:58:49.474719294+01:00"
generated: "2020-02-20T17:39:25.439168098+01:00"

Binary file not shown.
@@ -63,9 +63,9 @@ rules:
  - secrets
  verbs:
  - create
  - update
  - delete
  - get
  - update
  # to check nodes for node readiness label
- apiGroups:
  - ""
@@ -8,6 +8,7 @@ metadata:
    app.kubernetes.io/instance: {{ .Release.Name }}
  name: {{ template "postgres-operator.fullname" . }}
spec:
  type: ClusterIP
  ports:
  - port: 8080
    protocol: TCP

@@ -15,7 +16,3 @@ spec:
  selector:
    app.kubernetes.io/instance: {{ .Release.Name }}
    app.kubernetes.io/name: {{ template "postgres-operator.name" . }}
  sessionAffinity: None
  type: ClusterIP
status:
  loadBalancer: {}
@@ -1,7 +1,7 @@
image:
  registry: registry.opensource.zalan.do
  repository: acid/postgres-operator
  tag: v1.3.1
  tag: v1.4.0
  pullPolicy: "IfNotPresent"

  # Optionally specify an array of imagePullSecrets.

@@ -24,7 +24,7 @@ configGeneral:
  # etcd connection string for Patroni. Empty uses K8s-native DCS.
  etcd_host: ""
  # Spilo docker image
  docker_image: registry.opensource.zalan.do/acid/spilo-cdp-12:1.6-p16
  docker_image: registry.opensource.zalan.do/acid/spilo-12:1.6-p2
  # max number of instances in Postgres cluster. -1 = no limit
  min_instances: -1
  # min number of instances in Postgres cluster. -1 = no limit
@@ -1,7 +1,7 @@
image:
  registry: registry.opensource.zalan.do
  repository: acid/postgres-operator
  tag: v1.3.1
  tag: v1.4.0
  pullPolicy: "IfNotPresent"

  # Optionally specify an array of imagePullSecrets.

@@ -24,7 +24,7 @@ configGeneral:
  # etcd connection string for Patroni. Empty uses K8s-native DCS.
  etcd_host: ""
  # Spilo docker image
  docker_image: registry.opensource.zalan.do/acid/spilo-cdp-12:1.6-p16
  docker_image: registry.opensource.zalan.do/acid/spilo-12:1.6-p2
  # max number of instances in Postgres cluster. -1 = no limit
  min_instances: "-1"
  # min number of instances in Postgres cluster. -1 = no limit

@@ -55,7 +55,7 @@ configKubernetes:
  # additional labels assigned to the cluster objects
  cluster_labels: application:spilo
  # label assigned to Kubernetes objects created by the operator
  cluster_name_label: version
  cluster_name_label: cluster-name
  # annotations attached to each database pod
  # custom_pod_annotations: "keya:valuea,keyb:valueb"
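Note: if you install from this chart, the Spilo image bump can also be overridden at install time. A hedged sketch (the `configGeneral.docker_image` values path is taken from the block above; release and chart path are examples, not part of this commit):

```bash
helm install postgres-operator ./charts/postgres-operator \
  --set configGeneral.docker_image=registry.opensource.zalan.do/acid/spilo-12:1.6-p2
```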
@@ -66,20 +66,13 @@ pipeline:
    - desc: 'Build and push Docker image'
      cmd: |
        cd ui
        image_base='registry-write.opensource.zalan.do/acid/postgres-operator-ui'
        if [[ "${CDP_TARGET_BRANCH}" == 'master' && -z "${CDP_PULL_REQUEST_NUMBER}" ]]
        IS_PR_BUILD=${CDP_PULL_REQUEST_NUMBER+"true"}
        if [[ ${CDP_TARGET_BRANCH} == "master" && ${IS_PR_BUILD} != "true" ]]
        then
          image="${image_base}"
          IMAGE=registry-write.opensource.zalan.do/acid/postgres-operator-ui
        else
          image="${image_base}-test"
          IMAGE=registry-write.opensource.zalan.do/acid/postgres-operator-ui-test
        fi
        image_with_tag="${image}:c${CDP_BUILD_VERSION}"

        if docker pull "${image}"
        then
          docker build --cache-from="${image}" -t "${image_with_tag}" .
        else
          docker build -t "${image_with_tag}" .
        fi

        docker push "${image_with_tag}"
        export IMAGE
        make docker
        make push
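Note: the new pipeline step leans on the `${VAR+word}` form of shell parameter expansion. A minimal sketch of how `IS_PR_BUILD` behaves, runnable in any POSIX-like shell (the variable values are made up):

```bash
# ${CDP_PULL_REQUEST_NUMBER+"true"} expands to "true" whenever the variable is set
# (even to an empty string) and to nothing when it is unset.
unset CDP_PULL_REQUEST_NUMBER
IS_PR_BUILD=${CDP_PULL_REQUEST_NUMBER+"true"}
echo "unset -> '${IS_PR_BUILD}'"   # prints ''

CDP_PULL_REQUEST_NUMBER=42
IS_PR_BUILD=${CDP_PULL_REQUEST_NUMBER+"true"}
echo "set   -> '${IS_PR_BUILD}'"   # prints 'true'
```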
@@ -11,11 +11,11 @@ switchover (planned failover) of the master to the Pod with new minor version.
The switch should usually take less than 5 seconds, still clients have to
reconnect.

Major version upgrades are supported via [cloning](user.md#clone-directly). The
new cluster manifest must have a higher `version` string than the source cluster
and will be created from a basebackup. Depending of the cluster size, downtime
in this case can be significant as writes to the database should be stopped and
all WAL files should be archived first before cloning is started.
Major version upgrades are supported via [cloning](user.md#how-to-clone-an-existing-postgresql-cluster).
The new cluster manifest must have a higher `version` string than the source
cluster and will be created from a basebackup. Depending of the cluster size,
downtime in this case can be significant as writes to the database should be
stopped and all WAL files should be archived first before cloning is started.

Note, that simply changing the version string in the `postgresql` manifest does
not work at present and leads to errors. Neither Patroni nor Postgres Operator
@@ -481,37 +481,71 @@ A secret can be pre-provisioned in different ways:

## Setting up the Postgres Operator UI

With the v1.2 release the Postgres Operator is shipped with a browser-based
Since the v1.2 release the Postgres Operator is shipped with a browser-based
configuration user interface (UI) that simplifies managing Postgres clusters
with the operator. The UI runs with Node.js and comes with it's own Docker
image.
with the operator.

Run NPM to continuously compile `tags/js` code. Basically, it creates an
`app.js` file in: `static/build/app.js`
### Building the UI image

```
(cd ui/app && npm start)
```

To build the Docker image open a shell and change to the `ui` folder. Then run:
The UI runs with Node.js and comes with it's own Docker
image. However, installing Node.js to build the operator UI is not required. It
is handled via Docker containers when running:

```bash
docker build -t registry.opensource.zalan.do/acid/postgres-operator-ui:v1.2.0 .
make docker
```

Apply all manifests for the `ui/manifests` folder to deploy the Postgres
Operator UI on K8s. For local tests you don't need the Ingress resource.
### Configure endpoints and options

The UI talks to the K8s API server as well as the Postgres Operator [REST API](developer.md#debugging-the-operator).
K8s API server URLs are loaded from the machine's kubeconfig environment by
default. Alternatively, a list can also be passed when starting the Python
application with the `--cluster` option.

The Operator API endpoint can be configured via the `OPERATOR_API_URL`
environment variables in the [deployment manifest](../ui/manifests/deployment.yaml#L40).
You can also expose the operator API through a [service](../manifests/api-service.yaml).
Some displayed options can be disabled from UI using simple flags under the
`OPERATOR_UI_CONFIG` field in the deployment.

### Deploy the UI on K8s

Now, apply all manifests from the `ui/manifests` folder to deploy the Postgres
Operator UI on K8s. Replace the image tag in the deployment manifest if you
want to test the image you've built with `make docker`. Make sure the pods for
the operator and the UI are both running.

```bash
kubectl apply -f ui/manifests
sed -e "s/\(image\:.*\:\).*$/\1$TAG/" manifests/deployment.yaml | kubectl apply -f manifests/
kubectl get all -l application=postgres-operator-ui
```

Make sure the pods for the operator and the UI are both running. For local
testing you need to apply proxying and port forwarding so that the UI can talk
to the K8s and Postgres Operator REST API. You can use the provided
`run_local.sh` script for this. Make sure it uses the correct URL to your K8s
API server, e.g. for minikube it would be `https://192.168.99.100:8443`.
### Local testing

For local testing you need to apply K8s proxying and operator pod port
forwarding so that the UI can talk to the K8s and Postgres Operator REST API.
The Ingress resource is not needed. You can use the provided `run_local.sh`
script for this. Make sure that:

* Python dependencies are installed on your machine
* the K8s API server URL is set for kubectl commands, e.g. for minikube it would usually be `https://192.168.99.100:8443`.
* the pod label selectors for port forwarding are correct

When testing with minikube you have to build the image in its docker environment
(running `make docker` doesn't do it for you). From the `ui` directory execute:

```bash
# compile and build operator UI
make docker

# build in image in minikube docker env
eval $(minikube docker-env)
docker build -t registry.opensource.zalan.do/acid/postgres-operator-ui:v1.3.0 .

# apply UI manifests next to a running Postgres Operator
kubectl apply -f manifests/

# install python dependencies to run UI locally
pip3 install -r requirements
./run_local.sh
```
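Note: as a rough orientation for the "Local testing" steps above, the proxying and port forwarding that `run_local.sh` takes care of boil down to something like the following sketch (label selector and port numbers are assumptions based on the defaults elsewhere in this diff, not a copy of the script):

```bash
kubectl proxy --port=8001 &   # makes the K8s API reachable at http://127.0.0.1:8001
kubectl port-forward "$(kubectl get pod -l name=postgres-operator -o name)" 8080 &   # operator REST API on localhost:8080
```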
@@ -31,9 +31,13 @@ status page.

Usually, the startup should only take up to 1 minute. If you feel the process
got stuck click on the "Logs" button to inspect the operator logs. From the
"Status" field in the top menu you can also retrieve the logs and queue of each
worker the operator is using. The number of concurrent workers can be
got stuck click on the "Logs" button to inspect the operator logs. If the logs
look fine, but the UI seems to got stuck, check if you are have configured the
same [cluster name label](../ui/manifests/deployment.yaml#L45) like for the
[operator](../manifests/configmap.yaml#L13).

From the "Status" field in the top menu you can also retrieve the logs and queue
of each worker the operator is using. The number of concurrent workers can be
[configured](reference/operator_parameters.md#general).
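Note: the new troubleshooting hint boils down to "the UI and the operator must agree on the cluster name label". A quick way to compare the two settings (object names are taken from the default manifests in this repository and may differ in your deployment):

```bash
# label key the operator writes on cluster objects
kubectl get configmap postgres-operator -o jsonpath='{.data.cluster_name_label}'; echo
# label key the UI filters on
kubectl get deployment postgres-operator-ui \
  -o jsonpath='{.spec.template.spec.containers[0].env[?(@.name=="OPERATOR_CLUSTER_NAME_LABEL")].value}'; echo
```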
@@ -52,6 +52,7 @@ cd postgres-operator
kubectl create -f manifests/configmap.yaml  # configuration
kubectl create -f manifests/operator-service-account-rbac.yaml  # identity and permissions
kubectl create -f manifests/postgres-operator.yaml  # deployment
kubectl create -f manifests/api-service.yaml  # operator API to be used by UI
```

There is a [Kustomization](https://github.com/kubernetes-sigs/kustomize)
@@ -104,7 +105,7 @@ kubectl create -f https://operatorhub.io/install/postgres-operator.yaml
This installs the operator in the `operators` namespace. More information can be
found on [operatorhub.io](https://operatorhub.io/operator/postgres-operator).

## Create a Postgres cluster
## Check if Postgres Operator is running

Starting the operator may take a few seconds. Check if the operator pod is
running before applying a Postgres cluster manifest.
@@ -115,7 +116,61 @@ kubectl get pod -l name=postgres-operator

# if you've created the operator using helm chart
kubectl get pod -l app.kubernetes.io/name=postgres-operator
```

If the operator doesn't get into `Running` state, either check the latest K8s
events of the deployment or pod with `kubectl describe` or inspect the operator
logs:

```bash
kubectl logs "$(kubectl get pod -l name=postgres-operator --output='name')"
```

## Deploy the operator UI

In the following paragraphs we describe how to access and manage PostgreSQL
clusters from the command line with kubectl. But it can also be done from the
browser-based [Postgres Operator UI](operator-ui.md). Before deploying the UI
make sure the operator is running and its REST API is reachable through a
[K8s service](../manifests/api-service.yaml). The URL to this API must be
configured in the [deployment manifest](../ui/manifests/deployment.yaml#L43)
of the UI.

To deploy the UI simply apply all its manifests files or use the UI helm chart:

```bash
# manual deployment
kubectl apply -f ui/manifests/

# or helm chart
helm install postgres-operator-ui ./charts/postgres-operator-ui
```

Like with the operator, check if the UI pod gets into `Running` state:

```bash
# if you've created the operator using yaml manifests
kubectl get pod -l name=postgres-operator-ui

# if you've created the operator using helm chart
kubectl get pod -l app.kubernetes.io/name=postgres-operator-ui
```

You can now access the web interface by port forwarding the UI pod (mind the
label selector) and enter `localhost:8081` in your browser:

```bash
kubectl port-forward "$(kubectl get pod -l name=postgres-operator-ui --output='name')" 8081
```

Available option are explained in detail in the [UI docs](operator-ui.md).

## Create a Postgres cluster

If the operator pod is running it listens to new events regarding `postgresql`
resources. Now, it's time to submit your first Postgres cluster manifest.

```bash
# create a Postgres cluster
kubectl create -f manifests/minimal-postgres-manifest.yaml
```
@@ -110,8 +110,10 @@ Those are top-level keys, containing both leaf keys and groups.

* **min_instances**
  operator will run at least the number of instances for any given Postgres
  cluster equal to the value of this parameter. When `-1` is specified, no
  limits are applied. The default is `-1`.
  cluster equal to the value of this parameter. Standby clusters can still run
  with `numberOfInstances: 1` as this is the [recommended setup](../user.md#setting-up-a-standby-cluster).
  When `-1` is specified for `min_instances`, no limits are applied. The default
  is `-1`.

* **resync_period**
  period between consecutive sync requests. The default is `30m`.
docs/user.md
@@ -65,7 +65,7 @@ our test cluster.

```bash
# get name of master pod of acid-minimal-cluster
export PGMASTER=$(kubectl get pods -o jsonpath={.items..metadata.name} -l application=spilo,version=acid-minimal-cluster,spilo-role=master)
export PGMASTER=$(kubectl get pods -o jsonpath={.items..metadata.name} -l application=spilo,cluster-name=acid-minimal-cluster,spilo-role=master)

# set up port forward
kubectl port-forward $PGMASTER 6432:5432
@@ -254,29 +254,22 @@ spec:

## How to clone an existing PostgreSQL cluster

You can spin up a new cluster as a clone of the existing one, using a clone
You can spin up a new cluster as a clone of the existing one, using a `clone`
section in the spec. There are two options here:

* Clone directly from a source cluster using `pg_basebackup`
* Clone from an S3 bucket
* Clone from an S3 bucket (recommended)
* Clone directly from a source cluster

### Clone directly

```yaml
spec:
  clone:
    cluster: "acid-batman"
```

Here `cluster` is a name of a source cluster that is going to be cloned. The
cluster to clone is assumed to be running and the clone procedure invokes
`pg_basebackup` from it. The operator will setup the cluster to be cloned to
connect to the service of the source cluster by name (if the cluster is called
test, then the connection string will look like host=test port=5432), which
means that you can clone only from clusters within the same namespace.
Note, that cloning can also be used for [major version upgrades](administrator.md#minor-and-major-version-upgrade)
of PostgreSQL.

### Clone from S3

Cloning from S3 has the advantage that there is no impact on your production
database. A new Postgres cluster is created by restoring the data of another
source cluster. If you create it in the same Kubernetes environment, use a
different name.

```yaml
spec:
  clone:
@@ -287,7 +280,8 @@ spec:

Here `cluster` is a name of a source cluster that is going to be cloned. A new
cluster will be cloned from S3, using the latest backup before the `timestamp`.
In this case, `uid` field is also mandatory - operator will use it to find a
Note, that a time zone is required for `timestamp` in the format of +00:00 which
is UTC. The `uid` field is also mandatory. The operator will use it to find a
correct key inside an S3 bucket. You can find this field in the metadata of the
source cluster:
@@ -299,9 +293,6 @@ metadata:
  uid: efd12e58-5786-11e8-b5a7-06148230260c
```

Note that timezone is required for `timestamp`. Otherwise, offset is relative
to UTC, see [RFC 3339 section 5.6) 3339 section 5.6](https://www.ietf.org/rfc/rfc3339.txt).

For non AWS S3 following settings can be set to support cloning from other S3
implementations:
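Note: since the `uid` of the source cluster is mandatory for an S3 clone, a quick way to look it up before filling in the `clone` section (the cluster name is just an example):

```bash
kubectl get postgresql acid-minimal-cluster -o jsonpath='{.metadata.uid}'; echo
```

The hunk that follows continues with the settings for non-AWS S3 implementations.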
@@ -317,14 +308,35 @@ spec:
    s3_force_path_style: true
```

### Clone directly

Another way to get a fresh copy of your source DB cluster is via basebackup. To
use this feature simply leave out the timestamp field from the clone section.
The operator will connect to the service of the source cluster by name. If the
cluster is called test, then the connection string will look like host=test
port=5432), which means that you can clone only from clusters within the same
namespace.

```yaml
spec:
  clone:
    cluster: "acid-batman"
```

Be aware that on a busy source database this can result in an elevated load!

## Setting up a standby cluster

Standby clusters are like normal cluster but they are streaming from a remote
cluster. As the first version of this feature, the only scenario covered by
operator is to stream from a WAL archive of the master. Following the more
popular infrastructure of using Amazon's S3 buckets, it is mentioned as
`s3_wal_path` here. To start a cluster as standby add the following `standby`
section in the YAML file:
Standby cluster is a [Patroni feature](https://github.com/zalando/patroni/blob/master/docs/replica_bootstrap.rst#standby-cluster)
that first clones a database, and keeps replicating changes afterwards. As the
replication is happening by the means of archived WAL files (stored on S3 or
the equivalent of other cloud providers), the standby cluster can exist in a
different location than its source database. Unlike cloning, the PostgreSQL
version between source and target cluster has to be the same.

To start a cluster as standby, add the following `standby` section in the YAML
file and specify the S3 bucket path. An empty path will result in an error and
no statefulset will be created.

```yaml
spec:
@@ -332,20 +344,65 @@ spec:
  standby:
    s3_wal_path: "s3 bucket path to the master"
```

Things to note:
At the moment, the operator only allows to stream from the WAL archive of the
master. Thus, it is recommended to deploy standby clusters with only [one pod](../manifests/standby-manifest.yaml#L10).
You can raise the instance count when detaching. Note, that the same pod role
labels like for normal clusters are used: The standby leader is labeled as
`master`.

- An empty string in the `s3_wal_path` field of the standby cluster will result
  in an error and no statefulset will be created.
- Only one pod can be deployed for stand-by cluster.
- To manually promote the standby_cluster, use `patronictl` and remove config
  entry.
- There is no way to transform a non-standby cluster to a standby cluster
  through the operator. Adding the standby section to the manifest of a running
  Postgres cluster will have no effect. However, it can be done through Patroni
  by adding the [standby_cluster](https://github.com/zalando/patroni/blob/bd2c54581abb42a7d3a3da551edf0b8732eefd27/docs/replica_bootstrap.rst#standby-cluster)
  section using `patronictl edit-config`. Note that the transformed standby
  cluster will not be doing any streaming. It will be in standby mode and allow
  read-only transactions only.

### Providing credentials of source cluster

A standby cluster is replicating the data (including users and passwords) from
the source database and is read-only. The system and application users (like
standby, postgres etc.) all have a password that does not match the credentials
stored in secrets which are created by the operator. One solution is to create
secrets beforehand and paste in the credentials of the source cluster.
Otherwise, you will see errors in the Postgres logs saying users cannot log in
and the operator logs will complain about not being able to sync resources.

When you only run a standby leader, you can safely ignore this, as it will be
sorted out once the cluster is detached from the source. It is also harmless if
you don’t plan it. But, when you created a standby replica, too, fix the
credentials right away. WAL files will pile up on the standby leader if no
connection can be established between standby replica(s). You can also edit the
secrets after their creation. Find them by:

```bash
kubectl get secrets --all-namespaces | grep <standby-cluster-name>
```

### Promote the standby

One big advantage of standby clusters is that they can be promoted to a proper
database cluster. This means it will stop replicating changes from the source,
and start accept writes itself. This mechanism makes it possible to move
databases from one place to another with minimal downtime. Currently, the
operator does not support promoting a standby cluster. It has to be done
manually using `patronictl edit-config` inside the postgres container of the
standby leader pod. Remove the following lines from the YAML structure and the
leader promotion happens immediately. Before doing so, make sure that the
standby is not behind the source database.

```yaml
standby_cluster:
  create_replica_methods:
    - bootstrap_standby_with_wale
    - basebackup_fast_xlog
  restore_command: envdir "/home/postgres/etc/wal-e.d/env-standby" /scripts/restore_command.sh
    "%f" "%p"
```

Finally, remove the `standby` section from the postgres cluster manifest.

### Turn a normal cluster into a standby

There is no way to transform a non-standby cluster to a standby cluster through
the operator. Adding the `standby` section to the manifest of a running
Postgres cluster will have no effect. But, as explained in the previous
paragraph it can be done manually through `patronictl edit-config`. This time,
by adding the `standby_cluster` section to the Patroni configuration. However,
the transformed standby cluster will not be doing any streaming. It will be in
standby mode and allow read-only transactions only.

## Sidecar Support
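Note: to make the promotion step above concrete, the manual procedure amounts to roughly the following (the pod selection is an assumption based on the `spilo-role` and `cluster-name` labels used elsewhere in this diff):

```bash
# open Patroni's configuration editor inside the standby leader pod
kubectl exec -it "$(kubectl get pod -l spilo-role=master,cluster-name=<standby-cluster-name> -o name)" \
  -- patronictl edit-config
# delete the standby_cluster block shown above and save; the leader promotes itself.
# Afterwards remove the standby section from the postgresql manifest.
```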
@@ -65,7 +65,7 @@ class EndToEndTestCase(unittest.TestCase):
        '''

        k8s = self.k8s
        cluster_label = 'version=acid-minimal-cluster'
        cluster_label = 'cluster-name=acid-minimal-cluster'

        # enable load balancer services
        pg_patch_enable_lbs = {

@@ -113,7 +113,7 @@ class EndToEndTestCase(unittest.TestCase):
        Lower resource limits below configured minimum and let operator fix it
        '''
        k8s = self.k8s
        cluster_label = 'version=acid-minimal-cluster'
        cluster_label = 'cluster-name=acid-minimal-cluster'
        _, failover_targets = k8s.get_pg_nodes(cluster_label)

        # configure minimum boundaries for CPU and memory limits

@@ -172,7 +172,7 @@ class EndToEndTestCase(unittest.TestCase):

        k8s.create_with_kubectl("manifests/complete-postgres-manifest.yaml")
        k8s.wait_for_pod_start("spilo-role=master", self.namespace)
        self.assert_master_is_unique(self.namespace, version="acid-test-cluster")
        self.assert_master_is_unique(self.namespace, "acid-test-cluster")

    @timeout_decorator.timeout(TEST_TIMEOUT_SEC)
    def test_scaling(self):

@@ -180,7 +180,7 @@ class EndToEndTestCase(unittest.TestCase):
        Scale up from 2 to 3 and back to 2 pods by updating the Postgres manifest at runtime.
        '''
        k8s = self.k8s
        labels = "version=acid-minimal-cluster"
        labels = "cluster-name=acid-minimal-cluster"

        k8s.wait_for_pg_to_scale(3)
        self.assertEqual(3, k8s.count_pods_with_label(labels))

@@ -196,7 +196,7 @@ class EndToEndTestCase(unittest.TestCase):
        Add taint "postgres=:NoExecute" to node with master. This must cause a failover.
        '''
        k8s = self.k8s
        cluster_label = 'version=acid-minimal-cluster'
        cluster_label = 'cluster-name=acid-minimal-cluster'

        # get nodes of master and replica(s) (expected target of new master)
        current_master_node, failover_targets = k8s.get_pg_nodes(cluster_label)

@@ -334,9 +334,9 @@ class EndToEndTestCase(unittest.TestCase):
            "foo": "bar",
        }
        self.assertTrue(k8s.check_service_annotations(
            "version=acid-service-annotations,spilo-role=master", annotations))
            "cluster-name=acid-service-annotations,spilo-role=master", annotations))
        self.assertTrue(k8s.check_service_annotations(
            "version=acid-service-annotations,spilo-role=replica", annotations))
            "cluster-name=acid-service-annotations,spilo-role=replica", annotations))

        # clean up
        unpatch_custom_service_annotations = {

@@ -346,14 +346,14 @@ class EndToEndTestCase(unittest.TestCase):
        }
        k8s.update_config(unpatch_custom_service_annotations)

    def assert_master_is_unique(self, namespace='default', version="acid-minimal-cluster"):
    def assert_master_is_unique(self, namespace='default', clusterName="acid-minimal-cluster"):
        '''
        Check that there is a single pod in the k8s cluster with the label "spilo-role=master"
        To be called manually after operations that affect pods
        '''

        k8s = self.k8s
        labels = 'spilo-role=master,version=' + version
        labels = 'spilo-role=master,cluster-name=' + clusterName

        num_of_master_pods = k8s.count_pods_with_label(labels, namespace)
        self.assertEqual(num_of_master_pods, 1, "Expected 1 master pod, found {}".format(num_of_master_pods))

@@ -438,7 +438,7 @@ class K8s:
        _ = self.api.custom_objects_api.patch_namespaced_custom_object(
            "acid.zalan.do", "v1", namespace, "postgresqls", "acid-minimal-cluster", body)

        labels = 'version=acid-minimal-cluster'
        labels = 'cluster-name=acid-minimal-cluster'
        while self.count_pods_with_label(labels) != number_of_instances:
            time.sleep(self.RETRY_TIMEOUT_SEC)

@@ -448,7 +448,7 @@ class K8s:
    def wait_for_master_failover(self, expected_master_nodes, namespace='default'):
        pod_phase = 'Failing over'
        new_master_node = ''
        labels = 'spilo-role=master,version=acid-minimal-cluster'
        labels = 'spilo-role=master,cluster-name=acid-minimal-cluster'

        while (pod_phase != 'Running') or (new_master_node not in expected_master_nodes):
            pods = self.api.core_v1.list_namespaced_pod(namespace, label_selector=labels).items
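Note: all of these test edits follow from `cluster_name_label` changing its default from `version` to `cluster-name`. The effect is easiest to see with a plain selector query against a running example cluster:

```bash
# selects the same pods that were previously matched with -l version=acid-minimal-cluster
kubectl get pods -l application=spilo,cluster-name=acid-minimal-cluster,spilo-role=master
```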
@@ -0,0 +1,12 @@
apiVersion: v1
kind: Service
metadata:
  name: postgres-operator
spec:
  type: ClusterIP
  ports:
  - port: 8080
    protocol: TCP
    targetPort: 8080
  selector:
    name: postgres-operator
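Note: with this Service the operator's REST API is reachable in-cluster as postgres-operator:8080, which is what the UI's `OPERATOR_API_URL` points to further down in this diff. A quick smoke test from a workstation (the `/clusters` path is an assumption about the operator API and may differ):

```bash
kubectl port-forward svc/postgres-operator 8080:8080 &
curl -s http://localhost:8080/clusters
```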
@@ -5,7 +5,7 @@ metadata:
# labels:
#   environment: demo
spec:
  dockerImage: registry.opensource.zalan.do/acid/spilo-cdp-12:1.6-p16
  dockerImage: registry.opensource.zalan.do/acid/spilo-12:1.6-p2
  teamId: "acid"
  volume:
    size: 1Gi
@@ -10,7 +10,7 @@ data:
  cluster_domain: cluster.local
  cluster_history_entries: "1000"
  cluster_labels: application:spilo
  cluster_name_label: version
  cluster_name_label: cluster-name
  # custom_service_annotations: "keyx:valuez,keya:valuea"
  # custom_pod_annotations: "keya:valuea,keyb:valueb"
  db_hosted_zone: db.example.com

@@ -19,7 +19,7 @@ data:
  # default_cpu_request: 100m
  # default_memory_limit: 500Mi
  # default_memory_request: 100Mi
  docker_image: registry.opensource.zalan.do/acid/spilo-cdp-12:1.6-p16
  docker_image: registry.opensource.zalan.do/acid/spilo-12:1.6-p2
  # enable_admin_role_for_users: "true"
  # enable_crd_validation: "true"
  # enable_database_access: "true"
@@ -4,3 +4,4 @@ resources:
- configmap.yaml
- operator-service-account-rbac.yaml
- postgres-operator.yaml
- api-service.yaml
@@ -64,9 +64,9 @@ rules:
  - secrets
  verbs:
  - create
  - update
  - delete
  - get
  - update
  # to check nodes for node readiness label
- apiGroups:
  - ""
@@ -15,7 +15,7 @@ spec:
      serviceAccountName: postgres-operator
      containers:
      - name: postgres-operator
        image: registry.opensource.zalan.do/acid/postgres-operator:v1.3.1
        image: registry.opensource.zalan.do/acid/postgres-operator:v1.4.0
        imagePullPolicy: IfNotPresent
        resources:
          requests:
@@ -5,7 +5,7 @@ metadata:
configuration:
  # enable_crd_validation: true
  etcd_host: ""
  docker_image: registry.opensource.zalan.do/acid/spilo-cdp-12:1.6-p16
  docker_image: registry.opensource.zalan.do/acid/spilo-12:1.6-p2
  # enable_shm_volume: true
  max_instances: -1
  min_instances: -1
@@ -58,7 +58,7 @@ spec:
            s3_secret_access_key:
              type: string
            s3_force_path_style:
              type: string
              type: boolean
            s3_wal_path:
              type: string
            timestamp:
@@ -160,7 +160,7 @@ var PostgresCRDResourceValidation = apiextv1beta1.CustomResourceValidation{
                Type: "string",
            },
            "s3_force_path_style": {
                Type: "string",
                Type: "boolean",
            },
            "s3_wal_path": {
                Type: "string",
@@ -1048,11 +1048,13 @@ func (c *Cluster) getNumberOfInstances(spec *acidv1.PostgresSpec) int32 {
    cur := spec.NumberOfInstances
    newcur := cur

    /* Limit the max number of pods to one, if this is standby-cluster */
    if spec.StandbyCluster != nil {
        c.logger.Info("Standby cluster can have maximum of 1 pod")
        min = 1
        max = 1
        if newcur == 1 {
            min = newcur
            max = newcur
        } else {
            c.logger.Warningf("operator only supports standby clusters with 1 pod")
        }
    }
    if max >= 0 && newcur > max {
        newcur = max

@@ -1498,8 +1500,8 @@ func (c *Cluster) generateLogicalBackupJob() (*batchv1beta1.CronJob, error) {
    )

    labels := map[string]string{
        "version":     c.Name,
        "application": "spilo-logical-backup",
        c.OpConfig.ClusterNameLabel: c.Name,
        "application":               "spilo-logical-backup",
    }
    podAffinityTerm := v1.PodAffinityTerm{
        LabelSelector: &metav1.LabelSelector{
@@ -508,14 +508,6 @@ func (c *Controller) submitRBACCredentials(event ClusterEvent) error {
        return fmt.Errorf("could not create pod service account %q : %v", c.opConfig.PodServiceAccountName, err)
    }

    // create role only if binding references a role
    // if not role is empty and we rely on an existing cluster role
    if c.PodServiceAccountRole != nil {
        if err := c.createRole(namespace); err != nil {
            return fmt.Errorf("could not create role %q : %v", c.PodServiceAccountRole.Name, err)
        }
    }

    if err := c.createRoleBindings(namespace); err != nil {
        return fmt.Errorf("could not create role binding %q : %v", c.PodServiceAccountRoleBinding.Name, err)
    }

@@ -525,13 +517,14 @@ func (c *Controller) submitRBACCredentials(event ClusterEvent) error {
func (c *Controller) createPodServiceAccount(namespace string) error {

    podServiceAccountName := c.opConfig.PodServiceAccountName
    // get a separate copy of service account
    // to prevent a race condition when setting a namespace for many clusters
    sa := *c.PodServiceAccount

    _, err := c.KubeClient.ServiceAccounts(namespace).Get(podServiceAccountName, metav1.GetOptions{})
    if k8sutil.ResourceNotFound(err) {
        c.logger.Infof(fmt.Sprintf("creating pod service account %q in the %q namespace", podServiceAccountName, namespace))

        // get a separate copy of service account
        // to prevent a race condition when setting a namespace for many clusters
        sa := *c.PodServiceAccount
        if _, err = c.KubeClient.ServiceAccounts(namespace).Create(&sa); err != nil {
            return fmt.Errorf("cannot deploy the pod service account %q defined in the configuration to the %q namespace: %v", podServiceAccountName, namespace, err)
        }

@@ -546,13 +539,14 @@ func (c *Controller) createPodServiceAccount(namespace string) error {
func (c *Controller) createRole(namespace string) error {

    podServiceAccountRoleName := c.PodServiceAccountRole.Name
    // get a separate copy of the role
    // to prevent a race condition when setting a namespace for many clusters
    role := *c.PodServiceAccountRole

    _, err := c.KubeClient.Roles(namespace).Get(podServiceAccountRoleName, metav1.GetOptions{})
    if k8sutil.ResourceNotFound(err) {
        c.logger.Infof("creating role %q in the %q namespace", podServiceAccountRoleName, namespace)

        // get a separate copy of the role
        // to prevent a race condition when setting a namespace for many clusters
        role := *c.PodServiceAccountRole
        _, err = c.KubeClient.Roles(namespace).Create(&role)
        if err != nil {
            return fmt.Errorf("cannot create role %q in the %q namespace: %v", podServiceAccountRoleName, namespace, err)

@@ -569,13 +563,14 @@ func (c *Controller) createRoleBindings(namespace string) error {

    podServiceAccountName := c.opConfig.PodServiceAccountName
    podServiceAccountRoleBindingName := c.PodServiceAccountRoleBinding.Name
    // get a separate copy of role binding
    // to prevent a race condition when setting a namespace for many clusters
    rb := *c.PodServiceAccountRoleBinding

    _, err := c.KubeClient.RoleBindings(namespace).Get(podServiceAccountRoleBindingName, metav1.GetOptions{})
    if k8sutil.ResourceNotFound(err) {
        c.logger.Infof("creating the role binding %q in the %q namespace", podServiceAccountRoleBindingName, namespace)
        c.logger.Infof("Creating the role binding %q in the %q namespace", podServiceAccountRoleBindingName, namespace)

        // get a separate copy of role binding
        // to prevent a race condition when setting a namespace for many clusters
        rb := *c.PodServiceAccountRoleBinding
        _, err = c.KubeClient.RoleBindings(namespace).Create(&rb)
        if err != nil {
            return fmt.Errorf("cannot bind the pod service account %q defined in the configuration to the cluster role in the %q namespace: %v", podServiceAccountName, namespace, err)
@@ -93,7 +93,7 @@ type Config struct {

    WatchedNamespace string `name:"watched_namespace"` // special values: "*" means 'watch all namespaces', the empty string "" means 'watch a namespace where operator is deployed to'
    EtcdHost string `name:"etcd_host" default:""` // special values: the empty string "" means Patroni will use K8s as a DCS
    DockerImage string `name:"docker_image" default:"registry.opensource.zalan.do/acid/spilo-cdp-12:1.6-p16"`
    DockerImage string `name:"docker_image" default:"registry.opensource.zalan.do/acid/spilo-12:1.6-p2"`
    Sidecars map[string]string `name:"sidecar_docker_images"`
    PodServiceAccountName string `name:"pod_service_account_name" default:"postgres-pod"`
    // value of this string must be valid JSON or YAML; see initPodServiceAccount
ui/Makefile
@@ -1,24 +1,17 @@
.PHONY: clean test appjs docker push mock

BINARY ?= postgres-operator-ui
BUILD_FLAGS ?= -v
CGO_ENABLED ?= 0
ifeq ($(RACE),1)
	BUILD_FLAGS += -race -a
	CGO_ENABLED=1
endif

LOCAL_BUILD_FLAGS ?= $(BUILD_FLAGS)
LDFLAGS ?= -X=main.version=$(VERSION)

IMAGE ?= registry.opensource.zalan.do/acid/$(BINARY)
IMAGE ?= registry.opensource.zalan.do/acid/postgres-operator-ui
VERSION ?= $(shell git describe --tags --always --dirty)
TAG ?= $(VERSION)
GITHEAD = $(shell git rev-parse --short HEAD)
GITURL = $(shell git config --get remote.origin.url)
GITSTATU = $(shell git status --porcelain || echo 'no changes')
GITSTATUS = $(shell git status --porcelain || echo 'no changes')
TTYFLAGS = $(shell test -t 0 && echo '-it')

ifdef CDP_PULL_REQUEST_NUMBER
	CDP_TAG := -${CDP_BUILD_VERSION}
endif

default: docker

clean:

@@ -32,11 +25,15 @@ appjs:
	docker run $(TTYFLAGS) -u $$(id -u) -v $$(pwd):/workdir -w /workdir/app node:10.1.0-alpine npm run build

docker: appjs
	docker build --build-arg "VERSION=$(VERSION)" -t "$(IMAGE):$(TAG)" .
	@echo 'Docker image $(IMAGE):$(TAG) can now be used.'
	echo `(env)`
	echo "Tag ${TAG}"
	echo "Version ${VERSION}"
	echo "CDP tag ${CDP_TAG}"
	echo "git describe $(shell git describe --tags --always --dirty)"
	docker build --rm -t "$(IMAGE):$(TAG)$(CDP_TAG)" -f Dockerfile .

push: docker
	docker push "$(IMAGE):$(TAG)"
push:
	docker push "$(IMAGE):$(TAG)$(CDP_TAG)"

mock:
	docker run -it -p 8080:8080 "$(IMAGE):$(TAG)" --mock
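Note: for orientation, the image tag is now composed from `git describe` plus an optional CDP suffix; the values below are illustrative only:

```bash
git describe --tags --always --dirty   # e.g. v1.2.0-42-gdeadbee, which becomes VERSION and TAG
# on a pull-request build CDP_TAG appends -${CDP_BUILD_VERSION}, so the pushed tag
# looks like v1.2.0-42-gdeadbee-1234
```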
@@ -1,6 +1,6 @@
{
  "name": "postgres-operator-ui",
  "version": "1.0.0",
  "version": "1.3.0",
  "description": "PostgreSQL Operator UI",
  "main": "src/app.js",
  "config": {
@@ -408,7 +408,7 @@ new
            ref='cpuLimit'
            type='number'
            placeholder='{ cpu.state.limit.initialValue }'
            min='1'
            min='250'
            required
            value='{ cpu.state.limit.state }'
            onchange='{ cpu.state.limit.edit }'

@@ -434,7 +434,7 @@ new
            onkeyup='{ memory.state.request.edit }'
          )
          .input-group-addon
            .input-units Gi
            .input-units Mi

        .input-group
          .input-group-addon.resource-type Limit

@@ -442,14 +442,14 @@ new
            ref='memoryLimit'
            type='number'
            placeholder='{ memory.state.limit.initialValue }'
            min='1'
            min='250'
            required
            value='{ memory.state.limit.state }'
            onchange='{ memory.state.limit.edit }'
            onkeyup='{ memory.state.limit.edit }'
          )
          .input-group-addon
            .input-units Gi
            .input-units Mi

    .col-lg-3
      help-general(config='{ opts.config }')

@@ -519,10 +519,10 @@ new
          resources:
            requests:
              cpu: {{ cpu.state.request.state }}m
              memory: {{ memory.state.request.state }}Gi
              memory: {{ memory.state.request.state }}Mi
            limits:
              cpu: {{ cpu.state.limit.state }}m
              memory: {{ memory.state.limit.state }}Gi{{#if restoring}}
              memory: {{ memory.state.limit.state }}Mi{{#if restoring}}

          clone:
            cluster: "{{ backup.state.name.state }}"

@@ -786,8 +786,8 @@ new
      return instance
    }

    this.cpu = DynamicResource({ request: 100, limit: 1000 })
    this.memory = DynamicResource({ request: 1, limit: 1 })
    this.cpu = DynamicResource({ request: 100, limit: 500 })
    this.memory = DynamicResource({ request: 100, limit: 500 })

    this.backup = DynamicSet({
      type: () => 'empty',
@@ -76,6 +76,9 @@ postgresql
  .alert.alert-danger(if='{ progress.requestStatus !== "OK" }') Create request failed
  .alert.alert-success(if='{ progress.requestStatus === "OK" }') Create request successful ({ new Date(progress.createdTimestamp).toLocaleString() })

  .alert.alert-info(if='{ !progress.postgresql }') PostgreSQL cluster manifest pending
  .alert.alert-success(if='{ progress.postgresql }') PostgreSQL cluster manifest created

  .alert.alert-info(if='{ !progress.statefulSet }') StatefulSet pending
  .alert.alert-success(if='{ progress.statefulSet }') StatefulSet created
@@ -45,12 +45,14 @@ postgresqls
    thead
      tr
        th(style='width: 120px') Team
        th(style='width: 130px') Namespace
        th Name
        th(style='width: 50px') Pods
        th(style='width: 140px') CPU
        th(style='width: 130px') Memory
        th(style='width: 100px') Size
        th(style='width: 130px') Namespace
        th Name
        th(style='width: 120px') Cost/Month
        th(stlye='width: 120px')

    tbody
      tr(

@@ -58,19 +60,21 @@ postgresqls
        hidden='{ !namespaced_name.toLowerCase().includes(filter.state.toLowerCase()) }'
      )
        td { team }
        td { nodes }
        td { cpu } / { cpu_limit }
        td { memory } / { memory_limit }
        td { volume_size }

        td(style='white-space: pre')
          | { namespace }

        td
          a(
            href='/#/status/{ cluster_path(this) }'
          )
            | { name }
        td { nodes }
        td { cpu } / { cpu_limit }
        td { memory } / { memory_limit }
        td { volume_size }
        td { calcCosts(nodes, cpu, memory, volume_size) }$

        td

          .btn-group.pull-right(
            aria-label='Cluster { qname } actions'

@@ -124,12 +128,14 @@ postgresqls
    thead
      tr
        th(style='width: 120px') Team
        th(style='width: 130px') Namespace
        th Name
        th(style='width: 50px') Pods
        th(style='width: 140px') CPU
        th(style='width: 130px') Memory
        th(style='width: 100px') Size
        th(style='width: 130px') Namespace
        th Name
        th(style='width: 120px') Cost/Month
        th(stlye='width: 120px')

    tbody
      tr(

@@ -137,20 +143,20 @@ postgresqls
        hidden='{ !namespaced_name.toLowerCase().includes(filter.state.toLowerCase()) }'
      )
        td { team }
        td { nodes }
        td { cpu } / { cpu_limit }
        td { memory } / { memory_limit }
        td { volume_size }

        td(style='white-space: pre')
          | { namespace }

        td

          a(
            href='/#/status/{ cluster_path(this) }'
          )
            | { name }
        td { nodes }
        td { cpu } / { cpu_limit }
        td { memory } / { memory_limit }
        td { volume_size }
        td { calcCosts(nodes, cpu, memory, volume_size) }$

        td
          .btn-group.pull-right(
            aria-label='Cluster { qname } actions'

@@ -223,6 +229,45 @@ postgresqls
        + '/' + encodeURI(cluster.name)
      )

    const calcCosts = this.calcCosts = (nodes, cpu, memory, disk) => {
      costs = nodes * (toCores(cpu) * opts.config.cost_core + toMemory(memory) * opts.config.cost_memory + toDisk(disk) * opts.config.cost_ebs)
      return costs.toFixed(2)
    }

    const toDisk = this.toDisk = value => {
      if(value.endsWith("Gi")) {
        value = value.substring(0, value.length-2)
        value = Number(value)
        return value
      }

      return value
    }

    const toMemory = this.toMemory = value => {
      if (value.endsWith("Mi")) {
        value = value.substring(0, value.length-2)
        value = Number(value) / 1000.
        return value
      }
      else if(value.endsWith("Gi")) {
        value = value.substring(0, value.length-2)
        value = Number(value)
        return value
      }

      return value
    }

    const toCores = this.toCores = value => {
      if (value.endsWith("m")) {
        value = value.substring(0, value.length-1)
        value = Number(value) / 1000.
        return value
      }
      return value
    }

    this.on('mount', () =>
      jQuery
        .get('/postgresqls')
@@ -4,23 +4,23 @@ metadata:
  name: "postgres-operator-ui"
  namespace: "default"
  labels:
    application: "postgres-operator-ui"
    name: "postgres-operator-ui"
    team: "acid"
spec:
  replicas: 1
  selector:
    matchLabels:
      application: "postgres-operator-ui"
      name: "postgres-operator-ui"
  template:
    metadata:
      labels:
        application: "postgres-operator-ui"
        name: "postgres-operator-ui"
        team: "acid"
    spec:
      serviceAccountName: postgres-operator-ui
      containers:
        - name: "service"
          image: registry.opensource.zalan.do/acid/postgres-operator-ui:v1.2.0
          image: registry.opensource.zalan.do/acid/postgres-operator-ui:v1.3.0
          ports:
            - containerPort: 8081
              protocol: "TCP"

@@ -32,8 +32,8 @@ spec:
            timeoutSeconds: 1
          resources:
            limits:
              cpu: "300m"
              memory: "3000Mi"
              cpu: "200m"
              memory: "200Mi"
            requests:
              cpu: "100m"
              memory: "100Mi"

@@ -41,7 +41,9 @@ spec:
            - name: "APP_URL"
              value: "http://localhost:8081"
            - name: "OPERATOR_API_URL"
              value: "http://localhost:8080"
              value: "http://postgres-operator:8080"
            - name: "OPERATOR_CLUSTER_NAME_LABEL"
              value: "cluster-name"
            - name: "TARGET_NAMESPACE"
              value: "default"
            - name: "TEAMS"

@@ -60,9 +62,14 @@ spec:
                  "replica_load_balancer_visible": true,
                  "resources_visible": true,
                  "users_visible": true,
                  "cost_ebs": 0.119,
                  "cost_core": 0.0575,
                  "cost_memory": 0.014375,
                  "postgresql_versions": [
                    "12",
                    "11",
                    "10",
                    "9.6"
                    "9.6",
                    "9.5"
                  ]
                }
@@ -76,6 +76,7 @@ ACCESS_TOKEN_URL = getenv('ACCESS_TOKEN_URL')
TOKENINFO_URL = getenv('OAUTH2_TOKEN_INFO_URL')

OPERATOR_API_URL = getenv('OPERATOR_API_URL', 'http://postgres-operator')
OPERATOR_CLUSTER_NAME_LABEL = getenv('OPERATOR_CLUSTER_NAME_LABEL', 'cluster-name')
OPERATOR_UI_CONFIG = getenv('OPERATOR_UI_CONFIG', '{}')
OPERATOR_UI_MAINTENANCE_CHECK = getenv('OPERATOR_UI_MAINTENANCE_CHECK', '{}')
READ_ONLY_MODE = getenv('READ_ONLY_MODE', False) in [True, 'true']

@@ -84,6 +85,13 @@ SUPERUSER_TEAM = getenv('SUPERUSER_TEAM', 'acid')
TARGET_NAMESPACE = getenv('TARGET_NAMESPACE')
GOOGLE_ANALYTICS = getenv('GOOGLE_ANALYTICS', False)

# storage pricing, i.e. https://aws.amazon.com/ebs/pricing/
COST_EBS = float(getenv('COST_EBS', 0.119))  # GB per month

# compute costs, i.e. https://www.ec2instances.info/?region=eu-central-1&selected=m5.2xlarge
COST_CORE = 30.5 * 24 * float(getenv('COST_CORE', 0.0575))  # Core per hour m5.2xlarge / 8.
COST_MEMORY = 30.5 * 24 * float(getenv('COST_MEMORY', 0.014375))  # Memory GB m5.2xlarge / 32.

WALE_S3_ENDPOINT = getenv(
    'WALE_S3_ENDPOINT',
    'https+path://s3-eu-central-1.amazonaws.com:443',

@@ -293,6 +301,9 @@ DEFAULT_UI_CONFIG = {
    'dns_format_string': '{0}.{1}.{2}',
    'pgui_link': '',
    'static_network_whitelist': {},
    'cost_ebs': COST_EBS,
    'cost_core': COST_CORE,
    'cost_memory': COST_MEMORY
}

@@ -1003,6 +1014,7 @@ def main(port, secret_key, debug, clusters: list):
    logger.info(f'App URL: {APP_URL}')
    logger.info(f'Authorize URL: {AUTHORIZE_URL}')
    logger.info(f'Operator API URL: {OPERATOR_API_URL}')
    logger.info(f'Operator cluster name label: {OPERATOR_CLUSTER_NAME_LABEL}')
    logger.info(f'Readonly mode: {"enabled" if READ_ONLY_MODE else "disabled"}')  # noqa
    logger.info(f'Spilo S3 backup bucket: {SPILO_S3_BACKUP_BUCKET}')
    logger.info(f'Spilo S3 backup prefix: {SPILO_S3_BACKUP_PREFIX}')
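Note: with the defaults above, the monthly rates shown in the UI work out to 30.5 × 24 × 0.0575 ≈ 42.09 $ per core and 30.5 × 24 × 0.014375 ≈ 10.52 $ per GB of memory, while COST_EBS stays a flat 0.119 $ per GB of storage (rounded, using the 30.5-day month assumed in the code).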
@@ -3,7 +3,7 @@ from datetime import datetime, timezone
from furl import furl
from json import dumps
from logging import getLogger
from os import environ
from os import environ, getenv
from requests import Session
from urllib.parse import urljoin
from uuid import UUID

@@ -16,6 +16,8 @@ logger = getLogger(__name__)

session = Session()

OPERATOR_CLUSTER_NAME_LABEL = getenv('OPERATOR_CLUSTER_NAME_LABEL', 'cluster-name')


def request(cluster, path, **kwargs):
    if 'timeout' not in kwargs:

@@ -137,7 +139,7 @@ def read_pods(cluster, namespace, spilo_cluster):
        cluster=cluster,
        resource_type='pods',
        namespace=namespace,
        label_selector={'version': spilo_cluster},
        label_selector={OPERATOR_CLUSTER_NAME_LABEL: spilo_cluster},
    )
@@ -1,14 +1,15 @@
Flask-OAuthlib==0.9.5
Flask==1.0.2
backoff==1.5.0
boto3==1.5.14
boto==2.48.0
Flask==1.1.1
backoff==1.8.1
boto3==1.10.4
boto==2.49.0
click==6.7
furl==1.0.1
furl==1.0.2
gevent==1.2.2
jq==0.1.6
json_delta>=2.0
kubernetes==3.0.0
requests==2.20.1
requests==2.22.0
stups-tokens>=1.1.19
wal_e==1.1.0
wal_e==1.1.0
werkzeug==0.16.1
@@ -19,10 +19,15 @@ default_operator_ui_config='{
    "nat_gateways_visible": false,
    "resources_visible": true,
    "users_visible": true,
    "cost_ebs": 0.119,
    "cost_core": 0.0575,
    "cost_memory": 0.014375,
    "postgresql_versions": [
        "12",
        "11",
        "10",
        "9.6"
        "9.6",
        "9.5"
    ],
    "static_network_whitelist": {
        "localhost": ["172.0.0.1/32"]