Initial commit for new 1.6 release with Postgres 13 support. (#1257)

* Initial commit for new 1.6 release with Postgres 13 support.
* Updating maintainers, Go version, Codeowners.
* Use lazy upgrade image that contains pg13.
* fix typo for ownerReference
* fix clusterrole in helm chart
* reflect GCP logical backup in validation
* improve PostgresTeam docs
* change defaults for enable_pgversion_env_var and storage_resize_mode
* explain manual part of in-place upgrade
* remove gsoc docs

Co-authored-by: Felix Kunde <felix-kunde@gmx.de>
Jan Mussler 2020-12-17 15:00:29 +01:00 committed by GitHub
parent 5f3f6988bc
commit a63ad49ef8
30 changed files with 233 additions and 159 deletions

View File

@@ -14,7 +14,7 @@ jobs:
- uses: actions/checkout@v1
- uses: actions/setup-go@v2
with:
go-version: "^1.15.5"
go-version: "^1.15.6"
- name: Make dependencies
run: make deps mocks
- name: Compile

View File

@@ -14,7 +14,7 @@ jobs:
- uses: actions/checkout@v1
- uses: actions/setup-go@v2
with:
go-version: "^1.15.5"
go-version: "^1.15.6"
- name: Make dependencies
run: make deps mocks
- name: Compile

View File

@@ -1,2 +1,2 @@
# global owners
* @alexeyklyukin @erthalion @sdudoladov @Jan-M @CyberDem0n @avaczi @FxKu @RafiaSabih
* @erthalion @sdudoladov @Jan-M @CyberDem0n @avaczi @FxKu @RafiaSabih

View File

@@ -1,3 +1,5 @@
Oleksii Kliukin <oleksii.kliukin@zalando.de>
Dmitrii Dolgov <dmitrii.dolgov@zalando.de>
Sergey Dudoladov <sergey.dudoladov@zalando.de>
Felix Kunde <felix.kunde@zalando.de>
Jan Mussler <jan.mussler@zalando.de>
Rafia Sabih <rafia.sabih@zalando.de>

View File

@@ -14,7 +14,7 @@ pipelines with no access to Kubernetes API directly, promoting infrastructure as
### Operator features
* Rolling updates on Postgres cluster changes, incl. quick minor version updates
* Live volume resize without pod restarts (AWS EBS, others pending)
* Live volume resize without pod restarts (AWS EBS, PVC)
* Database connection pooler with PGBouncer
* Restore and cloning Postgres clusters (incl. major version upgrade)
* Additionally logical backups to S3 bucket can be configured
@@ -23,10 +23,12 @@ pipelines with no access to Kubernetes API directly, promoting infrastructure as
* Basic credential and user management on K8s, eases application deployments
* UI to create and edit Postgres cluster manifests
* Works well on Amazon AWS, Google Cloud, OpenShift and locally on Kind
* Support for custom TLS certificates
* Base support for AWS EBS gp3 migration (iops, throughput pending)
### PostgreSQL features
* Supports PostgreSQL 12, starting from 9.6+
* Supports PostgreSQL 13, starting from 9.6+
* Streaming replication cluster via Patroni
* Point-In-Time-Recovery with
[pg_basebackup](https://www.postgresql.org/docs/11/app-pgbasebackup.html) /
@@ -48,7 +50,25 @@ pipelines with no access to Kubernetes API directly, promoting infrastructure as
[timescaledb](https://github.com/timescale/timescaledb)
The Postgres Operator has been developed at Zalando and is being used in
production for over two years.
production for over three years.
## Notes on Postgres 13 support
If you are new to the operator, you can skip this section and just start using the Postgres operator as is; Postgres 13 is ready to go.
The Postgres operator supports Postgres 13 with the new Spilo image, which also includes a recent Patroni version to support PG13 settings.
More work on optimizing restarts and rolling upgrades is pending.
If you are already running an older version of the Postgres operator with a Spilo 12 Docker image, you need to be aware of the changes to the backup path.
We introduce the major version into the backup path to smooth the major version upgrade that is now supported manually.
The new operator configuration sets a compatibility flag *enable_spilo_wal_path_compat* to make Spilo look for WAL segments in the current path but also in the old-format paths.
This comes at a potential performance cost and should be disabled after a few days.
The new Spilo 13 image is: `registry.opensource.zalan.do/acid/spilo-13:2.0-p1`
The last Spilo 12 image is: `registry.opensource.zalan.do/acid/spilo-12:1.6-p5`
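For existing installations, the relevant ConfigMap entries are sketched below (the values match the defaults shipped in this release's manifests; treat it as an excerpt, not a full configuration):
```yaml
# excerpt from the operator ConfigMap
data:
  docker_image: registry.opensource.zalan.do/acid/spilo-13:2.0-p1
  # keep the compatibility path enabled for the first days after the image
  # switch, then disable it to avoid the extra WAL path lookups
  enable_spilo_wal_path_compat: "true"
```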
## Getting started

View File

@@ -319,6 +319,10 @@ spec:
properties:
logical_backup_docker_image:
type: string
logical_backup_google_application_credentials:
type: string
logical_backup_provider:
type: string
logical_backup_s3_access_key_id:
type: string
logical_backup_s3_bucket:

View File

@@ -44,13 +44,6 @@ rules:
- get
- patch
- update
# to read configuration from ConfigMaps
- apiGroups:
- ""
resources:
- configmaps
verbs:
- get
# to send events to the CRs
- apiGroups:
- ""
@@ -64,14 +57,11 @@ rules:
- update
- watch
# to manage endpoints/configmaps which are also used by Patroni
{{- if toString .Values.configGeneral.kubernetes_use_configmaps | eq "true" }}
- apiGroups:
- ""
resources:
{{- if toString .Values.configGeneral.kubernetes_use_configmaps | eq "true" }}
- configmaps
{{- else }}
- endpoints
{{- end }}
verbs:
- create
- delete
@@ -81,6 +71,34 @@ rules:
- patch
- update
- watch
- apiGroups:
- ""
resources:
- endpoints
verbs:
- get
{{- else }}
# to read configuration from ConfigMaps
- apiGroups:
- ""
resources:
- configmaps
verbs:
- get
- apiGroups:
- ""
resources:
- endpoints
verbs:
- create
- delete
- deletecollection
- get
- list
- patch
- update
- watch
{{- end }}
# to CRUD secrets for database access
- apiGroups:
- ""

View File

@@ -22,7 +22,7 @@ configGeneral:
# update only the statefulsets without immediately doing the rolling update
enable_lazy_spilo_upgrade: false
# set the PGVERSION env var instead of providing the version via postgresql.bin_dir in SPILO_CONFIGURATION
enable_pgversion_env_var: false
enable_pgversion_env_var: true
# start any new database pod without limitations on shm memory
enable_shm_volume: true
# enables backwards compatible path between Spilo 12 and Spilo 13 images

View File

@@ -25,7 +25,7 @@ configGeneral:
# update only the statefulsets without immediately doing the rolling update
enable_lazy_spilo_upgrade: "false"
# set the PGVERSION env var instead of providing the version via postgresql.bin_dir in SPILO_CONFIGURATION
enable_pgversion_env_var: "false"
enable_pgversion_env_var: "true"
# start any new database pod without limitations on shm memory
enable_shm_volume: "true"
# enables backwards compatible path between Spilo 12 and Spilo 13 images

View File

@@ -16,7 +16,7 @@ pipeline:
- desc: 'Install go'
cmd: |
cd /tmp
wget -q https://storage.googleapis.com/golang/go1.15.5.linux-amd64.tar.gz -O go.tar.gz
wget -q https://storage.googleapis.com/golang/go1.15.6.linux-amd64.tar.gz -O go.tar.gz
tar -xf go.tar.gz
mv go /usr/local
ln -s /usr/local/go/bin/go /usr/bin/go

View File

@@ -11,15 +11,29 @@ switchover (planned failover) of the master to the Pod with new minor version.
The switch should usually take less than 5 seconds; still, clients have to
reconnect.
Major version upgrades are supported either via [cloning](user.md#how-to-clone-an-existing-postgresql-cluster)or in-place.
Major version upgrades are supported either via [cloning](user.md#how-to-clone-an-existing-postgresql-cluster)
or in-place.
With cloning, the new cluster manifest must have a higher `version` string than the source
cluster and will be created from a basebackup. Depending of the cluster size,
downtime in this case can be significant as writes to the database should be
stopped and all WAL files should be archived first before cloning is started.
With cloning, the new cluster manifest must have a higher `version` string than
the source cluster and will be created from a basebackup. Depending on the
cluster size, downtime in this case can be significant as writes to the database
should be stopped and all WAL files should be archived first before cloning is
started.
Starting with Spilo 13, Postgres Operator can do in-place major version upgrade, which should be faster than cloning. To trigger the upgrade, simply increase the version in the cluster manifest. As the very last step of
processing the manifest update event, the operator will call the `inplace_upgrade.py` script in Spilo. The upgrade is usually fast, well under one minute for most DBs. Note the changes become irrevertible once `pg_upgrade` is called. To understand the upgrade procedure, refer to the [corresponding PR in Spilo](https://github.com/zalando/spilo/pull/488).
Starting with Spilo 13, Postgres Operator can do in-place major version upgrade,
which should be faster than cloning. However, it is not fully automatic yet.
First, you need to make sure that setting the `PGVERSION` environment variable
is enabled in the configuration. Since `v1.6.0`, `enable_pgversion_env_var` is
enabled by default.
To trigger the upgrade, increase the version in the cluster manifest. After
Pods are rotated, `configure_spilo` will notice the version mismatch and start
the old version again. You can then exec into the Postgres container of the
master instance and call `python3 /scripts/inplace_upgrade.py N` where `N`
is the number of members of your cluster (see `number_of_instances`). The
upgrade is usually fast, well under one minute for most DBs. Note that changes
become irreversible once `pg_upgrade` is called. To understand the upgrade
procedure, refer to the [corresponding PR in Spilo](https://github.com/zalando/spilo/pull/488).
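For the manual step, here is a sketch of the commands (the cluster name `acid-minimal-cluster`, the pod name and the member count `2` are placeholder assumptions; the `cluster-name` and `spilo-role` labels follow the operator's labeling conventions):
```bash
# find the current master pod of the cluster
kubectl get pods -l cluster-name=acid-minimal-cluster,spilo-role=master

# run the upgrade script inside the master's Spilo container, passing the
# number of cluster members (number_of_instances); add -c <container> if
# the pod runs sidecars
kubectl exec -it acid-minimal-cluster-0 -- python3 /scripts/inplace_upgrade.py 2
```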
## CRD Validation

View File

@@ -286,7 +286,7 @@ manifest files:
Postgres manifest parameters are defined in the [api package](../pkg/apis/acid.zalan.do/v1/postgresql_type.go).
The operator behavior has to be implemented at least in [k8sres.go](../pkg/cluster/k8sres.go).
Validation of CRD parameters is controlled in [crd.go](../pkg/apis/acid.zalan.do/v1/crds.go).
Validation of CRD parameters is controlled in [crds.go](../pkg/apis/acid.zalan.do/v1/crds.go).
Please, reflect your changes in tests, for example in:
* [config_test.go](../pkg/util/config/config_test.go)
* [k8sres_test.go](../pkg/cluster/k8sres_test.go)

View File

@@ -1,63 +0,0 @@
<h1>Google Summer of Code 2019</h1>
## Applications steps
1. Please carefully read the official [Google Summer of Code Student Guide](https://google.github.io/gsocguides/student/)
2. Join the #postgres-operator slack channel under [Postgres Slack](https://postgres-slack.herokuapp.com) to introduce yourself to the community and get quick feedback on your application.
3. Select a project from the list of ideas below or propose your own.
4. Write a proposal draft. Please open an issue with the label `gsoc2019_application` in the [operator repository](https://github.com/zalando/postgres-operator/issues) so that the community members can publicly review it. See proposal instructions below for details.
5. Submit proposal and the proof of enrollment before April 9 2019 18:00 UTC through the web site of the Program.
## Project ideas
### Place database pods into the "Guaranteed" Quality-of-Service class
* **Description**: Kubernetes runtime does not kill pods in this class on condition they stay within their resource limits, which is desirable for the DB pods serving production workloads. To be assigned to that class, pod's resources must equal its limits. The task is to add the `enableGuaranteedQoSClass` or the like option to the Postgres manifest and the operator configmap that forcibly re-write pod resources to match the limits.
* **Recommended skills**: golang, basic Kubernetes abstractions
* **Difficulty**: moderate
* **Mentor(s)**: Felix Kunde [@FxKu](https://github.com/fxku), Sergey Dudoladov [@sdudoladov](https://github.com/sdudoladov)
### Implement the kubectl plugin for the Postgres CustomResourceDefinition
* **Description**: [kubectl plugins](https://kubernetes.io/docs/tasks/extend-kubectl/kubectl-plugins/) enable extending the Kubernetes command-line client `kubectl` with commands to manage custom resources. The task is to design and implement a plugin for the `kubectl postgres` command,
that can enable, for example, correct deletion or major version upgrade of Postgres clusters.
* **Recommended skills**: golang, shell scripting, operational experience with Kubernetes
* **Difficulty**: moderate to medium, depending on the plugin design
* **Mentor(s)**: Felix Kunde [@FxKu](https://github.com/fxku), Sergey Dudoladov [@sdudoladov](https://github.com/sdudoladov)
### Implement the openAPIV3Schema for the Postgres CRD
* **Description**: at present the operator validates a database manifest on its own.
It will be helpful to reject erroneous manifests before they reach the operator using the [native Kubernetes CRD validation](https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/#validation). It is up to the student to decide whether to write the schema manually or to adopt existing [schema generator developed for the Prometheus project](https://github.com/ant31/crd-validation).
* **Recommended skills**: golang, JSON schema
* **Difficulty**: medium
* **Mentor(s)**: Sergey Dudoladov [@sdudoladov](https://github.com/sdudoladov)
* **Issue**: [#388](https://github.com/zalando/postgres-operator/issues/388)
### Design a solution for the local testing of the operator
* **Description**: The current way of testing is to run minikube, either manually or with some tooling around it like `/run-operator_locally.sh` or Vagrant. This has at least three problems:
First, minikube is a single node cluster, so it is unsuitable for testing vital functions such as pod migration between nodes. Second, minikube starts slowly; that prolongs local testing.
Third, every contributor needs to come up with their own solution for local testing. The task is to come up with a better option which will enable us to conveniently and uniformly run e2e tests locally / potentially in Travis CI.
A promising option is the Kubernetes own [kind](https://github.com/kubernetes-sigs/kind)
* **Recommended skills**: Docker, shell scripting, basic Kubernetes abstractions
* **Difficulty**: medium to hard depending on the selected desing
* **Mentor(s)**: Dmitry Dolgov [@erthalion](https://github.com/erthalion), Sergey Dudoladov [@sdudoladov](https://github.com/sdudoladov)
* **Issue**: [#475](https://github.com/zalando/postgres-operator/issues/475)
### Detach a Postgres cluster from the operator for maintenance
* **Description**: sometimes a Postgres cluster requires manual maintenance. During such maintenance the operator should ignore all the changes manually applied to the cluster.
Currently the only way to achieve this behavior is to shutdown the operator altogether, for instance by scaling down the operator's own deployment to zero pods. That approach evidently affects all Postgres databases under the operator control and thus is highly undesirable in production Kubernetes clusters. It would be much better to be able to detach only the desired Postgres cluster from the operator for the time being and re-attach it again after maintenance.
* **Recommended skills**: golang, architecture of a Kubernetes operator
* **Difficulty**: hard - requires significant modification of the operator's internals and careful consideration of the corner cases.
* **Mentor(s)**: Dmitry Dolgov [@erthalion](https://github.com/erthalion), Sergey Dudoladov [@sdudoladov](https://github.com/sdudoladov)
* **Issue**: [#421](https://github.com/zalando/postgres-operator/issues/421)
### Propose your own idea
Feel free to come up with your own ideas. For inspiration,
see [our bug tracker](https://github.com/zalando/postgres-operator/issues),
the [official `CustomResouceDefinition` docs](https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/)
and [other operators](https://github.com/operator-framework/awesome-operators).

View File

@@ -80,7 +80,7 @@ Those are top-level keys, containing both leaf keys and groups.
The default is `false`.
* **enable_pgversion_env_var**
With newer versions of Spilo, it is preferable to use `PGVERSION` pod environment variable instead of the setting `postgresql.bin_dir` in the `SPILO_CONFIGURATION` env variable. When this option is true, the operator sets `PGVERSION` and omits `postgresql.bin_dir` from `SPILO_CONFIGURATION`. When false, the `postgresql.bin_dir` is set. This setting takes precedence over `PGVERSION`; see PR 222 in Spilo. The default is `false`.
With newer versions of Spilo, it is preferable to use the `PGVERSION` pod environment variable instead of the setting `postgresql.bin_dir` in the `SPILO_CONFIGURATION` env variable. When this option is true, the operator sets `PGVERSION` and omits `postgresql.bin_dir` from `SPILO_CONFIGURATION`. When false, `postgresql.bin_dir` is set. This setting takes precedence over `PGVERSION`; see PR 222 in Spilo. The default is `true`. A sketch of the effect is shown below.
* **enable_spilo_wal_path_compat**
enables backwards compatible path between Spilo 12 and Spilo 13 images. The default is `false`.
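A sketch of the resulting Spilo container environment in each `enable_pgversion_env_var` mode (an assumed pod spec excerpt; the `bin_dir` path follows Spilo's usual layout):
```yaml
# with enable_pgversion_env_var: true, the operator sets PGVERSION directly
env:
- name: PGVERSION
  value: "13"
# with enable_pgversion_env_var: false, the version is passed instead via
# postgresql.bin_dir inside SPILO_CONFIGURATION,
# e.g. "bin_dir": "/usr/lib/postgresql/13/bin"
```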
@@ -375,7 +375,7 @@ configuration they are grouped under the `kubernetes` key.
* **storage_resize_mode**
defines how the operator handles the difference between requested volume size and
actual size. Available options are: ebs - tries to resize EBS volume, pvc -
changes PVC definition, off - disables resize of the volumes. Default is "ebs".
changes PVC definition, off - disables resize of the volumes. Default is "pvc".
When using OpenShift please use one of the other available options.
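In the ConfigMap-based configuration this is a one-line setting, e.g. (sketch):
```yaml
# operator ConfigMap excerpt
data:
  storage_resize_mode: "pvc"  # alternatives: "ebs", "off"
```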
## Kubernetes resource requests

View File

@@ -275,9 +275,18 @@ Postgres clusters are associated with one team by providing the `teamID` in
the manifest. Additional superuser teams can be configured as mentioned in
the previous paragraph. However, this is a global setting. To assign
additional teams, superuser teams and single users to clusters of a given
team, use the [PostgresTeam CRD](../manifests/postgresteam.yaml). It provides
a simple mapping structure.
team, use the [PostgresTeam CRD](../manifests/postgresteam.yaml).
Note, by default the `PostgresTeam` support is disabled in the configuration.
Switch the `enable_postgres_team_crd` flag to `true` and the operator will
start watching for this CRD. Make sure the cluster role is up to date and
contains a section for [PostgresTeam](../manifests/operator-service-account-rbac.yaml#L30).
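A minimal sketch of the corresponding ConfigMap-based operator configuration (both keys appear in this commit's manifests):
```yaml
# operator ConfigMap excerpt
data:
  enable_postgres_team_crd: "true"
  # additionally required if additionalSuperuserTeams should take effect
  # enable_postgres_team_crd_superusers: "true"
```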
#### Additional teams
To assign additional teams and single users to clusters of a given team,
define a mapping with the `PostgresTeam` Kubernetes resource. The Postgres
Operator will read such team mappings each time it syncs all Postgres clusters.
```yaml
apiVersion: "acid.zalan.do/v1"
@@ -285,55 +294,118 @@ kind: PostgresTeam
metadata:
  name: custom-team-membership
spec:
  additionalSuperuserTeams:
    acid:
    - "postgres_superusers"
  additionalTeams:
    acid: []
  additionalMembers:
    acid:
    - "elephant"
    a-team:
    - "b-team"
```
One `PostgresTeam` resource could contain mappings of multiple teams but you
can choose to create separate CRDs, alternatively. On each CRD creation or
update the operator will gather all mappings to create additional human users
in databases the next time they are synced. Additional teams are resolved
transitively, meaning you will also add users for their `additionalTeams`
or (not and) `additionalSuperuserTeams`.
With the example above the operator will create login roles for all members
of `b-team` in every cluster owned by `a-team`. It's possible to do vice versa
for clusters of `b-team` in one manifest:
For each additional team the Teams API would be queried. Additional members
will be added either way. There can be "virtual teams" that do not exists in
your Teams API but users of associated teams as well as members will get
created. With `PostgresTeams` it's also easy to cover team name changes. Just
add the mapping between old and new team name and the rest can stay the same.
```yaml
spec:
  additionalTeams:
    a-team:
    - "b-team"
    b-team:
    - "a-team"
```
As you can see, the `PostgresTeam` CRD is a global team mapping, independent of
the Postgres manifests. It is possible to define multiple mappings, even with
redundant content - the Postgres operator will merge them into one internal
cache. Additional teams are resolved transitively, meaning you will also add
users for their `additionalTeams`, e.g.:
```yaml
spec:
  additionalTeams:
    a-team:
    - "b-team"
    - "c-team"
    b-team:
    - "a-team"
```
This creates roles for members of `c-team` not only in all clusters owned by
`a-team`, but also in clusters owned by `b-team`, as `a-team` is an
`additionalTeam` to `b-team`.
Note, you can also define `additionalSuperuserTeams` in the `PostgresTeam`
manifest. By default, this option is disabled and must be enabled with
`enable_postgres_team_crd_superusers` to take effect.
#### Virtual teams
There can be "virtual teams" that do not exist in the Teams API. This can
make it easier to map a group of teams to many other teams:
```yaml
spec:
  additionalTeams:
    a-team:
    - "virtual-team"
    b-team:
    - "virtual-team"
    virtual-team:
    - "c-team"
    - "d-team"
```
This example would create roles for members of `c-team` and `d-team` plus
additional `virtual-team` members in clusters owned by `a-team` or `b-team`.
#### Teams changing their names
With `PostgresTeams` it is also easy to cover team name changes. Just add
the mapping between old and new team name and the rest can stay the same.
E.g. if team `a-team` was renamed to `f-team` in the Teams API, this could
be reflected in a `PostgresTeam` mapping with just two lines:
```yaml
spec:
  additionalTeams:
    a-team:
    - "f-team"
```
This is helpful because Postgres cluster names are immutable and cannot be
changed. Only via cloning can a cluster get a different name, starting with
the new `teamID`.
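A hypothetical clone manifest for such a rename (cluster names are invented; the `clone` section follows the operator's cloning mechanism mentioned above):
```yaml
apiVersion: "acid.zalan.do/v1"
kind: postgresql
metadata:
  name: f-team-main-db
spec:
  teamId: "f-team"
  # other required fields (postgresql version, volume, etc.) omitted
  clone:
    # source cluster still carries the old team's name
    cluster: "a-team-main-db"
```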
#### Additional members
Single members might be excluded from teams although they continue to work
with the same people. However, the teams API would not reflect this anymore.
To still add a database role for former team members, list their role under
the `additionalMembers` section of the `PostgresTeam` resource:
```yaml
apiVersion: "acid.zalan.do/v1"
kind: PostgresTeam
metadata:
  name: virtualteam-membership
  name: custom-team-membership
spec:
  additionalSuperuserTeams:
    acid:
    - "virtual_superusers"
    virtual_superusers:
    - "real_teamA"
    - "real_teamB"
    real_teamA:
    - "real_teamA_renamed"
  additionalTeams:
    real_teamA:
    - "real_teamA_renamed"
  additionalMembers:
    virtual_superusers:
    - "foo"
    a-team:
    - "tia"
```
Note, by default the `PostgresTeam` support is disabled in the configuration.
Switch `enable_postgres_team_crd` flag to `true` and the operator will start to
watch for this CRD. Make sure, the cluster role is up to date and contains a
section for [PostgresTeam](../manifests/operator-service-account-rbac.yaml#L30).
This will create the login role `tia` in every cluster owned by `a-team`.
The user can connect to databases like the other team members.
The `additionalMembers` map can also be used to define users of virtual
teams, e.g. for the `virtual-team` used above:
```yaml
spec:
  additionalMembers:
    virtual-team:
    - "flynch"
    - "rdecker"
    - "briggs"
```
## Prepared databases with roles and default privileges

View File

@@ -8,7 +8,7 @@ IFS=$'\n\t'
readonly cluster_name="postgres-operator-e2e-tests"
readonly kubeconfig_path="/tmp/kind-config-${cluster_name}"
readonly spilo_image="registry.opensource.zalan.do/acid/spilo-12:1.6-p5"
readonly spilo_image="registry.opensource.zalan.do/acid/spilo-13:2.0-p1"
readonly e2e_test_runner_image="registry.opensource.zalan.do/acid/postgres-operator-e2e-tests-runner:0.3"
export GOPATH=${GOPATH-~/go}

View File

@@ -11,8 +11,8 @@ from kubernetes import client
from tests.k8s_api import K8s
from kubernetes.client.rest import ApiException
SPILO_CURRENT = "registry.opensource.zalan.do/acid/spilo-12:1.6-p5"
SPILO_LAZY = "registry.opensource.zalan.do/acid/spilo-cdp-12:1.6-p114"
SPILO_CURRENT = "registry.opensource.zalan.do/acid/spilo-13:2.0-p1"
SPILO_LAZY = "registry.opensource.zalan.do/acid/spilo-cdp-13:2.0-p145"
def to_selector(labels):

View File

@@ -36,7 +36,7 @@ spec:
defaultRoles: true
defaultUsers: false
postgresql:
version: "12"
version: "13"
parameters: # Expert section
shared_buffers: "32MB"
max_connections: "10"
@@ -93,9 +93,9 @@ spec:
encoding: "UTF8"
locale: "en_US.UTF-8"
data-checksums: "true"
pg_hba:
- hostssl all all 0.0.0.0/0 md5
- host all all 0.0.0.0/0 md5
# pg_hba:
# - hostssl all all 0.0.0.0/0 md5
# - host all all 0.0.0.0/0 md5
# slots:
# permanent_physical_1:
# type: physical

View File

@@ -31,7 +31,7 @@ data:
# default_memory_request: 100Mi
# delete_annotation_date_key: delete-date
# delete_annotation_name_key: delete-clustername
docker_image: registry.opensource.zalan.do/acid/spilo-12:1.6-p5
docker_image: registry.opensource.zalan.do/acid/spilo-13:2.0-p1
# downscaler_annotations: "deployment-time,downscaler/*"
# enable_admin_role_for_users: "true"
# enable_crd_validation: "true"
@@ -41,16 +41,16 @@ data:
# enable_init_containers: "true"
# enable_lazy_spilo_upgrade: "false"
enable_master_load_balancer: "false"
# enable_pgversion_env_var: "false"
enable_pgversion_env_var: "true"
# enable_pod_antiaffinity: "false"
# enable_pod_disruption_budget: "true"
# enable_postgres_team_crd: "false"
# enable_postgres_team_crd_superusers: "false"
enable_replica_load_balancer: "false"
# enable_shm_volume: "true"
# enable_pgversion_env_var: "false"
enable_pgversion_env_var: "true"
# enable_sidecars: "true"
enable_spilo_wal_path_compat: "false"
enable_spilo_wal_path_compat: "true"
# enable_team_superuser: "false"
enable_teams_api: "false"
# etcd_host: ""
@@ -122,4 +122,4 @@ data:
# wal_gs_bucket: ""
# wal_s3_bucket: ""
watched_namespace: "*" # listen to all namespaces
workers: "8"
workers: "16"

View File

@@ -18,4 +18,4 @@ spec:
preparedDatabases:
bar: {}
postgresql:
version: "12"
version: "13"

View File

@@ -315,6 +315,10 @@ spec:
properties:
logical_backup_docker_image:
type: string
logical_backup_google_application_credentials:
type: string
logical_backup_provider:
type: string
logical_backup_s3_access_key_id:
type: string
logical_backup_s3_bucket:

View File

@@ -6,7 +6,7 @@ configuration:
docker_image: registry.opensource.zalan.do/acid/spilo-12:1.6-p3
# enable_crd_validation: true
# enable_lazy_spilo_upgrade: false
# enable_pgversion_env_var: false
enable_pgversion_env_var: true
# enable_shm_volume: true
enable_spilo_wal_path_compat: false
etcd_host: ""

View File

@@ -13,4 +13,3 @@ nav:
- Config parameters: 'reference/operator_parameters.md'
- Manifest parameters: 'reference/cluster_manifest.md'
- CLI options and environment: 'reference/command_line_and_environment.md'
- Google Summer of Code 2019: 'gsoc-2019/ideas.md'

View File

@@ -1291,6 +1291,12 @@ var OperatorConfigCRDResourceValidation = apiextv1.CustomResourceValidation{
"logical_backup_docker_image": {
Type: "string",
},
"logical_backup_google_application_credentials": {
Type: "string",
},
"logical_backup_provider": {
Type: "string",
},
"logical_backup_s3_access_key_id": {
Type: "string",
},

View File

@@ -778,7 +778,7 @@ func TestConnectionPoolerDeploymentSpec(t *testing.T) {
},
expected: nil,
cluster: cluster,
check: testDeploymentOwnwerReference,
check: testDeploymentOwnerReference,
},
{
subTest: "selector",
@@ -931,7 +931,7 @@ func TestConnectionPoolerServiceSpec(t *testing.T) {
ConnectionPooler: &acidv1.ConnectionPooler{},
},
cluster: cluster,
check: testServiceOwnwerReference,
check: testServiceOwnerReference,
},
{
subTest: "selector",

View File

@@ -939,7 +939,7 @@ func testCustomPodTemplate(cluster *Cluster, podSpec *v1.PodTemplateSpec) error
return nil
}
func testDeploymentOwnwerReference(cluster *Cluster, deployment *appsv1.Deployment) error {
func testDeploymentOwnerReference(cluster *Cluster, deployment *appsv1.Deployment) error {
owner := deployment.ObjectMeta.OwnerReferences[0]
if owner.Name != cluster.Statefulset.ObjectMeta.Name {
@@ -950,7 +950,7 @@ func testDeploymentOwnwerReference(cluster *Cluster, deployment *appsv1.Deployme
return nil
}
func testServiceOwnwerReference(cluster *Cluster, service *v1.Service, role PostgresRole) error {
func testServiceOwnerReference(cluster *Cluster, service *v1.Service, role PostgresRole) error {
owner := service.ObjectMeta.OwnerReferences[0]
if owner.Name != cluster.Statefulset.ObjectMeta.Name {

View File

@@ -70,7 +70,7 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur
result.WatchedNamespace = fromCRD.Kubernetes.WatchedNamespace
result.PDBNameFormat = fromCRD.Kubernetes.PDBNameFormat
result.EnablePodDisruptionBudget = util.CoalesceBool(fromCRD.Kubernetes.EnablePodDisruptionBudget, util.True())
result.StorageResizeMode = util.Coalesce(fromCRD.Kubernetes.StorageResizeMode, "ebs")
result.StorageResizeMode = util.Coalesce(fromCRD.Kubernetes.StorageResizeMode, "pvc")
result.EnableInitContainers = util.CoalesceBool(fromCRD.Kubernetes.EnableInitContainers, util.True())
result.EnableSidecars = util.CoalesceBool(fromCRD.Kubernetes.EnableSidecars, util.True())
result.SecretNameTemplate = fromCRD.Kubernetes.SecretNameTemplate

View File

@@ -182,7 +182,7 @@ type Config struct {
CustomPodAnnotations map[string]string `name:"custom_pod_annotations"`
EnablePodAntiAffinity bool `name:"enable_pod_antiaffinity" default:"false"`
PodAntiAffinityTopologyKey string `name:"pod_antiaffinity_topology_key" default:"kubernetes.io/hostname"`
StorageResizeMode string `name:"storage_resize_mode" default:"ebs"`
StorageResizeMode string `name:"storage_resize_mode" default:"pvc"`
EnableLoadBalancer *bool `name:"enable_load_balancer"` // deprecated and kept for backward compatibility
ExternalTrafficPolicy string `name:"external_traffic_policy" default:"Cluster"`
MasterDNSNameFormat StringTemplate `name:"master_dns_name_format" default:"{cluster}.{team}.{hostedzone}"`
@@ -202,7 +202,7 @@ type Config struct {
PostgresSuperuserTeams []string `name:"postgres_superuser_teams" default:""`
SetMemoryRequestToLimit bool `name:"set_memory_request_to_limit" default:"false"`
EnableLazySpiloUpgrade bool `name:"enable_lazy_spilo_upgrade" default:"false"`
EnablePgVersionEnvVar bool `name:"enable_pgversion_env_var" default:"false"`
EnablePgVersionEnvVar bool `name:"enable_pgversion_env_var" default:"true"`
EnableSpiloWalPathCompat bool `name:"enable_spilo_wal_path_compat" default:"false"`
}

View File

@@ -68,10 +68,8 @@ spec:
"cost_core": 0.0575,
"cost_memory": 0.014375,
"postgresql_versions": [
"13",
"12",
"11",
"10",
"9.6",
"9.5"
"11"
]
}

View File

@@ -303,7 +303,7 @@ DEFAULT_UI_CONFIG = {
'users_visible': True,
'databases_visible': True,
'resources_visible': True,
'postgresql_versions': ['9.6', '10', '11'],
'postgresql_versions': ['11', '12', '13'],
'dns_format_string': '{0}.{1}.{2}',
'pgui_link': '',
'static_network_whitelist': {},