Merge branch 'master' into kubectl-pg-fixes
Commit 36ba5178a9
@ -9,7 +9,7 @@ assignees: ''
Please, answer some short questions which should help us to understand your problem / question better?

- **Which image of the operator are you using?** e.g. registry.opensource.zalan.do/acid/postgres-operator:v1.5.0
- **Which image of the operator are you using?** e.g. registry.opensource.zalan.do/acid/postgres-operator:v1.6.0
- **Where do you run it - cloud or metal? Kubernetes or OpenShift?** [AWS K8s | GCP ... | Bare Metal K8s]
- **Are you running Postgres Operator in production?** [yes | no]
- **Type of issue?** [Bug report, question, feature request, etc.]
@ -14,7 +14,7 @@ jobs:
    - uses: actions/checkout@v1
    - uses: actions/setup-go@v2
      with:
        go-version: "^1.15.5"
        go-version: "^1.15.6"
    - name: Make dependencies
      run: make deps mocks
    - name: Compile
@ -11,10 +11,10 @@ jobs:
    name: Unit tests and coverage
    runs-on: ubuntu-latest
    steps:
    - uses: actions/checkout@v1
    - uses: actions/checkout@v2
    - uses: actions/setup-go@v2
      with:
        go-version: "^1.15.5"
        go-version: "^1.15.6"
    - name: Make dependencies
      run: make deps mocks
    - name: Compile

@ -22,7 +22,7 @@ jobs:
    - name: Run unit tests
      run: go test -race -covermode atomic -coverprofile=coverage.out ./...
    - name: Convert coverage to lcov
      uses: jandelgado/gcov2lcov-action@v1.0.5
      uses: jandelgado/gcov2lcov-action@v1.0.8
    - name: Coveralls
      uses: coverallsapp/github-action@master
      with:
@ -3,3 +3,11 @@ X-Zalando-Team: "acid"
# type should be one of [code, doc, config, tools, secrets]
# code will be the default value, if X-Zalando-Type is not found in .zappr.yml
X-Zalando-Type: code

approvals:
  groups:
    zalando:
      minimum: 2
      from:
        orgs:
          - zalando
@ -1,2 +1,2 @@
# global owners
* @alexeyklyukin @erthalion @sdudoladov @Jan-M @CyberDem0n @avaczi @FxKu @RafiaSabih
* @erthalion @sdudoladov @Jan-M @CyberDem0n @avaczi @FxKu @RafiaSabih
LICENSE
@ -1,6 +1,6 @@
The MIT License (MIT)

Copyright (c) 2020 Zalando SE
Copyright (c) 2021 Zalando SE

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
@ -1,3 +1,5 @@
Oleksii Kliukin <oleksii.kliukin@zalando.de>
Dmitrii Dolgov <dmitrii.dolgov@zalando.de>
Sergey Dudoladov <sergey.dudoladov@zalando.de>
Felix Kunde <felix.kunde@zalando.de>
Jan Mussler <jan.mussler@zalando.de>
Rafia Sabih <rafia.sabih@zalando.de>
README.md
@ -14,19 +14,21 @@ pipelines with no access to Kubernetes API directly, promoting infrastructure as
### Operator features

* Rolling updates on Postgres cluster changes, incl. quick minor version updates
* Live volume resize without pod restarts (AWS EBS, others pending)
* Live volume resize without pod restarts (AWS EBS, PVC)
* Database connection pooler with PGBouncer
* Restore and cloning Postgres clusters (incl. major version upgrade)
* Additionally logical backups to S3 bucket can be configured
* Standby cluster from S3 WAL archive
* Configurable for non-cloud environments
* Basic credential and user management on K8s, eases application deployments
* Support for custom TLS certificates
* UI to create and edit Postgres cluster manifests
* Works well on Amazon AWS, Google Cloud, OpenShift and locally on Kind
* Base support for AWS EBS gp3 migration (iops, throughput pending)

### PostgreSQL features

* Supports PostgreSQL 12, starting from 9.6+
* Supports PostgreSQL 13, starting from 9.5+
* Streaming replication cluster via Patroni
* Point-In-Time-Recovery with
  [pg_basebackup](https://www.postgresql.org/docs/11/app-pgbasebackup.html) /
@ -48,7 +50,25 @@ pipelines with no access to Kubernetes API directly, promoting infrastructure as
  [timescaledb](https://github.com/timescale/timescaledb)

The Postgres Operator has been developed at Zalando and is being used in
production for over two years.
production for over three years.

## Notes on Postgres 13 support

If you are new to the operator, you can skip this section and just start using the Postgres Operator as is; Postgres 13 is ready to go.

The Postgres Operator supports Postgres 13 with the new Spilo image, which also includes a recent Patroni version to support PG13 settings.
More work on optimizing restarts and rolling upgrades is pending.

If you are already using the Postgres Operator in an older version with a Spilo 12 Docker image, you need to be aware of the changes to the backup path.
We introduce the major version into the backup path to smoothen the [major version upgrade](docs/administrator.md#minor-and-major-version-upgrade) that is now supported manually.

The new operator configuration can set a compatibility flag *enable_spilo_wal_path_compat* to make Spilo look for WAL segments in the current path but also in old-format paths.
This comes at a potential performance cost and should be disabled after a few days.

The new Spilo 13 image is: `registry.opensource.zalan.do/acid/spilo-13:2.0-p2`

The last Spilo 12 image is: `registry.opensource.zalan.do/acid/spilo-12:1.6-p5`
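For orientation, here is a rough sketch of how the compatibility mode could be switched on in the CRD-based operator configuration. The flag and image names come from this release; the `OperatorConfiguration` kind and the resource name are assumptions about a typical deployment and may differ in your setup:

```yaml
apiVersion: "acid.zalan.do/v1"
kind: OperatorConfiguration
metadata:
  # resource name is an assumption; use whatever your deployment references
  name: postgresql-operator-default-configuration
configuration:
  # run the new Spilo 13 image ...
  docker_image: registry.opensource.zalan.do/acid/spilo-13:2.0-p2
  # ... and let Spilo also look for WAL files under the old path layout;
  # disable again after a few days to avoid the extra lookups
  enable_spilo_wal_path_compat: true
```
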
## Getting started
@ -1,7 +1,7 @@
|
|||
apiVersion: v1
|
||||
name: postgres-operator-ui
|
||||
version: 1.5.0
|
||||
appVersion: 1.5.0
|
||||
version: 1.6.0
|
||||
appVersion: 1.6.0
|
||||
home: https://github.com/zalando/postgres-operator
|
||||
description: Postgres Operator UI provides a graphical interface for a convenient database-as-a-service user experience
|
||||
keywords:
|
||||
|
|
|
|||
|
|
@ -1,9 +1,32 @@
|
|||
apiVersion: v1
|
||||
entries:
|
||||
postgres-operator-ui:
|
||||
- apiVersion: v1
|
||||
appVersion: 1.6.0
|
||||
created: "2020-12-18T14:19:25.464717041+01:00"
|
||||
description: Postgres Operator UI provides a graphical interface for a convenient
|
||||
database-as-a-service user experience
|
||||
digest: d7813a235dd1015377c38fd5a14e7679a411c7340a25cfcf5f5294405f9a2eb2
|
||||
home: https://github.com/zalando/postgres-operator
|
||||
keywords:
|
||||
- postgres
|
||||
- operator
|
||||
- ui
|
||||
- cloud-native
|
||||
- patroni
|
||||
- spilo
|
||||
maintainers:
|
||||
- email: opensource@zalando.de
|
||||
name: Zalando
|
||||
name: postgres-operator-ui
|
||||
sources:
|
||||
- https://github.com/zalando/postgres-operator
|
||||
urls:
|
||||
- postgres-operator-ui-1.6.0.tgz
|
||||
version: 1.6.0
|
||||
- apiVersion: v1
|
||||
appVersion: 1.5.0
|
||||
created: "2020-06-04T17:06:37.153522579+02:00"
|
||||
created: "2020-12-18T14:19:25.464015993+01:00"
|
||||
description: Postgres Operator UI provides a graphical interface for a convenient
|
||||
database-as-a-service user experience
|
||||
digest: c91ea39e6d51d57f4048fb1b6ec53b40823f2690eb88e4e4f1a036367b9fdd61
|
||||
|
|
@ -24,29 +47,4 @@ entries:
|
|||
urls:
|
||||
- postgres-operator-ui-1.5.0.tgz
|
||||
version: 1.5.0
|
||||
- apiVersion: v1
|
||||
appVersion: 1.4.0
|
||||
created: "2020-06-04T17:06:37.15302073+02:00"
|
||||
description: Postgres Operator UI provides a graphical interface for a convenient
|
||||
database-as-a-service user experience
|
||||
digest: 00e0eff7056d56467cd5c975657fbb76c8d01accd25a4b7aca81bc42aeac961d
|
||||
home: https://github.com/zalando/postgres-operator
|
||||
keywords:
|
||||
- postgres
|
||||
- operator
|
||||
- ui
|
||||
- cloud-native
|
||||
- patroni
|
||||
- spilo
|
||||
maintainers:
|
||||
- email: opensource@zalando.de
|
||||
name: Zalando
|
||||
- email: sk@sik-net.de
|
||||
name: siku4
|
||||
name: postgres-operator-ui
|
||||
sources:
|
||||
- https://github.com/zalando/postgres-operator
|
||||
urls:
|
||||
- postgres-operator-ui-1.4.0.tgz
|
||||
version: 1.4.0
|
||||
generated: "2020-06-04T17:06:37.152369987+02:00"
|
||||
generated: "2020-12-18T14:19:25.463104102+01:00"
|
||||
|
|
|
|||
Binary file not shown.
Binary file not shown.
|
|
@ -68,10 +68,8 @@ spec:
|
|||
"resources_visible": true,
|
||||
"users_visible": true,
|
||||
"postgresql_versions": [
|
||||
"13",
|
||||
"12",
|
||||
"11",
|
||||
"10",
|
||||
"9.6",
|
||||
"9.5"
|
||||
"11"
|
||||
]
|
||||
}
|
||||
|
|
|
|||
|
|
@ -8,7 +8,7 @@ replicaCount: 1
|
|||
image:
|
||||
registry: registry.opensource.zalan.do
|
||||
repository: acid/postgres-operator-ui
|
||||
tag: v1.5.0-dirty
|
||||
tag: v1.6.0
|
||||
pullPolicy: "IfNotPresent"
|
||||
|
||||
# Optionally specify an array of imagePullSecrets.
|
||||
|
|
@ -49,10 +49,10 @@ envs:
|
|||
# configure UI service
|
||||
service:
|
||||
type: "ClusterIP"
|
||||
port: "8081"
|
||||
port: "80"
|
||||
# If the type of the service is NodePort a port can be specified using the nodePort field
|
||||
# If the nodePort field is not specified, or if it has no value, then a random port is used
|
||||
# notePort: 32521
|
||||
# nodePort: 32521
|
||||
|
||||
# configure UI ingress. If needed: "enabled: true"
|
||||
ingress:
|
||||
|
|
|
|||
|
|
@ -1,7 +1,7 @@
|
|||
apiVersion: v1
|
||||
name: postgres-operator
|
||||
version: 1.5.0
|
||||
appVersion: 1.5.0
|
||||
version: 1.6.0
|
||||
appVersion: 1.6.0
|
||||
home: https://github.com/zalando/postgres-operator
|
||||
description: Postgres Operator creates and manages PostgreSQL clusters running in Kubernetes
|
||||
keywords:
|
||||
|
|
|
|||
|
|
@ -65,32 +65,45 @@ spec:
|
|||
properties:
|
||||
docker_image:
|
||||
type: string
|
||||
default: "registry.opensource.zalan.do/acid/spilo-13:2.0-p2"
|
||||
enable_crd_validation:
|
||||
type: boolean
|
||||
default: true
|
||||
enable_lazy_spilo_upgrade:
|
||||
type: boolean
|
||||
default: false
|
||||
enable_pgversion_env_var:
|
||||
type: boolean
|
||||
default: true
|
||||
enable_shm_volume:
|
||||
type: boolean
|
||||
default: true
|
||||
enable_spilo_wal_path_compat:
|
||||
type: boolean
|
||||
default: false
|
||||
etcd_host:
|
||||
type: string
|
||||
default: ""
|
||||
kubernetes_use_configmaps:
|
||||
type: boolean
|
||||
default: false
|
||||
max_instances:
|
||||
type: integer
|
||||
minimum: -1 # -1 = disabled
|
||||
default: -1
|
||||
min_instances:
|
||||
type: integer
|
||||
minimum: -1 # -1 = disabled
|
||||
default: -1
|
||||
resync_period:
|
||||
type: string
|
||||
default: "30m"
|
||||
repair_period:
|
||||
type: string
|
||||
default: "5m"
|
||||
set_memory_request_to_limit:
|
||||
type: boolean
|
||||
default: false
|
||||
sidecar_docker_images:
|
||||
type: object
|
||||
additionalProperties:
|
||||
|
|
@ -104,24 +117,35 @@ spec:
|
|||
workers:
|
||||
type: integer
|
||||
minimum: 1
|
||||
default: 8
|
||||
users:
|
||||
type: object
|
||||
properties:
|
||||
replication_username:
|
||||
type: string
|
||||
default: standby
|
||||
super_username:
|
||||
type: string
|
||||
default: postgres
|
||||
kubernetes:
|
||||
type: object
|
||||
properties:
|
||||
additional_pod_capabilities:
|
||||
type: array
|
||||
items:
|
||||
type: string
|
||||
cluster_domain:
|
||||
type: string
|
||||
default: "cluster.local"
|
||||
cluster_labels:
|
||||
type: object
|
||||
additionalProperties:
|
||||
type: string
|
||||
default:
|
||||
application: spilo
|
||||
cluster_name_label:
|
||||
type: string
|
||||
default: "cluster-name"
|
||||
custom_pod_annotations:
|
||||
type: object
|
||||
additionalProperties:
|
||||
|
|
@ -136,12 +160,16 @@ spec:
|
|||
type: string
|
||||
enable_init_containers:
|
||||
type: boolean
|
||||
default: true
|
||||
enable_pod_antiaffinity:
|
||||
type: boolean
|
||||
default: false
|
||||
enable_pod_disruption_budget:
|
||||
type: boolean
|
||||
default: true
|
||||
enable_sidecars:
|
||||
type: boolean
|
||||
default: true
|
||||
infrastructure_roles_secret_name:
|
||||
type: string
|
||||
infrastructure_roles_secrets:
|
||||
|
|
@ -180,16 +208,20 @@ spec:
|
|||
type: string
|
||||
master_pod_move_timeout:
|
||||
type: string
|
||||
default: "20m"
|
||||
node_readiness_label:
|
||||
type: object
|
||||
additionalProperties:
|
||||
type: string
|
||||
oauth_token_secret_name:
|
||||
type: string
|
||||
default: "postgresql-operator"
|
||||
pdb_name_format:
|
||||
type: string
|
||||
default: "postgres-{cluster}-pdb"
|
||||
pod_antiaffinity_topology_key:
|
||||
type: string
|
||||
default: "kubernetes.io/hostname"
|
||||
pod_environment_configmap:
|
||||
type: string
|
||||
pod_environment_secret:
|
||||
|
|
@ -199,20 +231,27 @@ spec:
|
|||
enum:
|
||||
- "ordered_ready"
|
||||
- "parallel"
|
||||
default: "ordered_ready"
|
||||
pod_priority_class_name:
|
||||
type: string
|
||||
pod_role_label:
|
||||
type: string
|
||||
default: "spilo-role"
|
||||
pod_service_account_definition:
|
||||
type: string
|
||||
default: ""
|
||||
pod_service_account_name:
|
||||
type: string
|
||||
default: "postgres-pod"
|
||||
pod_service_account_role_binding_definition:
|
||||
type: string
|
||||
default: ""
|
||||
pod_terminate_grace_period:
|
||||
type: string
|
||||
default: "5m"
|
||||
secret_name_template:
|
||||
type: string
|
||||
default: "{username}.{cluster}.credentials.{tprkind}.{tprgroup}"
|
||||
spilo_runasuser:
|
||||
type: integer
|
||||
spilo_runasgroup:
|
||||
|
|
@ -221,12 +260,14 @@ spec:
|
|||
type: integer
|
||||
spilo_privileged:
|
||||
type: boolean
|
||||
default: false
|
||||
storage_resize_mode:
|
||||
type: string
|
||||
enum:
|
||||
- "ebs"
|
||||
- "pvc"
|
||||
- "off"
|
||||
default: "pvc"
|
||||
toleration:
|
||||
type: object
|
||||
additionalProperties:
|
||||
|
|
@ -239,36 +280,48 @@ spec:
|
|||
default_cpu_limit:
|
||||
type: string
|
||||
pattern: '^(\d+m|\d+(\.\d{1,3})?)$'
|
||||
default: "1"
|
||||
default_cpu_request:
|
||||
type: string
|
||||
pattern: '^(\d+m|\d+(\.\d{1,3})?)$'
|
||||
default: "100m"
|
||||
default_memory_limit:
|
||||
type: string
|
||||
pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
|
||||
default: "500Mi"
|
||||
default_memory_request:
|
||||
type: string
|
||||
pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
|
||||
default: "100Mi"
|
||||
min_cpu_limit:
|
||||
type: string
|
||||
pattern: '^(\d+m|\d+(\.\d{1,3})?)$'
|
||||
default: "250m"
|
||||
min_memory_limit:
|
||||
type: string
|
||||
pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
|
||||
default: "250Mi"
|
||||
timeouts:
|
||||
type: object
|
||||
properties:
|
||||
pod_label_wait_timeout:
|
||||
type: string
|
||||
default: "10m"
|
||||
pod_deletion_wait_timeout:
|
||||
type: string
|
||||
default: "10m"
|
||||
ready_wait_interval:
|
||||
type: string
|
||||
default: "4s"
|
||||
ready_wait_timeout:
|
||||
type: string
|
||||
default: "30s"
|
||||
resource_check_interval:
|
||||
type: string
|
||||
default: "3s"
|
||||
resource_check_timeout:
|
||||
type: string
|
||||
default: "10m"
|
||||
load_balancer:
|
||||
type: object
|
||||
properties:
|
||||
|
|
@ -278,19 +331,25 @@ spec:
|
|||
type: string
|
||||
db_hosted_zone:
|
||||
type: string
|
||||
default: "db.example.com"
|
||||
enable_master_load_balancer:
|
||||
type: boolean
|
||||
default: true
|
||||
enable_replica_load_balancer:
|
||||
type: boolean
|
||||
default: false
|
||||
external_traffic_policy:
|
||||
type: string
|
||||
enum:
|
||||
- "Cluster"
|
||||
- "Local"
|
||||
default: "Cluster"
|
||||
master_dns_name_format:
|
||||
type: string
|
||||
default: "{cluster}.{team}.{hostedzone}"
|
||||
replica_dns_name_format:
|
||||
type: string
|
||||
default: "{cluster}-repl.{team}.{hostedzone}"
|
||||
aws_or_gcp:
|
||||
type: object
|
||||
properties:
|
||||
|
|
@ -298,12 +357,16 @@ spec:
|
|||
type: string
|
||||
additional_secret_mount_path:
|
||||
type: string
|
||||
default: "/meta/credentials"
|
||||
aws_region:
|
||||
type: string
|
||||
default: "eu-central-1"
|
||||
enable_ebs_gp3_migration:
|
||||
type: boolean
|
||||
default: false
|
||||
enable_ebs_gp3_migration_max_size:
|
||||
type: integer
|
||||
default: 1000
|
||||
gcp_credentials:
|
||||
type: string
|
||||
kube_iam_role:
|
||||
|
|
@ -319,6 +382,15 @@ spec:
|
|||
properties:
|
||||
logical_backup_docker_image:
|
||||
type: string
|
||||
default: "registry.opensource.zalan.do/acid/logical-backup:v1.6.0"
|
||||
logical_backup_google_application_credentials:
|
||||
type: string
|
||||
logical_backup_job_prefix:
|
||||
type: string
|
||||
default: "logical-backup-"
|
||||
logical_backup_provider:
|
||||
type: string
|
||||
default: "s3"
|
||||
logical_backup_s3_access_key_id:
|
||||
type: string
|
||||
logical_backup_s3_bucket:
|
||||
|
|
@ -334,30 +406,40 @@ spec:
|
|||
logical_backup_schedule:
|
||||
type: string
|
||||
pattern: '^(\d+|\*)(/\d+)?(\s+(\d+|\*)(/\d+)?){4}$'
|
||||
default: "30 00 * * *"
|
||||
debug:
|
||||
type: object
|
||||
properties:
|
||||
debug_logging:
|
||||
type: boolean
|
||||
default: true
|
||||
enable_database_access:
|
||||
type: boolean
|
||||
default: true
|
||||
teams_api:
|
||||
type: object
|
||||
properties:
|
||||
enable_admin_role_for_users:
|
||||
type: boolean
|
||||
default: true
|
||||
enable_postgres_team_crd:
|
||||
type: boolean
|
||||
default: true
|
||||
enable_postgres_team_crd_superusers:
|
||||
type: boolean
|
||||
default: false
|
||||
enable_team_superuser:
|
||||
type: boolean
|
||||
default: false
|
||||
enable_teams_api:
|
||||
type: boolean
|
||||
default: true
|
||||
pam_configuration:
|
||||
type: string
|
||||
default: "https://info.example.com/oauth2/tokeninfo?access_token= uid realm=/employees"
|
||||
pam_role_name:
|
||||
type: string
|
||||
default: "zalandos"
|
||||
postgres_superuser_teams:
|
||||
type: array
|
||||
items:
|
||||
|
|
@ -366,23 +448,32 @@ spec:
|
|||
type: array
|
||||
items:
|
||||
type: string
|
||||
default:
|
||||
- admin
|
||||
team_admin_role:
|
||||
type: string
|
||||
default: "admin"
|
||||
team_api_role_configuration:
|
||||
type: object
|
||||
additionalProperties:
|
||||
type: string
|
||||
default:
|
||||
log_statement: all
|
||||
teams_api_url:
|
||||
type: string
|
||||
default: "https://teams.example.com/api/"
|
||||
logging_rest_api:
|
||||
type: object
|
||||
properties:
|
||||
api_port:
|
||||
type: integer
|
||||
default: 8080
|
||||
cluster_history_entries:
|
||||
type: integer
|
||||
default: 1000
|
||||
ring_log_lines:
|
||||
type: integer
|
||||
default: 100
|
||||
scalyr: # deprecated
|
||||
type: object
|
||||
properties:
|
||||
|
|
@ -391,60 +482,65 @@ spec:
|
|||
scalyr_cpu_limit:
|
||||
type: string
|
||||
pattern: '^(\d+m|\d+(\.\d{1,3})?)$'
|
||||
default: "1"
|
||||
scalyr_cpu_request:
|
||||
type: string
|
||||
pattern: '^(\d+m|\d+(\.\d{1,3})?)$'
|
||||
default: "100m"
|
||||
scalyr_image:
|
||||
type: string
|
||||
scalyr_memory_limit:
|
||||
type: string
|
||||
pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
|
||||
default: "500Mi"
|
||||
scalyr_memory_request:
|
||||
type: string
|
||||
pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
|
||||
default: "50Mi"
|
||||
scalyr_server_url:
|
||||
type: string
|
||||
default: "https://upload.eu.scalyr.com"
|
||||
connection_pooler:
|
||||
type: object
|
||||
properties:
|
||||
connection_pooler_schema:
|
||||
type: string
|
||||
#default: "pooler"
|
||||
default: "pooler"
|
||||
connection_pooler_user:
|
||||
type: string
|
||||
#default: "pooler"
|
||||
default: "pooler"
|
||||
connection_pooler_image:
|
||||
type: string
|
||||
#default: "registry.opensource.zalan.do/acid/pgbouncer"
|
||||
default: "registry.opensource.zalan.do/acid/pgbouncer:master-12"
|
||||
connection_pooler_max_db_connections:
|
||||
type: integer
|
||||
#default: 60
|
||||
default: 60
|
||||
connection_pooler_mode:
|
||||
type: string
|
||||
enum:
|
||||
- "session"
|
||||
- "transaction"
|
||||
#default: "transaction"
|
||||
default: "transaction"
|
||||
connection_pooler_number_of_instances:
|
||||
type: integer
|
||||
minimum: 2
|
||||
#default: 2
|
||||
minimum: 1
|
||||
default: 2
|
||||
connection_pooler_default_cpu_limit:
|
||||
type: string
|
||||
pattern: '^(\d+m|\d+(\.\d{1,3})?)$'
|
||||
#default: "1"
|
||||
default: "1"
|
||||
connection_pooler_default_cpu_request:
|
||||
type: string
|
||||
pattern: '^(\d+m|\d+(\.\d{1,3})?)$'
|
||||
#default: "500m"
|
||||
default: "500m"
|
||||
connection_pooler_default_memory_limit:
|
||||
type: string
|
||||
pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
|
||||
#default: "100Mi"
|
||||
default: "100Mi"
|
||||
connection_pooler_default_memory_request:
|
||||
type: string
|
||||
pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
|
||||
#default: "100Mi"
|
||||
default: "100Mi"
|
||||
status:
|
||||
type: object
|
||||
additionalProperties:
|
||||
|
|
|
|||
|
|
@ -557,6 +557,8 @@ spec:
|
|||
required:
|
||||
- size
|
||||
properties:
|
||||
iops:
|
||||
type: integer
|
||||
size:
|
||||
type: string
|
||||
pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
|
||||
|
|
@ -565,6 +567,8 @@ spec:
|
|||
type: string
|
||||
subPath:
|
||||
type: string
|
||||
throughput:
|
||||
type: integer
|
||||
status:
|
||||
type: object
|
||||
additionalProperties:
|
||||
|
|
|
|||
|
|
@ -1,9 +1,31 @@
|
|||
apiVersion: v1
|
||||
entries:
|
||||
postgres-operator:
|
||||
- apiVersion: v1
|
||||
appVersion: 1.6.0
|
||||
created: "2020-12-17T16:16:25.639708821+01:00"
|
||||
description: Postgres Operator creates and manages PostgreSQL clusters running
|
||||
in Kubernetes
|
||||
digest: 2f5f527bae0a22b02f2f7b1e2352665cecf489a990e18212444fa34450b97604
|
||||
home: https://github.com/zalando/postgres-operator
|
||||
keywords:
|
||||
- postgres
|
||||
- operator
|
||||
- cloud-native
|
||||
- patroni
|
||||
- spilo
|
||||
maintainers:
|
||||
- email: opensource@zalando.de
|
||||
name: Zalando
|
||||
name: postgres-operator
|
||||
sources:
|
||||
- https://github.com/zalando/postgres-operator
|
||||
urls:
|
||||
- postgres-operator-1.6.0.tgz
|
||||
version: 1.6.0
|
||||
- apiVersion: v1
|
||||
appVersion: 1.5.0
|
||||
created: "2020-06-04T17:06:49.41741489+02:00"
|
||||
created: "2020-12-17T16:16:25.637262877+01:00"
|
||||
description: Postgres Operator creates and manages PostgreSQL clusters running
|
||||
in Kubernetes
|
||||
digest: 198351d5db52e65cdf383d6f3e1745d91ac1e2a01121f8476f8b1be728b09531
|
||||
|
|
@ -23,26 +45,4 @@ entries:
|
|||
urls:
|
||||
- postgres-operator-1.5.0.tgz
|
||||
version: 1.5.0
|
||||
- apiVersion: v1
|
||||
appVersion: 1.4.0
|
||||
created: "2020-06-04T17:06:49.416001109+02:00"
|
||||
description: Postgres Operator creates and manages PostgreSQL clusters running
|
||||
in Kubernetes
|
||||
digest: f8b90fecfc3cb825b94ed17edd9d5cefc36ae61801d4568597b4a79bcd73b2e9
|
||||
home: https://github.com/zalando/postgres-operator
|
||||
keywords:
|
||||
- postgres
|
||||
- operator
|
||||
- cloud-native
|
||||
- patroni
|
||||
- spilo
|
||||
maintainers:
|
||||
- email: opensource@zalando.de
|
||||
name: Zalando
|
||||
name: postgres-operator
|
||||
sources:
|
||||
- https://github.com/zalando/postgres-operator
|
||||
urls:
|
||||
- postgres-operator-1.4.0.tgz
|
||||
version: 1.4.0
|
||||
generated: "2020-06-04T17:06:49.414521538+02:00"
|
||||
generated: "2020-12-17T16:16:25.635647131+01:00"
|
||||
|
|
|
|||
Binary file not shown.
Binary file not shown.
|
|
@ -63,6 +63,7 @@ rules:
|
|||
- services
|
||||
verbs:
|
||||
- create
|
||||
{{- if toString .Values.configKubernetes.spilo_privileged | eq "true" }}
|
||||
# to run privileged pods
|
||||
- apiGroups:
|
||||
- extensions
|
||||
|
|
@ -72,4 +73,5 @@ rules:
|
|||
- privileged
|
||||
verbs:
|
||||
- use
|
||||
{{- end }}
|
||||
{{ end }}
|
||||
|
|
|
|||
|
|
@ -44,13 +44,6 @@ rules:
|
|||
- get
|
||||
- patch
|
||||
- update
|
||||
# to read configuration from ConfigMaps
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- configmaps
|
||||
verbs:
|
||||
- get
|
||||
# to send events to the CRs
|
||||
- apiGroups:
|
||||
- ""
|
||||
|
|
@ -64,14 +57,11 @@ rules:
|
|||
- update
|
||||
- watch
|
||||
# to manage endpoints/configmaps which are also used by Patroni
|
||||
{{- if toString .Values.configGeneral.kubernetes_use_configmaps | eq "true" }}
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
{{- if toString .Values.configGeneral.kubernetes_use_configmaps | eq "true" }}
|
||||
- configmaps
|
||||
{{- else }}
|
||||
- endpoints
|
||||
{{- end }}
|
||||
verbs:
|
||||
- create
|
||||
- delete
|
||||
|
|
@ -81,6 +71,34 @@ rules:
|
|||
- patch
|
||||
- update
|
||||
- watch
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- endpoints
|
||||
verbs:
|
||||
- get
|
||||
{{- else }}
|
||||
# to read configuration from ConfigMaps
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- configmaps
|
||||
verbs:
|
||||
- get
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- endpoints
|
||||
verbs:
|
||||
- create
|
||||
- delete
|
||||
- deletecollection
|
||||
- get
|
||||
- list
|
||||
- patch
|
||||
- update
|
||||
- watch
|
||||
{{- end }}
|
||||
# to CRUD secrets for database access
|
||||
- apiGroups:
|
||||
- ""
|
||||
|
|
@ -210,7 +228,8 @@ rules:
|
|||
verbs:
|
||||
- get
|
||||
- create
|
||||
# to grant privilege to run privileged pods
|
||||
{{- if toString .Values.configKubernetes.spilo_privileged | eq "true" }}
|
||||
# to run privileged pods
|
||||
- apiGroups:
|
||||
- extensions
|
||||
resources:
|
||||
|
|
@ -219,4 +238,5 @@ rules:
|
|||
- privileged
|
||||
verbs:
|
||||
- use
|
||||
{{- end }}
|
||||
{{ end }}
|
||||
|
|
|
|||
|
|
@ -54,6 +54,8 @@ spec:
|
|||
{{- end }}
|
||||
resources:
|
||||
{{ toYaml .Values.resources | indent 10 }}
|
||||
securityContext:
|
||||
{{ toYaml .Values.securityContext | indent 10 }}
|
||||
{{- if .Values.imagePullSecrets }}
|
||||
imagePullSecrets:
|
||||
{{ toYaml .Values.imagePullSecrets | indent 8 }}
|
||||
|
|
|
|||
|
|
@ -1,7 +1,7 @@
|
|||
image:
|
||||
registry: registry.opensource.zalan.do
|
||||
repository: acid/postgres-operator
|
||||
tag: v1.5.0
|
||||
tag: v1.6.0
|
||||
pullPolicy: "IfNotPresent"
|
||||
|
||||
# Optionally specify an array of imagePullSecrets.
|
||||
|
|
@ -22,7 +22,7 @@ configGeneral:
|
|||
# update only the statefulsets without immediately doing the rolling update
|
||||
enable_lazy_spilo_upgrade: false
|
||||
# set the PGVERSION env var instead of providing the version via postgresql.bin_dir in SPILO_CONFIGURATION
|
||||
enable_pgversion_env_var: false
|
||||
enable_pgversion_env_var: true
|
||||
# start any new database pod without limitations on shm memory
|
||||
enable_shm_volume: true
|
||||
# enables backwards compatible path between Spilo 12 and Spilo 13 images
|
||||
|
|
@ -32,7 +32,7 @@ configGeneral:
|
|||
# Select if setup uses endpoints (default), or configmaps to manage leader (DCS=k8s)
|
||||
# kubernetes_use_configmaps: false
|
||||
# Spilo docker image
|
||||
docker_image: registry.opensource.zalan.do/acid/spilo-12:1.6-p3
|
||||
docker_image: registry.opensource.zalan.do/acid/spilo-13:2.0-p2
|
||||
# max number of instances in Postgres cluster. -1 = no limit
|
||||
min_instances: -1
|
||||
# min number of instances in Postgres cluster. -1 = no limit
|
||||
|
|
@ -59,6 +59,10 @@ configUsers:
|
|||
super_username: postgres
|
||||
|
||||
configKubernetes:
|
||||
# list of additional capabilities for postgres container
|
||||
# additional_pod_capabilities:
|
||||
# - "SYS_NICE"
|
||||
|
||||
# default DNS domain of K8s cluster where operator is running
|
||||
cluster_domain: cluster.local
|
||||
# additional labels assigned to the cluster objects
|
||||
|
|
@ -248,7 +252,14 @@ configAwsOrGcp:
|
|||
# configure K8s cron job managed by the operator
|
||||
configLogicalBackup:
|
||||
# image for pods of the logical backup job (example runs pg_dumpall)
|
||||
logical_backup_docker_image: "registry.opensource.zalan.do/acid/logical-backup:master-58"
|
||||
logical_backup_docker_image: "registry.opensource.zalan.do/acid/logical-backup:v1.6.0"
|
||||
# path of google cloud service account json file
|
||||
# logical_backup_google_application_credentials: ""
|
||||
|
||||
# prefix for the backup job name
|
||||
logical_backup_job_prefix: "logical-backup-"
|
||||
# storage provider - either "s3" or "gcs"
|
||||
logical_backup_provider: "s3"
|
||||
# S3 Access Key ID
|
||||
logical_backup_s3_access_key_id: ""
|
||||
# S3 bucket to store backup results
|
||||
|
|
@ -352,18 +363,24 @@ resources:
|
|||
cpu: 100m
|
||||
memory: 250Mi
|
||||
|
||||
securityContext:
|
||||
runAsUser: 1000
|
||||
runAsNonRoot: true
|
||||
readOnlyRootFilesystem: true
|
||||
allowPrivilegeEscalation: false
|
||||
|
||||
# Affinity for pod assignment
|
||||
# Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
|
||||
affinity: {}
|
||||
|
||||
# Tolerations for pod assignment
|
||||
# Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
|
||||
tolerations: []
|
||||
|
||||
# Node labels for pod assignment
|
||||
# Ref: https://kubernetes.io/docs/user-guide/node-selection/
|
||||
nodeSelector: {}
|
||||
|
||||
# Tolerations for pod assignment
|
||||
# Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
|
||||
tolerations: []
|
||||
|
||||
controllerID:
|
||||
# Specifies whether a controller ID should be defined for the operator
|
||||
# Note, all postgres manifest must then contain the following annotation to be found by this operator
|
||||
|
|
|
|||
|
|
@ -1,7 +1,7 @@
|
|||
image:
|
||||
registry: registry.opensource.zalan.do
|
||||
repository: acid/postgres-operator
|
||||
tag: v1.5.0
|
||||
tag: v1.6.0
|
||||
pullPolicy: "IfNotPresent"
|
||||
|
||||
# Optionally specify an array of imagePullSecrets.
|
||||
|
|
@ -25,7 +25,7 @@ configGeneral:
|
|||
# update only the statefulsets without immediately doing the rolling update
|
||||
enable_lazy_spilo_upgrade: "false"
|
||||
# set the PGVERSION env var instead of providing the version via postgresql.bin_dir in SPILO_CONFIGURATION
|
||||
enable_pgversion_env_var: "false"
|
||||
enable_pgversion_env_var: "true"
|
||||
# start any new database pod without limitations on shm memory
|
||||
enable_shm_volume: "true"
|
||||
# enables backwards compatible path between Spilo 12 and Spilo 13 images
|
||||
|
|
@ -35,7 +35,7 @@ configGeneral:
|
|||
# Select if setup uses endpoints (default), or configmaps to manage leader (DCS=k8s)
|
||||
# kubernetes_use_configmaps: "false"
|
||||
# Spilo docker image
|
||||
docker_image: registry.opensource.zalan.do/acid/spilo-12:1.6-p3
|
||||
docker_image: registry.opensource.zalan.do/acid/spilo-13:2.0-p2
|
||||
# max number of instances in Postgres cluster. -1 = no limit
|
||||
min_instances: "-1"
|
||||
# min number of instances in Postgres cluster. -1 = no limit
|
||||
|
|
@ -61,6 +61,9 @@ configUsers:
|
|||
super_username: postgres
|
||||
|
||||
configKubernetes:
|
||||
# list of additional capabilities for postgres container
|
||||
# additional_pod_capabilities: "SYS_NICE"
|
||||
|
||||
# default DNS domain of K8s cluster where operator is running
|
||||
cluster_domain: cluster.local
|
||||
# additional labels assigned to the cluster objects
|
||||
|
|
@ -239,15 +242,22 @@ configAwsOrGcp:
|
|||
# configure K8s cron job managed by the operator
|
||||
configLogicalBackup:
|
||||
# image for pods of the logical backup job (example runs pg_dumpall)
|
||||
logical_backup_docker_image: "registry.opensource.zalan.do/acid/logical-backup:master-58"
|
||||
logical_backup_docker_image: "registry.opensource.zalan.do/acid/logical-backup:v1.6.0"
|
||||
# path of google cloud service account json file
|
||||
# logical_backup_google_application_credentials: ""
|
||||
|
||||
# prefix for the backup job name
|
||||
logical_backup_job_prefix: "logical-backup-"
|
||||
# storage provider - either "s3" or "gcs"
|
||||
logical_backup_provider: "s3"
|
||||
# S3 Access Key ID
|
||||
logical_backup_s3_access_key_id: ""
|
||||
# S3 bucket to store backup results
|
||||
logical_backup_s3_bucket: "my-bucket-url"
|
||||
# S3 region of bucket
|
||||
logical_backup_s3_region: ""
|
||||
# S3 endpoint url when not using AWS
|
||||
logical_backup_s3_endpoint: ""
|
||||
# S3 region of bucket
|
||||
logical_backup_s3_region: ""
|
||||
# S3 Secret Access Key
|
||||
logical_backup_s3_secret_access_key: ""
|
||||
# S3 server side encryption
|
||||
|
|
@ -255,6 +265,7 @@ configLogicalBackup:
|
|||
# backup schedule in the cron format
|
||||
logical_backup_schedule: "30 00 * * *"
|
||||
|
||||
|
||||
# automate creation of human users with teams API service
|
||||
configTeamsApi:
|
||||
# team_admin_role will have the rights to grant roles coming from PG manifests
|
||||
|
|
@ -346,18 +357,24 @@ resources:
|
|||
cpu: 100m
|
||||
memory: 250Mi
|
||||
|
||||
securityContext:
|
||||
runAsUser: 1000
|
||||
runAsNonRoot: true
|
||||
readOnlyRootFilesystem: true
|
||||
allowPrivilegeEscalation: false
|
||||
|
||||
# Affinity for pod assignment
|
||||
# Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
|
||||
affinity: {}
|
||||
|
||||
# Tolerations for pod assignment
|
||||
# Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
|
||||
tolerations: []
|
||||
|
||||
# Node labels for pod assignment
|
||||
# Ref: https://kubernetes.io/docs/user-guide/node-selection/
|
||||
nodeSelector: {}
|
||||
|
||||
# Tolerations for pod assignment
|
||||
# Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
|
||||
tolerations: []
|
||||
|
||||
controllerID:
|
||||
# Specifies whether a controller ID should be defined for the operator
|
||||
# Note, all postgres manifest must then contain the following annotation to be found by this operator
|
||||
|
|
|
|||
@ -16,7 +16,7 @@ pipeline:
      - desc: 'Install go'
        cmd: |
          cd /tmp
          wget -q https://storage.googleapis.com/golang/go1.15.5.linux-amd64.tar.gz -O go.tar.gz
          wget -q https://storage.googleapis.com/golang/go1.15.6.linux-amd64.tar.gz -O go.tar.gz
          tar -xf go.tar.gz
          mv go /usr/local
          ln -s /usr/local/go/bin/go /usr/bin/go

@ -80,3 +80,15 @@ pipeline:
          export IMAGE
          make docker
          make push

  - id: build-logical-backup
    type: script

    commands:
      - desc: Build image
        cmd: |
          cd docker/logical-backup
          export TAG=$(git describe --tags --always --dirty)
          IMAGE="registry-write.opensource.zalan.do/acid/logical-backup"
          docker build --rm -t "$IMAGE:$TAG$CDP_TAG" .
          docker push "$IMAGE:$TAG$CDP_TAG"
@ -1,5 +1,5 @@
FROM alpine
MAINTAINER Team ACID @ Zalando <team-acid@zalando.de>
FROM registry.opensource.zalan.do/library/alpine-3.12:latest
LABEL maintainer="Team ACID @ Zalando <team-acid@zalando.de>"

# We need root certificates to deal with teams api over https
RUN apk --no-cache add ca-certificates go git musl-dev

@ -1,5 +1,5 @@
FROM alpine
MAINTAINER Team ACID @ Zalando <team-acid@zalando.de>
FROM registry.opensource.zalan.do/library/alpine-3.12:latest
LABEL maintainer="Team ACID @ Zalando <team-acid@zalando.de>"

# We need root certificates to deal with teams api over https
RUN apk --no-cache add curl

@ -1,4 +1,4 @@
FROM ubuntu:18.04
FROM registry.opensource.zalan.do/library/ubuntu-18.04:latest
LABEL maintainer="Team ACID @ Zalando <team-acid@zalando.de>"

SHELL ["/bin/bash", "-o", "pipefail", "-c"]

@ -15,6 +15,7 @@ RUN apt-get update \
       gnupg \
       gcc \
       libffi-dev \
    && pip3 install --upgrade pip \
    && pip3 install --no-cache-dir awscli --upgrade \
    && pip3 install --no-cache-dir gsutil --upgrade \
    && echo "deb http://apt.postgresql.org/pub/repos/apt/ $(lsb_release -cs)-pgdg main" > /etc/apt/sources.list.d/pgdg.list \

@ -22,6 +23,7 @@ RUN apt-get update \
    && curl --silent https://www.postgresql.org/media/keys/ACCC4CF8.asc | apt-key add - \
    && apt-get update \
    && apt-get install --no-install-recommends -y \
       postgresql-client-13 \
       postgresql-client-12 \
       postgresql-client-11 \
       postgresql-client-10 \
@ -11,15 +11,29 @@ switchover (planned failover) of the master to the Pod with new minor version.
The switch should usually take less than 5 seconds, still clients have to
reconnect.

Major version upgrades are supported either via [cloning](user.md#how-to-clone-an-existing-postgresql-cluster)or in-place.
Major version upgrades are supported either via [cloning](user.md#how-to-clone-an-existing-postgresql-cluster)
or in-place.

With cloning, the new cluster manifest must have a higher `version` string than the source
cluster and will be created from a basebackup. Depending of the cluster size,
downtime in this case can be significant as writes to the database should be
stopped and all WAL files should be archived first before cloning is started.
With cloning, the new cluster manifest must have a higher `version` string than
the source cluster and will be created from a basebackup. Depending on the
cluster size, downtime in this case can be significant as writes to the database
should be stopped and all WAL files should be archived first before cloning is
started.

Starting with Spilo 13, Postgres Operator can do in-place major version upgrade, which should be faster than cloning. To trigger the upgrade, simply increase the version in the cluster manifest. As the very last step of
processing the manifest update event, the operator will call the `inplace_upgrade.py` script in Spilo. The upgrade is usually fast, well under one minute for most DBs. Note the changes become irrevertible once `pg_upgrade` is called. To understand the upgrade procedure, refer to the [corresponding PR in Spilo](https://github.com/zalando/spilo/pull/488).
Starting with Spilo 13, Postgres Operator can do in-place major version upgrade,
which should be faster than cloning. However, it is not fully automatic yet.
First, you need to make sure that setting the `PGVERSION` environment variable
is enabled in the configuration. Since `v1.6.0`, `enable_pgversion_env_var` is
enabled by default.

To trigger the upgrade, increase the version in the cluster manifest. After
Pods are rotated, `configure_spilo` will notice the version mismatch and start
the old version again. You can then exec into the Postgres container of the
master instance and call `python3 /scripts/inplace_upgrade.py N` where `N`
is the number of members of your cluster (see [`numberOfInstances`](https://github.com/zalando/postgres-operator/blob/50cb5898ea715a1db7e634de928b2d16dc8cd969/manifests/minimal-postgres-manifest.yaml#L10)).
The upgrade is usually fast, well under one minute for most DBs. Note that
changes become irreversible once `pg_upgrade` is called. To understand the
upgrade procedure, refer to the [corresponding PR in Spilo](https://github.com/zalando/spilo/pull/488).
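For illustration, a fragment of a cluster manifest with the change that triggers the upgrade; the cluster name and instance count are placeholders, and a real manifest contains more required fields (e.g. `teamId`):

```yaml
apiVersion: "acid.zalan.do/v1"
kind: postgresql
metadata:
  name: acid-minimal-cluster    # placeholder cluster name
spec:
  numberOfInstances: 2          # this is the N to pass to inplace_upgrade.py
  postgresql:
    version: "13"               # bumped from "12" to request the major upgrade
```

Once the Pods have been rotated, `python3 /scripts/inplace_upgrade.py 2` would then be run inside the Postgres container of the master, as described above.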
## CRD Validation
@ -235,6 +235,24 @@ Then you can for example check the Patroni logs:
kubectl logs acid-minimal-cluster-0
```

## Unit tests with Mocks and K8s Fake API

Whenever possible you should rely on proper mocks and the K8s fake client, which allow full-fledged testing of K8s objects in your unit tests.

To enable mocks, a code annotation is needed:
[Mock code gen annotation](https://github.com/zalando/postgres-operator/blob/master/pkg/util/volumes/volumes.go#L3)

To generate mocks run:
```bash
make mocks
```

Examples for mocks can be found in:
[Example mock usage](https://github.com/zalando/postgres-operator/blob/master/pkg/cluster/volumes_test.go#L248)

Examples for fake K8s objects can be found in:
[Example fake K8s client usage](https://github.com/zalando/postgres-operator/blob/master/pkg/cluster/volumes_test.go#L166)

## End-to-end tests

The operator provides reference end-to-end (e2e) tests to

@ -286,7 +304,7 @@ manifest files:

Postgres manifest parameters are defined in the [api package](../pkg/apis/acid.zalan.do/v1/postgresql_type.go).
The operator behavior has to be implemented at least in [k8sres.go](../pkg/cluster/k8sres.go).
Validation of CRD parameters is controlled in [crd.go](../pkg/apis/acid.zalan.do/v1/crds.go).
Validation of CRD parameters is controlled in [crds.go](../pkg/apis/acid.zalan.do/v1/crds.go).
Please, reflect your changes in tests, for example in:
* [config_test.go](../pkg/util/config/config_test.go)
* [k8sres_test.go](../pkg/cluster/k8sres_test.go)
||||
|
|
|
|||
|
|
@ -1,63 +0,0 @@
|
|||
<h1>Google Summer of Code 2019</h1>
|
||||
|
||||
## Applications steps
|
||||
|
||||
1. Please carefully read the official [Google Summer of Code Student Guide](https://google.github.io/gsocguides/student/)
|
||||
2. Join the #postgres-operator slack channel under [Postgres Slack](https://postgres-slack.herokuapp.com) to introduce yourself to the community and get quick feedback on your application.
|
||||
3. Select a project from the list of ideas below or propose your own.
|
||||
4. Write a proposal draft. Please open an issue with the label `gsoc2019_application` in the [operator repository](https://github.com/zalando/postgres-operator/issues) so that the community members can publicly review it. See proposal instructions below for details.
|
||||
5. Submit proposal and the proof of enrollment before April 9 2019 18:00 UTC through the web site of the Program.
|
||||
|
||||
## Project ideas
|
||||
|
||||
|
||||
### Place database pods into the "Guaranteed" Quality-of-Service class
|
||||
|
||||
* **Description**: Kubernetes runtime does not kill pods in this class on condition they stay within their resource limits, which is desirable for the DB pods serving production workloads. To be assigned to that class, pod's resources must equal its limits. The task is to add the `enableGuaranteedQoSClass` or the like option to the Postgres manifest and the operator configmap that forcibly re-write pod resources to match the limits.
|
||||
* **Recommended skills**: golang, basic Kubernetes abstractions
|
||||
* **Difficulty**: moderate
|
||||
* **Mentor(s)**: Felix Kunde [@FxKu](https://github.com/fxku), Sergey Dudoladov [@sdudoladov](https://github.com/sdudoladov)
|
||||
|
||||
### Implement the kubectl plugin for the Postgres CustomResourceDefinition
|
||||
|
||||
* **Description**: [kubectl plugins](https://kubernetes.io/docs/tasks/extend-kubectl/kubectl-plugins/) enable extending the Kubernetes command-line client `kubectl` with commands to manage custom resources. The task is to design and implement a plugin for the `kubectl postgres` command,
|
||||
that can enable, for example, correct deletion or major version upgrade of Postgres clusters.
|
||||
* **Recommended skills**: golang, shell scripting, operational experience with Kubernetes
|
||||
* **Difficulty**: moderate to medium, depending on the plugin design
|
||||
* **Mentor(s)**: Felix Kunde [@FxKu](https://github.com/fxku), Sergey Dudoladov [@sdudoladov](https://github.com/sdudoladov)
|
||||
|
||||
### Implement the openAPIV3Schema for the Postgres CRD
|
||||
|
||||
* **Description**: at present the operator validates a database manifest on its own.
|
||||
It will be helpful to reject erroneous manifests before they reach the operator using the [native Kubernetes CRD validation](https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/#validation). It is up to the student to decide whether to write the schema manually or to adopt existing [schema generator developed for the Prometheus project](https://github.com/ant31/crd-validation).
|
||||
* **Recommended skills**: golang, JSON schema
|
||||
* **Difficulty**: medium
|
||||
* **Mentor(s)**: Sergey Dudoladov [@sdudoladov](https://github.com/sdudoladov)
|
||||
* **Issue**: [#388](https://github.com/zalando/postgres-operator/issues/388)
|
||||
|
||||
### Design a solution for the local testing of the operator
|
||||
|
||||
* **Description**: The current way of testing is to run minikube, either manually or with some tooling around it like `/run-operator_locally.sh` or Vagrant. This has at least three problems:
|
||||
First, minikube is a single node cluster, so it is unsuitable for testing vital functions such as pod migration between nodes. Second, minikube starts slowly; that prolongs local testing.
|
||||
Third, every contributor needs to come up with their own solution for local testing. The task is to come up with a better option which will enable us to conveniently and uniformly run e2e tests locally / potentially in Travis CI.
|
||||
A promising option is the Kubernetes own [kind](https://github.com/kubernetes-sigs/kind)
|
||||
* **Recommended skills**: Docker, shell scripting, basic Kubernetes abstractions
|
||||
* **Difficulty**: medium to hard depending on the selected desing
|
||||
* **Mentor(s)**: Dmitry Dolgov [@erthalion](https://github.com/erthalion), Sergey Dudoladov [@sdudoladov](https://github.com/sdudoladov)
|
||||
* **Issue**: [#475](https://github.com/zalando/postgres-operator/issues/475)
|
||||
|
||||
### Detach a Postgres cluster from the operator for maintenance
|
||||
|
||||
* **Description**: sometimes a Postgres cluster requires manual maintenance. During such maintenance the operator should ignore all the changes manually applied to the cluster.
|
||||
Currently the only way to achieve this behavior is to shutdown the operator altogether, for instance by scaling down the operator's own deployment to zero pods. That approach evidently affects all Postgres databases under the operator control and thus is highly undesirable in production Kubernetes clusters. It would be much better to be able to detach only the desired Postgres cluster from the operator for the time being and re-attach it again after maintenance.
|
||||
* **Recommended skills**: golang, architecture of a Kubernetes operator
|
||||
* **Difficulty**: hard - requires significant modification of the operator's internals and careful consideration of the corner cases.
|
||||
* **Mentor(s)**: Dmitry Dolgov [@erthalion](https://github.com/erthalion), Sergey Dudoladov [@sdudoladov](https://github.com/sdudoladov)
|
||||
* **Issue**: [#421](https://github.com/zalando/postgres-operator/issues/421)
|
||||
|
||||
### Propose your own idea
|
||||
|
||||
Feel free to come up with your own ideas. For inspiration,
|
||||
see [our bug tracker](https://github.com/zalando/postgres-operator/issues),
|
||||
the [official `CustomResouceDefinition` docs](https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/)
|
||||
and [other operators](https://github.com/operator-framework/awesome-operators).
|
||||
|
|
@ -34,8 +34,8 @@ Postgres cluster. This can work in two ways: via a ConfigMap or a custom
The Postgres Operator can be deployed in the following ways:

* Manual deployment
* Kustomization
* Helm chart
* Operator Lifecycle Manager (OLM)

### Manual deployment setup

@ -91,20 +91,6 @@ The chart works with both Helm 2 and Helm 3. The `crd-install` hook from v2 will
be skipped with warning when using v3. Documentation for installing applications
with Helm 2 can be found in the [v2 docs](https://v2.helm.sh/docs/).

### Operator Lifecycle Manager (OLM)

The [Operator Lifecycle Manager (OLM)](https://github.com/operator-framework/operator-lifecycle-manager)
has been designed to facilitate management of K8s operators. It has to be
installed in your K8s environment. When OLM is set up simply download and deploy
the Postgres Operator with the following command:

```bash
kubectl create -f https://operatorhub.io/install/postgres-operator.yaml
```

This installs the operator in the `operators` namespace. More information can be
found on [operatorhub.io](https://operatorhub.io/operator/postgres-operator).

## Check if Postgres Operator is running

Starting the operator may take a few seconds. Check if the operator pod is

@ -142,6 +128,9 @@ To deploy the UI simply apply all its manifests files or use the UI helm chart:
# manual deployment
kubectl apply -f ui/manifests/

# or kustomization
kubectl apply -k github.com/zalando/postgres-operator/ui/manifests

# or helm chart
helm install postgres-operator-ui ./charts/postgres-operator-ui
```
||||
|
|
|
|||
|
|
@ -338,13 +338,13 @@ archive is supported.
  the url to S3 bucket containing the WAL archive of the remote primary.
  Required when the `standby` section is present.

## EBS volume resizing
## Volume properties

Those parameters are grouped under the `volume` top-level key and define the
properties of the persistent storage that stores Postgres data.

* **size**
  the size of the target EBS volume. Usual Kubernetes size modifiers, i.e. `Gi`
  the size of the target volume. Usual Kubernetes size modifiers, i.e. `Gi`
  or `Mi`, apply. Required.

* **storageClass**

@ -356,6 +356,14 @@ properties of the persistent storage that stores Postgres data.
* **subPath**
  Subpath to use when mounting volume into Spilo container. Optional.

* **iops**
  When running the operator on AWS the latest generation of EBS volumes (`gp3`)
  allows for configuring the number of IOPS. Maximum is 16000. Optional.

* **throughput**
  When running the operator on AWS the latest generation of EBS volumes (`gp3`)
  allows for configuring the throughput in MB/s. Maximum is 1000. Optional.
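As an illustrative fragment (all values below are placeholders), a `volume` section combining these keys could look like:

```yaml
volume:
  size: 50Gi          # required; usual Kubernetes size modifiers apply
  storageClass: gp3   # placeholder storage class name
  iops: 4000          # gp3 volumes only, up to 16000
  throughput: 250     # gp3 volumes only, in MB/s, up to 1000
  # subPath: pgdata   # optional sub-path inside the Spilo container
```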
## Sidecar definitions

Those parameters are defined under the `sidecars` key. They consist of a list
@ -80,7 +80,7 @@ Those are top-level keys, containing both leaf keys and groups.
  The default is `false`.

* **enable_pgversion_env_var**
  With newer versions of Spilo, it is preferable to use `PGVERSION` pod environment variable instead of the setting `postgresql.bin_dir` in the `SPILO_CONFIGURATION` env variable. When this option is true, the operator sets `PGVERSION` and omits `postgresql.bin_dir` from `SPILO_CONFIGURATION`. When false, the `postgresql.bin_dir` is set. This setting takes precedence over `PGVERSION`; see PR 222 in Spilo. The default is `false`.
  With newer versions of Spilo, it is preferable to use `PGVERSION` pod environment variable instead of the setting `postgresql.bin_dir` in the `SPILO_CONFIGURATION` env variable. When this option is true, the operator sets `PGVERSION` and omits `postgresql.bin_dir` from `SPILO_CONFIGURATION`. When false, the `postgresql.bin_dir` is set. This setting takes precedence over `PGVERSION`; see PR 222 in Spilo. The default is `true`.

* **enable_spilo_wal_path_compat**
  enables backwards compatible path between Spilo 12 and Spilo 13 images. The default is `false`.

@ -126,7 +126,7 @@ Those are top-level keys, containing both leaf keys and groups.

* **workers**
  number of working routines the operator spawns to process requests to
  create/update/delete/sync clusters concurrently. The default is `4`.
  create/update/delete/sync clusters concurrently. The default is `8`.

* **max_instances**
  operator will cap the number of instances in any managed Postgres cluster up

@ -351,6 +351,12 @@ configuration they are grouped under the `kubernetes` key.
  used for AWS volume resizing and not required if you don't need that
  capability. The default is `false`.

* **additional_pod_capabilities**
  list of additional capabilities to be added to the postgres container's
  SecurityContext (e.g. SYS_NICE etc.). Please, make sure first that the
  PodSecurityPolicy allows the capabilities listed here. Otherwise, the
  container will not start. The default is empty.

* **master_pod_move_timeout**
  The period of time to wait for the success of migration of master pods from
  an unschedulable node. The migration includes Patroni switchovers to
@ -373,10 +379,13 @@ configuration they are grouped under the `kubernetes` key.
  possible value is `parallel`.

* **storage_resize_mode**
  defines how operator handels the difference between requested volume size and
  actual size. Available options are: ebs - tries to resize EBS volume, pvc -
  changes PVC definition, off - disables resize of the volumes. Default is "ebs".
  When using OpenShift please use one of the other available options.
  defines how the operator handles the difference between the requested volume
  size and the actual size. Available options are:
  1. `ebs`: operator resizes EBS volumes directly and executes `resizefs` within a pod
  2. `pvc`: operator only changes the PVC definition
  3. `off`: disables resize of the volumes
  4. `mixed`: operator uses the AWS API to adjust size, throughput, and IOPS, and calls a PVC change for the file system resize
  Default is "pvc". A configuration sketch is shown below.
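A minimal sketch, assuming the CRD-based configuration where this option lives under the `kubernetes` group as described above:

```yaml
configuration:
  kubernetes:
    # 'mixed' resizes via the AWS API (size, throughput, IOPS) and
    # additionally patches the PVC so the file system gets resized
    storage_resize_mode: mixed
```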
## Kubernetes resource requests
@ -551,44 +560,47 @@ These parameters configure a K8s cron job managed by the operator to produce
|
|||
Postgres logical backups. In the CRD-based configuration those parameters are
|
||||
grouped under the `logical_backup` key.
|
||||
|
||||
* **logical_backup_schedule**
|
||||
Backup schedule in the cron format. Please take the
|
||||
[reference schedule format](https://kubernetes.io/docs/tasks/job/automated-tasks-with-cron-jobs/#schedule)
|
||||
into account. Default: "30 00 \* \* \*"
|
||||
|
||||
* **logical_backup_docker_image**
|
||||
An image for pods of the logical backup job. The [example image](../../docker/logical-backup/Dockerfile)
|
||||
runs `pg_dumpall` on a replica if possible and uploads compressed results to
|
||||
an S3 bucket under the key `/spilo/pg_cluster_name/cluster_k8s_uuid/logical_backups`.
|
||||
The default image is the same image built with the Zalando-internal CI
|
||||
pipeline. Default: "registry.opensource.zalan.do/acid/logical-backup"
|
||||
pipeline. Default: "registry.opensource.zalan.do/acid/logical-backup:v1.6.0"
|
||||
|
||||
* **logical_backup_google_application_credentials**
|
||||
Specifies the path of the google cloud service account json file. Default is empty.
|
||||
|
||||
* **logical_backup_job_prefix**
|
||||
The prefix to be prepended to the name of a k8s CronJob running the backups. Beware the prefix counts towards the name length restrictions imposed by k8s. Empty string is a legitimate value. Operator does not do the actual renaming: It simply creates the job with the new prefix. You will have to delete the old cron job manually. Default: "logical-backup-".
|
||||
|
||||
* **logical_backup_provider**
|
||||
Specifies the storage provider to which the backup should be uploaded (`s3` or `gcs`).
|
||||
Default: "s3"
|
||||
|
||||
* **logical_backup_s3_access_key_id**
|
||||
When set, value will be in AWS_ACCESS_KEY_ID env variable. The Default is empty.
|
||||
|
||||
* **logical_backup_s3_bucket**
|
||||
S3 bucket to store backup results. The bucket has to be present and
|
||||
accessible by Postgres pods. Default: empty.
|
||||
|
||||
* **logical_backup_s3_endpoint**
|
||||
When using non-AWS S3 storage, endpoint can be set as a ENV variable. The default is empty.
|
||||
|
||||
* **logical_backup_s3_region**
|
||||
Specifies the region of the bucket which is required with some non-AWS S3 storage services. The default is empty.
|
||||
|
||||
* **logical_backup_s3_endpoint**
|
||||
When using non-AWS S3 storage, the endpoint can be set as an ENV variable. The default is empty.
|
||||
* **logical_backup_s3_secret_access_key**
|
||||
When set, value will be in AWS_SECRET_ACCESS_KEY env variable. The Default is empty.
|
||||
|
||||
* **logical_backup_s3_sse**
|
||||
Specify server side encryption that S3 storage is using. If empty string
|
||||
is specified, no argument will be passed to `aws s3` command. Default: "AES256".
|
||||
|
||||
* **logical_backup_s3_access_key_id**
|
||||
When set, value will be in AWS_ACCESS_KEY_ID env variable. The Default is empty.
|
||||
|
||||
* **logical_backup_s3_secret_access_key**
|
||||
When set, value will be in AWS_SECRET_ACCESS_KEY env variable. The Default is empty.
|
||||
|
||||
* **logical_backup_google_application_credentials**
|
||||
Specifies the path of the google cloud service account json file. Default is empty.
|
||||
* **logical_backup_schedule**
|
||||
Backup schedule in the cron format. Please take the
|
||||
[reference schedule format](https://kubernetes.io/docs/tasks/job/automated-tasks-with-cron-jobs/#schedule)
|
||||
into account. Default: "30 00 \* \* \*"
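For example, a sketch of a custom schedule in the ConfigMap-based configuration
that runs the backup job daily at 05:00 instead of the default 00:30 (standard
five-field cron syntax):

```yaml
data:
  logical_backup_schedule: "00 05 * * *"
```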
## Debugging the operator
|
||||
|
||||
|
|
|
|||
166
docs/user.md
166
docs/user.md
|
|
@ -71,26 +71,26 @@ kubectl describe postgresql acid-minimal-cluster
|
|||
## Connect to PostgreSQL
|
||||
|
||||
With a `port-forward` on one of the database pods (e.g. the master) you can
|
||||
connect to the PostgreSQL database. Use labels to filter for the master pod of
|
||||
our test cluster.
|
||||
connect to the PostgreSQL database from your machine. Use labels to filter for
|
||||
the master pod of our test cluster.
|
||||
|
||||
```bash
|
||||
# get name of master pod of acid-minimal-cluster
|
||||
export PGMASTER=$(kubectl get pods -o jsonpath={.items..metadata.name} -l application=spilo,cluster-name=acid-minimal-cluster,spilo-role=master)
|
||||
export PGMASTER=$(kubectl get pods -o jsonpath={.items..metadata.name} -l application=spilo,cluster-name=acid-minimal-cluster,spilo-role=master -n default)
|
||||
|
||||
# set up port forward
|
||||
kubectl port-forward $PGMASTER 6432:5432
|
||||
kubectl port-forward $PGMASTER 6432:5432 -n default
|
||||
```
|
||||
|
||||
Open another CLI and connect to the database. Use the generated secret of the
|
||||
`postgres` robot user to connect to our `acid-minimal-cluster` master running
|
||||
in Minikube. As non-encrypted connections are rejected by default set the SSL
|
||||
mode to require:
|
||||
Open another CLI and connect to the database using e.g. the psql client.
|
||||
When connecting with the `postgres` user, read its password from the K8s secret
that was generated when creating the `acid-minimal-cluster`. As non-encrypted
connections are rejected by default, set the SSL mode to `require`:
|
||||
|
||||
```bash
|
||||
export PGPASSWORD=$(kubectl get secret postgres.acid-minimal-cluster.credentials -o 'jsonpath={.data.password}' | base64 -d)
|
||||
export PGSSLMODE=require
|
||||
psql -U postgres -p 6432
|
||||
psql -U postgres -h localhost -p 6432
|
||||
```
|
||||
|
||||
## Defining database roles in the operator
|
||||
|
|
@ -275,9 +275,18 @@ Postgres clusters are associated with one team by providing the `teamID` in
|
|||
the manifest. Additional superuser teams can be configured as mentioned in
|
||||
the previous paragraph. However, this is a global setting. To assign
|
||||
additional teams, superuser teams and single users to clusters of a given
|
||||
team, use the [PostgresTeam CRD](../manifests/postgresteam.yaml). It provides
|
||||
a simple mapping structure.
|
||||
team, use the [PostgresTeam CRD](../manifests/postgresteam.yaml).
|
||||
|
||||
Note, by default the `PostgresTeam` support is disabled in the configuration.
Switch the `enable_postgres_team_crd` flag to `true` and the operator will start to
watch for this CRD. Make sure the cluster role is up to date and contains a
section for [PostgresTeam](../manifests/operator-service-account-rbac.yaml#L30).
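A minimal sketch of enabling it in the ConfigMap-based configuration (the
CRD-based configuration carries the equivalent flags under the `teams_api` key):

```yaml
data:
  # let the operator watch PostgresTeam resources
  enable_postgres_team_crd: "true"
  # optionally also honor additionalSuperuserTeams mappings
  # enable_postgres_team_crd_superusers: "true"
```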
#### Additional teams
|
||||
|
||||
To assign additional teams and single users to clusters of a given team,
|
||||
define a mapping with the `PostgresTeam` Kubernetes resource. The Postgres
|
||||
Operator will read such team mappings each time it syncs all Postgres clusters.
|
||||
|
||||
```yaml
|
||||
apiVersion: "acid.zalan.do/v1"
|
||||
|
|
@ -285,55 +294,118 @@ kind: PostgresTeam
|
|||
metadata:
|
||||
name: custom-team-membership
|
||||
spec:
|
||||
additionalSuperuserTeams:
|
||||
acid:
|
||||
- "postgres_superusers"
|
||||
additionalTeams:
|
||||
acid: []
|
||||
additionalMembers:
|
||||
acid:
|
||||
- "elephant"
|
||||
a-team:
|
||||
- "b-team"
|
||||
```
|
||||
|
||||
One `PostgresTeam` resource could contain mappings of multiple teams but you
|
||||
can choose to create separate CRDs, alternatively. On each CRD creation or
|
||||
update the operator will gather all mappings to create additional human users
|
||||
in databases the next time they are synced. Additional teams are resolved
|
||||
transitively, meaning you will also add users for their `additionalTeams`
|
||||
or (not and) `additionalSuperuserTeams`.
|
||||
With the example above the operator will create login roles for all members
|
||||
of `b-team` in every cluster owned by `a-team`. It's possible to do vice versa
|
||||
for clusters of `b-team` in one manifest:
|
||||
|
||||
For each additional team the Teams API would be queried. Additional members
|
||||
will be added either way. There can be "virtual teams" that do not exists in
|
||||
your Teams API but users of associated teams as well as members will get
|
||||
created. With `PostgresTeams` it's also easy to cover team name changes. Just
|
||||
add the mapping between old and new team name and the rest can stay the same.
|
||||
```yaml
|
||||
spec:
|
||||
additionalTeams:
|
||||
a-team:
|
||||
- "b-team"
|
||||
b-team:
|
||||
- "a-team"
|
||||
```
|
||||
|
||||
You see, the `PostgresTeam` CRD is a global team mapping and independent from
|
||||
the Postgres manifests. It is possible to define multiple mappings, even with
|
||||
redundant content - the Postgres operator will create one internal cache from
|
||||
it. Additional teams are resolved transitively, meaning you will also add
|
||||
users for their `additionalTeams`, e.g.:
|
||||
|
||||
```yaml
|
||||
spec:
|
||||
additionalTeams:
|
||||
a-team:
|
||||
- "b-team"
|
||||
- "c-team"
|
||||
b-team:
|
||||
- "a-team"
|
||||
```
|
||||
|
||||
This creates roles for members of the `c-team` team not only in all clusters
owned by `a-team`, but also in clusters owned by `b-team`, as `a-team` is
an `additionalTeam` to `b-team`.
|
||||
|
||||
Note, you can also define `additionalSuperuserTeams` in the `PostgresTeam`
manifest. By default, this option is disabled and must be enabled with
`enable_postgres_team_crd_superusers` to make it work.
|
||||
|
||||
#### Virtual teams
|
||||
|
||||
There can be "virtual teams" that do not exist in the Teams API. It can make
|
||||
it easier to map a group of teams to many other teams:
|
||||
|
||||
```yaml
|
||||
spec:
|
||||
additionalTeams:
|
||||
a-team:
|
||||
- "virtual-team"
|
||||
b-team:
|
||||
- "virtual-team"
|
||||
virtual-team:
|
||||
- "c-team"
|
||||
- "d-team"
|
||||
```
|
||||
|
||||
This example would create roles for members of `c-team` and `d-team` plus
|
||||
additional `virtual-team` members in clusters owned by `a-team` or `b-team`.
|
||||
|
||||
#### Teams changing their names
|
||||
|
||||
With `PostgresTeams` it is also easy to cover team name changes. Just add
|
||||
the mapping between old and new team name and the rest can stay the same.
|
||||
E.g. if team `a-team` was renamed to `f-team` in the teams API, this
could be reflected in a `PostgresTeam` mapping with just two lines:
|
||||
|
||||
```yaml
|
||||
spec:
|
||||
additionalTeams:
|
||||
a-team:
|
||||
- "f-team"
|
||||
```
|
||||
|
||||
This is helpful because Postgres cluster names are immutable and cannot
be changed. Only via cloning can a cluster get a different name starting with the
new `teamID`.
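As a hedged sketch of such a rename-via-clone, assuming hypothetical cluster
names and omitting the other required manifest fields (volume, number of
instances, Postgres version), the new cluster could be created like this:

```yaml
apiVersion: "acid.zalan.do/v1"
kind: postgresql
metadata:
  name: f-team-minimal-cluster  # new name starts with the new teamID
spec:
  teamId: "f-team"
  clone:
    cluster: "a-team-minimal-cluster"  # existing cluster to clone from
```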
#### Additional members
|
||||
|
||||
Single members might be excluded from teams although they continue to work
|
||||
with the same people. However, the teams API would not reflect this anymore.
|
||||
To still add a database role for former team members, list their role under
the `additionalMembers` section of the `PostgresTeam` resource:
|
||||
|
||||
```yaml
|
||||
apiVersion: "acid.zalan.do/v1"
|
||||
kind: PostgresTeam
|
||||
metadata:
|
||||
name: virtualteam-membership
|
||||
name: custom-team-membership
|
||||
spec:
|
||||
additionalSuperuserTeams:
|
||||
acid:
|
||||
- "virtual_superusers"
|
||||
virtual_superusers:
|
||||
- "real_teamA"
|
||||
- "real_teamB"
|
||||
real_teamA:
|
||||
- "real_teamA_renamed"
|
||||
additionalTeams:
|
||||
real_teamA:
|
||||
- "real_teamA_renamed"
|
||||
additionalMembers:
|
||||
virtual_superusers:
|
||||
- "foo"
|
||||
a-team:
|
||||
- "tia"
|
||||
```
|
||||
|
||||
Note, by default the `PostgresTeam` support is disabled in the configuration.
|
||||
Switch `enable_postgres_team_crd` flag to `true` and the operator will start to
|
||||
watch for this CRD. Make sure, the cluster role is up to date and contains a
|
||||
section for [PostgresTeam](../manifests/operator-service-account-rbac.yaml#L30).
|
||||
This will create the login role `tia` in every cluster owned by `a-team`.
|
||||
The user can connect to databases like the other team members.
|
||||
|
||||
The `additionalMembers` map can also be used to define users of virtual
|
||||
teams, e.g. for `virtual-team` we used above:
|
||||
|
||||
```yaml
|
||||
spec:
|
||||
additionalMembers:
|
||||
virtual-team:
|
||||
- "flynch"
|
||||
- "rdecker"
|
||||
- "briggs"
|
||||
```
|
||||
|
||||
## Prepared databases with roles and default privileges
|
||||
|
||||
|
|
|
|||
|
|
@ -44,7 +44,7 @@ To run the end 2 end test and keep the kind state execute:
|
|||
NOCLEANUP=True ./run.sh main
|
||||
```
|
||||
|
||||
## Run indidual test
|
||||
## Run individual test
|
||||
|
||||
After a normal E2E run with `NOCLEANUP=True`, the Kind cluster keeps running, allowing for subsequent test runs.
|
||||
|
||||
|
|
|
|||
|
|
@ -8,7 +8,7 @@ IFS=$'\n\t'
|
|||
|
||||
readonly cluster_name="postgres-operator-e2e-tests"
|
||||
readonly kubeconfig_path="/tmp/kind-config-${cluster_name}"
|
||||
readonly spilo_image="registry.opensource.zalan.do/acid/spilo-12:1.6-p5"
|
||||
readonly spilo_image="registry.opensource.zalan.do/acid/spilo-13-e2e:0.3"
|
||||
readonly e2e_test_runner_image="registry.opensource.zalan.do/acid/postgres-operator-e2e-tests-runner:0.3"
|
||||
|
||||
export GOPATH=${GOPATH-~/go}
|
||||
|
|
|
|||
|
|
@ -182,6 +182,10 @@ class K8s:
|
|||
pods = self.api.core_v1.list_namespaced_pod(namespace, label_selector=labels).items
|
||||
return len(list(filter(lambda x: x.status.phase == 'Running', pods)))
|
||||
|
||||
def count_pods_with_container_capabilities(self, capabilities, labels, namespace='default'):
|
||||
pods = self.api.core_v1.list_namespaced_pod(namespace, label_selector=labels).items
|
||||
return len(list(filter(lambda x: x.spec.containers[0].security_context.capabilities.add == capabilities, pods)))
|
||||
|
||||
def wait_for_pod_failover(self, failover_targets, labels, namespace='default'):
|
||||
pod_phase = 'Failing over'
|
||||
new_pod_node = ''
|
||||
|
|
@ -433,6 +437,10 @@ class K8sBase:
|
|||
pods = self.api.core_v1.list_namespaced_pod(namespace, label_selector=labels).items
|
||||
return len(list(filter(lambda x: x.status.phase == 'Running', pods)))
|
||||
|
||||
def count_pods_with_container_capabilities(self, capabilities, labels, namespace='default'):
|
||||
pods = self.api.core_v1.list_namespaced_pod(namespace, label_selector=labels).items
|
||||
return len(list(filter(lambda x: x.spec.containers[0].security_context.capabilities.add == capabilities, pods)))
|
||||
|
||||
def wait_for_pod_failover(self, failover_targets, labels, namespace='default'):
|
||||
pod_phase = 'Failing over'
|
||||
new_pod_node = ''
|
||||
|
|
|
|||
|
|
@ -11,8 +11,8 @@ from kubernetes import client
|
|||
from tests.k8s_api import K8s
|
||||
from kubernetes.client.rest import ApiException
|
||||
|
||||
SPILO_CURRENT = "registry.opensource.zalan.do/acid/spilo-12:1.6-p5"
|
||||
SPILO_LAZY = "registry.opensource.zalan.do/acid/spilo-cdp-12:1.6-p114"
|
||||
SPILO_CURRENT = "registry.opensource.zalan.do/acid/spilo-13-e2e:0.3"
|
||||
SPILO_LAZY = "registry.opensource.zalan.do/acid/spilo-13-e2e:0.4"
|
||||
|
||||
|
||||
def to_selector(labels):
|
||||
|
|
@ -112,6 +112,7 @@ class EndToEndTestCase(unittest.TestCase):
|
|||
with open("manifests/configmap.yaml", 'r+') as f:
|
||||
configmap = yaml.safe_load(f)
|
||||
configmap["data"]["workers"] = "1"
|
||||
configmap["data"]["docker_image"] = SPILO_CURRENT
|
||||
|
||||
with open("manifests/configmap.yaml", 'w') as f:
|
||||
yaml.dump(configmap, f, Dumper=yaml.Dumper)
|
||||
|
|
@ -121,7 +122,6 @@ class EndToEndTestCase(unittest.TestCase):
|
|||
"operatorconfiguration.crd.yaml",
|
||||
"postgresteam.crd.yaml",
|
||||
"configmap.yaml",
|
||||
"postgresql-operator-default-configuration.yaml",
|
||||
"postgres-operator.yaml",
|
||||
"api-service.yaml",
|
||||
"infrastructure-roles.yaml",
|
||||
|
|
@ -155,12 +155,31 @@ class EndToEndTestCase(unittest.TestCase):
|
|||
print('Operator log: {}'.format(k8s.get_operator_log()))
|
||||
raise
|
||||
|
||||
@timeout_decorator.timeout(TEST_TIMEOUT_SEC)
|
||||
def test_additional_pod_capabilities(self):
|
||||
'''
|
||||
Extend postgres container capabilities
|
||||
'''
|
||||
cluster_label = 'application=spilo,cluster-name=acid-minimal-cluster'
|
||||
capabilities = ["SYS_NICE","CHOWN"]
|
||||
patch_capabilities = {
|
||||
"data": {
|
||||
"additional_pod_capabilities": ','.join(capabilities),
|
||||
},
|
||||
}
|
||||
self.k8s.update_config(patch_capabilities)
|
||||
self.eventuallyEqual(lambda: self.k8s.get_operator_state(), {"0": "idle"},
|
||||
"Operator does not get in sync")
|
||||
|
||||
self.eventuallyEqual(lambda: self.k8s.count_pods_with_container_capabilities(capabilities, cluster_label),
|
||||
2, "Container capabilities not updated")
|
||||
|
||||
@timeout_decorator.timeout(TEST_TIMEOUT_SEC)
|
||||
def test_overwrite_pooler_deployment(self):
|
||||
self.k8s.create_with_kubectl("manifests/minimal-fake-pooler-deployment.yaml")
|
||||
self.eventuallyEqual(lambda: self.k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
|
||||
self.eventuallyEqual(lambda: self.k8s.get_deployment_replica_count(name="acid-minimal-cluster-pooler"), 1,
|
||||
"Initial broken deplyment not rolled out")
|
||||
"Initial broken deployment not rolled out")
|
||||
|
||||
self.k8s.api.custom_objects_api.patch_namespaced_custom_object(
|
||||
'acid.zalan.do', 'v1', 'default',
|
||||
|
|
@ -221,6 +240,8 @@ class EndToEndTestCase(unittest.TestCase):
|
|||
self.eventuallyEqual(lambda: k8s.count_services_with_label(
|
||||
'application=db-connection-pooler,cluster-name=acid-minimal-cluster'),
|
||||
2, "No pooler service found")
|
||||
self.eventuallyEqual(lambda: k8s.count_secrets_with_label('application=db-connection-pooler,cluster-name=acid-minimal-cluster'),
|
||||
1, "Pooler secret not created")
|
||||
|
||||
# Turn off only master connection pooler
|
||||
k8s.api.custom_objects_api.patch_namespaced_custom_object(
|
||||
|
|
@ -246,6 +267,8 @@ class EndToEndTestCase(unittest.TestCase):
|
|||
self.eventuallyEqual(lambda: k8s.count_services_with_label(
|
||||
'application=db-connection-pooler,cluster-name=acid-minimal-cluster'),
|
||||
1, "No pooler service found")
|
||||
self.eventuallyEqual(lambda: k8s.count_secrets_with_label('application=db-connection-pooler,cluster-name=acid-minimal-cluster'),
|
||||
1, "Secret not created")
|
||||
|
||||
# Turn off only replica connection pooler
|
||||
k8s.api.custom_objects_api.patch_namespaced_custom_object(
|
||||
|
|
@ -268,6 +291,8 @@ class EndToEndTestCase(unittest.TestCase):
|
|||
0, "Pooler replica pods not deleted")
|
||||
self.eventuallyEqual(lambda: k8s.count_services_with_label('application=db-connection-pooler,cluster-name=acid-minimal-cluster'),
|
||||
1, "No pooler service found")
|
||||
self.eventuallyEqual(lambda: k8s.count_secrets_with_label('application=db-connection-pooler,cluster-name=acid-minimal-cluster'),
|
||||
1, "Secret not created")
|
||||
|
||||
# scale up connection pooler deployment
|
||||
k8s.api.custom_objects_api.patch_namespaced_custom_object(
|
||||
|
|
@ -301,6 +326,8 @@ class EndToEndTestCase(unittest.TestCase):
|
|||
0, "Pooler pods not scaled down")
|
||||
self.eventuallyEqual(lambda: k8s.count_services_with_label('application=db-connection-pooler,cluster-name=acid-minimal-cluster'),
|
||||
0, "Pooler service not removed")
|
||||
self.eventuallyEqual(lambda: k8s.count_secrets_with_label('application=spilo,cluster-name=acid-minimal-cluster'),
|
||||
4, "Secrets not deleted")
|
||||
|
||||
# Verify that all the databases have pooler schema installed.
|
||||
# Do this via psql, since otherwise we need to deal with
|
||||
|
|
@ -1034,7 +1061,7 @@ class EndToEndTestCase(unittest.TestCase):
|
|||
except timeout_decorator.TimeoutError:
|
||||
print('Operator log: {}'.format(k8s.get_operator_log()))
|
||||
raise
|
||||
|
||||
|
||||
@timeout_decorator.timeout(TEST_TIMEOUT_SEC)
|
||||
def test_zzzz_cluster_deletion(self):
|
||||
'''
|
||||
|
|
|
|||
2
go.mod
2
go.mod
|
|
@ -3,7 +3,7 @@ module github.com/zalando/postgres-operator
|
|||
go 1.15
|
||||
|
||||
require (
|
||||
github.com/aws/aws-sdk-go v1.36.3
|
||||
github.com/aws/aws-sdk-go v1.36.29
|
||||
github.com/golang/mock v1.4.4
|
||||
github.com/lib/pq v1.9.0
|
||||
github.com/motomux/pretty v0.0.0-20161209205251-b2aad2c9a95d
|
||||
|
|
|
|||
4
go.sum
4
go.sum
|
|
@ -45,8 +45,8 @@ github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5
|
|||
github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
|
||||
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a h1:idn718Q4B6AGu/h5Sxe66HYVdqdGu2l9Iebqhi/AEoA=
|
||||
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
|
||||
github.com/aws/aws-sdk-go v1.36.3 h1:KYpG5OegwW3xgOsMxy01nj/Td281yxi1Ha2lJQJs4tI=
|
||||
github.com/aws/aws-sdk-go v1.36.3/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
|
||||
github.com/aws/aws-sdk-go v1.36.29 h1:lM1G3AF1+7vzFm0n7hfH8r2+750BTo+6Lo6FtPB7kzk=
|
||||
github.com/aws/aws-sdk-go v1.36.29/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
|
||||
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
||||
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
|
||||
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
||||
|
|
|
|||
|
|
@ -9,7 +9,7 @@ metadata:
|
|||
# "delete-date": "2020-08-31" # can only be deleted on that day if "delete-date "key is configured
|
||||
# "delete-clustername": "acid-test-cluster" # can only be deleted when name matches if "delete-clustername" key is configured
|
||||
spec:
|
||||
dockerImage: registry.opensource.zalan.do/acid/spilo-12:1.6-p3
|
||||
dockerImage: registry.opensource.zalan.do/acid/spilo-13:2.0-p2
|
||||
teamId: "acid"
|
||||
numberOfInstances: 2
|
||||
users: # Application/Robot users
|
||||
|
|
@ -36,7 +36,7 @@ spec:
|
|||
defaultRoles: true
|
||||
defaultUsers: false
|
||||
postgresql:
|
||||
version: "12"
|
||||
version: "13"
|
||||
parameters: # Expert section
|
||||
shared_buffers: "32MB"
|
||||
max_connections: "10"
|
||||
|
|
@ -44,6 +44,8 @@ spec:
|
|||
volume:
|
||||
size: 1Gi
|
||||
# storageClass: my-sc
|
||||
# iops: 1000 # for EBS gp3
|
||||
# throughput: 250 # in MB/s for EBS gp3
|
||||
additionalVolumes:
|
||||
- name: empty
|
||||
mountPath: /opt/empty
|
||||
|
|
@ -93,9 +95,9 @@ spec:
|
|||
encoding: "UTF8"
|
||||
locale: "en_US.UTF-8"
|
||||
data-checksums: "true"
|
||||
pg_hba:
|
||||
- hostssl all all 0.0.0.0/0 md5
|
||||
- host all all 0.0.0.0/0 md5
|
||||
# pg_hba:
|
||||
# - hostssl all all 0.0.0.0/0 md5
|
||||
# - host all all 0.0.0.0/0 md5
|
||||
# slots:
|
||||
# permanent_physical_1:
|
||||
# type: physical
|
||||
|
|
|
|||
|
|
@ -3,6 +3,7 @@ kind: ConfigMap
|
|||
metadata:
|
||||
name: postgres-operator
|
||||
data:
|
||||
# additional_pod_capabilities: "SYS_NICE"
|
||||
# additional_secret_mount: "some-secret-name"
|
||||
# additional_secret_mount_path: "/some/dir"
|
||||
api_port: "8080"
|
||||
|
|
@ -31,7 +32,7 @@ data:
|
|||
# default_memory_request: 100Mi
|
||||
# delete_annotation_date_key: delete-date
|
||||
# delete_annotation_name_key: delete-clustername
|
||||
docker_image: registry.opensource.zalan.do/acid/spilo-12:1.6-p5
|
||||
docker_image: registry.opensource.zalan.do/acid/spilo-13:2.0-p2
|
||||
# downscaler_annotations: "deployment-time,downscaler/*"
|
||||
# enable_admin_role_for_users: "true"
|
||||
# enable_crd_validation: "true"
|
||||
|
|
@ -41,16 +42,15 @@ data:
|
|||
# enable_init_containers: "true"
|
||||
# enable_lazy_spilo_upgrade: "false"
|
||||
enable_master_load_balancer: "false"
|
||||
# enable_pgversion_env_var: "false"
|
||||
enable_pgversion_env_var: "true"
|
||||
# enable_pod_antiaffinity: "false"
|
||||
# enable_pod_disruption_budget: "true"
|
||||
# enable_postgres_team_crd: "false"
|
||||
# enable_postgres_team_crd_superusers: "false"
|
||||
enable_replica_load_balancer: "false"
|
||||
# enable_shm_volume: "true"
|
||||
# enable_pgversion_env_var: "false"
|
||||
# enable_sidecars: "true"
|
||||
enable_spilo_wal_path_compat: "false"
|
||||
enable_spilo_wal_path_compat: "true"
|
||||
# enable_team_superuser: "false"
|
||||
enable_teams_api: "false"
|
||||
# etcd_host: ""
|
||||
|
|
@ -63,7 +63,10 @@ data:
|
|||
# inherited_labels: application,environment
|
||||
# kube_iam_role: ""
|
||||
# log_s3_bucket: ""
|
||||
logical_backup_docker_image: "registry.opensource.zalan.do/acid/logical-backup"
|
||||
logical_backup_docker_image: "registry.opensource.zalan.do/acid/logical-backup:v1.6.0"
|
||||
# logical_backup_google_application_credentials: ""
|
||||
logical_backup_job_prefix: "logical-backup-"
|
||||
logical_backup_provider: "s3"
|
||||
# logical_backup_s3_access_key_id: ""
|
||||
logical_backup_s3_bucket: "my-bucket-url"
|
||||
# logical_backup_s3_region: ""
|
||||
|
|
@ -122,4 +125,4 @@ data:
|
|||
# wal_gs_bucket: ""
|
||||
# wal_s3_bucket: ""
|
||||
watched_namespace: "*" # listen to all namespaces
|
||||
workers: "8"
|
||||
workers: "16"
|
||||
|
|
|
|||
|
|
@ -18,4 +18,4 @@ spec:
|
|||
preparedDatabases:
|
||||
bar: {}
|
||||
postgresql:
|
||||
version: "12"
|
||||
version: "13"
|
||||
|
|
|
|||
|
|
@ -203,15 +203,15 @@ rules:
|
|||
verbs:
|
||||
- get
|
||||
- create
|
||||
# to grant privilege to run privileged pods
|
||||
- apiGroups:
|
||||
- extensions
|
||||
resources:
|
||||
- podsecuritypolicies
|
||||
resourceNames:
|
||||
- privileged
|
||||
verbs:
|
||||
- use
|
||||
# to grant privilege to run privileged pods (not needed by default)
|
||||
#- apiGroups:
|
||||
# - extensions
|
||||
# resources:
|
||||
# - podsecuritypolicies
|
||||
# resourceNames:
|
||||
# - privileged
|
||||
# verbs:
|
||||
# - use
|
||||
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
|
|
@ -265,12 +265,12 @@ rules:
|
|||
- services
|
||||
verbs:
|
||||
- create
|
||||
# to run privileged pods
|
||||
- apiGroups:
|
||||
- extensions
|
||||
resources:
|
||||
- podsecuritypolicies
|
||||
resourceNames:
|
||||
- privileged
|
||||
verbs:
|
||||
- use
|
||||
# to grant privilege to run privileged pods (not needed by default)
|
||||
#- apiGroups:
|
||||
# - extensions
|
||||
# resources:
|
||||
# - podsecuritypolicies
|
||||
# resourceNames:
|
||||
# - privileged
|
||||
# verbs:
|
||||
# - use
|
||||
|
|
|
|||
|
|
@ -61,32 +61,45 @@ spec:
|
|||
properties:
|
||||
docker_image:
|
||||
type: string
|
||||
default: "registry.opensource.zalan.do/acid/spilo-13:2.0-p2"
|
||||
enable_crd_validation:
|
||||
type: boolean
|
||||
default: true
|
||||
enable_lazy_spilo_upgrade:
|
||||
type: boolean
|
||||
default: false
|
||||
enable_pgversion_env_var:
|
||||
type: boolean
|
||||
default: true
|
||||
enable_shm_volume:
|
||||
type: boolean
|
||||
default: true
|
||||
enable_spilo_wal_path_compat:
|
||||
type: boolean
|
||||
default: false
|
||||
etcd_host:
|
||||
type: string
|
||||
default: ""
|
||||
kubernetes_use_configmaps:
|
||||
type: boolean
|
||||
default: false
|
||||
max_instances:
|
||||
type: integer
|
||||
minimum: -1 # -1 = disabled
|
||||
default: -1
|
||||
min_instances:
|
||||
type: integer
|
||||
minimum: -1 # -1 = disabled
|
||||
default: -1
|
||||
resync_period:
|
||||
type: string
|
||||
default: "30m"
|
||||
repair_period:
|
||||
type: string
|
||||
default: "5m"
|
||||
set_memory_request_to_limit:
|
||||
type: boolean
|
||||
default: false
|
||||
sidecar_docker_images:
|
||||
type: object
|
||||
additionalProperties:
|
||||
|
|
@ -100,24 +113,35 @@ spec:
|
|||
workers:
|
||||
type: integer
|
||||
minimum: 1
|
||||
default: 8
|
||||
users:
|
||||
type: object
|
||||
properties:
|
||||
replication_username:
|
||||
type: string
|
||||
default: standby
|
||||
super_username:
|
||||
type: string
|
||||
default: postgres
|
||||
kubernetes:
|
||||
type: object
|
||||
properties:
|
||||
additional_pod_capabilities:
|
||||
type: array
|
||||
items:
|
||||
type: string
|
||||
cluster_domain:
|
||||
type: string
|
||||
default: "cluster.local"
|
||||
cluster_labels:
|
||||
type: object
|
||||
additionalProperties:
|
||||
type: string
|
||||
default:
|
||||
application: spilo
|
||||
cluster_name_label:
|
||||
type: string
|
||||
default: "cluster-name"
|
||||
custom_pod_annotations:
|
||||
type: object
|
||||
additionalProperties:
|
||||
|
|
@ -132,12 +156,16 @@ spec:
|
|||
type: string
|
||||
enable_init_containers:
|
||||
type: boolean
|
||||
default: true
|
||||
enable_pod_antiaffinity:
|
||||
type: boolean
|
||||
default: false
|
||||
enable_pod_disruption_budget:
|
||||
type: boolean
|
||||
default: true
|
||||
enable_sidecars:
|
||||
type: boolean
|
||||
default: true
|
||||
infrastructure_roles_secret_name:
|
||||
type: string
|
||||
infrastructure_roles_secrets:
|
||||
|
|
@ -176,16 +204,20 @@ spec:
|
|||
type: string
|
||||
master_pod_move_timeout:
|
||||
type: string
|
||||
default: "20m"
|
||||
node_readiness_label:
|
||||
type: object
|
||||
additionalProperties:
|
||||
type: string
|
||||
oauth_token_secret_name:
|
||||
type: string
|
||||
default: "postgresql-operator"
|
||||
pdb_name_format:
|
||||
type: string
|
||||
default: "postgres-{cluster}-pdb"
|
||||
pod_antiaffinity_topology_key:
|
||||
type: string
|
||||
default: "kubernetes.io/hostname"
|
||||
pod_environment_configmap:
|
||||
type: string
|
||||
pod_environment_secret:
|
||||
|
|
@ -195,20 +227,27 @@ spec:
|
|||
enum:
|
||||
- "ordered_ready"
|
||||
- "parallel"
|
||||
default: "ordered_ready"
|
||||
pod_priority_class_name:
|
||||
type: string
|
||||
pod_role_label:
|
||||
type: string
|
||||
default: "spilo-role"
|
||||
pod_service_account_definition:
|
||||
type: string
|
||||
default: ""
|
||||
pod_service_account_name:
|
||||
type: string
|
||||
default: "postgres-pod"
|
||||
pod_service_account_role_binding_definition:
|
||||
type: string
|
||||
default: ""
|
||||
pod_terminate_grace_period:
|
||||
type: string
|
||||
default: "5m"
|
||||
secret_name_template:
|
||||
type: string
|
||||
default: "{username}.{cluster}.credentials.{tprkind}.{tprgroup}"
|
||||
spilo_runasuser:
|
||||
type: integer
|
||||
spilo_runasgroup:
|
||||
|
|
@ -217,12 +256,14 @@ spec:
|
|||
type: integer
|
||||
spilo_privileged:
|
||||
type: boolean
|
||||
default: false
|
||||
storage_resize_mode:
|
||||
type: string
|
||||
enum:
|
||||
- "ebs"
|
||||
- "pvc"
|
||||
- "off"
|
||||
default: "pvc"
|
||||
toleration:
|
||||
type: object
|
||||
additionalProperties:
|
||||
|
|
@ -235,36 +276,48 @@ spec:
|
|||
default_cpu_limit:
|
||||
type: string
|
||||
pattern: '^(\d+m|\d+(\.\d{1,3})?)$'
|
||||
default: "1"
|
||||
default_cpu_request:
|
||||
type: string
|
||||
pattern: '^(\d+m|\d+(\.\d{1,3})?)$'
|
||||
default: "100m"
|
||||
default_memory_limit:
|
||||
type: string
|
||||
pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
|
||||
default: "500Mi"
|
||||
default_memory_request:
|
||||
type: string
|
||||
pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
|
||||
default: "100Mi"
|
||||
min_cpu_limit:
|
||||
type: string
|
||||
pattern: '^(\d+m|\d+(\.\d{1,3})?)$'
|
||||
default: "250m"
|
||||
min_memory_limit:
|
||||
type: string
|
||||
pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
|
||||
default: "250Mi"
|
||||
timeouts:
|
||||
type: object
|
||||
properties:
|
||||
pod_label_wait_timeout:
|
||||
type: string
|
||||
default: "10m"
|
||||
pod_deletion_wait_timeout:
|
||||
type: string
|
||||
default: "10m"
|
||||
ready_wait_interval:
|
||||
type: string
|
||||
default: "4s"
|
||||
ready_wait_timeout:
|
||||
type: string
|
||||
default: "30s"
|
||||
resource_check_interval:
|
||||
type: string
|
||||
default: "3s"
|
||||
resource_check_timeout:
|
||||
type: string
|
||||
default: "10m"
|
||||
load_balancer:
|
||||
type: object
|
||||
properties:
|
||||
|
|
@ -274,19 +327,25 @@ spec:
|
|||
type: string
|
||||
db_hosted_zone:
|
||||
type: string
|
||||
default: "db.example.com"
|
||||
enable_master_load_balancer:
|
||||
type: boolean
|
||||
default: true
|
||||
enable_replica_load_balancer:
|
||||
type: boolean
|
||||
default: false
|
||||
external_traffic_policy:
|
||||
type: string
|
||||
enum:
|
||||
- "Cluster"
|
||||
- "Local"
|
||||
default: "Cluster"
|
||||
master_dns_name_format:
|
||||
type: string
|
||||
default: "{cluster}.{team}.{hostedzone}"
|
||||
replica_dns_name_format:
|
||||
type: string
|
||||
default: "{cluster}-repl.{team}.{hostedzone}"
|
||||
aws_or_gcp:
|
||||
type: object
|
||||
properties:
|
||||
|
|
@ -294,12 +353,16 @@ spec:
|
|||
type: string
|
||||
additional_secret_mount_path:
|
||||
type: string
|
||||
default: "/meta/credentials"
|
||||
aws_region:
|
||||
type: string
|
||||
default: "eu-central-1"
|
||||
enable_ebs_gp3_migration:
|
||||
type: boolean
|
||||
default: false
|
||||
enable_ebs_gp3_migration_max_size:
|
||||
type: integer
|
||||
default: 1000
|
||||
gcp_credentials:
|
||||
type: string
|
||||
kube_iam_role:
|
||||
|
|
@ -315,6 +378,15 @@ spec:
|
|||
properties:
|
||||
logical_backup_docker_image:
|
||||
type: string
|
||||
default: "registry.opensource.zalan.do/acid/logical-backup:v1.6.0"
|
||||
logical_backup_google_application_credentials:
|
||||
type: string
|
||||
logical_backup_job_prefix:
|
||||
type: string
|
||||
default: "logical-backup-"
|
||||
logical_backup_provider:
|
||||
type: string
|
||||
default: "s3"
|
||||
logical_backup_s3_access_key_id:
|
||||
type: string
|
||||
logical_backup_s3_bucket:
|
||||
|
|
@ -330,30 +402,40 @@ spec:
|
|||
logical_backup_schedule:
|
||||
type: string
|
||||
pattern: '^(\d+|\*)(/\d+)?(\s+(\d+|\*)(/\d+)?){4}$'
|
||||
default: "30 00 * * *"
|
||||
debug:
|
||||
type: object
|
||||
properties:
|
||||
debug_logging:
|
||||
type: boolean
|
||||
default: true
|
||||
enable_database_access:
|
||||
type: boolean
|
||||
default: true
|
||||
teams_api:
|
||||
type: object
|
||||
properties:
|
||||
enable_admin_role_for_users:
|
||||
type: boolean
|
||||
default: true
|
||||
enable_postgres_team_crd:
|
||||
type: boolean
|
||||
default: true
|
||||
enable_postgres_team_crd_superusers:
|
||||
type: boolean
|
||||
default: false
|
||||
enable_team_superuser:
|
||||
type: boolean
|
||||
default: false
|
||||
enable_teams_api:
|
||||
type: boolean
|
||||
default: true
|
||||
pam_configuration:
|
||||
type: string
|
||||
default: "https://info.example.com/oauth2/tokeninfo?access_token= uid realm=/employees"
|
||||
pam_role_name:
|
||||
type: string
|
||||
default: "zalandos"
|
||||
postgres_superuser_teams:
|
||||
type: array
|
||||
items:
|
||||
|
|
@ -362,23 +444,32 @@ spec:
|
|||
type: array
|
||||
items:
|
||||
type: string
|
||||
default:
|
||||
- admin
|
||||
team_admin_role:
|
||||
type: string
|
||||
default: "admin"
|
||||
team_api_role_configuration:
|
||||
type: object
|
||||
additionalProperties:
|
||||
type: string
|
||||
default:
|
||||
log_statement: all
|
||||
teams_api_url:
|
||||
type: string
|
||||
default: "https://teams.example.com/api/"
|
||||
logging_rest_api:
|
||||
type: object
|
||||
properties:
|
||||
api_port:
|
||||
type: integer
|
||||
default: 8080
|
||||
cluster_history_entries:
|
||||
type: integer
|
||||
default: 1000
|
||||
ring_log_lines:
|
||||
type: integer
|
||||
default: 100
|
||||
scalyr: # deprecated
|
||||
type: object
|
||||
properties:
|
||||
|
|
@ -387,60 +478,65 @@ spec:
|
|||
scalyr_cpu_limit:
|
||||
type: string
|
||||
pattern: '^(\d+m|\d+(\.\d{1,3})?)$'
|
||||
default: "1"
|
||||
scalyr_cpu_request:
|
||||
type: string
|
||||
pattern: '^(\d+m|\d+(\.\d{1,3})?)$'
|
||||
default: "100m"
|
||||
scalyr_image:
|
||||
type: string
|
||||
scalyr_memory_limit:
|
||||
type: string
|
||||
pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
|
||||
default: "500Mi"
|
||||
scalyr_memory_request:
|
||||
type: string
|
||||
pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
|
||||
default: "50Mi"
|
||||
scalyr_server_url:
|
||||
type: string
|
||||
default: "https://upload.eu.scalyr.com"
|
||||
connection_pooler:
|
||||
type: object
|
||||
properties:
|
||||
connection_pooler_schema:
|
||||
type: string
|
||||
#default: "pooler"
|
||||
default: "pooler"
|
||||
connection_pooler_user:
|
||||
type: string
|
||||
#default: "pooler"
|
||||
default: "pooler"
|
||||
connection_pooler_image:
|
||||
type: string
|
||||
#default: "registry.opensource.zalan.do/acid/pgbouncer"
|
||||
default: "registry.opensource.zalan.do/acid/pgbouncer:master-12"
|
||||
connection_pooler_max_db_connections:
|
||||
type: integer
|
||||
#default: 60
|
||||
default: 60
|
||||
connection_pooler_mode:
|
||||
type: string
|
||||
enum:
|
||||
- "session"
|
||||
- "transaction"
|
||||
#default: "transaction"
|
||||
default: "transaction"
|
||||
connection_pooler_number_of_instances:
|
||||
type: integer
|
||||
minimum: 2
|
||||
#default: 2
|
||||
minimum: 1
|
||||
default: 2
|
||||
connection_pooler_default_cpu_limit:
|
||||
type: string
|
||||
pattern: '^(\d+m|\d+(\.\d{1,3})?)$'
|
||||
#default: "1"
|
||||
default: "1"
|
||||
connection_pooler_default_cpu_request:
|
||||
type: string
|
||||
pattern: '^(\d+m|\d+(\.\d{1,3})?)$'
|
||||
#default: "500m"
|
||||
default: "500m"
|
||||
connection_pooler_default_memory_limit:
|
||||
type: string
|
||||
pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
|
||||
#default: "100Mi"
|
||||
default: "100Mi"
|
||||
connection_pooler_default_memory_request:
|
||||
type: string
|
||||
pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
|
||||
#default: "100Mi"
|
||||
default: "100Mi"
|
||||
status:
|
||||
type: object
|
||||
additionalProperties:
|
||||
|
|
|
|||
|
|
@ -19,7 +19,7 @@ spec:
|
|||
serviceAccountName: postgres-operator
|
||||
containers:
|
||||
- name: postgres-operator
|
||||
image: registry.opensource.zalan.do/acid/postgres-operator:v1.5.0
|
||||
image: registry.opensource.zalan.do/acid/postgres-operator:v1.6.0
|
||||
imagePullPolicy: IfNotPresent
|
||||
resources:
|
||||
requests:
|
||||
|
|
@ -32,6 +32,7 @@ spec:
|
|||
runAsUser: 1000
|
||||
runAsNonRoot: true
|
||||
readOnlyRootFilesystem: true
|
||||
allowPrivilegeEscalation: false
|
||||
env:
|
||||
# provided additional ENV vars can overwrite individual config map entries
|
||||
- name: CONFIG_MAP_NAME
|
||||
|
|
|
|||
|
|
@ -3,10 +3,10 @@ kind: OperatorConfiguration
|
|||
metadata:
|
||||
name: postgresql-operator-default-configuration
|
||||
configuration:
|
||||
docker_image: registry.opensource.zalan.do/acid/spilo-12:1.6-p3
|
||||
docker_image: registry.opensource.zalan.do/acid/spilo-13:2.0-p2
|
||||
# enable_crd_validation: true
|
||||
# enable_lazy_spilo_upgrade: false
|
||||
# enable_pgversion_env_var: false
|
||||
enable_pgversion_env_var: true
|
||||
# enable_shm_volume: true
|
||||
enable_spilo_wal_path_compat: false
|
||||
etcd_host: ""
|
||||
|
|
@ -26,6 +26,8 @@ configuration:
|
|||
replication_username: standby
|
||||
super_username: postgres
|
||||
kubernetes:
|
||||
# additional_pod_capabilities:
|
||||
# - "SYS_NICE"
|
||||
cluster_domain: cluster.local
|
||||
cluster_labels:
|
||||
application: spilo
|
||||
|
|
@ -115,7 +117,10 @@ configuration:
|
|||
# wal_gs_bucket: ""
|
||||
# wal_s3_bucket: ""
|
||||
logical_backup:
|
||||
logical_backup_docker_image: "registry.opensource.zalan.do/acid/logical-backup:master-58"
|
||||
logical_backup_docker_image: "registry.opensource.zalan.do/acid/logical-backup:v1.6.0"
|
||||
# logical_backup_google_application_credentials: ""
|
||||
logical_backup_job_prefix: "logical-backup-"
|
||||
logical_backup_provider: "s3"
|
||||
# logical_backup_s3_access_key_id: ""
|
||||
logical_backup_s3_bucket: "my-bucket-url"
|
||||
# logical_backup_s3_endpoint: ""
|
||||
|
|
|
|||
|
|
@ -553,6 +553,8 @@ spec:
|
|||
required:
|
||||
- size
|
||||
properties:
|
||||
iops:
|
||||
type: integer
|
||||
size:
|
||||
type: string
|
||||
pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
|
||||
|
|
@ -561,6 +563,8 @@ spec:
|
|||
type: string
|
||||
subPath:
|
||||
type: string
|
||||
throughput:
|
||||
type: integer
|
||||
status:
|
||||
type: object
|
||||
additionalProperties:
|
||||
|
|
|
|||
|
|
@ -9,7 +9,7 @@ spec:
|
|||
size: 1Gi
|
||||
numberOfInstances: 1
|
||||
postgresql:
|
||||
version: "12"
|
||||
version: "13"
|
||||
# Make this a standby cluster and provide the s3 bucket path of source cluster for continuous streaming.
|
||||
standby:
|
||||
s3_wal_path: "s3://path/to/bucket/containing/wal/of/source/cluster/"
|
||||
|
|
|
|||
|
|
@ -13,4 +13,3 @@ nav:
|
|||
- Config parameters: 'reference/operator_parameters.md'
|
||||
- Manifest parameters: 'reference/cluster_manifest.md'
|
||||
- CLI options and environment: 'reference/command_line_and_environment.md'
|
||||
- Google Summer of Code 2019: 'gsoc-2019/ideas.md'
|
||||
|
|
|
|||
|
|
@ -106,7 +106,6 @@ var OperatorConfigCRDResourceColumns = []apiextv1.CustomResourceColumnDefinition
|
|||
|
||||
var min0 = 0.0
|
||||
var min1 = 1.0
|
||||
var min2 = 2.0
|
||||
var minDisable = -1.0
|
||||
|
||||
// PostgresCRDResourceValidation to check applied manifest parameters
|
||||
|
|
@ -232,7 +231,7 @@ var PostgresCRDResourceValidation = apiextv1.CustomResourceValidation{
|
|||
},
|
||||
"numberOfInstances": {
|
||||
Type: "integer",
|
||||
Minimum: &min2,
|
||||
Minimum: &min1,
|
||||
},
|
||||
"resources": {
|
||||
Type: "object",
|
||||
|
|
@ -836,6 +835,9 @@ var PostgresCRDResourceValidation = apiextv1.CustomResourceValidation{
|
|||
Type: "object",
|
||||
Required: []string{"size"},
|
||||
Properties: map[string]apiextv1.JSONSchemaProps{
|
||||
"iops": {
|
||||
Type: "integer",
|
||||
},
|
||||
"size": {
|
||||
Type: "string",
|
||||
Description: "Value must not be zero",
|
||||
|
|
@ -847,6 +849,9 @@ var PostgresCRDResourceValidation = apiextv1.CustomResourceValidation{
|
|||
"subPath": {
|
||||
Type: "string",
|
||||
},
|
||||
"throughput": {
|
||||
Type: "integer",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
|
|
@ -963,6 +968,14 @@ var OperatorConfigCRDResourceValidation = apiextv1.CustomResourceValidation{
|
|||
"kubernetes": {
|
||||
Type: "object",
|
||||
Properties: map[string]apiextv1.JSONSchemaProps{
|
||||
"additional_pod_capabilities": {
|
||||
Type: "array",
|
||||
Items: &apiextv1.JSONSchemaPropsOrArray{
|
||||
Schema: &apiextv1.JSONSchemaProps{
|
||||
Type: "string",
|
||||
},
|
||||
},
|
||||
},
|
||||
"cluster_domain": {
|
||||
Type: "string",
|
||||
},
|
||||
|
|
@ -1291,6 +1304,15 @@ var OperatorConfigCRDResourceValidation = apiextv1.CustomResourceValidation{
|
|||
"logical_backup_docker_image": {
|
||||
Type: "string",
|
||||
},
|
||||
"logical_backup_google_application_credentials": {
|
||||
Type: "string",
|
||||
},
|
||||
"logical_backup_job_prefix": {
|
||||
Type: "string",
|
||||
},
|
||||
"logical_backup_provider": {
|
||||
Type: "string",
|
||||
},
|
||||
"logical_backup_s3_access_key_id": {
|
||||
Type: "string",
|
||||
},
|
||||
|
|
@ -1464,7 +1486,7 @@ var OperatorConfigCRDResourceValidation = apiextv1.CustomResourceValidation{
|
|||
},
|
||||
"connection_pooler_number_of_instances": {
|
||||
Type: "integer",
|
||||
Minimum: &min2,
|
||||
Minimum: &min1,
|
||||
},
|
||||
"connection_pooler_schema": {
|
||||
Type: "string",
|
||||
|
|
|
|||
|
|
@ -52,6 +52,7 @@ type KubernetesMetaConfiguration struct {
|
|||
SpiloRunAsUser *int64 `json:"spilo_runasuser,omitempty"`
|
||||
SpiloRunAsGroup *int64 `json:"spilo_runasgroup,omitempty"`
|
||||
SpiloFSGroup *int64 `json:"spilo_fsgroup,omitempty"`
|
||||
AdditionalPodCapabilities []string `json:"additional_pod_capabilities,omitempty"`
|
||||
WatchedNamespace string `json:"watched_namespace,omitempty"`
|
||||
PDBNameFormat config.StringTemplate `json:"pdb_name_format,omitempty"`
|
||||
EnablePodDisruptionBudget *bool `json:"enable_pod_disruption_budget,omitempty"`
|
||||
|
|
@ -196,6 +197,7 @@ type OperatorLogicalBackupConfiguration struct {
|
|||
S3SecretAccessKey string `json:"logical_backup_s3_secret_access_key,omitempty"`
|
||||
S3SSE string `json:"logical_backup_s3_sse,omitempty"`
|
||||
GoogleApplicationCredentials string `json:"logical_backup_google_application_credentials,omitempty"`
|
||||
JobPrefix string `json:"logical_backup_job_prefix,omitempty"`
|
||||
}
|
||||
|
||||
// OperatorConfigurationData defines the operation config
|
||||
|
|
|
|||
|
|
@ -61,7 +61,7 @@ type PostgresSpec struct {
|
|||
Databases map[string]string `json:"databases,omitempty"`
|
||||
PreparedDatabases map[string]PreparedDatabase `json:"preparedDatabases,omitempty"`
|
||||
SchedulerName *string `json:"schedulerName,omitempty"`
|
||||
NodeAffinity v1.NodeAffinity `json:"nodeAffinity,omitempty"`
|
||||
NodeAffinity *v1.NodeAffinity `json:"nodeAffinity,omitempty"`
|
||||
Tolerations []v1.Toleration `json:"tolerations,omitempty"`
|
||||
Sidecars []Sidecar `json:"sidecars,omitempty"`
|
||||
InitContainers []v1.Container `json:"initContainers,omitempty"`
|
||||
|
|
@ -118,6 +118,7 @@ type Volume struct {
|
|||
SubPath string `json:"subPath,omitempty"`
|
||||
Iops *int64 `json:"iops,omitempty"`
|
||||
Throughput *int64 `json:"throughput,omitempty"`
|
||||
VolumeType string `json:"type,omitempty"`
|
||||
}
|
||||
|
||||
// AdditionalVolume specs additional optional volumes for statefulset
|
||||
|
|
|
|||
|
|
@ -1,7 +1,7 @@
|
|||
// +build !ignore_autogenerated
|
||||
|
||||
/*
|
||||
Copyright 2020 Compose, Zalando SE
|
||||
Copyright 2021 Compose, Zalando SE
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
|
|
@ -162,6 +162,11 @@ func (in *KubernetesMetaConfiguration) DeepCopyInto(out *KubernetesMetaConfigura
|
|||
*out = new(int64)
|
||||
**out = **in
|
||||
}
|
||||
if in.AdditionalPodCapabilities != nil {
|
||||
in, out := &in.AdditionalPodCapabilities, &out.AdditionalPodCapabilities
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.EnablePodDisruptionBudget != nil {
|
||||
in, out := &in.EnablePodDisruptionBudget, &out.EnablePodDisruptionBudget
|
||||
*out = new(bool)
|
||||
|
|
@ -633,7 +638,11 @@ func (in *PostgresSpec) DeepCopyInto(out *PostgresSpec) {
|
|||
*out = new(string)
|
||||
**out = **in
|
||||
}
|
||||
in.NodeAffinity.DeepCopyInto(&out.NodeAffinity)
|
||||
if in.NodeAffinity != nil {
|
||||
in, out := &in.NodeAffinity, &out.NodeAffinity
|
||||
*out = new(corev1.NodeAffinity)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
if in.Tolerations != nil {
|
||||
in, out := &in.Tolerations, &out.Tolerations
|
||||
*out = make([]corev1.Toleration, len(*in))
|
||||
|
|
|
|||
|
|
@ -113,9 +113,9 @@ func New(cfg Config, kubeClient k8sutil.KubernetesClient, pgSpec acidv1.Postgres
|
|||
|
||||
return fmt.Sprintf("%s-%s", e.PodName, e.ResourceVersion), nil
|
||||
})
|
||||
password_encryption, ok := pgSpec.Spec.PostgresqlParam.Parameters["password_encryption"]
|
||||
passwordEncryption, ok := pgSpec.Spec.PostgresqlParam.Parameters["password_encryption"]
|
||||
if !ok {
|
||||
password_encryption = "md5"
|
||||
passwordEncryption = "md5"
|
||||
}
|
||||
|
||||
cluster := &Cluster{
|
||||
|
|
@ -128,7 +128,7 @@ func New(cfg Config, kubeClient k8sutil.KubernetesClient, pgSpec acidv1.Postgres
|
|||
Secrets: make(map[types.UID]*v1.Secret),
|
||||
Services: make(map[PostgresRole]*v1.Service),
|
||||
Endpoints: make(map[PostgresRole]*v1.Endpoints)},
|
||||
userSyncStrategy: users.DefaultUserSyncStrategy{PasswordEncryption: password_encryption},
|
||||
userSyncStrategy: users.DefaultUserSyncStrategy{PasswordEncryption: passwordEncryption},
|
||||
deleteOptions: metav1.DeleteOptions{PropagationPolicy: &deletePropagationPolicy},
|
||||
podEventsQueue: podEventsQueue,
|
||||
KubeClient: kubeClient,
|
||||
|
|
@ -659,20 +659,8 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error {
|
|||
}
|
||||
|
||||
// Volume
|
||||
if oldSpec.Spec.Size != newSpec.Spec.Size {
|
||||
c.logVolumeChanges(oldSpec.Spec.Volume, newSpec.Spec.Volume)
|
||||
c.logger.Debugf("syncing volumes using %q storage resize mode", c.OpConfig.StorageResizeMode)
|
||||
if c.OpConfig.StorageResizeMode == "pvc" {
|
||||
if err := c.syncVolumeClaims(); err != nil {
|
||||
c.logger.Errorf("could not sync persistent volume claims: %v", err)
|
||||
updateFailed = true
|
||||
}
|
||||
} else if c.OpConfig.StorageResizeMode == "ebs" {
|
||||
if err := c.syncVolumes(); err != nil {
|
||||
c.logger.Errorf("could not sync persistent volumes: %v", err)
|
||||
updateFailed = true
|
||||
}
|
||||
}
|
||||
if c.OpConfig.StorageResizeMode != "off" {
|
||||
c.syncVolumes()
|
||||
} else {
|
||||
c.logger.Infof("Storage resize is disabled (storage_resize_mode is off). Skipping volume sync.")
|
||||
}
|
||||
|
|
|
|||
|
|
@ -22,7 +22,7 @@ import (
|
|||
"github.com/zalando/postgres-operator/pkg/util/k8sutil"
|
||||
)
|
||||
|
||||
// K8S objects that are belong to connection pooler
|
||||
// ConnectionPoolerObjects contains the K8s objects that belong to a connection pooler
|
||||
type ConnectionPoolerObjects struct {
|
||||
Deployment *appsv1.Deployment
|
||||
Service *v1.Service
|
||||
|
|
@ -280,6 +280,9 @@ func (c *Cluster) generateConnectionPoolerPodTemplate(role PostgresRole) (
|
|||
},
|
||||
},
|
||||
},
|
||||
SecurityContext: &v1.SecurityContext{
|
||||
AllowPrivilegeEscalation: util.False(),
|
||||
},
|
||||
}
|
||||
|
||||
podTemplate := &v1.PodTemplateSpec{
|
||||
|
|
@ -289,7 +292,6 @@ func (c *Cluster) generateConnectionPoolerPodTemplate(role PostgresRole) (
|
|||
Annotations: c.annotationsSet(c.generatePodAnnotations(spec)),
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
ServiceAccountName: c.OpConfig.PodServiceAccountName,
|
||||
TerminationGracePeriodSeconds: &gracePeriod,
|
||||
Containers: []v1.Container{poolerContainer},
|
||||
// TODO: add tolerations to scheduler pooler on the same node
|
||||
|
|
@ -539,13 +541,13 @@ func updateConnectionPoolerAnnotations(KubeClient k8sutil.KubernetesClient, depl
|
|||
// Test if two connection pooler configuration needs to be synced. For simplicity
|
||||
// compare not the actual K8S objects, but the configuration itself and request
|
||||
// sync if there is any difference.
|
||||
func needSyncConnectionPoolerSpecs(oldSpec, newSpec *acidv1.ConnectionPooler) (sync bool, reasons []string) {
|
||||
func needSyncConnectionPoolerSpecs(oldSpec, newSpec *acidv1.ConnectionPooler, logger *logrus.Entry) (sync bool, reasons []string) {
|
||||
reasons = []string{}
|
||||
sync = false
|
||||
|
||||
changelog, err := diff.Diff(oldSpec, newSpec)
|
||||
if err != nil {
|
||||
//c.logger.Infof("Cannot get diff, do not do anything, %+v", err)
|
||||
logger.Infof("cannot get diff, do not do anything, %+v", err)
|
||||
return false, reasons
|
||||
}
|
||||
|
||||
|
|
@ -681,13 +683,45 @@ func logPoolerEssentials(log *logrus.Entry, oldSpec, newSpec *acidv1.Postgresql)
|
|||
}
|
||||
|
||||
func (c *Cluster) syncConnectionPooler(oldSpec, newSpec *acidv1.Postgresql, LookupFunction InstallFunction) (SyncReason, error) {
|
||||
logPoolerEssentials(c.logger, oldSpec, newSpec)
|
||||
|
||||
var reason SyncReason
|
||||
var err error
|
||||
var newNeedConnectionPooler, oldNeedConnectionPooler bool
|
||||
oldNeedConnectionPooler = false
|
||||
|
||||
if oldSpec == nil {
|
||||
oldSpec = &acidv1.Postgresql{
|
||||
Spec: acidv1.PostgresSpec{
|
||||
ConnectionPooler: &acidv1.ConnectionPooler{},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
needSync, _ := needSyncConnectionPoolerSpecs(oldSpec.Spec.ConnectionPooler, newSpec.Spec.ConnectionPooler, c.logger)
|
||||
masterChanges, err := diff.Diff(oldSpec.Spec.EnableConnectionPooler, newSpec.Spec.EnableConnectionPooler)
|
||||
if err != nil {
|
||||
c.logger.Error("Error in getting diff of master connection pooler changes")
|
||||
}
|
||||
replicaChanges, err := diff.Diff(oldSpec.Spec.EnableReplicaConnectionPooler, newSpec.Spec.EnableReplicaConnectionPooler)
|
||||
if err != nil {
|
||||
c.logger.Error("Error in getting diff of replica connection pooler changes")
|
||||
}
|
||||
|
||||
// skip pooler sync only
|
||||
// 1. if there is no diff in spec, AND
|
||||
// 2. if connection pooler is already there and is also required as per newSpec
|
||||
//
|
||||
// Handling the case when connectionPooler is not there but it is required
|
||||
// as per spec, hence do not skip syncing in that case, even though there
|
||||
// is no diff in specs
|
||||
if (!needSync && len(masterChanges) <= 0 && len(replicaChanges) <= 0) &&
|
||||
(c.ConnectionPooler != nil && (needConnectionPooler(&newSpec.Spec))) {
|
||||
c.logger.Debugln("syncing pooler is not required")
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
logPoolerEssentials(c.logger, oldSpec, newSpec)
|
||||
|
||||
// Check and perform the sync requirements for each of the roles.
|
||||
for _, role := range [2]PostgresRole{Master, Replica} {
|
||||
|
||||
|
|
@ -841,7 +875,7 @@ func (c *Cluster) syncConnectionPoolerWorker(oldSpec, newSpec *acidv1.Postgresql
|
|||
var specReason []string
|
||||
|
||||
if oldSpec != nil {
|
||||
specSync, specReason = needSyncConnectionPoolerSpecs(oldConnectionPooler, newConnectionPooler)
|
||||
specSync, specReason = needSyncConnectionPoolerSpecs(oldConnectionPooler, newConnectionPooler, c.logger)
|
||||
}
|
||||
|
||||
defaultsSync, defaultsReason := needSyncConnectionPoolerDefaults(&c.Config, newConnectionPooler, deployment)
|
||||
|
|
|
|||
|
|
@ -6,13 +6,17 @@ import (
|
|||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1"
|
||||
fakeacidv1 "github.com/zalando/postgres-operator/pkg/generated/clientset/versioned/fake"
|
||||
"github.com/zalando/postgres-operator/pkg/util"
|
||||
"github.com/zalando/postgres-operator/pkg/util/config"
|
||||
"github.com/zalando/postgres-operator/pkg/util/k8sutil"
|
||||
|
||||
appsv1 "k8s.io/api/apps/v1"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/client-go/kubernetes/fake"
|
||||
)
|
||||
|
||||
func mockInstallLookupFunction(schema string, user string, role PostgresRole) error {
|
||||
|
|
@ -27,79 +31,122 @@ func int32ToPointer(value int32) *int32 {
|
|||
return &value
|
||||
}
|
||||
|
||||
func TestConnectionPoolerCreationAndDeletion(t *testing.T) {
|
||||
testName := "Test connection pooler creation"
|
||||
var cluster = New(
|
||||
Config{
|
||||
OpConfig: config.Config{
|
||||
ProtectedRoles: []string{"admin"},
|
||||
Auth: config.Auth{
|
||||
SuperUsername: superUserName,
|
||||
ReplicationUsername: replicationUserName,
|
||||
},
|
||||
ConnectionPooler: config.ConnectionPooler{
|
||||
ConnectionPoolerDefaultCPURequest: "100m",
|
||||
ConnectionPoolerDefaultCPULimit: "100m",
|
||||
ConnectionPoolerDefaultMemoryRequest: "100Mi",
|
||||
ConnectionPoolerDefaultMemoryLimit: "100Mi",
|
||||
NumberOfInstances: int32ToPointer(1),
|
||||
},
|
||||
},
|
||||
}, k8sutil.NewMockKubernetesClient(), acidv1.Postgresql{}, logger, eventRecorder)
|
||||
|
||||
cluster.Statefulset = &appsv1.StatefulSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-sts",
|
||||
},
|
||||
}
|
||||
|
||||
cluster.Spec = acidv1.PostgresSpec{
|
||||
ConnectionPooler: &acidv1.ConnectionPooler{},
|
||||
EnableReplicaConnectionPooler: boolToPointer(true),
|
||||
}
|
||||
|
||||
reason, err := cluster.createConnectionPooler(mockInstallLookupFunction)
|
||||
|
||||
if err != nil {
|
||||
t.Errorf("%s: Cannot create connection pooler, %s, %+v",
|
||||
testName, err, reason)
|
||||
}
|
||||
func deploymentUpdated(cluster *Cluster, err error, reason SyncReason) error {
|
||||
for _, role := range [2]PostgresRole{Master, Replica} {
|
||||
if cluster.ConnectionPooler[role] != nil {
|
||||
if cluster.ConnectionPooler[role].Deployment == nil {
|
||||
t.Errorf("%s: Connection pooler deployment is empty for role %s", testName, role)
|
||||
}
|
||||
|
||||
if cluster.ConnectionPooler[role].Service == nil {
|
||||
t.Errorf("%s: Connection pooler service is empty for role %s", testName, role)
|
||||
}
|
||||
poolerLabels := cluster.labelsSet(false)
|
||||
poolerLabels["application"] = "db-connection-pooler"
|
||||
poolerLabels["connection-pooler"] = cluster.connectionPoolerName(role)
|
||||
|
||||
if cluster.ConnectionPooler[role] != nil && cluster.ConnectionPooler[role].Deployment != nil &&
|
||||
util.MapContains(cluster.ConnectionPooler[role].Deployment.Labels, poolerLabels) &&
|
||||
(cluster.ConnectionPooler[role].Deployment.Spec.Replicas == nil ||
|
||||
*cluster.ConnectionPooler[role].Deployment.Spec.Replicas != 2) {
|
||||
return fmt.Errorf("Wrong number of instances")
|
||||
}
|
||||
}
|
||||
oldSpec := &acidv1.Postgresql{
|
||||
Spec: acidv1.PostgresSpec{
|
||||
EnableConnectionPooler: boolToPointer(true),
|
||||
EnableReplicaConnectionPooler: boolToPointer(true),
|
||||
},
|
||||
}
|
||||
newSpec := &acidv1.Postgresql{
|
||||
Spec: acidv1.PostgresSpec{
|
||||
EnableConnectionPooler: boolToPointer(false),
|
||||
EnableReplicaConnectionPooler: boolToPointer(false),
|
||||
},
|
||||
return nil
|
||||
}
|
||||
|
||||
func objectsAreSaved(cluster *Cluster, err error, reason SyncReason) error {
|
||||
if cluster.ConnectionPooler == nil {
|
||||
return fmt.Errorf("Connection pooler resources are empty")
|
||||
}
|
||||
|
||||
// Delete connection pooler via sync
|
||||
_, err = cluster.syncConnectionPooler(oldSpec, newSpec, mockInstallLookupFunction)
|
||||
if err != nil {
|
||||
t.Errorf("%s: Cannot sync connection pooler, %s", testName, err)
|
||||
}
|
||||
for _, role := range []PostgresRole{Master, Replica} {
|
||||
poolerLabels := cluster.labelsSet(false)
|
||||
poolerLabels["application"] = "db-connection-pooler"
|
||||
poolerLabels["connection-pooler"] = cluster.connectionPoolerName(role)
|
||||
|
||||
for _, role := range [2]PostgresRole{Master, Replica} {
|
||||
err = cluster.deleteConnectionPooler(role)
|
||||
if err != nil {
|
||||
t.Errorf("%s: Cannot delete connection pooler, %s", testName, err)
|
||||
if cluster.ConnectionPooler[role].Deployment == nil || !util.MapContains(cluster.ConnectionPooler[role].Deployment.Labels, poolerLabels) {
|
||||
return fmt.Errorf("Deployment was not saved or labels not attached %s %s", role, cluster.ConnectionPooler[role].Deployment.Labels)
|
||||
}
|
||||
|
||||
if cluster.ConnectionPooler[role].Service == nil || !util.MapContains(cluster.ConnectionPooler[role].Service.Labels, poolerLabels) {
|
||||
return fmt.Errorf("Service was not saved or labels not attached %s %s", role, cluster.ConnectionPooler[role].Service.Labels)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func MasterObjectsAreSaved(cluster *Cluster, err error, reason SyncReason) error {
|
||||
if cluster.ConnectionPooler == nil {
|
||||
return fmt.Errorf("Connection pooler resources are empty")
|
||||
}
|
||||
|
||||
poolerLabels := cluster.labelsSet(false)
|
||||
poolerLabels["application"] = "db-connection-pooler"
|
||||
poolerLabels["connection-pooler"] = cluster.connectionPoolerName(Master)
|
||||
|
||||
if cluster.ConnectionPooler[Master].Deployment == nil || !util.MapContains(cluster.ConnectionPooler[Master].Deployment.Labels, poolerLabels) {
|
||||
return fmt.Errorf("Deployment was not saved or labels not attached %s", cluster.ConnectionPooler[Master].Deployment.Labels)
|
||||
}
|
||||
|
||||
if cluster.ConnectionPooler[Master].Service == nil || !util.MapContains(cluster.ConnectionPooler[Master].Service.Labels, poolerLabels) {
|
||||
return fmt.Errorf("Service was not saved or labels not attached %s", cluster.ConnectionPooler[Master].Service.Labels)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func ReplicaObjectsAreSaved(cluster *Cluster, err error, reason SyncReason) error {
|
||||
if cluster.ConnectionPooler == nil {
|
||||
return fmt.Errorf("Connection pooler resources are empty")
|
||||
}
|
||||
|
||||
poolerLabels := cluster.labelsSet(false)
|
||||
poolerLabels["application"] = "db-connection-pooler"
|
||||
poolerLabels["connection-pooler"] = cluster.connectionPoolerName(Replica)
|
||||
|
||||
if cluster.ConnectionPooler[Replica].Deployment == nil || !util.MapContains(cluster.ConnectionPooler[Replica].Deployment.Labels, poolerLabels) {
|
||||
return fmt.Errorf("Deployment was not saved or labels not attached %s", cluster.ConnectionPooler[Replica].Deployment.Labels)
|
||||
}
|
||||
|
||||
if cluster.ConnectionPooler[Replica].Service == nil || !util.MapContains(cluster.ConnectionPooler[Replica].Service.Labels, poolerLabels) {
|
||||
return fmt.Errorf("Service was not saved or labels not attached %s", cluster.ConnectionPooler[Replica].Service.Labels)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func objectsAreDeleted(cluster *Cluster, err error, reason SyncReason) error {
|
||||
for _, role := range [2]PostgresRole{Master, Replica} {
|
||||
if cluster.ConnectionPooler[role] != nil &&
|
||||
(cluster.ConnectionPooler[role].Deployment != nil || cluster.ConnectionPooler[role].Service != nil) {
|
||||
return fmt.Errorf("Connection pooler was not deleted for role %v", role)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func OnlyMasterDeleted(cluster *Cluster, err error, reason SyncReason) error {
|
||||
|
||||
if cluster.ConnectionPooler[Master] != nil &&
|
||||
(cluster.ConnectionPooler[Master].Deployment != nil || cluster.ConnectionPooler[Master].Service != nil) {
|
||||
return fmt.Errorf("Connection pooler master was not deleted")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func OnlyReplicaDeleted(cluster *Cluster, err error, reason SyncReason) error {
|
||||
|
||||
if cluster.ConnectionPooler[Replica] != nil &&
|
||||
(cluster.ConnectionPooler[Replica].Deployment != nil || cluster.ConnectionPooler[Replica].Service != nil) {
|
||||
return fmt.Errorf("Connection pooler replica was not deleted")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func noEmptySync(cluster *Cluster, err error, reason SyncReason) error {
|
||||
for _, msg := range reason {
|
||||
if strings.HasPrefix(msg, "update [] from '<nil>' to '") {
|
||||
return fmt.Errorf("There is an empty reason, %s", msg)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func TestNeedConnectionPooler(t *testing.T) {
|
||||
|
|
@ -210,133 +257,178 @@ func TestNeedConnectionPooler(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func deploymentUpdated(cluster *Cluster, err error, reason SyncReason) error {
|
||||
for _, role := range [2]PostgresRole{Master, Replica} {
|
||||
if cluster.ConnectionPooler[role] != nil && cluster.ConnectionPooler[role].Deployment != nil &&
|
||||
(cluster.ConnectionPooler[role].Deployment.Spec.Replicas == nil ||
|
||||
*cluster.ConnectionPooler[role].Deployment.Spec.Replicas != 2) {
|
||||
return fmt.Errorf("Wrong number of instances")
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func TestConnectionPoolerCreateDeletion(t *testing.T) {
|
||||
|
||||
func objectsAreSaved(cluster *Cluster, err error, reason SyncReason) error {
|
||||
if cluster.ConnectionPooler == nil {
|
||||
return fmt.Errorf("Connection pooler resources are empty")
|
||||
testName := "test connection pooler creation and deletion"
|
||||
clientSet := fake.NewSimpleClientset()
|
||||
acidClientSet := fakeacidv1.NewSimpleClientset()
|
||||
namespace := "default"
|
||||
|
||||
client := k8sutil.KubernetesClient{
|
||||
StatefulSetsGetter: clientSet.AppsV1(),
|
||||
ServicesGetter: clientSet.CoreV1(),
|
||||
DeploymentsGetter: clientSet.AppsV1(),
|
||||
PostgresqlsGetter: acidClientSet.AcidV1(),
|
||||
SecretsGetter: clientSet.CoreV1(),
|
||||
}
|
||||
|
||||
for _, role := range []PostgresRole{Master, Replica} {
|
||||
if cluster.ConnectionPooler[role].Deployment == nil {
|
||||
return fmt.Errorf("Deployment was not saved %s", role)
|
||||
}
|
||||
|
||||
if cluster.ConnectionPooler[role].Service == nil {
|
||||
return fmt.Errorf("Service was not saved %s", role)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func MasterobjectsAreSaved(cluster *Cluster, err error, reason SyncReason) error {
|
||||
if cluster.ConnectionPooler == nil {
|
||||
return fmt.Errorf("Connection pooler resources are empty")
|
||||
}
|
||||
|
||||
if cluster.ConnectionPooler[Master].Deployment == nil {
|
||||
return fmt.Errorf("Deployment was not saved")
|
||||
}
|
||||
|
||||
if cluster.ConnectionPooler[Master].Service == nil {
|
||||
return fmt.Errorf("Service was not saved")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func ReplicaobjectsAreSaved(cluster *Cluster, err error, reason SyncReason) error {
|
||||
if cluster.ConnectionPooler == nil {
|
||||
return fmt.Errorf("Connection pooler resources are empty")
|
||||
}
|
||||
|
||||
if cluster.ConnectionPooler[Replica].Deployment == nil {
|
||||
return fmt.Errorf("Deployment was not saved")
|
||||
}
|
||||
|
||||
if cluster.ConnectionPooler[Replica].Service == nil {
|
||||
return fmt.Errorf("Service was not saved")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func objectsAreDeleted(cluster *Cluster, err error, reason SyncReason) error {
|
||||
for _, role := range [2]PostgresRole{Master, Replica} {
|
||||
if cluster.ConnectionPooler[role] != nil &&
|
||||
(cluster.ConnectionPooler[role].Deployment != nil || cluster.ConnectionPooler[role].Service != nil) {
|
||||
return fmt.Errorf("Connection pooler was not deleted for role %v", role)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func OnlyMasterDeleted(cluster *Cluster, err error, reason SyncReason) error {
|
||||
|
||||
if cluster.ConnectionPooler[Master] != nil &&
|
||||
(cluster.ConnectionPooler[Master].Deployment != nil || cluster.ConnectionPooler[Master].Service != nil) {
|
||||
return fmt.Errorf("Connection pooler master was not deleted")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func OnlyReplicaDeleted(cluster *Cluster, err error, reason SyncReason) error {
|
||||
|
||||
if cluster.ConnectionPooler[Replica] != nil &&
|
||||
(cluster.ConnectionPooler[Replica].Deployment != nil || cluster.ConnectionPooler[Replica].Service != nil) {
|
||||
return fmt.Errorf("Connection pooler replica was not deleted")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func noEmptySync(cluster *Cluster, err error, reason SyncReason) error {
|
||||
for _, msg := range reason {
|
||||
if strings.HasPrefix(msg, "update [] from '<nil>' to '") {
|
||||
return fmt.Errorf("There is an empty reason, %s", msg)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func TestConnectionPoolerSynchronization(t *testing.T) {
|
||||
testName := "Test connection pooler synchronization"
|
||||
newCluster := func(client k8sutil.KubernetesClient) *Cluster {
|
||||
return New(
|
||||
Config{
|
||||
OpConfig: config.Config{
|
||||
ProtectedRoles: []string{"admin"},
|
||||
Auth: config.Auth{
|
||||
SuperUsername: superUserName,
|
||||
ReplicationUsername: replicationUserName,
|
||||
},
|
||||
ConnectionPooler: config.ConnectionPooler{
|
||||
ConnectionPoolerDefaultCPURequest: "100m",
|
||||
ConnectionPoolerDefaultCPULimit: "100m",
|
||||
ConnectionPoolerDefaultMemoryRequest: "100Mi",
|
||||
ConnectionPoolerDefaultMemoryLimit: "100Mi",
|
||||
NumberOfInstances: int32ToPointer(1),
|
||||
},
|
||||
},
|
||||
}, client, acidv1.Postgresql{}, logger, eventRecorder)
|
||||
}
|
||||
cluster := newCluster(k8sutil.KubernetesClient{})
|
||||
|
||||
cluster.Statefulset = &appsv1.StatefulSet{
|
||||
pg := acidv1.Postgresql{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-sts",
|
||||
Name: "acid-fake-cluster",
|
||||
Namespace: namespace,
|
||||
},
|
||||
Spec: acidv1.PostgresSpec{
|
||||
EnableConnectionPooler: boolToPointer(true),
|
||||
EnableReplicaConnectionPooler: boolToPointer(true),
|
||||
Volume: acidv1.Volume{
|
||||
Size: "1Gi",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
var cluster = New(
|
||||
Config{
|
||||
OpConfig: config.Config{
|
||||
ConnectionPooler: config.ConnectionPooler{
|
||||
ConnectionPoolerDefaultCPURequest: "100m",
|
||||
ConnectionPoolerDefaultCPULimit: "100m",
|
||||
ConnectionPoolerDefaultMemoryRequest: "100Mi",
|
||||
ConnectionPoolerDefaultMemoryLimit: "100Mi",
|
||||
NumberOfInstances: int32ToPointer(1),
|
||||
},
|
||||
PodManagementPolicy: "ordered_ready",
|
||||
Resources: config.Resources{
|
||||
ClusterLabels: map[string]string{"application": "spilo"},
|
||||
ClusterNameLabel: "cluster-name",
|
||||
DefaultCPURequest: "300m",
|
||||
DefaultCPULimit: "300m",
|
||||
DefaultMemoryRequest: "300Mi",
|
||||
DefaultMemoryLimit: "300Mi",
|
||||
PodRoleLabel: "spilo-role",
|
||||
},
|
||||
},
|
||||
}, client, pg, logger, eventRecorder)
|
||||
|
||||
cluster.Name = "acid-fake-cluster"
|
||||
cluster.Namespace = "default"
|
||||
|
||||
_, err := cluster.createService(Master)
|
||||
assert.NoError(t, err)
|
||||
_, err = cluster.createStatefulSet()
|
||||
assert.NoError(t, err)
|
||||
|
||||
reason, err := cluster.createConnectionPooler(mockInstallLookupFunction)
|
||||
|
||||
if err != nil {
|
||||
t.Errorf("%s: Cannot create connection pooler, %s, %+v",
|
||||
testName, err, reason)
|
||||
}
|
||||
for _, role := range [2]PostgresRole{Master, Replica} {
|
||||
poolerLabels := cluster.labelsSet(false)
|
||||
poolerLabels["application"] = "db-connection-pooler"
|
||||
poolerLabels["connection-pooler"] = cluster.connectionPoolerName(role)
|
||||
|
||||
if cluster.ConnectionPooler[role] != nil {
|
||||
if cluster.ConnectionPooler[role].Deployment == nil && util.MapContains(cluster.ConnectionPooler[role].Deployment.Labels, poolerLabels) {
|
||||
t.Errorf("%s: Connection pooler deployment is empty for role %s", testName, role)
|
||||
}
|
||||
|
||||
if cluster.ConnectionPooler[role].Service == nil && util.MapContains(cluster.ConnectionPooler[role].Service.Labels, poolerLabels) {
|
||||
t.Errorf("%s: Connection pooler service is empty for role %s", testName, role)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
oldSpec := &acidv1.Postgresql{
|
||||
Spec: acidv1.PostgresSpec{
|
||||
EnableConnectionPooler: boolToPointer(true),
|
||||
EnableReplicaConnectionPooler: boolToPointer(true),
|
||||
},
|
||||
}
|
||||
newSpec := &acidv1.Postgresql{
|
||||
Spec: acidv1.PostgresSpec{
|
||||
EnableConnectionPooler: boolToPointer(false),
|
||||
EnableReplicaConnectionPooler: boolToPointer(false),
|
||||
},
|
||||
}
|
||||
|
||||
// Delete connection pooler via sync
|
||||
_, err = cluster.syncConnectionPooler(oldSpec, newSpec, mockInstallLookupFunction)
|
||||
if err != nil {
|
||||
t.Errorf("%s: Cannot sync connection pooler, %s", testName, err)
|
||||
}
|
||||
|
||||
for _, role := range [2]PostgresRole{Master, Replica} {
|
||||
err = cluster.deleteConnectionPooler(role)
|
||||
if err != nil {
|
||||
t.Errorf("%s: Cannot delete connection pooler, %s", testName, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestConnectionPoolerSync(t *testing.T) {
|
||||
|
||||
testName := "test connection pooler synchronization"
|
||||
clientSet := fake.NewSimpleClientset()
|
||||
acidClientSet := fakeacidv1.NewSimpleClientset()
|
||||
namespace := "default"
|
||||
|
||||
client := k8sutil.KubernetesClient{
|
||||
StatefulSetsGetter: clientSet.AppsV1(),
|
||||
ServicesGetter: clientSet.CoreV1(),
|
||||
DeploymentsGetter: clientSet.AppsV1(),
|
||||
PostgresqlsGetter: acidClientSet.AcidV1(),
|
||||
SecretsGetter: clientSet.CoreV1(),
|
||||
}
|
||||
|
||||
pg := acidv1.Postgresql{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "acid-fake-cluster",
|
||||
Namespace: namespace,
|
||||
},
|
||||
Spec: acidv1.PostgresSpec{
|
||||
Volume: acidv1.Volume{
|
||||
Size: "1Gi",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
var cluster = New(
|
||||
Config{
|
||||
OpConfig: config.Config{
|
||||
ConnectionPooler: config.ConnectionPooler{
|
||||
ConnectionPoolerDefaultCPURequest: "100m",
|
||||
ConnectionPoolerDefaultCPULimit: "100m",
|
||||
ConnectionPoolerDefaultMemoryRequest: "100Mi",
|
||||
ConnectionPoolerDefaultMemoryLimit: "100Mi",
|
||||
NumberOfInstances: int32ToPointer(1),
|
||||
},
|
||||
PodManagementPolicy: "ordered_ready",
|
||||
Resources: config.Resources{
|
||||
ClusterLabels: map[string]string{"application": "spilo"},
|
||||
ClusterNameLabel: "cluster-name",
|
||||
DefaultCPURequest: "300m",
|
||||
DefaultCPULimit: "300m",
|
||||
DefaultMemoryRequest: "300Mi",
|
||||
DefaultMemoryLimit: "300Mi",
|
||||
PodRoleLabel: "spilo-role",
|
||||
},
|
||||
},
|
||||
}, client, pg, logger, eventRecorder)
|
||||
|
||||
cluster.Name = "acid-fake-cluster"
|
||||
cluster.Namespace = "default"
|
||||
|
||||
_, err := cluster.createService(Master)
|
||||
assert.NoError(t, err)
|
||||
_, err = cluster.createStatefulSet()
|
||||
assert.NoError(t, err)
|
||||
|
||||
reason, err := cluster.createConnectionPooler(mockInstallLookupFunction)
|
||||
|
||||
if err != nil {
|
||||
t.Errorf("%s: Cannot create connection pooler, %s, %+v",
|
||||
testName, err, reason)
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
|
|
@ -358,10 +450,10 @@ func TestConnectionPoolerSynchronization(t *testing.T) {
|
|||
ConnectionPooler: &acidv1.ConnectionPooler{},
|
||||
},
|
||||
},
|
||||
cluster: newCluster(k8sutil.ClientMissingObjects()),
|
||||
cluster: cluster,
|
||||
defaultImage: "pooler:1.0",
|
||||
defaultInstances: 1,
|
||||
check: MasterobjectsAreSaved,
|
||||
check: MasterObjectsAreSaved,
|
||||
},
|
||||
{
|
||||
subTest: "create if doesn't exist",
|
||||
|
|
@ -375,10 +467,10 @@ func TestConnectionPoolerSynchronization(t *testing.T) {
|
|||
ConnectionPooler: &acidv1.ConnectionPooler{},
|
||||
},
|
||||
},
|
||||
cluster: newCluster(k8sutil.ClientMissingObjects()),
|
||||
cluster: cluster,
|
||||
defaultImage: "pooler:1.0",
|
||||
defaultInstances: 1,
|
||||
check: MasterobjectsAreSaved,
|
||||
check: MasterObjectsAreSaved,
|
||||
},
|
||||
{
|
||||
subTest: "create if doesn't exist with a flag",
|
||||
|
|
@ -390,10 +482,10 @@ func TestConnectionPoolerSynchronization(t *testing.T) {
|
|||
EnableConnectionPooler: boolToPointer(true),
|
||||
},
|
||||
},
|
||||
cluster: newCluster(k8sutil.ClientMissingObjects()),
|
||||
cluster: cluster,
|
||||
defaultImage: "pooler:1.0",
|
||||
defaultInstances: 1,
|
||||
check: MasterobjectsAreSaved,
|
||||
check: MasterObjectsAreSaved,
|
||||
},
|
||||
{
|
||||
subTest: "create no replica with flag",
|
||||
|
|
@ -405,7 +497,7 @@ func TestConnectionPoolerSynchronization(t *testing.T) {
|
|||
EnableReplicaConnectionPooler: boolToPointer(false),
|
||||
},
|
||||
},
|
||||
cluster: newCluster(k8sutil.NewMockKubernetesClient()),
|
||||
cluster: cluster,
|
||||
defaultImage: "pooler:1.0",
|
||||
defaultInstances: 1,
|
||||
check: objectsAreDeleted,
|
||||
|
|
@ -421,10 +513,10 @@ func TestConnectionPoolerSynchronization(t *testing.T) {
|
|||
EnableReplicaConnectionPooler: boolToPointer(true),
|
||||
},
|
||||
},
|
||||
cluster: newCluster(k8sutil.NewMockKubernetesClient()),
|
||||
cluster: cluster,
|
||||
defaultImage: "pooler:1.0",
|
||||
defaultInstances: 1,
|
||||
check: ReplicaobjectsAreSaved,
|
||||
check: ReplicaObjectsAreSaved,
|
||||
},
|
||||
{
|
||||
subTest: "create both master and replica",
|
||||
|
|
@ -438,7 +530,7 @@ func TestConnectionPoolerSynchronization(t *testing.T) {
|
|||
EnableConnectionPooler: boolToPointer(true),
|
||||
},
|
||||
},
|
||||
cluster: newCluster(k8sutil.ClientMissingObjects()),
|
||||
cluster: cluster,
|
||||
defaultImage: "pooler:1.0",
|
||||
defaultInstances: 1,
|
||||
check: objectsAreSaved,
|
||||
|
|
@ -456,7 +548,7 @@ func TestConnectionPoolerSynchronization(t *testing.T) {
|
|||
ConnectionPooler: &acidv1.ConnectionPooler{},
|
||||
},
|
||||
},
|
||||
cluster: newCluster(k8sutil.NewMockKubernetesClient()),
|
||||
cluster: cluster,
|
||||
defaultImage: "pooler:1.0",
|
||||
defaultInstances: 1,
|
||||
check: OnlyReplicaDeleted,
|
||||
|
|
@ -474,7 +566,7 @@ func TestConnectionPoolerSynchronization(t *testing.T) {
|
|||
EnableReplicaConnectionPooler: boolToPointer(true),
|
||||
},
|
||||
},
|
||||
cluster: newCluster(k8sutil.NewMockKubernetesClient()),
|
||||
cluster: cluster,
|
||||
defaultImage: "pooler:1.0",
|
||||
defaultInstances: 1,
|
||||
check: OnlyMasterDeleted,
|
||||
|
|
@ -489,7 +581,7 @@ func TestConnectionPoolerSynchronization(t *testing.T) {
|
|||
newSpec: &acidv1.Postgresql{
|
||||
Spec: acidv1.PostgresSpec{},
|
||||
},
|
||||
cluster: newCluster(k8sutil.NewMockKubernetesClient()),
|
||||
cluster: cluster,
|
||||
defaultImage: "pooler:1.0",
|
||||
defaultInstances: 1,
|
||||
check: objectsAreDeleted,
|
||||
|
|
@ -502,53 +594,11 @@ func TestConnectionPoolerSynchronization(t *testing.T) {
|
|||
newSpec: &acidv1.Postgresql{
|
||||
Spec: acidv1.PostgresSpec{},
|
||||
},
|
||||
cluster: newCluster(k8sutil.NewMockKubernetesClient()),
|
||||
cluster: cluster,
|
||||
defaultImage: "pooler:1.0",
|
||||
defaultInstances: 1,
|
||||
check: objectsAreDeleted,
|
||||
},
|
||||
{
|
||||
subTest: "update deployment",
|
||||
oldSpec: &acidv1.Postgresql{
|
||||
Spec: acidv1.PostgresSpec{
|
||||
ConnectionPooler: &acidv1.ConnectionPooler{
|
||||
NumberOfInstances: int32ToPointer(1),
|
||||
},
|
||||
},
|
||||
},
|
||||
newSpec: &acidv1.Postgresql{
|
||||
Spec: acidv1.PostgresSpec{
|
||||
ConnectionPooler: &acidv1.ConnectionPooler{
|
||||
NumberOfInstances: int32ToPointer(2),
|
||||
},
|
||||
},
|
||||
},
|
||||
cluster: newCluster(k8sutil.NewMockKubernetesClient()),
|
||||
defaultImage: "pooler:1.0",
|
||||
defaultInstances: 1,
|
||||
check: deploymentUpdated,
|
||||
},
|
||||
{
|
||||
subTest: "update deployment",
|
||||
oldSpec: &acidv1.Postgresql{
|
||||
Spec: acidv1.PostgresSpec{
|
||||
ConnectionPooler: &acidv1.ConnectionPooler{
|
||||
NumberOfInstances: int32ToPointer(1),
|
||||
},
|
||||
},
|
||||
},
|
||||
newSpec: &acidv1.Postgresql{
|
||||
Spec: acidv1.PostgresSpec{
|
||||
ConnectionPooler: &acidv1.ConnectionPooler{
|
||||
NumberOfInstances: int32ToPointer(2),
|
||||
},
|
||||
},
|
||||
},
|
||||
cluster: newCluster(k8sutil.NewMockKubernetesClient()),
|
||||
defaultImage: "pooler:1.0",
|
||||
defaultInstances: 1,
|
||||
check: deploymentUpdated,
|
||||
},
|
||||
{
|
||||
subTest: "update image from changed defaults",
|
||||
oldSpec: &acidv1.Postgresql{
|
||||
|
|
@ -561,7 +611,7 @@ func TestConnectionPoolerSynchronization(t *testing.T) {
|
|||
ConnectionPooler: &acidv1.ConnectionPooler{},
|
||||
},
|
||||
},
|
||||
cluster: newCluster(k8sutil.NewMockKubernetesClient()),
|
||||
cluster: cluster,
|
||||
defaultImage: "pooler:2.0",
|
||||
defaultInstances: 2,
|
||||
check: deploymentUpdated,
|
||||
|
|
@ -580,7 +630,7 @@ func TestConnectionPoolerSynchronization(t *testing.T) {
|
|||
ConnectionPooler: &acidv1.ConnectionPooler{},
|
||||
},
|
||||
},
|
||||
cluster: newCluster(k8sutil.NewMockKubernetesClient()),
|
||||
cluster: cluster,
|
||||
defaultImage: "pooler:1.0",
|
||||
defaultInstances: 1,
|
||||
check: noEmptySync,
|
||||
|
|
@ -591,6 +641,8 @@ func TestConnectionPoolerSynchronization(t *testing.T) {
|
|||
tt.cluster.OpConfig.ConnectionPooler.NumberOfInstances =
|
||||
int32ToPointer(tt.defaultInstances)
|
||||
|
||||
t.Logf("running test for %s [%s]", testName, tt.subTest)
|
||||
|
||||
reason, err := tt.cluster.syncConnectionPooler(tt.oldSpec,
|
||||
tt.newSpec, mockInstallLookupFunction)
|
||||
|
||||
|
|
@ -778,7 +830,7 @@ func TestConnectionPoolerDeploymentSpec(t *testing.T) {
|
|||
},
|
||||
expected: nil,
|
||||
cluster: cluster,
|
||||
check: testDeploymentOwnwerReference,
|
||||
check: testDeploymentOwnerReference,
|
||||
},
|
||||
{
|
||||
subTest: "selector",
|
||||
|
|
@ -931,7 +983,7 @@ func TestConnectionPoolerServiceSpec(t *testing.T) {
|
|||
ConnectionPooler: &acidv1.ConnectionPooler{},
|
||||
},
|
||||
cluster: cluster,
|
||||
check: testServiceOwnwerReference,
|
||||
check: testServiceOwnerReference,
|
||||
},
|
||||
{
|
||||
subTest: "selector",
|
||||
|
|
|
|||
|
|
@ -320,6 +320,16 @@ func getLocalAndBoostrapPostgreSQLParameters(parameters map[string]string) (loca
	return
}

func generateCapabilities(capabilities []string) v1.Capabilities {
	additionalCapabilities := make([]v1.Capability, 0, len(capabilities))
	for _, capability := range capabilities {
		additionalCapabilities = append(additionalCapabilities, v1.Capability(strings.ToUpper(capability)))
	}
	return v1.Capabilities{
		Add: additionalCapabilities,
	}
}

func nodeAffinity(nodeReadinessLabel map[string]string, nodeAffinity *v1.NodeAffinity) *v1.Affinity {
	if len(nodeReadinessLabel) == 0 && nodeAffinity == nil {
		return nil
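Editorial note: the new helper above upper-cases whatever is configured as additional pod capabilities and wraps it in a v1.Capabilities value. A minimal sketch of how that value is typically attached to a container spec; the real wiring happens through the new additionalPodCapabilities parameter of generateContainer in the next hunk, and the example function name here is made up.

package cluster

import (
	v1 "k8s.io/api/core/v1"
)

// exampleContainerWithCapabilities is illustrative only: it shows the shape of
// the SecurityContext that generateContainer builds from the configured
// capability list. Lower-case input is normalized to SYS_NICE / CHOWN.
func exampleContainerWithCapabilities() v1.Container {
	caps := generateCapabilities([]string{"sys_nice", "chown"})
	return v1.Container{
		Name: "postgres",
		SecurityContext: &v1.SecurityContext{
			Capabilities: &caps,
		},
	}
}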
@ -430,6 +440,7 @@ func generateContainer(
|
|||
envVars []v1.EnvVar,
|
||||
volumeMounts []v1.VolumeMount,
|
||||
privilegedMode bool,
|
||||
additionalPodCapabilities v1.Capabilities,
|
||||
) *v1.Container {
|
||||
return &v1.Container{
|
||||
Name: name,
|
||||
|
|
@ -453,8 +464,10 @@ func generateContainer(
|
|||
VolumeMounts: volumeMounts,
|
||||
Env: envVars,
|
||||
SecurityContext: &v1.SecurityContext{
|
||||
Privileged: &privilegedMode,
|
||||
ReadOnlyRootFilesystem: util.False(),
|
||||
AllowPrivilegeEscalation: &privilegedMode,
|
||||
Privileged: &privilegedMode,
|
||||
ReadOnlyRootFilesystem: util.False(),
|
||||
Capabilities: &additionalPodCapabilities,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
|
@ -1147,6 +1160,7 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
|
|||
deduplicateEnvVars(spiloEnvVars, c.containerName(), c.logger),
|
||||
volumeMounts,
|
||||
c.OpConfig.Resources.SpiloPrivileged,
|
||||
generateCapabilities(c.OpConfig.AdditionalPodCapabilities),
|
||||
)
|
||||
|
||||
// generate container specs for sidecars specified in the cluster manifest
|
||||
|
|
@ -1223,7 +1237,7 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
|
|||
effectiveRunAsUser,
|
||||
effectiveRunAsGroup,
|
||||
effectiveFSGroup,
|
||||
nodeAffinity(c.OpConfig.NodeReadinessLabel, &spec.NodeAffinity),
|
||||
nodeAffinity(c.OpConfig.NodeReadinessLabel, spec.NodeAffinity),
|
||||
spec.SchedulerName,
|
||||
int64(c.OpConfig.PodTerminateGracePeriod.Seconds()),
|
||||
c.OpConfig.PodServiceAccountName,
|
||||
|
|
@ -1561,11 +1575,17 @@ func (c *Cluster) generateSingleUserSecret(namespace string, pgUser spec.PgUser)
|
|||
}
|
||||
|
||||
username := pgUser.Name
|
||||
lbls := c.labelsSet(true)
|
||||
|
||||
if username == constants.ConnectionPoolerUserName {
|
||||
lbls = c.connectionPoolerLabels("", false).MatchLabels
|
||||
}
|
||||
|
||||
secret := v1.Secret{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: c.credentialSecretName(username),
|
||||
Namespace: namespace,
|
||||
Labels: c.labelsSet(true),
|
||||
Labels: lbls,
|
||||
Annotations: c.annotationsSet(nil),
|
||||
},
|
||||
Type: v1.SecretTypeOpaque,
|
||||
|
|
@ -1574,6 +1594,7 @@ func (c *Cluster) generateSingleUserSecret(namespace string, pgUser spec.PgUser)
|
|||
"password": []byte(pgUser.Password),
|
||||
},
|
||||
}
|
||||
|
||||
return &secret
|
||||
}
|
||||
|
||||
|
|
@ -1893,6 +1914,7 @@ func (c *Cluster) generateLogicalBackupJob() (*batchv1beta1.CronJob, error) {
|
|||
envVars,
|
||||
[]v1.VolumeMount{},
|
||||
c.OpConfig.SpiloPrivileged, // use same value as for normal DB pods
|
||||
v1.Capabilities{},
|
||||
)
|
||||
|
||||
labels := map[string]string{
|
||||
|
|
@ -2079,7 +2101,7 @@ func (c *Cluster) generateLogicalBackupPodEnvVars() []v1.EnvVar {

// getLogicalBackupJobName returns the name; the job itself may not exist
func (c *Cluster) getLogicalBackupJobName() (jobName string) {
	return "logical-backup-" + c.clusterName().Name
	return c.OpConfig.LogicalBackupJobPrefix + c.clusterName().Name
}

// Return an array of ownerReferences to make an arbitrary object dependent on
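Editorial note on the change above: the hard-coded "logical-backup-" prefix becomes configurable through LogicalBackupJobPrefix, and the CRD import further down defaults it to the old value, so existing clusters keep their job names. A small worked example (the cluster name is hypothetical):

// With the default prefix restored by importConfigurationFromCRD below,
// the generated job name is unchanged.
prefix := "logical-backup-"     // OpConfig.LogicalBackupJobPrefix default
jobName := prefix + "acid-test" // == "logical-backup-acid-test", same as before the change
_ = jobName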
|
|
|
|||
|
|
@ -882,7 +882,7 @@ func TestNodeAffinity(t *testing.T) {
|
|||
Volume: acidv1.Volume{
|
||||
Size: "1G",
|
||||
},
|
||||
NodeAffinity: *nodeAffinity,
|
||||
NodeAffinity: nodeAffinity,
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -939,7 +939,7 @@ func testCustomPodTemplate(cluster *Cluster, podSpec *v1.PodTemplateSpec) error
|
|||
return nil
|
||||
}
|
||||
|
||||
func testDeploymentOwnwerReference(cluster *Cluster, deployment *appsv1.Deployment) error {
|
||||
func testDeploymentOwnerReference(cluster *Cluster, deployment *appsv1.Deployment) error {
|
||||
owner := deployment.ObjectMeta.OwnerReferences[0]
|
||||
|
||||
if owner.Name != cluster.Statefulset.ObjectMeta.Name {
|
||||
|
|
@ -950,7 +950,7 @@ func testDeploymentOwnwerReference(cluster *Cluster, deployment *appsv1.Deployme
|
|||
return nil
|
||||
}
|
||||
|
||||
func testServiceOwnwerReference(cluster *Cluster, service *v1.Service, role PostgresRole) error {
|
||||
func testServiceOwnerReference(cluster *Cluster, service *v1.Service, role PostgresRole) error {
|
||||
owner := service.ObjectMeta.OwnerReferences[0]
|
||||
|
||||
if owner.Name != cluster.Statefulset.ObjectMeta.Name {
|
||||
|
|
@ -1489,3 +1489,50 @@ func TestGenerateService(t *testing.T) {
|
|||
assert.Equal(t, v1.ServiceExternalTrafficPolicyTypeLocal, service.Spec.ExternalTrafficPolicy)
|
||||
|
||||
}
|
||||
|
||||
func TestGenerateCapabilities(t *testing.T) {
|
||||
|
||||
testName := "TestGenerateCapabilities"
|
||||
tests := []struct {
|
||||
subTest string
|
||||
configured []string
|
||||
capabilities v1.Capabilities
|
||||
err error
|
||||
}{
|
||||
{
|
||||
subTest: "no capabilities",
|
||||
configured: nil,
|
||||
capabilities: v1.Capabilities{Add: []v1.Capability{}},
|
||||
err: fmt.Errorf("could not parse capabilities configuration of nil"),
|
||||
},
|
||||
{
|
||||
subTest: "empty capabilities",
|
||||
configured: []string{},
|
||||
capabilities: v1.Capabilities{Add: []v1.Capability{}},
|
||||
err: fmt.Errorf("could not parse empty capabilities configuration"),
|
||||
},
|
||||
{
|
||||
subTest: "configured capability",
|
||||
configured: []string{"SYS_NICE"},
|
||||
capabilities: v1.Capabilities{
|
||||
Add: []v1.Capability{"SYS_NICE"},
|
||||
},
|
||||
err: fmt.Errorf("could not generate one configured capability"),
|
||||
},
|
||||
{
|
||||
subTest: "configured capabilities",
|
||||
configured: []string{"SYS_NICE", "CHOWN"},
|
||||
capabilities: v1.Capabilities{
|
||||
Add: []v1.Capability{"SYS_NICE", "CHOWN"},
|
||||
},
|
||||
err: fmt.Errorf("could not generate multiple configured capabilities"),
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
caps := generateCapabilities(tt.configured)
|
||||
if !reflect.DeepEqual(caps, tt.capabilities) {
|
||||
t.Errorf("%s %s: expected `%v` but got `%v`",
|
||||
testName, tt.subTest, tt.capabilities, caps)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -4,6 +4,7 @@ import (
|
|||
"context"
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"time"
|
||||
|
||||
appsv1 "k8s.io/api/apps/v1"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
|
|
@ -11,6 +12,7 @@ import (
|
|||
|
||||
"github.com/zalando/postgres-operator/pkg/spec"
|
||||
"github.com/zalando/postgres-operator/pkg/util"
|
||||
"github.com/zalando/postgres-operator/pkg/util/retryutil"
|
||||
)
|
||||
|
||||
func (c *Cluster) listPods() ([]v1.Pod, error) {
|
||||
|
|
@ -309,7 +311,23 @@ func (c *Cluster) isSafeToRecreatePods(pods *v1.PodList) bool {
	}

	for _, pod := range pods.Items {
		state, err := c.patroni.GetPatroniMemberState(&pod)

		var state string

		err := retryutil.Retry(1*time.Second, 5*time.Second,
			func() (bool, error) {
				var err error
				state, err = c.patroni.GetPatroniMemberState(&pod)
				if err != nil {
					return false, err
				}
				return true, nil
			},
		)

		if err != nil {
			c.logger.Errorf("failed to get Patroni state for pod: %s", err)
			return false
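The hunk above replaces the single Patroni call with a retry loop, so a transient Patroni API hiccup no longer blocks pod recreation. A stripped-down sketch of the same retryutil.Retry pattern, using a hypothetical fetchState callback in place of patroni.GetPatroniMemberState; this is not operator code, only an illustration of the callback contract used above.

package cluster

import (
	"time"

	"github.com/zalando/postgres-operator/pkg/util/retryutil"
)

// waitForMemberState is a sketch: Retry re-invokes the callback every interval
// until it reports done (true) or the timeout elapses, mirroring the
// isSafeToRecreatePods change above.
func waitForMemberState(fetchState func() (string, error)) (string, error) {
	var state string
	err := retryutil.Retry(1*time.Second, 5*time.Second,
		func() (bool, error) {
			s, err := fetchState()
			if err != nil {
				return false, err
			}
			state = s
			return true, nil
		},
	)
	return state, err
}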
@ -53,7 +53,10 @@ func (c *Cluster) Sync(newSpec *acidv1.Postgresql) error {
|
|||
return err
|
||||
}
|
||||
|
||||
c.logger.Debugf("syncing volumes using %q storage resize mode", c.OpConfig.StorageResizeMode)
|
||||
// sync volume may already transition volumes to gp3, if iops/throughput or type is specified
|
||||
if err = c.syncVolumes(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if c.OpConfig.EnableEBSGp3Migration {
|
||||
err = c.executeEBSMigration()
|
||||
|
|
@ -62,34 +65,6 @@ func (c *Cluster) Sync(newSpec *acidv1.Postgresql) error {
|
|||
}
|
||||
}
|
||||
|
||||
if c.OpConfig.StorageResizeMode == "mixed" {
|
||||
// mixed op uses AWS API to adjust size,throughput,iops and calls pvc chance for file system resize
|
||||
|
||||
// resize pvc to adjust filesystem size until better K8s support
|
||||
if err = c.syncVolumeClaims(); err != nil {
|
||||
err = fmt.Errorf("could not sync persistent volume claims: %v", err)
|
||||
return err
|
||||
}
|
||||
} else if c.OpConfig.StorageResizeMode == "pvc" {
|
||||
if err = c.syncVolumeClaims(); err != nil {
|
||||
err = fmt.Errorf("could not sync persistent volume claims: %v", err)
|
||||
return err
|
||||
}
|
||||
} else if c.OpConfig.StorageResizeMode == "ebs" {
|
||||
// potentially enlarge volumes before changing the statefulset. By doing that
|
||||
// in this order we make sure the operator is not stuck waiting for a pod that
|
||||
// cannot start because it ran out of disk space.
|
||||
// TODO: handle the case of the cluster that is downsized and enlarged again
|
||||
// (there will be a volume from the old pod for which we can't act before the
|
||||
// the statefulset modification is concluded)
|
||||
if err = c.syncVolumes(); err != nil {
|
||||
err = fmt.Errorf("could not sync persistent volumes: %v", err)
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
c.logger.Infof("Storage resize is disabled (storage_resize_mode is off). Skipping volume sync.")
|
||||
}
|
||||
|
||||
if err = c.enforceMinResourceLimits(&c.Spec); err != nil {
|
||||
err = fmt.Errorf("could not enforce minimum resource limits: %v", err)
|
||||
return err
|
||||
|
|
@ -590,48 +565,6 @@ func (c *Cluster) syncRoles() (err error) {
|
|||
return nil
|
||||
}
|
||||
|
||||
// syncVolumeClaims reads all persistent volume claims and checks that their size matches the one declared in the statefulset.
|
||||
func (c *Cluster) syncVolumeClaims() error {
|
||||
c.setProcessName("syncing volume claims")
|
||||
|
||||
act, err := c.volumeClaimsNeedResizing(c.Spec.Volume)
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not compare size of the volume claims: %v", err)
|
||||
}
|
||||
if !act {
|
||||
c.logger.Infof("volume claims do not require changes")
|
||||
return nil
|
||||
}
|
||||
if err := c.resizeVolumeClaims(c.Spec.Volume); err != nil {
|
||||
return fmt.Errorf("could not sync volume claims: %v", err)
|
||||
}
|
||||
|
||||
c.logger.Infof("volume claims have been synced successfully")
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// syncVolumes reads all persistent volumes and checks that their size matches the one declared in the statefulset.
|
||||
func (c *Cluster) syncVolumes() error {
|
||||
c.setProcessName("syncing volumes")
|
||||
|
||||
act, err := c.volumesNeedResizing(c.Spec.Volume)
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not compare size of the volumes: %v", err)
|
||||
}
|
||||
if !act {
|
||||
return nil
|
||||
}
|
||||
|
||||
if err := c.resizeVolumes(); err != nil {
|
||||
return fmt.Errorf("could not sync volumes: %v", err)
|
||||
}
|
||||
|
||||
c.logger.Infof("volumes have been synced successfully")
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Cluster) syncDatabases() error {
|
||||
c.setProcessName("syncing databases")
|
||||
|
||||
|
|
|
|||
|
|
@ -240,7 +240,7 @@ func (c *Cluster) getTeamMembers(teamID string) ([]string, error) {

	c.logger.Debugf("fetching possible additional team members for team %q", teamID)
	members := []string{}
	additionalMembers := c.PgTeamMap[c.Spec.TeamID].AdditionalMembers
	additionalMembers := c.PgTeamMap[teamID].AdditionalMembers
	for _, member := range additionalMembers {
		members = append(members, member)
	}
|
||||
|
|
|
|||
|
|
@ -10,13 +10,215 @@ import (
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/aws/aws-sdk-go/aws"
	acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1"
	"github.com/zalando/postgres-operator/pkg/spec"
	"github.com/zalando/postgres-operator/pkg/util"
	"github.com/zalando/postgres-operator/pkg/util/constants"
	"github.com/zalando/postgres-operator/pkg/util/filesystems"
	"github.com/zalando/postgres-operator/pkg/util/volumes"
)

func (c *Cluster) syncVolumes() error {
	c.logger.Debugf("syncing volumes using %q storage resize mode", c.OpConfig.StorageResizeMode)
	var err error

	// check quantity string once, and do not bother with it anymore anywhere else
	_, err = resource.ParseQuantity(c.Spec.Volume.Size)
	if err != nil {
		return fmt.Errorf("could not parse volume size from the manifest: %v", err)
	}

	if c.OpConfig.StorageResizeMode == "mixed" {
		// mixed mode uses the AWS API to adjust size, throughput and iops, and calls a pvc change for the file system resize
		// in case of errors we proceed to let K8s do its work, favoring the disk space increase over other adjustments

		err = c.populateVolumeMetaData()
		if err != nil {
			c.logger.Errorf("populating EBS meta data failed, skipping potential adjustments: %v", err)
		} else {
			err = c.syncUnderlyingEBSVolume()
			if err != nil {
				c.logger.Errorf("errors occurred during EBS volume adjustments: %v", err)
			}
		}

		// resize pvc to adjust filesystem size until better K8s support
		if err = c.syncVolumeClaims(); err != nil {
			err = fmt.Errorf("could not sync persistent volume claims: %v", err)
			return err
		}
	} else if c.OpConfig.StorageResizeMode == "pvc" {
		if err = c.syncVolumeClaims(); err != nil {
			err = fmt.Errorf("could not sync persistent volume claims: %v", err)
			return err
		}
	} else if c.OpConfig.StorageResizeMode == "ebs" {
		// potentially enlarge volumes before changing the statefulset. By doing that
		// in this order we make sure the operator is not stuck waiting for a pod that
		// cannot start because it ran out of disk space.
		// TODO: handle the case of the cluster that is downsized and enlarged again
		// (there will be a volume from the old pod for which we can't act before the
		// statefulset modification is concluded)
		if err = c.syncEbsVolumes(); err != nil {
			err = fmt.Errorf("could not sync persistent volumes: %v", err)
			return err
		}
	} else {
		c.logger.Infof("Storage resize is disabled (storage_resize_mode is off). Skipping volume sync.")
	}

	return nil
}

func (c *Cluster) syncUnderlyingEBSVolume() error {
	c.logger.Infof("starting to sync EBS volumes: type, iops, throughput, and size")

	var err error

	targetValue := c.Spec.Volume
	newSize, err := resource.ParseQuantity(targetValue.Size)
	targetSize := quantityToGigabyte(newSize)

	awsGp3 := aws.String("gp3")
	awsIo2 := aws.String("io2")

	errors := []string{}

	for _, volume := range c.EBSVolumes {
		var modifyIops *int64
		var modifyThroughput *int64
		var modifySize *int64
		var modifyType *string

		if targetValue.Iops != nil {
			if volume.Iops != *targetValue.Iops {
				modifyIops = targetValue.Iops
			}
		}

		if targetValue.Throughput != nil {
			if volume.Throughput != *targetValue.Throughput {
				modifyThroughput = targetValue.Throughput
			}
		}

		if targetSize > volume.Size {
			modifySize = &targetSize
		}

		if modifyIops != nil || modifyThroughput != nil || modifySize != nil {
			if modifyIops != nil || modifyThroughput != nil {
				// we default to gp3 if iops and throughput are configured
				modifyType = awsGp3
				if targetValue.VolumeType == "io2" {
					modifyType = awsIo2
				}
			} else if targetValue.VolumeType == "gp3" && volume.VolumeType != "gp3" {
				modifyType = awsGp3
			} else {
				// do not touch type
				modifyType = nil
			}

			err = c.VolumeResizer.ModifyVolume(volume.VolumeID, modifyType, modifySize, modifyIops, modifyThroughput)
			if err != nil {
				errors = append(errors, fmt.Sprintf("modify volume failed: volume=%s size=%d iops=%d throughput=%d", volume.VolumeID, volume.Size, volume.Iops, volume.Throughput))
			}
		}
	}

	if len(errors) > 0 {
		for _, s := range errors {
			c.logger.Warningf(s)
		}
		// c.logger.Errorf("failed to modify %d of %d volumes", len(c.EBSVolumes), len(errors))
	}
	return nil
}

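To make the decision logic above concrete: for a manifest requesting 150Gi with iops 6000 and throughput 275 while an existing gp3 volume still has 100 GiB and 3000 iops, the loop issues a single ModifyVolume call raising all three values. The gomock expectation below mirrors the ones in TestMigrateGp3Support further down in this diff; the mock setup is abridged and for illustration only.

// Abridged from the volume tests below; resizer is a mocks.MockVolumeResizer.
resizer.EXPECT().ModifyVolume(
	gomock.Eq("ebs-volume-1"),    // volume lagging behind the manifest
	gomock.Eq(aws.String("gp3")), // type defaults to gp3 when iops/throughput are set
	gomock.Eq(aws.Int64(150)),    // size grows to the manifest value
	gomock.Eq(aws.Int64(6000)),   // requested iops
	gomock.Eq(aws.Int64(275)),    // requested throughput
).Return(nil)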
func (c *Cluster) populateVolumeMetaData() error {
|
||||
c.logger.Infof("starting reading ebs meta data")
|
||||
|
||||
pvs, err := c.listPersistentVolumes()
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not list persistent volumes: %v", err)
|
||||
}
|
||||
c.logger.Debugf("found %d volumes, size of known volumes %d", len(pvs), len(c.EBSVolumes))
|
||||
|
||||
volumeIds := []string{}
|
||||
var volumeID string
|
||||
for _, pv := range pvs {
|
||||
volumeID, err = c.VolumeResizer.ExtractVolumeID(pv.Spec.AWSElasticBlockStore.VolumeID)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
volumeIds = append(volumeIds, volumeID)
|
||||
}
|
||||
|
||||
currentVolumes, err := c.VolumeResizer.DescribeVolumes(volumeIds)
|
||||
if nil != err {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(currentVolumes) != len(c.EBSVolumes) {
|
||||
c.logger.Debugf("number of ebs volumes (%d) discovered differs from already known volumes (%d)", len(currentVolumes), len(c.EBSVolumes))
|
||||
}
|
||||
|
||||
// reset map, operator is not responsible for dangling ebs volumes
|
||||
c.EBSVolumes = make(map[string]volumes.VolumeProperties)
|
||||
for _, volume := range currentVolumes {
|
||||
c.EBSVolumes[volume.VolumeID] = volume
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// syncVolumeClaims reads all persistent volume claims and checks that their size matches the one declared in the statefulset.
|
||||
func (c *Cluster) syncVolumeClaims() error {
|
||||
c.setProcessName("syncing volume claims")
|
||||
|
||||
needsResizing, err := c.volumeClaimsNeedResizing(c.Spec.Volume)
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not compare size of the volume claims: %v", err)
|
||||
}
|
||||
|
||||
if !needsResizing {
|
||||
c.logger.Infof("volume claims do not require changes")
|
||||
return nil
|
||||
}
|
||||
|
||||
if err := c.resizeVolumeClaims(c.Spec.Volume); err != nil {
|
||||
return fmt.Errorf("could not sync volume claims: %v", err)
|
||||
}
|
||||
|
||||
c.logger.Infof("volume claims have been synced successfully")
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// syncVolumes reads all persistent volumes and checks that their size matches the one declared in the statefulset.
|
||||
func (c *Cluster) syncEbsVolumes() error {
|
||||
c.setProcessName("syncing EBS and Claims volumes")
|
||||
|
||||
act, err := c.volumesNeedResizing()
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not compare size of the volumes: %v", err)
|
||||
}
|
||||
if !act {
|
||||
return nil
|
||||
}
|
||||
|
||||
if err := c.resizeVolumes(); err != nil {
|
||||
return fmt.Errorf("could not sync volumes: %v", err)
|
||||
}
|
||||
|
||||
c.logger.Infof("volumes have been synced successfully")
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Cluster) listPersistentVolumeClaims() ([]v1.PersistentVolumeClaim, error) {
|
||||
ns := c.Namespace
|
||||
listOptions := metav1.ListOptions{
|
||||
|
|
@ -125,15 +327,16 @@ func (c *Cluster) resizeVolumes() error {
|
|||
|
||||
c.setProcessName("resizing EBS volumes")
|
||||
|
||||
resizer := c.VolumeResizer
|
||||
var totalIncompatible int
|
||||
|
||||
newQuantity, err := resource.ParseQuantity(c.Spec.Volume.Size)
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not parse volume size: %v", err)
|
||||
}
|
||||
|
||||
pvs, newSize, err := c.listVolumesWithManifestSize(c.Spec.Volume)
|
||||
newSize := quantityToGigabyte(newQuantity)
|
||||
resizer := c.VolumeResizer
|
||||
var totalIncompatible int
|
||||
|
||||
pvs, err := c.listPersistentVolumes()
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not list persistent volumes: %v", err)
|
||||
}
|
||||
|
|
@ -214,33 +417,23 @@ func (c *Cluster) volumeClaimsNeedResizing(newVolume acidv1.Volume) (bool, error
|
|||
return false, nil
|
||||
}
|
||||
|
||||
func (c *Cluster) volumesNeedResizing(newVolume acidv1.Volume) (bool, error) {
|
||||
vols, manifestSize, err := c.listVolumesWithManifestSize(newVolume)
|
||||
func (c *Cluster) volumesNeedResizing() (bool, error) {
|
||||
newQuantity, _ := resource.ParseQuantity(c.Spec.Volume.Size)
|
||||
newSize := quantityToGigabyte(newQuantity)
|
||||
|
||||
vols, err := c.listPersistentVolumes()
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
for _, pv := range vols {
|
||||
currentSize := quantityToGigabyte(pv.Spec.Capacity[v1.ResourceStorage])
|
||||
if currentSize != manifestSize {
|
||||
if currentSize != newSize {
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
return false, nil
|
||||
}
|
||||
|
||||
func (c *Cluster) listVolumesWithManifestSize(newVolume acidv1.Volume) ([]*v1.PersistentVolume, int64, error) {
|
||||
newSize, err := resource.ParseQuantity(newVolume.Size)
|
||||
if err != nil {
|
||||
return nil, 0, fmt.Errorf("could not parse volume size from the manifest: %v", err)
|
||||
}
|
||||
manifestSize := quantityToGigabyte(newSize)
|
||||
vols, err := c.listPersistentVolumes()
|
||||
if err != nil {
|
||||
return nil, 0, fmt.Errorf("could not list persistent volumes: %v", err)
|
||||
}
|
||||
return vols, manifestSize, nil
|
||||
}
|
||||
|
||||
// getPodNameFromPersistentVolume returns a pod name that it extracts from the volume claim ref.
|
||||
func getPodNameFromPersistentVolume(pv *v1.PersistentVolume) *spec.NamespacedName {
|
||||
namespace := pv.Spec.ClaimRef.Namespace
|
||||
|
|
@ -258,7 +451,7 @@ func (c *Cluster) executeEBSMigration() error {
|
|||
}
|
||||
c.logger.Infof("starting EBS gp2 to gp3 migration")
|
||||
|
||||
pvs, _, err := c.listVolumesWithManifestSize(c.Spec.Volume)
|
||||
pvs, err := c.listPersistentVolumes()
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not list persistent volumes: %v", err)
|
||||
}
|
||||
|
|
@ -294,10 +487,13 @@ func (c *Cluster) executeEBSMigration() error {
|
|||
return err
|
||||
}
|
||||
|
||||
var i3000 int64 = 3000
|
||||
var i125 int64 = 125
|
||||
|
||||
for _, volume := range awsVolumes {
|
||||
if volume.VolumeType == "gp2" && volume.Size < c.OpConfig.EnableEBSGp3MigrationMaxSize {
|
||||
c.logger.Infof("modifying EBS volume %s to type gp3 migration (%d)", volume.VolumeID, volume.Size)
|
||||
err = c.VolumeResizer.ModifyVolume(volume.VolumeID, "gp3", volume.Size, 3000, 125)
|
||||
err = c.VolumeResizer.ModifyVolume(volume.VolumeID, aws.String("gp3"), &volume.Size, &i3000, &i125)
|
||||
if nil != err {
|
||||
c.logger.Warningf("modifying volume %s failed: %v", volume.VolumeID, err)
|
||||
}
|
||||
|
|
|
|||
|
|
@ -11,7 +11,9 @@ import (
|
|||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/golang/mock/gomock"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/zalando/postgres-operator/mocks"
|
||||
acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1"
|
||||
|
|
@ -187,60 +189,16 @@ func TestMigrateEBS(t *testing.T) {
|
|||
cluster.Namespace = namespace
|
||||
filterLabels := cluster.labelsSet(false)
|
||||
|
||||
pvcList := CreatePVCs(namespace, clusterName, filterLabels, 2, "1Gi")
|
||||
|
||||
ps := v1.PersistentVolumeSpec{}
|
||||
ps.AWSElasticBlockStore = &v1.AWSElasticBlockStoreVolumeSource{}
|
||||
ps.AWSElasticBlockStore.VolumeID = "aws://eu-central-1b/ebs-volume-1"
|
||||
|
||||
ps2 := v1.PersistentVolumeSpec{}
|
||||
ps2.AWSElasticBlockStore = &v1.AWSElasticBlockStoreVolumeSource{}
|
||||
ps2.AWSElasticBlockStore.VolumeID = "aws://eu-central-1b/ebs-volume-2"
|
||||
|
||||
pvList := &v1.PersistentVolumeList{
|
||||
Items: []v1.PersistentVolume{
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "persistent-volume-0",
|
||||
},
|
||||
Spec: ps,
|
||||
},
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "persistent-volume-1",
|
||||
},
|
||||
Spec: ps2,
|
||||
},
|
||||
testVolumes := []testVolume{
|
||||
{
|
||||
size: 100,
|
||||
},
|
||||
{
|
||||
size: 100,
|
||||
},
|
||||
}
|
||||
|
||||
for _, pvc := range pvcList.Items {
|
||||
cluster.KubeClient.PersistentVolumeClaims(namespace).Create(context.TODO(), &pvc, metav1.CreateOptions{})
|
||||
}
|
||||
|
||||
for _, pv := range pvList.Items {
|
||||
cluster.KubeClient.PersistentVolumes().Create(context.TODO(), &pv, metav1.CreateOptions{})
|
||||
}
|
||||
|
||||
pod := v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: clusterName + "-0",
|
||||
Labels: filterLabels,
|
||||
},
|
||||
Spec: v1.PodSpec{},
|
||||
}
|
||||
|
||||
cluster.KubeClient.Pods(namespace).Create(context.TODO(), &pod, metav1.CreateOptions{})
|
||||
|
||||
pod = v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: clusterName + "-1",
|
||||
Labels: filterLabels,
|
||||
},
|
||||
Spec: v1.PodSpec{},
|
||||
}
|
||||
|
||||
cluster.KubeClient.Pods(namespace).Create(context.TODO(), &pod, metav1.CreateOptions{})
|
||||
initTestVolumesAndPods(cluster.KubeClient, namespace, clusterName, filterLabels, testVolumes)
|
||||
|
||||
ctrl := gomock.NewController(t)
|
||||
defer ctrl.Finish()
|
||||
|
|
@ -256,8 +214,251 @@ func TestMigrateEBS(t *testing.T) {
|
|||
{VolumeID: "ebs-volume-2", VolumeType: "gp3", Size: 100}}, nil)
|
||||
|
||||
// expect only gp2 volume to be modified
|
||||
resizer.EXPECT().ModifyVolume(gomock.Eq("ebs-volume-1"), gomock.Eq("gp3"), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil)
|
||||
resizer.EXPECT().ModifyVolume(gomock.Eq("ebs-volume-1"), gomock.Eq(aws.String("gp3")), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil)
|
||||
|
||||
cluster.VolumeResizer = resizer
|
||||
cluster.executeEBSMigration()
|
||||
}
|
||||
|
||||
type testVolume struct {
|
||||
iops int64
|
||||
throughput int64
|
||||
size int64
|
||||
volType string
|
||||
}
|
||||
|
||||
func initTestVolumesAndPods(client k8sutil.KubernetesClient, namespace, clustername string, labels labels.Set, volumes []testVolume) {
|
||||
i := 0
|
||||
for _, v := range volumes {
|
||||
storage1Gi, _ := resource.ParseQuantity(fmt.Sprintf("%d", v.size))
|
||||
|
||||
ps := v1.PersistentVolumeSpec{}
|
||||
ps.AWSElasticBlockStore = &v1.AWSElasticBlockStoreVolumeSource{}
|
||||
ps.AWSElasticBlockStore.VolumeID = fmt.Sprintf("aws://eu-central-1b/ebs-volume-%d", i+1)
|
||||
|
||||
pv := v1.PersistentVolume{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: fmt.Sprintf("persistent-volume-%d", i),
|
||||
},
|
||||
Spec: ps,
|
||||
}
|
||||
|
||||
client.PersistentVolumes().Create(context.TODO(), &pv, metav1.CreateOptions{})
|
||||
|
||||
pvc := v1.PersistentVolumeClaim{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: fmt.Sprintf("%s-%s-%d", constants.DataVolumeName, clustername, i),
|
||||
Namespace: namespace,
|
||||
Labels: labels,
|
||||
},
|
||||
Spec: v1.PersistentVolumeClaimSpec{
|
||||
Resources: v1.ResourceRequirements{
|
||||
Requests: v1.ResourceList{
|
||||
v1.ResourceStorage: storage1Gi,
|
||||
},
|
||||
},
|
||||
VolumeName: fmt.Sprintf("persistent-volume-%d", i),
|
||||
},
|
||||
}
|
||||
|
||||
client.PersistentVolumeClaims(namespace).Create(context.TODO(), &pvc, metav1.CreateOptions{})
|
||||
|
||||
pod := v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: fmt.Sprintf("%s-%d", clustername, i),
|
||||
Labels: labels,
|
||||
},
|
||||
Spec: v1.PodSpec{},
|
||||
}
|
||||
|
||||
client.Pods(namespace).Create(context.TODO(), &pod, metav1.CreateOptions{})
|
||||
|
||||
i = i + 1
|
||||
}
|
||||
}
|
||||
|
||||
func TestMigrateGp3Support(t *testing.T) {
|
||||
client, _ := newFakeK8sPVCclient()
|
||||
clusterName := "acid-test-cluster"
|
||||
namespace := "default"
|
||||
|
||||
// new cluster with mixed storage resize mode and configured labels
|
||||
var cluster = New(
|
||||
Config{
|
||||
OpConfig: config.Config{
|
||||
Resources: config.Resources{
|
||||
ClusterLabels: map[string]string{"application": "spilo"},
|
||||
ClusterNameLabel: "cluster-name",
|
||||
},
|
||||
StorageResizeMode: "mixed",
|
||||
EnableEBSGp3Migration: false,
|
||||
EnableEBSGp3MigrationMaxSize: 1000,
|
||||
},
|
||||
}, client, acidv1.Postgresql{}, logger, eventRecorder)
|
||||
|
||||
cluster.Spec.Volume.Size = "150Gi"
|
||||
cluster.Spec.Volume.Iops = aws.Int64(6000)
|
||||
cluster.Spec.Volume.Throughput = aws.Int64(275)
|
||||
|
||||
// set metadata, so that labels will get correct values
|
||||
cluster.Name = clusterName
|
||||
cluster.Namespace = namespace
|
||||
filterLabels := cluster.labelsSet(false)
|
||||
|
||||
testVolumes := []testVolume{
|
||||
{
|
||||
size: 100,
|
||||
},
|
||||
{
|
||||
size: 100,
|
||||
},
|
||||
{
|
||||
size: 100,
|
||||
},
|
||||
}
|
||||
|
||||
initTestVolumesAndPods(cluster.KubeClient, namespace, clusterName, filterLabels, testVolumes)
|
||||
|
||||
ctrl := gomock.NewController(t)
|
||||
defer ctrl.Finish()
|
||||
|
||||
resizer := mocks.NewMockVolumeResizer(ctrl)
|
||||
|
||||
resizer.EXPECT().ExtractVolumeID(gomock.Eq("aws://eu-central-1b/ebs-volume-1")).Return("ebs-volume-1", nil)
|
||||
resizer.EXPECT().ExtractVolumeID(gomock.Eq("aws://eu-central-1b/ebs-volume-2")).Return("ebs-volume-2", nil)
|
||||
resizer.EXPECT().ExtractVolumeID(gomock.Eq("aws://eu-central-1b/ebs-volume-3")).Return("ebs-volume-3", nil)
|
||||
|
||||
resizer.EXPECT().DescribeVolumes(gomock.Eq([]string{"ebs-volume-1", "ebs-volume-2", "ebs-volume-3"})).Return(
|
||||
[]volumes.VolumeProperties{
|
||||
{VolumeID: "ebs-volume-1", VolumeType: "gp3", Size: 100, Iops: 3000},
|
||||
{VolumeID: "ebs-volume-2", VolumeType: "gp3", Size: 105, Iops: 4000},
|
||||
{VolumeID: "ebs-volume-3", VolumeType: "gp3", Size: 151, Iops: 6000, Throughput: 275}}, nil)
|
||||
|
||||
// expect modification of the two volumes that lag behind the manifest; ebs-volume-3 already matches
|
||||
resizer.EXPECT().ModifyVolume(gomock.Eq("ebs-volume-1"), gomock.Eq(aws.String("gp3")), gomock.Eq(aws.Int64(150)), gomock.Eq(aws.Int64(6000)), gomock.Eq(aws.Int64(275))).Return(nil)
|
||||
resizer.EXPECT().ModifyVolume(gomock.Eq("ebs-volume-2"), gomock.Eq(aws.String("gp3")), gomock.Eq(aws.Int64(150)), gomock.Eq(aws.Int64(6000)), gomock.Eq(aws.Int64(275))).Return(nil)
|
||||
// resizer.EXPECT().ModifyVolume(gomock.Eq("ebs-volume-3"), gomock.Eq(aws.String("gp3")), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil)
|
||||
|
||||
cluster.VolumeResizer = resizer
|
||||
cluster.syncVolumes()
|
||||
}
|
||||
|
||||
func TestManualGp2Gp3Support(t *testing.T) {
|
||||
client, _ := newFakeK8sPVCclient()
|
||||
clusterName := "acid-test-cluster"
|
||||
namespace := "default"
|
||||
|
||||
// new cluster with mixed storage resize mode and configured labels
|
||||
var cluster = New(
|
||||
Config{
|
||||
OpConfig: config.Config{
|
||||
Resources: config.Resources{
|
||||
ClusterLabels: map[string]string{"application": "spilo"},
|
||||
ClusterNameLabel: "cluster-name",
|
||||
},
|
||||
StorageResizeMode: "mixed",
|
||||
EnableEBSGp3Migration: false,
|
||||
EnableEBSGp3MigrationMaxSize: 1000,
|
||||
},
|
||||
}, client, acidv1.Postgresql{}, logger, eventRecorder)
|
||||
|
||||
cluster.Spec.Volume.Size = "150Gi"
|
||||
cluster.Spec.Volume.Iops = aws.Int64(6000)
|
||||
cluster.Spec.Volume.Throughput = aws.Int64(275)
|
||||
|
||||
// set metadata, so that labels will get correct values
|
||||
cluster.Name = clusterName
|
||||
cluster.Namespace = namespace
|
||||
filterLabels := cluster.labelsSet(false)
|
||||
|
||||
testVolumes := []testVolume{
|
||||
{
|
||||
size: 100,
|
||||
},
|
||||
{
|
||||
size: 100,
|
||||
},
|
||||
}
|
||||
|
||||
initTestVolumesAndPods(cluster.KubeClient, namespace, clusterName, filterLabels, testVolumes)
|
||||
|
||||
ctrl := gomock.NewController(t)
|
||||
defer ctrl.Finish()
|
||||
|
||||
resizer := mocks.NewMockVolumeResizer(ctrl)
|
||||
|
||||
resizer.EXPECT().ExtractVolumeID(gomock.Eq("aws://eu-central-1b/ebs-volume-1")).Return("ebs-volume-1", nil)
|
||||
resizer.EXPECT().ExtractVolumeID(gomock.Eq("aws://eu-central-1b/ebs-volume-2")).Return("ebs-volume-2", nil)
|
||||
|
||||
resizer.EXPECT().DescribeVolumes(gomock.Eq([]string{"ebs-volume-1", "ebs-volume-2"})).Return(
|
||||
[]volumes.VolumeProperties{
|
||||
{VolumeID: "ebs-volume-1", VolumeType: "gp2", Size: 150, Iops: 3000},
|
||||
{VolumeID: "ebs-volume-2", VolumeType: "gp2", Size: 150, Iops: 4000},
|
||||
}, nil)
|
||||
|
||||
// expect both gp2 volumes to be converted to gp3 with the requested iops and throughput
|
||||
resizer.EXPECT().ModifyVolume(gomock.Eq("ebs-volume-1"), gomock.Eq(aws.String("gp3")), gomock.Nil(), gomock.Eq(aws.Int64(6000)), gomock.Eq(aws.Int64(275))).Return(nil)
|
||||
resizer.EXPECT().ModifyVolume(gomock.Eq("ebs-volume-2"), gomock.Eq(aws.String("gp3")), gomock.Nil(), gomock.Eq(aws.Int64(6000)), gomock.Eq(aws.Int64(275))).Return(nil)
|
||||
|
||||
cluster.VolumeResizer = resizer
|
||||
cluster.syncVolumes()
|
||||
}
|
||||
|
||||
func TestDontTouchType(t *testing.T) {
|
||||
client, _ := newFakeK8sPVCclient()
|
||||
clusterName := "acid-test-cluster"
|
||||
namespace := "default"
|
||||
|
||||
// new cluster with pvc storage resize mode and configured labels
|
||||
var cluster = New(
|
||||
Config{
|
||||
OpConfig: config.Config{
|
||||
Resources: config.Resources{
|
||||
ClusterLabels: map[string]string{"application": "spilo"},
|
||||
ClusterNameLabel: "cluster-name",
|
||||
},
|
||||
StorageResizeMode: "mixed",
|
||||
EnableEBSGp3Migration: false,
|
||||
EnableEBSGp3MigrationMaxSize: 1000,
|
||||
},
|
||||
}, client, acidv1.Postgresql{}, logger, eventRecorder)
|
||||
|
||||
cluster.Spec.Volume.Size = "177Gi"
|
||||
|
||||
// set metadata, so that labels will get correct values
|
||||
cluster.Name = clusterName
|
||||
cluster.Namespace = namespace
|
||||
filterLabels := cluster.labelsSet(false)
|
||||
|
||||
testVolumes := []testVolume{
|
||||
{
|
||||
size: 150,
|
||||
},
|
||||
{
|
||||
size: 150,
|
||||
},
|
||||
}
|
||||
|
||||
initTestVolumesAndPods(cluster.KubeClient, namespace, clusterName, filterLabels, testVolumes)
|
||||
|
||||
ctrl := gomock.NewController(t)
|
||||
defer ctrl.Finish()
|
||||
|
||||
resizer := mocks.NewMockVolumeResizer(ctrl)
|
||||
|
||||
resizer.EXPECT().ExtractVolumeID(gomock.Eq("aws://eu-central-1b/ebs-volume-1")).Return("ebs-volume-1", nil)
|
||||
resizer.EXPECT().ExtractVolumeID(gomock.Eq("aws://eu-central-1b/ebs-volume-2")).Return("ebs-volume-2", nil)
|
||||
|
||||
resizer.EXPECT().DescribeVolumes(gomock.Eq([]string{"ebs-volume-1", "ebs-volume-2"})).Return(
|
||||
[]volumes.VolumeProperties{
|
||||
{VolumeID: "ebs-volume-1", VolumeType: "gp2", Size: 150, Iops: 3000},
|
||||
{VolumeID: "ebs-volume-2", VolumeType: "gp2", Size: 150, Iops: 4000},
|
||||
}, nil)
|
||||
|
||||
// expect only a size change; the volume type is left untouched
|
||||
resizer.EXPECT().ModifyVolume(gomock.Eq("ebs-volume-1"), gomock.Nil(), gomock.Eq(aws.Int64(177)), gomock.Nil(), gomock.Nil()).Return(nil)
|
||||
resizer.EXPECT().ModifyVolume(gomock.Eq("ebs-volume-2"), gomock.Nil(), gomock.Eq(aws.Int64(177)), gomock.Nil(), gomock.Nil()).Return(nil)
|
||||
|
||||
cluster.VolumeResizer = resizer
|
||||
cluster.syncVolumes()
|
||||
}
|
||||
|
|
|
|||
|
|
@@ -39,7 +39,7 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur
	result.EnableSpiloWalPathCompat = fromCRD.EnableSpiloWalPathCompat
	result.EtcdHost = fromCRD.EtcdHost
	result.KubernetesUseConfigMaps = fromCRD.KubernetesUseConfigMaps
	result.DockerImage = util.Coalesce(fromCRD.DockerImage, "registry.opensource.zalan.do/acid/spilo-12:1.6-p3")
	result.DockerImage = util.Coalesce(fromCRD.DockerImage, "registry.opensource.zalan.do/acid/spilo-13:2.0-p2")
	result.Workers = util.CoalesceUInt32(fromCRD.Workers, 8)
	result.MinInstances = fromCRD.MinInstances
	result.MaxInstances = fromCRD.MaxInstances
@@ -66,11 +66,12 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur
	result.SpiloRunAsUser = fromCRD.Kubernetes.SpiloRunAsUser
	result.SpiloRunAsGroup = fromCRD.Kubernetes.SpiloRunAsGroup
	result.SpiloFSGroup = fromCRD.Kubernetes.SpiloFSGroup
	result.AdditionalPodCapabilities = fromCRD.Kubernetes.AdditionalPodCapabilities
	result.ClusterDomain = util.Coalesce(fromCRD.Kubernetes.ClusterDomain, "cluster.local")
	result.WatchedNamespace = fromCRD.Kubernetes.WatchedNamespace
	result.PDBNameFormat = fromCRD.Kubernetes.PDBNameFormat
	result.EnablePodDisruptionBudget = util.CoalesceBool(fromCRD.Kubernetes.EnablePodDisruptionBudget, util.True())
	result.StorageResizeMode = util.Coalesce(fromCRD.Kubernetes.StorageResizeMode, "ebs")
	result.StorageResizeMode = util.Coalesce(fromCRD.Kubernetes.StorageResizeMode, "pvc")
	result.EnableInitContainers = util.CoalesceBool(fromCRD.Kubernetes.EnableInitContainers, util.True())
	result.EnableSidecars = util.CoalesceBool(fromCRD.Kubernetes.EnableSidecars, util.True())
	result.SecretNameTemplate = fromCRD.Kubernetes.SecretNameTemplate
@@ -141,11 +142,11 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur
	result.AdditionalSecretMount = fromCRD.AWSGCP.AdditionalSecretMount
	result.AdditionalSecretMountPath = util.Coalesce(fromCRD.AWSGCP.AdditionalSecretMountPath, "/meta/credentials")
	result.EnableEBSGp3Migration = fromCRD.AWSGCP.EnableEBSGp3Migration
	result.EnableEBSGp3MigrationMaxSize = fromCRD.AWSGCP.EnableEBSGp3MigrationMaxSize
	result.EnableEBSGp3MigrationMaxSize = util.CoalesceInt64(fromCRD.AWSGCP.EnableEBSGp3MigrationMaxSize, 1000)

	// logical backup config
	result.LogicalBackupSchedule = util.Coalesce(fromCRD.LogicalBackup.Schedule, "30 00 * * *")
	result.LogicalBackupDockerImage = util.Coalesce(fromCRD.LogicalBackup.DockerImage, "registry.opensource.zalan.do/acid/logical-backup")
	result.LogicalBackupDockerImage = util.Coalesce(fromCRD.LogicalBackup.DockerImage, "registry.opensource.zalan.do/acid/logical-backup:v1.6.0")
	result.LogicalBackupProvider = util.Coalesce(fromCRD.LogicalBackup.BackupProvider, "s3")
	result.LogicalBackupS3Bucket = fromCRD.LogicalBackup.S3Bucket
	result.LogicalBackupS3Region = fromCRD.LogicalBackup.S3Region
@@ -154,6 +155,7 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur
	result.LogicalBackupS3SecretAccessKey = fromCRD.LogicalBackup.S3SecretAccessKey
	result.LogicalBackupS3SSE = fromCRD.LogicalBackup.S3SSE
	result.LogicalBackupGoogleApplicationCredentials = fromCRD.LogicalBackup.GoogleApplicationCredentials
	result.LogicalBackupJobPrefix = util.Coalesce(fromCRD.LogicalBackup.JobPrefix, "logical-backup-")

	// debug config
	result.DebugLogging = fromCRD.OperatorDebug.DebugLogging
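The defaults above lean on the small coalesce helpers in the util package: a CRD field that is left empty (or zero) falls back to the compiled-in default, such as the Spilo 13 image, the "pvc" storage resize mode, or the 1000 GiB gp3 migration cap. A minimal sketch of that pattern; the string variant is assumed to test for the empty string, while CoalesceInt64 appears verbatim further down in this diff:

// Sketch only: the string coalesce helper is assumed to treat "" as "not set".
func Coalesce(val, defaultVal string) string {
	if val == "" {
		return defaultVal
	}
	return val
}

// e.g. an unset docker_image resolves to the bundled Spilo 13 image:
// image := util.Coalesce("", "registry.opensource.zalan.do/acid/spilo-13:2.0-p2")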
@@ -1,5 +1,5 @@
/*
Copyright 2020 Compose, Zalando SE
Copyright 2021 Compose, Zalando SE

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal

The same copyright-year update from 2020 to 2021 is repeated in the license header of every other source file touched by this merge; the identical hunks are not listed again here.
@@ -23,38 +23,39 @@ type CRD struct {

// Resources describes kubernetes resource specific configuration parameters
type Resources struct {
	ResourceCheckInterval time.Duration `name:"resource_check_interval" default:"3s"`
	ResourceCheckTimeout time.Duration `name:"resource_check_timeout" default:"10m"`
	PodLabelWaitTimeout time.Duration `name:"pod_label_wait_timeout" default:"10m"`
	PodDeletionWaitTimeout time.Duration `name:"pod_deletion_wait_timeout" default:"10m"`
	PodTerminateGracePeriod time.Duration `name:"pod_terminate_grace_period" default:"5m"`
	SpiloRunAsUser *int64 `json:"spilo_runasuser,omitempty"`
	SpiloRunAsGroup *int64 `json:"spilo_runasgroup,omitempty"`
	SpiloFSGroup *int64 `name:"spilo_fsgroup"`
	PodPriorityClassName string `name:"pod_priority_class_name"`
	ClusterDomain string `name:"cluster_domain" default:"cluster.local"`
	SpiloPrivileged bool `name:"spilo_privileged" default:"false"`
	ClusterLabels map[string]string `name:"cluster_labels" default:"application:spilo"`
	InheritedLabels []string `name:"inherited_labels" default:""`
	InheritedAnnotations []string `name:"inherited_annotations" default:""`
	DownscalerAnnotations []string `name:"downscaler_annotations"`
	ClusterNameLabel string `name:"cluster_name_label" default:"cluster-name"`
	DeleteAnnotationDateKey string `name:"delete_annotation_date_key"`
	DeleteAnnotationNameKey string `name:"delete_annotation_name_key"`
	PodRoleLabel string `name:"pod_role_label" default:"spilo-role"`
	PodToleration map[string]string `name:"toleration" default:""`
	DefaultCPURequest string `name:"default_cpu_request" default:"100m"`
	DefaultMemoryRequest string `name:"default_memory_request" default:"100Mi"`
	DefaultCPULimit string `name:"default_cpu_limit" default:"1"`
	DefaultMemoryLimit string `name:"default_memory_limit" default:"500Mi"`
	MinCPULimit string `name:"min_cpu_limit" default:"250m"`
	MinMemoryLimit string `name:"min_memory_limit" default:"250Mi"`
	PodEnvironmentConfigMap spec.NamespacedName `name:"pod_environment_configmap"`
	PodEnvironmentSecret string `name:"pod_environment_secret"`
	NodeReadinessLabel map[string]string `name:"node_readiness_label" default:""`
	MaxInstances int32 `name:"max_instances" default:"-1"`
	MinInstances int32 `name:"min_instances" default:"-1"`
	ShmVolume *bool `name:"enable_shm_volume" default:"true"`
	ResourceCheckInterval time.Duration `name:"resource_check_interval" default:"3s"`
	ResourceCheckTimeout time.Duration `name:"resource_check_timeout" default:"10m"`
	PodLabelWaitTimeout time.Duration `name:"pod_label_wait_timeout" default:"10m"`
	PodDeletionWaitTimeout time.Duration `name:"pod_deletion_wait_timeout" default:"10m"`
	PodTerminateGracePeriod time.Duration `name:"pod_terminate_grace_period" default:"5m"`
	SpiloRunAsUser *int64 `name:"spilo_runasuser,omitempty"`
	SpiloRunAsGroup *int64 `name:"spilo_runasgroup,omitempty"`
	SpiloFSGroup *int64 `name:"spilo_fsgroup"`
	PodPriorityClassName string `name:"pod_priority_class_name"`
	ClusterDomain string `name:"cluster_domain" default:"cluster.local"`
	SpiloPrivileged bool `name:"spilo_privileged" default:"false"`
	AdditionalPodCapabilities []string `name:"additional_pod_capabilities" default:""`
	ClusterLabels map[string]string `name:"cluster_labels" default:"application:spilo"`
	InheritedLabels []string `name:"inherited_labels" default:""`
	InheritedAnnotations []string `name:"inherited_annotations" default:""`
	DownscalerAnnotations []string `name:"downscaler_annotations"`
	ClusterNameLabel string `name:"cluster_name_label" default:"cluster-name"`
	DeleteAnnotationDateKey string `name:"delete_annotation_date_key"`
	DeleteAnnotationNameKey string `name:"delete_annotation_name_key"`
	PodRoleLabel string `name:"pod_role_label" default:"spilo-role"`
	PodToleration map[string]string `name:"toleration" default:""`
	DefaultCPURequest string `name:"default_cpu_request" default:"100m"`
	DefaultMemoryRequest string `name:"default_memory_request" default:"100Mi"`
	DefaultCPULimit string `name:"default_cpu_limit" default:"1"`
	DefaultMemoryLimit string `name:"default_memory_limit" default:"500Mi"`
	MinCPULimit string `name:"min_cpu_limit" default:"250m"`
	MinMemoryLimit string `name:"min_memory_limit" default:"250Mi"`
	PodEnvironmentConfigMap spec.NamespacedName `name:"pod_environment_configmap"`
	PodEnvironmentSecret string `name:"pod_environment_secret"`
	NodeReadinessLabel map[string]string `name:"node_readiness_label" default:""`
	MaxInstances int32 `name:"max_instances" default:"-1"`
	MinInstances int32 `name:"min_instances" default:"-1"`
	ShmVolume *bool `name:"enable_shm_volume" default:"true"`
}

type InfrastructureRole struct {
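The new additional_pod_capabilities option is a plain list of Linux capability names; the config plumbing earlier in this diff hands it straight through to pod generation, where such a list is normally expressed as Capabilities.Add on the container's SecurityContext. A hedged sketch of that translation, using k8s.io/api/core/v1; generateCapabilities is a made-up name, not taken from this diff:

// Sketch only: turns the configured capability names into a SecurityContext.
func generateCapabilities(caps []string) *v1.SecurityContext {
	if len(caps) == 0 {
		return nil
	}
	add := make([]v1.Capability, 0, len(caps))
	for _, c := range caps {
		add = append(add, v1.Capability(c))
	}
	return &v1.SecurityContext{Capabilities: &v1.Capabilities{Add: add}}
}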
@@ -112,7 +113,7 @@ type Scalyr struct {
// LogicalBackup defines configuration for logical backup
type LogicalBackup struct {
	LogicalBackupSchedule string `name:"logical_backup_schedule" default:"30 00 * * *"`
	LogicalBackupDockerImage string `name:"logical_backup_docker_image" default:"registry.opensource.zalan.do/acid/logical-backup"`
	LogicalBackupDockerImage string `name:"logical_backup_docker_image" default:"registry.opensource.zalan.do/acid/logical-backup:v1.6.0"`
	LogicalBackupProvider string `name:"logical_backup_provider" default:"s3"`
	LogicalBackupS3Bucket string `name:"logical_backup_s3_bucket" default:""`
	LogicalBackupS3Region string `name:"logical_backup_s3_region" default:""`
@@ -121,6 +122,7 @@ type LogicalBackup struct {
	LogicalBackupS3SecretAccessKey string `name:"logical_backup_s3_secret_access_key" default:""`
	LogicalBackupS3SSE string `name:"logical_backup_s3_sse" default:""`
	LogicalBackupGoogleApplicationCredentials string `name:"logical_backup_google_application_credentials" default:""`
	LogicalBackupJobPrefix string `name:"logical_backup_job_prefix" default:"logical-backup-"`
}

// Operator options for connection pooler
@@ -149,7 +151,7 @@ type Config struct {
	WatchedNamespace string `name:"watched_namespace"` // special values: "*" means 'watch all namespaces', the empty string "" means 'watch a namespace where operator is deployed to'
	KubernetesUseConfigMaps bool `name:"kubernetes_use_configmaps" default:"false"`
	EtcdHost string `name:"etcd_host" default:""` // special values: the empty string "" means Patroni will use K8s as a DCS
	DockerImage string `name:"docker_image" default:"registry.opensource.zalan.do/acid/spilo-12:1.6-p3"`
	DockerImage string `name:"docker_image" default:"registry.opensource.zalan.do/acid/spilo-13:2.0-p2"`
	SidecarImages map[string]string `name:"sidecar_docker_images"` // deprecated in favour of SidecarContainers
	SidecarContainers []v1.Container `name:"sidecars"`
	PodServiceAccountName string `name:"pod_service_account_name" default:"postgres-pod"`
@@ -182,7 +184,7 @@ type Config struct {
	CustomPodAnnotations map[string]string `name:"custom_pod_annotations"`
	EnablePodAntiAffinity bool `name:"enable_pod_antiaffinity" default:"false"`
	PodAntiAffinityTopologyKey string `name:"pod_antiaffinity_topology_key" default:"kubernetes.io/hostname"`
	StorageResizeMode string `name:"storage_resize_mode" default:"ebs"`
	StorageResizeMode string `name:"storage_resize_mode" default:"pvc"`
	EnableLoadBalancer *bool `name:"enable_load_balancer"` // deprecated and kept for backward compatibility
	ExternalTrafficPolicy string `name:"external_traffic_policy" default:"Cluster"`
	MasterDNSNameFormat StringTemplate `name:"master_dns_name_format" default:"{cluster}.{team}.{hostedzone}"`
@@ -202,7 +204,7 @@ type Config struct {
	PostgresSuperuserTeams []string `name:"postgres_superuser_teams" default:""`
	SetMemoryRequestToLimit bool `name:"set_memory_request_to_limit" default:"false"`
	EnableLazySpiloUpgrade bool `name:"enable_lazy_spilo_upgrade" default:"false"`
	EnablePgVersionEnvVar bool `name:"enable_pgversion_env_var" default:"false"`
	EnablePgVersionEnvVar bool `name:"enable_pgversion_env_var" default:"true"`
	EnableSpiloWalPathCompat bool `name:"enable_spilo_wal_path_compat" default:"false"`
}
@@ -271,6 +271,14 @@ func CoalesceUInt32(val, defaultVal uint32) uint32 {
	return val
}

// CoalesceInt64 works like coalesce but for int64
func CoalesceInt64(val, defaultVal int64) int64 {
	if val == 0 {
		return defaultVal
	}
	return val
}

// CoalesceBool works like coalesce but for *bool
func CoalesceBool(val, defaultVal *bool) *bool {
	if val == nil {
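Note the zero-value semantics: like the other coalesce helpers, CoalesceInt64 cannot distinguish "explicitly 0" from "not set", so enable_ebs_gp3_migration_max_size always falls back to its default of 1000 when left at zero. A small usage sketch, mirroring importConfigurationFromCRD above (util refers to the operator's util package):

// Sketch: how the helper behaves for enable_ebs_gp3_migration_max_size.
func exampleMaxSize(fromCRD int64) int64 {
	// 0 (unset) -> 1000; any non-zero value is kept as-is
	return util.CoalesceInt64(fromCRD, 1000)
}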
@@ -141,18 +141,9 @@ func (r *EBSVolumeResizer) ResizeVolume(volumeID string, newSize int64) error {
}

// ModifyVolume Modify EBS volume
func (r *EBSVolumeResizer) ModifyVolume(volumeID string, newType string, newSize int64, iops int64, throughput int64) error {
func (r *EBSVolumeResizer) ModifyVolume(volumeID string, newType *string, newSize *int64, iops *int64, throughput *int64) error {
	/* first check if the volume is already of a requested size */
	volumeOutput, err := r.connection.DescribeVolumes(&ec2.DescribeVolumesInput{VolumeIds: []*string{&volumeID}})
	if err != nil {
		return fmt.Errorf("could not get information about the volume: %v", err)
	}
	vol := volumeOutput.Volumes[0]
	if *vol.VolumeId != volumeID {
		return fmt.Errorf("describe volume %q returned information about a non-matching volume %q", volumeID, *vol.VolumeId)
	}

	input := ec2.ModifyVolumeInput{Size: &newSize, VolumeId: &volumeID, VolumeType: &newType, Iops: &iops, Throughput: &throughput}
	input := ec2.ModifyVolumeInput{Size: newSize, VolumeId: &volumeID, VolumeType: newType, Iops: iops, Throughput: throughput}
	output, err := r.connection.ModifyVolume(&input)
	if err != nil {
		return fmt.Errorf("could not modify persistent volume: %v", err)
@@ -21,7 +21,7 @@ type VolumeResizer interface {
	GetProviderVolumeID(pv *v1.PersistentVolume) (string, error)
	ExtractVolumeID(volumeID string) (string, error)
	ResizeVolume(providerVolumeID string, newSize int64) error
	ModifyVolume(providerVolumeID string, newType string, newSize int64, iops int64, throughput int64) error
	ModifyVolume(providerVolumeID string, newType *string, newSize *int64, iops *int64, throughput *int64) error
	DisconnectFromProvider() error
	DescribeVolumes(providerVolumesID []string) ([]VolumeProperties, error)
}
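With the switch to pointer arguments, nil now means "leave this property unchanged": fields of the aws-sdk-go ModifyVolumeInput that stay nil are simply not sent, which is what lets the gp3 migration change type, iops and throughput without resizing, and the plain resize change only the size, exactly as the two tests above expect. A hedged sketch of a caller against the new interface; "resizer" stands for any volumes.VolumeResizer implementation and the values are taken from the tests:

// Sketch only: driving the pointer-based ModifyVolume.
func modifyExamples(resizer volumes.VolumeResizer) error {
	// size-only change: type, iops and throughput are left untouched
	if err := resizer.ModifyVolume("ebs-volume-1", nil, aws.Int64(177), nil, nil); err != nil {
		return err
	}
	// gp2 -> gp3 migration that keeps the current size
	return resizer.ModifyVolume("ebs-volume-2", aws.String("gp3"), nil, aws.Int64(6000), aws.Int64(275))
}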
@@ -1,5 +1,5 @@
FROM alpine:3.6
MAINTAINER team-acid@zalando.de
FROM registry.opensource.zalan.do/library/alpine-3.12:latest
LABEL maintainer="Team ACID @ Zalando <team-acid@zalando.de>"

EXPOSE 8081
Some files were not shown because too many files have changed in this diff.