Merge branch 'delete-volume-on-scale-down' of github.com:zalando/postgres-operator into delete-volume-on-scale-down
This commit is contained in:
commit
6f631c0a83
@@ -17,8 +17,9 @@ pipelines with no access to Kubernetes directly.
 * Rolling updates on Postgres cluster changes
 * Volume resize without Pod restarts
+* Database connection pooler
 * Cloning Postgres clusters
-* Logical Backups to S3 Bucket
+* Logical backups to S3 Bucket
 * Standby cluster from S3 WAL archive
 * Configurable for non-cloud environments
 * UI to create and edit Postgres cluster manifests
@@ -68,6 +68,8 @@ spec:
                 type: boolean
               etcd_host:
                 type: string
+              kubernetes_use_configmaps:
+                type: boolean
               max_instances:
                 type: integer
                 minimum: -1  # -1 = disabled
@@ -320,44 +322,44 @@ spec:
                     pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
                   scalyr_server_url:
                     type: string
-              connection_pool:
+              connection_pooler:
                 type: object
                 properties:
-                  connection_pool_schema:
+                  connection_pooler_schema:
                     type: string
                     #default: "pooler"
-                  connection_pool_user:
+                  connection_pooler_user:
                     type: string
                     #default: "pooler"
-                  connection_pool_image:
+                  connection_pooler_image:
                     type: string
                     #default: "registry.opensource.zalan.do/acid/pgbouncer"
-                  connection_pool_max_db_connections:
+                  connection_pooler_max_db_connections:
                     type: integer
                     #default: 60
-                  connection_pool_mode:
+                  connection_pooler_mode:
                     type: string
                     enum:
                       - "session"
                       - "transaction"
                     #default: "transaction"
-                  connection_pool_number_of_instances:
+                  connection_pooler_number_of_instances:
                     type: integer
                     minimum: 2
                     #default: 2
-                  connection_pool_default_cpu_limit:
+                  connection_pooler_default_cpu_limit:
                     type: string
                     pattern: '^(\d+m|\d+(\.\d{1,3})?)$'
                     #default: "1"
-                  connection_pool_default_cpu_request:
+                  connection_pooler_default_cpu_request:
                     type: string
                     pattern: '^(\d+m|\d+(\.\d{1,3})?)$'
                     #default: "500m"
-                  connection_pool_default_memory_limit:
+                  connection_pooler_default_memory_limit:
                     type: string
                     pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
                     #default: "100Mi"
-                  connection_pool_default_memory_request:
+                  connection_pooler_default_memory_request:
                     type: string
                     pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
                     #default: "100Mi"
@@ -106,7 +106,7 @@ spec:
               uid:
                 format: uuid
                 type: string
-              connectionPool:
+              connectionPooler:
                 type: object
                 properties:
                   dockerImage:
@@ -162,7 +162,7 @@ spec:
               # Note: usernames specified here as database owners must be declared in the users key of the spec key.
               dockerImage:
                 type: string
-              enableConnectionPool:
+              enableConnectionPooler:
                 type: boolean
               enableLogicalBackup:
                 type: boolean
@@ -218,6 +218,10 @@ spec:
                     type: integer
                   retry_timeout:
                     type: integer
+                  synchronous_mode:
+                    type: boolean
+                  synchronous_mode_strict:
+                    type: boolean
                   maximum_lag_on_failover:
                     type: integer
               podAnnotations:
@@ -20,5 +20,5 @@ data:
 {{ toYaml .Values.configDebug | indent 2 }}
 {{ toYaml .Values.configLoggingRestApi | indent 2 }}
 {{ toYaml .Values.configTeamsApi | indent 2 }}
-{{ toYaml .Values.configConnectionPool | indent 2 }}
+{{ toYaml .Values.configConnectionPooler | indent 2 }}
 {{- end }}
@@ -34,6 +34,6 @@ configuration:
 {{ toYaml .Values.configLoggingRestApi | indent 4 }}
   scalyr:
 {{ toYaml .Values.configScalyr | indent 4 }}
-  connection_pool:
-{{ toYaml .Values.configConnectionPool | indent 4 }}
+  connection_pooler:
+{{ toYaml .Values.configConnectionPooler | indent 4 }}
 {{- end }}
@@ -25,6 +25,8 @@ configGeneral:
   enable_unused_pvc_deletion: false
   # etcd connection string for Patroni. Empty uses K8s-native DCS.
   etcd_host: ""
+  # Select if setup uses endpoints (default), or configmaps to manage leader (DCS=k8s)
+  # kubernetes_use_configmaps: false
   # Spilo docker image
   docker_image: registry.opensource.zalan.do/acid/spilo-12:1.6-p2
   # max number of instances in Postgres cluster. -1 = no limit
@@ -269,24 +271,24 @@ configScalyr:
   # Memory request value for the Scalyr sidecar
   scalyr_memory_request: 50Mi

-configConnectionPool:
+configConnectionPooler:
   # db schema to install lookup function into
-  connection_pool_schema: "pooler"
+  connection_pooler_schema: "pooler"
   # db user for pooler to use
-  connection_pool_user: "pooler"
+  connection_pooler_user: "pooler"
   # docker image
-  connection_pool_image: "registry.opensource.zalan.do/acid/pgbouncer"
+  connection_pooler_image: "registry.opensource.zalan.do/acid/pgbouncer"
   # max db connections the pooler should hold
-  connection_pool_max_db_connections: 60
+  connection_pooler_max_db_connections: 60
   # default pooling mode
-  connection_pool_mode: "transaction"
+  connection_pooler_mode: "transaction"
   # number of pooler instances
-  connection_pool_number_of_instances: 2
+  connection_pooler_number_of_instances: 2
   # default resources
-  connection_pool_default_cpu_request: 500m
-  connection_pool_default_memory_request: 100Mi
-  connection_pool_default_cpu_limit: "1"
-  connection_pool_default_memory_limit: 100Mi
+  connection_pooler_default_cpu_request: 500m
+  connection_pooler_default_memory_request: 100Mi
+  connection_pooler_default_cpu_limit: "1"
+  connection_pooler_default_memory_limit: 100Mi

 rbac:
   # Specifies whether RBAC resources should be created
@@ -25,6 +25,8 @@ configGeneral:
   enable_unused_pvc_deletion: "false"
   # etcd connection string for Patroni. Empty uses K8s-native DCS.
   etcd_host: ""
+  # Select if setup uses endpoints (default), or configmaps to manage leader (DCS=k8s)
+  # kubernetes_use_configmaps: "false"
   # Spilo docker image
   docker_image: registry.opensource.zalan.do/acid/spilo-12:1.6-p2
   # max number of instances in Postgres cluster. -1 = no limit
@@ -246,24 +248,24 @@ configTeamsApi:
   # teams_api_url: http://fake-teams-api.default.svc.cluster.local

 # configure connection pooler deployment created by the operator
-configConnectionPool:
+configConnectionPooler:
   # db schema to install lookup function into
-  connection_pool_schema: "pooler"
+  connection_pooler_schema: "pooler"
   # db user for pooler to use
-  connection_pool_user: "pooler"
+  connection_pooler_user: "pooler"
   # docker image
-  connection_pool_image: "registry.opensource.zalan.do/acid/pgbouncer"
+  connection_pooler_image: "registry.opensource.zalan.do/acid/pgbouncer"
   # max db connections the pooler should hold
-  connection_pool_max_db_connections: 60
+  connection_pooler_max_db_connections: 60
   # default pooling mode
-  connection_pool_mode: "transaction"
+  connection_pooler_mode: "transaction"
   # number of pooler instances
-  connection_pool_number_of_instances: 2
+  connection_pooler_number_of_instances: 2
   # default resources
-  connection_pool_default_cpu_request: 500m
-  connection_pool_default_memory_request: 100Mi
-  connection_pool_default_cpu_limit: "1"
-  connection_pool_default_memory_limit: 100Mi
+  connection_pooler_default_cpu_request: 500m
+  connection_pooler_default_memory_request: 100Mi
+  connection_pooler_default_cpu_limit: "1"
+  connection_pooler_default_memory_limit: 100Mi

 rbac:
   # Specifies whether RBAC resources should be created
@@ -140,10 +140,10 @@ These parameters are grouped directly under the `spec` key in the manifest.
   is `false`, then no volume will be mounted no matter how the operator was
   configured (so you can override the operator configuration). Optional.

-* **enableConnectionPool**
-  Tells the operator to create a connection pool with a database. If this
-  field is true, a connection pool deployment will be created even if
-  `connectionPool` section is empty. Optional, not set by default.
+* **enableConnectionPooler**
+  Tells the operator to create a connection pooler with a database. If this
+  field is true, a connection pooler deployment will be created even if the
+  `connectionPooler` section is empty. Optional, not set by default.

 * **enableLogicalBackup**
   Determines if the logical backup of this cluster should be taken and uploaded
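For illustration only (not part of this diff), a minimal manifest sketch using the new flag; the cluster and team names are placeholders taken from the repo's example manifests:

```yaml
# Hypothetical example: enable the pooler with operator defaults only.
apiVersion: "acid.zalan.do/v1"
kind: postgresql
metadata:
  name: acid-minimal-cluster
spec:
  teamId: "acid"
  numberOfInstances: 2
  volume:
    size: 1Gi
  postgresql:
    version: "12"
  # creates a pooler deployment even though no connectionPooler section is given
  enableConnectionPooler: true
```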
@@ -217,6 +217,12 @@ explanation of `ttl` and `loop_wait` parameters.
   automatically created by Patroni for cluster members and permanent replication
   slots. Optional.

+* **synchronous_mode**
+  Patroni `synchronous_mode` parameter value. The default is set to `false`. Optional.
+
+* **synchronous_mode_strict**
+  Patroni `synchronous_mode_strict` parameter value. Can be used in addition to `synchronous_mode`. The default is set to `false`. Optional.
+
 ## Postgres container resources

 Those parameters define [CPU and memory requests and limits](https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/)
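As a sketch of how these new keys would look in a cluster manifest (values are illustrative; the keys mirror the Patroni settings of the same name, as also shown in the complete manifest example further down this diff):

```yaml
spec:
  patroni:
    ttl: 30
    loop_wait: 10
    retry_timeout: 10
    # both new keys default to false when omitted
    synchronous_mode: false
    synchronous_mode_strict: false
```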
@@ -365,34 +371,35 @@ CPU and memory limits for the sidecar container.
   memory limits for the sidecar container. Optional, overrides the
   `default_memory_limits` operator configuration parameter. Optional.

-## Connection pool
+## Connection pooler

-Parameters are grouped under the `connectionPool` top-level key and specify
-configuration for connection pool. If this section is not empty, a connection
-pool will be created for a database even if `enableConnectionPool` is not
+Parameters are grouped under the `connectionPooler` top-level key and specify
+the configuration for the connection pooler. If this section is not empty, a connection
+pooler will be created for a database even if `enableConnectionPooler` is not
 present.

 * **numberOfInstances**
-  How many instances of connection pool to create.
+  How many instances of the connection pooler to create.

 * **schema**
-  Schema to create for credentials lookup function.
+  Database schema to create for the credentials lookup function.

 * **user**
-  User to create for connection pool to be able to connect to a database.
+  User to create for the connection pooler to be able to connect to a database.
+  You can also choose a role from the `users` section or a system user role.

 * **dockerImage**
-  Which docker image to use for connection pool deployment.
+  Which docker image to use for the connection pooler deployment.

 * **maxDBConnections**
   How many connections the pooler can max hold. This value is divided among the
   pooler pods.

 * **mode**
-  In which mode to run connection pool, transaction or session.
+  In which mode to run the connection pooler, transaction or session.

 * **resources**
-  Resource configuration for connection pool deployment.
+  Resource configuration for the connection pooler deployment.

 ## Custom TLS certificates
@@ -80,6 +80,12 @@ Those are top-level keys, containing both leaf keys and groups.
   Patroni native Kubernetes support is used. The default is empty (use
   Kubernetes-native DCS).

+* **kubernetes_use_configmaps**
+  Select whether the setup uses endpoints (default) or configmaps to manage the
+  leader when the DCS is Kubernetes (not etcd or similar). In OpenShift it is
+  not possible to use the endpoints option, so configmaps are required. By
+  default, `kubernetes_use_configmaps: false`, meaning endpoints will be used.
+
 * **docker_image**
   Spilo Docker image for Postgres instances. For production, don't rely on the
   default image, as it might be not the most up-to-date one. Instead, build
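A sketch of the CRD-based operator configuration with this option set (only the relevant keys shown; matches the default-configuration manifest touched later in this diff):

```yaml
apiVersion: "acid.zalan.do/v1"
kind: OperatorConfiguration
metadata:
  name: postgresql-operator-default-configuration
configuration:
  etcd_host: ""                    # empty: use the Kubernetes-native DCS
  kubernetes_use_configmaps: true  # required on OpenShift, where endpoints cannot be used
```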
@@ -132,7 +138,7 @@ Those are top-level keys, containing both leaf keys and groups.
   The default is `false`.

 * **enable_unused_pvc_deletion**
-  Tells the operator to delete persistent volume claims of no longer running pods. That removes respective persistent volumes because operator configures them with the 'Delete' reclaim policy. Note operator deletes unused PVCs for clusters created both before and after this option is turned on. Deletion is not guaranteed: When it fails, operator retries at next Sync() event.
+  Tells the operator to delete persistent volume claims of pods that are no longer running. That also removes the respective persistent volumes, because the operator configures them with the 'Delete' reclaim policy. The operator deletes unused PVCs for clusters created both before and after this option is turned on. Deletion is not guaranteed: when it fails, the operator retries at the next Sync() event.
   The default is `false`.

 ## Postgres users
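In the ConfigMap-based setup this option would be set as a string value, e.g. (a sketch, not part of this diff):

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: postgres-operator
data:
  # PVCs of pods shut down by a scale-down are deleted at the next Sync()
  enable_unused_pvc_deletion: "true"
```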
@@ -601,39 +607,42 @@ scalyr sidecar. In the CRD-based configuration they are grouped under the
 * **scalyr_memory_limit**
   Memory limit value for the Scalyr sidecar. The default is `500Mi`.

-## Connection pool configuration
+## Connection pooler configuration

-Parameters are grouped under the `connection_pool` top-level key and specify
-default configuration for connection pool, if a postgres manifest requests it
+Parameters are grouped under the `connection_pooler` top-level key and specify
+the default configuration for the connection pooler, if a Postgres manifest requests it
 but does not specify some of the parameters. All of them are optional with the
 operator being able to provide some reasonable defaults.

-* **connection_pool_number_of_instances**
-  How many instances of connection pool to create. Default is 2 which is also
+* **connection_pooler_number_of_instances**
+  How many instances of the connection pooler to create. Default is 2, which is also
   the required minimum.

-* **connection_pool_schema**
-  Schema to create for credentials lookup function. Default is `pooler`.
+* **connection_pooler_schema**
+  Database schema to create for the credentials lookup function to be used by the
+  connection pooler. It is created in every database of the Postgres cluster.
+  You can also choose an existing schema. Default schema is `pooler`.

-* **connection_pool_user**
-  User to create for connection pool to be able to connect to a database.
-  Default is `pooler`.
+* **connection_pooler_user**
+  User to create for the connection pooler to be able to connect to a database.
+  You can also choose an existing role, but make sure it has the `LOGIN`
+  privilege. Default role is `pooler`.

-* **connection_pool_image**
-  Docker image to use for connection pool deployment.
+* **connection_pooler_image**
+  Docker image to use for the connection pooler deployment.
   Default: "registry.opensource.zalan.do/acid/pgbouncer"

-* **connection_pool_max_db_connections**
+* **connection_pooler_max_db_connections**
   How many connections the pooler can max hold. This value is divided among the
   pooler pods. Default is 60, which will make up 30 connections per pod for the
   default setup with two instances.

-* **connection_pool_mode**
-  Default pool mode, `session` or `transaction`. Default is `transaction`.
+* **connection_pooler_mode**
+  Default pooler mode, `session` or `transaction`. Default is `transaction`.

-* **connection_pool_default_cpu_request**
-  **connection_pool_default_memory_reques**
-  **connection_pool_default_cpu_limit**
-  **connection_pool_default_memory_limit**
-  Default resource configuration for connection pool deployment. The internal
+* **connection_pooler_default_cpu_request**
+  **connection_pooler_default_memory_request**
+  **connection_pooler_default_cpu_limit**
+  **connection_pooler_default_memory_limit**
+  Default resource configuration for the connection pooler deployment. The internal
   default for memory request and limit is `100Mi`, for CPU it is `500m` and `1`.
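Taken together, a sketch of the renamed keys in the CRD-based configuration; the values shown are the documented defaults:

```yaml
configuration:
  connection_pooler:
    connection_pooler_number_of_instances: 2
    connection_pooler_schema: "pooler"
    connection_pooler_user: "pooler"
    connection_pooler_image: "registry.opensource.zalan.do/acid/pgbouncer"
    connection_pooler_max_db_connections: 60
    connection_pooler_mode: "transaction"
    connection_pooler_default_cpu_request: "500m"
    connection_pooler_default_memory_request: 100Mi
    connection_pooler_default_cpu_limit: "1"
    connection_pooler_default_memory_limit: 100Mi
```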
docs/user.md
@@ -512,39 +512,39 @@ monitoring is outside the scope of operator responsibilities. See
 [administrator documentation](administrator.md) for details on how backups are
 executed.

-## Connection pool
+## Connection pooler

-The operator can create a database side connection pool for those applications,
-where an application side pool is not feasible, but a number of connections is
-high. To create a connection pool together with a database, modify the
+The operator can create a database-side connection pooler for applications
+where an application-side pooler is not feasible but the number of connections is
+high. To create a connection pooler together with a database, modify the
 manifest:

 ```yaml
 spec:
-  enableConnectionPool: true
+  enableConnectionPooler: true
 ```

-This will tell the operator to create a connection pool with default
+This will tell the operator to create a connection pooler with default
 configuration, through which one can access the master via a separate service
-`{cluster-name}-pooler`. In most of the cases provided default configuration
-should be good enough.
-
-To configure a new connection pool, specify:
+`{cluster-name}-pooler`. In most cases the
+[default configuration](reference/operator_parameters.md#connection-pooler-configuration)
+should be good enough. To configure a new connection pooler individually for
+each Postgres cluster, specify:

 ```yaml
 spec:
-  connectionPool:
-    # how many instances of connection pool to create
-    number_of_instances: 2
+  connectionPooler:
+    # how many instances of connection pooler to create
+    numberOfInstances: 2

     # in which mode to run, session or transaction
     mode: "transaction"

-    # schema, which operator will create to install credentials lookup
-    # function
+    # schema, which operator will create in each database
+    # to install the credentials lookup function for the connection pooler
     schema: "pooler"

-    # user, which operator will create for connection pool
+    # user, which operator will create for the connection pooler
     user: "pooler"

     # resources for each instance
@@ -557,13 +557,17 @@ spec:
       memory: 100Mi
 ```

-By default `pgbouncer` is used to create a connection pool. To find out about
-pool modes see [docs](https://www.pgbouncer.org/config.html#pool_mode) (but it
-should be general approach between different implementation).
+The `enableConnectionPooler` flag is not required when the `connectionPooler`
+section is present in the manifest. But it can be used to disable/remove the
+pooler while keeping its configuration.

-Note, that using `pgbouncer` means meaningful resource CPU limit should be less
-than 1 core (there is a way to utilize more than one, but in K8S it's easier
-just to spin up more instances).
+By default, [`PgBouncer`](https://www.pgbouncer.org/) is used as the connection pooler.
+To find out about pool modes, read the `PgBouncer` [docs](https://www.pgbouncer.org/config.html#pool_mode)
+(but it should be the general approach across different implementations).
+
+Note that when using `PgBouncer`, a meaningful CPU resource limit should be 1 core
+or less (there is a way to utilize more than one, but in K8s it's easier just to
+spin up more instances).

 ## Custom TLS certificates
@@ -572,10 +576,15 @@ However, this certificate cannot be verified and thus doesn't protect from
 active MITM attacks. In this section we show how to specify a custom TLS
 certificate which is mounted in the database pods via a K8s Secret.

-Before applying these changes, the operator must also be configured with the
-`spilo_fsgroup` set to the GID matching the postgres user group. If the value
-is not provided, the cluster will default to `103` which is the GID from the
-default spilo image.
+Before applying these changes, in K8s the operator must also be configured with
+the `spilo_fsgroup` set to the GID matching the postgres user group. If you
+don't know the value, use `103`, which is the GID from the default spilo image
+(`spilo_fsgroup=103` in the cluster request spec).
+
+OpenShift allocates the users and groups dynamically (based on scc), and their
+range is different in every namespace. Due to this dynamic behaviour, it's not
+trivial to know at deploy time the uid/gid of the user in the cluster.
+This way, in OpenShift, you may want to skip the spilo_fsgroup setting.

 Upload the cert as a kubernetes secret:
 ```sh
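For the non-OpenShift case described above, a sketch of the corresponding manifest snippet (the secret name is hypothetical; the TLS keys appear in the complete manifest example later in this diff):

```yaml
spec:
  # GID of the postgres user group in the default spilo image
  spiloFSGroup: 103
  tls:
    secretName: "pg-tls"  # hypothetical secret holding the certificate
    certificateFile: "tls.crt"
    privateKeyFile: "tls.key"
```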
@@ -71,32 +71,32 @@ class EndToEndTestCase(unittest.TestCase):
             raise

     @timeout_decorator.timeout(TEST_TIMEOUT_SEC)
-    def test_enable_disable_connection_pool(self):
+    def test_enable_disable_connection_pooler(self):
         '''
-        For a database without connection pool, then turns it on, scale up,
-        turn off and on again. Test with different ways of doing this (via
-        enableConnectionPool or connectionPool configuration section). At the
-        end turn the connection pool off to not interfere with other tests.
+        For a database without a connection pooler, turn it on, scale it up,
+        then turn it off and on again. Test different ways of doing this (via
+        enableConnectionPooler or the connectionPooler configuration section). At
+        the end, turn the connection pooler off to not interfere with other tests.
         '''
         k8s = self.k8s
         service_labels = {
             'cluster-name': 'acid-minimal-cluster',
         }
         pod_labels = dict({
-            'connection-pool': 'acid-minimal-cluster-pooler',
+            'connection-pooler': 'acid-minimal-cluster-pooler',
         })

         pod_selector = to_selector(pod_labels)
         service_selector = to_selector(service_labels)

         try:
-            # enable connection pool
+            # enable connection pooler
             k8s.api.custom_objects_api.patch_namespaced_custom_object(
                 'acid.zalan.do', 'v1', 'default',
                 'postgresqls', 'acid-minimal-cluster',
                 {
                     'spec': {
-                        'enableConnectionPool': True,
+                        'enableConnectionPooler': True,
                     }
                 })
             k8s.wait_for_pod_start(pod_selector)
@@ -105,7 +105,7 @@ class EndToEndTestCase(unittest.TestCase):
                 'default', label_selector=pod_selector
             ).items

-            self.assertTrue(pods, 'No connection pool pods')
+            self.assertTrue(pods, 'No connection pooler pods')

             k8s.wait_for_service(service_selector)
             services = k8s.api.core_v1.list_namespaced_service(
@@ -116,15 +116,15 @@ class EndToEndTestCase(unittest.TestCase):
                 if s.metadata.name.endswith('pooler')
             ]

-            self.assertTrue(services, 'No connection pool service')
+            self.assertTrue(services, 'No connection pooler service')

-            # scale up connection pool deployment
+            # scale up connection pooler deployment
             k8s.api.custom_objects_api.patch_namespaced_custom_object(
                 'acid.zalan.do', 'v1', 'default',
                 'postgresqls', 'acid-minimal-cluster',
                 {
                     'spec': {
-                        'connectionPool': {
+                        'connectionPooler': {
                             'numberOfInstances': 2,
                         },
                     }
@@ -138,7 +138,7 @@ class EndToEndTestCase(unittest.TestCase):
                 'postgresqls', 'acid-minimal-cluster',
                 {
                     'spec': {
-                        'enableConnectionPool': False,
+                        'enableConnectionPooler': False,
                     }
                 })
             k8s.wait_for_pods_to_stop(pod_selector)
@@ -148,7 +148,7 @@ class EndToEndTestCase(unittest.TestCase):
                 'postgresqls', 'acid-minimal-cluster',
                 {
                     'spec': {
-                        'enableConnectionPool': True,
+                        'enableConnectionPooler': True,
                     }
                 })
             k8s.wait_for_pod_start(pod_selector)
@@ -516,20 +516,11 @@ class EndToEndTestCase(unittest.TestCase):

         self.assert_running_pods_have_volumes()

-        # Update() deletes pvc on scale down
-        # we do not use wait_for_pg_to_scale here because it waits until a pod is completely gone
-        # we want to capture a potential situation where a pod is in Terminating state
-        # but its pvc is already being deleted
-        # TODO that needs a more thorough test at the DB level
-        k8s.change_number_of_instances(1)
-        k8s.wait_for_pvc_deletion("pgdata-acid-minimal-cluster-1")
-
-        self.assert_running_pods_have_volumes()
-
         # pvc with index 0 must stay around when cluster has 0 pods
         last_pvc_name = "pgdata-acid-minimal-cluster-0"
         volume_before_scaledown = k8s.get_volume_name(last_pvc_name)
         k8s.wait_for_pg_to_scale(0)
         k8s.update_config(patch)  # force a Sync to delete unused PVCs
         self.assertTrue(k8s.pvc_exist(last_pvc_name), "The last pvc was deleted")

         # sanity check
@@ -539,6 +530,8 @@ class EndToEndTestCase(unittest.TestCase):
             volume_after_scaleup,
             "the surviving pvc must have the same volume before scale down to 0 and after scale up")

+        self.assert_running_pods_have_volumes()
+
         # clean up
         patch = {
             "data": {
@@ -19,6 +19,7 @@ spec:
   - createdb
   enableMasterLoadBalancer: false
   enableReplicaLoadBalancer: false
+# enableConnectionPooler: true  # not needed when connectionPooler section is present (see below)
   allowedSourceRanges:  # load balancers' source ranges for both master and replica services
   - 127.0.0.1/32
   databases:
@@ -66,6 +67,8 @@ spec:
     ttl: 30
     loop_wait: &loop_wait 10
     retry_timeout: 10
+    synchronous_mode: false
+    synchronous_mode_strict: false
     maximum_lag_on_failover: 33554432

 # restore a Postgres DB with point-in-time-recovery
@@ -85,6 +88,19 @@ spec:
 #  - 01:00-06:00  #UTC
 #  - Sat:00:00-04:00

+  connectionPooler:
+    numberOfInstances: 2
+    mode: "transaction"
+    schema: "pooler"
+    user: "pooler"
+    resources:
+      requests:
+        cpu: 300m
+        memory: 100Mi
+      limits:
+        cpu: "1"
+        memory: 100Mi
+
   initContainers:
   - name: date
     image: busybox
@@ -109,3 +125,5 @@ spec:
     certificateFile: "tls.crt"
     privateKeyFile: "tls.key"
     caFile: ""  # optionally configure Postgres with a CA certificate
+  # When TLS is enabled, also set the spiloFSGroup parameter above to the relevant value.
+  # If unknown, set it to 103, which is the usual value in the default spilo images.
@@ -11,16 +11,16 @@ data:
   cluster_history_entries: "1000"
   cluster_labels: application:spilo
   cluster_name_label: cluster-name
-  # connection_pool_default_cpu_limit: "1"
-  # connection_pool_default_cpu_request: "500m"
-  # connection_pool_default_memory_limit: 100Mi
-  # connection_pool_default_memory_request: 100Mi
-  connection_pool_image: "registry.opensource.zalan.do/acid/pgbouncer:master-5"
-  # connection_pool_max_db_connections: 60
-  # connection_pool_mode: "transaction"
-  # connection_pool_number_of_instances: 2
-  # connection_pool_schema: "pooler"
-  # connection_pool_user: "pooler"
+  # connection_pooler_default_cpu_limit: "1"
+  # connection_pooler_default_cpu_request: "500m"
+  # connection_pooler_default_memory_limit: 100Mi
+  # connection_pooler_default_memory_request: 100Mi
+  connection_pooler_image: "registry.opensource.zalan.do/acid/pgbouncer:master-6"
+  # connection_pooler_max_db_connections: 60
+  # connection_pooler_mode: "transaction"
+  # connection_pooler_number_of_instances: 2
+  # connection_pooler_schema: "pooler"
+  # connection_pooler_user: "pooler"
   # custom_service_annotations: "keyx:valuez,keya:valuea"
   # custom_pod_annotations: "keya:valuea,keyb:valueb"
   db_hosted_zone: db.example.com
@@ -43,6 +43,7 @@ data:
   # enable_team_superuser: "false"
   enable_teams_api: "false"
   # etcd_host: ""
+  # kubernetes_use_configmaps: "false"
   # infrastructure_roles_secret_name: postgresql-infrastructure-roles
   # inherited_labels: application,environment
   # kube_iam_role: ""
@@ -44,6 +44,8 @@ spec:
                 type: boolean
               etcd_host:
                 type: string
+              kubernetes_use_configmaps:
+                type: boolean
               max_instances:
                 type: integer
                 minimum: -1  # -1 = disabled
@@ -296,44 +298,44 @@ spec:
                     pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
                   scalyr_server_url:
                     type: string
-              connection_pool:
+              connection_pooler:
                 type: object
                 properties:
-                  connection_pool_schema:
+                  connection_pooler_schema:
                     type: string
                     #default: "pooler"
-                  connection_pool_user:
+                  connection_pooler_user:
                     type: string
                     #default: "pooler"
-                  connection_pool_image:
+                  connection_pooler_image:
                     type: string
                     #default: "registry.opensource.zalan.do/acid/pgbouncer"
-                  connection_pool_max_db_connections:
+                  connection_pooler_max_db_connections:
                     type: integer
                     #default: 60
-                  connection_pool_mode:
+                  connection_pooler_mode:
                     type: string
                     enum:
                       - "session"
                       - "transaction"
                     #default: "transaction"
-                  connection_pool_number_of_instances:
+                  connection_pooler_number_of_instances:
                     type: integer
                     minimum: 2
                     #default: 2
-                  connection_pool_default_cpu_limit:
+                  connection_pooler_default_cpu_limit:
                     type: string
                     pattern: '^(\d+m|\d+(\.\d{1,3})?)$'
                     #default: "1"
-                  connection_pool_default_cpu_request:
+                  connection_pooler_default_cpu_request:
                     type: string
                     pattern: '^(\d+m|\d+(\.\d{1,3})?)$'
                     #default: "500m"
-                  connection_pool_default_memory_limit:
+                  connection_pooler_default_memory_limit:
                     type: string
                     pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
                     #default: "100Mi"
-                  connection_pool_default_memory_request:
+                  connection_pooler_default_memory_request:
                     type: string
                     pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
                     #default: "100Mi"
@@ -5,6 +5,7 @@ metadata:
 configuration:
   # enable_crd_validation: true
   etcd_host: ""
+  # kubernetes_use_configmaps: false
   docker_image: registry.opensource.zalan.do/acid/spilo-12:1.6-p2
   # enable_shm_volume: true
   # enable_unused_pvc_deletion: false
@@ -122,14 +123,14 @@ configuration:
     scalyr_memory_limit: 500Mi
     scalyr_memory_request: 50Mi
     # scalyr_server_url: ""
-  connection_pool:
-    connection_pool_default_cpu_limit: "1"
-    connection_pool_default_cpu_request: "500m"
-    connection_pool_default_memory_limit: 100Mi
-    connection_pool_default_memory_request: 100Mi
-    connection_pool_image: "registry.opensource.zalan.do/acid/pgbouncer:master-5"
-    # connection_pool_max_db_connections: 60
-    connection_pool_mode: "transaction"
-    connection_pool_number_of_instances: 2
-    # connection_pool_schema: "pooler"
-    # connection_pool_user: "pooler"
+  connection_pooler:
+    connection_pooler_default_cpu_limit: "1"
+    connection_pooler_default_cpu_request: "500m"
+    connection_pooler_default_memory_limit: 100Mi
+    connection_pooler_default_memory_request: 100Mi
+    connection_pooler_image: "registry.opensource.zalan.do/acid/pgbouncer:master-6"
+    # connection_pooler_max_db_connections: 60
+    connection_pooler_mode: "transaction"
+    connection_pooler_number_of_instances: 2
+    # connection_pooler_schema: "pooler"
+    # connection_pooler_user: "pooler"
@@ -70,7 +70,7 @@ spec:
               uid:
                 format: uuid
                 type: string
-              connectionPool:
+              connectionPooler:
                 type: object
                 properties:
                   dockerImage:
@@ -126,7 +126,7 @@ spec:
               # Note: usernames specified here as database owners must be declared in the users key of the spec key.
               dockerImage:
                 type: string
-              enableConnectionPool:
+              enableConnectionPooler:
                 type: boolean
               enableLogicalBackup:
                 type: boolean
@@ -184,6 +184,10 @@ spec:
                     type: integer
                   maximum_lag_on_failover:
                     type: integer
+                  synchronous_mode:
+                    type: boolean
+                  synchronous_mode_strict:
+                    type: boolean
               podAnnotations:
                 type: object
                 additionalProperties:
@@ -177,7 +177,7 @@ var PostgresCRDResourceValidation = apiextv1beta1.CustomResourceValidation{
 				},
 			},
 		},
-		"connectionPool": {
+		"connectionPooler": {
 			Type: "object",
 			Properties: map[string]apiextv1beta1.JSONSchemaProps{
 				"dockerImage": {
@@ -259,7 +259,7 @@ var PostgresCRDResourceValidation = apiextv1beta1.CustomResourceValidation{
 			"dockerImage": {
 				Type: "string",
 			},
-			"enableConnectionPool": {
+			"enableConnectionPooler": {
 				Type: "boolean",
 			},
 			"enableLogicalBackup": {
@@ -358,6 +358,12 @@ var PostgresCRDResourceValidation = apiextv1beta1.CustomResourceValidation{
 					"maximum_lag_on_failover": {
 						Type: "integer",
 					},
+					"synchronous_mode": {
+						Type: "boolean",
+					},
+					"synchronous_mode_strict": {
+						Type: "boolean",
+					},
 				},
 			},
 			"podAnnotations": {
@@ -727,6 +733,9 @@ var OperatorConfigCRDResourceValidation = apiextv1beta1.CustomResourceValidation
 			"etcd_host": {
 				Type: "string",
 			},
+			"kubernetes_use_configmaps": {
+				Type: "boolean",
+			},
 			"max_instances": {
 				Type:        "integer",
 				Description: "-1 = disabled",
@@ -1129,32 +1138,32 @@ var OperatorConfigCRDResourceValidation = apiextv1beta1.CustomResourceValidation
 				},
 			},
 		},
-		"connection_pool": {
+		"connection_pooler": {
 			Type: "object",
 			Properties: map[string]apiextv1beta1.JSONSchemaProps{
-				"connection_pool_default_cpu_limit": {
+				"connection_pooler_default_cpu_limit": {
 					Type:    "string",
 					Pattern: "^(\\d+m|\\d+(\\.\\d{1,3})?)$",
 				},
-				"connection_pool_default_cpu_request": {
+				"connection_pooler_default_cpu_request": {
 					Type:    "string",
 					Pattern: "^(\\d+m|\\d+(\\.\\d{1,3})?)$",
 				},
-				"connection_pool_default_memory_limit": {
+				"connection_pooler_default_memory_limit": {
 					Type:    "string",
 					Pattern: "^(\\d+(e\\d+)?|\\d+(\\.\\d+)?(e\\d+)?[EPTGMK]i?)$",
 				},
-				"connection_pool_default_memory_request": {
+				"connection_pooler_default_memory_request": {
 					Type:    "string",
 					Pattern: "^(\\d+(e\\d+)?|\\d+(\\.\\d+)?(e\\d+)?[EPTGMK]i?)$",
 				},
-				"connection_pool_image": {
+				"connection_pooler_image": {
 					Type: "string",
 				},
-				"connection_pool_max_db_connections": {
+				"connection_pooler_max_db_connections": {
 					Type: "integer",
 				},
-				"connection_pool_mode": {
+				"connection_pooler_mode": {
 					Type: "string",
 					Enum: []apiextv1beta1.JSON{
 						{
@@ -1165,14 +1174,14 @@ var OperatorConfigCRDResourceValidation = apiextv1beta1.CustomResourceValidation
 					},
 				},
 			},
-			"connection_pool_number_of_instances": {
+			"connection_pooler_number_of_instances": {
 				Type:    "integer",
 				Minimum: &min2,
 			},
-			"connection_pool_schema": {
+			"connection_pooler_schema": {
 				Type: "string",
 			},
-			"connection_pool_user": {
+			"connection_pooler_user": {
 				Type: "string",
 			},
 		},
@@ -153,18 +153,18 @@ type ScalyrConfiguration struct {
 	ScalyrMemoryLimit string `json:"scalyr_memory_limit,omitempty"`
 }

-// Defines default configuration for connection pool
-type ConnectionPoolConfiguration struct {
-	NumberOfInstances    *int32 `json:"connection_pool_number_of_instances,omitempty"`
-	Schema               string `json:"connection_pool_schema,omitempty"`
-	User                 string `json:"connection_pool_user,omitempty"`
-	Image                string `json:"connection_pool_image,omitempty"`
-	Mode                 string `json:"connection_pool_mode,omitempty"`
-	MaxDBConnections     *int32 `json:"connection_pool_max_db_connections,omitempty"`
-	DefaultCPURequest    string `json:"connection_pool_default_cpu_request,omitempty"`
-	DefaultMemoryRequest string `json:"connection_pool_default_memory_request,omitempty"`
-	DefaultCPULimit      string `json:"connection_pool_default_cpu_limit,omitempty"`
-	DefaultMemoryLimit   string `json:"connection_pool_default_memory_limit,omitempty"`
+// Defines default configuration for connection pooler
+type ConnectionPoolerConfiguration struct {
+	NumberOfInstances    *int32 `json:"connection_pooler_number_of_instances,omitempty"`
+	Schema               string `json:"connection_pooler_schema,omitempty"`
+	User                 string `json:"connection_pooler_user,omitempty"`
+	Image                string `json:"connection_pooler_image,omitempty"`
+	Mode                 string `json:"connection_pooler_mode,omitempty"`
+	MaxDBConnections     *int32 `json:"connection_pooler_max_db_connections,omitempty"`
+	DefaultCPURequest    string `json:"connection_pooler_default_cpu_request,omitempty"`
+	DefaultMemoryRequest string `json:"connection_pooler_default_memory_request,omitempty"`
+	DefaultCPULimit      string `json:"connection_pooler_default_cpu_limit,omitempty"`
+	DefaultMemoryLimit   string `json:"connection_pooler_default_memory_limit,omitempty"`
 }

 // OperatorLogicalBackupConfiguration defines configuration for logical backup
@@ -183,6 +183,7 @@ type OperatorLogicalBackupConfiguration struct {
 type OperatorConfigurationData struct {
 	EnableCRDValidation     *bool  `json:"enable_crd_validation,omitempty"`
 	EtcdHost                string `json:"etcd_host,omitempty"`
+	KubernetesUseConfigMaps bool   `json:"kubernetes_use_configmaps,omitempty"`
 	DockerImage             string `json:"docker_image,omitempty"`
 	Workers                 uint32 `json:"workers,omitempty"`
 	MinInstances            int32  `json:"min_instances,omitempty"`
@@ -203,7 +204,7 @@ type OperatorConfigurationData struct {
 	LoggingRESTAPI   LoggingRESTAPIConfiguration        `json:"logging_rest_api"`
 	Scalyr           ScalyrConfiguration                `json:"scalyr"`
 	LogicalBackup    OperatorLogicalBackupConfiguration `json:"logical_backup"`
-	ConnectionPool   ConnectionPoolConfiguration        `json:"connection_pool"`
+	ConnectionPooler ConnectionPoolerConfiguration      `json:"connection_pooler"`
 }

 //Duration shortens this frequently used name
@@ -29,8 +29,8 @@ type PostgresSpec struct {
 	Patroni   `json:"patroni,omitempty"`
 	Resources `json:"resources,omitempty"`

-	EnableConnectionPool *bool           `json:"enableConnectionPool,omitempty"`
-	ConnectionPool       *ConnectionPool `json:"connectionPool,omitempty"`
+	EnableConnectionPooler *bool             `json:"enableConnectionPooler,omitempty"`
+	ConnectionPooler       *ConnectionPooler `json:"connectionPooler,omitempty"`

 	TeamID      string `json:"teamId"`
 	DockerImage string `json:"dockerImage,omitempty"`
|
@ -118,13 +118,15 @@ type Resources struct {
|
|||
|
||||
// Patroni contains Patroni-specific configuration
|
||||
type Patroni struct {
|
||||
InitDB map[string]string `json:"initdb"`
|
||||
PgHba []string `json:"pg_hba"`
|
||||
TTL uint32 `json:"ttl"`
|
||||
LoopWait uint32 `json:"loop_wait"`
|
||||
RetryTimeout uint32 `json:"retry_timeout"`
|
||||
MaximumLagOnFailover float32 `json:"maximum_lag_on_failover"` // float32 because https://github.com/kubernetes/kubernetes/issues/30213
|
||||
Slots map[string]map[string]string `json:"slots"`
|
||||
InitDB map[string]string `json:"initdb"`
|
||||
PgHba []string `json:"pg_hba"`
|
||||
TTL uint32 `json:"ttl"`
|
||||
LoopWait uint32 `json:"loop_wait"`
|
||||
RetryTimeout uint32 `json:"retry_timeout"`
|
||||
MaximumLagOnFailover float32 `json:"maximum_lag_on_failover"` // float32 because https://github.com/kubernetes/kubernetes/issues/30213
|
||||
Slots map[string]map[string]string `json:"slots"`
|
||||
SynchronousMode bool `json:"synchronous_mode"`
|
||||
SynchronousModeStrict bool `json:"synchronous_mode_strict"`
|
||||
}
|
||||
|
||||
//StandbyCluster
|
||||
|
|
@@ -175,10 +177,10 @@ type PostgresStatus struct {
 // resources)
 // Type string `json:"type,omitempty"`
 //
-// TODO: figure out what other important parameters of the connection pool it
+// TODO: figure out what other important parameters of the connection pooler it
 // makes sense to expose. E.g. pool size (min/max boundaries), max client
 // connections etc.
-type ConnectionPool struct {
+type ConnectionPooler struct {
 	NumberOfInstances *int32 `json:"numberOfInstances,omitempty"`
 	Schema            string `json:"schema,omitempty"`
 	User              string `json:"user,omitempty"`
@@ -69,7 +69,7 @@ func (in *CloneDescription) DeepCopy() *CloneDescription {
 }

 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ConnectionPool) DeepCopyInto(out *ConnectionPool) {
+func (in *ConnectionPooler) DeepCopyInto(out *ConnectionPooler) {
 	*out = *in
 	if in.NumberOfInstances != nil {
 		in, out := &in.NumberOfInstances, &out.NumberOfInstances
@@ -85,18 +85,18 @@ func (in *ConnectionPooler) DeepCopyInto(out *ConnectionPooler) {
 	return
 }

-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionPool.
-func (in *ConnectionPool) DeepCopy() *ConnectionPool {
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionPooler.
+func (in *ConnectionPooler) DeepCopy() *ConnectionPooler {
 	if in == nil {
 		return nil
 	}
-	out := new(ConnectionPool)
+	out := new(ConnectionPooler)
 	in.DeepCopyInto(out)
 	return out
 }

 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ConnectionPoolConfiguration) DeepCopyInto(out *ConnectionPoolConfiguration) {
+func (in *ConnectionPoolerConfiguration) DeepCopyInto(out *ConnectionPoolerConfiguration) {
 	*out = *in
 	if in.NumberOfInstances != nil {
 		in, out := &in.NumberOfInstances, &out.NumberOfInstances
@@ -111,12 +111,12 @@ func (in *ConnectionPoolerConfiguration) DeepCopyInto(out *ConnectionPoolerConfiguration) {
 	return
 }

-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionPoolConfiguration.
-func (in *ConnectionPoolConfiguration) DeepCopy() *ConnectionPoolConfiguration {
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionPoolerConfiguration.
+func (in *ConnectionPoolerConfiguration) DeepCopy() *ConnectionPoolerConfiguration {
 	if in == nil {
 		return nil
 	}
-	out := new(ConnectionPoolConfiguration)
+	out := new(ConnectionPoolerConfiguration)
 	in.DeepCopyInto(out)
 	return out
 }
@@ -308,7 +308,7 @@ func (in *OperatorConfigurationData) DeepCopyInto(out *OperatorConfigurationData) {
 	out.LoggingRESTAPI = in.LoggingRESTAPI
 	out.Scalyr = in.Scalyr
 	out.LogicalBackup = in.LogicalBackup
-	in.ConnectionPool.DeepCopyInto(&out.ConnectionPool)
+	in.ConnectionPooler.DeepCopyInto(&out.ConnectionPooler)
 	return
 }
@@ -471,14 +471,14 @@ func (in *PostgresSpec) DeepCopyInto(out *PostgresSpec) {
 	out.Volume = in.Volume
 	in.Patroni.DeepCopyInto(&out.Patroni)
 	out.Resources = in.Resources
-	if in.EnableConnectionPool != nil {
-		in, out := &in.EnableConnectionPool, &out.EnableConnectionPool
+	if in.EnableConnectionPooler != nil {
+		in, out := &in.EnableConnectionPooler, &out.EnableConnectionPooler
 		*out = new(bool)
 		**out = **in
 	}
-	if in.ConnectionPool != nil {
-		in, out := &in.ConnectionPool, &out.ConnectionPool
-		*out = new(ConnectionPool)
+	if in.ConnectionPooler != nil {
+		in, out := &in.ConnectionPooler, &out.ConnectionPooler
+		*out = new(ConnectionPooler)
 		(*in).DeepCopyInto(*out)
 	}
 	if in.SpiloFSGroup != nil {
@@ -9,7 +9,6 @@ import (
 	"fmt"
 	"reflect"
 	"regexp"
-	"strconv"
 	"sync"
 	"time"
@@ -51,14 +50,14 @@ type Config struct {
 	PodServiceAccountRoleBinding *rbacv1.RoleBinding
 }

-// K8S objects that are belongs to a connection pool
-type ConnectionPoolObjects struct {
+// K8s objects that belong to a connection pooler
+type ConnectionPoolerObjects struct {
 	Deployment *appsv1.Deployment
 	Service    *v1.Service

-	// It could happen that a connection pool was enabled, but the operator was
-	// not able to properly process a corresponding event or was restarted. In
-	// this case we will miss missing/require situation and a lookup function
+	// It could happen that a connection pooler was enabled, but the operator
+	// was not able to properly process a corresponding event or was restarted.
+	// In this case we will miss the missing/required situation and a lookup function
 	// will not be installed. To avoid synchronizing it all the time to prevent
 	// this, we can remember the result in memory at least until the next
 	// restart.
@@ -70,7 +69,7 @@ type kubeResources struct {
 	Endpoints           map[PostgresRole]*v1.Endpoints
 	Secrets             map[types.UID]*v1.Secret
 	Statefulset         *appsv1.StatefulSet
-	ConnectionPool      *ConnectionPoolObjects
+	ConnectionPooler    *ConnectionPoolerObjects
 	PodDisruptionBudget *policybeta1.PodDisruptionBudget
 	//Pods are treated separately
 	//PVCs are treated separately
@@ -338,24 +337,24 @@ func (c *Cluster) Create() error {
 		c.logger.Errorf("could not list resources: %v", err)
 	}

-	// Create connection pool deployment and services if necessary. Since we
-	// need to peform some operations with the database itself (e.g. install
+	// Create connection pooler deployment and services if necessary. Since we
+	// need to perform some operations with the database itself (e.g. install
 	// lookup function), do it as the last step, when everything is available.
 	//
-	// Do not consider connection pool as a strict requirement, and if
+	// Do not consider connection pooler as a strict requirement, and if
 	// something fails, report warning
-	if c.needConnectionPool() {
-		if c.ConnectionPool != nil {
-			c.logger.Warning("Connection pool already exists in the cluster")
+	if c.needConnectionPooler() {
+		if c.ConnectionPooler != nil {
+			c.logger.Warning("Connection pooler already exists in the cluster")
 			return nil
 		}
-		connPool, err := c.createConnectionPool(c.installLookupFunction)
+		connectionPooler, err := c.createConnectionPooler(c.installLookupFunction)
 		if err != nil {
-			c.logger.Warningf("could not create connection pool: %v", err)
+			c.logger.Warningf("could not create connection pooler: %v", err)
 			return nil
 		}
-		c.logger.Infof("connection pool %q has been successfully created",
-			util.NameFromMeta(connPool.Deployment.ObjectMeta))
+		c.logger.Infof("connection pooler %q has been successfully created",
+			util.NameFromMeta(connectionPooler.Deployment.ObjectMeta))
 	}

 	return nil
@@ -613,11 +612,11 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error {
 		}
 	}

-	// connection pool needs one system user created, which is done in
+	// connection pooler needs one system user created, which is done in
 	// initUsers. Check if it needs to be called.
 	sameUsers := reflect.DeepEqual(oldSpec.Spec.Users, newSpec.Spec.Users)
-	needConnPool := c.needConnectionPoolWorker(&newSpec.Spec)
-	if !sameUsers || needConnPool {
+	needConnectionPooler := c.needConnectionPoolerWorker(&newSpec.Spec)
+	if !sameUsers || needConnectionPooler {
 		c.logger.Debugf("syncing secrets")
 		if err := c.initUsers(); err != nil {
 			c.logger.Errorf("could not init users: %v", err)
@@ -679,27 +678,6 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error {
 		}
 	}()

-	if c.OpConfig.EnableUnusedPVCDeletion && oldSpec.Spec.NumberOfInstances > newSpec.Spec.NumberOfInstances {
-		c.logger.Debug("deleting pvc of shut down pods")
-
-		for i := oldSpec.Spec.NumberOfInstances - 1; i >= newSpec.Spec.NumberOfInstances; i-- {
-
-			// Scaling down to 0 replicas is not cluster deletion so keep the last pvc.
-			// Operator will remove it only when explicit "kubectl pg delete" is issued
-			if i == 0 {
-				c.logger.Info("cluster scaled down to 0 pods; skipping deletion of the last pvc")
-				break
-			}
-
-			podIndex := strconv.Itoa(int(i))
-			pvcName := "pgdata-" + c.Name + "-" + podIndex
-			if err := c.KubeClient.PersistentVolumeClaims(c.Namespace).Delete(context.TODO(), pvcName, c.deleteOptions); err != nil {
-				c.logger.Warningf("could not delete PersistentVolumeClaim: %v", err)
-				// failing to delete pvc does not fail the update; Sync() may also delete unused PVCs later
-			}
-		}
-	}
-
 	// pod disruption budget
 	if oldSpec.Spec.NumberOfInstances != newSpec.Spec.NumberOfInstances {
 		c.logger.Debug("syncing pod disruption budgets")
@@ -762,9 +740,9 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error {
 		}
 	}

-	// sync connection pool
-	if err := c.syncConnectionPool(oldSpec, newSpec, c.installLookupFunction); err != nil {
-		return fmt.Errorf("could not sync connection pool: %v", err)
+	// sync connection pooler
+	if err := c.syncConnectionPooler(oldSpec, newSpec, c.installLookupFunction); err != nil {
+		return fmt.Errorf("could not sync connection pooler: %v", err)
 	}

 	return nil
@@ -818,11 +796,11 @@ func (c *Cluster) Delete() {
 		c.logger.Warningf("could not remove leftover patroni objects; %v", err)
 	}

-	// Delete connection pool objects anyway, even if it's not mentioned in the
+	// Delete connection pooler objects anyway, even if it's not mentioned in the
 	// manifest, just to not keep orphaned components in case if something went
 	// wrong
-	if err := c.deleteConnectionPool(); err != nil {
-		c.logger.Warningf("could not remove connection pool: %v", err)
+	if err := c.deleteConnectionPooler(); err != nil {
+		c.logger.Warningf("could not remove connection pooler: %v", err)
 	}
 }
@@ -891,32 +869,32 @@ func (c *Cluster) initSystemUsers() {
 		Password: util.RandomPassword(constants.PasswordLength),
 	}

-	// Connection pool user is an exception, if requested it's going to be
+	// Connection pooler user is an exception, if requested it's going to be
 	// created by operator as a normal pgUser
-	if c.needConnectionPool() {
-		// initialize empty connection pool if not done yet
-		if c.Spec.ConnectionPool == nil {
-			c.Spec.ConnectionPool = &acidv1.ConnectionPool{}
+	if c.needConnectionPooler() {
+		// initialize empty connection pooler if not done yet
+		if c.Spec.ConnectionPooler == nil {
+			c.Spec.ConnectionPooler = &acidv1.ConnectionPooler{}
 		}

 		username := util.Coalesce(
-			c.Spec.ConnectionPool.User,
-			c.OpConfig.ConnectionPool.User)
+			c.Spec.ConnectionPooler.User,
+			c.OpConfig.ConnectionPooler.User)

 		// connection pooler application should be able to login with this role
-		connPoolUser := spec.PgUser{
-			Origin:   spec.RoleConnectionPool,
+		connectionPoolerUser := spec.PgUser{
+			Origin:   spec.RoleConnectionPooler,
 			Name:     username,
 			Flags:    []string{constants.RoleFlagLogin},
 			Password: util.RandomPassword(constants.PasswordLength),
 		}

 		if _, exists := c.pgUsers[username]; !exists {
-			c.pgUsers[username] = connPoolUser
+			c.pgUsers[username] = connectionPoolerUser
 		}

-		if _, exists := c.systemUsers[constants.ConnectionPoolUserKeyName]; !exists {
-			c.systemUsers[constants.ConnectionPoolUserKeyName] = connPoolUser
+		if _, exists := c.systemUsers[constants.ConnectionPoolerUserKeyName]; !exists {
+			c.systemUsers[constants.ConnectionPoolerUserKeyName] = connectionPoolerUser
 		}
 	}
 }
@@ -1246,10 +1224,10 @@ func (c *Cluster) deletePatroniClusterConfigMaps() error {
 	return c.deleteClusterObject(get, deleteConfigMapFn, "configmap")
 }

-// Test if two connection pool configuration needs to be synced. For simplicity
+// Test if two connection pooler configurations need to be synced. For simplicity
 // compare not the actual K8S objects, but the configuration itself and request
 // sync if there is any difference.
-func (c *Cluster) needSyncConnPoolSpecs(oldSpec, newSpec *acidv1.ConnectionPool) (sync bool, reasons []string) {
+func (c *Cluster) needSyncConnectionPoolerSpecs(oldSpec, newSpec *acidv1.ConnectionPooler) (sync bool, reasons []string) {
 	reasons = []string{}
 	sync = false
@@ -1286,21 +1264,21 @@ func syncResources(a, b *v1.ResourceRequirements) bool {
 	return false
 }

-// Check if we need to synchronize connection pool deployment due to new
+// Check if we need to synchronize connection pooler deployment due to new
 // defaults, that are different from what we see in the DeploymentSpec
-func (c *Cluster) needSyncConnPoolDefaults(
-	spec *acidv1.ConnectionPool,
+func (c *Cluster) needSyncConnectionPoolerDefaults(
+	spec *acidv1.ConnectionPooler,
 	deployment *appsv1.Deployment) (sync bool, reasons []string) {

 	reasons = []string{}
 	sync = false

-	config := c.OpConfig.ConnectionPool
+	config := c.OpConfig.ConnectionPooler
 	podTemplate := deployment.Spec.Template
-	poolContainer := podTemplate.Spec.Containers[constants.ConnPoolContainer]
+	poolerContainer := podTemplate.Spec.Containers[constants.ConnectionPoolerContainer]

 	if spec == nil {
-		spec = &acidv1.ConnectionPool{}
+		spec = &acidv1.ConnectionPooler{}
 	}

 	if spec.NumberOfInstances == nil &&
@@ -1313,25 +1291,25 @@ func (c *Cluster) needSyncConnectionPoolerDefaults(
 	}

 	if spec.DockerImage == "" &&
-		poolContainer.Image != config.Image {
+		poolerContainer.Image != config.Image {

 		sync = true
 		msg := fmt.Sprintf("DockerImage is different (having %s, required %s)",
-			poolContainer.Image, config.Image)
+			poolerContainer.Image, config.Image)
 		reasons = append(reasons, msg)
 	}

 	expectedResources, err := generateResourceRequirements(spec.Resources,
-		c.makeDefaultConnPoolResources())
+		c.makeDefaultConnectionPoolerResources())

 	// An error to generate expected resources means something is not quite
 	// right, but for the purpose of robustness do not panic here, just report
 	// and ignore resources comparison (in the worst case there will be no
 	// updates for new resource values).
-	if err == nil && syncResources(&poolContainer.Resources, expectedResources) {
+	if err == nil && syncResources(&poolerContainer.Resources, expectedResources) {
 		sync = true
 		msg := fmt.Sprintf("Resources are different (having %+v, required %+v)",
-			poolContainer.Resources, expectedResources)
+			poolerContainer.Resources, expectedResources)
 		reasons = append(reasons, msg)
 	}
@ -1339,13 +1317,13 @@ func (c *Cluster) needSyncConnPoolDefaults(
|
|||
c.logger.Warningf("Cannot generate expected resources, %v", err)
|
||||
}
|
||||
|
||||
for _, env := range poolContainer.Env {
|
||||
for _, env := range poolerContainer.Env {
|
||||
if spec.User == "" && env.Name == "PGUSER" {
|
||||
ref := env.ValueFrom.SecretKeyRef.LocalObjectReference
|
||||
|
||||
if ref.Name != c.credentialSecretName(config.User) {
|
||||
sync = true
|
||||
msg := fmt.Sprintf("Pool user is different (having %s, required %s)",
|
||||
msg := fmt.Sprintf("pooler user is different (having %s, required %s)",
|
||||
ref.Name, config.User)
|
||||
reasons = append(reasons, msg)
|
||||
}
|
||||
|
|
@ -1353,7 +1331,7 @@ func (c *Cluster) needSyncConnPoolDefaults(
|
|||
|
||||
if spec.Schema == "" && env.Name == "PGSCHEMA" && env.Value != config.Schema {
|
||||
sync = true
|
||||
msg := fmt.Sprintf("Pool schema is different (having %s, required %s)",
|
||||
msg := fmt.Sprintf("pooler schema is different (having %s, required %s)",
|
||||
env.Value, config.Schema)
|
||||
reasons = append(reasons, msg)
|
||||
}
|
||||
|
|
|
|||
|
|
@@ -709,16 +709,16 @@ func TestServiceAnnotations(t *testing.T) {
 func TestInitSystemUsers(t *testing.T) {
 	testName := "Test system users initialization"

-	// default cluster without connection pool
+	// default cluster without connection pooler
 	cl.initSystemUsers()
-	if _, exist := cl.systemUsers[constants.ConnectionPoolUserKeyName]; exist {
-		t.Errorf("%s, connection pool user is present", testName)
+	if _, exist := cl.systemUsers[constants.ConnectionPoolerUserKeyName]; exist {
+		t.Errorf("%s, connection pooler user is present", testName)
 	}

-	// cluster with connection pool
-	cl.Spec.EnableConnectionPool = boolToPointer(true)
+	// cluster with connection pooler
+	cl.Spec.EnableConnectionPooler = boolToPointer(true)
 	cl.initSystemUsers()
-	if _, exist := cl.systemUsers[constants.ConnectionPoolUserKeyName]; !exist {
-		t.Errorf("%s, connection pool user is not present", testName)
+	if _, exist := cl.systemUsers[constants.ConnectionPoolerUserKeyName]; !exist {
+		t.Errorf("%s, connection pooler user is not present", testName)
 	}
 }
@@ -27,13 +27,13 @@ const (
 	WHERE a.rolname = ANY($1)
 	ORDER BY 1;`

-	getDatabasesSQL       = `SELECT datname, pg_get_userbyid(datdba) AS owner FROM pg_database;`
-	createDatabaseSQL     = `CREATE DATABASE "%s" OWNER "%s";`
-	alterDatabaseOwnerSQL = `ALTER DATABASE "%s" OWNER TO "%s";`
-	connectionPoolLookup  = `
-		CREATE SCHEMA IF NOT EXISTS {{.pool_schema}};
+	getDatabasesSQL        = `SELECT datname, pg_get_userbyid(datdba) AS owner FROM pg_database;`
+	createDatabaseSQL      = `CREATE DATABASE "%s" OWNER "%s";`
+	alterDatabaseOwnerSQL  = `ALTER DATABASE "%s" OWNER TO "%s";`
+	connectionPoolerLookup = `
+		CREATE SCHEMA IF NOT EXISTS {{.pooler_schema}};

-		CREATE OR REPLACE FUNCTION {{.pool_schema}}.user_lookup(
+		CREATE OR REPLACE FUNCTION {{.pooler_schema}}.user_lookup(
 			in i_username text, out uname text, out phash text)
 		RETURNS record AS $$
 		BEGIN

@@ -43,11 +43,11 @@ const (
 		END;
 		$$ LANGUAGE plpgsql SECURITY DEFINER;

-		REVOKE ALL ON FUNCTION {{.pool_schema}}.user_lookup(text)
-			FROM public, {{.pool_user}};
-		GRANT EXECUTE ON FUNCTION {{.pool_schema}}.user_lookup(text)
-			TO {{.pool_user}};
-		GRANT USAGE ON SCHEMA {{.pool_schema}} TO {{.pool_user}};
+		REVOKE ALL ON FUNCTION {{.pooler_schema}}.user_lookup(text)
+			FROM public, {{.pooler_user}};
+		GRANT EXECUTE ON FUNCTION {{.pooler_schema}}.user_lookup(text)
+			TO {{.pooler_user}};
+		GRANT USAGE ON SCHEMA {{.pooler_schema}} TO {{.pooler_user}};
 	`
 )
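The renamed connectionPoolerLookup constant is a text/template body, rendered once per database by installLookupFunction further down. A small, self-contained sketch of that rendering step; the lookupSQL constant here is trimmed to a single statement for brevity, and the plain map stands in for the operator's TemplateParams type:

package main

import (
    "bytes"
    "fmt"
    "text/template"
)

// Trimmed-down stand-in for the connectionPoolerLookup constant above.
const lookupSQL = `CREATE SCHEMA IF NOT EXISTS {{.pooler_schema}};`

func main() {
    templater := template.Must(template.New("sql").Parse(lookupSQL))

    // TemplateParams is assumed to be map-like, matching the
    // `params := TemplateParams{"pooler_schema": ..., "pooler_user": ...}`
    // call site in installLookupFunction below.
    params := map[string]string{"pooler_schema": "pooler", "pooler_user": "pooler"}

    var stmtBytes bytes.Buffer
    if err := templater.Execute(&stmtBytes, params); err != nil {
        panic(err)
    }
    fmt.Println(stmtBytes.String()) // CREATE SCHEMA IF NOT EXISTS pooler;
}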
@@ -278,9 +278,9 @@ func makeUserFlags(rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin
 	return result
 }

-// Creates a connection pool credentials lookup function in every database to
-// perform remote authentification.
-func (c *Cluster) installLookupFunction(poolSchema, poolUser string) error {
+// Creates a connection pooler credentials lookup function in every database to
+// perform remote authentication.
+func (c *Cluster) installLookupFunction(poolerSchema, poolerUser string) error {
 	var stmtBytes bytes.Buffer
 	c.logger.Info("Installing lookup function")

@@ -299,11 +299,11 @@ func (c *Cluster) installLookupFunction(poolSchema, poolUser string) error {

 	currentDatabases, err := c.getDatabases()
 	if err != nil {
-		msg := "could not get databases to install pool lookup function: %v"
+		msg := "could not get databases to install pooler lookup function: %v"
 		return fmt.Errorf(msg, err)
 	}

-	templater := template.Must(template.New("sql").Parse(connectionPoolLookup))
+	templater := template.Must(template.New("sql").Parse(connectionPoolerLookup))

 	for dbname := range currentDatabases {
 		if dbname == "template0" || dbname == "template1" {

@@ -314,11 +314,11 @@ func (c *Cluster) installLookupFunction(poolSchema, poolUser string) error {
 			return fmt.Errorf("could not init database connection to %s", dbname)
 		}

-		c.logger.Infof("Install pool lookup function into %s", dbname)
+		c.logger.Infof("Install pooler lookup function into %s", dbname)

 		params := TemplateParams{
-			"pool_schema": poolSchema,
-			"pool_user":   poolUser,
+			"pooler_schema": poolerSchema,
+			"pooler_user":   poolerUser,
 		}

 		if err := templater.Execute(&stmtBytes, params); err != nil {

@@ -353,12 +353,12 @@ func (c *Cluster) installLookupFunction(poolSchema, poolUser string) error {
 			continue
 		}

-		c.logger.Infof("Pool lookup function installed into %s", dbname)
+		c.logger.Infof("pooler lookup function installed into %s", dbname)
 		if err := c.closeDbConn(); err != nil {
 			c.logger.Errorf("could not close database connection: %v", err)
 		}
 	}

-	c.ConnectionPool.LookupFunction = true
+	c.ConnectionPooler.LookupFunction = true
 	return nil
 }
@@ -35,11 +35,8 @@ const (
 	patroniPGParametersParameterName = "parameters"
 	patroniPGHBAConfParameterName    = "pg_hba"
 	localHost                        = "127.0.0.1/32"
-	connectionPoolContainer          = "connection-pool"
+	connectionPoolerContainer        = "connection-pooler"
 	pgPort                           = 5432

 	// the gid of the postgres user in the default spilo image
 	spiloPostgresGID = 103
 )

 type pgUser struct {

@@ -52,6 +49,8 @@ type patroniDCS struct {
 	LoopWait             uint32  `json:"loop_wait,omitempty"`
 	RetryTimeout         uint32  `json:"retry_timeout,omitempty"`
 	MaximumLagOnFailover float32 `json:"maximum_lag_on_failover,omitempty"`
+	SynchronousMode       bool   `json:"synchronous_mode,omitempty"`
+	SynchronousModeStrict bool   `json:"synchronous_mode_strict,omitempty"`
 	PGBootstrapConfiguration map[string]interface{}       `json:"postgresql,omitempty"`
 	Slots                    map[string]map[string]string `json:"slots,omitempty"`
 }
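Both new patroniDCS fields carry omitempty tags, so clusters that do not enable synchronous replication keep producing exactly the same DCS JSON as before (the unchanged first test case in TestGenerateSpiloJSONConfiguration below relies on this). A short sketch of how the fields marshal, with the struct reduced to three fields for brevity:

package main

import (
    "encoding/json"
    "fmt"
)

// Reduced copy of the patroniDCS struct above, just enough to show how
// the new fields serialize into the bootstrap DCS configuration.
type patroniDCS struct {
    TTL                   uint32 `json:"ttl,omitempty"`
    SynchronousMode       bool   `json:"synchronous_mode,omitempty"`
    SynchronousModeStrict bool   `json:"synchronous_mode_strict,omitempty"`
}

func main() {
    out, _ := json.Marshal(patroniDCS{TTL: 30, SynchronousMode: true, SynchronousModeStrict: true})
    fmt.Println(string(out))
    // {"ttl":30,"synchronous_mode":true,"synchronous_mode_strict":true}
    // With both booleans false, omitempty drops the keys entirely.
}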
@@ -75,7 +74,7 @@ func (c *Cluster) statefulSetName() string {
 	return c.Name
 }

-func (c *Cluster) connPoolName() string {
+func (c *Cluster) connectionPoolerName() string {
 	return c.Name + "-pooler"
 }

@@ -142,18 +141,18 @@ func (c *Cluster) makeDefaultResources() acidv1.Resources {
 	}
 }

-// Generate default resource section for connection pool deployment, to be used
-// if nothing custom is specified in the manifest
-func (c *Cluster) makeDefaultConnPoolResources() acidv1.Resources {
+// Generate default resource section for connection pooler deployment, to be
+// used if nothing custom is specified in the manifest
+func (c *Cluster) makeDefaultConnectionPoolerResources() acidv1.Resources {
 	config := c.OpConfig

 	defaultRequests := acidv1.ResourceDescription{
-		CPU:    config.ConnectionPool.ConnPoolDefaultCPURequest,
-		Memory: config.ConnectionPool.ConnPoolDefaultMemoryRequest,
+		CPU:    config.ConnectionPooler.ConnectionPoolerDefaultCPURequest,
+		Memory: config.ConnectionPooler.ConnectionPoolerDefaultMemoryRequest,
 	}
 	defaultLimits := acidv1.ResourceDescription{
-		CPU:    config.ConnectionPool.ConnPoolDefaultCPULimit,
-		Memory: config.ConnectionPool.ConnPoolDefaultMemoryLimit,
+		CPU:    config.ConnectionPooler.ConnectionPoolerDefaultCPULimit,
+		Memory: config.ConnectionPooler.ConnectionPoolerDefaultMemoryLimit,
 	}

 	return acidv1.Resources{
@@ -286,6 +285,12 @@ PatroniInitDBParams:
 	if patroni.Slots != nil {
 		config.Bootstrap.DCS.Slots = patroni.Slots
 	}
+	if patroni.SynchronousMode {
+		config.Bootstrap.DCS.SynchronousMode = patroni.SynchronousMode
+	}
+	if patroni.SynchronousModeStrict {
+		config.Bootstrap.DCS.SynchronousModeStrict = patroni.SynchronousModeStrict
+	}

 	config.PgLocalConfiguration = make(map[string]interface{})
 	config.PgLocalConfiguration[patroniPGBinariesParameterName] = fmt.Sprintf(pgBinariesLocationTemplate, pg.PgVersion)
@@ -675,6 +680,10 @@ func (c *Cluster) generateSpiloPodEnvVars(uid types.UID, spiloConfiguration stri
 		envVars = append(envVars, v1.EnvVar{Name: "ETCD_HOST", Value: c.OpConfig.EtcdHost})
 	}

+	if c.patroniKubernetesUseConfigMaps() {
+		envVars = append(envVars, v1.EnvVar{Name: "KUBERNETES_USE_CONFIGMAPS", Value: "true"})
+	}
+
 	if cloneDescription.ClusterName != "" {
 		envVars = append(envVars, c.generateCloneEnvironment(cloneDescription)...)
 	}

@@ -990,13 +999,8 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef

 	// configure TLS with a custom secret volume
 	if spec.TLS != nil && spec.TLS.SecretName != "" {
-		if effectiveFSGroup == nil {
-			c.logger.Warnf("Setting the default FSGroup to satisfy the TLS configuration")
-			fsGroup := int64(spiloPostgresGID)
-			effectiveFSGroup = &fsGroup
-		}
-		// this is combined with the FSGroup above to give read access to the
-		// postgres user
+		// this is combined with the FSGroup in the section above
+		// to give read access to the postgres user
 		defaultMode := int32(0640)
 		volumes = append(volumes, v1.Volume{
 			Name: "tls-secret",

@@ -1414,7 +1418,7 @@ func (c *Cluster) generateService(role PostgresRole, spec *acidv1.PostgresSpec)
 		Type: v1.ServiceTypeClusterIP,
 	}

-	if role == Replica {
+	if role == Replica || c.patroniKubernetesUseConfigMaps() {
 		serviceSpec.Selector = c.roleLabelsSet(false, role)
 	}

@@ -1859,33 +1863,33 @@ func (c *Cluster) getLogicalBackupJobName() (jobName string) {
 //
 // DEFAULT_SIZE is a pool size per db/user (having in mind the use case when
 // most of the queries coming through a connection pooler are from the same
-// user to the same db). In case if we want to spin up more connection pool
+// user to the same db). If we want to spin up more connection pooler
 // instances, take this into account and maintain the same number of
 // connections.
 //
-// MIN_SIZE is a pool minimal size, to prevent situation when sudden workload
-// have to wait for spinning up a new connections.
+// MIN_SIZE is the pool's minimal size, to prevent a situation where a sudden
+// workload has to wait for new connections to be spun up.
 //
-// RESERVE_SIZE is how many additional connections to allow for a pool.
-func (c *Cluster) getConnPoolEnvVars(spec *acidv1.PostgresSpec) []v1.EnvVar {
+// RESERVE_SIZE is how many additional connections to allow for a pooler.
+func (c *Cluster) getConnectionPoolerEnvVars(spec *acidv1.PostgresSpec) []v1.EnvVar {
 	effectiveMode := util.Coalesce(
-		spec.ConnectionPool.Mode,
-		c.OpConfig.ConnectionPool.Mode)
+		spec.ConnectionPooler.Mode,
+		c.OpConfig.ConnectionPooler.Mode)

-	numberOfInstances := spec.ConnectionPool.NumberOfInstances
+	numberOfInstances := spec.ConnectionPooler.NumberOfInstances
 	if numberOfInstances == nil {
 		numberOfInstances = util.CoalesceInt32(
-			c.OpConfig.ConnectionPool.NumberOfInstances,
+			c.OpConfig.ConnectionPooler.NumberOfInstances,
 			k8sutil.Int32ToPointer(1))
 	}

 	effectiveMaxDBConn := util.CoalesceInt32(
-		spec.ConnectionPool.MaxDBConnections,
-		c.OpConfig.ConnectionPool.MaxDBConnections)
+		spec.ConnectionPooler.MaxDBConnections,
+		c.OpConfig.ConnectionPooler.MaxDBConnections)

 	if effectiveMaxDBConn == nil {
 		effectiveMaxDBConn = k8sutil.Int32ToPointer(
-			constants.ConnPoolMaxDBConnections)
+			constants.ConnectionPoolerMaxDBConnections)
 	}

 	maxDBConn := *effectiveMaxDBConn / *numberOfInstances
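The last line splits the database-side connection budget evenly across pooler instances. A worked example using the defaults visible in this change (connection_pooler_max_db_connections defaulting to 60, deployments floored at two instances); how defaultSize, minSize, and reserveSize are then derived happens in code outside this hunk, so treat that as out of scope:

package main

import "fmt"

func main() {
    // Defaults taken from this diff: connection_pooler_max_db_connections
    // defaults to 60, and the deployment is floored at 2 instances.
    effectiveMaxDBConn := int32(60)
    numberOfInstances := int32(2)

    // Each pooler instance gets an equal share of the server-side budget,
    // mirroring `maxDBConn := *effectiveMaxDBConn / *numberOfInstances`.
    maxDBConn := effectiveMaxDBConn / numberOfInstances

    fmt.Println(maxDBConn) // 30 connections per pooler instance
}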
@@ -1896,51 +1900,51 @@ func (c *Cluster) getConnPoolEnvVars(spec *acidv1.PostgresSpec) []v1.EnvVar {

 	return []v1.EnvVar{
 		{
-			Name:  "CONNECTION_POOL_PORT",
+			Name:  "CONNECTION_POOLER_PORT",
 			Value: fmt.Sprint(pgPort),
 		},
 		{
-			Name:  "CONNECTION_POOL_MODE",
+			Name:  "CONNECTION_POOLER_MODE",
 			Value: effectiveMode,
 		},
 		{
-			Name:  "CONNECTION_POOL_DEFAULT_SIZE",
+			Name:  "CONNECTION_POOLER_DEFAULT_SIZE",
 			Value: fmt.Sprint(defaultSize),
 		},
 		{
-			Name:  "CONNECTION_POOL_MIN_SIZE",
+			Name:  "CONNECTION_POOLER_MIN_SIZE",
 			Value: fmt.Sprint(minSize),
 		},
 		{
-			Name:  "CONNECTION_POOL_RESERVE_SIZE",
+			Name:  "CONNECTION_POOLER_RESERVE_SIZE",
 			Value: fmt.Sprint(reserveSize),
 		},
 		{
-			Name:  "CONNECTION_POOL_MAX_CLIENT_CONN",
-			Value: fmt.Sprint(constants.ConnPoolMaxClientConnections),
+			Name:  "CONNECTION_POOLER_MAX_CLIENT_CONN",
+			Value: fmt.Sprint(constants.ConnectionPoolerMaxClientConnections),
 		},
 		{
-			Name:  "CONNECTION_POOL_MAX_DB_CONN",
+			Name:  "CONNECTION_POOLER_MAX_DB_CONN",
 			Value: fmt.Sprint(maxDBConn),
 		},
 	}
 }

-func (c *Cluster) generateConnPoolPodTemplate(spec *acidv1.PostgresSpec) (
+func (c *Cluster) generateConnectionPoolerPodTemplate(spec *acidv1.PostgresSpec) (
 	*v1.PodTemplateSpec, error) {

 	gracePeriod := int64(c.OpConfig.PodTerminateGracePeriod.Seconds())
 	resources, err := generateResourceRequirements(
-		spec.ConnectionPool.Resources,
-		c.makeDefaultConnPoolResources())
+		spec.ConnectionPooler.Resources,
+		c.makeDefaultConnectionPoolerResources())

 	effectiveDockerImage := util.Coalesce(
-		spec.ConnectionPool.DockerImage,
-		c.OpConfig.ConnectionPool.Image)
+		spec.ConnectionPooler.DockerImage,
+		c.OpConfig.ConnectionPooler.Image)

 	effectiveSchema := util.Coalesce(
-		spec.ConnectionPool.Schema,
-		c.OpConfig.ConnectionPool.Schema)
+		spec.ConnectionPooler.Schema,
+		c.OpConfig.ConnectionPooler.Schema)

 	if err != nil {
 		return nil, fmt.Errorf("could not generate resource requirements: %v", err)

@@ -1948,8 +1952,8 @@ func (c *Cluster) generateConnPoolPodTemplate(spec *acidv1.PostgresSpec) (

 	secretSelector := func(key string) *v1.SecretKeySelector {
 		effectiveUser := util.Coalesce(
-			spec.ConnectionPool.User,
-			c.OpConfig.ConnectionPool.User)
+			spec.ConnectionPooler.User,
+			c.OpConfig.ConnectionPooler.User)

 		return &v1.SecretKeySelector{
 			LocalObjectReference: v1.LocalObjectReference{

@@ -1975,7 +1979,7 @@ func (c *Cluster) generateConnPoolPodTemplate(spec *acidv1.PostgresSpec) (
 			},
 		},
 		// the convention is to use the same schema name as
-		// connection pool username
+		// connection pooler username
 		{
 			Name:  "PGSCHEMA",
 			Value: effectiveSchema,

@@ -1988,10 +1992,10 @@ func (c *Cluster) generateConnPoolPodTemplate(spec *acidv1.PostgresSpec) (
 		},
 	}

-	envVars = append(envVars, c.getConnPoolEnvVars(spec)...)
+	envVars = append(envVars, c.getConnectionPoolerEnvVars(spec)...)

 	poolerContainer := v1.Container{
-		Name:            connectionPoolContainer,
+		Name:            connectionPoolerContainer,
 		Image:           effectiveDockerImage,
 		ImagePullPolicy: v1.PullIfNotPresent,
 		Resources:       *resources,

@@ -2006,7 +2010,7 @@ func (c *Cluster) generateConnPoolPodTemplate(spec *acidv1.PostgresSpec) (

 	podTemplate := &v1.PodTemplateSpec{
 		ObjectMeta: metav1.ObjectMeta{
-			Labels:      c.connPoolLabelsSelector().MatchLabels,
+			Labels:      c.connectionPoolerLabelsSelector().MatchLabels,
 			Namespace:   c.Namespace,
 			Annotations: c.generatePodAnnotations(spec),
 		},

@@ -2048,32 +2052,32 @@ func (c *Cluster) ownerReferences() []metav1.OwnerReference {
 	}
 }

-func (c *Cluster) generateConnPoolDeployment(spec *acidv1.PostgresSpec) (
+func (c *Cluster) generateConnectionPoolerDeployment(spec *acidv1.PostgresSpec) (
 	*appsv1.Deployment, error) {

 	// there are two ways to enable connection pooler, either to specify a
-	// connectionPool section or enableConnectionPool. In the second case
-	// spec.connectionPool will be nil, so to make it easier to calculate
+	// connectionPooler section or enableConnectionPooler. In the second case
+	// spec.connectionPooler will be nil, so to make it easier to calculate
 	// default values, initialize it to an empty structure. It could be done
 	// anywhere, but here is the earliest common entry point between sync and
 	// create code, so init here.
-	if spec.ConnectionPool == nil {
-		spec.ConnectionPool = &acidv1.ConnectionPool{}
+	if spec.ConnectionPooler == nil {
+		spec.ConnectionPooler = &acidv1.ConnectionPooler{}
 	}

-	podTemplate, err := c.generateConnPoolPodTemplate(spec)
-	numberOfInstances := spec.ConnectionPool.NumberOfInstances
+	podTemplate, err := c.generateConnectionPoolerPodTemplate(spec)
+	numberOfInstances := spec.ConnectionPooler.NumberOfInstances
 	if numberOfInstances == nil {
 		numberOfInstances = util.CoalesceInt32(
-			c.OpConfig.ConnectionPool.NumberOfInstances,
+			c.OpConfig.ConnectionPooler.NumberOfInstances,
 			k8sutil.Int32ToPointer(1))
 	}

-	if *numberOfInstances < constants.ConnPoolMinInstances {
-		msg := "Adjusted number of connection pool instances from %d to %d"
-		c.logger.Warningf(msg, numberOfInstances, constants.ConnPoolMinInstances)
+	if *numberOfInstances < constants.ConnectionPoolerMinInstances {
+		msg := "Adjusted number of connection pooler instances from %d to %d"
+		c.logger.Warningf(msg, numberOfInstances, constants.ConnectionPoolerMinInstances)

-		*numberOfInstances = constants.ConnPoolMinInstances
+		*numberOfInstances = constants.ConnectionPoolerMinInstances
 	}

 	if err != nil {

@@ -2082,9 +2086,9 @@ func (c *Cluster) generateConnPoolDeployment(spec *acidv1.PostgresSpec) (

 	deployment := &appsv1.Deployment{
 		ObjectMeta: metav1.ObjectMeta{
-			Name:        c.connPoolName(),
+			Name:        c.connectionPoolerName(),
 			Namespace:   c.Namespace,
-			Labels:      c.connPoolLabelsSelector().MatchLabels,
+			Labels:      c.connectionPoolerLabelsSelector().MatchLabels,
 			Annotations: map[string]string{},
 			// make StatefulSet object its owner to represent the dependency.
 			// By itself StatefulSet is being deleted with "Orphaned"

@@ -2096,7 +2100,7 @@ func (c *Cluster) generateConnPoolDeployment(spec *acidv1.PostgresSpec) (
 		},
 		Spec: appsv1.DeploymentSpec{
 			Replicas: numberOfInstances,
-			Selector: c.connPoolLabelsSelector(),
+			Selector: c.connectionPoolerLabelsSelector(),
 			Template: *podTemplate,
 		},
 	}

@@ -2104,37 +2108,37 @@ func (c *Cluster) generateConnPoolDeployment(spec *acidv1.PostgresSpec) (
 	return deployment, nil
 }

-func (c *Cluster) generateConnPoolService(spec *acidv1.PostgresSpec) *v1.Service {
+func (c *Cluster) generateConnectionPoolerService(spec *acidv1.PostgresSpec) *v1.Service {

 	// there are two ways to enable connection pooler, either to specify a
-	// connectionPool section or enableConnectionPool. In the second case
-	// spec.connectionPool will be nil, so to make it easier to calculate
+	// connectionPooler section or enableConnectionPooler. In the second case
+	// spec.connectionPooler will be nil, so to make it easier to calculate
 	// default values, initialize it to an empty structure. It could be done
 	// anywhere, but here is the earliest common entry point between sync and
 	// create code, so init here.
-	if spec.ConnectionPool == nil {
-		spec.ConnectionPool = &acidv1.ConnectionPool{}
+	if spec.ConnectionPooler == nil {
+		spec.ConnectionPooler = &acidv1.ConnectionPooler{}
 	}

 	serviceSpec := v1.ServiceSpec{
 		Ports: []v1.ServicePort{
 			{
-				Name:       c.connPoolName(),
+				Name:       c.connectionPoolerName(),
 				Port:       pgPort,
 				TargetPort: intstr.IntOrString{StrVal: c.servicePort(Master)},
 			},
 		},
 		Type: v1.ServiceTypeClusterIP,
 		Selector: map[string]string{
-			"connection-pool": c.connPoolName(),
+			"connection-pooler": c.connectionPoolerName(),
 		},
 	}

 	service := &v1.Service{
 		ObjectMeta: metav1.ObjectMeta{
-			Name:      c.connPoolName(),
+			Name:      c.connectionPoolerName(),
 			Namespace: c.Namespace,
-			Labels:    c.connPoolLabelsSelector().MatchLabels,
+			Labels:    c.connectionPoolerLabelsSelector().MatchLabels,
 			Annotations: map[string]string{},
 			// make StatefulSet object its owner to represent the dependency.
 			// By itself StatefulSet is being deleted with "Orphaned"
@@ -65,16 +65,18 @@ func TestGenerateSpiloJSONConfiguration(t *testing.T) {
 				"locale":         "en_US.UTF-8",
 				"data-checksums": "true",
 			},
-			PgHba:                []string{"hostssl all all 0.0.0.0/0 md5", "host all all 0.0.0.0/0 md5"},
-			TTL:                  30,
-			LoopWait:             10,
-			RetryTimeout:         10,
-			MaximumLagOnFailover: 33554432,
-			Slots:                map[string]map[string]string{"permanent_logical_1": {"type": "logical", "database": "foo", "plugin": "pgoutput"}},
+			PgHba:                 []string{"hostssl all all 0.0.0.0/0 md5", "host all all 0.0.0.0/0 md5"},
+			TTL:                   30,
+			LoopWait:              10,
+			RetryTimeout:          10,
+			MaximumLagOnFailover:  33554432,
+			SynchronousMode:       true,
+			SynchronousModeStrict: true,
+			Slots:                 map[string]map[string]string{"permanent_logical_1": {"type": "logical", "database": "foo", "plugin": "pgoutput"}},
 		},
 		role:     "zalandos",
 		opConfig: config.Config{},
-		result:   `{"postgresql":{"bin_dir":"/usr/lib/postgresql/11/bin","pg_hba":["hostssl all all 0.0.0.0/0 md5","host all all 0.0.0.0/0 md5"]},"bootstrap":{"initdb":[{"auth-host":"md5"},{"auth-local":"trust"},"data-checksums",{"encoding":"UTF8"},{"locale":"en_US.UTF-8"}],"users":{"zalandos":{"password":"","options":["CREATEDB","NOLOGIN"]}},"dcs":{"ttl":30,"loop_wait":10,"retry_timeout":10,"maximum_lag_on_failover":33554432,"slots":{"permanent_logical_1":{"database":"foo","plugin":"pgoutput","type":"logical"}}}}}`,
+		result:   `{"postgresql":{"bin_dir":"/usr/lib/postgresql/11/bin","pg_hba":["hostssl all all 0.0.0.0/0 md5","host all all 0.0.0.0/0 md5"]},"bootstrap":{"initdb":[{"auth-host":"md5"},{"auth-local":"trust"},"data-checksums",{"encoding":"UTF8"},{"locale":"en_US.UTF-8"}],"users":{"zalandos":{"password":"","options":["CREATEDB","NOLOGIN"]}},"dcs":{"ttl":30,"loop_wait":10,"retry_timeout":10,"maximum_lag_on_failover":33554432,"synchronous_mode":true,"synchronous_mode_strict":true,"slots":{"permanent_logical_1":{"database":"foo","plugin":"pgoutput","type":"logical"}}}}}`,
 	},
 	}
 	for _, tt := range tests {
@@ -587,38 +589,38 @@ func TestSecretVolume(t *testing.T) {

 func testResources(cluster *Cluster, podSpec *v1.PodTemplateSpec) error {
 	cpuReq := podSpec.Spec.Containers[0].Resources.Requests["cpu"]
-	if cpuReq.String() != cluster.OpConfig.ConnectionPool.ConnPoolDefaultCPURequest {
+	if cpuReq.String() != cluster.OpConfig.ConnectionPooler.ConnectionPoolerDefaultCPURequest {
 		return fmt.Errorf("CPU request doesn't match, got %s, expected %s",
-			cpuReq.String(), cluster.OpConfig.ConnectionPool.ConnPoolDefaultCPURequest)
+			cpuReq.String(), cluster.OpConfig.ConnectionPooler.ConnectionPoolerDefaultCPURequest)
 	}

 	memReq := podSpec.Spec.Containers[0].Resources.Requests["memory"]
-	if memReq.String() != cluster.OpConfig.ConnectionPool.ConnPoolDefaultMemoryRequest {
+	if memReq.String() != cluster.OpConfig.ConnectionPooler.ConnectionPoolerDefaultMemoryRequest {
 		return fmt.Errorf("Memory request doesn't match, got %s, expected %s",
-			memReq.String(), cluster.OpConfig.ConnectionPool.ConnPoolDefaultMemoryRequest)
+			memReq.String(), cluster.OpConfig.ConnectionPooler.ConnectionPoolerDefaultMemoryRequest)
 	}

 	cpuLim := podSpec.Spec.Containers[0].Resources.Limits["cpu"]
-	if cpuLim.String() != cluster.OpConfig.ConnectionPool.ConnPoolDefaultCPULimit {
+	if cpuLim.String() != cluster.OpConfig.ConnectionPooler.ConnectionPoolerDefaultCPULimit {
 		return fmt.Errorf("CPU limit doesn't match, got %s, expected %s",
-			cpuLim.String(), cluster.OpConfig.ConnectionPool.ConnPoolDefaultCPULimit)
+			cpuLim.String(), cluster.OpConfig.ConnectionPooler.ConnectionPoolerDefaultCPULimit)
 	}

 	memLim := podSpec.Spec.Containers[0].Resources.Limits["memory"]
-	if memLim.String() != cluster.OpConfig.ConnectionPool.ConnPoolDefaultMemoryLimit {
+	if memLim.String() != cluster.OpConfig.ConnectionPooler.ConnectionPoolerDefaultMemoryLimit {
 		return fmt.Errorf("Memory limit doesn't match, got %s, expected %s",
-			memLim.String(), cluster.OpConfig.ConnectionPool.ConnPoolDefaultMemoryLimit)
+			memLim.String(), cluster.OpConfig.ConnectionPooler.ConnectionPoolerDefaultMemoryLimit)
 	}

 	return nil
 }

 func testLabels(cluster *Cluster, podSpec *v1.PodTemplateSpec) error {
-	poolLabels := podSpec.ObjectMeta.Labels["connection-pool"]
+	poolerLabels := podSpec.ObjectMeta.Labels["connection-pooler"]

-	if poolLabels != cluster.connPoolLabelsSelector().MatchLabels["connection-pool"] {
+	if poolerLabels != cluster.connectionPoolerLabelsSelector().MatchLabels["connection-pooler"] {
 		return fmt.Errorf("Pod labels do not match, got %+v, expected %+v",
-			podSpec.ObjectMeta.Labels, cluster.connPoolLabelsSelector().MatchLabels)
+			podSpec.ObjectMeta.Labels, cluster.connectionPoolerLabelsSelector().MatchLabels)
 	}

 	return nil

@@ -626,13 +628,13 @@ func testLabels(cluster *Cluster, podSpec *v1.PodTemplateSpec) error {

 func testEnvs(cluster *Cluster, podSpec *v1.PodTemplateSpec) error {
 	required := map[string]bool{
-		"PGHOST":               false,
-		"PGPORT":               false,
-		"PGUSER":               false,
-		"PGSCHEMA":             false,
-		"PGPASSWORD":           false,
-		"CONNECTION_POOL_MODE": false,
-		"CONNECTION_POOL_PORT": false,
+		"PGHOST":                 false,
+		"PGPORT":                 false,
+		"PGUSER":                 false,
+		"PGSCHEMA":               false,
+		"PGPASSWORD":             false,
+		"CONNECTION_POOLER_MODE": false,
+		"CONNECTION_POOLER_PORT": false,
 	}

 	envs := podSpec.Spec.Containers[0].Env
@@ -658,8 +660,8 @@ func testCustomPodTemplate(cluster *Cluster, podSpec *v1.PodTemplateSpec) error
 	return nil
 }

-func TestConnPoolPodSpec(t *testing.T) {
-	testName := "Test connection pool pod template generation"
+func TestConnectionPoolerPodSpec(t *testing.T) {
+	testName := "Test connection pooler pod template generation"
 	var cluster = New(
 		Config{
 			OpConfig: config.Config{

@@ -668,12 +670,12 @@ func TestConnPoolPodSpec(t *testing.T) {
 					SuperUsername:       superUserName,
 					ReplicationUsername: replicationUserName,
 				},
-				ConnectionPool: config.ConnectionPool{
-					MaxDBConnections:             int32ToPointer(60),
-					ConnPoolDefaultCPURequest:    "100m",
-					ConnPoolDefaultCPULimit:      "100m",
-					ConnPoolDefaultMemoryRequest: "100Mi",
-					ConnPoolDefaultMemoryLimit:   "100Mi",
+				ConnectionPooler: config.ConnectionPooler{
+					MaxDBConnections:                     int32ToPointer(60),
+					ConnectionPoolerDefaultCPURequest:    "100m",
+					ConnectionPoolerDefaultCPULimit:      "100m",
+					ConnectionPoolerDefaultMemoryRequest: "100Mi",
+					ConnectionPoolerDefaultMemoryLimit:   "100Mi",
 				},
 			},
 		}, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger)

@@ -686,7 +688,7 @@ func TestConnPoolPodSpec(t *testing.T) {
 					SuperUsername:       superUserName,
 					ReplicationUsername: replicationUserName,
 				},
-				ConnectionPool: config.ConnectionPool{},
+				ConnectionPooler: config.ConnectionPooler{},
 			},
 		}, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger)


@@ -702,7 +704,7 @@ func TestConnPoolPodSpec(t *testing.T) {
 		{
 			subTest: "default configuration",
 			spec: &acidv1.PostgresSpec{
-				ConnectionPool: &acidv1.ConnectionPool{},
+				ConnectionPooler: &acidv1.ConnectionPooler{},
 			},
 			expected: nil,
 			cluster:  cluster,

@@ -711,7 +713,7 @@ func TestConnPoolPodSpec(t *testing.T) {
 		{
 			subTest: "no default resources",
 			spec: &acidv1.PostgresSpec{
-				ConnectionPool: &acidv1.ConnectionPool{},
+				ConnectionPooler: &acidv1.ConnectionPooler{},
 			},
 			expected: errors.New(`could not generate resource requirements: could not fill resource requests: could not parse default CPU quantity: quantities must match the regular expression '^([+-]?[0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$'`),
 			cluster:  clusterNoDefaultRes,

@@ -720,7 +722,7 @@ func TestConnPoolPodSpec(t *testing.T) {
 		{
 			subTest: "default resources are set",
 			spec: &acidv1.PostgresSpec{
-				ConnectionPool: &acidv1.ConnectionPool{},
+				ConnectionPooler: &acidv1.ConnectionPooler{},
 			},
 			expected: nil,
 			cluster:  cluster,

@@ -729,7 +731,7 @@ func TestConnPoolPodSpec(t *testing.T) {
 		{
 			subTest: "labels for service",
 			spec: &acidv1.PostgresSpec{
-				ConnectionPool: &acidv1.ConnectionPool{},
+				ConnectionPooler: &acidv1.ConnectionPooler{},
 			},
 			expected: nil,
 			cluster:  cluster,

@@ -738,7 +740,7 @@ func TestConnPoolPodSpec(t *testing.T) {
 		{
 			subTest: "required envs",
 			spec: &acidv1.PostgresSpec{
-				ConnectionPool: &acidv1.ConnectionPool{},
+				ConnectionPooler: &acidv1.ConnectionPooler{},
 			},
 			expected: nil,
 			cluster:  cluster,

@@ -746,7 +748,7 @@ func TestConnPoolPodSpec(t *testing.T) {
 		},
 	}
 	for _, tt := range tests {
-		podSpec, err := tt.cluster.generateConnPoolPodTemplate(tt.spec)
+		podSpec, err := tt.cluster.generateConnectionPoolerPodTemplate(tt.spec)

 		if err != tt.expected && err.Error() != tt.expected.Error() {
 			t.Errorf("%s [%s]: Could not generate pod template,\n %+v, expected\n %+v",
@@ -774,9 +776,9 @@ func testDeploymentOwnwerReference(cluster *Cluster, deployment *appsv1.Deployme

 func testSelector(cluster *Cluster, deployment *appsv1.Deployment) error {
 	labels := deployment.Spec.Selector.MatchLabels
-	expected := cluster.connPoolLabelsSelector().MatchLabels
+	expected := cluster.connectionPoolerLabelsSelector().MatchLabels

-	if labels["connection-pool"] != expected["connection-pool"] {
+	if labels["connection-pooler"] != expected["connection-pooler"] {
 		return fmt.Errorf("Labels are incorrect, got %+v, expected %+v",
 			labels, expected)
 	}

@@ -784,8 +786,8 @@ func testSelector(cluster *Cluster, deployment *appsv1.Deployment) error {
 	return nil
 }

-func TestConnPoolDeploymentSpec(t *testing.T) {
-	testName := "Test connection pool deployment spec generation"
+func TestConnectionPoolerDeploymentSpec(t *testing.T) {
+	testName := "Test connection pooler deployment spec generation"
 	var cluster = New(
 		Config{
 			OpConfig: config.Config{

@@ -794,11 +796,11 @@ func TestConnPoolDeploymentSpec(t *testing.T) {
 					SuperUsername:       superUserName,
 					ReplicationUsername: replicationUserName,
 				},
-				ConnectionPool: config.ConnectionPool{
-					ConnPoolDefaultCPURequest:    "100m",
-					ConnPoolDefaultCPULimit:      "100m",
-					ConnPoolDefaultMemoryRequest: "100Mi",
-					ConnPoolDefaultMemoryLimit:   "100Mi",
+				ConnectionPooler: config.ConnectionPooler{
+					ConnectionPoolerDefaultCPURequest:    "100m",
+					ConnectionPoolerDefaultCPULimit:      "100m",
+					ConnectionPoolerDefaultMemoryRequest: "100Mi",
+					ConnectionPoolerDefaultMemoryLimit:   "100Mi",
 				},
 			},
 		}, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger)

@@ -822,7 +824,7 @@ func TestConnPoolDeploymentSpec(t *testing.T) {
 		{
 			subTest: "default configuration",
 			spec: &acidv1.PostgresSpec{
-				ConnectionPool: &acidv1.ConnectionPool{},
+				ConnectionPooler: &acidv1.ConnectionPooler{},
 			},
 			expected: nil,
 			cluster:  cluster,

@@ -831,7 +833,7 @@ func TestConnPoolDeploymentSpec(t *testing.T) {
 		{
 			subTest: "owner reference",
 			spec: &acidv1.PostgresSpec{
-				ConnectionPool: &acidv1.ConnectionPool{},
+				ConnectionPooler: &acidv1.ConnectionPooler{},
 			},
 			expected: nil,
 			cluster:  cluster,

@@ -840,7 +842,7 @@ func TestConnPoolDeploymentSpec(t *testing.T) {
 		{
 			subTest: "selector",
 			spec: &acidv1.PostgresSpec{
-				ConnectionPool: &acidv1.ConnectionPool{},
+				ConnectionPooler: &acidv1.ConnectionPooler{},
 			},
 			expected: nil,
 			cluster:  cluster,

@@ -848,7 +850,7 @@ func TestConnPoolDeploymentSpec(t *testing.T) {
 		},
 	}
 	for _, tt := range tests {
-		deployment, err := tt.cluster.generateConnPoolDeployment(tt.spec)
+		deployment, err := tt.cluster.generateConnectionPoolerDeployment(tt.spec)

 		if err != tt.expected && err.Error() != tt.expected.Error() {
 			t.Errorf("%s [%s]: Could not generate deployment spec,\n %+v, expected\n %+v",
@@ -877,16 +879,16 @@ func testServiceOwnwerReference(cluster *Cluster, service *v1.Service) error {
 func testServiceSelector(cluster *Cluster, service *v1.Service) error {
 	selector := service.Spec.Selector

-	if selector["connection-pool"] != cluster.connPoolName() {
+	if selector["connection-pooler"] != cluster.connectionPoolerName() {
 		return fmt.Errorf("Selector is incorrect, got %s, expected %s",
-			selector["connection-pool"], cluster.connPoolName())
+			selector["connection-pooler"], cluster.connectionPoolerName())
 	}

 	return nil
 }

-func TestConnPoolServiceSpec(t *testing.T) {
-	testName := "Test connection pool service spec generation"
+func TestConnectionPoolerServiceSpec(t *testing.T) {
+	testName := "Test connection pooler service spec generation"
 	var cluster = New(
 		Config{
 			OpConfig: config.Config{

@@ -895,11 +897,11 @@ func TestConnPoolServiceSpec(t *testing.T) {
 					SuperUsername:       superUserName,
 					ReplicationUsername: replicationUserName,
 				},
-				ConnectionPool: config.ConnectionPool{
-					ConnPoolDefaultCPURequest:    "100m",
-					ConnPoolDefaultCPULimit:      "100m",
-					ConnPoolDefaultMemoryRequest: "100Mi",
-					ConnPoolDefaultMemoryLimit:   "100Mi",
+				ConnectionPooler: config.ConnectionPooler{
+					ConnectionPoolerDefaultCPURequest:    "100m",
+					ConnectionPoolerDefaultCPULimit:      "100m",
+					ConnectionPoolerDefaultMemoryRequest: "100Mi",
+					ConnectionPoolerDefaultMemoryLimit:   "100Mi",
 				},
 			},
 		}, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger)

@@ -922,7 +924,7 @@ func TestConnPoolServiceSpec(t *testing.T) {
 		{
 			subTest: "default configuration",
 			spec: &acidv1.PostgresSpec{
-				ConnectionPool: &acidv1.ConnectionPool{},
+				ConnectionPooler: &acidv1.ConnectionPooler{},
 			},
 			cluster: cluster,
 			check:   noCheck,

@@ -930,7 +932,7 @@ func TestConnPoolServiceSpec(t *testing.T) {
 		{
 			subTest: "owner reference",
 			spec: &acidv1.PostgresSpec{
-				ConnectionPool: &acidv1.ConnectionPool{},
+				ConnectionPooler: &acidv1.ConnectionPooler{},
 			},
 			cluster: cluster,
 			check:   testServiceOwnwerReference,

@@ -938,14 +940,14 @@ func TestConnPoolServiceSpec(t *testing.T) {
 		{
 			subTest: "selector",
 			spec: &acidv1.PostgresSpec{
-				ConnectionPool: &acidv1.ConnectionPool{},
+				ConnectionPooler: &acidv1.ConnectionPooler{},
 			},
 			cluster: cluster,
 			check:   testServiceSelector,
 		},
 	}
 	for _, tt := range tests {
-		service := tt.cluster.generateConnPoolService(tt.spec)
+		service := tt.cluster.generateConnectionPoolerService(tt.spec)

 		if err := tt.check(cluster, service); err != nil {
 			t.Errorf("%s [%s]: Service spec is incorrect, %+v",

@@ -958,6 +960,7 @@ func TestTLS(t *testing.T) {
 	var err error
 	var spec acidv1.PostgresSpec
 	var cluster *Cluster
+	var spiloFSGroup = int64(103)

 	makeSpec := func(tls acidv1.TLSDescription) acidv1.PostgresSpec {
 		return acidv1.PostgresSpec{

@@ -982,6 +985,9 @@ func TestTLS(t *testing.T) {
 				SuperUsername:       superUserName,
 				ReplicationUsername: replicationUserName,
 			},
+			Resources: config.Resources{
+				SpiloFSGroup: &spiloFSGroup,
+			},
 		},
 	}, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger)
 	spec = makeSpec(acidv1.TLSDescription{SecretName: "my-secret", CAFile: "ca.crt"})
@@ -94,37 +94,37 @@ func (c *Cluster) createStatefulSet() (*appsv1.StatefulSet, error) {
 	return statefulSet, nil
 }

-// Prepare the database for connection pool to be used, i.e. install lookup
+// Prepare the database for connection pooler to be used, i.e. install lookup
 // function (do it first, because it should be fast and if it didn't succeed,
-// it doesn't makes sense to create more K8S objects. At this moment we assume
-// that necessary connection pool user exists.
+// it makes no sense to create more K8S objects). At this moment we assume
+// that the necessary connection pooler user exists.
 //
-// After that create all the objects for connection pool, namely a deployment
+// After that create all the objects for connection pooler, namely a deployment
 // with a chosen pooler and a service to expose it.
-func (c *Cluster) createConnectionPool(lookup InstallFunction) (*ConnectionPoolObjects, error) {
+func (c *Cluster) createConnectionPooler(lookup InstallFunction) (*ConnectionPoolerObjects, error) {
 	var msg string
-	c.setProcessName("creating connection pool")
+	c.setProcessName("creating connection pooler")

-	schema := c.Spec.ConnectionPool.Schema
+	schema := c.Spec.ConnectionPooler.Schema
 	if schema == "" {
-		schema = c.OpConfig.ConnectionPool.Schema
+		schema = c.OpConfig.ConnectionPooler.Schema
 	}

-	user := c.Spec.ConnectionPool.User
+	user := c.Spec.ConnectionPooler.User
 	if user == "" {
-		user = c.OpConfig.ConnectionPool.User
+		user = c.OpConfig.ConnectionPooler.User
 	}

 	err := lookup(schema, user)

 	if err != nil {
-		msg = "could not prepare database for connection pool: %v"
+		msg = "could not prepare database for connection pooler: %v"
 		return nil, fmt.Errorf(msg, err)
 	}

-	deploymentSpec, err := c.generateConnPoolDeployment(&c.Spec)
+	deploymentSpec, err := c.generateConnectionPoolerDeployment(&c.Spec)
 	if err != nil {
-		msg = "could not generate deployment for connection pool: %v"
+		msg = "could not generate deployment for connection pooler: %v"
 		return nil, fmt.Errorf(msg, err)
 	}

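Passing the lookup installer in as a parameter is what lets the tests below substitute mockInstallLookupFunction for a real database round trip. A minimal sketch of that seam, assuming InstallFunction is a plain func(schema, user string) error; the actual type definition is not part of this diff:

package main

import "fmt"

// Assumed shape of the InstallFunction dependency; the real definition
// lives outside this diff.
type InstallFunction func(schema, user string) error

// A no-op stand-in, analogous to the mockInstallLookupFunction used by
// TestConnectionPoolerCreationAndDeletion further down.
func mockInstallLookupFunction(schema, user string) error {
    fmt.Printf("pretending to install lookup function for %s.%s\n", schema, user)
    return nil
}

func main() {
    var lookup InstallFunction = mockInstallLookupFunction
    if err := lookup("pooler", "pooler"); err != nil {
        panic(err)
    }
}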
@@ -139,7 +139,7 @@ func (c *Cluster) createConnectionPool(lookup InstallFunction) (*ConnectionPoolO
 		return nil, err
 	}

-	serviceSpec := c.generateConnPoolService(&c.Spec)
+	serviceSpec := c.generateConnectionPoolerService(&c.Spec)
 	service, err := c.KubeClient.
 		Services(serviceSpec.Namespace).
 		Create(context.TODO(), serviceSpec, metav1.CreateOptions{})

@@ -148,31 +148,31 @@ func (c *Cluster) createConnectionPool(lookup InstallFunction) (*ConnectionPoolO
 		return nil, err
 	}

-	c.ConnectionPool = &ConnectionPoolObjects{
+	c.ConnectionPooler = &ConnectionPoolerObjects{
 		Deployment: deployment,
 		Service:    service,
 	}
-	c.logger.Debugf("created new connection pool %q, uid: %q",
+	c.logger.Debugf("created new connection pooler %q, uid: %q",
 		util.NameFromMeta(deployment.ObjectMeta), deployment.UID)

-	return c.ConnectionPool, nil
+	return c.ConnectionPooler, nil
 }

-func (c *Cluster) deleteConnectionPool() (err error) {
-	c.setProcessName("deleting connection pool")
-	c.logger.Debugln("deleting connection pool")
+func (c *Cluster) deleteConnectionPooler() (err error) {
+	c.setProcessName("deleting connection pooler")
+	c.logger.Debugln("deleting connection pooler")

 	// Lack of connection pooler objects is not a fatal error, just log it if
 	// it was present before in the manifest
-	if c.ConnectionPool == nil {
-		c.logger.Infof("No connection pool to delete")
+	if c.ConnectionPooler == nil {
+		c.logger.Infof("No connection pooler to delete")
 		return nil
 	}

 	// Clean up the deployment object. If deployment resource we've remembered
 	// is somehow empty, try to delete based on what would we generate
-	deploymentName := c.connPoolName()
-	deployment := c.ConnectionPool.Deployment
+	deploymentName := c.connectionPoolerName()
+	deployment := c.ConnectionPooler.Deployment

 	if deployment != nil {
 		deploymentName = deployment.Name

@@ -187,16 +187,16 @@ func (c *Cluster) deleteConnectionPool() (err error) {
 		Delete(context.TODO(), deploymentName, options)

 	if !k8sutil.ResourceNotFound(err) {
-		c.logger.Debugf("Connection pool deployment was already deleted")
+		c.logger.Debugf("Connection pooler deployment was already deleted")
 	} else if err != nil {
 		return fmt.Errorf("could not delete deployment: %v", err)
 	}

-	c.logger.Infof("Connection pool deployment %q has been deleted", deploymentName)
+	c.logger.Infof("Connection pooler deployment %q has been deleted", deploymentName)

 	// Repeat the same for the service object
-	service := c.ConnectionPool.Service
-	serviceName := c.connPoolName()
+	service := c.ConnectionPooler.Service
+	serviceName := c.connectionPoolerName()

 	if service != nil {
 		serviceName = service.Name

@@ -209,14 +209,14 @@ func (c *Cluster) deleteConnectionPool() (err error) {
 		Delete(context.TODO(), serviceName, options)

 	if !k8sutil.ResourceNotFound(err) {
-		c.logger.Debugf("Connection pool service was already deleted")
+		c.logger.Debugf("Connection pooler service was already deleted")
 	} else if err != nil {
 		return fmt.Errorf("could not delete service: %v", err)
 	}

-	c.logger.Infof("Connection pool service %q has been deleted", serviceName)
+	c.logger.Infof("Connection pooler service %q has been deleted", serviceName)

-	c.ConnectionPool = nil
+	c.ConnectionPooler = nil
 	return nil
 }

@@ -816,12 +816,12 @@ func (c *Cluster) GetPodDisruptionBudget() *policybeta1.PodDisruptionBudget {
 	return c.PodDisruptionBudget
 }

-// Perform actual patching of a connection pool deployment, assuming that all
-// the check were already done before.
-func (c *Cluster) updateConnPoolDeployment(oldDeploymentSpec, newDeployment *appsv1.Deployment) (*appsv1.Deployment, error) {
-	c.setProcessName("updating connection pool")
-	if c.ConnectionPool == nil || c.ConnectionPool.Deployment == nil {
-		return nil, fmt.Errorf("there is no connection pool in the cluster")
+// Perform actual patching of a connection pooler deployment, assuming that all
+// the checks were already done before.
+func (c *Cluster) updateConnectionPoolerDeployment(oldDeploymentSpec, newDeployment *appsv1.Deployment) (*appsv1.Deployment, error) {
+	c.setProcessName("updating connection pooler")
+	if c.ConnectionPooler == nil || c.ConnectionPooler.Deployment == nil {
+		return nil, fmt.Errorf("there is no connection pooler in the cluster")
 	}

 	patchData, err := specPatch(newDeployment.Spec)

@@ -833,9 +833,9 @@ func (c *Cluster) updateConnPoolDeployment(oldDeploymentSpec, newDeployment *app
 	// worker at one time will try to update it chances of conflicts are
 	// minimal.
 	deployment, err := c.KubeClient.
-		Deployments(c.ConnectionPool.Deployment.Namespace).Patch(
+		Deployments(c.ConnectionPooler.Deployment.Namespace).Patch(
 			context.TODO(),
-			c.ConnectionPool.Deployment.Name,
+			c.ConnectionPooler.Deployment.Name,
 			types.MergePatchType,
 			patchData,
 			metav1.PatchOptions{},

@@ -844,7 +844,7 @@ func (c *Cluster) updateConnPoolDeployment(oldDeploymentSpec, newDeployment *app
 		return nil, fmt.Errorf("could not patch deployment: %v", err)
 	}

-	c.ConnectionPool.Deployment = deployment
+	c.ConnectionPooler.Deployment = deployment

 	return deployment, nil
 }
@@ -19,8 +19,8 @@ func boolToPointer(value bool) *bool {
 	return &value
 }

-func TestConnPoolCreationAndDeletion(t *testing.T) {
-	testName := "Test connection pool creation"
+func TestConnectionPoolerCreationAndDeletion(t *testing.T) {
+	testName := "Test connection pooler creation"
 	var cluster = New(
 		Config{
 			OpConfig: config.Config{

@@ -29,11 +29,11 @@ func TestConnPoolCreationAndDeletion(t *testing.T) {
 				SuperUsername:       superUserName,
 				ReplicationUsername: replicationUserName,
 			},
-			ConnectionPool: config.ConnectionPool{
-				ConnPoolDefaultCPURequest:    "100m",
-				ConnPoolDefaultCPULimit:      "100m",
-				ConnPoolDefaultMemoryRequest: "100Mi",
-				ConnPoolDefaultMemoryLimit:   "100Mi",
+			ConnectionPooler: config.ConnectionPooler{
+				ConnectionPoolerDefaultCPURequest:    "100m",
+				ConnectionPoolerDefaultCPULimit:      "100m",
+				ConnectionPoolerDefaultMemoryRequest: "100Mi",
+				ConnectionPoolerDefaultMemoryLimit:   "100Mi",
 			},
 		},
 	}, k8sutil.NewMockKubernetesClient(), acidv1.Postgresql{}, logger)

@@ -45,31 +45,31 @@ func TestConnPoolCreationAndDeletion(t *testing.T) {
 	}

 	cluster.Spec = acidv1.PostgresSpec{
-		ConnectionPool: &acidv1.ConnectionPool{},
+		ConnectionPooler: &acidv1.ConnectionPooler{},
 	}
-	poolResources, err := cluster.createConnectionPool(mockInstallLookupFunction)
+	poolerResources, err := cluster.createConnectionPooler(mockInstallLookupFunction)

 	if err != nil {
-		t.Errorf("%s: Cannot create connection pool, %s, %+v",
-			testName, err, poolResources)
+		t.Errorf("%s: Cannot create connection pooler, %s, %+v",
+			testName, err, poolerResources)
 	}

-	if poolResources.Deployment == nil {
-		t.Errorf("%s: Connection pool deployment is empty", testName)
+	if poolerResources.Deployment == nil {
+		t.Errorf("%s: Connection pooler deployment is empty", testName)
 	}

-	if poolResources.Service == nil {
-		t.Errorf("%s: Connection pool service is empty", testName)
+	if poolerResources.Service == nil {
+		t.Errorf("%s: Connection pooler service is empty", testName)
 	}

-	err = cluster.deleteConnectionPool()
+	err = cluster.deleteConnectionPooler()
 	if err != nil {
-		t.Errorf("%s: Cannot delete connection pool, %s", testName, err)
+		t.Errorf("%s: Cannot delete connection pooler, %s", testName, err)
 	}
 }

-func TestNeedConnPool(t *testing.T) {
-	testName := "Test how connection pool can be enabled"
+func TestNeedConnectionPooler(t *testing.T) {
+	testName := "Test how connection pooler can be enabled"
 	var cluster = New(
 		Config{
 			OpConfig: config.Config{
@@ -78,50 +78,50 @@ func TestNeedConnPool(t *testing.T) {
 				SuperUsername:       superUserName,
 				ReplicationUsername: replicationUserName,
 			},
-			ConnectionPool: config.ConnectionPool{
-				ConnPoolDefaultCPURequest:    "100m",
-				ConnPoolDefaultCPULimit:      "100m",
-				ConnPoolDefaultMemoryRequest: "100Mi",
-				ConnPoolDefaultMemoryLimit:   "100Mi",
+			ConnectionPooler: config.ConnectionPooler{
+				ConnectionPoolerDefaultCPURequest:    "100m",
+				ConnectionPoolerDefaultCPULimit:      "100m",
+				ConnectionPoolerDefaultMemoryRequest: "100Mi",
+				ConnectionPoolerDefaultMemoryLimit:   "100Mi",
 			},
 		},
 	}, k8sutil.NewMockKubernetesClient(), acidv1.Postgresql{}, logger)

 	cluster.Spec = acidv1.PostgresSpec{
-		ConnectionPool: &acidv1.ConnectionPool{},
+		ConnectionPooler: &acidv1.ConnectionPooler{},
 	}

-	if !cluster.needConnectionPool() {
-		t.Errorf("%s: Connection pool is not enabled with full definition",
+	if !cluster.needConnectionPooler() {
+		t.Errorf("%s: Connection pooler is not enabled with full definition",
 			testName)
 	}

 	cluster.Spec = acidv1.PostgresSpec{
-		EnableConnectionPool: boolToPointer(true),
+		EnableConnectionPooler: boolToPointer(true),
 	}

-	if !cluster.needConnectionPool() {
-		t.Errorf("%s: Connection pool is not enabled with flag",
+	if !cluster.needConnectionPooler() {
+		t.Errorf("%s: Connection pooler is not enabled with flag",
 			testName)
 	}

 	cluster.Spec = acidv1.PostgresSpec{
-		EnableConnectionPool: boolToPointer(false),
-		ConnectionPool:       &acidv1.ConnectionPool{},
+		EnableConnectionPooler: boolToPointer(false),
+		ConnectionPooler:       &acidv1.ConnectionPooler{},
 	}

-	if cluster.needConnectionPool() {
-		t.Errorf("%s: Connection pool is still enabled with flag being false",
+	if cluster.needConnectionPooler() {
+		t.Errorf("%s: Connection pooler is still enabled with flag being false",
 			testName)
 	}

 	cluster.Spec = acidv1.PostgresSpec{
-		EnableConnectionPool: boolToPointer(true),
-		ConnectionPool:       &acidv1.ConnectionPool{},
+		EnableConnectionPooler: boolToPointer(true),
+		ConnectionPooler:       &acidv1.ConnectionPooler{},
 	}

-	if !cluster.needConnectionPool() {
-		t.Errorf("%s: Connection pool is not enabled with flag and full",
+	if !cluster.needConnectionPooler() {
+		t.Errorf("%s: Connection pooler is not enabled with flag and full",
 			testName)
 	}
 }
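Taken together, the four cases above pin down the enablement rule: an explicit enableConnectionPooler flag always wins, and otherwise the mere presence of a connectionPooler section enables the pooler. A sketch of logic consistent with those assertions; the real needConnectionPooler implementation is outside this diff:

package main

import "fmt"

type PostgresSpec struct {
    EnableConnectionPooler *bool
    ConnectionPooler       *struct{} // stand-in for acidv1.ConnectionPooler
}

// needConnectionPooler mirrors the behaviour the test cases above assert:
// an explicit flag takes precedence, otherwise a present section enables it.
func needConnectionPooler(spec *PostgresSpec) bool {
    if spec.EnableConnectionPooler != nil {
        return *spec.EnableConnectionPooler
    }
    return spec.ConnectionPooler != nil
}

func main() {
    enabled, disabled := true, false
    fmt.Println(needConnectionPooler(&PostgresSpec{ConnectionPooler: &struct{}{}}))  // true: section only
    fmt.Println(needConnectionPooler(&PostgresSpec{EnableConnectionPooler: &enabled})) // true: flag only
    fmt.Println(needConnectionPooler(&PostgresSpec{
        EnableConnectionPooler: &disabled,
        ConnectionPooler:       &struct{}{},
    })) // false: flag overrides the section
}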
@@ -3,7 +3,6 @@ package cluster
 import (
 	"context"
 	"fmt"
-	"strconv"

 	batchv1beta1 "k8s.io/api/batch/v1beta1"
 	v1 "k8s.io/api/core/v1"

@@ -114,24 +113,12 @@ func (c *Cluster) Sync(newSpec *acidv1.Postgresql) error {
 	// remove unused PVCs in case deleting them during scale down failed; see Update()
 	// the last pvc stays until the cluster is explicitly deleted as opposed to being scaled down to 0 pods
 	if c.OpConfig.EnableUnusedPVCDeletion && c.getNumberOfInstances(&c.Spec) > 0 {
-
-		// XXX that also deletes PVC of pods shut down before this change is deployed
-		for i := c.getNumberOfInstances(&c.Spec); ; i++ {
-			podIndex := strconv.Itoa(int(i))
-			pvcName := "pgdata-" + c.Name + "-" + podIndex
-			if err := c.KubeClient.PersistentVolumeClaims(c.Namespace).Delete(context.TODO(), pvcName, c.deleteOptions); err != nil {
-				if k8sutil.ResourceNotFound(err) {
-					// no more pvcs to delete
-					break
-				}
-				c.logger.Warningf("could not delete PersistentVolumeClaim: %v", err)
-				// next Sync() or Update() will retry
-			}
-		}
+		c.deleteUnusedPersistentVolumeClaims()
 	}
-	// sync connection pool
-	if err = c.syncConnectionPool(&oldSpec, newSpec, c.installLookupFunction); err != nil {
-		return fmt.Errorf("could not sync connection pool: %v", err)
+
+	// sync connection pooler
+	if err = c.syncConnectionPooler(&oldSpec, newSpec, c.installLookupFunction); err != nil {
+		return fmt.Errorf("could not sync connection pooler: %v", err)
 	}

 	return err
@@ -497,12 +484,12 @@ func (c *Cluster) syncRoles() (err error) {
 		userNames = append(userNames, u.Name)
 	}

-	if c.needConnectionPool() {
-		connPoolUser := c.systemUsers[constants.ConnectionPoolUserKeyName]
-		userNames = append(userNames, connPoolUser.Name)
+	if c.needConnectionPooler() {
+		connectionPoolerUser := c.systemUsers[constants.ConnectionPoolerUserKeyName]
+		userNames = append(userNames, connectionPoolerUser.Name)

-		if _, exists := c.pgUsers[connPoolUser.Name]; !exists {
-			c.pgUsers[connPoolUser.Name] = connPoolUser
+		if _, exists := c.pgUsers[connectionPoolerUser.Name]; !exists {
+			c.pgUsers[connectionPoolerUser.Name] = connectionPoolerUser
 		}
 	}

@@ -639,69 +626,69 @@ func (c *Cluster) syncLogicalBackupJob() error {
 	return nil
 }

-func (c *Cluster) syncConnectionPool(oldSpec, newSpec *acidv1.Postgresql, lookup InstallFunction) error {
-	if c.ConnectionPool == nil {
-		c.ConnectionPool = &ConnectionPoolObjects{}
+func (c *Cluster) syncConnectionPooler(oldSpec, newSpec *acidv1.Postgresql, lookup InstallFunction) error {
+	if c.ConnectionPooler == nil {
+		c.ConnectionPooler = &ConnectionPoolerObjects{}
 	}

-	newNeedConnPool := c.needConnectionPoolWorker(&newSpec.Spec)
-	oldNeedConnPool := c.needConnectionPoolWorker(&oldSpec.Spec)
+	newNeedConnectionPooler := c.needConnectionPoolerWorker(&newSpec.Spec)
+	oldNeedConnectionPooler := c.needConnectionPoolerWorker(&oldSpec.Spec)

-	if newNeedConnPool {
-		// Try to sync in any case. If we didn't needed connection pool before,
+	if newNeedConnectionPooler {
+		// Try to sync in any case. If we didn't need a connection pooler before,
 		// it means we want to create it. If it was already present, still sync
 		// since it could happen that there is no difference in specs, and all
-		// the resources are remembered, but the deployment was manualy deleted
+		// the resources are remembered, but the deployment was manually deleted
 		// in between
-		c.logger.Debug("syncing connection pool")
+		c.logger.Debug("syncing connection pooler")

 		// in this case also do not forget to install lookup function as for
 		// creating cluster
-		if !oldNeedConnPool || !c.ConnectionPool.LookupFunction {
-			newConnPool := newSpec.Spec.ConnectionPool
+		if !oldNeedConnectionPooler || !c.ConnectionPooler.LookupFunction {
+			newConnectionPooler := newSpec.Spec.ConnectionPooler

 			specSchema := ""
 			specUser := ""

-			if newConnPool != nil {
-				specSchema = newConnPool.Schema
-				specUser = newConnPool.User
+			if newConnectionPooler != nil {
+				specSchema = newConnectionPooler.Schema
+				specUser = newConnectionPooler.User
 			}

 			schema := util.Coalesce(
 				specSchema,
-				c.OpConfig.ConnectionPool.Schema)
+				c.OpConfig.ConnectionPooler.Schema)

 			user := util.Coalesce(
 				specUser,
-				c.OpConfig.ConnectionPool.User)
+				c.OpConfig.ConnectionPooler.User)

 			if err := lookup(schema, user); err != nil {
 				return err
 			}
 		}

-		if err := c.syncConnectionPoolWorker(oldSpec, newSpec); err != nil {
-			c.logger.Errorf("could not sync connection pool: %v", err)
+		if err := c.syncConnectionPoolerWorker(oldSpec, newSpec); err != nil {
+			c.logger.Errorf("could not sync connection pooler: %v", err)
 			return err
 		}
 	}

-	if oldNeedConnPool && !newNeedConnPool {
+	if oldNeedConnectionPooler && !newNeedConnectionPooler {
 		// delete and cleanup resources
-		if err := c.deleteConnectionPool(); err != nil {
-			c.logger.Warningf("could not remove connection pool: %v", err)
+		if err := c.deleteConnectionPooler(); err != nil {
+			c.logger.Warningf("could not remove connection pooler: %v", err)
 		}
 	}

-	if !oldNeedConnPool && !newNeedConnPool {
+	if !oldNeedConnectionPooler && !newNeedConnectionPooler {
 		// delete and cleanup resources if not empty
-		if c.ConnectionPool != nil &&
-			(c.ConnectionPool.Deployment != nil ||
-				c.ConnectionPool.Service != nil) {
+		if c.ConnectionPooler != nil &&
+			(c.ConnectionPooler.Deployment != nil ||
+				c.ConnectionPooler.Service != nil) {

-			if err := c.deleteConnectionPool(); err != nil {
-				c.logger.Warningf("could not remove connection pool: %v", err)
+			if err := c.deleteConnectionPooler(); err != nil {
+				c.logger.Warningf("could not remove connection pooler: %v", err)
 			}
 		}
 	}

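Read as a whole, the function above reduces to a small decision table over the old and new results of needConnectionPoolerWorker, plus a check for leftover objects. A minimal, self-contained sketch of that logic; the action strings are placeholders for the helper calls in the diff, not the operator's API:

package main

import "fmt"

// leftovers reports whether a deployment or service is still tracked even
// though no spec asks for a pooler anymore.
func poolerAction(oldNeed, newNeed, leftovers bool) string {
	switch {
	case newNeed:
		// create or repair; the lookup function is installed on first creation
		return "sync deployment and service"
	case oldNeed:
		// was needed before, not anymore
		return "delete pooler resources"
	case leftovers:
		// neither spec wants a pooler, but stale objects remain
		return "delete leftover pooler resources"
	default:
		return "nothing to do"
	}
}

func main() {
	fmt.Println(poolerAction(false, true, false)) // sync deployment and service
	fmt.Println(poolerAction(true, false, false)) // delete pooler resources
}
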
@@ -709,22 +696,22 @@ func (c *Cluster) syncConnectionPool(oldSpec, newSpec *acidv1.Postgresql, lookup
 	return nil
 }

-// Synchronize connection pool resources. Effectively we're interested only in
+// Synchronize connection pooler resources. Effectively we're interested only in
 // synchronizing the corresponding deployment, but in case of deployment or
 // service is missing, create it. After checking, also remember an object for
 // the future references.
-func (c *Cluster) syncConnectionPoolWorker(oldSpec, newSpec *acidv1.Postgresql) error {
+func (c *Cluster) syncConnectionPoolerWorker(oldSpec, newSpec *acidv1.Postgresql) error {
 	deployment, err := c.KubeClient.
 		Deployments(c.Namespace).
-		Get(context.TODO(), c.connPoolName(), metav1.GetOptions{})
+		Get(context.TODO(), c.connectionPoolerName(), metav1.GetOptions{})

 	if err != nil && k8sutil.ResourceNotFound(err) {
-		msg := "Deployment %s for connection pool synchronization is not found, create it"
-		c.logger.Warningf(msg, c.connPoolName())
+		msg := "Deployment %s for connection pooler synchronization is not found, create it"
+		c.logger.Warningf(msg, c.connectionPoolerName())

-		deploymentSpec, err := c.generateConnPoolDeployment(&newSpec.Spec)
+		deploymentSpec, err := c.generateConnectionPoolerDeployment(&newSpec.Spec)
 		if err != nil {
-			msg = "could not generate deployment for connection pool: %v"
+			msg = "could not generate deployment for connection pooler: %v"
 			return fmt.Errorf(msg, err)
 		}

@@ -736,31 +723,31 @@ func (c *Cluster) syncConnectionPoolWorker(oldSpec, newSpec *acidv1.Postgresql)
 			return err
 		}

-		c.ConnectionPool.Deployment = deployment
+		c.ConnectionPooler.Deployment = deployment
 	} else if err != nil {
-		return fmt.Errorf("could not get connection pool deployment to sync: %v", err)
+		return fmt.Errorf("could not get connection pooler deployment to sync: %v", err)
 	} else {
-		c.ConnectionPool.Deployment = deployment
+		c.ConnectionPooler.Deployment = deployment

 		// actual synchronization
-		oldConnPool := oldSpec.Spec.ConnectionPool
-		newConnPool := newSpec.Spec.ConnectionPool
-		specSync, specReason := c.needSyncConnPoolSpecs(oldConnPool, newConnPool)
-		defaultsSync, defaultsReason := c.needSyncConnPoolDefaults(newConnPool, deployment)
+		oldConnectionPooler := oldSpec.Spec.ConnectionPooler
+		newConnectionPooler := newSpec.Spec.ConnectionPooler
+		specSync, specReason := c.needSyncConnectionPoolerSpecs(oldConnectionPooler, newConnectionPooler)
+		defaultsSync, defaultsReason := c.needSyncConnectionPoolerDefaults(newConnectionPooler, deployment)
 		reason := append(specReason, defaultsReason...)
 		if specSync || defaultsSync {
-			c.logger.Infof("Update connection pool deployment %s, reason: %+v",
-				c.connPoolName(), reason)
+			c.logger.Infof("Update connection pooler deployment %s, reason: %+v",
+				c.connectionPoolerName(), reason)

-			newDeploymentSpec, err := c.generateConnPoolDeployment(&newSpec.Spec)
+			newDeploymentSpec, err := c.generateConnectionPoolerDeployment(&newSpec.Spec)
 			if err != nil {
-				msg := "could not generate deployment for connection pool: %v"
+				msg := "could not generate deployment for connection pooler: %v"
 				return fmt.Errorf(msg, err)
 			}

-			oldDeploymentSpec := c.ConnectionPool.Deployment
+			oldDeploymentSpec := c.ConnectionPooler.Deployment

-			deployment, err := c.updateConnPoolDeployment(
+			deployment, err := c.updateConnectionPoolerDeployment(
 				oldDeploymentSpec,
 				newDeploymentSpec)

@@ -768,20 +755,20 @@ func (c *Cluster) syncConnectionPoolWorker(oldSpec, newSpec *acidv1.Postgresql)
 			return err
 		}

-			c.ConnectionPool.Deployment = deployment
+			c.ConnectionPooler.Deployment = deployment
 			return nil
 		}
 	}

 	service, err := c.KubeClient.
 		Services(c.Namespace).
-		Get(context.TODO(), c.connPoolName(), metav1.GetOptions{})
+		Get(context.TODO(), c.connectionPoolerName(), metav1.GetOptions{})

 	if err != nil && k8sutil.ResourceNotFound(err) {
-		msg := "Service %s for connection pool synchronization is not found, create it"
-		c.logger.Warningf(msg, c.connPoolName())
+		msg := "Service %s for connection pooler synchronization is not found, create it"
+		c.logger.Warningf(msg, c.connectionPoolerName())

-		serviceSpec := c.generateConnPoolService(&newSpec.Spec)
+		serviceSpec := c.generateConnectionPoolerService(&newSpec.Spec)
 		service, err := c.KubeClient.
 			Services(serviceSpec.Namespace).
 			Create(context.TODO(), serviceSpec, metav1.CreateOptions{})

@@ -790,12 +777,12 @@ func (c *Cluster) syncConnectionPoolWorker(oldSpec, newSpec *acidv1.Postgresql)
 			return err
 		}

-		c.ConnectionPool.Service = service
+		c.ConnectionPooler.Service = service
 	} else if err != nil {
-		return fmt.Errorf("could not get connection pool service to sync: %v", err)
+		return fmt.Errorf("could not get connection pooler service to sync: %v", err)
 	} else {
 		// Service updates are not supported and probably not that useful anyway
-		c.ConnectionPool.Service = service
+		c.ConnectionPooler.Service = service
 	}

 	return nil

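The worker follows the usual get-or-create pattern for both the Deployment and the Service. A bare sketch of that skeleton in plain client-go; the operator's own KubeClient wrapper and k8sutil.ResourceNotFound take the place of kubernetes.Interface and apierrors.IsNotFound here:

package cluster

import (
	"context"

	appsv1 "k8s.io/api/apps/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// getOrCreateDeployment fetches a deployment by name; if it is missing it
// creates it from the desired spec and returns the created object.
func getOrCreateDeployment(c kubernetes.Interface, ns string, want *appsv1.Deployment) (*appsv1.Deployment, error) {
	got, err := c.AppsV1().Deployments(ns).Get(context.TODO(), want.Name, metav1.GetOptions{})
	if apierrors.IsNotFound(err) {
		// not found: create it and remember the returned object
		return c.AppsV1().Deployments(ns).Create(context.TODO(), want, metav1.CreateOptions{})
	}
	if err != nil {
		return nil, err
	}
	// found: the caller compares specs and defaults, and updates if they differ
	return got, nil
}
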
@@ -18,8 +18,8 @@ func int32ToPointer(value int32) *int32 {
 }

 func deploymentUpdated(cluster *Cluster, err error) error {
-	if cluster.ConnectionPool.Deployment.Spec.Replicas == nil ||
-		*cluster.ConnectionPool.Deployment.Spec.Replicas != 2 {
+	if cluster.ConnectionPooler.Deployment.Spec.Replicas == nil ||
+		*cluster.ConnectionPooler.Deployment.Spec.Replicas != 2 {
 		return fmt.Errorf("Wrong number of instances")
 	}

@@ -27,15 +27,15 @@ func deploymentUpdated(cluster *Cluster, err error) error {
 }

 func objectsAreSaved(cluster *Cluster, err error) error {
-	if cluster.ConnectionPool == nil {
-		return fmt.Errorf("Connection pool resources are empty")
+	if cluster.ConnectionPooler == nil {
+		return fmt.Errorf("Connection pooler resources are empty")
 	}

-	if cluster.ConnectionPool.Deployment == nil {
+	if cluster.ConnectionPooler.Deployment == nil {
 		return fmt.Errorf("Deployment was not saved")
 	}

-	if cluster.ConnectionPool.Service == nil {
+	if cluster.ConnectionPooler.Service == nil {
 		return fmt.Errorf("Service was not saved")
 	}

@@ -43,15 +43,15 @@ func objectsAreSaved(cluster *Cluster, err error) error {
 }

 func objectsAreDeleted(cluster *Cluster, err error) error {
-	if cluster.ConnectionPool != nil {
-		return fmt.Errorf("Connection pool was not deleted")
+	if cluster.ConnectionPooler != nil {
+		return fmt.Errorf("Connection pooler was not deleted")
 	}

 	return nil
 }

-func TestConnPoolSynchronization(t *testing.T) {
-	testName := "Test connection pool synchronization"
+func TestConnectionPoolerSynchronization(t *testing.T) {
+	testName := "Test connection pooler synchronization"
 	var cluster = New(
 		Config{
 			OpConfig: config.Config{

@@ -60,12 +60,12 @@ func TestConnPoolSynchronization(t *testing.T) {
 				SuperUsername:       superUserName,
 				ReplicationUsername: replicationUserName,
 			},
-			ConnectionPool: config.ConnectionPool{
-				ConnPoolDefaultCPURequest:    "100m",
-				ConnPoolDefaultCPULimit:      "100m",
-				ConnPoolDefaultMemoryRequest: "100Mi",
-				ConnPoolDefaultMemoryLimit:   "100Mi",
-				NumberOfInstances:            int32ToPointer(1),
+			ConnectionPooler: config.ConnectionPooler{
+				ConnectionPoolerDefaultCPURequest:    "100m",
+				ConnectionPoolerDefaultCPULimit:      "100m",
+				ConnectionPoolerDefaultMemoryRequest: "100Mi",
+				ConnectionPoolerDefaultMemoryLimit:   "100Mi",
+				NumberOfInstances:                    int32ToPointer(1),
 			},
 		},
 	}, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger)

@@ -84,15 +84,15 @@ func TestConnPoolSynchronization(t *testing.T) {

 	clusterDirtyMock := *cluster
 	clusterDirtyMock.KubeClient = k8sutil.NewMockKubernetesClient()
-	clusterDirtyMock.ConnectionPool = &ConnectionPoolObjects{
+	clusterDirtyMock.ConnectionPooler = &ConnectionPoolerObjects{
 		Deployment: &appsv1.Deployment{},
 		Service:    &v1.Service{},
 	}

 	clusterNewDefaultsMock := *cluster
 	clusterNewDefaultsMock.KubeClient = k8sutil.NewMockKubernetesClient()
-	cluster.OpConfig.ConnectionPool.Image = "pooler:2.0"
-	cluster.OpConfig.ConnectionPool.NumberOfInstances = int32ToPointer(2)
+	cluster.OpConfig.ConnectionPooler.Image = "pooler:2.0"
+	cluster.OpConfig.ConnectionPooler.NumberOfInstances = int32ToPointer(2)

 	tests := []struct {
 		subTest string

@@ -105,12 +105,12 @@ func TestConnPoolSynchronization(t *testing.T) {
 			subTest: "create if doesn't exist",
 			oldSpec: &acidv1.Postgresql{
 				Spec: acidv1.PostgresSpec{
-					ConnectionPool: &acidv1.ConnectionPool{},
+					ConnectionPooler: &acidv1.ConnectionPooler{},
 				},
 			},
 			newSpec: &acidv1.Postgresql{
 				Spec: acidv1.PostgresSpec{
-					ConnectionPool: &acidv1.ConnectionPool{},
+					ConnectionPooler: &acidv1.ConnectionPooler{},
 				},
 			},
 			cluster: &clusterMissingObjects,

@@ -123,7 +123,7 @@ func TestConnPoolSynchronization(t *testing.T) {
 			},
 			newSpec: &acidv1.Postgresql{
 				Spec: acidv1.PostgresSpec{
-					EnableConnectionPool: boolToPointer(true),
+					EnableConnectionPooler: boolToPointer(true),
 				},
 			},
 			cluster: &clusterMissingObjects,

@@ -136,7 +136,7 @@ func TestConnPoolSynchronization(t *testing.T) {
 			},
 			newSpec: &acidv1.Postgresql{
 				Spec: acidv1.PostgresSpec{
-					ConnectionPool: &acidv1.ConnectionPool{},
+					ConnectionPooler: &acidv1.ConnectionPooler{},
 				},
 			},
 			cluster: &clusterMissingObjects,

@@ -146,7 +146,7 @@ func TestConnPoolSynchronization(t *testing.T) {
 			subTest: "delete if not needed",
 			oldSpec: &acidv1.Postgresql{
 				Spec: acidv1.PostgresSpec{
-					ConnectionPool: &acidv1.ConnectionPool{},
+					ConnectionPooler: &acidv1.ConnectionPooler{},
 				},
 			},
 			newSpec: &acidv1.Postgresql{

@@ -170,14 +170,14 @@ func TestConnPoolSynchronization(t *testing.T) {
 			subTest: "update deployment",
 			oldSpec: &acidv1.Postgresql{
 				Spec: acidv1.PostgresSpec{
-					ConnectionPool: &acidv1.ConnectionPool{
+					ConnectionPooler: &acidv1.ConnectionPooler{
 						NumberOfInstances: int32ToPointer(1),
 					},
 				},
 			},
 			newSpec: &acidv1.Postgresql{
 				Spec: acidv1.PostgresSpec{
-					ConnectionPool: &acidv1.ConnectionPool{
+					ConnectionPooler: &acidv1.ConnectionPooler{
 						NumberOfInstances: int32ToPointer(2),
 					},
 				},

@@ -189,12 +189,12 @@ func TestConnPoolSynchronization(t *testing.T) {
 			subTest: "update image from changed defaults",
 			oldSpec: &acidv1.Postgresql{
 				Spec: acidv1.PostgresSpec{
-					ConnectionPool: &acidv1.ConnectionPool{},
+					ConnectionPooler: &acidv1.ConnectionPooler{},
 				},
 			},
 			newSpec: &acidv1.Postgresql{
 				Spec: acidv1.PostgresSpec{
-					ConnectionPool: &acidv1.ConnectionPool{},
+					ConnectionPooler: &acidv1.ConnectionPooler{},
 				},
 			},
 			cluster: &clusterNewDefaultsMock,

@@ -202,7 +202,7 @@ func TestConnPoolSynchronization(t *testing.T) {
 		},
 	}
 	for _, tt := range tests {
-		err := tt.cluster.syncConnectionPool(tt.oldSpec, tt.newSpec, mockInstallLookupFunction)
+		err := tt.cluster.syncConnectionPooler(tt.oldSpec, tt.newSpec, mockInstallLookupFunction)

 		if err := tt.check(tt.cluster, err); err != nil {
 			t.Errorf("%s [%s]: Could not synchronize, %+v",

@@ -415,24 +415,24 @@ func (c *Cluster) labelsSelector() *metav1.LabelSelector {
 	}
 }

-// Return connection pool labels selector, which should from one point of view
+// Return connection pooler labels selector, which should from one point of view
 // inherit most of the labels from the cluster itself, but at the same time
 // have e.g. different `application` label, so that recreatePod operation will
 // not interfere with it (it lists all the pods via labels, and if there would
 // be no difference, it will recreate also pooler pods).
-func (c *Cluster) connPoolLabelsSelector() *metav1.LabelSelector {
-	connPoolLabels := labels.Set(map[string]string{})
+func (c *Cluster) connectionPoolerLabelsSelector() *metav1.LabelSelector {
+	connectionPoolerLabels := labels.Set(map[string]string{})

 	extraLabels := labels.Set(map[string]string{
-		"connection-pool": c.connPoolName(),
-		"application":     "db-connection-pool",
+		"connection-pooler": c.connectionPoolerName(),
+		"application":       "db-connection-pooler",
 	})

-	connPoolLabels = labels.Merge(connPoolLabels, c.labelsSet(false))
-	connPoolLabels = labels.Merge(connPoolLabels, extraLabels)
+	connectionPoolerLabels = labels.Merge(connectionPoolerLabels, c.labelsSet(false))
+	connectionPoolerLabels = labels.Merge(connectionPoolerLabels, extraLabels)

 	return &metav1.LabelSelector{
-		MatchLabels:      connPoolLabels,
+		MatchLabels:      connectionPoolerLabels,
 		MatchExpressions: nil,
 	}
 }

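The merge order matters here: the cluster labels are merged in first and the pooler-specific labels second, so the pooler's `application` value wins on conflict. A small illustration with the same apimachinery package; the label values are made up for the example:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
)

func main() {
	clusterLabels := labels.Set{"application": "spilo", "cluster-name": "acid-minimal-cluster"}
	extra := labels.Set{
		"connection-pooler": "acid-minimal-cluster-pooler",
		"application":       "db-connection-pooler",
	}
	// Merge copies the first set, then the second, so the pooler's
	// "application" label overrides the cluster's.
	merged := labels.Merge(clusterLabels, extra)
	fmt.Println(merged)
	// application=db-connection-pooler,cluster-name=acid-minimal-cluster,connection-pooler=acid-minimal-cluster-pooler
}
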
@@ -510,14 +510,23 @@ func (c *Cluster) patroniUsesKubernetes() bool {
 	return c.OpConfig.EtcdHost == ""
 }

-func (c *Cluster) needConnectionPoolWorker(spec *acidv1.PostgresSpec) bool {
-	if spec.EnableConnectionPool == nil {
-		return spec.ConnectionPool != nil
+func (c *Cluster) patroniKubernetesUseConfigMaps() bool {
+	if !c.patroniUsesKubernetes() {
+		return false
+	}
+
+	// otherwise, follow the operator configuration
+	return c.OpConfig.KubernetesUseConfigMaps
+}
+
+func (c *Cluster) needConnectionPoolerWorker(spec *acidv1.PostgresSpec) bool {
+	if spec.EnableConnectionPooler == nil {
+		return spec.ConnectionPooler != nil
 	} else {
-		return *spec.EnableConnectionPool
+		return *spec.EnableConnectionPooler
 	}
 }

-func (c *Cluster) needConnectionPool() bool {
-	return c.needConnectionPoolWorker(&c.Spec)
+func (c *Cluster) needConnectionPooler() bool {
+	return c.needConnectionPoolerWorker(&c.Spec)
 }

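The explicit enable flag, when present, always wins; otherwise the mere presence of a connectionPooler section in the manifest turns the pooler on. A compact, runnable restatement of that rule; PoolerSpec is a stand-in for acidv1.ConnectionPooler:

package main

import "fmt"

type PoolerSpec struct{}

func needPooler(enable *bool, section *PoolerSpec) bool {
	if enable != nil {
		// explicit enableConnectionPooler: true/false always decides
		return *enable
	}
	// implicit: any connectionPooler section enables it
	return section != nil
}

func main() {
	t := true
	fmt.Println(needPooler(nil, nil))           // false
	fmt.Println(needPooler(nil, &PoolerSpec{})) // true
	fmt.Println(needPooler(&t, nil))            // true
}
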
@@ -15,6 +15,7 @@ import (
 	"github.com/zalando/postgres-operator/pkg/util"
 	"github.com/zalando/postgres-operator/pkg/util/constants"
 	"github.com/zalando/postgres-operator/pkg/util/filesystems"
+	"github.com/zalando/postgres-operator/pkg/util/k8sutil"
 	"github.com/zalando/postgres-operator/pkg/util/volumes"
 )

@@ -52,6 +53,33 @@ func (c *Cluster) deletePersistentVolumeClaims() error {
 	return nil
 }

+func (c *Cluster) deleteUnusedPersistentVolumeClaims() {
+
+	c.logger.Debug("deleting PVCs of shut down pods")
+
+	// Scaling down to 0 replicas is not cluster deletion, so keep the last PVC.
+	// The operator removes it only when an explicit "kubectl pg delete" is issued.
+	if c.getNumberOfInstances(&c.Spec) == 0 {
+		c.logger.Info("cluster scaled down to 0 pods; skipping deletion of the last pvc")
+		return
+	}
+
+	// XXX this also deletes PVCs of pods shut down before this change was deployed
+	for i := c.getNumberOfInstances(&c.Spec); ; i++ {
+		podIndex := strconv.Itoa(int(i))
+		pvcName := "pgdata-" + c.Name + "-" + podIndex
+		if err := c.KubeClient.PersistentVolumeClaims(c.Namespace).Delete(context.TODO(), pvcName, c.deleteOptions); err != nil {
+			if k8sutil.ResourceNotFound(err) {
+				// no more PVCs to delete
+				break
+			}
+			c.logger.Warningf("could not delete PersistentVolumeClaim: %v", err)
+			// next Sync() will retry
+		}
+	}
+}
+
 func (c *Cluster) listPersistentVolumes() ([]*v1.PersistentVolume, error) {
 	result := make([]*v1.PersistentVolume, 0)

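The PVC names follow the StatefulSet volume naming scheme, so the loop can simply probe indices upwards from the new instance count until the API returns NotFound. For example, assuming a cluster named acid-minimal-cluster scaled down from 3 replicas to 1, pods 1 and 2 lose their claims:

package main

import (
	"fmt"
	"strconv"
)

// pvcName reproduces the naming used in the loop above.
func pvcName(clusterName string, podIndex int32) string {
	return "pgdata-" + clusterName + "-" + strconv.Itoa(int(podIndex))
}

func main() {
	// new instance count is 1, so probing starts at index 1
	for i := int32(1); i < 3; i++ {
		fmt.Println(pvcName("acid-minimal-cluster", i))
	}
	// Output:
	// pgdata-acid-minimal-cluster-1
	// pgdata-acid-minimal-cluster-2
}
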
@@ -35,6 +35,7 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur
 	// general config
 	result.EnableCRDValidation = fromCRD.EnableCRDValidation
 	result.EtcdHost = fromCRD.EtcdHost
+	result.KubernetesUseConfigMaps = fromCRD.KubernetesUseConfigMaps
 	result.DockerImage = fromCRD.DockerImage
 	result.Workers = fromCRD.Workers
 	result.MinInstances = fromCRD.MinInstances

@@ -150,51 +151,51 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur
 	result.ScalyrCPULimit = fromCRD.Scalyr.ScalyrCPULimit
 	result.ScalyrMemoryLimit = fromCRD.Scalyr.ScalyrMemoryLimit

-	// Connection pool. Looks like we can't use defaulting in CRD before 1.17,
+	// Connection pooler. Looks like we can't use defaulting in CRD before 1.17,
 	// so ensure default values here.
-	result.ConnectionPool.NumberOfInstances = util.CoalesceInt32(
-		fromCRD.ConnectionPool.NumberOfInstances,
+	result.ConnectionPooler.NumberOfInstances = util.CoalesceInt32(
+		fromCRD.ConnectionPooler.NumberOfInstances,
 		int32ToPointer(2))

-	result.ConnectionPool.NumberOfInstances = util.MaxInt32(
-		result.ConnectionPool.NumberOfInstances,
+	result.ConnectionPooler.NumberOfInstances = util.MaxInt32(
+		result.ConnectionPooler.NumberOfInstances,
 		int32ToPointer(2))

-	result.ConnectionPool.Schema = util.Coalesce(
-		fromCRD.ConnectionPool.Schema,
-		constants.ConnectionPoolSchemaName)
+	result.ConnectionPooler.Schema = util.Coalesce(
+		fromCRD.ConnectionPooler.Schema,
+		constants.ConnectionPoolerSchemaName)

-	result.ConnectionPool.User = util.Coalesce(
-		fromCRD.ConnectionPool.User,
-		constants.ConnectionPoolUserName)
+	result.ConnectionPooler.User = util.Coalesce(
+		fromCRD.ConnectionPooler.User,
+		constants.ConnectionPoolerUserName)

-	result.ConnectionPool.Image = util.Coalesce(
-		fromCRD.ConnectionPool.Image,
+	result.ConnectionPooler.Image = util.Coalesce(
+		fromCRD.ConnectionPooler.Image,
 		"registry.opensource.zalan.do/acid/pgbouncer")

-	result.ConnectionPool.Mode = util.Coalesce(
-		fromCRD.ConnectionPool.Mode,
-		constants.ConnectionPoolDefaultMode)
+	result.ConnectionPooler.Mode = util.Coalesce(
+		fromCRD.ConnectionPooler.Mode,
+		constants.ConnectionPoolerDefaultMode)

-	result.ConnectionPool.ConnPoolDefaultCPURequest = util.Coalesce(
-		fromCRD.ConnectionPool.DefaultCPURequest,
-		constants.ConnectionPoolDefaultCpuRequest)
+	result.ConnectionPooler.ConnectionPoolerDefaultCPURequest = util.Coalesce(
+		fromCRD.ConnectionPooler.DefaultCPURequest,
+		constants.ConnectionPoolerDefaultCpuRequest)

-	result.ConnectionPool.ConnPoolDefaultMemoryRequest = util.Coalesce(
-		fromCRD.ConnectionPool.DefaultMemoryRequest,
-		constants.ConnectionPoolDefaultMemoryRequest)
+	result.ConnectionPooler.ConnectionPoolerDefaultMemoryRequest = util.Coalesce(
+		fromCRD.ConnectionPooler.DefaultMemoryRequest,
+		constants.ConnectionPoolerDefaultMemoryRequest)

-	result.ConnectionPool.ConnPoolDefaultCPULimit = util.Coalesce(
-		fromCRD.ConnectionPool.DefaultCPULimit,
-		constants.ConnectionPoolDefaultCpuLimit)
+	result.ConnectionPooler.ConnectionPoolerDefaultCPULimit = util.Coalesce(
+		fromCRD.ConnectionPooler.DefaultCPULimit,
+		constants.ConnectionPoolerDefaultCpuLimit)

-	result.ConnectionPool.ConnPoolDefaultMemoryLimit = util.Coalesce(
-		fromCRD.ConnectionPool.DefaultMemoryLimit,
-		constants.ConnectionPoolDefaultMemoryLimit)
+	result.ConnectionPooler.ConnectionPoolerDefaultMemoryLimit = util.Coalesce(
+		fromCRD.ConnectionPooler.DefaultMemoryLimit,
+		constants.ConnectionPoolerDefaultMemoryLimit)

-	result.ConnectionPool.MaxDBConnections = util.CoalesceInt32(
-		fromCRD.ConnectionPool.MaxDBConnections,
-		int32ToPointer(constants.ConnPoolMaxDBConnections))
+	result.ConnectionPooler.MaxDBConnections = util.CoalesceInt32(
+		fromCRD.ConnectionPooler.MaxDBConnections,
+		int32ToPointer(constants.ConnectionPoolerMaxDBConnections))

 	return result
 }

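These helpers are used here for their first-non-empty semantics. Assuming that reading of util.Coalesce, util.CoalesceInt32 and util.MaxInt32, minimal stand-ins look like this:

package config

// coalesce keeps the CRD-provided value if present, otherwise falls back
// to the hard-coded default.
func coalesce(val, def string) string {
	if val == "" {
		return def
	}
	return val
}

// coalesceInt32 is the same idea for optional integer options.
func coalesceInt32(val, def *int32) *int32 {
	if val == nil {
		return def
	}
	return val
}

// maxInt32 enforces a floor, e.g. at least 2 pooler instances above.
func maxInt32(a, b *int32) *int32 {
	if a == nil || (b != nil && *b > *a) {
		return b
	}
	return a
}
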
@@ -31,7 +31,7 @@ const (
 	RoleOriginInfrastructure
 	RoleOriginTeamsAPI
 	RoleOriginSystem
-	RoleConnectionPool
+	RoleConnectionPooler
 )

 type syncUserOperation int

@@ -180,8 +180,8 @@ func (r RoleOrigin) String() string {
 		return "teams API role"
 	case RoleOriginSystem:
 		return "system role"
-	case RoleConnectionPool:
-		return "connection pool role"
+	case RoleConnectionPooler:
+		return "connection pooler role"
 	default:
 		panic(fmt.Sprintf("bogus role origin value %d", r))
 	}

@@ -85,17 +85,17 @@ type LogicalBackup struct {
 }

 // Operator options for connection pooler
-type ConnectionPool struct {
-	NumberOfInstances            *int32 `name:"connection_pool_number_of_instances" default:"2"`
-	Schema                       string `name:"connection_pool_schema" default:"pooler"`
-	User                         string `name:"connection_pool_user" default:"pooler"`
-	Image                        string `name:"connection_pool_image" default:"registry.opensource.zalan.do/acid/pgbouncer"`
-	Mode                         string `name:"connection_pool_mode" default:"transaction"`
-	MaxDBConnections             *int32 `name:"connection_pool_max_db_connections" default:"60"`
-	ConnPoolDefaultCPURequest    string `name:"connection_pool_default_cpu_request" default:"500m"`
-	ConnPoolDefaultMemoryRequest string `name:"connection_pool_default_memory_request" default:"100Mi"`
-	ConnPoolDefaultCPULimit      string `name:"connection_pool_default_cpu_limit" default:"1"`
-	ConnPoolDefaultMemoryLimit   string `name:"connection_pool_default_memory_limit" default:"100Mi"`
+type ConnectionPooler struct {
+	NumberOfInstances                    *int32 `name:"connection_pooler_number_of_instances" default:"2"`
+	Schema                               string `name:"connection_pooler_schema" default:"pooler"`
+	User                                 string `name:"connection_pooler_user" default:"pooler"`
+	Image                                string `name:"connection_pooler_image" default:"registry.opensource.zalan.do/acid/pgbouncer"`
+	Mode                                 string `name:"connection_pooler_mode" default:"transaction"`
+	MaxDBConnections                     *int32 `name:"connection_pooler_max_db_connections" default:"60"`
+	ConnectionPoolerDefaultCPURequest    string `name:"connection_pooler_default_cpu_request" default:"500m"`
+	ConnectionPoolerDefaultMemoryRequest string `name:"connection_pooler_default_memory_request" default:"100Mi"`
+	ConnectionPoolerDefaultCPULimit      string `name:"connection_pooler_default_cpu_limit" default:"1"`
+	ConnectionPoolerDefaultMemoryLimit   string `name:"connection_pooler_default_memory_limit" default:"100Mi"`
 }

 // Config describes operator config

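The `name` struct tags name the operator configuration keys for these options and the `default` tags seed their values; the config package fills the fields via reflection. A tiny illustration of reading such tags, with Pooler as a one-field stand-in for the struct above:

package main

import (
	"fmt"
	"reflect"
)

type Pooler struct {
	Mode string `name:"connection_pooler_mode" default:"transaction"`
}

func main() {
	f := reflect.TypeOf(Pooler{}).Field(0)
	fmt.Printf("key=%q default=%q\n", f.Tag.Get("name"), f.Tag.Get("default"))
	// Output: key="connection_pooler_mode" default="transaction"
}
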
@@ -105,13 +105,14 @@ type Config struct {
 	Auth
 	Scalyr
 	LogicalBackup
-	ConnectionPool
+	ConnectionPooler

-	WatchedNamespace string `name:"watched_namespace"` // special values: "*" means 'watch all namespaces', the empty string "" means 'watch a namespace where operator is deployed to'
-	EtcdHost         string `name:"etcd_host" default:""` // special values: the empty string "" means Patroni will use K8s as a DCS
-	DockerImage      string `name:"docker_image" default:"registry.opensource.zalan.do/acid/spilo-12:1.6-p2"`
-	Sidecars         map[string]string `name:"sidecar_docker_images"`
-	PodServiceAccountName string `name:"pod_service_account_name" default:"postgres-pod"`
+	WatchedNamespace        string            `name:"watched_namespace"` // special values: "*" means 'watch all namespaces', the empty string "" means 'watch a namespace where operator is deployed to'
+	KubernetesUseConfigMaps bool              `name:"kubernetes_use_configmaps" default:"false"`
+	EtcdHost                string            `name:"etcd_host" default:""` // special values: the empty string "" means Patroni will use K8s as a DCS
+	DockerImage             string            `name:"docker_image" default:"registry.opensource.zalan.do/acid/spilo-12:1.6-p2"`
+	Sidecars                map[string]string `name:"sidecar_docker_images"`
+	PodServiceAccountName   string            `name:"pod_service_account_name" default:"postgres-pod"`
 	// value of this string must be valid JSON or YAML; see initPodServiceAccount
 	PodServiceAccountDefinition            string `name:"pod_service_account_definition" default:""`
 	PodServiceAccountRoleBindingDefinition string `name:"pod_service_account_role_binding_definition" default:""`

@@ -214,9 +215,9 @@ func validate(cfg *Config) (err error) {
 		err = fmt.Errorf("number of workers should be higher than 0")
 	}

-	if *cfg.ConnectionPool.NumberOfInstances < constants.ConnPoolMinInstances {
-		msg := "number of connection pool instances should be higher than %d"
-		err = fmt.Errorf(msg, constants.ConnPoolMinInstances)
+	if *cfg.ConnectionPooler.NumberOfInstances < constants.ConnectionPoolerMinInstances {
+		msg := "number of connection pooler instances should be higher than %d"
+		err = fmt.Errorf(msg, constants.ConnectionPoolerMinInstances)
 	}
 	return
 }

@@ -1,18 +1,18 @@
 package constants

-// Connection pool specific constants
+// Connection pooler specific constants
 const (
-	ConnectionPoolUserName             = "pooler"
-	ConnectionPoolSchemaName           = "pooler"
-	ConnectionPoolDefaultType          = "pgbouncer"
-	ConnectionPoolDefaultMode          = "transaction"
-	ConnectionPoolDefaultCpuRequest    = "500m"
-	ConnectionPoolDefaultCpuLimit      = "1"
-	ConnectionPoolDefaultMemoryRequest = "100Mi"
-	ConnectionPoolDefaultMemoryLimit   = "100Mi"
+	ConnectionPoolerUserName             = "pooler"
+	ConnectionPoolerSchemaName           = "pooler"
+	ConnectionPoolerDefaultType          = "pgbouncer"
+	ConnectionPoolerDefaultMode          = "transaction"
+	ConnectionPoolerDefaultCpuRequest    = "500m"
+	ConnectionPoolerDefaultCpuLimit      = "1"
+	ConnectionPoolerDefaultMemoryRequest = "100Mi"
+	ConnectionPoolerDefaultMemoryLimit   = "100Mi"

-	ConnPoolContainer            = 0
-	ConnPoolMaxDBConnections     = 60
-	ConnPoolMaxClientConnections = 10000
-	ConnPoolMinInstances         = 2
+	ConnectionPoolerContainer            = 0
+	ConnectionPoolerMaxDBConnections     = 60
+	ConnectionPoolerMaxClientConnections = 10000
+	ConnectionPoolerMinInstances         = 2
 )

@@ -4,7 +4,7 @@ package constants
 const (
 	PasswordLength              = 64
 	SuperuserKeyName            = "superuser"
-	ConnectionPoolUserKeyName   = "pooler"
+	ConnectionPoolerUserKeyName = "pooler"
 	ReplicationUserKeyName      = "replication"
 	RoleFlagSuperuser           = "SUPERUSER"
 	RoleFlagInherit             = "INHERIT"