merge with master

commit e580da8c80

@@ -8,7 +8,7 @@ branches:
 language: go
 
 go:
-  - "1.12.x"
+  - "1.14.x"
 
 before_install:
   - go get github.com/mattn/goveralls
 
@@ -17,8 +17,9 @@ pipelines with no access to Kubernetes directly.
 * Rolling updates on Postgres cluster changes
 * Volume resize without Pod restarts
+* Database connection pooler
 * Cloning Postgres clusters
-* Logical Backups to S3 Bucket
+* Logical backups to S3 Bucket
 * Standby cluster from S3 WAL archive
 * Configurable for non-cloud environments
 * UI to create and edit Postgres cluster manifests
 
@@ -66,6 +66,8 @@ spec:
             type: boolean
           etcd_host:
             type: string
+          kubernetes_use_configmaps:
+            type: boolean
           max_instances:
             type: integer
             minimum: -1  # -1 = disabled
 
@@ -318,44 +320,44 @@ spec:
                 pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
               scalyr_server_url:
                 type: string
-          connection_pool:
+          connection_pooler:
             type: object
             properties:
-              connection_pool_schema:
+              connection_pooler_schema:
                 type: string
                 #default: "pooler"
-              connection_pool_user:
+              connection_pooler_user:
                 type: string
                 #default: "pooler"
-              connection_pool_image:
+              connection_pooler_image:
                 type: string
                 #default: "registry.opensource.zalan.do/acid/pgbouncer"
-              connection_pool_max_db_connections:
+              connection_pooler_max_db_connections:
                 type: integer
                 #default: 60
-              connection_pool_mode:
+              connection_pooler_mode:
                 type: string
                 enum:
                   - "session"
                   - "transaction"
                 #default: "transaction"
-              connection_pool_number_of_instances:
+              connection_pooler_number_of_instances:
                 type: integer
                 minimum: 2
                 #default: 2
-              connection_pool_default_cpu_limit:
+              connection_pooler_default_cpu_limit:
                 type: string
                 pattern: '^(\d+m|\d+(\.\d{1,3})?)$'
                 #default: "1"
-              connection_pool_default_cpu_request:
+              connection_pooler_default_cpu_request:
                 type: string
                 pattern: '^(\d+m|\d+(\.\d{1,3})?)$'
                 #default: "500m"
-              connection_pool_default_memory_limit:
+              connection_pooler_default_memory_limit:
                 type: string
                 pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
                 #default: "100Mi"
-              connection_pool_default_memory_request:
+              connection_pooler_default_memory_request:
                 type: string
                 pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
                 #default: "100Mi"
 
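For reference, the renamed CRD keys correspond to an operator configuration like the following sketch (the values mirror the chart defaults further down in this diff; the exact nesting under `configuration` is assumed from the CRD layout):

```yaml
configuration:
  connection_pooler:
    connection_pooler_schema: "pooler"
    connection_pooler_user: "pooler"
    connection_pooler_image: "registry.opensource.zalan.do/acid/pgbouncer"
    connection_pooler_max_db_connections: 60
    connection_pooler_mode: "transaction"
    connection_pooler_number_of_instances: 2
    connection_pooler_default_cpu_request: "500m"
    connection_pooler_default_memory_request: "100Mi"
    connection_pooler_default_cpu_limit: "1"
    connection_pooler_default_memory_limit: "100Mi"
```
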
@@ -106,7 +106,7 @@ spec:
             uid:
               format: uuid
               type: string
-          connectionPool:
+          connectionPooler:
             type: object
             properties:
               dockerImage:
 
@@ -162,7 +162,7 @@ spec:
           # Note: usernames specified here as database owners must be declared in the users key of the spec key.
           dockerImage:
             type: string
-          enableConnectionPool:
+          enableConnectionPooler:
             type: boolean
           enableLogicalBackup:
             type: boolean
 
@@ -20,5 +20,5 @@ data:
 {{ toYaml .Values.configDebug | indent 2 }}
 {{ toYaml .Values.configLoggingRestApi | indent 2 }}
 {{ toYaml .Values.configTeamsApi | indent 2 }}
-{{ toYaml .Values.configConnectionPool | indent 2 }}
+{{ toYaml .Values.configConnectionPooler | indent 2 }}
 {{- end }}
 
@@ -34,6 +34,6 @@ configuration:
 {{ toYaml .Values.configLoggingRestApi | indent 4 }}
   scalyr:
 {{ toYaml .Values.configScalyr | indent 4 }}
-  connection_pool:
-{{ toYaml .Values.configConnectionPool | indent 4 }}
+  connection_pooler:
+{{ toYaml .Values.configConnectionPooler | indent 4 }}
 {{- end }}
 
@@ -23,6 +23,8 @@ configGeneral:
   enable_shm_volume: true
   # etcd connection string for Patroni. Empty uses K8s-native DCS.
   etcd_host: ""
+  # Select if setup uses endpoints (default), or configmaps to manage leader (DCS=k8s)
+  # kubernetes_use_configmaps: false
   # Spilo docker image
   docker_image: registry.opensource.zalan.do/acid/spilo-12:1.6-p2
   # max number of instances in Postgres cluster. -1 = no limit
 
@@ -267,24 +269,24 @@ configScalyr:
   # Memory request value for the Scalyr sidecar
   scalyr_memory_request: 50Mi
 
-configConnectionPool:
+configConnectionPooler:
   # db schema to install lookup function into
-  connection_pool_schema: "pooler"
+  connection_pooler_schema: "pooler"
   # db user for pooler to use
-  connection_pool_user: "pooler"
+  connection_pooler_user: "pooler"
   # docker image
-  connection_pool_image: "registry.opensource.zalan.do/acid/pgbouncer"
+  connection_pooler_image: "registry.opensource.zalan.do/acid/pgbouncer"
   # max db connections the pooler should hold
-  connection_pool_max_db_connections: 60
+  connection_pooler_max_db_connections: 60
   # default pooling mode
-  connection_pool_mode: "transaction"
+  connection_pooler_mode: "transaction"
   # number of pooler instances
-  connection_pool_number_of_instances: 2
+  connection_pooler_number_of_instances: 2
   # default resources
-  connection_pool_default_cpu_request: 500m
-  connection_pool_default_memory_request: 100Mi
-  connection_pool_default_cpu_limit: "1"
-  connection_pool_default_memory_limit: 100Mi
+  connection_pooler_default_cpu_request: 500m
+  connection_pooler_default_memory_request: 100Mi
+  connection_pooler_default_cpu_limit: "1"
+  connection_pooler_default_memory_limit: 100Mi
 
 rbac:
   # Specifies whether RBAC resources should be created
 
@@ -23,6 +23,8 @@ configGeneral:
   enable_shm_volume: "true"
   # etcd connection string for Patroni. Empty uses K8s-native DCS.
   etcd_host: ""
+  # Select if setup uses endpoints (default), or configmaps to manage leader (DCS=k8s)
+  # kubernetes_use_configmaps: "false"
   # Spilo docker image
   docker_image: registry.opensource.zalan.do/acid/spilo-12:1.6-p2
   # max number of instances in Postgres cluster. -1 = no limit
 
@@ -244,24 +246,24 @@ configTeamsApi:
   # teams_api_url: http://fake-teams-api.default.svc.cluster.local
 
 # configure connection pooler deployment created by the operator
-configConnectionPool:
+configConnectionPooler:
   # db schema to install lookup function into
-  connection_pool_schema: "pooler"
+  connection_pooler_schema: "pooler"
   # db user for pooler to use
-  connection_pool_user: "pooler"
+  connection_pooler_user: "pooler"
   # docker image
-  connection_pool_image: "registry.opensource.zalan.do/acid/pgbouncer"
+  connection_pooler_image: "registry.opensource.zalan.do/acid/pgbouncer"
   # max db connections the pooler should hold
-  connection_pool_max_db_connections: 60
+  connection_pooler_max_db_connections: 60
   # default pooling mode
-  connection_pool_mode: "transaction"
+  connection_pooler_mode: "transaction"
   # number of pooler instances
-  connection_pool_number_of_instances: 2
+  connection_pooler_number_of_instances: 2
   # default resources
-  connection_pool_default_cpu_request: 500m
-  connection_pool_default_memory_request: 100Mi
-  connection_pool_default_cpu_limit: "1"
-  connection_pool_default_memory_limit: 100Mi
+  connection_pooler_default_cpu_request: 500m
+  connection_pooler_default_memory_request: 100Mi
+  connection_pooler_default_cpu_limit: "1"
+  connection_pooler_default_memory_limit: 100Mi
 
 rbac:
   # Specifies whether RBAC resources should be created
 
@@ -12,7 +12,7 @@ pipeline:
 - desc: 'Install go'
   cmd: |
     cd /tmp
-    wget -q https://storage.googleapis.com/golang/go1.12.linux-amd64.tar.gz -O go.tar.gz
+    wget -q https://storage.googleapis.com/golang/go1.14.linux-amd64.tar.gz -O go.tar.gz
     tar -xf go.tar.gz
     mv go /usr/local
     ln -s /usr/local/go/bin/go /usr/bin/go
 
@@ -140,10 +140,10 @@ These parameters are grouped directly under the `spec` key in the manifest.
   is `false`, then no volume will be mounted no matter how operator was
   configured (so you can override the operator configuration). Optional.
 
-* **enableConnectionPool**
-  Tells the operator to create a connection pool with a database. If this
-  field is true, a connection pool deployment will be created even if
-  `connectionPool` section is empty. Optional, not set by default.
+* **enableConnectionPooler**
+  Tells the operator to create a connection pooler with a database. If this
+  field is true, a connection pooler deployment will be created even if
+  `connectionPooler` section is empty. Optional, not set by default.
 
 * **enableLogicalBackup**
   Determines if the logical backup of this cluster should be taken and uploaded
 
@@ -365,34 +365,34 @@ CPU and memory limits for the sidecar container.
   memory limits for the sidecar container. Optional, overrides the
   `default_memory_limits` operator configuration parameter. Optional.
 
-## Connection pool
+## Connection pooler
 
-Parameters are grouped under the `connectionPool` top-level key and specify
-configuration for connection pool. If this section is not empty, a connection
-pool will be created for a database even if `enableConnectionPool` is not
+Parameters are grouped under the `connectionPooler` top-level key and specify
+configuration for connection pooler. If this section is not empty, a connection
+pooler will be created for a database even if `enableConnectionPooler` is not
 present.
 
 * **numberOfInstances**
-  How many instances of connection pool to create.
+  How many instances of connection pooler to create.
 
 * **schema**
   Schema to create for credentials lookup function.
 
 * **user**
-  User to create for connection pool to be able to connect to a database.
+  User to create for connection pooler to be able to connect to a database.
 
 * **dockerImage**
-  Which docker image to use for connection pool deployment.
+  Which docker image to use for connection pooler deployment.
 
 * **maxDBConnections**
   How many connections the pooler can hold at most. This value is divided
   among the pooler pods.
 
 * **mode**
-  In which mode to run connection pool, transaction or session.
+  In which mode to run connection pooler, transaction or session.
 
 * **resources**
-  Resource configuration for connection pool deployment.
+  Resource configuration for connection pooler deployment.
 
 ## Custom TLS certificates
 
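Taken together, the documented keys give a `connectionPooler` section like this sketch (values are illustrative and mirror the operator defaults; the `resources` layout follows the docs/user.md example in this diff):

```yaml
spec:
  connectionPooler:
    numberOfInstances: 2
    mode: "transaction"
    schema: "pooler"
    user: "pooler"
    dockerImage: "registry.opensource.zalan.do/acid/pgbouncer"
    maxDBConnections: 60
    resources:
      requests:
        cpu: 500m
        memory: 100Mi
      limits:
        cpu: "1"
        memory: 100Mi
```
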
@@ -80,6 +80,12 @@ Those are top-level keys, containing both leaf keys and groups.
   Patroni native Kubernetes support is used. The default is empty (use
   Kubernetes-native DCS).
 
+* **kubernetes_use_configmaps**
+  Select if setup uses endpoints (default), or configmaps to manage leader when
+  DCS is kubernetes (not etcd or similar). In OpenShift it is not possible to
+  use endpoints option, and configmaps is required. By default,
+  `kubernetes_use_configmaps: false`, meaning endpoints will be used.
+
 * **docker_image**
   Spilo Docker image for Postgres instances. For production, don't rely on the
   default image, as it might be not the most up-to-date one. Instead, build
 
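A minimal sketch of switching the operator to configmaps-based leader management, matching the chart values elsewhere in this diff (only relevant when the DCS is Kubernetes itself, e.g. on OpenShift):

```yaml
configGeneral:
  # endpoints are not available on OpenShift, so configmaps must be used there
  kubernetes_use_configmaps: true
```
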
@@ -597,39 +603,39 @@ scalyr sidecar. In the CRD-based configuration they are grouped under the
 * **scalyr_memory_limit**
   Memory limit value for the Scalyr sidecar. The default is `500Mi`.
 
-## Connection pool configuration
+## Connection pooler configuration
 
-Parameters are grouped under the `connection_pool` top-level key and specify
-default configuration for connection pool, if a postgres manifest requests it
+Parameters are grouped under the `connection_pooler` top-level key and specify
+default configuration for connection pooler, if a postgres manifest requests it
 but does not specify some of the parameters. All of them are optional with the
 operator being able to provide some reasonable defaults.
 
-* **connection_pool_number_of_instances**
-  How many instances of connection pool to create. Default is 2 which is also
+* **connection_pooler_number_of_instances**
+  How many instances of connection pooler to create. Default is 2 which is also
   the required minimum.
 
-* **connection_pool_schema**
+* **connection_pooler_schema**
   Schema to create for credentials lookup function. Default is `pooler`.
 
-* **connection_pool_user**
-  User to create for connection pool to be able to connect to a database.
+* **connection_pooler_user**
+  User to create for connection pooler to be able to connect to a database.
   Default is `pooler`.
 
-* **connection_pool_image**
-  Docker image to use for connection pool deployment.
+* **connection_pooler_image**
+  Docker image to use for connection pooler deployment.
   Default: "registry.opensource.zalan.do/acid/pgbouncer"
 
-* **connection_pool_max_db_connections**
+* **connection_pooler_max_db_connections**
   How many connections the pooler can hold at most. This value is divided
   among the pooler pods. Default is 60 which will make up 30 connections per
   pod for the default setup with two instances.
 
-* **connection_pool_mode**
-  Default pool mode, `session` or `transaction`. Default is `transaction`.
+* **connection_pooler_mode**
+  Default pooler mode, `session` or `transaction`. Default is `transaction`.
 
-* **connection_pool_default_cpu_request**
-  **connection_pool_default_memory_reques**
-  **connection_pool_default_cpu_limit**
-  **connection_pool_default_memory_limit**
-  Default resource configuration for connection pool deployment. The internal
+* **connection_pooler_default_cpu_request**
+  **connection_pooler_default_memory_request**
+  **connection_pooler_default_cpu_limit**
+  **connection_pooler_default_memory_limit**
+  Default resource configuration for connection pooler deployment. The internal
 default for memory request and limit is `100Mi`, for CPU it is `500m` and `1`.
 
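These operator-level values act as fallbacks: a cluster manifest that sets only some `connectionPooler` fields gets the rest from here. A hypothetical example:

```yaml
# cluster manifest: only the pooling mode is pinned
spec:
  connectionPooler:
    mode: "session"
# the operator fills in the remaining settings from its configuration,
# e.g. two instances via connection_pooler_number_of_instances and the
# default image via connection_pooler_image
```
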
docs/user.md

@@ -512,39 +512,38 @@ monitoring is outside the scope of operator responsibilities. See
 [administrator documentation](administrator.md) for details on how backups are
 executed.
 
-## Connection pool
+## Connection pooler
 
-The operator can create a database side connection pool for those applications,
-where an application side pool is not feasible, but a number of connections is
-high. To create a connection pool together with a database, modify the
+The operator can create a database side connection pooler for those applications
+where an application side pooler is not feasible, but a number of connections is
+high. To create a connection pooler together with a database, modify the
 manifest:
 
 ```yaml
 spec:
-  enableConnectionPool: true
+  enableConnectionPooler: true
 ```
 
-This will tell the operator to create a connection pool with default
+This will tell the operator to create a connection pooler with default
 configuration, through which one can access the master via a separate service
-`{cluster-name}-pooler`. In most of the cases provided default configuration
-should be good enough.
-
-To configure a new connection pool, specify:
+`{cluster-name}-pooler`. In most of the cases the
+[default configuration](reference/operator_parameters.md#connection-pooler-configuration)
+should be good enough. To configure a new connection pooler individually for
+each Postgres cluster, specify:
 
 ```
 spec:
-  connectionPool:
-    # how many instances of connection pool to create
-    number_of_instances: 2
+  connectionPooler:
+    # how many instances of connection pooler to create
+    numberOfInstances: 2
 
     # in which mode to run, session or transaction
     mode: "transaction"
 
-    # schema, which operator will create to install credentials lookup
-    # function
+    # schema, which operator will create to install credentials lookup function
     schema: "pooler"
 
-    # user, which operator will create for connection pool
+    # user, which operator will create for connection pooler
     user: "pooler"
 
     # resources for each instance
 
@@ -557,13 +556,17 @@ spec:
       memory: 100Mi
 ```
 
-By default `pgbouncer` is used to create a connection pool. To find out about
-pool modes see [docs](https://www.pgbouncer.org/config.html#pool_mode) (but it
-should be general approach between different implementation).
+The `enableConnectionPooler` flag is not required when the `connectionPooler`
+section is present in the manifest, but it can be used to disable or remove the
+pooler while keeping its configuration.
 
-Note, that using `pgbouncer` means meaningful resource CPU limit should be less
-than 1 core (there is a way to utilize more than one, but in K8S it's easier
-just to spin up more instances).
+By default, `pgbouncer` is used as the connection pooler. To find out about
+pooler modes, read the `pgbouncer` [docs](https://www.pgbouncer.org/config.html#pool_mode)
+(though the general approach should be similar across implementations).
+
+Note that with `pgbouncer` a meaningful CPU resource limit should be 1 core or
+less (there is a way to utilize more than one, but in K8s it's easier just to
+spin up more instances).
 
 ## Custom TLS certificates
 
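A sketch of using the flag to temporarily remove the pooler while keeping its settings:

```yaml
spec:
  enableConnectionPooler: false  # the pooler deployment is removed
  connectionPooler:              # configuration is kept for re-enabling later
    numberOfInstances: 2
    mode: "transaction"
```
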
@@ -572,10 +575,15 @@ However, this certificate cannot be verified and thus doesn't protect from
 active MITM attacks. In this section we show how to specify a custom TLS
 certificate which is mounted in the database pods via a K8s Secret.
 
-Before applying these changes, the operator must also be configured with the
-`spilo_fsgroup` set to the GID matching the postgres user group. If the value
-is not provided, the cluster will default to `103` which is the GID from the
-default spilo image.
+Before applying these changes, in K8s the operator must also be configured with
+the `spilo_fsgroup` set to the GID matching the postgres user group. If you
+don't know the value, use `103` which is the GID from the default spilo image
+(`spilo_fsgroup=103` in the cluster request spec).
+
+OpenShift allocates the users and groups dynamically (based on scc), and their
+range is different in every namespace. Due to this dynamic behaviour, it's not
+trivial to know at deploy time the uid/gid of the user in the cluster, so in
+OpenShift you may want to skip the spilo_fsgroup setting.
 
 Upload the cert as a kubernetes secret:
 ```sh
 
@@ -69,6 +69,92 @@ class EndToEndTestCase(unittest.TestCase):
             print('Operator log: {}'.format(k8s.get_operator_log()))
             raise
 
+    @timeout_decorator.timeout(TEST_TIMEOUT_SEC)
+    def test_enable_disable_connection_pooler(self):
+        '''
+        Create a database without connection pooler, then turn it on, scale
+        up, turn it off and on again. Test with different ways of doing this
+        (via enableConnectionPooler or the connectionPooler configuration
+        section). At the end turn the connection pooler off to not interfere
+        with other tests.
+        '''
+        k8s = self.k8s
+        service_labels = {
+            'cluster-name': 'acid-minimal-cluster',
+        }
+        pod_labels = dict({
+            'connection-pooler': 'acid-minimal-cluster-pooler',
+        })
+
+        pod_selector = to_selector(pod_labels)
+        service_selector = to_selector(service_labels)
+
+        try:
+            # enable connection pooler
+            k8s.api.custom_objects_api.patch_namespaced_custom_object(
+                'acid.zalan.do', 'v1', 'default',
+                'postgresqls', 'acid-minimal-cluster',
+                {
+                    'spec': {
+                        'enableConnectionPooler': True,
+                    }
+                })
+            k8s.wait_for_pod_start(pod_selector)
+
+            pods = k8s.api.core_v1.list_namespaced_pod(
+                'default', label_selector=pod_selector
+            ).items
+
+            self.assertTrue(pods, 'No connection pooler pods')
+
+            k8s.wait_for_service(service_selector)
+            services = k8s.api.core_v1.list_namespaced_service(
+                'default', label_selector=service_selector
+            ).items
+            services = [
+                s for s in services
+                if s.metadata.name.endswith('pooler')
+            ]
+
+            self.assertTrue(services, 'No connection pooler service')
+
+            # scale up connection pooler deployment
+            k8s.api.custom_objects_api.patch_namespaced_custom_object(
+                'acid.zalan.do', 'v1', 'default',
+                'postgresqls', 'acid-minimal-cluster',
+                {
+                    'spec': {
+                        'connectionPooler': {
+                            'numberOfInstances': 2,
+                        },
+                    }
+                })
+
+            k8s.wait_for_running_pods(pod_selector, 2)
+
+            # turn it off, keeping configuration section
+            k8s.api.custom_objects_api.patch_namespaced_custom_object(
+                'acid.zalan.do', 'v1', 'default',
+                'postgresqls', 'acid-minimal-cluster',
+                {
+                    'spec': {
+                        'enableConnectionPooler': False,
+                    }
+                })
+            k8s.wait_for_pods_to_stop(pod_selector)
+
+            k8s.api.custom_objects_api.patch_namespaced_custom_object(
+                'acid.zalan.do', 'v1', 'default',
+                'postgresqls', 'acid-minimal-cluster',
+                {
+                    'spec': {
+                        'enableConnectionPooler': True,
+                    }
+                })
+            k8s.wait_for_pod_start(pod_selector)
+        except timeout_decorator.TimeoutError:
+            print('Operator log: {}'.format(k8s.get_operator_log()))
+            raise
+
     @timeout_decorator.timeout(TEST_TIMEOUT_SEC)
     def test_enable_load_balancer(self):
         '''
 
@@ -290,6 +376,10 @@ class EndToEndTestCase(unittest.TestCase):
 
         # patch also node where master ran before
         k8s.api.core_v1.patch_node(current_master_node, patch_readiness_label)
 
+        # wait a little before proceeding with the pod distribution test
+        time.sleep(k8s.RETRY_TIMEOUT_SEC)
+
         # toggle pod anti affinity to move replica away from master node
         self.assert_distributed_pods(new_master_node, new_replica_nodes, cluster_label)
 
@@ -349,92 +439,6 @@ class EndToEndTestCase(unittest.TestCase):
         }
         k8s.update_config(unpatch_custom_service_annotations)
 
-    @timeout_decorator.timeout(TEST_TIMEOUT_SEC)
-    def test_enable_disable_connection_pool(self):
-        '''
-        For a database without connection pool, then turns it on, scale up,
-        turn off and on again. Test with different ways of doing this (via
-        enableConnectionPool or connectionPool configuration section). At the
-        end turn the connection pool off to not interfere with other tests.
-        '''
-        k8s = self.k8s
-        service_labels = {
-            'cluster-name': 'acid-minimal-cluster',
-        }
-        pod_labels = dict({
-            'connection-pool': 'acid-minimal-cluster-pooler',
-        })
-
-        pod_selector = to_selector(pod_labels)
-        service_selector = to_selector(service_labels)
-
-        try:
-            # enable connection pool
-            k8s.api.custom_objects_api.patch_namespaced_custom_object(
-                'acid.zalan.do', 'v1', 'default',
-                'postgresqls', 'acid-minimal-cluster',
-                {
-                    'spec': {
-                        'enableConnectionPool': True,
-                    }
-                })
-            k8s.wait_for_pod_start(pod_selector)
-
-            pods = k8s.api.core_v1.list_namespaced_pod(
-                'default', label_selector=pod_selector
-            ).items
-
-            self.assertTrue(pods, 'No connection pool pods')
-
-            k8s.wait_for_service(service_selector)
-            services = k8s.api.core_v1.list_namespaced_service(
-                'default', label_selector=service_selector
-            ).items
-            services = [
-                s for s in services
-                if s.metadata.name.endswith('pooler')
-            ]
-
-            self.assertTrue(services, 'No connection pool service')
-
-            # scale up connection pool deployment
-            k8s.api.custom_objects_api.patch_namespaced_custom_object(
-                'acid.zalan.do', 'v1', 'default',
-                'postgresqls', 'acid-minimal-cluster',
-                {
-                    'spec': {
-                        'connectionPool': {
-                            'numberOfInstances': 2,
-                        },
-                    }
-                })
-
-            k8s.wait_for_running_pods(pod_selector, 2)
-
-            # turn it off, keeping configuration section
-            k8s.api.custom_objects_api.patch_namespaced_custom_object(
-                'acid.zalan.do', 'v1', 'default',
-                'postgresqls', 'acid-minimal-cluster',
-                {
-                    'spec': {
-                        'enableConnectionPool': False,
-                    }
-                })
-            k8s.wait_for_pods_to_stop(pod_selector)
-
-            k8s.api.custom_objects_api.patch_namespaced_custom_object(
-                'acid.zalan.do', 'v1', 'default',
-                'postgresqls', 'acid-minimal-cluster',
-                {
-                    'spec': {
-                        'enableConnectionPool': True,
-                    }
-                })
-            k8s.wait_for_pod_start(pod_selector)
-        except timeout_decorator.TimeoutError:
-            print('Operator log: {}'.format(k8s.get_operator_log()))
-            raise
-
     @timeout_decorator.timeout(TEST_TIMEOUT_SEC)
     def test_taint_based_eviction(self):
         '''
 
@@ -473,6 +477,9 @@ class EndToEndTestCase(unittest.TestCase):
         }
         k8s.update_config(patch_toleration_config)
 
+        # wait a little before proceeding with the pod distribution test
+        time.sleep(k8s.RETRY_TIMEOUT_SEC)
+
         # toggle pod anti affinity to move replica away from master node
         self.assert_distributed_pods(new_master_node, new_replica_nodes, cluster_label)
 
go.mod

@@ -1,26 +1,19 @@
 module github.com/zalando/postgres-operator
 
-go 1.12
+go 1.14
 
 require (
-	github.com/aws/aws-sdk-go v1.25.44
-	github.com/emicklei/go-restful v2.9.6+incompatible // indirect
-	github.com/evanphx/json-patch v4.5.0+incompatible // indirect
-	github.com/googleapis/gnostic v0.3.0 // indirect
-	github.com/imdario/mergo v0.3.8 // indirect
-	github.com/lib/pq v1.2.0
+	github.com/aws/aws-sdk-go v1.29.33
+	github.com/lib/pq v1.3.0
 	github.com/motomux/pretty v0.0.0-20161209205251-b2aad2c9a95d
-	github.com/sirupsen/logrus v1.4.2
+	github.com/r3labs/diff v0.0.0-20191120142937-b4ed99a31f5a
+	github.com/sirupsen/logrus v1.5.0
 	github.com/stretchr/testify v1.4.0
-	golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413 // indirect
-	golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553 // indirect
-	golang.org/x/sys v0.0.0-20191210023423-ac6580df4449 // indirect
-	golang.org/x/tools v0.0.0-20191209225234-22774f7dae43 // indirect
-	gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 // indirect
-	gopkg.in/yaml.v2 v2.2.4
-	k8s.io/api v0.0.0-20191121015604-11707872ac1c
-	k8s.io/apiextensions-apiserver v0.0.0-20191204090421-cd61debedab5
-	k8s.io/apimachinery v0.0.0-20191203211716-adc6f4cd9e7d
-	k8s.io/client-go v0.0.0-20191204082520-bc9b51d240b2
-	k8s.io/code-generator v0.0.0-20191121015212-c4c8f8345c7e
+	golang.org/x/tools v0.0.0-20200326210457-5d86d385bf88 // indirect
+	gopkg.in/yaml.v2 v2.2.8
+	k8s.io/api v0.18.0
+	k8s.io/apiextensions-apiserver v0.18.0
+	k8s.io/apimachinery v0.18.0
+	k8s.io/client-go v0.18.0
+	k8s.io/code-generator v0.18.0
 )
 
go.sum

@@ -11,7 +11,6 @@ github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6L
 github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
 github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
-github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
 github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
 github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
 github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
 
@@ -27,12 +26,13 @@ github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo
 github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
 github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
 github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
-github.com/aws/aws-sdk-go v1.25.44 h1:n9ahFoiyn66smjF34hYr3tb6/ZdBcLuFz7BCDhHyJ7I=
-github.com/aws/aws-sdk-go v1.25.44/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
+github.com/aws/aws-sdk-go v1.29.33 h1:WP85+WHalTFQR2wYp5xR2sjiVAZXew2bBQXGU1QJBXI=
+github.com/aws/aws-sdk-go v1.29.33/go.mod h1:1KvfttTE3SPKMpo8g2c6jL3ZKfXtFvKscTgahTma5Xg=
 github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
 github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
 github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
 github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
+github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
 github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
 github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
 github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
 
@@ -46,7 +46,6 @@ github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfc
 github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
 github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
 github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
-github.com/davecgh/go-spew v0.0.0-20151105211317-5215b55f46b2/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 
@@ -58,15 +57,15 @@ github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96 h1:cenwrSVm+Z7QL
 github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
 github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
 github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
-github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e h1:p1yVGRW3nmb85p1Sh1ZJSDm4A4iKLS5QNbvUHMgGu/M=
-github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
+github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153 h1:yUdfgN0XgIJw7foRItutHYUIhlcKzcSf5vDpdhQAKTc=
+github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
 github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
+github.com/emicklei/go-restful v2.9.5+incompatible h1:spTtZBk5DYEvbxMVutUuTyh1Ao2r4iyvLdACqsl/Ljk=
 github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
-github.com/emicklei/go-restful v2.9.6+incompatible h1:tfrHha8zJ01ywiOEC1miGY8st1/igzWB8OmvPgoYX7w=
-github.com/emicklei/go-restful v2.9.6+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
+github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
+github.com/evanphx/json-patch v4.2.0+incompatible h1:fUDGZCv/7iAN7u0puUVhvKCcsR6vRfwrJatElLBEf0I=
 github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
-github.com/evanphx/json-patch v4.5.0+incompatible h1:ouOWdg56aJriqS0huScTkVXPC5IcNrDCXZ6OoTAWu7M=
-github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
 github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
 github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
 github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
 
@@ -124,11 +123,12 @@ github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh
 github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4=
 github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA=
 github.com/go-openapi/validate v0.19.5/go.mod h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85nY1c2z52x1Gk4=
+github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
 github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
 github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
 github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
-github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d h1:3PaI8p3seN09VjbTYC/QWlUZdZ1qS1zGjy7LH2Wt07I=
-github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
+github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls=
+github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
 github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903 h1:LbsanbbD6LieFkXbj9YNNBupiGHJgFeLpO0j0Fza1h8=
 github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
 
@@ -136,6 +136,7 @@ github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfb
 github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
 github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
 github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
 github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg=
 github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
 github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
 
@@ -144,18 +145,20 @@ github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ
 github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
 github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY=
 github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
 github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI=
-github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw=
 github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g=
+github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
 github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
 github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
 github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=
 github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
 github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d h1:7XGaL1e6bYS1yIonGp9761ExpPPV1ui0SAC59Yube9k=
 github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
-github.com/googleapis/gnostic v0.3.0 h1:CcQijm0XKekKjP/YCz28LXVSpgguuB+nCxaSjCe09y0=
-github.com/googleapis/gnostic v0.3.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
+github.com/googleapis/gnostic v0.1.0 h1:rVsPeBmXbYv4If/cumu1AzZPwV58q433hvONV1UEZoI=
+github.com/googleapis/gnostic v0.1.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
 github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8=
 github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
 github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
 
@@ -169,15 +172,14 @@ github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ
 github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
 github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
 github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
+github.com/imdario/mergo v0.3.5 h1:JboBksRwiiAJWvIYJVo46AfV+IAIKZpfrSzVKj42R4Q=
 github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
-github.com/imdario/mergo v0.3.8 h1:CGgOkSJeqMRmt0D9XLWExdT4m4F1vd3FV3VPt+0VxkQ=
-github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
 github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
 github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM=
 github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
 github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
 github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
 github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
-github.com/json-iterator/go v1.1.7 h1:KfgG9LzI+pYjr4xvmz/5H4FXjokeP+rlHLhv3iH62Fo=
 github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.8 h1:QiWkFLKq0T7mpzwOTu6BzNDbfTE8OLrYhVKYMLF46Ok=
 github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
 
@@ -195,8 +197,8 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
 github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
 github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
 github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
-github.com/lib/pq v1.2.0 h1:LXpIM/LZ5xGFhOpXAQUIMM1HdyqzVYM13zNdjCEEcA0=
-github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
+github.com/lib/pq v1.3.0 h1:/qkRGz8zljWiDcFvgpwUpwIAPu3r07TDvs3Rws+o/pU=
+github.com/lib/pq v1.3.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
 github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
 github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
 github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
 
@@ -214,7 +216,6 @@ github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh
 github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
 github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
 github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
-github.com/modern-go/reflect2 v0.0.0-20180320133207-05fbef0ca5da/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
 github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
 github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
 github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
 
@@ -227,8 +228,8 @@ github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+
 github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
 github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
 github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v1.10.1 h1:q/mM8GF/n0shIN8SaAZ0V+jnLPzen6WIVZdiwrRlMlo=
-github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.11.0 h1:JAKSXpt1YjtLA7YpPiqO9ss6sNXEsPfSGdwN0UHqzrw=
+github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
 github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
 github.com/onsi/gomega v1.7.0 h1:XPnZz8VVBHjVsy1vzJmRwIcSwiUO+JFfrv/xGiigmME=
 github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
 
@@ -236,9 +237,8 @@ github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtP
 github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
 github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
 github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
 github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
 github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA=
 
@@ -246,26 +246,30 @@ github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXP
 github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
 github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
 github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
 github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
 github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
 github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
 github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
-github.com/remyoudompheng/bigfft v0.0.0-20170806203942-52369c62f446/go.mod h1:uYEyJGbgTkfkS4+E/PavXkNJcbFIpEtjt2B0KDQ5+9M=
+github.com/r3labs/diff v0.0.0-20191120142937-b4ed99a31f5a h1:2v4Ipjxa3sh+xn6GvtgrMub2ci4ZLQMvTaYIba2lfdc=
+github.com/r3labs/diff v0.0.0-20191120142937-b4ed99a31f5a/go.mod h1:ozniNEFS3j1qCwHKdvraMn1WJOsUxHd7lYfukEIS4cs=
 github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
 github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
 github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
 github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
-github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4=
-github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
+github.com/sirupsen/logrus v1.5.0 h1:1N5EYkVAPEywqZRJd7cwnRtCb6xJx7NH3T3WUTF980Q=
+github.com/sirupsen/logrus v1.5.0/go.mod h1:+F7Ogzej0PZc/94MaYx/nvG9jOFMD2osvC3s+Squfpo=
 github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
 github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
 github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
 github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
 github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
 github.com/spf13/cobra v0.0.5 h1:f0B+LkLX6DtmRH1isoNA9VTtNUK9K8xYd28JNNfOv/s=
 github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
 github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
 github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
 github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
 github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg=
 github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
+github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
 
@@ -273,7 +277,6 @@ github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DM
 github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
 github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
 github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
-github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
 github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
 github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
 github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
 
@@ -286,6 +289,7 @@ github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijb
 github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw=
 github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
 github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
+github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
 go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg=
 go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
 
@@ -302,19 +306,17 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk
 golang.org/x/crypto v0.0.0-20190320223903-b7391e95e576/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
 golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
 golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
 golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413 h1:ULYEB3JvPRE/IfO+9uO7vKV/xzVTO7XPAwm8xbf4w2g=
-golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550 h1:ObdrDkeb4kJdCP557AjRjq69pTHfNouLtWZG7j9rPN8=
+golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975 h1:/Tl7pH94bvbAAHBdZJT947M/+gp0+CqQXDtMRC0fseo=
+golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
 golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20190312203227-4b39c73a6495 h1:I6A9Ag9FpEKOjcKrRNjQkPHawoXIhKyTGfvvjFAiiAk=
 golang.org/x/exp v0.0.0-20190312203227-4b39c73a6495/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
 golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
 golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
 golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
 golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
 golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
 golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
+golang.org/x/mod v0.2.0 h1:KU7oHjnv3XNWfa5COkzUifxZmxp1TyI7ImMXqFxLwvQ=
+golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 
@@ -332,8 +334,9 @@ golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLL
 golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553 h1:efeOvDhwQ29Dj3SdAV/MJf8oukgn+8D8WgaCaRMchF8=
-golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200226121028-0de0cce0169b h1:0mm1VjtFUOIlE1SbDlwjYaDxZVDP2S5ou6y0gSgXHu8=
+golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
 golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
 golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0=
 
@@ -343,6 +346,7 @@ golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJ
 golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 
@@ -352,20 +356,21 @@ golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5h
 golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20190321052220-f7bb7a8bee54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f h1:25KHgbfyiSm6vwQLbM3zZIe1v9p/3ea4Rz+nnM5K/i4=
 golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191210023423-ac6580df4449 h1:gSbV7h1NRL2G1xTg/owz62CST1oJBmxy4QpMMregXVQ=
-golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7 h1:HmbHVPwrPEKPGLAcHSrMe6+hqSUlvZU0rab6x5EXfGU=
+golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
 golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
 golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20181108054448-85acf8d2951c h1:fqgJT0MGcGpPgpWU7VRdRjuArfcOvC4AoJmILihzhDg=
 golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ=
+golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 
@ -375,23 +380,20 @@ golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGm
|
|||
golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190125232054-d66bd3c5d5a6/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
|
||||
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||
golang.org/x/tools v0.0.0-20190617190820-da514acc4774/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||
golang.org/x/tools v0.0.0-20190920225731-5eefd052ad72/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191209225234-22774f7dae43 h1:NfPq5mgc5ArFgVLCpeS4z07IoxSAqVfV/gQ5vxdgaxI=
|
||||
golang.org/x/tools v0.0.0-20191209225234-22774f7dae43/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20200326210457-5d86d385bf88 h1:F7fM2kxXfuWw820fa+MMCCLH6hmYe+jtLnZpwoiLK4Q=
|
||||
golang.org/x/tools v0.0.0-20200326210457-5d86d385bf88/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
gonum.org/v1/gonum v0.0.0-20190331200053-3d26580ed485 h1:OB/uP/Puiu5vS5QMRPrXCDWUPb+kt8f1KW8oQzFejQw=
|
||||
gonum.org/v1/gonum v0.0.0-20190331200053-3d26580ed485/go.mod h1:2ltnJ7xHfj0zHS40VVPYEAAMTa3ZGguvHGBSJeRWqE0=
|
||||
gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw=
|
||||
gonum.org/v1/netlib v0.0.0-20190331212654-76723241ea4e h1:jRyg0XfpwWlhEV8mDfdNGBeSJM2fuyh9Yjrnd8kF2Ts=
|
||||
gonum.org/v1/netlib v0.0.0-20190331212654-76723241ea4e/go.mod h1:kS+toOQn6AQKjmKJ7gzohV1XkqsFehRA2FbsbkopSuQ=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
|
|
@ -400,14 +402,15 @@ google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7
|
|||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
||||
google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
||||
google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
|
||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
|
||||
gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
|
||||
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
||||
|
|
@ -423,45 +426,40 @@ gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
|||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I=
|
||||
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=
|
||||
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
|
||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
k8s.io/api v0.0.0-20191121015604-11707872ac1c h1:Z87my3sF4WhG0OMxzARkWY/IKBtOr+MhXZAb4ts6qFc=
|
||||
k8s.io/api v0.0.0-20191121015604-11707872ac1c/go.mod h1:R/s4gKT0V/cWEnbQa9taNRJNbWUK57/Dx6cPj6MD3A0=
|
||||
k8s.io/apiextensions-apiserver v0.0.0-20191204090421-cd61debedab5 h1:g+GvnbGqLU1Jxb/9iFm/BFcmkqG9HdsGh52+wHirpsM=
|
||||
k8s.io/apiextensions-apiserver v0.0.0-20191204090421-cd61debedab5/go.mod h1:CPw0IHz1YrWGy0+8mG/76oTHXvChlgCb3EAezKQKB2I=
|
||||
k8s.io/apimachinery v0.0.0-20191121015412-41065c7a8c2a/go.mod h1:b9qmWdKlLuU9EBh+06BtLcSf/Mu89rWL33naRxs1uZg=
|
||||
k8s.io/apimachinery v0.0.0-20191123233150-4c4803ed55e3/go.mod h1:b9qmWdKlLuU9EBh+06BtLcSf/Mu89rWL33naRxs1uZg=
|
||||
k8s.io/apimachinery v0.0.0-20191128180518-03184f823e28/go.mod h1:b9qmWdKlLuU9EBh+06BtLcSf/Mu89rWL33naRxs1uZg=
|
||||
k8s.io/apimachinery v0.0.0-20191203211716-adc6f4cd9e7d h1:q+OZmYewHJeMCzwpHkXlNTtk5bvaUMPCikKvf77RBlo=
|
||||
k8s.io/apimachinery v0.0.0-20191203211716-adc6f4cd9e7d/go.mod h1:b9qmWdKlLuU9EBh+06BtLcSf/Mu89rWL33naRxs1uZg=
|
||||
k8s.io/apiserver v0.0.0-20191204084332-137a9d3b886b/go.mod h1:itgfam5HJbT/4b2BGfpUkkxfheMmDH+Ix+tEAP3uqZk=
|
||||
k8s.io/client-go v0.0.0-20191204082517-8c19b9f4a642/go.mod h1:HMVIZ0dPop3WCrPEaJ+v5/94cjt56avdDFshpX0Fjvo=
|
||||
k8s.io/client-go v0.0.0-20191204082519-e9644b2e3edc/go.mod h1:5lSG1yeDZVwDYAHe9VK48SCe5zmcnkAcf2Mx59TuhmM=
|
||||
k8s.io/client-go v0.0.0-20191204082520-bc9b51d240b2 h1:T2HGghBOPAOEjWuIyFSeCsWEwsxa6unkBvy3PHfqonM=
|
||||
k8s.io/client-go v0.0.0-20191204082520-bc9b51d240b2/go.mod h1:5lSG1yeDZVwDYAHe9VK48SCe5zmcnkAcf2Mx59TuhmM=
|
||||
k8s.io/code-generator v0.0.0-20191121015212-c4c8f8345c7e h1:HB9Zu5ZUvJfNpLiTPhz+CebVKV8C39qTBMQkAgAZLNw=
|
||||
k8s.io/code-generator v0.0.0-20191121015212-c4c8f8345c7e/go.mod h1:DVmfPQgxQENqDIzVR2ddLXMH34qeszkKSdH/N+s+38s=
|
||||
k8s.io/component-base v0.0.0-20191204083903-0d4d24e738e4/go.mod h1:8VIh1jErItC4bg9hLBkPneyS77Tin8KwSzbYepHJnQI=
|
||||
k8s.io/component-base v0.0.0-20191204083906-3ac1376c73aa/go.mod h1:mECWvHCPhJudDVDMtBl+AIf/YnTMp5r1F947OYFUwP0=
|
||||
k8s.io/api v0.18.0 h1:lwYk8Vt7rsVTwjRU6pzEsa9YNhThbmbocQlKvNBB4EQ=
|
||||
k8s.io/api v0.18.0/go.mod h1:q2HRQkfDzHMBZL9l/y9rH63PkQl4vae0xRT+8prbrK8=
|
||||
k8s.io/apiextensions-apiserver v0.18.0 h1:HN4/P8vpGZFvB5SOMuPPH2Wt9Y/ryX+KRvIyAkchu1Q=
|
||||
k8s.io/apiextensions-apiserver v0.18.0/go.mod h1:18Cwn1Xws4xnWQNC00FLq1E350b9lUF+aOdIWDOZxgo=
|
||||
k8s.io/apimachinery v0.18.0 h1:fuPfYpk3cs1Okp/515pAf0dNhL66+8zk8RLbSX+EgAE=
|
||||
k8s.io/apimachinery v0.18.0/go.mod h1:9SnR/e11v5IbyPCGbvJViimtJ0SwHG4nfZFjU77ftcA=
|
||||
k8s.io/apiserver v0.18.0/go.mod h1:3S2O6FeBBd6XTo0njUrLxiqk8GNy6wWOftjhJcXYnjw=
|
||||
k8s.io/client-go v0.18.0 h1:yqKw4cTUQraZK3fcVCMeSa+lqKwcjZ5wtcOIPnxQno4=
|
||||
k8s.io/client-go v0.18.0/go.mod h1:uQSYDYs4WhVZ9i6AIoEZuwUggLVEF64HOD37boKAtF8=
|
||||
k8s.io/code-generator v0.18.0 h1:0xIRWzym+qMgVpGmLESDeMfz/orwgxwxFFAo1xfGNtQ=
|
||||
k8s.io/code-generator v0.18.0/go.mod h1:+UHX5rSbxmR8kzS+FAv7um6dtYrZokQvjHpDSYRVkTc=
|
||||
k8s.io/component-base v0.18.0/go.mod h1:u3BCg0z1uskkzrnAKFzulmYaEpZF7XC9Pf/uFyb1v2c=
|
||||
k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
|
||||
k8s.io/gengo v0.0.0-20190822140433-26a664648505 h1:ZY6yclUKVbZ+SdWnkfY+Je5vrMpKOxmGeKRbsXVmqYM=
|
||||
k8s.io/gengo v0.0.0-20190822140433-26a664648505/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
|
||||
k8s.io/gengo v0.0.0-20200114144118-36b2048a9120 h1:RPscN6KhmG54S33L+lr3GS+oD1jmchIU0ll519K6FA4=
|
||||
k8s.io/gengo v0.0.0-20200114144118-36b2048a9120/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
|
||||
k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
|
||||
k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
|
||||
k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8=
|
||||
k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=
|
||||
k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a h1:UcxjrRMyNx/i/y8G7kPvLyy7rfbeuf1PYyBf973pgyU=
|
||||
k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E=
|
||||
k8s.io/utils v0.0.0-20191114184206-e782cd3c129f h1:GiPwtSzdP43eI1hpPCbROQCCIgCuiMMNF8YUVLF3vJo=
|
||||
k8s.io/utils v0.0.0-20191114184206-e782cd3c129f/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
|
||||
modernc.org/cc v1.0.0/go.mod h1:1Sk4//wdnYJiUIxnW8ddKpaOJCF37yAdqYnkxUpaYxw=
|
||||
modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk=
|
||||
modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k=
|
||||
modernc.org/strutil v1.0.0/go.mod h1:lstksw84oURvj9y3tn8lGvRxyRC1S2+g5uuIzNfIOBs=
|
||||
modernc.org/xc v1.0.0/go.mod h1:mRNCo0bvLjGhHO9WsyuKVU4q0ceiDDDoEeWDJHrNx8I=
|
||||
sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI=
|
||||
sigs.k8s.io/structured-merge-diff v1.0.1-0.20191108220359-b1b620dd3f06/go.mod h1:/ULNhyfzRopfcjskuui0cTITekDduZ7ycKN3oUT9R18=
|
||||
k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c h1:/KUFqjjqAcY4Us6luF5RDNZ16KJtb49HfR3ZHB9qYXM=
|
||||
k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E=
|
||||
k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89 h1:d4vVOjXm687F1iLSP2q3lyPPuyvTUt3aVoBpi2DqRsU=
|
||||
k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
|
||||
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.7/go.mod h1:PHgbrJT7lCHcxMU+mDHEm+nx46H4zuuHZkDP6icnhu0=
|
||||
sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw=
|
||||
sigs.k8s.io/structured-merge-diff/v3 v3.0.0 h1:dOmIZBMfhcHS09XZkMyUgkq5trg3/jRyJYFZUiaOp8E=
|
||||
sigs.k8s.io/structured-merge-diff/v3 v3.0.0/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw=
|
||||
sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs=
|
||||
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
|
||||
sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q=
|
||||
sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
|
||||
|
|
|
|||
|
|
@ -19,6 +19,7 @@ spec:
    - createdb
  enableMasterLoadBalancer: false
  enableReplicaLoadBalancer: false
  # enableConnectionPooler: true # not needed when connectionPooler section is present (see below)
  allowedSourceRanges:  # load balancers' source ranges for both master and replica services
  - 127.0.0.1/32
  databases:

@ -96,6 +97,19 @@ spec:
  # - 01:00-06:00 #UTC
  # - Sat:00:00-04:00

  connectionPooler:
    numberOfInstances: 2
    mode: "transaction"
    schema: "pooler"
    user: "pooler"
    resources:
      requests:
        cpu: 300m
        memory: 100Mi
      limits:
        cpu: "1"
        memory: 100Mi

  initContainers:
  - name: date
    image: busybox

@ -120,3 +134,5 @@ spec:
    certificateFile: "tls.crt"
    privateKeyFile: "tls.key"
    caFile: ""  # optionally configure Postgres with a CA certificate
    # When TLS is enabled, also set spiloFSGroup parameter above to the relevant value.
    # if unknown, set it to 103 which is the usual value in the default spilo images.
@ -11,16 +11,16 @@ data:
  cluster_history_entries: "1000"
  cluster_labels: application:spilo
  cluster_name_label: cluster-name
  # connection_pool_default_cpu_limit: "1"
  # connection_pool_default_cpu_request: "500m"
  # connection_pool_default_memory_limit: 100Mi
  # connection_pool_default_memory_request: 100Mi
  connection_pool_image: "registry.opensource.zalan.do/acid/pgbouncer:master-5"
  # connection_pool_max_db_connections: 60
  # connection_pool_mode: "transaction"
  # connection_pool_number_of_instances: 2
  # connection_pool_schema: "pooler"
  # connection_pool_user: "pooler"
  # connection_pooler_default_cpu_limit: "1"
  # connection_pooler_default_cpu_request: "500m"
  # connection_pooler_default_memory_limit: 100Mi
  # connection_pooler_default_memory_request: 100Mi
  connection_pooler_image: "registry.opensource.zalan.do/acid/pgbouncer:master-5"
  # connection_pooler_max_db_connections: 60
  # connection_pooler_mode: "transaction"
  # connection_pooler_number_of_instances: 2
  # connection_pooler_schema: "pooler"
  # connection_pooler_user: "pooler"
  # custom_service_annotations: "keyx:valuez,keya:valuea"
  # custom_pod_annotations: "keya:valuea,keyb:valueb"
  db_hosted_zone: db.example.com

@ -43,6 +43,7 @@ data:
  # enable_team_superuser: "false"
  enable_teams_api: "false"
  # etcd_host: ""
  # kubernetes_use_configmaps: "false"
  # infrastructure_roles_secret_name: postgresql-infrastructure-roles
  # inherited_labels: application,environment
  # kube_iam_role: ""
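For context on the new `kubernetes_use_configmaps` option: when enabled, the operator passes `KUBERNETES_USE_CONFIGMAPS=true` to Patroni (see the `generateSpiloPodEnvVars` hunk further down), and Patroni then keeps its DCS state, such as the leader key and cluster configuration, in ConfigMaps rather than in Endpoints objects. This is typically needed on platforms where Endpoints-based leader election is not available, for example OpenShift.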

@ -42,6 +42,8 @@ spec:
            type: boolean
          etcd_host:
            type: string
          kubernetes_use_configmaps:
            type: boolean
          max_instances:
            type: integer
            minimum: -1  # -1 = disabled

@ -294,44 +296,44 @@ spec:
                pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
              scalyr_server_url:
                type: string
          connection_pool:
          connection_pooler:
            type: object
            properties:
              connection_pool_schema:
              connection_pooler_schema:
                type: string
                #default: "pooler"
              connection_pool_user:
              connection_pooler_user:
                type: string
                #default: "pooler"
              connection_pool_image:
              connection_pooler_image:
                type: string
                #default: "registry.opensource.zalan.do/acid/pgbouncer"
              connection_pool_max_db_connections:
              connection_pooler_max_db_connections:
                type: integer
                #default: 60
              connection_pool_mode:
              connection_pooler_mode:
                type: string
                enum:
                - "session"
                - "transaction"
                #default: "transaction"
              connection_pool_number_of_instances:
              connection_pooler_number_of_instances:
                type: integer
                minimum: 2
                #default: 2
              connection_pool_default_cpu_limit:
              connection_pooler_default_cpu_limit:
                type: string
                pattern: '^(\d+m|\d+(\.\d{1,3})?)$'
                #default: "1"
              connection_pool_default_cpu_request:
              connection_pooler_default_cpu_request:
                type: string
                pattern: '^(\d+m|\d+(\.\d{1,3})?)$'
                #default: "500m"
              connection_pool_default_memory_limit:
              connection_pooler_default_memory_limit:
                type: string
                pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
                #default: "100Mi"
              connection_pool_default_memory_request:
              connection_pooler_default_memory_request:
                type: string
                pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
                #default: "100Mi"
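The two patterns in this schema are the usual Kubernetes-style quantity checks: CPU as whole or fractional cores (at most three decimals) or as millicores, memory as a number with an optional decimal or binary suffix. A self-contained way to sanity-check candidate values against them (illustrative Go, not part of this commit):

    package main

    import (
        "fmt"
        "regexp"
    )

    func main() {
        cpu := regexp.MustCompile(`^(\d+m|\d+(\.\d{1,3})?)$`)
        mem := regexp.MustCompile(`^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$`)

        fmt.Println(cpu.MatchString("500m"), cpu.MatchString("1.5")) // true true
        fmt.Println(cpu.MatchString("1.5000"))                       // false: at most 3 decimals
        fmt.Println(mem.MatchString("100Mi"), mem.MatchString("1G")) // true true
        fmt.Println(mem.MatchString("100MB"))                        // false: B is not a valid suffix
    }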

@ -5,6 +5,7 @@ metadata:
configuration:
  # enable_crd_validation: true
  etcd_host: ""
  # kubernetes_use_configmaps: false
  docker_image: registry.opensource.zalan.do/acid/spilo-12:1.6-p2
  # enable_shm_volume: true
  max_instances: -1

@ -121,14 +122,14 @@ configuration:
    scalyr_memory_limit: 500Mi
    scalyr_memory_request: 50Mi
    # scalyr_server_url: ""
  connection_pool:
    connection_pool_default_cpu_limit: "1"
    connection_pool_default_cpu_request: "500m"
    connection_pool_default_memory_limit: 100Mi
    connection_pool_default_memory_request: 100Mi
    connection_pool_image: "registry.opensource.zalan.do/acid/pgbouncer:master-5"
    # connection_pool_max_db_connections: 60
    connection_pool_mode: "transaction"
    connection_pool_number_of_instances: 2
    # connection_pool_schema: "pooler"
    # connection_pool_user: "pooler"
  connection_pooler:
    connection_pooler_default_cpu_limit: "1"
    connection_pooler_default_cpu_request: "500m"
    connection_pooler_default_memory_limit: 100Mi
    connection_pooler_default_memory_request: 100Mi
    connection_pooler_image: "registry.opensource.zalan.do/acid/pgbouncer:master-5"
    # connection_pooler_max_db_connections: 60
    connection_pooler_mode: "transaction"
    connection_pooler_number_of_instances: 2
    # connection_pooler_schema: "pooler"
    # connection_pooler_user: "pooler"

@ -70,7 +70,7 @@ spec:
              uid:
                format: uuid
                type: string
          connectionPool:
          connectionPooler:
            type: object
            properties:
              dockerImage:

@ -126,7 +126,7 @@ spec:
          # Note: usernames specified here as database owners must be declared in the users key of the spec key.
          dockerImage:
            type: string
          enableConnectionPool:
          enableConnectionPooler:
            type: boolean
          enableLogicalBackup:
            type: boolean
@ -177,7 +177,7 @@ var PostgresCRDResourceValidation = apiextv1beta1.CustomResourceValidation{
                },
            },
        },
        "connectionPool": {
        "connectionPooler": {
            Type: "object",
            Properties: map[string]apiextv1beta1.JSONSchemaProps{
                "dockerImage": {

@ -259,7 +259,7 @@ var PostgresCRDResourceValidation = apiextv1beta1.CustomResourceValidation{
        "dockerImage": {
            Type: "string",
        },
        "enableConnectionPool": {
        "enableConnectionPooler": {
            Type: "boolean",
        },
        "enableLogicalBackup": {

@ -764,6 +764,9 @@ var OperatorConfigCRDResourceValidation = apiextv1beta1.CustomResourceValidation
        "etcd_host": {
            Type: "string",
        },
        "kubernetes_use_configmaps": {
            Type: "boolean",
        },
        "max_instances": {
            Type:        "integer",
            Description: "-1 = disabled",

@ -1166,32 +1169,32 @@ var OperatorConfigCRDResourceValidation = apiextv1beta1.CustomResourceValidation
                },
            },
        },
        "connection_pool": {
        "connection_pooler": {
            Type: "object",
            Properties: map[string]apiextv1beta1.JSONSchemaProps{
                "connection_pool_default_cpu_limit": {
                "connection_pooler_default_cpu_limit": {
                    Type:    "string",
                    Pattern: "^(\\d+m|\\d+(\\.\\d{1,3})?)$",
                },
                "connection_pool_default_cpu_request": {
                "connection_pooler_default_cpu_request": {
                    Type:    "string",
                    Pattern: "^(\\d+m|\\d+(\\.\\d{1,3})?)$",
                },
                "connection_pool_default_memory_limit": {
                "connection_pooler_default_memory_limit": {
                    Type:    "string",
                    Pattern: "^(\\d+(e\\d+)?|\\d+(\\.\\d+)?(e\\d+)?[EPTGMK]i?)$",
                },
                "connection_pool_default_memory_request": {
                "connection_pooler_default_memory_request": {
                    Type:    "string",
                    Pattern: "^(\\d+(e\\d+)?|\\d+(\\.\\d+)?(e\\d+)?[EPTGMK]i?)$",
                },
                "connection_pool_image": {
                "connection_pooler_image": {
                    Type: "string",
                },
                "connection_pool_max_db_connections": {
                "connection_pooler_max_db_connections": {
                    Type: "integer",
                },
                "connection_pool_mode": {
                "connection_pooler_mode": {
                    Type: "string",
                    Enum: []apiextv1beta1.JSON{
                        {

@ -1202,14 +1205,14 @@ var OperatorConfigCRDResourceValidation = apiextv1beta1.CustomResourceValidation
                    },
                },
            },
            "connection_pool_number_of_instances": {
            "connection_pooler_number_of_instances": {
                Type:    "integer",
                Minimum: &min2,
            },
            "connection_pool_schema": {
            "connection_pooler_schema": {
                Type: "string",
            },
            "connection_pool_user": {
            "connection_pooler_user": {
                Type: "string",
            },
        },
@ -153,18 +153,18 @@ type ScalyrConfiguration struct {
    ScalyrMemoryLimit string `json:"scalyr_memory_limit,omitempty"`
}

// Defines default configuration for connection pool
type ConnectionPoolConfiguration struct {
    NumberOfInstances    *int32 `json:"connection_pool_number_of_instances,omitempty"`
    Schema               string `json:"connection_pool_schema,omitempty"`
    User                 string `json:"connection_pool_user,omitempty"`
    Image                string `json:"connection_pool_image,omitempty"`
    Mode                 string `json:"connection_pool_mode,omitempty"`
    MaxDBConnections     *int32 `json:"connection_pool_max_db_connections,omitempty"`
    DefaultCPURequest    string `json:"connection_pool_default_cpu_request,omitempty"`
    DefaultMemoryRequest string `json:"connection_pool_default_memory_request,omitempty"`
    DefaultCPULimit      string `json:"connection_pool_default_cpu_limit,omitempty"`
    DefaultMemoryLimit   string `json:"connection_pool_default_memory_limit,omitempty"`
// Defines default configuration for connection pooler
type ConnectionPoolerConfiguration struct {
    NumberOfInstances    *int32 `json:"connection_pooler_number_of_instances,omitempty"`
    Schema               string `json:"connection_pooler_schema,omitempty"`
    User                 string `json:"connection_pooler_user,omitempty"`
    Image                string `json:"connection_pooler_image,omitempty"`
    Mode                 string `json:"connection_pooler_mode,omitempty"`
    MaxDBConnections     *int32 `json:"connection_pooler_max_db_connections,omitempty"`
    DefaultCPURequest    string `json:"connection_pooler_default_cpu_request,omitempty"`
    DefaultMemoryRequest string `json:"connection_pooler_default_memory_request,omitempty"`
    DefaultCPULimit      string `json:"connection_pooler_default_cpu_limit,omitempty"`
    DefaultMemoryLimit   string `json:"connection_pooler_default_memory_limit,omitempty"`
}
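These fields are operator-wide defaults; per-cluster manifest values take precedence. A minimal sketch of that precedence, assuming the repo's `util.Coalesce` returns the first non-empty string (the same pattern the cluster code below uses for the pooler user):

    // effectiveSchema, sketched for illustration: the manifest value wins,
    // the operator-level default is the fallback.
    func effectiveSchema(spec *acidv1.ConnectionPooler, cfg ConnectionPoolerConfiguration) string {
        return util.Coalesce(spec.Schema, cfg.Schema)
    }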

// OperatorLogicalBackupConfiguration defines configuration for logical backup

@ -183,6 +184,7 @@ type OperatorLogicalBackupConfiguration struct {
type OperatorConfigurationData struct {
    EnableCRDValidation     *bool  `json:"enable_crd_validation,omitempty"`
    EtcdHost                string `json:"etcd_host,omitempty"`
    KubernetesUseConfigMaps bool   `json:"kubernetes_use_configmaps,omitempty"`
    DockerImage             string `json:"docker_image,omitempty"`
    Workers                 uint32 `json:"workers,omitempty"`
    MinInstances            int32  `json:"min_instances,omitempty"`

@ -203,7 +204,7 @@ type OperatorConfigurationData struct {
    LoggingRESTAPI LoggingRESTAPIConfiguration        `json:"logging_rest_api"`
    Scalyr         ScalyrConfiguration                `json:"scalyr"`
    LogicalBackup  OperatorLogicalBackupConfiguration `json:"logical_backup"`
    ConnectionPool   ConnectionPoolConfiguration   `json:"connection_pool"`
    ConnectionPooler ConnectionPoolerConfiguration `json:"connection_pooler"`
}

// Duration shortens this frequently used name
@ -29,8 +29,8 @@ type PostgresSpec struct {
    Patroni   `json:"patroni,omitempty"`
    Resources `json:"resources,omitempty"`

    EnableConnectionPool *bool           `json:"enableConnectionPool,omitempty"`
    ConnectionPool       *ConnectionPool `json:"connectionPool,omitempty"`
    EnableConnectionPooler *bool             `json:"enableConnectionPooler,omitempty"`
    ConnectionPooler       *ConnectionPooler `json:"connectionPooler,omitempty"`

    TeamID      string `json:"teamId"`
    DockerImage string `json:"dockerImage,omitempty"`

@ -189,10 +189,10 @@ type PostgresStatus struct {
// resources)
// Type string `json:"type,omitempty"`
//
// TODO: figure out what other important parameters of the connection pool it
// TODO: figure out what other important parameters of the connection pooler it
// makes sense to expose. E.g. pool size (min/max boundaries), max client
// connections etc.
type ConnectionPool struct {
type ConnectionPooler struct {
    NumberOfInstances *int32 `json:"numberOfInstances,omitempty"`
    Schema            string `json:"schema,omitempty"`
    User              string `json:"user,omitempty"`
@ -69,7 +69,7 @@ func (in *CloneDescription) DeepCopy() *CloneDescription {
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ConnectionPool) DeepCopyInto(out *ConnectionPool) {
func (in *ConnectionPooler) DeepCopyInto(out *ConnectionPooler) {
    *out = *in
    if in.NumberOfInstances != nil {
        in, out := &in.NumberOfInstances, &out.NumberOfInstances

@ -85,18 +85,18 @@ func (in *ConnectionPool) DeepCopyInto(out *ConnectionPool) {
    return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionPool.
func (in *ConnectionPool) DeepCopy() *ConnectionPool {
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionPooler.
func (in *ConnectionPooler) DeepCopy() *ConnectionPooler {
    if in == nil {
        return nil
    }
    out := new(ConnectionPool)
    out := new(ConnectionPooler)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ConnectionPoolConfiguration) DeepCopyInto(out *ConnectionPoolConfiguration) {
func (in *ConnectionPoolerConfiguration) DeepCopyInto(out *ConnectionPoolerConfiguration) {
    *out = *in
    if in.NumberOfInstances != nil {
        in, out := &in.NumberOfInstances, &out.NumberOfInstances

@ -111,12 +111,12 @@ func (in *ConnectionPoolConfiguration) DeepCopyInto(out *ConnectionPoolConfigura
    return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionPoolConfiguration.
func (in *ConnectionPoolConfiguration) DeepCopy() *ConnectionPoolConfiguration {
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionPoolerConfiguration.
func (in *ConnectionPoolerConfiguration) DeepCopy() *ConnectionPoolerConfiguration {
    if in == nil {
        return nil
    }
    out := new(ConnectionPoolConfiguration)
    out := new(ConnectionPoolerConfiguration)
    in.DeepCopyInto(out)
    return out
}

@ -308,7 +308,7 @@ func (in *OperatorConfigurationData) DeepCopyInto(out *OperatorConfigurationData
    out.LoggingRESTAPI = in.LoggingRESTAPI
    out.Scalyr = in.Scalyr
    out.LogicalBackup = in.LogicalBackup
    in.ConnectionPool.DeepCopyInto(&out.ConnectionPool)
    in.ConnectionPooler.DeepCopyInto(&out.ConnectionPooler)
    return
}

@ -471,14 +471,14 @@ func (in *PostgresSpec) DeepCopyInto(out *PostgresSpec) {
    out.Volume = in.Volume
    in.Patroni.DeepCopyInto(&out.Patroni)
    out.Resources = in.Resources
    if in.EnableConnectionPool != nil {
        in, out := &in.EnableConnectionPool, &out.EnableConnectionPool
    if in.EnableConnectionPooler != nil {
        in, out := &in.EnableConnectionPooler, &out.EnableConnectionPooler
        *out = new(bool)
        **out = **in
    }
    if in.ConnectionPool != nil {
        in, out := &in.ConnectionPool, &out.ConnectionPool
        *out = new(ConnectionPool)
    if in.ConnectionPooler != nil {
        in, out := &in.ConnectionPooler, &out.ConnectionPooler
        *out = new(ConnectionPooler)
        (*in).DeepCopyInto(*out)
    }
    if in.SpiloFSGroup != nil {
@ -3,6 +3,7 @@ package cluster
// Postgres CustomResourceDefinition object i.e. Spilo

import (
    "context"
    "database/sql"
    "encoding/json"
    "fmt"

@ -50,14 +51,14 @@ type Config struct {
    PodServiceAccountRoleBinding *rbacv1.RoleBinding
}

// K8S objects that are belongs to a connection pool
type ConnectionPoolObjects struct {
// K8S objects that belong to a connection pooler
type ConnectionPoolerObjects struct {
    Deployment *appsv1.Deployment
    Service    *v1.Service

    // It could happen that a connection pool was enabled, but the operator was
    // not able to properly process a corresponding event or was restarted. In
    // this case we will miss missing/require situation and a lookup function
    // It could happen that a connection pooler was enabled, but the operator
    // was not able to properly process a corresponding event or was restarted.
    // In this case we would miss that a lookup function is required, and it
    // will not be installed. To avoid synchronizing it all the time, we can
    // remember the result in memory at least until the next restart.

@ -69,7 +70,7 @@ type kubeResources struct {
    Endpoints   map[PostgresRole]*v1.Endpoints
    Secrets     map[types.UID]*v1.Secret
    Statefulset *appsv1.StatefulSet
    ConnectionPool      *ConnectionPoolObjects
    ConnectionPooler    *ConnectionPoolerObjects
    PodDisruptionBudget *policybeta1.PodDisruptionBudget
    //Pods are treated separately
    //PVCs are treated separately

@ -89,7 +90,7 @@ type Cluster struct {
    pgDb             *sql.DB
    mu               sync.Mutex
    userSyncStrategy spec.UserSyncer
    deleteOptions    *metav1.DeleteOptions
    deleteOptions    metav1.DeleteOptions
    podEventsQueue   *cache.FIFO

    teamsAPIClient teams.Interface

@ -132,7 +133,7 @@ func New(cfg Config, kubeClient k8sutil.KubernetesClient, pgSpec acidv1.Postgres
            Services:  make(map[PostgresRole]*v1.Service),
            Endpoints: make(map[PostgresRole]*v1.Endpoints)},
        userSyncStrategy: users.DefaultUserSyncStrategy{},
        deleteOptions:    &metav1.DeleteOptions{PropagationPolicy: &deletePropagationPolicy},
        deleteOptions:    metav1.DeleteOptions{PropagationPolicy: &deletePropagationPolicy},
        podEventsQueue:   podEventsQueue,
        KubeClient:       kubeClient,
    }
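Many of the mechanical changes in this file follow from the client-go v0.18 API: CRUD calls now take a `context.Context` as their first argument, and `DeleteOptions` is passed by value rather than by pointer, which is why the `deleteOptions` field above loses its pointer. A minimal sketch of the new call shape, with placeholder names for illustration:

    // Deleting a service under client-go v0.18+: context first, options by value.
    policy := metav1.DeletePropagationForeground // example propagation policy
    err := kubeClient.Services(namespace).Delete(
        context.TODO(),
        serviceName,
        metav1.DeleteOptions{PropagationPolicy: &policy},
    )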

@ -183,7 +184,8 @@ func (c *Cluster) setStatus(status string) {
    // we cannot do a full scale update here without fetching the previous manifest (as the resourceVersion may differ),
    // however, we could do patch without it. In the future, once /status subresource is there (starting Kubernetes 1.11)
    // we should take advantage of it.
    newspec, err := c.KubeClient.AcidV1ClientSet.AcidV1().Postgresqls(c.clusterNamespace()).Patch(c.Name, types.MergePatchType, patch, "status")
    newspec, err := c.KubeClient.AcidV1ClientSet.AcidV1().Postgresqls(c.clusterNamespace()).Patch(
        context.TODO(), c.Name, types.MergePatchType, patch, metav1.PatchOptions{}, "status")
    if err != nil {
        c.logger.Errorf("could not update status: %v", err)
        // return as newspec is empty, see PR654

@ -343,24 +345,24 @@ func (c *Cluster) Create() error {
        c.logger.Errorf("could not list resources: %v", err)
    }

    // Create connection pool deployment and services if necessary. Since we
    // need to peform some operations with the database itself (e.g. install
    // Create connection pooler deployment and services if necessary. Since we
    // need to perform some operations with the database itself (e.g. install
    // lookup function), do it as the last step, when everything is available.
    //
    // Do not consider connection pool as a strict requirement, and if
    // Do not consider connection pooler as a strict requirement, and if
    // something fails, report warning
    if c.needConnectionPool() {
        if c.ConnectionPool != nil {
            c.logger.Warning("Connection pool already exists in the cluster")
    if c.needConnectionPooler() {
        if c.ConnectionPooler != nil {
            c.logger.Warning("Connection pooler already exists in the cluster")
            return nil
        }
        connPool, err := c.createConnectionPool(c.installLookupFunction)
        connectionPooler, err := c.createConnectionPooler(c.installLookupFunction)
        if err != nil {
            c.logger.Warningf("could not create connection pool: %v", err)
            c.logger.Warningf("could not create connection pooler: %v", err)
            return nil
        }
        c.logger.Infof("connection pool %q has been successfully created",
            util.NameFromMeta(connPool.Deployment.ObjectMeta))
        c.logger.Infof("connection pooler %q has been successfully created",
            util.NameFromMeta(connectionPooler.Deployment.ObjectMeta))
    }

    return nil

@ -618,11 +620,11 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error {
        }
    }

    // connection pool needs one system user created, which is done in
    // connection pooler needs one system user created, which is done in
    // initUsers. Check if it needs to be called.
    sameUsers := reflect.DeepEqual(oldSpec.Spec.Users, newSpec.Spec.Users)
    needConnPool := c.needConnectionPoolWorker(&newSpec.Spec)
    if !sameUsers || needConnPool {
    needConnectionPooler := c.needConnectionPoolerWorker(&newSpec.Spec)
    if !sameUsers || needConnectionPooler {
        c.logger.Debugf("syncing secrets")
        if err := c.initUsers(); err != nil {
            c.logger.Errorf("could not init users: %v", err)

@ -754,9 +756,9 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error {
        }
    }

    // sync connection pool
    if err := c.syncConnectionPool(oldSpec, newSpec, c.installLookupFunction); err != nil {
        return fmt.Errorf("could not sync connection pool: %v", err)
    // sync connection pooler
    if err := c.syncConnectionPooler(oldSpec, newSpec, c.installLookupFunction); err != nil {
        return fmt.Errorf("could not sync connection pooler: %v", err)
    }

    return nil

@ -810,11 +812,11 @@ func (c *Cluster) Delete() {
        c.logger.Warningf("could not remove leftover patroni objects; %v", err)
    }

    // Delete connection pool objects anyway, even if it's not mentioned in the
    // Delete connection pooler objects anyway, even if it's not mentioned in the
    // manifest, just to not keep orphaned components in case something went
    // wrong
    if err := c.deleteConnectionPool(); err != nil {
        c.logger.Warningf("could not remove connection pool: %v", err)
    if err := c.deleteConnectionPooler(); err != nil {
        c.logger.Warningf("could not remove connection pooler: %v", err)
    }
}

@ -883,32 +885,32 @@ func (c *Cluster) initSystemUsers() {
        Password: util.RandomPassword(constants.PasswordLength),
    }

    // Connection pool user is an exception, if requested it's going to be
    // Connection pooler user is an exception: if requested, it's going to be
    // created by the operator as a normal pgUser
    if c.needConnectionPool() {
        // initialize empty connection pool if not done yet
        if c.Spec.ConnectionPool == nil {
            c.Spec.ConnectionPool = &acidv1.ConnectionPool{}
    if c.needConnectionPooler() {
        // initialize empty connection pooler if not done yet
        if c.Spec.ConnectionPooler == nil {
            c.Spec.ConnectionPooler = &acidv1.ConnectionPooler{}
        }

        username := util.Coalesce(
            c.Spec.ConnectionPool.User,
            c.OpConfig.ConnectionPool.User)
            c.Spec.ConnectionPooler.User,
            c.OpConfig.ConnectionPooler.User)

        // connection pooler application should be able to login with this role
        connPoolUser := spec.PgUser{
            Origin:   spec.RoleConnectionPool,
        connectionPoolerUser := spec.PgUser{
            Origin:   spec.RoleConnectionPooler,
            Name:     username,
            Flags:    []string{constants.RoleFlagLogin},
            Password: util.RandomPassword(constants.PasswordLength),
        }

        if _, exists := c.pgUsers[username]; !exists {
            c.pgUsers[username] = connPoolUser
            c.pgUsers[username] = connectionPoolerUser
        }

        if _, exists := c.systemUsers[constants.ConnectionPoolUserKeyName]; !exists {
            c.systemUsers[constants.ConnectionPoolUserKeyName] = connPoolUser
        if _, exists := c.systemUsers[constants.ConnectionPoolerUserKeyName]; !exists {
            c.systemUsers[constants.ConnectionPoolerUserKeyName] = connectionPoolerUser
        }
    }
}
@ -1297,12 +1299,12 @@ func (c *Cluster) deleteClusterObject(

func (c *Cluster) deletePatroniClusterServices() error {
    get := func(name string) (spec.NamespacedName, error) {
        svc, err := c.KubeClient.Services(c.Namespace).Get(name, metav1.GetOptions{})
        svc, err := c.KubeClient.Services(c.Namespace).Get(context.TODO(), name, metav1.GetOptions{})
        return util.NameFromMeta(svc.ObjectMeta), err
    }

    deleteServiceFn := func(name string) error {
        return c.KubeClient.Services(c.Namespace).Delete(name, c.deleteOptions)
        return c.KubeClient.Services(c.Namespace).Delete(context.TODO(), name, c.deleteOptions)
    }

    return c.deleteClusterObject(get, deleteServiceFn, "service")

@ -1310,12 +1312,12 @@ func (c *Cluster) deletePatroniClusterServices() error {

func (c *Cluster) deletePatroniClusterEndpoints() error {
    get := func(name string) (spec.NamespacedName, error) {
        ep, err := c.KubeClient.Endpoints(c.Namespace).Get(name, metav1.GetOptions{})
        ep, err := c.KubeClient.Endpoints(c.Namespace).Get(context.TODO(), name, metav1.GetOptions{})
        return util.NameFromMeta(ep.ObjectMeta), err
    }

    deleteEndpointFn := func(name string) error {
        return c.KubeClient.Endpoints(c.Namespace).Delete(name, c.deleteOptions)
        return c.KubeClient.Endpoints(c.Namespace).Delete(context.TODO(), name, c.deleteOptions)
    }

    return c.deleteClusterObject(get, deleteEndpointFn, "endpoint")

@ -1323,21 +1325,21 @@ func (c *Cluster) deletePatroniClusterEndpoints() error {

func (c *Cluster) deletePatroniClusterConfigMaps() error {
    get := func(name string) (spec.NamespacedName, error) {
        cm, err := c.KubeClient.ConfigMaps(c.Namespace).Get(name, metav1.GetOptions{})
        cm, err := c.KubeClient.ConfigMaps(c.Namespace).Get(context.TODO(), name, metav1.GetOptions{})
        return util.NameFromMeta(cm.ObjectMeta), err
    }

    deleteConfigMapFn := func(name string) error {
        return c.KubeClient.ConfigMaps(c.Namespace).Delete(name, c.deleteOptions)
        return c.KubeClient.ConfigMaps(c.Namespace).Delete(context.TODO(), name, c.deleteOptions)
    }

    return c.deleteClusterObject(get, deleteConfigMapFn, "configmap")
}

// Test if two connection pool configurations need to be synced. For simplicity
// Test if two connection pooler configurations need to be synced. For simplicity
// compare not the actual K8S objects, but the configuration itself and request
// sync if there is any difference.
func (c *Cluster) needSyncConnPoolSpecs(oldSpec, newSpec *acidv1.ConnectionPool) (sync bool, reasons []string) {
func (c *Cluster) needSyncConnectionPoolerSpecs(oldSpec, newSpec *acidv1.ConnectionPooler) (sync bool, reasons []string) {
    reasons = []string{}
    sync = false
@ -1374,21 +1376,21 @@ func syncResources(a, b *v1.ResourceRequirements) bool {
    return false
}

// Check if we need to synchronize connection pool deployment due to new
// Check if we need to synchronize connection pooler deployment due to new
// defaults that are different from what we see in the DeploymentSpec
func (c *Cluster) needSyncConnPoolDefaults(
    spec *acidv1.ConnectionPool,
func (c *Cluster) needSyncConnectionPoolerDefaults(
    spec *acidv1.ConnectionPooler,
    deployment *appsv1.Deployment) (sync bool, reasons []string) {

    reasons = []string{}
    sync = false

    config := c.OpConfig.ConnectionPool
    config := c.OpConfig.ConnectionPooler
    podTemplate := deployment.Spec.Template
    poolContainer := podTemplate.Spec.Containers[constants.ConnPoolContainer]
    poolerContainer := podTemplate.Spec.Containers[constants.ConnectionPoolerContainer]

    if spec == nil {
        spec = &acidv1.ConnectionPool{}
        spec = &acidv1.ConnectionPooler{}
    }

    if spec.NumberOfInstances == nil &&

@ -1401,25 +1403,25 @@ func (c *Cluster) needSyncConnPoolDefaults(
    }

    if spec.DockerImage == "" &&
        poolContainer.Image != config.Image {
        poolerContainer.Image != config.Image {

        sync = true
        msg := fmt.Sprintf("DockerImage is different (having %s, required %s)",
            poolContainer.Image, config.Image)
            poolerContainer.Image, config.Image)
        reasons = append(reasons, msg)
    }

    expectedResources, err := generateResourceRequirements(spec.Resources,
        c.makeDefaultConnPoolResources())
        c.makeDefaultConnectionPoolerResources())

    // An error to generate expected resources means something is not quite
    // right, but for the purpose of robustness do not panic here, just report
    // and ignore resources comparison (in the worst case there will be no
    // updates for new resource values).
    if err == nil && syncResources(&poolContainer.Resources, expectedResources) {
    if err == nil && syncResources(&poolerContainer.Resources, expectedResources) {
        sync = true
        msg := fmt.Sprintf("Resources are different (having %+v, required %+v)",
            poolContainer.Resources, expectedResources)
            poolerContainer.Resources, expectedResources)
        reasons = append(reasons, msg)
    }

@ -1427,13 +1429,13 @@ func (c *Cluster) needSyncConnPoolDefaults(
        c.logger.Warningf("Cannot generate expected resources, %v", err)
    }

    for _, env := range poolContainer.Env {
    for _, env := range poolerContainer.Env {
        if spec.User == "" && env.Name == "PGUSER" {
            ref := env.ValueFrom.SecretKeyRef.LocalObjectReference

            if ref.Name != c.credentialSecretName(config.User) {
                sync = true
                msg := fmt.Sprintf("Pool user is different (having %s, required %s)",
                msg := fmt.Sprintf("pooler user is different (having %s, required %s)",
                    ref.Name, config.User)
                reasons = append(reasons, msg)
            }

@ -1441,7 +1443,7 @@ func (c *Cluster) needSyncConnPoolDefaults(

        if spec.Schema == "" && env.Name == "PGSCHEMA" && env.Value != config.Schema {
            sync = true
            msg := fmt.Sprintf("Pool schema is different (having %s, required %s)",
            msg := fmt.Sprintf("pooler schema is different (having %s, required %s)",
                env.Value, config.Schema)
            reasons = append(reasons, msg)
        }
@ -709,16 +709,16 @@ func TestServiceAnnotations(t *testing.T) {
func TestInitSystemUsers(t *testing.T) {
    testName := "Test system users initialization"

    // default cluster without connection pool
    // default cluster without connection pooler
    cl.initSystemUsers()
    if _, exist := cl.systemUsers[constants.ConnectionPoolUserKeyName]; exist {
        t.Errorf("%s, connection pool user is present", testName)
    if _, exist := cl.systemUsers[constants.ConnectionPoolerUserKeyName]; exist {
        t.Errorf("%s, connection pooler user is present", testName)
    }

    // cluster with connection pool
    cl.Spec.EnableConnectionPool = boolToPointer(true)
    // cluster with connection pooler
    cl.Spec.EnableConnectionPooler = boolToPointer(true)
    cl.initSystemUsers()
    if _, exist := cl.systemUsers[constants.ConnectionPoolUserKeyName]; !exist {
        t.Errorf("%s, connection pool user is not present", testName)
    if _, exist := cl.systemUsers[constants.ConnectionPoolerUserKeyName]; !exist {
        t.Errorf("%s, connection pooler user is not present", testName)
    }
}
@ -56,10 +56,10 @@ const (
    ALTER DEFAULT PRIVILEGES IN SCHEMA "%s" GRANT EXECUTE ON FUNCTIONS TO "%s","%s";
    ALTER DEFAULT PRIVILEGES IN SCHEMA "%s" GRANT USAGE ON TYPES TO "%s","%s";`

    connectionPoolLookup = `
    CREATE SCHEMA IF NOT EXISTS {{.pool_schema}};
    connectionPoolerLookup = `
    CREATE SCHEMA IF NOT EXISTS {{.pooler_schema}};

    CREATE OR REPLACE FUNCTION {{.pool_schema}}.user_lookup(
    CREATE OR REPLACE FUNCTION {{.pooler_schema}}.user_lookup(
        in i_username text, out uname text, out phash text)
    RETURNS record AS $$
    BEGIN

@ -69,11 +69,12 @@ const (
    END;
    $$ LANGUAGE plpgsql SECURITY DEFINER;

    REVOKE ALL ON FUNCTION {{.pool_schema}}.user_lookup(text)
        FROM public, {{.pool_user}};
    GRANT EXECUTE ON FUNCTION {{.pool_schema}}.user_lookup(text)
        TO {{.pool_user}};
    GRANT USAGE ON SCHEMA {{.pool_schema}} TO {{.pool_user}}`
    REVOKE ALL ON FUNCTION {{.pooler_schema}}.user_lookup(text)
        FROM public, {{.pooler_user}};
    GRANT EXECUTE ON FUNCTION {{.pooler_schema}}.user_lookup(text)
        TO {{.pooler_user}};
    GRANT USAGE ON SCHEMA {{.pooler_schema}} TO {{.pooler_user}};
    `
)
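To make the template concrete: rendered with the default names (schema and user both "pooler", per the operator defaults earlier in this diff), it creates the schema, the SECURITY DEFINER user_lookup function, and the grants. A standard-library illustration of the rendering step, using hypothetical parameter values (the real code uses its own TemplateParams type):

    // Render the lookup-function template the way installLookupFunction does.
    var buf bytes.Buffer
    tmpl := template.Must(template.New("sql").Parse(connectionPoolerLookup))
    params := map[string]string{
        "pooler_schema": "pooler", // assumed default schema
        "pooler_user":   "pooler", // assumed default user
    }
    if err := tmpl.Execute(&buf, params); err != nil {
        log.Fatalf("could not render lookup SQL: %v", err)
    }
    fmt.Println(buf.String()) // e.g. "CREATE SCHEMA IF NOT EXISTS pooler; ..."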

func (c *Cluster) pgConnectionString(dbname string) string {

@ -462,7 +463,7 @@ func (c *Cluster) execCreateOrAlterExtension(extName, schemaName, statement, doi

// Creates a connection pooler credentials lookup function in every database to
// perform remote authentication.
func (c *Cluster) installLookupFunction(poolSchema, poolUser string) error {
func (c *Cluster) installLookupFunction(poolerSchema, poolerUser string) error {
    var stmtBytes bytes.Buffer
    c.logger.Info("Installing lookup function")

@ -481,11 +482,11 @@ func (c *Cluster) installLookupFunction(poolSchema, poolUser string) error {

    currentDatabases, err := c.getDatabases()
    if err != nil {
        msg := "could not get databases to install pool lookup function: %v"
        msg := "could not get databases to install pooler lookup function: %v"
        return fmt.Errorf(msg, err)
    }

    templater := template.Must(template.New("sql").Parse(connectionPoolLookup))
    templater := template.Must(template.New("sql").Parse(connectionPoolerLookup))

    for dbname := range currentDatabases {
        if dbname == "template0" || dbname == "template1" {

@ -496,11 +497,11 @@ func (c *Cluster) installLookupFunction(poolSchema, poolUser string) error {
            return fmt.Errorf("could not init database connection to %s", dbname)
        }

        c.logger.Infof("Install pool lookup function into %s", dbname)
        c.logger.Infof("Install pooler lookup function into %s", dbname)

        params := TemplateParams{
            "pool_schema": poolSchema,
            "pool_user":   poolUser,
            "pooler_schema": poolerSchema,
            "pooler_user":   poolerUser,
        }

        if err := templater.Execute(&stmtBytes, params); err != nil {

@ -535,12 +536,12 @@ func (c *Cluster) installLookupFunction(poolSchema, poolUser string) error {
            continue
        }

        c.logger.Infof("Pool lookup function installed into %s", dbname)
        c.logger.Infof("pooler lookup function installed into %s", dbname)
        if err := c.closeDbConn(); err != nil {
            c.logger.Errorf("could not close database connection: %v", err)
        }
    }

    c.ConnectionPool.LookupFunction = true
    c.ConnectionPooler.LookupFunction = true
    return nil
}
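A note on what the lookup function is for, since the hunks above only show the plumbing: user_lookup follows the usual PgBouncer auth_query pattern. PgBouncer connects as the pooler role and calls the function to fetch a role's name and password hash (the function body, elided from this diff, presumably reads pg_shadow or pg_authid), so client credentials can be verified without maintaining a static auth_file. The SECURITY DEFINER clause is what lets the otherwise unprivileged pooler role read the hashes.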

@ -2,10 +2,11 @@ package cluster

import (
    "bytes"
    "context"
    "fmt"
    "strings"

    "k8s.io/api/core/v1"
    v1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/kubernetes/scheme"
    "k8s.io/client-go/tools/remotecommand"

@ -23,7 +24,7 @@ func (c *Cluster) ExecCommand(podName *spec.NamespacedName, command ...string) (
        execErr bytes.Buffer
    )

    pod, err := c.KubeClient.Pods(podName.Namespace).Get(podName.Name, metav1.GetOptions{})
    pod, err := c.KubeClient.Pods(podName.Namespace).Get(context.TODO(), podName.Name, metav1.GetOptions{})
    if err != nil {
        return "", fmt.Errorf("could not get pod info: %v", err)
    }
@ -1,6 +1,7 @@
package cluster

import (
    "context"
    "encoding/json"
    "fmt"
    "path"

@ -34,11 +35,8 @@ const (
    patroniPGParametersParameterName = "parameters"
    patroniPGHBAConfParameterName    = "pg_hba"
    localHost                        = "127.0.0.1/32"
    connectionPoolContainer          = "connection-pool"
    connectionPoolerContainer        = "connection-pooler"
    pgPort                           = 5432

    // the gid of the postgres user in the default spilo image
    spiloPostgresGID = 103
)

type pgUser struct {

@ -74,7 +72,7 @@ func (c *Cluster) statefulSetName() string {
    return c.Name
}

func (c *Cluster) connPoolName() string {
func (c *Cluster) connectionPoolerName() string {
    return c.Name + "-pooler"
}

@ -141,18 +139,18 @@ func (c *Cluster) makeDefaultResources() acidv1.Resources {
    }
}

// Generate default resource section for connection pool deployment, to be used
// if nothing custom is specified in the manifest
func (c *Cluster) makeDefaultConnPoolResources() acidv1.Resources {
// Generate default resource section for connection pooler deployment, to be
// used if nothing custom is specified in the manifest
func (c *Cluster) makeDefaultConnectionPoolerResources() acidv1.Resources {
    config := c.OpConfig

    defaultRequests := acidv1.ResourceDescription{
        CPU:    config.ConnectionPool.ConnPoolDefaultCPURequest,
        Memory: config.ConnectionPool.ConnPoolDefaultMemoryRequest,
        CPU:    config.ConnectionPooler.ConnectionPoolerDefaultCPURequest,
        Memory: config.ConnectionPooler.ConnectionPoolerDefaultMemoryRequest,
    }
    defaultLimits := acidv1.ResourceDescription{
        CPU:    config.ConnectionPool.ConnPoolDefaultCPULimit,
        Memory: config.ConnectionPool.ConnPoolDefaultMemoryLimit,
        CPU:    config.ConnectionPooler.ConnectionPoolerDefaultCPULimit,
        Memory: config.ConnectionPooler.ConnectionPoolerDefaultMemoryLimit,
    }

    return acidv1.Resources{

@ -674,6 +672,10 @@ func (c *Cluster) generateSpiloPodEnvVars(uid types.UID, spiloConfiguration stri
        envVars = append(envVars, v1.EnvVar{Name: "ETCD_HOST", Value: c.OpConfig.EtcdHost})
    }

    if c.patroniKubernetesUseConfigMaps() {
        envVars = append(envVars, v1.EnvVar{Name: "KUBERNETES_USE_CONFIGMAPS", Value: "true"})
    }
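The helper used in that condition is not part of any hunk here, so the following is only a plausible reconstruction (assumed shape; the real implementation may also consult per-cluster Patroni settings):

    // patroniKubernetesUseConfigMaps, sketched: surface the operator-level
    // kubernetes_use_configmaps flag introduced earlier in this diff.
    func (c *Cluster) patroniKubernetesUseConfigMaps() bool {
        return c.OpConfig.KubernetesUseConfigMaps
    }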
|
||||
|
||||
if cloneDescription.ClusterName != "" {
|
||||
envVars = append(envVars, c.generateCloneEnvironment(cloneDescription)...)
|
||||
}
|
||||
|
|
@@ -914,11 +916,17 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef

    if c.OpConfig.PodEnvironmentConfigMap != (pkgspec.NamespacedName{}) {
        var cm *v1.ConfigMap
        cm, err = c.KubeClient.ConfigMaps(c.OpConfig.PodEnvironmentConfigMap.Namespace).Get(c.OpConfig.PodEnvironmentConfigMap.Name, metav1.GetOptions{})
        cm, err = c.KubeClient.ConfigMaps(c.OpConfig.PodEnvironmentConfigMap.Namespace).Get(
            context.TODO(),
            c.OpConfig.PodEnvironmentConfigMap.Name,
            metav1.GetOptions{})
        if err != nil {
            // if not found, try again using the cluster's namespace if it's different (old behavior)
            if k8sutil.ResourceNotFound(err) && c.Namespace != c.OpConfig.PodEnvironmentConfigMap.Namespace {
                cm, err = c.KubeClient.ConfigMaps(c.Namespace).Get(c.OpConfig.PodEnvironmentConfigMap.Name, metav1.GetOptions{})
                cm, err = c.KubeClient.ConfigMaps(c.Namespace).Get(
                    context.TODO(),
                    c.OpConfig.PodEnvironmentConfigMap.Name,
                    metav1.GetOptions{})
            }
            if err != nil {
                return nil, fmt.Errorf("could not read PodEnvironmentConfigMap: %v", err)
@@ -983,13 +991,8 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef

    // configure TLS with a custom secret volume
    if spec.TLS != nil && spec.TLS.SecretName != "" {
        if effectiveFSGroup == nil {
            c.logger.Warnf("Setting the default FSGroup to satisfy the TLS configuration")
            fsGroup := int64(spiloPostgresGID)
            effectiveFSGroup = &fsGroup
        }
        // this is combined with the FSGroup above to give read access to the
        // postgres user
        // this is combined with the FSGroup in the section above
        // to give read access to the postgres user
        defaultMode := int32(0640)
        volumes = append(volumes, v1.Volume{
            Name: "tls-secret",
@@ -1414,7 +1417,7 @@ func (c *Cluster) generateService(role PostgresRole, spec *acidv1.PostgresSpec)
        Type: v1.ServiceTypeClusterIP,
    }

    if role == Replica {
    if role == Replica || c.patroniKubernetesUseConfigMaps() {
        serviceSpec.Selector = c.roleLabelsSet(false, role)
    }
@@ -1859,33 +1862,33 @@ func (c *Cluster) getLogicalBackupJobName() (jobName string) {
//
// DEFAULT_SIZE is a pool size per db/user (having in mind the use case when
// most of the queries coming through a connection pooler are from the same
// user to the same db). In case we want to spin up more connection pool
// user to the same db). In case we want to spin up more connection pooler
// instances, take this into account and maintain the same number of
// connections.
//
// MIN_SIZE is a pool minimal size, to prevent situation when sudden workload
// MIN_SIZE is a pool's minimal size, to prevent a situation when a sudden workload
// has to wait for new connections to be spun up.
//
// RESERVE_SIZE is how many additional connections to allow for a pool.
func (c *Cluster) getConnPoolEnvVars(spec *acidv1.PostgresSpec) []v1.EnvVar {
// RESERVE_SIZE is how many additional connections to allow for a pooler.
func (c *Cluster) getConnectionPoolerEnvVars(spec *acidv1.PostgresSpec) []v1.EnvVar {
    effectiveMode := util.Coalesce(
        spec.ConnectionPool.Mode,
        c.OpConfig.ConnectionPool.Mode)
        spec.ConnectionPooler.Mode,
        c.OpConfig.ConnectionPooler.Mode)

    numberOfInstances := spec.ConnectionPool.NumberOfInstances
    numberOfInstances := spec.ConnectionPooler.NumberOfInstances
    if numberOfInstances == nil {
        numberOfInstances = util.CoalesceInt32(
            c.OpConfig.ConnectionPool.NumberOfInstances,
            c.OpConfig.ConnectionPooler.NumberOfInstances,
            k8sutil.Int32ToPointer(1))
    }

    effectiveMaxDBConn := util.CoalesceInt32(
        spec.ConnectionPool.MaxDBConnections,
        c.OpConfig.ConnectionPool.MaxDBConnections)
        spec.ConnectionPooler.MaxDBConnections,
        c.OpConfig.ConnectionPooler.MaxDBConnections)

    if effectiveMaxDBConn == nil {
        effectiveMaxDBConn = k8sutil.Int32ToPointer(
            constants.ConnPoolMaxDBConnections)
            constants.ConnectionPoolerMaxDBConnections)
    }

    maxDBConn := *effectiveMaxDBConn / *numberOfInstances
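For orientation, the sizing variables these hunks rename end in a simple division: the effective MAX_DB_CONN budget is split evenly across pooler instances, so scaling the pooler out does not multiply the server-side connections. A worked sketch of that arithmetic, with values assumed purely for illustration (60 connections and 2 instances, as in the test fixtures further down):

package main

import "fmt"

func main() {
    // Assumed inputs: MaxDBConnections after coalescing manifest and
    // operator config, and the number of pooler instances.
    effectiveMaxDBConn := int32(60)
    numberOfInstances := int32(2)

    // Mirrors `maxDBConn := *effectiveMaxDBConn / *numberOfInstances`
    // above: each pooler instance gets an equal share of the budget.
    maxDBConn := effectiveMaxDBConn / numberOfInstances
    fmt.Println(maxDBConn) // 30 server connections per instance
}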
@@ -1896,51 +1899,51 @@ func (c *Cluster) getConnPoolEnvVars(spec *acidv1.PostgresSpec) []v1.EnvVar {

    return []v1.EnvVar{
        {
            Name: "CONNECTION_POOL_PORT",
            Name: "CONNECTION_POOLER_PORT",
            Value: fmt.Sprint(pgPort),
        },
        {
            Name: "CONNECTION_POOL_MODE",
            Name: "CONNECTION_POOLER_MODE",
            Value: effectiveMode,
        },
        {
            Name: "CONNECTION_POOL_DEFAULT_SIZE",
            Name: "CONNECTION_POOLER_DEFAULT_SIZE",
            Value: fmt.Sprint(defaultSize),
        },
        {
            Name: "CONNECTION_POOL_MIN_SIZE",
            Name: "CONNECTION_POOLER_MIN_SIZE",
            Value: fmt.Sprint(minSize),
        },
        {
            Name: "CONNECTION_POOL_RESERVE_SIZE",
            Name: "CONNECTION_POOLER_RESERVE_SIZE",
            Value: fmt.Sprint(reserveSize),
        },
        {
            Name: "CONNECTION_POOL_MAX_CLIENT_CONN",
            Value: fmt.Sprint(constants.ConnPoolMaxClientConnections),
            Name: "CONNECTION_POOLER_MAX_CLIENT_CONN",
            Value: fmt.Sprint(constants.ConnectionPoolerMaxClientConnections),
        },
        {
            Name: "CONNECTION_POOL_MAX_DB_CONN",
            Name: "CONNECTION_POOLER_MAX_DB_CONN",
            Value: fmt.Sprint(maxDBConn),
        },
    }
}

func (c *Cluster) generateConnPoolPodTemplate(spec *acidv1.PostgresSpec) (
func (c *Cluster) generateConnectionPoolerPodTemplate(spec *acidv1.PostgresSpec) (
    *v1.PodTemplateSpec, error) {

    gracePeriod := int64(c.OpConfig.PodTerminateGracePeriod.Seconds())
    resources, err := generateResourceRequirements(
        spec.ConnectionPool.Resources,
        c.makeDefaultConnPoolResources())
        spec.ConnectionPooler.Resources,
        c.makeDefaultConnectionPoolerResources())

    effectiveDockerImage := util.Coalesce(
        spec.ConnectionPool.DockerImage,
        c.OpConfig.ConnectionPool.Image)
        spec.ConnectionPooler.DockerImage,
        c.OpConfig.ConnectionPooler.Image)

    effectiveSchema := util.Coalesce(
        spec.ConnectionPool.Schema,
        c.OpConfig.ConnectionPool.Schema)
        spec.ConnectionPooler.Schema,
        c.OpConfig.ConnectionPooler.Schema)

    if err != nil {
        return nil, fmt.Errorf("could not generate resource requirements: %v", err)
@@ -1948,8 +1951,8 @@ func (c *Cluster) generateConnPoolPodTemplate(spec *acidv1.PostgresSpec) (

    secretSelector := func(key string) *v1.SecretKeySelector {
        effectiveUser := util.Coalesce(
            spec.ConnectionPool.User,
            c.OpConfig.ConnectionPool.User)
            spec.ConnectionPooler.User,
            c.OpConfig.ConnectionPooler.User)

        return &v1.SecretKeySelector{
            LocalObjectReference: v1.LocalObjectReference{

@@ -1975,7 +1978,7 @@ func (c *Cluster) generateConnPoolPodTemplate(spec *acidv1.PostgresSpec) (
            },
        },
        // the convention is to use the same schema name as
        // connection pool username
        // connection pooler username
        {
            Name: "PGSCHEMA",
            Value: effectiveSchema,

@@ -1988,10 +1991,10 @@ func (c *Cluster) generateConnPoolPodTemplate(spec *acidv1.PostgresSpec) (
        },
    }

    envVars = append(envVars, c.getConnPoolEnvVars(spec)...)
    envVars = append(envVars, c.getConnectionPoolerEnvVars(spec)...)

    poolerContainer := v1.Container{
        Name: connectionPoolContainer,
        Name: connectionPoolerContainer,
        Image: effectiveDockerImage,
        ImagePullPolicy: v1.PullIfNotPresent,
        Resources: *resources,

@@ -2006,7 +2009,7 @@ func (c *Cluster) generateConnPoolPodTemplate(spec *acidv1.PostgresSpec) (

    podTemplate := &v1.PodTemplateSpec{
        ObjectMeta: metav1.ObjectMeta{
            Labels: c.connPoolLabelsSelector().MatchLabels,
            Labels: c.connectionPoolerLabelsSelector().MatchLabels,
            Namespace: c.Namespace,
            Annotations: c.generatePodAnnotations(spec),
        },
@@ -2048,32 +2051,32 @@ func (c *Cluster) ownerReferences() []metav1.OwnerReference {
    }
}

func (c *Cluster) generateConnPoolDeployment(spec *acidv1.PostgresSpec) (
func (c *Cluster) generateConnectionPoolerDeployment(spec *acidv1.PostgresSpec) (
    *appsv1.Deployment, error) {

    // there are two ways to enable connection pooler, either to specify a
    // connectionPool section or enableConnectionPool. In the second case
    // spec.connectionPool will be nil, so to make it easier to calculate
    // connectionPooler section or enableConnectionPooler. In the second case
    // spec.connectionPooler will be nil, so to make it easier to calculate
    // default values, initialize it to an empty structure. It could be done
    // anywhere, but here is the earliest common entry point between sync and
    // create code, so init here.
    if spec.ConnectionPool == nil {
        spec.ConnectionPool = &acidv1.ConnectionPool{}
    if spec.ConnectionPooler == nil {
        spec.ConnectionPooler = &acidv1.ConnectionPooler{}
    }

    podTemplate, err := c.generateConnPoolPodTemplate(spec)
    numberOfInstances := spec.ConnectionPool.NumberOfInstances
    podTemplate, err := c.generateConnectionPoolerPodTemplate(spec)
    numberOfInstances := spec.ConnectionPooler.NumberOfInstances
    if numberOfInstances == nil {
        numberOfInstances = util.CoalesceInt32(
            c.OpConfig.ConnectionPool.NumberOfInstances,
            c.OpConfig.ConnectionPooler.NumberOfInstances,
            k8sutil.Int32ToPointer(1))
    }

    if *numberOfInstances < constants.ConnPoolMinInstances {
        msg := "Adjusted number of connection pool instances from %d to %d"
        c.logger.Warningf(msg, numberOfInstances, constants.ConnPoolMinInstances)
    if *numberOfInstances < constants.ConnectionPoolerMinInstances {
        msg := "Adjusted number of connection pooler instances from %d to %d"
        c.logger.Warningf(msg, numberOfInstances, constants.ConnectionPoolerMinInstances)

        *numberOfInstances = constants.ConnPoolMinInstances
        *numberOfInstances = constants.ConnectionPoolerMinInstances
    }

    if err != nil {

@@ -2082,9 +2085,9 @@ func (c *Cluster) generateConnPoolDeployment(spec *acidv1.PostgresSpec) (

    deployment := &appsv1.Deployment{
        ObjectMeta: metav1.ObjectMeta{
            Name: c.connPoolName(),
            Name: c.connectionPoolerName(),
            Namespace: c.Namespace,
            Labels: c.connPoolLabelsSelector().MatchLabels,
            Labels: c.connectionPoolerLabelsSelector().MatchLabels,
            Annotations: map[string]string{},
            // make StatefulSet object its owner to represent the dependency.
            // By itself StatefulSet is being deleted with "Orphaned"

@@ -2096,7 +2099,7 @@ func (c *Cluster) generateConnPoolDeployment(spec *acidv1.PostgresSpec) (
        },
        Spec: appsv1.DeploymentSpec{
            Replicas: numberOfInstances,
            Selector: c.connPoolLabelsSelector(),
            Selector: c.connectionPoolerLabelsSelector(),
            Template: *podTemplate,
        },
    }

@@ -2104,37 +2107,37 @@ func (c *Cluster) generateConnPoolDeployment(spec *acidv1.PostgresSpec) (
    return deployment, nil
}

func (c *Cluster) generateConnPoolService(spec *acidv1.PostgresSpec) *v1.Service {
func (c *Cluster) generateConnectionPoolerService(spec *acidv1.PostgresSpec) *v1.Service {

    // there are two ways to enable connection pooler, either to specify a
    // connectionPool section or enableConnectionPool. In the second case
    // spec.connectionPool will be nil, so to make it easier to calculate
    // connectionPooler section or enableConnectionPooler. In the second case
    // spec.connectionPooler will be nil, so to make it easier to calculate
    // default values, initialize it to an empty structure. It could be done
    // anywhere, but here is the earliest common entry point between sync and
    // create code, so init here.
    if spec.ConnectionPool == nil {
        spec.ConnectionPool = &acidv1.ConnectionPool{}
    if spec.ConnectionPooler == nil {
        spec.ConnectionPooler = &acidv1.ConnectionPooler{}
    }

    serviceSpec := v1.ServiceSpec{
        Ports: []v1.ServicePort{
            {
                Name: c.connPoolName(),
                Name: c.connectionPoolerName(),
                Port: pgPort,
                TargetPort: intstr.IntOrString{StrVal: c.servicePort(Master)},
            },
        },
        Type: v1.ServiceTypeClusterIP,
        Selector: map[string]string{
            "connection-pool": c.connPoolName(),
            "connection-pooler": c.connectionPoolerName(),
        },
    }

    service := &v1.Service{
        ObjectMeta: metav1.ObjectMeta{
            Name: c.connPoolName(),
            Name: c.connectionPoolerName(),
            Namespace: c.Namespace,
            Labels: c.connPoolLabelsSelector().MatchLabels,
            Labels: c.connectionPoolerLabelsSelector().MatchLabels,
            Annotations: map[string]string{},
            // make StatefulSet object its owner to represent the dependency.
            // By itself StatefulSet is being deleted with "Orphaned"
@@ -587,38 +587,38 @@ func TestSecretVolume(t *testing.T) {

func testResources(cluster *Cluster, podSpec *v1.PodTemplateSpec) error {
    cpuReq := podSpec.Spec.Containers[0].Resources.Requests["cpu"]
    if cpuReq.String() != cluster.OpConfig.ConnectionPool.ConnPoolDefaultCPURequest {
    if cpuReq.String() != cluster.OpConfig.ConnectionPooler.ConnectionPoolerDefaultCPURequest {
        return fmt.Errorf("CPU request doesn't match, got %s, expected %s",
            cpuReq.String(), cluster.OpConfig.ConnectionPool.ConnPoolDefaultCPURequest)
            cpuReq.String(), cluster.OpConfig.ConnectionPooler.ConnectionPoolerDefaultCPURequest)
    }

    memReq := podSpec.Spec.Containers[0].Resources.Requests["memory"]
    if memReq.String() != cluster.OpConfig.ConnectionPool.ConnPoolDefaultMemoryRequest {
    if memReq.String() != cluster.OpConfig.ConnectionPooler.ConnectionPoolerDefaultMemoryRequest {
        return fmt.Errorf("Memory request doesn't match, got %s, expected %s",
            memReq.String(), cluster.OpConfig.ConnectionPool.ConnPoolDefaultMemoryRequest)
            memReq.String(), cluster.OpConfig.ConnectionPooler.ConnectionPoolerDefaultMemoryRequest)
    }

    cpuLim := podSpec.Spec.Containers[0].Resources.Limits["cpu"]
    if cpuLim.String() != cluster.OpConfig.ConnectionPool.ConnPoolDefaultCPULimit {
    if cpuLim.String() != cluster.OpConfig.ConnectionPooler.ConnectionPoolerDefaultCPULimit {
        return fmt.Errorf("CPU limit doesn't match, got %s, expected %s",
            cpuLim.String(), cluster.OpConfig.ConnectionPool.ConnPoolDefaultCPULimit)
            cpuLim.String(), cluster.OpConfig.ConnectionPooler.ConnectionPoolerDefaultCPULimit)
    }

    memLim := podSpec.Spec.Containers[0].Resources.Limits["memory"]
    if memLim.String() != cluster.OpConfig.ConnectionPool.ConnPoolDefaultMemoryLimit {
    if memLim.String() != cluster.OpConfig.ConnectionPooler.ConnectionPoolerDefaultMemoryLimit {
        return fmt.Errorf("Memory limit doesn't match, got %s, expected %s",
            memLim.String(), cluster.OpConfig.ConnectionPool.ConnPoolDefaultMemoryLimit)
            memLim.String(), cluster.OpConfig.ConnectionPooler.ConnectionPoolerDefaultMemoryLimit)
    }

    return nil
}

func testLabels(cluster *Cluster, podSpec *v1.PodTemplateSpec) error {
    poolLabels := podSpec.ObjectMeta.Labels["connection-pool"]
    poolerLabels := podSpec.ObjectMeta.Labels["connection-pooler"]

    if poolLabels != cluster.connPoolLabelsSelector().MatchLabels["connection-pool"] {
    if poolerLabels != cluster.connectionPoolerLabelsSelector().MatchLabels["connection-pooler"] {
        return fmt.Errorf("Pod labels do not match, got %+v, expected %+v",
            podSpec.ObjectMeta.Labels, cluster.connPoolLabelsSelector().MatchLabels)
            podSpec.ObjectMeta.Labels, cluster.connectionPoolerLabelsSelector().MatchLabels)
    }

    return nil
@@ -626,13 +626,13 @@ func testLabels(cluster *Cluster, podSpec *v1.PodTemplateSpec) error {

func testEnvs(cluster *Cluster, podSpec *v1.PodTemplateSpec) error {
    required := map[string]bool{
        "PGHOST": false,
        "PGPORT": false,
        "PGUSER": false,
        "PGSCHEMA": false,
        "PGPASSWORD": false,
        "CONNECTION_POOL_MODE": false,
        "CONNECTION_POOL_PORT": false,
        "CONNECTION_POOLER_MODE": false,
        "CONNECTION_POOLER_PORT": false,
    }

    envs := podSpec.Spec.Containers[0].Env
@@ -658,8 +658,8 @@ func testCustomPodTemplate(cluster *Cluster, podSpec *v1.PodTemplateSpec) error
    return nil
}

func TestConnPoolPodSpec(t *testing.T) {
    testName := "Test connection pool pod template generation"
func TestConnectionPoolerPodSpec(t *testing.T) {
    testName := "Test connection pooler pod template generation"
    var cluster = New(
        Config{
            OpConfig: config.Config{

@@ -668,12 +668,12 @@ func TestConnPoolPodSpec(t *testing.T) {
                SuperUsername: superUserName,
                ReplicationUsername: replicationUserName,
            },
            ConnectionPool: config.ConnectionPool{
                MaxDBConnections: int32ToPointer(60),
                ConnPoolDefaultCPURequest: "100m",
                ConnPoolDefaultCPULimit: "100m",
                ConnPoolDefaultMemoryRequest: "100Mi",
                ConnPoolDefaultMemoryLimit: "100Mi",
            ConnectionPooler: config.ConnectionPooler{
                MaxDBConnections: int32ToPointer(60),
                ConnectionPoolerDefaultCPURequest: "100m",
                ConnectionPoolerDefaultCPULimit: "100m",
                ConnectionPoolerDefaultMemoryRequest: "100Mi",
                ConnectionPoolerDefaultMemoryLimit: "100Mi",
            },
        },
    }, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger)

@@ -686,7 +686,7 @@ func TestConnPoolPodSpec(t *testing.T) {
                SuperUsername: superUserName,
                ReplicationUsername: replicationUserName,
            },
            ConnectionPool: config.ConnectionPool{},
            ConnectionPooler: config.ConnectionPooler{},
        },
    }, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger)
@@ -702,7 +702,7 @@ func TestConnPoolPodSpec(t *testing.T) {
        {
            subTest: "default configuration",
            spec: &acidv1.PostgresSpec{
                ConnectionPool: &acidv1.ConnectionPool{},
                ConnectionPooler: &acidv1.ConnectionPooler{},
            },
            expected: nil,
            cluster: cluster,

@@ -711,7 +711,7 @@ func TestConnPoolPodSpec(t *testing.T) {
        {
            subTest: "no default resources",
            spec: &acidv1.PostgresSpec{
                ConnectionPool: &acidv1.ConnectionPool{},
                ConnectionPooler: &acidv1.ConnectionPooler{},
            },
            expected: errors.New(`could not generate resource requirements: could not fill resource requests: could not parse default CPU quantity: quantities must match the regular expression '^([+-]?[0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$'`),
            cluster: clusterNoDefaultRes,

@@ -720,7 +720,7 @@ func TestConnPoolPodSpec(t *testing.T) {
        {
            subTest: "default resources are set",
            spec: &acidv1.PostgresSpec{
                ConnectionPool: &acidv1.ConnectionPool{},
                ConnectionPooler: &acidv1.ConnectionPooler{},
            },
            expected: nil,
            cluster: cluster,

@@ -729,7 +729,7 @@ func TestConnPoolPodSpec(t *testing.T) {
        {
            subTest: "labels for service",
            spec: &acidv1.PostgresSpec{
                ConnectionPool: &acidv1.ConnectionPool{},
                ConnectionPooler: &acidv1.ConnectionPooler{},
            },
            expected: nil,
            cluster: cluster,

@@ -738,7 +738,7 @@ func TestConnPoolPodSpec(t *testing.T) {
        {
            subTest: "required envs",
            spec: &acidv1.PostgresSpec{
                ConnectionPool: &acidv1.ConnectionPool{},
                ConnectionPooler: &acidv1.ConnectionPooler{},
            },
            expected: nil,
            cluster: cluster,

@@ -746,7 +746,7 @@ func TestConnPoolPodSpec(t *testing.T) {
        },
    }
    for _, tt := range tests {
        podSpec, err := tt.cluster.generateConnPoolPodTemplate(tt.spec)
        podSpec, err := tt.cluster.generateConnectionPoolerPodTemplate(tt.spec)

        if err != tt.expected && err.Error() != tt.expected.Error() {
            t.Errorf("%s [%s]: Could not generate pod template,\n %+v, expected\n %+v",
@@ -774,9 +774,9 @@ func testDeploymentOwnwerReference(cluster *Cluster, deployment *appsv1.Deployme

func testSelector(cluster *Cluster, deployment *appsv1.Deployment) error {
    labels := deployment.Spec.Selector.MatchLabels
    expected := cluster.connPoolLabelsSelector().MatchLabels
    expected := cluster.connectionPoolerLabelsSelector().MatchLabels

    if labels["connection-pool"] != expected["connection-pool"] {
    if labels["connection-pooler"] != expected["connection-pooler"] {
        return fmt.Errorf("Labels are incorrect, got %+v, expected %+v",
            labels, expected)
    }

@@ -784,8 +784,8 @@ func testSelector(cluster *Cluster, deployment *appsv1.Deployment) error {
    return nil
}

func TestConnPoolDeploymentSpec(t *testing.T) {
    testName := "Test connection pool deployment spec generation"
func TestConnectionPoolerDeploymentSpec(t *testing.T) {
    testName := "Test connection pooler deployment spec generation"
    var cluster = New(
        Config{
            OpConfig: config.Config{

@@ -794,11 +794,11 @@ func TestConnPoolDeploymentSpec(t *testing.T) {
                SuperUsername: superUserName,
                ReplicationUsername: replicationUserName,
            },
            ConnectionPool: config.ConnectionPool{
                ConnPoolDefaultCPURequest: "100m",
                ConnPoolDefaultCPULimit: "100m",
                ConnPoolDefaultMemoryRequest: "100Mi",
                ConnPoolDefaultMemoryLimit: "100Mi",
            ConnectionPooler: config.ConnectionPooler{
                ConnectionPoolerDefaultCPURequest: "100m",
                ConnectionPoolerDefaultCPULimit: "100m",
                ConnectionPoolerDefaultMemoryRequest: "100Mi",
                ConnectionPoolerDefaultMemoryLimit: "100Mi",
            },
        },
    }, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger)

@@ -822,7 +822,7 @@ func TestConnPoolDeploymentSpec(t *testing.T) {
        {
            subTest: "default configuration",
            spec: &acidv1.PostgresSpec{
                ConnectionPool: &acidv1.ConnectionPool{},
                ConnectionPooler: &acidv1.ConnectionPooler{},
            },
            expected: nil,
            cluster: cluster,

@@ -831,7 +831,7 @@ func TestConnPoolDeploymentSpec(t *testing.T) {
        {
            subTest: "owner reference",
            spec: &acidv1.PostgresSpec{
                ConnectionPool: &acidv1.ConnectionPool{},
                ConnectionPooler: &acidv1.ConnectionPooler{},
            },
            expected: nil,
            cluster: cluster,

@@ -840,7 +840,7 @@ func TestConnPoolDeploymentSpec(t *testing.T) {
        {
            subTest: "selector",
            spec: &acidv1.PostgresSpec{
                ConnectionPool: &acidv1.ConnectionPool{},
                ConnectionPooler: &acidv1.ConnectionPooler{},
            },
            expected: nil,
            cluster: cluster,

@@ -848,7 +848,7 @@ func TestConnPoolDeploymentSpec(t *testing.T) {
        },
    }
    for _, tt := range tests {
        deployment, err := tt.cluster.generateConnPoolDeployment(tt.spec)
        deployment, err := tt.cluster.generateConnectionPoolerDeployment(tt.spec)

        if err != tt.expected && err.Error() != tt.expected.Error() {
            t.Errorf("%s [%s]: Could not generate deployment spec,\n %+v, expected\n %+v",
@@ -877,16 +877,16 @@ func testServiceOwnwerReference(cluster *Cluster, service *v1.Service) error {
func testServiceSelector(cluster *Cluster, service *v1.Service) error {
    selector := service.Spec.Selector

    if selector["connection-pool"] != cluster.connPoolName() {
    if selector["connection-pooler"] != cluster.connectionPoolerName() {
        return fmt.Errorf("Selector is incorrect, got %s, expected %s",
            selector["connection-pool"], cluster.connPoolName())
            selector["connection-pooler"], cluster.connectionPoolerName())
    }

    return nil
}

func TestConnPoolServiceSpec(t *testing.T) {
    testName := "Test connection pool service spec generation"
func TestConnectionPoolerServiceSpec(t *testing.T) {
    testName := "Test connection pooler service spec generation"
    var cluster = New(
        Config{
            OpConfig: config.Config{

@@ -895,11 +895,11 @@ func TestConnPoolServiceSpec(t *testing.T) {
                SuperUsername: superUserName,
                ReplicationUsername: replicationUserName,
            },
            ConnectionPool: config.ConnectionPool{
                ConnPoolDefaultCPURequest: "100m",
                ConnPoolDefaultCPULimit: "100m",
                ConnPoolDefaultMemoryRequest: "100Mi",
                ConnPoolDefaultMemoryLimit: "100Mi",
            ConnectionPooler: config.ConnectionPooler{
                ConnectionPoolerDefaultCPURequest: "100m",
                ConnectionPoolerDefaultCPULimit: "100m",
                ConnectionPoolerDefaultMemoryRequest: "100Mi",
                ConnectionPoolerDefaultMemoryLimit: "100Mi",
            },
        },
    }, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger)

@@ -922,7 +922,7 @@ func TestConnPoolServiceSpec(t *testing.T) {
        {
            subTest: "default configuration",
            spec: &acidv1.PostgresSpec{
                ConnectionPool: &acidv1.ConnectionPool{},
                ConnectionPooler: &acidv1.ConnectionPooler{},
            },
            cluster: cluster,
            check: noCheck,

@@ -930,7 +930,7 @@ func TestConnPoolServiceSpec(t *testing.T) {
        {
            subTest: "owner reference",
            spec: &acidv1.PostgresSpec{
                ConnectionPool: &acidv1.ConnectionPool{},
                ConnectionPooler: &acidv1.ConnectionPooler{},
            },
            cluster: cluster,
            check: testServiceOwnwerReference,

@@ -938,14 +938,14 @@ func TestConnPoolServiceSpec(t *testing.T) {
        {
            subTest: "selector",
            spec: &acidv1.PostgresSpec{
                ConnectionPool: &acidv1.ConnectionPool{},
                ConnectionPooler: &acidv1.ConnectionPooler{},
            },
            cluster: cluster,
            check: testServiceSelector,
        },
    }
    for _, tt := range tests {
        service := tt.cluster.generateConnPoolService(tt.spec)
        service := tt.cluster.generateConnectionPoolerService(tt.spec)

        if err := tt.check(cluster, service); err != nil {
            t.Errorf("%s [%s]: Service spec is incorrect, %+v",
@@ -958,6 +958,7 @@ func TestTLS(t *testing.T) {
    var err error
    var spec acidv1.PostgresSpec
    var cluster *Cluster
    var spiloFSGroup = int64(103)

    makeSpec := func(tls acidv1.TLSDescription) acidv1.PostgresSpec {
        return acidv1.PostgresSpec{

@@ -982,6 +983,9 @@ func TestTLS(t *testing.T) {
            SuperUsername: superUserName,
            ReplicationUsername: replicationUserName,
        },
        Resources: config.Resources{
            SpiloFSGroup: &spiloFSGroup,
        },
    },
}, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger)
spec = makeSpec(acidv1.TLSDescription{SecretName: "my-secret", CAFile: "ca.crt"})
@@ -1,6 +1,7 @@
package cluster

import (
    "context"
    "fmt"
    "math/rand"

@@ -17,7 +18,7 @@ func (c *Cluster) listPods() ([]v1.Pod, error) {
        LabelSelector: c.labelsSet(false).String(),
    }

    pods, err := c.KubeClient.Pods(c.Namespace).List(listOptions)
    pods, err := c.KubeClient.Pods(c.Namespace).List(context.TODO(), listOptions)
    if err != nil {
        return nil, fmt.Errorf("could not get list of pods: %v", err)
    }
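The mechanical change repeated through this file tracks the client-go API bump, where every request verb takes a context as its first argument and the mutating verbs take an explicit options struct. A minimal sketch of the new call shape, assuming a plain typed clientset rather than the operator's KubeClient wrapper:

package main

import (
    "context"
    "fmt"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/rest"
)

// listPods shows the post-upgrade call shape; context.TODO() is a
// placeholder until request-scoped contexts are threaded through.
func listPods(cfg *rest.Config, namespace string) error {
    client, err := kubernetes.NewForConfig(cfg)
    if err != nil {
        return err
    }
    pods, err := client.CoreV1().Pods(namespace).List(context.TODO(), metav1.ListOptions{})
    if err != nil {
        return fmt.Errorf("could not get list of pods: %v", err)
    }
    fmt.Printf("found %d pods\n", len(pods.Items))
    return nil
}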
@@ -30,7 +31,7 @@ func (c *Cluster) getRolePods(role PostgresRole) ([]v1.Pod, error) {
        LabelSelector: c.roleLabelsSet(false, role).String(),
    }

    pods, err := c.KubeClient.Pods(c.Namespace).List(listOptions)
    pods, err := c.KubeClient.Pods(c.Namespace).List(context.TODO(), listOptions)
    if err != nil {
        return nil, fmt.Errorf("could not get list of pods: %v", err)
    }

@@ -73,7 +74,7 @@ func (c *Cluster) deletePod(podName spec.NamespacedName) error {
    ch := c.registerPodSubscriber(podName)
    defer c.unregisterPodSubscriber(podName)

    if err := c.KubeClient.Pods(podName.Namespace).Delete(podName.Name, c.deleteOptions); err != nil {
    if err := c.KubeClient.Pods(podName.Namespace).Delete(context.TODO(), podName.Name, c.deleteOptions); err != nil {
        return err
    }

@@ -183,7 +184,7 @@ func (c *Cluster) MigrateMasterPod(podName spec.NamespacedName) error {
        eol bool
    )

    oldMaster, err := c.KubeClient.Pods(podName.Namespace).Get(podName.Name, metav1.GetOptions{})
    oldMaster, err := c.KubeClient.Pods(podName.Namespace).Get(context.TODO(), podName.Name, metav1.GetOptions{})

    if err != nil {
        return fmt.Errorf("could not get pod: %v", err)

@@ -206,7 +207,9 @@ func (c *Cluster) MigrateMasterPod(podName spec.NamespacedName) error {
    // we must have a statefulset in the cluster for the migration to work
    if c.Statefulset == nil {
        var sset *appsv1.StatefulSet
        if sset, err = c.KubeClient.StatefulSets(c.Namespace).Get(c.statefulSetName(),
        if sset, err = c.KubeClient.StatefulSets(c.Namespace).Get(
            context.TODO(),
            c.statefulSetName(),
            metav1.GetOptions{}); err != nil {
            return fmt.Errorf("could not retrieve cluster statefulset: %v", err)
        }

@@ -247,7 +250,7 @@ func (c *Cluster) MigrateMasterPod(podName spec.NamespacedName) error {

// MigrateReplicaPod recreates pod on a new node
func (c *Cluster) MigrateReplicaPod(podName spec.NamespacedName, fromNodeName string) error {
    replicaPod, err := c.KubeClient.Pods(podName.Namespace).Get(podName.Name, metav1.GetOptions{})
    replicaPod, err := c.KubeClient.Pods(podName.Namespace).Get(context.TODO(), podName.Name, metav1.GetOptions{})
    if err != nil {
        return fmt.Errorf("could not get pod: %v", err)
    }

@@ -276,7 +279,7 @@ func (c *Cluster) recreatePod(podName spec.NamespacedName) (*v1.Pod, error) {
    defer c.unregisterPodSubscriber(podName)
    stopChan := make(chan struct{})

    if err := c.KubeClient.Pods(podName.Namespace).Delete(podName.Name, c.deleteOptions); err != nil {
    if err := c.KubeClient.Pods(podName.Namespace).Delete(context.TODO(), podName.Name, c.deleteOptions); err != nil {
        return nil, fmt.Errorf("could not delete pod: %v", err)
    }

@@ -300,7 +303,7 @@ func (c *Cluster) recreatePods() error {
        LabelSelector: ls.String(),
    }

    pods, err := c.KubeClient.Pods(namespace).List(listOptions)
    pods, err := c.KubeClient.Pods(namespace).List(context.TODO(), listOptions)
    if err != nil {
        return fmt.Errorf("could not get the list of pods: %v", err)
    }

@@ -349,7 +352,7 @@ func (c *Cluster) recreatePods() error {
}

func (c *Cluster) podIsEndOfLife(pod *v1.Pod) (bool, error) {
    node, err := c.KubeClient.Nodes().Get(pod.Spec.NodeName, metav1.GetOptions{})
    node, err := c.KubeClient.Nodes().Get(context.TODO(), pod.Spec.NodeName, metav1.GetOptions{})
    if err != nil {
        return false, err
    }
@@ -1,6 +1,7 @@
package cluster

import (
    "context"
    "fmt"
    "strconv"
    "strings"

@@ -80,7 +81,10 @@ func (c *Cluster) createStatefulSet() (*appsv1.StatefulSet, error) {
    if err != nil {
        return nil, fmt.Errorf("could not generate statefulset: %v", err)
    }
    statefulSet, err := c.KubeClient.StatefulSets(statefulSetSpec.Namespace).Create(statefulSetSpec)
    statefulSet, err := c.KubeClient.StatefulSets(statefulSetSpec.Namespace).Create(
        context.TODO(),
        statefulSetSpec,
        metav1.CreateOptions{})
    if err != nil {
        return nil, err
    }
@@ -90,37 +94,37 @@ func (c *Cluster) createStatefulSet() (*appsv1.StatefulSet, error) {
    return statefulSet, nil
}

// Prepare the database for connection pool to be used, i.e. install lookup
// Prepare the database for connection pooler to be used, i.e. install lookup
// function (do it first, because it should be fast and if it didn't succeed
// it doesn't make sense to create more K8S objects). At this moment we assume
// that necessary connection pool user exists.
// that necessary connection pooler user exists.
//
// After that create all the objects for connection pool, namely a deployment
// After that create all the objects for connection pooler, namely a deployment
// with a chosen pooler and a service to expose it.
func (c *Cluster) createConnectionPool(lookup InstallFunction) (*ConnectionPoolObjects, error) {
func (c *Cluster) createConnectionPooler(lookup InstallFunction) (*ConnectionPoolerObjects, error) {
    var msg string
    c.setProcessName("creating connection pool")
    c.setProcessName("creating connection pooler")

    schema := c.Spec.ConnectionPool.Schema
    schema := c.Spec.ConnectionPooler.Schema
    if schema == "" {
        schema = c.OpConfig.ConnectionPool.Schema
        schema = c.OpConfig.ConnectionPooler.Schema
    }

    user := c.Spec.ConnectionPool.User
    user := c.Spec.ConnectionPooler.User
    if user == "" {
        user = c.OpConfig.ConnectionPool.User
        user = c.OpConfig.ConnectionPooler.User
    }

    err := lookup(schema, user)

    if err != nil {
        msg = "could not prepare database for connection pool: %v"
        msg = "could not prepare database for connection pooler: %v"
        return nil, fmt.Errorf(msg, err)
    }

    deploymentSpec, err := c.generateConnPoolDeployment(&c.Spec)
    deploymentSpec, err := c.generateConnectionPoolerDeployment(&c.Spec)
    if err != nil {
        msg = "could not generate deployment for connection pool: %v"
        msg = "could not generate deployment for connection pooler: %v"
        return nil, fmt.Errorf(msg, err)
    }
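Because createConnectionPooler receives the lookup step as a plain function value, tests can inject a no-op. A stub matching the lookup(schema, user) signature used above; the body is an assumption for illustration, since the real installLookupFunction executes SQL in the target database:

// mockInstallLookupFunction satisfies InstallFunction without touching a
// database, which is enough for the unit tests further down.
func mockInstallLookupFunction(schema string, user string) error {
    // the real implementation installs the pooler lookup function into
    // `schema` and makes it usable by `user`
    return nil
}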
@@ -129,46 +133,46 @@ func (c *Cluster) createConnectionPool(lookup InstallFunction) (*ConnectionPoolO
    // should be good enough to not think about it here.
    deployment, err := c.KubeClient.
        Deployments(deploymentSpec.Namespace).
        Create(deploymentSpec)
        Create(context.TODO(), deploymentSpec, metav1.CreateOptions{})

    if err != nil {
        return nil, err
    }

    serviceSpec := c.generateConnPoolService(&c.Spec)
    serviceSpec := c.generateConnectionPoolerService(&c.Spec)
    service, err := c.KubeClient.
        Services(serviceSpec.Namespace).
        Create(serviceSpec)
        Create(context.TODO(), serviceSpec, metav1.CreateOptions{})

    if err != nil {
        return nil, err
    }

    c.ConnectionPool = &ConnectionPoolObjects{
    c.ConnectionPooler = &ConnectionPoolerObjects{
        Deployment: deployment,
        Service: service,
    }
    c.logger.Debugf("created new connection pool %q, uid: %q",
    c.logger.Debugf("created new connection pooler %q, uid: %q",
        util.NameFromMeta(deployment.ObjectMeta), deployment.UID)

    return c.ConnectionPool, nil
    return c.ConnectionPooler, nil
}

func (c *Cluster) deleteConnectionPool() (err error) {
    c.setProcessName("deleting connection pool")
    c.logger.Debugln("deleting connection pool")
func (c *Cluster) deleteConnectionPooler() (err error) {
    c.setProcessName("deleting connection pooler")
    c.logger.Debugln("deleting connection pooler")

    // Lack of connection pooler objects is not a fatal error, just log it if
    // it was present before in the manifest
    if c.ConnectionPool == nil {
        c.logger.Infof("No connection pool to delete")
    if c.ConnectionPooler == nil {
        c.logger.Infof("No connection pooler to delete")
        return nil
    }

    // Clean up the deployment object. If the deployment resource we've remembered
    // is somehow empty, try to delete based on what we would generate
    deploymentName := c.connPoolName()
    deployment := c.ConnectionPool.Deployment
    deploymentName := c.connectionPoolerName()
    deployment := c.ConnectionPooler.Deployment

    if deployment != nil {
        deploymentName = deployment.Name

@@ -180,19 +184,19 @@ func (c *Cluster) deleteConnectionPool() (err error) {
    options := metav1.DeleteOptions{PropagationPolicy: &policy}
    err = c.KubeClient.
        Deployments(c.Namespace).
        Delete(deploymentName, &options)
        Delete(context.TODO(), deploymentName, options)

    if !k8sutil.ResourceNotFound(err) {
        c.logger.Debugf("Connection pool deployment was already deleted")
        c.logger.Debugf("Connection pooler deployment was already deleted")
    } else if err != nil {
        return fmt.Errorf("could not delete deployment: %v", err)
    }

    c.logger.Infof("Connection pool deployment %q has been deleted", deploymentName)
    c.logger.Infof("Connection pooler deployment %q has been deleted", deploymentName)

    // Repeat the same for the service object
    service := c.ConnectionPool.Service
    serviceName := c.connPoolName()
    service := c.ConnectionPooler.Service
    serviceName := c.connectionPoolerName()

    if service != nil {
        serviceName = service.Name

@@ -202,17 +206,17 @@ func (c *Cluster) deleteConnectionPool() (err error) {
    // will be deleted.
    err = c.KubeClient.
        Services(c.Namespace).
        Delete(serviceName, &options)
        Delete(context.TODO(), serviceName, options)

    if !k8sutil.ResourceNotFound(err) {
        c.logger.Debugf("Connection pool service was already deleted")
        c.logger.Debugf("Connection pooler service was already deleted")
    } else if err != nil {
        return fmt.Errorf("could not delete service: %v", err)
    }

    c.logger.Infof("Connection pool service %q has been deleted", serviceName)
    c.logger.Infof("Connection pooler service %q has been deleted", serviceName)

    c.ConnectionPool = nil
    c.ConnectionPooler = nil
    return nil
}
@@ -251,7 +255,7 @@ func (c *Cluster) preScaleDown(newStatefulSet *appsv1.StatefulSet) error {
    }

    podName := fmt.Sprintf("%s-0", c.Statefulset.Name)
    masterCandidatePod, err := c.KubeClient.Pods(c.clusterNamespace()).Get(podName, metav1.GetOptions{})
    masterCandidatePod, err := c.KubeClient.Pods(c.clusterNamespace()).Get(context.TODO(), podName, metav1.GetOptions{})
    if err != nil {
        return fmt.Errorf("could not get master candidate pod: %v", err)
    }
@@ -350,9 +354,12 @@ func (c *Cluster) updateStatefulSetAnnotations(annotations map[string]string) (*
        return nil, fmt.Errorf("could not form patch for the statefulset metadata: %v", err)
    }
    result, err := c.KubeClient.StatefulSets(c.Statefulset.Namespace).Patch(
        context.TODO(),
        c.Statefulset.Name,
        types.MergePatchType,
        []byte(patchData), "")
        []byte(patchData),
        metav1.PatchOptions{},
        "")
    if err != nil {
        return nil, fmt.Errorf("could not patch statefulset annotations %q: %v", patchData, err)
    }
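The bytes posted here form a JSON merge patch scoped to the object's metadata. A sketch of the payload shape (the annotation key below is made up; the operator builds the real payload with helpers such as metaAnnotationsPatch):

package main

import (
    "encoding/json"
    "fmt"
)

// annotationsMergePatch builds a metadata-scoped merge patch like the one
// sent with types.MergePatchType above.
func annotationsMergePatch(annotations map[string]string) ([]byte, error) {
    return json.Marshal(map[string]interface{}{
        "metadata": map[string]interface{}{
            "annotations": annotations,
        },
    })
}

func main() {
    patch, err := annotationsMergePatch(map[string]string{"example.org/note": "hypothetical"})
    if err != nil {
        panic(err)
    }
    fmt.Println(string(patch)) // {"metadata":{"annotations":{"example.org/note":"hypothetical"}}}
}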
@@ -380,9 +387,12 @@ func (c *Cluster) updateStatefulSet(newStatefulSet *appsv1.StatefulSet) error {
    }

    statefulSet, err := c.KubeClient.StatefulSets(c.Statefulset.Namespace).Patch(
        context.TODO(),
        c.Statefulset.Name,
        types.MergePatchType,
        patchData, "")
        patchData,
        metav1.PatchOptions{},
        "")
    if err != nil {
        return fmt.Errorf("could not patch statefulset spec %q: %v", statefulSetName, err)
    }

@@ -414,7 +424,7 @@ func (c *Cluster) replaceStatefulSet(newStatefulSet *appsv1.StatefulSet) error {
    oldStatefulset := c.Statefulset

    options := metav1.DeleteOptions{PropagationPolicy: &deletePropagationPolicy}
    err := c.KubeClient.StatefulSets(oldStatefulset.Namespace).Delete(oldStatefulset.Name, &options)
    err := c.KubeClient.StatefulSets(oldStatefulset.Namespace).Delete(context.TODO(), oldStatefulset.Name, options)
    if err != nil {
        return fmt.Errorf("could not delete statefulset %q: %v", statefulSetName, err)
    }

@@ -425,7 +435,7 @@ func (c *Cluster) replaceStatefulSet(newStatefulSet *appsv1.StatefulSet) error {

    err = retryutil.Retry(c.OpConfig.ResourceCheckInterval, c.OpConfig.ResourceCheckTimeout,
        func() (bool, error) {
            _, err2 := c.KubeClient.StatefulSets(oldStatefulset.Namespace).Get(oldStatefulset.Name, metav1.GetOptions{})
            _, err2 := c.KubeClient.StatefulSets(oldStatefulset.Namespace).Get(context.TODO(), oldStatefulset.Name, metav1.GetOptions{})
            if err2 == nil {
                return false, nil
            }

@@ -439,7 +449,7 @@ func (c *Cluster) replaceStatefulSet(newStatefulSet *appsv1.StatefulSet) error {
    }

    // create the new statefulset with the desired spec. It would take over the remaining pods.
    createdStatefulset, err := c.KubeClient.StatefulSets(newStatefulSet.Namespace).Create(newStatefulSet)
    createdStatefulset, err := c.KubeClient.StatefulSets(newStatefulSet.Namespace).Create(context.TODO(), newStatefulSet, metav1.CreateOptions{})
    if err != nil {
        return fmt.Errorf("could not create statefulset %q: %v", statefulSetName, err)
    }

@@ -460,7 +470,7 @@ func (c *Cluster) deleteStatefulSet() error {
        return fmt.Errorf("there is no statefulset in the cluster")
    }

    err := c.KubeClient.StatefulSets(c.Statefulset.Namespace).Delete(c.Statefulset.Name, c.deleteOptions)
    err := c.KubeClient.StatefulSets(c.Statefulset.Namespace).Delete(context.TODO(), c.Statefulset.Name, c.deleteOptions)
    if err != nil {
        return err
    }

@@ -482,7 +492,7 @@ func (c *Cluster) createService(role PostgresRole) (*v1.Service, error) {
    c.setProcessName("creating %v service", role)

    serviceSpec := c.generateService(role, &c.Spec)
    service, err := c.KubeClient.Services(serviceSpec.Namespace).Create(serviceSpec)
    service, err := c.KubeClient.Services(serviceSpec.Namespace).Create(context.TODO(), serviceSpec, metav1.CreateOptions{})
    if err != nil {
        return nil, err
    }

@@ -509,9 +519,12 @@ func (c *Cluster) updateService(role PostgresRole, newService *v1.Service) error
    if len(newService.ObjectMeta.Annotations) > 0 {
        if annotationsPatchData, err := metaAnnotationsPatch(newService.ObjectMeta.Annotations); err == nil {
            _, err = c.KubeClient.Services(serviceName.Namespace).Patch(
                context.TODO(),
                serviceName.Name,
                types.MergePatchType,
                []byte(annotationsPatchData), "")
                []byte(annotationsPatchData),
                metav1.PatchOptions{},
                "")

            if err != nil {
                return fmt.Errorf("could not replace annotations for the service %q: %v", serviceName, err)

@@ -528,7 +541,7 @@ func (c *Cluster) updateService(role PostgresRole, newService *v1.Service) error
    if newServiceType == "ClusterIP" && newServiceType != oldServiceType {
        newService.ResourceVersion = c.Services[role].ResourceVersion
        newService.Spec.ClusterIP = c.Services[role].Spec.ClusterIP
        svc, err = c.KubeClient.Services(serviceName.Namespace).Update(newService)
        svc, err = c.KubeClient.Services(serviceName.Namespace).Update(context.TODO(), newService, metav1.UpdateOptions{})
        if err != nil {
            return fmt.Errorf("could not update service %q: %v", serviceName, err)
        }
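The branch above has to respect fields Kubernetes treats as immutable: switching a Service back to ClusterIP only succeeds when the live object's ResourceVersion and allocated ClusterIP are carried over into the update. A sketch of that precondition against a plain clientset (variable names assumed; the operator goes through its KubeClient wrapper):

// oldSvc is the currently deployed Service, newService the desired state.
newService.ResourceVersion = oldSvc.ResourceVersion
newService.Spec.ClusterIP = oldSvc.Spec.ClusterIP
svc, err := client.CoreV1().Services(namespace).Update(context.TODO(), newService, metav1.UpdateOptions{})
if err != nil {
    return fmt.Errorf("could not update service %q: %v", newService.Name, err)
}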
@@ -539,9 +552,7 @@ func (c *Cluster) updateService(role PostgresRole, newService *v1.Service) error
    }

    svc, err = c.KubeClient.Services(serviceName.Namespace).Patch(
        serviceName.Name,
        types.MergePatchType,
        patchData, "")
        context.TODO(), serviceName.Name, types.MergePatchType, patchData, metav1.PatchOptions{}, "")
    if err != nil {
        return fmt.Errorf("could not patch service %q: %v", serviceName, err)
    }

@@ -560,7 +571,7 @@ func (c *Cluster) deleteService(role PostgresRole) error {
        return nil
    }

    if err := c.KubeClient.Services(service.Namespace).Delete(service.Name, c.deleteOptions); err != nil {
    if err := c.KubeClient.Services(service.Namespace).Delete(context.TODO(), service.Name, c.deleteOptions); err != nil {
        return err
    }
@@ -584,7 +595,7 @@ func (c *Cluster) createEndpoint(role PostgresRole) (*v1.Endpoints, error) {
    }
    endpointsSpec := c.generateEndpoint(role, subsets)

    endpoints, err := c.KubeClient.Endpoints(endpointsSpec.Namespace).Create(endpointsSpec)
    endpoints, err := c.KubeClient.Endpoints(endpointsSpec.Namespace).Create(context.TODO(), endpointsSpec, metav1.CreateOptions{})
    if err != nil {
        return nil, fmt.Errorf("could not create %s endpoint: %v", role, err)
    }

@@ -626,7 +637,7 @@ func (c *Cluster) createPodDisruptionBudget() (*policybeta1.PodDisruptionBudget,
    podDisruptionBudgetSpec := c.generatePodDisruptionBudget()
    podDisruptionBudget, err := c.KubeClient.
        PodDisruptionBudgets(podDisruptionBudgetSpec.Namespace).
        Create(podDisruptionBudgetSpec)
        Create(context.TODO(), podDisruptionBudgetSpec, metav1.CreateOptions{})

    if err != nil {
        return nil, err

@@ -647,7 +658,7 @@ func (c *Cluster) updatePodDisruptionBudget(pdb *policybeta1.PodDisruptionBudget

    newPdb, err := c.KubeClient.
        PodDisruptionBudgets(pdb.Namespace).
        Create(pdb)
        Create(context.TODO(), pdb, metav1.CreateOptions{})
    if err != nil {
        return fmt.Errorf("could not create pod disruption budget: %v", err)
    }

@@ -665,7 +676,7 @@ func (c *Cluster) deletePodDisruptionBudget() error {
    pdbName := util.NameFromMeta(c.PodDisruptionBudget.ObjectMeta)
    err := c.KubeClient.
        PodDisruptionBudgets(c.PodDisruptionBudget.Namespace).
        Delete(c.PodDisruptionBudget.Name, c.deleteOptions)
        Delete(context.TODO(), c.PodDisruptionBudget.Name, c.deleteOptions)
    if err != nil {
        return fmt.Errorf("could not delete pod disruption budget: %v", err)
    }

@@ -674,7 +685,7 @@ func (c *Cluster) deletePodDisruptionBudget() error {

    err = retryutil.Retry(c.OpConfig.ResourceCheckInterval, c.OpConfig.ResourceCheckTimeout,
        func() (bool, error) {
            _, err2 := c.KubeClient.PodDisruptionBudgets(pdbName.Namespace).Get(pdbName.Name, metav1.GetOptions{})
            _, err2 := c.KubeClient.PodDisruptionBudgets(pdbName.Namespace).Get(context.TODO(), pdbName.Name, metav1.GetOptions{})
            if err2 == nil {
                return false, nil
            }

@@ -697,7 +708,8 @@ func (c *Cluster) deleteEndpoint(role PostgresRole) error {
        return fmt.Errorf("there is no %s endpoint in the cluster", role)
    }

    if err := c.KubeClient.Endpoints(c.Endpoints[role].Namespace).Delete(c.Endpoints[role].Name, c.deleteOptions); err != nil {
    if err := c.KubeClient.Endpoints(c.Endpoints[role].Namespace).Delete(
        context.TODO(), c.Endpoints[role].Name, c.deleteOptions); err != nil {
        return fmt.Errorf("could not delete endpoint: %v", err)
    }

@@ -711,7 +723,7 @@ func (c *Cluster) deleteEndpoint(role PostgresRole) error {
func (c *Cluster) deleteSecret(secret *v1.Secret) error {
    c.setProcessName("deleting secret %q", util.NameFromMeta(secret.ObjectMeta))
    c.logger.Debugf("deleting secret %q", util.NameFromMeta(secret.ObjectMeta))
    err := c.KubeClient.Secrets(secret.Namespace).Delete(secret.Name, c.deleteOptions)
    err := c.KubeClient.Secrets(secret.Namespace).Delete(context.TODO(), secret.Name, c.deleteOptions)
    if err != nil {
        return err
    }

@@ -736,7 +748,7 @@ func (c *Cluster) createLogicalBackupJob() (err error) {
    }
    c.logger.Debugf("Generated cronJobSpec: %v", logicalBackupJobSpec)

    _, err = c.KubeClient.CronJobsGetter.CronJobs(c.Namespace).Create(logicalBackupJobSpec)
    _, err = c.KubeClient.CronJobsGetter.CronJobs(c.Namespace).Create(context.TODO(), logicalBackupJobSpec, metav1.CreateOptions{})
    if err != nil {
        return fmt.Errorf("could not create k8s cron job: %v", err)
    }

@@ -754,9 +766,12 @@ func (c *Cluster) patchLogicalBackupJob(newJob *batchv1beta1.CronJob) error {

    // update the backup job spec
    _, err = c.KubeClient.CronJobsGetter.CronJobs(c.Namespace).Patch(
        context.TODO(),
        c.getLogicalBackupJobName(),
        types.MergePatchType,
        patchData, "")
        patchData,
        metav1.PatchOptions{},
        "")
    if err != nil {
        return fmt.Errorf("could not patch logical backup job: %v", err)
    }

@@ -768,7 +783,7 @@ func (c *Cluster) deleteLogicalBackupJob() error {

    c.logger.Info("removing the logical backup job")

    return c.KubeClient.CronJobsGetter.CronJobs(c.Namespace).Delete(c.getLogicalBackupJobName(), c.deleteOptions)
    return c.KubeClient.CronJobsGetter.CronJobs(c.Namespace).Delete(context.TODO(), c.getLogicalBackupJobName(), c.deleteOptions)
}

// GetServiceMaster returns cluster's kubernetes master Service
@@ -801,12 +816,12 @@ func (c *Cluster) GetPodDisruptionBudget() *policybeta1.PodDisruptionBudget {
    return c.PodDisruptionBudget
}

// Perform actual patching of a connection pool deployment, assuming that all
// Perform actual patching of a connection pooler deployment, assuming that all
// the checks were already done before.
func (c *Cluster) updateConnPoolDeployment(oldDeploymentSpec, newDeployment *appsv1.Deployment) (*appsv1.Deployment, error) {
    c.setProcessName("updating connection pool")
    if c.ConnectionPool == nil || c.ConnectionPool.Deployment == nil {
        return nil, fmt.Errorf("there is no connection pool in the cluster")
func (c *Cluster) updateConnectionPoolerDeployment(oldDeploymentSpec, newDeployment *appsv1.Deployment) (*appsv1.Deployment, error) {
    c.setProcessName("updating connection pooler")
    if c.ConnectionPooler == nil || c.ConnectionPooler.Deployment == nil {
        return nil, fmt.Errorf("there is no connection pooler in the cluster")
    }

    patchData, err := specPatch(newDeployment.Spec)

@@ -818,16 +833,18 @@ func (c *Cluster) updateConnPoolDeployment(oldDeploymentSpec, newDeployment *app
    // worker at one time will try to update it, chances of conflicts are
    // minimal.
    deployment, err := c.KubeClient.
        Deployments(c.ConnectionPool.Deployment.Namespace).
        Patch(
            c.ConnectionPool.Deployment.Name,
            types.MergePatchType,
            patchData, "")
        Deployments(c.ConnectionPooler.Deployment.Namespace).Patch(
            context.TODO(),
            c.ConnectionPooler.Deployment.Name,
            types.MergePatchType,
            patchData,
            metav1.PatchOptions{},
            "")
    if err != nil {
        return nil, fmt.Errorf("could not patch deployment: %v", err)
    }

    c.ConnectionPool.Deployment = deployment
    c.ConnectionPooler.Deployment = deployment

    return deployment, nil
}
@@ -19,8 +19,8 @@ func boolToPointer(value bool) *bool {
    return &value
}

func TestConnPoolCreationAndDeletion(t *testing.T) {
    testName := "Test connection pool creation"
func TestConnectionPoolerCreationAndDeletion(t *testing.T) {
    testName := "Test connection pooler creation"
    var cluster = New(
        Config{
            OpConfig: config.Config{

@@ -29,11 +29,11 @@ func TestConnPoolCreationAndDeletion(t *testing.T) {
                SuperUsername: superUserName,
                ReplicationUsername: replicationUserName,
            },
            ConnectionPool: config.ConnectionPool{
                ConnPoolDefaultCPURequest: "100m",
                ConnPoolDefaultCPULimit: "100m",
                ConnPoolDefaultMemoryRequest: "100Mi",
                ConnPoolDefaultMemoryLimit: "100Mi",
            ConnectionPooler: config.ConnectionPooler{
                ConnectionPoolerDefaultCPURequest: "100m",
                ConnectionPoolerDefaultCPULimit: "100m",
                ConnectionPoolerDefaultMemoryRequest: "100Mi",
                ConnectionPoolerDefaultMemoryLimit: "100Mi",
            },
        },
    }, k8sutil.NewMockKubernetesClient(), acidv1.Postgresql{}, logger)
@@ -45,31 +45,31 @@ func TestConnPoolCreationAndDeletion(t *testing.T) {
    }

    cluster.Spec = acidv1.PostgresSpec{
        ConnectionPool: &acidv1.ConnectionPool{},
        ConnectionPooler: &acidv1.ConnectionPooler{},
    }
    poolResources, err := cluster.createConnectionPool(mockInstallLookupFunction)
    poolerResources, err := cluster.createConnectionPooler(mockInstallLookupFunction)

    if err != nil {
        t.Errorf("%s: Cannot create connection pool, %s, %+v",
            testName, err, poolResources)
        t.Errorf("%s: Cannot create connection pooler, %s, %+v",
            testName, err, poolerResources)
    }

    if poolResources.Deployment == nil {
        t.Errorf("%s: Connection pool deployment is empty", testName)
    if poolerResources.Deployment == nil {
        t.Errorf("%s: Connection pooler deployment is empty", testName)
    }

    if poolResources.Service == nil {
        t.Errorf("%s: Connection pool service is empty", testName)
    if poolerResources.Service == nil {
        t.Errorf("%s: Connection pooler service is empty", testName)
    }

    err = cluster.deleteConnectionPool()
    err = cluster.deleteConnectionPooler()
    if err != nil {
        t.Errorf("%s: Cannot delete connection pool, %s", testName, err)
        t.Errorf("%s: Cannot delete connection pooler, %s", testName, err)
    }
}

func TestNeedConnPool(t *testing.T) {
    testName := "Test how connection pool can be enabled"
func TestNeedConnectionPooler(t *testing.T) {
    testName := "Test how connection pooler can be enabled"
    var cluster = New(
        Config{
            OpConfig: config.Config{
@ -78,50 +78,50 @@ func TestNeedConnPool(t *testing.T) {
|
|||
SuperUsername: superUserName,
|
||||
ReplicationUsername: replicationUserName,
|
||||
},
|
||||
ConnectionPool: config.ConnectionPool{
|
||||
ConnPoolDefaultCPURequest: "100m",
|
||||
ConnPoolDefaultCPULimit: "100m",
|
||||
ConnPoolDefaultMemoryRequest: "100Mi",
|
||||
ConnPoolDefaultMemoryLimit: "100Mi",
|
||||
ConnectionPooler: config.ConnectionPooler{
|
||||
ConnectionPoolerDefaultCPURequest: "100m",
|
||||
ConnectionPoolerDefaultCPULimit: "100m",
|
||||
ConnectionPoolerDefaultMemoryRequest: "100Mi",
|
||||
ConnectionPoolerDefaultMemoryLimit: "100Mi",
|
||||
},
|
||||
},
|
||||
}, k8sutil.NewMockKubernetesClient(), acidv1.Postgresql{}, logger)
|
||||
|
||||
cluster.Spec = acidv1.PostgresSpec{
|
||||
ConnectionPool: &acidv1.ConnectionPool{},
|
||||
ConnectionPooler: &acidv1.ConnectionPooler{},
|
||||
}
|
||||
|
||||
if !cluster.needConnectionPool() {
|
||||
t.Errorf("%s: Connection pool is not enabled with full definition",
|
||||
if !cluster.needConnectionPooler() {
|
||||
t.Errorf("%s: Connection pooler is not enabled with full definition",
|
||||
testName)
|
||||
}
|
||||
|
||||
cluster.Spec = acidv1.PostgresSpec{
|
||||
EnableConnectionPool: boolToPointer(true),
|
||||
EnableConnectionPooler: boolToPointer(true),
|
||||
}
|
||||
|
||||
if !cluster.needConnectionPool() {
|
||||
t.Errorf("%s: Connection pool is not enabled with flag",
|
||||
if !cluster.needConnectionPooler() {
|
||||
t.Errorf("%s: Connection pooler is not enabled with flag",
|
||||
testName)
|
||||
}
|
||||
|
||||
cluster.Spec = acidv1.PostgresSpec{
|
||||
EnableConnectionPool: boolToPointer(false),
|
||||
ConnectionPool: &acidv1.ConnectionPool{},
|
||||
EnableConnectionPooler: boolToPointer(false),
|
||||
ConnectionPooler: &acidv1.ConnectionPooler{},
|
||||
}
|
||||
|
||||
if cluster.needConnectionPool() {
|
||||
t.Errorf("%s: Connection pool is still enabled with flag being false",
|
||||
if cluster.needConnectionPooler() {
|
||||
t.Errorf("%s: Connection pooler is still enabled with flag being false",
|
||||
testName)
|
||||
}
|
||||
|
||||
cluster.Spec = acidv1.PostgresSpec{
|
||||
EnableConnectionPool: boolToPointer(true),
|
||||
ConnectionPool: &acidv1.ConnectionPool{},
|
||||
EnableConnectionPooler: boolToPointer(true),
|
||||
ConnectionPooler: &acidv1.ConnectionPooler{},
|
||||
}
|
||||
|
||||
if !cluster.needConnectionPool() {
|
||||
t.Errorf("%s: Connection pool is not enabled with flag and full",
|
||||
if !cluster.needConnectionPooler() {
|
||||
t.Errorf("%s: Connection pooler is not enabled with flag and full",
|
||||
testName)
|
||||
}
|
||||
}
|
||||
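The tests above toggle the pooler through pointer-valued spec fields, so an unset flag (nil) can be told apart from an explicit false. The pointer helpers themselves sit outside this hunk; a plausible sketch reconstructed from the names used here (an assumption, not copied from the repository — only the int32ToPointer signature is confirmed by a later hunk):

    // Hypothetical reconstruction of the helpers the tests call.
    func boolToPointer(value bool) *bool { return &value }

    func int32ToPointer(value int32) *int32 { return &value }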
@ -1,6 +1,7 @@
package cluster

import (
    "context"
    "fmt"
    "strings"

@ -115,9 +116,9 @@ func (c *Cluster) Sync(newSpec *acidv1.Postgresql) error {
        }
    }

    // sync connection pool
    if err = c.syncConnectionPool(&oldSpec, newSpec, c.installLookupFunction); err != nil {
        return fmt.Errorf("could not sync connection pool: %v", err)
    // sync connection pooler
    if err = c.syncConnectionPooler(&oldSpec, newSpec, c.installLookupFunction); err != nil {
        return fmt.Errorf("could not sync connection pooler: %v", err)
    }

    return err

@ -146,7 +147,7 @@ func (c *Cluster) syncService(role PostgresRole) error {
    )
    c.setProcessName("syncing %s service", role)

    if svc, err = c.KubeClient.Services(c.Namespace).Get(c.serviceName(role), metav1.GetOptions{}); err == nil {
    if svc, err = c.KubeClient.Services(c.Namespace).Get(context.TODO(), c.serviceName(role), metav1.GetOptions{}); err == nil {
        c.Services[role] = svc
        desiredSvc := c.generateService(role, &c.Spec)
        if match, reason := k8sutil.SameService(svc, desiredSvc); !match {

@ -172,7 +173,7 @@ func (c *Cluster) syncService(role PostgresRole) error {
            return fmt.Errorf("could not create missing %s service: %v", role, err)
        }
        c.logger.Infof("%s service %q already exists", role, util.NameFromMeta(svc.ObjectMeta))
        if svc, err = c.KubeClient.Services(c.Namespace).Get(c.serviceName(role), metav1.GetOptions{}); err != nil {
        if svc, err = c.KubeClient.Services(c.Namespace).Get(context.TODO(), c.serviceName(role), metav1.GetOptions{}); err != nil {
            return fmt.Errorf("could not fetch existing %s service: %v", role, err)
        }
    }

@ -187,7 +188,7 @@ func (c *Cluster) syncEndpoint(role PostgresRole) error {
    )
    c.setProcessName("syncing %s endpoint", role)

    if ep, err = c.KubeClient.Endpoints(c.Namespace).Get(c.endpointName(role), metav1.GetOptions{}); err == nil {
    if ep, err = c.KubeClient.Endpoints(c.Namespace).Get(context.TODO(), c.endpointName(role), metav1.GetOptions{}); err == nil {
        // TODO: No syncing of endpoints here, is this covered completely by updateService?
        c.Endpoints[role] = ep
        return nil

@ -206,7 +207,7 @@ func (c *Cluster) syncEndpoint(role PostgresRole) error {
            return fmt.Errorf("could not create missing %s endpoint: %v", role, err)
        }
        c.logger.Infof("%s endpoint %q already exists", role, util.NameFromMeta(ep.ObjectMeta))
        if ep, err = c.KubeClient.Endpoints(c.Namespace).Get(c.endpointName(role), metav1.GetOptions{}); err != nil {
        if ep, err = c.KubeClient.Endpoints(c.Namespace).Get(context.TODO(), c.endpointName(role), metav1.GetOptions{}); err != nil {
            return fmt.Errorf("could not fetch existing %s endpoint: %v", role, err)
        }
    }

@ -219,7 +220,7 @@ func (c *Cluster) syncPodDisruptionBudget(isUpdate bool) error {
        pdb *policybeta1.PodDisruptionBudget
        err error
    )
    if pdb, err = c.KubeClient.PodDisruptionBudgets(c.Namespace).Get(c.podDisruptionBudgetName(), metav1.GetOptions{}); err == nil {
    if pdb, err = c.KubeClient.PodDisruptionBudgets(c.Namespace).Get(context.TODO(), c.podDisruptionBudgetName(), metav1.GetOptions{}); err == nil {
        c.PodDisruptionBudget = pdb
        newPDB := c.generatePodDisruptionBudget()
        if match, reason := k8sutil.SamePDB(pdb, newPDB); !match {

@ -245,7 +246,7 @@ func (c *Cluster) syncPodDisruptionBudget(isUpdate bool) error {
            return fmt.Errorf("could not create pod disruption budget: %v", err)
        }
        c.logger.Infof("pod disruption budget %q already exists", util.NameFromMeta(pdb.ObjectMeta))
        if pdb, err = c.KubeClient.PodDisruptionBudgets(c.Namespace).Get(c.podDisruptionBudgetName(), metav1.GetOptions{}); err != nil {
        if pdb, err = c.KubeClient.PodDisruptionBudgets(c.Namespace).Get(context.TODO(), c.podDisruptionBudgetName(), metav1.GetOptions{}); err != nil {
            return fmt.Errorf("could not fetch existing %q pod disruption budget", util.NameFromMeta(pdb.ObjectMeta))
        }
    }

@ -261,7 +262,7 @@ func (c *Cluster) syncStatefulSet() error {
        podsRollingUpdateRequired bool
    )
    // NB: Be careful to consider the codepath that acts on podsRollingUpdateRequired before returning early.
    sset, err := c.KubeClient.StatefulSets(c.Namespace).Get(c.statefulSetName(), metav1.GetOptions{})
    sset, err := c.KubeClient.StatefulSets(c.Namespace).Get(context.TODO(), c.statefulSetName(), metav1.GetOptions{})
    if err != nil {
        if !k8sutil.ResourceNotFound(err) {
            return fmt.Errorf("could not get statefulset: %v", err)

@ -410,14 +411,14 @@ func (c *Cluster) syncSecrets() error {
    secrets := c.generateUserSecrets()

    for secretUsername, secretSpec := range secrets {
        if secret, err = c.KubeClient.Secrets(secretSpec.Namespace).Create(secretSpec); err == nil {
        if secret, err = c.KubeClient.Secrets(secretSpec.Namespace).Create(context.TODO(), secretSpec, metav1.CreateOptions{}); err == nil {
            c.Secrets[secret.UID] = secret
            c.logger.Debugf("created new secret %q, uid: %q", util.NameFromMeta(secret.ObjectMeta), secret.UID)
            continue
        }
        if k8sutil.ResourceAlreadyExists(err) {
            var userMap map[string]spec.PgUser
            if secret, err = c.KubeClient.Secrets(secretSpec.Namespace).Get(secretSpec.Name, metav1.GetOptions{}); err != nil {
            if secret, err = c.KubeClient.Secrets(secretSpec.Namespace).Get(context.TODO(), secretSpec.Name, metav1.GetOptions{}); err != nil {
                return fmt.Errorf("could not get current secret: %v", err)
            }
            if secretUsername != string(secret.Data["username"]) {

@ -440,7 +441,7 @@ func (c *Cluster) syncSecrets() error {
                pwdUser.Origin == spec.RoleOriginInfrastructure {

                c.logger.Debugf("updating the secret %q from the infrastructure roles", secretSpec.Name)
                if _, err = c.KubeClient.Secrets(secretSpec.Namespace).Update(secretSpec); err != nil {
                if _, err = c.KubeClient.Secrets(secretSpec.Namespace).Update(context.TODO(), secretSpec, metav1.UpdateOptions{}); err != nil {
                    return fmt.Errorf("could not update infrastructure role secret for role %q: %v", secretUsername, err)
                }
            } else {

@ -483,12 +484,12 @@ func (c *Cluster) syncRoles() (err error) {
        userNames = append(userNames, u.Name)
    }

    if c.needConnectionPool() {
        connPoolUser := c.systemUsers[constants.ConnectionPoolUserKeyName]
        userNames = append(userNames, connPoolUser.Name)
    if c.needConnectionPooler() {
        connectionPoolerUser := c.systemUsers[constants.ConnectionPoolerUserKeyName]
        userNames = append(userNames, connectionPoolerUser.Name)

        if _, exists := c.pgUsers[connPoolUser.Name]; !exists {
            c.pgUsers[connPoolUser.Name] = connPoolUser
        if _, exists := c.pgUsers[connectionPoolerUser.Name]; !exists {
            c.pgUsers[connectionPoolerUser.Name] = connectionPoolerUser
        }
    }

@ -695,7 +696,7 @@ func (c *Cluster) syncLogicalBackupJob() error {
    // sync the job if it exists

    jobName := c.getLogicalBackupJobName()
    if job, err = c.KubeClient.CronJobsGetter.CronJobs(c.Namespace).Get(jobName, metav1.GetOptions{}); err == nil {
    if job, err = c.KubeClient.CronJobsGetter.CronJobs(c.Namespace).Get(context.TODO(), jobName, metav1.GetOptions{}); err == nil {

        desiredJob, err = c.generateLogicalBackupJob()
        if err != nil {

@ -729,7 +730,7 @@ func (c *Cluster) syncLogicalBackupJob() error {
            return fmt.Errorf("could not create missing logical backup job: %v", err)
        }
        c.logger.Infof("logical backup job %q already exists", jobName)
        if _, err = c.KubeClient.CronJobsGetter.CronJobs(c.Namespace).Get(jobName, metav1.GetOptions{}); err != nil {
        if _, err = c.KubeClient.CronJobsGetter.CronJobs(c.Namespace).Get(context.TODO(), jobName, metav1.GetOptions{}); err != nil {
            return fmt.Errorf("could not fetch existing logical backup job: %v", err)
        }
    }

@ -737,69 +738,69 @@ func (c *Cluster) syncLogicalBackupJob() error {
    return nil
}

func (c *Cluster) syncConnectionPool(oldSpec, newSpec *acidv1.Postgresql, lookup InstallFunction) error {
    if c.ConnectionPool == nil {
        c.ConnectionPool = &ConnectionPoolObjects{}
func (c *Cluster) syncConnectionPooler(oldSpec, newSpec *acidv1.Postgresql, lookup InstallFunction) error {
    if c.ConnectionPooler == nil {
        c.ConnectionPooler = &ConnectionPoolerObjects{}
    }

    newNeedConnPool := c.needConnectionPoolWorker(&newSpec.Spec)
    oldNeedConnPool := c.needConnectionPoolWorker(&oldSpec.Spec)
    newNeedConnectionPooler := c.needConnectionPoolerWorker(&newSpec.Spec)
    oldNeedConnectionPooler := c.needConnectionPoolerWorker(&oldSpec.Spec)

    if newNeedConnPool {
        // Try to sync in any case. If we didn't needed connection pool before,
    if newNeedConnectionPooler {
        // Try to sync in any case. If we didn't need the connection pooler before,
        // it means we want to create it. If it was already present, still sync
        // since it could happen that there is no difference in specs, and all
        // the resources are remembered, but the deployment was manually deleted
        // in between
        c.logger.Debug("syncing connection pool")
        c.logger.Debug("syncing connection pooler")

        // in this case also do not forget to install lookup function as for
        // creating cluster
        if !oldNeedConnPool || !c.ConnectionPool.LookupFunction {
            newConnPool := newSpec.Spec.ConnectionPool
        if !oldNeedConnectionPooler || !c.ConnectionPooler.LookupFunction {
            newConnectionPooler := newSpec.Spec.ConnectionPooler

            specSchema := ""
            specUser := ""

            if newConnPool != nil {
                specSchema = newConnPool.Schema
                specUser = newConnPool.User
            if newConnectionPooler != nil {
                specSchema = newConnectionPooler.Schema
                specUser = newConnectionPooler.User
            }

            schema := util.Coalesce(
                specSchema,
                c.OpConfig.ConnectionPool.Schema)
                c.OpConfig.ConnectionPooler.Schema)

            user := util.Coalesce(
                specUser,
                c.OpConfig.ConnectionPool.User)
                c.OpConfig.ConnectionPooler.User)

            if err := lookup(schema, user); err != nil {
                return err
            }
        }

        if err := c.syncConnectionPoolWorker(oldSpec, newSpec); err != nil {
            c.logger.Errorf("could not sync connection pool: %v", err)
        if err := c.syncConnectionPoolerWorker(oldSpec, newSpec); err != nil {
            c.logger.Errorf("could not sync connection pooler: %v", err)
            return err
        }
    }

    if oldNeedConnPool && !newNeedConnPool {
    if oldNeedConnectionPooler && !newNeedConnectionPooler {
        // delete and cleanup resources
        if err := c.deleteConnectionPool(); err != nil {
            c.logger.Warningf("could not remove connection pool: %v", err)
        if err := c.deleteConnectionPooler(); err != nil {
            c.logger.Warningf("could not remove connection pooler: %v", err)
        }
    }

    if !oldNeedConnPool && !newNeedConnPool {
    if !oldNeedConnectionPooler && !newNeedConnectionPooler {
        // delete and cleanup resources if not empty
        if c.ConnectionPool != nil &&
            (c.ConnectionPool.Deployment != nil ||
                c.ConnectionPool.Service != nil) {
        if c.ConnectionPooler != nil &&
            (c.ConnectionPooler.Deployment != nil ||
                c.ConnectionPooler.Service != nil) {

            if err := c.deleteConnectionPool(); err != nil {
                c.logger.Warningf("could not remove connection pool: %v", err)
            if err := c.deleteConnectionPooler(); err != nil {
                c.logger.Warningf("could not remove connection pooler: %v", err)
            }
        }
    }
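Taken together, syncConnectionPooler reduces to a small decision table over the old and new desired state. A simplified editorial sketch of that control flow (not the operator's code; "leftovers" stands for c.ConnectionPooler still holding a Deployment or Service):

    // Simplified sketch of the branch structure above.
    func poolerSyncAction(oldNeed, newNeed, leftovers bool) string {
        switch {
        case newNeed:
            return "install lookup function if needed, then sync deployment and service"
        case oldNeed:
            return "delete pooler resources"
        case leftovers:
            return "delete stale pooler resources"
        default:
            return "nothing to do"
        }
    }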
@ -807,58 +808,58 @@ func (c *Cluster) syncConnectionPool(oldSpec, newSpec *acidv1.Postgresql, lookup
    return nil
}

// Synchronize connection pool resources. Effectively we're interested only in
// Synchronize connection pooler resources. Effectively we're interested only in
// synchronizing the corresponding deployment, but in case the deployment or
// the service is missing, create it. After checking, also remember the objects
// for future reference.
func (c *Cluster) syncConnectionPoolWorker(oldSpec, newSpec *acidv1.Postgresql) error {
func (c *Cluster) syncConnectionPoolerWorker(oldSpec, newSpec *acidv1.Postgresql) error {
    deployment, err := c.KubeClient.
        Deployments(c.Namespace).
        Get(c.connPoolName(), metav1.GetOptions{})
        Get(context.TODO(), c.connectionPoolerName(), metav1.GetOptions{})

    if err != nil && k8sutil.ResourceNotFound(err) {
        msg := "Deployment %s for connection pool synchronization is not found, create it"
        c.logger.Warningf(msg, c.connPoolName())
        msg := "Deployment %s for connection pooler synchronization is not found, create it"
        c.logger.Warningf(msg, c.connectionPoolerName())

        deploymentSpec, err := c.generateConnPoolDeployment(&newSpec.Spec)
        deploymentSpec, err := c.generateConnectionPoolerDeployment(&newSpec.Spec)
        if err != nil {
            msg = "could not generate deployment for connection pool: %v"
            msg = "could not generate deployment for connection pooler: %v"
            return fmt.Errorf(msg, err)
        }

        deployment, err := c.KubeClient.
            Deployments(deploymentSpec.Namespace).
            Create(deploymentSpec)
            Create(context.TODO(), deploymentSpec, metav1.CreateOptions{})

        if err != nil {
            return err
        }

        c.ConnectionPool.Deployment = deployment
        c.ConnectionPooler.Deployment = deployment
    } else if err != nil {
        return fmt.Errorf("could not get connection pool deployment to sync: %v", err)
        return fmt.Errorf("could not get connection pooler deployment to sync: %v", err)
    } else {
        c.ConnectionPool.Deployment = deployment
        c.ConnectionPooler.Deployment = deployment

        // actual synchronization
        oldConnPool := oldSpec.Spec.ConnectionPool
        newConnPool := newSpec.Spec.ConnectionPool
        specSync, specReason := c.needSyncConnPoolSpecs(oldConnPool, newConnPool)
        defaultsSync, defaultsReason := c.needSyncConnPoolDefaults(newConnPool, deployment)
        oldConnectionPooler := oldSpec.Spec.ConnectionPooler
        newConnectionPooler := newSpec.Spec.ConnectionPooler
        specSync, specReason := c.needSyncConnectionPoolerSpecs(oldConnectionPooler, newConnectionPooler)
        defaultsSync, defaultsReason := c.needSyncConnectionPoolerDefaults(newConnectionPooler, deployment)
        reason := append(specReason, defaultsReason...)
        if specSync || defaultsSync {
            c.logger.Infof("Update connection pool deployment %s, reason: %+v",
                c.connPoolName(), reason)
            c.logger.Infof("Update connection pooler deployment %s, reason: %+v",
                c.connectionPoolerName(), reason)

            newDeploymentSpec, err := c.generateConnPoolDeployment(&newSpec.Spec)
            newDeploymentSpec, err := c.generateConnectionPoolerDeployment(&newSpec.Spec)
            if err != nil {
                msg := "could not generate deployment for connection pool: %v"
                msg := "could not generate deployment for connection pooler: %v"
                return fmt.Errorf(msg, err)
            }

            oldDeploymentSpec := c.ConnectionPool.Deployment
            oldDeploymentSpec := c.ConnectionPooler.Deployment

            deployment, err := c.updateConnPoolDeployment(
            deployment, err := c.updateConnectionPoolerDeployment(
                oldDeploymentSpec,
                newDeploymentSpec)

@ -866,34 +867,34 @@ func (c *Cluster) syncConnectionPoolWorker(oldSpec, newSpec *acidv1.Postgresql)
            return err
        }

        c.ConnectionPool.Deployment = deployment
        c.ConnectionPooler.Deployment = deployment
        return nil
    }
}

    service, err := c.KubeClient.
        Services(c.Namespace).
        Get(c.connPoolName(), metav1.GetOptions{})
        Get(context.TODO(), c.connectionPoolerName(), metav1.GetOptions{})

    if err != nil && k8sutil.ResourceNotFound(err) {
        msg := "Service %s for connection pool synchronization is not found, create it"
        c.logger.Warningf(msg, c.connPoolName())
        msg := "Service %s for connection pooler synchronization is not found, create it"
        c.logger.Warningf(msg, c.connectionPoolerName())

        serviceSpec := c.generateConnPoolService(&newSpec.Spec)
        serviceSpec := c.generateConnectionPoolerService(&newSpec.Spec)
        service, err := c.KubeClient.
            Services(serviceSpec.Namespace).
            Create(serviceSpec)
            Create(context.TODO(), serviceSpec, metav1.CreateOptions{})

        if err != nil {
            return err
        }

        c.ConnectionPool.Service = service
        c.ConnectionPooler.Service = service
    } else if err != nil {
        return fmt.Errorf("could not get connection pool service to sync: %v", err)
        return fmt.Errorf("could not get connection pooler service to sync: %v", err)
    } else {
        // Service updates are not supported and probably not that useful anyway
        c.ConnectionPool.Service = service
        c.ConnectionPooler.Service = service
    }

    return nil
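Every sync helper in this file follows the same shape: Get the object, create it when absent, otherwise remember and reconcile it, with the new client-go calls taking a context plus a typed options struct. A minimal self-contained sketch of that pattern (names and the plain clientset are illustrative; the operator goes through its own KubeClient wrapper and k8sutil.ResourceNotFound instead):

    package sketch

    import (
        "context"
        "fmt"

        v1 "k8s.io/api/core/v1"
        "k8s.io/apimachinery/pkg/api/errors"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes"
    )

    // getOrCreateService mirrors the get-or-create shape of the sync helpers above.
    func getOrCreateService(client kubernetes.Interface, namespace string, desired *v1.Service) (*v1.Service, error) {
        svc, err := client.CoreV1().Services(namespace).Get(context.TODO(), desired.Name, metav1.GetOptions{})
        if err == nil {
            // Already present: a real sync would now diff against the desired
            // spec and update when they differ.
            return svc, nil
        }
        if !errors.IsNotFound(err) {
            return nil, fmt.Errorf("could not get service: %v", err)
        }
        // Absent: create it and hand the returned object back for bookkeeping.
        return client.CoreV1().Services(namespace).Create(context.TODO(), desired, metav1.CreateOptions{})
    }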
@ -18,8 +18,8 @@ func int32ToPointer(value int32) *int32 {
}

func deploymentUpdated(cluster *Cluster, err error) error {
    if cluster.ConnectionPool.Deployment.Spec.Replicas == nil ||
        *cluster.ConnectionPool.Deployment.Spec.Replicas != 2 {
    if cluster.ConnectionPooler.Deployment.Spec.Replicas == nil ||
        *cluster.ConnectionPooler.Deployment.Spec.Replicas != 2 {
        return fmt.Errorf("Wrong number of instances")
    }

@ -27,15 +27,15 @@ func deploymentUpdated(cluster *Cluster, err error) error {
}

func objectsAreSaved(cluster *Cluster, err error) error {
    if cluster.ConnectionPool == nil {
        return fmt.Errorf("Connection pool resources are empty")
    if cluster.ConnectionPooler == nil {
        return fmt.Errorf("Connection pooler resources are empty")
    }

    if cluster.ConnectionPool.Deployment == nil {
    if cluster.ConnectionPooler.Deployment == nil {
        return fmt.Errorf("Deployment was not saved")
    }

    if cluster.ConnectionPool.Service == nil {
    if cluster.ConnectionPooler.Service == nil {
        return fmt.Errorf("Service was not saved")
    }

@ -43,15 +43,15 @@ func objectsAreSaved(cluster *Cluster, err error) error {
}

func objectsAreDeleted(cluster *Cluster, err error) error {
    if cluster.ConnectionPool != nil {
        return fmt.Errorf("Connection pool was not deleted")
    if cluster.ConnectionPooler != nil {
        return fmt.Errorf("Connection pooler was not deleted")
    }

    return nil
}

func TestConnPoolSynchronization(t *testing.T) {
    testName := "Test connection pool synchronization"
func TestConnectionPoolerSynchronization(t *testing.T) {
    testName := "Test connection pooler synchronization"
    var cluster = New(
        Config{
            OpConfig: config.Config{

@ -60,12 +60,12 @@ func TestConnPoolSynchronization(t *testing.T) {
                SuperUsername:       superUserName,
                ReplicationUsername: replicationUserName,
            },
            ConnectionPool: config.ConnectionPool{
                ConnPoolDefaultCPURequest:    "100m",
                ConnPoolDefaultCPULimit:      "100m",
                ConnPoolDefaultMemoryRequest: "100Mi",
                ConnPoolDefaultMemoryLimit:   "100Mi",
                NumberOfInstances:            int32ToPointer(1),
            ConnectionPooler: config.ConnectionPooler{
                ConnectionPoolerDefaultCPURequest:    "100m",
                ConnectionPoolerDefaultCPULimit:      "100m",
                ConnectionPoolerDefaultMemoryRequest: "100Mi",
                ConnectionPoolerDefaultMemoryLimit:   "100Mi",
                NumberOfInstances:                    int32ToPointer(1),
            },
        },
    }, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger)

@ -84,15 +84,15 @@ func TestConnPoolSynchronization(t *testing.T) {

    clusterDirtyMock := *cluster
    clusterDirtyMock.KubeClient = k8sutil.NewMockKubernetesClient()
    clusterDirtyMock.ConnectionPool = &ConnectionPoolObjects{
    clusterDirtyMock.ConnectionPooler = &ConnectionPoolerObjects{
        Deployment: &appsv1.Deployment{},
        Service:    &v1.Service{},
    }

    clusterNewDefaultsMock := *cluster
    clusterNewDefaultsMock.KubeClient = k8sutil.NewMockKubernetesClient()
    cluster.OpConfig.ConnectionPool.Image = "pooler:2.0"
    cluster.OpConfig.ConnectionPool.NumberOfInstances = int32ToPointer(2)
    cluster.OpConfig.ConnectionPooler.Image = "pooler:2.0"
    cluster.OpConfig.ConnectionPooler.NumberOfInstances = int32ToPointer(2)

    tests := []struct {
        subTest string

@ -105,12 +105,12 @@ func TestConnPoolSynchronization(t *testing.T) {
            subTest: "create if doesn't exist",
            oldSpec: &acidv1.Postgresql{
                Spec: acidv1.PostgresSpec{
                    ConnectionPool: &acidv1.ConnectionPool{},
                    ConnectionPooler: &acidv1.ConnectionPooler{},
                },
            },
            newSpec: &acidv1.Postgresql{
                Spec: acidv1.PostgresSpec{
                    ConnectionPool: &acidv1.ConnectionPool{},
                    ConnectionPooler: &acidv1.ConnectionPooler{},
                },
            },
            cluster: &clusterMissingObjects,

@ -123,7 +123,7 @@ func TestConnPoolSynchronization(t *testing.T) {
            },
            newSpec: &acidv1.Postgresql{
                Spec: acidv1.PostgresSpec{
                    EnableConnectionPool: boolToPointer(true),
                    EnableConnectionPooler: boolToPointer(true),
                },
            },
            cluster: &clusterMissingObjects,

@ -136,7 +136,7 @@ func TestConnPoolSynchronization(t *testing.T) {
            },
            newSpec: &acidv1.Postgresql{
                Spec: acidv1.PostgresSpec{
                    ConnectionPool: &acidv1.ConnectionPool{},
                    ConnectionPooler: &acidv1.ConnectionPooler{},
                },
            },
            cluster: &clusterMissingObjects,

@ -146,7 +146,7 @@ func TestConnPoolSynchronization(t *testing.T) {
            subTest: "delete if not needed",
            oldSpec: &acidv1.Postgresql{
                Spec: acidv1.PostgresSpec{
                    ConnectionPool: &acidv1.ConnectionPool{},
                    ConnectionPooler: &acidv1.ConnectionPooler{},
                },
            },
            newSpec: &acidv1.Postgresql{

@ -170,14 +170,14 @@ func TestConnPoolSynchronization(t *testing.T) {
            subTest: "update deployment",
            oldSpec: &acidv1.Postgresql{
                Spec: acidv1.PostgresSpec{
                    ConnectionPool: &acidv1.ConnectionPool{
                    ConnectionPooler: &acidv1.ConnectionPooler{
                        NumberOfInstances: int32ToPointer(1),
                    },
                },
            },
            newSpec: &acidv1.Postgresql{
                Spec: acidv1.PostgresSpec{
                    ConnectionPool: &acidv1.ConnectionPool{
                    ConnectionPooler: &acidv1.ConnectionPooler{
                        NumberOfInstances: int32ToPointer(2),
                    },
                },

@ -189,12 +189,12 @@ func TestConnPoolSynchronization(t *testing.T) {
            subTest: "update image from changed defaults",
            oldSpec: &acidv1.Postgresql{
                Spec: acidv1.PostgresSpec{
                    ConnectionPool: &acidv1.ConnectionPool{},
                    ConnectionPooler: &acidv1.ConnectionPooler{},
                },
            },
            newSpec: &acidv1.Postgresql{
                Spec: acidv1.PostgresSpec{
                    ConnectionPool: &acidv1.ConnectionPool{},
                    ConnectionPooler: &acidv1.ConnectionPooler{},
                },
            },
            cluster: &clusterNewDefaultsMock,

@ -202,7 +202,7 @@ func TestConnPoolSynchronization(t *testing.T) {
        },
    }
    for _, tt := range tests {
        err := tt.cluster.syncConnectionPool(tt.oldSpec, tt.newSpec, mockInstallLookupFunction)
        err := tt.cluster.syncConnectionPooler(tt.oldSpec, tt.newSpec, mockInstallLookupFunction)

        if err := tt.check(tt.cluster, err); err != nil {
            t.Errorf("%s [%s]: Could not synchronize, %+v",
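The table above drives syncConnectionPooler through a mockInstallLookupFunction whose definition lies outside this diff. Given that the lookup is invoked as lookup(schema, user) earlier in the commit, a plausible no-op stand-in looks like this (an assumption, not the repository's actual mock):

    // Hypothetical no-op mock; the signature is inferred from the
    // lookup(schema, user) call site in syncConnectionPooler.
    func mockInstallLookupFunction(schema string, user string) error {
        return nil
    }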
@ -2,6 +2,7 @@ package cluster

import (
    "bytes"
    "context"
    "encoding/gob"
    "encoding/json"
    "fmt"

@ -47,7 +48,7 @@ func (g *SecretOauthTokenGetter) getOAuthToken() (string, error) {
    // Temporarily getting the postgresql-operator secret from the NamespaceDefault
    credentialsSecret, err := g.kubeClient.
        Secrets(g.OAuthTokenSecretName.Namespace).
        Get(g.OAuthTokenSecretName.Name, metav1.GetOptions{})
        Get(context.TODO(), g.OAuthTokenSecretName.Name, metav1.GetOptions{})

    if err != nil {
        return "", fmt.Errorf("could not get credentials secret: %v", err)

@ -278,7 +279,7 @@ func (c *Cluster) waitStatefulsetReady() error {
        listOptions := metav1.ListOptions{
            LabelSelector: c.labelsSet(false).String(),
        }
        ss, err := c.KubeClient.StatefulSets(c.Namespace).List(listOptions)
        ss, err := c.KubeClient.StatefulSets(c.Namespace).List(context.TODO(), listOptions)
        if err != nil {
            return false, err
        }

@ -313,7 +314,7 @@ func (c *Cluster) _waitPodLabelsReady(anyReplica bool) error {
    }
    podsNumber = 1
    if !anyReplica {
        pods, err := c.KubeClient.Pods(namespace).List(listOptions)
        pods, err := c.KubeClient.Pods(namespace).List(context.TODO(), listOptions)
        if err != nil {
            return err
        }

@ -327,7 +328,7 @@ func (c *Cluster) _waitPodLabelsReady(anyReplica bool) error {
        func() (bool, error) {
            masterCount := 0
            if !anyReplica {
                masterPods, err2 := c.KubeClient.Pods(namespace).List(masterListOption)
                masterPods, err2 := c.KubeClient.Pods(namespace).List(context.TODO(), masterListOption)
                if err2 != nil {
                    return false, err2
                }

@ -337,7 +338,7 @@ func (c *Cluster) _waitPodLabelsReady(anyReplica bool) error {
                }
                masterCount = len(masterPods.Items)
            }
            replicaPods, err2 := c.KubeClient.Pods(namespace).List(replicaListOption)
            replicaPods, err2 := c.KubeClient.Pods(namespace).List(context.TODO(), replicaListOption)
            if err2 != nil {
                return false, err2
            }

@ -414,24 +415,24 @@ func (c *Cluster) labelsSelector() *metav1.LabelSelector {
        }
    }
}

// Return connection pool labels selector, which should from one point of view
// Return connection pooler labels selector, which should from one point of view
// inherit most of the labels from the cluster itself, but at the same time
// have e.g. different `application` label, so that recreatePod operation will
// not interfere with it (it lists all the pods via labels, and if there would
// be no difference, it will recreate also pooler pods).
func (c *Cluster) connPoolLabelsSelector() *metav1.LabelSelector {
    connPoolLabels := labels.Set(map[string]string{})
func (c *Cluster) connectionPoolerLabelsSelector() *metav1.LabelSelector {
    connectionPoolerLabels := labels.Set(map[string]string{})

    extraLabels := labels.Set(map[string]string{
        "connection-pool": c.connPoolName(),
        "application":     "db-connection-pool",
        "connection-pooler": c.connectionPoolerName(),
        "application":       "db-connection-pooler",
    })

    connPoolLabels = labels.Merge(connPoolLabels, c.labelsSet(false))
    connPoolLabels = labels.Merge(connPoolLabels, extraLabels)
    connectionPoolerLabels = labels.Merge(connectionPoolerLabels, c.labelsSet(false))
    connectionPoolerLabels = labels.Merge(connectionPoolerLabels, extraLabels)

    return &metav1.LabelSelector{
        MatchLabels: connPoolLabels,
        MatchLabels: connectionPoolerLabels,
        MatchExpressions: nil,
    }
}
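The selector built above merges the cluster's own labels with pooler-specific ones, so pod-listing operations such as recreatePod, which match on the cluster labels plus their own application label, leave pooler pods alone. A compact sketch of just the merging step (the cluster labels passed in are illustrative):

    package sketch

    import (
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/apimachinery/pkg/labels"
    )

    // poolerSelector layers the pooler-specific labels over the inherited
    // cluster labels via labels.Merge; keys from the second set win.
    func poolerSelector(clusterLabels labels.Set, poolerName string) *metav1.LabelSelector {
        extra := labels.Set{
            "connection-pooler": poolerName,
            "application":       "db-connection-pooler",
        }
        return &metav1.LabelSelector{MatchLabels: labels.Merge(clusterLabels, extra)}
    }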
@ -509,14 +510,23 @@ func (c *Cluster) patroniUsesKubernetes() bool {
    return c.OpConfig.EtcdHost == ""
}

func (c *Cluster) needConnectionPoolWorker(spec *acidv1.PostgresSpec) bool {
    if spec.EnableConnectionPool == nil {
        return spec.ConnectionPool != nil
func (c *Cluster) patroniKubernetesUseConfigMaps() bool {
    if !c.patroniUsesKubernetes() {
        return false
    }

    // otherwise, follow the operator configuration
    return c.OpConfig.KubernetesUseConfigMaps
}

func (c *Cluster) needConnectionPoolerWorker(spec *acidv1.PostgresSpec) bool {
    if spec.EnableConnectionPooler == nil {
        return spec.ConnectionPooler != nil
    } else {
        return *spec.EnableConnectionPool
        return *spec.EnableConnectionPooler
    }
}

func (c *Cluster) needConnectionPool() bool {
    return c.needConnectionPoolWorker(&c.Spec)
func (c *Cluster) needConnectionPooler() bool {
    return c.needConnectionPoolerWorker(&c.Spec)
}
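The two helpers at the end encode the enablement rule the tests in this commit exercise: a set enableConnectionPooler flag always wins, and only when it is nil does the presence of the connectionPooler section decide. A self-contained sketch with simplified stand-in types (not the operator's acidv1 structs):

    package sketch

    type ConnectionPooler struct{}

    type PostgresSpec struct {
        EnableConnectionPooler *bool
        ConnectionPooler       *ConnectionPooler
    }

    // needConnectionPooler reproduces the decision rule of needConnectionPoolerWorker.
    func needConnectionPooler(spec *PostgresSpec) bool {
        if spec.EnableConnectionPooler == nil {
            // flag unset: fall back to whether the section exists
            return spec.ConnectionPooler != nil
        }
        return *spec.EnableConnectionPooler
    }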
@ -1,11 +1,12 @@
package cluster

import (
    "context"
    "fmt"
    "strconv"
    "strings"

    "k8s.io/api/core/v1"
    v1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/api/resource"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

@ -23,7 +24,7 @@ func (c *Cluster) listPersistentVolumeClaims() ([]v1.PersistentVolumeClaim, erro
        LabelSelector: c.labelsSet(false).String(),
    }

    pvcs, err := c.KubeClient.PersistentVolumeClaims(ns).List(listOptions)
    pvcs, err := c.KubeClient.PersistentVolumeClaims(ns).List(context.TODO(), listOptions)
    if err != nil {
        return nil, fmt.Errorf("could not list PersistentVolumeClaims: %v", err)
    }

@ -38,7 +39,7 @@ func (c *Cluster) deletePersistentVolumeClaims() error {
    }
    for _, pvc := range pvcs {
        c.logger.Debugf("deleting PVC %q", util.NameFromMeta(pvc.ObjectMeta))
        if err := c.KubeClient.PersistentVolumeClaims(pvc.Namespace).Delete(pvc.Name, c.deleteOptions); err != nil {
        if err := c.KubeClient.PersistentVolumeClaims(pvc.Namespace).Delete(context.TODO(), pvc.Name, c.deleteOptions); err != nil {
            c.logger.Warningf("could not delete PersistentVolumeClaim: %v", err)
        }
    }

@ -78,7 +79,7 @@ func (c *Cluster) listPersistentVolumes() ([]*v1.PersistentVolume, error) {
                continue
            }
        }
        pv, err := c.KubeClient.PersistentVolumes().Get(pvc.Spec.VolumeName, metav1.GetOptions{})
        pv, err := c.KubeClient.PersistentVolumes().Get(context.TODO(), pvc.Spec.VolumeName, metav1.GetOptions{})
        if err != nil {
            return nil, fmt.Errorf("could not get PersistentVolume: %v", err)
        }

@ -143,7 +144,7 @@ func (c *Cluster) resizeVolumes(newVolume acidv1.Volume, resizers []volumes.Volu
        c.logger.Debugf("filesystem resize successful on volume %q", pv.Name)
        pv.Spec.Capacity[v1.ResourceStorage] = newQuantity
        c.logger.Debugf("updating persistent volume definition for volume %q", pv.Name)
        if _, err := c.KubeClient.PersistentVolumes().Update(pv); err != nil {
        if _, err := c.KubeClient.PersistentVolumes().Update(context.TODO(), pv, metav1.UpdateOptions{}); err != nil {
            return fmt.Errorf("could not update persistent volume: %q", err)
        }
        c.logger.Debugf("successfully updated persistent volume %q", pv.Name)
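resizeVolumes above stores the new size into pv.Spec.Capacity as a resource.Quantity before calling Update. A short sketch of how such quantities are parsed and compared to decide whether a resize is needed at all (the size strings are illustrative):

    package sketch

    import (
        "fmt"

        "k8s.io/apimachinery/pkg/api/resource"
    )

    // needsResize reports whether the requested capacity exceeds the current one.
    func needsResize(current, requested string) (bool, error) {
        cur, err := resource.ParseQuantity(current) // e.g. "10Gi"
        if err != nil {
            return false, fmt.Errorf("could not parse current size: %v", err)
        }
        req, err := resource.ParseQuantity(requested) // e.g. "20Gi"
        if err != nil {
            return false, fmt.Errorf("could not parse requested size: %v", err)
        }
        // Cmp returns -1 when cur < req, i.e. the volume must grow.
        return cur.Cmp(req) < 0, nil
    }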
@ -1,6 +1,7 @@
package controller

import (
    "context"
    "fmt"
    "os"
    "sync"

@ -99,7 +100,7 @@ func (c *Controller) initOperatorConfig() {

    if c.config.ConfigMapName != (spec.NamespacedName{}) {
        configMap, err := c.KubeClient.ConfigMaps(c.config.ConfigMapName.Namespace).
            Get(c.config.ConfigMapName.Name, metav1.GetOptions{})
            Get(context.TODO(), c.config.ConfigMapName.Name, metav1.GetOptions{})
        if err != nil {
            panic(err)
        }

@ -406,7 +407,7 @@ func (c *Controller) getEffectiveNamespace(namespaceFromEnvironment, namespaceFr

    } else {

        if _, err := c.KubeClient.Namespaces().Get(namespace, metav1.GetOptions{}); err != nil {
        if _, err := c.KubeClient.Namespaces().Get(context.TODO(), namespace, metav1.GetOptions{}); err != nil {
            c.logger.Fatalf("Could not find the watched namespace %q", namespace)
        } else {
            c.logger.Infof("Listening to the specific namespace %q", namespace)
@ -1,6 +1,7 @@
package controller

import (
    "context"
    "fmt"
    "time"

@ -22,7 +23,7 @@ func (c *Controller) nodeListFunc(options metav1.ListOptions) (runtime.Object, e
        TimeoutSeconds: options.TimeoutSeconds,
    }

    return c.KubeClient.Nodes().List(opts)
    return c.KubeClient.Nodes().List(context.TODO(), opts)
}

func (c *Controller) nodeWatchFunc(options metav1.ListOptions) (watch.Interface, error) {

@ -32,7 +33,7 @@ func (c *Controller) nodeWatchFunc(options metav1.ListOptions) (watch.Interface,
        TimeoutSeconds: options.TimeoutSeconds,
    }

    return c.KubeClient.Nodes().Watch(opts)
    return c.KubeClient.Nodes().Watch(context.TODO(), opts)
}

func (c *Controller) nodeAdd(obj interface{}) {

@ -87,7 +88,7 @@ func (c *Controller) attemptToMoveMasterPodsOffNode(node *v1.Node) error {
    opts := metav1.ListOptions{
        LabelSelector: labels.Set(c.opConfig.ClusterLabels).String(),
    }
    podList, err := c.KubeClient.Pods(c.opConfig.WatchedNamespace).List(opts)
    podList, err := c.KubeClient.Pods(c.opConfig.WatchedNamespace).List(context.TODO(), opts)
    if err != nil {
        c.logger.Errorf("could not fetch list of the pods: %v", err)
        return err
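nodeListFunc and nodeWatchFunc keep their pre-context signatures because they feed client-go's informer machinery, which still passes bare ListOptions; the context is injected inside. A sketch of how such a pair plugs into a cache.ListWatch (the actual wiring happens elsewhere in the operator):

    package sketch

    import (
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/apimachinery/pkg/runtime"
        "k8s.io/apimachinery/pkg/watch"
        "k8s.io/client-go/tools/cache"
    )

    // newListWatch adapts list/watch funcs with the signatures above into the
    // structure informers consume.
    func newListWatch(
        list func(metav1.ListOptions) (runtime.Object, error),
        watchFn func(metav1.ListOptions) (watch.Interface, error),
    ) *cache.ListWatch {
        return &cache.ListWatch{
            ListFunc:  func(options metav1.ListOptions) (runtime.Object, error) { return list(options) },
            WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { return watchFn(options) },
        }
    }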
@ -1,6 +1,7 @@
package controller

import (
    "context"
    "fmt"

    "time"

@ -14,7 +15,8 @@ import (

func (c *Controller) readOperatorConfigurationFromCRD(configObjectNamespace, configObjectName string) (*acidv1.OperatorConfiguration, error) {

    config, err := c.KubeClient.AcidV1ClientSet.AcidV1().OperatorConfigurations(configObjectNamespace).Get(configObjectName, metav1.GetOptions{})
    config, err := c.KubeClient.AcidV1ClientSet.AcidV1().OperatorConfigurations(configObjectNamespace).Get(
        context.TODO(), configObjectName, metav1.GetOptions{})
    if err != nil {
        return nil, fmt.Errorf("could not get operator configuration object %q: %v", configObjectName, err)
    }

@ -33,6 +35,7 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur
    // general config
    result.EnableCRDValidation = fromCRD.EnableCRDValidation
    result.EtcdHost = fromCRD.EtcdHost
    result.KubernetesUseConfigMaps = fromCRD.KubernetesUseConfigMaps
    result.DockerImage = fromCRD.DockerImage
    result.Workers = fromCRD.Workers
    result.MinInstances = fromCRD.MinInstances

@ -148,51 +151,51 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur
    result.ScalyrCPULimit = fromCRD.Scalyr.ScalyrCPULimit
    result.ScalyrMemoryLimit = fromCRD.Scalyr.ScalyrMemoryLimit

    // Connection pool. Looks like we can't use defaulting in CRD before 1.17,
    // Connection pooler. Looks like we can't use defaulting in CRD before 1.17,
    // so ensure default values here.
    result.ConnectionPool.NumberOfInstances = util.CoalesceInt32(
        fromCRD.ConnectionPool.NumberOfInstances,
    result.ConnectionPooler.NumberOfInstances = util.CoalesceInt32(
        fromCRD.ConnectionPooler.NumberOfInstances,
        int32ToPointer(2))

    result.ConnectionPool.NumberOfInstances = util.MaxInt32(
        result.ConnectionPool.NumberOfInstances,
    result.ConnectionPooler.NumberOfInstances = util.MaxInt32(
        result.ConnectionPooler.NumberOfInstances,
        int32ToPointer(2))

    result.ConnectionPool.Schema = util.Coalesce(
        fromCRD.ConnectionPool.Schema,
        constants.ConnectionPoolSchemaName)
    result.ConnectionPooler.Schema = util.Coalesce(
        fromCRD.ConnectionPooler.Schema,
        constants.ConnectionPoolerSchemaName)

    result.ConnectionPool.User = util.Coalesce(
        fromCRD.ConnectionPool.User,
        constants.ConnectionPoolUserName)
    result.ConnectionPooler.User = util.Coalesce(
        fromCRD.ConnectionPooler.User,
        constants.ConnectionPoolerUserName)

    result.ConnectionPool.Image = util.Coalesce(
        fromCRD.ConnectionPool.Image,
    result.ConnectionPooler.Image = util.Coalesce(
        fromCRD.ConnectionPooler.Image,
        "registry.opensource.zalan.do/acid/pgbouncer")

    result.ConnectionPool.Mode = util.Coalesce(
        fromCRD.ConnectionPool.Mode,
        constants.ConnectionPoolDefaultMode)
    result.ConnectionPooler.Mode = util.Coalesce(
        fromCRD.ConnectionPooler.Mode,
        constants.ConnectionPoolerDefaultMode)

    result.ConnectionPool.ConnPoolDefaultCPURequest = util.Coalesce(
        fromCRD.ConnectionPool.DefaultCPURequest,
        constants.ConnectionPoolDefaultCpuRequest)
    result.ConnectionPooler.ConnectionPoolerDefaultCPURequest = util.Coalesce(
        fromCRD.ConnectionPooler.DefaultCPURequest,
        constants.ConnectionPoolerDefaultCpuRequest)

    result.ConnectionPool.ConnPoolDefaultMemoryRequest = util.Coalesce(
        fromCRD.ConnectionPool.DefaultMemoryRequest,
        constants.ConnectionPoolDefaultMemoryRequest)
    result.ConnectionPooler.ConnectionPoolerDefaultMemoryRequest = util.Coalesce(
        fromCRD.ConnectionPooler.DefaultMemoryRequest,
        constants.ConnectionPoolerDefaultMemoryRequest)

    result.ConnectionPool.ConnPoolDefaultCPULimit = util.Coalesce(
        fromCRD.ConnectionPool.DefaultCPULimit,
        constants.ConnectionPoolDefaultCpuLimit)
    result.ConnectionPooler.ConnectionPoolerDefaultCPULimit = util.Coalesce(
        fromCRD.ConnectionPooler.DefaultCPULimit,
        constants.ConnectionPoolerDefaultCpuLimit)

    result.ConnectionPool.ConnPoolDefaultMemoryLimit = util.Coalesce(
        fromCRD.ConnectionPool.DefaultMemoryLimit,
        constants.ConnectionPoolDefaultMemoryLimit)
    result.ConnectionPooler.ConnectionPoolerDefaultMemoryLimit = util.Coalesce(
        fromCRD.ConnectionPooler.DefaultMemoryLimit,
        constants.ConnectionPoolerDefaultMemoryLimit)

    result.ConnectionPool.MaxDBConnections = util.CoalesceInt32(
        fromCRD.ConnectionPool.MaxDBConnections,
        int32ToPointer(constants.ConnPoolMaxDBConnections))
    result.ConnectionPooler.MaxDBConnections = util.CoalesceInt32(
        fromCRD.ConnectionPooler.MaxDBConnections,
        int32ToPointer(constants.ConnectionPoolerMaxDBConnections))

    return result
}
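All the defaulting above leans on util.Coalesce and util.CoalesceInt32 (first non-empty or non-nil value wins), plus util.MaxInt32 to clamp the instance count to at least two. A minimal sketch of the coalescing pattern (not the operator's util package):

    package sketch

    // coalesce returns the first non-empty string, mirroring util.Coalesce.
    func coalesce(values ...string) string {
        for _, v := range values {
            if v != "" {
                return v
            }
        }
        return ""
    }

    // coalesceInt32 returns the first non-nil pointer, mirroring util.CoalesceInt32.
    func coalesceInt32(values ...*int32) *int32 {
        for _, v := range values {
            if v != nil {
                return v
            }
        }
        return nil
    }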
@ -1,7 +1,9 @@
package controller

import (
    "k8s.io/api/core/v1"
    "context"

    v1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/apimachinery/pkg/watch"

@ -19,7 +21,7 @@ func (c *Controller) podListFunc(options metav1.ListOptions) (runtime.Object, er
        TimeoutSeconds: options.TimeoutSeconds,
    }

    return c.KubeClient.Pods(c.opConfig.WatchedNamespace).List(opts)
    return c.KubeClient.Pods(c.opConfig.WatchedNamespace).List(context.TODO(), opts)
}

func (c *Controller) podWatchFunc(options metav1.ListOptions) (watch.Interface, error) {

@ -29,7 +31,7 @@ func (c *Controller) podWatchFunc(options metav1.ListOptions) (watch.Interface,
        TimeoutSeconds: options.TimeoutSeconds,
    }

    return c.KubeClient.Pods(c.opConfig.WatchedNamespace).Watch(opts)
    return c.KubeClient.Pods(c.opConfig.WatchedNamespace).Watch(context.TODO(), opts)
}

func (c *Controller) dispatchPodEvent(clusterName spec.NamespacedName, event cluster.PodEvent) {
@ -1,6 +1,7 @@
package controller

import (
    "context"
    "fmt"
    "reflect"
    "strings"

@ -43,7 +44,7 @@ func (c *Controller) listClusters(options metav1.ListOptions) (*acidv1.Postgresq
    var pgList acidv1.PostgresqlList

    // TODO: use the SharedInformer cache instead of querying the Kubernetes API directly.
    list, err := c.KubeClient.AcidV1ClientSet.AcidV1().Postgresqls(c.opConfig.WatchedNamespace).List(options)
    list, err := c.KubeClient.AcidV1ClientSet.AcidV1().Postgresqls(c.opConfig.WatchedNamespace).List(context.TODO(), options)
    if err != nil {
        c.logger.Errorf("could not list postgresql objects: %v", err)
    }

@ -535,7 +536,7 @@ func (c *Controller) submitRBACCredentials(event ClusterEvent) error {
func (c *Controller) createPodServiceAccount(namespace string) error {

    podServiceAccountName := c.opConfig.PodServiceAccountName
    _, err := c.KubeClient.ServiceAccounts(namespace).Get(podServiceAccountName, metav1.GetOptions{})
    _, err := c.KubeClient.ServiceAccounts(namespace).Get(context.TODO(), podServiceAccountName, metav1.GetOptions{})
    if k8sutil.ResourceNotFound(err) {

        c.logger.Infof(fmt.Sprintf("creating pod service account %q in the %q namespace", podServiceAccountName, namespace))

@ -543,7 +544,7 @@ func (c *Controller) createPodServiceAccount(namespace string) error {
        // get a separate copy of service account
        // to prevent a race condition when setting a namespace for many clusters
        sa := *c.PodServiceAccount
        if _, err = c.KubeClient.ServiceAccounts(namespace).Create(&sa); err != nil {
        if _, err = c.KubeClient.ServiceAccounts(namespace).Create(context.TODO(), &sa, metav1.CreateOptions{}); err != nil {
            return fmt.Errorf("cannot deploy the pod service account %q defined in the configuration to the %q namespace: %v", podServiceAccountName, namespace, err)
        }

@ -560,7 +561,7 @@ func (c *Controller) createRoleBindings(namespace string) error {
    podServiceAccountName := c.opConfig.PodServiceAccountName
    podServiceAccountRoleBindingName := c.PodServiceAccountRoleBinding.Name

    _, err := c.KubeClient.RoleBindings(namespace).Get(podServiceAccountRoleBindingName, metav1.GetOptions{})
    _, err := c.KubeClient.RoleBindings(namespace).Get(context.TODO(), podServiceAccountRoleBindingName, metav1.GetOptions{})
    if k8sutil.ResourceNotFound(err) {

        c.logger.Infof("Creating the role binding %q in the %q namespace", podServiceAccountRoleBindingName, namespace)

@ -568,7 +569,7 @@ func (c *Controller) createRoleBindings(namespace string) error {
        // get a separate copy of role binding
        // to prevent a race condition when setting a namespace for many clusters
        rb := *c.PodServiceAccountRoleBinding
        _, err = c.KubeClient.RoleBindings(namespace).Create(&rb)
        _, err = c.KubeClient.RoleBindings(namespace).Create(context.TODO(), &rb, metav1.CreateOptions{})
        if err != nil {
            return fmt.Errorf("cannot bind the pod service account %q defined in the configuration to the cluster role in the %q namespace: %v", podServiceAccountName, namespace, err)
        }
@ -1,6 +1,7 @@
package controller

import (
    "context"
    "encoding/json"
    "fmt"

@ -50,7 +51,7 @@ func (c *Controller) clusterWorkerID(clusterName spec.NamespacedName) uint32 {
}

func (c *Controller) createOperatorCRD(crd *apiextv1beta1.CustomResourceDefinition) error {
    if _, err := c.KubeClient.CustomResourceDefinitions().Create(crd); err != nil {
    if _, err := c.KubeClient.CustomResourceDefinitions().Create(context.TODO(), crd, metav1.CreateOptions{}); err != nil {
        if k8sutil.ResourceAlreadyExists(err) {
            c.logger.Infof("customResourceDefinition %q is already registered and will only be updated", crd.Name)

@ -58,7 +59,8 @@ func (c *Controller) createOperatorCRD(crd *apiextv1beta1.CustomResourceDefiniti
            if err != nil {
                return fmt.Errorf("could not marshal new customResourceDefinition: %v", err)
            }
            if _, err := c.KubeClient.CustomResourceDefinitions().Patch(crd.Name, types.MergePatchType, patch); err != nil {
            if _, err := c.KubeClient.CustomResourceDefinitions().Patch(
                context.TODO(), crd.Name, types.MergePatchType, patch, metav1.PatchOptions{}); err != nil {
                return fmt.Errorf("could not update customResourceDefinition: %v", err)
            }
        } else {

@ -69,7 +71,7 @@ func (c *Controller) createOperatorCRD(crd *apiextv1beta1.CustomResourceDefiniti
    }

    return wait.Poll(c.config.CRDReadyWaitInterval, c.config.CRDReadyWaitTimeout, func() (bool, error) {
        c, err := c.KubeClient.CustomResourceDefinitions().Get(crd.Name, metav1.GetOptions{})
        c, err := c.KubeClient.CustomResourceDefinitions().Get(context.TODO(), crd.Name, metav1.GetOptions{})
        if err != nil {
            return false, err
        }

@ -115,7 +117,7 @@ func (c *Controller) getInfrastructureRoles(rolesSecret *spec.NamespacedName) (m

    infraRolesSecret, err := c.KubeClient.
        Secrets(rolesSecret.Namespace).
        Get(rolesSecret.Name, metav1.GetOptions{})
        Get(context.TODO(), rolesSecret.Name, metav1.GetOptions{})
    if err != nil {
        c.logger.Debugf("infrastructure roles secret name: %q", *rolesSecret)
        return nil, fmt.Errorf("could not get infrastructure roles secret: %v", err)

@ -161,7 +163,8 @@ Users:
    }

    // perhaps we have some map entries with usernames, passwords, let's check if we have those users in the configmap
    if infraRolesMap, err := c.KubeClient.ConfigMaps(rolesSecret.Namespace).Get(rolesSecret.Name, metav1.GetOptions{}); err == nil {
    if infraRolesMap, err := c.KubeClient.ConfigMaps(rolesSecret.Namespace).Get(
        context.TODO(), rolesSecret.Name, metav1.GetOptions{}); err == nil {
        // we have a configmap with username - json description, let's read and decode it
        for role, s := range infraRolesMap.Data {
            roleDescr, err := readDecodedRole(s)
@ -65,7 +65,7 @@ func NewForConfig(c *rest.Config) (*Clientset, error) {
    configShallowCopy := *c
    if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 {
        if configShallowCopy.Burst <= 0 {
            return nil, fmt.Errorf("Burst is required to be greater than 0 when RateLimiter is not set and QPS is set to greater than 0")
            return nil, fmt.Errorf("burst is required to be greater than 0 when RateLimiter is not set and QPS is set to greater than 0")
        }
        configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst)
    }
@ -25,6 +25,8 @@ SOFTWARE.
package fake

import (
    "context"

    acidzalandov1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1"
    v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    schema "k8s.io/apimachinery/pkg/runtime/schema"

@ -42,7 +44,7 @@ var operatorconfigurationsResource = schema.GroupVersionResource{Group: "acid.za
var operatorconfigurationsKind = schema.GroupVersionKind{Group: "acid.zalan.do", Version: "v1", Kind: "OperatorConfiguration"}

// Get takes name of the operatorConfiguration, and returns the corresponding operatorConfiguration object, and an error if there is any.
func (c *FakeOperatorConfigurations) Get(name string, options v1.GetOptions) (result *acidzalandov1.OperatorConfiguration, err error) {
func (c *FakeOperatorConfigurations) Get(ctx context.Context, name string, options v1.GetOptions) (result *acidzalandov1.OperatorConfiguration, err error) {
    obj, err := c.Fake.
        Invokes(testing.NewGetAction(operatorconfigurationsResource, c.ns, name), &acidzalandov1.OperatorConfiguration{})

@ -25,6 +25,8 @@ SOFTWARE.
package fake

import (
    "context"

    acidzalandov1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1"
    v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    labels "k8s.io/apimachinery/pkg/labels"

@ -45,7 +47,7 @@ var postgresqlsResource = schema.GroupVersionResource{Group: "acid.zalan.do", Ve
var postgresqlsKind = schema.GroupVersionKind{Group: "acid.zalan.do", Version: "v1", Kind: "Postgresql"}

// Get takes name of the postgresql, and returns the corresponding postgresql object, and an error if there is any.
func (c *FakePostgresqls) Get(name string, options v1.GetOptions) (result *acidzalandov1.Postgresql, err error) {
func (c *FakePostgresqls) Get(ctx context.Context, name string, options v1.GetOptions) (result *acidzalandov1.Postgresql, err error) {
    obj, err := c.Fake.
        Invokes(testing.NewGetAction(postgresqlsResource, c.ns, name), &acidzalandov1.Postgresql{})

@ -56,7 +58,7 @@ func (c *FakePostgresqls) Get(name string, options v1.GetOptions) (result *acidz
}

// List takes label and field selectors, and returns the list of Postgresqls that match those selectors.
func (c *FakePostgresqls) List(opts v1.ListOptions) (result *acidzalandov1.PostgresqlList, err error) {
func (c *FakePostgresqls) List(ctx context.Context, opts v1.ListOptions) (result *acidzalandov1.PostgresqlList, err error) {
    obj, err := c.Fake.
        Invokes(testing.NewListAction(postgresqlsResource, postgresqlsKind, c.ns, opts), &acidzalandov1.PostgresqlList{})

@ -78,14 +80,14 @@ func (c *FakePostgresqls) List(opts v1.ListOptions) (result *acidzalandov1.Postg
}

// Watch returns a watch.Interface that watches the requested postgresqls.
func (c *FakePostgresqls) Watch(opts v1.ListOptions) (watch.Interface, error) {
func (c *FakePostgresqls) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
    return c.Fake.
        InvokesWatch(testing.NewWatchAction(postgresqlsResource, c.ns, opts))

}

// Create takes the representation of a postgresql and creates it. Returns the server's representation of the postgresql, and an error, if there is any.
func (c *FakePostgresqls) Create(postgresql *acidzalandov1.Postgresql) (result *acidzalandov1.Postgresql, err error) {
func (c *FakePostgresqls) Create(ctx context.Context, postgresql *acidzalandov1.Postgresql, opts v1.CreateOptions) (result *acidzalandov1.Postgresql, err error) {
    obj, err := c.Fake.
        Invokes(testing.NewCreateAction(postgresqlsResource, c.ns, postgresql), &acidzalandov1.Postgresql{})

@ -96,7 +98,7 @@ func (c *FakePostgresqls) Create(postgresql *acidzalandov1.Postgresql) (result *
}

// Update takes the representation of a postgresql and updates it. Returns the server's representation of the postgresql, and an error, if there is any.
func (c *FakePostgresqls) Update(postgresql *acidzalandov1.Postgresql) (result *acidzalandov1.Postgresql, err error) {
func (c *FakePostgresqls) Update(ctx context.Context, postgresql *acidzalandov1.Postgresql, opts v1.UpdateOptions) (result *acidzalandov1.Postgresql, err error) {
    obj, err := c.Fake.
        Invokes(testing.NewUpdateAction(postgresqlsResource, c.ns, postgresql), &acidzalandov1.Postgresql{})

@ -108,7 +110,7 @@ func (c *FakePostgresqls) Update(postgresql *acidzalandov1.Postgresql) (result *

// UpdateStatus was generated because the type contains a Status member.
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
func (c *FakePostgresqls) UpdateStatus(postgresql *acidzalandov1.Postgresql) (*acidzalandov1.Postgresql, error) {
func (c *FakePostgresqls) UpdateStatus(ctx context.Context, postgresql *acidzalandov1.Postgresql, opts v1.UpdateOptions) (*acidzalandov1.Postgresql, error) {
    obj, err := c.Fake.
        Invokes(testing.NewUpdateSubresourceAction(postgresqlsResource, "status", c.ns, postgresql), &acidzalandov1.Postgresql{})

@ -119,7 +121,7 @@ func (c *FakePostgresqls) UpdateStatus(postgresql *acidzalandov1.Postgresql) (*a
}

// Delete takes name of the postgresql and deletes it. Returns an error if one occurs.
func (c *FakePostgresqls) Delete(name string, options *v1.DeleteOptions) error {
func (c *FakePostgresqls) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
    _, err := c.Fake.
        Invokes(testing.NewDeleteAction(postgresqlsResource, c.ns, name), &acidzalandov1.Postgresql{})

@ -127,15 +129,15 @@ func (c *FakePostgresqls) Delete(name string, options *v1.DeleteOptions) error {
}

// DeleteCollection deletes a collection of objects.
func (c *FakePostgresqls) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
    action := testing.NewDeleteCollectionAction(postgresqlsResource, c.ns, listOptions)
func (c *FakePostgresqls) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    action := testing.NewDeleteCollectionAction(postgresqlsResource, c.ns, listOpts)

    _, err := c.Fake.Invokes(action, &acidzalandov1.PostgresqlList{})
    return err
}

// Patch applies the patch and returns the patched postgresql.
func (c *FakePostgresqls) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *acidzalandov1.Postgresql, err error) {
func (c *FakePostgresqls) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *acidzalandov1.Postgresql, err error) {
    obj, err := c.Fake.
        Invokes(testing.NewPatchSubresourceAction(postgresqlsResource, c.ns, name, pt, data, subresources...), &acidzalandov1.Postgresql{})

@ -25,6 +25,8 @@ SOFTWARE.
 package v1

 import (
+	"context"
+
 	acidzalandov1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1"
 	scheme "github.com/zalando/postgres-operator/pkg/generated/clientset/versioned/scheme"
 	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"

@ -39,7 +41,7 @@ type OperatorConfigurationsGetter interface {

 // OperatorConfigurationInterface has methods to work with OperatorConfiguration resources.
 type OperatorConfigurationInterface interface {
-	Get(name string, options v1.GetOptions) (*acidzalandov1.OperatorConfiguration, error)
+	Get(ctx context.Context, name string, opts v1.GetOptions) (*acidzalandov1.OperatorConfiguration, error)
 	OperatorConfigurationExpansion
 }

@ -58,14 +60,14 @@ func newOperatorConfigurations(c *AcidV1Client, namespace string) *operatorConfi
 }

 // Get takes name of the operatorConfiguration, and returns the corresponding operatorConfiguration object, and an error if there is any.
-func (c *operatorConfigurations) Get(name string, options v1.GetOptions) (result *acidzalandov1.OperatorConfiguration, err error) {
+func (c *operatorConfigurations) Get(ctx context.Context, name string, options v1.GetOptions) (result *acidzalandov1.OperatorConfiguration, err error) {
 	result = &acidzalandov1.OperatorConfiguration{}
 	err = c.client.Get().
 		Namespace(c.ns).
 		Resource("operatorconfigurations").
 		Name(name).
 		VersionedParams(&options, scheme.ParameterCodec).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
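Callers of the operator-configuration client now have to thread a context through Get. A one-line sketch of the new call shape (the resource name and namespace are made up):

    // client is the generated versioned.Interface; context.TODO() is the
    // conventional placeholder until a real context is plumbed through.
    cfg, err := client.AcidV1().OperatorConfigurations("default").Get(
        context.TODO(), "postgres-operator", metav1.GetOptions{})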
@ -25,6 +25,7 @@ SOFTWARE.
 package v1

 import (
+	"context"
 	"time"

 	v1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1"

@ -43,15 +44,15 @@ type PostgresqlsGetter interface {

 // PostgresqlInterface has methods to work with Postgresql resources.
 type PostgresqlInterface interface {
-	Create(*v1.Postgresql) (*v1.Postgresql, error)
-	Update(*v1.Postgresql) (*v1.Postgresql, error)
-	UpdateStatus(*v1.Postgresql) (*v1.Postgresql, error)
-	Delete(name string, options *metav1.DeleteOptions) error
-	DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error
-	Get(name string, options metav1.GetOptions) (*v1.Postgresql, error)
-	List(opts metav1.ListOptions) (*v1.PostgresqlList, error)
-	Watch(opts metav1.ListOptions) (watch.Interface, error)
-	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Postgresql, err error)
+	Create(ctx context.Context, postgresql *v1.Postgresql, opts metav1.CreateOptions) (*v1.Postgresql, error)
+	Update(ctx context.Context, postgresql *v1.Postgresql, opts metav1.UpdateOptions) (*v1.Postgresql, error)
+	UpdateStatus(ctx context.Context, postgresql *v1.Postgresql, opts metav1.UpdateOptions) (*v1.Postgresql, error)
+	Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
+	DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
+	Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Postgresql, error)
+	List(ctx context.Context, opts metav1.ListOptions) (*v1.PostgresqlList, error)
+	Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
+	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Postgresql, err error)
 	PostgresqlExpansion
 }
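Every method on the interface gains a leading context.Context, and the write calls additionally take explicit Create/Update/Patch/Delete options, matching the API shape client-go adopted around Kubernetes 1.18. A before/after sketch for a typical caller (names illustrative):

    // before
    pgs, err := client.AcidV1().Postgresqls("default").List(metav1.ListOptions{})

    // after
    pgs, err := client.AcidV1().Postgresqls("default").List(context.TODO(), metav1.ListOptions{})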
@ -70,20 +71,20 @@ func newPostgresqls(c *AcidV1Client, namespace string) *postgresqls {
 }

 // Get takes name of the postgresql, and returns the corresponding postgresql object, and an error if there is any.
-func (c *postgresqls) Get(name string, options metav1.GetOptions) (result *v1.Postgresql, err error) {
+func (c *postgresqls) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Postgresql, err error) {
 	result = &v1.Postgresql{}
 	err = c.client.Get().
 		Namespace(c.ns).
 		Resource("postgresqls").
 		Name(name).
 		VersionedParams(&options, scheme.ParameterCodec).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }

 // List takes label and field selectors, and returns the list of Postgresqls that match those selectors.
-func (c *postgresqls) List(opts metav1.ListOptions) (result *v1.PostgresqlList, err error) {
+func (c *postgresqls) List(ctx context.Context, opts metav1.ListOptions) (result *v1.PostgresqlList, err error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second

@ -94,13 +95,13 @@ func (c *postgresqls) List(opts metav1.ListOptions) (result *v1.PostgresqlList,
 		Resource("postgresqls").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }

 // Watch returns a watch.Interface that watches the requested postgresqls.
-func (c *postgresqls) Watch(opts metav1.ListOptions) (watch.Interface, error) {
+func (c *postgresqls) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second

@ -111,87 +112,90 @@ func (c *postgresqls) Watch(opts metav1.ListOptions) (watch.Interface, error) {
 		Resource("postgresqls").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Watch()
+		Watch(ctx)
 }

 // Create takes the representation of a postgresql and creates it. Returns the server's representation of the postgresql, and an error, if there is any.
-func (c *postgresqls) Create(postgresql *v1.Postgresql) (result *v1.Postgresql, err error) {
+func (c *postgresqls) Create(ctx context.Context, postgresql *v1.Postgresql, opts metav1.CreateOptions) (result *v1.Postgresql, err error) {
 	result = &v1.Postgresql{}
 	err = c.client.Post().
 		Namespace(c.ns).
 		Resource("postgresqls").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(postgresql).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }

 // Update takes the representation of a postgresql and updates it. Returns the server's representation of the postgresql, and an error, if there is any.
-func (c *postgresqls) Update(postgresql *v1.Postgresql) (result *v1.Postgresql, err error) {
+func (c *postgresqls) Update(ctx context.Context, postgresql *v1.Postgresql, opts metav1.UpdateOptions) (result *v1.Postgresql, err error) {
 	result = &v1.Postgresql{}
 	err = c.client.Put().
 		Namespace(c.ns).
 		Resource("postgresqls").
 		Name(postgresql.Name).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(postgresql).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }

 // UpdateStatus was generated because the type contains a Status member.
 // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
-func (c *postgresqls) UpdateStatus(postgresql *v1.Postgresql) (result *v1.Postgresql, err error) {
+func (c *postgresqls) UpdateStatus(ctx context.Context, postgresql *v1.Postgresql, opts metav1.UpdateOptions) (result *v1.Postgresql, err error) {
 	result = &v1.Postgresql{}
 	err = c.client.Put().
 		Namespace(c.ns).
 		Resource("postgresqls").
 		Name(postgresql.Name).
 		SubResource("status").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(postgresql).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }

 // Delete takes name of the postgresql and deletes it. Returns an error if one occurs.
-func (c *postgresqls) Delete(name string, options *metav1.DeleteOptions) error {
+func (c *postgresqls) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
 	return c.client.Delete().
 		Namespace(c.ns).
 		Resource("postgresqls").
 		Name(name).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }

 // DeleteCollection deletes a collection of objects.
-func (c *postgresqls) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error {
+func (c *postgresqls) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
 	var timeout time.Duration
-	if listOptions.TimeoutSeconds != nil {
-		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	if listOpts.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
 	}
 	return c.client.Delete().
 		Namespace(c.ns).
 		Resource("postgresqls").
-		VersionedParams(&listOptions, scheme.ParameterCodec).
+		VersionedParams(&listOpts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }

 // Patch applies the patch and returns the patched postgresql.
-func (c *postgresqls) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Postgresql, err error) {
+func (c *postgresqls) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Postgresql, err error) {
 	result = &v1.Postgresql{}
 	err = c.client.Patch(pt).
 		Namespace(c.ns).
 		Resource("postgresqls").
-		SubResource(subresources...).
 		Name(name).
+		SubResource(subresources...).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(data).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
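Do(ctx) propagates the caller's context into the underlying rest.Request, so a deadline or cancellation now aborts the HTTP call instead of being ignored. A sketch of taking advantage of that (the timeout value is arbitrary):

    ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
    defer cancel() // always release the timer

    // Get aborts with a context error if the API server takes longer than 5s.
    pg, err := client.AcidV1().Postgresqls("default").Get(ctx, "acid-minimal", metav1.GetOptions{})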
@ -25,6 +25,7 @@ SOFTWARE.
 package v1

 import (
+	"context"
 	time "time"

 	acidzalandov1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1"

@ -67,13 +68,13 @@ func NewFilteredPostgresqlInformer(client versioned.Interface, namespace string,
 			if tweakListOptions != nil {
 				tweakListOptions(&options)
 			}
-			return client.AcidV1().Postgresqls(namespace).List(options)
+			return client.AcidV1().Postgresqls(namespace).List(context.TODO(), options)
 		},
 		WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
 			if tweakListOptions != nil {
 				tweakListOptions(&options)
 			}
-			return client.AcidV1().Postgresqls(namespace).Watch(options)
+			return client.AcidV1().Postgresqls(namespace).Watch(context.TODO(), options)
 		},
 	},
 	&acidzalandov1.Postgresql{},
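The ListWatch callbacks keep their old single-argument signatures in this client-go generation, so the generated code bridges them with context.TODO(). Construction is unchanged for callers; a sketch, assuming the usual generated constructor signature shown in the hunk header above (resync period and indexers are illustrative):

    informer := NewFilteredPostgresqlInformer(
        clientset,           // generated versioned.Interface
        metav1.NamespaceAll, // watch all namespaces
        30*time.Second,      // resync period
        cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
        nil,                 // no tweakListOptions
    )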
@ -32,7 +32,7 @@ const (
 	RoleOriginTeamsAPI
 	RoleOriginSystem
 	RoleOriginBootstrap
-	RoleConnectionPool
+	RoleConnectionPooler
 )

 type syncUserOperation int

@ -183,8 +183,8 @@ func (r RoleOrigin) String() string {
 		return "system role"
 	case RoleOriginBootstrap:
 		return "bootstrapped role"
-	case RoleConnectionPool:
-		return "connection pool role"
+	case RoleConnectionPooler:
+		return "connection pooler role"
 	default:
 		panic(fmt.Sprintf("bogus role origin value %d", r))
 	}
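A tiny usage sketch of the renamed constant; note that String panics on values outside the enum, so only defined origins should ever reach it:

    origin := RoleConnectionPooler
    fmt.Println(origin.String()) // prints "connection pooler role"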
@ -85,17 +85,17 @@ type LogicalBackup struct {
 }

 // Operator options for connection pooler
-type ConnectionPool struct {
-	NumberOfInstances *int32 `name:"connection_pool_number_of_instances" default:"2"`
-	Schema string `name:"connection_pool_schema" default:"pooler"`
-	User string `name:"connection_pool_user" default:"pooler"`
-	Image string `name:"connection_pool_image" default:"registry.opensource.zalan.do/acid/pgbouncer"`
-	Mode string `name:"connection_pool_mode" default:"transaction"`
-	MaxDBConnections *int32 `name:"connection_pool_max_db_connections" default:"60"`
-	ConnPoolDefaultCPURequest string `name:"connection_pool_default_cpu_request" default:"500m"`
-	ConnPoolDefaultMemoryRequest string `name:"connection_pool_default_memory_request" default:"100Mi"`
-	ConnPoolDefaultCPULimit string `name:"connection_pool_default_cpu_limit" default:"1"`
-	ConnPoolDefaultMemoryLimit string `name:"connection_pool_default_memory_limit" default:"100Mi"`
+type ConnectionPooler struct {
+	NumberOfInstances *int32 `name:"connection_pooler_number_of_instances" default:"2"`
+	Schema string `name:"connection_pooler_schema" default:"pooler"`
+	User string `name:"connection_pooler_user" default:"pooler"`
+	Image string `name:"connection_pooler_image" default:"registry.opensource.zalan.do/acid/pgbouncer"`
+	Mode string `name:"connection_pooler_mode" default:"transaction"`
+	MaxDBConnections *int32 `name:"connection_pooler_max_db_connections" default:"60"`
+	ConnectionPoolerDefaultCPURequest string `name:"connection_pooler_default_cpu_request" default:"500m"`
+	ConnectionPoolerDefaultMemoryRequest string `name:"connection_pooler_default_memory_request" default:"100Mi"`
+	ConnectionPoolerDefaultCPULimit string `name:"connection_pooler_default_cpu_limit" default:"1"`
+	ConnectionPoolerDefaultMemoryLimit string `name:"connection_pooler_default_memory_limit" default:"100Mi"`
 }
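The name and default struct tags are what tie each field to its operator option key and fallback value. A self-contained reflection sketch of how such tags can be enumerated (an illustration of the tag convention, not the operator's actual config loader):

    package main

    import (
        "fmt"
        "reflect"
    )

    // Pooler mirrors the tag convention above; only two fields shown.
    type Pooler struct {
        Schema string `name:"connection_pooler_schema" default:"pooler"`
        Mode   string `name:"connection_pooler_mode" default:"transaction"`
    }

    func main() {
        t := reflect.TypeOf(Pooler{})
        for i := 0; i < t.NumField(); i++ {
            f := t.Field(i)
            fmt.Printf("%s -> option %q (default %q)\n", f.Name, f.Tag.Get("name"), f.Tag.Get("default"))
        }
    }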
 // Config describes operator config

@ -105,13 +105,14 @@ type Config struct {
 	Auth
 	Scalyr
 	LogicalBackup
-	ConnectionPool
+	ConnectionPooler

-	WatchedNamespace string `name:"watched_namespace"` // special values: "*" means 'watch all namespaces', the empty string "" means 'watch a namespace where operator is deployed to'
-	EtcdHost string `name:"etcd_host" default:""` // special values: the empty string "" means Patroni will use K8s as a DCS
-	DockerImage string `name:"docker_image" default:"registry.opensource.zalan.do/acid/spilo-12:1.6-p2"`
-	Sidecars map[string]string `name:"sidecar_docker_images"`
-	PodServiceAccountName string `name:"pod_service_account_name" default:"postgres-pod"`
+	WatchedNamespace string `name:"watched_namespace"` // special values: "*" means 'watch all namespaces', the empty string "" means 'watch a namespace where operator is deployed to'
+	KubernetesUseConfigMaps bool `name:"kubernetes_use_configmaps" default:"false"`
+	EtcdHost string `name:"etcd_host" default:""` // special values: the empty string "" means Patroni will use K8s as a DCS
+	DockerImage string `name:"docker_image" default:"registry.opensource.zalan.do/acid/spilo-12:1.6-p2"`
+	Sidecars map[string]string `name:"sidecar_docker_images"`
+	PodServiceAccountName string `name:"pod_service_account_name" default:"postgres-pod"`
 	// value of this string must be valid JSON or YAML; see initPodServiceAccount
 	PodServiceAccountDefinition string `name:"pod_service_account_definition" default:""`
 	PodServiceAccountRoleBindingDefinition string `name:"pod_service_account_role_binding_definition" default:""`

@ -213,9 +214,9 @@ func validate(cfg *Config) (err error) {
 		err = fmt.Errorf("number of workers should be higher than 0")
 	}

-	if *cfg.ConnectionPool.NumberOfInstances < constants.ConnPoolMinInstances {
-		msg := "number of connection pool instances should be higher than %d"
-		err = fmt.Errorf(msg, constants.ConnPoolMinInstances)
+	if *cfg.ConnectionPooler.NumberOfInstances < constants.ConnectionPoolerMinInstances {
+		msg := "number of connection pooler instances should be higher than %d"
+		err = fmt.Errorf(msg, constants.ConnectionPoolerMinInstances)
 	}
 	return
 }
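Note that validate dereferences NumberOfInstances, so the pointer must already be populated (normally from the defaults above) before validation runs. A hypothetical in-package test sketch, since validate is unexported:

    one := int32(1)
    cfg := &Config{}
    cfg.ConnectionPooler.NumberOfInstances = &one
    if err := validate(cfg); err != nil {
        fmt.Println(err) // number of connection pooler instances should be higher than 2
    }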
@ -1,18 +1,18 @@
 package constants

-// Connection pool specific constants
+// Connection pooler specific constants
 const (
-	ConnectionPoolUserName = "pooler"
-	ConnectionPoolSchemaName = "pooler"
-	ConnectionPoolDefaultType = "pgbouncer"
-	ConnectionPoolDefaultMode = "transaction"
-	ConnectionPoolDefaultCpuRequest = "500m"
-	ConnectionPoolDefaultCpuLimit = "1"
-	ConnectionPoolDefaultMemoryRequest = "100Mi"
-	ConnectionPoolDefaultMemoryLimit = "100Mi"
+	ConnectionPoolerUserName = "pooler"
+	ConnectionPoolerSchemaName = "pooler"
+	ConnectionPoolerDefaultType = "pgbouncer"
+	ConnectionPoolerDefaultMode = "transaction"
+	ConnectionPoolerDefaultCpuRequest = "500m"
+	ConnectionPoolerDefaultCpuLimit = "1"
+	ConnectionPoolerDefaultMemoryRequest = "100Mi"
+	ConnectionPoolerDefaultMemoryLimit = "100Mi"

-	ConnPoolContainer = 0
-	ConnPoolMaxDBConnections = 60
-	ConnPoolMaxClientConnections = 10000
-	ConnPoolMinInstances = 2
+	ConnectionPoolerContainer = 0
+	ConnectionPoolerMaxDBConnections = 60
+	ConnectionPoolerMaxClientConnections = 10000
+	ConnectionPoolerMinInstances = 2
 )
@ -4,7 +4,7 @@ package constants
 const (
 	PasswordLength = 64
 	SuperuserKeyName = "superuser"
-	ConnectionPoolUserKeyName = "pooler"
+	ConnectionPoolerUserKeyName = "pooler"
 	ReplicationUserKeyName = "replication"
 	RoleFlagSuperuser = "SUPERUSER"
 	RoleFlagInherit = "INHERIT"
@ -1,6 +1,7 @@
 package k8sutil

 import (
+	"context"
 	"fmt"
 	"reflect"

@ -237,7 +238,7 @@ func SameLogicalBackupJob(cur, new *batchv1beta1.CronJob) (match bool, reason st
 	return true, ""
 }

-func (c *mockSecret) Get(name string, options metav1.GetOptions) (*v1.Secret, error) {
+func (c *mockSecret) Get(ctx context.Context, name string, options metav1.GetOptions) (*v1.Secret, error) {
 	if name != "infrastructureroles-test" {
 		return nil, fmt.Errorf("NotFound")
 	}

@ -253,7 +254,7 @@ func (c *mockSecret) Get(name string, options metav1.GetOptions) (*v1.Secret, er

 }

-func (c *mockConfigMap) Get(name string, options metav1.GetOptions) (*v1.ConfigMap, error) {
+func (c *mockConfigMap) Get(ctx context.Context, name string, options metav1.GetOptions) (*v1.ConfigMap, error) {
 	if name != "infrastructureroles-test" {
 		return nil, fmt.Errorf("NotFound")
 	}

@ -283,7 +284,7 @@ func (mock *MockDeploymentNotExistGetter) Deployments(namespace string) appsv1.D
 	return &mockDeploymentNotExist{}
 }

-func (mock *mockDeployment) Create(*apiappsv1.Deployment) (*apiappsv1.Deployment, error) {
+func (mock *mockDeployment) Create(context.Context, *apiappsv1.Deployment, metav1.CreateOptions) (*apiappsv1.Deployment, error) {
 	return &apiappsv1.Deployment{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: "test-deployment",

@ -294,11 +295,11 @@ func (mock *mockDeployment) Create(*apiappsv1.Deployment) (*apiappsv1.Deployment
 	}, nil
 }

-func (mock *mockDeployment) Delete(name string, opts *metav1.DeleteOptions) error {
+func (mock *mockDeployment) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
 	return nil
 }

-func (mock *mockDeployment) Get(name string, opts metav1.GetOptions) (*apiappsv1.Deployment, error) {
+func (mock *mockDeployment) Get(ctx context.Context, name string, opts metav1.GetOptions) (*apiappsv1.Deployment, error) {
 	return &apiappsv1.Deployment{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: "test-deployment",

@ -318,7 +319,7 @@ func (mock *mockDeployment) Get(name string, opts metav1.GetOptions) (*apiappsv1
 	}, nil
 }

-func (mock *mockDeployment) Patch(name string, t types.PatchType, data []byte, subres ...string) (*apiappsv1.Deployment, error) {
+func (mock *mockDeployment) Patch(ctx context.Context, name string, t types.PatchType, data []byte, opts metav1.PatchOptions, subres ...string) (*apiappsv1.Deployment, error) {
 	return &apiappsv1.Deployment{
 		Spec: apiappsv1.DeploymentSpec{
 			Replicas: Int32ToPointer(2),

@ -329,7 +330,7 @@ func (mock *mockDeployment) Patch(name string, t types.PatchType, data []byte, s
 	}, nil
 }

-func (mock *mockDeploymentNotExist) Get(name string, opts metav1.GetOptions) (*apiappsv1.Deployment, error) {
+func (mock *mockDeploymentNotExist) Get(ctx context.Context, name string, opts metav1.GetOptions) (*apiappsv1.Deployment, error) {
 	return nil, &apierrors.StatusError{
 		ErrStatus: metav1.Status{
 			Reason: metav1.StatusReasonNotFound,

@ -337,7 +338,7 @@ func (mock *mockDeploymentNotExist) Get(name string, opts metav1.GetOptions) (*a
 	}
 }

-func (mock *mockDeploymentNotExist) Create(*apiappsv1.Deployment) (*apiappsv1.Deployment, error) {
+func (mock *mockDeploymentNotExist) Create(context.Context, *apiappsv1.Deployment, metav1.CreateOptions) (*apiappsv1.Deployment, error) {
 	return &apiappsv1.Deployment{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: "test-deployment",

@ -356,7 +357,7 @@ func (mock *MockServiceNotExistGetter) Services(namespace string) corev1.Service
 	return &mockServiceNotExist{}
 }

-func (mock *mockService) Create(*v1.Service) (*v1.Service, error) {
+func (mock *mockService) Create(context.Context, *v1.Service, metav1.CreateOptions) (*v1.Service, error) {
 	return &v1.Service{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: "test-service",

@ -364,11 +365,11 @@ func (mock *mockService) Create(*v1.Service) (*v1.Service, error) {
 	}, nil
 }

-func (mock *mockService) Delete(name string, opts *metav1.DeleteOptions) error {
+func (mock *mockService) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
 	return nil
 }

-func (mock *mockService) Get(name string, opts metav1.GetOptions) (*v1.Service, error) {
+func (mock *mockService) Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Service, error) {
 	return &v1.Service{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: "test-service",

@ -376,7 +377,7 @@ func (mock *mockService) Get(name string, opts metav1.GetOptions) (*v1.Service,
 	}, nil
 }

-func (mock *mockServiceNotExist) Create(*v1.Service) (*v1.Service, error) {
+func (mock *mockServiceNotExist) Create(context.Context, *v1.Service, metav1.CreateOptions) (*v1.Service, error) {
 	return &v1.Service{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: "test-service",

@ -384,7 +385,7 @@ func (mock *mockServiceNotExist) Create(*v1.Service) (*v1.Service, error) {
 	}, nil
 }

-func (mock *mockServiceNotExist) Get(name string, opts metav1.GetOptions) (*v1.Service, error) {
+func (mock *mockServiceNotExist) Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Service, error) {
 	return nil, &apierrors.StatusError{
 		ErrStatus: metav1.Status{
 			Reason: metav1.StatusReasonNotFound,
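Because these mocks are hand-written rather than generated, every client-go interface bump has to be mirrored here by hand. A hypothetical in-package usage sketch showing the new call shape:

    // mockSecret only recognizes the fixture name "infrastructureroles-test".
    secrets := &mockSecret{}
    if _, err := secrets.Get(context.TODO(), "infrastructureroles-test", metav1.GetOptions{}); err != nil {
        t.Errorf("expected mock secret to resolve: %v", err)
    }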
@ -133,11 +133,11 @@ var requestsURLtc = []struct {
 }{
 	{
 		"coffee://localhost/",
-		fmt.Errorf(`Get coffee://localhost/teams/acid: unsupported protocol scheme "coffee"`),
+		fmt.Errorf(`Get "coffee://localhost/teams/acid": unsupported protocol scheme "coffee"`),
 	},
 	{
 		"http://192.168.0.%31/",
-		fmt.Errorf(`parse http://192.168.0.%%31/teams/acid: invalid URL escape "%%31"`),
+		fmt.Errorf(`parse "http://192.168.0.%%31/teams/acid": invalid URL escape "%%31"`),
 	},
 }
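These expected strings change because Go 1.14's net/url (and therefore net/http) began quoting the offending URL in its error messages; the fixtures simply track the new wording. A standalone sketch of the difference:

    package main

    import (
        "fmt"
        "net/url"
    )

    func main() {
        _, err := url.Parse("http://192.168.0.%31/teams/acid")
        fmt.Println(err)
        // Go 1.13:  parse http://192.168.0.%31/teams/acid: invalid URL escape "%31"
        // Go 1.14+: parse "http://192.168.0.%31/teams/acid": invalid URL escape "%31"
    }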