Merge branch 'master' into stand

commit cc10c3ea27
@@ -23,6 +23,9 @@ metadata:
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
{{- if .Values.ingress.ingressClassName }}
ingressClassName: {{ .Values.ingress.ingressClassName }}
{{- end }}
{{- if .Values.ingress.tls }}
tls:
{{- range .Values.ingress.tls }}
@@ -93,6 +93,7 @@ ingress:
{}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
ingressClassName: ""
hosts:
- host: ui.example.org
paths: [""]
@@ -130,6 +130,11 @@ spec:
users:
type: object
properties:
additional_owner_roles:
type: array
nullable: true
items:
type: string
enable_password_rotation:
type: boolean
default: false

@@ -207,6 +212,10 @@ spec:
enable_sidecars:
type: boolean
default: true
ignored_annotations:
type: array
items:
type: string
infrastructure_roles_secret_name:
type: string
infrastructure_roles_secrets:

@@ -349,6 +358,12 @@ spec:
timeouts:
type: object
properties:
patroni_api_check_interval:
type: string
default: "1s"
patroni_api_check_timeout:
type: string
default: "5s"
pod_label_wait_timeout:
type: string
default: "10m"

@@ -380,9 +395,15 @@ spec:
enable_master_load_balancer:
type: boolean
default: true
enable_master_pooler_load_balancer:
type: boolean
default: false
enable_replica_load_balancer:
type: boolean
default: false
enable_replica_pooler_load_balancer:
type: boolean
default: false
external_traffic_policy:
type: string
enum:

@@ -450,6 +471,8 @@ spec:
type: string
logical_backup_s3_sse:
type: string
logical_backup_s3_retention_time:
type: string
logical_backup_schedule:
type: string
pattern: '^(\d+|\*)(/\d+)?(\s+(\d+|\*)(/\d+)?){4}$'

@@ -500,6 +523,7 @@ spec:
type: string
default:
- admin
- cron_admin
role_deletion_suffix:
type: string
default: "_deleted"
@@ -150,15 +150,9 @@ spec:
minimum: 1
resources:
type: object
required:
- requests
- limits
properties:
limits:
type: object
required:
- cpu
- memory
properties:
cpu:
type: string

@@ -168,9 +162,6 @@ spec:
pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
requests:
type: object
required:
- cpu
- memory
properties:
cpu:
type: string

@@ -197,8 +188,12 @@ spec:
type: boolean
enableMasterLoadBalancer:
type: boolean
enableMasterPoolerLoadBalancer:
type: boolean
enableReplicaLoadBalancer:
type: boolean
enableReplicaPoolerLoadBalancer:
type: boolean
enableShmVolume:
type: boolean
init_containers:

@@ -402,15 +397,9 @@ spec:
description: deprecated
resources:
type: object
required:
- requests
- limits
properties:
limits:
type: object
required:
- cpu
- memory
properties:
cpu:
type: string

@@ -439,9 +428,6 @@ spec:
# than the corresponding limit.
requests:
type: object
required:
- cpu
- memory
properties:
cpu:
type: string

@@ -532,10 +518,6 @@ spec:
type: array
items:
type: object
required:
- key
- operator
- effect
properties:
key:
type: string
@@ -59,6 +59,16 @@ configGeneral:

# parameters describing Postgres users
configUsers:
# roles to be granted to database owners
# additional_owner_roles:
# - cron_admin

# enable password rotation for app users that are not database owners
enable_password_rotation: false
# rotation interval for updating credentials in K8s secrets of app users
password_rotation_interval: 90
# retention interval to keep rotation users
password_rotation_user_retention: 180
# postgres username used for replication between instances
replication_username: standby
# postgres superuser name to be created by initdb

@@ -114,6 +124,11 @@ configKubernetes:
enable_pod_disruption_budget: true
# enables sidecar containers to run alongside Spilo in the same pod
enable_sidecars: true

# annotations to be ignored when comparing statefulsets, services etc.
# ignored_annotations:
# - k8s.v1.cni.cncf.io/network-status

# namespaced name of the secret containing infrastructure roles names and passwords
# infrastructure_roles_secret_name: postgresql-infrastructure-roles

@@ -204,6 +219,10 @@ configPostgresPodResources:

# timeouts related to some operator actions
configTimeouts:
# interval between consecutive attempts of operator calling the Patroni API
patroni_api_check_interval: 1s
# timeout when waiting for successful response from Patroni API
patroni_api_check_timeout: 5s
# timeout when waiting for the Postgres pods to be deleted
pod_deletion_wait_timeout: 10m
# timeout when waiting for pod role and cluster labels

@@ -228,8 +247,12 @@ configLoadBalancer:

# toggles service type load balancer pointing to the master pod of the cluster
enable_master_load_balancer: false
# toggles service type load balancer pointing to the master pooler pod of the cluster
enable_master_pooler_load_balancer: false
# toggles service type load balancer pointing to the replica pod of the cluster
enable_replica_load_balancer: false
# toggles service type load balancer pointing to the replica pooler pod of the cluster
enable_replica_pooler_load_balancer: false
# define external traffic policy for the load balancer
external_traffic_policy: "Cluster"
# defines the DNS name string template for the master load balancer cluster

@@ -310,6 +333,8 @@ configLogicalBackup:
logical_backup_s3_secret_access_key: ""
# S3 server side encryption
logical_backup_s3_sse: "AES256"
# S3 retention time for stored backups for example "2 week" or "7 days"
logical_backup_s3_retention_time: ""
# backup schedule in the cron format
logical_backup_schedule: "30 00 * * *"

@@ -338,6 +363,7 @@ configTeamsApi:
# List of roles that cannot be overwritten by an application, team or infrastructure role
protected_role_names:
- admin
- cron_admin
# Suffix to add if members are removed from TeamsAPI or PostgresTeam CRD
role_deletion_suffix: "_deleted"
# role name to grant to team members created from the Teams API
@@ -2,13 +2,14 @@ package main

import (
"flag"
log "github.com/sirupsen/logrus"
"os"
"os/signal"
"sync"
"syscall"
"time"

log "github.com/sirupsen/logrus"

"github.com/zalando/postgres-operator/pkg/controller"
"github.com/zalando/postgres-operator/pkg/spec"
"github.com/zalando/postgres-operator/pkg/util/k8sutil"
@@ -12,9 +12,21 @@ DUMP_SIZE_COEFF=5
ERRORCOUNT=0

TOKEN=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token)
K8S_API_URL=https://$KUBERNETES_SERVICE_HOST:$KUBERNETES_SERVICE_PORT/api/v1
if [ "$KUBERNETES_SERVICE_HOST" != "${KUBERNETES_SERVICE_HOST#*[0-9].[0-9]}" ]; then
echo "IPv4"
K8S_API_URL=https://$KUBERNETES_SERVICE_HOST:$KUBERNETES_SERVICE_PORT/api/v1
elif [ "$KUBERNETES_SERVICE_HOST" != "${KUBERNETES_SERVICE_HOST#*:[0-9a-fA-F]}" ]; then
echo "IPv6"
K8S_API_URL=https://[$KUBERNETES_SERVICE_HOST]:$KUBERNETES_SERVICE_PORT/api/v1
else
echo "Unrecognized IP format '$KUBERNETES_SERVICE_HOST'"
fi
echo "API Endpoint: ${K8S_API_URL}"
CERT=/var/run/secrets/kubernetes.io/serviceaccount/ca.crt

LOGICAL_BACKUP_PROVIDER=${LOGICAL_BACKUP_PROVIDER:="s3"}
LOGICAL_BACKUP_S3_RETENTION_TIME=${LOGICAL_BACKUP_S3_RETENTION_TIME:=""}

function estimate_size {
"$PG_BIN"/psql -tqAc "${ALL_DB_SIZE_QUERY}"
}
@@ -28,6 +40,57 @@ function compress {
pigz
}

function aws_delete_objects {
args=(
"--bucket=$LOGICAL_BACKUP_S3_BUCKET"
)

[[ ! -z "$LOGICAL_BACKUP_S3_ENDPOINT" ]] && args+=("--endpoint-url=$LOGICAL_BACKUP_S3_ENDPOINT")
[[ ! -z "$LOGICAL_BACKUP_S3_REGION" ]] && args+=("--region=$LOGICAL_BACKUP_S3_REGION")

aws s3api delete-objects "${args[@]}" --delete Objects=["$(printf {Key=%q}, "$@")"],Quiet=true
}
export -f aws_delete_objects

function aws_delete_outdated {
if [[ -z "$LOGICAL_BACKUP_S3_RETENTION_TIME" ]] ; then
echo "no retention time configured: skip cleanup of outdated backups"
return 0
fi

# define cutoff date for outdated backups (day precision)
cutoff_date=$(date -d "$LOGICAL_BACKUP_S3_RETENTION_TIME ago" +%F)

# mimic bucket setup from Spilo
prefix="spilo/"$SCOPE$LOGICAL_BACKUP_S3_BUCKET_SCOPE_SUFFIX"/logical_backups/"

args=(
"--no-paginate"
"--output=text"
"--prefix=$prefix"
"--bucket=$LOGICAL_BACKUP_S3_BUCKET"
)

[[ ! -z "$LOGICAL_BACKUP_S3_ENDPOINT" ]] && args+=("--endpoint-url=$LOGICAL_BACKUP_S3_ENDPOINT")
[[ ! -z "$LOGICAL_BACKUP_S3_REGION" ]] && args+=("--region=$LOGICAL_BACKUP_S3_REGION")

# list objects older than the cutoff date
aws s3api list-objects "${args[@]}" --query="Contents[?LastModified<='$cutoff_date'].[Key]" > /tmp/outdated-backups

# spare the last backup
sed -i '$d' /tmp/outdated-backups

count=$(wc -l < /tmp/outdated-backups)
if [[ $count == 0 ]] ; then
echo "no outdated backups to delete"
return 0
fi
echo "deleting $count outdated backups created before $cutoff_date"

# delete outdated files in batches of 100 at a time
tr '\n' '\0' < /tmp/outdated-backups | xargs -0 -P1 -n100 bash -c 'aws_delete_objects "$@"' _
}

function aws_upload {
declare -r EXPECTED_SIZE="$1"

@@ -59,6 +122,7 @@ function upload {
;;
*)
aws_upload $(($(estimate_size) / DUMP_SIZE_COEFF))
aws_delete_outdated
;;
esac
}
@@ -750,6 +750,11 @@ lead to K8s removing this field from the manifest due to its
Then the resultant manifest will not contain the necessary change, and the
operator will respectively do nothing with the existing source ranges.

Load balancer services can also be enabled for the [connection pooler](user.md#connection-pooler)
pods with manifest flags `enableMasterPoolerLoadBalancer` and/or
`enableReplicaPoolerLoadBalancer` or in the operator configuration with
`enable_master_pooler_load_balancer` and/or `enable_replica_pooler_load_balancer`.

## Running periodic 'autorepair' scans of K8s objects

The Postgres Operator periodically scans all K8s objects belonging to each
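For illustration only (not part of this commit's diff): a minimal sketch of a cluster manifest that enables the new pooler load balancers described in the hunk above; the cluster name, team, and sizing values are placeholders.

```yaml
# Hypothetical postgresql manifest; names and sizes are placeholders.
apiVersion: "acid.zalan.do/v1"
kind: postgresql
metadata:
  name: acid-example-cluster
spec:
  teamId: "acid"
  numberOfInstances: 2
  volume:
    size: 1Gi
  postgresql:
    version: "13"
  # pooler pods must exist for the pooler load balancers to point at anything
  enableConnectionPooler: true
  enableReplicaConnectionPooler: true
  # flags introduced by this change
  enableMasterPoolerLoadBalancer: true
  enableReplicaPoolerLoadBalancer: true
```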
@@ -91,11 +91,23 @@ These parameters are grouped directly under the `spec` key in the manifest.
`enable_master_load_balancer` parameter) to define whether to enable the load
balancer pointing to the Postgres primary. Optional.

* **enableMasterPoolerLoadBalancer**
boolean flag to override the operator defaults (set by the
`enable_master_pooler_load_balancer` parameter) to define whether to enable
the load balancer for master pooler pods pointing to the Postgres primary.
Optional.

* **enableReplicaLoadBalancer**
boolean flag to override the operator defaults (set by the
`enable_replica_load_balancer` parameter) to define whether to enable the
load balancer pointing to the Postgres standby instances. Optional.

* **enableReplicaPoolerLoadBalancer**
boolean flag to override the operator defaults (set by the
`enable_replica_pooler_load_balancer` parameter) to define whether to enable
the load balancer for replica pooler pods pointing to the Postgres standby
instances. Optional.

* **allowedSourceRanges**
when one or more load balancers are enabled for the cluster, this parameter
defines the comma-separated range of IP networks (in CIDR-notation). The
@@ -310,9 +322,7 @@ explanation of `ttl` and `loop_wait` parameters.

Those parameters define [CPU and memory requests and limits](https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/)
for the Postgres container. They are grouped under the `resources` top-level
key with subgroups `requests` and `limits`. The whole section is optional,
however if you specify a request or limit you have to define everything
(unless you are not modifying the default CRD schema validation).
key with subgroups `requests` and `limits`.

### Requests
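For illustration only (not part of the diff): a sketch of the `resources` section in a cluster manifest; with the relaxed CRD validation in this commit, `requests` and `limits` no longer both have to be fully specified. The numbers below are placeholders.

```yaml
# Hypothetical resources fragment of a postgresql manifest; values are placeholders.
spec:
  resources:
    requests:
      cpu: 100m
      memory: 100Mi
    limits:
      cpu: 500m
      memory: 500Mi
```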
@@ -159,9 +159,9 @@ Those are top-level keys, containing both leaf keys and groups.
at the cost of overprovisioning memory and potential scheduling problems for
containers with high memory limits due to the lack of memory on Kubernetes
cluster nodes. This affects all containers created by the operator (Postgres,
Scalyr sidecar, and other sidecars except **sidecars** defined in the operator
configuration); to set resources for the operator's own container, change the
[operator deployment manually](https://github.com/zalando/postgres-operator/blob/master/manifests/postgres-operator.yaml#L20).
connection pooler, logical backup, scalyr sidecar, and other sidecars except
**sidecars** defined in the operator configuration); to set resources for the
operator's own container, change the [operator deployment manually](https://github.com/zalando/postgres-operator/blob/master/manifests/postgres-operator.yaml#L20).
The default is `false`.

## Postgres users
@@ -177,6 +177,15 @@ under the `users` key.
Postgres username used for replication between instances. The default is
`standby`.

* **additional_owner_roles**
Specifies database roles that will become members of all database owners.
Then owners can use `SET ROLE` to obtain privileges of these roles to e.g.
create/update functionality from extensions as part of a migration script.
Note that roles listed here should be preconfigured in the docker image
and already exist in the database cluster on startup. One such role can be
`cron_admin` which is provided by the Spilo docker image to set up cron
jobs inside the `postgres` database. Default is `empty`.

* **enable_password_rotation**
For all `LOGIN` roles that are not database owners the operator can rotate
credentials in the corresponding K8s secrets by replacing the username and
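For illustration only (not part of the diff): a sketch of how the new `users` options could be set in the CRD-based operator configuration; `super_username: postgres` and the interval values mirror the chart defaults and are placeholders here.

```yaml
# Hypothetical excerpt of an OperatorConfiguration resource; values are illustrative.
configuration:
  users:
    super_username: postgres
    replication_username: standby
    additional_owner_roles:
      - cron_admin
    enable_password_rotation: false
    password_rotation_interval: 90
    password_rotation_user_retention: 180
```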
@@ -278,6 +287,12 @@ configuration they are grouped under the `kubernetes` key.
Regular expressions like `downscaler/*` etc. are also accepted. Can be used
with [kube-downscaler](https://github.com/hjacobs/kube-downscaler).

* **ignored_annotations**
Some K8s tools inject and update annotations outside of the Postgres Operator's
control. This can cause rolling updates on each cluster sync cycle. With
this option you can specify an array of annotation keys that should be
ignored when comparing K8s resources on sync. The default is empty.

* **watched_namespace**
The operator watches for Postgres objects in the given namespace. If not
specified, the value is taken from the operator namespace. A special `*`
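For illustration only (not part of the diff): ignoring an annotation injected by a CNI tool so it does not trigger a rolling update on sync; the annotation key is taken from the commented example in the chart values of this commit.

```yaml
# Hypothetical excerpt of the CRD-based operator configuration.
configuration:
  kubernetes:
    ignored_annotations:
      - k8s.v1.cni.cncf.io/network-status
```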
@@ -498,6 +513,13 @@ configuration `resource_check_interval` and `resource_check_timeout` have no
effect, and the parameters are grouped under the `timeouts` key in the
CRD-based configuration.

* **PatroniAPICheckInterval**
the interval between consecutive attempts waiting for the return of
the Patroni API. The default is `1s`.

* **PatroniAPICheckTimeout**
the timeout for a response from the Patroni API. The default is `5s`.

* **resource_check_interval**
interval to wait between consecutive attempts to check for the presence of
some Kubernetes resource (i.e. `StatefulSet` or `PodDisruptionBudget`). The
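For illustration only (not part of the diff): the `timeouts` group of the CRD-based configuration with the two new Patroni API parameters; the remaining values mirror the defaults shown elsewhere in this commit.

```yaml
# Hypothetical excerpt of the CRD-based operator configuration.
configuration:
  timeouts:
    patroni_api_check_interval: 1s
    patroni_api_check_timeout: 5s
    pod_label_wait_timeout: 10m
    pod_deletion_wait_timeout: 10m
    ready_wait_interval: 4s
```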
@@ -546,11 +568,21 @@ In the CRD-based configuration they are grouped under the `load_balancer` key.
toggles service type load balancer pointing to the master pod of the cluster.
Can be overridden by individual cluster settings. The default is `true`.

* **enable_replica_load_balancer**
toggles service type load balancer pointing to the replica pod of the
cluster. Can be overridden by individual cluster settings. The default is
* **enable_master_pooler_load_balancer**
toggles service type load balancer pointing to the master pooler pod of the
cluster. Can be overridden by individual cluster settings. The default is
`false`.

* **enable_replica_load_balancer**
toggles service type load balancer pointing to the replica pod(s) of the
cluster. Can be overridden by individual cluster settings. The default is
`false`.

* **enable_replica_pooler_load_balancer**
toggles service type load balancer pointing to the replica pooler pod(s) of
the cluster. Can be overridden by individual cluster settings. The default
is `false`.

* **external_traffic_policy** defines external traffic policy for load
balancers. Allowed values are `Cluster` (default) and `Local`.
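For illustration only (not part of the diff): the `load_balancer` group of the CRD-based configuration with the new pooler toggles; the values mirror those in the default configuration manifest of this commit.

```yaml
# Hypothetical excerpt of the CRD-based operator configuration.
configuration:
  load_balancer:
    enable_master_load_balancer: false
    enable_master_pooler_load_balancer: false
    enable_replica_load_balancer: false
    enable_replica_pooler_load_balancer: false
    external_traffic_policy: "Cluster"
```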
@@ -676,6 +708,11 @@ grouped under the `logical_backup` key.
Specify server side encryption that S3 storage is using. If empty string
is specified, no argument will be passed to `aws s3` command. Default: "AES256".

* **logical_backup_s3_retention_time**
Specify a retention time for logical backups stored in S3. Backups older than the specified retention
time will be deleted after a new backup has been uploaded. If empty, all backups will be kept. Example values are
"3 days", "2 weeks", or "1 month". The default is empty.

* **logical_backup_schedule**
Backup schedule in the cron format. Please take the
[reference schedule format](https://kubernetes.io/docs/tasks/job/automated-tasks-with-cron-jobs/#schedule)
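For illustration only (not part of the diff): keeping logical backups for two weeks; the bucket name is a placeholder, while the schedule and SSE values mirror the defaults shown in this commit.

```yaml
# Hypothetical excerpt of the CRD-based operator configuration.
configuration:
  logical_backup:
    logical_backup_schedule: "30 00 * * *"
    logical_backup_s3_bucket: "my-backup-bucket"   # placeholder
    logical_backup_s3_sse: "AES256"
    logical_backup_s3_retention_time: "2 weeks"
```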
@@ -748,7 +785,7 @@ key.

* **protected_role_names**
List of roles that cannot be overwritten by an application, team or
infrastructure role. The default is `admin`.
infrastructure role. The default list is `admin` and `cron_admin`.

* **postgres_superuser_teams**
List of teams which members need the superuser role in each PG database
@@ -158,6 +158,37 @@ class EndToEndTestCase(unittest.TestCase):
print('Operator log: {}'.format(k8s.get_operator_log()))
raise

@timeout_decorator.timeout(TEST_TIMEOUT_SEC)
def test_additional_owner_roles(self):
'''
Test adding additional member roles to existing database owner roles
'''
k8s = self.k8s

# add cron_admin to additional_owner_roles in the operator config
owner_roles = {
"data": {
"additional_owner_roles": "cron_admin",
},
}
k8s.update_config(owner_roles)
self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"},
"Operator does not get in sync")

leader = k8s.get_cluster_leader_pod()
owner_query = """
SELECT a2.rolname
FROM pg_catalog.pg_authid a
JOIN pg_catalog.pg_auth_members am
ON a.oid = am.member
AND a.rolname = 'cron_admin'
JOIN pg_catalog.pg_authid a2
ON a2.oid = am.roleid
WHERE a2.rolname IN ('zalando', 'bar_owner', 'bar_data_owner');
"""
self.eventuallyEqual(lambda: len(self.query_database(leader.metadata.name, "postgres", owner_query)), 3,
"Not all additional users found in database", 10, 5)

@timeout_decorator.timeout(TEST_TIMEOUT_SEC)
def test_additional_pod_capabilities(self):
'''

@@ -177,13 +208,12 @@ class EndToEndTestCase(unittest.TestCase):

try:
k8s.update_config(patch_capabilities)
self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"},
"Operator does not get in sync")

# changed security context of postgres container should trigger a rolling update
k8s.wait_for_pod_failover(replica_nodes, 'spilo-role=master,' + cluster_label)
k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label)

self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
self.eventuallyEqual(lambda: k8s.count_pods_with_container_capabilities(capabilities, cluster_label),
2, "Container capabilities not updated")

@@ -209,8 +239,6 @@ class EndToEndTestCase(unittest.TestCase):
},
}
k8s.update_config(enable_postgres_team_crd)
self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"},
"Operator does not get in sync")

k8s.api.custom_objects_api.patch_namespaced_custom_object(
'acid.zalan.do', 'v1', 'default',

@@ -335,7 +363,7 @@ class EndToEndTestCase(unittest.TestCase):
try:
k8s.api.custom_objects_api.patch_namespaced_custom_object(
"acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_patch_config)

self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")

def compare_config():

@@ -452,7 +480,7 @@ class EndToEndTestCase(unittest.TestCase):
}
}
})

self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"},
"Operator does not get in sync")
self.eventuallyEqual(lambda: k8s.count_secrets_with_label("cluster-name=acid-minimal-cluster,application=spilo", self.test_namespace),

@@ -467,6 +495,9 @@ class EndToEndTestCase(unittest.TestCase):
the end turn connection pooler off to not interfere with other tests.
'''
k8s = self.k8s
pooler_label = 'application=db-connection-pooler,cluster-name=acid-minimal-cluster'
master_pooler_label = 'connection-pooler=acid-minimal-cluster-pooler'
replica_pooler_label = master_pooler_label + '-repl'
self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")

k8s.api.custom_objects_api.patch_namespaced_custom_object(

@@ -478,20 +509,30 @@ class EndToEndTestCase(unittest.TestCase):
'enableReplicaConnectionPooler': True,
}
})
self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")

self.eventuallyEqual(lambda: k8s.get_deployment_replica_count(), 2,
"Deployment replicas is 2 default")
self.eventuallyEqual(lambda: k8s.count_running_pods(
"connection-pooler=acid-minimal-cluster-pooler"),
2, "No pooler pods found")
self.eventuallyEqual(lambda: k8s.count_running_pods(
"connection-pooler=acid-minimal-cluster-pooler-repl"),
2, "No pooler replica pods found")
self.eventuallyEqual(lambda: k8s.count_services_with_label(
'application=db-connection-pooler,cluster-name=acid-minimal-cluster'),
2, "No pooler service found")
self.eventuallyEqual(lambda: k8s.count_secrets_with_label('application=db-connection-pooler,cluster-name=acid-minimal-cluster'),
1, "Pooler secret not created")
self.eventuallyEqual(lambda: k8s.get_deployment_replica_count(), 2, "Deployment replicas is 2 default")
self.eventuallyEqual(lambda: k8s.count_running_pods(master_pooler_label), 2, "No pooler pods found")
self.eventuallyEqual(lambda: k8s.count_running_pods(replica_pooler_label), 2, "No pooler replica pods found")
self.eventuallyEqual(lambda: k8s.count_services_with_label(pooler_label), 2, "No pooler service found")
self.eventuallyEqual(lambda: k8s.count_secrets_with_label(pooler_label), 1, "Pooler secret not created")

k8s.api.custom_objects_api.patch_namespaced_custom_object(
'acid.zalan.do', 'v1', 'default',
'postgresqls', 'acid-minimal-cluster',
{
'spec': {
'enableMasterPoolerLoadBalancer': True,
'enableReplicaPoolerLoadBalancer': True,
}
})
self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
self.eventuallyEqual(lambda: k8s.get_service_type(master_pooler_label+","+pooler_label),
'LoadBalancer',
"Expected LoadBalancer service type for master pooler pod, found {}")
self.eventuallyEqual(lambda: k8s.get_service_type(replica_pooler_label+","+pooler_label),
'LoadBalancer',
"Expected LoadBalancer service type for replica pooler pod, found {}")

# Turn off only master connection pooler
k8s.api.custom_objects_api.patch_namespaced_custom_object(

@@ -504,20 +545,17 @@ class EndToEndTestCase(unittest.TestCase):
}
})

self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"},
"Operator does not get in sync")
self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")

self.eventuallyEqual(lambda: k8s.get_deployment_replica_count(name="acid-minimal-cluster-pooler-repl"), 2,
"Deployment replicas is 2 default")
self.eventuallyEqual(lambda: k8s.count_running_pods(
"connection-pooler=acid-minimal-cluster-pooler"),
self.eventuallyEqual(lambda: k8s.count_running_pods(master_pooler_label),
0, "Master pooler pods not deleted")
self.eventuallyEqual(lambda: k8s.count_running_pods(
"connection-pooler=acid-minimal-cluster-pooler-repl"),
self.eventuallyEqual(lambda: k8s.count_running_pods(replica_pooler_label),
2, "Pooler replica pods not found")
self.eventuallyEqual(lambda: k8s.count_services_with_label(
'application=db-connection-pooler,cluster-name=acid-minimal-cluster'),
self.eventuallyEqual(lambda: k8s.count_services_with_label(pooler_label),
1, "No pooler service found")
self.eventuallyEqual(lambda: k8s.count_secrets_with_label('application=db-connection-pooler,cluster-name=acid-minimal-cluster'),
self.eventuallyEqual(lambda: k8s.count_secrets_with_label(pooler_label),
1, "Secret not created")

# Turn off only replica connection pooler
@@ -528,20 +566,24 @@ class EndToEndTestCase(unittest.TestCase):
'spec': {
'enableConnectionPooler': True,
'enableReplicaConnectionPooler': False,
'enableMasterPoolerLoadBalancer': False,
}
})

self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"},
"Operator does not get in sync")
self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")

self.eventuallyEqual(lambda: k8s.get_deployment_replica_count(), 2,
"Deployment replicas is 2 default")
self.eventuallyEqual(lambda: k8s.count_running_pods("connection-pooler=acid-minimal-cluster-pooler"),
self.eventuallyEqual(lambda: k8s.count_running_pods(master_pooler_label),
2, "Master pooler pods not found")
self.eventuallyEqual(lambda: k8s.count_running_pods("connection-pooler=acid-minimal-cluster-pooler-repl"),
self.eventuallyEqual(lambda: k8s.count_running_pods(replica_pooler_label),
0, "Pooler replica pods not deleted")
self.eventuallyEqual(lambda: k8s.count_services_with_label('application=db-connection-pooler,cluster-name=acid-minimal-cluster'),
self.eventuallyEqual(lambda: k8s.count_services_with_label(pooler_label),
1, "No pooler service found")
self.eventuallyEqual(lambda: k8s.count_secrets_with_label('application=db-connection-pooler,cluster-name=acid-minimal-cluster'),
self.eventuallyEqual(lambda: k8s.get_service_type(master_pooler_label+","+pooler_label),
'ClusterIP',
"Expected ClusterIP service type for master, found {}")
self.eventuallyEqual(lambda: k8s.count_secrets_with_label(pooler_label),
1, "Secret not created")

# scale up connection pooler deployment
@@ -558,7 +600,7 @@ class EndToEndTestCase(unittest.TestCase):

self.eventuallyEqual(lambda: k8s.get_deployment_replica_count(), 3,
"Deployment replicas is scaled to 3")
self.eventuallyEqual(lambda: k8s.count_running_pods("connection-pooler=acid-minimal-cluster-pooler"),
self.eventuallyEqual(lambda: k8s.count_running_pods(master_pooler_label),
3, "Scale up of pooler pods does not work")

# turn it off, keeping config should be overwritten by false

@@ -569,12 +611,13 @@ class EndToEndTestCase(unittest.TestCase):
'spec': {
'enableConnectionPooler': False,
'enableReplicaConnectionPooler': False,
'enableReplicaPoolerLoadBalancer': False,
}
})

self.eventuallyEqual(lambda: k8s.count_running_pods("connection-pooler=acid-minimal-cluster-pooler"),
self.eventuallyEqual(lambda: k8s.count_running_pods(master_pooler_label),
0, "Pooler pods not scaled down")
self.eventuallyEqual(lambda: k8s.count_services_with_label('application=db-connection-pooler,cluster-name=acid-minimal-cluster'),
self.eventuallyEqual(lambda: k8s.count_services_with_label(pooler_label),
0, "Pooler service not removed")
self.eventuallyEqual(lambda: k8s.count_secrets_with_label('application=spilo,cluster-name=acid-minimal-cluster'),
4, "Secrets not deleted")

@@ -661,6 +704,49 @@ class EndToEndTestCase(unittest.TestCase):
print('Operator log: {}'.format(k8s.get_operator_log()))
raise

@timeout_decorator.timeout(TEST_TIMEOUT_SEC)
def test_ignored_annotations(self):
'''
Test if injected annotation does not cause replacement of resources when listed under ignored_annotations
'''
k8s = self.k8s

annotation_patch = {
"metadata": {
"annotations": {
"k8s-status": "healthy"
},
}
}

try:
sts = k8s.api.apps_v1.read_namespaced_stateful_set('acid-minimal-cluster', 'default')
old_sts_creation_timestamp = sts.metadata.creation_timestamp
k8s.api.apps_v1.patch_namespaced_stateful_set(sts.metadata.name, sts.metadata.namespace, annotation_patch)
svc = k8s.api.core_v1.read_namespaced_service('acid-minimal-cluster', 'default')
old_svc_creation_timestamp = svc.metadata.creation_timestamp
k8s.api.core_v1.patch_namespaced_service(svc.metadata.name, svc.metadata.namespace, annotation_patch)

patch_config_ignored_annotations = {
"data": {
"ignored_annotations": "k8s-status",
}
}
k8s.update_config(patch_config_ignored_annotations)
self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")

sts = k8s.api.apps_v1.read_namespaced_stateful_set('acid-minimal-cluster', 'default')
new_sts_creation_timestamp = sts.metadata.creation_timestamp
svc = k8s.api.core_v1.read_namespaced_service('acid-minimal-cluster', 'default')
new_svc_creation_timestamp = svc.metadata.creation_timestamp

self.assertEqual(old_sts_creation_timestamp, new_sts_creation_timestamp, "unexpected replacement of statefulset on sync")
self.assertEqual(old_svc_creation_timestamp, new_svc_creation_timestamp, "unexpected replacement of master service on sync")

except timeout_decorator.TimeoutError:
print('Operator log: {}'.format(k8s.get_operator_log()))
raise

@timeout_decorator.timeout(TEST_TIMEOUT_SEC)
def test_infrastructure_roles(self):
'''

@@ -956,12 +1042,12 @@ class EndToEndTestCase(unittest.TestCase):
}
k8s.api.custom_objects_api.patch_namespaced_custom_object(
"acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_patch_resources)
self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"},
"Operator does not get in sync")

# wait for switched over
k8s.wait_for_pod_failover(replica_nodes, 'spilo-role=master,' + cluster_label)
k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label)
self.eventuallyEqual(lambda: len(k8s.get_patroni_running_members()), 2, "Postgres status did not enter running")

def verify_pod_limits():
pods = k8s.api.core_v1.list_namespaced_pod('default', label_selector="cluster-name=acid-minimal-cluster,application=spilo").items

@@ -1063,7 +1149,8 @@ class EndToEndTestCase(unittest.TestCase):
plural="postgresqls",
name="acid-minimal-cluster",
body=patch_node_affinity_config)
self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"},
"Operator does not get in sync")

# node affinity change should cause replica to relocate from replica node to master node due to node affinity requirement
k8s.wait_for_pod_failover(master_nodes, 'spilo-role=replica,' + cluster_label)

@@ -1177,10 +1264,11 @@ class EndToEndTestCase(unittest.TestCase):

@timeout_decorator.timeout(TEST_TIMEOUT_SEC)
def test_overwrite_pooler_deployment(self):
pooler_name = 'acid-minimal-cluster-pooler'
k8s = self.k8s
k8s.create_with_kubectl("manifests/minimal-fake-pooler-deployment.yaml")
self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
self.eventuallyEqual(lambda: k8s.get_deployment_replica_count(name="acid-minimal-cluster-pooler"), 1,
self.eventuallyEqual(lambda: k8s.get_deployment_replica_count(name=pooler_name), 1,
"Initial broken deployment not rolled out")

k8s.api.custom_objects_api.patch_namespaced_custom_object(

@@ -1193,7 +1281,7 @@ class EndToEndTestCase(unittest.TestCase):
})

self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
self.eventuallyEqual(lambda: k8s.get_deployment_replica_count(name="acid-minimal-cluster-pooler"), 2,
self.eventuallyEqual(lambda: k8s.get_deployment_replica_count(name=pooler_name), 2,
"Operator did not succeed in overwriting labels")

k8s.api.custom_objects_api.patch_namespaced_custom_object(

@@ -1206,7 +1294,7 @@ class EndToEndTestCase(unittest.TestCase):
})

self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
self.eventuallyEqual(lambda: k8s.count_running_pods("connection-pooler=acid-minimal-cluster-pooler"),
self.eventuallyEqual(lambda: k8s.count_running_pods("connection-pooler="+pooler_name),
0, "Pooler pods not scaled down")

@timeout_decorator.timeout(TEST_TIMEOUT_SEC)
@@ -3,6 +3,7 @@ kind: ConfigMap
metadata:
name: postgres-operator
data:
# additional_owner_roles: "cron_admin"
# additional_pod_capabilities: "SYS_NICE"
# additional_secret_mount: "some-secret-name"
# additional_secret_mount_path: "/some/dir"

@@ -44,6 +45,7 @@ data:
# enable_init_containers: "true"
# enable_lazy_spilo_upgrade: "false"
enable_master_load_balancer: "false"
enable_master_pooler_load_balancer: "false"
enable_password_rotation: "false"
enable_pgversion_env_var: "true"
# enable_pod_antiaffinity: "false"

@@ -51,6 +53,7 @@ data:
# enable_postgres_team_crd: "false"
# enable_postgres_team_crd_superusers: "false"
enable_replica_load_balancer: "false"
enable_replica_pooler_load_balancer: "false"
# enable_shm_volume: "true"
# enable_sidecars: "true"
enable_spilo_wal_path_compat: "true"

@@ -61,6 +64,7 @@ data:
external_traffic_policy: "Cluster"
# gcp_credentials: ""
# kubernetes_use_configmaps: "false"
# ignored_annotations: ""
# infrastructure_roles_secret_name: "postgresql-infrastructure-roles"
# infrastructure_roles_secrets: "secretname:monitoring-roles,userkey:user,passwordkey:password,rolekey:inrole"
# inherited_annotations: owned-by

@@ -77,6 +81,7 @@ data:
# logical_backup_s3_endpoint: ""
# logical_backup_s3_secret_access_key: ""
logical_backup_s3_sse: "AES256"
# logical_backup_s3_retention_time: ""
logical_backup_schedule: "30 00 * * *"
major_version_upgrade_mode: "manual"
# major_version_upgrade_team_allow_list: ""

@@ -93,6 +98,8 @@ data:
# pam_configuration: |
# https://info.example.com/oauth2/tokeninfo?access_token= uid realm=/employees
# pam_role_name: zalandos
patroni_api_check_interval: "1s"
patroni_api_check_timeout: "5s"
# password_rotation_interval: "90"
# password_rotation_user_retention: "180"
pdb_name_format: "postgres-{cluster}-pdb"

@@ -109,7 +116,7 @@ data:
# pod_service_account_role_binding_definition: ""
pod_terminate_grace_period: 5m
# postgres_superuser_teams: "postgres_superusers"
# protected_role_names: "admin"
# protected_role_names: "admin,cron_admin"
ready_wait_interval: 3s
ready_wait_timeout: 30s
repair_period: 5m
@@ -0,0 +1,131 @@
# Here we use https://github.com/prometheus-community/helm-charts/charts/kube-prometheus-stack
# Please keep the ServiceMonitor's label same as the Helm release name of kube-prometheus-stack

apiVersion: v1
kind: Namespace
metadata:
name: test-pg
---
apiVersion: "acid.zalan.do/v1"
kind: postgresql
metadata:
name: acid-minimal-cluster
namespace: test-pg
labels:
app: test-pg
spec:
teamId: "acid"
volume:
size: 1Gi
numberOfInstances: 2
users:
zalando: # database owner
- superuser
- createdb
foo_user: [] # role for application foo
databases:
foo: zalando # dbname: owner
preparedDatabases:
bar: {}
postgresql:
version: "13"
sidecars:
- name: "exporter"
image: "wrouesnel/postgres_exporter"
ports:
- name: exporter
containerPort: 9187
protocol: TCP
resources:
limits:
cpu: 500m
memory: 256M
requests:
cpu: 100m
memory: 200M
---
apiVersion: v1
kind: Service
metadata:
name: acid-minimal-cluster-svc-metrics-master
namespace: test-pg
labels:
app: test-pg
spilo-role: master
annotations:
prometheus.io/scrape: "true"
prometheus.io/port: "9187"
spec:
type: ClusterIP
ports:
- name: exporter
port: 9187
targetPort: exporter
selector:
application: spilo
cluster-name: acid-minimal-cluster
spilo-role: master
---
apiVersion: v1
kind: Service
metadata:
name: acid-minimal-cluster-svc-metrics-replica
namespace: test-pg
labels:
app: test-pg
spilo-role: replica
annotations:
prometheus.io/scrape: "true"
prometheus.io/port: "9187"
spec:
type: ClusterIP
ports:
- name: exporter
port: 9187
targetPort: exporter
selector:
application: spilo
cluster-name: acid-minimal-cluster
spilo-role: replica
---
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: acid-minimal-cluster-svcm-master
namespace: test-pg
labels:
app: test-pg
spilo-role: master
spec:
endpoints:
- port: exporter
interval: 15s
scrapeTimeout: 10s
namespaceSelector:
matchNames:
- test-pg
selector:
matchLabels:
app: test-pg
spilo-role: master
---
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: acid-minimal-cluster-svcm-replica
namespace: test-pg
labels:
app: test-pg
spilo-role: replica
spec:
endpoints:
- port: exporter
interval: 15s
scrapeTimeout: 10s
namespaceSelector:
matchNames:
- test-pg
selector:
matchLabels:
app: test-pg
spilo-role: replica
@@ -128,6 +128,11 @@ spec:
users:
type: object
properties:
additional_owner_roles:
type: array
nullable: true
items:
type: string
enable_password_rotation:
type: boolean
default: false

@@ -205,6 +210,10 @@ spec:
enable_sidecars:
type: boolean
default: true
ignored_annotations:
type: array
items:
type: string
infrastructure_roles_secret_name:
type: string
infrastructure_roles_secrets:

@@ -347,6 +356,12 @@ spec:
timeouts:
type: object
properties:
patroni_api_check_interval:
type: string
default: "1s"
patroni_api_check_timeout:
type: string
default: "5s"
pod_label_wait_timeout:
type: string
default: "10m"

@@ -378,9 +393,15 @@ spec:
enable_master_load_balancer:
type: boolean
default: true
enable_master_pooler_load_balancer:
type: boolean
default: false
enable_replica_load_balancer:
type: boolean
default: false
enable_replica_pooler_load_balancer:
type: boolean
default: false
external_traffic_policy:
type: string
enum:

@@ -448,6 +469,8 @@ spec:
type: string
logical_backup_s3_sse:
type: string
logical_backup_s3_retention_time:
type: string
logical_backup_schedule:
type: string
pattern: '^(\d+|\*)(/\d+)?(\s+(\d+|\*)(/\d+)?){4}$'

@@ -498,6 +521,7 @@ spec:
type: string
default:
- admin
- cron_admin
role_deletion_suffix:
type: string
default: "_deleted"
@@ -26,6 +26,8 @@ configuration:
# protocol: TCP
workers: 8
users:
# additional_owner_roles:
# - cron_admin
enable_password_rotation: false
password_rotation_interval: 90
password_rotation_user_retention: 180

@@ -57,6 +59,8 @@ configuration:
enable_pod_antiaffinity: false
enable_pod_disruption_budget: true
enable_sidecars: true
# ignored_annotations:
# - k8s.v1.cni.cncf.io/network-status
# infrastructure_roles_secret_name: "postgresql-infrastructure-roles"
# infrastructure_roles_secrets:
# - secretname: "monitoring-roles"

@@ -107,6 +111,8 @@ configuration:
# min_cpu_limit: 250m
# min_memory_limit: 250Mi
timeouts:
patroni_api_check_interval: 1s
patroni_api_check_timeout: 5s
pod_label_wait_timeout: 10m
pod_deletion_wait_timeout: 10m
ready_wait_interval: 4s

@@ -119,7 +125,9 @@ configuration:
# keyy: valuey
# db_hosted_zone: ""
enable_master_load_balancer: false
enable_master_pooler_load_balancer: false
enable_replica_load_balancer: false
enable_replica_pooler_load_balancer: false
external_traffic_policy: "Cluster"
master_dns_name_format: "{cluster}.{team}.{hostedzone}"
replica_dns_name_format: "{cluster}-repl.{team}.{hostedzone}"

@@ -146,6 +154,7 @@ configuration:
# logical_backup_s3_region: ""
# logical_backup_s3_secret_access_key: ""
logical_backup_s3_sse: "AES256"
# logical_backup_s3_retention_time: ""
logical_backup_schedule: "30 00 * * *"
debug:
debug_logging: true

@@ -163,6 +172,7 @@ configuration:
# - postgres_superusers
protected_role_names:
- admin
- cron_admin
role_deletion_suffix: "_deleted"
team_admin_role: admin
team_api_role_configuration:
@@ -148,15 +148,9 @@ spec:
minimum: 1
resources:
type: object
required:
- requests
- limits
properties:
limits:
type: object
required:
- cpu
- memory
properties:
cpu:
type: string

@@ -166,9 +160,6 @@ spec:
pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
requests:
type: object
required:
- cpu
- memory
properties:
cpu:
type: string

@@ -195,8 +186,12 @@ spec:
type: boolean
enableMasterLoadBalancer:
type: boolean
enableMasterPoolerLoadBalancer:
type: boolean
enableReplicaLoadBalancer:
type: boolean
enableReplicaPoolerLoadBalancer:
type: boolean
enableShmVolume:
type: boolean
init_containers:

@@ -400,15 +395,9 @@ spec:
description: deprecated
resources:
type: object
required:
- requests
- limits
properties:
limits:
type: object
required:
- cpu
- memory
properties:
cpu:
type: string

@@ -437,9 +426,6 @@ spec:
# than the corresponding limit.
requests:
type: object
required:
- cpu
- memory
properties:
cpu:
type: string

@@ -530,10 +516,6 @@ spec:
type: array
items:
type: object
required:
- key
- operator
- effect
properties:
key:
type: string
@@ -238,12 +238,10 @@ var PostgresCRDResourceValidation = apiextv1.CustomResourceValidation{
Minimum: &min1,
},
"resources": {
Type: "object",
Required: []string{"requests", "limits"},
Type: "object",
Properties: map[string]apiextv1.JSONSchemaProps{
"limits": {
Type: "object",
Required: []string{"cpu", "memory"},
Type: "object",
Properties: map[string]apiextv1.JSONSchemaProps{
"cpu": {
Type: "string",

@@ -256,8 +254,7 @@ var PostgresCRDResourceValidation = apiextv1.CustomResourceValidation{
},
},
"requests": {
Type: "object",
Required: []string{"cpu", "memory"},
Type: "object",
Properties: map[string]apiextv1.JSONSchemaProps{
"cpu": {
Type: "string",

@@ -302,9 +299,15 @@ var PostgresCRDResourceValidation = apiextv1.CustomResourceValidation{
"enableMasterLoadBalancer": {
Type: "boolean",
},
"enableMasterPoolerLoadBalancer": {
Type: "boolean",
},
"enableReplicaLoadBalancer": {
Type: "boolean",
},
"enableReplicaPoolerLoadBalancer": {
Type: "boolean",
},
"enableShmVolume": {
Type: "boolean",
},

@@ -642,12 +645,10 @@ var PostgresCRDResourceValidation = apiextv1.CustomResourceValidation{
Description: "deprecated",
},
"resources": {
Type: "object",
Required: []string{"requests", "limits"},
Type: "object",
Properties: map[string]apiextv1.JSONSchemaProps{
"limits": {
Type: "object",
Required: []string{"cpu", "memory"},
Type: "object",
Properties: map[string]apiextv1.JSONSchemaProps{
"cpu": {
Type: "string",

@@ -660,8 +661,7 @@ var PostgresCRDResourceValidation = apiextv1.CustomResourceValidation{
},
},
"requests": {
Type: "object",
Required: []string{"cpu", "memory"},
Type: "object",
Properties: map[string]apiextv1.JSONSchemaProps{
"cpu": {
Type: "string",

@@ -798,8 +798,7 @@ var PostgresCRDResourceValidation = apiextv1.CustomResourceValidation{
Type: "array",
Items: &apiextv1.JSONSchemaPropsOrArray{
Schema: &apiextv1.JSONSchemaProps{
Type: "object",
Required: []string{"key", "operator", "effect"},
Type: "object",
Properties: map[string]apiextv1.JSONSchemaProps{
"key": {
Type: "string",

@@ -1148,6 +1147,24 @@ var OperatorConfigCRDResourceValidation = apiextv1.CustomResourceValidation{
"users": {
Type: "object",
Properties: map[string]apiextv1.JSONSchemaProps{
"additional_owner_roles": {
Type: "array",
Nullable: true,
Items: &apiextv1.JSONSchemaPropsOrArray{
Schema: &apiextv1.JSONSchemaProps{
Type: "string",
},
},
},
"enable_password_rotation": {
Type: "boolean",
},
"password_rotation_interval": {
Type: "integer",
},
"password_rotation_user_retention": {
Type: "integer",
},
"replication_username": {
Type: "string",
},

@@ -1240,6 +1257,14 @@ var OperatorConfigCRDResourceValidation = apiextv1.CustomResourceValidation{
"enable_sidecars": {
Type: "boolean",
},
"ignored_annotations": {
Type: "array",
Items: &apiextv1.JSONSchemaPropsOrArray{
Schema: &apiextv1.JSONSchemaProps{
Type: "string",
},
},
},
"infrastructure_roles_secret_name": {
Type: "string",
},

@@ -1438,6 +1463,12 @@ var OperatorConfigCRDResourceValidation = apiextv1.CustomResourceValidation{
"timeouts": {
Type: "object",
Properties: map[string]apiextv1.JSONSchemaProps{
"patroni_api_check_interval": {
Type: "string",
},
"patroni_api_check_timeout": {
Type: "string",
},
"pod_label_wait_timeout": {
Type: "string",
},

@@ -1475,9 +1506,15 @@ var OperatorConfigCRDResourceValidation = apiextv1.CustomResourceValidation{
"enable_master_load_balancer": {
Type: "boolean",
},
"enable_master_pooler_load_balancer": {
Type: "boolean",
},
"enable_replica_load_balancer": {
Type: "boolean",
},
"enable_replica_pooler_load_balancer": {
Type: "boolean",
},
"external_traffic_policy": {
Type: "string",
Enum: []apiextv1.JSON{

@@ -1562,6 +1599,9 @@ var OperatorConfigCRDResourceValidation = apiextv1.CustomResourceValidation{
"logical_backup_s3_sse": {
Type: "string",
},
"logical_backup_s3_retention_time": {
Type: "string",
},
"logical_backup_schedule": {
Type: "string",
Pattern: "^(\\d+|\\*)(/\\d+)?(\\s+(\\d+|\\*)(/\\d+)?){4}$",
@ -37,11 +37,12 @@ type OperatorConfigurationList struct {
|
|||
|
||||
// PostgresUsersConfiguration defines the system users of Postgres.
|
||||
type PostgresUsersConfiguration struct {
|
||||
SuperUsername string `json:"super_username,omitempty"`
|
||||
ReplicationUsername string `json:"replication_username,omitempty"`
|
||||
EnablePasswordRotation bool `json:"enable_password_rotation,omitempty"`
|
||||
PasswordRotationInterval uint32 `json:"password_rotation_interval,omitempty"`
|
||||
PasswordRotationUserRetention uint32 `json:"password_rotation_user_retention,omitempty"`
|
||||
SuperUsername string `json:"super_username,omitempty"`
|
||||
ReplicationUsername string `json:"replication_username,omitempty"`
|
||||
AdditionalOwnerRoles []string `json:"additional_owner_roles,omitempty"`
|
||||
EnablePasswordRotation bool `json:"enable_password_rotation,omitempty"`
|
||||
PasswordRotationInterval uint32 `json:"password_rotation_interval,omitempty"`
|
||||
PasswordRotationUserRetention uint32 `json:"password_rotation_user_retention,omitempty"`
|
||||
}
|
||||
|
||||
// MajorVersionUpgradeConfiguration defines how to execute major version upgrades of Postgres.
|
||||
|
|
@ -81,6 +82,7 @@ type KubernetesMetaConfiguration struct {
|
|||
InheritedLabels []string `json:"inherited_labels,omitempty"`
|
||||
InheritedAnnotations []string `json:"inherited_annotations,omitempty"`
|
||||
DownscalerAnnotations []string `json:"downscaler_annotations,omitempty"`
|
||||
IgnoredAnnotations []string `json:"ignored_annotations,omitempty"`
|
||||
ClusterNameLabel string `json:"cluster_name_label,omitempty"`
|
||||
DeleteAnnotationDateKey string `json:"delete_annotation_date_key,omitempty"`
|
||||
DeleteAnnotationNameKey string `json:"delete_annotation_name_key,omitempty"`
|
||||
|
|
@ -111,23 +113,27 @@ type PostgresPodResourcesDefaults struct {
|
|||
|
||||
// OperatorTimeouts defines the timeout of ResourceCheck, PodWait, ReadyWait
|
||||
type OperatorTimeouts struct {
|
||||
ResourceCheckInterval Duration `json:"resource_check_interval,omitempty"`
|
||||
ResourceCheckTimeout Duration `json:"resource_check_timeout,omitempty"`
|
||||
PodLabelWaitTimeout Duration `json:"pod_label_wait_timeout,omitempty"`
|
||||
PodDeletionWaitTimeout Duration `json:"pod_deletion_wait_timeout,omitempty"`
|
||||
ReadyWaitInterval Duration `json:"ready_wait_interval,omitempty"`
|
||||
ReadyWaitTimeout Duration `json:"ready_wait_timeout,omitempty"`
|
||||
ResourceCheckInterval Duration `json:"resource_check_interval,omitempty"`
|
||||
ResourceCheckTimeout Duration `json:"resource_check_timeout,omitempty"`
|
||||
PodLabelWaitTimeout Duration `json:"pod_label_wait_timeout,omitempty"`
|
||||
PodDeletionWaitTimeout Duration `json:"pod_deletion_wait_timeout,omitempty"`
|
||||
ReadyWaitInterval Duration `json:"ready_wait_interval,omitempty"`
|
||||
ReadyWaitTimeout Duration `json:"ready_wait_timeout,omitempty"`
|
||||
PatroniAPICheckInterval Duration `json:"patroni_api_check_interval,omitempty"`
|
||||
PatroniAPICheckTimeout Duration `json:"patroni_api_check_timeout,omitempty"`
|
||||
}
|
||||
|
||||
// LoadBalancerConfiguration defines the LB configuration
|
||||
type LoadBalancerConfiguration struct {
|
||||
DbHostedZone string `json:"db_hosted_zone,omitempty"`
|
||||
EnableMasterLoadBalancer bool `json:"enable_master_load_balancer,omitempty"`
|
||||
EnableReplicaLoadBalancer bool `json:"enable_replica_load_balancer,omitempty"`
|
||||
CustomServiceAnnotations map[string]string `json:"custom_service_annotations,omitempty"`
|
||||
MasterDNSNameFormat config.StringTemplate `json:"master_dns_name_format,omitempty"`
|
||||
ReplicaDNSNameFormat config.StringTemplate `json:"replica_dns_name_format,omitempty"`
|
||||
ExternalTrafficPolicy string `json:"external_traffic_policy" default:"Cluster"`
|
||||
DbHostedZone string `json:"db_hosted_zone,omitempty"`
|
||||
EnableMasterLoadBalancer bool `json:"enable_master_load_balancer,omitempty"`
|
||||
EnableMasterPoolerLoadBalancer bool `json:"enable_master_pooler_load_balancer,omitempty"`
|
||||
EnableReplicaLoadBalancer bool `json:"enable_replica_load_balancer,omitempty"`
|
||||
EnableReplicaPoolerLoadBalancer bool `json:"enable_replica_pooler_load_balancer,omitempty"`
|
||||
CustomServiceAnnotations map[string]string `json:"custom_service_annotations,omitempty"`
|
||||
MasterDNSNameFormat config.StringTemplate `json:"master_dns_name_format,omitempty"`
|
||||
ReplicaDNSNameFormat config.StringTemplate `json:"replica_dns_name_format,omitempty"`
|
||||
ExternalTrafficPolicy string `json:"external_traffic_policy" default:"Cluster"`
|
||||
}
|
||||
|
||||
// AWSGCPConfiguration defines the configuration for AWS
|
||||
|
|
@@ -213,6 +219,7 @@ type OperatorLogicalBackupConfiguration struct {
|
|||
S3AccessKeyID string `json:"logical_backup_s3_access_key_id,omitempty"`
|
||||
S3SecretAccessKey string `json:"logical_backup_s3_secret_access_key,omitempty"`
|
||||
S3SSE string `json:"logical_backup_s3_sse,omitempty"`
|
||||
RetentionTime string `json:"logical_backup_s3_retention_time,omitempty"`
|
||||
GoogleApplicationCredentials string `json:"logical_backup_google_application_credentials,omitempty"`
|
||||
JobPrefix string `json:"logical_backup_job_prefix,omitempty"`
|
||||
}
|
||||
|
|
|
|||
|
|
@@ -27,7 +27,7 @@ type PostgresSpec struct {
|
|||
PostgresqlParam `json:"postgresql"`
|
||||
Volume `json:"volume,omitempty"`
|
||||
Patroni `json:"patroni,omitempty"`
|
||||
Resources `json:"resources,omitempty"`
|
||||
*Resources `json:"resources,omitempty"`
|
||||
|
||||
EnableConnectionPooler *bool `json:"enableConnectionPooler,omitempty"`
|
||||
EnableReplicaConnectionPooler *bool `json:"enableReplicaConnectionPooler,omitempty"`
|
||||
|
|
@@ -42,8 +42,10 @@ type PostgresSpec struct {
|
|||
|
||||
// vars that enable load balancers are pointers because it is important to know if any of them is omitted from the Postgres manifest
|
||||
// in that case the var evaluates to nil and the value is taken from the operator config
|
||||
EnableMasterLoadBalancer *bool `json:"enableMasterLoadBalancer,omitempty"`
|
||||
EnableReplicaLoadBalancer *bool `json:"enableReplicaLoadBalancer,omitempty"`
|
||||
EnableMasterLoadBalancer *bool `json:"enableMasterLoadBalancer,omitempty"`
|
||||
EnableMasterPoolerLoadBalancer *bool `json:"enableMasterPoolerLoadBalancer,omitempty"`
|
||||
EnableReplicaLoadBalancer *bool `json:"enableReplicaLoadBalancer,omitempty"`
|
||||
EnableReplicaPoolerLoadBalancer *bool `json:"enableReplicaPoolerLoadBalancer,omitempty"`
|
||||
|
||||
// deprecated load balancer settings maintained for backward compatibility
|
||||
// see "Load balancers" operator docs
|
||||
|
|
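The comment earlier in this hunk is the reason the load balancer toggles are *bool rather than bool: a nil pointer means the field was omitted from the Postgres manifest, so the operator-level default still applies. A minimal sketch of that fallback, with an illustrative helper name that is not an operator function:

package main

import "fmt"

// resolveToggle shows the nil-pointer fallback: a nil value means the toggle
// was omitted from the manifest, so the operator configuration wins.
func resolveToggle(fromManifest *bool, operatorDefault bool) bool {
    if fromManifest != nil {
        return *fromManifest // explicitly set in the Postgres manifest
    }
    return operatorDefault
}

func main() {
    enabled := true
    fmt.Println(resolveToggle(&enabled, false)) // true: manifest overrides the config
    fmt.Println(resolveToggle(nil, true))       // true: operator default is used
}
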
@@ -165,7 +167,7 @@ type Patroni struct {
|
|||
Slots map[string]map[string]string `json:"slots,omitempty"`
|
||||
SynchronousMode bool `json:"synchronous_mode,omitempty"`
|
||||
SynchronousModeStrict bool `json:"synchronous_mode_strict,omitempty"`
|
||||
SynchronousNodeCount uint32 `json:"synchronous_node_count,omitempty" defaults:1`
|
||||
SynchronousNodeCount uint32 `json:"synchronous_node_count,omitempty" defaults:"1"`
|
||||
}
|
||||
|
||||
// StandbyDescription contains remote primary config or s3 wal path
|
||||
|
|
@@ -199,7 +201,7 @@ type CloneDescription struct {
|
|||
|
||||
// Sidecar defines a container to be run in the same pod as the Postgres container.
|
||||
type Sidecar struct {
|
||||
Resources `json:"resources,omitempty"`
|
||||
*Resources `json:"resources,omitempty"`
|
||||
Name string `json:"name,omitempty"`
|
||||
DockerImage string `json:"image,omitempty"`
|
||||
Ports []v1.ContainerPort `json:"ports,omitempty"`
|
||||
|
|
@@ -232,19 +234,19 @@ type ConnectionPooler struct {
|
|||
DockerImage string `json:"dockerImage,omitempty"`
|
||||
MaxDBConnections *int32 `json:"maxDBConnections,omitempty"`
|
||||
|
||||
Resources `json:"resources,omitempty"`
|
||||
*Resources `json:"resources,omitempty"`
|
||||
}
|
||||
|
||||
type Stream struct {
|
||||
ApplicationId string `json:"applicationId"`
|
||||
Database string `json:"database"`
|
||||
Tables map[string]StreamTable `json:"tables"`
|
||||
Filter map[string]string `json:"filter,omitempty"`
|
||||
BatchSize uint32 `json:"batchSize,omitempty"`
|
||||
Filter map[string]*string `json:"filter,omitempty"`
|
||||
BatchSize *uint32 `json:"batchSize,omitempty"`
|
||||
}
|
||||
|
||||
type StreamTable struct {
|
||||
EventType string `json:"eventType"`
|
||||
IdColumn string `json:"idColumn,omitempty" defaults:"id"`
|
||||
PayloadColumn string `json:"payloadColumn,omitempty" defaults:"payload"`
|
||||
EventType string `json:"eventType"`
|
||||
IdColumn *string `json:"idColumn,omitempty"`
|
||||
PayloadColumn *string `json:"payloadColumn,omitempty"`
|
||||
}
|
||||
|
|
|
|||
|
|
@@ -163,7 +163,7 @@ var unmarshalCluster = []struct {
|
|||
"kind": "Postgresql","apiVersion": "acid.zalan.do/v1",
|
||||
"metadata": {"name": "acid-testcluster1"}, "spec": {"teamId": 100}}`), &tmp).Error(),
|
||||
},
|
||||
marshal: []byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"acid-testcluster1","creationTimestamp":null},"spec":{"postgresql":{"version":"","parameters":null},"volume":{"size":"","storageClass":""},"patroni":{"initdb":null,"pg_hba":null,"ttl":0,"loop_wait":0,"retry_timeout":0,"maximum_lag_on_failover":0,"slots":null},"resources":{"requests":{"cpu":"","memory":""},"limits":{"cpu":"","memory":""}},"teamId":"","allowedSourceRanges":null,"numberOfInstances":0,"users":null,"clone":null},"status":"Invalid"}`),
|
||||
marshal: []byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"acid-testcluster1","creationTimestamp":null},"spec":{"postgresql":{"version":"","parameters":null},"volume":{"size":"","storageClass":""},"patroni":{"initdb":null,"pg_hba":null,"ttl":0,"loop_wait":0,"retry_timeout":0,"maximum_lag_on_failover":0,"slots":null},"teamId":"","allowedSourceRanges":null,"numberOfInstances":0,"users":null,"clone":null},"status":"Invalid"}`),
|
||||
err: nil},
|
||||
{
|
||||
about: "example with /status subresource",
|
||||
|
|
@@ -184,7 +184,7 @@ var unmarshalCluster = []struct {
|
|||
"kind": "Postgresql","apiVersion": "acid.zalan.do/v1",
|
||||
"metadata": {"name": "acid-testcluster1"}, "spec": {"teamId": 100}}`), &tmp).Error(),
|
||||
},
|
||||
marshal: []byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"acid-testcluster1","creationTimestamp":null},"spec":{"postgresql":{"version":"","parameters":null},"volume":{"size":"","storageClass":""},"patroni":{"initdb":null,"pg_hba":null,"ttl":0,"loop_wait":0,"retry_timeout":0,"maximum_lag_on_failover":0,"slots":null},"resources":{"requests":{"cpu":"","memory":""},"limits":{"cpu":"","memory":""}},"teamId":"","allowedSourceRanges":null,"numberOfInstances":0,"users":null,"clone":null},"status":{"PostgresClusterStatus":"Invalid"}}`),
|
||||
marshal: []byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"acid-testcluster1","creationTimestamp":null},"spec":{"postgresql":{"version":"","parameters":null},"volume":{"size":"","storageClass":""},"patroni":{"initdb":null,"pg_hba":null,"ttl":0,"loop_wait":0,"retry_timeout":0,"maximum_lag_on_failover":0,"slots":null},"teamId":"","allowedSourceRanges":null,"numberOfInstances":0,"users":null,"clone":null},"status":{"PostgresClusterStatus":"Invalid"}}`),
|
||||
err: nil},
|
||||
{
|
||||
about: "example with detailed input manifest and deprecated pod_priority_class_name -> podPriorityClassName",
|
||||
|
|
@@ -300,7 +300,7 @@ var unmarshalCluster = []struct {
|
|||
MaximumLagOnFailover: 33554432,
|
||||
Slots: map[string]map[string]string{"permanent_logical_1": {"type": "logical", "database": "foo", "plugin": "pgoutput"}},
|
||||
},
|
||||
Resources: Resources{
|
||||
Resources: &Resources{
|
||||
ResourceRequests: ResourceDescription{CPU: "10m", Memory: "50Mi"},
|
||||
ResourceLimits: ResourceDescription{CPU: "300m", Memory: "3000Mi"},
|
||||
},
|
||||
|
|
@@ -351,7 +351,7 @@ var unmarshalCluster = []struct {
|
|||
Status: PostgresStatus{PostgresClusterStatus: ClusterStatusInvalid},
|
||||
Error: errors.New("name must match {TEAM}-{NAME} format").Error(),
|
||||
},
|
||||
marshal: []byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"teapot-testcluster1","creationTimestamp":null},"spec":{"postgresql":{"version":"","parameters":null},"volume":{"size":"","storageClass":""},"patroni":{"initdb":null,"pg_hba":null,"ttl":0,"loop_wait":0,"retry_timeout":0,"maximum_lag_on_failover":0,"slots":null} ,"resources":{"requests":{"cpu":"","memory":""},"limits":{"cpu":"","memory":""}},"teamId":"acid","allowedSourceRanges":null,"numberOfInstances":0,"users":null,"clone":null},"status":{"PostgresClusterStatus":"Invalid"}}`),
|
||||
marshal: []byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"teapot-testcluster1","creationTimestamp":null},"spec":{"postgresql":{"version":"","parameters":null},"volume":{"size":"","storageClass":""},"patroni":{"initdb":null,"pg_hba":null,"ttl":0,"loop_wait":0,"retry_timeout":0,"maximum_lag_on_failover":0,"slots":null},"teamId":"acid","allowedSourceRanges":null,"numberOfInstances":0,"users":null,"clone":null},"status":{"PostgresClusterStatus":"Invalid"}}`),
|
||||
err: nil},
|
||||
{
|
||||
about: "example with clone",
|
||||
|
|
@@ -373,7 +373,7 @@ var unmarshalCluster = []struct {
|
|||
},
|
||||
Error: "",
|
||||
},
|
||||
marshal: []byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"acid-testcluster1","creationTimestamp":null},"spec":{"postgresql":{"version":"","parameters":null},"volume":{"size":"","storageClass":""},"patroni":{"initdb":null,"pg_hba":null,"ttl":0,"loop_wait":0,"retry_timeout":0,"maximum_lag_on_failover":0,"slots":null},"resources":{"requests":{"cpu":"","memory":""},"limits":{"cpu":"","memory":""}},"teamId":"acid","allowedSourceRanges":null,"numberOfInstances":0,"users":null,"clone":{"cluster":"team-batman"}},"status":{"PostgresClusterStatus":""}}`),
|
||||
marshal: []byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"acid-testcluster1","creationTimestamp":null},"spec":{"postgresql":{"version":"","parameters":null},"volume":{"size":"","storageClass":""},"patroni":{"initdb":null,"pg_hba":null,"ttl":0,"loop_wait":0,"retry_timeout":0,"maximum_lag_on_failover":0,"slots":null},"teamId":"acid","allowedSourceRanges":null,"numberOfInstances":0,"users":null,"clone":{"cluster":"team-batman"}},"status":{"PostgresClusterStatus":""}}`),
|
||||
err: nil},
|
||||
{
|
||||
about: "standby example",
|
||||
|
|
@@ -395,7 +395,7 @@ var unmarshalCluster = []struct {
|
|||
},
|
||||
Error: "",
|
||||
},
|
||||
marshal: []byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"acid-testcluster1","creationTimestamp":null},"spec":{"postgresql":{"version":"","parameters":null},"volume":{"size":"","storageClass":""},"patroni":{"initdb":null,"pg_hba":null,"ttl":0,"loop_wait":0,"retry_timeout":0,"maximum_lag_on_failover":0,"slots":null},"resources":{"requests":{"cpu":"","memory":""},"limits":{"cpu":"","memory":""}},"teamId":"acid","allowedSourceRanges":null,"numberOfInstances":0,"users":null,"standby":{"s3_wal_path":"s3://custom/path/to/bucket/"}},"status":{"PostgresClusterStatus":""}}`),
|
||||
marshal: []byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"acid-testcluster1","creationTimestamp":null},"spec":{"postgresql":{"version":"","parameters":null},"volume":{"size":"","storageClass":""},"patroni":{"initdb":null,"pg_hba":null,"ttl":0,"loop_wait":0,"retry_timeout":0,"maximum_lag_on_failover":0,"slots":null},"teamId":"acid","allowedSourceRanges":null,"numberOfInstances":0,"users":null,"standby":{"s3_wal_path":"s3://custom/path/to/bucket/"}},"status":{"PostgresClusterStatus":""}}`),
|
||||
err: nil},
|
||||
{
|
||||
about: "expect error on malformatted JSON",
|
||||
|
|
|
|||
|
|
@@ -106,7 +106,11 @@ func (in *ConnectionPooler) DeepCopyInto(out *ConnectionPooler) {
|
|||
*out = new(int32)
|
||||
**out = **in
|
||||
}
|
||||
out.Resources = in.Resources
|
||||
if in.Resources != nil {
|
||||
in, out := &in.Resources, &out.Resources
|
||||
*out = new(Resources)
|
||||
**out = **in
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
|
|
@@ -224,6 +228,11 @@ func (in *KubernetesMetaConfiguration) DeepCopyInto(out *KubernetesMetaConfigura
|
|||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.IgnoredAnnotations != nil {
|
||||
in, out := &in.IgnoredAnnotations, &out.IgnoredAnnotations
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.NodeReadinessLabel != nil {
|
||||
in, out := &in.NodeReadinessLabel, &out.NodeReadinessLabel
|
||||
*out = make(map[string]string, len(*in))
|
||||
|
|
@@ -401,7 +410,7 @@ func (in *OperatorConfigurationData) DeepCopyInto(out *OperatorConfigurationData
|
|||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
out.PostgresUsersConfiguration = in.PostgresUsersConfiguration
|
||||
in.PostgresUsersConfiguration.DeepCopyInto(&out.PostgresUsersConfiguration)
|
||||
in.MajorVersionUpgrade.DeepCopyInto(&out.MajorVersionUpgrade)
|
||||
in.Kubernetes.DeepCopyInto(&out.Kubernetes)
|
||||
out.PostgresPodResources = in.PostgresPodResources
|
||||
|
|
@@ -575,7 +584,11 @@ func (in *PostgresSpec) DeepCopyInto(out *PostgresSpec) {
|
|||
in.PostgresqlParam.DeepCopyInto(&out.PostgresqlParam)
|
||||
in.Volume.DeepCopyInto(&out.Volume)
|
||||
in.Patroni.DeepCopyInto(&out.Patroni)
|
||||
out.Resources = in.Resources
|
||||
if in.Resources != nil {
|
||||
in, out := &in.Resources, &out.Resources
|
||||
*out = new(Resources)
|
||||
**out = **in
|
||||
}
|
||||
if in.EnableConnectionPooler != nil {
|
||||
in, out := &in.EnableConnectionPooler, &out.EnableConnectionPooler
|
||||
*out = new(bool)
|
||||
|
|
@@ -611,11 +624,21 @@ func (in *PostgresSpec) DeepCopyInto(out *PostgresSpec) {
|
|||
*out = new(bool)
|
||||
**out = **in
|
||||
}
|
||||
if in.EnableMasterPoolerLoadBalancer != nil {
|
||||
in, out := &in.EnableMasterPoolerLoadBalancer, &out.EnableMasterPoolerLoadBalancer
|
||||
*out = new(bool)
|
||||
**out = **in
|
||||
}
|
||||
if in.EnableReplicaLoadBalancer != nil {
|
||||
in, out := &in.EnableReplicaLoadBalancer, &out.EnableReplicaLoadBalancer
|
||||
*out = new(bool)
|
||||
**out = **in
|
||||
}
|
||||
if in.EnableReplicaPoolerLoadBalancer != nil {
|
||||
in, out := &in.EnableReplicaPoolerLoadBalancer, &out.EnableReplicaPoolerLoadBalancer
|
||||
*out = new(bool)
|
||||
**out = **in
|
||||
}
|
||||
if in.UseLoadBalancer != nil {
|
||||
in, out := &in.UseLoadBalancer, &out.UseLoadBalancer
|
||||
*out = new(bool)
|
||||
|
|
@@ -916,6 +939,11 @@ func (in *PostgresTeamSpec) DeepCopy() *PostgresTeamSpec {
|
|||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *PostgresUsersConfiguration) DeepCopyInto(out *PostgresUsersConfiguration) {
|
||||
*out = *in
|
||||
if in.AdditionalOwnerRoles != nil {
|
||||
in, out := &in.AdditionalOwnerRoles, &out.AdditionalOwnerRoles
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
|
|
@@ -1117,7 +1145,11 @@ func (in *ScalyrConfiguration) DeepCopy() *ScalyrConfiguration {
|
|||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *Sidecar) DeepCopyInto(out *Sidecar) {
|
||||
*out = *in
|
||||
out.Resources = in.Resources
|
||||
if in.Resources != nil {
|
||||
in, out := &in.Resources, &out.Resources
|
||||
*out = new(Resources)
|
||||
**out = **in
|
||||
}
|
||||
if in.Ports != nil {
|
||||
in, out := &in.Ports, &out.Ports
|
||||
*out = make([]corev1.ContainerPort, len(*in))
|
||||
|
|
@@ -1166,16 +1198,29 @@ func (in *Stream) DeepCopyInto(out *Stream) {
|
|||
in, out := &in.Tables, &out.Tables
|
||||
*out = make(map[string]StreamTable, len(*in))
|
||||
for key, val := range *in {
|
||||
(*out)[key] = val
|
||||
(*out)[key] = *val.DeepCopy()
|
||||
}
|
||||
}
|
||||
if in.Filter != nil {
|
||||
in, out := &in.Filter, &out.Filter
|
||||
*out = make(map[string]string, len(*in))
|
||||
*out = make(map[string]*string, len(*in))
|
||||
for key, val := range *in {
|
||||
(*out)[key] = val
|
||||
var outVal *string
|
||||
if val == nil {
|
||||
(*out)[key] = nil
|
||||
} else {
|
||||
in, out := &val, &outVal
|
||||
*out = new(string)
|
||||
**out = **in
|
||||
}
|
||||
(*out)[key] = outVal
|
||||
}
|
||||
}
|
||||
if in.BatchSize != nil {
|
||||
in, out := &in.BatchSize, &out.BatchSize
|
||||
*out = new(uint32)
|
||||
**out = **in
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
|
|
@@ -1192,6 +1237,16 @@ func (in *Stream) DeepCopy() *Stream {
|
|||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *StreamTable) DeepCopyInto(out *StreamTable) {
|
||||
*out = *in
|
||||
if in.IdColumn != nil {
|
||||
in, out := &in.IdColumn, &out.IdColumn
|
||||
*out = new(string)
|
||||
**out = **in
|
||||
}
|
||||
if in.PayloadColumn != nil {
|
||||
in, out := &in.PayloadColumn, &out.PayloadColumn
|
||||
*out = new(string)
|
||||
**out = **in
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@@ -40,15 +40,15 @@ type EventStream struct {
|
|||
|
||||
// EventStreamFlow defines the flow characteristics of the event stream
|
||||
type EventStreamFlow struct {
|
||||
Type string `json:"type"`
|
||||
PayloadColumn string `json:"payloadColumn,omitempty" defaults:"payload"`
|
||||
Type string `json:"type"`
|
||||
PayloadColumn *string `json:"payloadColumn,omitempty"`
|
||||
}
|
||||
|
||||
// EventStreamSink defines the target of the event stream
|
||||
type EventStreamSink struct {
|
||||
Type string `json:"type"`
|
||||
EventType string `json:"eventType,omitempty"`
|
||||
MaxBatchSize uint32 `json:"maxBatchSize,omitempty"`
|
||||
Type string `json:"type"`
|
||||
EventType string `json:"eventType,omitempty"`
|
||||
MaxBatchSize *uint32 `json:"maxBatchSize,omitempty"`
|
||||
}
|
||||
|
||||
// EventStreamSource defines the source of the event stream and connection for FES operator
|
||||
|
|
@@ -56,23 +56,23 @@ type EventStreamSource struct {
|
|||
Type string `json:"type"`
|
||||
Schema string `json:"schema,omitempty" defaults:"public"`
|
||||
EventStreamTable EventStreamTable `json:"table"`
|
||||
Filter string `json:"filter,omitempty"`
|
||||
Filter *string `json:"filter,omitempty"`
|
||||
Connection Connection `json:"jdbcConnection"`
|
||||
}
|
||||
|
||||
// EventStreamTable defines the name and ID column to be used for streaming
|
||||
type EventStreamTable struct {
|
||||
Name string `json:"name"`
|
||||
IDColumn string `json:"idColumn,omitempty" defaults:"id"`
|
||||
Name string `json:"name"`
|
||||
IDColumn *string `json:"idColumn,omitempty"`
|
||||
}
|
||||
|
||||
// Connection to be used for allowing the FES operator to connect to a database
|
||||
type Connection struct {
|
||||
Url string `json:"jdbcUrl"`
|
||||
SlotName string `json:"slotName"`
|
||||
PluginType string `json:"pluginType,omitempty" defaults:"wal2json"`
|
||||
PublicationName string `json:"publicationName,omitempty"`
|
||||
DBAuth DBAuth `json:"databaseAuthentication"`
|
||||
Url string `json:"jdbcUrl"`
|
||||
SlotName string `json:"slotName"`
|
||||
PluginType string `json:"pluginType,omitempty"`
|
||||
PublicationName *string `json:"publicationName,omitempty"`
|
||||
DBAuth DBAuth `json:"databaseAuthentication"`
|
||||
}
|
||||
|
||||
// DBAuth specifies the credentials to be used for connecting with the database
|
||||
|
|
|
|||
|
|
@@ -43,7 +43,7 @@ var (
|
|||
alphaNumericRegexp = regexp.MustCompile("^[a-zA-Z][a-zA-Z0-9]*$")
|
||||
databaseNameRegexp = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$")
|
||||
userRegexp = regexp.MustCompile(`^[a-z0-9]([-_a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-_a-z0-9]*[a-z0-9])?)*$`)
|
||||
patroniObjectSuffixes = []string{"config", "failover", "sync"}
|
||||
patroniObjectSuffixes = []string{"config", "failover", "sync", "leader"}
|
||||
)
|
||||
|
||||
// Config contains operator-wide clients and configuration used from a cluster. TODO: remove struct duplication.
|
||||
|
|
@@ -228,6 +228,8 @@ func (c *Cluster) initUsers() error {
|
|||
return fmt.Errorf("could not init human users: %v", err)
|
||||
}
|
||||
|
||||
c.initAdditionalOwnerRoles()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
|
|
@@ -254,24 +256,22 @@ func (c *Cluster) Create() error {
|
|||
c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusCreating)
|
||||
c.eventRecorder.Event(c.GetReference(), v1.EventTypeNormal, "Create", "Started creation of new cluster resources")
|
||||
|
||||
if err = c.enforceMinResourceLimits(&c.Spec); err != nil {
|
||||
return fmt.Errorf("could not enforce minimum resource limits: %v", err)
|
||||
}
|
||||
|
||||
for _, role := range []PostgresRole{Master, Replica} {
|
||||
|
||||
if c.Endpoints[role] != nil {
|
||||
return fmt.Errorf("%s endpoint already exists in the cluster", role)
|
||||
}
|
||||
if role == Master {
|
||||
// replica endpoint will be created by the replica service. Master endpoint needs to be created by us,
|
||||
// since the corresponding master service does not define any selectors.
|
||||
ep, err = c.createEndpoint(role)
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not create %s endpoint: %v", role, err)
|
||||
if !c.patroniKubernetesUseConfigMaps() {
|
||||
if c.Endpoints[role] != nil {
|
||||
return fmt.Errorf("%s endpoint already exists in the cluster", role)
|
||||
}
|
||||
if role == Master {
|
||||
// replica endpoint will be created by the replica service. Master endpoint needs to be created by us,
|
||||
// since the corresponding master service does not define any selectors.
|
||||
ep, err = c.createEndpoint(role)
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not create %s endpoint: %v", role, err)
|
||||
}
|
||||
c.logger.Infof("endpoint %q has been successfully created", util.NameFromMeta(ep.ObjectMeta))
|
||||
c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeNormal, "Endpoints", "Endpoint %q has been successfully created", util.NameFromMeta(ep.ObjectMeta))
|
||||
}
|
||||
c.logger.Infof("endpoint %q has been successfully created", util.NameFromMeta(ep.ObjectMeta))
|
||||
c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeNormal, "Endpoints", "Endpoint %q has been successfully created", util.NameFromMeta(ep.ObjectMeta))
|
||||
}
|
||||
|
||||
if c.Services[role] != nil {
|
||||
|
|
@@ -380,9 +380,10 @@ func (c *Cluster) compareStatefulSetWith(statefulSet *appsv1.StatefulSet) *compa
|
|||
match = false
|
||||
reasons = append(reasons, "new statefulset's number of replicas does not match the current one")
|
||||
}
|
||||
if !reflect.DeepEqual(c.Statefulset.Annotations, statefulSet.Annotations) {
|
||||
if changed, reason := c.compareAnnotations(c.Statefulset.Annotations, statefulSet.Annotations); changed {
|
||||
match = false
|
||||
needsReplace = true
|
||||
reasons = append(reasons, "new statefulset's annotations do not match the current one")
|
||||
reasons = append(reasons, "new statefulset's annotations do not match: "+reason)
|
||||
}
|
||||
|
||||
needsRollUpdate, reasons = c.compareContainers("initContainers", c.Statefulset.Spec.Template.Spec.InitContainers, statefulSet.Spec.Template.Spec.InitContainers, needsRollUpdate, reasons)
|
||||
|
|
@@ -436,10 +437,11 @@ func (c *Cluster) compareStatefulSetWith(statefulSet *appsv1.StatefulSet) *compa
|
|||
}
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(c.Statefulset.Spec.Template.Annotations, statefulSet.Spec.Template.Annotations) {
|
||||
if changed, reason := c.compareAnnotations(c.Statefulset.Spec.Template.Annotations, statefulSet.Spec.Template.Annotations); changed {
|
||||
match = false
|
||||
needsReplace = true
|
||||
needsRollUpdate = true
|
||||
reasons = append(reasons, "new statefulset's pod template metadata annotations does not match the current one")
|
||||
reasons = append(reasons, "new statefulset's pod template metadata annotations does not match "+reason)
|
||||
}
|
||||
if !reflect.DeepEqual(c.Statefulset.Spec.Template.Spec.SecurityContext, statefulSet.Spec.Template.Spec.SecurityContext) {
|
||||
needsReplace = true
|
||||
|
|
@@ -674,44 +676,59 @@ func comparePorts(a, b []v1.ContainerPort) bool {
|
|||
return true
|
||||
}
|
||||
|
||||
func (c *Cluster) enforceMinResourceLimits(spec *acidv1.PostgresSpec) error {
|
||||
func (c *Cluster) compareAnnotations(old, new map[string]string) (bool, string) {
|
||||
reason := ""
|
||||
ignoredAnnotations := make(map[string]bool)
|
||||
for _, ignore := range c.OpConfig.IgnoredAnnotations {
|
||||
ignoredAnnotations[ignore] = true
|
||||
}
|
||||
|
||||
var (
|
||||
isSmaller bool
|
||||
err error
|
||||
)
|
||||
|
||||
// setting limits too low can cause unnecessary evictions / OOM kills
|
||||
minCPULimit := c.OpConfig.MinCPULimit
|
||||
minMemoryLimit := c.OpConfig.MinMemoryLimit
|
||||
|
||||
cpuLimit := spec.Resources.ResourceLimits.CPU
|
||||
if cpuLimit != "" {
|
||||
isSmaller, err = util.IsSmallerQuantity(cpuLimit, minCPULimit)
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not compare defined CPU limit %s with configured minimum value %s: %v", cpuLimit, minCPULimit, err)
|
||||
for key := range old {
|
||||
if _, ok := ignoredAnnotations[key]; ok {
|
||||
continue
|
||||
}
|
||||
if isSmaller {
|
||||
c.logger.Warningf("defined CPU limit %s is below required minimum %s and will be increased", cpuLimit, minCPULimit)
|
||||
c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeWarning, "ResourceLimits", "defined CPU limit %s is below required minimum %s and will be set to it", cpuLimit, minCPULimit)
|
||||
spec.Resources.ResourceLimits.CPU = minCPULimit
|
||||
if _, ok := new[key]; !ok {
|
||||
reason += fmt.Sprintf(" Removed %q.", key)
|
||||
}
|
||||
}
|
||||
|
||||
memoryLimit := spec.Resources.ResourceLimits.Memory
|
||||
if memoryLimit != "" {
|
||||
isSmaller, err = util.IsSmallerQuantity(memoryLimit, minMemoryLimit)
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not compare defined memory limit %s with configured minimum value %s: %v", memoryLimit, minMemoryLimit, err)
|
||||
for key := range new {
|
||||
if _, ok := ignoredAnnotations[key]; ok {
|
||||
continue
|
||||
}
|
||||
if isSmaller {
|
||||
c.logger.Warningf("defined memory limit %s is below required minimum %s and will be increased", memoryLimit, minMemoryLimit)
|
||||
c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeWarning, "ResourceLimits", "defined memory limit %s is below required minimum %s and will be set to it", memoryLimit, minMemoryLimit)
|
||||
spec.Resources.ResourceLimits.Memory = minMemoryLimit
|
||||
v, ok := old[key]
|
||||
if !ok {
|
||||
reason += fmt.Sprintf(" Added %q with value %q.", key, new[key])
|
||||
} else if v != new[key] {
|
||||
reason += fmt.Sprintf(" %q changed from %q to %q.", key, v, new[key])
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
return reason != "", reason
|
||||
|
||||
}
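
A simplified, self-contained illustration of the comparison implemented above: ignored keys are skipped, and removals, additions and value changes are folded into one reason string. This re-implementation is for illustration only and drops the Cluster receiver and operator config.

package main

import "fmt"

// diffAnnotations mirrors the idea of compareAnnotations in a standalone form.
func diffAnnotations(old, new map[string]string, ignored map[string]bool) (bool, string) {
    reason := ""
    for key := range old {
        if ignored[key] {
            continue
        }
        if _, ok := new[key]; !ok {
            reason += fmt.Sprintf(" Removed %q.", key)
        }
    }
    for key, val := range new {
        if ignored[key] {
            continue
        }
        if oldVal, ok := old[key]; !ok {
            reason += fmt.Sprintf(" Added %q with value %q.", key, val)
        } else if oldVal != val {
            reason += fmt.Sprintf(" %q changed from %q to %q.", key, oldVal, val)
        }
    }
    return reason != "", reason
}

func main() {
    changed, reason := diffAnnotations(
        map[string]string{"foo": "bar", "k8s.v1.cni.cncf.io/network-status": "up"},
        map[string]string{"foo": "baz"},
        map[string]bool{"k8s.v1.cni.cncf.io/network-status": true},
    )
    fmt.Println(changed, reason) // true  "foo" changed from "bar" to "baz".
}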
|
||||
|
||||
func (c *Cluster) compareServices(old, new *v1.Service) (bool, string) {
|
||||
if old.Spec.Type != new.Spec.Type {
|
||||
return false, fmt.Sprintf("new service's type %q does not match the current one %q",
|
||||
new.Spec.Type, old.Spec.Type)
|
||||
}
|
||||
|
||||
oldSourceRanges := old.Spec.LoadBalancerSourceRanges
|
||||
newSourceRanges := new.Spec.LoadBalancerSourceRanges
|
||||
|
||||
/* work around Kubernetes 1.6 serializing [] as nil. See https://github.com/kubernetes/kubernetes/issues/43203 */
|
||||
if (len(oldSourceRanges) != 0) || (len(newSourceRanges) != 0) {
|
||||
if !util.IsEqualIgnoreOrder(oldSourceRanges, newSourceRanges) {
|
||||
return false, "new service's LoadBalancerSourceRange does not match the current one"
|
||||
}
|
||||
}
|
||||
|
||||
if changed, reason := c.compareAnnotations(old.Annotations, new.Annotations); changed {
|
||||
return !changed, "new service's annotations does not match the current one:" + reason
|
||||
}
|
||||
|
||||
return true, ""
|
||||
}
|
||||
|
||||
// Update changes Kubernetes objects according to the new specification. Unlike the sync case, the missing object
|
||||
|
|
@@ -793,12 +810,6 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error {
|
|||
|
||||
// Statefulset
|
||||
func() {
|
||||
if err := c.enforceMinResourceLimits(&c.Spec); err != nil {
|
||||
c.logger.Errorf("could not sync resources: %v", err)
|
||||
updateFailed = true
|
||||
return
|
||||
}
|
||||
|
||||
oldSs, err := c.generateStatefulSet(&oldSpec.Spec)
|
||||
if err != nil {
|
||||
c.logger.Errorf("could not generate old statefulset spec: %v", err)
|
||||
|
|
@@ -806,16 +817,13 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error {
|
|||
return
|
||||
}
|
||||
|
||||
// update newSpec to for latter comparison with oldSpec
|
||||
c.enforceMinResourceLimits(&newSpec.Spec)
|
||||
|
||||
newSs, err := c.generateStatefulSet(&newSpec.Spec)
|
||||
if err != nil {
|
||||
c.logger.Errorf("could not generate new statefulset spec: %v", err)
|
||||
updateFailed = true
|
||||
return
|
||||
}
|
||||
if syncStatefulSet || !reflect.DeepEqual(oldSs, newSs) || !reflect.DeepEqual(oldSpec.Annotations, newSpec.Annotations) {
|
||||
if syncStatefulSet || !reflect.DeepEqual(oldSs, newSs) {
|
||||
c.logger.Debugf("syncing statefulsets")
|
||||
syncStatefulSet = false
|
||||
// TODO: avoid generating the StatefulSet object twice by passing it to syncStatefulSet
|
||||
|
|
@@ -1297,6 +1305,33 @@ func (c *Cluster) initRobotUsers() error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (c *Cluster) initAdditionalOwnerRoles() {
|
||||
for _, additionalOwner := range c.OpConfig.AdditionalOwnerRoles {
|
||||
// fetch all database owners the additional owner role should become a member of
|
||||
memberOf := make([]string, 0)
|
||||
for username, pgUser := range c.pgUsers {
|
||||
if pgUser.IsDbOwner {
|
||||
memberOf = append(memberOf, username)
|
||||
}
|
||||
}
|
||||
|
||||
if len(memberOf) > 1 {
|
||||
namespace := c.Namespace
|
||||
additionalOwnerPgUser := spec.PgUser{
|
||||
Origin: spec.RoleOriginSpilo,
|
||||
MemberOf: memberOf,
|
||||
Name: additionalOwner,
|
||||
Namespace: namespace,
|
||||
}
|
||||
if currentRole, present := c.pgUsers[additionalOwner]; present {
|
||||
c.pgUsers[additionalOwner] = c.resolveNameConflict(¤tRole, &additionalOwnerPgUser)
|
||||
} else {
|
||||
c.pgUsers[additionalOwner] = additionalOwnerPgUser
|
||||
}
|
||||
}
|
||||
}
|
||||
}
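
Assuming standard PostgreSQL role-membership semantics, the MemberOf list built above amounts to granting every database owner role to the additional owner role. A hypothetical sketch of the resulting statements; the SQL the operator actually issues during user sync may differ:

package main

import "fmt"

// grantStatements is illustrative only: it renders one GRANT per owner role.
func grantStatements(additionalOwner string, memberOf []string) []string {
    stmts := make([]string, 0, len(memberOf))
    for _, owner := range memberOf {
        stmts = append(stmts, fmt.Sprintf(`GRANT "%s" TO "%s";`, owner, additionalOwner))
    }
    return stmts
}

func main() {
    for _, s := range grantStatements("cron_admin", []string{"foo_owner", "bar_owner"}) {
        fmt.Println(s)
    }
    // GRANT "foo_owner" TO "cron_admin";
    // GRANT "bar_owner" TO "cron_admin";
}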
|
||||
|
||||
func (c *Cluster) initTeamMembers(teamID string, isPostgresSuperuserTeam bool) error {
|
||||
teamMembers, err := c.getTeamMembers(teamID)
|
||||
|
||||
|
|
@@ -1543,8 +1578,9 @@ func (c *Cluster) deletePatroniClusterObjects() error {
|
|||
|
||||
if !c.patroniKubernetesUseConfigMaps() {
|
||||
actionsList = append(actionsList, c.deletePatroniClusterEndpoints)
|
||||
} else {
|
||||
actionsList = append(actionsList, c.deletePatroniClusterServices, c.deletePatroniClusterConfigMaps)
|
||||
}
|
||||
actionsList = append(actionsList, c.deletePatroniClusterServices, c.deletePatroniClusterConfigMaps)
|
||||
|
||||
c.logger.Debugf("removing leftover Patroni objects (endpoints / services and configmaps)")
|
||||
for _, deleter := range actionsList {
|
||||
|
|
|
|||
|
|
@@ -3,6 +3,7 @@ package cluster
|
|||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
|
@@ -11,6 +12,7 @@ import (
|
|||
acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1"
|
||||
fakeacidv1 "github.com/zalando/postgres-operator/pkg/generated/clientset/versioned/fake"
|
||||
"github.com/zalando/postgres-operator/pkg/spec"
|
||||
"github.com/zalando/postgres-operator/pkg/util"
|
||||
"github.com/zalando/postgres-operator/pkg/util/config"
|
||||
"github.com/zalando/postgres-operator/pkg/util/constants"
|
||||
"github.com/zalando/postgres-operator/pkg/util/k8sutil"
|
||||
|
|
@@ -33,10 +35,11 @@ var cl = New(
|
|||
Config{
|
||||
OpConfig: config.Config{
|
||||
PodManagementPolicy: "ordered_ready",
|
||||
ProtectedRoles: []string{"admin"},
|
||||
ProtectedRoles: []string{"admin", "cron_admin", "part_man"},
|
||||
Auth: config.Auth{
|
||||
SuperUsername: superUserName,
|
||||
ReplicationUsername: replicationUserName,
|
||||
SuperUsername: superUserName,
|
||||
ReplicationUsername: replicationUserName,
|
||||
AdditionalOwnerRoles: []string{"cron_admin", "part_man"},
|
||||
},
|
||||
Resources: config.Resources{
|
||||
DownscalerAnnotations: []string{"downscaler/*"},
|
||||
|
|
@@ -44,7 +47,13 @@ var cl = New(
|
|||
},
|
||||
},
|
||||
k8sutil.NewMockKubernetesClient(),
|
||||
acidv1.Postgresql{ObjectMeta: metav1.ObjectMeta{Name: "acid-test", Namespace: "test", Annotations: map[string]string{"downscaler/downtime_replicas": "0"}}},
|
||||
acidv1.Postgresql{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "acid-test",
|
||||
Namespace: "test",
|
||||
Annotations: map[string]string{"downscaler/downtime_replicas": "0"},
|
||||
},
|
||||
},
|
||||
logger,
|
||||
eventRecorder,
|
||||
)
|
||||
|
|
@@ -53,7 +62,7 @@ func TestStatefulSetAnnotations(t *testing.T) {
|
|||
testName := "CheckStatefulsetAnnotations"
|
||||
spec := acidv1.PostgresSpec{
|
||||
TeamID: "myapp", NumberOfInstances: 1,
|
||||
Resources: acidv1.Resources{
|
||||
Resources: &acidv1.Resources{
|
||||
ResourceRequests: acidv1.ResourceDescription{CPU: "1", Memory: "10"},
|
||||
ResourceLimits: acidv1.ResourceDescription{CPU: "1", Memory: "10"},
|
||||
},
|
||||
|
|
@@ -132,6 +141,48 @@ func TestInitRobotUsers(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestInitAdditionalOwnerRoles(t *testing.T) {
|
||||
testName := "TestInitAdditionalOwnerRoles"
|
||||
|
||||
manifestUsers := map[string]acidv1.UserFlags{"foo_owner": {}, "bar_owner": {}, "app_user": {}}
|
||||
expectedUsers := map[string]spec.PgUser{
|
||||
"foo_owner": {Origin: spec.RoleOriginManifest, Name: "foo_owner", Namespace: cl.Namespace, Password: "f123", Flags: []string{"LOGIN"}, IsDbOwner: true},
|
||||
"bar_owner": {Origin: spec.RoleOriginManifest, Name: "bar_owner", Namespace: cl.Namespace, Password: "b123", Flags: []string{"LOGIN"}, IsDbOwner: true},
|
||||
"app_user": {Origin: spec.RoleOriginManifest, Name: "app_user", Namespace: cl.Namespace, Password: "a123", Flags: []string{"LOGIN"}, IsDbOwner: false},
|
||||
"cron_admin": {Origin: spec.RoleOriginSpilo, Name: "cron_admin", Namespace: cl.Namespace, MemberOf: []string{"foo_owner", "bar_owner"}},
|
||||
"part_man": {Origin: spec.RoleOriginSpilo, Name: "part_man", Namespace: cl.Namespace, MemberOf: []string{"foo_owner", "bar_owner"}},
|
||||
}
|
||||
|
||||
cl.Spec.Databases = map[string]string{"foo_db": "foo_owner", "bar_db": "bar_owner"}
|
||||
cl.Spec.Users = manifestUsers
|
||||
|
||||
// this should set IsDbOwner field for manifest users
|
||||
if err := cl.initRobotUsers(); err != nil {
|
||||
t.Errorf("%s could not init manifest users", testName)
|
||||
}
|
||||
|
||||
// update passwords to compare with result
|
||||
for manifestUser := range manifestUsers {
|
||||
pgUser := cl.pgUsers[manifestUser]
|
||||
pgUser.Password = manifestUser[0:1] + "123"
|
||||
cl.pgUsers[manifestUser] = pgUser
|
||||
}
|
||||
|
||||
cl.initAdditionalOwnerRoles()
|
||||
|
||||
for _, additionalOwnerRole := range cl.Config.OpConfig.AdditionalOwnerRoles {
|
||||
expectedPgUser := expectedUsers[additionalOwnerRole]
|
||||
existingPgUser, exists := cl.pgUsers[additionalOwnerRole]
|
||||
if !exists {
|
||||
t.Errorf("%s additional owner role %q not initilaized", testName, additionalOwnerRole)
|
||||
}
|
||||
if !util.IsEqualIgnoreOrder(expectedPgUser.MemberOf, existingPgUser.MemberOf) {
|
||||
t.Errorf("%s unexpected membership of additional owner role %q: expected member of %#v, got member of %#v",
|
||||
testName, additionalOwnerRole, expectedPgUser.MemberOf, existingPgUser.MemberOf)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type mockOAuthTokenGetter struct {
|
||||
}
|
||||
|
||||
|
|
@@ -1004,6 +1055,336 @@ func TestCompareEnv(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func newService(ann map[string]string, svcT v1.ServiceType, lbSr []string) *v1.Service {
|
||||
svc := &v1.Service{
|
||||
Spec: v1.ServiceSpec{
|
||||
Type: svcT,
|
||||
LoadBalancerSourceRanges: lbSr,
|
||||
},
|
||||
}
|
||||
svc.Annotations = ann
|
||||
return svc
|
||||
}
|
||||
|
||||
func TestCompareServices(t *testing.T) {
|
||||
testName := "TestCompareServices"
|
||||
cluster := Cluster{
|
||||
Config: Config{
|
||||
OpConfig: config.Config{
|
||||
Resources: config.Resources{
|
||||
IgnoredAnnotations: []string{
|
||||
"k8s.v1.cni.cncf.io/network-status",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
about string
|
||||
current *v1.Service
|
||||
new *v1.Service
|
||||
reason string
|
||||
match bool
|
||||
}{
|
||||
{
|
||||
about: "two equal services",
|
||||
current: newService(
|
||||
map[string]string{
|
||||
constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do",
|
||||
constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue,
|
||||
},
|
||||
v1.ServiceTypeClusterIP,
|
||||
[]string{"128.141.0.0/16", "137.138.0.0/16"}),
|
||||
new: newService(
|
||||
map[string]string{
|
||||
constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do",
|
||||
constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue,
|
||||
},
|
||||
v1.ServiceTypeClusterIP,
|
||||
[]string{"128.141.0.0/16", "137.138.0.0/16"}),
|
||||
match: true,
|
||||
},
|
||||
{
|
||||
about: "services differ on service type",
|
||||
current: newService(
|
||||
map[string]string{
|
||||
constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do",
|
||||
constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue,
|
||||
},
|
||||
v1.ServiceTypeClusterIP,
|
||||
[]string{"128.141.0.0/16", "137.138.0.0/16"}),
|
||||
new: newService(
|
||||
map[string]string{
|
||||
constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do",
|
||||
constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue,
|
||||
},
|
||||
v1.ServiceTypeLoadBalancer,
|
||||
[]string{"128.141.0.0/16", "137.138.0.0/16"}),
|
||||
match: false,
|
||||
reason: `new service's type "LoadBalancer" does not match the current one "ClusterIP"`,
|
||||
},
|
||||
{
|
||||
about: "services differ on lb source ranges",
|
||||
current: newService(
|
||||
map[string]string{
|
||||
constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do",
|
||||
constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue,
|
||||
},
|
||||
v1.ServiceTypeLoadBalancer,
|
||||
[]string{"128.141.0.0/16", "137.138.0.0/16"}),
|
||||
new: newService(
|
||||
map[string]string{
|
||||
constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do",
|
||||
constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue,
|
||||
},
|
||||
v1.ServiceTypeLoadBalancer,
|
||||
[]string{"185.249.56.0/22"}),
|
||||
match: false,
|
||||
reason: `new service's LoadBalancerSourceRange does not match the current one`,
|
||||
},
|
||||
{
|
||||
about: "new service doesn't have lb source ranges",
|
||||
current: newService(
|
||||
map[string]string{
|
||||
constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do",
|
||||
constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue,
|
||||
},
|
||||
v1.ServiceTypeLoadBalancer,
|
||||
[]string{"128.141.0.0/16", "137.138.0.0/16"}),
|
||||
new: newService(
|
||||
map[string]string{
|
||||
constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do",
|
||||
constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue,
|
||||
},
|
||||
v1.ServiceTypeLoadBalancer,
|
||||
[]string{}),
|
||||
match: false,
|
||||
reason: `new service's LoadBalancerSourceRange does not match the current one`,
|
||||
},
|
||||
{
|
||||
about: "services differ on DNS annotation",
|
||||
current: newService(
|
||||
map[string]string{
|
||||
constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do",
|
||||
constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue,
|
||||
},
|
||||
v1.ServiceTypeLoadBalancer,
|
||||
[]string{"128.141.0.0/16", "137.138.0.0/16"}),
|
||||
new: newService(
|
||||
map[string]string{
|
||||
constants.ZalandoDNSNameAnnotation: "new_clstr.acid.zalan.do",
|
||||
constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue,
|
||||
},
|
||||
v1.ServiceTypeLoadBalancer,
|
||||
[]string{"128.141.0.0/16", "137.138.0.0/16"}),
|
||||
match: false,
|
||||
reason: `new service's annotations does not match the current one: "external-dns.alpha.kubernetes.io/hostname" changed from "clstr.acid.zalan.do" to "new_clstr.acid.zalan.do".`,
|
||||
},
|
||||
{
|
||||
about: "services differ on AWS ELB annotation",
|
||||
current: newService(
|
||||
map[string]string{
|
||||
constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do",
|
||||
constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue,
|
||||
},
|
||||
v1.ServiceTypeLoadBalancer,
|
||||
[]string{"128.141.0.0/16", "137.138.0.0/16"}),
|
||||
new: newService(
|
||||
map[string]string{
|
||||
constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do",
|
||||
constants.ElbTimeoutAnnotationName: "1800",
|
||||
},
|
||||
v1.ServiceTypeLoadBalancer,
|
||||
[]string{"128.141.0.0/16", "137.138.0.0/16"}),
|
||||
match: false,
|
||||
reason: `new service's annotations does not match the current one: "service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout" changed from "3600" to "1800".`,
|
||||
},
|
||||
{
|
||||
about: "service changes existing annotation",
|
||||
current: newService(
|
||||
map[string]string{
|
||||
constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do",
|
||||
constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue,
|
||||
"foo": "bar",
|
||||
},
|
||||
v1.ServiceTypeLoadBalancer,
|
||||
[]string{"128.141.0.0/16", "137.138.0.0/16"}),
|
||||
new: newService(
|
||||
map[string]string{
|
||||
constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do",
|
||||
constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue,
|
||||
"foo": "baz",
|
||||
},
|
||||
v1.ServiceTypeLoadBalancer,
|
||||
[]string{"128.141.0.0/16", "137.138.0.0/16"}),
|
||||
match: false,
|
||||
reason: `new service's annotations does not match the current one: "foo" changed from "bar" to "baz".`,
|
||||
},
|
||||
{
|
||||
about: "service changes multiple existing annotations",
|
||||
current: newService(
|
||||
map[string]string{
|
||||
constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do",
|
||||
constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue,
|
||||
"foo": "bar",
|
||||
"bar": "foo",
|
||||
},
|
||||
v1.ServiceTypeLoadBalancer,
|
||||
[]string{"128.141.0.0/16", "137.138.0.0/16"}),
|
||||
new: newService(
|
||||
map[string]string{
|
||||
constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do",
|
||||
constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue,
|
||||
"foo": "baz",
|
||||
"bar": "fooz",
|
||||
},
|
||||
v1.ServiceTypeLoadBalancer,
|
||||
[]string{"128.141.0.0/16", "137.138.0.0/16"}),
|
||||
match: false,
|
||||
// Test just the prefix to avoid flakiness and map sorting
|
||||
reason: `new service's annotations does not match the current one:`,
|
||||
},
|
||||
{
|
||||
about: "service adds a new custom annotation",
|
||||
current: newService(
|
||||
map[string]string{
|
||||
constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do",
|
||||
constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue,
|
||||
},
|
||||
v1.ServiceTypeLoadBalancer,
|
||||
[]string{"128.141.0.0/16", "137.138.0.0/16"}),
|
||||
new: newService(
|
||||
map[string]string{
|
||||
constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do",
|
||||
constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue,
|
||||
"foo": "bar",
|
||||
},
|
||||
v1.ServiceTypeLoadBalancer,
|
||||
[]string{"128.141.0.0/16", "137.138.0.0/16"}),
|
||||
match: false,
|
||||
reason: `new service's annotations does not match the current one: Added "foo" with value "bar".`,
|
||||
},
|
||||
{
|
||||
about: "service removes a custom annotation",
|
||||
current: newService(
|
||||
map[string]string{
|
||||
constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do",
|
||||
constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue,
|
||||
"foo": "bar",
|
||||
},
|
||||
v1.ServiceTypeLoadBalancer,
|
||||
[]string{"128.141.0.0/16", "137.138.0.0/16"}),
|
||||
new: newService(
|
||||
map[string]string{
|
||||
constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do",
|
||||
constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue,
|
||||
},
|
||||
v1.ServiceTypeLoadBalancer,
|
||||
[]string{"128.141.0.0/16", "137.138.0.0/16"}),
|
||||
match: false,
|
||||
reason: `new service's annotations does not match the current one: Removed "foo".`,
|
||||
},
|
||||
{
|
||||
about: "service removes a custom annotation and adds a new one",
|
||||
current: newService(
|
||||
map[string]string{
|
||||
constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do",
|
||||
constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue,
|
||||
"foo": "bar",
|
||||
},
|
||||
v1.ServiceTypeLoadBalancer,
|
||||
[]string{"128.141.0.0/16", "137.138.0.0/16"}),
|
||||
new: newService(
|
||||
map[string]string{
|
||||
constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do",
|
||||
constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue,
|
||||
"bar": "foo",
|
||||
},
|
||||
v1.ServiceTypeLoadBalancer,
|
||||
[]string{"128.141.0.0/16", "137.138.0.0/16"}),
|
||||
match: false,
|
||||
reason: `new service's annotations does not match the current one: Removed "foo". Added "bar" with value "foo".`,
|
||||
},
|
||||
{
|
||||
about: "service removes a custom annotation, adds a new one and change another",
|
||||
current: newService(
|
||||
map[string]string{
|
||||
constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do",
|
||||
constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue,
|
||||
"foo": "bar",
|
||||
"zalan": "do",
|
||||
},
|
||||
v1.ServiceTypeLoadBalancer,
|
||||
[]string{"128.141.0.0/16", "137.138.0.0/16"}),
|
||||
new: newService(
|
||||
map[string]string{
|
||||
constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do",
|
||||
constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue,
|
||||
"bar": "foo",
|
||||
"zalan": "do.com",
|
||||
},
|
||||
v1.ServiceTypeLoadBalancer,
|
||||
[]string{"128.141.0.0/16", "137.138.0.0/16"}),
|
||||
match: false,
|
||||
// Test just the prefix to avoid flakiness and map sorting
|
||||
reason: `new service's annotations does not match the current one: Removed "foo".`,
|
||||
},
|
||||
{
|
||||
about: "service add annotations",
|
||||
current: newService(
|
||||
map[string]string{},
|
||||
v1.ServiceTypeLoadBalancer,
|
||||
[]string{"128.141.0.0/16", "137.138.0.0/16"}),
|
||||
new: newService(
|
||||
map[string]string{
|
||||
constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do",
|
||||
constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue,
|
||||
},
|
||||
v1.ServiceTypeLoadBalancer,
|
||||
[]string{"128.141.0.0/16", "137.138.0.0/16"}),
|
||||
match: false,
|
||||
// Test just the prefix to avoid flakiness and map sorting
|
||||
reason: `new service's annotations does not match the current one: Added `,
|
||||
},
|
||||
{
|
||||
about: "ignored annotations",
|
||||
current: newService(
|
||||
map[string]string{},
|
||||
v1.ServiceTypeLoadBalancer,
|
||||
[]string{"128.141.0.0/16", "137.138.0.0/16"}),
|
||||
new: newService(
|
||||
map[string]string{
|
||||
"k8s.v1.cni.cncf.io/network-status": "up",
|
||||
},
|
||||
v1.ServiceTypeLoadBalancer,
|
||||
[]string{"128.141.0.0/16", "137.138.0.0/16"}),
|
||||
match: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.about, func(t *testing.T) {
|
||||
match, reason := cluster.compareServices(tt.current, tt.new)
|
||||
if match && !tt.match {
|
||||
t.Logf("match=%v current=%v, old=%v reason=%s", match, tt.current.Annotations, tt.new.Annotations, reason)
|
||||
t.Errorf("%s - expected services to do not match: %q and %q", testName, tt.current, tt.new)
|
||||
return
|
||||
}
|
||||
if !match && tt.match {
|
||||
t.Errorf("%s - expected services to be the same: %q and %q", testName, tt.current, tt.new)
|
||||
return
|
||||
}
|
||||
if !match && !tt.match {
|
||||
if !strings.HasPrefix(reason, tt.reason) {
|
||||
t.Errorf("%s - expected reason prefix %s, found %s", testName, tt.reason, reason)
|
||||
return
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestCrossNamespacedSecrets(t *testing.T) {
|
||||
testName := "test secrets in different namespace"
|
||||
clientSet := fake.NewSimpleClientset()
|
||||
|
|
@@ -1041,7 +1422,7 @@ func TestCrossNamespacedSecrets(t *testing.T) {
|
|||
ConnectionPoolerDefaultCPULimit: "100m",
|
||||
ConnectionPoolerDefaultMemoryRequest: "100Mi",
|
||||
ConnectionPoolerDefaultMemoryLimit: "100Mi",
|
||||
NumberOfInstances: int32ToPointer(1),
|
||||
NumberOfInstances: k8sutil.Int32ToPointer(1),
|
||||
},
|
||||
PodManagementPolicy: "ordered_ready",
|
||||
Resources: config.Resources{
|
||||
|
|
|
|||
|
|
@@ -3,7 +3,6 @@ package cluster
|
|||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
|
||||
"github.com/r3labs/diff"
|
||||
|
|
@@ -211,9 +210,10 @@ func (c *Cluster) generateConnectionPoolerPodTemplate(role PostgresRole) (
|
|||
connectionPoolerSpec = &acidv1.ConnectionPooler{}
|
||||
}
|
||||
gracePeriod := int64(c.OpConfig.PodTerminateGracePeriod.Seconds())
|
||||
resources, err := generateResourceRequirements(
|
||||
resources, err := c.generateResourceRequirements(
|
||||
connectionPoolerSpec.Resources,
|
||||
makeDefaultConnectionPoolerResources(&c.OpConfig))
|
||||
makeDefaultConnectionPoolerResources(&c.OpConfig),
|
||||
connectionPoolerContainer)
|
||||
|
||||
effectiveDockerImage := util.Coalesce(
|
||||
connectionPoolerSpec.DockerImage,
|
||||
|
|
@@ -247,7 +247,7 @@ func (c *Cluster) generateConnectionPoolerPodTemplate(role PostgresRole) (
|
|||
},
|
||||
{
|
||||
Name: "PGPORT",
|
||||
Value: c.servicePort(role),
|
||||
Value: fmt.Sprint(c.servicePort(role)),
|
||||
},
|
||||
{
|
||||
Name: "PGUSER",
|
||||
|
|
@@ -344,7 +344,7 @@ func (c *Cluster) generateConnectionPoolerDeployment(connectionPooler *Connectio
|
|||
}
|
||||
|
||||
if *numberOfInstances < constants.ConnectionPoolerMinInstances {
|
||||
msg := "Adjusted number of connection pooler instances from %d to %d"
|
||||
msg := "adjusted number of connection pooler instances from %d to %d"
|
||||
c.logger.Warningf(msg, *numberOfInstances, constants.ConnectionPoolerMinInstances)
|
||||
|
||||
*numberOfInstances = constants.ConnectionPoolerMinInstances
|
||||
|
|
@@ -386,7 +386,7 @@ func (c *Cluster) generateConnectionPoolerService(connectionPooler *ConnectionPo
|
|||
{
|
||||
Name: connectionPooler.Name,
|
||||
Port: pgPort,
|
||||
TargetPort: intstr.IntOrString{StrVal: c.servicePort(connectionPooler.Role)},
|
||||
TargetPort: intstr.IntOrString{IntVal: c.servicePort(connectionPooler.Role)},
|
||||
},
|
||||
},
|
||||
Type: v1.ServiceTypeClusterIP,
|
||||
|
|
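The TargetPort change above matters because intstr.IntOrString only honours the field that matches its Type, and the zero value of Type is Int; a literal that sets just StrVal therefore serializes as 0. A small sketch of the difference, with an illustrative port value:

package main

import (
    "fmt"

    "k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
    wrong := intstr.IntOrString{StrVal: "5432"} // Type stays Int, so the string is ignored
    right := intstr.IntOrString{IntVal: 5432}   // matches the default Int type
    helper := intstr.FromInt(5432)              // sets Type and IntVal explicitly

    fmt.Println(wrong.String(), right.String(), helper.String()) // 0 5432 5432
}
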
@@ -395,6 +395,10 @@ func (c *Cluster) generateConnectionPoolerService(connectionPooler *ConnectionPo
|
|||
},
|
||||
}
|
||||
|
||||
if c.shouldCreateLoadBalancerForPoolerService(connectionPooler.Role, spec) {
|
||||
c.configureLoadBalanceService(&serviceSpec, spec.AllowedSourceRanges)
|
||||
}
|
||||
|
||||
service := &v1.Service{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: connectionPooler.Name,
|
||||
|
|
@@ -415,6 +419,29 @@ func (c *Cluster) generateConnectionPoolerService(connectionPooler *ConnectionPo
|
|||
return service
|
||||
}
|
||||
|
||||
func (c *Cluster) shouldCreateLoadBalancerForPoolerService(role PostgresRole, spec *acidv1.PostgresSpec) bool {
|
||||
|
||||
switch role {
|
||||
|
||||
case Replica:
|
||||
// if the value is explicitly set in a Postgresql manifest, follow this setting
|
||||
if spec.EnableReplicaPoolerLoadBalancer != nil {
|
||||
return *spec.EnableReplicaPoolerLoadBalancer
|
||||
}
|
||||
// otherwise, follow the operator configuration
|
||||
return c.OpConfig.EnableReplicaPoolerLoadBalancer
|
||||
|
||||
case Master:
|
||||
if spec.EnableMasterPoolerLoadBalancer != nil {
|
||||
return *spec.EnableMasterPoolerLoadBalancer
|
||||
}
|
||||
return c.OpConfig.EnableMasterPoolerLoadBalancer
|
||||
|
||||
default:
|
||||
panic(fmt.Sprintf("Unknown role %v", role))
|
||||
}
|
||||
}
|
||||
|
||||
// delete connection pooler
|
||||
func (c *Cluster) deleteConnectionPooler(role PostgresRole) (err error) {
|
||||
c.logger.Infof("deleting connection pooler spilo-role=%s", role)
|
||||
|
|
@@ -587,7 +614,7 @@ func (c *Cluster) needSyncConnectionPoolerDefaults(Config *Config, spec *acidv1.
|
|||
*deployment.Spec.Replicas != *config.NumberOfInstances {
|
||||
|
||||
sync = true
|
||||
msg := fmt.Sprintf("NumberOfInstances is different (having %d, required %d)",
|
||||
msg := fmt.Sprintf("numberOfInstances is different (having %d, required %d)",
|
||||
*deployment.Spec.Replicas, *config.NumberOfInstances)
|
||||
reasons = append(reasons, msg)
|
||||
}
|
||||
|
|
@@ -596,13 +623,14 @@ func (c *Cluster) needSyncConnectionPoolerDefaults(Config *Config, spec *acidv1.
|
|||
poolerContainer.Image != config.Image {
|
||||
|
||||
sync = true
|
||||
msg := fmt.Sprintf("DockerImage is different (having %s, required %s)",
|
||||
msg := fmt.Sprintf("dockerImage is different (having %s, required %s)",
|
||||
poolerContainer.Image, config.Image)
|
||||
reasons = append(reasons, msg)
|
||||
}
|
||||
|
||||
expectedResources, err := generateResourceRequirements(spec.Resources,
|
||||
makeDefaultConnectionPoolerResources(&Config.OpConfig))
|
||||
expectedResources, err := c.generateResourceRequirements(spec.Resources,
|
||||
makeDefaultConnectionPoolerResources(&Config.OpConfig),
|
||||
connectionPoolerContainer)
|
||||
|
||||
// An error to generate expected resources means something is not quite
|
||||
// right, but for the purpose of robustness do not panic here, just report
|
||||
|
|
@@ -610,7 +638,7 @@ func (c *Cluster) needSyncConnectionPoolerDefaults(Config *Config, spec *acidv1.
|
|||
// updates for new resource values).
|
||||
if err == nil && syncResources(&poolerContainer.Resources, expectedResources) {
|
||||
sync = true
|
||||
msg := fmt.Sprintf("Resources are different (having %+v, required %+v)",
|
||||
msg := fmt.Sprintf("resources are different (having %+v, required %+v)",
|
||||
poolerContainer.Resources, expectedResources)
|
||||
reasons = append(reasons, msg)
|
||||
}
|
||||
|
|
@@ -695,29 +723,6 @@ func (c *Cluster) syncConnectionPooler(oldSpec, newSpec *acidv1.Postgresql, Look
|
|||
var err error
|
||||
var connectionPoolerNeeded bool
|
||||
|
||||
needSync := !reflect.DeepEqual(oldSpec.Spec.ConnectionPooler, newSpec.Spec.ConnectionPooler)
|
||||
masterChanges, err := diff.Diff(oldSpec.Spec.EnableConnectionPooler, newSpec.Spec.EnableConnectionPooler)
|
||||
if err != nil {
|
||||
c.logger.Error("Error in getting diff of master connection pooler changes")
|
||||
}
|
||||
replicaChanges, err := diff.Diff(oldSpec.Spec.EnableReplicaConnectionPooler, newSpec.Spec.EnableReplicaConnectionPooler)
|
||||
if err != nil {
|
||||
c.logger.Error("Error in getting diff of replica connection pooler changes")
|
||||
}
|
||||
|
||||
// skip pooler sync when theres no diff or it's deactivated
|
||||
// but, handling the case when connectionPooler is not there but it is required
|
||||
// as per spec, hence do not skip syncing in that case, even though there
|
||||
// is no diff in specs
|
||||
if (!needSync && len(masterChanges) <= 0 && len(replicaChanges) <= 0) &&
|
||||
((!needConnectionPooler(&newSpec.Spec) && (c.ConnectionPooler == nil || !needConnectionPooler(&oldSpec.Spec))) ||
|
||||
(c.ConnectionPooler != nil && needConnectionPooler(&newSpec.Spec) &&
|
||||
((c.ConnectionPooler[Master] != nil && c.ConnectionPooler[Master].LookupFunction) ||
|
||||
(c.ConnectionPooler[Replica] != nil && c.ConnectionPooler[Replica].LookupFunction)))) {
|
||||
c.logger.Debugln("syncing pooler is not required")
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
logPoolerEssentials(c.logger, oldSpec, newSpec)
|
||||
|
||||
// Check and perform the sync requirements for each of the roles.
|
||||
|
|
@@ -754,7 +759,8 @@ func (c *Cluster) syncConnectionPooler(oldSpec, newSpec *acidv1.Postgresql, Look
 	// in between

 	// in this case also do not forget to install lookup function
-	if !c.ConnectionPooler[role].LookupFunction {
+	// skip installation in standby clusters, since they are read-only
+	if !c.ConnectionPooler[role].LookupFunction && c.Spec.StandbyCluster == nil {
 		connectionPooler := c.Spec.ConnectionPooler
 		specSchema := ""
 		specUser := ""
@ -811,31 +817,37 @@ func (c *Cluster) syncConnectionPooler(oldSpec, newSpec *acidv1.Postgresql, Look
|
|||
func (c *Cluster) syncConnectionPoolerWorker(oldSpec, newSpec *acidv1.Postgresql, role PostgresRole) (
|
||||
SyncReason, error) {
|
||||
|
||||
deployment, err := c.KubeClient.
|
||||
var (
|
||||
deployment *appsv1.Deployment
|
||||
newDeployment *appsv1.Deployment
|
||||
service *v1.Service
|
||||
newService *v1.Service
|
||||
err error
|
||||
)
|
||||
|
||||
syncReason := make([]string, 0)
|
||||
deployment, err = c.KubeClient.
|
||||
Deployments(c.Namespace).
|
||||
Get(context.TODO(), c.connectionPoolerName(role), metav1.GetOptions{})
|
||||
|
||||
if err != nil && k8sutil.ResourceNotFound(err) {
|
||||
msg := "deployment %s for connection pooler synchronization is not found, create it"
|
||||
c.logger.Warningf(msg, c.connectionPoolerName(role))
|
||||
c.logger.Warningf("deployment %s for connection pooler synchronization is not found, create it", c.connectionPoolerName(role))
|
||||
|
||||
deploymentSpec, err := c.generateConnectionPoolerDeployment(c.ConnectionPooler[role])
|
||||
newDeployment, err = c.generateConnectionPoolerDeployment(c.ConnectionPooler[role])
|
||||
if err != nil {
|
||||
msg = "could not generate deployment for connection pooler: %v"
|
||||
return NoSync, fmt.Errorf(msg, err)
|
||||
return NoSync, fmt.Errorf("could not generate deployment for connection pooler: %v", err)
|
||||
}
|
||||
|
||||
deployment, err := c.KubeClient.
|
||||
Deployments(deploymentSpec.Namespace).
|
||||
Create(context.TODO(), deploymentSpec, metav1.CreateOptions{})
|
||||
deployment, err = c.KubeClient.
|
||||
Deployments(newDeployment.Namespace).
|
||||
Create(context.TODO(), newDeployment, metav1.CreateOptions{})
|
||||
|
||||
if err != nil {
|
||||
return NoSync, err
|
||||
}
|
||||
c.ConnectionPooler[role].Deployment = deployment
|
||||
} else if err != nil {
|
||||
msg := "could not get connection pooler deployment to sync: %v"
|
||||
return NoSync, fmt.Errorf(msg, err)
|
||||
return NoSync, fmt.Errorf("could not get connection pooler deployment to sync: %v", err)
|
||||
} else {
|
||||
c.ConnectionPooler[role].Deployment = deployment
|
||||
// actual synchronization
|
||||
|
|
@ -865,25 +877,24 @@ func (c *Cluster) syncConnectionPoolerWorker(oldSpec, newSpec *acidv1.Postgresql
|
|||
|
||||
if oldSpec != nil {
|
||||
specSync, specReason = needSyncConnectionPoolerSpecs(oldConnectionPooler, newConnectionPooler, c.logger)
|
||||
syncReason = append(syncReason, specReason...)
|
||||
}
|
||||
|
||||
defaultsSync, defaultsReason := c.needSyncConnectionPoolerDefaults(&c.Config, newConnectionPooler, deployment)
|
||||
reason := append(specReason, defaultsReason...)
|
||||
syncReason = append(syncReason, defaultsReason...)
|
||||
|
||||
if specSync || defaultsSync {
|
||||
c.logger.Infof("Update connection pooler deployment %s, reason: %+v",
|
||||
c.connectionPoolerName(role), reason)
|
||||
newDeploymentSpec, err := c.generateConnectionPoolerDeployment(c.ConnectionPooler[role])
|
||||
c.logger.Infof("update connection pooler deployment %s, reason: %+v",
|
||||
c.connectionPoolerName(role), syncReason)
|
||||
newDeployment, err = c.generateConnectionPoolerDeployment(c.ConnectionPooler[role])
|
||||
if err != nil {
|
||||
msg := "could not generate deployment for connection pooler: %v"
|
||||
return reason, fmt.Errorf(msg, err)
|
||||
return syncReason, fmt.Errorf("could not generate deployment for connection pooler: %v", err)
|
||||
}
|
||||
|
||||
deployment, err := updateConnectionPoolerDeployment(c.KubeClient,
|
||||
newDeploymentSpec)
|
||||
deployment, err = updateConnectionPoolerDeployment(c.KubeClient, newDeployment)
|
||||
|
||||
if err != nil {
|
||||
return reason, err
|
||||
return syncReason, err
|
||||
}
|
||||
c.ConnectionPooler[role].Deployment = deployment
|
||||
}
|
||||
|
|
@ -898,31 +909,38 @@ func (c *Cluster) syncConnectionPoolerWorker(oldSpec, newSpec *acidv1.Postgresql
|
|||
c.ConnectionPooler[role].Deployment = deployment
|
||||
}
|
||||
|
||||
service, err := c.KubeClient.
|
||||
Services(c.Namespace).
|
||||
Get(context.TODO(), c.connectionPoolerName(role), metav1.GetOptions{})
|
||||
|
||||
if err != nil && k8sutil.ResourceNotFound(err) {
|
||||
msg := "Service %s for connection pooler synchronization is not found, create it"
|
||||
c.logger.Warningf(msg, c.connectionPoolerName(role))
|
||||
|
||||
serviceSpec := c.generateConnectionPoolerService(c.ConnectionPooler[role])
|
||||
service, err := c.KubeClient.
|
||||
Services(serviceSpec.Namespace).
|
||||
Create(context.TODO(), serviceSpec, metav1.CreateOptions{})
|
||||
|
||||
if err != nil {
|
||||
return NoSync, err
|
||||
if service, err = c.KubeClient.Services(c.Namespace).Get(context.TODO(), c.connectionPoolerName(role), metav1.GetOptions{}); err == nil {
|
||||
c.ConnectionPooler[role].Service = service
|
||||
desiredSvc := c.generateConnectionPoolerService(c.ConnectionPooler[role])
|
||||
if match, reason := c.compareServices(service, desiredSvc); !match {
|
||||
syncReason = append(syncReason, reason)
|
||||
c.logServiceChanges(role, service, desiredSvc, false, reason)
|
||||
newService, err = c.updateService(role, service, desiredSvc)
|
||||
if err != nil {
|
||||
return syncReason, fmt.Errorf("could not update %s service to match desired state: %v", role, err)
|
||||
}
|
||||
c.ConnectionPooler[role].Service = newService
|
||||
c.logger.Infof("%s service %q is in the desired state now", role, util.NameFromMeta(desiredSvc.ObjectMeta))
|
||||
}
|
||||
c.ConnectionPooler[role].Service = service
|
||||
|
||||
} else if err != nil {
|
||||
msg := "could not get connection pooler service to sync: %v"
|
||||
return NoSync, fmt.Errorf(msg, err)
|
||||
} else {
|
||||
// Service updates are not supported and probably not that useful anyway
|
||||
c.ConnectionPooler[role].Service = service
|
||||
return NoSync, nil
|
||||
}
|
||||
|
||||
if !k8sutil.ResourceNotFound(err) {
|
||||
return NoSync, fmt.Errorf("could not get connection pooler service to sync: %v", err)
|
||||
}
|
||||
|
||||
c.ConnectionPooler[role].Service = nil
|
||||
c.logger.Warningf("service %s for connection pooler synchronization is not found, create it", c.connectionPoolerName(role))
|
||||
|
||||
serviceSpec := c.generateConnectionPoolerService(c.ConnectionPooler[role])
|
||||
newService, err = c.KubeClient.
|
||||
Services(serviceSpec.Namespace).
|
||||
Create(context.TODO(), serviceSpec, metav1.CreateOptions{})
|
||||
|
||||
if err != nil {
|
||||
return NoSync, err
|
||||
}
|
||||
c.ConnectionPooler[role].Service = newService
|
||||
|
||||
return NoSync, nil
|
||||
}
|
||||
|
|
|
|||
|
|
@ -27,10 +27,6 @@ func boolToPointer(value bool) *bool {
 	return &value
 }

-func int32ToPointer(value int32) *int32 {
-	return &value
-}
-
 func deploymentUpdated(cluster *Cluster, err error, reason SyncReason) error {
 	for _, role := range [2]PostgresRole{Master, Replica} {
@ -294,7 +290,7 @@ func TestConnectionPoolerCreateDeletion(t *testing.T) {
 				ConnectionPoolerDefaultCPULimit:      "100m",
 				ConnectionPoolerDefaultMemoryRequest: "100Mi",
 				ConnectionPoolerDefaultMemoryLimit:   "100Mi",
-				NumberOfInstances:                    int32ToPointer(1),
+				NumberOfInstances:                    k8sutil.Int32ToPointer(1),
 			},
 			PodManagementPolicy: "ordered_ready",
 			Resources: config.Resources{
@ -401,7 +397,7 @@ func TestConnectionPoolerSync(t *testing.T) {
 				ConnectionPoolerDefaultCPULimit:      "100m",
 				ConnectionPoolerDefaultMemoryRequest: "100Mi",
 				ConnectionPoolerDefaultMemoryLimit:   "100Mi",
-				NumberOfInstances:                    int32ToPointer(1),
+				NumberOfInstances:                    k8sutil.Int32ToPointer(1),
 			},
 			PodManagementPolicy: "ordered_ready",
 			Resources: config.Resources{
@ -639,7 +635,7 @@ func TestConnectionPoolerSync(t *testing.T) {
 	for _, tt := range tests {
 		tt.cluster.OpConfig.ConnectionPooler.Image = tt.defaultImage
 		tt.cluster.OpConfig.ConnectionPooler.NumberOfInstances =
-			int32ToPointer(tt.defaultInstances)
+			k8sutil.Int32ToPointer(tt.defaultInstances)

 		t.Logf("running test for %s [%s]", testName, tt.subTest)

@ -664,7 +660,7 @@ func TestConnectionPoolerPodSpec(t *testing.T) {
 				ReplicationUsername: replicationUserName,
 			},
 			ConnectionPooler: config.ConnectionPooler{
-				MaxDBConnections:                     int32ToPointer(60),
+				MaxDBConnections:                     k8sutil.Int32ToPointer(60),
 				ConnectionPoolerDefaultCPURequest:    "100m",
 				ConnectionPoolerDefaultCPULimit:      "100m",
 				ConnectionPoolerDefaultMemoryRequest: "100Mi",
|
|||
|
|
@ -37,6 +37,8 @@ const (
 	patroniPGBinariesParameterName = "bin_dir"
 	patroniPGHBAConfParameterName  = "pg_hba"
 	localHost                      = "127.0.0.1/32"
+	scalyrSidecarName              = "scalyr-sidecar"
+	logicalBackupContainerName     = "logical-backup"
 	connectionPoolerContainer      = "connection-pooler"
 	pgPort                         = 5432
 )
@ -102,24 +104,22 @@ func (c *Cluster) serviceAddress(role PostgresRole) string {
 	return ""
 }

-func (c *Cluster) servicePort(role PostgresRole) string {
+func (c *Cluster) servicePort(role PostgresRole) int32 {
 	service, exist := c.Services[role]

 	if exist {
-		return fmt.Sprint(service.Spec.Ports[0].Port)
+		return service.Spec.Ports[0].Port
 	}

-	c.logger.Warningf("No service for role %s", role)
-	return ""
+	c.logger.Warningf("No service for role %s - defaulting to port 5432", role)
+	return pgPort
 }

 func (c *Cluster) podDisruptionBudgetName() string {
 	return c.OpConfig.PDBNameFormat.Format("cluster", c.Name)
 }

-func (c *Cluster) makeDefaultResources() acidv1.Resources {
-
-	config := c.OpConfig
+func makeDefaultResources(config *config.Config) acidv1.Resources {

 	defaultRequests := acidv1.ResourceDescription{
 		CPU: config.Resources.DefaultCPURequest,
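Note (not part of the commit): a minimal caller sketch of the changed servicePort, assuming serviceAddress still returns the host as a string; dsnForRole is a hypothetical helper that only illustrates that callers now receive an int32 with a 5432 fallback instead of a possibly empty string.

// Hypothetical caller; serviceAddress and servicePort are the methods shown above.
func (c *Cluster) dsnForRole(role PostgresRole) string {
	// servicePort now returns int32 and falls back to pgPort (5432) when the service is missing,
	// so no fmt.Sprint conversion and no empty-string handling is needed here.
	return fmt.Sprintf("host=%s port=%d dbname=postgres", c.serviceAddress(role), c.servicePort(role))
}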
@ -136,25 +136,61 @@ func (c *Cluster) makeDefaultResources() acidv1.Resources {
 	}
 }

-func generateResourceRequirements(resources acidv1.Resources, defaultResources acidv1.Resources) (*v1.ResourceRequirements, error) {
-	var err error
-
-	specRequests := resources.ResourceRequests
-	specLimits := resources.ResourceLimits
-
-	result := v1.ResourceRequirements{}
-
-	result.Requests, err = fillResourceList(specRequests, defaultResources.ResourceRequests)
-	if err != nil {
-		return nil, fmt.Errorf("could not fill resource requests: %v", err)
-	}
-
-	result.Limits, err = fillResourceList(specLimits, defaultResources.ResourceLimits)
-	if err != nil {
-		return nil, fmt.Errorf("could not fill resource limits: %v", err)
-	}
-
-	return &result, nil
+func (c *Cluster) enforceMinResourceLimits(resources *v1.ResourceRequirements) error {
+	var (
+		isSmaller bool
+		err       error
+		msg       string
+	)
+
+	// setting limits too low can cause unnecessary evictions / OOM kills
+	cpuLimit := resources.Limits[v1.ResourceCPU]
+	minCPULimit := c.OpConfig.MinCPULimit
+	if minCPULimit != "" {
+		isSmaller, err = util.IsSmallerQuantity(cpuLimit.String(), minCPULimit)
+		if err != nil {
+			return fmt.Errorf("could not compare defined CPU limit %s for %q container with configured minimum value %s: %v",
+				cpuLimit.String(), constants.PostgresContainerName, minCPULimit, err)
+		}
+		if isSmaller {
+			msg = fmt.Sprintf("defined CPU limit %s for %q container is below required minimum %s and will be increased",
+				cpuLimit.String(), constants.PostgresContainerName, minCPULimit)
+			c.logger.Warningf(msg)
+			c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeWarning, "ResourceLimits", msg)
+			resources.Limits[v1.ResourceCPU], _ = resource.ParseQuantity(minCPULimit)
+		}
+	}
+
+	memoryLimit := resources.Limits[v1.ResourceMemory]
+	minMemoryLimit := c.OpConfig.MinMemoryLimit
+	if minMemoryLimit != "" {
+		isSmaller, err = util.IsSmallerQuantity(memoryLimit.String(), minMemoryLimit)
+		if err != nil {
+			return fmt.Errorf("could not compare defined memory limit %s for %q container with configured minimum value %s: %v",
+				memoryLimit.String(), constants.PostgresContainerName, minMemoryLimit, err)
+		}
+		if isSmaller {
+			msg = fmt.Sprintf("defined memory limit %s for %q container is below required minimum %s and will be increased",
+				memoryLimit.String(), constants.PostgresContainerName, minMemoryLimit)
+			c.logger.Warningf(msg)
+			c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeWarning, "ResourceLimits", msg)
+			resources.Limits[v1.ResourceMemory], _ = resource.ParseQuantity(minMemoryLimit)
+		}
+	}
+
+	return nil
+}
+
+func setMemoryRequestToLimit(resources *v1.ResourceRequirements, containerName string, logger *logrus.Entry) {
+
+	requests := resources.Requests[v1.ResourceMemory]
+	limits := resources.Limits[v1.ResourceMemory]
+	isSmaller := requests.Cmp(limits) == -1
+	if isSmaller {
+		logger.Warningf("memory request of %s for %q container is increased to match memory limit of %s",
+			requests.String(), containerName, limits.String())
+		resources.Requests[v1.ResourceMemory] = limits
+	}
 }

 func fillResourceList(spec acidv1.ResourceDescription, defaults acidv1.ResourceDescription) (v1.ResourceList, error) {
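Aside (illustrative only): the minimum-limit enforcement above boils down to a quantity comparison; a self-contained sketch, assuming util.IsSmallerQuantity is equivalent to comparing parsed resource.Quantity values.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	// A manifest asks for a 200m CPU limit while the operator config sets min_cpu_limit to 250m.
	defined := resource.MustParse("200m")
	minimum := resource.MustParse("250m")

	// Cmp returns -1 when the defined limit is smaller than the configured minimum,
	// which is the case where enforceMinResourceLimits bumps the limit and emits a warning event.
	if defined.Cmp(minimum) == -1 {
		fmt.Printf("limit %s is below minimum %s, raising it\n", defined.String(), minimum.String())
		defined = minimum
	}
	fmt.Println("effective CPU limit:", defined.String())
}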
@ -187,6 +223,44 @@ func fillResourceList(spec acidv1.ResourceDescription, defaults acidv1.ResourceD
 	return requests, nil
 }

+func (c *Cluster) generateResourceRequirements(
+	resources *acidv1.Resources,
+	defaultResources acidv1.Resources,
+	containerName string) (*v1.ResourceRequirements, error) {
+	var err error
+	specRequests := acidv1.ResourceDescription{}
+	specLimits := acidv1.ResourceDescription{}
+	result := v1.ResourceRequirements{}
+
+	if resources != nil {
+		specRequests = resources.ResourceRequests
+		specLimits = resources.ResourceLimits
+	}
+
+	result.Requests, err = fillResourceList(specRequests, defaultResources.ResourceRequests)
+	if err != nil {
+		return nil, fmt.Errorf("could not fill resource requests: %v", err)
+	}
+
+	result.Limits, err = fillResourceList(specLimits, defaultResources.ResourceLimits)
+	if err != nil {
+		return nil, fmt.Errorf("could not fill resource limits: %v", err)
+	}
+
+	// enforce minimum cpu and memory limits for Postgres containers only
+	if containerName == constants.PostgresContainerName {
+		if err = c.enforceMinResourceLimits(&result); err != nil {
+			return nil, fmt.Errorf("could not enforce minimum resource limits: %v", err)
+		}
+	}
+
+	if c.OpConfig.SetMemoryRequestToLimit {
+		setMemoryRequestToLimit(&result, containerName, c.logger)
+	}
+
+	return &result, nil
+}
+
 func generateSpiloJSONConfiguration(pg *acidv1.PostgresqlParam, patroni *acidv1.Patroni, pamRoleName string, EnablePgVersionEnvVar bool, logger *logrus.Entry) (string, error) {
 	config := spiloConfiguration{}
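Aside (illustrative only): with the nil-safe spec handling plus fillResourceList, an empty or partial resources section in the manifest is padded with the operator defaults; a standalone sketch of that fallback rule (fillOrDefault is a made-up stand-in, not the real helper).

package main

import "fmt"

// Illustrative only: mimics how fillResourceList is expected to fall back to
// operator defaults when a field is left empty in the manifest.
func fillOrDefault(specValue, defaultValue string) string {
	if specValue == "" {
		return defaultValue
	}
	return specValue
}

func main() {
	// Manifest defines only requests; limits come from default_cpu_limit / default_memory_limit.
	cpuRequest := fillOrDefault("50m", "100m")
	cpuLimit := fillOrDefault("", "1")
	memLimit := fillOrDefault("", "500Mi")
	fmt.Println(cpuRequest, cpuLimit, memLimit) // 50m 1 500Mi
}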
@ -507,22 +581,20 @@ func generateContainer(
|
|||
}
|
||||
}
|
||||
|
||||
func generateSidecarContainers(sidecars []acidv1.Sidecar,
|
||||
defaultResources acidv1.Resources, startIndex int, logger *logrus.Entry) ([]v1.Container, error) {
|
||||
func (c *Cluster) generateSidecarContainers(sidecars []acidv1.Sidecar,
|
||||
defaultResources acidv1.Resources, startIndex int) ([]v1.Container, error) {
|
||||
|
||||
if len(sidecars) > 0 {
|
||||
result := make([]v1.Container, 0)
|
||||
for index, sidecar := range sidecars {
|
||||
var resourcesSpec acidv1.Resources
|
||||
if sidecar.Resources == nil {
|
||||
resourcesSpec = acidv1.Resources{}
|
||||
} else {
|
||||
sidecar.Resources.DeepCopyInto(&resourcesSpec)
|
||||
}
|
||||
|
||||
resources, err := generateResourceRequirements(
|
||||
makeResources(
|
||||
sidecar.Resources.ResourceRequests.CPU,
|
||||
sidecar.Resources.ResourceRequests.Memory,
|
||||
sidecar.Resources.ResourceLimits.CPU,
|
||||
sidecar.Resources.ResourceLimits.Memory,
|
||||
),
|
||||
defaultResources,
|
||||
)
|
||||
resources, err := c.generateResourceRequirements(&resourcesSpec, defaultResources, sidecar.Name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
|
@ -997,63 +1069,9 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
|
|||
additionalVolumes = spec.AdditionalVolumes
|
||||
)
|
||||
|
||||
// Improve me. Please.
|
||||
if c.OpConfig.SetMemoryRequestToLimit {
|
||||
|
||||
// controller adjusts the default memory request at operator startup
|
||||
|
||||
request := spec.Resources.ResourceRequests.Memory
|
||||
if request == "" {
|
||||
request = c.OpConfig.Resources.DefaultMemoryRequest
|
||||
}
|
||||
|
||||
limit := spec.Resources.ResourceLimits.Memory
|
||||
if limit == "" {
|
||||
limit = c.OpConfig.Resources.DefaultMemoryLimit
|
||||
}
|
||||
|
||||
isSmaller, err := util.IsSmallerQuantity(request, limit)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if isSmaller {
|
||||
c.logger.Warningf("The memory request of %v for the Postgres container is increased to match the memory limit of %v.", request, limit)
|
||||
spec.Resources.ResourceRequests.Memory = limit
|
||||
|
||||
}
|
||||
|
||||
// controller adjusts the Scalyr sidecar request at operator startup
|
||||
// as this sidecar is managed separately
|
||||
|
||||
// adjust sidecar containers defined for that particular cluster
|
||||
for _, sidecar := range spec.Sidecars {
|
||||
|
||||
// TODO #413
|
||||
sidecarRequest := sidecar.Resources.ResourceRequests.Memory
|
||||
if request == "" {
|
||||
request = c.OpConfig.Resources.DefaultMemoryRequest
|
||||
}
|
||||
|
||||
sidecarLimit := sidecar.Resources.ResourceLimits.Memory
|
||||
if limit == "" {
|
||||
limit = c.OpConfig.Resources.DefaultMemoryLimit
|
||||
}
|
||||
|
||||
isSmaller, err := util.IsSmallerQuantity(sidecarRequest, sidecarLimit)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if isSmaller {
|
||||
c.logger.Warningf("The memory request of %v for the %v sidecar container is increased to match the memory limit of %v.", sidecar.Resources.ResourceRequests.Memory, sidecar.Name, sidecar.Resources.ResourceLimits.Memory)
|
||||
sidecar.Resources.ResourceRequests.Memory = sidecar.Resources.ResourceLimits.Memory
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
defaultResources := c.makeDefaultResources()
|
||||
|
||||
resourceRequirements, err := generateResourceRequirements(spec.Resources, defaultResources)
|
||||
defaultResources := makeDefaultResources(&c.OpConfig)
|
||||
resourceRequirements, err := c.generateResourceRequirements(
|
||||
spec.Resources, defaultResources, constants.PostgresContainerName)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not generate resource requirements: %v", err)
|
||||
}
|
||||
|
|
@ -1101,7 +1119,7 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
 	// backward compatible check for InitContainers
 	if spec.InitContainersOld != nil {
-		msg := "Manifest parameter init_containers is deprecated."
+		msg := "manifest parameter init_containers is deprecated."
 		if spec.InitContainers == nil {
 			c.logger.Warningf("%s Consider using initContainers instead.", msg)
 			spec.InitContainers = spec.InitContainersOld
@ -1112,7 +1130,7 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
 	// backward compatible check for PodPriorityClassName
 	if spec.PodPriorityClassNameOld != "" {
-		msg := "Manifest parameter pod_priority_class_name is deprecated."
+		msg := "manifest parameter pod_priority_class_name is deprecated."
 		if spec.PodPriorityClassName == "" {
 			c.logger.Warningf("%s Consider using podPriorityClassName instead.", msg)
 			spec.PodPriorityClassName = spec.PodPriorityClassNameOld
@ -1230,7 +1248,7 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
|
|||
c.logger.Warningf("sidecars specified but disabled in configuration - next statefulset creation would fail")
|
||||
}
|
||||
|
||||
if clusterSpecificSidecars, err = generateSidecarContainers(spec.Sidecars, defaultResources, 0, c.logger); err != nil {
|
||||
if clusterSpecificSidecars, err = c.generateSidecarContainers(spec.Sidecars, defaultResources, 0); err != nil {
|
||||
return nil, fmt.Errorf("could not generate sidecar containers: %v", err)
|
||||
}
|
||||
}
|
||||
|
|
@ -1241,7 +1259,7 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
|
|||
for name, dockerImage := range c.OpConfig.SidecarImages {
|
||||
globalSidecarsByDockerImage = append(globalSidecarsByDockerImage, acidv1.Sidecar{Name: name, DockerImage: dockerImage})
|
||||
}
|
||||
if globalSidecarContainersByDockerImage, err = generateSidecarContainers(globalSidecarsByDockerImage, defaultResources, len(clusterSpecificSidecars), c.logger); err != nil {
|
||||
if globalSidecarContainersByDockerImage, err = c.generateSidecarContainers(globalSidecarsByDockerImage, defaultResources, len(clusterSpecificSidecars)); err != nil {
|
||||
return nil, fmt.Errorf("could not generate sidecar containers: %v", err)
|
||||
}
|
||||
// make the resulting list reproducible
|
||||
|
|
@ -1254,7 +1272,7 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
|
|||
// generate scalyr sidecar container
|
||||
var scalyrSidecars []v1.Container
|
||||
if scalyrSidecar, err :=
|
||||
generateScalyrSidecarSpec(c.Name,
|
||||
c.generateScalyrSidecarSpec(c.Name,
|
||||
c.OpConfig.ScalyrAPIKey,
|
||||
c.OpConfig.ScalyrServerURL,
|
||||
c.OpConfig.ScalyrImage,
|
||||
|
|
@ -1262,8 +1280,7 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
|
|||
c.OpConfig.ScalyrMemoryRequest,
|
||||
c.OpConfig.ScalyrCPULimit,
|
||||
c.OpConfig.ScalyrMemoryLimit,
|
||||
defaultResources,
|
||||
c.logger); err != nil {
|
||||
defaultResources); err != nil {
|
||||
return nil, fmt.Errorf("could not generate Scalyr sidecar: %v", err)
|
||||
} else {
|
||||
if scalyrSidecar != nil {
|
||||
|
|
@ -1373,12 +1390,12 @@ func (c *Cluster) generatePodAnnotations(spec *acidv1.PostgresSpec) map[string]s
|
|||
return annotations
|
||||
}
|
||||
|
||||
func generateScalyrSidecarSpec(clusterName, APIKey, serverURL, dockerImage string,
|
||||
func (c *Cluster) generateScalyrSidecarSpec(clusterName, APIKey, serverURL, dockerImage string,
|
||||
scalyrCPURequest string, scalyrMemoryRequest string, scalyrCPULimit string, scalyrMemoryLimit string,
|
||||
defaultResources acidv1.Resources, logger *logrus.Entry) (*v1.Container, error) {
|
||||
defaultResources acidv1.Resources) (*v1.Container, error) {
|
||||
if APIKey == "" || dockerImage == "" {
|
||||
if APIKey == "" && dockerImage != "" {
|
||||
logger.Warning("Not running Scalyr sidecar: SCALYR_API_KEY must be defined")
|
||||
c.logger.Warning("Not running Scalyr sidecar: SCALYR_API_KEY must be defined")
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
|
|
@ -1388,7 +1405,8 @@ func generateScalyrSidecarSpec(clusterName, APIKey, serverURL, dockerImage strin
|
|||
scalyrCPULimit,
|
||||
scalyrMemoryLimit,
|
||||
)
|
||||
resourceRequirementsScalyrSidecar, err := generateResourceRequirements(resourcesScalyrSidecar, defaultResources)
|
||||
resourceRequirementsScalyrSidecar, err := c.generateResourceRequirements(
|
||||
&resourcesScalyrSidecar, defaultResources, scalyrSidecarName)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid resources for Scalyr sidecar: %v", err)
|
||||
}
|
||||
|
|
@ -1406,7 +1424,7 @@ func generateScalyrSidecarSpec(clusterName, APIKey, serverURL, dockerImage strin
|
|||
env = append(env, v1.EnvVar{Name: "SCALYR_SERVER_URL", Value: serverURL})
|
||||
}
|
||||
return &v1.Container{
|
||||
Name: "scalyr-sidecar",
|
||||
Name: scalyrSidecarName,
|
||||
Image: dockerImage,
|
||||
Env: env,
|
||||
ImagePullPolicy: v1.PullIfNotPresent,
|
||||
|
|
@ -1505,13 +1523,13 @@ func (c *Cluster) addAdditionalVolumes(podSpec *v1.PodSpec,
 	mountPaths := map[string]acidv1.AdditionalVolume{}
 	for i, additionalVolume := range additionalVolumes {
 		if previousVolume, exist := mountPaths[additionalVolume.MountPath]; exist {
-			msg := "Volume %+v cannot be mounted to the same path as %+v"
+			msg := "volume %+v cannot be mounted to the same path as %+v"
 			c.logger.Warningf(msg, additionalVolume, previousVolume)
 			continue
 		}

 		if additionalVolume.MountPath == constants.PostgresDataMount {
-			msg := "Cannot mount volume on postgresql data directory, %+v"
+			msg := "cannot mount volume on postgresql data directory, %+v"
 			c.logger.Warningf(msg, additionalVolume)
 			continue
 		}
@ -1524,7 +1542,7 @@ func (c *Cluster) addAdditionalVolumes(podSpec *v1.PodSpec,
|
|||
|
||||
for _, target := range additionalVolume.TargetContainers {
|
||||
if target == "all" && len(additionalVolume.TargetContainers) != 1 {
|
||||
msg := `Target containers could be either "all" or a list
|
||||
msg := `target containers could be either "all" or a list
|
||||
of containers, mixing those is not allowed, %+v`
|
||||
c.logger.Warningf(msg, additionalVolume)
|
||||
continue
|
||||
|
|
@ -1623,7 +1641,7 @@ func (c *Cluster) generateUserSecrets() map[string]*v1.Secret {
|
|||
func (c *Cluster) generateSingleUserSecret(namespace string, pgUser spec.PgUser) *v1.Secret {
|
||||
//Skip users with no password i.e. human users (they'll be authenticated using pam)
|
||||
if pgUser.Password == "" {
|
||||
if pgUser.Origin != spec.RoleOriginTeamsAPI {
|
||||
if pgUser.Origin != spec.RoleOriginTeamsAPI && pgUser.Origin != spec.RoleOriginSpilo {
|
||||
c.logger.Warningf("could not generate secret for a non-teamsAPI role %q: role has no password",
|
||||
pgUser.Name)
|
||||
}
|
||||
|
|
@ -1700,23 +1718,7 @@ func (c *Cluster) generateService(role PostgresRole, spec *acidv1.PostgresSpec)
 	}

 	if c.shouldCreateLoadBalancerForService(role, spec) {
-
-		// spec.AllowedSourceRanges evaluates to the empty slice of zero length
-		// when omitted or set to 'null'/empty sequence in the PG manifest
-		if len(spec.AllowedSourceRanges) > 0 {
-			serviceSpec.LoadBalancerSourceRanges = spec.AllowedSourceRanges
-		} else {
-			// safe default value: lock a load balancer only to the local address unless overridden explicitly
-			serviceSpec.LoadBalancerSourceRanges = []string{localHost}
-		}
-
-		c.logger.Debugf("final load balancer source ranges as seen in a service spec (not necessarily applied): %q", serviceSpec.LoadBalancerSourceRanges)
-		serviceSpec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyType(c.OpConfig.ExternalTrafficPolicy)
-		serviceSpec.Type = v1.ServiceTypeLoadBalancer
-	} else if role == Replica {
-		// before PR #258, the replica service was only created if allocated a LB
-		// now we always create the service but warn if the LB is absent
-		c.logger.Debugf("No load balancer created for the replica service")
+		c.configureLoadBalanceService(&serviceSpec, spec.AllowedSourceRanges)
 	}

 	service := &v1.Service{
@ -1732,6 +1734,21 @@ func (c *Cluster) generateService(role PostgresRole, spec *acidv1.PostgresSpec)
 	return service
 }

+func (c *Cluster) configureLoadBalanceService(serviceSpec *v1.ServiceSpec, sourceRanges []string) {
+	// spec.AllowedSourceRanges evaluates to the empty slice of zero length
+	// when omitted or set to 'null'/empty sequence in the PG manifest
+	if len(sourceRanges) > 0 {
+		serviceSpec.LoadBalancerSourceRanges = sourceRanges
+	} else {
+		// safe default value: lock a load balancer only to the local address unless overridden explicitly
+		serviceSpec.LoadBalancerSourceRanges = []string{localHost}
+	}
+
+	c.logger.Debugf("final load balancer source ranges as seen in a service spec (not necessarily applied): %q", serviceSpec.LoadBalancerSourceRanges)
+	serviceSpec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyType(c.OpConfig.ExternalTrafficPolicy)
+	serviceSpec.Type = v1.ServiceTypeLoadBalancer
+}
+
 func (c *Cluster) generateServiceAnnotations(role PostgresRole, spec *acidv1.PostgresSpec) map[string]string {
 	annotations := make(map[string]string)
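Aside (illustrative only): a standalone sketch of the extracted load-balancer configuration logic, without the Cluster receiver and logging; configureLB is a made-up name used here instead of the real method.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// Illustrative stand-in for configureLoadBalanceService without the Cluster receiver.
func configureLB(spec *v1.ServiceSpec, sourceRanges []string, externalTrafficPolicy string) {
	if len(sourceRanges) > 0 {
		spec.LoadBalancerSourceRanges = sourceRanges
	} else {
		// same safe default as the operator: only the local address unless overridden explicitly
		spec.LoadBalancerSourceRanges = []string{"127.0.0.1/32"}
	}
	spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyType(externalTrafficPolicy)
	spec.Type = v1.ServiceTypeLoadBalancer
}

func main() {
	spec := v1.ServiceSpec{}
	configureLB(&spec, nil, "Cluster")
	fmt.Println(spec.Type, spec.LoadBalancerSourceRanges) // LoadBalancer [127.0.0.1/32]
}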
@ -1815,11 +1832,11 @@ func (c *Cluster) generateCloneEnvironment(description *acidv1.CloneDescription)
|
|||
})
|
||||
} else {
|
||||
// cloning with S3, find out the bucket to clone
|
||||
msg := "Clone from S3 bucket"
|
||||
msg := "clone from S3 bucket"
|
||||
c.logger.Info(msg, description.S3WalPath)
|
||||
|
||||
if description.S3WalPath == "" {
|
||||
msg := "Figure out which S3 bucket to use from env"
|
||||
msg := "figure out which S3 bucket to use from env"
|
||||
c.logger.Info(msg, description.S3WalPath)
|
||||
|
||||
if c.OpConfig.WALES3Bucket != "" {
|
||||
|
|
@ -1863,7 +1880,7 @@ func (c *Cluster) generateCloneEnvironment(description *acidv1.CloneDescription)
|
|||
|
||||
result = append(result, envs...)
|
||||
} else {
|
||||
msg := "Use custom parsed S3WalPath %s from the manifest"
|
||||
msg := "use custom parsed S3WalPath %s from the manifest"
|
||||
c.logger.Warningf(msg, description.S3WalPath)
|
||||
|
||||
result = append(result, v1.EnvVar{
|
||||
|
|
@ -2000,15 +2017,15 @@ func (c *Cluster) generateLogicalBackupJob() (*batchv1beta1.CronJob, error) {
 	c.logger.Debug("Generating logical backup pod template")

 	// allocate for the backup pod the same amount of resources as for normal DB pods
-	defaultResources := c.makeDefaultResources()
-	resourceRequirements, err = generateResourceRequirements(c.Spec.Resources, defaultResources)
+	resourceRequirements, err = c.generateResourceRequirements(
+		c.Spec.Resources, makeDefaultResources(&c.OpConfig), logicalBackupContainerName)
 	if err != nil {
 		return nil, fmt.Errorf("could not generate resource requirements for logical backup pods: %v", err)
 	}

 	envVars := c.generateLogicalBackupPodEnvVars()
 	logicalBackupContainer := generateContainer(
-		"logical-backup",
+		logicalBackupContainerName,
 		&c.OpConfig.LogicalBackup.LogicalBackupDockerImage,
 		resourceRequirements,
 		envVars,
@ -2145,6 +2162,10 @@ func (c *Cluster) generateLogicalBackupPodEnvVars() []v1.EnvVar {
 			Name:  "LOGICAL_BACKUP_S3_SSE",
 			Value: c.OpConfig.LogicalBackup.LogicalBackupS3SSE,
 		},
+		{
+			Name:  "LOGICAL_BACKUP_S3_RETENTION_TIME",
+			Value: c.OpConfig.LogicalBackup.LogicalBackupS3RetentionTime,
+		},
 		{
 			Name:  "LOGICAL_BACKUP_S3_BUCKET_SCOPE_SUFFIX",
 			Value: getBucketScopeSuffix(string(c.Postgresql.GetUID())),
|
|
|||
|
|
@ -27,8 +27,10 @@ import (
|
|||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apimachinery/pkg/util/intstr"
|
||||
"k8s.io/client-go/kubernetes/fake"
|
||||
v1core "k8s.io/client-go/kubernetes/typed/core/v1"
|
||||
"k8s.io/client-go/tools/record"
|
||||
)
|
||||
|
||||
func newFakeK8sTestClient() (k8sutil.KubernetesClient, *fake.Clientset) {
|
||||
|
|
@ -991,7 +993,7 @@ func TestNodeAffinity(t *testing.T) {
|
|||
makeSpec := func(nodeAffinity *v1.NodeAffinity) acidv1.PostgresSpec {
|
||||
return acidv1.PostgresSpec{
|
||||
TeamID: "myapp", NumberOfInstances: 1,
|
||||
Resources: acidv1.Resources{
|
||||
Resources: &acidv1.Resources{
|
||||
ResourceRequests: acidv1.ResourceDescription{CPU: "1", Memory: "10"},
|
||||
ResourceLimits: acidv1.ResourceDescription{CPU: "1", Memory: "10"},
|
||||
},
|
||||
|
|
@ -1087,7 +1089,7 @@ func TestTLS(t *testing.T) {
|
|||
},
|
||||
Spec: acidv1.PostgresSpec{
|
||||
TeamID: "myapp", NumberOfInstances: 1,
|
||||
Resources: acidv1.Resources{
|
||||
Resources: &acidv1.Resources{
|
||||
ResourceRequests: acidv1.ResourceDescription{CPU: "1", Memory: "10"},
|
||||
ResourceLimits: acidv1.ResourceDescription{CPU: "1", Memory: "10"},
|
||||
},
|
||||
|
|
@ -1206,7 +1208,7 @@ func TestAdditionalVolume(t *testing.T) {
|
|||
},
|
||||
Spec: acidv1.PostgresSpec{
|
||||
TeamID: "myapp", NumberOfInstances: 1,
|
||||
Resources: acidv1.Resources{
|
||||
Resources: &acidv1.Resources{
|
||||
ResourceRequests: acidv1.ResourceDescription{CPU: "1", Memory: "10"},
|
||||
ResourceLimits: acidv1.ResourceDescription{CPU: "1", Memory: "10"},
|
||||
},
|
||||
|
|
@ -1312,7 +1314,7 @@ func TestSidecars(t *testing.T) {
|
|||
},
|
||||
},
|
||||
TeamID: "myapp", NumberOfInstances: 1,
|
||||
Resources: acidv1.Resources{
|
||||
Resources: &acidv1.Resources{
|
||||
ResourceRequests: acidv1.ResourceDescription{CPU: "1", Memory: "10"},
|
||||
ResourceLimits: acidv1.ResourceDescription{CPU: "1", Memory: "10"},
|
||||
},
|
||||
|
|
@ -1325,7 +1327,7 @@ func TestSidecars(t *testing.T) {
|
|||
},
|
||||
acidv1.Sidecar{
|
||||
Name: "cluster-specific-sidecar-with-resources",
|
||||
Resources: acidv1.Resources{
|
||||
Resources: &acidv1.Resources{
|
||||
ResourceRequests: acidv1.ResourceDescription{CPU: "210m", Memory: "0.8Gi"},
|
||||
ResourceLimits: acidv1.ResourceDescription{CPU: "510m", Memory: "1.4Gi"},
|
||||
},
|
||||
|
|
@ -1487,7 +1489,7 @@ func TestGenerateService(t *testing.T) {
|
|||
var enableLB bool = true
|
||||
spec = acidv1.PostgresSpec{
|
||||
TeamID: "myapp", NumberOfInstances: 1,
|
||||
Resources: acidv1.Resources{
|
||||
Resources: &acidv1.Resources{
|
||||
ResourceRequests: acidv1.ResourceDescription{CPU: "1", Memory: "10"},
|
||||
ResourceLimits: acidv1.ResourceDescription{CPU: "1", Memory: "10"},
|
||||
},
|
||||
|
|
@ -1500,7 +1502,7 @@ func TestGenerateService(t *testing.T) {
|
|||
},
|
||||
acidv1.Sidecar{
|
||||
Name: "cluster-specific-sidecar-with-resources",
|
||||
Resources: acidv1.Resources{
|
||||
Resources: &acidv1.Resources{
|
||||
ResourceRequests: acidv1.ResourceDescription{CPU: "210m", Memory: "0.8Gi"},
|
||||
ResourceLimits: acidv1.ResourceDescription{CPU: "510m", Memory: "1.4Gi"},
|
||||
},
|
||||
|
|
@ -1561,6 +1563,501 @@ func TestGenerateService(t *testing.T) {
|
|||
|
||||
}
|
||||
|
||||
func newLBFakeClient() (k8sutil.KubernetesClient, *fake.Clientset) {
|
||||
clientSet := fake.NewSimpleClientset()
|
||||
|
||||
return k8sutil.KubernetesClient{
|
||||
DeploymentsGetter: clientSet.AppsV1(),
|
||||
ServicesGetter: clientSet.CoreV1(),
|
||||
}, clientSet
|
||||
}
|
||||
|
||||
func getServices(serviceType v1.ServiceType, sourceRanges []string, extTrafficPolicy, clusterName string) []v1.ServiceSpec {
|
||||
return []v1.ServiceSpec{
|
||||
v1.ServiceSpec{
|
||||
ExternalTrafficPolicy: v1.ServiceExternalTrafficPolicyType(extTrafficPolicy),
|
||||
LoadBalancerSourceRanges: sourceRanges,
|
||||
Ports: []v1.ServicePort{{Name: "postgresql", Port: 5432, TargetPort: intstr.IntOrString{IntVal: 5432}}},
|
||||
Type: serviceType,
|
||||
},
|
||||
v1.ServiceSpec{
|
||||
ExternalTrafficPolicy: v1.ServiceExternalTrafficPolicyType(extTrafficPolicy),
|
||||
LoadBalancerSourceRanges: sourceRanges,
|
||||
Ports: []v1.ServicePort{{Name: clusterName + "-pooler", Port: 5432, TargetPort: intstr.IntOrString{IntVal: 5432}}},
|
||||
Selector: map[string]string{"connection-pooler": clusterName + "-pooler"},
|
||||
Type: serviceType,
|
||||
},
|
||||
v1.ServiceSpec{
|
||||
ExternalTrafficPolicy: v1.ServiceExternalTrafficPolicyType(extTrafficPolicy),
|
||||
LoadBalancerSourceRanges: sourceRanges,
|
||||
Ports: []v1.ServicePort{{Name: "postgresql", Port: 5432, TargetPort: intstr.IntOrString{IntVal: 5432}}},
|
||||
Selector: map[string]string{"spilo-role": "replica", "application": "spilo", "cluster-name": clusterName},
|
||||
Type: serviceType,
|
||||
},
|
||||
v1.ServiceSpec{
|
||||
ExternalTrafficPolicy: v1.ServiceExternalTrafficPolicyType(extTrafficPolicy),
|
||||
LoadBalancerSourceRanges: sourceRanges,
|
||||
Ports: []v1.ServicePort{{Name: clusterName + "-pooler-repl", Port: 5432, TargetPort: intstr.IntOrString{IntVal: 5432}}},
|
||||
Selector: map[string]string{"connection-pooler": clusterName + "-pooler-repl"},
|
||||
Type: serviceType,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func TestEnableLoadBalancers(t *testing.T) {
|
||||
testName := "Test enabling LoadBalancers"
|
||||
client, _ := newLBFakeClient()
|
||||
clusterName := "acid-test-cluster"
|
||||
namespace := "default"
|
||||
clusterNameLabel := "cluster-name"
|
||||
roleLabel := "spilo-role"
|
||||
roles := []PostgresRole{Master, Replica}
|
||||
sourceRanges := []string{"192.186.1.2/22"}
|
||||
extTrafficPolicy := "Cluster"
|
||||
|
||||
tests := []struct {
|
||||
subTest string
|
||||
config config.Config
|
||||
pgSpec acidv1.Postgresql
|
||||
expectedServices []v1.ServiceSpec
|
||||
}{
|
||||
{
|
||||
subTest: "LBs enabled in config, disabled in manifest",
|
||||
config: config.Config{
|
||||
ConnectionPooler: config.ConnectionPooler{
|
||||
ConnectionPoolerDefaultCPURequest: "100m",
|
||||
ConnectionPoolerDefaultCPULimit: "100m",
|
||||
ConnectionPoolerDefaultMemoryRequest: "100Mi",
|
||||
ConnectionPoolerDefaultMemoryLimit: "100Mi",
|
||||
NumberOfInstances: k8sutil.Int32ToPointer(1),
|
||||
},
|
||||
EnableMasterLoadBalancer: true,
|
||||
EnableMasterPoolerLoadBalancer: true,
|
||||
EnableReplicaLoadBalancer: true,
|
||||
EnableReplicaPoolerLoadBalancer: true,
|
||||
ExternalTrafficPolicy: extTrafficPolicy,
|
||||
Resources: config.Resources{
|
||||
ClusterLabels: map[string]string{"application": "spilo"},
|
||||
ClusterNameLabel: clusterNameLabel,
|
||||
PodRoleLabel: roleLabel,
|
||||
},
|
||||
},
|
||||
pgSpec: acidv1.Postgresql{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: clusterName,
|
||||
Namespace: namespace,
|
||||
},
|
||||
Spec: acidv1.PostgresSpec{
|
||||
AllowedSourceRanges: sourceRanges,
|
||||
EnableConnectionPooler: util.True(),
|
||||
EnableReplicaConnectionPooler: util.True(),
|
||||
EnableMasterLoadBalancer: util.False(),
|
||||
EnableMasterPoolerLoadBalancer: util.False(),
|
||||
EnableReplicaLoadBalancer: util.False(),
|
||||
EnableReplicaPoolerLoadBalancer: util.False(),
|
||||
NumberOfInstances: 1,
|
||||
Resources: &acidv1.Resources{
|
||||
ResourceRequests: acidv1.ResourceDescription{CPU: "1", Memory: "10"},
|
||||
ResourceLimits: acidv1.ResourceDescription{CPU: "1", Memory: "10"},
|
||||
},
|
||||
TeamID: "acid",
|
||||
Volume: acidv1.Volume{
|
||||
Size: "1G",
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedServices: getServices(v1.ServiceTypeClusterIP, nil, "", clusterName),
|
||||
},
|
||||
{
|
||||
subTest: "LBs enabled in manifest, disabled in config",
|
||||
config: config.Config{
|
||||
ConnectionPooler: config.ConnectionPooler{
|
||||
ConnectionPoolerDefaultCPURequest: "100m",
|
||||
ConnectionPoolerDefaultCPULimit: "100m",
|
||||
ConnectionPoolerDefaultMemoryRequest: "100Mi",
|
||||
ConnectionPoolerDefaultMemoryLimit: "100Mi",
|
||||
NumberOfInstances: k8sutil.Int32ToPointer(1),
|
||||
},
|
||||
EnableMasterLoadBalancer: false,
|
||||
EnableMasterPoolerLoadBalancer: false,
|
||||
EnableReplicaLoadBalancer: false,
|
||||
EnableReplicaPoolerLoadBalancer: false,
|
||||
ExternalTrafficPolicy: extTrafficPolicy,
|
||||
Resources: config.Resources{
|
||||
ClusterLabels: map[string]string{"application": "spilo"},
|
||||
ClusterNameLabel: clusterNameLabel,
|
||||
PodRoleLabel: roleLabel,
|
||||
},
|
||||
},
|
||||
pgSpec: acidv1.Postgresql{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: clusterName,
|
||||
Namespace: namespace,
|
||||
},
|
||||
Spec: acidv1.PostgresSpec{
|
||||
AllowedSourceRanges: sourceRanges,
|
||||
EnableConnectionPooler: util.True(),
|
||||
EnableReplicaConnectionPooler: util.True(),
|
||||
EnableMasterLoadBalancer: util.True(),
|
||||
EnableMasterPoolerLoadBalancer: util.True(),
|
||||
EnableReplicaLoadBalancer: util.True(),
|
||||
EnableReplicaPoolerLoadBalancer: util.True(),
|
||||
NumberOfInstances: 1,
|
||||
Resources: &acidv1.Resources{
|
||||
ResourceRequests: acidv1.ResourceDescription{CPU: "1", Memory: "10"},
|
||||
ResourceLimits: acidv1.ResourceDescription{CPU: "1", Memory: "10"},
|
||||
},
|
||||
TeamID: "acid",
|
||||
Volume: acidv1.Volume{
|
||||
Size: "1G",
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedServices: getServices(v1.ServiceTypeLoadBalancer, sourceRanges, extTrafficPolicy, clusterName),
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
var cluster = New(
|
||||
Config{
|
||||
OpConfig: tt.config,
|
||||
}, client, tt.pgSpec, logger, eventRecorder)
|
||||
|
||||
cluster.Name = clusterName
|
||||
cluster.Namespace = namespace
|
||||
cluster.ConnectionPooler = map[PostgresRole]*ConnectionPoolerObjects{}
|
||||
generatedServices := make([]v1.ServiceSpec, 0)
|
||||
for _, role := range roles {
|
||||
cluster.syncService(role)
|
||||
cluster.ConnectionPooler[role] = &ConnectionPoolerObjects{
|
||||
Name: cluster.connectionPoolerName(role),
|
||||
ClusterName: cluster.ClusterName,
|
||||
Namespace: cluster.Namespace,
|
||||
Role: role,
|
||||
}
|
||||
cluster.syncConnectionPoolerWorker(&tt.pgSpec, &tt.pgSpec, role)
|
||||
generatedServices = append(generatedServices, cluster.Services[role].Spec)
|
||||
generatedServices = append(generatedServices, cluster.ConnectionPooler[role].Service.Spec)
|
||||
}
|
||||
if !reflect.DeepEqual(tt.expectedServices, generatedServices) {
|
||||
t.Errorf("%s %s: expected %#v but got %#v", testName, tt.subTest, tt.expectedServices, generatedServices)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestGenerateResourceRequirements(t *testing.T) {
|
||||
testName := "TestGenerateResourceRequirements"
|
||||
client, _ := newFakeK8sTestClient()
|
||||
clusterName := "acid-test-cluster"
|
||||
namespace := "default"
|
||||
clusterNameLabel := "cluster-name"
|
||||
roleLabel := "spilo-role"
|
||||
sidecarName := "postgres-exporter"
|
||||
|
||||
// two test cases will call enforceMinResourceLimits which emits 2 events per call
|
||||
// hence bufferSize of 4 is required
|
||||
newEventRecorder := record.NewFakeRecorder(4)
|
||||
|
||||
configResources := config.Resources{
|
||||
ClusterLabels: map[string]string{"application": "spilo"},
|
||||
ClusterNameLabel: clusterNameLabel,
|
||||
DefaultCPURequest: "100m",
|
||||
DefaultCPULimit: "1",
|
||||
DefaultMemoryRequest: "100Mi",
|
||||
DefaultMemoryLimit: "500Mi",
|
||||
MinCPULimit: "250m",
|
||||
MinMemoryLimit: "250Mi",
|
||||
PodRoleLabel: roleLabel,
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
subTest string
|
||||
config config.Config
|
||||
pgSpec acidv1.Postgresql
|
||||
expectedResources acidv1.Resources
|
||||
}{
|
||||
{
|
||||
subTest: "test generation of default resources when empty in manifest",
|
||||
config: config.Config{
|
||||
Resources: configResources,
|
||||
PodManagementPolicy: "ordered_ready",
|
||||
SetMemoryRequestToLimit: false,
|
||||
},
|
||||
pgSpec: acidv1.Postgresql{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: clusterName,
|
||||
Namespace: namespace,
|
||||
},
|
||||
Spec: acidv1.PostgresSpec{
|
||||
TeamID: "acid",
|
||||
Volume: acidv1.Volume{
|
||||
Size: "1G",
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedResources: acidv1.Resources{
|
||||
ResourceRequests: acidv1.ResourceDescription{CPU: "100m", Memory: "100Mi"},
|
||||
ResourceLimits: acidv1.ResourceDescription{CPU: "1", Memory: "500Mi"},
|
||||
},
|
||||
},
|
||||
{
|
||||
subTest: "test generation of default resources for sidecar",
|
||||
config: config.Config{
|
||||
Resources: configResources,
|
||||
PodManagementPolicy: "ordered_ready",
|
||||
SetMemoryRequestToLimit: false,
|
||||
},
|
||||
pgSpec: acidv1.Postgresql{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: clusterName,
|
||||
Namespace: namespace,
|
||||
},
|
||||
Spec: acidv1.PostgresSpec{
|
||||
Sidecars: []acidv1.Sidecar{
|
||||
acidv1.Sidecar{
|
||||
Name: sidecarName,
|
||||
},
|
||||
},
|
||||
TeamID: "acid",
|
||||
Volume: acidv1.Volume{
|
||||
Size: "1G",
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedResources: acidv1.Resources{
|
||||
ResourceRequests: acidv1.ResourceDescription{CPU: "100m", Memory: "100Mi"},
|
||||
ResourceLimits: acidv1.ResourceDescription{CPU: "1", Memory: "500Mi"},
|
||||
},
|
||||
},
|
||||
{
|
||||
subTest: "test generation of resources when only requests are defined in manifest",
|
||||
config: config.Config{
|
||||
Resources: configResources,
|
||||
PodManagementPolicy: "ordered_ready",
|
||||
SetMemoryRequestToLimit: false,
|
||||
},
|
||||
pgSpec: acidv1.Postgresql{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: clusterName,
|
||||
Namespace: namespace,
|
||||
},
|
||||
Spec: acidv1.PostgresSpec{
|
||||
Resources: &acidv1.Resources{
|
||||
ResourceRequests: acidv1.ResourceDescription{CPU: "50m", Memory: "50Mi"},
|
||||
},
|
||||
TeamID: "acid",
|
||||
Volume: acidv1.Volume{
|
||||
Size: "1G",
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedResources: acidv1.Resources{
|
||||
ResourceRequests: acidv1.ResourceDescription{CPU: "50m", Memory: "50Mi"},
|
||||
ResourceLimits: acidv1.ResourceDescription{CPU: "1", Memory: "500Mi"},
|
||||
},
|
||||
},
|
||||
{
|
||||
subTest: "test generation of resources when only memory is defined in manifest",
|
||||
config: config.Config{
|
||||
Resources: configResources,
|
||||
PodManagementPolicy: "ordered_ready",
|
||||
SetMemoryRequestToLimit: false,
|
||||
},
|
||||
pgSpec: acidv1.Postgresql{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: clusterName,
|
||||
Namespace: namespace,
|
||||
},
|
||||
Spec: acidv1.PostgresSpec{
|
||||
Resources: &acidv1.Resources{
|
||||
ResourceRequests: acidv1.ResourceDescription{Memory: "100Mi"},
|
||||
ResourceLimits: acidv1.ResourceDescription{Memory: "1Gi"},
|
||||
},
|
||||
TeamID: "acid",
|
||||
Volume: acidv1.Volume{
|
||||
Size: "1G",
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedResources: acidv1.Resources{
|
||||
ResourceRequests: acidv1.ResourceDescription{CPU: "100m", Memory: "100Mi"},
|
||||
ResourceLimits: acidv1.ResourceDescription{CPU: "1", Memory: "1Gi"},
|
||||
},
|
||||
},
|
||||
{
|
||||
subTest: "test SetMemoryRequestToLimit flag",
|
||||
config: config.Config{
|
||||
Resources: configResources,
|
||||
PodManagementPolicy: "ordered_ready",
|
||||
SetMemoryRequestToLimit: true,
|
||||
},
|
||||
pgSpec: acidv1.Postgresql{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: clusterName,
|
||||
Namespace: namespace,
|
||||
},
|
||||
Spec: acidv1.PostgresSpec{
|
||||
TeamID: "acid",
|
||||
Volume: acidv1.Volume{
|
||||
Size: "1G",
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedResources: acidv1.Resources{
|
||||
ResourceRequests: acidv1.ResourceDescription{CPU: "100m", Memory: "500Mi"},
|
||||
ResourceLimits: acidv1.ResourceDescription{CPU: "1", Memory: "500Mi"},
|
||||
},
|
||||
},
|
||||
{
|
||||
subTest: "test SetMemoryRequestToLimit flag for sidecar container, too",
|
||||
config: config.Config{
|
||||
Resources: configResources,
|
||||
PodManagementPolicy: "ordered_ready",
|
||||
SetMemoryRequestToLimit: true,
|
||||
},
|
||||
pgSpec: acidv1.Postgresql{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: clusterName,
|
||||
Namespace: namespace,
|
||||
},
|
||||
Spec: acidv1.PostgresSpec{
|
||||
Sidecars: []acidv1.Sidecar{
|
||||
acidv1.Sidecar{
|
||||
Name: sidecarName,
|
||||
Resources: &acidv1.Resources{
|
||||
ResourceRequests: acidv1.ResourceDescription{CPU: "10m", Memory: "10Mi"},
|
||||
ResourceLimits: acidv1.ResourceDescription{CPU: "100m", Memory: "100Mi"},
|
||||
},
|
||||
},
|
||||
},
|
||||
TeamID: "acid",
|
||||
Volume: acidv1.Volume{
|
||||
Size: "1G",
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedResources: acidv1.Resources{
|
||||
ResourceRequests: acidv1.ResourceDescription{CPU: "10m", Memory: "100Mi"},
|
||||
ResourceLimits: acidv1.ResourceDescription{CPU: "100m", Memory: "100Mi"},
|
||||
},
|
||||
},
|
||||
{
|
||||
subTest: "test generating resources from manifest",
|
||||
config: config.Config{
|
||||
Resources: configResources,
|
||||
PodManagementPolicy: "ordered_ready",
|
||||
SetMemoryRequestToLimit: false,
|
||||
},
|
||||
pgSpec: acidv1.Postgresql{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: clusterName,
|
||||
Namespace: namespace,
|
||||
},
|
||||
Spec: acidv1.PostgresSpec{
|
||||
Resources: &acidv1.Resources{
|
||||
ResourceRequests: acidv1.ResourceDescription{CPU: "10m", Memory: "250Mi"},
|
||||
ResourceLimits: acidv1.ResourceDescription{CPU: "400m", Memory: "800Mi"},
|
||||
},
|
||||
TeamID: "acid",
|
||||
Volume: acidv1.Volume{
|
||||
Size: "1G",
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedResources: acidv1.Resources{
|
||||
ResourceRequests: acidv1.ResourceDescription{CPU: "10m", Memory: "250Mi"},
|
||||
ResourceLimits: acidv1.ResourceDescription{CPU: "400m", Memory: "800Mi"},
|
||||
},
|
||||
},
|
||||
{
|
||||
subTest: "test enforcing min cpu and memory limit",
|
||||
config: config.Config{
|
||||
Resources: configResources,
|
||||
PodManagementPolicy: "ordered_ready",
|
||||
SetMemoryRequestToLimit: false,
|
||||
},
|
||||
pgSpec: acidv1.Postgresql{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: clusterName,
|
||||
Namespace: namespace,
|
||||
},
|
||||
Spec: acidv1.PostgresSpec{
|
||||
Resources: &acidv1.Resources{
|
||||
ResourceRequests: acidv1.ResourceDescription{CPU: "100m", Memory: "100Mi"},
|
||||
ResourceLimits: acidv1.ResourceDescription{CPU: "200m", Memory: "200Mi"},
|
||||
},
|
||||
TeamID: "acid",
|
||||
Volume: acidv1.Volume{
|
||||
Size: "1G",
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedResources: acidv1.Resources{
|
||||
ResourceRequests: acidv1.ResourceDescription{CPU: "100m", Memory: "100Mi"},
|
||||
ResourceLimits: acidv1.ResourceDescription{CPU: "250m", Memory: "250Mi"},
|
||||
},
|
||||
},
|
||||
{
|
||||
subTest: "test min cpu and memory limit are not enforced on sidecar",
|
||||
config: config.Config{
|
||||
Resources: configResources,
|
||||
PodManagementPolicy: "ordered_ready",
|
||||
SetMemoryRequestToLimit: false,
|
||||
},
|
||||
pgSpec: acidv1.Postgresql{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: clusterName,
|
||||
Namespace: namespace,
|
||||
},
|
||||
Spec: acidv1.PostgresSpec{
|
||||
Sidecars: []acidv1.Sidecar{
|
||||
acidv1.Sidecar{
|
||||
Name: sidecarName,
|
||||
Resources: &acidv1.Resources{
|
||||
ResourceRequests: acidv1.ResourceDescription{CPU: "10m", Memory: "10Mi"},
|
||||
ResourceLimits: acidv1.ResourceDescription{CPU: "100m", Memory: "100Mi"},
|
||||
},
|
||||
},
|
||||
},
|
||||
TeamID: "acid",
|
||||
Volume: acidv1.Volume{
|
||||
Size: "1G",
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedResources: acidv1.Resources{
|
||||
ResourceRequests: acidv1.ResourceDescription{CPU: "10m", Memory: "10Mi"},
|
||||
ResourceLimits: acidv1.ResourceDescription{CPU: "100m", Memory: "100Mi"},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
var cluster = New(
|
||||
Config{
|
||||
OpConfig: tt.config,
|
||||
}, client, tt.pgSpec, logger, newEventRecorder)
|
||||
|
||||
cluster.Name = clusterName
|
||||
cluster.Namespace = namespace
|
||||
_, err := cluster.createStatefulSet()
|
||||
if k8sutil.ResourceAlreadyExists(err) {
|
||||
err = cluster.syncStatefulSet()
|
||||
}
|
||||
assert.NoError(t, err)
|
||||
|
||||
containers := cluster.Statefulset.Spec.Template.Spec.Containers
|
||||
clusterResources, err := parseResourceRequirements(containers[0].Resources)
|
||||
if len(containers) > 1 {
|
||||
clusterResources, err = parseResourceRequirements(containers[1].Resources)
|
||||
}
|
||||
assert.NoError(t, err)
|
||||
if !reflect.DeepEqual(tt.expectedResources, clusterResources) {
|
||||
t.Errorf("%s - %s: expected %#v but got %#v", testName, tt.subTest, tt.expectedResources, clusterResources)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestGenerateCapabilities(t *testing.T) {
|
||||
|
||||
testName := "TestGenerateCapabilities"
|
||||
|
|
@ -1614,7 +2111,7 @@ func TestVolumeSelector(t *testing.T) {
|
|||
return acidv1.PostgresSpec{
|
||||
TeamID: "myapp",
|
||||
NumberOfInstances: 0,
|
||||
Resources: acidv1.Resources{
|
||||
Resources: &acidv1.Resources{
|
||||
ResourceRequests: acidv1.ResourceDescription{CPU: "1", Memory: "10"},
|
||||
ResourceLimits: acidv1.ResourceDescription{CPU: "1", Memory: "10"},
|
||||
},
|
||||
|
|
|
|||
|
|
@ -67,7 +67,7 @@ func (c *Cluster) markRollingUpdateFlagForPod(pod *v1.Pod, msg string) error {
 		return fmt.Errorf("could not form patch for pod's rolling update flag: %v", err)
 	}

-	err = retryutil.Retry(1*time.Second, 5*time.Second,
+	err = retryutil.Retry(c.OpConfig.PatroniAPICheckInterval, c.OpConfig.PatroniAPICheckTimeout,
 		func() (bool, error) {
 			_, err2 := c.KubeClient.Pods(pod.Namespace).Patch(
 				context.TODO(),
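Aside (illustrative only): these hunks replace the hard-coded 1*time.Second / 5*time.Second with the configurable PatroniAPICheckInterval and PatroniAPICheckTimeout. The sketch below is a stand-in that only mirrors the assumed behaviour of retryutil.Retry (poll the callback every interval until it succeeds, errors, or the timeout elapses); the real helper lives in the operator's retryutil package.

package main

import (
	"errors"
	"fmt"
	"time"
)

// retry mimics the assumed behaviour of retryutil.Retry: call f every interval
// until it reports done, returns an error, or the overall timeout is exceeded.
func retry(interval, timeout time.Duration, f func() (bool, error)) error {
	deadline := time.Now().Add(timeout)
	for {
		done, err := f()
		if err != nil {
			return err
		}
		if done {
			return nil
		}
		if time.Now().After(deadline) {
			return errors.New("still failing after timeout")
		}
		time.Sleep(interval)
	}
}

func main() {
	attempts := 0
	// With the change above, interval and timeout come from the operator config
	// instead of the previously hard-coded one and five seconds.
	err := retry(10*time.Millisecond, 100*time.Millisecond, func() (bool, error) {
		attempts++
		return attempts >= 3, nil
	})
	fmt.Println(attempts, err) // 3 <nil>
}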
@ -356,7 +356,7 @@ func (c *Cluster) getPatroniConfig(pod *v1.Pod) (acidv1.Patroni, map[string]stri
|
|||
pgParameters map[string]string
|
||||
)
|
||||
podName := util.NameFromMeta(pod.ObjectMeta)
|
||||
err := retryutil.Retry(1*time.Second, 5*time.Second,
|
||||
err := retryutil.Retry(c.OpConfig.PatroniAPICheckInterval, c.OpConfig.PatroniAPICheckTimeout,
|
||||
func() (bool, error) {
|
||||
var err error
|
||||
patroniConfig, pgParameters, err = c.patroni.GetConfig(pod)
|
||||
|
|
@ -377,7 +377,7 @@ func (c *Cluster) getPatroniConfig(pod *v1.Pod) (acidv1.Patroni, map[string]stri
|
|||
|
||||
func (c *Cluster) getPatroniMemberData(pod *v1.Pod) (patroni.MemberData, error) {
|
||||
var memberData patroni.MemberData
|
||||
err := retryutil.Retry(1*time.Second, 5*time.Second,
|
||||
err := retryutil.Retry(c.OpConfig.PatroniAPICheckInterval, c.OpConfig.PatroniAPICheckTimeout,
|
||||
func() (bool, error) {
|
||||
var err error
|
||||
memberData, err = c.patroni.GetMemberData(pod)
|
||||
|
|
@ -403,7 +403,7 @@ func (c *Cluster) recreatePod(podName spec.NamespacedName) (*v1.Pod, error) {
|
|||
defer c.unregisterPodSubscriber(podName)
|
||||
stopChan := make(chan struct{})
|
||||
|
||||
err := retryutil.Retry(1*time.Second, 5*time.Second,
|
||||
err := retryutil.Retry(c.OpConfig.PatroniAPICheckInterval, c.OpConfig.PatroniAPICheckTimeout,
|
||||
func() (bool, error) {
|
||||
err2 := c.KubeClient.Pods(podName.Namespace).Delete(
|
||||
context.TODO(),
|
||||
|
|
@ -492,7 +492,7 @@ func (c *Cluster) getSwitchoverCandidate(master *v1.Pod) (spec.NamespacedName, e
|
|||
candidates := make([]patroni.ClusterMember, 0)
|
||||
syncCandidates := make([]patroni.ClusterMember, 0)
|
||||
|
||||
err := retryutil.Retry(1*time.Second, 5*time.Second,
|
||||
err := retryutil.Retry(c.OpConfig.PatroniAPICheckInterval, c.OpConfig.PatroniAPICheckTimeout,
|
||||
func() (bool, error) {
|
||||
var err error
|
||||
members, err = c.patroni.GetClusterMembers(master)
|
||||
|
|
|
|||
|
|
@ -6,11 +6,13 @@ import (
 	"io/ioutil"
 	"net/http"
 	"testing"
+	"time"

 	"github.com/golang/mock/gomock"
 	"github.com/zalando/postgres-operator/mocks"
 	acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1"
 	"github.com/zalando/postgres-operator/pkg/spec"
+	"github.com/zalando/postgres-operator/pkg/util/config"
 	"github.com/zalando/postgres-operator/pkg/util/k8sutil"
 	"github.com/zalando/postgres-operator/pkg/util/patroni"
 )
@ -22,7 +24,13 @@ func TestGetSwitchoverCandidate(t *testing.T) {
 	ctrl := gomock.NewController(t)
 	defer ctrl.Finish()

-	var cluster = New(Config{}, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger, eventRecorder)
+	var cluster = New(
+		Config{
+			OpConfig: config.Config{
+				PatroniAPICheckInterval: time.Duration(1),
+				PatroniAPICheckTimeout:  time.Duration(5),
+			},
+		}, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger, eventRecorder)

 	// simulate different member scenarios
 	tests := []struct {
|
|||
|
|
@ -275,7 +275,7 @@ func (c *Cluster) createService(role PostgresRole) (*v1.Service, error) {
 	return service, nil
 }

-func (c *Cluster) updateService(role PostgresRole, newService *v1.Service) error {
+func (c *Cluster) updateService(role PostgresRole, oldService *v1.Service, newService *v1.Service) (*v1.Service, error) {
 	var (
 		svc *v1.Service
 		err error
@ -283,11 +283,7 @@ func (c *Cluster) updateService(role PostgresRole, newService *v1.Service) error
|
|||
|
||||
c.setProcessName("updating %v service", role)
|
||||
|
||||
if c.Services[role] == nil {
|
||||
return fmt.Errorf("there is no service in the cluster")
|
||||
}
|
||||
|
||||
serviceName := util.NameFromMeta(c.Services[role].ObjectMeta)
|
||||
serviceName := util.NameFromMeta(oldService.ObjectMeta)
|
||||
|
||||
// update the service annotation in order to propagate ELB notation.
|
||||
if len(newService.ObjectMeta.Annotations) > 0 {
|
||||
|
|
@ -301,39 +297,38 @@ func (c *Cluster) updateService(role PostgresRole, newService *v1.Service) error
|
|||
"")
|
||||
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not replace annotations for the service %q: %v", serviceName, err)
|
||||
return nil, fmt.Errorf("could not replace annotations for the service %q: %v", serviceName, err)
|
||||
}
|
||||
} else {
|
||||
return fmt.Errorf("could not form patch for the service metadata: %v", err)
|
||||
return nil, fmt.Errorf("could not form patch for the service metadata: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// now, patch the service spec, but when disabling LoadBalancers do update instead
|
||||
// patch does not work because of LoadBalancerSourceRanges field (even if set to nil)
|
||||
oldServiceType := c.Services[role].Spec.Type
|
||||
oldServiceType := oldService.Spec.Type
|
||||
newServiceType := newService.Spec.Type
|
||||
if newServiceType == "ClusterIP" && newServiceType != oldServiceType {
|
||||
newService.ResourceVersion = c.Services[role].ResourceVersion
|
||||
newService.Spec.ClusterIP = c.Services[role].Spec.ClusterIP
|
||||
newService.ResourceVersion = oldService.ResourceVersion
|
||||
newService.Spec.ClusterIP = oldService.Spec.ClusterIP
|
||||
svc, err = c.KubeClient.Services(serviceName.Namespace).Update(context.TODO(), newService, metav1.UpdateOptions{})
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not update service %q: %v", serviceName, err)
|
||||
return nil, fmt.Errorf("could not update service %q: %v", serviceName, err)
|
||||
}
|
||||
} else {
|
||||
patchData, err := specPatch(newService.Spec)
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not form patch for the service %q: %v", serviceName, err)
|
||||
return nil, fmt.Errorf("could not form patch for the service %q: %v", serviceName, err)
|
||||
}
|
||||
|
||||
svc, err = c.KubeClient.Services(serviceName.Namespace).Patch(
|
||||
context.TODO(), serviceName.Name, types.MergePatchType, patchData, metav1.PatchOptions{}, "")
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not patch service %q: %v", serviceName, err)
|
||||
return nil, fmt.Errorf("could not patch service %q: %v", serviceName, err)
|
||||
}
|
||||
}
|
||||
c.Services[role] = svc
|
||||
|
||||
return nil
|
||||
return svc, nil
|
||||
}
|
||||
|
||||
func (c *Cluster) deleteService(role PostgresRole) error {
|
||||
|
|
|
|||
|
|
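
A hedged sketch of the new caller pattern: updateService no longer reads or mutates c.Services[role] itself, it takes the old service explicitly and returns the updated object for the caller to cache (this mirrors the syncService hunk further below).

	// illustrative only; oldSvc/desiredSvc names are assumptions
	updatedSvc, err := c.updateService(role, oldSvc, desiredSvc)
	if err != nil {
		return fmt.Errorf("could not update %s service: %v", role, err)
	}
	c.Services[role] = updatedSvc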
@ -189,7 +189,7 @@ func (c *Cluster) generateFabricEventStream(appId string) *zalandov1.FabricEvent
}
}

func (c *Cluster) getEventStreamSource(stream acidv1.Stream, tableName, idColumn string) zalandov1.EventStreamSource {
func (c *Cluster) getEventStreamSource(stream acidv1.Stream, tableName string, idColumn *string) zalandov1.EventStreamSource {
table, schema := getTableSchema(tableName)
streamFilter := stream.Filter[tableName]
return zalandov1.EventStreamSource{

@ -204,7 +204,7 @@ func (c *Cluster) getEventStreamSource(stream acidv1.Stream, tableName, idColumn
}
}

func getEventStreamFlow(stream acidv1.Stream, payloadColumn string) zalandov1.EventStreamFlow {
func getEventStreamFlow(stream acidv1.Stream, payloadColumn *string) zalandov1.EventStreamFlow {
return zalandov1.EventStreamFlow{
Type: constants.EventStreamFlowPgGenericType,
PayloadColumn: payloadColumn,

@ -230,7 +230,7 @@ func getTableSchema(fullTableName string) (tableName, schemaName string) {
return tableName, schemaName
}

func getOutboxTable(tableName, idColumn string) zalandov1.EventStreamTable {
func getOutboxTable(tableName string, idColumn *string) zalandov1.EventStreamTable {
return zalandov1.EventStreamTable{
Name: tableName,
IDColumn: idColumn,

@ -347,8 +347,8 @@ func (c *Cluster) createOrUpdateStreams() error {
c.logger.Infof("event stream %q has been successfully created", fesName)
} else {
desiredStreams := c.generateFabricEventStream(appId)
if !reflect.DeepEqual(effectiveStreams.Spec, desiredStreams.Spec) {
c.logger.Debug("updating event streams")
if match, reason := sameStreams(effectiveStreams.Spec.EventStreams, desiredStreams.Spec.EventStreams); !match {
c.logger.Debugf("updating event streams: %s", reason)
desiredStreams.ObjectMeta.ResourceVersion = effectiveStreams.ObjectMeta.ResourceVersion
err = c.updateStreams(desiredStreams)
if err != nil {

@ -361,3 +361,27 @@ func (c *Cluster) createOrUpdateStreams() error {
return nil
}

func sameStreams(curEventStreams, newEventStreams []zalandov1.EventStream) (match bool, reason string) {
if len(newEventStreams) != len(curEventStreams) {
return false, "number of defined streams is different"
}

for _, newStream := range newEventStreams {
match = false
reason = "event stream specs differ"
for _, curStream := range curEventStreams {
if reflect.DeepEqual(newStream.EventStreamSource, curStream.EventStreamSource) &&
reflect.DeepEqual(newStream.EventStreamFlow, curStream.EventStreamFlow) &&
reflect.DeepEqual(newStream.EventStreamSink, curStream.EventStreamSink) {
match = true
break
}
}
if !match {
return false, reason
}
}

return true, ""
}
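
A short hedged usage sketch: sameStreams compares source, flow and sink of each desired stream against the current ones with reflect.DeepEqual, independent of ordering, so the caller only triggers an update when a real difference is reported.

	// illustrative only; both slices are []zalandov1.EventStream
	if match, reason := sameStreams(current.Spec.EventStreams, desired.Spec.EventStreams); !match {
		c.logger.Debugf("updating event streams: %s", reason)
		// proceed with updateStreams(desired)
	}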
@ -11,7 +11,7 @@ import (
"github.com/stretchr/testify/assert"
acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1"
v1 "github.com/zalando/postgres-operator/pkg/apis/zalando.org/v1"
zalandov1 "github.com/zalando/postgres-operator/pkg/apis/zalando.org/v1"
fakezalandov1 "github.com/zalando/postgres-operator/pkg/generated/clientset/versioned/fake"
"github.com/zalando/postgres-operator/pkg/util"
"github.com/zalando/postgres-operator/pkg/util/config"

@ -63,15 +63,18 @@ var (
Database: "foo",
Tables: map[string]acidv1.StreamTable{
"data.bar": acidv1.StreamTable{
EventType: "stream_type_a",
IdColumn: "b_id",
PayloadColumn: "b_payload",
EventType: "stream-type-a",
IdColumn: k8sutil.StringToPointer("b_id"),
PayloadColumn: k8sutil.StringToPointer("b_payload"),
},
"data.foobar": acidv1.StreamTable{
EventType: "stream-type-b",
},
},
Filter: map[string]string{
"data.bar": "[?(@.source.txId > 500 && @.source.lsn > 123456)]",
Filter: map[string]*string{
"data.bar": k8sutil.StringToPointer("[?(@.source.txId > 500 && @.source.lsn > 123456)]"),
},
BatchSize: uint32(100),
BatchSize: k8sutil.UInt32ToPointer(uint32(100)),
},
},
Volume: acidv1.Volume{

@ -80,7 +83,7 @@ var (
},
}

fes = &v1.FabricEventStream{
fes = &zalandov1.FabricEventStream{
TypeMeta: metav1.TypeMeta{
APIVersion: constants.EventStreamCRDApiVersion,
Kind: constants.EventStreamCRDKind,

@ -97,23 +100,23 @@ var (
},
},
},
Spec: v1.FabricEventStreamSpec{
Spec: zalandov1.FabricEventStreamSpec{
ApplicationId: appId,
EventStreams: []v1.EventStream{
{
EventStreamFlow: v1.EventStreamFlow{
PayloadColumn: "b_payload",
EventStreams: []zalandov1.EventStream{
zalandov1.EventStream{
EventStreamFlow: zalandov1.EventStreamFlow{
PayloadColumn: k8sutil.StringToPointer("b_payload"),
Type: constants.EventStreamFlowPgGenericType,
},
EventStreamSink: v1.EventStreamSink{
EventType: "stream_type_a",
MaxBatchSize: uint32(100),
EventStreamSink: zalandov1.EventStreamSink{
EventType: "stream-type-a",
MaxBatchSize: k8sutil.UInt32ToPointer(uint32(100)),
Type: constants.EventStreamSinkNakadiType,
},
EventStreamSource: v1.EventStreamSource{
Filter: "[?(@.source.txId > 500 && @.source.lsn > 123456)]",
Connection: v1.Connection{
DBAuth: v1.DBAuth{
EventStreamSource: zalandov1.EventStreamSource{
Filter: k8sutil.StringToPointer("[?(@.source.txId > 500 && @.source.lsn > 123456)]"),
Connection: zalandov1.Connection{
DBAuth: zalandov1.DBAuth{
Name: fmt.Sprintf("fes-user.%s.credentials.postgresql.acid.zalan.do", clusterName),
PasswordKey: "password",
Type: constants.EventStreamSourceAuthType,

@ -124,13 +127,41 @@ var (
PluginType: constants.EventStreamSourcePluginType,
},
Schema: "data",
EventStreamTable: v1.EventStreamTable{
IDColumn: "b_id",
EventStreamTable: zalandov1.EventStreamTable{
IDColumn: k8sutil.StringToPointer("b_id"),
Name: "bar",
},
Type: constants.EventStreamSourcePGType,
},
},
zalandov1.EventStream{
EventStreamFlow: zalandov1.EventStreamFlow{
Type: constants.EventStreamFlowPgGenericType,
},
EventStreamSink: zalandov1.EventStreamSink{
EventType: "stream-type-b",
MaxBatchSize: k8sutil.UInt32ToPointer(uint32(100)),
Type: constants.EventStreamSinkNakadiType,
},
EventStreamSource: zalandov1.EventStreamSource{
Connection: zalandov1.Connection{
DBAuth: zalandov1.DBAuth{
Name: fmt.Sprintf("fes-user.%s.credentials.postgresql.acid.zalan.do", clusterName),
PasswordKey: "password",
Type: constants.EventStreamSourceAuthType,
UserKey: "username",
},
Url: fmt.Sprintf("jdbc:postgresql://%s.%s/foo?user=%s&ssl=true&sslmode=require", clusterName, namespace, fesUser),
SlotName: slotName,
PluginType: constants.EventStreamSourcePluginType,
},
Schema: "data",
EventStreamTable: zalandov1.EventStreamTable{
Name: "foobar",
},
Type: constants.EventStreamSourcePGType,
},
},
},
},
}

@ -161,23 +192,116 @@ func TestGenerateFabricEventStream(t *testing.T) {
cluster.Name = clusterName
cluster.Namespace = namespace

// create statefulset to have ownerReference for streams
_, err := cluster.createStatefulSet()
assert.NoError(t, err)

// create the streams
err = cluster.createOrUpdateStreams()
assert.NoError(t, err)

// compare generated stream with expected stream
result := cluster.generateFabricEventStream(appId)

if !reflect.DeepEqual(result, fes) {
t.Errorf("Malformed FabricEventStream, expected %#v, got %#v", fes, result)
if match, _ := sameStreams(result.Spec.EventStreams, fes.Spec.EventStreams); !match {
t.Errorf("malformed FabricEventStream, expected %#v, got %#v", fes, result)
}

// compare stream returned from API with expected stream
streamCRD, err := cluster.KubeClient.FabricEventStreams(namespace).Get(context.TODO(), fesName, metav1.GetOptions{})
assert.NoError(t, err)
if match, _ := sameStreams(streamCRD.Spec.EventStreams, fes.Spec.EventStreams); !match {
t.Errorf("malformed FabricEventStream returned from API, expected %#v, got %#v", fes, streamCRD)
}

if !reflect.DeepEqual(streamCRD, fes) {
t.Errorf("Malformed FabricEventStream, expected %#v, got %#v", fes, streamCRD)
// sync streams once again
err = cluster.createOrUpdateStreams()
assert.NoError(t, err)

// compare stream returned from API with generated stream
streamCRD, err = cluster.KubeClient.FabricEventStreams(namespace).Get(context.TODO(), fesName, metav1.GetOptions{})
assert.NoError(t, err)
if match, _ := sameStreams(streamCRD.Spec.EventStreams, result.Spec.EventStreams); !match {
t.Errorf("returned FabricEventStream differs from generated one, expected %#v, got %#v", result, streamCRD)
}
}

func TestSameStreams(t *testing.T) {
testName := "TestSameStreams"

stream1 := zalandov1.EventStream{
EventStreamFlow: zalandov1.EventStreamFlow{},
EventStreamSink: zalandov1.EventStreamSink{
EventType: "stream-type-a",
},
EventStreamSource: zalandov1.EventStreamSource{
EventStreamTable: zalandov1.EventStreamTable{
Name: "foo",
},
},
}

stream2 := zalandov1.EventStream{
EventStreamFlow: zalandov1.EventStreamFlow{},
EventStreamSink: zalandov1.EventStreamSink{
EventType: "stream-type-b",
},
EventStreamSource: zalandov1.EventStreamSource{
EventStreamTable: zalandov1.EventStreamTable{
Name: "bar",
},
},
}

tests := []struct {
subTest string
streamsA []zalandov1.EventStream
streamsB []zalandov1.EventStream
match bool
reason string
}{
{
subTest: "identical streams",
streamsA: []zalandov1.EventStream{stream1, stream2},
streamsB: []zalandov1.EventStream{stream1, stream2},
match: true,
reason: "",
},
{
subTest: "same streams different order",
streamsA: []zalandov1.EventStream{stream1, stream2},
streamsB: []zalandov1.EventStream{stream2, stream1},
match: true,
reason: "",
},
{
subTest: "same streams different order",
streamsA: []zalandov1.EventStream{stream1},
streamsB: []zalandov1.EventStream{stream1, stream2},
match: false,
reason: "number of defined streams is different",
},
{
subTest: "different number of streams",
streamsA: []zalandov1.EventStream{stream1},
streamsB: []zalandov1.EventStream{stream1, stream2},
match: false,
reason: "number of defined streams is different",
},
{
subTest: "event stream specs differ",
streamsA: []zalandov1.EventStream{stream1, stream2},
streamsB: fes.Spec.EventStreams,
match: false,
reason: "number of defined streams is different",
},
}

for _, tt := range tests {
streamsMatch, matchReason := sameStreams(tt.streamsA, tt.streamsB)
if streamsMatch != tt.match {
t.Errorf("%s %s: unexpected match result when comparing streams: got %s, expected %s",
testName, tt.subTest, matchReason, tt.reason)
}
}
}

@ -213,12 +337,12 @@ func TestUpdateFabricEventStream(t *testing.T) {
Database: dbName,
Tables: map[string]acidv1.StreamTable{
"data.bar": acidv1.StreamTable{
EventType: "stream_type_b",
IdColumn: "b_id",
PayloadColumn: "b_payload",
EventType: "stream-type-c",
IdColumn: k8sutil.StringToPointer("b_id"),
PayloadColumn: k8sutil.StringToPointer("b_payload"),
},
},
BatchSize: uint32(250),
BatchSize: k8sutil.UInt32ToPointer(uint32(250)),
},
}
patch, err := json.Marshal(struct {
@ -76,11 +76,6 @@ func (c *Cluster) Sync(newSpec *acidv1.Postgresql) error {
}
}

if err = c.enforceMinResourceLimits(&c.Spec); err != nil {
err = fmt.Errorf("could not enforce minimum resource limits: %v", err)
return err
}

c.logger.Debug("syncing statefulsets")
if err = c.syncStatefulSet(); err != nil {
if !k8sutil.ResourceAlreadyExists(err) {

@ -172,11 +167,13 @@ func (c *Cluster) syncService(role PostgresRole) error {
if svc, err = c.KubeClient.Services(c.Namespace).Get(context.TODO(), c.serviceName(role), metav1.GetOptions{}); err == nil {
c.Services[role] = svc
desiredSvc := c.generateService(role, &c.Spec)
if match, reason := k8sutil.SameService(svc, desiredSvc); !match {
if match, reason := c.compareServices(svc, desiredSvc); !match {
c.logServiceChanges(role, svc, desiredSvc, false, reason)
if err = c.updateService(role, desiredSvc); err != nil {
updatedSvc, err := c.updateService(role, svc, desiredSvc)
if err != nil {
return fmt.Errorf("could not update %s service to match desired state: %v", role, err)
}
c.Services[role] = updatedSvc
c.logger.Infof("%s service %q is in the desired state now", role, util.NameFromMeta(desiredSvc.ObjectMeta))
}
return nil
@ -594,3 +594,15 @@ func trimCronjobName(name string) string {
}
return name
}

func parseResourceRequirements(resourcesRequirement v1.ResourceRequirements) (acidv1.Resources, error) {
var resources acidv1.Resources
resourcesJSON, err := json.Marshal(resourcesRequirement)
if err != nil {
return acidv1.Resources{}, fmt.Errorf("could not marshal K8s resources requirements")
}
if err = json.Unmarshal(resourcesJSON, &resources); err != nil {
return acidv1.Resources{}, fmt.Errorf("could not unmarshal K8s resources requirements into acidv1.Resources struct")
}
return resources, nil
}
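
A hedged usage sketch of the helper above: it converts Kubernetes ResourceRequirements into the operator's acidv1.Resources type via a JSON round-trip, so the two structs only need matching JSON field names. The input values here are illustrative.

	// illustrative only
	containerResources := v1.ResourceRequirements{
		Requests: v1.ResourceList{
			v1.ResourceCPU:    resource.MustParse("100m"),
			v1.ResourceMemory: resource.MustParse("100Mi"),
		},
	}
	specResources, err := parseResourceRequirements(containerResources)
	if err != nil {
		return fmt.Errorf("could not parse resource requirements: %v", err)
	}
	_ = specResources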
@ -57,7 +57,7 @@ func TestInheritedAnnotations(t *testing.T) {
ConnectionPoolerDefaultCPULimit: "100m",
ConnectionPoolerDefaultMemoryRequest: "100Mi",
ConnectionPoolerDefaultMemoryLimit: "100Mi",
NumberOfInstances: int32ToPointer(1),
NumberOfInstances: k8sutil.Int32ToPointer(1),
},
PodManagementPolicy: "ordered_ready",
Resources: config.Resources{
@ -10,6 +10,7 @@ import (
"github.com/zalando/postgres-operator/pkg/util"
"github.com/zalando/postgres-operator/pkg/util/config"
"github.com/zalando/postgres-operator/pkg/util/constants"
"github.com/zalando/postgres-operator/pkg/util/k8sutil"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

@ -24,10 +25,6 @@ func (c *Controller) readOperatorConfigurationFromCRD(configObjectNamespace, con
return config, nil
}

func int32ToPointer(value int32) *int32 {
return &value
}

// importConfigurationFromCRD is a transitional function that converts CRD configuration to the one based on the configmap
func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigurationData) *config.Config {
result := &config.Config{}

@ -55,6 +52,7 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur
// user config
result.SuperUsername = util.Coalesce(fromCRD.PostgresUsersConfiguration.SuperUsername, "postgres")
result.ReplicationUsername = util.Coalesce(fromCRD.PostgresUsersConfiguration.ReplicationUsername, "standby")
result.AdditionalOwnerRoles = fromCRD.PostgresUsersConfiguration.AdditionalOwnerRoles
result.EnablePasswordRotation = fromCRD.PostgresUsersConfiguration.EnablePasswordRotation
result.PasswordRotationInterval = util.CoalesceUInt32(fromCRD.PostgresUsersConfiguration.PasswordRotationInterval, 90)
result.PasswordRotationUserRetention = util.CoalesceUInt32(fromCRD.PostgresUsersConfiguration.DeepCopy().PasswordRotationUserRetention, 180)

@ -110,6 +108,7 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur
result.InheritedLabels = fromCRD.Kubernetes.InheritedLabels
result.InheritedAnnotations = fromCRD.Kubernetes.InheritedAnnotations
result.DownscalerAnnotations = fromCRD.Kubernetes.DownscalerAnnotations
result.IgnoredAnnotations = fromCRD.Kubernetes.IgnoredAnnotations
result.ClusterNameLabel = util.Coalesce(fromCRD.Kubernetes.ClusterNameLabel, "cluster-name")
result.DeleteAnnotationDateKey = fromCRD.Kubernetes.DeleteAnnotationDateKey
result.DeleteAnnotationNameKey = fromCRD.Kubernetes.DeleteAnnotationNameKey

@ -137,11 +136,15 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur
result.PodDeletionWaitTimeout = util.CoalesceDuration(time.Duration(fromCRD.Timeouts.PodDeletionWaitTimeout), "10m")
result.ReadyWaitInterval = util.CoalesceDuration(time.Duration(fromCRD.Timeouts.ReadyWaitInterval), "4s")
result.ReadyWaitTimeout = util.CoalesceDuration(time.Duration(fromCRD.Timeouts.ReadyWaitTimeout), "30s")
result.PatroniAPICheckInterval = util.CoalesceDuration(time.Duration(fromCRD.Timeouts.PatroniAPICheckInterval), "1s")
result.PatroniAPICheckTimeout = util.CoalesceDuration(time.Duration(fromCRD.Timeouts.PatroniAPICheckTimeout), "5s")

// load balancer config
result.DbHostedZone = util.Coalesce(fromCRD.LoadBalancer.DbHostedZone, "db.example.com")
result.EnableMasterLoadBalancer = fromCRD.LoadBalancer.EnableMasterLoadBalancer
result.EnableMasterPoolerLoadBalancer = fromCRD.LoadBalancer.EnableMasterPoolerLoadBalancer
result.EnableReplicaLoadBalancer = fromCRD.LoadBalancer.EnableReplicaLoadBalancer
result.EnableReplicaPoolerLoadBalancer = fromCRD.LoadBalancer.EnableReplicaPoolerLoadBalancer
result.CustomServiceAnnotations = fromCRD.LoadBalancer.CustomServiceAnnotations
result.MasterDNSNameFormat = fromCRD.LoadBalancer.MasterDNSNameFormat
result.ReplicaDNSNameFormat = fromCRD.LoadBalancer.ReplicaDNSNameFormat

@ -170,6 +173,7 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur
result.LogicalBackupS3AccessKeyID = fromCRD.LogicalBackup.S3AccessKeyID
result.LogicalBackupS3SecretAccessKey = fromCRD.LogicalBackup.S3SecretAccessKey
result.LogicalBackupS3SSE = fromCRD.LogicalBackup.S3SSE
result.LogicalBackupS3RetentionTime = fromCRD.LogicalBackup.RetentionTime
result.LogicalBackupGoogleApplicationCredentials = fromCRD.LogicalBackup.GoogleApplicationCredentials
result.LogicalBackupJobPrefix = util.Coalesce(fromCRD.LogicalBackup.JobPrefix, "logical-backup-")

@ -186,7 +190,7 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur
result.TeamAdminRole = fromCRD.TeamsAPI.TeamAdminRole
result.PamRoleName = util.Coalesce(fromCRD.TeamsAPI.PamRoleName, "zalandos")
result.PamConfiguration = util.Coalesce(fromCRD.TeamsAPI.PamConfiguration, "https://info.example.com/oauth2/tokeninfo?access_token= uid realm=/employees")
result.ProtectedRoles = util.CoalesceStrArr(fromCRD.TeamsAPI.ProtectedRoles, []string{"admin"})
result.ProtectedRoles = util.CoalesceStrArr(fromCRD.TeamsAPI.ProtectedRoles, []string{"admin", "cron_admin"})
result.PostgresSuperuserTeams = fromCRD.TeamsAPI.PostgresSuperuserTeams
result.EnablePostgresTeamCRD = fromCRD.TeamsAPI.EnablePostgresTeamCRD
result.EnablePostgresTeamCRDSuperusers = fromCRD.TeamsAPI.EnablePostgresTeamCRDSuperusers

@ -211,11 +215,11 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur
// so ensure default values here.
result.ConnectionPooler.NumberOfInstances = util.CoalesceInt32(
fromCRD.ConnectionPooler.NumberOfInstances,
int32ToPointer(2))
k8sutil.Int32ToPointer(2))

result.ConnectionPooler.NumberOfInstances = util.MaxInt32(
result.ConnectionPooler.NumberOfInstances,
int32ToPointer(2))
k8sutil.Int32ToPointer(2))

result.ConnectionPooler.Schema = util.Coalesce(
fromCRD.ConnectionPooler.Schema,

@ -226,7 +230,7 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur
constants.ConnectionPoolerUserName)

if result.ConnectionPooler.User == result.SuperUsername {
msg := "Connection pool user is not allowed to be the same as super user, username: %s"
msg := "connection pool user is not allowed to be the same as super user, username: %s"
panic(fmt.Errorf(msg, result.ConnectionPooler.User))
}

@ -256,7 +260,7 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur
result.ConnectionPooler.MaxDBConnections = util.CoalesceInt32(
fromCRD.ConnectionPooler.MaxDBConnections,
int32ToPointer(constants.ConnectionPoolerMaxDBConnections))
k8sutil.Int32ToPointer(constants.ConnectionPoolerMaxDBConnections))

return result
}
@ -30,6 +30,7 @@ const (
RoleOriginManifest
RoleOriginInfrastructure
RoleOriginTeamsAPI
RoleOriginSpilo
RoleOriginSystem
RoleOriginBootstrap
RoleConnectionPooler

@ -118,6 +119,7 @@ type ControllerConfig struct {
CRDReadyWaitTimeout time.Duration
ConfigMapName NamespacedName
Namespace string
IgnoredAnnotations []string

EnableJsonLogging bool
}
@ -42,6 +42,7 @@ type Resources struct {
InheritedLabels []string `name:"inherited_labels" default:""`
InheritedAnnotations []string `name:"inherited_annotations" default:""`
DownscalerAnnotations []string `name:"downscaler_annotations"`
IgnoredAnnotations []string `name:"ignored_annotations"`
ClusterNameLabel string `name:"cluster_name_label" default:"cluster-name"`
DeleteAnnotationDateKey string `name:"delete_annotation_date_key"`
DeleteAnnotationNameKey string `name:"delete_annotation_name_key"`

@ -101,6 +102,7 @@ type Auth struct {
InfrastructureRolesDefs string `name:"infrastructure_roles_secrets"`
SuperUsername string `name:"super_username" default:"postgres"`
ReplicationUsername string `name:"replication_username" default:"standby"`
AdditionalOwnerRoles []string `name:"additional_owner_roles" default:""`
EnablePasswordRotation bool `name:"enable_password_rotation" default:"false"`
PasswordRotationInterval uint32 `name:"password_rotation_interval" default:"90"`
PasswordRotationUserRetention uint32 `name:"password_rotation_user_retention" default:"180"`

@ -128,6 +130,7 @@ type LogicalBackup struct {
LogicalBackupS3AccessKeyID string `name:"logical_backup_s3_access_key_id" default:""`
LogicalBackupS3SecretAccessKey string `name:"logical_backup_s3_secret_access_key" default:""`
LogicalBackupS3SSE string `name:"logical_backup_s3_sse" default:""`
LogicalBackupS3RetentionTime string `name:"logical_backup_s3_retention_time" default:""`
LogicalBackupGoogleApplicationCredentials string `name:"logical_backup_google_application_credentials" default:""`
LogicalBackupJobPrefix string `name:"logical_backup_job_prefix" default:"logical-backup-"`
}

@ -189,7 +192,9 @@ type Config struct {
EnablePostgresTeamCRD bool `name:"enable_postgres_team_crd" default:"false"`
EnablePostgresTeamCRDSuperusers bool `name:"enable_postgres_team_crd_superusers" default:"false"`
EnableMasterLoadBalancer bool `name:"enable_master_load_balancer" default:"true"`
EnableMasterPoolerLoadBalancer bool `name:"enable_master_pooler_load_balancer" default:"false"`
EnableReplicaLoadBalancer bool `name:"enable_replica_load_balancer" default:"false"`
EnableReplicaPoolerLoadBalancer bool `name:"enable_replica_pooler_load_balancer" default:"false"`
CustomServiceAnnotations map[string]string `name:"custom_service_annotations"`
CustomPodAnnotations map[string]string `name:"custom_pod_annotations"`
EnablePodAntiAffinity bool `name:"enable_pod_antiaffinity" default:"false"`

@ -210,7 +215,7 @@ type Config struct {
TeamAPIRoleConfiguration map[string]string `name:"team_api_role_configuration" default:"log_statement:all"`
PodTerminateGracePeriod time.Duration `name:"pod_terminate_grace_period" default:"5m"`
PodManagementPolicy string `name:"pod_management_policy" default:"ordered_ready"`
ProtectedRoles []string `name:"protected_role_names" default:"admin"`
ProtectedRoles []string `name:"protected_role_names" default:"admin,cron_admin"`
PostgresSuperuserTeams []string `name:"postgres_superuser_teams" default:""`
SetMemoryRequestToLimit bool `name:"set_memory_request_to_limit" default:"false"`
EnableLazySpiloUpgrade bool `name:"enable_lazy_spilo_upgrade" default:"false"`

@ -221,6 +226,8 @@ type Config struct {
MajorVersionUpgradeTeamAllowList []string `name:"major_version_upgrade_team_allow_list" default:""`
MinimalMajorVersion string `name:"minimal_major_version" default:"9.6"`
TargetMajorVersion string `name:"target_major_version" default:"14"`
PatroniAPICheckInterval time.Duration `name:"patroni_api_check_interval" default:"1s"`
PatroniAPICheckTimeout time.Duration `name:"patroni_api_check_timeout" default:"5s"`
}

// MustMarshal marshals the config or panics

@ -287,7 +294,7 @@ func validate(cfg *Config) (err error) {
}

if cfg.ConnectionPooler.User == cfg.SuperUsername {
msg := "Connection pool user is not allowed to be the same as super user, username: %s"
msg := "connection pool user is not allowed to be the same as super user, username: %s"
err = fmt.Errorf(msg, cfg.ConnectionPooler.User)
}
@ -37,6 +37,14 @@ func Int32ToPointer(value int32) *int32 {
return &value
}

func UInt32ToPointer(value uint32) *uint32 {
return &value
}

func StringToPointer(str string) *string {
return &str
}

// KubernetesClient describes getters for Kubernetes objects
type KubernetesClient struct {
corev1.SecretsGetter

@ -205,57 +213,6 @@ func (client *KubernetesClient) SetPostgresCRDStatus(clusterName spec.Namespaced
return pg, nil
}

// SameService compares the Services
func SameService(cur, new *v1.Service) (match bool, reason string) {
//TODO: improve comparison
if cur.Spec.Type != new.Spec.Type {
return false, fmt.Sprintf("new service's type %q does not match the current one %q",
new.Spec.Type, cur.Spec.Type)
}

oldSourceRanges := cur.Spec.LoadBalancerSourceRanges
newSourceRanges := new.Spec.LoadBalancerSourceRanges

/* work around Kubernetes 1.6 serializing [] as nil. See https://github.com/kubernetes/kubernetes/issues/43203 */
if (len(oldSourceRanges) != 0) || (len(newSourceRanges) != 0) {
if !reflect.DeepEqual(oldSourceRanges, newSourceRanges) {
return false, "new service's LoadBalancerSourceRange does not match the current one"
}
}

match = true

reasonPrefix := "new service's annotations does not match the current one:"
for ann := range cur.Annotations {
if _, ok := new.Annotations[ann]; !ok {
match = false
if len(reason) == 0 {
reason = reasonPrefix
}
reason += fmt.Sprintf(" Removed '%s'.", ann)
}
}

for ann := range new.Annotations {
v, ok := cur.Annotations[ann]
if !ok {
if len(reason) == 0 {
reason = reasonPrefix
}
reason += fmt.Sprintf(" Added '%s' with value '%s'.", ann, new.Annotations[ann])
match = false
} else if v != new.Annotations[ann] {
if len(reason) == 0 {
reason = reasonPrefix
}
reason += fmt.Sprintf(" '%s' changed from '%s' to '%s'.", ann, v, new.Annotations[ann])
match = false
}
}

return match, reason
}

// SamePDB compares the PodDisruptionBudgets
func SamePDB(cur, new *policybeta1.PodDisruptionBudget) (match bool, reason string) {
//TODO: improve comparison
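
A hedged sketch of what the new pointer helpers are for: several stream CRD fields become nullable pointers elsewhere in this commit, and pointer-typed optional values cannot be written as literals inline, so the helpers keep the test fixtures and manifests compact. Field values below are illustrative.

	// illustrative only
	table := acidv1.StreamTable{
		EventType:     "stream-type-a",
		IdColumn:      k8sutil.StringToPointer("b_id"),
		PayloadColumn: k8sutil.StringToPointer("b_payload"),
	}
	batchSize := k8sutil.UInt32ToPointer(100)
	_, _ = table, batchSize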
@ -1,311 +0,0 @@
package k8sutil

import (
"strings"
"testing"

"github.com/zalando/postgres-operator/pkg/util/constants"

v1 "k8s.io/api/core/v1"
)

func newsService(ann map[string]string, svcT v1.ServiceType, lbSr []string) *v1.Service {
svc := &v1.Service{
Spec: v1.ServiceSpec{
Type: svcT,
LoadBalancerSourceRanges: lbSr,
},
}
svc.Annotations = ann
return svc
}

func TestSameService(t *testing.T) {
tests := []struct {
about string
current *v1.Service
new *v1.Service
reason string
match bool
}{
{
about: "two equal services",
current: newsService(
map[string]string{
constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do",
constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue,
},
v1.ServiceTypeClusterIP,
[]string{"128.141.0.0/16", "137.138.0.0/16"}),
new: newsService(
map[string]string{
constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do",
constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue,
},
v1.ServiceTypeClusterIP,
[]string{"128.141.0.0/16", "137.138.0.0/16"}),
match: true,
},
{
about: "services differ on service type",
current: newsService(
map[string]string{
constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do",
constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue,
},
v1.ServiceTypeClusterIP,
[]string{"128.141.0.0/16", "137.138.0.0/16"}),
new: newsService(
map[string]string{
constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do",
constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue,
},
v1.ServiceTypeLoadBalancer,
[]string{"128.141.0.0/16", "137.138.0.0/16"}),
match: false,
reason: `new service's type "LoadBalancer" does not match the current one "ClusterIP"`,
},
{
about: "services differ on lb source ranges",
current: newsService(
map[string]string{
constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do",
constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue,
},
v1.ServiceTypeLoadBalancer,
[]string{"128.141.0.0/16", "137.138.0.0/16"}),
new: newsService(
map[string]string{
constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do",
constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue,
},
v1.ServiceTypeLoadBalancer,
[]string{"185.249.56.0/22"}),
match: false,
reason: `new service's LoadBalancerSourceRange does not match the current one`,
},
{
about: "new service doesn't have lb source ranges",
current: newsService(
map[string]string{
constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do",
constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue,
},
v1.ServiceTypeLoadBalancer,
[]string{"128.141.0.0/16", "137.138.0.0/16"}),
new: newsService(
map[string]string{
constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do",
constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue,
},
v1.ServiceTypeLoadBalancer,
[]string{}),
match: false,
reason: `new service's LoadBalancerSourceRange does not match the current one`,
},
{
about: "services differ on DNS annotation",
current: newsService(
map[string]string{
constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do",
constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue,
},
v1.ServiceTypeLoadBalancer,
[]string{"128.141.0.0/16", "137.138.0.0/16"}),
new: newsService(
map[string]string{
constants.ZalandoDNSNameAnnotation: "new_clstr.acid.zalan.do",
constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue,
},
v1.ServiceTypeLoadBalancer,
[]string{"128.141.0.0/16", "137.138.0.0/16"}),
match: false,
reason: `new service's annotations does not match the current one: 'external-dns.alpha.kubernetes.io/hostname' changed from 'clstr.acid.zalan.do' to 'new_clstr.acid.zalan.do'.`,
},
{
about: "services differ on AWS ELB annotation",
current: newsService(
map[string]string{
constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do",
constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue,
},
v1.ServiceTypeLoadBalancer,
[]string{"128.141.0.0/16", "137.138.0.0/16"}),
new: newsService(
map[string]string{
constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do",
constants.ElbTimeoutAnnotationName: "1800",
},
v1.ServiceTypeLoadBalancer,
[]string{"128.141.0.0/16", "137.138.0.0/16"}),
match: false,
reason: `new service's annotations does not match the current one: 'service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout' changed from '3600' to '1800'.`,
},
{
about: "service changes existing annotation",
current: newsService(
map[string]string{
constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do",
constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue,
"foo": "bar",
},
v1.ServiceTypeLoadBalancer,
[]string{"128.141.0.0/16", "137.138.0.0/16"}),
new: newsService(
map[string]string{
constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do",
constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue,
"foo": "baz",
},
v1.ServiceTypeLoadBalancer,
[]string{"128.141.0.0/16", "137.138.0.0/16"}),
match: false,
reason: `new service's annotations does not match the current one: 'foo' changed from 'bar' to 'baz'.`,
},
{
about: "service changes multiple existing annotations",
current: newsService(
map[string]string{
constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do",
constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue,
"foo": "bar",
"bar": "foo",
},
v1.ServiceTypeLoadBalancer,
[]string{"128.141.0.0/16", "137.138.0.0/16"}),
new: newsService(
map[string]string{
constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do",
constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue,
"foo": "baz",
"bar": "fooz",
},
v1.ServiceTypeLoadBalancer,
[]string{"128.141.0.0/16", "137.138.0.0/16"}),
match: false,
// Test just the prefix to avoid flakiness and map sorting
reason: `new service's annotations does not match the current one:`,
},
{
about: "service adds a new custom annotation",
current: newsService(
map[string]string{
constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do",
constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue,
},
v1.ServiceTypeLoadBalancer,
[]string{"128.141.0.0/16", "137.138.0.0/16"}),
new: newsService(
map[string]string{
constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do",
constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue,
"foo": "bar",
},
v1.ServiceTypeLoadBalancer,
[]string{"128.141.0.0/16", "137.138.0.0/16"}),
match: false,
reason: `new service's annotations does not match the current one: Added 'foo' with value 'bar'.`,
},
{
about: "service removes a custom annotation",
current: newsService(
map[string]string{
constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do",
constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue,
"foo": "bar",
},
v1.ServiceTypeLoadBalancer,
[]string{"128.141.0.0/16", "137.138.0.0/16"}),
new: newsService(
map[string]string{
constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do",
constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue,
},
v1.ServiceTypeLoadBalancer,
[]string{"128.141.0.0/16", "137.138.0.0/16"}),
match: false,
reason: `new service's annotations does not match the current one: Removed 'foo'.`,
},
{
about: "service removes a custom annotation and adds a new one",
current: newsService(
map[string]string{
constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do",
constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue,
"foo": "bar",
},
v1.ServiceTypeLoadBalancer,
[]string{"128.141.0.0/16", "137.138.0.0/16"}),
new: newsService(
map[string]string{
constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do",
constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue,
"bar": "foo",
},
v1.ServiceTypeLoadBalancer,
[]string{"128.141.0.0/16", "137.138.0.0/16"}),
match: false,
reason: `new service's annotations does not match the current one: Removed 'foo'. Added 'bar' with value 'foo'.`,
},
{
about: "service removes a custom annotation, adds a new one and change another",
current: newsService(
map[string]string{
constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do",
constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue,
"foo": "bar",
"zalan": "do",
},
v1.ServiceTypeLoadBalancer,
[]string{"128.141.0.0/16", "137.138.0.0/16"}),
new: newsService(
map[string]string{
constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do",
constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue,
"bar": "foo",
"zalan": "do.com",
},
v1.ServiceTypeLoadBalancer,
[]string{"128.141.0.0/16", "137.138.0.0/16"}),
match: false,
// Test just the prefix to avoid flakiness and map sorting
reason: `new service's annotations does not match the current one: Removed 'foo'.`,
},
{
about: "service add annotations",
current: newsService(
map[string]string{},
v1.ServiceTypeLoadBalancer,
[]string{"128.141.0.0/16", "137.138.0.0/16"}),
new: newsService(
map[string]string{
constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do",
constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue,
},
v1.ServiceTypeLoadBalancer,
[]string{"128.141.0.0/16", "137.138.0.0/16"}),
match: false,
// Test just the prefix to avoid flakiness and map sorting
reason: `new service's annotations does not match the current one: Added `,
},
}
for _, tt := range tests {
t.Run(tt.about, func(t *testing.T) {
match, reason := SameService(tt.current, tt.new)
if match && !tt.match {
t.Errorf("expected services to do not match: '%q' and '%q'", tt.current, tt.new)
return
}
if !match && tt.match {
t.Errorf("expected services to be the same: '%q' and '%q'", tt.current, tt.new)
return
}
if !match && !tt.match {
if !strings.HasPrefix(reason, tt.reason) {
t.Errorf("expected reason prefix '%s', found '%s'", tt.reason, reason)
return
}
}
})
}
}
@ -53,25 +53,31 @@ func (strategy DefaultUserSyncStrategy) ProduceSyncRequests(dbUsers spec.PgUserM
}
} else {
r := spec.PgSyncUserRequest{}
r.User = dbUser
newMD5Password := util.NewEncryptor(strategy.PasswordEncryption).PGUserPassword(newUser)

if dbUser.Password != newMD5Password {
r.User.Password = newMD5Password
r.Kind = spec.PGsyncUserAlter
// do not compare for roles coming from docker image
if newUser.Origin != spec.RoleOriginSpilo {
if dbUser.Password != newMD5Password {
r.User.Password = newMD5Password
r.Kind = spec.PGsyncUserAlter
}
if addNewFlags, equal := util.SubstractStringSlices(newUser.Flags, dbUser.Flags); !equal {
r.User.Flags = addNewFlags
r.Kind = spec.PGsyncUserAlter
}
}
if addNewRoles, equal := util.SubstractStringSlices(newUser.MemberOf, dbUser.MemberOf); !equal {
r.User.MemberOf = addNewRoles
r.Kind = spec.PGsyncUserAlter
}
if addNewFlags, equal := util.SubstractStringSlices(newUser.Flags, dbUser.Flags); !equal {
r.User.Flags = addNewFlags
r.Kind = spec.PGsyncUserAlter
}
if r.Kind == spec.PGsyncUserAlter {
r.User.Name = newUser.Name
reqs = append(reqs, r)
}
if len(newUser.Parameters) > 0 && !reflect.DeepEqual(dbUser.Parameters, newUser.Parameters) {
if newUser.Origin != spec.RoleOriginSpilo &&
len(newUser.Parameters) > 0 &&
!reflect.DeepEqual(dbUser.Parameters, newUser.Parameters) {
reqs = append(reqs, spec.PgSyncUserRequest{Kind: spec.PGSyncAlterSet, User: newUser})
}
}
@ -126,6 +126,7 @@ edit
if (i.metadata.selfLink) { delete i.metadata.selfLink }
if (i.metadata.uid) { delete i.metadata.uid }
if (i.metadata.resourceVersion) { delete i.metadata.resourceVersion }
if (i.metadata.managedFields) { delete i.metadata.managedFields }

this.update()
this.refs.yamlNice.innerHTML = yamlParser.safeDump(i.postgresql, {sortKeys: true})

@ -138,7 +139,15 @@ edit
o.spec.enableMasterLoadBalancer = i.spec.enableMasterLoadBalancer || false
o.spec.enableReplicaLoadBalancer = i.spec.enableReplicaLoadBalancer || false
o.spec.enableConnectionPooler = i.spec.enableConnectionPooler || false
o.spec.volume = { size: i.spec.volume.size }

o.spec.volume = {
size: i.spec.volume.size,
throughput: i.spec.volume.throughput || 125,
iops: i.spec.volume.iops || 3000
}

o.spec.postgresql = {}
o.spec.postgresql.version = i.spec.postgresql.version

if ('users' in i.spec && typeof i.spec.users === 'object') {
o.spec.users = Object.mapValues(i.spec.users, roleFlags =>

@ -166,7 +175,7 @@ edit
].forEach(resourceType => {
if (resourceType in resources) {
const resourceClaim = resources[resourceType]
if (typeof resourceClaim === '') {
if (typeof resourceClaim === 'string') {
o.spec.resources[section][resourceType] = resources[resourceType]
}
}
@ -13,6 +13,6 @@ help-general
h3 Basics

p.
The PostgreSQL operator will use your definition to create a new
The Postgres Operator will use your definition to create a new
PostgreSQL cluster for you. You can either copy the yaml definition
to the repository or you can just hit create cluster.
to a repository or hit create cluster (not available in prod).
@ -266,6 +266,37 @@ new
)
.input-group-addon
.input-units Gi
tr
td
td Specify Iops and Throughput only if you need more than the default 3000 Iops and 125Mb/s EBS provides.

tr
td Iops
td
.input-group
input.form-control(
ref='iops'
type='number'
value='{ iops }'
onchange='{ iopsChange }'
onkeyup='{ iopsChange }'
)
.input-group-addon
.input-units

tr
td Throughput
td
.input-group
input.form-control(
ref='throughput'
type='number'
value='{ throughput }'
onchange='{ throughputChange }'
onkeyup='{ throughputChange }'
)
.input-group-addon
.input-units MB/s

tr(if='{ config.users_visible }')
td

@ -509,7 +540,9 @@ new
enableConnectionPooler: true
{{/if}}
volume:
size: "{{ volumeSize }}Gi"
size: "{{ volumeSize }}Gi"{{#if iops}}
iops: {{ iops }}{{/if}}{{#if throughput}}
throughput: {{ throughput }}{{/if}}
{{#if users}}
users:{{#each users}}
{{ state }}: []{{/each}}{{/if}}

@ -560,6 +593,8 @@ new
enableReplicaLoadBalancer: this.enableReplicaLoadBalancer,
enableConnectionPooler: this.enableConnectionPooler,
volumeSize: this.volumeSize,
iops: this.iops,
throughput: this.throughput,
users: this.users.valids,
databases: this.databases.valids,
ranges: this.ranges,

@ -624,6 +659,14 @@ new
this.volumeSize = +e.target.value
}

this.iopsChange = e => {
this.iops = +e.target.value
}

this.throughputChange = e => {
this.throughput = +e.target.value
}

this.updateDNSName = () => {
this.dnsName = this.config.dns_format_string.format(
this.name,
@ -69,7 +69,7 @@ postgresqls
td { cpu } / { cpu_limit }
td { memory } / { memory_limit }
td { volume_size }
td { calcCosts(nodes, cpu, memory, volume_size) }$
td { calcCosts(nodes, cpu, memory, volume_size, iops, throughput) }$

td

@ -152,7 +152,7 @@ postgresqls
td { cpu } / { cpu_limit }
td { memory } / { memory_limit }
td { volume_size }
td { calcCosts(nodes, cpu, memory, volume_size) }$
td { calcCosts(nodes, cpu, memory, volume_size, iops, throughput) }$

td

@ -227,9 +227,22 @@ postgresqls
+ '/' + encodeURI(cluster.name)
)

const calcCosts = this.calcCosts = (nodes, cpu, memory, disk) => {
costs = Math.max(nodes, opts.config.min_pods) * (toCores(cpu) * opts.config.cost_core + toMemory(memory) * opts.config.cost_memory + toDisk(disk) * opts.config.cost_ebs)
return costs.toFixed(2)
const calcCosts = this.calcCosts = (nodes, cpu, memory, disk, iops, throughput) => {
podcount = Math.max(nodes, opts.config.min_pods)
corecost = toCores(cpu) * opts.config.cost_core
memorycost = toMemory(memory) * opts.config.cost_memory
diskcost = toDisk(disk) * opts.config.cost_ebs
iopscost = 0
if (iops !== undefined && iops > 3000) {
iopscost = (iops - 3000) * opts.config.cost_iops
}
throughputcost = 0
if (throughput !== undefined && throughput > 125) {
throughputcost = (throughput - 125) * opts.config.cost_throughput
}

costs = podcount * (corecost + memorycost + diskcost + iopscost + throughputcost)
return costs.toFixed(2)
}

const toDisk = this.toDisk = value => {

@ -253,6 +266,11 @@ postgresqls
value = Number(value)
return value
}
else if(value.endsWith("Ti")) {
value = value.substring(0, value.length-2)
value = Number(value) * 1000
return value
}

return value
}
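
A hedged Go restatement of the cost formula above (the UI itself is JavaScript; names here are illustrative): only IOPS above the 3000 baseline and throughput above 125 MB/s are billed, and the per-pod cost is multiplied by max(nodes, min_pods).

	// illustrative only; rates correspond to cost_core, cost_memory, cost_ebs, cost_iops, cost_throughput
	type rates struct {
		Core, Memory, EBS, IOPS, Throughput float64
	}

	func estimateMonthlyCost(nodes, minPods, cores, memoryGi, diskGi, iops, throughput float64, r rates) float64 {
		podCount := math.Max(nodes, minPods)
		perPod := cores*r.Core + memoryGi*r.Memory + diskGi*r.EBS
		if iops > 3000 {
			perPod += (iops - 3000) * r.IOPS
		}
		if throughput > 125 {
			perPod += (throughput - 125) * r.Throughput
		}
		return podCount * perPod
	}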
@ -62,7 +62,9 @@ spec:
"replica_load_balancer_visible": true,
"resources_visible": true,
"users_visible": true,
"cost_ebs": 0.119,
"cost_ebs": 0.0952,
"cost_iops": 0.006,
"cost_throughput": 0.0476,
"cost_core": 0.0575,
"cost_memory": 0.014375,
"postgresql_versions": [
@ -6,6 +6,7 @@ metadata:
labels:
application: "postgres-operator-ui"
spec:
# ingressClassName: "ingress-nginx"
rules:
- host: "ui.example.org"
http:
@ -89,8 +89,10 @@ TARGET_NAMESPACE = getenv('TARGET_NAMESPACE')
GOOGLE_ANALYTICS = getenv('GOOGLE_ANALYTICS', False)
MIN_PODS= getenv('MIN_PODS', 2)

# storage pricing, i.e. https://aws.amazon.com/ebs/pricing/
COST_EBS = float(getenv('COST_EBS', 0.119)) # GB per month
# storage pricing, i.e. https://aws.amazon.com/ebs/pricing/ (e.g. Europe - Frankfurt)
COST_EBS = float(getenv('COST_EBS', 0.0952)) # GB per month
COST_IOPS = float(getenv('COST_IOPS', 0.006)) # IOPS per month above 3000 baseline
COST_THROUGHPUT = float(getenv('COST_THROUGHPUT', 0.0476)) # MB/s per month above 125 MB/s baseline

# compute costs, i.e. https://www.ec2instances.info/?region=eu-central-1&selected=m5.2xlarge
COST_CORE = 30.5 * 24 * float(getenv('COST_CORE', 0.0575)) # Core per hour m5.2xlarge / 8.

@ -308,6 +310,8 @@ DEFAULT_UI_CONFIG = {
'pgui_link': '',
'static_network_whitelist': {},
'cost_ebs': COST_EBS,
'cost_iops': COST_IOPS,
'cost_throughput': COST_THROUGHPUT,
'cost_core': COST_CORE,
'cost_memory': COST_MEMORY,
'min_pods': MIN_PODS

@ -487,6 +491,8 @@ def get_postgresqls():
'cpu': spec.get('resources', {}).get('requests', {}).get('cpu', 0),
'cpu_limit': spec.get('resources', {}).get('limits', {}).get('cpu', 0),
'volume_size': spec.get('volume', {}).get('size', 0),
'iops': spec.get('volume', {}).get('iops', 3000),
'throughput': spec.get('volume', {}).get('throughput', 125),
'team': (
spec.get('teamId') or
metadata.get('labels', {}).get('team', '')

@ -614,6 +620,28 @@ def update_postgresql(namespace: str, cluster: str):
spec['volume'] = {'size': size}

if (
'volume' in postgresql['spec']
and 'iops' in postgresql['spec']['volume']
and postgresql['spec']['volume']['iops'] != None
):
iops = int(postgresql['spec']['volume']['iops'])
if not 'volume' in spec:
spec['volume'] = {}

spec['volume']['iops'] = iops

if (
'volume' in postgresql['spec']
and 'throughput' in postgresql['spec']['volume']
and postgresql['spec']['volume']['throughput'] != None
):
throughput = int(postgresql['spec']['volume']['throughput'])
if not 'volume' in spec:
spec['volume'] = {}

spec['volume']['throughput'] = throughput

if 'enableConnectionPooler' in postgresql['spec']:
cp = postgresql['spec']['enableConnectionPooler']
if not cp:

@ -758,6 +786,27 @@ def update_postgresql(namespace: str, cluster: str):
owner_username=owner_username,
)

resource_types = ["cpu","memory"]
resource_constraints = ["requests","limits"]
if "resources" in postgresql["spec"]:
spec["resources"] = {}

res = postgresql["spec"]["resources"]
for rt in resource_types:
for rc in resource_constraints:
if rc in res:
if rt in res[rc]:
if not rc in spec["resources"]:
spec["resources"][rc] = {}
spec["resources"][rc][rt] = res[rc][rt]

if "postgresql" in postgresql["spec"]:
if "version" in postgresql["spec"]["postgresql"]:
if "postgresql" not in spec:
spec["postgresql"]={}

spec["postgresql"]["version"] = postgresql["spec"]["postgresql"]["version"]

o['spec'].update(spec)

apply_postgresql(get_cluster(), namespace, cluster, o)
@ -19,7 +19,9 @@ default_operator_ui_config='{
"nat_gateways_visible": false,
"resources_visible": true,
"users_visible": true,
"cost_ebs": 0.119,
"cost_ebs": 0.0952,
"cost_iops": 0.006,
"cost_throughput": 0.0476,
"cost_core": 0.0575,
"cost_memory": 0.014375,
"postgresql_versions": [