Merge branch 'master' into gh-pages
commit bf502ce248
@@ -9,7 +9,7 @@ assignees: ''
 
 Please, answer some short questions which should help us to understand your problem / question better?
 
-- **Which image of the operator are you using?** e.g. registry.opensource.zalan.do/acid/postgres-operator:v1.6.1
+- **Which image of the operator are you using?** e.g. registry.opensource.zalan.do/acid/postgres-operator:v1.6.2
 - **Where do you run it - cloud or metal? Kubernetes or OpenShift?** [AWS K8s | GCP ... | Bare Metal K8s]
 - **Are you running Postgres Operator in production?** [yes | no]
 - **Type of issue?** [Bug report, question, feature request, etc.]
@@ -65,7 +65,7 @@ We introduce the major version into the backup path to smoothen the [major versi
 The new operator configuration can set a compatibility flag *enable_spilo_wal_path_compat* to make Spilo look for wal segments in the current path but also old format paths.
 This comes at potential performance costs and should be disabled after a few days.
 
-The newest Spilo 13 image is: `registry.opensource.zalan.do/acid/spilo-13:2.0-p4`
+The newest Spilo 13 image is: `registry.opensource.zalan.do/acid/spilo-13:2.0-p6`
 
 The last Spilo 12 image is: `registry.opensource.zalan.do/acid/spilo-12:1.6-p5`
 
@@ -1,7 +1,7 @@
 apiVersion: v1
 name: postgres-operator-ui
-version: 1.6.1
-appVersion: 1.6.1
+version: 1.6.2
+appVersion: 1.6.2
 home: https://github.com/zalando/postgres-operator
 description: Postgres Operator UI provides a graphical interface for a convenient database-as-a-service user experience
 keywords:
@@ -1,9 +1,31 @@
 apiVersion: v1
 entries:
   postgres-operator-ui:
+  - apiVersion: v1
+    appVersion: 1.6.2
+    created: "2021-03-29T15:05:15.495911278+02:00"
+    description: Postgres Operator UI provides a graphical interface for a convenient database-as-a-service user experience
+    digest: 7adfb8fad3c1ac8038bb9367aec8fbeed04e590631c00a2ecaee7aca6b222520
+    home: https://github.com/zalando/postgres-operator
+    keywords:
+    - postgres
+    - operator
+    - ui
+    - cloud-native
+    - patroni
+    - spilo
+    maintainers:
+    - email: opensource@zalando.de
+      name: Zalando
+    name: postgres-operator-ui
+    sources:
+    - https://github.com/zalando/postgres-operator
+    urls:
+    - postgres-operator-ui-1.6.2.tgz
+    version: 1.6.2
   - apiVersion: v1
     appVersion: 1.6.1
-    created: "2021-02-19T12:19:43.9076945+01:00"
+    created: "2021-03-29T15:05:15.495347181+02:00"
     description: Postgres Operator UI provides a graphical interface for a convenient database-as-a-service user experience
     digest: 3d321352f2f1e7bb7450aa8876e3d818aa9f9da9bd4250507386f0490f2c1969
     home: https://github.com/zalando/postgres-operator
@@ -25,9 +47,9 @@ entries:
     version: 1.6.1
   - apiVersion: v1
     appVersion: 1.6.0
-    created: "2021-02-19T12:19:43.907164331+01:00"
+    created: "2021-03-29T15:05:15.494822657+02:00"
     description: Postgres Operator UI provides a graphical interface for a convenient database-as-a-service user experience
-    digest: 7bc10d08b25fa423b85a26a1b4c6f6709c5a9d374fa833b44629dc0713f17529
+    digest: 1e0aa1e7db3c1daa96927ffbf6fdbcdb434562f961833cb5241ddbe132220ee4
     home: https://github.com/zalando/postgres-operator
     keywords:
     - postgres
@@ -47,7 +69,7 @@ entries:
     version: 1.6.0
   - apiVersion: v1
     appVersion: 1.5.0
-    created: "2021-02-19T12:19:43.906550454+01:00"
+    created: "2021-03-29T15:05:15.494293759+02:00"
     description: Postgres Operator UI provides a graphical interface for a convenient database-as-a-service user experience
     digest: c91ea39e6d51d57f4048fb1b6ec53b40823f2690eb88e4e4f1a036367b9fdd61
     home: https://github.com/zalando/postgres-operator
@@ -67,4 +89,4 @@ entries:
     urls:
     - postgres-operator-ui-1.5.0.tgz
     version: 1.5.0
-generated: "2021-02-19T12:19:43.905861603+01:00"
+generated: "2021-03-29T15:05:15.493586164+02:00"

Binary file not shown.
@@ -18,7 +18,7 @@ spec:
       labels:
         app.kubernetes.io/name: {{ template "postgres-operator-ui.name" . }}
         app.kubernetes.io/instance: {{ .Release.Name }}
-        team: "acid" # Parameterize?
+        team: "{{ join "," .Values.envs.teams }}"
     spec:
       serviceAccountName: {{ include "postgres-operator-ui.serviceAccountName" . }}
       {{- if .Values.imagePullSecrets }}
@@ -54,7 +54,10 @@ spec:
         - name: "TEAMS"
           value: |-
             [
-              "acid"
+              {{- range(initial .Values.envs.teams) }}
+              {{ . | quote }},
+              {{- end }}
+              {{ last .Values.envs.teams | quote }}
             ]
         - name: "OPERATOR_UI_CONFIG"
           value: |-
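For illustration, here is roughly how the new `initial`/`last` pipeline renders once more than one team is configured. A sketch: the chart path, release name, and team names are assumptions, and the output shape is approximate, not verbatim chart output:

```bash
# Render the UI chart with two teams and inspect the TEAMS env var.
helm template ui ./charts/postgres-operator-ui \
  --set 'envs.teams={acid,b-team}' | grep -A 7 'name: "TEAMS"'

# Expected shape of the rendered value (approximately):
#   - name: "TEAMS"
#     value: |-
#       [
#         "acid",
#         "b-team"
#       ]
```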
@@ -8,7 +8,7 @@ replicaCount: 1
 image:
   registry: registry.opensource.zalan.do
   repository: acid/postgres-operator-ui
-  tag: v1.6.1
+  tag: v1.6.2
   pullPolicy: "IfNotPresent"
 
 # Optionally specify an array of imagePullSecrets.
@@ -45,6 +45,8 @@ envs:
   operatorClusterNameLabel: "cluster-name"
   resourcesVisible: "False"
   targetNamespace: "default"
+  teams:
+  - "acid"
 
 # configure UI service
 service:
@@ -1,7 +1,7 @@
 apiVersion: v1
 name: postgres-operator
-version: 1.6.1
-appVersion: 1.6.1
+version: 1.6.2
+appVersion: 1.6.2
 home: https://github.com/zalando/postgres-operator
 description: Postgres Operator creates and manages PostgreSQL clusters running in Kubernetes
 keywords:
@@ -65,7 +65,7 @@ spec:
           properties:
             docker_image:
               type: string
-              default: "registry.opensource.zalan.do/acid/spilo-13:2.0-p4"
+              default: "registry.opensource.zalan.do/acid/spilo-13:2.0-p6"
             enable_crd_validation:
               type: boolean
               default: true
@@ -127,6 +127,18 @@ spec:
                 super_username:
                   type: string
                   default: postgres
+            major_version_upgrade:
+              type: object
+              properties:
+                major_version_upgrade_mode:
+                  type: string
+                  default: "off"
+                minimal_major_version:
+                  type: string
+                  default: "9.5"
+                target_major_version:
+                  type: string
+                  default: "13"
             kubernetes:
               type: object
               properties:
|
|||
secret_name_template:
|
||||
type: string
|
||||
default: "{username}.{cluster}.credentials.{tprkind}.{tprgroup}"
|
||||
spilo_allow_privilege_escalation:
|
||||
type: boolean
|
||||
default: true
|
||||
spilo_runasuser:
|
||||
type: integer
|
||||
spilo_runasgroup:
|
||||
|
|
@ -382,7 +397,7 @@ spec:
|
|||
properties:
|
||||
logical_backup_docker_image:
|
||||
type: string
|
||||
default: "registry.opensource.zalan.do/acid/logical-backup:v1.6.1"
|
||||
default: "registry.opensource.zalan.do/acid/logical-backup:v1.6.2"
|
||||
logical_backup_google_application_credentials:
|
||||
type: string
|
||||
logical_backup_job_prefix:
|
||||
|
|
@@ -511,7 +526,7 @@ spec:
                   default: "pooler"
                 connection_pooler_image:
                   type: string
-                  default: "registry.opensource.zalan.do/acid/pgbouncer:master-14"
+                  default: "registry.opensource.zalan.do/acid/pgbouncer:master-16"
                 connection_pooler_max_db_connections:
                   type: integer
                   default: 60
@@ -1,9 +1,30 @@
 apiVersion: v1
 entries:
   postgres-operator:
+  - apiVersion: v1
+    appVersion: 1.6.2
+    created: "2021-03-30T17:00:50.171986449+02:00"
+    description: Postgres Operator creates and manages PostgreSQL clusters running in Kubernetes
+    digest: d886f8a0879ca07d1e5246ee7bc55710e1c872f3977280fe495db6fc2057a7f4
+    home: https://github.com/zalando/postgres-operator
+    keywords:
+    - postgres
+    - operator
+    - cloud-native
+    - patroni
+    - spilo
+    maintainers:
+    - email: opensource@zalando.de
+      name: Zalando
+    name: postgres-operator
+    sources:
+    - https://github.com/zalando/postgres-operator
+    urls:
+    - postgres-operator-1.6.2.tgz
+    version: 1.6.2
   - apiVersion: v1
     appVersion: 1.6.1
-    created: "2021-02-19T12:13:13.262839018+01:00"
+    created: "2021-03-30T17:00:50.170294515+02:00"
     description: Postgres Operator creates and manages PostgreSQL clusters running in Kubernetes
     digest: 4ba5972cd486dcaa2d11c5613a6f97f6b7b831822e610fe9e10a57ea1db23556
     home: https://github.com/zalando/postgres-operator
@@ -24,9 +45,9 @@ entries:
     version: 1.6.1
   - apiVersion: v1
     appVersion: 1.6.0
-    created: "2021-02-19T12:13:13.260413425+01:00"
+    created: "2021-03-30T17:00:50.168493689+02:00"
     description: Postgres Operator creates and manages PostgreSQL clusters running in Kubernetes
-    digest: 1b4c3892335a207f4719c3817186e98b6105398ab45dafc4274dac837ec9bf31
+    digest: f52149718ea364f46b4b9eec9a65f6253ad182bb78df541d14cd5277b9c8a8c3
     home: https://github.com/zalando/postgres-operator
     keywords:
     - postgres
@@ -45,7 +66,7 @@ entries:
     version: 1.6.0
   - apiVersion: v1
     appVersion: 1.5.0
-    created: "2021-02-19T12:13:13.257505579+01:00"
+    created: "2021-03-30T17:00:50.166722286+02:00"
     description: Postgres Operator creates and manages PostgreSQL clusters running in Kubernetes
     digest: 198351d5db52e65cdf383d6f3e1745d91ac1e2a01121f8476f8b1be728b09531
     home: https://github.com/zalando/postgres-operator
@@ -64,4 +85,4 @@ entries:
     urls:
     - postgres-operator-1.5.0.tgz
     version: 1.5.0
-generated: "2021-02-19T12:13:13.25598107+01:00"
+generated: "2021-03-30T17:00:50.165166707+02:00"

Binary file not shown.
@@ -15,6 +15,7 @@ data:
   pod_service_account_name: {{ include "postgres-pod.serviceAccountName" . }}
 {{ toYaml .Values.configGeneral | indent 2 }}
 {{ toYaml .Values.configUsers | indent 2 }}
+{{ toYaml .Values.configMajorVersionUpgrade | indent 2 }}
 {{ toYaml .Values.configKubernetes | indent 2 }}
 {{ toYaml .Values.configTimeouts | indent 2 }}
 {{ toYaml .Values.configLoadBalancer | indent 2 }}
@@ -12,6 +12,8 @@ configuration:
 {{ toYaml .Values.configGeneral | indent 2 }}
   users:
 {{ toYaml .Values.configUsers | indent 4 }}
+  major_version_upgrade:
+{{ toYaml .Values.configMajorVersionUpgrade | indent 4 }}
   kubernetes:
   {{- if .Values.podPriorityClassName }}
     pod_priority_class_name: {{ .Values.podPriorityClassName }}
@@ -1,7 +1,7 @@
 image:
   registry: registry.opensource.zalan.do
   repository: acid/postgres-operator
-  tag: v1.6.1
+  tag: v1.6.2
   pullPolicy: "IfNotPresent"
 
 # Optionally specify an array of imagePullSecrets.
@@ -32,10 +32,10 @@ configGeneral:
   # Select if setup uses endpoints (default), or configmaps to manage leader (DCS=k8s)
   # kubernetes_use_configmaps: false
   # Spilo docker image
-  docker_image: registry.opensource.zalan.do/acid/spilo-13:2.0-p4
-  # max number of instances in Postgres cluster. -1 = no limit
-  min_instances: -1
+  docker_image: registry.opensource.zalan.do/acid/spilo-13:2.0-p6
+  # min number of instances in Postgres cluster. -1 = no limit
+  min_instances: -1
   # max number of instances in Postgres cluster. -1 = no limit
   max_instances: -1
   # period between consecutive repair requests
   repair_period: 5m
@@ -58,6 +58,14 @@ configUsers:
   # postgres superuser name to be created by initdb
   super_username: postgres
 
+configMajorVersionUpgrade:
+  # "off": no upgrade, "manual": manifest triggers action, "full": minimal version violation triggers too
+  major_version_upgrade_mode: "off"
+  # minimal Postgres major version that will not automatically be upgraded
+  minimal_major_version: "9.5"
+  # target Postgres major version when upgrading clusters automatically
+  target_major_version: "13"
+
 configKubernetes:
   # list of additional capabilities for postgres container
   # additional_pod_capabilities:
@@ -147,6 +155,9 @@ configKubernetes:
 
   # whether the Spilo container should run in privileged mode
   spilo_privileged: false
+  # whether the Spilo container should run with additional permissions other than parent.
+  # required by cron which needs setuid
+  spilo_allow_privilege_escalation: true
   # storage resize strategy, available options are: ebs, pvc, off
   storage_resize_mode: pvc
   # operator watches for postgres objects in the given namespace
@@ -252,7 +263,7 @@ configAwsOrGcp:
 # configure K8s cron job managed by the operator
 configLogicalBackup:
   # image for pods of the logical backup job (example runs pg_dumpall)
-  logical_backup_docker_image: "registry.opensource.zalan.do/acid/logical-backup:v1.6.1"
+  logical_backup_docker_image: "registry.opensource.zalan.do/acid/logical-backup:v1.6.2"
   # path of google cloud service account json file
   # logical_backup_google_application_credentials: ""
 
@@ -315,7 +326,7 @@ configConnectionPooler:
   # db user for pooler to use
   connection_pooler_user: "pooler"
   # docker image
-  connection_pooler_image: "registry.opensource.zalan.do/acid/pgbouncer:master-14"
+  connection_pooler_image: "registry.opensource.zalan.do/acid/pgbouncer:master-16"
   # max db connections the pooler should hold
   connection_pooler_max_db_connections: 60
   # default pooling mode
@@ -1,7 +1,7 @@
 image:
   registry: registry.opensource.zalan.do
   repository: acid/postgres-operator
-  tag: v1.6.1
+  tag: v1.6.2
   pullPolicy: "IfNotPresent"
 
 # Optionally specify an array of imagePullSecrets.
@@ -35,10 +35,10 @@ configGeneral:
   # Select if setup uses endpoints (default), or configmaps to manage leader (DCS=k8s)
   # kubernetes_use_configmaps: "false"
   # Spilo docker image
-  docker_image: registry.opensource.zalan.do/acid/spilo-13:2.0-p4
-  # max number of instances in Postgres cluster. -1 = no limit
-  min_instances: "-1"
+  docker_image: registry.opensource.zalan.do/acid/spilo-13:2.0-p6
+  # min number of instances in Postgres cluster. -1 = no limit
+  min_instances: "-1"
   # max number of instances in Postgres cluster. -1 = no limit
   max_instances: "-1"
   # period between consecutive repair requests
   repair_period: 5m
@@ -60,6 +60,14 @@ configUsers:
   # postgres superuser name to be created by initdb
   super_username: postgres
 
+configMajorVersionUpgrade:
+  # "off": no upgrade, "manual": manifest triggers action, "full": minimal version violation triggers too
+  major_version_upgrade_mode: "off"
+  # minimal Postgres major version that will not automatically be upgraded
+  minimal_major_version: "9.5"
+  # target Postgres major version when upgrading clusters automatically
+  target_major_version: "13"
+
 configKubernetes:
   # list of additional capabilities for postgres container
   # additional_pod_capabilities: "SYS_NICE"
@@ -139,6 +147,9 @@ configKubernetes:
 
   # whether the Spilo container should run in privileged mode
   spilo_privileged: "false"
+  # whether the Spilo container should run with additional permissions other than parent.
+  # required by cron which needs setuid
+  spilo_allow_privilege_escalation: "true"
   # storage resize strategy, available options are: ebs, pvc, off
   storage_resize_mode: pvc
   # operator watches for postgres objects in the given namespace
@@ -242,7 +253,7 @@ configAwsOrGcp:
 # configure K8s cron job managed by the operator
 configLogicalBackup:
   # image for pods of the logical backup job (example runs pg_dumpall)
-  logical_backup_docker_image: "registry.opensource.zalan.do/acid/logical-backup:v1.6.1"
+  logical_backup_docker_image: "registry.opensource.zalan.do/acid/logical-backup:v1.6.2"
   # path of google cloud service account json file
   # logical_backup_google_application_credentials: ""
 
@@ -309,7 +320,7 @@ configConnectionPooler:
   # db user for pooler to use
   connection_pooler_user: "pooler"
   # docker image
-  connection_pooler_image: "registry.opensource.zalan.do/acid/pgbouncer:master-14"
+  connection_pooler_image: "registry.opensource.zalan.do/acid/pgbouncer:master-16"
   # max db connections the pooler should hold
   connection_pooler_max_db_connections: "60"
   # default pooling mode
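The same `configMajorVersionUpgrade` block can be supplied on the command line instead of editing the values file. A minimal sketch, assuming the chart is installed from a local checkout; release name and chart path are placeholders:

```bash
# Override the new upgrade options at install/upgrade time.
helm upgrade --install postgres-operator ./charts/postgres-operator \
  --set configMajorVersionUpgrade.major_version_upgrade_mode="manual" \
  --set configMajorVersionUpgrade.target_major_version="13"
```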
@@ -11,30 +11,43 @@ switchover (planned failover) of the master to the Pod with new minor version.
 The switch should usually take less than 5 seconds, still clients have to
 reconnect.
 
-Major version upgrades are supported either via [cloning](user.md#how-to-clone-an-existing-postgresql-cluster)
-or in-place.
+### Upgrade on cloning
 
-With cloning, the new cluster manifest must have a higher `version` string than
-the source cluster and will be created from a basebackup. Depending of the
-cluster size, downtime in this case can be significant as writes to the database
-should be stopped and all WAL files should be archived first before cloning is
-started.
+With [cloning](user.md#how-to-clone-an-existing-postgresql-cluster), the new
+cluster manifest must have a higher `version` string than the source cluster
+and will be created from a basebackup. Depending on the cluster size, downtime
+in this case can be significant as writes to the database should be stopped
+and all WAL files should be archived first before cloning is started.
+Therefore, use cloning only to test major version upgrades and check for
+compatibility of your app with a Postgres server of a higher version.
 
-Starting with Spilo 13, Postgres Operator can do in-place major version upgrade,
-which should be faster than cloning. However, it is not fully automatic yet.
-First, you need to make sure, that setting the `PGVERSION` environment variable
-is enabled in the configuration. Since `v1.6.0`, `enable_pgversion_env_var` is
-enabled by default.
+### In-place major version upgrade
 
-To trigger the upgrade, increase the version in the cluster manifest. After
-Pods are rotated `configure_spilo` will notice the version mismatch and start
-the old version again. You can then exec into the Postgres container of the
-master instance and call `python3 /scripts/inplace_upgrade.py N` where `N`
-is the number of members of your cluster (see [`numberOfInstances`](https://github.com/zalando/postgres-operator/blob/50cb5898ea715a1db7e634de928b2d16dc8cd969/manifests/minimal-postgres-manifest.yaml#L10)).
+Starting with Spilo 13, Postgres Operator can run an in-place major version
+upgrade which is much faster than cloning. First, you need to make sure, that
+the `PGVERSION` environment variable is set for the database pods. Since
+`v1.6.0` the related option `enable_pgversion_env_var` is enabled by default.
+
+In-place major version upgrades can be configured to be executed by the
+operator with the `major_version_upgrade_mode` option. By default it is set
+to `off` which means the cluster version will not change when increased in
+the manifest. Still, a rolling update would be triggered updating the
+`PGVERSION` variable. But Spilo's [`configure_spilo`](https://github.com/zalando/spilo/blob/master/postgres-appliance/scripts/configure_spilo.py)
+script will notice the version mismatch and start the old version again.
+
+In this scenario the major version upgrade could then be run by a user from
+within the master pod. Exec into the container and run:
+```bash
+python3 /scripts/inplace_upgrade.py N
+```
+where `N` is the number of members of your cluster (see [`numberOfInstances`](https://github.com/zalando/postgres-operator/blob/50cb5898ea715a1db7e634de928b2d16dc8cd969/manifests/minimal-postgres-manifest.yaml#L10)).
+The upgrade is usually fast, well under one minute for most DBs. Note, that
+changes become irreversible once `pg_upgrade` is called. To understand the
+upgrade procedure, refer to the [corresponding PR in Spilo](https://github.com/zalando/spilo/pull/488).
+
+When `major_version_upgrade_mode` is set to `manual` the operator will run
+the upgrade script for you after the manifest is updated and pods are rotated.
 
 ## CRD Validation
 
 [CustomResourceDefinitions](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/#customresourcedefinitions)
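As a concrete illustration of the manual step described above, one might exec into the Spilo container of the master pod like this. A sketch: the cluster name, pod name, container name, and member count are placeholders, not values from this diff:

```bash
# For a 2-instance cluster named acid-minimal-cluster whose master is pod -0.
kubectl exec -it acid-minimal-cluster-0 -c postgres -- \
  python3 /scripts/inplace_upgrade.py 2
```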
@@ -937,7 +950,7 @@ make docker
 # build an image in minikube docker env
 eval $(minikube docker-env)
-docker build -t registry.opensource.zalan.do/acid/postgres-operator-ui:v1.6.1 .
+docker build -t registry.opensource.zalan.do/acid/postgres-operator-ui:v1.6.2 .
 
 # apply UI manifests next to a running Postgres Operator
 kubectl apply -f manifests/
@@ -7,7 +7,7 @@ features and tests.
 Postgres Operator is written in Go. Use the [installation instructions](https://golang.org/doc/install#install)
 if you don't have Go on your system. You won't be able to compile the operator
-with Go older than 1.7. We recommend installing [the latest one](https://golang.org/dl/).
+with Go older than 1.15. We recommend installing [the latest one](https://golang.org/dl/).
 
 Go projects expect their source code and all the dependencies to be located
 under the [GOPATH](https://github.com/golang/go/wiki/GOPATH). Normally, one
@@ -170,6 +170,29 @@ under the `users` key.
   Postgres username used for replication between instances. The default is
   `standby`.
 
+## Major version upgrades
+
+Parameters configuring automatic major version upgrades. In a
+CRD-configuration, they are grouped under the `major_version_upgrade` key.
+
+* **major_version_upgrade_mode**
+  Postgres Operator supports [in-place major version upgrade](../administrator.md#in-place-major-version-upgrade)
+  with three different modes:
+  `"off"` = no upgrade by the operator,
+  `"manual"` = manifest triggers action,
+  `"full"` = manifest and minimal version violation trigger upgrade.
+  Note, that with all three modes increasing the version in the manifest will
+  trigger a rolling update of the pods. The default is `"off"`.
+
+* **minimal_major_version**
+  The minimal Postgres major version that will not automatically be upgraded
+  when `major_version_upgrade_mode` is set to `"full"`. The default is `"9.5"`.
+
+* **target_major_version**
+  The target Postgres major version when upgrading clusters automatically
+  which violate the configured allowed `minimal_major_version` when
+  `major_version_upgrade_mode` is set to `"full"`. The default is `"13"`.
+
 ## Kubernetes resources
 
 Parameters to configure cluster-related Kubernetes objects created by the
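To illustrate how these three parameters fit together, a hedged sketch of patching the `OperatorConfiguration` resource at runtime; the resource name `postgresql-operator-default-configuration` is an assumption from a default installation:

```bash
# Switch the operator to manual upgrade mode via a merge patch.
kubectl patch operatorconfiguration postgresql-operator-default-configuration \
  --type merge \
  -p '{"configuration": {"major_version_upgrade": {"major_version_upgrade_mode": "manual"}}}'
```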
@@ -351,6 +374,11 @@ configuration they are grouped under the `kubernetes` key.
   used for AWS volume resizing and not required if you don't need that
   capability. The default is `false`.
 
+* **spilo_allow_privilege_escalation**
+  Controls whether a process can gain more privileges than its parent
+  process. Required by cron which needs setuid. Without this parameter,
+  certificate rotation & backups will not be done. The default is `true`.
+
 * **additional_pod_capabilities**
   list of additional capabilities to be added to the postgres container's
   SecurityContext (e.g. SYS_NICE etc.). Please, make sure first that the
@@ -565,7 +593,7 @@ grouped under the `logical_backup` key.
   runs `pg_dumpall` on a replica if possible and uploads compressed results to
   an S3 bucket under the key `/spilo/pg_cluster_name/cluster_k8s_uuid/logical_backups`.
   The default image is the same image built with the Zalando-internal CI
-  pipeline. Default: "registry.opensource.zalan.do/acid/logical-backup:v1.6.1"
+  pipeline. Default: "registry.opensource.zalan.do/acid/logical-backup:v1.6.2"
 
 * **logical_backup_google_application_credentials**
   Specifies the path of the google cloud service account json file. Default is empty.
@@ -646,7 +646,13 @@ spec:
 
 ## In-place major version upgrade
 
-Starting with Spilo 13, operator supports in-place major version upgrade to a higher major version (e.g. from PG 10 to PG 12). To trigger the upgrade, simply increase the version in the manifest. It is your responsibility to test your applications against the new version before the upgrade; downgrading is not supported. The easiest way to do so is to try the upgrade on the cloned cluster first. For details of how Spilo does the upgrade [see here](https://github.com/zalando/spilo/pull/488), operator implementation is described [in the admin docs](administrator.md#minor-and-major-version-upgrade).
+Starting with Spilo 13, operator supports in-place major version upgrade to a
+higher major version (e.g. from PG 10 to PG 12). To trigger the upgrade,
+simply increase the version in the manifest. It is your responsibility to test
+your applications against the new version before the upgrade; downgrading is
+not supported. The easiest way to do so is to try the upgrade on the cloned
+cluster first (see next chapter). More details can be found in the
+[admin docs](administrator.md#minor-and-major-version-upgrade).
 
 ## How to clone an existing PostgreSQL cluster
 
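For example, increasing the version can be done with a one-line patch instead of editing the manifest file. A sketch; the cluster name is a placeholder:

```bash
# Bump the major version of a running cluster named acid-minimal-cluster.
kubectl patch postgresql acid-minimal-cluster --type merge \
  -p '{"spec": {"postgres": {"version": "13"}}}'
```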
@@ -4,7 +4,7 @@ watch -c "
 kubectl get postgresql --all-namespaces
 echo
 echo -n 'Rolling upgrade pending: '
-kubectl get statefulset -o jsonpath='{.items..metadata.annotations.zalando-postgres-operator-rolling-update-required}'
+kubectl get pods -o jsonpath='{.items[].metadata.annotations.zalando-postgres-operator-rolling-update-required}'
 echo
 echo
 echo 'Pods'
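Since the flag now lives on the pods rather than the statefulset, a per-pod view with names can be handy. A sketch, assuming the default Spilo labels:

```bash
# List each Spilo pod with its rolling-update annotation, if any.
kubectl get pods -l application=spilo \
  -o custom-columns=NAME:.metadata.name,PENDING:.metadata.annotations.zalando-postgres-operator-rolling-update-required
```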
@@ -219,8 +219,8 @@ class K8s:
         self.api.core_v1.patch_namespaced_config_map("postgres-operator", "default", config_map_patch)
         self.delete_operator_pod(step=step)
 
-    def patch_statefulset(self, data, name="acid-minimal-cluster", namespace="default"):
-        self.api.apps_v1.patch_namespaced_stateful_set(name, namespace, data)
+    def patch_pod(self, data, pod_name, namespace="default"):
+        self.api.core_v1.patch_namespaced_pod(pod_name, namespace, data)
 
     def create_with_kubectl(self, path):
         return subprocess.run(
@@ -280,19 +280,21 @@ class K8s:
             return None
         return pod.items[0].spec.containers[0].image
 
-    def get_cluster_leader_pod(self, pg_cluster_name, namespace='default'):
-        labels = {
-            'application': 'spilo',
-            'cluster-name': pg_cluster_name,
-            'spilo-role': 'master',
-        }
+    def get_cluster_pod(self, role, labels='application=spilo,cluster-name=acid-minimal-cluster', namespace='default'):
+        labels = labels + ',spilo-role=' + role
 
         pods = self.api.core_v1.list_namespaced_pod(
-            namespace, label_selector=to_selector(labels)).items
+            namespace, label_selector=labels).items
 
         if pods:
             return pods[0]
 
+    def get_cluster_leader_pod(self, labels='application=spilo,cluster-name=acid-minimal-cluster', namespace='default'):
+        return self.get_cluster_pod('master', labels, namespace)
+
+    def get_cluster_replica_pod(self, labels='application=spilo,cluster-name=acid-minimal-cluster', namespace='default'):
+        return self.get_cluster_pod('replica', labels, namespace)
+
 
 class K8sBase:
     '''
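The string selector these helpers now build is the same one kubectl accepts; for the master of the default test cluster it is equivalent to:

```bash
# Equivalent kubectl query for get_cluster_pod('master', ...) with default labels.
kubectl get pods -l application=spilo,cluster-name=acid-minimal-cluster,spilo-role=master
```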
@@ -168,13 +168,26 @@ class EndToEndTestCase(unittest.TestCase):
                 "additional_pod_capabilities": ','.join(capabilities),
             },
         }
 
+        # get node and replica (expected target of new master)
+        _, replica_nodes = self.k8s.get_pg_nodes(cluster_label)
+
+        try:
             self.k8s.update_config(patch_capabilities)
             self.eventuallyEqual(lambda: self.k8s.get_operator_state(), {"0": "idle"},
                                  "Operator does not get in sync")
+
+            # changed security context of postgres container should trigger a rolling update
+            self.k8s.wait_for_pod_failover(replica_nodes, 'spilo-role=master,' + cluster_label)
+            self.k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label)
 
             self.eventuallyEqual(lambda: self.k8s.count_pods_with_container_capabilities(capabilities, cluster_label),
                                  2, "Container capabilities not updated")
 
+        except timeout_decorator.TimeoutError:
+            print('Operator log: {}'.format(k8s.get_operator_log()))
+            raise
+
     @timeout_decorator.timeout(TEST_TIMEOUT_SEC)
     def test_additional_teams_and_members(self):
         '''
@@ -212,7 +225,7 @@ class EndToEndTestCase(unittest.TestCase):
         # make sure we let one sync pass and the new user being added
         time.sleep(15)
 
-        leader = self.k8s.get_cluster_leader_pod('acid-minimal-cluster')
+        leader = self.k8s.get_cluster_leader_pod()
         user_query = """
             SELECT usename
               FROM pg_catalog.pg_user
@@ -392,7 +405,7 @@ class EndToEndTestCase(unittest.TestCase):
         # credentials.
         db_list = []
 
-        leader = k8s.get_cluster_leader_pod('acid-minimal-cluster')
+        leader = k8s.get_cluster_leader_pod()
         schemas_query = """
             select schema_name
               from information_schema.schemata
@@ -611,7 +624,7 @@ class EndToEndTestCase(unittest.TestCase):
         k8s.update_config(unpatch_lazy_spilo_upgrade, step="patch lazy upgrade")
 
         # at this point operator will complete the normal rolling upgrade
-        # so we additonally test if disabling the lazy upgrade - forcing the normal rolling upgrade - works
+        # so we additionally test if disabling the lazy upgrade - forcing the normal rolling upgrade - works
         self.eventuallyEqual(lambda: k8s.get_effective_pod_image(pod0),
                              conf_image, "Rolling upgrade was not executed",
                              50, 3)
@@ -750,12 +763,6 @@ class EndToEndTestCase(unittest.TestCase):
 
         self.eventuallyTrue(verify_pod_limits, "Pod limits where not adjusted")
 
-    @classmethod
-    def setUp(cls):
-        # cls.k8s.update_config({}, step="Setup")
-        cls.k8s.patch_statefulset({"meta": {"annotations": {"zalando-postgres-operator-rolling-update-required": False}}})
-        pass
-
     @timeout_decorator.timeout(TEST_TIMEOUT_SEC)
     def test_multi_namespace_support(self):
         '''
@@ -784,6 +791,139 @@ class EndToEndTestCase(unittest.TestCase):
             "acid.zalan.do", "v1", self.test_namespace, "postgresqls", "acid-test-cluster")
         time.sleep(5)
 
+    @timeout_decorator.timeout(TEST_TIMEOUT_SEC)
+    def test_rolling_update_flag(self):
+        '''
+        Add rolling update flag to only the master and see it failing over
+        '''
+        k8s = self.k8s
+        cluster_label = 'application=spilo,cluster-name=acid-minimal-cluster'
+
+        # verify we are in good state from potential previous tests
+        self.eventuallyEqual(lambda: k8s.count_running_pods(), 2, "No 2 pods running")
+
+        # get node and replica (expected target of new master)
+        _, replica_nodes = k8s.get_pg_nodes(cluster_label)
+
+        # rolling update annotation
+        flag = {
+            "metadata": {
+                "annotations": {
+                    "zalando-postgres-operator-rolling-update-required": "true",
+                }
+            }
+        }
+
+        try:
+            podsList = k8s.api.core_v1.list_namespaced_pod('default', label_selector=cluster_label)
+            for pod in podsList.items:
+                # add flag only to the master to make it appear to the operator as a leftover from a rolling update
+                if pod.metadata.labels.get('spilo-role') == 'master':
+                    old_creation_timestamp = pod.metadata.creation_timestamp
+                    k8s.patch_pod(flag, pod.metadata.name, pod.metadata.namespace)
+                else:
+                    # remember replica name to check if operator does a switchover
+                    switchover_target = pod.metadata.name
+
+            # do not wait until the next sync
+            k8s.delete_operator_pod()
+
+            # operator should now recreate the master pod and do a switchover before
+            k8s.wait_for_pod_failover(replica_nodes, 'spilo-role=master,' + cluster_label)
+
+            # check if the former replica is now the new master
+            leader = k8s.get_cluster_leader_pod()
+            self.eventuallyEqual(lambda: leader.metadata.name, switchover_target, "Rolling update flag did not trigger switchover")
+
+            # check that the old master has been recreated
+            k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label)
+            replica = k8s.get_cluster_replica_pod()
+            self.assertTrue(replica.metadata.creation_timestamp > old_creation_timestamp, "Old master pod was not recreated")
+
+        except timeout_decorator.TimeoutError:
+            print('Operator log: {}'.format(k8s.get_operator_log()))
+            raise
+
+    @timeout_decorator.timeout(TEST_TIMEOUT_SEC)
+    def test_rolling_update_label_timeout(self):
+        '''
+        Simulate case when replica does not receive label in time and rolling update does not finish
+        '''
+        k8s = self.k8s
+        cluster_label = 'application=spilo,cluster-name=acid-minimal-cluster'
+        flag = "zalando-postgres-operator-rolling-update-required"
+
+        # verify we are in good state from potential previous tests
+        self.eventuallyEqual(lambda: k8s.count_running_pods(), 2, "No 2 pods running")
+
+        # get node and replica (expected target of new master)
+        _, replica_nodes = k8s.get_pg_nodes(cluster_label)
+
+        # rolling update annotation
+        rolling_update_patch = {
+            "metadata": {
+                "annotations": {
+                    flag: "true",
+                }
+            }
+        }
+
+        # make pod_label_wait_timeout so short that rolling update fails on first try
+        # temporarily lower resync interval to reduce waiting for further tests
+        # pods should get healthy in the meantime
+        patch_resync_config = {
+            "data": {
+                "pod_label_wait_timeout": "2s",
+                "resync_period": "20s",
+            }
+        }
+
+        try:
+            # patch both pods for rolling update
+            podList = k8s.api.core_v1.list_namespaced_pod('default', label_selector=cluster_label)
+            for pod in podList.items:
+                k8s.patch_pod(rolling_update_patch, pod.metadata.name, pod.metadata.namespace)
+                if pod.metadata.labels.get('spilo-role') == 'replica':
+                    switchover_target = pod.metadata.name
+
+            # update config and restart operator
+            k8s.update_config(patch_resync_config, "update resync interval and pod_label_wait_timeout")
+
+            # operator should now recreate the replica pod first and do a switchover after
+            k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label)
+
+            # pod_label_wait_timeout should have been exceeded hence the rolling update is continued on next sync
+            # check if the cluster state is "SyncFailed"
+            self.eventuallyEqual(lambda: k8s.pg_get_status(), "SyncFailed", "Expected SYNC event to fail")
+
+            # wait for next sync, replica should be running normally by now and be ready for switchover
+            k8s.wait_for_pod_failover(replica_nodes, 'spilo-role=master,' + cluster_label)
+
+            # check if the former replica is now the new master
+            leader = k8s.get_cluster_leader_pod()
+            self.eventuallyEqual(lambda: leader.metadata.name, switchover_target, "Rolling update flag did not trigger switchover")
+
+            # wait for the old master to get restarted
+            k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label)
+
+            # status should again be "SyncFailed" but turn into "Running" on the next sync
+            time.sleep(10)
+            self.eventuallyEqual(lambda: k8s.pg_get_status(), "Running", "Expected running cluster after two syncs")
+
+            # revert config changes
+            patch_resync_config = {
+                "data": {
+                    "pod_label_wait_timeout": "10m",
+                    "resync_period": "30m",
+                }
+            }
+            k8s.update_config(patch_resync_config, "revert resync interval and pod_label_wait_timeout")
+
+        except timeout_decorator.TimeoutError:
+            print('Operator log: {}'.format(k8s.get_operator_log()))
+            raise
+
     @timeout_decorator.timeout(TEST_TIMEOUT_SEC)
     def test_zz_node_readiness_label(self):
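What the first test automates can also be reproduced by hand. A sketch, where the cluster name and the operator pod label are assumptions:

```bash
# Flag only the master for a rolling update, then restart the operator
# pod so it reacts without waiting for the next resync.
MASTER=$(kubectl get pods \
  -l application=spilo,cluster-name=acid-minimal-cluster,spilo-role=master -o name)
kubectl annotate "$MASTER" zalando-postgres-operator-rolling-update-required=true
kubectl delete pod -l name=postgres-operator
```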
@@ -926,6 +1066,33 @@ class EndToEndTestCase(unittest.TestCase):
         self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
         self.eventuallyTrue(lambda: k8s.check_statefulset_annotations(cluster_label, annotations), "Annotations missing")
 
+    @timeout_decorator.timeout(TEST_TIMEOUT_SEC)
+    @unittest.skip("Skipping this test until fixed")
+    def test_zaa_test_major_version_upgrade(self):
+        k8s = self.k8s
+        result = k8s.create_with_kubectl("manifests/minimal-postgres-manifest-12.yaml")
+        self.eventuallyEqual(lambda: k8s.count_running_pods(labels="application=spilo,cluster-name=acid-upgrade-test"), 2, "No 2 pods running")
+        self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
+
+        pg_patch_version = {
+            "spec": {
+                "postgres": {
+                    "version": "13"
+                }
+            }
+        }
+        k8s.api.custom_objects_api.patch_namespaced_custom_object(
+            "acid.zalan.do", "v1", "default", "postgresqls", "acid-upgrade-test", pg_patch_version)
+
+        self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
+
+        def check_version_13():
+            p = k8s.get_patroni_state("acid-upgrade-test-0")
+            version = p["server_version"][0:2]
+            return version
+
+        self.eventuallyEqual(check_version_13, "13", "Version was not upgraded to 13")
+
     @timeout_decorator.timeout(TEST_TIMEOUT_SEC)
     @unittest.skip("Skipping this test until fixed")
     def test_zzz_taint_based_eviction(self):
go.sum (56 changed lines)

@@ -45,7 +45,6 @@ github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbt
github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46 h1:lsxEuwrXEAokXB9qhlbKWPpo3KMLZQ5WB5WLQRW1uq0=
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=

@@ -63,21 +62,16 @@ github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRF
github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8=
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a h1:idn718Q4B6AGu/h5Sxe66HYVdqdGu2l9Iebqhi/AEoA=
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
github.com/aws/aws-sdk-go v1.36.29 h1:lM1G3AF1+7vzFm0n7hfH8r2+750BTo+6Lo6FtPB7kzk=
github.com/aws/aws-sdk-go v1.36.29/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
github.com/blang/semver v3.5.0+incompatible h1:CGxCgetQ64DKk7rdZ++Vfnb1+ogGNnB17OJKJXD2Cfs=
github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
@@ -88,13 +82,10 @@ github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkE
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc=
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM=
github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e h1:Wf6HqHfScWJN9/ZjdUKyjop4mf3Qdd+1TvvltAvM3m8=
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f h1:lBNOc5arjvs8E5mO2tbpBpLoyyu8B6e44T7hJy6potg=
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=

@@ -142,11 +133,9 @@ github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70t
github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik=
github.com/go-openapi/analysis v0.18.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik=
github.com/go-openapi/analysis v0.19.2/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk=
github.com/go-openapi/analysis v0.19.5 h1:8b2ZgKfKIUTVQpTb77MoRDIMEIwvDVw40o3aOXdfYzI=
github.com/go-openapi/analysis v0.19.5/go.mod h1:hkEAkxagaIvIP7VTn8ygJNkd4kAYON2rCu0v0ObL0AU=
github.com/go-openapi/errors v0.17.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0=
github.com/go-openapi/errors v0.18.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0=
github.com/go-openapi/errors v0.19.2 h1:a2kIyV3w+OS3S97zxUndRVD46+FhGOUBDFY7nmu4CsY=
github.com/go-openapi/errors v0.19.2/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94=
github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0=
github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M=

@@ -164,11 +153,9 @@ github.com/go-openapi/loads v0.17.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf
github.com/go-openapi/loads v0.18.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU=
github.com/go-openapi/loads v0.19.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU=
github.com/go-openapi/loads v0.19.2/go.mod h1:QAskZPMX5V0C2gvfkGZzJlINuP7Hx/4+ix5jWFxsNPs=
github.com/go-openapi/loads v0.19.4 h1:5I4CCSqoWzT+82bBkNIvmLc0UOsoKKQ4Fz+3VxOB7SY=
github.com/go-openapi/loads v0.19.4/go.mod h1:zZVHonKd8DXyxyw4yfnVjPzBjIQcLt0CCsn0N0ZrQsk=
github.com/go-openapi/runtime v0.0.0-20180920151709-4f900dc2ade9/go.mod h1:6v9a6LTXWQCdL8k1AO3cvqx5OtZY/Y9wKTgaoP6YRfA=
github.com/go-openapi/runtime v0.19.0/go.mod h1:OwNfisksmmaZse4+gpV3Ne9AyMOlP1lt4sK4FXt0O64=
github.com/go-openapi/runtime v0.19.4 h1:csnOgcgAiuGoM/Po7PEpKDoNulCcF3FGbSnbHfxgjMI=
github.com/go-openapi/runtime v0.19.4/go.mod h1:X277bwSUBxVlCYR3r7xgZZGKVvBd/29gLDlFGtJ8NL4=
github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc=
github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI=
@@ -179,7 +166,6 @@ github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8
github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU=
github.com/go-openapi/strfmt v0.18.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU=
github.com/go-openapi/strfmt v0.19.0/go.mod h1:+uW+93UVvGGq2qGaZxdDeJqSAqBqBdl+ZPMF/cC8nDY=
github.com/go-openapi/strfmt v0.19.3 h1:eRfyY5SkaNJCAwmmMcADjY31ow9+N7MCLW7oRkbsINA=
github.com/go-openapi/strfmt v0.19.3/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU=
github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I=
github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg=

@@ -189,9 +175,7 @@ github.com/go-openapi/swag v0.19.5 h1:lTz6Ys4CmqqCQmZPBlbQENR1/GucA2bzYTE12Pw4tF
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4=
github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA=
github.com/go-openapi/validate v0.19.5 h1:QhCBKRYqZR+SKo4gl1lPhPahope8/RLt6EVgY8X80w0=
github.com/go-openapi/validate v0.19.5/go.mod h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85nY1c2z52x1Gk4=
github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=

@@ -201,7 +185,6 @@ github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfU
github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7 h1:5ZkaAPbicIKTF2I64qf5Fh8Aa83Q/dnOafMYV0OMwjA=
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=

@@ -223,7 +206,6 @@ github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrU
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM=
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=

@@ -232,7 +214,6 @@ github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.2 h1:X2ev0eStA3AbceY54o37/0PQ/UWqKEiiO2dKL5OPaFM=

@@ -248,7 +229,6 @@ github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hf
github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=

@@ -261,7 +241,6 @@ github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoA
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=

@@ -274,7 +253,6 @@ github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpO
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/imdario/mergo v0.3.5 h1:JboBksRwiiAJWvIYJVo46AfV+IAIKZpfrSzVKj42R4Q=
github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
@@ -315,10 +293,8 @@ github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaO
github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI=
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=

@@ -330,7 +306,6 @@ github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3Rllmb
github.com/motomux/pretty v0.0.0-20161209205251-b2aad2c9a95d h1:LznySqW8MqVeFh+pW6rOkFdld9QQ7jRydBKKM6jyPVI=
github.com/motomux/pretty v0.0.0-20161209205251-b2aad2c9a95d/go.mod h1:u3hJ0kqCQu/cPpsu3RbCOPZ0d7V3IjPjv1adNRleM9I=
github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=

@@ -356,22 +331,18 @@ github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prY
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.7.1 h1:NTGy1Ja9pByO+xAeH/qiWnLrKtr3hJPNjaVUwnjpdpA=
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.10.0 h1:RyRA7RzGXQZiW+tGMr7sxa85G1z0yOpM1qq5c8lNawc=
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.1.3 h1:F0+tqvhOksq22sc6iCHF5WGlWjdwj92p0udFh1VFBS8=
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
github.com/r3labs/diff v1.1.0 h1:V53xhrbTHrWFWq3gI4b94AjgEJOerO1+1l0xyHOBi8M=

@@ -392,7 +363,6 @@ github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B
github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
github.com/spf13/cobra v1.0.0 h1:6m/oheQuQ13N9ks4hubMG6BnvwOeaJrqSPLahSnczz8=
github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=

@@ -403,12 +373,10 @@ github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An
github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48=
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=

@@ -425,22 +393,17 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec
|
|||
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
|
||||
go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
|
||||
go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
|
||||
go.etcd.io/etcd v0.5.0-alpha.5.0.20200819165624-17cef6e3e9d5 h1:Gqga3zA9tdAcfqobUGjSoCob5L3f8Dt5EuOp3ihNZko=
|
||||
go.etcd.io/etcd v0.5.0-alpha.5.0.20200819165624-17cef6e3e9d5/go.mod h1:skWido08r9w6Lq/w70DO5XYIKMu4QFu1+4VsqLQuJy8=
|
||||
go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
|
||||
go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
|
||||
go.mongodb.org/mongo-driver v1.1.2 h1:jxcFYjlkl8xaERsgLo+RNquI0epW6zuy/ZRQs6jnrFA=
|
||||
go.mongodb.org/mongo-driver v1.1.2/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
|
||||
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
|
||||
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
|
||||
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
|
||||
go.uber.org/atomic v1.4.0 h1:cxzIVoETapQEqDhQu3QfnvXAV4AlzcvUCxkVUFw3+EU=
|
||||
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
|
||||
go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI=
|
||||
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
|
||||
go.uber.org/zap v1.10.0 h1:ORx85nbTijNz8ljznvCMR1ZBIPKFn3jQrag10X2AsuM=
|
||||
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
|
||||
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
|
|
@ -451,7 +414,6 @@ golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8U
|
|||
golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20201203163018-be400aefbc4c h1:9HhBz5L/UjnK9XLtiZhYAdue5BVKep3PMmS2LuPDt8k=
|
||||
|
|
@ -522,7 +484,6 @@ golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwY
|
|||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6 h1:pE8b58s1HRDMi8RDc79m0HISf9D4TzseP40cEA6IGfs=
|
||||
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw=
|
||||
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
|
|
@ -532,7 +493,6 @@ golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJ
|
|||
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9 h1:SQFwaSi55rU7vdNs9Yr0Z324VNlrF+0wMqRXT4St8ck=
|
||||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
|
|
@ -566,7 +526,6 @@ golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7w
|
|||
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f h1:+Nyd8tzPX9R7BWHguqsrbFdRx3WQ/1ib8I44HXV5yTA=
|
||||
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201112073958-5cba982894dd h1:5CtCZbICpIOFdgO940moixOPjc0178IU44m4EjOO5IY=
|
||||
golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
|
|
@ -576,14 +535,12 @@ golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fq
|
|||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.4 h1:0YWbFKbhXG/wIiuHDSKpS0Iy7FSA+u45VtBMfQcFTTc=
|
||||
golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20191024005414-555d28b269f0 h1:/5xXl8Y5W96D+TtHSlonuFqGHIWVuyCkGJLwGh9JJFs=
|
||||
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e h1:EHBhcS0mlXEAVwNyO2dLfjToGsyY4j24pTs2ScHnX7s=
|
||||
golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
|
|
@ -666,7 +623,6 @@ google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4
|
|||
google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY=
|
||||
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
|
||||
|
|
@ -674,7 +630,6 @@ google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ij
|
|||
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
|
||||
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
||||
google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||
google.golang.org/grpc v1.27.0 h1:rRYRFMVgRv6E0D70Skyfsr28tDXIuuPZyWGMPdMcnXg=
|
||||
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||
google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||
|
|
@ -685,7 +640,6 @@ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi
|
|||
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.24.0 h1:UhZDfRO8JRQru4/+LlLE0BRKGF8L+PICnvYZmx/fEGA=
|
||||
google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
|
||||
google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c=
|
||||
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
|
||||
|
|
@ -700,7 +654,6 @@ gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
|
|||
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
||||
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
|
||||
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8=
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
|
||||
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
|
||||
gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
|
||||
|
|
@ -711,7 +664,6 @@ gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
|||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=
|
||||
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
|
||||
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
||||
|
|
@ -725,18 +677,15 @@ honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWh
|
|||
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
|
||||
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
|
||||
k8s.io/api v0.19.4 h1:I+1I4cgJYuCDgiLNjKx7SLmIbwgj9w7N7Zr5vSIdwpo=
|
||||
k8s.io/api v0.19.4/go.mod h1:SbtJ2aHCItirzdJ36YslycFNzWADYH3tgOhvBEFtZAk=
|
||||
k8s.io/api v0.20.2 h1:y/HR22XDZY3pniu9hIFDLpUCPq2w5eQ6aV/VFQ7uJMw=
|
||||
k8s.io/api v0.20.2/go.mod h1:d7n6Ehyzx+S+cE3VhTGfVNNqtGc/oL9DCdYYahlurV8=
|
||||
k8s.io/apiextensions-apiserver v0.19.4 h1:D9ak9T012tb3vcGFWYmbQuj9SCC8YM4zhA4XZqsAQC4=
|
||||
k8s.io/apiextensions-apiserver v0.19.4/go.mod h1:B9rpH/nu4JBCtuUp3zTTk8DEjZUupZTBEec7/2zNRYw=
|
||||
k8s.io/apimachinery v0.19.4 h1:+ZoddM7nbzrDCp0T3SWnyxqf8cbWPT2fkZImoyvHUG0=
|
||||
k8s.io/apimachinery v0.19.4/go.mod h1:DnPGDnARWFvYa3pMHgSxtbZb7gpzzAZ1pTfaUNDVlmA=
|
||||
k8s.io/apimachinery v0.20.2 h1:hFx6Sbt1oG0n6DZ+g4bFt5f6BoMkOjKWsQFu077M3Vg=
|
||||
k8s.io/apimachinery v0.20.2/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU=
|
||||
k8s.io/apiserver v0.19.4/go.mod h1:X8WRHCR1UGZDd7HpV0QDc1h/6VbbpAeAGyxSh8yzZXw=
|
||||
k8s.io/client-go v0.19.4 h1:85D3mDNoLF+xqpyE9Dh/OtrJDyJrSRKkHmDXIbEzer8=
|
||||
k8s.io/client-go v0.19.4/go.mod h1:ZrEy7+wj9PjH5VMBCuu/BDlvtUAku0oVFk4MmnW9mWA=
|
||||
k8s.io/client-go v0.20.2 h1:uuf+iIAbfnCSw8IGAv/Rg0giM+2bOzHLOsbbrwrdhNQ=
|
||||
k8s.io/client-go v0.20.2/go.mod h1:kH5brqWqp7HDxUFKoEgiI4v8G1xzbe9giaCenUWJzgE=
|
||||
|
|
@ -747,24 +696,19 @@ k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8
|
|||
k8s.io/gengo v0.0.0-20200428234225-8167cfdcfc14 h1:t4L10Qfx/p7ASH3gXCdIUtPbbIuegCoUJf3TMSFekjw=
|
||||
k8s.io/gengo v0.0.0-20200428234225-8167cfdcfc14/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
|
||||
k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
|
||||
k8s.io/klog/v2 v2.2.0 h1:XRvcwJozkgZ1UQJmfMGpvRthQHOvihEhYtDfAaxMz/A=
|
||||
k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
|
||||
k8s.io/klog/v2 v2.4.0 h1:7+X0fUguPyrKEC4WjH8iGDg3laWgMo5tMnRTIGTTxGQ=
|
||||
k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
|
||||
k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6 h1:+WnxoVtG8TMiudHBSEtrVL1egv36TkkJm+bA8AxicmQ=
|
||||
k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o=
|
||||
k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd h1:sOHNzJIkytDF6qadMNKhhDRpc6ODik8lVC6nOur7B2c=
|
||||
k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM=
|
||||
k8s.io/utils v0.0.0-20200729134348-d5654de09c73 h1:uJmqzgNWG7XyClnU/mLPBWwfKKF1K8Hf8whTseBgJcg=
|
||||
k8s.io/utils v0.0.0-20200729134348-d5654de09c73/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
|
||||
k8s.io/utils v0.0.0-20201110183641-67b214c5f920 h1:CbnUZsM497iRC5QMVkHwyl8s2tB3g7yaSHkYPkpgelw=
|
||||
k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
|
||||
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
|
||||
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
|
||||
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
|
||||
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.9 h1:rusRLrDhjBp6aYtl9sGEvQJr6faoHoDLd0YcUBTZguI=
|
||||
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.9/go.mod h1:dzAXnQbTRyDlZPJX2SUPEqvnB+j7AJjtlox7PEwigU0=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.0.1 h1:YXTMot5Qz/X1iBRJhAt+vI+HVttY0WkSqqhKxQ0xVbA=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.0.2 h1:YHQV7Dajm86OuqnIR6zAelnDWBRjo+YhYV9PmGrh1s8=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
|
||||
|
|
|
|||
|
|
@@ -9,7 +9,7 @@ metadata:
#    "delete-date": "2020-08-31"  # can only be deleted on that day if "delete-date" key is configured
#    "delete-clustername": "acid-test-cluster"  # can only be deleted when name matches if "delete-clustername" key is configured
spec:
  dockerImage: registry.opensource.zalan.do/acid/spilo-13:2.0-p4
  dockerImage: registry.opensource.zalan.do/acid/spilo-13:2.0-p6
  teamId: "acid"
  numberOfInstances: 2
  users:  # Application/Robot users

@@ -16,7 +16,7 @@ data:
  # connection_pooler_default_cpu_request: "500m"
  # connection_pooler_default_memory_limit: 100Mi
  # connection_pooler_default_memory_request: 100Mi
  connection_pooler_image: "registry.opensource.zalan.do/acid/pgbouncer:master-14"
  connection_pooler_image: "registry.opensource.zalan.do/acid/pgbouncer:master-16"
  # connection_pooler_max_db_connections: 60
  # connection_pooler_mode: "transaction"
  # connection_pooler_number_of_instances: 2
@@ -32,7 +32,7 @@ data:
  # default_memory_request: 100Mi
  # delete_annotation_date_key: delete-date
  # delete_annotation_name_key: delete-clustername
  docker_image: registry.opensource.zalan.do/acid/spilo-13:2.0-p4
  docker_image: registry.opensource.zalan.do/acid/spilo-13:2.0-p6
  # downscaler_annotations: "deployment-time,downscaler/*"
  # enable_admin_role_for_users: "true"
  # enable_crd_validation: "true"
@@ -63,7 +63,7 @@ data:
  # inherited_labels: application,environment
  # kube_iam_role: ""
  # log_s3_bucket: ""
  logical_backup_docker_image: "registry.opensource.zalan.do/acid/logical-backup:v1.6.1"
  logical_backup_docker_image: "registry.opensource.zalan.do/acid/logical-backup:v1.6.2"
  # logical_backup_google_application_credentials: ""
  logical_backup_job_prefix: "logical-backup-"
  logical_backup_provider: "s3"
@@ -74,12 +74,14 @@ data:
  # logical_backup_s3_secret_access_key: ""
  logical_backup_s3_sse: "AES256"
  logical_backup_schedule: "30 00 * * *"
  major_version_upgrade_mode: "manual"
  master_dns_name_format: "{cluster}.{team}.{hostedzone}"
  # master_pod_move_timeout: 20m
  # max_instances: "-1"
  # min_instances: "-1"
  # min_cpu_limit: 250m
  # min_memory_limit: 250Mi
  # minimal_major_version: "9.5"
  # node_readiness_label: ""
  # oauth_token_secret_name: postgresql-operator
  # pam_configuration: |
@@ -112,12 +114,14 @@ data:
  secret_name_template: "{username}.{cluster}.credentials"
  # sidecar_docker_images: ""
  # set_memory_request_to_limit: "false"
  spilo_allow_privilege_escalation: "true"
  # spilo_runasuser: 101
  # spilo_runasgroup: 103
  # spilo_fsgroup: 103
  spilo_privileged: "false"
  storage_resize_mode: "pvc"
  super_username: postgres
  # target_major_version: "13"
  # team_admin_role: "admin"
  # team_api_role_configuration: "log_statement:all"
  # teams_api_url: http://fake-teams-api.default.svc.cluster.local

@@ -23,7 +23,7 @@ spec:
      serviceAccountName: postgres-operator
      containers:
        - name: postgres-operator
          image: registry.opensource.zalan.do/acid/pgbouncer:master-14
          image: registry.opensource.zalan.do/acid/pgbouncer:master-16
          imagePullPolicy: IfNotPresent
          resources:
            requests:

@@ -0,0 +1,21 @@
apiVersion: "acid.zalan.do/v1"
kind: postgresql
metadata:
  name: acid-upgrade-test
  namespace: default
spec:
  teamId: "acid"
  volume:
    size: 1Gi
  numberOfInstances: 2
  users:
    zalando:  # database owner
      - superuser
      - createdb
    foo_user: []  # role for application foo
  databases:
    foo: zalando  # dbname: owner
  preparedDatabases:
    bar: {}
  postgresql:
    version: "12"

@@ -61,7 +61,7 @@ spec:
            properties:
              docker_image:
                type: string
                default: "registry.opensource.zalan.do/acid/spilo-13:2.0-p4"
                default: "registry.opensource.zalan.do/acid/spilo-13:2.0-p6"
              enable_crd_validation:
                type: boolean
                default: true
@@ -123,6 +123,18 @@ spec:
              super_username:
                type: string
                default: postgres
              major_version_upgrade:
                type: object
                properties:
                  major_version_upgrade_mode:
                    type: string
                    default: "off"
                  minimal_major_version:
                    type: string
                    default: "9.5"
                  target_major_version:
                    type: string
                    default: "13"
              kubernetes:
                type: object
                properties:
@@ -248,6 +260,9 @@ spec:
                  secret_name_template:
                    type: string
                    default: "{username}.{cluster}.credentials.{tprkind}.{tprgroup}"
                  spilo_allow_privilege_escalation:
                    type: boolean
                    default: true
                  spilo_runasuser:
                    type: integer
                  spilo_runasgroup:
@@ -378,7 +393,7 @@ spec:
                properties:
                  logical_backup_docker_image:
                    type: string
                    default: "registry.opensource.zalan.do/acid/logical-backup:v1.6.1"
                    default: "registry.opensource.zalan.do/acid/logical-backup:v1.6.2"
                  logical_backup_google_application_credentials:
                    type: string
                  logical_backup_job_prefix:
@@ -507,7 +522,7 @@ spec:
                    default: "pooler"
                  connection_pooler_image:
                    type: string
                    default: "registry.opensource.zalan.do/acid/pgbouncer:master-14"
                    default: "registry.opensource.zalan.do/acid/pgbouncer:master-16"
                  connection_pooler_max_db_connections:
                    type: integer
                    default: 60

@@ -19,7 +19,7 @@ spec:
      serviceAccountName: postgres-operator
      containers:
        - name: postgres-operator
          image: registry.opensource.zalan.do/acid/postgres-operator:v1.6.1
          image: registry.opensource.zalan.do/acid/postgres-operator:v1.6.2
          imagePullPolicy: IfNotPresent
          resources:
            requests:

@@ -3,7 +3,7 @@ kind: OperatorConfiguration
metadata:
  name: postgresql-operator-default-configuration
configuration:
  docker_image: registry.opensource.zalan.do/acid/spilo-13:2.0-p4
  docker_image: registry.opensource.zalan.do/acid/spilo-13:2.0-p6
  # enable_crd_validation: true
  # enable_lazy_spilo_upgrade: false
  enable_pgversion_env_var: true
@@ -26,6 +26,10 @@ configuration:
  users:
    replication_username: standby
    super_username: postgres
  major_version_upgrade:
    major_version_upgrade_mode: "off"
    minimal_major_version: "9.5"
    target_major_version: "13"
  kubernetes:
    # additional_pod_capabilities:
    # - "SYS_NICE"
@@ -75,6 +79,7 @@ configuration:
    # pod_service_account_role_binding_definition: ""
    pod_terminate_grace_period: 5m
    secret_name_template: "{username}.{cluster}.credentials.{tprkind}.{tprgroup}"
    spilo_allow_privilege_escalation: true
    # spilo_runasuser: 101
    # spilo_runasgroup: 103
    # spilo_fsgroup: 103
@@ -118,7 +123,7 @@ configuration:
    # wal_gs_bucket: ""
    # wal_s3_bucket: ""
  logical_backup:
    logical_backup_docker_image: "registry.opensource.zalan.do/acid/logical-backup:v1.6.1"
    logical_backup_docker_image: "registry.opensource.zalan.do/acid/logical-backup:v1.6.2"
    # logical_backup_google_application_credentials: ""
    logical_backup_job_prefix: "logical-backup-"
    logical_backup_provider: "s3"
@@ -157,7 +162,7 @@ configuration:
    connection_pooler_default_cpu_request: "500m"
    connection_pooler_default_memory_limit: 100Mi
    connection_pooler_default_memory_request: 100Mi
    connection_pooler_image: "registry.opensource.zalan.do/acid/pgbouncer:master-14"
    connection_pooler_image: "registry.opensource.zalan.do/acid/pgbouncer:master-16"
    # connection_pooler_max_db_connections: 60
    connection_pooler_mode: "transaction"
    connection_pooler_number_of_instances: 2

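Editor's aside (not part of the diff): the major_version_upgrade block above is the operator-wide switch for the new feature. A minimal Go sketch of what the three documented mode strings mean, for orientation only; the descriptions paraphrase the struct comment further down, nothing here is operator API:

package main

import "fmt"

// describeUpgradeMode maps the documented mode strings to their effect (sketch).
func describeUpgradeMode(mode string) string {
	switch mode {
	case "off":
		return "no automatic action"
	case "manual":
		return "a version bump in the cluster manifest triggers an in-place upgrade"
	case "full":
		return "manifest bumps and violations of minimal_major_version both trigger an upgrade"
	default:
		return fmt.Sprintf("unknown mode %q", mode)
	}
}

func main() {
	fmt.Println(describeUpgradeMode("off")) // the shipped default above
}
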
@@ -965,6 +965,20 @@ var OperatorConfigCRDResourceValidation = apiextv1.CustomResourceValidation{
					},
				},
			},
			"major_version_upgrade": {
				Type: "object",
				Properties: map[string]apiextv1.JSONSchemaProps{
					"major_version_upgrade_mode": {
						Type: "string",
					},
					"minimal_major_version": {
						Type: "string",
					},
					"target_major_version": {
						Type: "string",
					},
				},
			},
			"kubernetes": {
				Type: "object",
				Properties: map[string]apiextv1.JSONSchemaProps{
@@ -1148,6 +1162,9 @@ var OperatorConfigCRDResourceValidation = apiextv1.CustomResourceValidation{
					"spilo_privileged": {
						Type: "boolean",
					},
					"spilo_allow_privilege_escalation": {
						Type: "boolean",
					},
					"storage_resize_mode": {
						Type: "string",
						Enum: []apiextv1.JSON{

@@ -41,6 +41,13 @@ type PostgresUsersConfiguration struct {
	ReplicationUsername string `json:"replication_username,omitempty"`
}

// MajorVersionUpgradeConfiguration defines how to execute major version upgrades of Postgres.
type MajorVersionUpgradeConfiguration struct {
	MajorVersionUpgradeMode string `json:"major_version_upgrade_mode" default:"off"` // off - no actions, manual - manifest triggers action, full - manifest and minimal version violation trigger upgrade
	MinimalMajorVersion     string `json:"minimal_major_version" default:"9.5"`
	TargetMajorVersion      string `json:"target_major_version" default:"13"`
}

// KubernetesMetaConfiguration defines k8s conf required for all Postgres clusters and the operator itself
type KubernetesMetaConfiguration struct {
	PodServiceAccountName string `json:"pod_service_account_name,omitempty"`
@@ -49,6 +56,7 @@ type KubernetesMetaConfiguration struct {
	PodServiceAccountRoleBindingDefinition string   `json:"pod_service_account_role_binding_definition,omitempty"`
	PodTerminateGracePeriod                Duration `json:"pod_terminate_grace_period,omitempty"`
	SpiloPrivileged                        bool     `json:"spilo_privileged,omitempty"`
	SpiloAllowPrivilegeEscalation          *bool    `json:"spilo_allow_privilege_escalation,omitempty"`
	SpiloRunAsUser                         *int64   `json:"spilo_runasuser,omitempty"`
	SpiloRunAsGroup                        *int64   `json:"spilo_runasgroup,omitempty"`
	SpiloFSGroup                           *int64   `json:"spilo_fsgroup,omitempty"`
@@ -219,6 +227,7 @@ type OperatorConfigurationData struct {
	SidecarImages              map[string]string                `json:"sidecar_docker_images,omitempty"` // deprecated in favour of SidecarContainers
	SidecarContainers          []v1.Container                   `json:"sidecars,omitempty"`
	PostgresUsersConfiguration PostgresUsersConfiguration       `json:"users"`
	MajorVersionUpgrade        MajorVersionUpgradeConfiguration `json:"major_version_upgrade"`
	Kubernetes                 KubernetesMetaConfiguration      `json:"kubernetes"`
	PostgresPodResources       PostgresPodResourcesDefaults     `json:"postgres_pod_resources"`
	Timeouts                   OperatorTimeouts                 `json:"timeouts"`

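Editor's aside (not part of the diff): the json tags above are what bind the CRD's major_version_upgrade block to the Go struct. A self-contained sketch of that round trip, using a local copy of the struct purely for illustration:

package main

import (
	"encoding/json"
	"fmt"
)

// Local copy of the struct introduced in the hunk above, for illustration only.
type MajorVersionUpgradeConfiguration struct {
	MajorVersionUpgradeMode string `json:"major_version_upgrade_mode"`
	MinimalMajorVersion     string `json:"minimal_major_version"`
	TargetMajorVersion      string `json:"target_major_version"`
}

func main() {
	raw := []byte(`{"major_version_upgrade_mode":"manual","minimal_major_version":"9.5","target_major_version":"13"}`)
	var cfg MajorVersionUpgradeConfiguration
	if err := json.Unmarshal(raw, &cfg); err != nil {
		panic(err)
	}
	// {MajorVersionUpgradeMode:manual MinimalMajorVersion:9.5 TargetMajorVersion:13}
	fmt.Printf("%+v\n", cfg)
}
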
@@ -147,6 +147,11 @@ func (in *ConnectionPoolerConfiguration) DeepCopy() *ConnectionPoolerConfigurati
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KubernetesMetaConfiguration) DeepCopyInto(out *KubernetesMetaConfiguration) {
	*out = *in
	if in.SpiloAllowPrivilegeEscalation != nil {
		in, out := &in.SpiloAllowPrivilegeEscalation, &out.SpiloAllowPrivilegeEscalation
		*out = new(bool)
		**out = **in
	}
	if in.SpiloRunAsUser != nil {
		in, out := &in.SpiloRunAsUser, &out.SpiloRunAsUser
		*out = new(int64)
@@ -309,6 +314,22 @@ func (in *MaintenanceWindow) DeepCopy() *MaintenanceWindow {
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MajorVersionUpgradeConfiguration) DeepCopyInto(out *MajorVersionUpgradeConfiguration) {
	*out = *in
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MajorVersionUpgradeConfiguration.
func (in *MajorVersionUpgradeConfiguration) DeepCopy() *MajorVersionUpgradeConfiguration {
	if in == nil {
		return nil
	}
	out := new(MajorVersionUpgradeConfiguration)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *OperatorConfiguration) DeepCopyInto(out *OperatorConfiguration) {
	*out = *in
@@ -364,6 +385,7 @@ func (in *OperatorConfigurationData) DeepCopyInto(out *OperatorConfigurationData
		}
	}
	out.PostgresUsersConfiguration = in.PostgresUsersConfiguration
	out.MajorVersionUpgrade = in.MajorVersionUpgrade
	in.Kubernetes.DeepCopyInto(&out.Kubernetes)
	out.PostgresPodResources = in.PostgresPodResources
	out.Timeouts = in.Timeouts

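Editor's aside (not part of the diff): the generated DeepCopyInto above allocates a fresh bool for the new *bool field because copying a pointer by assignment would alias the original. A standalone sketch of the difference:

package main

import "fmt"

type conf struct{ flag *bool }

func main() {
	t := true
	a := conf{flag: &t}

	shallow := a // copies the pointer: both structs now share one bool
	deep := conf{}
	if a.flag != nil { // same pattern as the generated DeepCopyInto above
		deep.flag = new(bool)
		*deep.flag = *a.flag
	}

	*a.flag = false
	fmt.Println(*shallow.flag, *deep.flag) // false true
}
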
@@ -92,6 +92,7 @@ type Cluster struct {
	ConnectionPooler    map[PostgresRole]*ConnectionPoolerObjects
	EBSVolumes          map[string]volumes.VolumeProperties
	VolumeResizer       volumes.VolumeResizer
	currentMajorVersion int
}

type compareStatefulsetResult struct {
@@ -132,11 +133,12 @@ func New(cfg Config, kubeClient k8sutil.KubernetesClient, pgSpec acidv1.Postgres
		deleteOptions:       metav1.DeleteOptions{PropagationPolicy: &deletePropagationPolicy},
		podEventsQueue:      podEventsQueue,
		KubeClient:          kubeClient,
		currentMajorVersion: 0,
	}
	cluster.logger = logger.WithField("pkg", "cluster").WithField("cluster-name", cluster.clusterName())
	cluster.teamsAPIClient = teams.NewTeamsAPI(cfg.OpConfig.TeamsAPIUrl, logger)
	cluster.oauthTokenGetter = newSecretOauthTokenGetter(&kubeClient, cfg.OpConfig.OAuthTokenSecretName)
	cluster.patroni = patroni.New(cluster.logger)
	cluster.patroni = patroni.New(cluster.logger, nil)
	cluster.eventRecorder = eventRecorder

	cluster.EBSVolumes = make(map[string]volumes.VolumeProperties)
@@ -359,7 +361,7 @@ func (c *Cluster) compareStatefulSetWith(statefulSet *appsv1.StatefulSet) *compa
	}
	if !reflect.DeepEqual(c.Statefulset.Annotations, statefulSet.Annotations) {
		match = false
		reasons = append(reasons, "new statefulset's annotations does not match the current one")
		reasons = append(reasons, "new statefulset's annotations do not match the current one")
	}

	needsRollUpdate, reasons = c.compareContainers("initContainers", c.Statefulset.Spec.Template.Spec.InitContainers, statefulSet.Spec.Template.Spec.InitContainers, needsRollUpdate, reasons)
@@ -614,17 +616,14 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error {

	logNiceDiff(c.logger, oldSpec, newSpec)

	if oldSpec.Spec.PostgresqlParam.PgVersion > newSpec.Spec.PostgresqlParam.PgVersion {
		c.logger.Warningf("postgresql version change(%q -> %q) has no effect",
			oldSpec.Spec.PostgresqlParam.PgVersion, newSpec.Spec.PostgresqlParam.PgVersion)
		c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeWarning, "PostgreSQL", "postgresql version change(%q -> %q) has no effect",
			oldSpec.Spec.PostgresqlParam.PgVersion, newSpec.Spec.PostgresqlParam.PgVersion)
		// we need that hack to generate statefulset with the old version
		newSpec.Spec.PostgresqlParam.PgVersion = oldSpec.Spec.PostgresqlParam.PgVersion
	} else if oldSpec.Spec.PostgresqlParam.PgVersion < newSpec.Spec.PostgresqlParam.PgVersion {
		c.logger.Infof("postgresql version increased (%q -> %q), major version upgrade can be done manually after StatefulSet Sync",
	if IsBiggerPostgresVersion(oldSpec.Spec.PostgresqlParam.PgVersion, c.GetDesiredMajorVersion()) {
		c.logger.Infof("postgresql version increased (%s -> %s), depending on config manual upgrade needed",
			oldSpec.Spec.PostgresqlParam.PgVersion, newSpec.Spec.PostgresqlParam.PgVersion)
		syncStatetfulSet = true
	} else {
		c.logger.Infof("postgresql major version unchanged or smaller, no changes needed")
		// sticking with old version, this will also advance GetDesiredVersion next time.
		newSpec.Spec.PostgresqlParam.PgVersion = oldSpec.Spec.PostgresqlParam.PgVersion
	}

	// Service
@@ -781,6 +780,14 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error {
		updateFailed = true
	}

	if !updateFailed {
		// Major version upgrade must only fire after success of earlier operations and should stay last
		if err := c.majorVersionUpgrade(); err != nil {
			c.logger.Errorf("major version upgrade failed: %v", err)
			updateFailed = true
		}
	}

	return nil
}

@@ -1302,7 +1309,8 @@ func (c *Cluster) Switchover(curMaster *v1.Pod, candidate spec.NamespacedName) e
			err = fmt.Errorf("could not get master pod label: %v", err)
		}
	} else {
		err = fmt.Errorf("could not switch over: %v", err)
		err = fmt.Errorf("could not switch over from %q to %q: %v", curMaster.Name, candidate, err)
		c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeNormal, "Switchover", "Switchover from %q to %q FAILED: %v", curMaster.Name, candidate, err)
	}

	// signal the role label waiting goroutine to close the shop and go home
@@ -1313,9 +1321,7 @@ func (c *Cluster) Switchover(curMaster *v1.Pod, candidate spec.NamespacedName) e
	// close the label waiting channel no sooner than the waiting goroutine terminates.
	close(podLabelErr)

	c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeNormal, "Switchover", "Switchover from %q to %q FAILED: %v", curMaster.Name, candidate, err)
	return err

}

// Lock locks the cluster

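Editor's aside (not part of the diff): compareStatefulSetWith above detects annotation drift with reflect.DeepEqual. A tiny standalone sketch of its semantics on annotation maps of the kind this operator uses; the key is the one defined later in this commit:

package main

import (
	"fmt"
	"reflect"
)

func main() {
	current := map[string]string{"zalando-postgres-operator-rolling-update-required": "false"}
	desired := map[string]string{"zalando-postgres-operator-rolling-update-required": "true"}

	// Key-by-key value comparison; iteration order is irrelevant for maps.
	fmt.Println(reflect.DeepEqual(current, current)) // true
	fmt.Println(reflect.DeepEqual(current, desired)) // false -> a reason gets appended
}
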
@@ -6,7 +6,6 @@ import (
	"fmt"
	"path"
	"sort"
	"strconv"
	"strings"

	"github.com/sirupsen/logrus"
@@ -443,6 +442,7 @@ func generateContainer(
	envVars []v1.EnvVar,
	volumeMounts []v1.VolumeMount,
	privilegedMode bool,
	privilegeEscalationMode *bool,
	additionalPodCapabilities *v1.Capabilities,
) *v1.Container {
	return &v1.Container{
@@ -467,7 +467,7 @@ func generateContainer(
		VolumeMounts: volumeMounts,
		Env:          envVars,
		SecurityContext: &v1.SecurityContext{
			AllowPrivilegeEscalation: &privilegedMode,
			AllowPrivilegeEscalation: privilegeEscalationMode,
			Privileged:               &privilegedMode,
			ReadOnlyRootFilesystem:   util.False(),
			Capabilities:             additionalPodCapabilities,
@@ -734,7 +734,7 @@ func (c *Cluster) generateSpiloPodEnvVars(uid types.UID, spiloConfiguration stri
		},
	}
	if c.OpConfig.EnablePgVersionEnvVar {
		envVars = append(envVars, v1.EnvVar{Name: "PGVERSION", Value: c.Spec.PgVersion})
		envVars = append(envVars, v1.EnvVar{Name: "PGVERSION", Value: c.GetDesiredMajorVersion()})
	}
	// Spilo expects cluster labels as JSON
	if clusterLabels, err := json.Marshal(labels.Set(c.OpConfig.ClusterLabels)); err != nil {
@@ -1163,6 +1163,7 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
		deduplicateEnvVars(spiloEnvVars, c.containerName(), c.logger),
		volumeMounts,
		c.OpConfig.Resources.SpiloPrivileged,
		c.OpConfig.Resources.SpiloAllowPrivilegeEscalation,
		generateCapabilities(c.OpConfig.AdditionalPodCapabilities),
	)

@@ -1279,7 +1280,6 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
	}

	stsAnnotations := make(map[string]string)
	stsAnnotations[rollingUpdateStatefulsetAnnotationKey] = strconv.FormatBool(false)
	stsAnnotations = c.AnnotationsToPropagate(c.annotationsSet(nil))

	statefulSet := &appsv1.StatefulSet{
@@ -1917,6 +1917,7 @@ func (c *Cluster) generateLogicalBackupJob() (*batchv1beta1.CronJob, error) {
		envVars,
		[]v1.VolumeMount{},
		c.OpConfig.SpiloPrivileged, // use same value as for normal DB pods
		c.OpConfig.SpiloAllowPrivilegeEscalation,
		nil,
	)

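Editor's aside (not part of the diff): the generateContainer change above stops deriving AllowPrivilegeEscalation from the address of the privileged flag, so the two knobs can diverge. A minimal sketch of the resulting SecurityContext wiring; it uses the real k8s.io/api/core/v1 types, while the helper and the literal values are illustrative:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

func boolPtr(b bool) *bool { return &b }

func main() {
	privileged := false
	// Mirrors the fixed generateContainer: escalation comes from config,
	// not from the privileged flag's address.
	sc := &v1.SecurityContext{
		AllowPrivilegeEscalation: boolPtr(true), // e.g. spilo_allow_privilege_escalation
		Privileged:               &privileged,   // spilo_privileged
	}
	fmt.Println(*sc.AllowPrivilegeEscalation, *sc.Privileged) // true false
}
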
@@ -0,0 +1,103 @@
package cluster

import (
	"fmt"

	"github.com/zalando/postgres-operator/pkg/spec"
	v1 "k8s.io/api/core/v1"
)

// VersionMap Map of version numbers
var VersionMap = map[string]int{
	"9.5": 90500,
	"9.6": 90600,
	"10":  100000,
	"11":  110000,
	"12":  120000,
	"13":  130000,
}

// IsBiggerPostgresVersion Compare two Postgres version numbers
func IsBiggerPostgresVersion(old string, new string) bool {
	oldN, _ := VersionMap[old]
	newN, _ := VersionMap[new]
	return newN > oldN
}

// GetDesiredMajorVersionAsInt Convert string to comparable integer of PG version
func (c *Cluster) GetDesiredMajorVersionAsInt() int {
	return VersionMap[c.GetDesiredMajorVersion()]
}

// GetDesiredMajorVersion returns major version to use, incl. potential auto upgrade
func (c *Cluster) GetDesiredMajorVersion() string {

	if c.Config.OpConfig.MajorVersionUpgradeMode == "full" {
		// current is 9.5, minimal is 11 allowing 11 to 13 clusters, everything below is upgraded
		if IsBiggerPostgresVersion(c.Spec.PgVersion, c.Config.OpConfig.MinimalMajorVersion) {
			c.logger.Infof("overwriting configured major version %s to %s", c.Spec.PgVersion, c.Config.OpConfig.TargetMajorVersion)
			return c.Config.OpConfig.TargetMajorVersion
		}
	}

	return c.Spec.PgVersion
}

func (c *Cluster) majorVersionUpgrade() error {

	if c.OpConfig.MajorVersionUpgradeMode == "off" {
		return nil
	}

	desiredVersion := c.GetDesiredMajorVersionAsInt()

	if c.currentMajorVersion >= desiredVersion {
		c.logger.Infof("cluster version up to date. current: %d, min desired: %d", c.currentMajorVersion, desiredVersion)
		return nil
	}

	pods, err := c.listPods()
	if err != nil {
		return err
	}

	allRunning := true

	var masterPod *v1.Pod

	for _, pod := range pods {
		ps, _ := c.patroni.GetMemberData(&pod)

		if ps.State != "running" {
			allRunning = false
			c.logger.Infof("identified non running pod, potentially skipping major version upgrade")
		}

		if ps.Role == "master" {
			masterPod = &pod
			c.currentMajorVersion = ps.ServerVersion
		}
	}

	numberOfPods := len(pods)
	if allRunning && masterPod != nil {
		c.logger.Infof("healthy cluster ready to upgrade, current: %d desired: %d", c.currentMajorVersion, desiredVersion)
		if c.currentMajorVersion < desiredVersion {
			podName := &spec.NamespacedName{Namespace: masterPod.Namespace, Name: masterPod.Name}
			c.logger.Infof("triggering major version upgrade on pod %s of %d pods", masterPod.Name, numberOfPods)
			c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeNormal, "Major Version Upgrade", "Starting major version upgrade on pod %s of %d pods", masterPod.Name, numberOfPods)
			upgradeCommand := fmt.Sprintf("/usr/bin/python3 /scripts/inplace_upgrade.py %d 2>&1 | tee last_upgrade.log", numberOfPods)

			result, err := c.ExecCommand(podName, "/bin/su", "postgres", "-c", upgradeCommand)
			if err != nil {
				c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeNormal, "Major Version Upgrade", "Upgrade from %d to %d FAILED: %v", c.currentMajorVersion, desiredVersion, err)
				return err
			}

			c.logger.Infof("upgrade action triggered and command completed: %s", result[:50])
			c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeNormal, "Major Version Upgrade", "Upgrade from %d to %d finished", c.currentMajorVersion, desiredVersion)
		}
	}

	return nil
}

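Editor's aside (not part of the diff): a standalone sketch of the "full"-mode decision in GetDesiredMajorVersion above — any cluster below minimal_major_version is steered to target_major_version, everything at or above it keeps its configured version. The local helper reuses the same numeric mapping as VersionMap:

package main

import "fmt"

var versionMap = map[string]int{"9.5": 90500, "9.6": 90600, "10": 100000, "11": 110000, "12": 120000, "13": 130000}

// desiredVersion mirrors GetDesiredMajorVersion for mode "full" (sketch).
func desiredVersion(specVersion, minimal, target string) string {
	if versionMap[minimal] > versionMap[specVersion] { // spec version violates the minimum
		return target
	}
	return specVersion
}

func main() {
	fmt.Println(desiredVersion("9.6", "11", "13")) // 13: below the minimum, upgraded
	fmt.Println(desiredVersion("12", "11", "13"))  // 12: already allowed, kept
}
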
@@ -4,14 +4,17 @@ import (
	"context"
	"fmt"
	"math/rand"
	"strconv"
	"time"

	appsv1 "k8s.io/api/apps/v1"
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"

	"github.com/zalando/postgres-operator/pkg/spec"
	"github.com/zalando/postgres-operator/pkg/util"
	"github.com/zalando/postgres-operator/pkg/util/patroni"
	"github.com/zalando/postgres-operator/pkg/util/retryutil"
)

@@ -45,6 +48,64 @@ func (c *Cluster) getRolePods(role PostgresRole) ([]v1.Pod, error) {
	return pods.Items, nil
}

// markRollingUpdateFlagForPod sets the indicator for the rolling update requirement
// in the Pod annotation.
func (c *Cluster) markRollingUpdateFlagForPod(pod *v1.Pod, msg string) error {
	// no need to patch pod if annotation is already there
	if c.getRollingUpdateFlagFromPod(pod) {
		return nil
	}

	c.logger.Debugf("mark rolling update annotation for %s: reason %s", pod.Name, msg)
	flag := make(map[string]string)
	flag[rollingUpdatePodAnnotationKey] = strconv.FormatBool(true)

	patchData, err := metaAnnotationsPatch(flag)
	if err != nil {
		return fmt.Errorf("could not form patch for pod's rolling update flag: %v", err)
	}

	err = retryutil.Retry(1*time.Second, 5*time.Second,
		func() (bool, error) {
			_, err2 := c.KubeClient.Pods(pod.Namespace).Patch(
				context.TODO(),
				pod.Name,
				types.MergePatchType,
				[]byte(patchData),
				metav1.PatchOptions{},
				"")
			if err2 != nil {
				return false, err2
			}
			return true, nil
		})
	if err != nil {
		return fmt.Errorf("could not patch pod rolling update flag %q: %v", patchData, err)
	}

	return nil
}

// getRollingUpdateFlagFromPod returns the value of the rollingUpdate flag from the given pod
func (c *Cluster) getRollingUpdateFlagFromPod(pod *v1.Pod) (flag bool) {
	anno := pod.GetAnnotations()
	flag = false

	stringFlag, exists := anno[rollingUpdatePodAnnotationKey]
	if exists {
		var err error
		c.logger.Debugf("found rolling update flag on pod %q", pod.Name)
		if flag, err = strconv.ParseBool(stringFlag); err != nil {
			c.logger.Warnf("error when parsing %q annotation for the pod %q: expected boolean value, got %q\n",
				rollingUpdatePodAnnotationKey,
				types.NamespacedName{Namespace: pod.Namespace, Name: pod.Name},
				stringFlag)
		}
	}

	return flag
}

func (c *Cluster) deletePods() error {
	c.logger.Debugln("deleting pods")
	pods, err := c.listPods()
@@ -281,7 +342,18 @@ func (c *Cluster) recreatePod(podName spec.NamespacedName) (*v1.Pod, error) {
	defer c.unregisterPodSubscriber(podName)
	stopChan := make(chan struct{})

	if err := c.KubeClient.Pods(podName.Namespace).Delete(context.TODO(), podName.Name, c.deleteOptions); err != nil {
	err := retryutil.Retry(1*time.Second, 5*time.Second,
		func() (bool, error) {
			err2 := c.KubeClient.Pods(podName.Namespace).Delete(
				context.TODO(),
				podName.Name,
				c.deleteOptions)
			if err2 != nil {
				return false, err2
			}
			return true, nil
		})
	if err != nil {
		return nil, fmt.Errorf("could not delete pod: %v", err)
	}

@@ -296,7 +368,7 @@ func (c *Cluster) recreatePod(podName spec.NamespacedName) (*v1.Pod, error) {
	return pod, nil
}

func (c *Cluster) isSafeToRecreatePods(pods *v1.PodList) bool {
func (c *Cluster) isSafeToRecreatePods(pods []v1.Pod) bool {

	/*
	 Operator should not re-create pods if there is at least one replica being bootstrapped
@@ -305,21 +377,18 @@ func (c *Cluster) isSafeToRecreatePods(pods *v1.PodList) bool {
	 XXX operator cannot forbid replica re-init, so we might still fail if re-init is started
	 after this check succeeds but before a pod is re-created
	*/

	for _, pod := range pods.Items {
	for _, pod := range pods {
		c.logger.Debugf("name=%s phase=%s ip=%s", pod.Name, pod.Status.Phase, pod.Status.PodIP)
	}

	for _, pod := range pods.Items {
	for _, pod := range pods {

		var state string
		var data patroni.MemberData

		err := retryutil.Retry(1*time.Second, 5*time.Second,
			func() (bool, error) {

				var err error

				state, err = c.patroni.GetPatroniMemberState(&pod)
				data, err = c.patroni.GetMemberData(&pod)

				if err != nil {
					return false, err
@@ -331,51 +400,43 @@ func (c *Cluster) isSafeToRecreatePods(pods *v1.PodList) bool {
		if err != nil {
			c.logger.Errorf("failed to get Patroni state for pod: %s", err)
			return false
		} else if state == "creating replica" {
		} else if data.State == "creating replica" {
			c.logger.Warningf("cannot re-create replica %s: it is currently being initialized", pod.Name)
			return false
		}

	}
	return true
}

func (c *Cluster) recreatePods() error {
func (c *Cluster) recreatePods(pods []v1.Pod, switchoverCandidates []spec.NamespacedName) error {
	c.setProcessName("starting to recreate pods")
	ls := c.labelsSet(false)
	namespace := c.Namespace

	listOptions := metav1.ListOptions{
		LabelSelector: ls.String(),
	}

	pods, err := c.KubeClient.Pods(namespace).List(context.TODO(), listOptions)
	if err != nil {
		return fmt.Errorf("could not get the list of pods: %v", err)
	}
	c.logger.Infof("there are %d pods in the cluster to recreate", len(pods.Items))
	c.logger.Infof("there are %d pods in the cluster to recreate", len(pods))

	if !c.isSafeToRecreatePods(pods) {
		return fmt.Errorf("postpone pod recreation until next Sync: recreation is unsafe because pods are being initialized")
	}

	var (
		masterPod, newMasterPod, newPod *v1.Pod
		masterPod, newMasterPod *v1.Pod
	)
	replicas := make([]spec.NamespacedName, 0)
	for i, pod := range pods.Items {
	replicas := switchoverCandidates

	for i, pod := range pods {
		role := PostgresRole(pod.Labels[c.OpConfig.PodRoleLabel])

		if role == Master {
			masterPod = &pods.Items[i]
			masterPod = &pods[i]
			continue
		}

		podName := util.NameFromMeta(pods.Items[i].ObjectMeta)
		if newPod, err = c.recreatePod(podName); err != nil {
		podName := util.NameFromMeta(pod.ObjectMeta)
		newPod, err := c.recreatePod(podName)
		if err != nil {
			return fmt.Errorf("could not recreate replica pod %q: %v", util.NameFromMeta(pod.ObjectMeta), err)
		}
		if newRole := PostgresRole(newPod.Labels[c.OpConfig.PodRoleLabel]); newRole == Replica {

		newRole := PostgresRole(newPod.Labels[c.OpConfig.PodRoleLabel])
		if newRole == Replica {
			replicas = append(replicas, util.NameFromMeta(pod.ObjectMeta))
		} else if newRole == Master {
			newMasterPod = newPod
@@ -383,7 +444,9 @@ func (c *Cluster) recreatePods() error {
	}

	if masterPod != nil {
		// failover if we have not observed a master pod when re-creating former replicas.
		// switchover if
		// 1. we have not observed a new master pod when re-creating former replicas
		// 2. we know possible switchover targets even when no replicas were recreated
		if newMasterPod == nil && len(replicas) > 0 {
			if err := c.Switchover(masterPod, masterCandidate(replicas)); err != nil {
				c.logger.Warningf("could not perform switch over: %v", err)

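Editor's aside (not part of the diff): markRollingUpdateFlagForPod above submits a JSON merge patch. The helper metaAnnotationsPatch is not shown in this hunk, but a payload for types.MergePatchType carrying one annotation would take roughly this shape — a sketch, assuming the annotation key defined in resources.go below:

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	patch := map[string]interface{}{
		"metadata": map[string]interface{}{
			"annotations": map[string]string{
				"zalando-postgres-operator-rolling-update-required": "true",
			},
		},
	}
	data, err := json.Marshal(patch)
	if err != nil {
		panic(err)
	}
	// {"metadata":{"annotations":{"zalando-postgres-operator-rolling-update-required":"true"}}}
	fmt.Println(string(data))
}
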
@@ -19,7 +19,7 @@ import (
)

const (
	rollingUpdateStatefulsetAnnotationKey = "zalando-postgres-operator-rolling-update-required"
	rollingUpdatePodAnnotationKey = "zalando-postgres-operator-rolling-update-required"
)

func (c *Cluster) listResources() error {
@@ -147,79 +147,6 @@ func (c *Cluster) preScaleDown(newStatefulSet *appsv1.StatefulSet) error {
	return nil
}

// setRollingUpdateFlagForStatefulSet sets the indicator or the rolling update requirement
// in the StatefulSet annotation.
func (c *Cluster) setRollingUpdateFlagForStatefulSet(sset *appsv1.StatefulSet, val bool, msg string) {
	anno := sset.GetAnnotations()
	if anno == nil {
		anno = make(map[string]string)
	}

	anno[rollingUpdateStatefulsetAnnotationKey] = strconv.FormatBool(val)
	sset.SetAnnotations(anno)
	c.logger.Debugf("set statefulset's rolling update annotation to %t: caller/reason %s", val, msg)
}

// applyRollingUpdateFlagforStatefulSet sets the rolling update flag for the cluster's StatefulSet
// and applies that setting to the actual running cluster.
func (c *Cluster) applyRollingUpdateFlagforStatefulSet(val bool) error {
	c.setRollingUpdateFlagForStatefulSet(c.Statefulset, val, "applyRollingUpdateFlag")
	sset, err := c.updateStatefulSetAnnotations(c.Statefulset.GetAnnotations())
	if err != nil {
		return err
	}
	c.Statefulset = sset
	return nil
}

// getRollingUpdateFlagFromStatefulSet returns the value of the rollingUpdate flag from the passed
// StatefulSet, reverting to the default value in case of errors
func (c *Cluster) getRollingUpdateFlagFromStatefulSet(sset *appsv1.StatefulSet, defaultValue bool) (flag bool) {
	anno := sset.GetAnnotations()
	flag = defaultValue

	stringFlag, exists := anno[rollingUpdateStatefulsetAnnotationKey]
	if exists {
		var err error
		if flag, err = strconv.ParseBool(stringFlag); err != nil {
			c.logger.Warnf("error when parsing %q annotation for the statefulset %q: expected boolean value, got %q\n",
				rollingUpdateStatefulsetAnnotationKey,
				types.NamespacedName{Namespace: sset.Namespace, Name: sset.Name},
				stringFlag)
			flag = defaultValue
		}
	}
	return flag
}

// mergeRollingUpdateFlagUsingCache returns the value of the rollingUpdate flag from the passed
// statefulset, however, the value can be cleared if there is a cached flag in the cluster that
// is set to false (the discrepancy could be a result of a failed StatefulSet update)
func (c *Cluster) mergeRollingUpdateFlagUsingCache(runningStatefulSet *appsv1.StatefulSet) bool {
	var (
		cachedStatefulsetExists, clearRollingUpdateFromCache, podsRollingUpdateRequired bool
	)

	if c.Statefulset != nil {
		// if we reset the rolling update flag in the statefulset structure in memory but didn't manage to update
		// the actual object in Kubernetes for some reason we want to avoid doing an unnecessary update by relying
		// on the 'cached' in-memory flag.
		cachedStatefulsetExists = true
		clearRollingUpdateFromCache = !c.getRollingUpdateFlagFromStatefulSet(c.Statefulset, true)
		c.logger.Debugf("cached StatefulSet value exists, rollingUpdate flag is %t", clearRollingUpdateFromCache)
	}

	if podsRollingUpdateRequired = c.getRollingUpdateFlagFromStatefulSet(runningStatefulSet, false); podsRollingUpdateRequired {
		if cachedStatefulsetExists && clearRollingUpdateFromCache {
			c.logger.Infof("clearing the rolling update flag based on the cached information")
			podsRollingUpdateRequired = false
		} else {
			c.logger.Infof("found a statefulset with an unfinished rolling update of the pods")
		}
	}
	return podsRollingUpdateRequired
}

func (c *Cluster) updateStatefulSetAnnotations(annotations map[string]string) (*appsv1.StatefulSet, error) {
	c.logger.Debugf("patching statefulset annotations")
	patchData, err := metaAnnotationsPatch(annotations)
@@ -237,8 +164,8 @@ func (c *Cluster) updateStatefulSetAnnotations(annotations map[string]string) (*
		return nil, fmt.Errorf("could not patch statefulset annotations %q: %v", patchData, err)
	}
	return result, nil

}

func (c *Cluster) updateStatefulSet(newStatefulSet *appsv1.StatefulSet) error {
	c.setProcessName("updating statefulset")
	if c.Statefulset == nil {

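Editor's aside (not part of the diff): with the statefulset-level helpers removed above, the rolling-update flag now round-trips as a plain string annotation per pod. A standalone sketch of the encode/decode pair used by the new pod-level helpers:

package main

import (
	"fmt"
	"strconv"
)

func main() {
	// Encode, as in markRollingUpdateFlagForPod.
	anno := map[string]string{}
	anno["zalando-postgres-operator-rolling-update-required"] = strconv.FormatBool(true)

	// Decode, as in getRollingUpdateFlagFromPod; malformed values fall back to false.
	flag := false
	if s, ok := anno["zalando-postgres-operator-rolling-update-required"]; ok {
		if v, err := strconv.ParseBool(s); err == nil {
			flag = v
		}
	}
	fmt.Println(flag) // true
}
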
|
|
@@ -118,6 +118,11 @@ func (c *Cluster) Sync(newSpec *acidv1.Postgresql) error {
        return fmt.Errorf("could not sync connection pooler: %v", err)
    }

+    // Major version upgrade must only run after success of all earlier operations, must remain last item in sync
+    if err := c.majorVersionUpgrade(); err != nil {
+        c.logger.Errorf("major version upgrade failed: %v", err)
+    }
+
    return err
}
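// Note: the upgrade error above is logged, not returned, so a failed major
// version upgrade does not abort the sync as a whole; and, per the comment,
// the call is kept as the last step so the upgrade only starts once every
// other resource has been synced successfully.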
@@ -278,22 +283,24 @@ func (c *Cluster) mustUpdatePodsAfterLazyUpdate(desiredSset *appsv1.StatefulSet)
}

func (c *Cluster) syncStatefulSet() error {
-    var (
-        podsRollingUpdateRequired bool
-    )
+    podsToRecreate := make([]v1.Pod, 0)
+    switchoverCandidates := make([]spec.NamespacedName, 0)
+
+    pods, err := c.listPods()
+    if err != nil {
+        c.logger.Infof("could not list pods of the statefulset: %v", err)
+    }

    // NB: Be careful to consider the codepath that acts on podsRollingUpdateRequired before returning early.
    sset, err := c.KubeClient.StatefulSets(c.Namespace).Get(context.TODO(), c.statefulSetName(), metav1.GetOptions{})
    if err != nil {
        if !k8sutil.ResourceNotFound(err) {
-            return fmt.Errorf("could not get statefulset: %v", err)
+            return fmt.Errorf("error during reading of statefulset: %v", err)
        }
        // statefulset does not exist, try to re-create it
        c.Statefulset = nil
-        c.logger.Infof("could not find the cluster's statefulset")
-        pods, err := c.listPods()
-        if err != nil {
-            return fmt.Errorf("could not list pods of the statefulset: %v", err)
-        }
+        c.logger.Infof("cluster's statefulset does not exist")

        sset, err = c.createStatefulSet()
        if err != nil {
@@ -304,41 +311,63 @@ func (c *Cluster) syncStatefulSet() error {
            return fmt.Errorf("cluster is not ready: %v", err)
        }

-        podsRollingUpdateRequired = (len(pods) > 0)
-        if podsRollingUpdateRequired {
-            c.logger.Warningf("found pods from the previous statefulset: trigger rolling update")
-            if err := c.applyRollingUpdateFlagforStatefulSet(podsRollingUpdateRequired); err != nil {
-                return fmt.Errorf("could not set rolling update flag for the statefulset: %v", err)
+        if len(pods) > 0 {
+            for _, pod := range pods {
+                if err = c.markRollingUpdateFlagForPod(&pod, "pod from previous statefulset"); err != nil {
+                    c.logger.Warnf("marking old pod for rolling update failed: %v", err)
                }
+                podsToRecreate = append(podsToRecreate, pod)
+            }
        }
        c.logger.Infof("created missing statefulset %q", util.NameFromMeta(sset.ObjectMeta))

    } else {
-        podsRollingUpdateRequired = c.mergeRollingUpdateFlagUsingCache(sset)
+        // check if there are still pods with a rolling update flag
+        for _, pod := range pods {
+            if c.getRollingUpdateFlagFromPod(&pod) {
+                podsToRecreate = append(podsToRecreate, pod)
+            } else {
+                role := PostgresRole(pod.Labels[c.OpConfig.PodRoleLabel])
+                if role == Master {
+                    continue
+                }
+                switchoverCandidates = append(switchoverCandidates, util.NameFromMeta(pod.ObjectMeta))
+            }
+        }
+
+        if len(podsToRecreate) > 0 {
+            c.logger.Debugf("%d / %d pod(s) still need to be rotated", len(podsToRecreate), len(pods))
+        }

        // statefulset is already there, make sure we use its definition in order to compare with the spec.
        c.Statefulset = sset

-        desiredSS, err := c.generateStatefulSet(&c.Spec)
+        desiredSts, err := c.generateStatefulSet(&c.Spec)
        if err != nil {
            return fmt.Errorf("could not generate statefulset: %v", err)
        }
-        c.setRollingUpdateFlagForStatefulSet(desiredSS, podsRollingUpdateRequired, "from cache")
-
-        cmp := c.compareStatefulSetWith(desiredSS)
+        cmp := c.compareStatefulSetWith(desiredSts)
        if !cmp.match {
-            if cmp.rollingUpdate && !podsRollingUpdateRequired {
-                podsRollingUpdateRequired = true
-                c.setRollingUpdateFlagForStatefulSet(desiredSS, podsRollingUpdateRequired, "statefulset changes")
+            if cmp.rollingUpdate {
+                podsToRecreate = make([]v1.Pod, 0)
+                switchoverCandidates = make([]spec.NamespacedName, 0)
+                for _, pod := range pods {
+                    if err = c.markRollingUpdateFlagForPod(&pod, "pod changes"); err != nil {
+                        return fmt.Errorf("updating rolling update flag for pod failed: %v", err)
+                    }
+                    podsToRecreate = append(podsToRecreate, pod)
+                }
            }

-            c.logStatefulSetChanges(c.Statefulset, desiredSS, false, cmp.reasons)
+            c.logStatefulSetChanges(c.Statefulset, desiredSts, false, cmp.reasons)

            if !cmp.replace {
-                if err := c.updateStatefulSet(desiredSS); err != nil {
+                if err := c.updateStatefulSet(desiredSts); err != nil {
                    return fmt.Errorf("could not update statefulset: %v", err)
                }
            } else {
-                if err := c.replaceStatefulSet(desiredSS); err != nil {
+                if err := c.replaceStatefulSet(desiredSts); err != nil {
                    return fmt.Errorf("could not replace statefulset: %v", err)
                }
            }
        }
@@ -346,18 +375,30 @@ func (c *Cluster) syncStatefulSet() error {

        c.updateStatefulSetAnnotations(c.AnnotationsToPropagate(c.annotationsSet(c.Statefulset.Annotations)))

-        if !podsRollingUpdateRequired && !c.OpConfig.EnableLazySpiloUpgrade {
-            // even if desired and actual statefulsets match
+        if len(podsToRecreate) == 0 && !c.OpConfig.EnableLazySpiloUpgrade {
+            // even if the desired and the running statefulsets match
            // there still may be not up-to-date pods on condition
            // (a) the lazy update was just disabled
            // and
            // (b) some of the pods were not restarted when the lazy update was still in place
-            podsRollingUpdateRequired, err = c.mustUpdatePodsAfterLazyUpdate(desiredSS)
-            if err != nil {
-                return fmt.Errorf("could not list pods of the statefulset: %v", err)
-            }
-        }
+            for _, pod := range pods {
+                effectivePodImage := pod.Spec.Containers[0].Image
+                stsImage := desiredSts.Spec.Template.Spec.Containers[0].Image
+
+                if stsImage != effectivePodImage {
+                    if err = c.markRollingUpdateFlagForPod(&pod, "pod not yet restarted due to lazy update"); err != nil {
+                        c.logger.Warnf("updating rolling update flag failed for pod %q: %v", pod.Name, err)
+                    }
+                    podsToRecreate = append(podsToRecreate, pod)
+                } else {
+                    role := PostgresRole(pod.Labels[c.OpConfig.PodRoleLabel])
+                    if role == Master {
+                        continue
+                    }
+                    switchoverCandidates = append(switchoverCandidates, util.NameFromMeta(pod.ObjectMeta))
+                }
+            }
+        }
    }
@@ -369,17 +410,13 @@ func (c *Cluster) syncStatefulSet() error {

    // if we get here we also need to re-create the pods (either leftovers from the old
    // statefulset or those that got their configuration from the outdated statefulset)
-    if podsRollingUpdateRequired {
+    if len(podsToRecreate) > 0 {
        c.logger.Debugln("performing rolling update")
        c.eventRecorder.Event(c.GetReference(), v1.EventTypeNormal, "Update", "Performing rolling update")
-        if err := c.recreatePods(); err != nil {
+        if err := c.recreatePods(podsToRecreate, switchoverCandidates); err != nil {
            return fmt.Errorf("could not recreate pods: %v", err)
        }
-        c.logger.Infof("pods have been recreated")
        c.eventRecorder.Event(c.GetReference(), v1.EventTypeNormal, "Update", "Rolling update done - pods have been recreated")
-        if err := c.applyRollingUpdateFlagforStatefulSet(false); err != nil {
-            c.logger.Warningf("could not clear rolling update for the statefulset: %v", err)
-        }
    }
    return nil
}
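// Illustrative sketch (an assumption, not part of this diff): judging from
// the calls above, markRollingUpdateFlagForPod presumably patches a boolean
// annotation onto the pod, roughly along these lines (the annotation key is
// hypothetical):
//
//    patch := []byte(`{"metadata":{"annotations":{"zalando-postgres-operator-rolling-update-required":"true"}}}`)
//    _, err := c.KubeClient.Pods(pod.Namespace).Patch(context.TODO(), pod.Name,
//        types.StrategicMergePatchType, patch, metav1.PatchOptions{})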
@@ -471,7 +508,7 @@ func (c *Cluster) syncSecrets() error {
    for secretUsername, secretSpec := range secrets {
        if secret, err = c.KubeClient.Secrets(secretSpec.Namespace).Create(context.TODO(), secretSpec, metav1.CreateOptions{}); err == nil {
            c.Secrets[secret.UID] = secret
-            c.logger.Debugf("created new secret %q, uid: %q", util.NameFromMeta(secret.ObjectMeta), secret.UID)
+            c.logger.Debugf("created new secret %s, uid: %s", util.NameFromMeta(secret.ObjectMeta), secret.UID)
            continue
        }
        if k8sutil.ResourceAlreadyExists(err) {
@@ -480,7 +517,7 @@ func (c *Cluster) syncSecrets() error {
                return fmt.Errorf("could not get current secret: %v", err)
            }
            if secretUsername != string(secret.Data["username"]) {
-                c.logger.Errorf("secret %s does not contain the role %q", secretSpec.Name, secretUsername)
+                c.logger.Errorf("secret %s does not contain the role %s", secretSpec.Name, secretUsername)
                continue
            }
            c.Secrets[secret.UID] = secret
@@ -499,7 +536,7 @@ func (c *Cluster) syncSecrets() error {
            if pwdUser.Password != string(secret.Data["password"]) &&
                pwdUser.Origin == spec.RoleOriginInfrastructure {

-                c.logger.Debugf("updating the secret %q from the infrastructure roles", secretSpec.Name)
+                c.logger.Debugf("updating the secret %s from the infrastructure roles", secretSpec.Name)
                if _, err = c.KubeClient.Secrets(secretSpec.Namespace).Update(context.TODO(), secretSpec, metav1.UpdateOptions{}); err != nil {
                    return fmt.Errorf("could not update infrastructure role secret for role %q: %v", secretUsername, err)
                }
@@ -509,7 +546,7 @@ func (c *Cluster) syncSecrets() error {
                userMap[secretUsername] = pwdUser
            }
        } else {
-            return fmt.Errorf("could not create secret for user %q: %v", secretUsername, err)
+            return fmt.Errorf("could not create secret for user %s: %v", secretUsername, err)
        }
    }
@@ -624,6 +661,9 @@ func (c *Cluster) syncDatabases() error {

    // set default privileges for prepared database
    for _, preparedDatabase := range preparedDatabases {
        if err := c.initDbConnWithName(preparedDatabase); err != nil {
            return fmt.Errorf("could not init database connection to %s", preparedDatabase)
        }
        if err = c.execAlterGlobalDefaultPrivileges(preparedDatabase+constants.OwnerRoleNameSuffix, preparedDatabase); err != nil {
            return err
        }
@@ -39,7 +39,7 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur
    result.EnableSpiloWalPathCompat = fromCRD.EnableSpiloWalPathCompat
    result.EtcdHost = fromCRD.EtcdHost
    result.KubernetesUseConfigMaps = fromCRD.KubernetesUseConfigMaps
-    result.DockerImage = util.Coalesce(fromCRD.DockerImage, "registry.opensource.zalan.do/acid/spilo-13:2.0-p4")
+    result.DockerImage = util.Coalesce(fromCRD.DockerImage, "registry.opensource.zalan.do/acid/spilo-13:2.0-p6")
    result.Workers = util.CoalesceUInt32(fromCRD.Workers, 8)
    result.MinInstances = fromCRD.MinInstances
    result.MaxInstances = fromCRD.MaxInstances
@@ -54,6 +54,11 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur
    result.SuperUsername = util.Coalesce(fromCRD.PostgresUsersConfiguration.SuperUsername, "postgres")
    result.ReplicationUsername = util.Coalesce(fromCRD.PostgresUsersConfiguration.ReplicationUsername, "standby")

+    // major version upgrade config
+    result.MajorVersionUpgradeMode = util.Coalesce(fromCRD.MajorVersionUpgrade.MajorVersionUpgradeMode, "off")
+    result.MinimalMajorVersion = util.Coalesce(fromCRD.MajorVersionUpgrade.MinimalMajorVersion, "9.5")
+    result.TargetMajorVersion = util.Coalesce(fromCRD.MajorVersionUpgrade.TargetMajorVersion, "13")
+
    // kubernetes config
    result.CustomPodAnnotations = fromCRD.Kubernetes.CustomPodAnnotations
    result.PodServiceAccountName = util.Coalesce(fromCRD.Kubernetes.PodServiceAccountName, "postgres-pod")
@@ -63,6 +68,7 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur
    result.PodEnvironmentSecret = fromCRD.Kubernetes.PodEnvironmentSecret
    result.PodTerminateGracePeriod = util.CoalesceDuration(time.Duration(fromCRD.Kubernetes.PodTerminateGracePeriod), "5m")
    result.SpiloPrivileged = fromCRD.Kubernetes.SpiloPrivileged
+    result.SpiloAllowPrivilegeEscalation = util.CoalesceBool(fromCRD.Kubernetes.SpiloAllowPrivilegeEscalation, util.True())
    result.SpiloRunAsUser = fromCRD.Kubernetes.SpiloRunAsUser
    result.SpiloRunAsGroup = fromCRD.Kubernetes.SpiloRunAsGroup
    result.SpiloFSGroup = fromCRD.Kubernetes.SpiloFSGroup
@@ -146,7 +152,7 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur

    // logical backup config
    result.LogicalBackupSchedule = util.Coalesce(fromCRD.LogicalBackup.Schedule, "30 00 * * *")
-    result.LogicalBackupDockerImage = util.Coalesce(fromCRD.LogicalBackup.DockerImage, "registry.opensource.zalan.do/acid/logical-backup:v1.6.1")
+    result.LogicalBackupDockerImage = util.Coalesce(fromCRD.LogicalBackup.DockerImage, "registry.opensource.zalan.do/acid/logical-backup:v1.6.2")
    result.LogicalBackupProvider = util.Coalesce(fromCRD.LogicalBackup.BackupProvider, "s3")
    result.LogicalBackupS3Bucket = fromCRD.LogicalBackup.S3Bucket
    result.LogicalBackupS3Region = fromCRD.LogicalBackup.S3Region
@@ -199,10 +199,10 @@ func (c *Controller) processEvent(event ClusterEvent) {
    if event.EventType == EventRepair {
        runRepair, lastOperationStatus := cl.NeedsRepair()
        if !runRepair {
-            lg.Debugf("Observed cluster status %s, repair is not required", lastOperationStatus)
+            lg.Debugf("observed cluster status %s, repair is not required", lastOperationStatus)
            return
        }
-        lg.Debugf("Observed cluster status %s, running sync scan to repair the cluster", lastOperationStatus)
+        lg.Debugf("observed cluster status %s, running sync scan to repair the cluster", lastOperationStatus)
        event.EventType = EventSync
    }
@@ -217,7 +217,7 @@ func (c *Controller) processEvent(event ClusterEvent) {
    }

    if err := c.submitRBACCredentials(event); err != nil {
-        c.logger.Warnf("Pods and/or Patroni may misfunction due to the lack of permissions: %v", err)
+        c.logger.Warnf("pods and/or Patroni may misfunction due to the lack of permissions: %v", err)
    }

}
@@ -225,7 +225,7 @@ func (c *Controller) processEvent(event ClusterEvent) {
    switch event.EventType {
    case EventAdd:
        if clusterFound {
-            lg.Infof("Recieved add event for already existing Postgres cluster")
+            lg.Infof("received add event for already existing Postgres cluster")
            return
        }
@@ -348,11 +348,11 @@ func (c *Controller) processClusterEventsQueue(idx int, stopCh <-chan struct{},
func (c *Controller) warnOnDeprecatedPostgreSQLSpecParameters(spec *acidv1.PostgresSpec) {

    deprecate := func(deprecated, replacement string) {
-        c.logger.Warningf("Parameter %q is deprecated. Consider setting %q instead", deprecated, replacement)
+        c.logger.Warningf("parameter %q is deprecated. Consider setting %q instead", deprecated, replacement)
    }

    noeffect := func(param string, explanation string) {
-        c.logger.Warningf("Parameter %q takes no effect. %s", param, explanation)
+        c.logger.Warningf("parameter %q takes no effect. %s", param, explanation)
    }

    if spec.UseLoadBalancer != nil {
@@ -368,7 +368,7 @@ func (c *Controller) warnOnDeprecatedPostgreSQLSpecParameters(spec *acidv1.Postg

    if (spec.UseLoadBalancer != nil || spec.ReplicaLoadBalancer != nil) &&
        (spec.EnableReplicaLoadBalancer != nil || spec.EnableMasterLoadBalancer != nil) {
-        c.logger.Warnf("Both old and new load balancer parameters are present in the manifest, ignoring old ones")
+        c.logger.Warnf("both old and new load balancer parameters are present in the manifest, ignoring old ones")
    }
}
@@ -480,3 +480,45 @@ func TestInfrastructureRoleDefinitions(t *testing.T) {
        }
    }
}
+
+type SubConfig struct {
+    teammap map[string]string
+}
+
+type SuperConfig struct {
+    sub SubConfig
+}
+
+func TestUnderstandingMapsAndReferences(t *testing.T) {
+    teams := map[string]string{"acid": "Felix"}
+
+    sc := SubConfig{
+        teammap: teams,
+    }
+
+    ssc := SuperConfig{
+        sub: sc,
+    }
+
+    teams["24x7"] = "alex"
+
+    if len(ssc.sub.teammap) != 2 {
+        t.Errorf("Team Map does not contain 2 elements")
+    }
+
+    ssc.sub.teammap["teapot"] = "Mikkel"
+
+    if len(teams) != 3 {
+        t.Errorf("Team Map does not contain 3 elements")
+    }
+
+    teams = make(map[string]string)
+
+    if len(ssc.sub.teammap) != 3 {
+        t.Errorf("Team Map does not contain 3 elements")
+    }
+
+    if &teams == &(ssc.sub.teammap) {
+        t.Errorf("Identical maps")
+    }
+}
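// Why the test above holds: a Go map value is a small descriptor pointing at
// shared backing storage. Copying SubConfig into SuperConfig copies that
// descriptor, not the entries, so writes through either copy are visible in
// both. Reassigning the local variable `teams` to a fresh map only rebinds
// that variable; the struct copies keep referencing the original storage,
// which still holds three entries.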
@@ -34,6 +34,7 @@ type Resources struct {
    PodPriorityClassName          string            `name:"pod_priority_class_name"`
    ClusterDomain                 string            `name:"cluster_domain" default:"cluster.local"`
    SpiloPrivileged               bool              `name:"spilo_privileged" default:"false"`
+    SpiloAllowPrivilegeEscalation *bool             `name:"spilo_allow_privilege_escalation" default:"true"`
    AdditionalPodCapabilities     []string          `name:"additional_pod_capabilities" default:""`
    ClusterLabels                 map[string]string `name:"cluster_labels" default:"application:spilo"`
    InheritedLabels               []string          `name:"inherited_labels" default:""`
@@ -113,7 +114,7 @@ type Scalyr struct {
// LogicalBackup defines configuration for logical backup
type LogicalBackup struct {
    LogicalBackupSchedule    string `name:"logical_backup_schedule" default:"30 00 * * *"`
-    LogicalBackupDockerImage string `name:"logical_backup_docker_image" default:"registry.opensource.zalan.do/acid/logical-backup:v1.6.1"`
+    LogicalBackupDockerImage string `name:"logical_backup_docker_image" default:"registry.opensource.zalan.do/acid/logical-backup:v1.6.2"`
    LogicalBackupProvider    string `name:"logical_backup_provider" default:"s3"`
    LogicalBackupS3Bucket    string `name:"logical_backup_s3_bucket" default:""`
    LogicalBackupS3Region    string `name:"logical_backup_s3_region" default:""`
@@ -151,7 +152,7 @@ type Config struct {
    WatchedNamespace        string            `name:"watched_namespace"` // special values: "*" means 'watch all namespaces', the empty string "" means 'watch a namespace where operator is deployed to'
    KubernetesUseConfigMaps bool              `name:"kubernetes_use_configmaps" default:"false"`
    EtcdHost                string            `name:"etcd_host" default:""` // special values: the empty string "" means Patroni will use K8s as a DCS
-    DockerImage             string            `name:"docker_image" default:"registry.opensource.zalan.do/acid/spilo-13:2.0-p4"`
+    DockerImage             string            `name:"docker_image" default:"registry.opensource.zalan.do/acid/spilo-13:2.0-p6"`
    SidecarImages           map[string]string `name:"sidecar_docker_images"` // deprecated in favour of SidecarContainers
    SidecarContainers       []v1.Container    `name:"sidecars"`
    PodServiceAccountName   string            `name:"pod_service_account_name" default:"postgres-pod"`
@@ -206,6 +207,9 @@ type Config struct {
    EnableLazySpiloUpgrade   bool   `name:"enable_lazy_spilo_upgrade" default:"false"`
    EnablePgVersionEnvVar    bool   `name:"enable_pgversion_env_var" default:"true"`
    EnableSpiloWalPathCompat bool   `name:"enable_spilo_wal_path_compat" default:"false"`
+    MajorVersionUpgradeMode  string `name:"major_version_upgrade_mode" default:"off"`
+    MinimalMajorVersion      string `name:"minimal_major_version" default:"9.5"`
+    TargetMajorVersion       string `name:"target_major_version" default:"13"`
}

// MustMarshal marshals the config or panics
@@ -0,0 +1,11 @@
+package httpclient
+
+//go:generate mockgen -package mocks -destination=$PWD/mocks/$GOFILE -source=$GOFILE -build_flags=-mod=vendor
+
+import "net/http"
+
+// HTTPClient interface
+type HTTPClient interface {
+    Do(req *http.Request) (*http.Response, error)
+    Get(url string) (resp *http.Response, err error)
+}
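// The interface above exists so the Patroni client can be unit-tested:
// production code can keep passing a *http.Client (which already satisfies
// Do and Get), while tests inject a gomock-generated MockHTTPClient. A
// minimal usage sketch, based on the New signature introduced further down
// in this diff:
//
//    p := patroni.New(logger, nil) // nil falls back to a default http.Client
//    p = patroni.New(logger, &http.Client{Timeout: 5 * time.Second})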
@@ -3,7 +3,6 @@ package patroni
import (
    "bytes"
    "encoding/json"
-    "errors"
    "fmt"
    "io/ioutil"
    "net"
@@ -11,6 +10,8 @@ import (
    "strconv"
    "time"

+    httpclient "github.com/zalando/postgres-operator/pkg/util/httpclient"
+
    "github.com/sirupsen/logrus"
    v1 "k8s.io/api/core/v1"
)
@@ -26,24 +27,28 @@ const (
type Interface interface {
    Switchover(master *v1.Pod, candidate string) error
    SetPostgresParameters(server *v1.Pod, options map[string]string) error
-    GetPatroniMemberState(pod *v1.Pod) (string, error)
+    GetMemberData(server *v1.Pod) (MemberData, error)
}

// Patroni API client
type Patroni struct {
-    httpClient *http.Client
+    httpClient httpclient.HTTPClient
    logger     *logrus.Entry
}

// New creates a Patroni API client
-func New(logger *logrus.Entry) *Patroni {
-    cl := http.Client{
+func New(logger *logrus.Entry, client httpclient.HTTPClient) *Patroni {
+    if client == nil {
+        client = &http.Client{
            Timeout: timeout,
        }
+    }

    return &Patroni{
        logger:     logger,
-        httpClient: &cl,
+        httpClient: client,
    }
}
@@ -68,7 +73,9 @@ func (p *Patroni) httpPostOrPatch(method string, url string, body *bytes.Buffer)
        return fmt.Errorf("could not create request: %v", err)
    }

+    if p.logger != nil {
        p.logger.Debugf("making %s http request: %s", method, request.URL.String())
+    }

    resp, err := p.httpClient.Do(request)
    if err != nil {
@@ -126,35 +133,45 @@ func (p *Patroni) SetPostgresParameters(server *v1.Pod, parameters map[string]st
    return p.httpPostOrPatch(http.MethodPatch, apiURLString+configPath, buf)
}

-//GetPatroniMemberState returns a state of member of a Patroni cluster
-func (p *Patroni) GetPatroniMemberState(server *v1.Pod) (string, error) {
+// MemberDataPatroni child element
+type MemberDataPatroni struct {
+    Version string `json:"version"`
+    Scope   string `json:"scope"`
+}
+
+// MemberData Patroni member data from Patroni API
+type MemberData struct {
+    State           string            `json:"state"`
+    Role            string            `json:"role"`
+    ServerVersion   int               `json:"server_version"`
+    PendingRestart  bool              `json:"pending_restart"`
+    ClusterUnlocked bool              `json:"cluster_unlocked"`
+    Patroni         MemberDataPatroni `json:"patroni"`
+}
+
+// GetMemberData reads member data from the Patroni API
+func (p *Patroni) GetMemberData(server *v1.Pod) (MemberData, error) {

    apiURLString, err := apiURL(server)
    if err != nil {
-        return "", err
+        return MemberData{}, err
    }
    response, err := p.httpClient.Get(apiURLString)
    if err != nil {
-        return "", fmt.Errorf("could not perform Get request: %v", err)
+        return MemberData{}, fmt.Errorf("could not perform Get request: %v", err)
    }
    defer response.Body.Close()

    body, err := ioutil.ReadAll(response.Body)
    if err != nil {
-        return "", fmt.Errorf("could not read response: %v", err)
+        return MemberData{}, fmt.Errorf("could not read response: %v", err)
    }

-    data := make(map[string]interface{})
+    data := MemberData{}
    err = json.Unmarshal(body, &data)
    if err != nil {
-        return "", err
+        return MemberData{}, err
    }

-    state, ok := data["state"].(string)
-    if !ok {
-        return "", errors.New("Patroni Get call response contains wrong type for 'state' field")
-    }
-
-    return state, nil
+    return data, nil
}
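// Decoding into the typed MemberData struct (rather than the previous
// map[string]interface{}) lets callers read fields without type assertions;
// unknown JSON keys such as "xlog" or "timeline" are simply ignored by
// encoding/json. A standalone sketch of the same decode:
//
//    var md MemberData
//    if err := json.Unmarshal([]byte(`{"state": "running", "role": "master"}`), &md); err == nil {
//        fmt.Println(md.State, md.Role) // running master
//    }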
@@ -1,10 +1,17 @@
package patroni

import (
    "bytes"
-    "errors"
    "fmt"
-    "k8s.io/api/core/v1"
+    "io/ioutil"
    "net/http"
    "testing"
+
+    "github.com/golang/mock/gomock"
+    "github.com/zalando/postgres-operator/mocks"
+
+    v1 "k8s.io/api/core/v1"
)

func newMockPod(ip string) *v1.Pod {
@@ -72,3 +79,32 @@ func TestApiURL(t *testing.T) {
        }
    }
}
+
+func TestPatroniAPI(t *testing.T) {
+    ctrl := gomock.NewController(t)
+    defer ctrl.Finish()
+
+    json := `{"state": "running", "postmaster_start_time": "2021-02-19 14:31:50.053 CET", "role": "master", "server_version": 90621, "cluster_unlocked": false, "xlog": {"location": 55978296057856}, "timeline": 6, "database_system_identifier": "6462555844314089962", "pending_restart": true, "patroni": {"version": "2.0.1", "scope": "acid-rest92-standby"}}`
+    r := ioutil.NopCloser(bytes.NewReader([]byte(json)))
+
+    response := http.Response{
+        Status: "200",
+        Body:   r,
+    }
+
+    mockClient := mocks.NewMockHTTPClient(ctrl)
+    mockClient.EXPECT().Get(gomock.Any()).Return(&response, nil)
+
+    p := New(nil, mockClient)
+
+    pod := v1.Pod{
+        Status: v1.PodStatus{
+            PodIP: "192.168.100.1",
+        },
+    }
+    _, err := p.GetMemberData(&pod)
+
+    if err != nil {
+        t.Errorf("Could not read Patroni data: %v", err)
+    }
+}
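// Note on the test above: ioutil.NopCloser wraps the canned JSON so the fake
// *http.Response satisfies io.ReadCloser, letting GetMemberData call
// defer response.Body.Close() exactly as it would on a real response. The
// mock client returns that response for any URL, so no live Patroni endpoint
// is needed.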
@@ -119,7 +119,7 @@ func (strategy DefaultUserSyncStrategy) alterPgUserSet(user spec.PgUser, db *sql
    queries := produceAlterRoleSetStmts(user)
    query := fmt.Sprintf(doBlockStmt, strings.Join(queries, ";"))
    if _, err := db.Exec(query); err != nil {
-        return fmt.Errorf("dB error: %v, query: %s", err, query)
+        return err
    }
    return nil
}
@@ -146,7 +146,7 @@ func (strategy DefaultUserSyncStrategy) createPgUser(user spec.PgUser, db *sql.D
    query := fmt.Sprintf(createUserSQL, user.Name, strings.Join(userFlags, " "), userPassword)

    if _, err := db.Exec(query); err != nil { // TODO: Try several times
-        return fmt.Errorf("dB error: %v, query: %s", err, query)
+        return err
    }

    if len(user.Parameters) > 0 {
@@ -174,7 +174,7 @@ func (strategy DefaultUserSyncStrategy) alterPgUser(user spec.PgUser, db *sql.DB
        query := fmt.Sprintf(doBlockStmt, strings.Join(resultStmt, ";"))

        if _, err := db.Exec(query); err != nil { // TODO: Try several times
-            return fmt.Errorf("dB error: %v query %s", err, query)
+            return err
        }
    }