Merge branch 'master' into gh-pages

commit b215a67434
@@ -9,7 +9,7 @@ assignees: ''
 
 Please, answer some short questions which should help us to understand your problem / question better?
 
-- **Which image of the operator are you using?** e.g. ghcr.io/zalando/postgres-operator:v1.12.2
+- **Which image of the operator are you using?** e.g. ghcr.io/zalando/postgres-operator:v1.13.0
 - **Where do you run it - cloud or metal? Kubernetes or OpenShift?** [AWS K8s | GCP ... | Bare Metal K8s]
 - **Are you running Postgres Operator in production?** [yes | no]
 - **Type of issue?** [Bug report, question, feature request, etc.]
@@ -23,7 +23,7 @@ jobs:
 
       - uses: actions/setup-go@v2
         with:
-          go-version: "^1.22.3"
+          go-version: "^1.22.5"
 
       - name: Run unit tests
         run: make deps mocks test
@@ -14,7 +14,7 @@ jobs:
     - uses: actions/checkout@v1
     - uses: actions/setup-go@v2
       with:
-          go-version: "^1.22.3"
+          go-version: "^1.22.5"
     - name: Make dependencies
       run: make deps mocks
     - name: Code generation
@@ -14,7 +14,7 @@ jobs:
     - uses: actions/checkout@v2
     - uses: actions/setup-go@v2
       with:
-          go-version: "^1.22.3"
+          go-version: "^1.22.5"
     - name: Make dependencies
       run: make deps mocks
    - name: Compile
Makefile (4 changes)
@@ -69,7 +69,7 @@ docker: ${DOCKERDIR}/${DOCKERFILE}
 	docker build --rm -t "$(IMAGE):$(TAG)$(CDP_TAG)$(DEBUG_FRESH)$(DEBUG_POSTFIX)" -f "${DOCKERDIR}/${DOCKERFILE}" --build-arg VERSION="${VERSION}" .
 
 indocker-race:
-	docker run --rm -v "${GOPATH}":"${GOPATH}" -e GOPATH="${GOPATH}" -e RACE=1 -w ${PWD} golang:1.22.3 bash -c "make linux"
+	docker run --rm -v "${GOPATH}":"${GOPATH}" -e GOPATH="${GOPATH}" -e RACE=1 -w ${PWD} golang:1.22.5 bash -c "make linux"
 
 push:
 	docker push "$(IMAGE):$(TAG)$(CDP_TAG)"
@@ -78,7 +78,7 @@ mocks:
 	GO111MODULE=on go generate ./...
 
 tools:
-	GO111MODULE=on go get -d k8s.io/client-go@kubernetes-1.28.10
+	GO111MODULE=on go get -d k8s.io/client-go@kubernetes-1.28.12
 	GO111MODULE=on go install github.com/golang/mock/mockgen@v1.6.0
 	GO111MODULE=on go mod tidy
@@ -28,7 +28,7 @@ pipelines with no access to Kubernetes API directly, promoting infrastructure as
 
 ### PostgreSQL features
 
-* Supports PostgreSQL 16, starting from 11+
+* Supports PostgreSQL 16, starting from 12+
 * Streaming replication cluster via Patroni
 * Point-In-Time-Recovery with
 [pg_basebackup](https://www.postgresql.org/docs/16/app-pgbasebackup.html) /
@@ -57,13 +57,13 @@ production for over five years.
 
 | Release   | Postgres versions | K8s versions      | Golang  |
 | :-------- | :---------------: | :---------------: | :-----: |
-| v1.12.2   | 11 → 16      | 1.27+             | 1.22.3  |
+| v1.13.0   | 12 → 16      | 1.27+             | 1.22.5  |
+| v1.12.0   | 11 → 16      | 1.27+             | 1.22.3  |
 | v1.11.0   | 11 → 16      | 1.27+             | 1.21.7  |
 | v1.10.1   | 10 → 15      | 1.21+             | 1.19.8  |
 | v1.9.0    | 10 → 15      | 1.21+             | 1.18.9  |
 | v1.8.2    | 9.5 → 14     | 1.20 → 1.24  | 1.17.4  |
 
-
 ## Getting started
 
 For a quick first impression follow the instructions of this
@@ -1,7 +1,7 @@
 apiVersion: v2
 name: postgres-operator-ui
-version: 1.12.2
-appVersion: 1.12.2
+version: 1.13.0
+appVersion: 1.13.0
 home: https://github.com/zalando/postgres-operator
 description: Postgres Operator UI provides a graphical interface for a convenient database-as-a-service user experience
 keywords:
@@ -1,9 +1,32 @@
 apiVersion: v1
 entries:
   postgres-operator-ui:
+  - apiVersion: v2
+    appVersion: 1.13.0
+    created: "2024-08-21T18:55:36.524305158+02:00"
+    description: Postgres Operator UI provides a graphical interface for a convenient
+      database-as-a-service user experience
+    digest: e0444e516b50f82002d1a733527813c51759a627cefdd1005cea73659f824ea8
+    home: https://github.com/zalando/postgres-operator
+    keywords:
+    - postgres
+    - operator
+    - ui
+    - cloud-native
+    - patroni
+    - spilo
+    maintainers:
+    - email: opensource@zalando.de
+      name: Zalando
+    name: postgres-operator-ui
+    sources:
+    - https://github.com/zalando/postgres-operator
+    urls:
+    - postgres-operator-ui-1.13.0.tgz
+    version: 1.13.0
   - apiVersion: v2
     appVersion: 1.12.2
-    created: "2024-06-14T10:31:52.852963015+02:00"
+    created: "2024-08-21T18:55:36.521875733+02:00"
     description: Postgres Operator UI provides a graphical interface for a convenient
       database-as-a-service user experience
     digest: cbcef400c23ccece27d97369ad629278265c013e0a45c0b7f33e7568a082fedd
@@ -26,7 +49,7 @@ entries:
     version: 1.12.2
   - apiVersion: v2
     appVersion: 1.11.0
-    created: "2024-06-14T10:31:52.849576888+02:00"
+    created: "2024-08-21T18:55:36.51959105+02:00"
     description: Postgres Operator UI provides a graphical interface for a convenient
       database-as-a-service user experience
     digest: a45f2284045c2a9a79750a36997386444f39b01ac722b17c84b431457577a3a2
@@ -49,7 +72,7 @@ entries:
     version: 1.11.0
   - apiVersion: v2
     appVersion: 1.10.1
-    created: "2024-06-14T10:31:52.843219526+02:00"
+    created: "2024-08-21T18:55:36.516518177+02:00"
     description: Postgres Operator UI provides a graphical interface for a convenient
       database-as-a-service user experience
     digest: 2e5e7a82aebee519ec57c6243eb8735124aa4585a3a19c66ffd69638fbeb11ce
@@ -72,7 +95,7 @@ entries:
     version: 1.10.1
   - apiVersion: v2
     appVersion: 1.9.0
-    created: "2024-06-14T10:31:52.857573553+02:00"
+    created: "2024-08-21T18:55:36.52712908+02:00"
     description: Postgres Operator UI provides a graphical interface for a convenient
       database-as-a-service user experience
     digest: df434af6c8b697fe0631017ecc25e3c79e125361ae6622347cea41a545153bdc
@@ -93,27 +116,4 @@ entries:
     urls:
     - postgres-operator-ui-1.9.0.tgz
     version: 1.9.0
-  - apiVersion: v2
-    appVersion: 1.8.2
-    created: "2024-06-14T10:31:52.855335455+02:00"
-    description: Postgres Operator UI provides a graphical interface for a convenient
-      database-as-a-service user experience
-    digest: fbfc90fa8fd007a08a7c02e0ec9108bb8282cbb42b8c976d88f2193d6edff30c
-    home: https://github.com/zalando/postgres-operator
-    keywords:
-    - postgres
-    - operator
-    - ui
-    - cloud-native
-    - patroni
-    - spilo
-    maintainers:
-    - email: opensource@zalando.de
-      name: Zalando
-    name: postgres-operator-ui
-    sources:
-    - https://github.com/zalando/postgres-operator
-    urls:
-    - postgres-operator-ui-1.8.2.tgz
-    version: 1.8.2
-generated: "2024-06-14T10:31:52.839113675+02:00"
+generated: "2024-08-21T18:55:36.512456099+02:00"
Binary file not shown.
Binary file not shown.
@@ -8,7 +8,7 @@ replicaCount: 1
 image:
   registry: ghcr.io
   repository: zalando/postgres-operator-ui
-  tag: v1.12.2
+  tag: v1.13.0
   pullPolicy: "IfNotPresent"
 
 # Optionally specify an array of imagePullSecrets.
@@ -1,7 +1,7 @@
 apiVersion: v2
 name: postgres-operator
-version: 1.12.2
-appVersion: 1.12.2
+version: 1.13.0
+appVersion: 1.13.0
 home: https://github.com/zalando/postgres-operator
 description: Postgres Operator creates and manages PostgreSQL clusters running in Kubernetes
 keywords:
@@ -68,7 +68,7 @@ spec:
                   type: string
               docker_image:
                 type: string
-                default: "ghcr.io/zalando/spilo-16:3.2-p3"
+                default: "ghcr.io/zalando/spilo-16:3.3-p1"
               enable_crd_registration:
                 type: boolean
                 default: true
@@ -160,7 +160,7 @@ spec:
                 properties:
                   major_version_upgrade_mode:
                     type: string
-                    default: "off"
+                    default: "manual"
                   major_version_upgrade_team_allow_list:
                     type: array
                     items:
@@ -211,9 +211,9 @@ spec:
                   enable_init_containers:
                     type: boolean
                     default: true
-                  enable_secrets_deletion:
+                  enable_owner_references:
                     type: boolean
-                    default: true
+                    default: false
                   enable_persistent_volume_claim_deletion:
                     type: boolean
                     default: true
@@ -226,6 +226,9 @@ spec:
                   enable_readiness_probe:
                     type: boolean
                     default: false
+                  enable_secrets_deletion:
+                    type: boolean
+                    default: true
                   enable_sidecars:
                     type: boolean
                     default: true
@@ -469,7 +472,6 @@ spec:
                     type: string
                   additional_secret_mount_path:
                     type: string
-                    default: "/meta/credentials"
                   aws_region:
                     type: string
                     default: "eu-central-1"
@@ -508,7 +510,7 @@ spec:
                     pattern: '^(\d+m|\d+(\.\d{1,3})?)$'
                   logical_backup_docker_image:
                     type: string
-                    default: "ghcr.io/zalando/postgres-operator/logical-backup:v1.12.2"
+                    default: "ghcr.io/zalando/postgres-operator/logical-backup:v1.13.0"
                   logical_backup_google_application_credentials:
                     type: string
                   logical_backup_job_prefix:
@@ -226,7 +226,7 @@ spec:
                 type: array
                 items:
                   type: string
-                  pattern: '^\ *((Mon|Tue|Wed|Thu|Fri|Sat|Sun):(2[0-3]|[01]?\d):([0-5]?\d)|(2[0-3]|[01]?\d):([0-5]?\d))-((Mon|Tue|Wed|Thu|Fri|Sat|Sun):(2[0-3]|[01]?\d):([0-5]?\d)|(2[0-3]|[01]?\d):([0-5]?\d))\ *$'
+                  pattern: '^\ *((Mon|Tue|Wed|Thu|Fri|Sat|Sun):(2[0-3]|[01]?\d):([0-5]?\d)|(2[0-3]|[01]?\d):([0-5]?\d))-((2[0-3]|[01]?\d):([0-5]?\d)|(2[0-3]|[01]?\d):([0-5]?\d))\ *$'
               masterServiceAnnotations:
                 type: object
                 additionalProperties:
@@ -375,7 +375,6 @@ spec:
                   version:
                     type: string
                     enum:
-                      - "11"
                       - "12"
                       - "13"
                       - "14"
@@ -1,9 +1,31 @@
 apiVersion: v1
 entries:
   postgres-operator:
+  - apiVersion: v2
+    appVersion: 1.13.0
+    created: "2024-08-21T18:54:43.160735116+02:00"
+    description: Postgres Operator creates and manages PostgreSQL clusters running
+      in Kubernetes
+    digest: a839601689aea0a7e6bc0712a5244d435683cf3314c95794097ff08540e1dfef
+    home: https://github.com/zalando/postgres-operator
+    keywords:
+    - postgres
+    - operator
+    - cloud-native
+    - patroni
+    - spilo
+    maintainers:
+    - email: opensource@zalando.de
+      name: Zalando
+    name: postgres-operator
+    sources:
+    - https://github.com/zalando/postgres-operator
+    urls:
+    - postgres-operator-1.13.0.tgz
+    version: 1.13.0
   - apiVersion: v2
     appVersion: 1.12.2
-    created: "2024-06-14T10:30:44.071387784+02:00"
+    created: "2024-08-21T18:54:43.152249286+02:00"
     description: Postgres Operator creates and manages PostgreSQL clusters running
       in Kubernetes
     digest: 65858d14a40d7fd90c32bd9fc60021acc9555c161079f43a365c70171eaf21d8
@@ -25,7 +47,7 @@ entries:
     version: 1.12.2
   - apiVersion: v2
     appVersion: 1.11.0
-    created: "2024-06-14T10:30:44.065353504+02:00"
+    created: "2024-08-21T18:54:43.145837894+02:00"
     description: Postgres Operator creates and manages PostgreSQL clusters running
       in Kubernetes
     digest: 3914b5e117bda0834f05c9207f007e2ac372864cf6e86dcc2e1362bbe46c14d9
@@ -47,7 +69,7 @@ entries:
     version: 1.11.0
   - apiVersion: v2
     appVersion: 1.10.1
-    created: "2024-06-14T10:30:44.059080224+02:00"
+    created: "2024-08-21T18:54:43.139552116+02:00"
     description: Postgres Operator creates and manages PostgreSQL clusters running
       in Kubernetes
     digest: cc3baa41753da92466223d0b334df27e79c882296577b404a8e9071411fcf19c
@@ -69,7 +91,7 @@ entries:
     version: 1.10.1
   - apiVersion: v2
     appVersion: 1.9.0
-    created: "2024-06-14T10:30:44.084760658+02:00"
+    created: "2024-08-21T18:54:43.168490032+02:00"
     description: Postgres Operator creates and manages PostgreSQL clusters running
       in Kubernetes
     digest: 64df90c898ca591eb3a330328173ffaadfbf9ddd474d8c42ed143edc9e3f4276
@@ -89,26 +111,4 @@ entries:
     urls:
     - postgres-operator-1.9.0.tgz
     version: 1.9.0
-  - apiVersion: v2
-    appVersion: 1.8.2
-    created: "2024-06-14T10:30:44.077744166+02:00"
-    description: Postgres Operator creates and manages PostgreSQL clusters running
-      in Kubernetes
-    digest: f77ffad2e98b72a621e5527015cf607935d3ed688f10ba4b626435acb9631b5b
-    home: https://github.com/zalando/postgres-operator
-    keywords:
-    - postgres
-    - operator
-    - cloud-native
-    - patroni
-    - spilo
-    maintainers:
-    - email: opensource@zalando.de
-      name: Zalando
-    name: postgres-operator
-    sources:
-    - https://github.com/zalando/postgres-operator
-    urls:
-    - postgres-operator-1.8.2.tgz
-    version: 1.8.2
-generated: "2024-06-14T10:30:44.052436544+02:00"
+generated: "2024-08-21T18:54:43.126871802+02:00"
Binary file not shown.
Binary file not shown.
@@ -120,6 +120,7 @@ rules:
   - create
   - delete
   - get
+  - patch
   - update
 # to check nodes for node readiness label
 - apiGroups:
@@ -196,6 +197,7 @@ rules:
   - get
   - list
   - patch
+  - update
 # to CRUD cron jobs for logical backups
 - apiGroups:
   - batch
@@ -52,6 +52,9 @@ spec:
       {{- if .Values.controllerID.create }}
         - name: CONTROLLER_ID
           value: {{ template "postgres-operator.controllerID" . }}
+      {{- end }}
+      {{- if .Values.extraEnvs }}
+      {{- .Values.extraEnvs | toYaml | nindent 12 }}
       {{- end }}
         resources:
 {{ toYaml .Values.resources | indent 10 }}
@@ -1,7 +1,7 @@
 image:
   registry: ghcr.io
   repository: zalando/postgres-operator
-  tag: v1.12.2
+  tag: v1.13.0
   pullPolicy: "IfNotPresent"
 
 # Optionally specify an array of imagePullSecrets.
@@ -38,7 +38,7 @@ configGeneral:
   # etcd connection string for Patroni. Empty uses K8s-native DCS.
   etcd_host: ""
   # Spilo docker image
-  docker_image: ghcr.io/zalando/spilo-16:3.2-p3
+  docker_image: ghcr.io/zalando/spilo-16:3.3-p1
 
   # key name for annotation to ignore globally configured instance limits
   # ignore_instance_limits_annotation_key: ""
@@ -83,7 +83,7 @@ configUsers:
 
 configMajorVersionUpgrade:
   # "off": no upgrade, "manual": manifest triggers action, "full": minimal version violation triggers too
-  major_version_upgrade_mode: "off"
+  major_version_upgrade_mode: "manual"
   # upgrades will only be carried out for clusters of listed teams when mode is "off"
   # major_version_upgrade_team_allow_list:
   # - acid
@@ -129,8 +129,8 @@ configKubernetes:
   enable_finalizers: false
   # enables initContainers to run actions before Spilo is started
   enable_init_containers: true
-  # toggles if operator should delete secrets on cluster deletion
-  enable_secrets_deletion: true
+  # toggles if child resources should have an owner reference to the postgresql CR
+  enable_owner_references: false
   # toggles if operator should delete PVCs on cluster deletion
   enable_persistent_volume_claim_deletion: true
   # toggles pod anti affinity on the Postgres pods
@@ -139,6 +139,8 @@ configKubernetes:
   enable_pod_disruption_budget: true
   # toggles readiness probe for database pods
   enable_readiness_probe: false
+  # toggles if operator should delete secrets on cluster deletion
+  enable_secrets_deletion: true
   # enables sidecar containers to run alongside Spilo in the same pod
   enable_sidecars: true
@@ -362,7 +364,7 @@ configLogicalBackup:
   # logical_backup_memory_request: ""
 
   # image for pods of the logical backup job (example runs pg_dumpall)
-  logical_backup_docker_image: "ghcr.io/zalando/postgres-operator/logical-backup:v1.12.2"
+  logical_backup_docker_image: "ghcr.io/zalando/postgres-operator/logical-backup:v1.13.0"
   # path of google cloud service account json file
   # logical_backup_google_application_credentials: ""
 
@@ -504,6 +506,24 @@ readinessProbe:
   initialDelaySeconds: 5
   periodSeconds: 10
 
+# configure extra environment variables
+# Extra environment variables are written in Kubernetes format and added "as is" to the pod's env variables
+# https://kubernetes.io/docs/tasks/inject-data-application/define-environment-variable-container/
+# https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#environment-variables
+extraEnvs:
+  []
+  # Example of setting the maximum amount of memory / cpu that can be used by the Go process (to match resources.limits)
+  # - name: MY_VAR
+  #   value: my-value
+  # - name: GOMAXPROCS
+  #   valueFrom:
+  #     resourceFieldRef:
+  #       resource: limits.cpu
+  # - name: GOMEMLIMIT
+  #   valueFrom:
+  #     resourceFieldRef:
+  #       resource: limits.memory
+
 # Affinity for pod assignment
 # Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
 affinity: {}
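To make the new `extraEnvs` hook concrete, a minimal values override could look like the sketch below; the file name and release name are assumptions, and the `GOMEMLIMIT` pattern is lifted from the commented example in the chart values above.

```yaml
# values-override.yaml (hypothetical file name)
extraEnvs:
  - name: GOMEMLIMIT        # cap the Go heap at the container memory limit
    valueFrom:
      resourceFieldRef:
        resource: limits.memory

# applied with, for example:
#   helm upgrade --install postgres-operator ./charts/postgres-operator -f values-override.yaml
```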
@@ -5,27 +5,18 @@ pipeline:
       vm_config:
         type: linux
         size: large
+        image: cdp-runtime/go
       cache:
         paths:
-          - /go/pkg/mod
+          - /go/pkg/mod       # pkg cache for Go modules
+          - ~/.cache/go-build # Go build cache
       commands:
-        - desc: 'Update'
-          cmd: |
-            apt-get update
-        - desc: 'Install required build software'
-          cmd: |
-            apt-get install -y make git apt-transport-https ca-certificates curl build-essential python3 python3-pip
-        - desc: 'Install go'
-          cmd: |
-            cd /tmp
-            wget -q https://storage.googleapis.com/golang/go1.22.3.linux-amd64.tar.gz -O go.tar.gz
-            tar -xf go.tar.gz
-            mv go /usr/local
-            ln -s /usr/local/go/bin/go /usr/bin/go
-            go version
-        - desc: 'Build docker image'
-          cmd: |
-            export PATH=$PATH:$HOME/go/bin
+        - desc: Run unit tests
+          cmd: |
+            make deps mocks test
+
+        - desc: Build Docker image
+          cmd: |
             IS_PR_BUILD=${CDP_PULL_REQUEST_NUMBER+"true"}
             if [[ ${CDP_TARGET_BRANCH} == "master" && ${IS_PR_BUILD} != "true" ]]
             then
@@ -34,23 +25,7 @@ pipeline:
               IMAGE=registry-write.opensource.zalan.do/acid/postgres-operator-test
             fi
             export IMAGE
-            make deps mocks docker
-        - desc: 'Run unit tests'
-          cmd: |
-            export PATH=$PATH:$HOME/go/bin
-            go test ./...
-        - desc: 'Push docker image'
-          cmd: |
-            export PATH=$PATH:$HOME/go/bin
-            IS_PR_BUILD=${CDP_PULL_REQUEST_NUMBER+"true"}
-            if [[ ${CDP_TARGET_BRANCH} == "master" && ${IS_PR_BUILD} != "true" ]]
-            then
-              IMAGE=registry-write.opensource.zalan.do/acid/postgres-operator
-            else
-              IMAGE=registry-write.opensource.zalan.do/acid/postgres-operator-test
-            fi
-            export IMAGE
-            make push
+            make docker push
 
     - id: build-operator-ui
       type: script
@@ -1,18 +1,14 @@
-FROM registry.opensource.zalan.do/library/alpine-3.15:latest
+FROM golang:1.22-alpine
 LABEL maintainer="Team ACID @ Zalando <team-acid@zalando.de>"
 
 # We need root certificates to deal with teams api over https
-RUN apk --no-cache add ca-certificates go git musl-dev
+RUN apk -U add --no-cache ca-certificates delve
 
 COPY build/* /
 
 RUN addgroup -g 1000 pgo
 RUN adduser -D -u 1000 -G pgo -g 'Postgres Operator' pgo
 
-RUN go get -d github.com/derekparker/delve/cmd/dlv
-RUN cp /root/go/bin/dlv /dlv
-RUN chown -R pgo:pgo /dlv
-
 USER pgo:pgo
 RUN ls -l /
@@ -1,23 +1,20 @@
 ARG BASE_IMAGE=registry.opensource.zalan.do/library/alpine-3:latest
+FROM golang:1.22-alpine AS builder
 ARG VERSION=latest
 
-FROM ubuntu:20.04 as builder
-
-ARG VERSION
-
 COPY  . /go/src/github.com/zalando/postgres-operator
 WORKDIR /go/src/github.com/zalando/postgres-operator
 
-ENV OPERATOR_LDFLAGS="-X=main.version=${VERSION}"
-RUN bash docker/build_operator.sh
+RUN GO111MODULE=on go mod vendor \
+    && CGO_ENABLED=0 go build -o build/postgres-operator -v -ldflags "-X=main.version=${VERSION}" cmd/main.go
 
 FROM ${BASE_IMAGE}
 LABEL maintainer="Team ACID @ Zalando <team-acid@zalando.de>"
 LABEL org.opencontainers.image.source="https://github.com/zalando/postgres-operator"
 
 # We need root certificates to deal with teams api over https
-RUN apk --no-cache add curl
-RUN apk --no-cache add ca-certificates
+RUN apk -U upgrade --no-cache \
+    && apk add --no-cache curl ca-certificates
 
 COPY --from=builder /go/src/github.com/zalando/postgres-operator/build/* /
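For a quick local check of the multi-stage build above, the image can also be built directly. A sketch, assuming the file lives at `docker/Dockerfile` and using a throwaway tag; `make docker` from the Makefile remains the supported entry point.

```bash
# VERSION feeds the -ldflags "-X=main.version=..." shown in the builder stage
docker build --build-arg VERSION=v1.13.0 -t postgres-operator:local -f docker/Dockerfile .
```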
@@ -13,7 +13,7 @@ apt-get install -y wget
 
 (
     cd /tmp
-    wget -q "https://storage.googleapis.com/golang/go1.22.3.linux-${arch}.tar.gz" -O go.tar.gz
+    wget -q "https://storage.googleapis.com/golang/go1.22.5.linux-${arch}.tar.gz" -O go.tar.gz
     tar -xf go.tar.gz
     mv go /usr/local
     ln -s /usr/local/go/bin/go /usr/bin/go
@@ -70,7 +70,7 @@ the manifest. Still, a rolling update would be triggered updating the
 script will notice the version mismatch and start the old version again.
 
 In this scenario the major version upgrade could then be run by a user from within the
-master pod. Exec into the container and run:
+primary pod. Exec into the container and run:
 ```bash
 python3 /scripts/inplace_upgrade.py N
 ```
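Run from outside the pod, the same step might look like the sketch below; the pod name is hypothetical and `N` keeps the meaning it has in the surrounding docs.

```bash
# exec into the primary pod of a (hypothetical) cluster and start the upgrade script
kubectl exec -it acid-minimal-cluster-0 -- python3 /scripts/inplace_upgrade.py N
```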
@@ -81,6 +81,9 @@ upgrade procedure, refer to the [corresponding PR in Spilo](https://github.com/z
 
 When `major_version_upgrade_mode` is set to `manual` the operator will run
 the upgrade script for you after the manifest is updated and pods are rotated.
+It is also possible to define `maintenanceWindows` in the Postgres manifest to
+better control when such automated upgrades should take place after increasing
+the version.
 
 ## Non-default cluster domain
 
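A cluster manifest excerpt combining the two knobs could look like this sketch; the cluster name is hypothetical, and the window formats follow the cluster manifest reference.

```yaml
apiVersion: "acid.zalan.do/v1"
kind: postgresql
metadata:
  name: acid-minimal-cluster    # hypothetical
spec:
  postgresql:
    version: "16"               # raising this triggers the upgrade in "manual" mode
  maintenanceWindows:
  - "01:00-06:00"               # daily window, UTC
  - "Sat:00:00-04:00"           # day-specific window, UTC
```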
@@ -223,9 +226,9 @@ configuration:
 
 Now, every cluster manifest must contain the configured annotation keys to
 trigger the delete process when running `kubectl delete pg`. Note, that the
-`Postgresql` resource would still get deleted as K8s' API server does not
-block it. Only the operator logs will tell, that the delete criteria wasn't
-met.
+`Postgresql` resource would still get deleted because the operator does not
+instruct K8s' API server to block it. Only the operator logs will tell, that
+the delete criteria was not met.
 
 **cluster manifest**
 
@@ -243,11 +246,64 @@ spec:
 
 In case, the resource has been deleted accidentally or the annotations were
 simply forgotten, it's safe to recreate the cluster with `kubectl create`.
-Existing Postgres cluster are not replaced by the operator. But, as the
-original cluster still exists the status will show `CreateFailed` at first.
-On the next sync event it should change to `Running`. However, as it is in
-fact a new resource for K8s, the UID will differ which can trigger a rolling
-update of the pods because the UID is used as part of backup path to S3.
+Existing Postgres clusters are not replaced by the operator. But, when the
+original cluster still exists the status will be `CreateFailed` at first. On
+the next sync event it should change to `Running`. However, because it is in
+fact a new resource for K8s, the UID and therefore, the backup path to S3,
+will differ and trigger a rolling update of the pods.
+
+## Owner References and Finalizers
+
+The Postgres Operator can set [owner references](https://kubernetes.io/docs/concepts/overview/working-with-objects/owners-dependents/) on most of a cluster's child resources to improve
+monitoring with GitOps tools and enable cascading deletes. There are two
+exceptions:
+
+* Persistent Volume Claims, because they are handled by the [PV Reclaim Policy](https://kubernetes.io/docs/tasks/administer-cluster/change-pv-reclaim-policy/) of the Stateful Set
+* Cross-namespace secrets, because owner references are not allowed across namespaces by design
+
+The operator would clean these resources up with its regular delete loop
+unless they got synced correctly. If for some reason the initial cluster sync
+fails, e.g. after a cluster creation or operator restart, a deletion of the
+cluster manifest might leave orphaned resources behind which the user has to
+clean up manually.
+
+Another option is to enable finalizers which first ensures the deletion of all
+child resources before the cluster manifest gets removed. There is a trade-off
+though: The deletion is only performed after the next two operator SYNC cycles
+with the first one setting a `deletionTimestamp` and the latter reacting to it.
+The final removal of the custom resource will add a DELETE event to the worker
+queue but the child resources are already gone at this point. If you do not
+desire this behavior consider enabling owner references instead.
+
+**postgres-operator ConfigMap**
+
+```yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: postgres-operator
+data:
+  enable_finalizers: "false"
+  enable_owner_references: "true"
+```
+
+**OperatorConfiguration**
+
+```yaml
+apiVersion: "acid.zalan.do/v1"
+kind: OperatorConfiguration
+metadata:
+  name: postgresql-operator-configuration
+configuration:
+  kubernetes:
+    enable_finalizers: false
+    enable_owner_references: true
+```
+
+:warning: Please note, both options are disabled by default. When enabling owner
+references the operator cannot block cascading deletes, even when the [delete protection annotations](administrator.md#delete-protection-via-annotations)
+are in place. You would need a K8s admission controller that blocks the actual
+`kubectl delete` API call e.g. based on existing annotations.
 
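One way to observe the effect, sketched under the assumption of a cluster named `acid-minimal-cluster` with owner references enabled:

```bash
# child resources now carry an ownerReference pointing at the postgresql resource
kubectl get statefulset acid-minimal-cluster -o jsonpath='{.metadata.ownerReferences[0].kind}'
# expected output: postgresql
```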
 ## Role-based access control for the operator
 
@@ -1399,7 +1455,7 @@ make docker
 
 # build in image in minikube docker env
 eval $(minikube docker-env)
-docker build -t ghcr.io/zalando/postgres-operator-ui:v1.12.2 .
+docker build -t ghcr.io/zalando/postgres-operator-ui:v1.13.0 .
 
 # apply UI manifests next to a running Postgres Operator
 kubectl apply -f manifests/
@@ -114,6 +114,12 @@ These parameters are grouped directly under the `spec` key in the manifest.
   this parameter. Optional, when empty the load balancer service becomes
   inaccessible from outside of the Kubernetes cluster.
 
+* **maintenanceWindows**
+  a list which defines specific time frames when certain maintenance operations
+  are allowed. So far, it is only implemented for automatic major version
+  upgrades. Accepted formats are "01:00-06:00" for daily maintenance windows or
+  "Sat:00:00-04:00" for specific days, with all times in UTC.
+
 * **users**
   a map of usernames to user flags for the users that should be created in the
   cluster by the operator. User flags are a list, allowed elements are
@@ -242,7 +242,7 @@ CRD-configuration, they are grouped under the `major_version_upgrade` key.
   `"manual"` = manifest triggers action,
   `"full"` = manifest and minimal version violation trigger upgrade.
   Note, that with all three modes increasing the version in the manifest will
-  trigger a rolling update of the pods. The default is `"off"`.
+  trigger a rolling update of the pods. The default is `"manual"`.
 
 * **major_version_upgrade_team_allow_list**
   Upgrades will only be carried out for clusters of listed teams when mode is
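Switching the mode in a CRD-based setup might look like the sketch below; the resource name follows the examples elsewhere in these docs, and the `major_version_upgrade` grouping is the one named in this section's intro.

```yaml
apiVersion: "acid.zalan.do/v1"
kind: OperatorConfiguration
metadata:
  name: postgresql-operator-configuration
configuration:
  major_version_upgrade:
    major_version_upgrade_mode: "manual"  # "off" disables, "full" also reacts to minimal version violations
```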
@@ -263,6 +263,31 @@ Parameters to configure cluster-related Kubernetes objects created by the
 operator, as well as some timeouts associated with them. In a CRD-based
 configuration they are grouped under the `kubernetes` key.
 
+* **enable_finalizers**
+  By default, a deletion of the Postgresql resource will trigger an event
+  that leads to a cleanup of all child resources. However, if the database
+  cluster is in a broken state (e.g. failed initialization) and the operator
+  cannot fully sync it, there can be leftovers. By enabling finalizers the
+  operator will ensure all managed resources are deleted prior to the
+  Postgresql resource. See also the [admin docs](../administrator.md#owner-references-and-finalizers)
+  for more information. The default is `false`.
+
+* **enable_owner_references**
+  The operator can set owner references on its child resources (except PVCs,
+  Patroni config service/endpoint, cross-namespace secrets) to improve cluster
+  monitoring and enable cascading deletion. The default is `false`. Warning,
+  enabling this option disables configured delete protection checks (see below).
+
+* **delete_annotation_date_key**
+  key name for annotation that compares manifest value with current date in the
+  YYYY-MM-DD format. Allowed pattern: `'([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]'`.
+  The default is empty which also disables this delete protection check.
+
+* **delete_annotation_name_key**
+  key name for annotation that compares manifest value with Postgres cluster name.
+  Allowed pattern: `'([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]'`. The default is
+  empty which also disables this delete protection check.
+
 * **pod_service_account_name**
   service account used by Patroni running on individual Pods to communicate
   with the operator. Required even if native Kubernetes support in Patroni is
@@ -293,16 +318,6 @@ configuration they are grouped under the `kubernetes` key.
   of a database created by the operator. If the annotation key is also provided
   by the database definition, the database definition value is used.
 
-* **delete_annotation_date_key**
-  key name for annotation that compares manifest value with current date in the
-  YYYY-MM-DD format. Allowed pattern: `'([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]'`.
-  The default is empty which also disables this delete protection check.
-
-* **delete_annotation_name_key**
-  key name for annotation that compares manifest value with Postgres cluster name.
-  Allowed pattern: `'([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]'`. The default is
-  empty which also disables this delete protection check.
-
 * **downscaler_annotations**
   An array of annotations that should be passed from Postgres CRD on to the
   statefulset and, if exists, to the connection pooler deployment as well.
@@ -332,20 +347,6 @@ configuration they are grouped under the `kubernetes` key.
   drained if the node_readiness_label is not used. If this option is set to
   `false` the `spilo-role=master` selector will not be added to the PDB.
 
-* **enable_finalizers**
-  By default, a deletion of the Postgresql resource will trigger an event
-  that leads to a cleanup of all child resources. However, if the database
-  cluster is in a broken state (e.g. failed initialization) and the operator
-  cannot fully sync it, there can be leftovers. By enabling finalizers the
-  operator will ensure all managed resources are deleted prior to the
-  Postgresql resource. There is a trade-off though: The deletion is only
-  performed after the next two SYNC cycles with the first one updating the
-  internal spec and the latter reacting on the `deletionTimestamp` while
-  processing the SYNC event. The final removal of the custom resource will
-  add a DELETE event to the worker queue but the child resources are already
-  gone at this point.
-  The default is `false`.
-
 * **persistent_volume_claim_retention_policy**
   The operator tries to protect volumes as much as possible. If somebody
   accidentally deletes the statefulset or scales in the `numberOfInstances` the
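Wiring up the two `delete_annotation_*` keys above through the ConfigMap-based configuration could look like this sketch; the chosen key values are illustrative, not prescribed.

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: postgres-operator
data:
  delete_annotation_date_key: "delete-date"          # manifests must then carry delete-date: YYYY-MM-DD
  delete_annotation_name_key: "delete-clustername"   # and delete-clustername: <cluster name>
```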
@@ -821,7 +822,7 @@ grouped under the `logical_backup` key.
   runs `pg_dumpall` on a replica if possible and uploads compressed results to
   an S3 bucket under the key `/<configured-s3-bucket-prefix>/<pg_cluster_name>/<cluster_k8s_uuid>/logical_backups`.
   The default image is the same image built with the Zalando-internal CI
-  pipeline. Default: "ghcr.io/zalando/postgres-operator/logical-backup:v1.12.2"
+  pipeline. Default: "ghcr.io/zalando/postgres-operator/logical-backup:v1.13.0"
 
 * **logical_backup_google_application_credentials**
   Specifies the path of the google cloud service account json file. Default is empty.
@@ -758,7 +758,7 @@ If you need to define a `nodeAffinity` for all your Postgres clusters use the
 ## In-place major version upgrade
 
 Starting with Spilo 13, the operator supports in-place major version upgrades to a
-higher major version (e.g. from PG 11 to PG 13). To trigger the upgrade,
+higher major version (e.g. from PG 14 to PG 16). To trigger the upgrade,
 simply increase the version in the manifest. It is your responsibility to test
 your applications against the new version before the upgrade; downgrading is
 not supported. The easiest way to do so is to try the upgrade on the cloned
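The trigger itself is a one-line change in the cluster manifest, sketched here with hypothetical versions:

```yaml
spec:
  postgresql:
    version: "16"   # previously "14"; the operator starts the in-place upgrade
```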
@@ -46,7 +46,7 @@ tools:
 	# install pinned version of 'kind'
 	# go install must run outside of a dir with a (module-based) Go project !
 	# otherwise go install updates project's dependencies and/or behaves differently
-	cd "/tmp" && GO111MODULE=on go install sigs.k8s.io/kind@v0.22.0
+	cd "/tmp" && GO111MODULE=on go install sigs.k8s.io/kind@v0.23.0
 
 e2etest: tools copy clean
 	./run.sh main
|  | @ -20,6 +20,7 @@ class K8sApi: | ||||||
| 
 | 
 | ||||||
|         self.config = config.load_kube_config() |         self.config = config.load_kube_config() | ||||||
|         self.k8s_client = client.ApiClient() |         self.k8s_client = client.ApiClient() | ||||||
|  |         self.rbac_api = client.RbacAuthorizationV1Api() | ||||||
| 
 | 
 | ||||||
|         self.core_v1 = client.CoreV1Api() |         self.core_v1 = client.CoreV1Api() | ||||||
|         self.apps_v1 = client.AppsV1Api() |         self.apps_v1 = client.AppsV1Api() | ||||||
|  | @ -217,7 +218,6 @@ class K8s: | ||||||
|         pod_phase = 'Failing over' |         pod_phase = 'Failing over' | ||||||
|         new_pod_node = '' |         new_pod_node = '' | ||||||
|         pods_with_update_flag = self.count_pods_with_rolling_update_flag(labels, namespace) |         pods_with_update_flag = self.count_pods_with_rolling_update_flag(labels, namespace) | ||||||
| 
 |  | ||||||
|         while (pod_phase != 'Running') or (new_pod_node not in failover_targets): |         while (pod_phase != 'Running') or (new_pod_node not in failover_targets): | ||||||
|             pods = self.api.core_v1.list_namespaced_pod(namespace, label_selector=labels).items |             pods = self.api.core_v1.list_namespaced_pod(namespace, label_selector=labels).items | ||||||
|             if pods: |             if pods: | ||||||
|  | @ -524,7 +524,6 @@ class K8sBase: | ||||||
|         pod_phase = 'Failing over' |         pod_phase = 'Failing over' | ||||||
|         new_pod_node = '' |         new_pod_node = '' | ||||||
|         pods_with_update_flag = self.count_pods_with_rolling_update_flag(labels, namespace) |         pods_with_update_flag = self.count_pods_with_rolling_update_flag(labels, namespace) | ||||||
| 
 |  | ||||||
|         while (pod_phase != 'Running') or (new_pod_node not in failover_targets): |         while (pod_phase != 'Running') or (new_pod_node not in failover_targets): | ||||||
|             pods = self.api.core_v1.list_namespaced_pod(namespace, label_selector=labels).items |             pods = self.api.core_v1.list_namespaced_pod(namespace, label_selector=labels).items | ||||||
|             if pods: |             if pods: | ||||||
|  |  | ||||||
|  | @ -14,6 +14,7 @@ from kubernetes.client.rest import ApiException | ||||||
| 
 | 
 | ||||||
| SPILO_CURRENT = "registry.opensource.zalan.do/acid/spilo-16-e2e:0.1" | SPILO_CURRENT = "registry.opensource.zalan.do/acid/spilo-16-e2e:0.1" | ||||||
| SPILO_LAZY = "registry.opensource.zalan.do/acid/spilo-16-e2e:0.2" | SPILO_LAZY = "registry.opensource.zalan.do/acid/spilo-16-e2e:0.2" | ||||||
|  | SPILO_FULL_IMAGE = "ghcr.io/zalando/spilo-16:3.2-p3" | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| def to_selector(labels): | def to_selector(labels): | ||||||
|  | @ -95,7 +96,7 @@ class EndToEndTestCase(unittest.TestCase): | ||||||
|             print("Failed to delete the 'standard' storage class: {0}".format(e)) |             print("Failed to delete the 'standard' storage class: {0}".format(e)) | ||||||
| 
 | 
 | ||||||
|         # operator deploys the pod service account there on startup |         # operator deploys the pod service account there on startup | ||||||
|         # needed for test_multi_namespace_support() |         # needed for test_multi_namespace_support and test_owner_references | ||||||
|         cls.test_namespace = "test" |         cls.test_namespace = "test" | ||||||
|         try: |         try: | ||||||
|             v1_namespace = client.V1Namespace(metadata=client.V1ObjectMeta(name=cls.test_namespace)) |             v1_namespace = client.V1Namespace(metadata=client.V1ObjectMeta(name=cls.test_namespace)) | ||||||
|  | @ -115,6 +116,7 @@ class EndToEndTestCase(unittest.TestCase): | ||||||
|             configmap = yaml.safe_load(f) |             configmap = yaml.safe_load(f) | ||||||
|             configmap["data"]["workers"] = "1" |             configmap["data"]["workers"] = "1" | ||||||
|             configmap["data"]["docker_image"] = SPILO_CURRENT |             configmap["data"]["docker_image"] = SPILO_CURRENT | ||||||
|  |             configmap["data"]["major_version_upgrade_mode"] = "full" | ||||||
| 
 | 
 | ||||||
|         with open("manifests/configmap.yaml", 'w') as f: |         with open("manifests/configmap.yaml", 'w') as f: | ||||||
|             yaml.dump(configmap, f, Dumper=yaml.Dumper) |             yaml.dump(configmap, f, Dumper=yaml.Dumper) | ||||||
|  | @ -129,7 +131,8 @@ class EndToEndTestCase(unittest.TestCase): | ||||||
|                          "infrastructure-roles.yaml", |                          "infrastructure-roles.yaml", | ||||||
|                          "infrastructure-roles-new.yaml", |                          "infrastructure-roles-new.yaml", | ||||||
|                          "custom-team-membership.yaml", |                          "custom-team-membership.yaml", | ||||||
|                          "e2e-storage-class.yaml"]: |                          "e2e-storage-class.yaml", | ||||||
|  |                          "fes.crd.yaml"]: | ||||||
|             result = k8s.create_with_kubectl("manifests/" + filename) |             result = k8s.create_with_kubectl("manifests/" + filename) | ||||||
|             print("stdout: {}, stderr: {}".format(result.stdout, result.stderr)) |             print("stdout: {}, stderr: {}".format(result.stdout, result.stderr)) | ||||||
| 
 | 
 | ||||||
|  | @ -199,6 +202,7 @@ class EndToEndTestCase(unittest.TestCase): | ||||||
|         self.eventuallyEqual(lambda: len(self.query_database(leader.metadata.name, "postgres", owner_query)), 3, |         self.eventuallyEqual(lambda: len(self.query_database(leader.metadata.name, "postgres", owner_query)), 3, | ||||||
|             "Not all additional users found in database", 10, 5) |             "Not all additional users found in database", 10, 5) | ||||||
| 
 | 
 | ||||||
|  | 
 | ||||||
|     @timeout_decorator.timeout(TEST_TIMEOUT_SEC) |     @timeout_decorator.timeout(TEST_TIMEOUT_SEC) | ||||||
|     def test_additional_pod_capabilities(self): |     def test_additional_pod_capabilities(self): | ||||||
|         ''' |         ''' | ||||||
|  | @ -909,6 +913,19 @@ class EndToEndTestCase(unittest.TestCase): | ||||||
|         ''' |         ''' | ||||||
|         k8s = self.k8s |         k8s = self.k8s | ||||||
| 
 | 
 | ||||||
|  | 
 | ||||||
|  |         try: | ||||||
|  |             patch_config_ignored_annotations = { | ||||||
|  |                 "data": { | ||||||
|  |                     "ignored_annotations": "k8s-status", | ||||||
|  |                 } | ||||||
|  |             } | ||||||
|  |             k8s.update_config(patch_config_ignored_annotations) | ||||||
|  |             self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync") | ||||||
|  | 
 | ||||||
|  |             sts = k8s.api.apps_v1.read_namespaced_stateful_set('acid-minimal-cluster', 'default') | ||||||
|  |             svc = k8s.api.core_v1.read_namespaced_service('acid-minimal-cluster', 'default') | ||||||
|  | 
 | ||||||
|             annotation_patch = { |             annotation_patch = { | ||||||
|                 "metadata": { |                 "metadata": { | ||||||
|                     "annotations": { |                     "annotations": { | ||||||
|  | @ -917,20 +934,12 @@ class EndToEndTestCase(unittest.TestCase): | ||||||
|                 } |                 } | ||||||
|             } |             } | ||||||
|              |              | ||||||
|         try: |  | ||||||
|             sts = k8s.api.apps_v1.read_namespaced_stateful_set('acid-minimal-cluster', 'default') |  | ||||||
|             old_sts_creation_timestamp = sts.metadata.creation_timestamp |             old_sts_creation_timestamp = sts.metadata.creation_timestamp | ||||||
|             k8s.api.apps_v1.patch_namespaced_stateful_set(sts.metadata.name, sts.metadata.namespace, annotation_patch) |             k8s.api.apps_v1.patch_namespaced_stateful_set(sts.metadata.name, sts.metadata.namespace, annotation_patch) | ||||||
|             svc = k8s.api.core_v1.read_namespaced_service('acid-minimal-cluster', 'default') |  | ||||||
|             old_svc_creation_timestamp = svc.metadata.creation_timestamp |             old_svc_creation_timestamp = svc.metadata.creation_timestamp | ||||||
|             k8s.api.core_v1.patch_namespaced_service(svc.metadata.name, svc.metadata.namespace, annotation_patch) |             k8s.api.core_v1.patch_namespaced_service(svc.metadata.name, svc.metadata.namespace, annotation_patch) | ||||||
| 
 | 
 | ||||||
|             patch_config_ignored_annotations = { |             k8s.delete_operator_pod() | ||||||
|                 "data": { |  | ||||||
|                     "ignored_annotations": "k8s-status", |  | ||||||
|                 } |  | ||||||
|             } |  | ||||||
|             k8s.update_config(patch_config_ignored_annotations) |  | ||||||
|             self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync") |             self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync") | ||||||
| 
 | 
 | ||||||
|             sts = k8s.api.apps_v1.read_namespaced_stateful_set('acid-minimal-cluster', 'default') |             sts = k8s.api.apps_v1.read_namespaced_stateful_set('acid-minimal-cluster', 'default') | ||||||
|  | @ -1174,31 +1183,94 @@ class EndToEndTestCase(unittest.TestCase): | ||||||
|         self.eventuallyEqual(lambda: len(k8s.get_patroni_running_members("acid-minimal-cluster-0")), 2, "Postgres status did not enter running") |         self.eventuallyEqual(lambda: len(k8s.get_patroni_running_members("acid-minimal-cluster-0")), 2, "Postgres status did not enter running") | ||||||
| 
 | 
 | ||||||
|     @timeout_decorator.timeout(TEST_TIMEOUT_SEC) |     @timeout_decorator.timeout(TEST_TIMEOUT_SEC) | ||||||
|     @unittest.skip("Skipping this test until fixed") |  | ||||||
|     def test_major_version_upgrade(self): |     def test_major_version_upgrade(self): | ||||||
|         k8s = self.k8s |         """ | ||||||
|         result = k8s.create_with_kubectl("manifests/minimal-postgres-manifest-12.yaml") |         Test major version upgrade | ||||||
|         self.eventuallyEqual(lambda: k8s.count_running_pods(labels="application=spilo,cluster-name=acid-upgrade-test"), 2, "No 2 pods running") |         """ | ||||||
|         self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync") |         def check_version(): | ||||||
|  |             p = k8s.patroni_rest("acid-upgrade-test-0", "") | ||||||
|  |             version = p.get("server_version", 0) // 10000 | ||||||
|  |             return version | ||||||
| 
 | 
 | ||||||
|         pg_patch_version = { |         k8s = self.k8s | ||||||
|  |         cluster_label = 'application=spilo,cluster-name=acid-upgrade-test' | ||||||
|  | 
 | ||||||
|  |         with open("manifests/minimal-postgres-manifest-12.yaml", 'r+') as f: | ||||||
|  |             upgrade_manifest = yaml.safe_load(f) | ||||||
|  |             upgrade_manifest["spec"]["dockerImage"] = SPILO_FULL_IMAGE | ||||||
|  | 
 | ||||||
|  |         with open("manifests/minimal-postgres-manifest-12.yaml", 'w') as f: | ||||||
|  |             yaml.dump(upgrade_manifest, f, Dumper=yaml.Dumper) | ||||||
|  | 
 | ||||||
|  |         k8s.create_with_kubectl("manifests/minimal-postgres-manifest-12.yaml") | ||||||
|  |         self.eventuallyEqual(lambda: k8s.count_running_pods(labels=cluster_label), 2, "No 2 pods running") | ||||||
|  |         self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync") | ||||||
|  |         self.eventuallyEqual(check_version, 12, "Version is not correct") | ||||||
|  | 
 | ||||||
|  |         master_nodes, _ = k8s.get_cluster_nodes(cluster_labels=cluster_label) | ||||||
|  |         # should upgrade immediately | ||||||
|  |         pg_patch_version_14 = { | ||||||
|             "spec": { |             "spec": { | ||||||
|                 "postgres": { |                 "postgresql": { | ||||||
|                     "version": "14" |                     "version": "14" | ||||||
|                 } |                 } | ||||||
|             } |             } | ||||||
|         } |         } | ||||||
|         k8s.api.custom_objects_api.patch_namespaced_custom_object( |         k8s.api.custom_objects_api.patch_namespaced_custom_object( | ||||||
|             "acid.zalan.do", "v1", "default", "postgresqls", "acid-upgrade-test", pg_patch_version) |             "acid.zalan.do", "v1", "default", "postgresqls", "acid-upgrade-test", pg_patch_version_14) | ||||||
| 
 |  | ||||||
|         self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync") |         self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync") | ||||||
| 
 | 
 | ||||||
|         def check_version_14(): |         # should have finished failover | ||||||
|             p = k8s.get_patroni_state("acid-upgrade-test-0") |         k8s.wait_for_pod_failover(master_nodes, 'spilo-role=replica,' + cluster_label) | ||||||
|             version = p["server_version"][0:2] |         k8s.wait_for_pod_start('spilo-role=master,' + cluster_label) | ||||||
|             return version |         k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label) | ||||||
|  |         self.eventuallyEqual(check_version, 14, "Version should be upgraded from 12 to 14") | ||||||
| 
 | 
 | ||||||
|         self.evantuallyEqual(check_version_14, "14", "Version was not upgrade to 14") |         # should not upgrade because current time is not in maintenanceWindow | ||||||
|  |         current_time = datetime.now() | ||||||
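|  |         # maintenanceWindows entries are "HH:MM-HH:MM" ranges; this one opens 60 minutes from now, so the upgrade must wait | ||||||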
|  |         maintenance_window_future = f"{(current_time+timedelta(minutes=60)).strftime('%H:%M')}-{(current_time+timedelta(minutes=120)).strftime('%H:%M')}" | ||||||
|  |         pg_patch_version_15 = { | ||||||
|  |             "spec": { | ||||||
|  |                 "postgresql": { | ||||||
|  |                     "version": "15" | ||||||
|  |                 }, | ||||||
|  |                 "maintenanceWindows": [ | ||||||
|  |                     maintenance_window_future | ||||||
|  |                 ] | ||||||
|  |             } | ||||||
|  |         } | ||||||
|  |         k8s.api.custom_objects_api.patch_namespaced_custom_object( | ||||||
|  |             "acid.zalan.do", "v1", "default", "postgresqls", "acid-upgrade-test", pg_patch_version_15) | ||||||
|  |         self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync") | ||||||
|  | 
 | ||||||
|  |         # should have finished failover | ||||||
|  |         k8s.wait_for_pod_failover(master_nodes, 'spilo-role=master,' + cluster_label) | ||||||
|  |         k8s.wait_for_pod_start('spilo-role=master,' + cluster_label) | ||||||
|  |         k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label) | ||||||
|  |         self.eventuallyEqual(check_version, 14, "Version should not be upgraded") | ||||||
|  | 
 | ||||||
|  |         # change the version again to trigger operator sync | ||||||
|  |         maintenance_window_current = f"{(current_time-timedelta(minutes=30)).strftime('%H:%M')}-{(current_time+timedelta(minutes=30)).strftime('%H:%M')}" | ||||||
|  |         pg_patch_version_16 = { | ||||||
|  |             "spec": { | ||||||
|  |                 "postgresql": { | ||||||
|  |                     "version": "16" | ||||||
|  |                 }, | ||||||
|  |                 "maintenanceWindows": [ | ||||||
|  |                     maintenance_window_current | ||||||
|  |                 ] | ||||||
|  |             } | ||||||
|  |         } | ||||||
|  | 
 | ||||||
|  |         k8s.api.custom_objects_api.patch_namespaced_custom_object( | ||||||
|  |             "acid.zalan.do", "v1", "default", "postgresqls", "acid-upgrade-test", pg_patch_version_16) | ||||||
|  |         self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync") | ||||||
|  | 
 | ||||||
|  |         # should have finished failover | ||||||
|  |         k8s.wait_for_pod_failover(master_nodes, 'spilo-role=replica,' + cluster_label) | ||||||
|  |         k8s.wait_for_pod_start('spilo-role=master,' + cluster_label) | ||||||
|  |         k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label) | ||||||
|  |         self.eventuallyEqual(check_version, 16, "Version should be upgraded from 14 to 16") | ||||||
| 
 | 
 | ||||||
|     @timeout_decorator.timeout(TEST_TIMEOUT_SEC) |     @timeout_decorator.timeout(TEST_TIMEOUT_SEC) | ||||||
|     def test_persistent_volume_claim_retention_policy(self): |     def test_persistent_volume_claim_retention_policy(self): | ||||||
|  | @ -1347,17 +1419,11 @@ class EndToEndTestCase(unittest.TestCase): | ||||||
|             k8s.wait_for_pod_start("spilo-role=master", self.test_namespace) |             k8s.wait_for_pod_start("spilo-role=master", self.test_namespace) | ||||||
|             k8s.wait_for_pod_start("spilo-role=replica", self.test_namespace) |             k8s.wait_for_pod_start("spilo-role=replica", self.test_namespace) | ||||||
|             self.assert_master_is_unique(self.test_namespace, "acid-test-cluster") |             self.assert_master_is_unique(self.test_namespace, "acid-test-cluster") | ||||||
|  |             # acid-test-cluster will be deleted in the test_owner_references test | ||||||
| 
 | 
 | ||||||
|         except timeout_decorator.TimeoutError: |         except timeout_decorator.TimeoutError: | ||||||
|             print('Operator log: {}'.format(k8s.get_operator_log())) |             print('Operator log: {}'.format(k8s.get_operator_log())) | ||||||
|             raise |             raise | ||||||
|         finally: |  | ||||||
|             # delete the new cluster so that the k8s_api.get_operator_state works correctly in subsequent tests |  | ||||||
|             # ideally we should delete the 'test' namespace here but |  | ||||||
|             # the pods inside the namespace stuck in the Terminating state making the test time out |  | ||||||
|             k8s.api.custom_objects_api.delete_namespaced_custom_object( |  | ||||||
|                 "acid.zalan.do", "v1", self.test_namespace, "postgresqls", "acid-test-cluster") |  | ||||||
|             time.sleep(5) |  | ||||||
| 
 | 
 | ||||||
|     @timeout_decorator.timeout(TEST_TIMEOUT_SEC) |     @timeout_decorator.timeout(TEST_TIMEOUT_SEC) | ||||||
|     @unittest.skip("Skipping this test until fixed") |     @unittest.skip("Skipping this test until fixed") | ||||||
|  | @ -1568,6 +1634,70 @@ class EndToEndTestCase(unittest.TestCase): | ||||||
|         self.eventuallyEqual(lambda: k8s.count_running_pods("connection-pooler="+pooler_name), |         self.eventuallyEqual(lambda: k8s.count_running_pods("connection-pooler="+pooler_name), | ||||||
|                              0, "Pooler pods not scaled down") |                              0, "Pooler pods not scaled down") | ||||||
| 
 | 
 | ||||||
|  |     @timeout_decorator.timeout(TEST_TIMEOUT_SEC) | ||||||
|  |     def test_owner_references(self): | ||||||
|  |         ''' | ||||||
|  |            Enable owner references, test if resources get updated and test cascade deletion of test cluster. | ||||||
|  |         ''' | ||||||
|  |         k8s = self.k8s | ||||||
|  |         cluster_name = 'acid-test-cluster' | ||||||
|  |         cluster_label = 'application=spilo,cluster-name={}'.format(cluster_name) | ||||||
|  |         default_test_cluster = 'acid-minimal-cluster' | ||||||
|  | 
 | ||||||
|  |         try: | ||||||
|  |             # enable owner references in config | ||||||
|  |             enable_owner_refs = { | ||||||
|  |                 "data": { | ||||||
|  |                     "enable_owner_references": "true" | ||||||
|  |                 } | ||||||
|  |             } | ||||||
|  |             k8s.update_config(enable_owner_refs) | ||||||
|  |             self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync") | ||||||
|  | 
 | ||||||
|  |             time.sleep(5)  # wait for the operator to sync the cluster and update resources | ||||||
|  | 
 | ||||||
|  |             # check if child resources were updated with owner references | ||||||
|  |             self.assertTrue(self.check_cluster_child_resources_owner_references(cluster_name, self.test_namespace), "Owner references not set on all child resources of {}".format(cluster_name)) | ||||||
|  |             self.assertTrue(self.check_cluster_child_resources_owner_references(default_test_cluster), "Owner references not set on all child resources of {}".format(default_test_cluster)) | ||||||
|  | 
 | ||||||
|  |             # delete the new cluster to test owner references | ||||||
|  |             # and also to make k8s_api.get_operator_state work better in subsequent tests | ||||||
|  |             # ideally we should delete the 'test' namespace here but the pods | ||||||
|  |             # inside the namespace get stuck in the Terminating state, making the test time out | ||||||
|  |             k8s.api.custom_objects_api.delete_namespaced_custom_object( | ||||||
|  |                 "acid.zalan.do", "v1", self.test_namespace, "postgresqls", cluster_name) | ||||||
|  | 
 | ||||||
|  |             # child resources carrying owner references should now be cascade-deleted by Kubernetes garbage collection | ||||||
|  |             self.eventuallyEqual(lambda: k8s.count_pods_with_label(cluster_label), 0, "Pods not deleted") | ||||||
|  |             self.eventuallyEqual(lambda: k8s.count_statefulsets_with_label(cluster_label), 0, "Statefulset not deleted") | ||||||
|  |             self.eventuallyEqual(lambda: k8s.count_services_with_label(cluster_label), 0, "Services not deleted") | ||||||
|  |             self.eventuallyEqual(lambda: k8s.count_endpoints_with_label(cluster_label), 0, "Endpoints not deleted") | ||||||
|  |             self.eventuallyEqual(lambda: k8s.count_pdbs_with_label(cluster_label), 0, "Pod disruption budget not deleted") | ||||||
|  |             self.eventuallyEqual(lambda: k8s.count_secrets_with_label(cluster_label), 0, "Secrets were not deleted") | ||||||
|  | 
 | ||||||
|  |             time.sleep(5)  # wait for the operator to also delete the PVCs | ||||||
|  | 
 | ||||||
|  |             # pvcs do not have an owner reference but will be deleted by the operator almost immediately | ||||||
|  |             self.eventuallyEqual(lambda: k8s.count_pvcs_with_label(cluster_label), 0, "PVCs not deleted") | ||||||
|  | 
 | ||||||
|  |             # disable owner references in config | ||||||
|  |             disable_owner_refs = { | ||||||
|  |                 "data": { | ||||||
|  |                     "enable_owner_references": "false" | ||||||
|  |                 } | ||||||
|  |             } | ||||||
|  |             k8s.update_config(disable_owner_refs) | ||||||
|  |             self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync") | ||||||
|  | 
 | ||||||
|  |             time.sleep(5)  # wait for the operator to remove owner references | ||||||
|  | 
 | ||||||
|  |             # check if child resources were updated without Postgresql owner references | ||||||
|  |             self.assertTrue(self.check_cluster_child_resources_owner_references(default_test_cluster, "default", True), "Owner references still present on some child resources of {}".format(default_test_cluster)) | ||||||
|  | 
 | ||||||
|  |         except timeout_decorator.TimeoutError: | ||||||
|  |             print('Operator log: {}'.format(k8s.get_operator_log())) | ||||||
|  |             raise | ||||||
|  | 
 | ||||||
|     @timeout_decorator.timeout(TEST_TIMEOUT_SEC) |     @timeout_decorator.timeout(TEST_TIMEOUT_SEC) | ||||||
|     def test_password_rotation(self): |     def test_password_rotation(self): | ||||||
|         ''' |         ''' | ||||||
|  | @ -1766,7 +1896,6 @@ class EndToEndTestCase(unittest.TestCase): | ||||||
|             replica = k8s.get_cluster_replica_pod() |             replica = k8s.get_cluster_replica_pod() | ||||||
|             self.assertTrue(replica.metadata.creation_timestamp > old_creation_timestamp, "Old master pod was not recreated") |             self.assertTrue(replica.metadata.creation_timestamp > old_creation_timestamp, "Old master pod was not recreated") | ||||||
| 
 | 
 | ||||||
| 
 |  | ||||||
|         except timeout_decorator.TimeoutError: |         except timeout_decorator.TimeoutError: | ||||||
|             print('Operator log: {}'.format(k8s.get_operator_log())) |             print('Operator log: {}'.format(k8s.get_operator_log())) | ||||||
|             raise |             raise | ||||||
|  | @ -1984,6 +2113,155 @@ class EndToEndTestCase(unittest.TestCase): | ||||||
|                 "acid.zalan.do", "v1", "default", "postgresqls", "acid-standby-cluster") |                 "acid.zalan.do", "v1", "default", "postgresqls", "acid-standby-cluster") | ||||||
|             time.sleep(5) |             time.sleep(5) | ||||||
| 
 | 
 | ||||||
|  |     @timeout_decorator.timeout(TEST_TIMEOUT_SEC) | ||||||
|  |     def test_stream_resources(self): | ||||||
|  |         ''' | ||||||
|  |            Create and delete fabric event streaming resources. | ||||||
|  |         ''' | ||||||
|  |         k8s = self.k8s | ||||||
|  |         self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, | ||||||
|  |             "Operator does not get in sync") | ||||||
|  |         leader = k8s.get_cluster_leader_pod() | ||||||
|  | 
 | ||||||
|  |         # patch ClusterRole with CRUD privileges on FES resources | ||||||
|  |         cluster_role = k8s.api.rbac_api.read_cluster_role("postgres-operator") | ||||||
|  |         fes_cluster_role_rule = client.V1PolicyRule( | ||||||
|  |             api_groups=["zalando.org"], | ||||||
|  |             resources=["fabriceventstreams"], | ||||||
|  |             verbs=["create", "delete", "deletecollection", "get", "list", "patch", "update", "watch"] | ||||||
|  |         ) | ||||||
|  |         cluster_role.rules.append(fes_cluster_role_rule) | ||||||
|  | 
 | ||||||
|  |         try: | ||||||
|  |             k8s.api.rbac_api.patch_cluster_role("postgres-operator", cluster_role) | ||||||
|  | 
 | ||||||
|  |             # create a table in one of the databases of acid-minimal-cluster | ||||||
|  |             create_stream_table = """ | ||||||
|  |                 CREATE TABLE test_table (id int, payload jsonb); | ||||||
|  |             """ | ||||||
|  |             self.query_database(leader.metadata.name, "foo", create_stream_table) | ||||||
|  | 
 | ||||||
|  |             # update the manifest with the streams section | ||||||
|  |             patch_streaming_config = { | ||||||
|  |                 "spec": { | ||||||
|  |                     "patroni": { | ||||||
|  |                         "slots": { | ||||||
|  |                             "manual_slot": { | ||||||
|  |                                 "type": "physical" | ||||||
|  |                             } | ||||||
|  |                         } | ||||||
|  |                     }, | ||||||
|  |                     "streams": [ | ||||||
|  |                         { | ||||||
|  |                             "applicationId": "test-app", | ||||||
|  |                             "batchSize": 100, | ||||||
|  |                             "database": "foo", | ||||||
|  |                             "enableRecovery": True, | ||||||
|  |                             "tables": { | ||||||
|  |                                 "test_table": { | ||||||
|  |                                     "eventType": "test-event", | ||||||
|  |                                     "idColumn": "id", | ||||||
|  |                                     "payloadColumn": "payload", | ||||||
|  |                                     "recoveryEventType": "test-event-dlq" | ||||||
|  |                                 } | ||||||
|  |                             } | ||||||
|  |                         }, | ||||||
|  |                         { | ||||||
|  |                             "applicationId": "test-app2", | ||||||
|  |                             "batchSize": 100, | ||||||
|  |                             "database": "foo", | ||||||
|  |                             "enableRecovery": True, | ||||||
|  |                             "tables": { | ||||||
|  |                                 "test_non_exist_table": { | ||||||
|  |                                     "eventType": "test-event", | ||||||
|  |                                     "idColumn": "id", | ||||||
|  |                                     "payloadColumn": "payload", | ||||||
|  |                                     "recoveryEventType": "test-event-dlq" | ||||||
|  |                                 } | ||||||
|  |                             } | ||||||
|  |                         } | ||||||
|  |                     ] | ||||||
|  |                 } | ||||||
|  |             } | ||||||
|  |             k8s.api.custom_objects_api.patch_namespaced_custom_object( | ||||||
|  |                 'acid.zalan.do', 'v1', 'default', 'postgresqls', 'acid-minimal-cluster', patch_streaming_config) | ||||||
|  |             self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync") | ||||||
|  | 
 | ||||||
|  |             # check if publication, slot, and fes resource are created | ||||||
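|  |             # publication and slot appear to share the fes_<database>_<applicationId> naming scheme, with dashes mapped to underscores (here fes_foo_test_app) | ||||||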
|  |             get_publication_query = """ | ||||||
|  |                 SELECT * FROM pg_publication WHERE pubname = 'fes_foo_test_app'; | ||||||
|  |             """ | ||||||
|  |             get_slot_query = """ | ||||||
|  |                 SELECT * FROM pg_replication_slots WHERE slot_name = 'fes_foo_test_app'; | ||||||
|  |             """ | ||||||
|  |             self.eventuallyEqual(lambda: len(self.query_database(leader.metadata.name, "foo", get_publication_query)), 1, | ||||||
|  |                 "Publication is not created", 10, 5) | ||||||
|  |             self.eventuallyEqual(lambda: len(self.query_database(leader.metadata.name, "foo", get_slot_query)), 1, | ||||||
|  |                 "Replication slot is not created", 10, 5) | ||||||
|  |             self.eventuallyEqual(lambda: len(k8s.api.custom_objects_api.list_namespaced_custom_object( | ||||||
|  |                 "zalando.org", "v1", "default", "fabriceventstreams", label_selector="cluster-name=acid-minimal-cluster")["items"]), 1, | ||||||
|  |                 "Could not find Fabric Event Stream resource", 10, 5) | ||||||
|  | 
 | ||||||
|  |             # check that the non-existing table in the streams section does not get a publication and replication slot | ||||||
|  |             get_publication_query_not_exist_table = """ | ||||||
|  |                 SELECT * FROM pg_publication WHERE pubname = 'fes_foo_test_app2'; | ||||||
|  |             """ | ||||||
|  |             get_slot_query_not_exist_table = """ | ||||||
|  |                 SELECT * FROM pg_replication_slots WHERE slot_name = 'fes_foo_test_app2'; | ||||||
|  |             """ | ||||||
|  |             self.eventuallyEqual(lambda: len(self.query_database(leader.metadata.name, "foo", get_publication_query_not_exist_table)), 0, | ||||||
|  |                 "Publication is created for non-existing tables", 10, 5) | ||||||
|  |             self.eventuallyEqual(lambda: len(self.query_database(leader.metadata.name, "foo", get_slot_query_not_exist_table)), 0, | ||||||
|  |                 "Replication slot is created for non-existing tables", 10, 5) | ||||||
|  | 
 | ||||||
|  |             # grant create and ownership of test_table to foo_user, reset search path to default | ||||||
|  |             grant_permission_foo_user = """ | ||||||
|  |                 GRANT CREATE ON DATABASE foo TO foo_user; | ||||||
|  |                 ALTER TABLE test_table OWNER TO foo_user; | ||||||
|  |                 ALTER ROLE foo_user RESET search_path; | ||||||
|  |             """ | ||||||
|  |             self.query_database(leader.metadata.name, "foo", grant_permission_foo_user) | ||||||
|  |             # non-postgres user creates a publication | ||||||
|  |             create_nonstream_publication = """ | ||||||
|  |                 CREATE PUBLICATION mypublication FOR TABLE test_table; | ||||||
|  |             """ | ||||||
|  |             self.query_database_with_user(leader.metadata.name, "foo", create_nonstream_publication, "foo_user") | ||||||
|  | 
 | ||||||
|  |             # remove the streams section from the manifest | ||||||
|  |             patch_streaming_config_removal = { | ||||||
|  |                 "spec": { | ||||||
|  |                     "streams": [] | ||||||
|  |                 } | ||||||
|  |             } | ||||||
|  |             k8s.api.custom_objects_api.patch_namespaced_custom_object( | ||||||
|  |                 'acid.zalan.do', 'v1', 'default', 'postgresqls', 'acid-minimal-cluster', patch_streaming_config_removal) | ||||||
|  |             self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync") | ||||||
|  | 
 | ||||||
|  |             # check if publication, slot, and fes resource are removed | ||||||
|  |             self.eventuallyEqual(lambda: len(k8s.api.custom_objects_api.list_namespaced_custom_object( | ||||||
|  |                 "zalando.org", "v1", "default", "fabriceventstreams", label_selector="cluster-name=acid-minimal-cluster")["items"]), 0, | ||||||
|  |                 'Could not delete Fabric Event Stream resource', 10, 5) | ||||||
|  |             self.eventuallyEqual(lambda: len(self.query_database(leader.metadata.name, "foo", get_publication_query)), 0, | ||||||
|  |                 "Publication is not deleted", 10, 5) | ||||||
|  |             self.eventuallyEqual(lambda: len(self.query_database(leader.metadata.name, "foo", get_slot_query)), 0, | ||||||
|  |                 "Replication slot is not deleted", 10, 5) | ||||||
|  | 
 | ||||||
|  |             # check that manual_slot and mypublication were not deleted | ||||||
|  |             get_manual_slot_query = """ | ||||||
|  |                 SELECT * FROM pg_replication_slots WHERE slot_name = 'manual_slot'; | ||||||
|  |             """ | ||||||
|  |             get_nonstream_publication_query = """ | ||||||
|  |                 SELECT * FROM pg_publication WHERE pubname = 'mypublication'; | ||||||
|  |             """ | ||||||
|  |             self.eventuallyEqual(lambda: len(self.query_database(leader.metadata.name, "postgres", get_manual_slot_query)), 1, | ||||||
|  |                 "Slot defined in patroni config is deleted", 10, 5) | ||||||
|  |             self.eventuallyEqual(lambda: len(self.query_database(leader.metadata.name, "foo", get_nonstream_publication_query)), 1, | ||||||
|  |                 "Publication defined not in stream section is deleted", 10, 5) | ||||||
|  | 
 | ||||||
|  |         except timeout_decorator.TimeoutError: | ||||||
|  |             print('Operator log: {}'.format(k8s.get_operator_log())) | ||||||
|  |             raise | ||||||
|  | 
 | ||||||
|     @timeout_decorator.timeout(TEST_TIMEOUT_SEC) |     @timeout_decorator.timeout(TEST_TIMEOUT_SEC) | ||||||
|     def test_taint_based_eviction(self): |     def test_taint_based_eviction(self): | ||||||
|         ''' |         ''' | ||||||
|  | @ -2110,7 +2388,7 @@ class EndToEndTestCase(unittest.TestCase): | ||||||
|             self.eventuallyEqual(lambda: k8s.count_statefulsets_with_label(cluster_label), 0, "Statefulset not deleted") |             self.eventuallyEqual(lambda: k8s.count_statefulsets_with_label(cluster_label), 0, "Statefulset not deleted") | ||||||
|             self.eventuallyEqual(lambda: k8s.count_deployments_with_label(cluster_label), 0, "Deployments not deleted") |             self.eventuallyEqual(lambda: k8s.count_deployments_with_label(cluster_label), 0, "Deployments not deleted") | ||||||
|             self.eventuallyEqual(lambda: k8s.count_pdbs_with_label(cluster_label), 0, "Pod disruption budget not deleted") |             self.eventuallyEqual(lambda: k8s.count_pdbs_with_label(cluster_label), 0, "Pod disruption budget not deleted") | ||||||
|             self.eventuallyEqual(lambda: k8s.count_secrets_with_label(cluster_label), 7, "Secrets were deleted although disabled in config") |             self.eventuallyEqual(lambda: k8s.count_secrets_with_label(cluster_label), 8, "Secrets were deleted although disabled in config") | ||||||
|             self.eventuallyEqual(lambda: k8s.count_pvcs_with_label(cluster_label), 3, "PVCs were deleted although disabled in config") |             self.eventuallyEqual(lambda: k8s.count_pvcs_with_label(cluster_label), 3, "PVCs were deleted although disabled in config") | ||||||
| 
 | 
 | ||||||
|         except timeout_decorator.TimeoutError: |         except timeout_decorator.TimeoutError: | ||||||
|  | @ -2197,6 +2475,43 @@ class EndToEndTestCase(unittest.TestCase): | ||||||
| 
 | 
 | ||||||
|         return True |         return True | ||||||
| 
 | 
 | ||||||
|  |     def check_cluster_child_resources_owner_references(self, cluster_name, cluster_namespace='default', inverse=False): | ||||||
|  |         k8s = self.k8s | ||||||
|  | 
 | ||||||
|  |         # check if child resources were updated with owner references | ||||||
|  |         sset = k8s.api.apps_v1.read_namespaced_stateful_set(cluster_name, cluster_namespace) | ||||||
|  |         self.assertTrue(self.has_postgresql_owner_reference(sset.metadata.owner_references, inverse), "statefulset owner reference check failed") | ||||||
|  | 
 | ||||||
|  |         svc = k8s.api.core_v1.read_namespaced_service(cluster_name, cluster_namespace) | ||||||
|  |         self.assertTrue(self.has_postgresql_owner_reference(svc.metadata.owner_references, inverse), "primary service owner reference check failed") | ||||||
|  |         replica_svc = k8s.api.core_v1.read_namespaced_service(cluster_name + "-repl", cluster_namespace) | ||||||
|  |         self.assertTrue(self.has_postgresql_owner_reference(replica_svc.metadata.owner_references, inverse), "replica service owner reference check failed") | ||||||
|  |         config_svc = k8s.api.core_v1.read_namespaced_service(cluster_name + "-config", cluster_namespace) | ||||||
|  |         self.assertTrue(self.has_postgresql_owner_reference(config_svc.metadata.owner_references, inverse), "config service owner reference check failed") | ||||||
|  | 
 | ||||||
|  |         ep = k8s.api.core_v1.read_namespaced_endpoints(cluster_name, cluster_namespace) | ||||||
|  |         self.assertTrue(self.has_postgresql_owner_reference(ep.metadata.owner_references, inverse), "primary endpoint owner reference check failed") | ||||||
|  |         replica_ep = k8s.api.core_v1.read_namespaced_endpoints(cluster_name + "-repl", cluster_namespace) | ||||||
|  |         self.assertTrue(self.has_postgresql_owner_reference(replica_ep.metadata.owner_references, inverse), "replica endpoint owner reference check failed") | ||||||
|  |         config_ep = k8s.api.core_v1.read_namespaced_endpoints(cluster_name + "-config", cluster_namespace) | ||||||
|  |         self.assertTrue(self.has_postgresql_owner_reference(config_ep.metadata.owner_references, inverse), "config endpoint owner reference check failed") | ||||||
|  | 
 | ||||||
|  |         pdb = k8s.api.policy_v1.read_namespaced_pod_disruption_budget("postgres-{}-pdb".format(cluster_name), cluster_namespace) | ||||||
|  |         self.assertTrue(self.has_postgresql_owner_reference(pdb.metadata.owner_references, inverse), "pod disruption owner reference check failed") | ||||||
|  | 
 | ||||||
|  |         pg_secret = k8s.api.core_v1.read_namespaced_secret("postgres.{}.credentials.postgresql.acid.zalan.do".format(cluster_name), cluster_namespace) | ||||||
|  |         self.assertTrue(self.has_postgresql_owner_reference(pg_secret.metadata.owner_references, inverse), "postgres secret owner reference check failed") | ||||||
|  |         standby_secret = k8s.api.core_v1.read_namespaced_secret("standby.{}.credentials.postgresql.acid.zalan.do".format(cluster_name), cluster_namespace) | ||||||
|  |         self.assertTrue(self.has_postgresql_owner_reference(standby_secret.metadata.owner_references, inverse), "standby secret owner reference check failed") | ||||||
|  | 
 | ||||||
|  |         return True | ||||||
|  | 
 | ||||||
|  |     def has_postgresql_owner_reference(self, owner_references, inverse): | ||||||
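|  |         # with inverse=True, assert that no 'postgresql' owner reference is present; otherwise expect a controlling reference of that kind | ||||||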
|  |         if inverse: | ||||||
|  |             return owner_references is None or owner_references[0].kind != 'postgresql' | ||||||
|  | 
 | ||||||
|  |         return owner_references is not None and owner_references[0].kind == 'postgresql' and owner_references[0].controller | ||||||
|  | 
 | ||||||
|     def list_databases(self, pod_name): |     def list_databases(self, pod_name): | ||||||
|         ''' |         ''' | ||||||
|            Get list of databases we might want to iterate over |            Get list of databases we might want to iterate over | ||||||
|  |  | ||||||
							
								
								
									
go.mod (23 changes)
							|  | @ -11,13 +11,13 @@ require ( | ||||||
| 	github.com/r3labs/diff v1.1.0 | 	github.com/r3labs/diff v1.1.0 | ||||||
| 	github.com/sirupsen/logrus v1.9.3 | 	github.com/sirupsen/logrus v1.9.3 | ||||||
| 	github.com/stretchr/testify v1.9.0 | 	github.com/stretchr/testify v1.9.0 | ||||||
| 	golang.org/x/crypto v0.23.0 | 	golang.org/x/crypto v0.26.0 | ||||||
| 	golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3 | 	golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3 | ||||||
| 	gopkg.in/yaml.v2 v2.4.0 | 	gopkg.in/yaml.v2 v2.4.0 | ||||||
| 	k8s.io/api v0.28.10 | 	k8s.io/api v0.28.12 | ||||||
| 	k8s.io/apiextensions-apiserver v0.25.9 | 	k8s.io/apiextensions-apiserver v0.25.9 | ||||||
| 	k8s.io/apimachinery v0.28.10 | 	k8s.io/apimachinery v0.28.12 | ||||||
| 	k8s.io/client-go v0.28.10 | 	k8s.io/client-go v0.28.12 | ||||||
| 	k8s.io/code-generator v0.25.9 | 	k8s.io/code-generator v0.25.9 | ||||||
| ) | ) | ||||||
| 
 | 
 | ||||||
|  | @ -33,7 +33,7 @@ require ( | ||||||
| 	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect | 	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect | ||||||
| 	github.com/golang/protobuf v1.5.4 // indirect | 	github.com/golang/protobuf v1.5.4 // indirect | ||||||
| 	github.com/google/gnostic-models v0.6.8 // indirect | 	github.com/google/gnostic-models v0.6.8 // indirect | ||||||
| 	github.com/google/go-cmp v0.5.9 // indirect | 	github.com/google/go-cmp v0.6.0 // indirect | ||||||
| 	github.com/google/gofuzz v1.2.0 // indirect | 	github.com/google/gofuzz v1.2.0 // indirect | ||||||
| 	github.com/google/uuid v1.3.0 // indirect | 	github.com/google/uuid v1.3.0 // indirect | ||||||
| 	github.com/imdario/mergo v0.3.6 // indirect | 	github.com/imdario/mergo v0.3.6 // indirect | ||||||
|  | @ -48,14 +48,15 @@ require ( | ||||||
| 	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect | 	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect | ||||||
| 	github.com/pmezard/go-difflib v1.0.0 // indirect | 	github.com/pmezard/go-difflib v1.0.0 // indirect | ||||||
| 	github.com/spf13/pflag v1.0.5 // indirect | 	github.com/spf13/pflag v1.0.5 // indirect | ||||||
| 	golang.org/x/mod v0.14.0 // indirect | 	golang.org/x/mod v0.17.0 // indirect | ||||||
| 	golang.org/x/net v0.23.0 // indirect | 	golang.org/x/net v0.25.0 // indirect | ||||||
| 	golang.org/x/oauth2 v0.8.0 // indirect | 	golang.org/x/oauth2 v0.8.0 // indirect | ||||||
| 	golang.org/x/sys v0.20.0 // indirect | 	golang.org/x/sync v0.8.0 // indirect | ||||||
| 	golang.org/x/term v0.20.0 // indirect | 	golang.org/x/sys v0.23.0 // indirect | ||||||
| 	golang.org/x/text v0.15.0 // indirect | 	golang.org/x/term v0.23.0 // indirect | ||||||
|  | 	golang.org/x/text v0.17.0 // indirect | ||||||
| 	golang.org/x/time v0.3.0 // indirect | 	golang.org/x/time v0.3.0 // indirect | ||||||
| 	golang.org/x/tools v0.17.0 // indirect | 	golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect | ||||||
| 	google.golang.org/appengine v1.6.7 // indirect | 	google.golang.org/appengine v1.6.7 // indirect | ||||||
| 	google.golang.org/protobuf v1.33.0 // indirect | 	google.golang.org/protobuf v1.33.0 // indirect | ||||||
| 	gopkg.in/inf.v0 v0.9.1 // indirect | 	gopkg.in/inf.v0 v0.9.1 // indirect | ||||||
|  |  | ||||||
							
								
								
									
go.sum (48 changes)
							|  | @ -34,8 +34,8 @@ github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6 | ||||||
| github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= | github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= | ||||||
| github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= | github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= | ||||||
| github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= | github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= | ||||||
| github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= | github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= | ||||||
| github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= | github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= | ||||||
| github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= | github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= | ||||||
| github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= | github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= | ||||||
| github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= | github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= | ||||||
|  | @ -113,31 +113,31 @@ github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1 | ||||||
| golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= | golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= | ||||||
| golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= | golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= | ||||||
| golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= | golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= | ||||||
| golang.org/x/crypto v0.23.0 h1:dIJU/v2J8Mdglj/8rJ6UUOM3Zc9zLZxVZwwxMooUSAI= | golang.org/x/crypto v0.26.0 h1:RrRspgV4mU+YwB4FYnuBoKsUapNIL5cohGAmSH3azsw= | ||||||
| golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= | golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54= | ||||||
| golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3 h1:hNQpMuAJe5CtcUqCXaWga3FHu+kQvCqcsoVaQgSV60o= | golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3 h1:hNQpMuAJe5CtcUqCXaWga3FHu+kQvCqcsoVaQgSV60o= | ||||||
| golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08= | golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08= | ||||||
| golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= | golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= | ||||||
| golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= | golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= | ||||||
| golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= | golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= | ||||||
| golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0= | golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= | ||||||
| golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= | golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= | ||||||
| golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= | golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= | ||||||
| golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= | golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= | ||||||
| golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= | golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= | ||||||
| golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= | golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= | ||||||
| golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= | golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= | ||||||
| golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= | golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= | ||||||
| golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= | golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac= | ||||||
| golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= | golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= | ||||||
| golang.org/x/oauth2 v0.8.0 h1:6dkIjl3j3LtZ/O3sTgZTMsLKSftL/B8Zgq4huOIIUu8= | golang.org/x/oauth2 v0.8.0 h1:6dkIjl3j3LtZ/O3sTgZTMsLKSftL/B8Zgq4huOIIUu8= | ||||||
| golang.org/x/oauth2 v0.8.0/go.mod h1:yr7u4HXZRm1R1kBWqr/xKNqewf0plRYoB7sla+BCIXE= | golang.org/x/oauth2 v0.8.0/go.mod h1:yr7u4HXZRm1R1kBWqr/xKNqewf0plRYoB7sla+BCIXE= | ||||||
| golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= | golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= | ||||||
| golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= | golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= | ||||||
| golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= | golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= | ||||||
| golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= | golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= | ||||||
| golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= | golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= | ||||||
| golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= | golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= | ||||||
| golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= | golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= | ||||||
| golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= | golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= | ||||||
| golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= | golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= | ||||||
|  | @ -145,16 +145,16 @@ golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7w | ||||||
| golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= | golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= | ||||||
| golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= | golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= | ||||||
| golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= | golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= | ||||||
| golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y= | golang.org/x/sys v0.23.0 h1:YfKFowiIMvtgl1UERQoTPPToxltDeZfbj4H7dVUCwmM= | ||||||
| golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= | golang.org/x/sys v0.23.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= | ||||||
| golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= | golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= | ||||||
| golang.org/x/term v0.20.0 h1:VnkxpohqXaOBYJtBmEppKUG6mXpi+4O6purfc2+sMhw= | golang.org/x/term v0.23.0 h1:F6D4vR+EHoL9/sWAWgAR1H2DcHr4PareCbAaCo1RpuU= | ||||||
| golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= | golang.org/x/term v0.23.0/go.mod h1:DgV24QBUrK6jhZXl+20l6UWznPlwAHm1Q1mGHtydmSk= | ||||||
| golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= | golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= | ||||||
| golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= | golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= | ||||||
| golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= | golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= | ||||||
| golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk= | golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= | ||||||
| golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= | golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= | ||||||
| golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= | golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= | ||||||
| golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= | golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= | ||||||
| golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= | golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= | ||||||
|  | @ -163,8 +163,8 @@ golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roY | ||||||
| golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= | golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= | ||||||
| golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= | golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= | ||||||
| golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= | golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= | ||||||
| golang.org/x/tools v0.17.0 h1:FvmRgNOcs3kOa+T20R1uhfP9F6HgG2mfxDv1vrx1Htc= | golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg= | ||||||
| golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps= | golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= | ||||||
| golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= | golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= | ||||||
| golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= | golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= | ||||||
| golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= | golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= | ||||||
|  | @ -186,14 +186,14 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= | ||||||
| gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= | gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= | ||||||
| gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= | gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= | ||||||
| gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= | gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= | ||||||
| k8s.io/api v0.28.10 h1:q1Y+h3F+siuwP/qCQuqgqGJjaIuQWN0yFE7z367E3Q0= | k8s.io/api v0.28.12 h1:C2hpsaso18pqn0Dmkfnbv/YCctozTC3KGGuZ6bF7zhQ= | ||||||
| k8s.io/api v0.28.10/go.mod h1:u6EzGdzmEC2vfhyw4sD89i7OIc/2v1EAwvd1t4chQac= | k8s.io/api v0.28.12/go.mod h1:qjswI+whxvf9LAKD4sEYHfy+WgHGWeH+H5sCRQMwZAQ= | ||||||
| k8s.io/apiextensions-apiserver v0.25.9 h1:Pycd6lm2auABp9wKQHCFSEPG+NPdFSTJXPST6NJFzB8= | k8s.io/apiextensions-apiserver v0.25.9 h1:Pycd6lm2auABp9wKQHCFSEPG+NPdFSTJXPST6NJFzB8= | ||||||
| k8s.io/apiextensions-apiserver v0.25.9/go.mod h1:ijGxmSG1GLOEaWhTuaEr0M7KUeia3mWCZa6FFQqpt1M= | k8s.io/apiextensions-apiserver v0.25.9/go.mod h1:ijGxmSG1GLOEaWhTuaEr0M7KUeia3mWCZa6FFQqpt1M= | ||||||
| k8s.io/apimachinery v0.28.10 h1:cWonrYsJK3lbuf9IgMs5+L5Jzw6QR3ZGA3hzwG0HDeI= | k8s.io/apimachinery v0.28.12 h1:VepMEVOi9o7L/4wMAXJq+3BK9tqBIeerTB+HSOTKeo0= | ||||||
| k8s.io/apimachinery v0.28.10/go.mod h1:zUG757HaKs6Dc3iGtKjzIpBfqTM4yiRsEe3/E7NX15o= | k8s.io/apimachinery v0.28.12/go.mod h1:zUG757HaKs6Dc3iGtKjzIpBfqTM4yiRsEe3/E7NX15o= | ||||||
| k8s.io/client-go v0.28.10 h1:y+mvUei3+RU0rE7r2BZFA2ApTAsXSN1glGs4QfULLt4= | k8s.io/client-go v0.28.12 h1:li7iRPRQF3vDki6gTxT/kXWJvw3BkJSdjVPVhDTZQec= | ||||||
| k8s.io/client-go v0.28.10/go.mod h1:JLwjCWhQhvm1F4J+7YAr9WVhSRNmfkRofPWU43m8LZk= | k8s.io/client-go v0.28.12/go.mod h1:yEzH2Z+nEGlrnKyHJWcJsbOr5tGdIj04dj1TVQOg0wE= | ||||||
| k8s.io/code-generator v0.25.9 h1:lgyAV9AIRYNxZxgLRXqsCAtqJLHvakot41CjEqD5W0w= | k8s.io/code-generator v0.25.9 h1:lgyAV9AIRYNxZxgLRXqsCAtqJLHvakot41CjEqD5W0w= | ||||||
| k8s.io/code-generator v0.25.9/go.mod h1:DHfpdhSUrwqF0f4oLqCtF8gYbqlndNetjBEz45nWzJI= | k8s.io/code-generator v0.25.9/go.mod h1:DHfpdhSUrwqF0f4oLqCtF8gYbqlndNetjBEz45nWzJI= | ||||||
| k8s.io/gengo v0.0.0-20220902162205-c0856e24416d h1:U9tB195lKdzwqicbJvyJeOXV7Klv+wNAWENRnXEGi08= | k8s.io/gengo v0.0.0-20220902162205-c0856e24416d h1:U9tB195lKdzwqicbJvyJeOXV7Klv+wNAWENRnXEGi08= | ||||||
|  |  | ||||||
|  | @ -3,20 +3,20 @@ module github.com/zalando/postgres-operator/kubectl-pg | ||||||
| go 1.22 | go 1.22 | ||||||
| 
 | 
 | ||||||
| require ( | require ( | ||||||
| 	github.com/spf13/cobra v1.8.0 | 	github.com/spf13/cobra v1.8.1 | ||||||
| 	github.com/spf13/viper v1.18.2 | 	github.com/spf13/viper v1.19.0 | ||||||
| 	github.com/zalando/postgres-operator v1.12.0 | 	github.com/zalando/postgres-operator v1.12.2 | ||||||
| 	k8s.io/api v0.28.10 | 	k8s.io/api v0.28.12 | ||||||
| 	k8s.io/apiextensions-apiserver v0.25.9 | 	k8s.io/apiextensions-apiserver v0.25.9 | ||||||
| 	k8s.io/apimachinery v0.28.10 | 	k8s.io/apimachinery v0.28.12 | ||||||
| 	k8s.io/client-go v0.28.10 | 	k8s.io/client-go v0.28.12 | ||||||
| ) | ) | ||||||
| 
 | 
 | ||||||
| require ( | require ( | ||||||
| 	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect | 	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect | ||||||
| 	github.com/emicklei/go-restful/v3 v3.9.0 // indirect | 	github.com/emicklei/go-restful/v3 v3.9.0 // indirect | ||||||
| 	github.com/fsnotify/fsnotify v1.7.0 // indirect | 	github.com/fsnotify/fsnotify v1.7.0 // indirect | ||||||
| 	github.com/go-logr/logr v1.2.4 // indirect | 	github.com/go-logr/logr v1.4.1 // indirect | ||||||
| 	github.com/go-openapi/jsonpointer v0.19.6 // indirect | 	github.com/go-openapi/jsonpointer v0.19.6 // indirect | ||||||
| 	github.com/go-openapi/jsonreference v0.20.2 // indirect | 	github.com/go-openapi/jsonreference v0.20.2 // indirect | ||||||
| 	github.com/go-openapi/swag v0.22.3 // indirect | 	github.com/go-openapi/swag v0.22.3 // indirect | ||||||
|  | @ -40,7 +40,7 @@ require ( | ||||||
| 	github.com/modern-go/reflect2 v1.0.2 // indirect | 	github.com/modern-go/reflect2 v1.0.2 // indirect | ||||||
| 	github.com/motomux/pretty v0.0.0-20161209205251-b2aad2c9a95d // indirect | 	github.com/motomux/pretty v0.0.0-20161209205251-b2aad2c9a95d // indirect | ||||||
| 	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect | 	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect | ||||||
| 	github.com/pelletier/go-toml/v2 v2.1.0 // indirect | 	github.com/pelletier/go-toml/v2 v2.2.2 // indirect | ||||||
| 	github.com/sagikazarmark/locafero v0.4.0 // indirect | 	github.com/sagikazarmark/locafero v0.4.0 // indirect | ||||||
| 	github.com/sagikazarmark/slog-shim v0.1.0 // indirect | 	github.com/sagikazarmark/slog-shim v0.1.0 // indirect | ||||||
| 	github.com/sirupsen/logrus v1.9.3 // indirect | 	github.com/sirupsen/logrus v1.9.3 // indirect | ||||||
|  | @ -53,12 +53,12 @@ require ( | ||||||
| 	golang.org/x/crypto v0.23.0 // indirect | 	golang.org/x/crypto v0.23.0 // indirect | ||||||
| 	golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3 // indirect | 	golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3 // indirect | ||||||
| 	golang.org/x/net v0.23.0 // indirect | 	golang.org/x/net v0.23.0 // indirect | ||||||
| 	golang.org/x/oauth2 v0.15.0 // indirect | 	golang.org/x/oauth2 v0.18.0 // indirect | ||||||
| 	golang.org/x/sys v0.20.0 // indirect | 	golang.org/x/sys v0.20.0 // indirect | ||||||
| 	golang.org/x/term v0.20.0 // indirect | 	golang.org/x/term v0.20.0 // indirect | ||||||
| 	golang.org/x/text v0.15.0 // indirect | 	golang.org/x/text v0.15.0 // indirect | ||||||
| 	golang.org/x/time v0.5.0 // indirect | 	golang.org/x/time v0.5.0 // indirect | ||||||
| 	google.golang.org/appengine v1.6.7 // indirect | 	google.golang.org/appengine v1.6.8 // indirect | ||||||
| 	google.golang.org/protobuf v1.33.0 // indirect | 	google.golang.org/protobuf v1.33.0 // indirect | ||||||
| 	gopkg.in/inf.v0 v0.9.1 // indirect | 	gopkg.in/inf.v0 v0.9.1 // indirect | ||||||
| 	gopkg.in/ini.v1 v1.67.0 // indirect | 	gopkg.in/ini.v1 v1.67.0 // indirect | ||||||
|  |  | ||||||
|  | @ -1,6 +1,6 @@ | ||||||
| github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= | github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= | ||||||
| github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= | github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= | ||||||
| github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= | github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= | ||||||
| github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= | github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= | ||||||
| github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= | github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= | ||||||
| github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= | github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= | ||||||
|  | @ -13,8 +13,8 @@ github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7z | ||||||
| github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= | github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= | ||||||
| github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= | github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= | ||||||
| github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= | github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= | ||||||
| github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ= | github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= | ||||||
| github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= | github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= | ||||||
| github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= | github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= | ||||||
| github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= | github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= | ||||||
| github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= | github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= | ||||||
|  | @ -25,11 +25,13 @@ github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEe | ||||||
| github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= | github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= | ||||||
| github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= | github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= | ||||||
| github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= | github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= | ||||||
| github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= | github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= | ||||||
|  | github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= | ||||||
| github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= | github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= | ||||||
| github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= | github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= | ||||||
| github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= | github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= | ||||||
| github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= | github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= | ||||||
|  | github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= | ||||||
| github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= | github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= | ||||||
| github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= | github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= | ||||||
| github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= | github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= | ||||||
|  | @ -80,8 +82,8 @@ github.com/onsi/ginkgo/v2 v2.9.4 h1:xR7vG4IXt5RWx6FfIjyAtsoMAtnc3C/rFXBBd2AjZwE= | ||||||
| github.com/onsi/ginkgo/v2 v2.9.4/go.mod h1:gCQYp2Q+kSoIj7ykSVb9nskRSsR6PUj4AiLywzIhbKM= | github.com/onsi/ginkgo/v2 v2.9.4/go.mod h1:gCQYp2Q+kSoIj7ykSVb9nskRSsR6PUj4AiLywzIhbKM= | ||||||
| github.com/onsi/gomega v1.27.6 h1:ENqfyGeS5AX/rlXDd/ETokDz93u0YufY1Pgxuy/PvWE= | github.com/onsi/gomega v1.27.6 h1:ENqfyGeS5AX/rlXDd/ETokDz93u0YufY1Pgxuy/PvWE= | ||||||
| github.com/onsi/gomega v1.27.6/go.mod h1:PIQNjfQwkP3aQAH7lf7j87O/5FiNr+ZR8+ipb+qQlhg= | github.com/onsi/gomega v1.27.6/go.mod h1:PIQNjfQwkP3aQAH7lf7j87O/5FiNr+ZR8+ipb+qQlhg= | ||||||
| github.com/pelletier/go-toml/v2 v2.1.0 h1:FnwAJ4oYMvbT/34k9zzHuZNrhlz48GB3/s6at6/MHO4= | github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM= | ||||||
| github.com/pelletier/go-toml/v2 v2.1.0/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc= | github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs= | ||||||
| github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= | github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= | ||||||
| github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= | github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= | ||||||
| github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= | github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= | ||||||
|  | @ -100,15 +102,16 @@ github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= | ||||||
| github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= | github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= | ||||||
| github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0= | github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0= | ||||||
| github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= | github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= | ||||||
| github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0= | github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= | ||||||
| github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho= | github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= | ||||||
| github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= | github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= | ||||||
| github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= | github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= | ||||||
| github.com/spf13/viper v1.18.2 h1:LUXCnvUvSM6FXAsj6nnfc8Q2tp1dIgUfY9Kc8GsSOiQ= | github.com/spf13/viper v1.19.0 h1:RWq5SEjt8o25SROyN3z2OrDB9l7RPd3lwTWU8EcEdcI= | ||||||
| github.com/spf13/viper v1.18.2/go.mod h1:EKmWIqdnk5lOcmR72yw6hS+8OPYcwD0jteitLMVB+yk= | github.com/spf13/viper v1.19.0/go.mod h1:GQUN9bilAbhU/jgc1bKs99f/suXKeUMct8Adx5+Ntkg= | ||||||
| github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= | github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= | ||||||
| github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= | github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= | ||||||
| github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= | github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= | ||||||
|  | github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= | ||||||
| github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= | github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= | ||||||
| github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= | github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= | ||||||
| github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= | github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= | ||||||
|  | @ -121,42 +124,54 @@ github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8 | ||||||
| github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= | github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= | ||||||
| github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= | github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= | ||||||
| github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= | github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= | ||||||
| github.com/zalando/postgres-operator v1.12.0 h1:9C5u8UgrVQDRdzB3/T7kKWYKEf2vbF9EZHqtCRSgJtE= | github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= | ||||||
| github.com/zalando/postgres-operator v1.12.0/go.mod h1:tKNY4pMjnr5BhuzGiGngf1SPJ7K1vVRCmMkfmV9KZoQ= | github.com/zalando/postgres-operator v1.12.2 h1:HJLrGSJLKYkvdpHIxlAKhXWTeRsgDQki2s9QOyApUX0= | ||||||
|  | github.com/zalando/postgres-operator v1.12.2/go.mod h1:tKNY4pMjnr5BhuzGiGngf1SPJ7K1vVRCmMkfmV9KZoQ= | ||||||
| go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= | go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= | ||||||
| go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= | go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= | ||||||
| golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= | golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= | ||||||
| golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= | golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= | ||||||
| golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= | golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= | ||||||
|  | golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= | ||||||
| golang.org/x/crypto v0.23.0 h1:dIJU/v2J8Mdglj/8rJ6UUOM3Zc9zLZxVZwwxMooUSAI= | golang.org/x/crypto v0.23.0 h1:dIJU/v2J8Mdglj/8rJ6UUOM3Zc9zLZxVZwwxMooUSAI= | ||||||
| golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= | golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= | ||||||
| golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3 h1:hNQpMuAJe5CtcUqCXaWga3FHu+kQvCqcsoVaQgSV60o= | golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3 h1:hNQpMuAJe5CtcUqCXaWga3FHu+kQvCqcsoVaQgSV60o= | ||||||
| golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08= | golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08= | ||||||
| golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= | golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= | ||||||
| golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= | golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= | ||||||
|  | golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= | ||||||
| golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= | golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= | ||||||
| golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= |  | ||||||
| golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= | golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= | ||||||
| golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= | golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= | ||||||
| golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= | golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= | ||||||
|  | golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= | ||||||
|  | golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= | ||||||
| golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= | golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= | ||||||
| golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= | golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= | ||||||
| golang.org/x/oauth2 v0.15.0 h1:s8pnnxNVzjWyrvYdFUQq5llS1PX2zhPXmccZv99h7uQ= | golang.org/x/oauth2 v0.18.0 h1:09qnuIAgzdx1XplqJvW6CQqMCtGZykZWcXzPMPUusvI= | ||||||
| golang.org/x/oauth2 v0.15.0/go.mod h1:q48ptWNTY5XWf+JNten23lcvHpLJ0ZSxF5ttTHKVCAM= | golang.org/x/oauth2 v0.18.0/go.mod h1:Wf7knwG0MPoWIMMBgFlEaSUDaKskp0dCfrlJRJXbBi8= | ||||||
| golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= | golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= | ||||||
| golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= | golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= | ||||||
| golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= | golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= | ||||||
|  | golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= | ||||||
| golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= | golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= | ||||||
| golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= | golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= | ||||||
| golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= | golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= | ||||||
|  | golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= | ||||||
|  | golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= | ||||||
|  | golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= | ||||||
| golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= | golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= | ||||||
|  | golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= | ||||||
| golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y= | golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y= | ||||||
| golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= | golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= | ||||||
|  | golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= | ||||||
|  | golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= | ||||||
| golang.org/x/term v0.20.0 h1:VnkxpohqXaOBYJtBmEppKUG6mXpi+4O6purfc2+sMhw= | golang.org/x/term v0.20.0 h1:VnkxpohqXaOBYJtBmEppKUG6mXpi+4O6purfc2+sMhw= | ||||||
| golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= | golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= | ||||||
| golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= | golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= | ||||||
| golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= |  | ||||||
| golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= | golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= | ||||||
|  | golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= | ||||||
|  | golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= | ||||||
| golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk= | golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk= | ||||||
| golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= | golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= | ||||||
| golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= | golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= | ||||||
|  | @ -165,14 +180,17 @@ golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGm | ||||||
| golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= | golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= | ||||||
| golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= | golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= | ||||||
| golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= | golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= | ||||||
|  | golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= | ||||||
| golang.org/x/tools v0.17.0 h1:FvmRgNOcs3kOa+T20R1uhfP9F6HgG2mfxDv1vrx1Htc= | golang.org/x/tools v0.17.0 h1:FvmRgNOcs3kOa+T20R1uhfP9F6HgG2mfxDv1vrx1Htc= | ||||||
| golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps= | golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps= | ||||||
| golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= | golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= | ||||||
| golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= | golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= | ||||||
| golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= | golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= | ||||||
| golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= | golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= | ||||||
| google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= | google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= | ||||||
| google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= | google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= | ||||||
|  | google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= | ||||||
|  | google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= | ||||||
| google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= | google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= | ||||||
| google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= | google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= | ||||||
| gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= | ||||||
|  | @ -188,14 +206,14 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= | ||||||
| gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= | gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= | ||||||
| gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= | gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= | ||||||
| gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= | gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= | ||||||
| k8s.io/api v0.28.10 h1:q1Y+h3F+siuwP/qCQuqgqGJjaIuQWN0yFE7z367E3Q0= | k8s.io/api v0.28.12 h1:C2hpsaso18pqn0Dmkfnbv/YCctozTC3KGGuZ6bF7zhQ= | ||||||
| k8s.io/api v0.28.10/go.mod h1:u6EzGdzmEC2vfhyw4sD89i7OIc/2v1EAwvd1t4chQac= | k8s.io/api v0.28.12/go.mod h1:qjswI+whxvf9LAKD4sEYHfy+WgHGWeH+H5sCRQMwZAQ= | ||||||
| k8s.io/apiextensions-apiserver v0.25.9 h1:Pycd6lm2auABp9wKQHCFSEPG+NPdFSTJXPST6NJFzB8= | k8s.io/apiextensions-apiserver v0.25.9 h1:Pycd6lm2auABp9wKQHCFSEPG+NPdFSTJXPST6NJFzB8= | ||||||
| k8s.io/apiextensions-apiserver v0.25.9/go.mod h1:ijGxmSG1GLOEaWhTuaEr0M7KUeia3mWCZa6FFQqpt1M= | k8s.io/apiextensions-apiserver v0.25.9/go.mod h1:ijGxmSG1GLOEaWhTuaEr0M7KUeia3mWCZa6FFQqpt1M= | ||||||
| k8s.io/apimachinery v0.28.10 h1:cWonrYsJK3lbuf9IgMs5+L5Jzw6QR3ZGA3hzwG0HDeI= | k8s.io/apimachinery v0.28.12 h1:VepMEVOi9o7L/4wMAXJq+3BK9tqBIeerTB+HSOTKeo0= | ||||||
| k8s.io/apimachinery v0.28.10/go.mod h1:zUG757HaKs6Dc3iGtKjzIpBfqTM4yiRsEe3/E7NX15o= | k8s.io/apimachinery v0.28.12/go.mod h1:zUG757HaKs6Dc3iGtKjzIpBfqTM4yiRsEe3/E7NX15o= | ||||||
| k8s.io/client-go v0.28.10 h1:y+mvUei3+RU0rE7r2BZFA2ApTAsXSN1glGs4QfULLt4= | k8s.io/client-go v0.28.12 h1:li7iRPRQF3vDki6gTxT/kXWJvw3BkJSdjVPVhDTZQec= | ||||||
| k8s.io/client-go v0.28.10/go.mod h1:JLwjCWhQhvm1F4J+7YAr9WVhSRNmfkRofPWU43m8LZk= | k8s.io/client-go v0.28.12/go.mod h1:yEzH2Z+nEGlrnKyHJWcJsbOr5tGdIj04dj1TVQOg0wE= | ||||||
| k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg= | k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg= | ||||||
| k8s.io/klog/v2 v2.100.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= | k8s.io/klog/v2 v2.100.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= | ||||||
| k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 h1:LyMgNKD2P8Wn1iAwQU5OhxCKlKJy0sHc+PcDwFB24dQ= | k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 h1:LyMgNKD2P8Wn1iAwQU5OhxCKlKJy0sHc+PcDwFB24dQ= | ||||||
|  |  | ||||||
|  | @ -10,7 +10,7 @@ metadata: | ||||||
| #    "delete-date": "2020-08-31"  # can only be deleted on that day if "delete-date "key is configured | #    "delete-date": "2020-08-31"  # can only be deleted on that day if "delete-date "key is configured | ||||||
| #    "delete-clustername": "acid-test-cluster"  # can only be deleted when name matches if "delete-clustername" key is configured | #    "delete-clustername": "acid-test-cluster"  # can only be deleted when name matches if "delete-clustername" key is configured | ||||||
| spec: | spec: | ||||||
|   dockerImage: ghcr.io/zalando/spilo-16:3.2-p3 |   dockerImage: ghcr.io/zalando/spilo-16:3.3-p1 | ||||||
|   teamId: "acid" |   teamId: "acid" | ||||||
|   numberOfInstances: 2 |   numberOfInstances: 2 | ||||||
|   users:  # Application/Robot users |   users:  # Application/Robot users | ||||||
|  |  | ||||||
|  | @ -18,11 +18,11 @@ data: | ||||||
|   connection_pooler_default_memory_limit: 100Mi |   connection_pooler_default_memory_limit: 100Mi | ||||||
|   connection_pooler_default_memory_request: 100Mi |   connection_pooler_default_memory_request: 100Mi | ||||||
|   connection_pooler_image: "registry.opensource.zalan.do/acid/pgbouncer:master-32" |   connection_pooler_image: "registry.opensource.zalan.do/acid/pgbouncer:master-32" | ||||||
|   # connection_pooler_max_db_connections: 60 |   connection_pooler_max_db_connections: "60" | ||||||
|   # connection_pooler_mode: "transaction" |   connection_pooler_mode: "transaction" | ||||||
|   # connection_pooler_number_of_instances: 2 |   connection_pooler_number_of_instances: "2" | ||||||
|   # connection_pooler_schema: "pooler" |   connection_pooler_schema: "pooler" | ||||||
|   # connection_pooler_user: "pooler" |   connection_pooler_user: "pooler" | ||||||
|   crd_categories: "all" |   crd_categories: "all" | ||||||
|   # custom_service_annotations: "keyx:valuez,keya:valuea" |   # custom_service_annotations: "keyx:valuez,keya:valuea" | ||||||
|   # custom_pod_annotations: "keya:valuea,keyb:valueb" |   # custom_pod_annotations: "keya:valuea,keyb:valueb" | ||||||
|  | @ -34,39 +34,41 @@ data: | ||||||
|   default_memory_request: 100Mi |   default_memory_request: 100Mi | ||||||
|   # delete_annotation_date_key: delete-date |   # delete_annotation_date_key: delete-date | ||||||
|   # delete_annotation_name_key: delete-clustername |   # delete_annotation_name_key: delete-clustername | ||||||
|   docker_image: ghcr.io/zalando/spilo-16:3.2-p3 |   docker_image: ghcr.io/zalando/spilo-16:3.3-p1 | ||||||
|   # downscaler_annotations: "deployment-time,downscaler/*" |   # downscaler_annotations: "deployment-time,downscaler/*" | ||||||
|   # enable_admin_role_for_users: "true" |   enable_admin_role_for_users: "true" | ||||||
|   # enable_crd_registration: "true" |   enable_crd_registration: "true" | ||||||
|   # enable_cross_namespace_secret: "false" |   enable_crd_validation: "true" | ||||||
|  |   enable_cross_namespace_secret: "false" | ||||||
|   enable_finalizers: "false" |   enable_finalizers: "false" | ||||||
|   # enable_database_access: "true" |   enable_database_access: "true" | ||||||
|   enable_ebs_gp3_migration: "false" |   enable_ebs_gp3_migration: "false" | ||||||
|   # enable_ebs_gp3_migration_max_size: "1000" |   enable_ebs_gp3_migration_max_size: "1000" | ||||||
|   # enable_init_containers: "true" |   enable_init_containers: "true" | ||||||
|   # enable_lazy_spilo_upgrade: "false" |   enable_lazy_spilo_upgrade: "false" | ||||||
|   enable_master_load_balancer: "false" |   enable_master_load_balancer: "false" | ||||||
|   enable_master_pooler_load_balancer: "false" |   enable_master_pooler_load_balancer: "false" | ||||||
|   enable_password_rotation: "false" |   enable_password_rotation: "false" | ||||||
|   enable_patroni_failsafe_mode: "false" |   enable_patroni_failsafe_mode: "false" | ||||||
|   enable_secrets_deletion: "true" |   enable_owner_references: "false" | ||||||
|   enable_persistent_volume_claim_deletion: "true" |   enable_persistent_volume_claim_deletion: "true" | ||||||
|   enable_pgversion_env_var: "true" |   enable_pgversion_env_var: "true" | ||||||
|   # enable_pod_antiaffinity: "false" |   enable_pod_antiaffinity: "false" | ||||||
|   # enable_pod_disruption_budget: "true" |   enable_pod_disruption_budget: "true" | ||||||
|   # enable_postgres_team_crd: "false" |   enable_postgres_team_crd: "false" | ||||||
|   # enable_postgres_team_crd_superusers: "false" |   enable_postgres_team_crd_superusers: "false" | ||||||
|   enable_readiness_probe: "false" |   enable_readiness_probe: "false" | ||||||
|   enable_replica_load_balancer: "false" |   enable_replica_load_balancer: "false" | ||||||
|   enable_replica_pooler_load_balancer: "false" |   enable_replica_pooler_load_balancer: "false" | ||||||
|   # enable_shm_volume: "true" |   enable_secrets_deletion: "true" | ||||||
|   # enable_sidecars: "true" |   enable_shm_volume: "true" | ||||||
|  |   enable_sidecars: "true" | ||||||
|   enable_spilo_wal_path_compat: "true" |   enable_spilo_wal_path_compat: "true" | ||||||
|   enable_team_id_clustername_prefix: "false" |   enable_team_id_clustername_prefix: "false" | ||||||
|   enable_team_member_deprecation: "false" |   enable_team_member_deprecation: "false" | ||||||
|   # enable_team_superuser: "false" |   enable_team_superuser: "false" | ||||||
|   enable_teams_api: "false" |   enable_teams_api: "false" | ||||||
|   # etcd_host: "" |   etcd_host: "" | ||||||
|   external_traffic_policy: "Cluster" |   external_traffic_policy: "Cluster" | ||||||
|   # gcp_credentials: "" |   # gcp_credentials: "" | ||||||
|   # ignored_annotations: "" |   # ignored_annotations: "" | ||||||
|  | @ -76,56 +78,55 @@ data: | ||||||
|   # inherited_annotations: owned-by |   # inherited_annotations: owned-by | ||||||
|   # inherited_labels: application,environment |   # inherited_labels: application,environment | ||||||
|   # kube_iam_role: "" |   # kube_iam_role: "" | ||||||
|   # kubernetes_use_configmaps: "false" |   kubernetes_use_configmaps: "false" | ||||||
|   # log_s3_bucket: "" |   # log_s3_bucket: "" | ||||||
|   # logical_backup_azure_storage_account_name: "" |   # logical_backup_azure_storage_account_name: "" | ||||||
|   # logical_backup_azure_storage_container: "" |   # logical_backup_azure_storage_container: "" | ||||||
|   # logical_backup_azure_storage_account_key: "" |   # logical_backup_azure_storage_account_key: "" | ||||||
|   # logical_backup_cpu_limit: "" |   # logical_backup_cpu_limit: "" | ||||||
|   # logical_backup_cpu_request: "" |   # logical_backup_cpu_request: "" | ||||||
|   logical_backup_docker_image: "ghcr.io/zalando/postgres-operator/logical-backup:v1.12.2" |   logical_backup_cronjob_environment_secret: "" | ||||||
|  |   logical_backup_docker_image: "ghcr.io/zalando/postgres-operator/logical-backup:v1.13.0" | ||||||
|   # logical_backup_google_application_credentials: "" |   # logical_backup_google_application_credentials: "" | ||||||
|   logical_backup_job_prefix: "logical-backup-" |   logical_backup_job_prefix: "logical-backup-" | ||||||
|   # logical_backup_memory_limit: "" |   # logical_backup_memory_limit: "" | ||||||
|   # logical_backup_memory_request: "" |   # logical_backup_memory_request: "" | ||||||
|   logical_backup_provider: "s3" |   logical_backup_provider: "s3" | ||||||
|   # logical_backup_s3_access_key_id: "" |   logical_backup_s3_access_key_id: "" | ||||||
|   logical_backup_s3_bucket: "my-bucket-url" |   logical_backup_s3_bucket: "my-bucket-url" | ||||||
|   # logical_backup_s3_bucket_prefix: "spilo" |   logical_backup_s3_bucket_prefix: "spilo" | ||||||
|   # logical_backup_s3_region: "" |   logical_backup_s3_region: "" | ||||||
|   # logical_backup_s3_endpoint: "" |   logical_backup_s3_endpoint: "" | ||||||
|   # logical_backup_s3_secret_access_key: "" |   logical_backup_s3_secret_access_key: "" | ||||||
|   logical_backup_s3_sse: "AES256" |   logical_backup_s3_sse: "AES256" | ||||||
|   # logical_backup_s3_retention_time: "" |   logical_backup_s3_retention_time: "" | ||||||
|   logical_backup_schedule: "30 00 * * *" |   logical_backup_schedule: "30 00 * * *" | ||||||
|   # logical_backup_cronjob_environment_secret: "" |  | ||||||
|   major_version_upgrade_mode: "manual" |   major_version_upgrade_mode: "manual" | ||||||
|   # major_version_upgrade_team_allow_list: "" |   # major_version_upgrade_team_allow_list: "" | ||||||
|   master_dns_name_format: "{cluster}.{namespace}.{hostedzone}" |   master_dns_name_format: "{cluster}.{namespace}.{hostedzone}" | ||||||
|   # master_legacy_dns_name_format: "{cluster}.{team}.{hostedzone}" |   master_legacy_dns_name_format: "{cluster}.{team}.{hostedzone}" | ||||||
|   # master_pod_move_timeout: 20m |   master_pod_move_timeout: 20m | ||||||
|   # max_instances: "-1" |  | ||||||
|   # min_instances: "-1" |  | ||||||
|   # max_cpu_request: "1" |   # max_cpu_request: "1" | ||||||
|  |   max_instances: "-1" | ||||||
|   # max_memory_request: 4Gi |   # max_memory_request: 4Gi | ||||||
|   # min_cpu_limit: 250m |   min_cpu_limit: 250m | ||||||
|   # min_memory_limit: 250Mi |   min_instances: "-1" | ||||||
|   # minimal_major_version: "12" |   min_memory_limit: 250Mi | ||||||
|  |   minimal_major_version: "12" | ||||||
|   # node_readiness_label: "status:ready" |   # node_readiness_label: "status:ready" | ||||||
|   # node_readiness_label_merge: "OR" |   # node_readiness_label_merge: "OR" | ||||||
|   # oauth_token_secret_name: postgresql-operator |   oauth_token_secret_name: postgresql-operator | ||||||
|   # pam_configuration: | |   pam_configuration: "https://info.example.com/oauth2/tokeninfo?access_token= uid realm=/employees" | ||||||
|   #  https://info.example.com/oauth2/tokeninfo?access_token= uid realm=/employees |   pam_role_name: zalandos | ||||||
|   # pam_role_name: zalandos |  | ||||||
|   patroni_api_check_interval: "1s" |   patroni_api_check_interval: "1s" | ||||||
|   patroni_api_check_timeout: "5s" |   patroni_api_check_timeout: "5s" | ||||||
|   # password_rotation_interval: "90" |   password_rotation_interval: "90" | ||||||
|   # password_rotation_user_retention: "180" |   password_rotation_user_retention: "180" | ||||||
|   pdb_master_label_selector: "true" |   pdb_master_label_selector: "true" | ||||||
|   pdb_name_format: "postgres-{cluster}-pdb" |   pdb_name_format: "postgres-{cluster}-pdb" | ||||||
|   persistent_volume_claim_retention_policy: "when_deleted:retain,when_scaled:retain" |   persistent_volume_claim_retention_policy: "when_deleted:retain,when_scaled:retain" | ||||||
|   # pod_antiaffinity_preferred_during_scheduling: "false" |   pod_antiaffinity_preferred_during_scheduling: "false" | ||||||
|   # pod_antiaffinity_topology_key: "kubernetes.io/hostname" |   pod_antiaffinity_topology_key: "kubernetes.io/hostname" | ||||||
|   pod_deletion_wait_timeout: 10m |   pod_deletion_wait_timeout: 10m | ||||||
|   # pod_environment_configmap: "default/my-custom-config" |   # pod_environment_configmap: "default/my-custom-config" | ||||||
|   # pod_environment_secret: "my-custom-secret" |   # pod_environment_secret: "my-custom-secret" | ||||||
|  | @ -133,17 +134,17 @@ data: | ||||||
|   pod_management_policy: "ordered_ready" |   pod_management_policy: "ordered_ready" | ||||||
|   # pod_priority_class_name: "postgres-pod-priority" |   # pod_priority_class_name: "postgres-pod-priority" | ||||||
|   pod_role_label: spilo-role |   pod_role_label: spilo-role | ||||||
|   # pod_service_account_definition: "" |   pod_service_account_definition: "" | ||||||
|   pod_service_account_name: "postgres-pod" |   pod_service_account_name: "postgres-pod" | ||||||
|   # pod_service_account_role_binding_definition: "" |   pod_service_account_role_binding_definition: "" | ||||||
|   pod_terminate_grace_period: 5m |   pod_terminate_grace_period: 5m | ||||||
|   # postgres_superuser_teams: "postgres_superusers" |   postgres_superuser_teams: "postgres_superusers" | ||||||
|   # protected_role_names: "admin,cron_admin" |   protected_role_names: "admin,cron_admin" | ||||||
|   ready_wait_interval: 3s |   ready_wait_interval: 3s | ||||||
|   ready_wait_timeout: 30s |   ready_wait_timeout: 30s | ||||||
|   repair_period: 5m |   repair_period: 5m | ||||||
|   replica_dns_name_format: "{cluster}-repl.{namespace}.{hostedzone}" |   replica_dns_name_format: "{cluster}-repl.{namespace}.{hostedzone}" | ||||||
|   # replica_legacy_dns_name_format: "{cluster}-repl.{team}.{hostedzone}" |   replica_legacy_dns_name_format: "{cluster}-repl.{team}.{hostedzone}" | ||||||
|   replication_username: standby |   replication_username: standby | ||||||
|   resource_check_interval: 3s |   resource_check_interval: 3s | ||||||
|   resource_check_timeout: 10m |   resource_check_timeout: 10m | ||||||
|  | @ -153,7 +154,7 @@ data: | ||||||
|   secret_name_template: "{username}.{cluster}.credentials.{tprkind}.{tprgroup}" |   secret_name_template: "{username}.{cluster}.credentials.{tprkind}.{tprgroup}" | ||||||
|   share_pgsocket_with_sidecars: "false" |   share_pgsocket_with_sidecars: "false" | ||||||
|   # sidecar_docker_images: "" |   # sidecar_docker_images: "" | ||||||
|   # set_memory_request_to_limit: "false" |   set_memory_request_to_limit: "false" | ||||||
|   spilo_allow_privilege_escalation: "true" |   spilo_allow_privilege_escalation: "true" | ||||||
|   # spilo_runasuser: 101 |   # spilo_runasuser: 101 | ||||||
|   # spilo_runasgroup: 103 |   # spilo_runasgroup: 103 | ||||||
|  | @ -161,10 +162,10 @@ data: | ||||||
|   spilo_privileged: "false" |   spilo_privileged: "false" | ||||||
|   storage_resize_mode: "pvc" |   storage_resize_mode: "pvc" | ||||||
|   super_username: postgres |   super_username: postgres | ||||||
|   # target_major_version: "16" |   target_major_version: "16" | ||||||
|   # team_admin_role: "admin" |   team_admin_role: "admin" | ||||||
|   # team_api_role_configuration: "log_statement:all" |   team_api_role_configuration: "log_statement:all" | ||||||
|   # teams_api_url: http://fake-teams-api.default.svc.cluster.local |   teams_api_url: http://fake-teams-api.default.svc.cluster.local | ||||||
|   # toleration: "key:db-only,operator:Exists,effect:NoSchedule" |   # toleration: "key:db-only,operator:Exists,effect:NoSchedule" | ||||||
|   # wal_az_storage_account: "" |   # wal_az_storage_account: "" | ||||||
|   # wal_gs_bucket: "" |   # wal_gs_bucket: "" | ||||||
|  |  | ||||||
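All of the newly uncommented defaults above are quoted strings because Kubernetes ConfigMap data values can only be strings; the operator parses booleans and integers out of them. A minimal sketch of the same idea — the ConfigMap name and the selected keys here are illustrative, not taken from this diff:

    apiVersion: v1
    kind: ConfigMap
    metadata:
      name: postgres-operator  # assumed name; use whatever your deployment references
    data:
      connection_pooler_number_of_instances: "2"  # integer, passed as a string
      enable_crd_registration: "true"             # boolean, passed as a string
      docker_image: ghcr.io/zalando/spilo-16:3.3-p1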
|  | @ -0,0 +1,23 @@ | ||||||
|  | apiVersion: apiextensions.k8s.io/v1 | ||||||
|  | kind: CustomResourceDefinition | ||||||
|  | metadata: | ||||||
|  |   name: fabriceventstreams.zalando.org | ||||||
|  | spec: | ||||||
|  |   group: zalando.org | ||||||
|  |   names: | ||||||
|  |     kind: FabricEventStream | ||||||
|  |     listKind: FabricEventStreamList | ||||||
|  |     plural: fabriceventstreams | ||||||
|  |     singular: fabriceventstream | ||||||
|  |     shortNames: | ||||||
|  |     - fes | ||||||
|  |     categories: | ||||||
|  |     - all | ||||||
|  |   scope: Namespaced | ||||||
|  |   versions: | ||||||
|  |   - name: v1 | ||||||
|  |     served: true | ||||||
|  |     storage: true | ||||||
|  |     schema: | ||||||
|  |       openAPIV3Schema: | ||||||
|  |         type: object | ||||||
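The CRD added above registers FabricEventStream under zalando.org/v1 with an open schema (type: object and nothing else), so no spec fields are validated — and since apiextensions.k8s.io/v1 prunes unknown fields by default, anything outside metadata would be dropped unless the schema is extended. A minimal namespaced instance derived purely from this CRD (the name is a placeholder):

    apiVersion: zalando.org/v1
    kind: FabricEventStream
    metadata:
      name: example-event-stream  # placeholder
      namespace: default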
|  | @ -94,6 +94,7 @@ rules: | ||||||
|   - create |   - create | ||||||
|   - delete |   - delete | ||||||
|   - get |   - get | ||||||
|  |   - patch | ||||||
|   - update |   - update | ||||||
| # to check nodes for node readiness label | # to check nodes for node readiness label | ||||||
| - apiGroups: | - apiGroups: | ||||||
|  | @ -166,6 +167,7 @@ rules: | ||||||
|   - get |   - get | ||||||
|   - list |   - list | ||||||
|   - patch |   - patch | ||||||
|  |   - update | ||||||
| # to CRUD cron jobs for logical backups | # to CRUD cron jobs for logical backups | ||||||
| - apiGroups: | - apiGroups: | ||||||
|   - batch |   - batch | ||||||
|  |  | ||||||
|  | @ -102,6 +102,7 @@ rules: | ||||||
|   - delete |   - delete | ||||||
|   - get |   - get | ||||||
|   - update |   - update | ||||||
|  |   - patch | ||||||
| # to check nodes for node readiness label | # to check nodes for node readiness label | ||||||
| - apiGroups: | - apiGroups: | ||||||
|   - "" |   - "" | ||||||
|  | @ -173,6 +174,7 @@ rules: | ||||||
|   - get |   - get | ||||||
|   - list |   - list | ||||||
|   - patch |   - patch | ||||||
|  |   - update | ||||||
| # to CRUD cron jobs for logical backups | # to CRUD cron jobs for logical backups | ||||||
| - apiGroups: | - apiGroups: | ||||||
|   - batch |   - batch | ||||||
|  |  | ||||||
|  | @ -66,7 +66,7 @@ spec: | ||||||
|                   type: string |                   type: string | ||||||
|               docker_image: |               docker_image: | ||||||
|                 type: string |                 type: string | ||||||
|                 default: "ghcr.io/zalando/spilo-16:3.2-p3" |                 default: "ghcr.io/zalando/spilo-16:3.3-p1" | ||||||
|               enable_crd_registration: |               enable_crd_registration: | ||||||
|                 type: boolean |                 type: boolean | ||||||
|                 default: true |                 default: true | ||||||
|  | @ -158,7 +158,7 @@ spec: | ||||||
|                 properties: |                 properties: | ||||||
|                   major_version_upgrade_mode: |                   major_version_upgrade_mode: | ||||||
|                     type: string |                     type: string | ||||||
|                     default: "off" |                     default: "manual" | ||||||
|                   major_version_upgrade_team_allow_list: |                   major_version_upgrade_team_allow_list: | ||||||
|                     type: array |                     type: array | ||||||
|                     items: |                     items: | ||||||
|  | @ -209,9 +209,9 @@ spec: | ||||||
|                   enable_init_containers: |                   enable_init_containers: | ||||||
|                     type: boolean |                     type: boolean | ||||||
|                     default: true |                     default: true | ||||||
|                   enable_secrets_deletion: |                   enable_owner_references: | ||||||
|                     type: boolean |                     type: boolean | ||||||
|                     default: true |                     default: false | ||||||
|                   enable_persistent_volume_claim_deletion: |                   enable_persistent_volume_claim_deletion: | ||||||
|                     type: boolean |                     type: boolean | ||||||
|                     default: true |                     default: true | ||||||
|  | @ -224,6 +224,9 @@ spec: | ||||||
|                   enable_readiness_probe: |                   enable_readiness_probe: | ||||||
|                     type: boolean |                     type: boolean | ||||||
|                     default: false |                     default: false | ||||||
|  |                   enable_secrets_deletion: | ||||||
|  |                     type: boolean | ||||||
|  |                     default: true | ||||||
|                   enable_sidecars: |                   enable_sidecars: | ||||||
|                     type: boolean |                     type: boolean | ||||||
|                     default: true |                     default: true | ||||||
|  | @ -467,7 +470,6 @@ spec: | ||||||
|                     type: string |                     type: string | ||||||
|                   additional_secret_mount_path: |                   additional_secret_mount_path: | ||||||
|                     type: string |                     type: string | ||||||
|                     default: "/meta/credentials" |  | ||||||
|                   aws_region: |                   aws_region: | ||||||
|                     type: string |                     type: string | ||||||
|                     default: "eu-central-1" |                     default: "eu-central-1" | ||||||
|  | @ -506,7 +508,7 @@ spec: | ||||||
|                     pattern: '^(\d+m|\d+(\.\d{1,3})?)$' |                     pattern: '^(\d+m|\d+(\.\d{1,3})?)$' | ||||||
|                   logical_backup_docker_image: |                   logical_backup_docker_image: | ||||||
|                     type: string |                     type: string | ||||||
|                     default: "ghcr.io/zalando/postgres-operator/logical-backup:v1.12.2" |                     default: "ghcr.io/zalando/postgres-operator/logical-backup:v1.13.0" | ||||||
|                   logical_backup_google_application_credentials: |                   logical_backup_google_application_credentials: | ||||||
|                     type: string |                     type: string | ||||||
|                   logical_backup_job_prefix: |                   logical_backup_job_prefix: | ||||||
|  |  | ||||||
|  | @ -19,7 +19,7 @@ spec: | ||||||
|       serviceAccountName: postgres-operator |       serviceAccountName: postgres-operator | ||||||
|       containers: |       containers: | ||||||
|       - name: postgres-operator |       - name: postgres-operator | ||||||
|         image: ghcr.io/zalando/postgres-operator:v1.12.2 |         image: ghcr.io/zalando/postgres-operator:v1.13.0 | ||||||
|         imagePullPolicy: IfNotPresent |         imagePullPolicy: IfNotPresent | ||||||
|         resources: |         resources: | ||||||
|           requests: |           requests: | ||||||
|  |  | ||||||
|  | @ -3,7 +3,7 @@ kind: OperatorConfiguration | ||||||
| metadata: | metadata: | ||||||
|   name: postgresql-operator-default-configuration |   name: postgresql-operator-default-configuration | ||||||
| configuration: | configuration: | ||||||
|   docker_image: ghcr.io/zalando/spilo-16:3.2-p3 |   docker_image: ghcr.io/zalando/spilo-16:3.3-p1 | ||||||
|   # enable_crd_registration: true |   # enable_crd_registration: true | ||||||
|   # crd_categories: |   # crd_categories: | ||||||
|   # - all |   # - all | ||||||
|  | @ -36,7 +36,7 @@ configuration: | ||||||
|     replication_username: standby |     replication_username: standby | ||||||
|     super_username: postgres |     super_username: postgres | ||||||
|   major_version_upgrade: |   major_version_upgrade: | ||||||
|     major_version_upgrade_mode: "off" |     major_version_upgrade_mode: "manual" | ||||||
|     # major_version_upgrade_team_allow_list: |     # major_version_upgrade_team_allow_list: | ||||||
|     # - acid |     # - acid | ||||||
|     minimal_major_version: "12" |     minimal_major_version: "12" | ||||||
|  | @ -59,11 +59,12 @@ configuration: | ||||||
|     # enable_cross_namespace_secret: "false" |     # enable_cross_namespace_secret: "false" | ||||||
|     enable_finalizers: false |     enable_finalizers: false | ||||||
|     enable_init_containers: true |     enable_init_containers: true | ||||||
|     enable_secrets_deletion: true |     enable_owner_references: false | ||||||
|     enable_persistent_volume_claim_deletion: true |     enable_persistent_volume_claim_deletion: true | ||||||
|     enable_pod_antiaffinity: false |     enable_pod_antiaffinity: false | ||||||
|     enable_pod_disruption_budget: true |     enable_pod_disruption_budget: true | ||||||
|     enable_readiness_probe: false |     enable_readiness_probe: false | ||||||
|  |     enable_secrets_deletion: true | ||||||
|     enable_sidecars: true |     enable_sidecars: true | ||||||
|     # ignored_annotations: |     # ignored_annotations: | ||||||
|     # - k8s.v1.cni.cncf.io/network-status |     # - k8s.v1.cni.cncf.io/network-status | ||||||
|  | @ -167,7 +168,7 @@ configuration: | ||||||
|     # logical_backup_cpu_request: "" |     # logical_backup_cpu_request: "" | ||||||
|     # logical_backup_memory_limit: "" |     # logical_backup_memory_limit: "" | ||||||
|     # logical_backup_memory_request: "" |     # logical_backup_memory_request: "" | ||||||
|     logical_backup_docker_image: "ghcr.io/zalando/postgres-operator/logical-backup:v1.12.2" |     logical_backup_docker_image: "ghcr.io/zalando/postgres-operator/logical-backup:v1.13.0" | ||||||
|     # logical_backup_google_application_credentials: "" |     # logical_backup_google_application_credentials: "" | ||||||
|     logical_backup_job_prefix: "logical-backup-" |     logical_backup_job_prefix: "logical-backup-" | ||||||
|     logical_backup_provider: "s3" |     logical_backup_provider: "s3" | ||||||
|  |  | ||||||
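Note on the configuration above: the default for major_version_upgrade_mode moves from "off" to "manual". As a rough sketch of the three documented modes (off - no actions, manual - manifest triggers action, full - manifest and minimal version violation trigger upgrade) — illustrative Go only, the type and helper below are hypothetical and not operator code:

package main

import "fmt"

// upgradeConfig mirrors the two relevant manifest options; field names are
// illustrative, not the operator's internal types.
type upgradeConfig struct {
    Mode           string // "off" | "manual" | "full"
    MinimalVersion int    // minimal_major_version, e.g. 12
}

// shouldUpgrade decides whether a cluster running `current` whose manifest
// requests `desired` should get a major version upgrade.
func shouldUpgrade(cfg upgradeConfig, current, desired int) bool {
    switch cfg.Mode {
    case "off":
        return false // no actions
    case "manual":
        return desired > current // only a manifest bump triggers the upgrade
    case "full":
        // a manifest bump or a violation of the configured minimum triggers it
        return desired > current || current < cfg.MinimalVersion
    }
    return false
}

func main() {
    cfg := upgradeConfig{Mode: "manual", MinimalVersion: 12}
    fmt.Println(shouldUpgrade(cfg, 13, 16)) // true: manifest asks for 16
    fmt.Println(shouldUpgrade(cfg, 13, 13)) // false: nothing requested
}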
|  | @ -224,7 +224,7 @@ spec: | ||||||
|                 type: array |                 type: array | ||||||
|                 items: |                 items: | ||||||
|                   type: string |                   type: string | ||||||
|                   pattern: '^\ *((Mon|Tue|Wed|Thu|Fri|Sat|Sun):(2[0-3]|[01]?\d):([0-5]?\d)|(2[0-3]|[01]?\d):([0-5]?\d))-((Mon|Tue|Wed|Thu|Fri|Sat|Sun):(2[0-3]|[01]?\d):([0-5]?\d)|(2[0-3]|[01]?\d):([0-5]?\d))\ *$' |                   pattern: '^\ *((Mon|Tue|Wed|Thu|Fri|Sat|Sun):(2[0-3]|[01]?\d):([0-5]?\d)|(2[0-3]|[01]?\d):([0-5]?\d))-((2[0-3]|[01]?\d):([0-5]?\d))\ *$' | ||||||
|               masterServiceAnnotations: |               masterServiceAnnotations: | ||||||
|                 type: object |                 type: object | ||||||
|                 additionalProperties: |                 additionalProperties: | ||||||
|  | @ -373,7 +373,6 @@ spec: | ||||||
|                   version: |                   version: | ||||||
|                     type: string |                     type: string | ||||||
|                     enum: |                     enum: | ||||||
|                       - "11" |  | ||||||
|                       - "12" |                       - "12" | ||||||
|                       - "13" |                       - "13" | ||||||
|                       - "14" |                       - "14" | ||||||
|  |  | ||||||
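The tightened maintenanceWindows pattern above drops the weekday alternative from the end time (the redundant duplicated end-time alternative is collapsed as shown). A standalone Go check of the new pattern:

package main

import (
    "fmt"
    "regexp"
)

func main() {
    // Pattern as in the CRD above.
    re := regexp.MustCompile(`^\ *((Mon|Tue|Wed|Thu|Fri|Sat|Sun):(2[0-3]|[01]?\d):([0-5]?\d)|(2[0-3]|[01]?\d):([0-5]?\d))-((2[0-3]|[01]?\d):([0-5]?\d))\ *$`)
    fmt.Println(re.MatchString("Sat:01:00-06:00"))     // true: weekday only on the start time
    fmt.Println(re.MatchString("01:00-06:00"))         // true: daily window
    fmt.Println(re.MatchString("Mon:00:00-Fri:00:00")) // false: weekday in the end time is rejected
}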
|  | @ -595,9 +595,6 @@ var PostgresCRDResourceValidation = apiextv1.CustomResourceValidation{ | ||||||
| 							"version": { | 							"version": { | ||||||
| 								Type: "string", | 								Type: "string", | ||||||
| 								Enum: []apiextv1.JSON{ | 								Enum: []apiextv1.JSON{ | ||||||
| 									{ |  | ||||||
| 										Raw: []byte(`"11"`), |  | ||||||
| 									}, |  | ||||||
| 									{ | 									{ | ||||||
| 										Raw: []byte(`"12"`), | 										Raw: []byte(`"12"`), | ||||||
| 									}, | 									}, | ||||||
|  | @ -1329,7 +1326,7 @@ var OperatorConfigCRDResourceValidation = apiextv1.CustomResourceValidation{ | ||||||
| 							"enable_init_containers": { | 							"enable_init_containers": { | ||||||
| 								Type: "boolean", | 								Type: "boolean", | ||||||
| 							}, | 							}, | ||||||
| 							"enable_secrets_deletion": { | 							"enable_owner_references": { | ||||||
| 								Type: "boolean", | 								Type: "boolean", | ||||||
| 							}, | 							}, | ||||||
| 							"enable_persistent_volume_claim_deletion": { | 							"enable_persistent_volume_claim_deletion": { | ||||||
|  | @ -1344,6 +1341,9 @@ var OperatorConfigCRDResourceValidation = apiextv1.CustomResourceValidation{ | ||||||
| 							"enable_readiness_probe": { | 							"enable_readiness_probe": { | ||||||
| 								Type: "boolean", | 								Type: "boolean", | ||||||
| 							}, | 							}, | ||||||
|  | 							"enable_secrets_deletion": { | ||||||
|  | 								Type: "boolean", | ||||||
|  | 							}, | ||||||
| 							"enable_sidecars": { | 							"enable_sidecars": { | ||||||
| 								Type: "boolean", | 								Type: "boolean", | ||||||
| 							}, | 							}, | ||||||
|  |  | ||||||
|  | @ -47,7 +47,7 @@ type PostgresUsersConfiguration struct { | ||||||
|  |  | ||||||
| // MajorVersionUpgradeConfiguration defines how to execute major version upgrades of Postgres. | // MajorVersionUpgradeConfiguration defines how to execute major version upgrades of Postgres. | ||||||
| type MajorVersionUpgradeConfiguration struct { | type MajorVersionUpgradeConfiguration struct { | ||||||
| 	MajorVersionUpgradeMode          string   `json:"major_version_upgrade_mode" default:"off"` // off - no actions, manual - manifest triggers action, full - manifest and minimal version violation trigger upgrade | 	MajorVersionUpgradeMode          string   `json:"major_version_upgrade_mode" default:"manual"` // off - no actions, manual - manifest triggers action, full - manifest and minimal version violation trigger upgrade | ||||||
| 	MajorVersionUpgradeTeamAllowList []string `json:"major_version_upgrade_team_allow_list,omitempty"` | 	MajorVersionUpgradeTeamAllowList []string `json:"major_version_upgrade_team_allow_list,omitempty"` | ||||||
| 	MinimalMajorVersion              string   `json:"minimal_major_version" default:"12"` | 	MinimalMajorVersion              string   `json:"minimal_major_version" default:"12"` | ||||||
| 	TargetMajorVersion               string   `json:"target_major_version" default:"16"` | 	TargetMajorVersion               string   `json:"target_major_version" default:"16"` | ||||||
|  | @ -55,6 +55,7 @@ type MajorVersionUpgradeConfiguration struct { | ||||||
|  |  | ||||||
| // KubernetesMetaConfiguration defines k8s conf required for all Postgres clusters and the operator itself | // KubernetesMetaConfiguration defines k8s conf required for all Postgres clusters and the operator itself | ||||||
| type KubernetesMetaConfiguration struct { | type KubernetesMetaConfiguration struct { | ||||||
|  | 	EnableOwnerReferences *bool  `json:"enable_owner_references,omitempty"` | ||||||
| 	PodServiceAccountName string `json:"pod_service_account_name,omitempty"` | 	PodServiceAccountName string `json:"pod_service_account_name,omitempty"` | ||||||
| 	// TODO: change it to the proper json | 	// TODO: change it to the proper json | ||||||
| 	PodServiceAccountDefinition            string                       `json:"pod_service_account_definition,omitempty"` | 	PodServiceAccountDefinition            string                       `json:"pod_service_account_definition,omitempty"` | ||||||
|  | @ -159,7 +160,7 @@ type AWSGCPConfiguration struct { | ||||||
| 	LogS3Bucket                  string `json:"log_s3_bucket,omitempty"` | 	LogS3Bucket                  string `json:"log_s3_bucket,omitempty"` | ||||||
| 	KubeIAMRole                  string `json:"kube_iam_role,omitempty"` | 	KubeIAMRole                  string `json:"kube_iam_role,omitempty"` | ||||||
| 	AdditionalSecretMount        string `json:"additional_secret_mount,omitempty"` | 	AdditionalSecretMount        string `json:"additional_secret_mount,omitempty"` | ||||||
| 	AdditionalSecretMountPath    string `json:"additional_secret_mount_path" default:"/meta/credentials"` | 	AdditionalSecretMountPath    string `json:"additional_secret_mount_path,omitempty"` | ||||||
| 	EnableEBSGp3Migration        bool   `json:"enable_ebs_gp3_migration" default:"false"` | 	EnableEBSGp3Migration        bool   `json:"enable_ebs_gp3_migration" default:"false"` | ||||||
| 	EnableEBSGp3MigrationMaxSize int64  `json:"enable_ebs_gp3_migration_max_size" default:"1000"` | 	EnableEBSGp3MigrationMaxSize int64  `json:"enable_ebs_gp3_migration_max_size" default:"1000"` | ||||||
| } | } | ||||||
|  |  | ||||||
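The new enable_owner_references option surfaces above as a *bool (EnableOwnerReferences), so an absent manifest value stays distinguishable from an explicit false. A minimal sketch of that pointer-option pattern, with a hypothetical resolver rather than operator code:

package main

import "fmt"

// resolveBool returns the configured value, or a default when unset.
func resolveBool(v *bool, def bool) bool {
    if v == nil {
        return def
    }
    return *v
}

func main() {
    var unset *bool
    explicit := false
    fmt.Println(resolveBool(unset, true))     // true: nil means "not set", default wins
    fmt.Println(resolveBool(&explicit, true)) // false: explicit setting wins
}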
|  | @ -133,7 +133,7 @@ type Volume struct { | ||||||
| 	Size          string                `json:"size"` | 	Size          string                `json:"size"` | ||||||
| 	StorageClass  string                `json:"storageClass,omitempty"` | 	StorageClass  string                `json:"storageClass,omitempty"` | ||||||
| 	SubPath       string                `json:"subPath,omitempty"` | 	SubPath       string                `json:"subPath,omitempty"` | ||||||
| 	IsSubPathExpr *bool                 `json:"isSubPathExpr,omitemtpy"` | 	IsSubPathExpr *bool                 `json:"isSubPathExpr,omitempty"` | ||||||
| 	Iops          *int64                `json:"iops,omitempty"` | 	Iops          *int64                `json:"iops,omitempty"` | ||||||
| 	Throughput    *int64                `json:"throughput,omitempty"` | 	Throughput    *int64                `json:"throughput,omitempty"` | ||||||
| 	VolumeType    string                `json:"type,omitempty"` | 	VolumeType    string                `json:"type,omitempty"` | ||||||
|  | @ -144,7 +144,7 @@ type AdditionalVolume struct { | ||||||
| 	Name             string          `json:"name"` | 	Name             string          `json:"name"` | ||||||
| 	MountPath        string          `json:"mountPath"` | 	MountPath        string          `json:"mountPath"` | ||||||
| 	SubPath          string          `json:"subPath,omitempty"` | 	SubPath          string          `json:"subPath,omitempty"` | ||||||
| 	IsSubPathExpr    *bool           `json:"isSubPathExpr,omitemtpy"` | 	IsSubPathExpr    *bool           `json:"isSubPathExpr,omitempty"` | ||||||
| 	TargetContainers []string        `json:"targetContainers"` | 	TargetContainers []string        `json:"targetContainers"` | ||||||
| 	VolumeSource     v1.VolumeSource `json:"volumeSource"` | 	VolumeSource     v1.VolumeSource `json:"volumeSource"` | ||||||
| } | } | ||||||
|  |  | ||||||
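The omitemtpy -> omitempty fix above is more than cosmetic: encoding/json silently ignores unknown tag options, so a nil IsSubPathExpr used to be serialized as an explicit null. A self-contained sketch reproducing only that one field:

package main

import (
    "encoding/json"
    "fmt"
)

type withTypo struct {
    IsSubPathExpr *bool `json:"isSubPathExpr,omitemtpy"` // misspelled option is ignored
}

type fixed struct {
    IsSubPathExpr *bool `json:"isSubPathExpr,omitempty"`
}

func main() {
    a, _ := json.Marshal(withTypo{})
    b, _ := json.Marshal(fixed{})
    fmt.Println(string(a)) // {"isSubPathExpr":null}
    fmt.Println(string(b)) // {}
}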
|  | @ -123,6 +123,8 @@ var maintenanceWindows = []struct { | ||||||
| 	{"expect error as weekday is empty", []byte(`":00:00-10:00"`), MaintenanceWindow{}, errors.New(`could not parse weekday: incorrect weekday`)}, | 	{"expect error as weekday is empty", []byte(`":00:00-10:00"`), MaintenanceWindow{}, errors.New(`could not parse weekday: incorrect weekday`)}, | ||||||
| 	{"expect error as maintenance window set seconds", []byte(`"Mon:00:00:00-10:00:00"`), MaintenanceWindow{}, errors.New(`incorrect maintenance window format`)}, | 	{"expect error as maintenance window set seconds", []byte(`"Mon:00:00:00-10:00:00"`), MaintenanceWindow{}, errors.New(`incorrect maintenance window format`)}, | ||||||
| 	{"expect error as 'To' time set seconds", []byte(`"Mon:00:00-00:00:00"`), MaintenanceWindow{}, errors.New("could not parse end time: incorrect time format")}, | 	{"expect error as 'To' time set seconds", []byte(`"Mon:00:00-00:00:00"`), MaintenanceWindow{}, errors.New("could not parse end time: incorrect time format")}, | ||||||
|  | 	// ideally, should be implemented | ||||||
|  | 	{"expect error as 'To' has a weekday", []byte(`"Mon:00:00-Fri:00:00"`), MaintenanceWindow{}, errors.New("could not parse end time: incorrect time format")}, | ||||||
| 	{"expect error as 'To' time is missing", []byte(`"Mon:00:00"`), MaintenanceWindow{}, errors.New("incorrect maintenance window format")}} | 	{"expect error as 'To' time is missing", []byte(`"Mon:00:00"`), MaintenanceWindow{}, errors.New("incorrect maintenance window format")}} | ||||||
|  |  | ||||||
| var postgresStatus = []struct { | var postgresStatus = []struct { | ||||||
|  |  | ||||||
|  | @ -158,6 +158,11 @@ func (in *ConnectionPoolerConfiguration) DeepCopy() *ConnectionPoolerConfigurati | ||||||
| // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. | // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. | ||||||
| func (in *KubernetesMetaConfiguration) DeepCopyInto(out *KubernetesMetaConfiguration) { | func (in *KubernetesMetaConfiguration) DeepCopyInto(out *KubernetesMetaConfiguration) { | ||||||
| 	*out = *in | 	*out = *in | ||||||
|  | 	if in.EnableOwnerReferences != nil { | ||||||
|  | 		in, out := &in.EnableOwnerReferences, &out.EnableOwnerReferences | ||||||
|  | 		*out = new(bool) | ||||||
|  | 		**out = **in | ||||||
|  | 	} | ||||||
| 	if in.SpiloAllowPrivilegeEscalation != nil { | 	if in.SpiloAllowPrivilegeEscalation != nil { | ||||||
| 		in, out := &in.SpiloAllowPrivilegeEscalation, &out.SpiloAllowPrivilegeEscalation | 		in, out := &in.SpiloAllowPrivilegeEscalation, &out.SpiloAllowPrivilegeEscalation | ||||||
| 		*out = new(bool) | 		*out = new(bool) | ||||||
|  |  | ||||||
|  | @ -1,6 +1,7 @@ | ||||||
| package v1 | package v1 | ||||||
|  |  | ||||||
| import ( | import ( | ||||||
|  | 	acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1" | ||||||
| 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" | 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" | ||||||
| ) | ) | ||||||
|  |  | ||||||
|  | @ -89,3 +90,8 @@ type DBAuth struct { | ||||||
| 	UserKey     string `json:"userKey,omitempty"` | 	UserKey     string `json:"userKey,omitempty"` | ||||||
| 	PasswordKey string `json:"passwordKey,omitempty"` | 	PasswordKey string `json:"passwordKey,omitempty"` | ||||||
| } | } | ||||||
|  |  | ||||||
|  | type Slot struct { | ||||||
|  | 	Slot        map[string]string             `json:"slot"` | ||||||
|  | 	Publication map[string]acidv1.StreamTable `json:"publication"` | ||||||
|  | } | ||||||
|  |  | ||||||
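The Slot type added above pairs Patroni-style slot settings with per-table publication definitions. A minimal serialization sketch; the slot keys shown ("type", "plugin") are illustrative Patroni slot options, and the publication map is left empty so nothing is assumed about acidv1.StreamTable's fields:

package main

import (
    "encoding/json"
    "fmt"

    acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1"
)

// Slot mirrors the type added above.
type Slot struct {
    Slot        map[string]string             `json:"slot"`
    Publication map[string]acidv1.StreamTable `json:"publication"`
}

func main() {
    s := Slot{
        Slot:        map[string]string{"type": "logical", "plugin": "pgoutput"},
        Publication: map[string]acidv1.StreamTable{},
    }
    out, _ := json.Marshal(s)
    fmt.Println(string(out)) // {"slot":{"plugin":"pgoutput","type":"logical"},"publication":{}}
}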
|  | @ -3,7 +3,6 @@ package cluster | ||||||
| // Postgres CustomResourceDefinition object i.e. Spilo | // Postgres CustomResourceDefinition object i.e. Spilo | ||||||
|  |  | ||||||
| import ( | import ( | ||||||
| 	"context" |  | ||||||
| 	"database/sql" | 	"database/sql" | ||||||
| 	"encoding/json" | 	"encoding/json" | ||||||
| 	"fmt" | 	"fmt" | ||||||
|  | @ -15,6 +14,7 @@ import ( | ||||||
|  |  | ||||||
| 	"github.com/sirupsen/logrus" | 	"github.com/sirupsen/logrus" | ||||||
| 	acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1" | 	acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1" | ||||||
|  | 	zalandov1 "github.com/zalando/postgres-operator/pkg/apis/zalando.org/v1" | ||||||
|  |  | ||||||
| 	"github.com/zalando/postgres-operator/pkg/generated/clientset/versioned/scheme" | 	"github.com/zalando/postgres-operator/pkg/generated/clientset/versioned/scheme" | ||||||
| 	"github.com/zalando/postgres-operator/pkg/spec" | 	"github.com/zalando/postgres-operator/pkg/spec" | ||||||
|  | @ -61,9 +61,13 @@ type Config struct { | ||||||
| type kubeResources struct { | type kubeResources struct { | ||||||
| 	Services            map[PostgresRole]*v1.Service | 	Services            map[PostgresRole]*v1.Service | ||||||
| 	Endpoints           map[PostgresRole]*v1.Endpoints | 	Endpoints           map[PostgresRole]*v1.Endpoints | ||||||
|  | 	PatroniEndpoints    map[string]*v1.Endpoints | ||||||
|  | 	PatroniConfigMaps   map[string]*v1.ConfigMap | ||||||
| 	Secrets             map[types.UID]*v1.Secret | 	Secrets             map[types.UID]*v1.Secret | ||||||
| 	Statefulset         *appsv1.StatefulSet | 	Statefulset         *appsv1.StatefulSet | ||||||
| 	PodDisruptionBudget *policyv1.PodDisruptionBudget | 	PodDisruptionBudget *policyv1.PodDisruptionBudget | ||||||
|  | 	LogicalBackupJob    *batchv1.CronJob | ||||||
|  | 	Streams             map[string]*zalandov1.FabricEventStream | ||||||
| 	//Pods are treated separately | 	//Pods are treated separately | ||||||
| 	//PVCs are treated separately | 	//PVCs are treated separately | ||||||
| } | } | ||||||
|  | @ -133,7 +137,10 @@ func New(cfg Config, kubeClient k8sutil.KubernetesClient, pgSpec acidv1.Postgres | ||||||
| 		kubeResources: kubeResources{ | 		kubeResources: kubeResources{ | ||||||
| 			Secrets:           make(map[types.UID]*v1.Secret), | 			Secrets:           make(map[types.UID]*v1.Secret), | ||||||
| 			Services:          make(map[PostgresRole]*v1.Service), | 			Services:          make(map[PostgresRole]*v1.Service), | ||||||
| 			Endpoints: make(map[PostgresRole]*v1.Endpoints)}, | 			Endpoints:         make(map[PostgresRole]*v1.Endpoints), | ||||||
|  | 			PatroniEndpoints:  make(map[string]*v1.Endpoints), | ||||||
|  | 			PatroniConfigMaps: make(map[string]*v1.ConfigMap), | ||||||
|  | 			Streams:           make(map[string]*zalandov1.FabricEventStream)}, | ||||||
| 		userSyncStrategy: users.DefaultUserSyncStrategy{ | 		userSyncStrategy: users.DefaultUserSyncStrategy{ | ||||||
| 			PasswordEncryption:   passwordEncryption, | 			PasswordEncryption:   passwordEncryption, | ||||||
| 			RoleDeletionSuffix:   cfg.OpConfig.RoleDeletionSuffix, | 			RoleDeletionSuffix:   cfg.OpConfig.RoleDeletionSuffix, | ||||||
|  | @ -356,6 +363,11 @@ func (c *Cluster) Create() (err error) { | ||||||
| 	c.logger.Infof("pods are ready") | 	c.logger.Infof("pods are ready") | ||||||
| 	c.eventRecorder.Event(c.GetReference(), v1.EventTypeNormal, "StatefulSet", "Pods are ready") | 	c.eventRecorder.Event(c.GetReference(), v1.EventTypeNormal, "StatefulSet", "Pods are ready") | ||||||
|  |  | ||||||
|  | 	// sync resources created by Patroni | ||||||
|  | 	if err = c.syncPatroniResources(); err != nil { | ||||||
|  | 		c.logger.Warnf("Patroni resources not yet synced: %v", err) | ||||||
|  | 	} | ||||||
|  |  | ||||||
| 	// create database objects unless we are running without pods or disabled | 	// create database objects unless we are running without pods or disabled | ||||||
| 	// that feature explicitly | 	// that feature explicitly | ||||||
| 	if !(c.databaseAccessDisabled() || c.getNumberOfInstances(&c.Spec) <= 0 || c.Spec.StandbyCluster != nil) { | 	if !(c.databaseAccessDisabled() || c.getNumberOfInstances(&c.Spec) <= 0 || c.Spec.StandbyCluster != nil) { | ||||||
|  | @ -381,10 +393,6 @@ func (c *Cluster) Create() (err error) { | ||||||
| 		c.logger.Info("a k8s cron job for logical backup has been successfully created") | 		c.logger.Info("a k8s cron job for logical backup has been successfully created") | ||||||
| 	} | 	} | ||||||
|  |  | ||||||
| 	if err := c.listResources(); err != nil { |  | ||||||
| 		c.logger.Errorf("could not list resources: %v", err) |  | ||||||
| 	} |  | ||||||
|  |  | ||||||
| 	// Create connection pooler deployment and services if necessary. Since we | 	// Create connection pooler deployment and services if necessary. Since we | ||||||
| 	// need to perform some operations with the database itself (e.g. install | 	// need to perform some operations with the database itself (e.g. install | ||||||
| 	// lookup function), do it as the last step, when everything is available. | 	// lookup function), do it as the last step, when everything is available. | ||||||
|  | @ -409,6 +417,10 @@ func (c *Cluster) Create() (err error) { | ||||||
| 		} | 		} | ||||||
| 	} | 	} | ||||||
|  |  | ||||||
|  | 	if err := c.listResources(); err != nil { | ||||||
|  | 		c.logger.Errorf("could not list resources: %v", err) | ||||||
|  | 	} | ||||||
|  |  | ||||||
| 	return nil | 	return nil | ||||||
| } | } | ||||||
|  |  | ||||||
|  | @ -422,6 +434,11 @@ func (c *Cluster) compareStatefulSetWith(statefulSet *appsv1.StatefulSet) *compa | ||||||
| 		match = false | 		match = false | ||||||
| 		reasons = append(reasons, "new statefulset's number of replicas does not match the current one") | 		reasons = append(reasons, "new statefulset's number of replicas does not match the current one") | ||||||
| 	} | 	} | ||||||
|  | 	if !reflect.DeepEqual(c.Statefulset.OwnerReferences, statefulSet.OwnerReferences) { | ||||||
|  | 		match = false | ||||||
|  | 		needsReplace = true | ||||||
|  | 		reasons = append(reasons, "new statefulset's ownerReferences do not match") | ||||||
|  | 	} | ||||||
| 	if changed, reason := c.compareAnnotations(c.Statefulset.Annotations, statefulSet.Annotations); changed { | 	if changed, reason := c.compareAnnotations(c.Statefulset.Annotations, statefulSet.Annotations); changed { | ||||||
| 		match = false | 		match = false | ||||||
| 		needsReplace = true | 		needsReplace = true | ||||||
|  | @ -433,6 +450,12 @@ func (c *Cluster) compareStatefulSetWith(statefulSet *appsv1.StatefulSet) *compa | ||||||
| 		reasons = append(reasons, "new statefulset's pod management policy do not match") | 		reasons = append(reasons, "new statefulset's pod management policy do not match") | ||||||
| 	} | 	} | ||||||
|  |  | ||||||
|  | 	if c.Statefulset.Spec.PersistentVolumeClaimRetentionPolicy == nil { | ||||||
|  | 		c.Statefulset.Spec.PersistentVolumeClaimRetentionPolicy = &appsv1.StatefulSetPersistentVolumeClaimRetentionPolicy{ | ||||||
|  | 			WhenDeleted: appsv1.RetainPersistentVolumeClaimRetentionPolicyType, | ||||||
|  | 			WhenScaled:  appsv1.RetainPersistentVolumeClaimRetentionPolicyType, | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
| 	if !reflect.DeepEqual(c.Statefulset.Spec.PersistentVolumeClaimRetentionPolicy, statefulSet.Spec.PersistentVolumeClaimRetentionPolicy) { | 	if !reflect.DeepEqual(c.Statefulset.Spec.PersistentVolumeClaimRetentionPolicy, statefulSet.Spec.PersistentVolumeClaimRetentionPolicy) { | ||||||
| 		match = false | 		match = false | ||||||
| 		needsReplace = true | 		needsReplace = true | ||||||
|  | @ -493,7 +516,6 @@ func (c *Cluster) compareStatefulSetWith(statefulSet *appsv1.StatefulSet) *compa | ||||||
| 	if changed, reason := c.compareAnnotations(c.Statefulset.Spec.Template.Annotations, statefulSet.Spec.Template.Annotations); changed { | 	if changed, reason := c.compareAnnotations(c.Statefulset.Spec.Template.Annotations, statefulSet.Spec.Template.Annotations); changed { | ||||||
| 		match = false | 		match = false | ||||||
| 		needsReplace = true | 		needsReplace = true | ||||||
| 		needsRollUpdate = true |  | ||||||
| 		reasons = append(reasons, "new statefulset's pod template metadata annotations does not match "+reason) | 		reasons = append(reasons, "new statefulset's pod template metadata annotations does not match "+reason) | ||||||
| 	} | 	} | ||||||
| 	if !reflect.DeepEqual(c.Statefulset.Spec.Template.Spec.SecurityContext, statefulSet.Spec.Template.Spec.SecurityContext) { | 	if !reflect.DeepEqual(c.Statefulset.Spec.Template.Spec.SecurityContext, statefulSet.Spec.Template.Spec.SecurityContext) { | ||||||
|  | @ -513,9 +535,9 @@ func (c *Cluster) compareStatefulSetWith(statefulSet *appsv1.StatefulSet) *compa | ||||||
| 				reasons = append(reasons, fmt.Sprintf("new statefulset's name for volume %d does not match the current one", i)) | 				reasons = append(reasons, fmt.Sprintf("new statefulset's name for volume %d does not match the current one", i)) | ||||||
| 				continue | 				continue | ||||||
| 			} | 			} | ||||||
| 			if !reflect.DeepEqual(c.Statefulset.Spec.VolumeClaimTemplates[i].Annotations, statefulSet.Spec.VolumeClaimTemplates[i].Annotations) { | 			if changed, reason := c.compareAnnotations(c.Statefulset.Spec.VolumeClaimTemplates[i].Annotations, statefulSet.Spec.VolumeClaimTemplates[i].Annotations); changed { | ||||||
| 				needsReplace = true | 				needsReplace = true | ||||||
| 				reasons = append(reasons, fmt.Sprintf("new statefulset's annotations for volume %q does not match the current one", name)) | 				reasons = append(reasons, fmt.Sprintf("new statefulset's annotations for volume %q do not match the current ones: %s", name, reason)) | ||||||
| 			} | 			} | ||||||
| 			if !reflect.DeepEqual(c.Statefulset.Spec.VolumeClaimTemplates[i].Spec, statefulSet.Spec.VolumeClaimTemplates[i].Spec) { | 			if !reflect.DeepEqual(c.Statefulset.Spec.VolumeClaimTemplates[i].Spec, statefulSet.Spec.VolumeClaimTemplates[i].Spec) { | ||||||
| 				name := c.Statefulset.Spec.VolumeClaimTemplates[i].Name | 				name := c.Statefulset.Spec.VolumeClaimTemplates[i].Name | ||||||
|  | @ -591,7 +613,7 @@ func (c *Cluster) compareContainers(description string, setA, setB []v1.Containe | ||||||
| 		newCheck("new %s's %s (index %d) security context does not match the current one", | 		newCheck("new %s's %s (index %d) security context does not match the current one", | ||||||
| 			func(a, b v1.Container) bool { return !reflect.DeepEqual(a.SecurityContext, b.SecurityContext) }), | 			func(a, b v1.Container) bool { return !reflect.DeepEqual(a.SecurityContext, b.SecurityContext) }), | ||||||
| 		newCheck("new %s's %s (index %d) volume mounts do not match the current one", | 		newCheck("new %s's %s (index %d) volume mounts do not match the current one", | ||||||
| 			func(a, b v1.Container) bool { return !reflect.DeepEqual(a.VolumeMounts, b.VolumeMounts) }), | 			func(a, b v1.Container) bool { return !compareVolumeMounts(a.VolumeMounts, b.VolumeMounts) }), | ||||||
| 	} | 	} | ||||||
|  |  | ||||||
| 	if !c.OpConfig.EnableLazySpiloUpgrade { | 	if !c.OpConfig.EnableLazySpiloUpgrade { | ||||||
|  | @ -646,7 +668,7 @@ func compareEnv(a, b []v1.EnvVar) bool { | ||||||
| 	if len(a) != len(b) { | 	if len(a) != len(b) { | ||||||
| 		return false | 		return false | ||||||
| 	} | 	} | ||||||
| 	equal := true | 	var equal bool | ||||||
| 	for _, enva := range a { | 	for _, enva := range a { | ||||||
| 		hasmatch := false | 		hasmatch := false | ||||||
| 		for _, envb := range b { | 		for _, envb := range b { | ||||||
|  | @ -732,6 +754,27 @@ func comparePorts(a, b []v1.ContainerPort) bool { | ||||||
| 	return true | 	return true | ||||||
| } | } | ||||||
|  |  | ||||||
|  | func compareVolumeMounts(old, new []v1.VolumeMount) bool { | ||||||
|  | 	if len(old) != len(new) { | ||||||
|  | 		return false | ||||||
|  | 	} | ||||||
|  | 	for _, mount := range old { | ||||||
|  | 		if !volumeMountExists(mount, new) { | ||||||
|  | 			return false | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 	return true | ||||||
|  | } | ||||||
|  |  | ||||||
|  | func volumeMountExists(mount v1.VolumeMount, mounts []v1.VolumeMount) bool { | ||||||
|  | 	for _, m := range mounts { | ||||||
|  | 		if reflect.DeepEqual(mount, m) { | ||||||
|  | 			return true | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 	return false | ||||||
|  | } | ||||||
|  |  | ||||||
| func (c *Cluster) compareAnnotations(old, new map[string]string) (bool, string) { | func (c *Cluster) compareAnnotations(old, new map[string]string) (bool, string) { | ||||||
| 	reason := "" | 	reason := "" | ||||||
| 	ignoredAnnotations := make(map[string]bool) | 	ignoredAnnotations := make(map[string]bool) | ||||||
|  | @ -780,8 +823,8 @@ func (c *Cluster) compareServices(old, new *v1.Service) (bool, string) { | ||||||
| 		} | 		} | ||||||
| 	} | 	} | ||||||
|  |  | ||||||
| 	if changed, reason := c.compareAnnotations(old.Annotations, new.Annotations); changed { | 	if !reflect.DeepEqual(old.ObjectMeta.OwnerReferences, new.ObjectMeta.OwnerReferences) { | ||||||
| 		return !changed, "new service's annotations does not match the current one:" + reason | 		return false, "new service's owner references do not match the current ones" | ||||||
| 	} | 	} | ||||||
|  |  | ||||||
| 	return true, "" | 	return true, "" | ||||||
|  | @ -801,6 +844,12 @@ func (c *Cluster) compareLogicalBackupJob(cur, new *batchv1.CronJob) (match bool | ||||||
| 			newImage, curImage) | 			newImage, curImage) | ||||||
| 	} | 	} | ||||||
|  |  | ||||||
|  | 	newPodAnnotation := new.Spec.JobTemplate.Spec.Template.Annotations | ||||||
|  | 	curPodAnnotation := cur.Spec.JobTemplate.Spec.Template.Annotations | ||||||
|  | 	if changed, reason := c.compareAnnotations(curPodAnnotation, newPodAnnotation); changed { | ||||||
|  | 		return false, fmt.Sprintf("new job's pod template metadata annotations do not match: %s", reason) | ||||||
|  | 	} | ||||||
|  |  | ||||||
| 	newPgVersion := getPgVersion(new) | 	newPgVersion := getPgVersion(new) | ||||||
| 	curPgVersion := getPgVersion(cur) | 	curPgVersion := getPgVersion(cur) | ||||||
| 	if newPgVersion != curPgVersion { | 	if newPgVersion != curPgVersion { | ||||||
|  | @ -818,6 +867,20 @@ func (c *Cluster) compareLogicalBackupJob(cur, new *batchv1.CronJob) (match bool | ||||||
| 	return true, "" | 	return true, "" | ||||||
| } | } | ||||||
|  |  | ||||||
|  | func (c *Cluster) comparePodDisruptionBudget(cur, new *policyv1.PodDisruptionBudget) (bool, string) { | ||||||
|  | 	//TODO: improve comparison | ||||||
|  | 	if !reflect.DeepEqual(new.Spec, cur.Spec) { | ||||||
|  | 		return false, "new PDB's spec does not match the current one" | ||||||
|  | 	} | ||||||
|  | 	if !reflect.DeepEqual(new.ObjectMeta.OwnerReferences, cur.ObjectMeta.OwnerReferences) { | ||||||
|  | 		return false, "new PDB's owner references do not match the current ones" | ||||||
|  | 	} | ||||||
|  | 	if changed, reason := c.compareAnnotations(cur.Annotations, new.Annotations); changed { | ||||||
|  | 		return false, "new PDB's annotations do not match the current ones:" + reason | ||||||
|  | 	} | ||||||
|  | 	return true, "" | ||||||
|  | } | ||||||
|  |  | ||||||
| func getPgVersion(cronJob *batchv1.CronJob) string { | func getPgVersion(cronJob *batchv1.CronJob) string { | ||||||
| 	envs := cronJob.Spec.JobTemplate.Spec.Template.Spec.Containers[0].Env | 	envs := cronJob.Spec.JobTemplate.Spec.Template.Spec.Containers[0].Env | ||||||
| 	for _, env := range envs { | 	for _, env := range envs { | ||||||
|  | @ -883,7 +946,6 @@ func (c *Cluster) hasFinalizer() bool { | ||||||
| func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error { | func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error { | ||||||
| 	updateFailed := false | 	updateFailed := false | ||||||
| 	userInitFailed := false | 	userInitFailed := false | ||||||
| 	syncStatefulSet := false |  | ||||||
|  |  | ||||||
| 	c.mu.Lock() | 	c.mu.Lock() | ||||||
| 	defer c.mu.Unlock() | 	defer c.mu.Unlock() | ||||||
|  | @ -914,7 +976,6 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error { | ||||||
| 	if IsBiggerPostgresVersion(oldSpec.Spec.PostgresqlParam.PgVersion, c.GetDesiredMajorVersion()) { | 	if IsBiggerPostgresVersion(oldSpec.Spec.PostgresqlParam.PgVersion, c.GetDesiredMajorVersion()) { | ||||||
| 		c.logger.Infof("postgresql version increased (%s -> %s), depending on config manual upgrade needed", | 		c.logger.Infof("postgresql version increased (%s -> %s), depending on config manual upgrade needed", | ||||||
| 			oldSpec.Spec.PostgresqlParam.PgVersion, newSpec.Spec.PostgresqlParam.PgVersion) | 			oldSpec.Spec.PostgresqlParam.PgVersion, newSpec.Spec.PostgresqlParam.PgVersion) | ||||||
| 		syncStatefulSet = true |  | ||||||
| 	} else { | 	} else { | ||||||
| 		c.logger.Infof("postgresql major version unchanged or smaller, no changes needed") | 		c.logger.Infof("postgresql major version unchanged or smaller, no changes needed") | ||||||
| 		// sticking with old version, this will also advance GetDesiredVersion next time. | 		// sticking with old version, this will also advance GetDesiredVersion next time. | ||||||
|  | @ -922,12 +983,15 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error { | ||||||
| 	} | 	} | ||||||
|  |  | ||||||
| 	// Service | 	// Service | ||||||
| 	if !reflect.DeepEqual(c.generateService(Master, &oldSpec.Spec), c.generateService(Master, &newSpec.Spec)) || |  | ||||||
| 		!reflect.DeepEqual(c.generateService(Replica, &oldSpec.Spec), c.generateService(Replica, &newSpec.Spec)) { |  | ||||||
| 	if err := c.syncServices(); err != nil { | 	if err := c.syncServices(); err != nil { | ||||||
| 		c.logger.Errorf("could not sync services: %v", err) | 		c.logger.Errorf("could not sync services: %v", err) | ||||||
| 		updateFailed = true | 		updateFailed = true | ||||||
| 	} | 	} | ||||||
|  |  | ||||||
|  | 	// Patroni service and endpoints / config maps | ||||||
|  | 	if err := c.syncPatroniResources(); err != nil { | ||||||
|  | 		c.logger.Errorf("could not sync Patroni resources: %v", err) | ||||||
|  | 		updateFailed = true | ||||||
| 	} | 	} | ||||||
|  |  | ||||||
| 	// Users | 	// Users | ||||||
|  | @ -946,7 +1010,10 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error { | ||||||
| 		// only when streams were not specified in oldSpec but in newSpec | 		// only when streams were not specified in oldSpec but in newSpec | ||||||
| 		needStreamUser := len(oldSpec.Spec.Streams) == 0 && len(newSpec.Spec.Streams) > 0 | 		needStreamUser := len(oldSpec.Spec.Streams) == 0 && len(newSpec.Spec.Streams) > 0 | ||||||
|  |  | ||||||
| 		if !sameUsers || !sameRotatedUsers || needPoolerUser || needStreamUser { | 		annotationsChanged, _ := c.compareAnnotations(oldSpec.Annotations, newSpec.Annotations) | ||||||
|  |  | ||||||
|  | 		initUsers := !sameUsers || !sameRotatedUsers || needPoolerUser || needStreamUser | ||||||
|  | 		if initUsers { | ||||||
| 			c.logger.Debugf("initialize users") | 			c.logger.Debugf("initialize users") | ||||||
| 			if err := c.initUsers(); err != nil { | 			if err := c.initUsers(); err != nil { | ||||||
| 				c.logger.Errorf("could not init users - skipping sync of secrets and databases: %v", err) | 				c.logger.Errorf("could not init users - skipping sync of secrets and databases: %v", err) | ||||||
|  | @ -954,7 +1021,8 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error { | ||||||
| 				updateFailed = true | 				updateFailed = true | ||||||
| 				return | 				return | ||||||
| 			} | 			} | ||||||
|  | 		} | ||||||
|  | 		if initUsers || annotationsChanged { | ||||||
| 			c.logger.Debugf("syncing secrets") | 			c.logger.Debugf("syncing secrets") | ||||||
| 			//TODO: mind the secrets of the deleted/new users | 			//TODO: mind the secrets of the deleted/new users | ||||||
| 			if err := c.syncSecrets(); err != nil { | 			if err := c.syncSecrets(); err != nil { | ||||||
|  | @ -968,39 +1036,15 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error { | ||||||
| 	if c.OpConfig.StorageResizeMode != "off" { | 	if c.OpConfig.StorageResizeMode != "off" { | ||||||
| 		c.syncVolumes() | 		c.syncVolumes() | ||||||
| 	} else { | 	} else { | ||||||
| 		c.logger.Infof("Storage resize is disabled (storage_resize_mode is off). Skipping volume sync.") | 		c.logger.Infof("Storage resize is disabled (storage_resize_mode is off). Skipping volume size sync.") | ||||||
| 	} |  | ||||||
|  |  | ||||||
| 	// streams configuration |  | ||||||
| 	if len(oldSpec.Spec.Streams) == 0 && len(newSpec.Spec.Streams) > 0 { |  | ||||||
| 		syncStatefulSet = true |  | ||||||
| 	} | 	} | ||||||
|  |  | ||||||
| 	// Statefulset | 	// Statefulset | ||||||
| 	func() { | 	func() { | ||||||
| 		oldSs, err := c.generateStatefulSet(&oldSpec.Spec) |  | ||||||
| 		if err != nil { |  | ||||||
| 			c.logger.Errorf("could not generate old statefulset spec: %v", err) |  | ||||||
| 			updateFailed = true |  | ||||||
| 			return |  | ||||||
| 		} |  | ||||||
|  |  | ||||||
| 		newSs, err := c.generateStatefulSet(&newSpec.Spec) |  | ||||||
| 		if err != nil { |  | ||||||
| 			c.logger.Errorf("could not generate new statefulset spec: %v", err) |  | ||||||
| 			updateFailed = true |  | ||||||
| 			return |  | ||||||
| 		} |  | ||||||
|  |  | ||||||
| 		if syncStatefulSet || !reflect.DeepEqual(oldSs, newSs) { |  | ||||||
| 			c.logger.Debugf("syncing statefulsets") |  | ||||||
| 			syncStatefulSet = false |  | ||||||
| 			// TODO: avoid generating the StatefulSet object twice by passing it to syncStatefulSet |  | ||||||
| 		if err := c.syncStatefulSet(); err != nil { | 		if err := c.syncStatefulSet(); err != nil { | ||||||
| 			c.logger.Errorf("could not sync statefulsets: %v", err) | 			c.logger.Errorf("could not sync statefulsets: %v", err) | ||||||
| 			updateFailed = true | 			updateFailed = true | ||||||
| 		} | 		} | ||||||
| 		} |  | ||||||
| 	}() | 	}() | ||||||
|  |  | ||||||
| 	// add or remove standby_cluster section from Patroni config depending on changes in standby section | 	// add or remove standby_cluster section from Patroni config depending on changes in standby section | ||||||
|  | @ -1011,13 +1055,10 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error { | ||||||
| 	} | 	} | ||||||
|  |  | ||||||
| 	// pod disruption budget | 	// pod disruption budget | ||||||
| 	if oldSpec.Spec.NumberOfInstances != newSpec.Spec.NumberOfInstances { |  | ||||||
| 		c.logger.Debug("syncing pod disruption budgets") |  | ||||||
| 	if err := c.syncPodDisruptionBudget(true); err != nil { | 	if err := c.syncPodDisruptionBudget(true); err != nil { | ||||||
| 		c.logger.Errorf("could not sync pod disruption budget: %v", err) | 		c.logger.Errorf("could not sync pod disruption budget: %v", err) | ||||||
| 		updateFailed = true | 		updateFailed = true | ||||||
| 	} | 	} | ||||||
| 	} |  | ||||||
|  |  | ||||||
| 	// logical backup job | 	// logical backup job | ||||||
| 	func() { | 	func() { | ||||||
|  | @ -1043,11 +1084,7 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error { | ||||||
|  |  | ||||||
| 		} | 		} | ||||||
|  |  | ||||||
| 		// apply schedule changes | 		if oldSpec.Spec.EnableLogicalBackup && newSpec.Spec.EnableLogicalBackup { | ||||||
| 		// this is the only parameter of logical backups a user can overwrite in the cluster manifest |  | ||||||
| 		if (oldSpec.Spec.EnableLogicalBackup && newSpec.Spec.EnableLogicalBackup) && |  | ||||||
| 			(newSpec.Spec.LogicalBackupSchedule != oldSpec.Spec.LogicalBackupSchedule) { |  | ||||||
| 			c.logger.Debugf("updating schedule of the backup cron job") |  | ||||||
| 			if err := c.syncLogicalBackupJob(); err != nil { | 			if err := c.syncLogicalBackupJob(); err != nil { | ||||||
| 				c.logger.Errorf("could not sync logical backup jobs: %v", err) | 				c.logger.Errorf("could not sync logical backup jobs: %v", err) | ||||||
| 				updateFailed = true | 				updateFailed = true | ||||||
|  | @ -1091,7 +1128,7 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error { | ||||||
| 	} | 	} | ||||||
|  |  | ||||||
| 	// streams | 	// streams | ||||||
| 	if len(newSpec.Spec.Streams) > 0 { | 	if len(newSpec.Spec.Streams) > 0 || len(oldSpec.Spec.Streams) != len(newSpec.Spec.Streams) { | ||||||
| 		if err := c.syncStreams(); err != nil { | 		if err := c.syncStreams(); err != nil { | ||||||
| 			c.logger.Errorf("could not sync streams: %v", err) | 			c.logger.Errorf("could not sync streams: %v", err) | ||||||
| 			updateFailed = true | 			updateFailed = true | ||||||
|  | @ -1171,7 +1208,6 @@ func (c *Cluster) Delete() error { | ||||||
| 	} | 	} | ||||||
|  |  | ||||||
| 	for _, role := range []PostgresRole{Master, Replica} { | 	for _, role := range []PostgresRole{Master, Replica} { | ||||||
|  |  | ||||||
| 		if !c.patroniKubernetesUseConfigMaps() { | 		if !c.patroniKubernetesUseConfigMaps() { | ||||||
| 			if err := c.deleteEndpoint(role); err != nil { | 			if err := c.deleteEndpoint(role); err != nil { | ||||||
| 				anyErrors = true | 				anyErrors = true | ||||||
|  | @ -1187,10 +1223,10 @@ func (c *Cluster) Delete() error { | ||||||
| 		} | 		} | ||||||
| 	} | 	} | ||||||
|  |  | ||||||
| 	if err := c.deletePatroniClusterObjects(); err != nil { | 	if err := c.deletePatroniResources(); err != nil { | ||||||
| 		anyErrors = true | 		anyErrors = true | ||||||
| 		c.logger.Warningf("could not remove leftover patroni objects; %v", err) | 		c.logger.Warningf("could not delete all Patroni resources: %v", err) | ||||||
| 		c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeWarning, "Delete", "could not remove leftover patroni objects; %v", err) | 		c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeWarning, "Delete", "could not delete all Patroni resources: %v", err) | ||||||
| 	} | 	} | ||||||
|  |  | ||||||
| 	// Delete connection pooler objects anyway, even if it's not mentioned in the | 	// Delete connection pooler objects anyway, even if it's not mentioned in the | ||||||
|  | @ -1722,96 +1758,3 @@ func (c *Cluster) Lock() { | ||||||
| func (c *Cluster) Unlock() { | func (c *Cluster) Unlock() { | ||||||
| 	c.mu.Unlock() | 	c.mu.Unlock() | ||||||
| } | } | ||||||
|  |  | ||||||
| type simpleActionWithResult func() |  | ||||||
|  |  | ||||||
| type clusterObjectGet func(name string) (spec.NamespacedName, error) |  | ||||||
|  |  | ||||||
| type clusterObjectDelete func(name string) error |  | ||||||
|  |  | ||||||
| func (c *Cluster) deletePatroniClusterObjects() error { |  | ||||||
| 	// TODO: figure out how to remove leftover patroni objects in other cases |  | ||||||
| 	var actionsList []simpleActionWithResult |  | ||||||
|  |  | ||||||
| 	if !c.patroniUsesKubernetes() { |  | ||||||
| 		c.logger.Infof("not cleaning up Etcd Patroni objects on cluster delete") |  | ||||||
| 	} |  | ||||||
|  |  | ||||||
| 	actionsList = append(actionsList, c.deletePatroniClusterServices) |  | ||||||
| 	if c.patroniKubernetesUseConfigMaps() { |  | ||||||
| 		actionsList = append(actionsList, c.deletePatroniClusterConfigMaps) |  | ||||||
| 	} else { |  | ||||||
| 		actionsList = append(actionsList, c.deletePatroniClusterEndpoints) |  | ||||||
| 	} |  | ||||||
|  |  | ||||||
| 	c.logger.Debugf("removing leftover Patroni objects (endpoints / services and configmaps)") |  | ||||||
| 	for _, deleter := range actionsList { |  | ||||||
| 		deleter() |  | ||||||
| 	} |  | ||||||
| 	return nil |  | ||||||
| } |  | ||||||
|  |  | ||||||
| func deleteClusterObject( |  | ||||||
| 	get clusterObjectGet, |  | ||||||
| 	del clusterObjectDelete, |  | ||||||
| 	objType string, |  | ||||||
| 	clusterName string, |  | ||||||
| 	logger *logrus.Entry) { |  | ||||||
| 	for _, suffix := range patroniObjectSuffixes { |  | ||||||
| 		name := fmt.Sprintf("%s-%s", clusterName, suffix) |  | ||||||
|  |  | ||||||
| 		namespacedName, err := get(name) |  | ||||||
| 		if err == nil { |  | ||||||
| 			logger.Debugf("deleting %s %q", |  | ||||||
| 				objType, namespacedName) |  | ||||||
|  |  | ||||||
| 			if err = del(name); err != nil { |  | ||||||
| 				logger.Warningf("could not delete %s %q: %v", |  | ||||||
| 					objType, namespacedName, err) |  | ||||||
| 			} |  | ||||||
|  |  | ||||||
| 		} else if !k8sutil.ResourceNotFound(err) { |  | ||||||
| 			logger.Warningf("could not fetch %s %q: %v", |  | ||||||
| 				objType, namespacedName, err) |  | ||||||
| 		} |  | ||||||
| 	} |  | ||||||
| } |  | ||||||
|  |  | ||||||
| func (c *Cluster) deletePatroniClusterServices() { |  | ||||||
| 	get := func(name string) (spec.NamespacedName, error) { |  | ||||||
| 		svc, err := c.KubeClient.Services(c.Namespace).Get(context.TODO(), name, metav1.GetOptions{}) |  | ||||||
| 		return util.NameFromMeta(svc.ObjectMeta), err |  | ||||||
| 	} |  | ||||||
|  |  | ||||||
| 	deleteServiceFn := func(name string) error { |  | ||||||
| 		return c.KubeClient.Services(c.Namespace).Delete(context.TODO(), name, c.deleteOptions) |  | ||||||
| 	} |  | ||||||
|  |  | ||||||
| 	deleteClusterObject(get, deleteServiceFn, "service", c.Name, c.logger) |  | ||||||
| } |  | ||||||
|  |  | ||||||
| func (c *Cluster) deletePatroniClusterEndpoints() { |  | ||||||
| 	get := func(name string) (spec.NamespacedName, error) { |  | ||||||
| 		ep, err := c.KubeClient.Endpoints(c.Namespace).Get(context.TODO(), name, metav1.GetOptions{}) |  | ||||||
| 		return util.NameFromMeta(ep.ObjectMeta), err |  | ||||||
| 	} |  | ||||||
|  |  | ||||||
| 	deleteEndpointFn := func(name string) error { |  | ||||||
| 		return c.KubeClient.Endpoints(c.Namespace).Delete(context.TODO(), name, c.deleteOptions) |  | ||||||
| 	} |  | ||||||
|  |  | ||||||
| 	deleteClusterObject(get, deleteEndpointFn, "endpoint", c.Name, c.logger) |  | ||||||
| } |  | ||||||
|  |  | ||||||
| func (c *Cluster) deletePatroniClusterConfigMaps() { |  | ||||||
| 	get := func(name string) (spec.NamespacedName, error) { |  | ||||||
| 		cm, err := c.KubeClient.ConfigMaps(c.Namespace).Get(context.TODO(), name, metav1.GetOptions{}) |  | ||||||
| 		return util.NameFromMeta(cm.ObjectMeta), err |  | ||||||
| 	} |  | ||||||
|  |  | ||||||
| 	deleteConfigMapFn := func(name string) error { |  | ||||||
| 		return c.KubeClient.ConfigMaps(c.Namespace).Delete(context.TODO(), name, c.deleteOptions) |  | ||||||
| 	} |  | ||||||
|  |  | ||||||
| 	deleteClusterObject(get, deleteConfigMapFn, "configmap", c.Name, c.logger) |  | ||||||
| } |  | ||||||
|  |  | ||||||
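compareVolumeMounts (added above) makes the statefulset comparison order-insensitive for volume mounts, where the previous reflect.DeepEqual over the whole slice treated any reordering as a diff. A standalone check, reusing the two functions from the diff verbatim; the sample mount names are illustrative:

package main

import (
    "fmt"
    "reflect"

    v1 "k8s.io/api/core/v1"
)

func compareVolumeMounts(old, new []v1.VolumeMount) bool {
    if len(old) != len(new) {
        return false
    }
    for _, mount := range old {
        if !volumeMountExists(mount, new) {
            return false
        }
    }
    return true
}

func volumeMountExists(mount v1.VolumeMount, mounts []v1.VolumeMount) bool {
    for _, m := range mounts {
        if reflect.DeepEqual(mount, m) {
            return true
        }
    }
    return false
}

func main() {
    a := v1.VolumeMount{Name: "pgdata", MountPath: "/home/postgres/pgdata"}
    b := v1.VolumeMount{Name: "dshm", MountPath: "/dev/shm"}
    fmt.Println(compareVolumeMounts([]v1.VolumeMount{a, b}, []v1.VolumeMount{b, a})) // true: order ignored
    fmt.Println(compareVolumeMounts([]v1.VolumeMount{a}, []v1.VolumeMount{b}))       // false: different mounts
}

The set-style check matches each old mount against any new one, so exact duplicates would be compared loosely; since Kubernetes rejects duplicate mount paths within a container, that case should not occur in practice.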
|  | @ -18,9 +18,11 @@ import ( | ||||||
| 	"github.com/zalando/postgres-operator/pkg/util/config" | 	"github.com/zalando/postgres-operator/pkg/util/config" | ||||||
| 	"github.com/zalando/postgres-operator/pkg/util/constants" | 	"github.com/zalando/postgres-operator/pkg/util/constants" | ||||||
| 	"github.com/zalando/postgres-operator/pkg/util/k8sutil" | 	"github.com/zalando/postgres-operator/pkg/util/k8sutil" | ||||||
|  | 	"github.com/zalando/postgres-operator/pkg/util/patroni" | ||||||
| 	"github.com/zalando/postgres-operator/pkg/util/teams" | 	"github.com/zalando/postgres-operator/pkg/util/teams" | ||||||
| 	batchv1 "k8s.io/api/batch/v1" | 	batchv1 "k8s.io/api/batch/v1" | ||||||
| 	v1 "k8s.io/api/core/v1" | 	v1 "k8s.io/api/core/v1" | ||||||
|  | 	"k8s.io/apimachinery/pkg/api/resource" | ||||||
| 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" | 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" | ||||||
| 	"k8s.io/client-go/kubernetes/fake" | 	"k8s.io/client-go/kubernetes/fake" | ||||||
| 	"k8s.io/client-go/tools/record" | 	"k8s.io/client-go/tools/record" | ||||||
|  | @ -1361,6 +1363,23 @@ func TestCompareServices(t *testing.T) { | ||||||
| 		}, | 		}, | ||||||
| 	} | 	} | ||||||
|  |  | ||||||
|  | 	serviceWithOwnerReference := newService( | ||||||
|  | 		map[string]string{ | ||||||
|  | 			constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do", | ||||||
|  | 			constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue, | ||||||
|  | 		}, | ||||||
|  | 		v1.ServiceTypeClusterIP, | ||||||
|  | 		[]string{"128.141.0.0/16", "137.138.0.0/16"}) | ||||||
|  |  | ||||||
|  | 	ownerRef := metav1.OwnerReference{ | ||||||
|  | 		APIVersion: "acid.zalan.do/v1", | ||||||
|  | 		Controller: boolToPointer(true), | ||||||
|  | 		Kind:       "Postgresql", | ||||||
|  | 		Name:       "clstr", | ||||||
|  | 	} | ||||||
|  |  | ||||||
|  | 	serviceWithOwnerReference.ObjectMeta.OwnerReferences = append(serviceWithOwnerReference.ObjectMeta.OwnerReferences, ownerRef) | ||||||
|  |  | ||||||
| 	tests := []struct { | 	tests := []struct { | ||||||
| 		about   string | 		about   string | ||||||
| 		current *v1.Service | 		current *v1.Service | ||||||
|  | @ -1444,203 +1463,16 @@ func TestCompareServices(t *testing.T) { | ||||||
| 			reason: `new service's LoadBalancerSourceRange does not match the current one`, | 			reason: `new service's LoadBalancerSourceRange does not match the current one`, | ||||||
| 		}, | 		}, | ||||||
| 		{ | 		{ | ||||||
| 			about: "services differ on DNS annotation", | 			about: "new service doesn't have owner references", | ||||||
| 			current: newService( | 			current: newService( | ||||||
| 				map[string]string{ | 				map[string]string{ | ||||||
| 					constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do", | 					constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do", | ||||||
| 					constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue, | 					constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue, | ||||||
| 				}, | 				}, | ||||||
| 				v1.ServiceTypeLoadBalancer, | 				v1.ServiceTypeClusterIP, | ||||||
| 				[]string{"128.141.0.0/16", "137.138.0.0/16"}), |  | ||||||
| 			new: newService( |  | ||||||
| 				map[string]string{ |  | ||||||
| 					constants.ZalandoDNSNameAnnotation: "new_clstr.acid.zalan.do", |  | ||||||
| 					constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue, |  | ||||||
| 				}, |  | ||||||
| 				v1.ServiceTypeLoadBalancer, |  | ||||||
| 				[]string{"128.141.0.0/16", "137.138.0.0/16"}), | 				[]string{"128.141.0.0/16", "137.138.0.0/16"}), | ||||||
|  | 			new:   serviceWithOwnerReference, | ||||||
| 			match: false, | 			match: false, | ||||||
| 			reason: `new service's annotations does not match the current one: "external-dns.alpha.kubernetes.io/hostname" changed from "clstr.acid.zalan.do" to "new_clstr.acid.zalan.do".`, |  | ||||||
| 		}, |  | ||||||
| 		{ |  | ||||||
| 			about: "services differ on AWS ELB annotation", |  | ||||||
| 			current: newService( |  | ||||||
| 				map[string]string{ |  | ||||||
| 					constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do", |  | ||||||
| 					constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue, |  | ||||||
| 				}, |  | ||||||
| 				v1.ServiceTypeLoadBalancer, |  | ||||||
| 				[]string{"128.141.0.0/16", "137.138.0.0/16"}), |  | ||||||
| 			new: newService( |  | ||||||
| 				map[string]string{ |  | ||||||
| 					constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do", |  | ||||||
| 					constants.ElbTimeoutAnnotationName: "1800", |  | ||||||
| 				}, |  | ||||||
| 				v1.ServiceTypeLoadBalancer, |  | ||||||
| 				[]string{"128.141.0.0/16", "137.138.0.0/16"}), |  | ||||||
| 			match:  false, |  | ||||||
| 			reason: `new service's annotations does not match the current one: "service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout" changed from "3600" to "1800".`, |  | ||||||
| 		}, |  | ||||||
| 		{ |  | ||||||
| 			about: "service changes existing annotation", |  | ||||||
| 			current: newService( |  | ||||||
| 				map[string]string{ |  | ||||||
| 					constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do", |  | ||||||
| 					constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue, |  | ||||||
| 					"foo":                              "bar", |  | ||||||
| 				}, |  | ||||||
| 				v1.ServiceTypeLoadBalancer, |  | ||||||
| 				[]string{"128.141.0.0/16", "137.138.0.0/16"}), |  | ||||||
| 			new: newService( |  | ||||||
| 				map[string]string{ |  | ||||||
| 					constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do", |  | ||||||
| 					constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue, |  | ||||||
| 					"foo":                              "baz", |  | ||||||
| 				}, |  | ||||||
| 				v1.ServiceTypeLoadBalancer, |  | ||||||
| 				[]string{"128.141.0.0/16", "137.138.0.0/16"}), |  | ||||||
| 			match:  false, |  | ||||||
| 			reason: `new service's annotations does not match the current one: "foo" changed from "bar" to "baz".`, |  | ||||||
| 		}, |  | ||||||
| 		{ |  | ||||||
| 			about: "service changes multiple existing annotations", |  | ||||||
| 			current: newService( |  | ||||||
| 				map[string]string{ |  | ||||||
| 					constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do", |  | ||||||
| 					constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue, |  | ||||||
| 					"foo":                              "bar", |  | ||||||
| 					"bar":                              "foo", |  | ||||||
| 				}, |  | ||||||
| 				v1.ServiceTypeLoadBalancer, |  | ||||||
| 				[]string{"128.141.0.0/16", "137.138.0.0/16"}), |  | ||||||
| 			new: newService( |  | ||||||
| 				map[string]string{ |  | ||||||
| 					constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do", |  | ||||||
| 					constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue, |  | ||||||
| 					"foo":                              "baz", |  | ||||||
| 					"bar":                              "fooz", |  | ||||||
| 				}, |  | ||||||
| 				v1.ServiceTypeLoadBalancer, |  | ||||||
| 				[]string{"128.141.0.0/16", "137.138.0.0/16"}), |  | ||||||
| 			match: false, |  | ||||||
| 			// Test just the prefix to avoid flakiness and map sorting |  | ||||||
| 			reason: `new service's annotations does not match the current one:`, |  | ||||||
| 		}, |  | ||||||
| 		{ |  | ||||||
| 			about: "service adds a new custom annotation", |  | ||||||
| 			current: newService( |  | ||||||
| 				map[string]string{ |  | ||||||
| 					constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do", |  | ||||||
| 					constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue, |  | ||||||
| 				}, |  | ||||||
| 				v1.ServiceTypeLoadBalancer, |  | ||||||
| 				[]string{"128.141.0.0/16", "137.138.0.0/16"}), |  | ||||||
| 			new: newService( |  | ||||||
| 				map[string]string{ |  | ||||||
| 					constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do", |  | ||||||
| 					constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue, |  | ||||||
| 					"foo":                              "bar", |  | ||||||
| 				}, |  | ||||||
| 				v1.ServiceTypeLoadBalancer, |  | ||||||
| 				[]string{"128.141.0.0/16", "137.138.0.0/16"}), |  | ||||||
| 			match:  false, |  | ||||||
| 			reason: `new service's annotations does not match the current one: Added "foo" with value "bar".`, |  | ||||||
| 		}, |  | ||||||
| 		{ |  | ||||||
| 			about: "service removes a custom annotation", |  | ||||||
| 			current: newService( |  | ||||||
| 				map[string]string{ |  | ||||||
| 					constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do", |  | ||||||
| 					constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue, |  | ||||||
| 					"foo":                              "bar", |  | ||||||
| 				}, |  | ||||||
| 				v1.ServiceTypeLoadBalancer, |  | ||||||
| 				[]string{"128.141.0.0/16", "137.138.0.0/16"}), |  | ||||||
| 			new: newService( |  | ||||||
| 				map[string]string{ |  | ||||||
| 					constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do", |  | ||||||
| 					constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue, |  | ||||||
| 				}, |  | ||||||
| 				v1.ServiceTypeLoadBalancer, |  | ||||||
| 				[]string{"128.141.0.0/16", "137.138.0.0/16"}), |  | ||||||
| 			match:  false, |  | ||||||
| 			reason: `new service's annotations does not match the current one: Removed "foo".`, |  | ||||||
| 		}, |  | ||||||
| 		{ |  | ||||||
| 			about: "service removes a custom annotation and adds a new one", |  | ||||||
| 			current: newService( |  | ||||||
| 				map[string]string{ |  | ||||||
| 					constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do", |  | ||||||
| 					constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue, |  | ||||||
| 					"foo":                              "bar", |  | ||||||
| 				}, |  | ||||||
| 				v1.ServiceTypeLoadBalancer, |  | ||||||
| 				[]string{"128.141.0.0/16", "137.138.0.0/16"}), |  | ||||||
| 			new: newService( |  | ||||||
| 				map[string]string{ |  | ||||||
| 					constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do", |  | ||||||
| 					constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue, |  | ||||||
| 					"bar":                              "foo", |  | ||||||
| 				}, |  | ||||||
| 				v1.ServiceTypeLoadBalancer, |  | ||||||
| 				[]string{"128.141.0.0/16", "137.138.0.0/16"}), |  | ||||||
| 			match:  false, |  | ||||||
| 			reason: `new service's annotations does not match the current one: Removed "foo". Added "bar" with value "foo".`, |  | ||||||
| 		}, |  | ||||||
| 		{ |  | ||||||
| 			about: "service removes a custom annotation, adds a new one and change another", |  | ||||||
| 			current: newService( |  | ||||||
| 				map[string]string{ |  | ||||||
| 					constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do", |  | ||||||
| 					constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue, |  | ||||||
| 					"foo":                              "bar", |  | ||||||
| 					"zalan":                            "do", |  | ||||||
| 				}, |  | ||||||
| 				v1.ServiceTypeLoadBalancer, |  | ||||||
| 				[]string{"128.141.0.0/16", "137.138.0.0/16"}), |  | ||||||
| 			new: newService( |  | ||||||
| 				map[string]string{ |  | ||||||
| 					constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do", |  | ||||||
| 					constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue, |  | ||||||
| 					"bar":                              "foo", |  | ||||||
| 					"zalan":                            "do.com", |  | ||||||
| 				}, |  | ||||||
| 				v1.ServiceTypeLoadBalancer, |  | ||||||
| 				[]string{"128.141.0.0/16", "137.138.0.0/16"}), |  | ||||||
| 			match: false, |  | ||||||
| 			// Test just the prefix to avoid flakiness and map sorting |  | ||||||
| 			reason: `new service's annotations does not match the current one: Removed "foo".`, |  | ||||||
| 		}, |  | ||||||
| 		{ |  | ||||||
| 			about: "service add annotations", |  | ||||||
| 			current: newService( |  | ||||||
| 				map[string]string{}, |  | ||||||
| 				v1.ServiceTypeLoadBalancer, |  | ||||||
| 				[]string{"128.141.0.0/16", "137.138.0.0/16"}), |  | ||||||
| 			new: newService( |  | ||||||
| 				map[string]string{ |  | ||||||
| 					constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do", |  | ||||||
| 					constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue, |  | ||||||
| 				}, |  | ||||||
| 				v1.ServiceTypeLoadBalancer, |  | ||||||
| 				[]string{"128.141.0.0/16", "137.138.0.0/16"}), |  | ||||||
| 			match: false, |  | ||||||
| 			// Test just the prefix to avoid flakiness and map sorting |  | ||||||
| 			reason: `new service's annotations does not match the current one: Added `, |  | ||||||
| 		}, |  | ||||||
| 		{ |  | ||||||
| 			about: "ignored annotations", |  | ||||||
| 			current: newService( |  | ||||||
| 				map[string]string{}, |  | ||||||
| 				v1.ServiceTypeLoadBalancer, |  | ||||||
| 				[]string{"128.141.0.0/16", "137.138.0.0/16"}), |  | ||||||
| 			new: newService( |  | ||||||
| 				map[string]string{ |  | ||||||
| 					"k8s.v1.cni.cncf.io/network-status": "up", |  | ||||||
| 				}, |  | ||||||
| 				v1.ServiceTypeLoadBalancer, |  | ||||||
| 				[]string{"128.141.0.0/16", "137.138.0.0/16"}), |  | ||||||
| 			match: true, |  | ||||||
| 		}, | 		}, | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
|  | @ -1663,7 +1495,7 @@ func TestCompareServices(t *testing.T) { | ||||||
| 	} | 	} | ||||||
| } | } | ||||||
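The deleted cases above assert only message prefixes because Go map iteration order is random; the comparator's output follows a fixed "Removed k." / "Added k with value v." shape. A hypothetical comparator producing messages of that shape (a sketch, not the operator's actual compareAnnotations, which also has to skip ignored annotation keys and handle changed values) could look like:

// diffAnnotations is a sketch that reports removed and added keys in the
// message format the test cases expect; changed values are omitted for brevity.
func diffAnnotations(current, desired map[string]string) (bool, string) {
	reasons := []string{}
	for key := range current {
		if _, ok := desired[key]; !ok {
			reasons = append(reasons, fmt.Sprintf("Removed %q.", key))
		}
	}
	for key, value := range desired {
		if _, ok := current[key]; !ok {
			reasons = append(reasons, fmt.Sprintf("Added %q with value %q.", key, value))
		}
	}
	if len(reasons) == 0 {
		return false, ""
	}
	return true, "new service's annotations does not match the current one: " + strings.Join(reasons, " ")
}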
| 
 | 
 | ||||||
| func newCronJob(image, schedule string, vars []v1.EnvVar) *batchv1.CronJob { | func newCronJob(image, schedule string, vars []v1.EnvVar, mounts []v1.VolumeMount) *batchv1.CronJob { | ||||||
| 	cron := &batchv1.CronJob{ | 	cron := &batchv1.CronJob{ | ||||||
| 		Spec: batchv1.CronJobSpec{ | 		Spec: batchv1.CronJobSpec{ | ||||||
| 			Schedule: schedule, | 			Schedule: schedule, | ||||||
|  | @ -1676,6 +1508,37 @@ func newCronJob(image, schedule string, vars []v1.EnvVar) *batchv1.CronJob { | ||||||
| 									Name:  "logical-backup", | 									Name:  "logical-backup", | ||||||
| 									Image: image, | 									Image: image, | ||||||
| 									Env:   vars, | 									Env:   vars, | ||||||
|  | 									Ports: []v1.ContainerPort{ | ||||||
|  | 										{ | ||||||
|  | 											ContainerPort: patroni.ApiPort, | ||||||
|  | 											Protocol:      v1.ProtocolTCP, | ||||||
|  | 										}, | ||||||
|  | 										{ | ||||||
|  | 											ContainerPort: pgPort, | ||||||
|  | 											Protocol:      v1.ProtocolTCP, | ||||||
|  | 										}, | ||||||
|  | 										{ | ||||||
|  | 											ContainerPort: operatorPort, | ||||||
|  | 											Protocol:      v1.ProtocolTCP, | ||||||
|  | 										}, | ||||||
|  | 									}, | ||||||
|  | 									Resources: v1.ResourceRequirements{ | ||||||
|  | 										Requests: v1.ResourceList{ | ||||||
|  | 											v1.ResourceCPU:    resource.MustParse("100m"), | ||||||
|  | 											v1.ResourceMemory: resource.MustParse("100Mi"), | ||||||
|  | 										}, | ||||||
|  | 										Limits: v1.ResourceList{ | ||||||
|  | 											v1.ResourceCPU:    resource.MustParse("100m"), | ||||||
|  | 											v1.ResourceMemory: resource.MustParse("100Mi"), | ||||||
|  | 										}, | ||||||
|  | 									}, | ||||||
|  | 									SecurityContext: &v1.SecurityContext{ | ||||||
|  | 										AllowPrivilegeEscalation: nil, | ||||||
|  | 										Privileged:               util.False(), | ||||||
|  | 										ReadOnlyRootFilesystem:   util.False(), | ||||||
|  | 										Capabilities:             nil, | ||||||
|  | 									}, | ||||||
|  | 									VolumeMounts: mounts, | ||||||
| 								}, | 								}, | ||||||
| 							}, | 							}, | ||||||
| 						}, | 						}, | ||||||
|  | @ -1692,37 +1555,110 @@ func TestCompareLogicalBackupJob(t *testing.T) { | ||||||
| 	img1 := "registry.opensource.zalan.do/acid/logical-backup:v1.0" | 	img1 := "registry.opensource.zalan.do/acid/logical-backup:v1.0" | ||||||
| 	img2 := "registry.opensource.zalan.do/acid/logical-backup:v2.0" | 	img2 := "registry.opensource.zalan.do/acid/logical-backup:v2.0" | ||||||
| 
 | 
 | ||||||
|  | 	clientSet := fake.NewSimpleClientset() | ||||||
|  | 	acidClientSet := fakeacidv1.NewSimpleClientset() | ||||||
|  | 	namespace := "default" | ||||||
|  | 
 | ||||||
|  | 	client := k8sutil.KubernetesClient{ | ||||||
|  | 		CronJobsGetter:    clientSet.BatchV1(), | ||||||
|  | 		PostgresqlsGetter: acidClientSet.AcidV1(), | ||||||
|  | 	} | ||||||
|  | 	pg := acidv1.Postgresql{ | ||||||
|  | 		ObjectMeta: metav1.ObjectMeta{ | ||||||
|  | 			Name:      "acid-cron-cluster", | ||||||
|  | 			Namespace: namespace, | ||||||
|  | 		}, | ||||||
|  | 		Spec: acidv1.PostgresSpec{ | ||||||
|  | 			Volume: acidv1.Volume{ | ||||||
|  | 				Size: "1Gi", | ||||||
|  | 			}, | ||||||
|  | 			EnableLogicalBackup:    true, | ||||||
|  | 			LogicalBackupSchedule:  "0 0 * * *", | ||||||
|  | 			LogicalBackupRetention: "3 months", | ||||||
|  | 		}, | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	var cluster = New( | ||||||
|  | 		Config{ | ||||||
|  | 			OpConfig: config.Config{ | ||||||
|  | 				PodManagementPolicy: "ordered_ready", | ||||||
|  | 				Resources: config.Resources{ | ||||||
|  | 					ClusterLabels:        map[string]string{"application": "spilo"}, | ||||||
|  | 					ClusterNameLabel:     "cluster-name", | ||||||
|  | 					DefaultCPURequest:    "300m", | ||||||
|  | 					DefaultCPULimit:      "300m", | ||||||
|  | 					DefaultMemoryRequest: "300Mi", | ||||||
|  | 					DefaultMemoryLimit:   "300Mi", | ||||||
|  | 					PodRoleLabel:         "spilo-role", | ||||||
|  | 				}, | ||||||
|  | 				LogicalBackup: config.LogicalBackup{ | ||||||
|  | 					LogicalBackupSchedule:                 "30 00 * * *", | ||||||
|  | 					LogicalBackupDockerImage:              img1, | ||||||
|  | 					LogicalBackupJobPrefix:                "logical-backup-", | ||||||
|  | 					LogicalBackupCPURequest:               "100m", | ||||||
|  | 					LogicalBackupCPULimit:                 "100m", | ||||||
|  | 					LogicalBackupMemoryRequest:            "100Mi", | ||||||
|  | 					LogicalBackupMemoryLimit:              "100Mi", | ||||||
|  | 					LogicalBackupProvider:                 "s3", | ||||||
|  | 					LogicalBackupS3Bucket:                 "testBucket", | ||||||
|  | 					LogicalBackupS3BucketPrefix:           "spilo", | ||||||
|  | 					LogicalBackupS3Region:                 "eu-central-1", | ||||||
|  | 					LogicalBackupS3Endpoint:               "https://s3.amazonaws.com", | ||||||
|  | 					LogicalBackupS3AccessKeyID:            "access", | ||||||
|  | 					LogicalBackupS3SecretAccessKey:        "secret", | ||||||
|  | 					LogicalBackupS3SSE:                    "aws:kms", | ||||||
|  | 					LogicalBackupS3RetentionTime:          "3 months", | ||||||
|  | 					LogicalBackupCronjobEnvironmentSecret: "", | ||||||
|  | 				}, | ||||||
|  | 			}, | ||||||
|  | 		}, client, pg, logger, eventRecorder) | ||||||
|  | 
 | ||||||
|  | 	desiredCronJob, err := cluster.generateLogicalBackupJob() | ||||||
|  | 	if err != nil { | ||||||
|  | 		t.Errorf("Could not generate logical backup job with error: %v", err) | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	err = cluster.createLogicalBackupJob() | ||||||
|  | 	if err != nil { | ||||||
|  | 		t.Errorf("Could not create logical backup job with error: %v", err) | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	currentCronJob, err := cluster.KubeClient.CronJobs(namespace).Get(context.TODO(), cluster.getLogicalBackupJobName(), metav1.GetOptions{}) | ||||||
|  | 	if err != nil { | ||||||
|  | 		t.Errorf("Could not create logical backup job with error: %v", err) | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
| 	tests := []struct { | 	tests := []struct { | ||||||
| 		about   string | 		about   string | ||||||
| 		current *batchv1.CronJob | 		cronjob *batchv1.CronJob | ||||||
| 		new     *batchv1.CronJob |  | ||||||
| 		match   bool | 		match   bool | ||||||
| 		reason  string | 		reason  string | ||||||
| 	}{ | 	}{ | ||||||
| 		{ | 		{ | ||||||
| 			about:   "two equal cronjobs", | 			about:   "two equal cronjobs", | ||||||
| 			current: newCronJob(img1, "0 0 * * *", []v1.EnvVar{}), | 			cronjob: newCronJob(img1, "0 0 * * *", []v1.EnvVar{}, []v1.VolumeMount{}), | ||||||
| 			new:     newCronJob(img1, "0 0 * * *", []v1.EnvVar{}), |  | ||||||
| 			match:   true, | 			match:   true, | ||||||
| 		}, | 		}, | ||||||
| 		{ | 		{ | ||||||
| 			about:   "two cronjobs with different image", | 			about:   "two cronjobs with different image", | ||||||
| 			current: newCronJob(img1, "0 0 * * *", []v1.EnvVar{}), | 			cronjob: newCronJob(img2, "0 0 * * *", []v1.EnvVar{}, []v1.VolumeMount{}), | ||||||
| 			new:     newCronJob(img2, "0 0 * * *", []v1.EnvVar{}), |  | ||||||
| 			match:   false, | 			match:   false, | ||||||
| 			reason:  fmt.Sprintf("new job's image %q does not match the current one %q", img2, img1), | 			reason:  fmt.Sprintf("new job's image %q does not match the current one %q", img2, img1), | ||||||
| 		}, | 		}, | ||||||
| 		{ | 		{ | ||||||
| 			about:   "two cronjobs with different schedule", | 			about:   "two cronjobs with different schedule", | ||||||
| 			current: newCronJob(img1, "0 0 * * *", []v1.EnvVar{}), | 			cronjob: newCronJob(img1, "0 * * * *", []v1.EnvVar{}, []v1.VolumeMount{}), | ||||||
| 			new:     newCronJob(img1, "0 * * * *", []v1.EnvVar{}), |  | ||||||
| 			match:   false, | 			match:   false, | ||||||
| 			reason:  fmt.Sprintf("new job's schedule %q does not match the current one %q", "0 * * * *", "0 0 * * *"), | 			reason:  fmt.Sprintf("new job's schedule %q does not match the current one %q", "0 * * * *", "0 0 * * *"), | ||||||
| 		}, | 		}, | ||||||
|  | 		{ | ||||||
|  | 			about:   "two cronjobs with empty and nil volume mounts", | ||||||
|  | 			cronjob: newCronJob(img1, "0 0 * * *", []v1.EnvVar{}, nil), | ||||||
|  | 			match:   true, | ||||||
|  | 		}, | ||||||
| 		{ | 		{ | ||||||
| 			about:   "two cronjobs with different environment variables", | 			about:   "two cronjobs with different environment variables", | ||||||
| 			current: newCronJob(img1, "0 0 * * *", []v1.EnvVar{{Name: "LOGICAL_BACKUP_S3_BUCKET_PREFIX", Value: "spilo"}}), | 			cronjob: newCronJob(img1, "0 0 * * *", []v1.EnvVar{{Name: "LOGICAL_BACKUP_S3_BUCKET_PREFIX", Value: "logical-backup"}}, []v1.VolumeMount{}), | ||||||
| 			new:     newCronJob(img1, "0 0 * * *", []v1.EnvVar{{Name: "LOGICAL_BACKUP_S3_BUCKET_PREFIX", Value: "logical-backup"}}), |  | ||||||
| 			match:   false, | 			match:   false, | ||||||
| 			reason:  "logical backup container specs do not match: new cronjob container's logical-backup (index 0) environment does not match the current one", | 			reason:  "logical backup container specs do not match: new cronjob container's logical-backup (index 0) environment does not match the current one", | ||||||
| 		}, | 		}, | ||||||
|  | @ -1730,9 +1666,21 @@ func TestCompareLogicalBackupJob(t *testing.T) { | ||||||
| 
 | 
 | ||||||
| 	for _, tt := range tests { | 	for _, tt := range tests { | ||||||
| 		t.Run(tt.about, func(t *testing.T) { | 		t.Run(tt.about, func(t *testing.T) { | ||||||
| 			match, reason := cl.compareLogicalBackupJob(tt.current, tt.new) | 			desiredCronJob.Spec.Schedule = tt.cronjob.Spec.Schedule | ||||||
|  | 			desiredCronJob.Spec.JobTemplate.Spec.Template.Spec.Containers[0].Image = tt.cronjob.Spec.JobTemplate.Spec.Template.Spec.Containers[0].Image | ||||||
|  | 			desiredCronJob.Spec.JobTemplate.Spec.Template.Spec.Containers[0].VolumeMounts = tt.cronjob.Spec.JobTemplate.Spec.Template.Spec.Containers[0].VolumeMounts | ||||||
|  | 
 | ||||||
|  | 			for _, testEnv := range tt.cronjob.Spec.JobTemplate.Spec.Template.Spec.Containers[0].Env { | ||||||
|  | 				for i, env := range desiredCronJob.Spec.JobTemplate.Spec.Template.Spec.Containers[0].Env { | ||||||
|  | 					if env.Name == testEnv.Name { | ||||||
|  | 						desiredCronJob.Spec.JobTemplate.Spec.Template.Spec.Containers[0].Env[i] = testEnv | ||||||
|  | 					} | ||||||
|  | 				} | ||||||
|  | 			} | ||||||
|  | 
 | ||||||
|  | 			match, reason := cluster.compareLogicalBackupJob(currentCronJob, desiredCronJob) | ||||||
| 			if match != tt.match { | 			if match != tt.match { | ||||||
| 				t.Errorf("%s - unexpected match result %t when comparing cronjobs %q and %q", t.Name(), match, tt.current, tt.new) | 				t.Errorf("%s - unexpected match result %t when comparing cronjobs %#v and %#v", t.Name(), match, currentCronJob, desiredCronJob) | ||||||
| 			} else { | 			} else { | ||||||
| 				if !strings.HasPrefix(reason, tt.reason) { | 				if !strings.HasPrefix(reason, tt.reason) { | ||||||
| 					t.Errorf("%s - expected reason prefix %s, found %s", t.Name(), tt.reason, reason) | 					t.Errorf("%s - expected reason prefix %s, found %s", t.Name(), tt.reason, reason) | ||||||
|  | @ -1927,3 +1875,183 @@ func TestComparePorts(t *testing.T) { | ||||||
| 		}) | 		}) | ||||||
| 	} | 	} | ||||||
| } | } | ||||||
|  | 
 | ||||||
|  | func TestCompareVolumeMounts(t *testing.T) { | ||||||
|  | 	testCases := []struct { | ||||||
|  | 		name     string | ||||||
|  | 		mountsA  []v1.VolumeMount | ||||||
|  | 		mountsB  []v1.VolumeMount | ||||||
|  | 		expected bool | ||||||
|  | 	}{ | ||||||
|  | 		{ | ||||||
|  | 			name:     "empty vs nil", | ||||||
|  | 			mountsA:  []v1.VolumeMount{}, | ||||||
|  | 			mountsB:  nil, | ||||||
|  | 			expected: true, | ||||||
|  | 		}, | ||||||
|  | 		{ | ||||||
|  | 			name:     "both empty", | ||||||
|  | 			mountsA:  []v1.VolumeMount{}, | ||||||
|  | 			mountsB:  []v1.VolumeMount{}, | ||||||
|  | 			expected: true, | ||||||
|  | 		}, | ||||||
|  | 		{ | ||||||
|  | 			name: "same mounts", | ||||||
|  | 			mountsA: []v1.VolumeMount{ | ||||||
|  | 				{ | ||||||
|  | 					Name:      "data", | ||||||
|  | 					ReadOnly:  false, | ||||||
|  | 					MountPath: "/data", | ||||||
|  | 					SubPath:   "subdir", | ||||||
|  | 				}, | ||||||
|  | 			}, | ||||||
|  | 			mountsB: []v1.VolumeMount{ | ||||||
|  | 				{ | ||||||
|  | 					Name:      "data", | ||||||
|  | 					ReadOnly:  false, | ||||||
|  | 					MountPath: "/data", | ||||||
|  | 					SubPath:   "subdir", | ||||||
|  | 				}, | ||||||
|  | 			}, | ||||||
|  | 			expected: true, | ||||||
|  | 		}, | ||||||
|  | 		{ | ||||||
|  | 			name: "different mounts", | ||||||
|  | 			mountsA: []v1.VolumeMount{ | ||||||
|  | 				{ | ||||||
|  | 					Name:        "data", | ||||||
|  | 					ReadOnly:    false, | ||||||
|  | 					MountPath:   "/data", | ||||||
|  | 					SubPathExpr: "$(POD_NAME)", | ||||||
|  | 				}, | ||||||
|  | 			}, | ||||||
|  | 			mountsB: []v1.VolumeMount{ | ||||||
|  | 				{ | ||||||
|  | 					Name:      "data", | ||||||
|  | 					ReadOnly:  false, | ||||||
|  | 					MountPath: "/data", | ||||||
|  | 					SubPath:   "subdir", | ||||||
|  | 				}, | ||||||
|  | 			}, | ||||||
|  | 			expected: false, | ||||||
|  | 		}, | ||||||
|  | 		{ | ||||||
|  | 			name: "one equal mount one different", | ||||||
|  | 			mountsA: []v1.VolumeMount{ | ||||||
|  | 				{ | ||||||
|  | 					Name:      "data", | ||||||
|  | 					ReadOnly:  false, | ||||||
|  | 					MountPath: "/data", | ||||||
|  | 					SubPath:   "subdir", | ||||||
|  | 				}, | ||||||
|  | 				{ | ||||||
|  | 					Name:        "poddata", | ||||||
|  | 					ReadOnly:    false, | ||||||
|  | 					MountPath:   "/poddata", | ||||||
|  | 					SubPathExpr: "$(POD_NAME)", | ||||||
|  | 				}, | ||||||
|  | 			}, | ||||||
|  | 			mountsB: []v1.VolumeMount{ | ||||||
|  | 				{ | ||||||
|  | 					Name:      "data", | ||||||
|  | 					ReadOnly:  false, | ||||||
|  | 					MountPath: "/data", | ||||||
|  | 					SubPath:   "subdir", | ||||||
|  | 				}, | ||||||
|  | 				{ | ||||||
|  | 					Name:      "etc", | ||||||
|  | 					ReadOnly:  true, | ||||||
|  | 					MountPath: "/etc", | ||||||
|  | 				}, | ||||||
|  | 			}, | ||||||
|  | 			expected: false, | ||||||
|  | 		}, | ||||||
|  | 		{ | ||||||
|  | 			name: "same mounts, different order", | ||||||
|  | 			mountsA: []v1.VolumeMount{ | ||||||
|  | 				{ | ||||||
|  | 					Name:      "data", | ||||||
|  | 					ReadOnly:  false, | ||||||
|  | 					MountPath: "/data", | ||||||
|  | 					SubPath:   "subdir", | ||||||
|  | 				}, | ||||||
|  | 				{ | ||||||
|  | 					Name:      "etc", | ||||||
|  | 					ReadOnly:  true, | ||||||
|  | 					MountPath: "/etc", | ||||||
|  | 				}, | ||||||
|  | 			}, | ||||||
|  | 			mountsB: []v1.VolumeMount{ | ||||||
|  | 				{ | ||||||
|  | 					Name:      "etc", | ||||||
|  | 					ReadOnly:  true, | ||||||
|  | 					MountPath: "/etc", | ||||||
|  | 				}, | ||||||
|  | 				{ | ||||||
|  | 					Name:      "data", | ||||||
|  | 					ReadOnly:  false, | ||||||
|  | 					MountPath: "/data", | ||||||
|  | 					SubPath:   "subdir", | ||||||
|  | 				}, | ||||||
|  | 			}, | ||||||
|  | 			expected: true, | ||||||
|  | 		}, | ||||||
|  | 		{ | ||||||
|  | 			name: "new mounts added", | ||||||
|  | 			mountsA: []v1.VolumeMount{ | ||||||
|  | 				{ | ||||||
|  | 					Name:      "data", | ||||||
|  | 					ReadOnly:  false, | ||||||
|  | 					MountPath: "/data", | ||||||
|  | 					SubPath:   "subdir", | ||||||
|  | 				}, | ||||||
|  | 			}, | ||||||
|  | 			mountsB: []v1.VolumeMount{ | ||||||
|  | 				{ | ||||||
|  | 					Name:      "etc", | ||||||
|  | 					ReadOnly:  true, | ||||||
|  | 					MountPath: "/etc", | ||||||
|  | 				}, | ||||||
|  | 				{ | ||||||
|  | 					Name:      "data", | ||||||
|  | 					ReadOnly:  false, | ||||||
|  | 					MountPath: "/data", | ||||||
|  | 					SubPath:   "subdir", | ||||||
|  | 				}, | ||||||
|  | 			}, | ||||||
|  | 			expected: false, | ||||||
|  | 		}, | ||||||
|  | 		{ | ||||||
|  | 			name: "one mount removed", | ||||||
|  | 			mountsA: []v1.VolumeMount{ | ||||||
|  | 				{ | ||||||
|  | 					Name:      "data", | ||||||
|  | 					ReadOnly:  false, | ||||||
|  | 					MountPath: "/data", | ||||||
|  | 					SubPath:   "subdir", | ||||||
|  | 				}, | ||||||
|  | 				{ | ||||||
|  | 					Name:      "etc", | ||||||
|  | 					ReadOnly:  true, | ||||||
|  | 					MountPath: "/etc", | ||||||
|  | 				}, | ||||||
|  | 			}, | ||||||
|  | 			mountsB: []v1.VolumeMount{ | ||||||
|  | 				{ | ||||||
|  | 					Name:      "data", | ||||||
|  | 					ReadOnly:  false, | ||||||
|  | 					MountPath: "/data", | ||||||
|  | 					SubPath:   "subdir", | ||||||
|  | 				}, | ||||||
|  | 			}, | ||||||
|  | 			expected: false, | ||||||
|  | 		}, | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	for _, tt := range testCases { | ||||||
|  | 		t.Run(tt.name, func(t *testing.T) { | ||||||
|  | 			got := compareVolumeMounts(tt.mountsA, tt.mountsB) | ||||||
|  | 			assert.Equal(t, tt.expected, got) | ||||||
|  | 		}) | ||||||
|  | 	} | ||||||
|  | } | ||||||
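Taken together, the cases above pin down the semantics: nil and empty slices are equivalent, order is ignored, and any added, removed or altered mount fails the comparison. A minimal order-insensitive implementation consistent with those expectations (a sketch assuming plain reflect.DeepEqual equality per mount is sufficient; not necessarily the operator's actual helper):

// compareVolumeMountsSketch treats the two slices as sets of mounts.
// A quadratic scan is acceptable for the handful of mounts a container carries.
func compareVolumeMountsSketch(current, desired []v1.VolumeMount) bool {
	if len(current) != len(desired) {
		return false
	}
	for _, mount := range current {
		found := false
		for _, candidate := range desired {
			if reflect.DeepEqual(mount, candidate) {
				found = true
				break
			}
		}
		if !found {
			return false
		}
	}
	return true
}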
|  |  | ||||||
|  | @ -3,6 +3,7 @@ package cluster | ||||||
| import ( | import ( | ||||||
| 	"context" | 	"context" | ||||||
| 	"fmt" | 	"fmt" | ||||||
|  | 	"reflect" | ||||||
| 	"strings" | 	"strings" | ||||||
| 	"time" | 	"time" | ||||||
| 
 | 
 | ||||||
|  | @ -654,7 +655,7 @@ func (c *Cluster) deleteConnectionPoolerSecret() (err error) { | ||||||
| 	if err != nil { | 	if err != nil { | ||||||
| 		c.logger.Debugf("could not get connection pooler secret %s: %v", secretName, err) | 		c.logger.Debugf("could not get connection pooler secret %s: %v", secretName, err) | ||||||
| 	} else { | 	} else { | ||||||
| 		if err = c.deleteSecret(secret.UID, *secret); err != nil { | 		if err = c.deleteSecret(secret.UID); err != nil { | ||||||
| 			return fmt.Errorf("could not delete pooler secret: %v", err) | 			return fmt.Errorf("could not delete pooler secret: %v", err) | ||||||
| 		} | 		} | ||||||
| 	} | 	} | ||||||
|  | @ -663,11 +664,19 @@ func (c *Cluster) deleteConnectionPoolerSecret() (err error) { | ||||||
| 
 | 
 | ||||||
| // Perform actual patching of a connection pooler deployment, assuming that all | // Perform actual patching of a connection pooler deployment, assuming that all | ||||||
| // the checks were already done before. | // the checks were already done before. | ||||||
| func updateConnectionPoolerDeployment(KubeClient k8sutil.KubernetesClient, newDeployment *appsv1.Deployment) (*appsv1.Deployment, error) { | func updateConnectionPoolerDeployment(KubeClient k8sutil.KubernetesClient, newDeployment *appsv1.Deployment, doUpdate bool) (*appsv1.Deployment, error) { | ||||||
| 	if newDeployment == nil { | 	if newDeployment == nil { | ||||||
| 		return nil, fmt.Errorf("there is no connection pooler in the cluster") | 		return nil, fmt.Errorf("there is no connection pooler in the cluster") | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
|  | 	if doUpdate { | ||||||
|  | 		updatedDeployment, err := KubeClient.Deployments(newDeployment.Namespace).Update(context.TODO(), newDeployment, metav1.UpdateOptions{}) | ||||||
|  | 		if err != nil { | ||||||
|  | 			return nil, fmt.Errorf("could not update pooler deployment to match desired state: %v", err) | ||||||
|  | 		} | ||||||
|  | 		return updatedDeployment, nil | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
| 	patchData, err := specPatch(newDeployment.Spec) | 	patchData, err := specPatch(newDeployment.Spec) | ||||||
| 	if err != nil { | 	if err != nil { | ||||||
| 		return nil, fmt.Errorf("could not form patch for the connection pooler deployment: %v", err) | 		return nil, fmt.Errorf("could not form patch for the connection pooler deployment: %v", err) | ||||||
|  | @ -691,8 +700,8 @@ func updateConnectionPoolerDeployment(KubeClient k8sutil.KubernetesClient, newDe | ||||||
| 	return deployment, nil | 	return deployment, nil | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| // updateConnectionPoolerAnnotations updates the annotations of connection pooler deployment | // patchConnectionPoolerAnnotations updates the annotations of connection pooler deployment | ||||||
| func updateConnectionPoolerAnnotations(KubeClient k8sutil.KubernetesClient, deployment *appsv1.Deployment, annotations map[string]string) (*appsv1.Deployment, error) { | func patchConnectionPoolerAnnotations(KubeClient k8sutil.KubernetesClient, deployment *appsv1.Deployment, annotations map[string]string) (*appsv1.Deployment, error) { | ||||||
| 	patchData, err := metaAnnotationsPatch(annotations) | 	patchData, err := metaAnnotationsPatch(annotations) | ||||||
| 	if err != nil { | 	if err != nil { | ||||||
| 		return nil, fmt.Errorf("could not form patch for the connection pooler deployment metadata: %v", err) | 		return nil, fmt.Errorf("could not form patch for the connection pooler deployment metadata: %v", err) | ||||||
|  | @ -751,6 +760,7 @@ func (c *Cluster) needSyncConnectionPoolerDefaults(Config *Config, spec *acidv1. | ||||||
| 	if spec == nil { | 	if spec == nil { | ||||||
| 		spec = &acidv1.ConnectionPooler{} | 		spec = &acidv1.ConnectionPooler{} | ||||||
| 	} | 	} | ||||||
|  | 
 | ||||||
| 	if spec.NumberOfInstances == nil && | 	if spec.NumberOfInstances == nil && | ||||||
| 		*deployment.Spec.Replicas != *config.NumberOfInstances { | 		*deployment.Spec.Replicas != *config.NumberOfInstances { | ||||||
| 
 | 
 | ||||||
|  | @ -1014,18 +1024,30 @@ func (c *Cluster) syncConnectionPoolerWorker(oldSpec, newSpec *acidv1.Postgresql | ||||||
| 			newConnectionPooler = &acidv1.ConnectionPooler{} | 			newConnectionPooler = &acidv1.ConnectionPooler{} | ||||||
| 		} | 		} | ||||||
| 
 | 
 | ||||||
| 		var specSync bool | 		var specSync, updateDeployment bool | ||||||
| 		var specReason []string | 		var specReason []string | ||||||
| 
 | 
 | ||||||
|  | 		if !reflect.DeepEqual(deployment.ObjectMeta.OwnerReferences, c.ownerReferences()) { | ||||||
|  | 			c.logger.Info("new connection pooler owner references do not match the current ones") | ||||||
|  | 			updateDeployment = true | ||||||
|  | 		} | ||||||
|  | 
 | ||||||
| 		if oldSpec != nil { | 		if oldSpec != nil { | ||||||
| 			specSync, specReason = needSyncConnectionPoolerSpecs(oldConnectionPooler, newConnectionPooler, c.logger) | 			specSync, specReason = needSyncConnectionPoolerSpecs(oldConnectionPooler, newConnectionPooler, c.logger) | ||||||
| 			syncReason = append(syncReason, specReason...) | 			syncReason = append(syncReason, specReason...) | ||||||
| 		} | 		} | ||||||
| 
 | 
 | ||||||
|  | 		newPodAnnotations := c.annotationsSet(c.generatePodAnnotations(&c.Spec)) | ||||||
|  | 		if changed, reason := c.compareAnnotations(deployment.Spec.Template.Annotations, newPodAnnotations); changed { | ||||||
|  | 			specSync = true | ||||||
|  | 			syncReason = append(syncReason, []string{"new connection pooler's pod template annotations do not match the current ones: " + reason}...) | ||||||
|  | 			deployment.Spec.Template.Annotations = newPodAnnotations | ||||||
|  | 		} | ||||||
|  | 
 | ||||||
| 		defaultsSync, defaultsReason := c.needSyncConnectionPoolerDefaults(&c.Config, newConnectionPooler, deployment) | 		defaultsSync, defaultsReason := c.needSyncConnectionPoolerDefaults(&c.Config, newConnectionPooler, deployment) | ||||||
| 		syncReason = append(syncReason, defaultsReason...) | 		syncReason = append(syncReason, defaultsReason...) | ||||||
| 
 | 
 | ||||||
| 		if specSync || defaultsSync { | 		if specSync || defaultsSync || updateDeployment { | ||||||
| 			c.logger.Infof("update connection pooler deployment %s, reason: %+v", | 			c.logger.Infof("update connection pooler deployment %s, reason: %+v", | ||||||
| 				c.connectionPoolerName(role), syncReason) | 				c.connectionPoolerName(role), syncReason) | ||||||
| 			newDeployment, err = c.generateConnectionPoolerDeployment(c.ConnectionPooler[role]) | 			newDeployment, err = c.generateConnectionPoolerDeployment(c.ConnectionPooler[role]) | ||||||
|  | @ -1033,23 +1055,23 @@ func (c *Cluster) syncConnectionPoolerWorker(oldSpec, newSpec *acidv1.Postgresql | ||||||
| 				return syncReason, fmt.Errorf("could not generate deployment for connection pooler: %v", err) | 				return syncReason, fmt.Errorf("could not generate deployment for connection pooler: %v", err) | ||||||
| 			} | 			} | ||||||
| 
 | 
 | ||||||
| 			deployment, err = updateConnectionPoolerDeployment(c.KubeClient, newDeployment) | 			deployment, err = updateConnectionPoolerDeployment(c.KubeClient, newDeployment, updateDeployment) | ||||||
| 
 | 
 | ||||||
| 			if err != nil { | 			if err != nil { | ||||||
| 				return syncReason, err | 				return syncReason, err | ||||||
| 			} | 			} | ||||||
| 			c.ConnectionPooler[role].Deployment = deployment | 			c.ConnectionPooler[role].Deployment = deployment | ||||||
| 		} | 		} | ||||||
| 	} |  | ||||||
| 
 | 
 | ||||||
| 	newAnnotations := c.AnnotationsToPropagate(c.annotationsSet(c.ConnectionPooler[role].Deployment.Annotations)) | 		newAnnotations := c.AnnotationsToPropagate(c.annotationsSet(nil)) // including the downscaling annotations | ||||||
| 	if newAnnotations != nil { | 		if changed, _ := c.compareAnnotations(deployment.Annotations, newAnnotations); changed { | ||||||
| 		deployment, err = updateConnectionPoolerAnnotations(c.KubeClient, c.ConnectionPooler[role].Deployment, newAnnotations) | 			deployment, err = patchConnectionPoolerAnnotations(c.KubeClient, deployment, newAnnotations) | ||||||
| 			if err != nil { | 			if err != nil { | ||||||
| 				return nil, err | 				return nil, err | ||||||
| 			} | 			} | ||||||
| 			c.ConnectionPooler[role].Deployment = deployment | 			c.ConnectionPooler[role].Deployment = deployment | ||||||
| 		} | 		} | ||||||
|  | 	} | ||||||
| 
 | 
 | ||||||
| 	// check if pooler pods must be replaced due to secret update
 | 	// check if pooler pods must be replaced due to secret update
 | ||||||
| 	listOptions := metav1.ListOptions{ | 	listOptions := metav1.ListOptions{ | ||||||
|  | @ -1076,22 +1098,26 @@ func (c *Cluster) syncConnectionPoolerWorker(oldSpec, newSpec *acidv1.Postgresql | ||||||
| 			if err != nil { | 			if err != nil { | ||||||
| 				return nil, fmt.Errorf("could not delete pooler pod: %v", err) | 				return nil, fmt.Errorf("could not delete pooler pod: %v", err) | ||||||
| 			} | 			} | ||||||
|  | 		} else if changed, _ := c.compareAnnotations(pod.Annotations, deployment.Spec.Template.Annotations); changed { | ||||||
|  | 			patchData, err := metaAnnotationsPatch(deployment.Spec.Template.Annotations) | ||||||
|  | 			if err != nil { | ||||||
|  | 				return nil, fmt.Errorf("could not form patch for pooler's pod annotations: %v", err) | ||||||
|  | 			} | ||||||
|  | 			_, err = c.KubeClient.Pods(pod.Namespace).Patch(context.TODO(), pod.Name, types.MergePatchType, []byte(patchData), metav1.PatchOptions{}) | ||||||
|  | 			if err != nil { | ||||||
|  | 				return nil, fmt.Errorf("could not patch annotations for pooler's pod %q: %v", pod.Name, err) | ||||||
|  | 			} | ||||||
| 		} | 		} | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	if service, err = c.KubeClient.Services(c.Namespace).Get(context.TODO(), c.connectionPoolerName(role), metav1.GetOptions{}); err == nil { | 	if service, err = c.KubeClient.Services(c.Namespace).Get(context.TODO(), c.connectionPoolerName(role), metav1.GetOptions{}); err == nil { | ||||||
| 		c.ConnectionPooler[role].Service = service | 		c.ConnectionPooler[role].Service = service | ||||||
| 		desiredSvc := c.generateConnectionPoolerService(c.ConnectionPooler[role]) | 		desiredSvc := c.generateConnectionPoolerService(c.ConnectionPooler[role]) | ||||||
| 		if match, reason := c.compareServices(service, desiredSvc); !match { |  | ||||||
| 			syncReason = append(syncReason, reason) |  | ||||||
| 			c.logServiceChanges(role, service, desiredSvc, false, reason) |  | ||||||
| 		newService, err = c.updateService(role, service, desiredSvc) | 		newService, err = c.updateService(role, service, desiredSvc) | ||||||
| 		if err != nil { | 		if err != nil { | ||||||
| 			return syncReason, fmt.Errorf("could not update %s service to match desired state: %v", role, err) | 			return syncReason, fmt.Errorf("could not update %s service to match desired state: %v", role, err) | ||||||
| 		} | 		} | ||||||
| 		c.ConnectionPooler[role].Service = newService | 		c.ConnectionPooler[role].Service = newService | ||||||
| 			c.logger.Infof("%s service %q is in the desired state now", role, util.NameFromMeta(desiredSvc.ObjectMeta)) |  | ||||||
| 		} |  | ||||||
| 		return NoSync, nil | 		return NoSync, nil | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
|  |  | ||||||
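A note on the pod-annotation patch used in the sync loop above: with types.MergePatchType the payload only needs the fields being changed, so metaAnnotationsPatch presumably emits a JSON merge patch scoped to metadata.annotations. A minimal sketch of such a payload builder (the real helper's output format is an assumption here):

// annotationsMergePatch builds a JSON merge patch that touches nothing but
// metadata.annotations, suitable for the Pods().Patch call shown above.
func annotationsMergePatch(annotations map[string]string) ([]byte, error) {
	return json.Marshal(map[string]interface{}{
		"metadata": map[string]interface{}{
			"annotations": annotations,
		},
	})
}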
|  | @ -1077,6 +1077,9 @@ func TestConnectionPoolerServiceSpec(t *testing.T) { | ||||||
| 					ConnectionPoolerDefaultMemoryRequest: "100Mi", | 					ConnectionPoolerDefaultMemoryRequest: "100Mi", | ||||||
| 					ConnectionPoolerDefaultMemoryLimit:   "100Mi", | 					ConnectionPoolerDefaultMemoryLimit:   "100Mi", | ||||||
| 				}, | 				}, | ||||||
|  | 				Resources: config.Resources{ | ||||||
|  | 					EnableOwnerReferences: util.True(), | ||||||
|  | 				}, | ||||||
| 			}, | 			}, | ||||||
| 		}, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger, eventRecorder) | 		}, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger, eventRecorder) | ||||||
| 	cluster.Statefulset = &appsv1.StatefulSet{ | 	cluster.Statefulset = &appsv1.StatefulSet{ | ||||||
|  |  | ||||||
|  | @ -46,12 +46,15 @@ const ( | ||||||
| 	createExtensionSQL      = `CREATE EXTENSION IF NOT EXISTS "%s" SCHEMA "%s"` | 	createExtensionSQL      = `CREATE EXTENSION IF NOT EXISTS "%s" SCHEMA "%s"` | ||||||
| 	alterExtensionSQL       = `ALTER EXTENSION "%s" SET SCHEMA "%s"` | 	alterExtensionSQL       = `ALTER EXTENSION "%s" SET SCHEMA "%s"` | ||||||
| 
 | 
 | ||||||
| 	getPublicationsSQL = `SELECT p.pubname, string_agg(pt.schemaname || '.' || pt.tablename, ', ' ORDER BY pt.schemaname, pt.tablename) | 	getPublicationsSQL = `SELECT p.pubname, COALESCE(string_agg(pt.schemaname || '.' || pt.tablename, ', ' ORDER BY pt.schemaname, pt.tablename), '') AS pubtables | ||||||
| 	        FROM pg_publication p | 	        FROM pg_publication p | ||||||
| 			LEFT JOIN pg_publication_tables pt ON pt.pubname = p.pubname | 			LEFT JOIN pg_publication_tables pt ON pt.pubname = p.pubname | ||||||
|  | 			WHERE p.pubowner = 'postgres'::regrole | ||||||
|  | 			AND p.pubname LIKE 'fes_%' | ||||||
| 			GROUP BY p.pubname;` | 			GROUP BY p.pubname;` | ||||||
| 	createPublicationSQL = `CREATE PUBLICATION "%s" FOR TABLE %s WITH (publish = 'insert, update');` | 	createPublicationSQL = `CREATE PUBLICATION "%s" FOR TABLE %s WITH (publish = 'insert, update');` | ||||||
| 	alterPublicationSQL  = `ALTER PUBLICATION "%s" SET TABLE %s;` | 	alterPublicationSQL  = `ALTER PUBLICATION "%s" SET TABLE %s;` | ||||||
|  | 	dropPublicationSQL   = `DROP PUBLICATION "%s";` | ||||||
| 
 | 
 | ||||||
| 	globalDefaultPrivilegesSQL = `SET ROLE TO "%s"; | 	globalDefaultPrivilegesSQL = `SET ROLE TO "%s"; | ||||||
| 			ALTER DEFAULT PRIVILEGES GRANT USAGE ON SCHEMAS TO "%s","%s"; | 			ALTER DEFAULT PRIVILEGES GRANT USAGE ON SCHEMAS TO "%s","%s"; | ||||||
|  | @ -628,6 +631,14 @@ func (c *Cluster) getPublications() (publications map[string]string, err error) | ||||||
| 	return dbPublications, err | 	return dbPublications, err | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
|  | func (c *Cluster) executeDropPublication(pubName string) error { | ||||||
|  | 	c.logger.Infof("dropping publication %q", pubName) | ||||||
|  | 	if _, err := c.pgDb.Exec(fmt.Sprintf(dropPublicationSQL, pubName)); err != nil { | ||||||
|  | 		return fmt.Errorf("could not execute drop publication: %v", err) | ||||||
|  | 	} | ||||||
|  | 	return nil | ||||||
|  | } | ||||||
|  | 
 | ||||||
| // executeCreatePublication creates a new publication for given tables | // executeCreatePublication creates a new publication for given tables | ||||||
| // The caller is responsible for opening and closing the database connection. | // The caller is responsible for opening and closing the database connection. | ||||||
| func (c *Cluster) executeCreatePublication(pubName, tableList string) error { | func (c *Cluster) executeCreatePublication(pubName, tableList string) error { | ||||||
|  |  | ||||||
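With executeDropPublication added, create, alter and drop are all available, which is what a map-based reconciliation against getPublications needs. A hypothetical outline of that flow, assuming an executeAlterPublication counterpart to the helpers shown (an illustration, not the operator's literal sync code):

// reconcilePublications compares desired vs. actual publications, keyed by
// publication name with comma-separated table lists as values.
func reconcilePublications(c *Cluster, desired, actual map[string]string) error {
	for name, tables := range desired {
		if actualTables, exists := actual[name]; !exists {
			if err := c.executeCreatePublication(name, tables); err != nil {
				return err
			}
		} else if actualTables != tables {
			if err := c.executeAlterPublication(name, tables); err != nil {
				return err
			}
		}
	}
	for name := range actual {
		if _, exists := desired[name]; !exists {
			if err := c.executeDropPublication(name); err != nil {
				return err
			}
		}
	}
	return nil
}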
|  | @ -59,7 +59,7 @@ func (c *Cluster) ExecCommand(podName *spec.NamespacedName, command ...string) ( | ||||||
| 		return "", fmt.Errorf("failed to init executor: %v", err) | 		return "", fmt.Errorf("failed to init executor: %v", err) | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	err = exec.Stream(remotecommand.StreamOptions{ | 	err = exec.StreamWithContext(context.TODO(), remotecommand.StreamOptions{ | ||||||
| 		Stdout: &execOut, | 		Stdout: &execOut, | ||||||
| 		Stderr: &execErr, | 		Stderr: &execErr, | ||||||
| 		Tty:    false, | 		Tty:    false, | ||||||
|  |  | ||||||
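Unlike the deprecated Stream, StreamWithContext lets the caller bound execution; the call above passes context.TODO(), but a timeout could be threaded through as in this sketch (the 30-second figure is illustrative, not an operator setting):

// Cancel the remote command if it runs longer than 30 seconds.
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()

err = exec.StreamWithContext(ctx, remotecommand.StreamOptions{
	Stdout: &execOut,
	Stderr: &execErr,
	Tty:    false,
})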
|  | @ -47,11 +47,6 @@ const ( | ||||||
| 	operatorPort                   = 8080 | 	operatorPort                   = 8080 | ||||||
| ) | ) | ||||||
| 
 | 
 | ||||||
| type pgUser struct { |  | ||||||
| 	Password string   `json:"password"` |  | ||||||
| 	Options  []string `json:"options"` |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| type patroniDCS struct { | type patroniDCS struct { | ||||||
| 	TTL                      uint32                       `json:"ttl,omitempty"` | 	TTL                      uint32                       `json:"ttl,omitempty"` | ||||||
| 	LoopWait                 uint32                       `json:"loop_wait,omitempty"` | 	LoopWait                 uint32                       `json:"loop_wait,omitempty"` | ||||||
|  | @ -79,19 +74,13 @@ func (c *Cluster) statefulSetName() string { | ||||||
| 	return c.Name | 	return c.Name | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| func (c *Cluster) endpointName(role PostgresRole) string { |  | ||||||
| 	name := c.Name |  | ||||||
| 	if role == Replica { |  | ||||||
| 		name = fmt.Sprintf("%s-%s", name, "repl") |  | ||||||
| 	} |  | ||||||
| 
 |  | ||||||
| 	return name |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| func (c *Cluster) serviceName(role PostgresRole) string { | func (c *Cluster) serviceName(role PostgresRole) string { | ||||||
| 	name := c.Name | 	name := c.Name | ||||||
| 	if role == Replica { | 	switch role { | ||||||
|  | 	case Replica: | ||||||
| 		name = fmt.Sprintf("%s-%s", name, "repl") | 		name = fmt.Sprintf("%s-%s", name, "repl") | ||||||
|  | 	case Patroni: | ||||||
|  | 		name = fmt.Sprintf("%s-%s", name, "config") | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	return name | 	return name | ||||||
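Folding the Patroni config endpoint into serviceName makes the naming scheme explicit. Assuming a cluster named "mycluster", the switch above yields:

// serviceName(Master)  == "mycluster"          (default case)
// serviceName(Replica) == "mycluster-repl"
// serviceName(Patroni) == "mycluster-config"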
|  | @ -892,7 +881,7 @@ func (c *Cluster) generatePodTemplate( | ||||||
| 		addSecretVolume(&podSpec, additionalSecretMount, additionalSecretMountPath) | 		addSecretVolume(&podSpec, additionalSecretMount, additionalSecretMountPath) | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	if additionalVolumes != nil { | 	if len(additionalVolumes) > 0 { | ||||||
| 		c.addAdditionalVolumes(&podSpec, additionalVolumes) | 		c.addAdditionalVolumes(&podSpec, additionalVolumes) | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
|  | @ -1534,6 +1523,7 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef | ||||||
| 			Namespace:       c.Namespace, | 			Namespace:       c.Namespace, | ||||||
| 			Labels:          c.labelsSet(true), | 			Labels:          c.labelsSet(true), | ||||||
| 			Annotations:     c.AnnotationsToPropagate(c.annotationsSet(nil)), | 			Annotations:     c.AnnotationsToPropagate(c.annotationsSet(nil)), | ||||||
|  | 			OwnerReferences: c.ownerReferences(), | ||||||
| 		}, | 		}, | ||||||
| 		Spec: appsv1.StatefulSetSpec{ | 		Spec: appsv1.StatefulSetSpec{ | ||||||
| 			Replicas:                             &numberOfInstances, | 			Replicas:                             &numberOfInstances, | ||||||
|  | @ -1929,12 +1919,21 @@ func (c *Cluster) generateSingleUserSecret(namespace string, pgUser spec.PgUser) | ||||||
| 		lbls = c.connectionPoolerLabels("", false).MatchLabels | 		lbls = c.connectionPoolerLabels("", false).MatchLabels | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
|  | 	// if the secret lives in another namespace we cannot set ownerReferences | ||||||
|  | 	var ownerReferences []metav1.OwnerReference | ||||||
|  | 	if c.Config.OpConfig.EnableCrossNamespaceSecret && strings.Contains(username, ".") { | ||||||
|  | 		ownerReferences = nil | ||||||
|  | 	} else { | ||||||
|  | 		ownerReferences = c.ownerReferences() | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
| 	secret := v1.Secret{ | 	secret := v1.Secret{ | ||||||
| 		ObjectMeta: metav1.ObjectMeta{ | 		ObjectMeta: metav1.ObjectMeta{ | ||||||
| 			Name:            c.credentialSecretName(username), | 			Name:            c.credentialSecretName(username), | ||||||
| 			Namespace:       pgUser.Namespace, | 			Namespace:       pgUser.Namespace, | ||||||
| 			Labels:          lbls, | 			Labels:          lbls, | ||||||
| 			Annotations:     c.annotationsSet(nil), | 			Annotations:     c.annotationsSet(nil), | ||||||
|  | 			OwnerReferences: ownerReferences, | ||||||
| 		}, | 		}, | ||||||
| 		Type: v1.SecretTypeOpaque, | 		Type: v1.SecretTypeOpaque, | ||||||
| 		Data: map[string][]byte{ | 		Data: map[string][]byte{ | ||||||
|  | @ -1996,6 +1995,7 @@ func (c *Cluster) generateService(role PostgresRole, spec *acidv1.PostgresSpec) | ||||||
| 			Namespace:       c.Namespace, | 			Namespace:       c.Namespace, | ||||||
| 			Labels:          c.roleLabelsSet(true, role), | 			Labels:          c.roleLabelsSet(true, role), | ||||||
| 			Annotations:     c.annotationsSet(c.generateServiceAnnotations(role, spec)), | 			Annotations:     c.annotationsSet(c.generateServiceAnnotations(role, spec)), | ||||||
|  | 			OwnerReferences: c.ownerReferences(), | ||||||
| 		}, | 		}, | ||||||
| 		Spec: serviceSpec, | 		Spec: serviceSpec, | ||||||
| 	} | 	} | ||||||
|  | @ -2061,9 +2061,11 @@ func (c *Cluster) getCustomServiceAnnotations(role PostgresRole, spec *acidv1.Po | ||||||
| func (c *Cluster) generateEndpoint(role PostgresRole, subsets []v1.EndpointSubset) *v1.Endpoints { | func (c *Cluster) generateEndpoint(role PostgresRole, subsets []v1.EndpointSubset) *v1.Endpoints { | ||||||
| 	endpoints := &v1.Endpoints{ | 	endpoints := &v1.Endpoints{ | ||||||
| 		ObjectMeta: metav1.ObjectMeta{ | 		ObjectMeta: metav1.ObjectMeta{ | ||||||
| 			Name:      c.endpointName(role), | 			Name:            c.serviceName(role), | ||||||
| 			Namespace:       c.Namespace, | 			Namespace:       c.Namespace, | ||||||
|  | 			Annotations:     c.annotationsSet(nil), | ||||||
| 			Labels:          c.roleLabelsSet(true, role), | 			Labels:          c.roleLabelsSet(true, role), | ||||||
|  | 			OwnerReferences: c.ownerReferences(), | ||||||
| 		}, | 		}, | ||||||
| 	} | 	} | ||||||
| 	if len(subsets) > 0 { | 	if len(subsets) > 0 { | ||||||
|  | @ -2228,6 +2230,7 @@ func (c *Cluster) generatePodDisruptionBudget() *policyv1.PodDisruptionBudget { | ||||||
| 			Namespace:       c.Namespace, | 			Namespace:       c.Namespace, | ||||||
| 			Labels:          c.labelsSet(true), | 			Labels:          c.labelsSet(true), | ||||||
| 			Annotations:     c.annotationsSet(nil), | 			Annotations:     c.annotationsSet(nil), | ||||||
|  | 			OwnerReferences: c.ownerReferences(), | ||||||
| 		}, | 		}, | ||||||
| 		Spec: policyv1.PodDisruptionBudgetSpec{ | 		Spec: policyv1.PodDisruptionBudgetSpec{ | ||||||
| 			MinAvailable: &minAvailable, | 			MinAvailable: &minAvailable, | ||||||
|  | @ -2364,6 +2367,7 @@ func (c *Cluster) generateLogicalBackupJob() (*batchv1.CronJob, error) { | ||||||
| 			Namespace:       c.Namespace, | 			Namespace:       c.Namespace, | ||||||
| 			Labels:          c.labelsSet(true), | 			Labels:          c.labelsSet(true), | ||||||
| 			Annotations:     c.annotationsSet(nil), | 			Annotations:     c.annotationsSet(nil), | ||||||
|  | 			OwnerReferences: c.ownerReferences(), | ||||||
| 		}, | 		}, | ||||||
| 		Spec: batchv1.CronJobSpec{ | 		Spec: batchv1.CronJobSpec{ | ||||||
| 			Schedule:          schedule, | 			Schedule:          schedule, | ||||||
|  | @ -2477,7 +2481,9 @@ func (c *Cluster) generateLogicalBackupPodEnvVars() []v1.EnvVar { | ||||||
| 		} | 		} | ||||||
| 
 | 
 | ||||||
| 	case "gcs": | 	case "gcs": | ||||||
|  | 		if c.OpConfig.LogicalBackup.LogicalBackupGoogleApplicationCredentials != "" { | ||||||
| 			envVars = append(envVars, v1.EnvVar{Name: "LOGICAL_BACKUP_GOOGLE_APPLICATION_CREDENTIALS", Value: c.OpConfig.LogicalBackup.LogicalBackupGoogleApplicationCredentials}) | 			envVars = append(envVars, v1.EnvVar{Name: "LOGICAL_BACKUP_GOOGLE_APPLICATION_CREDENTIALS", Value: c.OpConfig.LogicalBackup.LogicalBackupGoogleApplicationCredentials}) | ||||||
|  | 		} | ||||||
| 
 | 
 | ||||||
| 	case "az": | 	case "az": | ||||||
| 		envVars = appendEnvVars(envVars, []v1.EnvVar{ | 		envVars = appendEnvVars(envVars, []v1.EnvVar{ | ||||||
|  | @ -2488,11 +2494,11 @@ func (c *Cluster) generateLogicalBackupPodEnvVars() []v1.EnvVar { | ||||||
| 			{ | 			{ | ||||||
| 				Name:  "LOGICAL_BACKUP_AZURE_STORAGE_CONTAINER", | 				Name:  "LOGICAL_BACKUP_AZURE_STORAGE_CONTAINER", | ||||||
| 				Value: c.OpConfig.LogicalBackup.LogicalBackupAzureStorageContainer, | 				Value: c.OpConfig.LogicalBackup.LogicalBackupAzureStorageContainer, | ||||||
| 			}, |  | ||||||
| 			{ |  | ||||||
| 				Name:  "LOGICAL_BACKUP_AZURE_STORAGE_ACCOUNT_KEY", |  | ||||||
| 				Value: c.OpConfig.LogicalBackup.LogicalBackupAzureStorageAccountKey, |  | ||||||
| 			}}...) | 			}}...) | ||||||
|  | 
 | ||||||
|  | 		if c.OpConfig.LogicalBackup.LogicalBackupAzureStorageAccountKey != "" { | ||||||
|  | 			envVars = append(envVars, v1.EnvVar{Name: "LOGICAL_BACKUP_AZURE_STORAGE_ACCOUNT_KEY", Value: c.OpConfig.LogicalBackup.LogicalBackupAzureStorageAccountKey}) | ||||||
|  | 		} | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	return envVars | 	return envVars | ||||||
|  | @ -2518,24 +2524,28 @@ func (c *Cluster) getLogicalBackupJobName() (jobName string) { | ||||||
| // survived, we can't delete an object because it will affect the functioning | // survived, we can't delete an object because it will affect the functioning | ||||||
| // cluster). | // cluster). | ||||||
| func (c *Cluster) ownerReferences() []metav1.OwnerReference { | func (c *Cluster) ownerReferences() []metav1.OwnerReference { | ||||||
| 	controller := true | 	currentOwnerReferences := c.ObjectMeta.OwnerReferences | ||||||
| 
 | 	if c.OpConfig.EnableOwnerReferences == nil || !*c.OpConfig.EnableOwnerReferences { | ||||||
| 	if c.Statefulset == nil { | 		return currentOwnerReferences | ||||||
| 		c.logger.Warning("Cannot get owner reference, no statefulset") |  | ||||||
| 		return []metav1.OwnerReference{} |  | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	return []metav1.OwnerReference{ | 	for _, ownerRef := range currentOwnerReferences { | ||||||
| 		{ | 		if ownerRef.UID == c.Postgresql.ObjectMeta.UID { | ||||||
| 			UID:        c.Statefulset.ObjectMeta.UID, | 			return currentOwnerReferences | ||||||
| 			APIVersion: "apps/v1", |  | ||||||
| 			Kind:       "StatefulSet", |  | ||||||
| 			Name:       c.Statefulset.ObjectMeta.Name, |  | ||||||
| 			Controller: &controller, |  | ||||||
| 		}, |  | ||||||
| 		} | 		} | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
|  | 	controllerReference := metav1.OwnerReference{ | ||||||
|  | 		UID:        c.Postgresql.ObjectMeta.UID, | ||||||
|  | 		APIVersion: acidv1.SchemeGroupVersion.Identifier(), | ||||||
|  | 		Kind:       acidv1.PostgresCRDResourceKind, | ||||||
|  | 		Name:       c.Postgresql.ObjectMeta.Name, | ||||||
|  | 		Controller: util.True(), | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	return append(currentOwnerReferences, controllerReference) | ||||||
|  | } | ||||||
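With EnableOwnerReferences on, the appended controller reference ties every child object to the Postgresql resource itself rather than to the statefulset, so deleting the custom resource lets Kubernetes garbage-collect the children. On a child object it should render roughly as follows (the UID is a placeholder; APIVersion and Kind are what acidv1.SchemeGroupVersion.Identifier() and acidv1.PostgresCRDResourceKind presumably resolve to):

// Hypothetical rendered value of the controller reference for a cluster
// named "acid-minimal-cluster".
ownerRef := metav1.OwnerReference{
	UID:        "00000000-0000-0000-0000-000000000000", // placeholder
	APIVersion: "acid.zalan.do/v1",
	Kind:       "postgresql",
	Name:       "acid-minimal-cluster",
	Controller: util.True(),
}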
|  | 
 | ||||||
| func ensurePath(file string, defaultDir string, defaultFile string) string { | func ensurePath(file string, defaultDir string, defaultFile string) string { | ||||||
| 	if file == "" { | 	if file == "" { | ||||||
| 		return path.Join(defaultDir, defaultFile) | 		return path.Join(defaultDir, defaultFile) | ||||||
|  |  | ||||||
|  | @ -1566,22 +1566,28 @@ func TestPodAffinity(t *testing.T) { | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| func testDeploymentOwnerReference(cluster *Cluster, deployment *appsv1.Deployment) error { | func testDeploymentOwnerReference(cluster *Cluster, deployment *appsv1.Deployment) error { | ||||||
|  | 	if len(deployment.ObjectMeta.OwnerReferences) == 0 { | ||||||
|  | 		return nil | ||||||
|  | 	} | ||||||
| 	owner := deployment.ObjectMeta.OwnerReferences[0] | 	owner := deployment.ObjectMeta.OwnerReferences[0] | ||||||
| 
 | 
 | ||||||
| 	if owner.Name != cluster.Statefulset.ObjectMeta.Name { | 	if owner.Name != cluster.Postgresql.ObjectMeta.Name { | ||||||
| 		return fmt.Errorf("Ownere reference is incorrect, got %s, expected %s", | 		return fmt.Errorf("Owner reference is incorrect, got %s, expected %s", | ||||||
| 			owner.Name, cluster.Statefulset.ObjectMeta.Name) | 			owner.Name, cluster.Postgresql.ObjectMeta.Name) | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	return nil | 	return nil | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| func testServiceOwnerReference(cluster *Cluster, service *v1.Service, role PostgresRole) error { | func testServiceOwnerReference(cluster *Cluster, service *v1.Service, role PostgresRole) error { | ||||||
|  | 	if len(service.ObjectMeta.OwnerReferences) == 0 { | ||||||
|  | 		return nil | ||||||
|  | 	} | ||||||
| 	owner := service.ObjectMeta.OwnerReferences[0] | 	owner := service.ObjectMeta.OwnerReferences[0] | ||||||
| 
 | 
 | ||||||
| 	if owner.Name != cluster.Statefulset.ObjectMeta.Name { | 	if owner.Name != cluster.Postgresql.ObjectMeta.Name { | ||||||
| 		return fmt.Errorf("Ownere reference is incorrect, got %s, expected %s", | 		return fmt.Errorf("Owner reference is incorrect, got %s, expected %s", | ||||||
| 			owner.Name, cluster.Statefulset.ObjectMeta.Name) | 			owner.Name, cluster.Postgresql.ObjectMeta.Name) | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	return nil | 	return nil | ||||||
|  | @ -2320,13 +2326,69 @@ func TestSidecars(t *testing.T) { | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| func TestGeneratePodDisruptionBudget(t *testing.T) { | func TestGeneratePodDisruptionBudget(t *testing.T) { | ||||||
|  | 	testName := "Test PodDisruptionBudget spec generation" | ||||||
|  | 
 | ||||||
|  | 	hasName := func(pdbName string) func(cluster *Cluster, podDisruptionBudget *policyv1.PodDisruptionBudget) error { | ||||||
|  | 		return func(cluster *Cluster, podDisruptionBudget *policyv1.PodDisruptionBudget) error { | ||||||
|  | 			if pdbName != podDisruptionBudget.ObjectMeta.Name { | ||||||
|  | 				return fmt.Errorf("PodDisruptionBudget name is incorrect, got %s, expected %s", | ||||||
|  | 					podDisruptionBudget.ObjectMeta.Name, pdbName) | ||||||
|  | 			} | ||||||
|  | 			return nil | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	hasMinAvailable := func(expectedMinAvailable int) func(cluster *Cluster, podDisruptionBudget *policyv1.PodDisruptionBudget) error { | ||||||
|  | 		return func(cluster *Cluster, podDisruptionBudget *policyv1.PodDisruptionBudget) error { | ||||||
|  | 			actual := podDisruptionBudget.Spec.MinAvailable.IntVal | ||||||
|  | 			if actual != int32(expectedMinAvailable) { | ||||||
|  | 				return fmt.Errorf("PodDisruptionBudget MinAvailable is incorrect, got %d, expected %d", | ||||||
|  | 					actual, expectedMinAvailable) | ||||||
|  | 			} | ||||||
|  | 			return nil | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	testLabelsAndSelectors := func(cluster *Cluster, podDisruptionBudget *policyv1.PodDisruptionBudget) error { | ||||||
|  | 		masterLabelSelectorDisabled := cluster.OpConfig.PDBMasterLabelSelector != nil && !*cluster.OpConfig.PDBMasterLabelSelector | ||||||
|  | 		if podDisruptionBudget.ObjectMeta.Namespace != "myapp" { | ||||||
|  | 			return fmt.Errorf("Object Namespace incorrect.") | ||||||
|  | 		} | ||||||
|  | 		if !reflect.DeepEqual(podDisruptionBudget.Labels, map[string]string{"team": "myapp", "cluster-name": "myapp-database"}) { | ||||||
|  | 			return fmt.Errorf("Labels incorrect.") | ||||||
|  | 		} | ||||||
|  | 		if !masterLabelSelectorDisabled && | ||||||
|  | 			!reflect.DeepEqual(podDisruptionBudget.Spec.Selector, &metav1.LabelSelector{ | ||||||
|  | 				MatchLabels: map[string]string{"spilo-role": "master", "cluster-name": "myapp-database"}}) { | ||||||
|  | 
 | ||||||
|  | 			return fmt.Errorf("MatchLabels incorrect.") | ||||||
|  | 		} | ||||||
|  | 
 | ||||||
|  | 		return nil | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	testPodDisruptionBudgetOwnerReference := func(cluster *Cluster, podDisruptionBudget *policyv1.PodDisruptionBudget) error { | ||||||
|  | 		if len(podDisruptionBudget.ObjectMeta.OwnerReferences) == 0 { | ||||||
|  | 			return nil | ||||||
|  | 		} | ||||||
|  | 		owner := podDisruptionBudget.ObjectMeta.OwnerReferences[0] | ||||||
|  | 
 | ||||||
|  | 		if owner.Name != cluster.Postgresql.ObjectMeta.Name { | ||||||
|  | 			return fmt.Errorf("Owner reference is incorrect, got %s, expected %s", | ||||||
|  | 				owner.Name, cluster.Postgresql.ObjectMeta.Name) | ||||||
|  | 		} | ||||||
|  | 
 | ||||||
|  | 		return nil | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
| 	tests := []struct { | 	tests := []struct { | ||||||
| 		c   *Cluster | 		scenario string | ||||||
| 		out policyv1.PodDisruptionBudget | 		spec     *Cluster | ||||||
|  | 		check    []func(cluster *Cluster, podDisruptionBudget *policyv1.PodDisruptionBudget) error | ||||||
| 	}{ | 	}{ | ||||||
| 		// With multiple instances. |  | ||||||
| 		{ | 		{ | ||||||
| 			New( | 			scenario: "With multiple instances", | ||||||
|  | 			spec: New( | ||||||
| 				Config{OpConfig: config.Config{Resources: config.Resources{ClusterNameLabel: "cluster-name", PodRoleLabel: "spilo-role"}, PDBNameFormat: "postgres-{cluster}-pdb"}}, | 				Config{OpConfig: config.Config{Resources: config.Resources{ClusterNameLabel: "cluster-name", PodRoleLabel: "spilo-role"}, PDBNameFormat: "postgres-{cluster}-pdb"}}, | ||||||
| 				k8sutil.KubernetesClient{}, | 				k8sutil.KubernetesClient{}, | ||||||
| 				acidv1.Postgresql{ | 				acidv1.Postgresql{ | ||||||
|  | @ -2334,23 +2396,16 @@ func TestGeneratePodDisruptionBudget(t *testing.T) { | ||||||
| 					Spec:       acidv1.PostgresSpec{TeamID: "myapp", NumberOfInstances: 3}}, | 					Spec:       acidv1.PostgresSpec{TeamID: "myapp", NumberOfInstances: 3}}, | ||||||
| 				logger, | 				logger, | ||||||
| 				eventRecorder), | 				eventRecorder), | ||||||
| 			policyv1.PodDisruptionBudget{ | 			check: []func(cluster *Cluster, podDisruptionBudget *policyv1.PodDisruptionBudget) error{ | ||||||
| 				ObjectMeta: metav1.ObjectMeta{ | 				testPodDisruptionBudgetOwnerReference, | ||||||
| 					Name:      "postgres-myapp-database-pdb", | 				hasName("postgres-myapp-database-pdb"), | ||||||
| 					Namespace: "myapp", | 				hasMinAvailable(1), | ||||||
| 					Labels:    map[string]string{"team": "myapp", "cluster-name": "myapp-database"}, | 				testLabelsAndSelectors, | ||||||
| 				}, |  | ||||||
| 				Spec: policyv1.PodDisruptionBudgetSpec{ |  | ||||||
| 					MinAvailable: util.ToIntStr(1), |  | ||||||
| 					Selector: &metav1.LabelSelector{ |  | ||||||
| 						MatchLabels: map[string]string{"spilo-role": "master", "cluster-name": "myapp-database"}, |  | ||||||
| 			}, | 			}, | ||||||
| 		}, | 		}, | ||||||
| 			}, |  | ||||||
| 		}, |  | ||||||
| 		// With zero instances. |  | ||||||
| 		{ | 		{ | ||||||
| 			New( | 			scenario: "With zero instances", | ||||||
|  | 			spec: New( | ||||||
| 				Config{OpConfig: config.Config{Resources: config.Resources{ClusterNameLabel: "cluster-name", PodRoleLabel: "spilo-role"}, PDBNameFormat: "postgres-{cluster}-pdb"}}, | 				Config{OpConfig: config.Config{Resources: config.Resources{ClusterNameLabel: "cluster-name", PodRoleLabel: "spilo-role"}, PDBNameFormat: "postgres-{cluster}-pdb"}}, | ||||||
| 				k8sutil.KubernetesClient{}, | 				k8sutil.KubernetesClient{}, | ||||||
| 				acidv1.Postgresql{ | 				acidv1.Postgresql{ | ||||||
|  | @ -2358,23 +2413,16 @@ func TestGeneratePodDisruptionBudget(t *testing.T) { | ||||||
| 					Spec:       acidv1.PostgresSpec{TeamID: "myapp", NumberOfInstances: 0}}, | 					Spec:       acidv1.PostgresSpec{TeamID: "myapp", NumberOfInstances: 0}}, | ||||||
| 				logger, | 				logger, | ||||||
| 				eventRecorder), | 				eventRecorder), | ||||||
| 			policyv1.PodDisruptionBudget{ | 			check: []func(cluster *Cluster, podDisruptionBudget *policyv1.PodDisruptionBudget) error{ | ||||||
| 				ObjectMeta: metav1.ObjectMeta{ | 				testPodDisruptionBudgetOwnerReference, | ||||||
| 					Name:      "postgres-myapp-database-pdb", | 				hasName("postgres-myapp-database-pdb"), | ||||||
| 					Namespace: "myapp", | 				hasMinAvailable(0), | ||||||
| 					Labels:    map[string]string{"team": "myapp", "cluster-name": "myapp-database"}, | 				testLabelsAndSelectors, | ||||||
| 				}, |  | ||||||
| 				Spec: policyv1.PodDisruptionBudgetSpec{ |  | ||||||
| 					MinAvailable: util.ToIntStr(0), |  | ||||||
| 					Selector: &metav1.LabelSelector{ |  | ||||||
| 						MatchLabels: map[string]string{"spilo-role": "master", "cluster-name": "myapp-database"}, |  | ||||||
| 			}, | 			}, | ||||||
| 		}, | 		}, | ||||||
| 			}, |  | ||||||
| 		}, |  | ||||||
| 		// With PodDisruptionBudget disabled. |  | ||||||
| 		{ | 		{ | ||||||
| 			New( | 			scenario: "With PodDisruptionBudget disabled", | ||||||
|  | 			spec: New( | ||||||
| 				Config{OpConfig: config.Config{Resources: config.Resources{ClusterNameLabel: "cluster-name", PodRoleLabel: "spilo-role"}, PDBNameFormat: "postgres-{cluster}-pdb", EnablePodDisruptionBudget: util.False()}}, | 				Config{OpConfig: config.Config{Resources: config.Resources{ClusterNameLabel: "cluster-name", PodRoleLabel: "spilo-role"}, PDBNameFormat: "postgres-{cluster}-pdb", EnablePodDisruptionBudget: util.False()}}, | ||||||
| 				k8sutil.KubernetesClient{}, | 				k8sutil.KubernetesClient{}, | ||||||
| 				acidv1.Postgresql{ | 				acidv1.Postgresql{ | ||||||
|  | @ -2382,23 +2430,16 @@ func TestGeneratePodDisruptionBudget(t *testing.T) { | ||||||
| 					Spec:       acidv1.PostgresSpec{TeamID: "myapp", NumberOfInstances: 3}}, | 					Spec:       acidv1.PostgresSpec{TeamID: "myapp", NumberOfInstances: 3}}, | ||||||
| 				logger, | 				logger, | ||||||
| 				eventRecorder), | 				eventRecorder), | ||||||
| 			policyv1.PodDisruptionBudget{ | 			check: []func(cluster *Cluster, podDisruptionBudget *policyv1.PodDisruptionBudget) error{ | ||||||
| 				ObjectMeta: metav1.ObjectMeta{ | 				testPodDisruptionBudgetOwnerReference, | ||||||
| 					Name:      "postgres-myapp-database-pdb", | 				hasName("postgres-myapp-database-pdb"), | ||||||
| 					Namespace: "myapp", | 				hasMinAvailable(0), | ||||||
| 					Labels:    map[string]string{"team": "myapp", "cluster-name": "myapp-database"}, | 				testLabelsAndSelectors, | ||||||
| 				}, |  | ||||||
| 				Spec: policyv1.PodDisruptionBudgetSpec{ |  | ||||||
| 					MinAvailable: util.ToIntStr(0), |  | ||||||
| 					Selector: &metav1.LabelSelector{ |  | ||||||
| 						MatchLabels: map[string]string{"spilo-role": "master", "cluster-name": "myapp-database"}, |  | ||||||
| 			}, | 			}, | ||||||
| 		}, | 		}, | ||||||
| 			}, |  | ||||||
| 		}, |  | ||||||
| 		// With non-default PDBNameFormat and PodDisruptionBudget explicitly enabled. |  | ||||||
| 		{ | 		{ | ||||||
| 			New( | 			scenario: "With non-default PDBNameFormat and PodDisruptionBudget explicitly enabled", | ||||||
|  | 			spec: New( | ||||||
| 				Config{OpConfig: config.Config{Resources: config.Resources{ClusterNameLabel: "cluster-name", PodRoleLabel: "spilo-role"}, PDBNameFormat: "postgres-{cluster}-databass-budget", EnablePodDisruptionBudget: util.True()}}, | 				Config{OpConfig: config.Config{Resources: config.Resources{ClusterNameLabel: "cluster-name", PodRoleLabel: "spilo-role"}, PDBNameFormat: "postgres-{cluster}-databass-budget", EnablePodDisruptionBudget: util.True()}}, | ||||||
| 				k8sutil.KubernetesClient{}, | 				k8sutil.KubernetesClient{}, | ||||||
| 				acidv1.Postgresql{ | 				acidv1.Postgresql{ | ||||||
|  | @ -2406,50 +2447,57 @@ func TestGeneratePodDisruptionBudget(t *testing.T) { | ||||||
| 					Spec:       acidv1.PostgresSpec{TeamID: "myapp", NumberOfInstances: 3}}, | 					Spec:       acidv1.PostgresSpec{TeamID: "myapp", NumberOfInstances: 3}}, | ||||||
| 				logger, | 				logger, | ||||||
| 				eventRecorder), | 				eventRecorder), | ||||||
| 			policyv1.PodDisruptionBudget{ | 			check: []func(cluster *Cluster, podDisruptionBudget *policyv1.PodDisruptionBudget) error{ | ||||||
| 				ObjectMeta: metav1.ObjectMeta{ | 				testPodDisruptionBudgetOwnerReference, | ||||||
| 					Name:      "postgres-myapp-database-databass-budget", | 				hasName("postgres-myapp-database-databass-budget"), | ||||||
| 					Namespace: "myapp", | 				hasMinAvailable(1), | ||||||
| 					Labels:    map[string]string{"team": "myapp", "cluster-name": "myapp-database"}, | 				testLabelsAndSelectors, | ||||||
| 				}, |  | ||||||
| 				Spec: policyv1.PodDisruptionBudgetSpec{ |  | ||||||
| 					MinAvailable: util.ToIntStr(1), |  | ||||||
| 					Selector: &metav1.LabelSelector{ |  | ||||||
| 						MatchLabels: map[string]string{"spilo-role": "master", "cluster-name": "myapp-database"}, |  | ||||||
| 			}, | 			}, | ||||||
| 		}, | 		}, | ||||||
| 			}, |  | ||||||
| 		}, |  | ||||||
| 		// With PDBMasterLabelSelector disabled.
 |  | ||||||
| 		{ | 		{ | ||||||
| 			New( | 			scenario: "With PDBMasterLabelSelector disabled", | ||||||
| 				Config{OpConfig: config.Config{Resources: config.Resources{ClusterNameLabel: "cluster-name", PodRoleLabel: "spilo-role"}, PDBNameFormat: "postgres-{cluster}-pdb", PDBMasterLabelSelector: util.False()}}, | 			spec: New( | ||||||
|  | 				Config{OpConfig: config.Config{Resources: config.Resources{ClusterNameLabel: "cluster-name", PodRoleLabel: "spilo-role"}, PDBNameFormat: "postgres-{cluster}-pdb", EnablePodDisruptionBudget: util.True(), PDBMasterLabelSelector: util.False()}}, | ||||||
| 				k8sutil.KubernetesClient{}, | 				k8sutil.KubernetesClient{}, | ||||||
| 				acidv1.Postgresql{ | 				acidv1.Postgresql{ | ||||||
| 					ObjectMeta: metav1.ObjectMeta{Name: "myapp-database", Namespace: "myapp"}, | 					ObjectMeta: metav1.ObjectMeta{Name: "myapp-database", Namespace: "myapp"}, | ||||||
| 					Spec:       acidv1.PostgresSpec{TeamID: "myapp", NumberOfInstances: 3}}, | 					Spec:       acidv1.PostgresSpec{TeamID: "myapp", NumberOfInstances: 3}}, | ||||||
| 				logger, | 				logger, | ||||||
| 				eventRecorder), | 				eventRecorder), | ||||||
| 			policyv1.PodDisruptionBudget{ | 			check: []func(cluster *Cluster, podDisruptionBudget *policyv1.PodDisruptionBudget) error{ | ||||||
| 				ObjectMeta: metav1.ObjectMeta{ | 				testPodDisruptionBudgetOwnerReference, | ||||||
| 					Name:      "postgres-myapp-database-pdb", | 				hasName("postgres-myapp-database-pdb"), | ||||||
| 					Namespace: "myapp", | 				hasMinAvailable(1), | ||||||
| 					Labels:    map[string]string{"team": "myapp", "cluster-name": "myapp-database"}, | 				testLabelsAndSelectors, | ||||||
| 				}, |  | ||||||
| 				Spec: policyv1.PodDisruptionBudgetSpec{ |  | ||||||
| 					MinAvailable: util.ToIntStr(1), |  | ||||||
| 					Selector: &metav1.LabelSelector{ |  | ||||||
| 						MatchLabels: map[string]string{"cluster-name": "myapp-database"}, |  | ||||||
| 			}, | 			}, | ||||||
| 		}, | 		}, | ||||||
|  | 		{ | ||||||
|  | 			scenario: "With OwnerReference enabled", | ||||||
|  | 			spec: New( | ||||||
|  | 				Config{OpConfig: config.Config{Resources: config.Resources{ClusterNameLabel: "cluster-name", PodRoleLabel: "spilo-role", EnableOwnerReferences: util.True()}, PDBNameFormat: "postgres-{cluster}-pdb", EnablePodDisruptionBudget: util.True()}}, | ||||||
|  | 				k8sutil.KubernetesClient{}, | ||||||
|  | 				acidv1.Postgresql{ | ||||||
|  | 					ObjectMeta: metav1.ObjectMeta{Name: "myapp-database", Namespace: "myapp"}, | ||||||
|  | 					Spec:       acidv1.PostgresSpec{TeamID: "myapp", NumberOfInstances: 3}}, | ||||||
|  | 				logger, | ||||||
|  | 				eventRecorder), | ||||||
|  | 			check: []func(cluster *Cluster, podDisruptionBudget *policyv1.PodDisruptionBudget) error{ | ||||||
|  | 				testPodDisruptionBudgetOwnerReference, | ||||||
|  | 				hasName("postgres-myapp-database-pdb"), | ||||||
|  | 				hasMinAvailable(1), | ||||||
|  | 				testLabelsAndSelectors, | ||||||
| 			}, | 			}, | ||||||
| 		}, | 		}, | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	for _, tt := range tests { | 	for _, tt := range tests { | ||||||
| 		result := tt.c.generatePodDisruptionBudget() | 		result := tt.spec.generatePodDisruptionBudget() | ||||||
| 		if !reflect.DeepEqual(*result, tt.out) { | 		for _, check := range tt.check { | ||||||
| 			t.Errorf("Expected PodDisruptionBudget: %#v, got %#v", tt.out, *result) | 			err := check(tt.spec, result) | ||||||
|  | 			if err != nil { | ||||||
|  | 				t.Errorf("%s [%s]: PodDisruptionBudget spec is incorrect, %+v", | ||||||
|  | 					testName, tt.scenario, err) | ||||||
|  | 			} | ||||||
| 		} | 		} | ||||||
| 	} | 	} | ||||||
| } | } | ||||||
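
The refactored test exercises each scenario through a slice of check functions instead of one reflect.DeepEqual against a fully built expected struct. The helpers themselves are not shown in this hunk; below is a minimal sketch of what hasName and hasMinAvailable could look like, with signatures inferred purely from the call sites above.

    // Minimal sketch: helper shapes inferred from the call sites above,
    // not the definitions used by the operator's test suite.
    func hasName(expected string) func(cluster *Cluster, pdb *policyv1.PodDisruptionBudget) error {
    	return func(cluster *Cluster, pdb *policyv1.PodDisruptionBudget) error {
    		if pdb.ObjectMeta.Name != expected {
    			return fmt.Errorf("expected PDB name %q, got %q", expected, pdb.ObjectMeta.Name)
    		}
    		return nil
    	}
    }

    func hasMinAvailable(expected int) func(cluster *Cluster, pdb *policyv1.PodDisruptionBudget) error {
    	return func(cluster *Cluster, pdb *policyv1.PodDisruptionBudget) error {
    		// Spec.MinAvailable is *intstr.IntOrString, so guard against nil first.
    		if pdb.Spec.MinAvailable == nil || pdb.Spec.MinAvailable.IntVal != int32(expected) {
    			return fmt.Errorf("expected MinAvailable %d, got %v", expected, pdb.Spec.MinAvailable)
    		}
    		return nil
    	}
    }
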
|  | @ -3541,6 +3589,11 @@ func TestGenerateLogicalBackupJob(t *testing.T) { | ||||||
| 		cluster.Spec.LogicalBackupSchedule = tt.specSchedule | 		cluster.Spec.LogicalBackupSchedule = tt.specSchedule | ||||||
| 		cronJob, err := cluster.generateLogicalBackupJob() | 		cronJob, err := cluster.generateLogicalBackupJob() | ||||||
| 		assert.NoError(t, err) | 		assert.NoError(t, err) | ||||||
|  | 
 | ||||||
|  | 		if !reflect.DeepEqual(cronJob.ObjectMeta.OwnerReferences, cluster.ownerReferences()) { | ||||||
|  | 			t.Errorf("%s - %s: expected owner references %#v, got %#v", t.Name(), tt.subTest, cluster.ownerReferences(), cronJob.ObjectMeta.OwnerReferences) | ||||||
|  | 		} | ||||||
|  | 
 | ||||||
| 		if cronJob.Spec.Schedule != tt.expectedSchedule { | 		if cronJob.Spec.Schedule != tt.expectedSchedule { | ||||||
| 			t.Errorf("%s - %s: expected schedule %s, got %s", t.Name(), tt.subTest, tt.expectedSchedule, cronJob.Spec.Schedule) | 			t.Errorf("%s - %s: expected schedule %s, got %s", t.Name(), tt.subTest, tt.expectedSchedule, cronJob.Spec.Schedule) | ||||||
| 		} | 		} | ||||||
|  |  | ||||||
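
The added assertion compares the generated cron job's owner references against cluster.ownerReferences(). For orientation, the entries being compared are standard metav1.OwnerReference values; an illustrative (not actual) example of one such entry:

    // Illustrative values only; the real entry is built from the cluster's metadata.
    ref := metav1.OwnerReference{
    	APIVersion: "acid.zalan.do/v1",
    	Kind:       "Postgresql",
    	Name:       "acid-test-cluster", // name of the owning postgresql resource
    	UID:        "",                  // UID copied from that resource
    	Controller: util.True(),         // pointer to true, marking the managing controller
    }
    _ = ref
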
|  | @ -11,7 +11,6 @@ import ( | ||||||
| 
 | 
 | ||||||
| // VersionMap Map of version numbers
 | // VersionMap Map of version numbers
 | ||||||
| var VersionMap = map[string]int{ | var VersionMap = map[string]int{ | ||||||
| 	"11": 110000, |  | ||||||
| 	"12": 120000, | 	"12": 120000, | ||||||
| 	"13": 130000, | 	"13": 130000, | ||||||
| 	"14": 140000, | 	"14": 140000, | ||||||
|  | @ -74,6 +73,11 @@ func (c *Cluster) majorVersionUpgrade() error { | ||||||
| 		return nil | 		return nil | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
|  | 	if !isInMainternanceWindow(c.Spec.MaintenanceWindows) { | ||||||
|  | 		c.logger.Infof("skipping major version upgrade, not in maintenance window") | ||||||
|  | 		return nil | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
| 	pods, err := c.listPods() | 	pods, err := c.listPods() | ||||||
| 	if err != nil { | 	if err != nil { | ||||||
| 		return err | 		return err | ||||||
|  |  | ||||||
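
The upgrade path is now gated on the current time falling inside one of the cluster's maintenance windows. The check itself is defined elsewhere in the package; the following is a hypothetical sketch of such a gate, under the assumptions that a window is a weekday plus a start/end time of day, that an empty list means no restriction, and that the "time" package is imported. The operator's actual types may differ.

    // Hypothetical sketch, not the operator's implementation.
    type maintenanceWindow struct {
    	Everyday  bool
    	Weekday   time.Weekday
    	StartTime time.Time // only the time of day is relevant
    	EndTime   time.Time
    }

    func inMaintenanceWindow(windows []maintenanceWindow, now time.Time) bool {
    	if len(windows) == 0 {
    		return true // no windows configured: never block the upgrade
    	}
    	minutes := func(t time.Time) int { return t.Hour()*60 + t.Minute() }
    	for _, w := range windows {
    		if !w.Everyday && w.Weekday != now.Weekday() {
    			continue
    		}
    		if minutes(w.StartTime) <= minutes(now) && minutes(now) <= minutes(w.EndTime) {
    			return true
    		}
    	}
    	return false
    }
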
|  | @ -31,20 +31,36 @@ func (c *Cluster) listResources() error { | ||||||
| 		c.logger.Infof("found statefulset: %q (uid: %q)", util.NameFromMeta(c.Statefulset.ObjectMeta), c.Statefulset.UID) | 		c.logger.Infof("found statefulset: %q (uid: %q)", util.NameFromMeta(c.Statefulset.ObjectMeta), c.Statefulset.UID) | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	for _, obj := range c.Secrets { | 	for appId, stream := range c.Streams { | ||||||
| 		c.logger.Infof("found secret: %q (uid: %q) namesapce: %s", util.NameFromMeta(obj.ObjectMeta), obj.UID, obj.ObjectMeta.Namespace) | 		c.logger.Infof("found stream: %q with application id %q (uid: %q)", util.NameFromMeta(stream.ObjectMeta), appId, stream.UID) | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	if !c.patroniKubernetesUseConfigMaps() { | 	if c.LogicalBackupJob != nil { | ||||||
| 		for role, endpoint := range c.Endpoints { | 		c.logger.Infof("found logical backup job: %q (uid: %q)", util.NameFromMeta(c.LogicalBackupJob.ObjectMeta), c.LogicalBackupJob.UID) | ||||||
| 			c.logger.Infof("found %s endpoint: %q (uid: %q)", role, util.NameFromMeta(endpoint.ObjectMeta), endpoint.UID) |  | ||||||
| 	} | 	} | ||||||
|  | 
 | ||||||
|  | 	for _, secret := range c.Secrets { | ||||||
|  | 		c.logger.Infof("found secret: %q (uid: %q) namespace: %s", util.NameFromMeta(secret.ObjectMeta), secret.UID, secret.ObjectMeta.Namespace) | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	for role, service := range c.Services { | 	for role, service := range c.Services { | ||||||
| 		c.logger.Infof("found %s service: %q (uid: %q)", role, util.NameFromMeta(service.ObjectMeta), service.UID) | 		c.logger.Infof("found %s service: %q (uid: %q)", role, util.NameFromMeta(service.ObjectMeta), service.UID) | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
|  | 	for role, endpoint := range c.Endpoints { | ||||||
|  | 		c.logger.Infof("found %s endpoint: %q (uid: %q)", role, util.NameFromMeta(endpoint.ObjectMeta), endpoint.UID) | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	if c.patroniKubernetesUseConfigMaps() { | ||||||
|  | 		for suffix, configmap := range c.PatroniConfigMaps { | ||||||
|  | 			c.logger.Infof("found %s Patroni config map: %q (uid: %q)", suffix, util.NameFromMeta(configmap.ObjectMeta), configmap.UID) | ||||||
|  | 		} | ||||||
|  | 	} else { | ||||||
|  | 		for suffix, endpoint := range c.PatroniEndpoints { | ||||||
|  | 			c.logger.Infof("found %s Patroni endpoint: %q (uid: %q)", suffix, util.NameFromMeta(endpoint.ObjectMeta), endpoint.UID) | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
| 	pods, err := c.listPods() | 	pods, err := c.listPods() | ||||||
| 	if err != nil { | 	if err != nil { | ||||||
| 		return fmt.Errorf("could not get the list of pods: %v", err) | 		return fmt.Errorf("could not get the list of pods: %v", err) | ||||||
|  | @ -63,6 +79,15 @@ func (c *Cluster) listResources() error { | ||||||
| 		c.logger.Infof("found PVC: %q (uid: %q)", util.NameFromMeta(obj.ObjectMeta), obj.UID) | 		c.logger.Infof("found PVC: %q (uid: %q)", util.NameFromMeta(obj.ObjectMeta), obj.UID) | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
|  | 	for role, poolerObjs := range c.ConnectionPooler { | ||||||
|  | 		if poolerObjs.Deployment != nil { | ||||||
|  | 			c.logger.Infof("found %s pooler deployment: %q (uid: %q)", role, util.NameFromMeta(poolerObjs.Deployment.ObjectMeta), poolerObjs.Deployment.UID) | ||||||
|  | 		} | ||||||
|  | 		if poolerObjs.Service != nil { | ||||||
|  | 			c.logger.Infof("found %s pooler service: %q (uid: %q)", role, util.NameFromMeta(poolerObjs.Service.ObjectMeta), poolerObjs.Service.UID) | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
| 	return nil | 	return nil | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
|  | @ -286,33 +311,14 @@ func (c *Cluster) createService(role PostgresRole) (*v1.Service, error) { | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| func (c *Cluster) updateService(role PostgresRole, oldService *v1.Service, newService *v1.Service) (*v1.Service, error) { | func (c *Cluster) updateService(role PostgresRole, oldService *v1.Service, newService *v1.Service) (*v1.Service, error) { | ||||||
| 	var ( | 	var err error | ||||||
| 		svc *v1.Service | 	svc := oldService | ||||||
| 		err error |  | ||||||
| 	) |  | ||||||
| 
 |  | ||||||
| 	c.setProcessName("updating %v service", role) |  | ||||||
| 
 | 
 | ||||||
| 	serviceName := util.NameFromMeta(oldService.ObjectMeta) | 	serviceName := util.NameFromMeta(oldService.ObjectMeta) | ||||||
| 
 | 	match, reason := c.compareServices(oldService, newService) | ||||||
| 	// update the service annotation in order to propagate ELB notation.
 | 	if !match { | ||||||
| 	if len(newService.ObjectMeta.Annotations) > 0 { | 		c.logServiceChanges(role, oldService, newService, false, reason) | ||||||
| 		if annotationsPatchData, err := metaAnnotationsPatch(newService.ObjectMeta.Annotations); err == nil { | 		c.setProcessName("updating %v service", role) | ||||||
| 			_, err = c.KubeClient.Services(serviceName.Namespace).Patch( |  | ||||||
| 				context.TODO(), |  | ||||||
| 				serviceName.Name, |  | ||||||
| 				types.MergePatchType, |  | ||||||
| 				[]byte(annotationsPatchData), |  | ||||||
| 				metav1.PatchOptions{}, |  | ||||||
| 				"") |  | ||||||
| 
 |  | ||||||
| 			if err != nil { |  | ||||||
| 				return nil, fmt.Errorf("could not replace annotations for the service %q: %v", serviceName, err) |  | ||||||
| 			} |  | ||||||
| 		} else { |  | ||||||
| 			return nil, fmt.Errorf("could not form patch for the service metadata: %v", err) |  | ||||||
| 		} |  | ||||||
| 	} |  | ||||||
| 
 | 
 | ||||||
| 		// now, patch the service spec, but when disabling LoadBalancers do update instead
 | 		// now, patch the service spec, but when disabling LoadBalancers do update instead
 | ||||||
| 		// patch does not work because of LoadBalancerSourceRanges field (even if set to nil)
 | 		// patch does not work because of LoadBalancerSourceRanges field (even if set to nil)
 | ||||||
|  | @ -321,20 +327,21 @@ func (c *Cluster) updateService(role PostgresRole, oldService *v1.Service, newSe | ||||||
| 		if newServiceType == "ClusterIP" && newServiceType != oldServiceType { | 		if newServiceType == "ClusterIP" && newServiceType != oldServiceType { | ||||||
| 			newService.ResourceVersion = oldService.ResourceVersion | 			newService.ResourceVersion = oldService.ResourceVersion | ||||||
| 			newService.Spec.ClusterIP = oldService.Spec.ClusterIP | 			newService.Spec.ClusterIP = oldService.Spec.ClusterIP | ||||||
|  | 		} | ||||||
| 		svc, err = c.KubeClient.Services(serviceName.Namespace).Update(context.TODO(), newService, metav1.UpdateOptions{}) | 		svc, err = c.KubeClient.Services(serviceName.Namespace).Update(context.TODO(), newService, metav1.UpdateOptions{}) | ||||||
| 		if err != nil { | 		if err != nil { | ||||||
| 			return nil, fmt.Errorf("could not update service %q: %v", serviceName, err) | 			return nil, fmt.Errorf("could not update service %q: %v", serviceName, err) | ||||||
| 		} | 		} | ||||||
| 	} else { |  | ||||||
| 		patchData, err := specPatch(newService.Spec) |  | ||||||
| 		if err != nil { |  | ||||||
| 			return nil, fmt.Errorf("could not form patch for the service %q: %v", serviceName, err) |  | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 		svc, err = c.KubeClient.Services(serviceName.Namespace).Patch( | 	if changed, _ := c.compareAnnotations(oldService.Annotations, newService.Annotations); changed { | ||||||
| 			context.TODO(), serviceName.Name, types.MergePatchType, patchData, metav1.PatchOptions{}, "") | 		patchData, err := metaAnnotationsPatch(newService.Annotations) | ||||||
| 		if err != nil { | 		if err != nil { | ||||||
| 			return nil, fmt.Errorf("could not patch service %q: %v", serviceName, err) | 			return nil, fmt.Errorf("could not form patch for service %q annotations: %v", oldService.Name, err) | ||||||
|  | 		} | ||||||
|  | 		svc, err = c.KubeClient.Services(serviceName.Namespace).Patch(context.TODO(), newService.Name, types.MergePatchType, []byte(patchData), metav1.PatchOptions{}) | ||||||
|  | 		if err != nil { | ||||||
|  | 			return nil, fmt.Errorf("could not patch annotations for service %q: %v", oldService.Name, err) | ||||||
| 		} | 		} | ||||||
| 	} | 	} | ||||||
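
Annotation changes are now applied separately from the spec, via a JSON merge patch scoped to metadata. metaAnnotationsPatch is defined elsewhere; assuming it simply marshals a metadata/annotations wrapper (the document shape types.MergePatchType expects), a sketch could look like this:

    // Sketch under that assumption; returns a string because the caller
    // converts it with []byte(patchData). Requires encoding/json.
    func annotationsMergePatch(annotations map[string]string) (string, error) {
    	patch := map[string]map[string]map[string]string{
    		"metadata": {"annotations": annotations},
    	}
    	b, err := json.Marshal(patch)
    	if err != nil {
    		return "", err
    	}
    	return string(b), nil
    }
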
| 
 | 
 | ||||||
|  | @ -350,11 +357,10 @@ func (c *Cluster) deleteService(role PostgresRole) error { | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	if err := c.KubeClient.Services(c.Services[role].Namespace).Delete(context.TODO(), c.Services[role].Name, c.deleteOptions); err != nil { | 	if err := c.KubeClient.Services(c.Services[role].Namespace).Delete(context.TODO(), c.Services[role].Name, c.deleteOptions); err != nil { | ||||||
| 		if k8sutil.ResourceNotFound(err) { | 		if !k8sutil.ResourceNotFound(err) { | ||||||
| 			c.logger.Debugf("%s service has already been deleted", role) | 			return fmt.Errorf("could not delete %s service: %v", role, err) | ||||||
| 		} else if err != nil { |  | ||||||
| 			return err |  | ||||||
| 		} | 		} | ||||||
|  | 		c.logger.Debugf("%s service has already been deleted", role) | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	c.logger.Infof("%s service %q has been deleted", role, util.NameFromMeta(c.Services[role].ObjectMeta)) | 	c.logger.Infof("%s service %q has been deleted", role, util.NameFromMeta(c.Services[role].ObjectMeta)) | ||||||
|  | @ -496,11 +502,10 @@ func (c *Cluster) deleteEndpoint(role PostgresRole) error { | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	if err := c.KubeClient.Endpoints(c.Endpoints[role].Namespace).Delete(context.TODO(), c.Endpoints[role].Name, c.deleteOptions); err != nil { | 	if err := c.KubeClient.Endpoints(c.Endpoints[role].Namespace).Delete(context.TODO(), c.Endpoints[role].Name, c.deleteOptions); err != nil { | ||||||
| 		if k8sutil.ResourceNotFound(err) { | 		if !k8sutil.ResourceNotFound(err) { | ||||||
| 			c.logger.Debugf("%s endpoint has already been deleted", role) | 			return fmt.Errorf("could not delete %s endpoint: %v", role, err) | ||||||
| 		} else if err != nil { |  | ||||||
| 			return fmt.Errorf("could not delete endpoint: %v", err) |  | ||||||
| 		} | 		} | ||||||
|  | 		c.logger.Debugf("%s endpoint has already been deleted", role) | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	c.logger.Infof("%s endpoint %q has been deleted", role, util.NameFromMeta(c.Endpoints[role].ObjectMeta)) | 	c.logger.Infof("%s endpoint %q has been deleted", role, util.NameFromMeta(c.Endpoints[role].ObjectMeta)) | ||||||
|  | @ -509,12 +514,83 @@ func (c *Cluster) deleteEndpoint(role PostgresRole) error { | ||||||
| 	return nil | 	return nil | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
|  | func (c *Cluster) deletePatroniResources() error { | ||||||
|  | 	c.setProcessName("deleting Patroni resources") | ||||||
|  | 	errors := make([]string, 0) | ||||||
|  | 
 | ||||||
|  | 	if err := c.deleteService(Patroni); err != nil { | ||||||
|  | 		errors = append(errors, fmt.Sprintf("%v", err)) | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	for _, suffix := range patroniObjectSuffixes { | ||||||
|  | 		if c.patroniKubernetesUseConfigMaps() { | ||||||
|  | 			if err := c.deletePatroniConfigMap(suffix); err != nil { | ||||||
|  | 				errors = append(errors, fmt.Sprintf("%v", err)) | ||||||
|  | 			} | ||||||
|  | 		} else { | ||||||
|  | 			if err := c.deletePatroniEndpoint(suffix); err != nil { | ||||||
|  | 				errors = append(errors, fmt.Sprintf("%v", err)) | ||||||
|  | 			} | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	if len(errors) > 0 { | ||||||
|  | 		return fmt.Errorf("%v", strings.Join(errors, `', '`)) | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	return nil | ||||||
|  | } | ||||||
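
deletePatroniResources fans out over patroniObjectSuffixes, which this diff does not define. Purely as a labeled assumption for illustration, the list would name Patroni's auxiliary objects, something like:

    // Assumption for illustration only; the actual list lives elsewhere in the package.
    var patroniObjectSuffixes = []string{"config", "failover", "sync"}
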
|  | 
 | ||||||
|  | func (c *Cluster) deletePatroniConfigMap(suffix string) error { | ||||||
|  | 	c.setProcessName("deleting Patroni config map") | ||||||
|  | 	c.logger.Debugln("deleting Patroni config map") | ||||||
|  | 	cm := c.PatroniConfigMaps[suffix] | ||||||
|  | 	if cm == nil { | ||||||
|  | 		c.logger.Debugf("there is no %s Patroni config map in the cluster", suffix) | ||||||
|  | 		return nil | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	if err := c.KubeClient.ConfigMaps(cm.Namespace).Delete(context.TODO(), cm.Name, c.deleteOptions); err != nil { | ||||||
|  | 		if !k8sutil.ResourceNotFound(err) { | ||||||
|  | 			return fmt.Errorf("could not delete %s Patroni config map %q: %v", suffix, cm.Name, err) | ||||||
|  | 		} | ||||||
|  | 		c.logger.Debugf("%s Patroni config map has already been deleted", suffix) | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	c.logger.Infof("%s Patroni config map %q has been deleted", suffix, util.NameFromMeta(cm.ObjectMeta)) | ||||||
|  | 	delete(c.PatroniConfigMaps, suffix) | ||||||
|  | 
 | ||||||
|  | 	return nil | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (c *Cluster) deletePatroniEndpoint(suffix string) error { | ||||||
|  | 	c.setProcessName("deleting Patroni endpoint") | ||||||
|  | 	c.logger.Debugln("deleting Patroni endpoint") | ||||||
|  | 	ep := c.PatroniEndpoints[suffix] | ||||||
|  | 	if ep == nil { | ||||||
|  | 		c.logger.Debugf("there is no %s Patroni endpoint in the cluster", suffix) | ||||||
|  | 		return nil | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	if err := c.KubeClient.Endpoints(ep.Namespace).Delete(context.TODO(), ep.Name, c.deleteOptions); err != nil { | ||||||
|  | 		if !k8sutil.ResourceNotFound(err) { | ||||||
|  | 			return fmt.Errorf("could not delete %s Patroni endpoint %q: %v", suffix, ep.Name, err) | ||||||
|  | 		} | ||||||
|  | 		c.logger.Debugf("%s Patroni endpoint has already been deleted", suffix) | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	c.logger.Infof("%s Patroni endpoint %q has been deleted", suffix, util.NameFromMeta(ep.ObjectMeta)) | ||||||
|  | 	delete(c.PatroniEndpoints, suffix) | ||||||
|  | 
 | ||||||
|  | 	return nil | ||||||
|  | } | ||||||
|  | 
 | ||||||
| func (c *Cluster) deleteSecrets() error { | func (c *Cluster) deleteSecrets() error { | ||||||
| 	c.setProcessName("deleting secrets") | 	c.setProcessName("deleting secrets") | ||||||
| 	errors := make([]string, 0) | 	errors := make([]string, 0) | ||||||
| 
 | 
 | ||||||
| 	for uid, secret := range c.Secrets { | 	for uid := range c.Secrets { | ||||||
| 		err := c.deleteSecret(uid, *secret) | 		err := c.deleteSecret(uid) | ||||||
| 		if err != nil { | 		if err != nil { | ||||||
| 			errors = append(errors, fmt.Sprintf("%v", err)) | 			errors = append(errors, fmt.Sprintf("%v", err)) | ||||||
| 		} | 		} | ||||||
|  | @ -527,8 +603,9 @@ func (c *Cluster) deleteSecrets() error { | ||||||
| 	return nil | 	return nil | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| func (c *Cluster) deleteSecret(uid types.UID, secret v1.Secret) error { | func (c *Cluster) deleteSecret(uid types.UID) error { | ||||||
| 	c.setProcessName("deleting secret") | 	c.setProcessName("deleting secret") | ||||||
|  | 	secret := c.Secrets[uid] | ||||||
| 	secretName := util.NameFromMeta(secret.ObjectMeta) | 	secretName := util.NameFromMeta(secret.ObjectMeta) | ||||||
| 	c.logger.Debugf("deleting secret %q", secretName) | 	c.logger.Debugf("deleting secret %q", secretName) | ||||||
| 	err := c.KubeClient.Secrets(secret.Namespace).Delete(context.TODO(), secret.Name, c.deleteOptions) | 	err := c.KubeClient.Secrets(secret.Namespace).Delete(context.TODO(), secret.Name, c.deleteOptions) | ||||||
|  | @ -556,12 +633,12 @@ func (c *Cluster) createLogicalBackupJob() (err error) { | ||||||
| 	if err != nil { | 	if err != nil { | ||||||
| 		return fmt.Errorf("could not generate k8s cron job spec: %v", err) | 		return fmt.Errorf("could not generate k8s cron job spec: %v", err) | ||||||
| 	} | 	} | ||||||
| 	c.logger.Debugf("Generated cronJobSpec: %v", logicalBackupJobSpec) |  | ||||||
| 
 | 
 | ||||||
| 	_, err = c.KubeClient.CronJobsGetter.CronJobs(c.Namespace).Create(context.TODO(), logicalBackupJobSpec, metav1.CreateOptions{}) | 	cronJob, err := c.KubeClient.CronJobsGetter.CronJobs(c.Namespace).Create(context.TODO(), logicalBackupJobSpec, metav1.CreateOptions{}) | ||||||
| 	if err != nil { | 	if err != nil { | ||||||
| 		return fmt.Errorf("could not create k8s cron job: %v", err) | 		return fmt.Errorf("could not create k8s cron job: %v", err) | ||||||
| 	} | 	} | ||||||
|  | 	c.LogicalBackupJob = cronJob | ||||||
| 
 | 
 | ||||||
| 	return nil | 	return nil | ||||||
| } | } | ||||||
|  | @ -575,7 +652,7 @@ func (c *Cluster) patchLogicalBackupJob(newJob *batchv1.CronJob) error { | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	// update the backup job spec
 | 	// update the backup job spec
 | ||||||
| 	_, err = c.KubeClient.CronJobsGetter.CronJobs(c.Namespace).Patch( | 	cronJob, err := c.KubeClient.CronJobsGetter.CronJobs(c.Namespace).Patch( | ||||||
| 		context.TODO(), | 		context.TODO(), | ||||||
| 		c.getLogicalBackupJobName(), | 		c.getLogicalBackupJobName(), | ||||||
| 		types.MergePatchType, | 		types.MergePatchType, | ||||||
|  | @ -585,20 +662,24 @@ func (c *Cluster) patchLogicalBackupJob(newJob *batchv1.CronJob) error { | ||||||
| 	if err != nil { | 	if err != nil { | ||||||
| 		return fmt.Errorf("could not patch logical backup job: %v", err) | 		return fmt.Errorf("could not patch logical backup job: %v", err) | ||||||
| 	} | 	} | ||||||
|  | 	c.LogicalBackupJob = cronJob | ||||||
| 
 | 
 | ||||||
| 	return nil | 	return nil | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| func (c *Cluster) deleteLogicalBackupJob() error { | func (c *Cluster) deleteLogicalBackupJob() error { | ||||||
| 
 | 	if c.LogicalBackupJob == nil { | ||||||
|  | 		return nil | ||||||
|  | 	} | ||||||
| 	c.logger.Info("removing the logical backup job") | 	c.logger.Info("removing the logical backup job") | ||||||
| 
 | 
 | ||||||
| 	err := c.KubeClient.CronJobsGetter.CronJobs(c.Namespace).Delete(context.TODO(), c.getLogicalBackupJobName(), c.deleteOptions) | 	err := c.KubeClient.CronJobsGetter.CronJobs(c.LogicalBackupJob.Namespace).Delete(context.TODO(), c.getLogicalBackupJobName(), c.deleteOptions) | ||||||
| 	if k8sutil.ResourceNotFound(err) { | 	if k8sutil.ResourceNotFound(err) { | ||||||
| 		c.logger.Debugf("logical backup cron job %q has already been deleted", c.getLogicalBackupJobName()) | 		c.logger.Debugf("logical backup cron job %q has already been deleted", c.getLogicalBackupJobName()) | ||||||
| 	} else if err != nil { | 	} else if err != nil { | ||||||
| 		return err | 		return err | ||||||
| 	} | 	} | ||||||
|  | 	c.LogicalBackupJob = nil | ||||||
| 
 | 
 | ||||||
| 	return nil | 	return nil | ||||||
| } | } | ||||||
|  |  | ||||||
|  | @ -29,41 +29,46 @@ func (c *Cluster) createStreams(appId string) (*zalandov1.FabricEventStream, err | ||||||
| 	return streamCRD, nil | 	return streamCRD, nil | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| func (c *Cluster) updateStreams(newEventStreams *zalandov1.FabricEventStream) error { | func (c *Cluster) updateStreams(newEventStreams *zalandov1.FabricEventStream) (patchedStream *zalandov1.FabricEventStream, err error) { | ||||||
| 	c.setProcessName("updating event streams") | 	c.setProcessName("updating event streams") | ||||||
|  | 
 | ||||||
| 	patch, err := json.Marshal(newEventStreams) | 	patch, err := json.Marshal(newEventStreams) | ||||||
| 	if err != nil { | 	if err != nil { | ||||||
| 		return fmt.Errorf("could not marshal new event stream CRD %q: %v", newEventStreams.Name, err) | 		return nil, fmt.Errorf("could not marshal new event stream CRD %q: %v", newEventStreams.Name, err) | ||||||
| 	} | 	} | ||||||
| 	if _, err := c.KubeClient.FabricEventStreams(newEventStreams.Namespace).Patch( | 	if patchedStream, err = c.KubeClient.FabricEventStreams(newEventStreams.Namespace).Patch( | ||||||
| 		context.TODO(), newEventStreams.Name, types.MergePatchType, patch, metav1.PatchOptions{}); err != nil { | 		context.TODO(), newEventStreams.Name, types.MergePatchType, patch, metav1.PatchOptions{}); err != nil { | ||||||
| 		return err | 		return nil, err | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
|  | 	return patchedStream, nil | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (c *Cluster) deleteStream(appId string) error { | ||||||
|  | 	c.setProcessName("deleting event stream") | ||||||
|  | 
 | ||||||
|  | 	err := c.KubeClient.FabricEventStreams(c.Streams[appId].Namespace).Delete(context.TODO(), c.Streams[appId].Name, metav1.DeleteOptions{}) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return fmt.Errorf("could not delete event stream %q with applicationId %s: %v", c.Streams[appId].Name, appId, err) | ||||||
|  | 	} | ||||||
|  | 	delete(c.Streams, appId) | ||||||
|  | 
 | ||||||
| 	return nil | 	return nil | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| func (c *Cluster) deleteStreams() error { | func (c *Cluster) deleteStreams() error { | ||||||
| 	c.setProcessName("deleting event streams") |  | ||||||
| 
 |  | ||||||
| 	// check if stream CRD is installed before trying a delete
 | 	// check if stream CRD is installed before trying a delete
 | ||||||
| 	_, err := c.KubeClient.CustomResourceDefinitions().Get(context.TODO(), constants.EventStreamCRDName, metav1.GetOptions{}) | 	_, err := c.KubeClient.CustomResourceDefinitions().Get(context.TODO(), constants.EventStreamCRDName, metav1.GetOptions{}) | ||||||
| 	if k8sutil.ResourceNotFound(err) { | 	if k8sutil.ResourceNotFound(err) { | ||||||
| 		return nil | 		return nil | ||||||
| 	} | 	} | ||||||
| 
 | 	c.setProcessName("deleting event streams") | ||||||
| 	errors := make([]string, 0) | 	errors := make([]string, 0) | ||||||
| 	listOptions := metav1.ListOptions{ | 
 | ||||||
| 		LabelSelector: c.labelsSet(true).String(), | 	for appId := range c.Streams { | ||||||
| 	} | 		err := c.deleteStream(appId) | ||||||
| 	streams, err := c.KubeClient.FabricEventStreams(c.Namespace).List(context.TODO(), listOptions) |  | ||||||
| 		if err != nil { | 		if err != nil { | ||||||
| 		return fmt.Errorf("could not list of FabricEventStreams: %v", err) | 			errors = append(errors, fmt.Sprintf("%v", err)) | ||||||
| 	} |  | ||||||
| 	for _, stream := range streams.Items { |  | ||||||
| 		err = c.KubeClient.FabricEventStreams(stream.Namespace).Delete(context.TODO(), stream.Name, metav1.DeleteOptions{}) |  | ||||||
| 		if err != nil { |  | ||||||
| 			errors = append(errors, fmt.Sprintf("could not delete event stream %q: %v", stream.Name, err)) |  | ||||||
| 		} | 		} | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
|  | @ -74,7 +79,7 @@ func (c *Cluster) deleteStreams() error { | ||||||
| 	return nil | 	return nil | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| func gatherApplicationIds(streams []acidv1.Stream) []string { | func getDistinctApplicationIds(streams []acidv1.Stream) []string { | ||||||
| 	appIds := make([]string, 0) | 	appIds := make([]string, 0) | ||||||
| 	for _, stream := range streams { | 	for _, stream := range streams { | ||||||
| 		if !util.SliceContains(appIds, stream.ApplicationId) { | 		if !util.SliceContains(appIds, stream.ApplicationId) { | ||||||
|  | @ -85,9 +90,10 @@ func gatherApplicationIds(streams []acidv1.Stream) []string { | ||||||
| 	return appIds | 	return appIds | ||||||
| } | } | ||||||
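
The rename from gatherApplicationIds makes the dedup semantics explicit. A quick usage illustration with made-up stream specs:

    // Made-up input: three streams, two sharing an applicationId.
    ids := getDistinctApplicationIds([]acidv1.Stream{
    	{ApplicationId: "billing"},
    	{ApplicationId: "billing"},
    	{ApplicationId: "audit"},
    })
    // ids == []string{"billing", "audit"} (order of first appearance)
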
| 
 | 
 | ||||||
| func (c *Cluster) syncPublication(publication, dbName string, tables map[string]acidv1.StreamTable) error { | func (c *Cluster) syncPublication(dbName string, databaseSlotsList map[string]zalandov1.Slot, slotsToSync *map[string]map[string]string) error { | ||||||
| 	createPublications := make(map[string]string) | 	createPublications := make(map[string]string) | ||||||
| 	alterPublications := make(map[string]string) | 	alterPublications := make(map[string]string) | ||||||
|  | 	deletePublications := []string{} | ||||||
| 
 | 
 | ||||||
| 	defer func() { | 	defer func() { | ||||||
| 		if err := c.closeDbConn(); err != nil { | 		if err := c.closeDbConn(); err != nil { | ||||||
|  | @ -97,7 +103,7 @@ func (c *Cluster) syncPublication(publication, dbName string, tables map[string] | ||||||
| 
 | 
 | ||||||
| 	// check for existing publications
 | 	// check for existing publications
 | ||||||
| 	if err := c.initDbConnWithName(dbName); err != nil { | 	if err := c.initDbConnWithName(dbName); err != nil { | ||||||
| 		return fmt.Errorf("could not init database connection") | 		return fmt.Errorf("could not init database connection: %v", err) | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	currentPublications, err := c.getPublications() | 	currentPublications, err := c.getPublications() | ||||||
|  | @ -105,6 +111,8 @@ func (c *Cluster) syncPublication(publication, dbName string, tables map[string] | ||||||
| 		return fmt.Errorf("could not get current publications: %v", err) | 		return fmt.Errorf("could not get current publications: %v", err) | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
|  | 	for slotName, slotAndPublication := range databaseSlotsList { | ||||||
|  | 		tables := slotAndPublication.Publication | ||||||
| 		tableNames := make([]string, len(tables)) | 		tableNames := make([]string, len(tables)) | ||||||
| 		i := 0 | 		i := 0 | ||||||
| 		for t := range tables { | 		for t := range tables { | ||||||
|  | @ -115,26 +123,52 @@ func (c *Cluster) syncPublication(publication, dbName string, tables map[string] | ||||||
| 		sort.Strings(tableNames) | 		sort.Strings(tableNames) | ||||||
| 		tableList := strings.Join(tableNames, ", ") | 		tableList := strings.Join(tableNames, ", ") | ||||||
| 
 | 
 | ||||||
| 	currentTables, exists := currentPublications[publication] | 		currentTables, exists := currentPublications[slotName] | ||||||
| 		if !exists { | 		if !exists { | ||||||
| 		createPublications[publication] = tableList | 			createPublications[slotName] = tableList | ||||||
| 		} else if currentTables != tableList { | 		} else if currentTables != tableList { | ||||||
| 		alterPublications[publication] = tableList | 			alterPublications[slotName] = tableList | ||||||
|  | 		} else { | ||||||
|  | 			(*slotsToSync)[slotName] = slotAndPublication.Slot | ||||||
|  | 		} | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	if len(createPublications)+len(alterPublications) == 0 { | 	// check for existing publications that no longer have a matching slot and must be dropped
 | ||||||
|  | 	for slotName := range currentPublications { | ||||||
|  | 		if _, exists := databaseSlotsList[slotName]; !exists { | ||||||
|  | 			deletePublications = append(deletePublications, slotName) | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	if len(createPublications)+len(alterPublications)+len(deletePublications) == 0 { | ||||||
| 		return nil | 		return nil | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
|  | 	errors := make([]string, 0) | ||||||
| 	for publicationName, tables := range createPublications { | 	for publicationName, tables := range createPublications { | ||||||
| 		if err = c.executeCreatePublication(publicationName, tables); err != nil { | 		if err = c.executeCreatePublication(publicationName, tables); err != nil { | ||||||
| 			return fmt.Errorf("creation of publication %q failed: %v", publicationName, err) | 			errors = append(errors, fmt.Sprintf("creation of publication %q failed: %v", publicationName, err)) | ||||||
|  | 			continue | ||||||
| 		} | 		} | ||||||
|  | 		(*slotsToSync)[publicationName] = databaseSlotsList[publicationName].Slot | ||||||
| 	} | 	} | ||||||
| 	for publicationName, tables := range alterPublications { | 	for publicationName, tables := range alterPublications { | ||||||
| 		if err = c.executeAlterPublication(publicationName, tables); err != nil { | 		if err = c.executeAlterPublication(publicationName, tables); err != nil { | ||||||
| 			return fmt.Errorf("update of publication %q failed: %v", publicationName, err) | 			errors = append(errors, fmt.Sprintf("update of publication %q failed: %v", publicationName, err)) | ||||||
|  | 			continue | ||||||
| 		} | 		} | ||||||
|  | 		(*slotsToSync)[publicationName] = databaseSlotsList[publicationName].Slot | ||||||
|  | 	} | ||||||
|  | 	for _, publicationName := range deletePublications { | ||||||
|  | 		if err = c.executeDropPublication(publicationName); err != nil { | ||||||
|  | 			errors = append(errors, fmt.Sprintf("deletion of publication %q failed: %v", publicationName, err)) | ||||||
|  | 			continue | ||||||
|  | 		} | ||||||
|  | 		(*slotsToSync)[publicationName] = nil | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	if len(errors) > 0 { | ||||||
|  | 		return fmt.Errorf("%v", strings.Join(errors, `', '`)) | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	return nil | 	return nil | ||||||
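
Stripped of the SQL execution and the slotsToSync bookkeeping, syncPublication's classification is a three-way diff between the publications currently in the database and those the slots require. A standalone sketch of just that diff (hypothetical helper, not operator code):

    // Hypothetical standalone version of the create/alter/drop classification.
    func diffPublications(current, desired map[string]string) (create, alter, drop []string) {
    	for name, tables := range desired {
    		if curTables, ok := current[name]; !ok {
    			create = append(create, name) // publication missing entirely
    		} else if curTables != tables {
    			alter = append(alter, name) // table list differs
    		}
    	}
    	for name := range current {
    		if _, ok := desired[name]; !ok {
    			drop = append(drop, name) // no slot wants this publication anymore
    		}
    	}
    	return create, alter, drop
    }
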
|  | @ -172,7 +206,6 @@ func (c *Cluster) generateFabricEventStream(appId string) *zalandov1.FabricEvent | ||||||
| 			Namespace:       c.Namespace, | 			Namespace:       c.Namespace, | ||||||
| 			Labels:          c.labelsSet(true), | 			Labels:          c.labelsSet(true), | ||||||
| 			Annotations:     c.AnnotationsToPropagate(c.annotationsSet(nil)), | 			Annotations:     c.AnnotationsToPropagate(c.annotationsSet(nil)), | ||||||
| 			// make cluster StatefulSet the owner (like with connection pooler objects)
 |  | ||||||
| 			OwnerReferences: c.ownerReferences(), | 			OwnerReferences: c.ownerReferences(), | ||||||
| 		}, | 		}, | ||||||
| 		Spec: zalandov1.FabricEventStreamSpec{ | 		Spec: zalandov1.FabricEventStreamSpec{ | ||||||
|  | @ -279,55 +312,73 @@ func (c *Cluster) syncStreams() error { | ||||||
| 		return nil | 		return nil | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	slots := make(map[string]map[string]string) | 	databaseSlots := make(map[string]map[string]zalandov1.Slot) | ||||||
| 	slotsToSync := make(map[string]map[string]string) | 	slotsToSync := make(map[string]map[string]string) | ||||||
| 	publications := make(map[string]map[string]acidv1.StreamTable) |  | ||||||
| 	requiredPatroniConfig := c.Spec.Patroni | 	requiredPatroniConfig := c.Spec.Patroni | ||||||
| 
 | 
 | ||||||
| 	if len(requiredPatroniConfig.Slots) > 0 { | 	if len(requiredPatroniConfig.Slots) > 0 { | ||||||
| 		slots = requiredPatroniConfig.Slots | 		for slotName, slotConfig := range requiredPatroniConfig.Slots { | ||||||
|  | 			slotsToSync[slotName] = slotConfig | ||||||
|  | 		} | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	// gather list of required slots and publications
 | 	if err := c.initDbConn(); err != nil { | ||||||
|  | 		return fmt.Errorf("could not init database connection: %v", err) | ||||||
|  | 	} | ||||||
|  | 	defer func() { | ||||||
|  | 		if err := c.closeDbConn(); err != nil { | ||||||
|  | 			c.logger.Errorf("could not close database connection: %v", err) | ||||||
|  | 		} | ||||||
|  | 	}() | ||||||
|  | 	listDatabases, err := c.getDatabases() | ||||||
|  | 	if err != nil { | ||||||
|  | 		return fmt.Errorf("could not get list of databases: %v", err) | ||||||
|  | 	} | ||||||
|  | 	// collect every database name (except template0 and template1) with an empty slot map
 | ||||||
|  | 	for dbName := range listDatabases { | ||||||
|  | 		if dbName != "template0" && dbName != "template1" { | ||||||
|  | 			databaseSlots[dbName] = map[string]zalandov1.Slot{} | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	// get list of required slots and publications, group by database
 | ||||||
| 	for _, stream := range c.Spec.Streams { | 	for _, stream := range c.Spec.Streams { | ||||||
|  | 		if _, exists := databaseSlots[stream.Database]; !exists { | ||||||
|  | 			c.logger.Warningf("database %q does not exist in the cluster, skipping stream", stream.Database) | ||||||
|  | 			continue | ||||||
|  | 		} | ||||||
| 		slot := map[string]string{ | 		slot := map[string]string{ | ||||||
| 			"database": stream.Database, | 			"database": stream.Database, | ||||||
| 			"plugin":   constants.EventStreamSourcePluginType, | 			"plugin":   constants.EventStreamSourcePluginType, | ||||||
| 			"type":     "logical", | 			"type":     "logical", | ||||||
| 		} | 		} | ||||||
| 		slotName := getSlotName(stream.Database, stream.ApplicationId) | 		slotName := getSlotName(stream.Database, stream.ApplicationId) | ||||||
| 		if _, exists := slots[slotName]; !exists { | 		if _, exists := databaseSlots[stream.Database][slotName]; !exists { | ||||||
| 			slots[slotName] = slot | 			databaseSlots[stream.Database][slotName] = zalandov1.Slot{ | ||||||
| 			publications[slotName] = stream.Tables | 				Slot:        slot, | ||||||
|  | 				Publication: stream.Tables, | ||||||
|  | 			} | ||||||
| 		} else { | 		} else { | ||||||
| 			streamTables := publications[slotName] | 			slotAndPublication := databaseSlots[stream.Database][slotName] | ||||||
|  | 			streamTables := slotAndPublication.Publication | ||||||
| 			for tableName, table := range stream.Tables { | 			for tableName, table := range stream.Tables { | ||||||
| 				if _, exists := streamTables[tableName]; !exists { | 				if _, exists := streamTables[tableName]; !exists { | ||||||
| 					streamTables[tableName] = table | 					streamTables[tableName] = table | ||||||
| 				} | 				} | ||||||
| 			} | 			} | ||||||
| 			publications[slotName] = streamTables | 			slotAndPublication.Publication = streamTables | ||||||
|  | 			databaseSlots[stream.Database][slotName] = slotAndPublication | ||||||
| 		} | 		} | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	// create publications to each created slot
 | 	// sync publication in a database
 | ||||||
| 	c.logger.Debug("syncing database publications") | 	c.logger.Debug("syncing database publications") | ||||||
| 	for publication, tables := range publications { | 	for dbName, databaseSlotsList := range databaseSlots { | ||||||
| 		// but first check for existing publications
 | 		err := c.syncPublication(dbName, databaseSlotsList, &slotsToSync) | ||||||
| 		dbName := slots[publication]["database"] |  | ||||||
| 		err = c.syncPublication(publication, dbName, tables) |  | ||||||
| 		if err != nil { | 		if err != nil { | ||||||
| 			c.logger.Warningf("could not sync publication %q in database %q: %v", publication, dbName, err) | 			c.logger.Warningf("could not sync all publications in database %q: %v", dbName, err) | ||||||
| 			continue | 			continue | ||||||
| 		} | 		} | ||||||
| 		slotsToSync[publication] = slots[publication] |  | ||||||
| 	} |  | ||||||
| 
 |  | ||||||
| 	// no slots to sync = no streams defined or publications created
 |  | ||||||
| 	if len(slotsToSync) > 0 { |  | ||||||
| 		requiredPatroniConfig.Slots = slotsToSync |  | ||||||
| 	} else { |  | ||||||
| 		return nil |  | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	c.logger.Debug("syncing logical replication slots") | 	c.logger.Debug("syncing logical replication slots") | ||||||
|  | @ -337,70 +388,135 @@ func (c *Cluster) syncStreams() error { | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	// sync logical replication slots in Patroni config
 | 	// sync logical replication slots in Patroni config
 | ||||||
|  | 	requiredPatroniConfig.Slots = slotsToSync | ||||||
| 	configPatched, _, _, err := c.syncPatroniConfig(pods, requiredPatroniConfig, nil) | 	configPatched, _, _, err := c.syncPatroniConfig(pods, requiredPatroniConfig, nil) | ||||||
| 	if err != nil { | 	if err != nil { | ||||||
| 		c.logger.Warningf("Patroni config updated? %v - errors during config sync: %v", configPatched, err) | 		c.logger.Warningf("Patroni config updated? %v - errors during config sync: %v", configPatched, err) | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	// finally sync stream CRDs
 | 	// finally sync stream CRDs
 | ||||||
| 	err = c.createOrUpdateStreams() | 	// get distinct application IDs from streams section
 | ||||||
| 	if err != nil { | 	// there will be a separate event stream resource for each ID
 | ||||||
| 		return err | 	appIds := getDistinctApplicationIds(c.Spec.Streams) | ||||||
|  | 	for _, appId := range appIds { | ||||||
|  | 		if hasSlotsInSync(appId, databaseSlots, slotsToSync) { | ||||||
|  | 			if err = c.syncStream(appId); err != nil { | ||||||
|  | 				c.logger.Warningf("could not sync event streams with applicationId %s: %v", appId, err) | ||||||
|  | 			} | ||||||
|  | 		} else { | ||||||
|  | 			c.logger.Warningf("database replication slots %#v for streams with applicationId %s are not in sync, skipping event stream sync", slotsToSync, appId) | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	// delete stream CRDs whose applicationId no longer appears in the manifest
 | ||||||
|  | 	if err = c.cleanupRemovedStreams(appIds); err != nil { | ||||||
|  | 		return fmt.Errorf("%v", err) | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	return nil | 	return nil | ||||||
| } | } | ||||||
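
Everything in syncStreams is keyed by getSlotName(database, applicationId), defined outside this diff. Judging from the slotName fixture in the stream tests further below, it plausibly amounts to:

    // Inferred from the test fixture; treat as a sketch rather than the source.
    func getSlotName(dbName, appId string) string {
    	return fmt.Sprintf("%s_%s_%s", constants.EventStreamSourceSlotPrefix,
    		dbName, strings.Replace(appId, "-", "_", -1))
    }
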
| 
 | 
 | ||||||
| func (c *Cluster) createOrUpdateStreams() error { | func hasSlotsInSync(appId string, databaseSlots map[string]map[string]zalandov1.Slot, slotsToSync map[string]map[string]string) bool { | ||||||
| 
 | 	allSlotsInSync := true | ||||||
| 	// fetch different application IDs from streams section
 | 	for dbName, slots := range databaseSlots { | ||||||
| 	// there will be a separate event stream resource for each ID
 | 		for slotName := range slots { | ||||||
| 	appIds := gatherApplicationIds(c.Spec.Streams) | 			if slotName == getSlotName(dbName, appId) { | ||||||
| 
 | 				if slot, exists := slotsToSync[slotName]; !exists || slot == nil { | ||||||
| 	// list all existing stream CRDs
 | 					allSlotsInSync = false | ||||||
| 	listOptions := metav1.ListOptions{ | 					continue | ||||||
| 		LabelSelector: c.labelsSet(true).String(), |  | ||||||
| 				} | 				} | ||||||
| 	streams, err := c.KubeClient.FabricEventStreams(c.Namespace).List(context.TODO(), listOptions) | 			} | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	return allSlotsInSync | ||||||
|  | } | ||||||
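
So a stream CRD is only synced once every slot required for its applicationId has landed in slotsToSync with a non-nil config. A usage example with made-up names, assuming the slot-name prefix resolves to "fes":

    // Made-up data: one required slot for appId "billing" in database "shop".
    required := map[string]map[string]zalandov1.Slot{
    	"shop": {"fes_shop_billing": {}},
    }
    synced := map[string]map[string]string{
    	"fes_shop_billing": {"type": "logical"},
    }
    ok := hasSlotsInSync("billing", required, synced) // true
    // If the publication sync had failed, synced["fes_shop_billing"] would be
    // nil and hasSlotsInSync would return false.
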
|  | 
 | ||||||
|  | func (c *Cluster) syncStream(appId string) error { | ||||||
|  | 	var ( | ||||||
|  | 		streams *zalandov1.FabricEventStreamList | ||||||
|  | 		err     error | ||||||
|  | 	) | ||||||
|  | 	c.setProcessName("syncing stream with applicationId %s", appId) | ||||||
|  | 	c.logger.Debugf("syncing stream with applicationId %s", appId) | ||||||
|  | 
 | ||||||
|  | 	listOptions := metav1.ListOptions{LabelSelector: c.labelsSet(true).String()} | ||||||
|  | 	streams, err = c.KubeClient.FabricEventStreams(c.Namespace).List(context.TODO(), listOptions) | ||||||
| 	if err != nil { | 	if err != nil { | ||||||
| 		return fmt.Errorf("could not list of FabricEventStreams: %v", err) | 		return fmt.Errorf("could not list FabricEventStreams for applicationId %s: %v", appId, err) | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	for _, appId := range appIds { |  | ||||||
| 	streamExists := false | 	streamExists := false | ||||||
| 
 |  | ||||||
| 		// update stream when it exists and EventStreams array differs
 |  | ||||||
| 	for _, stream := range streams.Items { | 	for _, stream := range streams.Items { | ||||||
| 			if appId == stream.Spec.ApplicationId { | 		if stream.Spec.ApplicationId != appId { | ||||||
| 				streamExists = true | 			continue | ||||||
| 				desiredStreams := c.generateFabricEventStream(appId) |  | ||||||
| 				if match, reason := sameStreams(stream.Spec.EventStreams, desiredStreams.Spec.EventStreams); !match { |  | ||||||
| 					c.logger.Debugf("updating event streams: %s", reason) |  | ||||||
| 					desiredStreams.ObjectMeta = stream.ObjectMeta |  | ||||||
| 					err = c.updateStreams(desiredStreams) |  | ||||||
| 					if err != nil { |  | ||||||
| 						return fmt.Errorf("failed updating event stream %s: %v", stream.Name, err) |  | ||||||
| 		} | 		} | ||||||
| 					c.logger.Infof("event stream %q has been successfully updated", stream.Name) | 		if streamExists { | ||||||
|  | 			c.logger.Warningf("more than one event stream with applicationId %s found, deleting the redundant one", appId) | ||||||
|  | 			if err = c.KubeClient.FabricEventStreams(stream.ObjectMeta.Namespace).Delete(context.TODO(), stream.ObjectMeta.Name, metav1.DeleteOptions{}); err != nil { | ||||||
|  | 				c.logger.Errorf("could not delete event stream %q with applicationId %s: %v", stream.ObjectMeta.Name, appId, err) | ||||||
|  | 			} else { | ||||||
|  | 				c.logger.Infof("redundant event stream %q with applicationId %s has been successfully deleted", stream.ObjectMeta.Name, appId) | ||||||
| 			} | 			} | ||||||
| 			continue | 			continue | ||||||
| 		} | 		} | ||||||
|  | 		streamExists = true | ||||||
|  | 		desiredStreams := c.generateFabricEventStream(appId) | ||||||
|  | 		if !reflect.DeepEqual(stream.ObjectMeta.OwnerReferences, desiredStreams.ObjectMeta.OwnerReferences) { | ||||||
|  | 			c.logger.Infof("owner references of event streams with applicationId %s do not match the current ones", appId) | ||||||
|  | 			stream.ObjectMeta.OwnerReferences = desiredStreams.ObjectMeta.OwnerReferences | ||||||
|  | 			c.setProcessName("updating event streams with applicationId %s", appId) | ||||||
|  | 			stream, err := c.KubeClient.FabricEventStreams(stream.Namespace).Update(context.TODO(), &stream, metav1.UpdateOptions{}) | ||||||
|  | 			if err != nil { | ||||||
|  | 				return fmt.Errorf("could not update event streams with applicationId %s: %v", appId, err) | ||||||
|  | 			} | ||||||
|  | 			c.Streams[appId] = stream | ||||||
|  | 		} | ||||||
|  | 		if match, reason := c.compareStreams(&stream, desiredStreams); !match { | ||||||
|  | 			c.logger.Debugf("updating event streams with applicationId %s: %s", appId, reason) | ||||||
|  | 			desiredStreams.ObjectMeta = stream.ObjectMeta | ||||||
|  | 			updatedStream, err := c.updateStreams(desiredStreams) | ||||||
|  | 			if err != nil { | ||||||
|  | 				return fmt.Errorf("failed updating event streams %s with applicationId %s: %v", stream.Name, appId, err) | ||||||
|  | 			} | ||||||
|  | 			c.Streams[appId] = updatedStream | ||||||
|  | 			c.logger.Infof("event streams %q with applicationId %s have been successfully updated", updatedStream.Name, appId) | ||||||
|  | 		} | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	if !streamExists { | 	if !streamExists { | ||||||
| 		c.logger.Infof("event streams with applicationId %s do not exist, creating them", appId) | 		c.logger.Infof("event streams with applicationId %s do not exist, creating them", appId) | ||||||
| 			streamCRD, err := c.createStreams(appId) | 		createdStream, err := c.createStreams(appId) | ||||||
| 		if err != nil { | 		if err != nil { | ||||||
| 			return fmt.Errorf("failed creating event streams with applicationId %s: %v", appId, err) | 			return fmt.Errorf("failed creating event streams with applicationId %s: %v", appId, err) | ||||||
| 		} | 		} | ||||||
| 			c.logger.Infof("event streams %q have been successfully created", streamCRD.Name) | 		c.logger.Infof("event streams %q have been successfully created", createdStream.Name) | ||||||
| 		} | 		c.Streams[appId] = createdStream | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	return nil | 	return nil | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| func sameStreams(curEventStreams, newEventStreams []zalandov1.EventStream) (match bool, reason string) { | func (c *Cluster) compareStreams(curEventStreams, newEventStreams *zalandov1.FabricEventStream) (match bool, reason string) { | ||||||
|  | 	reasons := make([]string, 0) | ||||||
|  | 	match = true | ||||||
|  | 
 | ||||||
|  | 	// the stream operator can add extra annotations, so include the current annotations in the desired set
 | ||||||
|  | 	desiredAnnotations := c.annotationsSet(curEventStreams.Annotations) | ||||||
|  | 	if changed, reason := c.compareAnnotations(curEventStreams.ObjectMeta.Annotations, desiredAnnotations); changed { | ||||||
|  | 		match = false | ||||||
|  | 		reasons = append(reasons, fmt.Sprintf("new streams annotations do not match: %s", reason)) | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	if same, reason := sameEventStreams(curEventStreams.Spec.EventStreams, newEventStreams.Spec.EventStreams); !same { | ||||||
|  | 		match = false | ||||||
|  | 		reasons = append(reasons, fmt.Sprintf("new streams EventStreams array does not match: %s", reason)) | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	return match, strings.Join(reasons, ", ") | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func sameEventStreams(curEventStreams, newEventStreams []zalandov1.EventStream) (match bool, reason string) { | ||||||
| 	if len(newEventStreams) != len(curEventStreams) { | 	if len(newEventStreams) != len(curEventStreams) { | ||||||
| 		return false, "number of defined streams is different" | 		return false, "number of defined streams is different" | ||||||
| 	} | 	} | ||||||
|  | @ -424,3 +540,23 @@ func sameStreams(curEventStreams, newEventStreams []zalandov1.EventStream) (matc | ||||||
| 
 | 
 | ||||||
| 	return true, "" | 	return true, "" | ||||||
| } | } | ||||||
|  | 
 | ||||||
|  | func (c *Cluster) cleanupRemovedStreams(appIds []string) error { | ||||||
|  | 	errors := make([]string, 0) | ||||||
|  | 	for appId := range c.Streams { | ||||||
|  | 		if !util.SliceContains(appIds, appId) { | ||||||
|  | 			c.logger.Infof("event streams with applicationId %s no longer exist in the manifest, deleting them", appId) | ||||||
|  | 			err := c.deleteStream(appId) | ||||||
|  | 			if err != nil { | ||||||
|  | 				errors = append(errors, fmt.Sprintf("failed deleting event streams with applicationId %s: %v", appId, err)) | ||||||
|  | 				continue | ||||||
|  | 			} | ||||||
|  | 			c.logger.Infof("event streams with applicationId %s have been successfully deleted", appId) | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	if len(errors) > 0 { | ||||||
|  | 		return fmt.Errorf("could not delete all removed event streams: %v", strings.Join(errors, `', '`)) | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	return nil | ||||||
|  | } | ||||||
|  |  | ||||||
|  | @ -2,6 +2,7 @@ package cluster | ||||||
| 
 | 
 | ||||||
| import ( | import ( | ||||||
| 	"fmt" | 	"fmt" | ||||||
|  | 	"reflect" | ||||||
| 	"strings" | 	"strings" | ||||||
| 
 | 
 | ||||||
| 	"context" | 	"context" | ||||||
|  | @ -18,29 +19,25 @@ import ( | ||||||
| 
 | 
 | ||||||
| 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" | 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" | ||||||
| 	"k8s.io/apimachinery/pkg/types" | 	"k8s.io/apimachinery/pkg/types" | ||||||
| 	"k8s.io/client-go/kubernetes/fake" |  | ||||||
| ) | ) | ||||||
| 
 | 
 | ||||||
| func newFakeK8sStreamClient() (k8sutil.KubernetesClient, *fake.Clientset) { |  | ||||||
| 	zalandoClientSet := fakezalandov1.NewSimpleClientset() |  | ||||||
| 	clientSet := fake.NewSimpleClientset() |  | ||||||
| 
 |  | ||||||
| 	return k8sutil.KubernetesClient{ |  | ||||||
| 		FabricEventStreamsGetter: zalandoClientSet.ZalandoV1(), |  | ||||||
| 		PostgresqlsGetter:        zalandoClientSet.AcidV1(), |  | ||||||
| 		PodsGetter:               clientSet.CoreV1(), |  | ||||||
| 		StatefulSetsGetter:       clientSet.AppsV1(), |  | ||||||
| 	}, clientSet |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| var ( | var ( | ||||||
| 	clusterName string = "acid-test-cluster" | 	clusterName string = "acid-stream-cluster" | ||||||
| 	namespace   string = "default" | 	namespace   string = "default" | ||||||
| 	appId       string = "test-app" | 	appId       string = "test-app" | ||||||
| 	dbName      string = "foo" | 	dbName      string = "foo" | ||||||
| 	fesUser     string = fmt.Sprintf("%s%s", constants.EventStreamSourceSlotPrefix, constants.UserRoleNameSuffix) | 	fesUser     string = fmt.Sprintf("%s%s", constants.EventStreamSourceSlotPrefix, constants.UserRoleNameSuffix) | ||||||
| 	slotName    string = fmt.Sprintf("%s_%s_%s", constants.EventStreamSourceSlotPrefix, dbName, strings.Replace(appId, "-", "_", -1)) | 	slotName    string = fmt.Sprintf("%s_%s_%s", constants.EventStreamSourceSlotPrefix, dbName, strings.Replace(appId, "-", "_", -1)) | ||||||
| 
 | 
 | ||||||
|  | 	zalandoClientSet = fakezalandov1.NewSimpleClientset() | ||||||
|  | 
 | ||||||
|  | 	client = k8sutil.KubernetesClient{ | ||||||
|  | 		FabricEventStreamsGetter: zalandoClientSet.ZalandoV1(), | ||||||
|  | 		PostgresqlsGetter:        zalandoClientSet.AcidV1(), | ||||||
|  | 		PodsGetter:               clientSet.CoreV1(), | ||||||
|  | 		StatefulSetsGetter:       clientSet.AppsV1(), | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
| 	pg = acidv1.Postgresql{ | 	pg = acidv1.Postgresql{ | ||||||
| 		TypeMeta: metav1.TypeMeta{ | 		TypeMeta: metav1.TypeMeta{ | ||||||
| 			Kind:       "Postgresql", | 			Kind:       "Postgresql", | ||||||
|  | @ -91,6 +88,11 @@ var ( | ||||||
| 		ObjectMeta: metav1.ObjectMeta{ | 		ObjectMeta: metav1.ObjectMeta{ | ||||||
| 			Name:      fmt.Sprintf("%s-12345", clusterName), | 			Name:      fmt.Sprintf("%s-12345", clusterName), | ||||||
| 			Namespace: namespace, | 			Namespace: namespace, | ||||||
|  | 			Labels: map[string]string{ | ||||||
|  | 				"application":  "spilo", | ||||||
|  | 				"cluster-name": fmt.Sprintf("%s-2", clusterName), | ||||||
|  | 				"team":         "acid", | ||||||
|  | 			}, | ||||||
| 			OwnerReferences: []metav1.OwnerReference{ | 			OwnerReferences: []metav1.OwnerReference{ | ||||||
| 				metav1.OwnerReference{ | 				metav1.OwnerReference{ | ||||||
| 					APIVersion: "apps/v1", | 					APIVersion: "apps/v1", | ||||||
|  | @ -181,21 +183,8 @@ var ( | ||||||
| 			}, | 			}, | ||||||
| 		}, | 		}, | ||||||
| 	} | 	} | ||||||
| ) |  | ||||||
| 
 | 
 | ||||||
| func TestGatherApplicationIds(t *testing.T) { | 	cluster = New( | ||||||
| 	testAppIds := []string{appId} |  | ||||||
| 	appIds := gatherApplicationIds(pg.Spec.Streams) |  | ||||||
| 
 |  | ||||||
| 	if !util.IsEqualIgnoreOrder(testAppIds, appIds) { |  | ||||||
| 		t.Errorf("gathered applicationIds do not match, expected %#v, got %#v", testAppIds, appIds) |  | ||||||
| 	} |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| func TestGenerateFabricEventStream(t *testing.T) { |  | ||||||
| 	client, _ := newFakeK8sStreamClient() |  | ||||||
| 
 |  | ||||||
| 	var cluster = New( |  | ||||||
| 		Config{ | 		Config{ | ||||||
| 			OpConfig: config.Config{ | 			OpConfig: config.Config{ | ||||||
| 				Auth: config.Auth{ | 				Auth: config.Auth{ | ||||||
|  | @ -213,21 +202,249 @@ func TestGenerateFabricEventStream(t *testing.T) { | ||||||
| 				}, | 				}, | ||||||
| 			}, | 			}, | ||||||
| 		}, client, pg, logger, eventRecorder) | 		}, client, pg, logger, eventRecorder) | ||||||
|  | ) | ||||||
| 
 | 
 | ||||||
|  | func TestGatherApplicationIds(t *testing.T) { | ||||||
|  | 	testAppIds := []string{appId} | ||||||
|  | 	appIds := getDistinctApplicationIds(pg.Spec.Streams) | ||||||
|  | 
 | ||||||
|  | 	if !util.IsEqualIgnoreOrder(testAppIds, appIds) { | ||||||
|  | 		t.Errorf("list of applicationIds does not match, expected %#v, got %#v", testAppIds, appIds) | ||||||
|  | 	} | ||||||
|  | } | ||||||
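TestGatherApplicationIds pins down only that getDistinctApplicationIds returns each applicationId from the manifest's stream entries once. A hedged sketch consistent with that contract (the name and the ordering choice are assumptions, not the operator's code):

```go
// Sketch only: deduplicates ApplicationId values, keeping first-seen order.
// Assumes acidv1 = "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1".
func getDistinctApplicationIdsSketch(streams []acidv1.Stream) []string {
	seen := make(map[string]struct{}, len(streams))
	ids := make([]string, 0, len(streams))
	for _, stream := range streams {
		if _, ok := seen[stream.ApplicationId]; !ok {
			seen[stream.ApplicationId] = struct{}{}
			ids = append(ids, stream.ApplicationId)
		}
	}
	return ids
}
```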
|  | 
 | ||||||
|  | func TestHasSlotsInSync(t *testing.T) { | ||||||
| 	cluster.Name = clusterName | 	cluster.Name = clusterName | ||||||
| 	cluster.Namespace = namespace | 	cluster.Namespace = namespace | ||||||
| 
 | 
 | ||||||
| 	// create statefulset to have ownerReference for streams
 | 	appId2 := fmt.Sprintf("%s-2", appId) | ||||||
| 	_, err := cluster.createStatefulSet() | 	dbNotExists := "dbnotexists" | ||||||
| 	assert.NoError(t, err) | 	slotNotExists := fmt.Sprintf("%s_%s_%s", constants.EventStreamSourceSlotPrefix, dbNotExists, strings.Replace(appId, "-", "_", -1)) | ||||||
|  | 	slotNotExistsAppId2 := fmt.Sprintf("%s_%s_%s", constants.EventStreamSourceSlotPrefix, dbNotExists, strings.Replace(appId2, "-", "_", -1)) | ||||||
|  | 
 | ||||||
|  | 	tests := []struct { | ||||||
|  | 		subTest       string | ||||||
|  | 		applicationId string | ||||||
|  | 		expectedSlots map[string]map[string]zalandov1.Slot | ||||||
|  | 		actualSlots   map[string]map[string]string | ||||||
|  | 		slotsInSync   bool | ||||||
|  | 	}{ | ||||||
|  | 		{ | ||||||
|  | 			subTest:       fmt.Sprintf("slots in sync for applicationId %s", appId), | ||||||
|  | 			applicationId: appId, | ||||||
|  | 			expectedSlots: map[string]map[string]zalandov1.Slot{ | ||||||
|  | 				dbName: { | ||||||
|  | 					slotName: zalandov1.Slot{ | ||||||
|  | 						Slot: map[string]string{ | ||||||
|  | 							"databases": dbName, | ||||||
|  | 							"plugin":    constants.EventStreamSourcePluginType, | ||||||
|  | 							"type":      "logical", | ||||||
|  | 						}, | ||||||
|  | 						Publication: map[string]acidv1.StreamTable{ | ||||||
|  | 							"test1": acidv1.StreamTable{ | ||||||
|  | 								EventType: "stream-type-a", | ||||||
|  | 							}, | ||||||
|  | 						}, | ||||||
|  | 					}, | ||||||
|  | 				}, | ||||||
|  | 			}, | ||||||
|  | 			actualSlots: map[string]map[string]string{ | ||||||
|  | 				slotName: map[string]string{ | ||||||
|  | 					"databases": dbName, | ||||||
|  | 					"plugin":    constants.EventStreamSourcePluginType, | ||||||
|  | 					"type":      "logical", | ||||||
|  | 				}, | ||||||
|  | 			}, | ||||||
|  | 			slotsInSync: true, | ||||||
|  | 		}, { | ||||||
|  | 			subTest:       fmt.Sprintf("slots empty for applicationId %s after create or update of publication failed", appId), | ||||||
|  | 			applicationId: appId, | ||||||
|  | 			expectedSlots: map[string]map[string]zalandov1.Slot{ | ||||||
|  | 				dbNotExists: { | ||||||
|  | 					slotNotExists: zalandov1.Slot{ | ||||||
|  | 						Slot: map[string]string{ | ||||||
|  | 							"databases": dbName, | ||||||
|  | 							"plugin":    constants.EventStreamSourcePluginType, | ||||||
|  | 							"type":      "logical", | ||||||
|  | 						}, | ||||||
|  | 						Publication: map[string]acidv1.StreamTable{ | ||||||
|  | 							"test1": acidv1.StreamTable{ | ||||||
|  | 								EventType: "stream-type-a", | ||||||
|  | 							}, | ||||||
|  | 						}, | ||||||
|  | 					}, | ||||||
|  | 				}, | ||||||
|  | 			}, | ||||||
|  | 			actualSlots: map[string]map[string]string{}, | ||||||
|  | 			slotsInSync: false, | ||||||
|  | 		}, { | ||||||
|  | 			subTest:       fmt.Sprintf("slot with empty definition for applicationId %s after publication git deleted", appId), | ||||||
|  | 			applicationId: appId, | ||||||
|  | 			expectedSlots: map[string]map[string]zalandov1.Slot{ | ||||||
|  | 				dbNotExists: { | ||||||
|  | 					slotNotExists: zalandov1.Slot{ | ||||||
|  | 						Slot: map[string]string{ | ||||||
|  | 							"databases": dbName, | ||||||
|  | 							"plugin":    constants.EventStreamSourcePluginType, | ||||||
|  | 							"type":      "logical", | ||||||
|  | 						}, | ||||||
|  | 						Publication: map[string]acidv1.StreamTable{ | ||||||
|  | 							"test1": acidv1.StreamTable{ | ||||||
|  | 								EventType: "stream-type-a", | ||||||
|  | 							}, | ||||||
|  | 						}, | ||||||
|  | 					}, | ||||||
|  | 				}, | ||||||
|  | 			}, | ||||||
|  | 			actualSlots: map[string]map[string]string{ | ||||||
|  | 				slotName: nil, | ||||||
|  | 			}, | ||||||
|  | 			slotsInSync: false, | ||||||
|  | 		}, { | ||||||
|  | 			subTest:       fmt.Sprintf("one slot not in sync for applicationId %s because database does not exist", appId), | ||||||
|  | 			applicationId: appId, | ||||||
|  | 			expectedSlots: map[string]map[string]zalandov1.Slot{ | ||||||
|  | 				dbName: { | ||||||
|  | 					slotName: zalandov1.Slot{ | ||||||
|  | 						Slot: map[string]string{ | ||||||
|  | 							"databases": dbName, | ||||||
|  | 							"plugin":    constants.EventStreamSourcePluginType, | ||||||
|  | 							"type":      "logical", | ||||||
|  | 						}, | ||||||
|  | 						Publication: map[string]acidv1.StreamTable{ | ||||||
|  | 							"test1": acidv1.StreamTable{ | ||||||
|  | 								EventType: "stream-type-a", | ||||||
|  | 							}, | ||||||
|  | 						}, | ||||||
|  | 					}, | ||||||
|  | 				}, | ||||||
|  | 				dbNotExists: { | ||||||
|  | 					slotNotExists: zalandov1.Slot{ | ||||||
|  | 						Slot: map[string]string{ | ||||||
|  | 							"databases": "dbnotexists", | ||||||
|  | 							"plugin":    constants.EventStreamSourcePluginType, | ||||||
|  | 							"type":      "logical", | ||||||
|  | 						}, | ||||||
|  | 						Publication: map[string]acidv1.StreamTable{ | ||||||
|  | 							"test2": acidv1.StreamTable{ | ||||||
|  | 								EventType: "stream-type-b", | ||||||
|  | 							}, | ||||||
|  | 						}, | ||||||
|  | 					}, | ||||||
|  | 				}, | ||||||
|  | 			}, | ||||||
|  | 			actualSlots: map[string]map[string]string{ | ||||||
|  | 				slotName: map[string]string{ | ||||||
|  | 					"databases": dbName, | ||||||
|  | 					"plugin":    constants.EventStreamSourcePluginType, | ||||||
|  | 					"type":      "logical", | ||||||
|  | 				}, | ||||||
|  | 			}, | ||||||
|  | 			slotsInSync: false, | ||||||
|  | 		}, { | ||||||
|  | 			subTest:       fmt.Sprintf("slots in sync for applicationId %s, but not for %s - checking %s should return true", appId, appId2, appId), | ||||||
|  | 			applicationId: appId, | ||||||
|  | 			expectedSlots: map[string]map[string]zalandov1.Slot{ | ||||||
|  | 				dbName: { | ||||||
|  | 					slotName: zalandov1.Slot{ | ||||||
|  | 						Slot: map[string]string{ | ||||||
|  | 							"databases": dbName, | ||||||
|  | 							"plugin":    constants.EventStreamSourcePluginType, | ||||||
|  | 							"type":      "logical", | ||||||
|  | 						}, | ||||||
|  | 						Publication: map[string]acidv1.StreamTable{ | ||||||
|  | 							"test1": acidv1.StreamTable{ | ||||||
|  | 								EventType: "stream-type-a", | ||||||
|  | 							}, | ||||||
|  | 						}, | ||||||
|  | 					}, | ||||||
|  | 				}, | ||||||
|  | 				dbNotExists: { | ||||||
|  | 					slotNotExistsAppId2: zalandov1.Slot{ | ||||||
|  | 						Slot: map[string]string{ | ||||||
|  | 							"databases": "dbnotexists", | ||||||
|  | 							"plugin":    constants.EventStreamSourcePluginType, | ||||||
|  | 							"type":      "logical", | ||||||
|  | 						}, | ||||||
|  | 						Publication: map[string]acidv1.StreamTable{ | ||||||
|  | 							"test2": acidv1.StreamTable{ | ||||||
|  | 								EventType: "stream-type-b", | ||||||
|  | 							}, | ||||||
|  | 						}, | ||||||
|  | 					}, | ||||||
|  | 				}, | ||||||
|  | 			}, | ||||||
|  | 			actualSlots: map[string]map[string]string{ | ||||||
|  | 				slotName: map[string]string{ | ||||||
|  | 					"databases": dbName, | ||||||
|  | 					"plugin":    constants.EventStreamSourcePluginType, | ||||||
|  | 					"type":      "logical", | ||||||
|  | 				}, | ||||||
|  | 			}, | ||||||
|  | 			slotsInSync: true, | ||||||
|  | 		}, { | ||||||
|  | 			subTest:       fmt.Sprintf("slots in sync for applicationId %s, but not for %s - checking %s should return false", appId, appId2, appId2), | ||||||
|  | 			applicationId: appId2, | ||||||
|  | 			expectedSlots: map[string]map[string]zalandov1.Slot{ | ||||||
|  | 				dbName: { | ||||||
|  | 					slotName: zalandov1.Slot{ | ||||||
|  | 						Slot: map[string]string{ | ||||||
|  | 							"databases": dbName, | ||||||
|  | 							"plugin":    constants.EventStreamSourcePluginType, | ||||||
|  | 							"type":      "logical", | ||||||
|  | 						}, | ||||||
|  | 						Publication: map[string]acidv1.StreamTable{ | ||||||
|  | 							"test1": acidv1.StreamTable{ | ||||||
|  | 								EventType: "stream-type-a", | ||||||
|  | 							}, | ||||||
|  | 						}, | ||||||
|  | 					}, | ||||||
|  | 				}, | ||||||
|  | 				dbNotExists: { | ||||||
|  | 					slotNotExistsAppId2: zalandov1.Slot{ | ||||||
|  | 						Slot: map[string]string{ | ||||||
|  | 							"databases": "dbnotexists", | ||||||
|  | 							"plugin":    constants.EventStreamSourcePluginType, | ||||||
|  | 							"type":      "logical", | ||||||
|  | 						}, | ||||||
|  | 						Publication: map[string]acidv1.StreamTable{ | ||||||
|  | 							"test2": acidv1.StreamTable{ | ||||||
|  | 								EventType: "stream-type-b", | ||||||
|  | 							}, | ||||||
|  | 						}, | ||||||
|  | 					}, | ||||||
|  | 				}, | ||||||
|  | 			}, | ||||||
|  | 			actualSlots: map[string]map[string]string{ | ||||||
|  | 				slotName: map[string]string{ | ||||||
|  | 					"databases": dbName, | ||||||
|  | 					"plugin":    constants.EventStreamSourcePluginType, | ||||||
|  | 					"type":      "logical", | ||||||
|  | 				}, | ||||||
|  | 			}, | ||||||
|  | 			slotsInSync: false, | ||||||
|  | 		}, | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	for _, tt := range tests { | ||||||
|  | 		result := hasSlotsInSync(tt.applicationId, tt.expectedSlots, tt.actualSlots) | ||||||
|  | 		if result != tt.slotsInSync { | ||||||
|  | 			t.Errorf("%s: unexpected result for slot test of applicationId: %v, expected slots %#v, actual slots %#v", tt.subTest, tt.applicationId, tt.expectedSlots, tt.actualSlots) | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | } | ||||||
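Taken together, the cases above fix the contract of hasSlotsInSync: only slots whose name derives from the given applicationId are inspected, and each such slot must be present in the actual map with a non-nil definition. A sketch that satisfies all five cases — an approximation, not the operator's implementation, and it deliberately skips comparing individual slot fields since no case exercises that:

```go
// slotNameForSketch mirrors how the test derives slot names:
// prefix + "_" + database + "_" + applicationId, with dashes turned into underscores.
func slotNameForSketch(dbName, appId string) string {
	return fmt.Sprintf("%s_%s_%s", constants.EventStreamSourceSlotPrefix,
		dbName, strings.Replace(appId, "-", "_", -1))
}

// hasSlotsInSyncSketch reports whether every expected slot belonging to appId
// exists in the actual slot map with a non-empty definition.
func hasSlotsInSyncSketch(appId string, expected map[string]map[string]zalandov1.Slot,
	actual map[string]map[string]string) bool {
	inSync := true
	for dbName, slots := range expected {
		for name := range slots {
			if name != slotNameForSketch(dbName, appId) {
				continue // slot belongs to another application id
			}
			if slot, ok := actual[name]; !ok || slot == nil {
				inSync = false // slot missing, or publication left an empty definition
			}
		}
	}
	return inSync
}
```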
|  | 
 | ||||||
|  | func TestGenerateFabricEventStream(t *testing.T) { | ||||||
|  | 	cluster.Name = clusterName | ||||||
|  | 	cluster.Namespace = namespace | ||||||
| 
 | 
 | ||||||
| 	// create the streams
 | 	// create the streams
 | ||||||
| 	err = cluster.createOrUpdateStreams() | 	err := cluster.syncStream(appId) | ||||||
| 	assert.NoError(t, err) | 	assert.NoError(t, err) | ||||||
| 
 | 
 | ||||||
| 	// compare generated stream with expected stream
 | 	// compare generated stream with expected stream
 | ||||||
| 	result := cluster.generateFabricEventStream(appId) | 	result := cluster.generateFabricEventStream(appId) | ||||||
| 	if match, _ := sameStreams(result.Spec.EventStreams, fes.Spec.EventStreams); !match { | 	if match, _ := cluster.compareStreams(result, fes); !match { | ||||||
| 		t.Errorf("malformed FabricEventStream, expected %#v, got %#v", fes, result) | 		t.Errorf("malformed FabricEventStream, expected %#v, got %#v", fes, result) | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
|  | @ -236,37 +453,100 @@ func TestGenerateFabricEventStream(t *testing.T) { | ||||||
| 	} | 	} | ||||||
| 	streams, err := cluster.KubeClient.FabricEventStreams(namespace).List(context.TODO(), listOptions) | 	streams, err := cluster.KubeClient.FabricEventStreams(namespace).List(context.TODO(), listOptions) | ||||||
| 	assert.NoError(t, err) | 	assert.NoError(t, err) | ||||||
| 
 | 	assert.Equalf(t, 1, len(streams.Items), "unexpected number of streams found: got %d, but expected only one", len(streams.Items)) | ||||||
| 	// check if there is only one stream
 |  | ||||||
| 	if len(streams.Items) > 1 { |  | ||||||
| 		t.Errorf("too many stream CRDs found: got %d, but expected only one", len(streams.Items)) |  | ||||||
| 	} |  | ||||||
| 
 | 
 | ||||||
| 	// compare stream returned from API with expected stream
 | 	// compare stream returned from API with expected stream
 | ||||||
| 	if match, _ := sameStreams(streams.Items[0].Spec.EventStreams, fes.Spec.EventStreams); !match { | 	if match, _ := cluster.compareStreams(&streams.Items[0], fes); !match { | ||||||
| 		t.Errorf("malformed FabricEventStream returned from API, expected %#v, got %#v", fes, streams.Items[0]) | 		t.Errorf("malformed FabricEventStream returned from API, expected %#v, got %#v", fes, streams.Items[0]) | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	// sync streams once again
 | 	// sync streams once again
 | ||||||
| 	err = cluster.createOrUpdateStreams() | 	err = cluster.syncStream(appId) | ||||||
| 	assert.NoError(t, err) | 	assert.NoError(t, err) | ||||||
| 
 | 
 | ||||||
| 	streams, err = cluster.KubeClient.FabricEventStreams(namespace).List(context.TODO(), listOptions) | 	streams, err = cluster.KubeClient.FabricEventStreams(namespace).List(context.TODO(), listOptions) | ||||||
| 	assert.NoError(t, err) | 	assert.NoError(t, err) | ||||||
| 
 | 	assert.Equalf(t, 1, len(streams.Items), "unexpected number of streams found: got %d, but expected only one", len(streams.Items)) | ||||||
| 	// check if there is still only one stream
 |  | ||||||
| 	if len(streams.Items) > 1 { |  | ||||||
| 		t.Errorf("too many stream CRDs found after sync: got %d, but expected only one", len(streams.Items)) |  | ||||||
| 	} |  | ||||||
| 
 | 
 | ||||||
| 	// compare stream returned from API with generated stream
 | 	// compare stream returned from API with generated stream
 | ||||||
| 	if match, _ := sameStreams(streams.Items[0].Spec.EventStreams, result.Spec.EventStreams); !match { | 	if match, _ := cluster.compareStreams(&streams.Items[0], result); !match { | ||||||
| 		t.Errorf("returned FabricEventStream differs from generated one, expected %#v, got %#v", result, streams.Items[0]) | 		t.Errorf("returned FabricEventStream differs from generated one, expected %#v, got %#v", result, streams.Items[0]) | ||||||
| 	} | 	} | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
|  | func newFabricEventStream(streams []zalandov1.EventStream, annotations map[string]string) *zalandov1.FabricEventStream { | ||||||
|  | 	return &zalandov1.FabricEventStream{ | ||||||
|  | 		ObjectMeta: metav1.ObjectMeta{ | ||||||
|  | 			Name:        fmt.Sprintf("%s-12345", clusterName), | ||||||
|  | 			Annotations: annotations, | ||||||
|  | 		}, | ||||||
|  | 		Spec: zalandov1.FabricEventStreamSpec{ | ||||||
|  | 			ApplicationId: appId, | ||||||
|  | 			EventStreams:  streams, | ||||||
|  | 		}, | ||||||
|  | 	} | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func TestSyncStreams(t *testing.T) { | ||||||
|  | 	pg.Name = fmt.Sprintf("%s-2", pg.Name) | ||||||
|  | 	var cluster = New( | ||||||
|  | 		Config{ | ||||||
|  | 			OpConfig: config.Config{ | ||||||
|  | 				PodManagementPolicy: "ordered_ready", | ||||||
|  | 				Resources: config.Resources{ | ||||||
|  | 					ClusterLabels:         map[string]string{"application": "spilo"}, | ||||||
|  | 					ClusterNameLabel:      "cluster-name", | ||||||
|  | 					DefaultCPURequest:     "300m", | ||||||
|  | 					DefaultCPULimit:       "300m", | ||||||
|  | 					DefaultMemoryRequest:  "300Mi", | ||||||
|  | 					DefaultMemoryLimit:    "300Mi", | ||||||
|  | 					EnableOwnerReferences: util.True(), | ||||||
|  | 					PodRoleLabel:          "spilo-role", | ||||||
|  | 				}, | ||||||
|  | 			}, | ||||||
|  | 		}, client, pg, logger, eventRecorder) | ||||||
|  | 
 | ||||||
|  | 	_, err := cluster.KubeClient.Postgresqls(namespace).Create( | ||||||
|  | 		context.TODO(), &pg, metav1.CreateOptions{}) | ||||||
|  | 	assert.NoError(t, err) | ||||||
|  | 
 | ||||||
|  | 	// create the stream
 | ||||||
|  | 	err = cluster.syncStream(appId) | ||||||
|  | 	assert.NoError(t, err) | ||||||
|  | 
 | ||||||
|  | 	// create a second stream with same spec but with different name
 | ||||||
|  | 	createdStream, err := cluster.KubeClient.FabricEventStreams(namespace).Create( | ||||||
|  | 		context.TODO(), fes, metav1.CreateOptions{}) | ||||||
|  | 	assert.NoError(t, err) | ||||||
|  | 	assert.Equal(t, createdStream.Spec.ApplicationId, appId) | ||||||
|  | 
 | ||||||
|  | 	// check that two streams exist
 | ||||||
|  | 	listOptions := metav1.ListOptions{ | ||||||
|  | 		LabelSelector: cluster.labelsSet(true).String(), | ||||||
|  | 	} | ||||||
|  | 	streams, err := cluster.KubeClient.FabricEventStreams(namespace).List(context.TODO(), listOptions) | ||||||
|  | 	assert.NoError(t, err) | ||||||
|  | 	assert.Equalf(t, 2, len(streams.Items), "unexpected number of streams found: got %d, but expected only 2", len(streams.Items)) | ||||||
|  | 
 | ||||||
|  | 	// sync the stream which should remove the redundant stream
 | ||||||
|  | 	err = cluster.syncStream(appId) | ||||||
|  | 	assert.NoError(t, err) | ||||||
|  | 
 | ||||||
|  | 	// check that only one stream remains after sync
 | ||||||
|  | 	streams, err = cluster.KubeClient.FabricEventStreams(namespace).List(context.TODO(), listOptions) | ||||||
|  | 	assert.NoError(t, err) | ||||||
|  | 	assert.Equalf(t, 1, len(streams.Items), "unexpected number of streams found: got %d, but expected only 1", len(streams.Items)) | ||||||
|  | 
 | ||||||
|  | 	// check owner references
 | ||||||
|  | 	if !reflect.DeepEqual(streams.Items[0].OwnerReferences, cluster.ownerReferences()) { | ||||||
|  | 		t.Errorf("unexpected owner references, expected %#v, got %#v", cluster.ownerReferences(), streams.Items[0].OwnerReferences) | ||||||
|  | 	} | ||||||
|  | } | ||||||
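The final check compares against cluster.ownerReferences(). With EnableOwnerReferences switched on in the test config, the expectation is a controller reference pointing at the postgresql custom resource; roughly (a hedged approximation, not the operator's method):

```go
// Sketch under assumptions: the shape of an owner reference to the postgresql CR.
func ownerReferencesSketch(pg *acidv1.Postgresql) []metav1.OwnerReference {
	controller := true
	return []metav1.OwnerReference{{
		APIVersion: "acid.zalan.do/v1", // group/version of the postgresql CRD
		Kind:       "postgresql",
		Name:       pg.ObjectMeta.Name,
		UID:        pg.ObjectMeta.UID,
		Controller: &controller,
	}}
}
```

With such a reference in place, deleting the postgresql resource lets Kubernetes garbage-collect the stream resources it owns.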
|  | 
 | ||||||
| func TestSameStreams(t *testing.T) { | func TestSameStreams(t *testing.T) { | ||||||
| 	testName := "TestSameStreams" | 	testName := "TestSameStreams" | ||||||
|  | 	annotationsA := map[string]string{"owned-by": "acid"} | ||||||
|  | 	annotationsB := map[string]string{"owned-by": "foo"} | ||||||
| 
 | 
 | ||||||
| 	stream1 := zalandov1.EventStream{ | 	stream1 := zalandov1.EventStream{ | ||||||
| 		EventStreamFlow:     zalandov1.EventStreamFlow{}, | 		EventStreamFlow:     zalandov1.EventStreamFlow{}, | ||||||
|  | @ -311,57 +591,64 @@ func TestSameStreams(t *testing.T) { | ||||||
| 
 | 
 | ||||||
| 	tests := []struct { | 	tests := []struct { | ||||||
| 		subTest  string | 		subTest  string | ||||||
| 		streamsA []zalandov1.EventStream | 		streamsA *zalandov1.FabricEventStream | ||||||
| 		streamsB []zalandov1.EventStream | 		streamsB *zalandov1.FabricEventStream | ||||||
| 		match    bool | 		match    bool | ||||||
| 		reason   string | 		reason   string | ||||||
| 	}{ | 	}{ | ||||||
| 		{ | 		{ | ||||||
| 			subTest:  "identical streams", | 			subTest:  "identical streams", | ||||||
| 			streamsA: []zalandov1.EventStream{stream1, stream2}, | 			streamsA: newFabricEventStream([]zalandov1.EventStream{stream1, stream2}, annotationsA), | ||||||
| 			streamsB: []zalandov1.EventStream{stream1, stream2}, | 			streamsB: newFabricEventStream([]zalandov1.EventStream{stream1, stream2}, annotationsA), | ||||||
| 			match:    true, | 			match:    true, | ||||||
| 			reason:   "", | 			reason:   "", | ||||||
| 		}, | 		}, | ||||||
| 		{ | 		{ | ||||||
| 			subTest:  "same streams different order", | 			subTest:  "same streams different order", | ||||||
| 			streamsA: []zalandov1.EventStream{stream1, stream2}, | 			streamsA: newFabricEventStream([]zalandov1.EventStream{stream1, stream2}, nil), | ||||||
| 			streamsB: []zalandov1.EventStream{stream2, stream1}, | 			streamsB: newFabricEventStream([]zalandov1.EventStream{stream2, stream1}, nil), | ||||||
| 			match:    true, | 			match:    true, | ||||||
| 			reason:   "", | 			reason:   "", | ||||||
| 		}, | 		}, | ||||||
| 		{ | 		{ | ||||||
| 			subTest:  "same streams different order", | 			subTest:  "same streams different order", | ||||||
| 			streamsA: []zalandov1.EventStream{stream1}, | 			streamsA: newFabricEventStream([]zalandov1.EventStream{stream1}, nil), | ||||||
| 			streamsB: []zalandov1.EventStream{stream1, stream2}, | 			streamsB: newFabricEventStream([]zalandov1.EventStream{stream1, stream2}, nil), | ||||||
| 			match:    false, | 			match:    false, | ||||||
| 			reason:   "number of defined streams is different", | 			reason:   "number of defined streams is different", | ||||||
| 		}, | 		}, | ||||||
| 		{ | 		{ | ||||||
| 			subTest:  "different number of streams", | 			subTest:  "different number of streams", | ||||||
| 			streamsA: []zalandov1.EventStream{stream1}, | 			streamsA: newFabricEventStream([]zalandov1.EventStream{stream1}, nil), | ||||||
| 			streamsB: []zalandov1.EventStream{stream1, stream2}, | 			streamsB: newFabricEventStream([]zalandov1.EventStream{stream1, stream2}, nil), | ||||||
| 			match:    false, | 			match:    false, | ||||||
| 			reason:   "number of defined streams is different", | 			reason:   "number of defined streams is different", | ||||||
| 		}, | 		}, | ||||||
| 		{ | 		{ | ||||||
| 			subTest:  "event stream specs differ", | 			subTest:  "event stream specs differ", | ||||||
| 			streamsA: []zalandov1.EventStream{stream1, stream2}, | 			streamsA: newFabricEventStream([]zalandov1.EventStream{stream1, stream2}, nil), | ||||||
| 			streamsB: fes.Spec.EventStreams, | 			streamsB: fes, | ||||||
| 			match:    false, | 			match:    false, | ||||||
| 			reason:   "number of defined streams is different", | 			reason:   "number of defined streams is different", | ||||||
| 		}, | 		}, | ||||||
| 		{ | 		{ | ||||||
| 			subTest:  "event stream recovery specs differ", | 			subTest:  "event stream recovery specs differ", | ||||||
| 			streamsA: []zalandov1.EventStream{stream2}, | 			streamsA: newFabricEventStream([]zalandov1.EventStream{stream2}, nil), | ||||||
| 			streamsB: []zalandov1.EventStream{stream3}, | 			streamsB: newFabricEventStream([]zalandov1.EventStream{stream3}, nil), | ||||||
|  | 			match:    false, | ||||||
|  | 			reason:   "event stream specs differ", | ||||||
|  | 		}, | ||||||
|  | 		{ | ||||||
|  | 			subTest:  "event stream annotations differ", | ||||||
|  | 			streamsA: newFabricEventStream([]zalandov1.EventStream{stream2}, annotationsA), | ||||||
|  | 			streamsB: newFabricEventStream([]zalandov1.EventStream{stream3}, annotationsB), | ||||||
| 			match:    false, | 			match:    false, | ||||||
| 			reason:   "event stream specs differ", | 			reason:   "event stream specs differ", | ||||||
| 		}, | 		}, | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	for _, tt := range tests { | 	for _, tt := range tests { | ||||||
| 		streamsMatch, matchReason := sameStreams(tt.streamsA, tt.streamsB) | 		streamsMatch, matchReason := cluster.compareStreams(tt.streamsA, tt.streamsB) | ||||||
| 		if streamsMatch != tt.match { | 		if streamsMatch != tt.match { | ||||||
| 			t.Errorf("%s %s: unexpected match result when comparing streams: got %s, epxected %s", | 			t.Errorf("%s %s: unexpected match result when comparing streams: got %s, epxected %s", | ||||||
| 				testName, tt.subTest, matchReason, tt.reason) | 				testName, tt.subTest, matchReason, tt.reason) | ||||||
|  | @ -369,9 +656,8 @@ func TestSameStreams(t *testing.T) { | ||||||
| 	} | 	} | ||||||
| } | } | ||||||
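Reading the expected reasons back, compareStreams treats two FabricEventStream resources as equal when their event streams match irrespective of order and their annotations agree, with the stream-count check firing first. A sketch consistent with the table, assuming plain reflect-based equality where the real method may be more granular:

```go
// Sketch only: order-insensitive stream comparison plus an annotation check.
func compareStreamsSketch(a, b *zalandov1.FabricEventStream) (bool, string) {
	if len(a.Spec.EventStreams) != len(b.Spec.EventStreams) {
		return false, "number of defined streams is different"
	}
	for _, sa := range a.Spec.EventStreams {
		found := false
		for _, sb := range b.Spec.EventStreams {
			if reflect.DeepEqual(sa, sb) {
				found = true
				break
			}
		}
		if !found {
			return false, "event stream specs differ"
		}
	}
	if !reflect.DeepEqual(a.Annotations, b.Annotations) {
		return false, "event stream annotations differ"
	}
	return true, ""
}
```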
| 
 | 
 | ||||||
| func TestUpdateFabricEventStream(t *testing.T) { | func TestUpdateStreams(t *testing.T) { | ||||||
| 	client, _ := newFakeK8sStreamClient() | 	pg.Name = fmt.Sprintf("%s-3", pg.Name) | ||||||
| 
 |  | ||||||
| 	var cluster = New( | 	var cluster = New( | ||||||
| 		Config{ | 		Config{ | ||||||
| 			OpConfig: config.Config{ | 			OpConfig: config.Config{ | ||||||
|  | @ -392,12 +678,8 @@ func TestUpdateFabricEventStream(t *testing.T) { | ||||||
| 		context.TODO(), &pg, metav1.CreateOptions{}) | 		context.TODO(), &pg, metav1.CreateOptions{}) | ||||||
| 	assert.NoError(t, err) | 	assert.NoError(t, err) | ||||||
| 
 | 
 | ||||||
| 	// create statefulset to have ownerReference for streams
 | 	// create the stream
 | ||||||
| 	_, err = cluster.createStatefulSet() | 	err = cluster.syncStream(appId) | ||||||
| 	assert.NoError(t, err) |  | ||||||
| 
 |  | ||||||
| 	// now create the stream
 |  | ||||||
| 	err = cluster.createOrUpdateStreams() |  | ||||||
| 	assert.NoError(t, err) | 	assert.NoError(t, err) | ||||||
| 
 | 
 | ||||||
| 	// change specs of streams and patch CRD
 | 	// change specs of streams and patch CRD
 | ||||||
|  | @ -411,7 +693,50 @@ func TestUpdateFabricEventStream(t *testing.T) { | ||||||
| 		} | 		} | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	patchData, err := specPatch(pg.Spec) | 	// compare stream returned from API with expected stream
 | ||||||
|  | 	listOptions := metav1.ListOptions{ | ||||||
|  | 		LabelSelector: cluster.labelsSet(true).String(), | ||||||
|  | 	} | ||||||
|  | 	streams := patchPostgresqlStreams(t, cluster, &pg.Spec, listOptions) | ||||||
|  | 	result := cluster.generateFabricEventStream(appId) | ||||||
|  | 	if match, _ := cluster.compareStreams(&streams.Items[0], result); !match { | ||||||
|  | 		t.Errorf("Malformed FabricEventStream after updating manifest, expected %#v, got %#v", streams.Items[0], result) | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	// disable recovery
 | ||||||
|  | 	for idx, stream := range pg.Spec.Streams { | ||||||
|  | 		if stream.ApplicationId == appId { | ||||||
|  | 			stream.EnableRecovery = util.False() | ||||||
|  | 			pg.Spec.Streams[idx] = stream | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	streams = patchPostgresqlStreams(t, cluster, &pg.Spec, listOptions) | ||||||
|  | 	result = cluster.generateFabricEventStream(appId) | ||||||
|  | 	if match, _ := cluster.compareStreams(&streams.Items[0], result); !match { | ||||||
|  | 		t.Errorf("Malformed FabricEventStream after disabling event recovery, expected %#v, got %#v", streams.Items[0], result) | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	mockClient := k8sutil.NewMockKubernetesClient() | ||||||
|  | 	cluster.KubeClient.CustomResourceDefinitionsGetter = mockClient.CustomResourceDefinitionsGetter | ||||||
|  | 
 | ||||||
|  | 	// remove streams from manifest
 | ||||||
|  | 	pg.Spec.Streams = nil | ||||||
|  | 	pgUpdated, err := cluster.KubeClient.Postgresqls(namespace).Update( | ||||||
|  | 		context.TODO(), &pg, metav1.UpdateOptions{}) | ||||||
|  | 	assert.NoError(t, err) | ||||||
|  | 
 | ||||||
|  | 	appIds := getDistinctApplicationIds(pgUpdated.Spec.Streams) | ||||||
|  | 	cluster.cleanupRemovedStreams(appIds) | ||||||
|  | 
 | ||||||
|  | 	streams, err = cluster.KubeClient.FabricEventStreams(namespace).List(context.TODO(), listOptions) | ||||||
|  | 	if len(streams.Items) > 0 || err != nil { | ||||||
|  | 		t.Errorf("stream resource has not been removed or unexpected error %v", err) | ||||||
|  | 	} | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func patchPostgresqlStreams(t *testing.T, cluster *Cluster, pgSpec *acidv1.PostgresSpec, listOptions metav1.ListOptions) (streams *zalandov1.FabricEventStreamList) { | ||||||
|  | 	patchData, err := specPatch(pgSpec) | ||||||
| 	assert.NoError(t, err) | 	assert.NoError(t, err) | ||||||
| 
 | 
 | ||||||
| 	pgPatched, err := cluster.KubeClient.Postgresqls(namespace).Patch( | 	pgPatched, err := cluster.KubeClient.Postgresqls(namespace).Patch( | ||||||
|  | @ -419,40 +744,11 @@ func TestUpdateFabricEventStream(t *testing.T) { | ||||||
| 	assert.NoError(t, err) | 	assert.NoError(t, err) | ||||||
| 
 | 
 | ||||||
| 	cluster.Postgresql.Spec = pgPatched.Spec | 	cluster.Postgresql.Spec = pgPatched.Spec | ||||||
| 	err = cluster.createOrUpdateStreams() | 	err = cluster.syncStream(appId) | ||||||
| 	assert.NoError(t, err) | 	assert.NoError(t, err) | ||||||
| 
 | 
 | ||||||
| 	// compare stream returned from API with expected stream
 | 	streams, err = cluster.KubeClient.FabricEventStreams(namespace).List(context.TODO(), listOptions) | ||||||
| 	listOptions := metav1.ListOptions{ |  | ||||||
| 		LabelSelector: cluster.labelsSet(true).String(), |  | ||||||
| 	} |  | ||||||
| 	streams, err := cluster.KubeClient.FabricEventStreams(namespace).List(context.TODO(), listOptions) |  | ||||||
| 	assert.NoError(t, err) | 	assert.NoError(t, err) | ||||||
| 
 | 
 | ||||||
| 	result := cluster.generateFabricEventStream(appId) | 	return streams | ||||||
| 	if match, _ := sameStreams(streams.Items[0].Spec.EventStreams, result.Spec.EventStreams); !match { |  | ||||||
| 		t.Errorf("Malformed FabricEventStream after updating manifest, expected %#v, got %#v", streams.Items[0], result) |  | ||||||
| 	} |  | ||||||
| 
 |  | ||||||
| 	// disable recovery
 |  | ||||||
| 	for _, stream := range pg.Spec.Streams { |  | ||||||
| 		if stream.ApplicationId == appId { |  | ||||||
| 			stream.EnableRecovery = util.False() |  | ||||||
| 		} |  | ||||||
| 	} |  | ||||||
| 	patchData, err = specPatch(pg.Spec) |  | ||||||
| 	assert.NoError(t, err) |  | ||||||
| 
 |  | ||||||
| 	pgPatched, err = cluster.KubeClient.Postgresqls(namespace).Patch( |  | ||||||
| 		context.TODO(), cluster.Name, types.MergePatchType, patchData, metav1.PatchOptions{}, "spec") |  | ||||||
| 	assert.NoError(t, err) |  | ||||||
| 
 |  | ||||||
| 	cluster.Postgresql.Spec = pgPatched.Spec |  | ||||||
| 	err = cluster.createOrUpdateStreams() |  | ||||||
| 	assert.NoError(t, err) |  | ||||||
| 
 |  | ||||||
| 	result = cluster.generateFabricEventStream(appId) |  | ||||||
| 	if match, _ := sameStreams(streams.Items[0].Spec.EventStreams, result.Spec.EventStreams); !match { |  | ||||||
| 		t.Errorf("Malformed FabricEventStream after disabling event recovery, expected %#v, got %#v", streams.Items[0], result) |  | ||||||
| 	} |  | ||||||
| } | } | ||||||
|  |  | ||||||
|  | @ -15,11 +15,13 @@ import ( | ||||||
| 	"github.com/zalando/postgres-operator/pkg/util" | 	"github.com/zalando/postgres-operator/pkg/util" | ||||||
| 	"github.com/zalando/postgres-operator/pkg/util/constants" | 	"github.com/zalando/postgres-operator/pkg/util/constants" | ||||||
| 	"github.com/zalando/postgres-operator/pkg/util/k8sutil" | 	"github.com/zalando/postgres-operator/pkg/util/k8sutil" | ||||||
|  | 	"golang.org/x/exp/maps" | ||||||
| 	"golang.org/x/exp/slices" | 	"golang.org/x/exp/slices" | ||||||
| 	batchv1 "k8s.io/api/batch/v1" | 	batchv1 "k8s.io/api/batch/v1" | ||||||
| 	v1 "k8s.io/api/core/v1" | 	v1 "k8s.io/api/core/v1" | ||||||
| 	policyv1 "k8s.io/api/policy/v1" | 	policyv1 "k8s.io/api/policy/v1" | ||||||
| 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" | 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" | ||||||
|  | 	"k8s.io/apimachinery/pkg/types" | ||||||
| ) | ) | ||||||
| 
 | 
 | ||||||
| var requirePrimaryRestartWhenDecreased = []string{ | var requirePrimaryRestartWhenDecreased = []string{ | ||||||
|  | @ -79,6 +81,10 @@ func (c *Cluster) Sync(newSpec *acidv1.Postgresql) error { | ||||||
| 		return err | 		return err | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
|  | 	if err = c.syncPatroniResources(); err != nil { | ||||||
|  | 		c.logger.Errorf("could not sync Patroni resources: %v", err) | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
| 	// sync volume may already transition volumes to gp3, if iops/throughput or type is specified
 | 	// sync volume may already transition volumes to gp3, if iops/throughput or type is specified
 | ||||||
| 	if err = c.syncVolumes(); err != nil { | 	if err = c.syncVolumes(); err != nil { | ||||||
| 		return err | 		return err | ||||||
|  | @ -91,7 +97,6 @@ func (c *Cluster) Sync(newSpec *acidv1.Postgresql) error { | ||||||
| 		} | 		} | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	c.logger.Debug("syncing statefulsets") |  | ||||||
| 	if err = c.syncStatefulSet(); err != nil { | 	if err = c.syncStatefulSet(); err != nil { | ||||||
| 		if !k8sutil.ResourceAlreadyExists(err) { | 		if !k8sutil.ResourceAlreadyExists(err) { | ||||||
| 			err = fmt.Errorf("could not sync statefulsets: %v", err) | 			err = fmt.Errorf("could not sync statefulsets: %v", err) | ||||||
|  | @ -173,6 +178,166 @@ func (c *Cluster) syncFinalizer() error { | ||||||
| 	return nil | 	return nil | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
|  | func (c *Cluster) syncPatroniResources() error { | ||||||
|  | 	errors := make([]string, 0) | ||||||
|  | 
 | ||||||
|  | 	if err := c.syncPatroniService(); err != nil { | ||||||
|  | 		errors = append(errors, fmt.Sprintf("could not sync %s service: %v", Patroni, err)) | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	for _, suffix := range patroniObjectSuffixes { | ||||||
|  | 		if c.patroniKubernetesUseConfigMaps() { | ||||||
|  | 			if err := c.syncPatroniConfigMap(suffix); err != nil { | ||||||
|  | 				errors = append(errors, fmt.Sprintf("could not sync %s Patroni config map: %v", suffix, err)) | ||||||
|  | 			} | ||||||
|  | 		} else { | ||||||
|  | 			if err := c.syncPatroniEndpoint(suffix); err != nil { | ||||||
|  | 				errors = append(errors, fmt.Sprintf("could not sync %s Patroni endpoint: %v", suffix, err)) | ||||||
|  | 			} | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	if len(errors) > 0 { | ||||||
|  | 		return fmt.Errorf("%v", strings.Join(errors, `', '`)) | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	return nil | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (c *Cluster) syncPatroniConfigMap(suffix string) error { | ||||||
|  | 	var ( | ||||||
|  | 		cm  *v1.ConfigMap | ||||||
|  | 		err error | ||||||
|  | 	) | ||||||
|  | 	configMapName := fmt.Sprintf("%s-%s", c.Name, suffix) | ||||||
|  | 	c.logger.Debugf("syncing %s config map", configMapName) | ||||||
|  | 	c.setProcessName("syncing %s config map", configMapName) | ||||||
|  | 
 | ||||||
|  | 	if cm, err = c.KubeClient.ConfigMaps(c.Namespace).Get(context.TODO(), configMapName, metav1.GetOptions{}); err == nil { | ||||||
|  | 		c.PatroniConfigMaps[suffix] = cm | ||||||
|  | 		desiredOwnerRefs := c.ownerReferences() | ||||||
|  | 		if !reflect.DeepEqual(cm.ObjectMeta.OwnerReferences, desiredOwnerRefs) { | ||||||
|  | 			c.logger.Infof("new %s config map's owner references do not match the current ones", configMapName) | ||||||
|  | 			cm.ObjectMeta.OwnerReferences = desiredOwnerRefs | ||||||
|  | 			c.setProcessName("updating %s config map", configMapName) | ||||||
|  | 			cm, err = c.KubeClient.ConfigMaps(c.Namespace).Update(context.TODO(), cm, metav1.UpdateOptions{}) | ||||||
|  | 			if err != nil { | ||||||
|  | 				return fmt.Errorf("could not update %s config map: %v", configMapName, err) | ||||||
|  | 			} | ||||||
|  | 			c.PatroniConfigMaps[suffix] = cm | ||||||
|  | 		} | ||||||
|  | 		annotations := make(map[string]string) | ||||||
|  | 		maps.Copy(annotations, cm.Annotations) | ||||||
|  | 		// Patroni can add extra annotations, so include the current annotations in the desired set
 | ||||||
|  | 		desiredAnnotations := c.annotationsSet(cm.Annotations) | ||||||
|  | 		if changed, _ := c.compareAnnotations(annotations, desiredAnnotations); changed { | ||||||
|  | 			patchData, err := metaAnnotationsPatch(desiredAnnotations) | ||||||
|  | 			if err != nil { | ||||||
|  | 				return fmt.Errorf("could not form patch for %s config map: %v", configMapName, err) | ||||||
|  | 			} | ||||||
|  | 			cm, err = c.KubeClient.ConfigMaps(c.Namespace).Patch(context.TODO(), configMapName, types.MergePatchType, []byte(patchData), metav1.PatchOptions{}) | ||||||
|  | 			if err != nil { | ||||||
|  | 				return fmt.Errorf("could not patch annotations of %s config map: %v", configMapName, err) | ||||||
|  | 			} | ||||||
|  | 			c.PatroniConfigMaps[suffix] = cm | ||||||
|  | 		} | ||||||
|  | 	} else if !k8sutil.ResourceNotFound(err) { | ||||||
|  | 		// if config map does not exist yet, Patroni should create it
 | ||||||
|  | 		return fmt.Errorf("could not get %s config map: %v", configMapName, err) | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	return nil | ||||||
|  | } | ||||||
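syncPatroniConfigMap and its endpoint/service siblings below all follow one pattern: adopt the existing object, fix owner references with an Update, then reconcile annotations with a merge patch. The patch body produced by metaAnnotationsPatch is presumably limited to metadata.annotations; a hedged sketch of such a helper:

```go
// Sketch only: a JSON merge patch touching nothing but metadata.annotations,
// matching the types.MergePatchType calls above. Assumes "encoding/json" is imported.
func metaAnnotationsPatchSketch(annotations map[string]string) ([]byte, error) {
	return json.Marshal(map[string]interface{}{
		"metadata": map[string]interface{}{
			"annotations": annotations, // merge semantics: listed keys are set or overwritten
		},
	})
}
```

Including the current annotations in the desired set first (as the comment above notes) keeps Patroni-managed keys from repeatedly showing up as drift.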
|  | 
 | ||||||
|  | func (c *Cluster) syncPatroniEndpoint(suffix string) error { | ||||||
|  | 	var ( | ||||||
|  | 		ep  *v1.Endpoints | ||||||
|  | 		err error | ||||||
|  | 	) | ||||||
|  | 	endpointName := fmt.Sprintf("%s-%s", c.Name, suffix) | ||||||
|  | 	c.logger.Debugf("syncing %s endpoint", endpointName) | ||||||
|  | 	c.setProcessName("syncing %s endpoint", endpointName) | ||||||
|  | 
 | ||||||
|  | 	if ep, err = c.KubeClient.Endpoints(c.Namespace).Get(context.TODO(), endpointName, metav1.GetOptions{}); err == nil { | ||||||
|  | 		c.PatroniEndpoints[suffix] = ep | ||||||
|  | 		desiredOwnerRefs := c.ownerReferences() | ||||||
|  | 		if !reflect.DeepEqual(ep.ObjectMeta.OwnerReferences, desiredOwnerRefs) { | ||||||
|  | 			c.logger.Infof("new %s endpoints's owner references do not match the current ones", endpointName) | ||||||
|  | 			ep.ObjectMeta.OwnerReferences = desiredOwnerRefs | ||||||
|  | 			c.setProcessName("updating %s endpoint", endpointName) | ||||||
|  | 			ep, err = c.KubeClient.Endpoints(c.Namespace).Update(context.TODO(), ep, metav1.UpdateOptions{}) | ||||||
|  | 			if err != nil { | ||||||
|  | 				return fmt.Errorf("could not update %s endpoint: %v", endpointName, err) | ||||||
|  | 			} | ||||||
|  | 			c.PatroniEndpoints[suffix] = ep | ||||||
|  | 		} | ||||||
|  | 		annotations := make(map[string]string) | ||||||
|  | 		maps.Copy(annotations, ep.Annotations) | ||||||
|  | 		// Patroni can add extra annotations, so include the current annotations in the desired set
 | ||||||
|  | 		desiredAnnotations := c.annotationsSet(ep.Annotations) | ||||||
|  | 		if changed, _ := c.compareAnnotations(annotations, desiredAnnotations); changed { | ||||||
|  | 			patchData, err := metaAnnotationsPatch(desiredAnnotations) | ||||||
|  | 			if err != nil { | ||||||
|  | 				return fmt.Errorf("could not form patch for %s endpoint: %v", endpointName, err) | ||||||
|  | 			} | ||||||
|  | 			ep, err = c.KubeClient.Endpoints(c.Namespace).Patch(context.TODO(), endpointName, types.MergePatchType, []byte(patchData), metav1.PatchOptions{}) | ||||||
|  | 			if err != nil { | ||||||
|  | 				return fmt.Errorf("could not patch annotations of %s endpoint: %v", endpointName, err) | ||||||
|  | 			} | ||||||
|  | 			c.PatroniEndpoints[suffix] = ep | ||||||
|  | 		} | ||||||
|  | 	} else if !k8sutil.ResourceNotFound(err) { | ||||||
|  | 		// if endpoint does not exist yet, Patroni should create it
 | ||||||
|  | 		return fmt.Errorf("could not get %s endpoint: %v", endpointName, err) | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	return nil | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (c *Cluster) syncPatroniService() error { | ||||||
|  | 	var ( | ||||||
|  | 		svc *v1.Service | ||||||
|  | 		err error | ||||||
|  | 	) | ||||||
|  | 	serviceName := fmt.Sprintf("%s-%s", c.Name, Patroni) | ||||||
|  | 	c.setProcessName("syncing %s service", serviceName) | ||||||
|  | 
 | ||||||
|  | 	if svc, err = c.KubeClient.Services(c.Namespace).Get(context.TODO(), serviceName, metav1.GetOptions{}); err == nil { | ||||||
|  | 		c.Services[Patroni] = svc | ||||||
|  | 		desiredOwnerRefs := c.ownerReferences() | ||||||
|  | 		if !reflect.DeepEqual(svc.ObjectMeta.OwnerReferences, desiredOwnerRefs) { | ||||||
|  | 			c.logger.Infof("new %s service's owner references do not match the current ones", serviceName) | ||||||
|  | 			svc.ObjectMeta.OwnerReferences = desiredOwnerRefs | ||||||
|  | 			c.setProcessName("updating %v service", serviceName) | ||||||
|  | 			svc, err = c.KubeClient.Services(c.Namespace).Update(context.TODO(), svc, metav1.UpdateOptions{}) | ||||||
|  | 			if err != nil { | ||||||
|  | 				return fmt.Errorf("could not update %s endpoint: %v", serviceName, err) | ||||||
|  | 			} | ||||||
|  | 			c.Services[Patroni] = svc | ||||||
|  | 		} | ||||||
|  | 		annotations := make(map[string]string) | ||||||
|  | 		maps.Copy(annotations, svc.Annotations) | ||||||
|  | 		// Patroni can add extra annotations, so include the current annotations in the desired set
 | ||||||
|  | 		desiredAnnotations := c.annotationsSet(svc.Annotations) | ||||||
|  | 		if changed, _ := c.compareAnnotations(annotations, desiredAnnotations); changed { | ||||||
|  | 			patchData, err := metaAnnotationsPatch(desiredAnnotations) | ||||||
|  | 			if err != nil { | ||||||
|  | 				return fmt.Errorf("could not form patch for %s service: %v", serviceName, err) | ||||||
|  | 			} | ||||||
|  | 			svc, err = c.KubeClient.Services(c.Namespace).Patch(context.TODO(), serviceName, types.MergePatchType, []byte(patchData), metav1.PatchOptions{}) | ||||||
|  | 			if err != nil { | ||||||
|  | 				return fmt.Errorf("could not patch annotations of %s service: %v", serviceName, err) | ||||||
|  | 			} | ||||||
|  | 			c.Services[Patroni] = svc | ||||||
|  | 		} | ||||||
|  | 	} else if !k8sutil.ResourceNotFound(err) { | ||||||
|  | 		// if config service does not exist yet, Patroni should create it
 | ||||||
|  | 		return fmt.Errorf("could not get %s service: %v", serviceName, err) | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	return nil | ||||||
|  | } | ||||||
|  | 
 | ||||||
| func (c *Cluster) syncServices() error { | func (c *Cluster) syncServices() error { | ||||||
| 	for _, role := range []PostgresRole{Master, Replica} { | 	for _, role := range []PostgresRole{Master, Replica} { | ||||||
| 		c.logger.Debugf("syncing %s service", role) | 		c.logger.Debugf("syncing %s service", role) | ||||||
|  | @ -200,22 +365,17 @@ func (c *Cluster) syncService(role PostgresRole) error { | ||||||
| 	if svc, err = c.KubeClient.Services(c.Namespace).Get(context.TODO(), c.serviceName(role), metav1.GetOptions{}); err == nil { | 	if svc, err = c.KubeClient.Services(c.Namespace).Get(context.TODO(), c.serviceName(role), metav1.GetOptions{}); err == nil { | ||||||
| 		c.Services[role] = svc | 		c.Services[role] = svc | ||||||
| 		desiredSvc := c.generateService(role, &c.Spec) | 		desiredSvc := c.generateService(role, &c.Spec) | ||||||
| 		if match, reason := c.compareServices(svc, desiredSvc); !match { |  | ||||||
| 			c.logServiceChanges(role, svc, desiredSvc, false, reason) |  | ||||||
| 		updatedSvc, err := c.updateService(role, svc, desiredSvc) | 		updatedSvc, err := c.updateService(role, svc, desiredSvc) | ||||||
| 		if err != nil { | 		if err != nil { | ||||||
| 			return fmt.Errorf("could not update %s service to match desired state: %v", role, err) | 			return fmt.Errorf("could not update %s service to match desired state: %v", role, err) | ||||||
| 		} | 		} | ||||||
| 		c.Services[role] = updatedSvc | 		c.Services[role] = updatedSvc | ||||||
| 			c.logger.Infof("%s service %q is in the desired state now", role, util.NameFromMeta(desiredSvc.ObjectMeta)) |  | ||||||
| 		} |  | ||||||
| 		return nil | 		return nil | ||||||
| 	} | 	} | ||||||
| 	if !k8sutil.ResourceNotFound(err) { | 	if !k8sutil.ResourceNotFound(err) { | ||||||
| 		return fmt.Errorf("could not get %s service: %v", role, err) | 		return fmt.Errorf("could not get %s service: %v", role, err) | ||||||
| 	} | 	} | ||||||
| 	// no existing service, create new one
 | 	// no existing service, create new one
 | ||||||
| 	c.Services[role] = nil |  | ||||||
| 	c.logger.Infof("could not find the cluster's %s service", role) | 	c.logger.Infof("could not find the cluster's %s service", role) | ||||||
| 
 | 
 | ||||||
| 	if svc, err = c.createService(role); err == nil { | 	if svc, err = c.createService(role); err == nil { | ||||||
|  | @ -240,8 +400,28 @@ func (c *Cluster) syncEndpoint(role PostgresRole) error { | ||||||
| 	) | 	) | ||||||
| 	c.setProcessName("syncing %s endpoint", role) | 	c.setProcessName("syncing %s endpoint", role) | ||||||
| 
 | 
 | ||||||
| 	if ep, err = c.KubeClient.Endpoints(c.Namespace).Get(context.TODO(), c.endpointName(role), metav1.GetOptions{}); err == nil { | 	if ep, err = c.KubeClient.Endpoints(c.Namespace).Get(context.TODO(), c.serviceName(role), metav1.GetOptions{}); err == nil { | ||||||
| 		// TODO: No syncing of endpoints here, is this covered completely by updateService?
 | 		desiredEp := c.generateEndpoint(role, ep.Subsets) | ||||||
|  | 		// if owner references differ we update which would also change annotations
 | ||||||
|  | 		if !reflect.DeepEqual(ep.ObjectMeta.OwnerReferences, desiredEp.ObjectMeta.OwnerReferences) { | ||||||
|  | 			c.logger.Infof("new %s endpoints's owner references do not match the current ones", role) | ||||||
|  | 			c.setProcessName("updating %v endpoint", role) | ||||||
|  | 			ep, err = c.KubeClient.Endpoints(c.Namespace).Update(context.TODO(), desiredEp, metav1.UpdateOptions{}) | ||||||
|  | 			if err != nil { | ||||||
|  | 				return fmt.Errorf("could not update %s endpoint: %v", role, err) | ||||||
|  | 			} | ||||||
|  | 		} else { | ||||||
|  | 			if changed, _ := c.compareAnnotations(ep.Annotations, desiredEp.Annotations); changed { | ||||||
|  | 				patchData, err := metaAnnotationsPatch(desiredEp.Annotations) | ||||||
|  | 				if err != nil { | ||||||
|  | 					return fmt.Errorf("could not form patch for %s endpoint: %v", role, err) | ||||||
|  | 				} | ||||||
|  | 				ep, err = c.KubeClient.Endpoints(c.Namespace).Patch(context.TODO(), c.serviceName(role), types.MergePatchType, []byte(patchData), metav1.PatchOptions{}) | ||||||
|  | 				if err != nil { | ||||||
|  | 					return fmt.Errorf("could not patch annotations of %s endpoint: %v", role, err) | ||||||
|  | 				} | ||||||
|  | 			} | ||||||
|  | 		} | ||||||
| 		c.Endpoints[role] = ep | 		c.Endpoints[role] = ep | ||||||
| 		return nil | 		return nil | ||||||
| 	} | 	} | ||||||
|  | @ -249,7 +429,6 @@ func (c *Cluster) syncEndpoint(role PostgresRole) error { | ||||||
| 		return fmt.Errorf("could not get %s endpoint: %v", role, err) | 		return fmt.Errorf("could not get %s endpoint: %v", role, err) | ||||||
| 	} | 	} | ||||||
| 	// no existing endpoint, create new one
 | 	// no existing endpoint, create new one
 | ||||||
| 	c.Endpoints[role] = nil |  | ||||||
| 	c.logger.Infof("could not find the cluster's %s endpoint", role) | 	c.logger.Infof("could not find the cluster's %s endpoint", role) | ||||||
| 
 | 
 | ||||||
| 	if ep, err = c.createEndpoint(role); err == nil { | 	if ep, err = c.createEndpoint(role); err == nil { | ||||||
|  | @ -259,7 +438,7 @@ func (c *Cluster) syncEndpoint(role PostgresRole) error { | ||||||
| 			return fmt.Errorf("could not create missing %s endpoint: %v", role, err) | 			return fmt.Errorf("could not create missing %s endpoint: %v", role, err) | ||||||
| 		} | 		} | ||||||
| 		c.logger.Infof("%s endpoint %q already exists", role, util.NameFromMeta(ep.ObjectMeta)) | 		c.logger.Infof("%s endpoint %q already exists", role, util.NameFromMeta(ep.ObjectMeta)) | ||||||
| 		if ep, err = c.KubeClient.Endpoints(c.Namespace).Get(context.TODO(), c.endpointName(role), metav1.GetOptions{}); err != nil { | 		if ep, err = c.KubeClient.Endpoints(c.Namespace).Get(context.TODO(), c.serviceName(role), metav1.GetOptions{}); err != nil { | ||||||
| 			return fmt.Errorf("could not fetch existing %s endpoint: %v", role, err) | 			return fmt.Errorf("could not fetch existing %s endpoint: %v", role, err) | ||||||
| 		} | 		} | ||||||
| 	} | 	} | ||||||
|  | @ -275,7 +454,8 @@ func (c *Cluster) syncPodDisruptionBudget(isUpdate bool) error { | ||||||
| 	if pdb, err = c.KubeClient.PodDisruptionBudgets(c.Namespace).Get(context.TODO(), c.podDisruptionBudgetName(), metav1.GetOptions{}); err == nil { | 	if pdb, err = c.KubeClient.PodDisruptionBudgets(c.Namespace).Get(context.TODO(), c.podDisruptionBudgetName(), metav1.GetOptions{}); err == nil { | ||||||
| 		c.PodDisruptionBudget = pdb | 		c.PodDisruptionBudget = pdb | ||||||
| 		newPDB := c.generatePodDisruptionBudget() | 		newPDB := c.generatePodDisruptionBudget() | ||||||
| 		if match, reason := k8sutil.SamePDB(pdb, newPDB); !match { | 		match, reason := c.comparePodDisruptionBudget(pdb, newPDB) | ||||||
|  | 		if !match { | ||||||
| 			c.logPDBChanges(pdb, newPDB, isUpdate, reason) | 			c.logPDBChanges(pdb, newPDB, isUpdate, reason) | ||||||
| 			if err = c.updatePodDisruptionBudget(newPDB); err != nil { | 			if err = c.updatePodDisruptionBudget(newPDB); err != nil { | ||||||
| 				return err | 				return err | ||||||
|  | @ -290,7 +470,6 @@ func (c *Cluster) syncPodDisruptionBudget(isUpdate bool) error { | ||||||
| 		return fmt.Errorf("could not get pod disruption budget: %v", err) | 		return fmt.Errorf("could not get pod disruption budget: %v", err) | ||||||
| 	} | 	} | ||||||
| 	// no existing pod disruption budget, create new one
 | 	// no existing pod disruption budget, create new one
 | ||||||
| 	c.PodDisruptionBudget = nil |  | ||||||
| 	c.logger.Infof("could not find the cluster's pod disruption budget") | 	c.logger.Infof("could not find the cluster's pod disruption budget") | ||||||
| 
 | 
 | ||||||
| 	if pdb, err = c.createPodDisruptionBudget(); err != nil { | 	if pdb, err = c.createPodDisruptionBudget(); err != nil { | ||||||
|  | @ -326,12 +505,12 @@ func (c *Cluster) syncStatefulSet() error { | ||||||
| 
 | 
 | ||||||
| 	// NB: Be careful to consider the codepath that acts on podsRollingUpdateRequired before returning early.
 | 	// NB: Be careful to consider the codepath that acts on podsRollingUpdateRequired before returning early.
 | ||||||
| 	sset, err := c.KubeClient.StatefulSets(c.Namespace).Get(context.TODO(), c.statefulSetName(), metav1.GetOptions{}) | 	sset, err := c.KubeClient.StatefulSets(c.Namespace).Get(context.TODO(), c.statefulSetName(), metav1.GetOptions{}) | ||||||
| 	if err != nil { | 	if err != nil && !k8sutil.ResourceNotFound(err) { | ||||||
| 		if !k8sutil.ResourceNotFound(err) { |  | ||||||
| 		return fmt.Errorf("error during reading of statefulset: %v", err) | 		return fmt.Errorf("error during reading of statefulset: %v", err) | ||||||
| 	} | 	} | ||||||
|  | 
 | ||||||
|  | 	if err != nil { | ||||||
| 		// statefulset does not exist, try to re-create it
 | 		// statefulset does not exist, try to re-create it
 | ||||||
| 		c.Statefulset = nil |  | ||||||
| 		c.logger.Infof("cluster's statefulset does not exist") | 		c.logger.Infof("cluster's statefulset does not exist") | ||||||
| 
 | 
 | ||||||
| 		sset, err = c.createStatefulSet() | 		sset, err = c.createStatefulSet() | ||||||
|  | @ -354,6 +533,11 @@ func (c *Cluster) syncStatefulSet() error { | ||||||
| 		c.logger.Infof("created missing statefulset %q", util.NameFromMeta(sset.ObjectMeta)) | 		c.logger.Infof("created missing statefulset %q", util.NameFromMeta(sset.ObjectMeta)) | ||||||
| 
 | 
 | ||||||
| 	} else { | 	} else { | ||||||
|  | 		desiredSts, err := c.generateStatefulSet(&c.Spec) | ||||||
|  | 		if err != nil { | ||||||
|  | 			return fmt.Errorf("could not generate statefulset: %v", err) | ||||||
|  | 		} | ||||||
|  | 		c.logger.Debugf("syncing statefulsets") | ||||||
| 		// check if there are still pods with a rolling update flag
 | 		// check if there are still pods with a rolling update flag
 | ||||||
| 		for _, pod := range pods { | 		for _, pod := range pods { | ||||||
| 			if c.getRollingUpdateFlagFromPod(&pod) { | 			if c.getRollingUpdateFlagFromPod(&pod) { | ||||||
|  | @ -374,12 +558,21 @@ func (c *Cluster) syncStatefulSet() error { | ||||||
| 		// statefulset is already there, make sure we use its definition in order to compare with the spec.
 | 		// statefulset is already there, make sure we use its definition in order to compare with the spec.
 | ||||||
| 		c.Statefulset = sset | 		c.Statefulset = sset | ||||||
| 
 | 
 | ||||||
| 		desiredSts, err := c.generateStatefulSet(&c.Spec) |  | ||||||
| 		if err != nil { |  | ||||||
| 			return fmt.Errorf("could not generate statefulset: %v", err) |  | ||||||
| 		} |  | ||||||
| 
 |  | ||||||
| 		cmp := c.compareStatefulSetWith(desiredSts) | 		cmp := c.compareStatefulSetWith(desiredSts) | ||||||
|  | 		if !cmp.rollingUpdate { | ||||||
|  | 			for _, pod := range pods { | ||||||
|  | 				if changed, _ := c.compareAnnotations(pod.Annotations, desiredSts.Spec.Template.Annotations); changed { | ||||||
|  | 					patchData, err := metaAnnotationsPatch(desiredSts.Spec.Template.Annotations) | ||||||
|  | 					if err != nil { | ||||||
|  | 						return fmt.Errorf("could not form patch for pod %q annotations: %v", pod.Name, err) | ||||||
|  | 					} | ||||||
|  | 					_, err = c.KubeClient.Pods(pod.Namespace).Patch(context.TODO(), pod.Name, types.MergePatchType, []byte(patchData), metav1.PatchOptions{}) | ||||||
|  | 					if err != nil { | ||||||
|  | 						return fmt.Errorf("could not patch annotations for pod %q: %v", pod.Name, err) | ||||||
|  | 					} | ||||||
|  | 				} | ||||||
|  | 			} | ||||||
|  | 		} | ||||||
| 		if !cmp.match { | 		if !cmp.match { | ||||||
| 			if cmp.rollingUpdate { | 			if cmp.rollingUpdate { | ||||||
| 				podsToRecreate = make([]v1.Pod, 0) | 				podsToRecreate = make([]v1.Pod, 0) | ||||||
|  | @ -682,7 +875,7 @@ func (c *Cluster) checkAndSetGlobalPostgreSQLConfiguration(pod *v1.Pod, effectiv | ||||||
| 	// check if specified slots exist in config and if they differ
 | 	// check if specified slots exist in config and if they differ
 | ||||||
| 	for slotName, desiredSlot := range desiredPatroniConfig.Slots { | 	for slotName, desiredSlot := range desiredPatroniConfig.Slots { | ||||||
| 		// only add slots specified in manifest to c.replicationSlots
 | 		// only add slots specified in manifest to c.replicationSlots
 | ||||||
| 		for manifestSlotName, _ := range c.Spec.Patroni.Slots { | 		for manifestSlotName := range c.Spec.Patroni.Slots { | ||||||
| 			if manifestSlotName == slotName { | 			if manifestSlotName == slotName { | ||||||
| 				c.replicationSlots[slotName] = desiredSlot | 				c.replicationSlots[slotName] = desiredSlot | ||||||
| 			} | 			} | ||||||
|  | @ -934,14 +1127,32 @@ func (c *Cluster) updateSecret( | ||||||
| 		userMap[userKey] = pwdUser | 		userMap[userKey] = pwdUser | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
|  | 	if !reflect.DeepEqual(secret.ObjectMeta.OwnerReferences, generatedSecret.ObjectMeta.OwnerReferences) { | ||||||
|  | 		updateSecret = true | ||||||
|  | 		updateSecretMsg = fmt.Sprintf("secret %s owner references do not match the current ones", secretName) | ||||||
|  | 		secret.ObjectMeta.OwnerReferences = generatedSecret.ObjectMeta.OwnerReferences | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
| 	if updateSecret { | 	if updateSecret { | ||||||
| 		c.logger.Debugln(updateSecretMsg) | 		c.logger.Debugln(updateSecretMsg) | ||||||
| 		if _, err = c.KubeClient.Secrets(secret.Namespace).Update(context.TODO(), secret, metav1.UpdateOptions{}); err != nil { | 		if secret, err = c.KubeClient.Secrets(secret.Namespace).Update(context.TODO(), secret, metav1.UpdateOptions{}); err != nil { | ||||||
| 			return fmt.Errorf("could not update secret %s: %v", secretName, err) | 			return fmt.Errorf("could not update secret %s: %v", secretName, err) | ||||||
| 		} | 		} | ||||||
| 		c.Secrets[secret.UID] = secret | 		c.Secrets[secret.UID] = secret | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
|  | 	if changed, _ := c.compareAnnotations(secret.Annotations, generatedSecret.Annotations); changed { | ||||||
|  | 		patchData, err := metaAnnotationsPatch(generatedSecret.Annotations) | ||||||
|  | 		if err != nil { | ||||||
|  | 			return fmt.Errorf("could not form patch for secret %q annotations: %v", secret.Name, err) | ||||||
|  | 		} | ||||||
|  | 		secret, err = c.KubeClient.Secrets(secret.Namespace).Patch(context.TODO(), secret.Name, types.MergePatchType, []byte(patchData), metav1.PatchOptions{}) | ||||||
|  | 		if err != nil { | ||||||
|  | 			return fmt.Errorf("could not patch annotations for secret %q: %v", secret.Name, err) | ||||||
|  | 		} | ||||||
|  | 		c.Secrets[secret.UID] = secret | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
| 	return nil | 	return nil | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
|  | @ -1367,6 +1578,14 @@ func (c *Cluster) syncLogicalBackupJob() error { | ||||||
| 		if err != nil { | 		if err != nil { | ||||||
| 			return fmt.Errorf("could not generate the desired logical backup job state: %v", err) | 			return fmt.Errorf("could not generate the desired logical backup job state: %v", err) | ||||||
| 		} | 		} | ||||||
|  | 		if !reflect.DeepEqual(job.ObjectMeta.OwnerReferences, desiredJob.ObjectMeta.OwnerReferences) { | ||||||
|  | 			c.logger.Info("new logical backup job's owner references do not match the current ones") | ||||||
|  | 			job, err = c.KubeClient.CronJobs(job.Namespace).Update(context.TODO(), desiredJob, metav1.UpdateOptions{}) | ||||||
|  | 			if err != nil { | ||||||
|  | 				return fmt.Errorf("could not update owner references for logical backup job %q: %v", job.Name, err) | ||||||
|  | 			} | ||||||
|  | 			c.logger.Infof("logical backup job %s updated", c.getLogicalBackupJobName()) | ||||||
|  | 		} | ||||||
| 		if match, reason := c.compareLogicalBackupJob(job, desiredJob); !match { | 		if match, reason := c.compareLogicalBackupJob(job, desiredJob); !match { | ||||||
| 			c.logger.Infof("logical job %s is not in the desired state and needs to be updated", | 			c.logger.Infof("logical job %s is not in the desired state and needs to be updated", | ||||||
| 				c.getLogicalBackupJobName(), | 				c.getLogicalBackupJobName(), | ||||||
|  | @ -1379,6 +1598,17 @@ func (c *Cluster) syncLogicalBackupJob() error { | ||||||
| 			} | 			} | ||||||
| 			c.logger.Info("the logical backup job is synced") | 			c.logger.Info("the logical backup job is synced") | ||||||
| 		} | 		} | ||||||
|  | 		if changed, _ := c.compareAnnotations(job.Annotations, desiredJob.Annotations); changed { | ||||||
|  | 			patchData, err := metaAnnotationsPatch(desiredJob.Annotations) | ||||||
|  | 			if err != nil { | ||||||
|  | 				return fmt.Errorf("could not form patch for the logical backup job %q: %v", jobName, err) | ||||||
|  | 			} | ||||||
|  | 			_, err = c.KubeClient.CronJobs(c.Namespace).Patch(context.TODO(), jobName, types.MergePatchType, []byte(patchData), metav1.PatchOptions{}) | ||||||
|  | 			if err != nil { | ||||||
|  | 				return fmt.Errorf("could not patch annotations of the logical backup job %q: %v", jobName, err) | ||||||
|  | 			} | ||||||
|  | 		} | ||||||
|  | 		c.LogicalBackupJob = desiredJob | ||||||
| 		return nil | 		return nil | ||||||
| 	} | 	} | ||||||
| 	if !k8sutil.ResourceNotFound(err) { | 	if !k8sutil.ResourceNotFound(err) { | ||||||
|  |  | ||||||
|  | @ -17,6 +17,7 @@ const ( | ||||||
| 	// spilo roles
 | 	// spilo roles
 | ||||||
| 	Master  PostgresRole = "master" | 	Master  PostgresRole = "master" | ||||||
| 	Replica PostgresRole = "replica" | 	Replica PostgresRole = "replica" | ||||||
|  | 	Patroni PostgresRole = "config" | ||||||
| 
 | 
 | ||||||
| 	// roles returned by Patroni cluster endpoint
 | 	// roles returned by Patroni cluster endpoint
 | ||||||
| 	Leader        PostgresRole = "leader" | 	Leader        PostgresRole = "leader" | ||||||
|  |  | ||||||
|  | @ -176,6 +176,10 @@ func (c *Cluster) logPDBChanges(old, new *policyv1.PodDisruptionBudget, isUpdate | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	logNiceDiff(c.logger, old.Spec, new.Spec) | 	logNiceDiff(c.logger, old.Spec, new.Spec) | ||||||
|  | 
 | ||||||
|  | 	if reason != "" { | ||||||
|  | 		c.logger.Infof("reason: %s", reason) | ||||||
|  | 	} | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| func logNiceDiff(log *logrus.Entry, old, new interface{}) { | func logNiceDiff(log *logrus.Entry, old, new interface{}) { | ||||||
|  | @ -445,10 +449,6 @@ func (c *Cluster) _waitPodLabelsReady(anyReplica bool) error { | ||||||
| 	return err | 	return err | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| func (c *Cluster) waitForAnyReplicaLabelReady() error { |  | ||||||
| 	return c._waitPodLabelsReady(true) |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| func (c *Cluster) waitForAllPodsLabelReady() error { | func (c *Cluster) waitForAllPodsLabelReady() error { | ||||||
| 	return c._waitPodLabelsReady(false) | 	return c._waitPodLabelsReady(false) | ||||||
| } | } | ||||||
|  | @ -662,3 +662,24 @@ func parseResourceRequirements(resourcesRequirement v1.ResourceRequirements) (ac | ||||||
| 	} | 	} | ||||||
| 	return resources, nil | 	return resources, nil | ||||||
| } | } | ||||||
|  | 
 | ||||||
|  | func isInMaintenanceWindow(specMaintenanceWindows []acidv1.MaintenanceWindow) bool { | ||||||
|  | 	if len(specMaintenanceWindows) == 0 { | ||||||
|  | 		return true | ||||||
|  | 	} | ||||||
|  | 	now := time.Now() | ||||||
|  | 	currentDay := now.Weekday() | ||||||
|  | 	currentTime := now.Format("15:04") | ||||||
|  | 
 | ||||||
|  | 	for _, window := range specMaintenanceWindows { | ||||||
|  | 		startTime := window.StartTime.Format("15:04") | ||||||
|  | 		endTime := window.EndTime.Format("15:04") | ||||||
|  | 
 | ||||||
|  | 		if window.Everyday || window.Weekday == currentDay { | ||||||
|  | 			if currentTime >= startTime && currentTime <= endTime { | ||||||
|  | 				return true | ||||||
|  | 			} | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 	return false | ||||||
|  | } | ||||||
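Two properties of this check are worth spelling out: the string comparison of "15:04"-formatted times is lexicographically correct within a single day, but a window whose EndTime lies before its StartTime (one spanning midnight) can never match; and an empty window list means "always in a window". A hypothetical call site, gating disruptive work on the configured windows:

	// Sketch of a caller (not part of this diff): postpone a disruptive
	// operation, e.g. a major version upgrade, until a window is open.
	func (c *Cluster) runInMaintenanceWindow(run func() error) error {
		if !isInMaintenanceWindow(c.Spec.MaintenanceWindows) {
			c.logger.Info("outside of all maintenance windows, postponing")
			return nil
		}
		return run()
	}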
|  |  | ||||||
|  | @ -1,57 +1,299 @@ | ||||||
| package cluster | package cluster | ||||||
| 
 | 
 | ||||||
| import ( | import ( | ||||||
|  | 	"bytes" | ||||||
| 	"context" | 	"context" | ||||||
|  | 	"fmt" | ||||||
|  | 	"io" | ||||||
|  | 	"maps" | ||||||
|  | 	"net/http" | ||||||
|  | 	"reflect" | ||||||
| 	"testing" | 	"testing" | ||||||
|  | 	"time" | ||||||
| 
 | 
 | ||||||
|  | 	"github.com/golang/mock/gomock" | ||||||
| 	"github.com/stretchr/testify/assert" | 	"github.com/stretchr/testify/assert" | ||||||
|  | 	"github.com/zalando/postgres-operator/mocks" | ||||||
| 	acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1" | 	acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1" | ||||||
| 	fakeacidv1 "github.com/zalando/postgres-operator/pkg/generated/clientset/versioned/fake" | 	fakeacidv1 "github.com/zalando/postgres-operator/pkg/generated/clientset/versioned/fake" | ||||||
| 	"github.com/zalando/postgres-operator/pkg/util" | 	"github.com/zalando/postgres-operator/pkg/util" | ||||||
| 	"github.com/zalando/postgres-operator/pkg/util/config" | 	"github.com/zalando/postgres-operator/pkg/util/config" | ||||||
| 	"github.com/zalando/postgres-operator/pkg/util/k8sutil" | 	"github.com/zalando/postgres-operator/pkg/util/k8sutil" | ||||||
|  | 	"github.com/zalando/postgres-operator/pkg/util/patroni" | ||||||
|  | 	v1 "k8s.io/api/core/v1" | ||||||
| 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" | 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" | ||||||
|  | 	"k8s.io/apimachinery/pkg/labels" | ||||||
|  | 	"k8s.io/apimachinery/pkg/types" | ||||||
| 	k8sFake "k8s.io/client-go/kubernetes/fake" | 	k8sFake "k8s.io/client-go/kubernetes/fake" | ||||||
| ) | ) | ||||||
| 
 | 
 | ||||||
|  | var externalAnnotations = map[string]string{"existing": "annotation"} | ||||||
|  | 
 | ||||||
|  | func mustParseTime(s string) metav1.Time { | ||||||
|  | 	v, err := time.Parse("15:04", s) | ||||||
|  | 	if err != nil { | ||||||
|  | 		panic(err) | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	return metav1.Time{Time: v.UTC()} | ||||||
|  | } | ||||||
|  | 
 | ||||||
| func newFakeK8sAnnotationsClient() (k8sutil.KubernetesClient, *k8sFake.Clientset) { | func newFakeK8sAnnotationsClient() (k8sutil.KubernetesClient, *k8sFake.Clientset) { | ||||||
| 	clientSet := k8sFake.NewSimpleClientset() | 	clientSet := k8sFake.NewSimpleClientset() | ||||||
| 	acidClientSet := fakeacidv1.NewSimpleClientset() | 	acidClientSet := fakeacidv1.NewSimpleClientset() | ||||||
| 
 | 
 | ||||||
| 	return k8sutil.KubernetesClient{ | 	return k8sutil.KubernetesClient{ | ||||||
| 		PodDisruptionBudgetsGetter:   clientSet.PolicyV1(), | 		PodDisruptionBudgetsGetter:   clientSet.PolicyV1(), | ||||||
|  | 		SecretsGetter:                clientSet.CoreV1(), | ||||||
| 		ServicesGetter:               clientSet.CoreV1(), | 		ServicesGetter:               clientSet.CoreV1(), | ||||||
| 		StatefulSetsGetter:           clientSet.AppsV1(), | 		StatefulSetsGetter:           clientSet.AppsV1(), | ||||||
| 		PostgresqlsGetter:            acidClientSet.AcidV1(), | 		PostgresqlsGetter:            acidClientSet.AcidV1(), | ||||||
|  | 		PersistentVolumeClaimsGetter: clientSet.CoreV1(), | ||||||
|  | 		PersistentVolumesGetter:      clientSet.CoreV1(), | ||||||
|  | 		EndpointsGetter:              clientSet.CoreV1(), | ||||||
|  | 		ConfigMapsGetter:             clientSet.CoreV1(), | ||||||
|  | 		PodsGetter:                   clientSet.CoreV1(), | ||||||
|  | 		DeploymentsGetter:            clientSet.AppsV1(), | ||||||
|  | 		CronJobsGetter:               clientSet.BatchV1(), | ||||||
| 	}, clientSet | 	}, clientSet | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| func TestInheritedAnnotations(t *testing.T) { | func clusterLabelsOptions(cluster *Cluster) metav1.ListOptions { | ||||||
| 	testName := "test inheriting annotations from manifest" | 	clusterLabel := labels.Set(map[string]string{cluster.OpConfig.ClusterNameLabel: cluster.Name}) | ||||||
| 	client, _ := newFakeK8sAnnotationsClient() | 	return metav1.ListOptions{ | ||||||
| 	clusterName := "acid-test-cluster" | 		LabelSelector: clusterLabel.String(), | ||||||
| 	namespace := "default" | 	} | ||||||
| 	annotationValue := "acid" | } | ||||||
| 	role := Master |  | ||||||
| 
 | 
 | ||||||
|  | func checkResourcesInheritedAnnotations(cluster *Cluster, resultAnnotations map[string]string) error { | ||||||
|  | 	clusterOptions := clusterLabelsOptions(cluster) | ||||||
|  | 	// helper functions
 | ||||||
|  | 	containsAnnotations := func(expected map[string]string, actual map[string]string, objName string, objType string) error { | ||||||
|  | 		if !util.MapContains(actual, expected) { | ||||||
|  | 			return fmt.Errorf("%s %v expected annotations %#v to be contained in %#v", objType, objName, expected, actual) | ||||||
|  | 		} | ||||||
|  | 		return nil | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	updateAnnotations := func(annotations map[string]string) map[string]string { | ||||||
|  | 		result := make(map[string]string, 0) | ||||||
|  | 		for anno := range annotations { | ||||||
|  | 			if _, ok := externalAnnotations[anno]; !ok { | ||||||
|  | 				result[anno] = annotations[anno] | ||||||
|  | 			} | ||||||
|  | 		} | ||||||
|  | 		return result | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	checkSts := func(annotations map[string]string) error { | ||||||
|  | 		stsList, err := cluster.KubeClient.StatefulSets(namespace).List(context.TODO(), clusterOptions) | ||||||
|  | 		if err != nil { | ||||||
|  | 			return err | ||||||
|  | 		} | ||||||
|  | 		stsAnnotations := updateAnnotations(annotations) | ||||||
|  | 
 | ||||||
|  | 		for _, sts := range stsList.Items { | ||||||
|  | 			if err := containsAnnotations(stsAnnotations, sts.Annotations, sts.ObjectMeta.Name, "StatefulSet"); err != nil { | ||||||
|  | 				return err | ||||||
|  | 			} | ||||||
|  | 			// pod template
 | ||||||
|  | 			if err := containsAnnotations(stsAnnotations, sts.Spec.Template.Annotations, sts.ObjectMeta.Name, "StatefulSet pod template"); err != nil { | ||||||
|  | 				return err | ||||||
|  | 			} | ||||||
|  | 			// pvc template
 | ||||||
|  | 			if err := containsAnnotations(stsAnnotations, sts.Spec.VolumeClaimTemplates[0].Annotations, sts.ObjectMeta.Name, "StatefulSet pvc template"); err != nil { | ||||||
|  | 				return err | ||||||
|  | 			} | ||||||
|  | 		} | ||||||
|  | 		return nil | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	checkPods := func(annotations map[string]string) error { | ||||||
|  | 		podList, err := cluster.KubeClient.Pods(namespace).List(context.TODO(), clusterOptions) | ||||||
|  | 		if err != nil { | ||||||
|  | 			return err | ||||||
|  | 		} | ||||||
|  | 		for _, pod := range podList.Items { | ||||||
|  | 			if err := containsAnnotations(annotations, pod.Annotations, pod.ObjectMeta.Name, "Pod"); err != nil { | ||||||
|  | 				return err | ||||||
|  | 			} | ||||||
|  | 		} | ||||||
|  | 		return nil | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	checkSvc := func(annotations map[string]string) error { | ||||||
|  | 		svcList, err := cluster.KubeClient.Services(namespace).List(context.TODO(), clusterOptions) | ||||||
|  | 		if err != nil { | ||||||
|  | 			return err | ||||||
|  | 		} | ||||||
|  | 		for _, svc := range svcList.Items { | ||||||
|  | 			if err := containsAnnotations(annotations, svc.Annotations, svc.ObjectMeta.Name, "Service"); err != nil { | ||||||
|  | 				return err | ||||||
|  | 			} | ||||||
|  | 		} | ||||||
|  | 		return nil | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	checkPdb := func(annotations map[string]string) error { | ||||||
|  | 		pdbList, err := cluster.KubeClient.PodDisruptionBudgets(namespace).List(context.TODO(), clusterOptions) | ||||||
|  | 		if err != nil { | ||||||
|  | 			return err | ||||||
|  | 		} | ||||||
|  | 		for _, pdb := range pdbList.Items { | ||||||
|  | 			if err := containsAnnotations(updateAnnotations(annotations), pdb.Annotations, pdb.ObjectMeta.Name, "Pod Disruption Budget"); err != nil { | ||||||
|  | 				return err | ||||||
|  | 			} | ||||||
|  | 		} | ||||||
|  | 		return nil | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	checkPvc := func(annotations map[string]string) error { | ||||||
|  | 		pvcList, err := cluster.KubeClient.PersistentVolumeClaims(namespace).List(context.TODO(), clusterOptions) | ||||||
|  | 		if err != nil { | ||||||
|  | 			return err | ||||||
|  | 		} | ||||||
|  | 		for _, pvc := range pvcList.Items { | ||||||
|  | 			if err := containsAnnotations(annotations, pvc.Annotations, pvc.ObjectMeta.Name, "Volume claim"); err != nil { | ||||||
|  | 				return err | ||||||
|  | 			} | ||||||
|  | 		} | ||||||
|  | 		return nil | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	checkPooler := func(annotations map[string]string) error { | ||||||
|  | 		for _, role := range []PostgresRole{Master, Replica} { | ||||||
|  | 			deploy, err := cluster.KubeClient.Deployments(namespace).Get(context.TODO(), cluster.connectionPoolerName(role), metav1.GetOptions{}) | ||||||
|  | 			if err != nil { | ||||||
|  | 				return err | ||||||
|  | 			} | ||||||
|  | 			if err := containsAnnotations(annotations, deploy.Annotations, deploy.Name, "Deployment"); err != nil { | ||||||
|  | 				return err | ||||||
|  | 			} | ||||||
|  | 			if err := containsAnnotations(updateAnnotations(annotations), deploy.Spec.Template.Annotations, deploy.Name, "Pooler pod template"); err != nil { | ||||||
|  | 				return err | ||||||
|  | 			} | ||||||
|  | 		} | ||||||
|  | 		return nil | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	checkCronJob := func(annotations map[string]string) error { | ||||||
|  | 		cronJobList, err := cluster.KubeClient.CronJobs(namespace).List(context.TODO(), clusterOptions) | ||||||
|  | 		if err != nil { | ||||||
|  | 			return err | ||||||
|  | 		} | ||||||
|  | 		for _, cronJob := range cronJobList.Items { | ||||||
|  | 			if err := containsAnnotations(annotations, cronJob.Annotations, cronJob.ObjectMeta.Name, "Logical backup cron job"); err != nil { | ||||||
|  | 				return err | ||||||
|  | 			} | ||||||
|  | 			if err := containsAnnotations(updateAnnotations(annotations), cronJob.Spec.JobTemplate.Spec.Template.Annotations, cronJob.Name, "Logical backup cron job pod template"); err != nil { | ||||||
|  | 				return err | ||||||
|  | 			} | ||||||
|  | 		} | ||||||
|  | 		return nil | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	checkSecrets := func(annotations map[string]string) error { | ||||||
|  | 		secretList, err := cluster.KubeClient.Secrets(namespace).List(context.TODO(), clusterOptions) | ||||||
|  | 		if err != nil { | ||||||
|  | 			return err | ||||||
|  | 		} | ||||||
|  | 		for _, secret := range secretList.Items { | ||||||
|  | 			if err := containsAnnotations(annotations, secret.Annotations, secret.Name, "Secret"); err != nil { | ||||||
|  | 				return err | ||||||
|  | 			} | ||||||
|  | 		} | ||||||
|  | 		return nil | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	checkEndpoints := func(annotations map[string]string) error { | ||||||
|  | 		endpointsList, err := cluster.KubeClient.Endpoints(namespace).List(context.TODO(), clusterOptions) | ||||||
|  | 		if err != nil { | ||||||
|  | 			return err | ||||||
|  | 		} | ||||||
|  | 		for _, ep := range endpointsList.Items { | ||||||
|  | 			if err := containsAnnotations(annotations, ep.Annotations, ep.Name, "Endpoints"); err != nil { | ||||||
|  | 				return err | ||||||
|  | 			} | ||||||
|  | 		} | ||||||
|  | 		return nil | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	checkConfigMaps := func(annotations map[string]string) error { | ||||||
|  | 		cmList, err := cluster.KubeClient.ConfigMaps(namespace).List(context.TODO(), clusterOptions) | ||||||
|  | 		if err != nil { | ||||||
|  | 			return err | ||||||
|  | 		} | ||||||
|  | 		for _, cm := range cmList.Items { | ||||||
|  | 			if err := containsAnnotations(annotations, cm.Annotations, cm.ObjectMeta.Name, "ConfigMap"); err != nil { | ||||||
|  | 				return err | ||||||
|  | 			} | ||||||
|  | 		} | ||||||
|  | 		return nil | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	checkFuncs := []func(map[string]string) error{ | ||||||
|  | 		checkSts, checkPods, checkSvc, checkPdb, checkPooler, checkCronJob, checkPvc, checkSecrets, checkEndpoints, checkConfigMaps, | ||||||
|  | 	} | ||||||
|  | 	for _, f := range checkFuncs { | ||||||
|  | 		if err := f(resultAnnotations); err != nil { | ||||||
|  | 			return err | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 	return nil | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func createPods(cluster *Cluster) []v1.Pod { | ||||||
|  | 	podsList := make([]v1.Pod, 0) | ||||||
|  | 	for i, role := range []PostgresRole{Master, Replica} { | ||||||
|  | 		podsList = append(podsList, v1.Pod{ | ||||||
|  | 			ObjectMeta: metav1.ObjectMeta{ | ||||||
|  | 				Name:      fmt.Sprintf("%s-%d", clusterName, i), | ||||||
|  | 				Namespace: namespace, | ||||||
|  | 				Labels: map[string]string{ | ||||||
|  | 					"application":  "spilo", | ||||||
|  | 					"cluster-name": clusterName, | ||||||
|  | 					"spilo-role":   string(role), | ||||||
|  | 				}, | ||||||
|  | 			}, | ||||||
|  | 		}) | ||||||
|  | 		podsList = append(podsList, v1.Pod{ | ||||||
|  | 			ObjectMeta: metav1.ObjectMeta{ | ||||||
|  | 				Name:      fmt.Sprintf("%s-pooler-%s", clusterName, role), | ||||||
|  | 				Namespace: namespace, | ||||||
|  | 				Labels:    cluster.connectionPoolerLabels(role, true).MatchLabels, | ||||||
|  | 			}, | ||||||
|  | 		}) | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	return podsList | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func newInheritedAnnotationsCluster(client k8sutil.KubernetesClient) (*Cluster, error) { | ||||||
| 	pg := acidv1.Postgresql{ | 	pg := acidv1.Postgresql{ | ||||||
| 		ObjectMeta: metav1.ObjectMeta{ | 		ObjectMeta: metav1.ObjectMeta{ | ||||||
| 			Name: clusterName, | 			Name: clusterName, | ||||||
| 			Annotations: map[string]string{ | 			Annotations: map[string]string{ | ||||||
| 				"owned-by": annotationValue, | 				"owned-by": "acid", | ||||||
|  | 				"foo":      "bar", // should not be inherited
 | ||||||
| 			}, | 			}, | ||||||
| 		}, | 		}, | ||||||
| 		Spec: acidv1.PostgresSpec{ | 		Spec: acidv1.PostgresSpec{ | ||||||
|  | 			EnableConnectionPooler:        boolToPointer(true), | ||||||
| 			EnableReplicaConnectionPooler: boolToPointer(true), | 			EnableReplicaConnectionPooler: boolToPointer(true), | ||||||
|  | 			EnableLogicalBackup:           true, | ||||||
| 			Volume: acidv1.Volume{ | 			Volume: acidv1.Volume{ | ||||||
| 				Size: "1Gi", | 				Size: "1Gi", | ||||||
| 			}, | 			}, | ||||||
|  | 			NumberOfInstances: 2, | ||||||
| 		}, | 		}, | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	var cluster = New( | 	cluster := New( | ||||||
| 		Config{ | 		Config{ | ||||||
| 			OpConfig: config.Config{ | 			OpConfig: config.Config{ | ||||||
|  | 				PatroniAPICheckInterval: time.Duration(1), | ||||||
|  | 				PatroniAPICheckTimeout:  time.Duration(5), | ||||||
|  | 				KubernetesUseConfigMaps: true, | ||||||
| 				ConnectionPooler: config.ConnectionPooler{ | 				ConnectionPooler: config.ConnectionPooler{ | ||||||
| 					ConnectionPoolerDefaultCPURequest:    "100m", | 					ConnectionPoolerDefaultCPURequest:    "100m", | ||||||
| 					ConnectionPoolerDefaultCPULimit:      "100m", | 					ConnectionPoolerDefaultCPULimit:      "100m", | ||||||
|  | @ -59,6 +301,7 @@ func TestInheritedAnnotations(t *testing.T) { | ||||||
| 					ConnectionPoolerDefaultMemoryLimit:   "100Mi", | 					ConnectionPoolerDefaultMemoryLimit:   "100Mi", | ||||||
| 					NumberOfInstances:                    k8sutil.Int32ToPointer(1), | 					NumberOfInstances:                    k8sutil.Int32ToPointer(1), | ||||||
| 				}, | 				}, | ||||||
|  | 				PDBNameFormat:       "postgres-{cluster}-pdb", | ||||||
| 				PodManagementPolicy: "ordered_ready", | 				PodManagementPolicy: "ordered_ready", | ||||||
| 				Resources: config.Resources{ | 				Resources: config.Resources{ | ||||||
| 					ClusterLabels:         map[string]string{"application": "spilo"}, | 					ClusterLabels:         map[string]string{"application": "spilo"}, | ||||||
|  | @ -69,75 +312,302 @@ func TestInheritedAnnotations(t *testing.T) { | ||||||
| 					DefaultMemoryLimit:    "300Mi", | 					DefaultMemoryLimit:    "300Mi", | ||||||
| 					InheritedAnnotations:  []string{"owned-by"}, | 					InheritedAnnotations:  []string{"owned-by"}, | ||||||
| 					PodRoleLabel:          "spilo-role", | 					PodRoleLabel:          "spilo-role", | ||||||
|  | 					ResourceCheckInterval: time.Duration(testResourceCheckInterval), | ||||||
|  | 					ResourceCheckTimeout:  time.Duration(testResourceCheckTimeout), | ||||||
|  | 					MinInstances:          -1, | ||||||
|  | 					MaxInstances:          -1, | ||||||
| 				}, | 				}, | ||||||
| 			}, | 			}, | ||||||
| 		}, client, pg, logger, eventRecorder) | 		}, client, pg, logger, eventRecorder) | ||||||
| 
 |  | ||||||
| 	cluster.Name = clusterName | 	cluster.Name = clusterName | ||||||
| 	cluster.Namespace = namespace | 	cluster.Namespace = namespace | ||||||
| 
 |  | ||||||
| 	// test annotationsSet function
 |  | ||||||
| 	inheritedAnnotations := cluster.annotationsSet(nil) |  | ||||||
| 
 |  | ||||||
| 	listOptions := metav1.ListOptions{ |  | ||||||
| 		LabelSelector: cluster.labelsSet(false).String(), |  | ||||||
| 	} |  | ||||||
| 
 |  | ||||||
| 	// check statefulset annotations
 |  | ||||||
| 	_, err := cluster.createStatefulSet() | 	_, err := cluster.createStatefulSet() | ||||||
| 	assert.NoError(t, err) | 	if err != nil { | ||||||
|  | 		return nil, err | ||||||
|  | 	} | ||||||
|  | 	_, err = cluster.createService(Master) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return nil, err | ||||||
|  | 	} | ||||||
|  | 	_, err = cluster.createPodDisruptionBudget() | ||||||
|  | 	if err != nil { | ||||||
|  | 		return nil, err | ||||||
|  | 	} | ||||||
|  | 	_, err = cluster.createConnectionPooler(mockInstallLookupFunction) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return nil, err | ||||||
|  | 	} | ||||||
|  | 	err = cluster.createLogicalBackupJob() | ||||||
|  | 	if err != nil { | ||||||
|  | 		return nil, err | ||||||
|  | 	} | ||||||
|  | 	pvcList := CreatePVCs(namespace, clusterName, cluster.labelsSet(false), 2, "1Gi") | ||||||
|  | 	for _, pvc := range pvcList.Items { | ||||||
|  | 		_, err = cluster.KubeClient.PersistentVolumeClaims(namespace).Create(context.TODO(), &pvc, metav1.CreateOptions{}) | ||||||
|  | 		if err != nil { | ||||||
|  | 			return nil, err | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 	podsList := createPods(cluster) | ||||||
|  | 	for _, pod := range podsList { | ||||||
|  | 		_, err = cluster.KubeClient.Pods(namespace).Create(context.TODO(), &pod, metav1.CreateOptions{}) | ||||||
|  | 		if err != nil { | ||||||
|  | 			return nil, err | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
| 
 | 
 | ||||||
| 	stsList, err := client.StatefulSets(namespace).List(context.TODO(), listOptions) | 	// resources which Patroni creates
 | ||||||
| 	assert.NoError(t, err) | 	if err = createPatroniResources(cluster); err != nil { | ||||||
|  | 		return nil, err | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	return cluster, nil | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func createPatroniResources(cluster *Cluster) error { | ||||||
|  | 	patroniService := cluster.generateService(Replica, &pg.Spec) | ||||||
|  | 	patroniService.ObjectMeta.Name = cluster.serviceName(Patroni) | ||||||
|  | 	_, err := cluster.KubeClient.Services(namespace).Create(context.TODO(), patroniService, metav1.CreateOptions{}) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return err | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	for _, suffix := range patroniObjectSuffixes { | ||||||
|  | 		metadata := metav1.ObjectMeta{ | ||||||
|  | 			Name:      fmt.Sprintf("%s-%s", clusterName, suffix), | ||||||
|  | 			Namespace: namespace, | ||||||
|  | 			Annotations: map[string]string{ | ||||||
|  | 				"initialize": "123456789", | ||||||
|  | 			}, | ||||||
|  | 			Labels: cluster.labelsSet(false), | ||||||
|  | 		} | ||||||
|  | 
 | ||||||
|  | 		if cluster.OpConfig.KubernetesUseConfigMaps { | ||||||
|  | 			configMap := v1.ConfigMap{ | ||||||
|  | 				ObjectMeta: metadata, | ||||||
|  | 			} | ||||||
|  | 			_, err := cluster.KubeClient.ConfigMaps(namespace).Create(context.TODO(), &configMap, metav1.CreateOptions{}) | ||||||
|  | 			if err != nil { | ||||||
|  | 				return err | ||||||
|  | 			} | ||||||
|  | 		} else { | ||||||
|  | 			endpoints := v1.Endpoints{ | ||||||
|  | 				ObjectMeta: metadata, | ||||||
|  | 			} | ||||||
|  | 			_, err := cluster.KubeClient.Endpoints(namespace).Create(context.TODO(), &endpoints, metav1.CreateOptions{}) | ||||||
|  | 			if err != nil { | ||||||
|  | 				return err | ||||||
|  | 			} | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	return nil | ||||||
|  | } | ||||||
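createPatroniResources loops over patroniObjectSuffixes, which is defined elsewhere in the package. Assuming it mirrors the DCS objects Patroni maintains next to the cluster, a plausible definition would be:

	// Assumed definition (not shown in this diff): suffixes of the
	// ConfigMaps or Endpoints that Patroni creates for cluster state.
	var patroniObjectSuffixes = []string{"config", "failover", "sync"}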
|  | 
 | ||||||
|  | func annotateResources(cluster *Cluster) error { | ||||||
|  | 	clusterOptions := clusterLabelsOptions(cluster) | ||||||
|  | 	patchData, err := metaAnnotationsPatch(externalAnnotations) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return err | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	stsList, err := cluster.KubeClient.StatefulSets(namespace).List(context.TODO(), clusterOptions) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return err | ||||||
|  | 	} | ||||||
| 	for _, sts := range stsList.Items { | 	for _, sts := range stsList.Items { | ||||||
| 		if !(util.MapContains(sts.ObjectMeta.Annotations, inheritedAnnotations)) { | 		sts.Annotations = externalAnnotations | ||||||
| 			t.Errorf("%s: StatefulSet %v not inherited annotations %#v, got %#v", testName, sts.ObjectMeta.Name, inheritedAnnotations, sts.ObjectMeta.Annotations) | 		if _, err = cluster.KubeClient.StatefulSets(namespace).Patch(context.TODO(), sts.Name, types.MergePatchType, []byte(patchData), metav1.PatchOptions{}); err != nil { | ||||||
| 		} | 			return err | ||||||
| 		// pod template
 |  | ||||||
| 		if !(util.MapContains(sts.Spec.Template.ObjectMeta.Annotations, inheritedAnnotations)) { |  | ||||||
| 			t.Errorf("%s: pod template %v not inherited annotations %#v, got %#v", testName, sts.ObjectMeta.Name, inheritedAnnotations, sts.ObjectMeta.Annotations) |  | ||||||
| 		} |  | ||||||
| 		// pvc template
 |  | ||||||
| 		if !(util.MapContains(sts.Spec.VolumeClaimTemplates[0].Annotations, inheritedAnnotations)) { |  | ||||||
| 			t.Errorf("%s: PVC template %v not inherited annotations %#v, got %#v", testName, sts.ObjectMeta.Name, inheritedAnnotations, sts.ObjectMeta.Annotations) |  | ||||||
| 		} | 		} | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	// check service annotations
 | 	podList, err := cluster.KubeClient.Pods(namespace).List(context.TODO(), clusterOptions) | ||||||
| 	cluster.createService(Master) | 	if err != nil { | ||||||
| 	svcList, err := client.Services(namespace).List(context.TODO(), listOptions) | 		return err | ||||||
| 	assert.NoError(t, err) | 	} | ||||||
|  | 	for _, pod := range podList.Items { | ||||||
|  | 		pod.Annotations = externalAnnotations | ||||||
|  | 		if _, err = cluster.KubeClient.Pods(namespace).Patch(context.TODO(), pod.Name, types.MergePatchType, []byte(patchData), metav1.PatchOptions{}); err != nil { | ||||||
|  | 			return err | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	svcList, err := cluster.KubeClient.Services(namespace).List(context.TODO(), clusterOptions) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return err | ||||||
|  | 	} | ||||||
| 	for _, svc := range svcList.Items { | 	for _, svc := range svcList.Items { | ||||||
| 		if !(util.MapContains(svc.ObjectMeta.Annotations, inheritedAnnotations)) { | 		svc.Annotations = externalAnnotations | ||||||
| 			t.Errorf("%s: Service %v not inherited annotations %#v, got %#v", testName, svc.ObjectMeta.Name, inheritedAnnotations, svc.ObjectMeta.Annotations) | 		if _, err = cluster.KubeClient.Services(namespace).Patch(context.TODO(), svc.Name, types.MergePatchType, []byte(patchData), metav1.PatchOptions{}); err != nil { | ||||||
|  | 			return err | ||||||
| 		} | 		} | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	// check pod disruption budget annotations
 | 	pdbList, err := cluster.KubeClient.PodDisruptionBudgets(namespace).List(context.TODO(), clusterOptions) | ||||||
| 	cluster.createPodDisruptionBudget() | 	if err != nil { | ||||||
| 	pdbList, err := client.PodDisruptionBudgets(namespace).List(context.TODO(), listOptions) | 		return err | ||||||
| 	assert.NoError(t, err) | 	} | ||||||
| 	for _, pdb := range pdbList.Items { | 	for _, pdb := range pdbList.Items { | ||||||
| 		if !(util.MapContains(pdb.ObjectMeta.Annotations, inheritedAnnotations)) { | 		pdb.Annotations = externalAnnotations | ||||||
| 			t.Errorf("%s: Pod Disruption Budget %v not inherited annotations %#v, got %#v", testName, pdb.ObjectMeta.Name, inheritedAnnotations, pdb.ObjectMeta.Annotations) | 		_, err = cluster.KubeClient.PodDisruptionBudgets(namespace).Patch(context.TODO(), pdb.Name, types.MergePatchType, []byte(patchData), metav1.PatchOptions{}) | ||||||
|  | 		if err != nil { | ||||||
|  | 			return err | ||||||
| 		} | 		} | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	// check pooler deployment annotations
 | 	cronJobList, err := cluster.KubeClient.CronJobs(namespace).List(context.TODO(), clusterOptions) | ||||||
| 	cluster.ConnectionPooler = map[PostgresRole]*ConnectionPoolerObjects{} | 	if err != nil { | ||||||
| 	cluster.ConnectionPooler[role] = &ConnectionPoolerObjects{ | 		return err | ||||||
| 		Name:        cluster.connectionPoolerName(role), |  | ||||||
| 		ClusterName: cluster.Name, |  | ||||||
| 		Namespace:   cluster.Namespace, |  | ||||||
| 		Role:        role, |  | ||||||
| 	} | 	} | ||||||
| 	deploy, err := cluster.generateConnectionPoolerDeployment(cluster.ConnectionPooler[role]) | 	for _, cronJob := range cronJobList.Items { | ||||||
|  | 		cronJob.Annotations = externalAnnotations | ||||||
|  | 		_, err = cluster.KubeClient.CronJobs(namespace).Patch(context.TODO(), cronJob.Name, types.MergePatchType, []byte(patchData), metav1.PatchOptions{}) | ||||||
|  | 		if err != nil { | ||||||
|  | 			return err | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	pvcList, err := cluster.KubeClient.PersistentVolumeClaims(namespace).List(context.TODO(), clusterOptions) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return err | ||||||
|  | 	} | ||||||
|  | 	for _, pvc := range pvcList.Items { | ||||||
|  | 		pvc.Annotations = externalAnnotations | ||||||
|  | 		if _, err = cluster.KubeClient.PersistentVolumeClaims(namespace).Patch(context.TODO(), pvc.Name, types.MergePatchType, []byte(patchData), metav1.PatchOptions{}); err != nil { | ||||||
|  | 			return err | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	for _, role := range []PostgresRole{Master, Replica} { | ||||||
|  | 		deploy, err := cluster.KubeClient.Deployments(namespace).Get(context.TODO(), cluster.connectionPoolerName(role), metav1.GetOptions{}) | ||||||
|  | 		if err != nil { | ||||||
|  | 			return err | ||||||
|  | 		} | ||||||
|  | 		deploy.Annotations = externalAnnotations | ||||||
|  | 		if _, err = cluster.KubeClient.Deployments(namespace).Patch(context.TODO(), deploy.Name, types.MergePatchType, []byte(patchData), metav1.PatchOptions{}); err != nil { | ||||||
|  | 			return err | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	secrets, err := cluster.KubeClient.Secrets(namespace).List(context.TODO(), clusterOptions) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return err | ||||||
|  | 	} | ||||||
|  | 	for _, secret := range secrets.Items { | ||||||
|  | 		secret.Annotations = externalAnnotations | ||||||
|  | 		if _, err = cluster.KubeClient.Secrets(namespace).Patch(context.TODO(), secret.Name, types.MergePatchType, []byte(patchData), metav1.PatchOptions{}); err != nil { | ||||||
|  | 			return err | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	endpoints, err := cluster.KubeClient.Endpoints(namespace).List(context.TODO(), clusterOptions) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return err | ||||||
|  | 	} | ||||||
|  | 	for _, ep := range endpoints.Items { | ||||||
|  | 		ep.Annotations = externalAnnotations | ||||||
|  | 		if _, err = cluster.KubeClient.Endpoints(namespace).Patch(context.TODO(), ep.Name, types.MergePatchType, []byte(patchData), metav1.PatchOptions{}); err != nil { | ||||||
|  | 			return err | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	configMaps, err := cluster.KubeClient.ConfigMaps(namespace).List(context.TODO(), clusterOptions) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return err | ||||||
|  | 	} | ||||||
|  | 	for _, cm := range configMaps.Items { | ||||||
|  | 		cm.Annotations = externalAnnotations | ||||||
|  | 		if _, err = cluster.KubeClient.ConfigMaps(namespace).Patch(context.TODO(), cm.Name, types.MergePatchType, []byte(patchData), metav1.PatchOptions{}); err != nil { | ||||||
|  | 			return err | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	return nil | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func TestInheritedAnnotations(t *testing.T) { | ||||||
|  | 	// mocks
 | ||||||
|  | 	ctrl := gomock.NewController(t) | ||||||
|  | 	defer ctrl.Finish() | ||||||
|  | 	client, _ := newFakeK8sAnnotationsClient() | ||||||
|  | 	mockClient := mocks.NewMockHTTPClient(ctrl) | ||||||
|  | 
 | ||||||
|  | 	cluster, err := newInheritedAnnotationsCluster(client) | ||||||
| 	assert.NoError(t, err) | 	assert.NoError(t, err) | ||||||
| 
 | 
 | ||||||
| 	if !(util.MapContains(deploy.ObjectMeta.Annotations, inheritedAnnotations)) { | 	configJson := `{"postgresql": {"parameters": {"log_min_duration_statement": 200, "max_connections": 50}}, "ttl": 20}` | ||||||
| 		t.Errorf("%s: Deployment %v not inherited annotations %#v, got %#v", testName, deploy.ObjectMeta.Name, inheritedAnnotations, deploy.ObjectMeta.Annotations) | 	response := http.Response{ | ||||||
|  | 		StatusCode: 200, | ||||||
|  | 		Body:       io.NopCloser(bytes.NewReader([]byte(configJson))), | ||||||
| 	} | 	} | ||||||
|  | 	mockClient.EXPECT().Do(gomock.Any()).Return(&response, nil).AnyTimes() | ||||||
|  | 	cluster.patroni = patroni.New(patroniLogger, mockClient) | ||||||
| 
 | 
 | ||||||
|  | 	err = cluster.Sync(&cluster.Postgresql) | ||||||
|  | 	assert.NoError(t, err) | ||||||
|  | 
 | ||||||
|  | 	filterLabels := cluster.labelsSet(false) | ||||||
|  | 
 | ||||||
|  | 	// Finally, tests!
 | ||||||
|  | 	result := map[string]string{"owned-by": "acid"} | ||||||
|  | 	assert.True(t, reflect.DeepEqual(result, cluster.annotationsSet(nil))) | ||||||
|  | 
 | ||||||
|  | 	// 1. Check initial state
 | ||||||
|  | 	err = checkResourcesInheritedAnnotations(cluster, result) | ||||||
|  | 	assert.NoError(t, err) | ||||||
|  | 
 | ||||||
|  | 	// 2. Check annotation value change
 | ||||||
|  | 
 | ||||||
|  | 	// 2.1 Sync event
 | ||||||
|  | 	newSpec := cluster.Postgresql.DeepCopy() | ||||||
|  | 	newSpec.Annotations["owned-by"] = "fooSync" | ||||||
|  | 	result["owned-by"] = "fooSync" | ||||||
|  | 
 | ||||||
|  | 	err = cluster.Sync(newSpec) | ||||||
|  | 	assert.NoError(t, err) | ||||||
|  | 	err = checkResourcesInheritedAnnotations(cluster, result) | ||||||
|  | 	assert.NoError(t, err) | ||||||
|  | 
 | ||||||
|  | 	// + existing PVC without annotations
 | ||||||
|  | 	cluster.KubeClient.PersistentVolumeClaims(namespace).Create(context.TODO(), &CreatePVCs(namespace, clusterName, filterLabels, 3, "1Gi").Items[2], metav1.CreateOptions{}) | ||||||
|  | 	err = cluster.Sync(newSpec) | ||||||
|  | 	assert.NoError(t, err) | ||||||
|  | 	err = checkResourcesInheritedAnnotations(cluster, result) | ||||||
|  | 	assert.NoError(t, err) | ||||||
|  | 
 | ||||||
|  | 	// 2.2 Update event
 | ||||||
|  | 	newSpec = cluster.Postgresql.DeepCopy() | ||||||
|  | 	newSpec.Annotations["owned-by"] = "fooUpdate" | ||||||
|  | 	result["owned-by"] = "fooUpdate" | ||||||
|  | 	// + new PVC
 | ||||||
|  | 	cluster.KubeClient.PersistentVolumeClaims(namespace).Create(context.TODO(), &CreatePVCs(namespace, clusterName, filterLabels, 4, "1Gi").Items[3], metav1.CreateOptions{}) | ||||||
|  | 
 | ||||||
|  | 	err = cluster.Update(cluster.Postgresql.DeepCopy(), newSpec) | ||||||
|  | 	assert.NoError(t, err) | ||||||
|  | 
 | ||||||
|  | 	err = checkResourcesInheritedAnnotations(cluster, result) | ||||||
|  | 	assert.NoError(t, err) | ||||||
|  | 
 | ||||||
|  | 	// 3. Change from ConfigMaps to Endpoints
 | ||||||
|  | 	err = cluster.deletePatroniResources() | ||||||
|  | 	assert.NoError(t, err) | ||||||
|  | 	cluster.OpConfig.KubernetesUseConfigMaps = false | ||||||
|  | 	err = createPatroniResources(cluster) | ||||||
|  | 	assert.NoError(t, err) | ||||||
|  | 	err = cluster.Sync(newSpec.DeepCopy()) | ||||||
|  | 	assert.NoError(t, err) | ||||||
|  | 	err = checkResourcesInheritedAnnotations(cluster, result) | ||||||
|  | 	assert.NoError(t, err) | ||||||
|  | 
 | ||||||
|  | 	// 4. Existing annotations (should not be removed)
 | ||||||
|  | 	err = annotateResources(cluster) | ||||||
|  | 	assert.NoError(t, err) | ||||||
|  | 	maps.Copy(result, externalAnnotations) | ||||||
|  | 	err = cluster.Sync(newSpec.DeepCopy()) | ||||||
|  | 	assert.NoError(t, err) | ||||||
|  | 	err = checkResourcesInheritedAnnotations(cluster, result) | ||||||
|  | 	assert.NoError(t, err) | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| func Test_trimCronjobName(t *testing.T) { | func Test_trimCronjobName(t *testing.T) { | ||||||
|  | @ -179,3 +649,65 @@ func Test_trimCronjobName(t *testing.T) { | ||||||
| 		}) | 		}) | ||||||
| 	} | 	} | ||||||
| } | } | ||||||
|  | 
 | ||||||
|  | func TestIsInMaintenanceWindow(t *testing.T) { | ||||||
|  | 	now := time.Now() | ||||||
|  | 	futureTimeStart := now.Add(1 * time.Hour) | ||||||
|  | 	futureTimeStartFormatted := futureTimeStart.Format("15:04") | ||||||
|  | 	futureTimeEnd := now.Add(2 * time.Hour) | ||||||
|  | 	futureTimeEndFormatted := futureTimeEnd.Format("15:04") | ||||||
|  | 
 | ||||||
|  | 	tests := []struct { | ||||||
|  | 		name     string | ||||||
|  | 		windows  []acidv1.MaintenanceWindow | ||||||
|  | 		expected bool | ||||||
|  | 	}{ | ||||||
|  | 		{ | ||||||
|  | 			name:     "no maintenance windows", | ||||||
|  | 			windows:  nil, | ||||||
|  | 			expected: true, | ||||||
|  | 		}, | ||||||
|  | 		{ | ||||||
|  | 			name: "maintenance windows with everyday", | ||||||
|  | 			windows: []acidv1.MaintenanceWindow{ | ||||||
|  | 				{ | ||||||
|  | 					Everyday:  true, | ||||||
|  | 					StartTime: mustParseTime("00:00"), | ||||||
|  | 					EndTime:   mustParseTime("23:59"), | ||||||
|  | 				}, | ||||||
|  | 			}, | ||||||
|  | 			expected: true, | ||||||
|  | 		}, | ||||||
|  | 		{ | ||||||
|  | 			name: "maintenance windows with weekday", | ||||||
|  | 			windows: []acidv1.MaintenanceWindow{ | ||||||
|  | 				{ | ||||||
|  | 					Weekday:   now.Weekday(), | ||||||
|  | 					StartTime: mustParseTime("00:00"), | ||||||
|  | 					EndTime:   mustParseTime("23:59"), | ||||||
|  | 				}, | ||||||
|  | 			}, | ||||||
|  | 			expected: true, | ||||||
|  | 		}, | ||||||
|  | 		{ | ||||||
|  | 			name: "maintenance windows with future interval time", | ||||||
|  | 			windows: []acidv1.MaintenanceWindow{ | ||||||
|  | 				{ | ||||||
|  | 					Weekday:   now.Weekday(), | ||||||
|  | 					StartTime: mustParseTime(futureTimeStartFormatted), | ||||||
|  | 					EndTime:   mustParseTime(futureTimeEndFormatted), | ||||||
|  | 				}, | ||||||
|  | 			}, | ||||||
|  | 			expected: false, | ||||||
|  | 		}, | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	for _, tt := range tests { | ||||||
|  | 		t.Run(tt.name, func(t *testing.T) { | ||||||
|  | 			cluster.Spec.MaintenanceWindows = tt.windows | ||||||
|  | 			if isInMaintenanceWindow(cluster.Spec.MaintenanceWindows) != tt.expected { | ||||||
|  | 				t.Errorf("Expected isInMaintenanceWindow to return %t", tt.expected) | ||||||
|  | 			} | ||||||
|  | 		}) | ||||||
|  | 	} | ||||||
|  | } | ||||||
|  |  | ||||||
|  | @ -9,9 +9,9 @@ import ( | ||||||
| 	v1 "k8s.io/api/core/v1" | 	v1 "k8s.io/api/core/v1" | ||||||
| 	"k8s.io/apimachinery/pkg/api/resource" | 	"k8s.io/apimachinery/pkg/api/resource" | ||||||
| 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" | 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" | ||||||
|  | 	"k8s.io/apimachinery/pkg/types" | ||||||
| 
 | 
 | ||||||
| 	"github.com/aws/aws-sdk-go/aws" | 	"github.com/aws/aws-sdk-go/aws" | ||||||
| 	acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1" |  | ||||||
| 	"github.com/zalando/postgres-operator/pkg/spec" | 	"github.com/zalando/postgres-operator/pkg/spec" | ||||||
| 	"github.com/zalando/postgres-operator/pkg/util" | 	"github.com/zalando/postgres-operator/pkg/util" | ||||||
| 	"github.com/zalando/postgres-operator/pkg/util/constants" | 	"github.com/zalando/postgres-operator/pkg/util/constants" | ||||||
|  | @ -42,18 +42,14 @@ func (c *Cluster) syncVolumes() error { | ||||||
| 				c.logger.Errorf("errors occurred during EBS volume adjustments: %v", err) | 				c.logger.Errorf("errors occurred during EBS volume adjustments: %v", err) | ||||||
| 			} | 			} | ||||||
| 		} | 		} | ||||||
|  | 	} | ||||||
| 
 | 
 | ||||||
| 		// resize pvc to adjust filesystem size until better K8s support
 |  | ||||||
| 	if err = c.syncVolumeClaims(); err != nil { | 	if err = c.syncVolumeClaims(); err != nil { | ||||||
| 		err = fmt.Errorf("could not sync persistent volume claims: %v", err) | 		err = fmt.Errorf("could not sync persistent volume claims: %v", err) | ||||||
| 		return err | 		return err | ||||||
| 	} | 	} | ||||||
| 	} else if c.OpConfig.StorageResizeMode == "pvc" { | 
 | ||||||
| 		if err = c.syncVolumeClaims(); err != nil { | 	if c.OpConfig.StorageResizeMode == "ebs" { | ||||||
| 			err = fmt.Errorf("could not sync persistent volume claims: %v", err) |  | ||||||
| 			return err |  | ||||||
| 		} |  | ||||||
| 	} else if c.OpConfig.StorageResizeMode == "ebs" { |  | ||||||
| 		// potentially enlarge volumes before changing the statefulset. By doing that
 | 		// potentially enlarge volumes before changing the statefulset. By doing that
 | ||||||
| 		// in this order we make sure the operator is not stuck waiting for a pod that
 | 		// in this order we make sure the operator is not stuck waiting for a pod that
 | ||||||
| 		// cannot start because it ran out of disk space.
 | 		// cannot start because it ran out of disk space.
 | ||||||
|  | @ -64,8 +60,6 @@ func (c *Cluster) syncVolumes() error { | ||||||
| 			err = fmt.Errorf("could not sync persistent volumes: %v", err) | 			err = fmt.Errorf("could not sync persistent volumes: %v", err) | ||||||
| 			return err | 			return err | ||||||
| 		} | 		} | ||||||
| 	} else { |  | ||||||
| 		c.logger.Infof("Storage resize is disabled (storage_resize_mode is off). Skipping volume sync.") |  | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	return nil | 	return nil | ||||||
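With the restructuring above, the PVC sync runs unconditionally (it must at least reconcile annotations in every mode), and only the EBS path stays behind a mode check. Assuming storage_resize_mode takes the documented values off, pvc, ebs and mixed, and that the partially shown first branch is the mixed-mode EBS adjustment, the per-mode behavior reduces to roughly this sketch:

	// Sketch of the resulting per-mode steps (assumed mode set).
	func volumeSyncSteps(mode string) []string {
		steps := []string{}
		if mode == "mixed" {
			steps = append(steps, "adjust EBS volumes (gp3 migration)")
		}
		// always: reconcile PVC annotations; resize requests unless the
		// mode delegates sizing to EBS or disables it (see syncVolumeClaims)
		steps = append(steps, "sync volume claims")
		if mode == "ebs" {
			steps = append(steps, "populate volume metadata and sync EBS volumes")
		}
		return steps
	}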
|  | @ -187,18 +181,57 @@ func (c *Cluster) populateVolumeMetaData() error { | ||||||
| func (c *Cluster) syncVolumeClaims() error { | func (c *Cluster) syncVolumeClaims() error { | ||||||
| 	c.setProcessName("syncing volume claims") | 	c.setProcessName("syncing volume claims") | ||||||
| 
 | 
 | ||||||
| 	needsResizing, err := c.volumeClaimsNeedResizing(c.Spec.Volume) | 	ignoreResize := false | ||||||
|  | 
 | ||||||
|  | 	if c.OpConfig.StorageResizeMode == "off" || c.OpConfig.StorageResizeMode == "ebs" { | ||||||
|  | 		ignoreResize = true | ||||||
|  | 		c.logger.Debugf("Storage resize mode is set to %q. Skipping volume size sync of PVCs.", c.OpConfig.StorageResizeMode) | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	newSize, err := resource.ParseQuantity(c.Spec.Volume.Size) | ||||||
| 	if err != nil { | 	if err != nil { | ||||||
| 		return fmt.Errorf("could not compare size of the volume claims: %v", err) | 		return fmt.Errorf("could not parse volume size from the manifest: %v", err) | ||||||
|  | 	} | ||||||
|  | 	manifestSize := quantityToGigabyte(newSize) | ||||||
|  | 
 | ||||||
|  | 	pvcs, err := c.listPersistentVolumeClaims() | ||||||
|  | 	if err != nil { | ||||||
|  | 		return fmt.Errorf("could not retrieve persistent volume claims: %v", err) | ||||||
|  | 	} | ||||||
|  | 	for _, pvc := range pvcs { | ||||||
|  | 		needsUpdate := false | ||||||
|  | 		currentSize := quantityToGigabyte(pvc.Spec.Resources.Requests[v1.ResourceStorage]) | ||||||
|  | 		if !ignoreResize && currentSize != manifestSize { | ||||||
|  | 			if currentSize < manifestSize { | ||||||
|  | 				pvc.Spec.Resources.Requests[v1.ResourceStorage] = newSize | ||||||
|  | 				needsUpdate = true | ||||||
|  | 				c.logger.Debugf("persistent volume claim for volume %q needs to be resized", pvc.Name) | ||||||
|  | 			} else { | ||||||
|  | 				c.logger.Warningf("cannot shrink persistent volume") | ||||||
|  | 			} | ||||||
| 		} | 		} | ||||||
| 
 | 
 | ||||||
| 	if !needsResizing { | 		if needsUpdate { | ||||||
| 		c.logger.Infof("volume claims do not require changes") | 			c.logger.Debugf("updating persistent volume claim definition for volume %q", pvc.Name) | ||||||
| 		return nil | 			if _, err := c.KubeClient.PersistentVolumeClaims(pvc.Namespace).Update(context.TODO(), &pvc, metav1.UpdateOptions{}); err != nil { | ||||||
|  | 				return fmt.Errorf("could not update persistent volume claim: %q", err) | ||||||
|  | 			} | ||||||
|  | 			c.logger.Debugf("successfully updated persistent volume claim %q", pvc.Name) | ||||||
|  | 		} else { | ||||||
| 		} | 			c.logger.Debugf("volume claim for volume %q does not require updates", pvc.Name) | ||||||
| 		} | 		} | ||||||
| 
 | 
 | ||||||
| 	if err := c.resizeVolumeClaims(c.Spec.Volume); err != nil { | 		newAnnotations := c.annotationsSet(nil) | ||||||
| 		return fmt.Errorf("could not sync volume claims: %v", err) | 		if changed, _ := c.compareAnnotations(pvc.Annotations, newAnnotations); changed { | ||||||
|  | 			patchData, err := metaAnnotationsPatch(newAnnotations) | ||||||
|  | 			if err != nil { | ||||||
|  | 				return fmt.Errorf("could not form patch for the persistent volume claim for volume %q: %v", pvc.Name, err) | ||||||
|  | 			} | ||||||
|  | 			_, err = c.KubeClient.PersistentVolumeClaims(pvc.Namespace).Patch(context.TODO(), pvc.Name, types.MergePatchType, []byte(patchData), metav1.PatchOptions{}) | ||||||
|  | 			if err != nil { | ||||||
|  | 				return fmt.Errorf("could not patch annotations of the persistent volume claim for volume %q: %v", pvc.Name, err) | ||||||
|  | 			} | ||||||
|  | 		} | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	c.logger.Infof("volume claims have been synced successfully") | 	c.logger.Infof("volume claims have been synced successfully") | ||||||
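Both sides of the size comparison above go through quantityToGigabyte, which this diff does not show. Assuming it truncates to whole gibibytes, as its name suggests, requests that differ only below 1Gi granularity compare equal and never trigger an update:

	package cluster

	import "k8s.io/apimachinery/pkg/api/resource"

	// Assumed shape of the helper (sketch): "1Gi" and "1500Mi" both map
	// to 1, so only whole-GiB growth marks a claim for resizing.
	func quantityToGigabyte(q resource.Quantity) int64 {
		return q.ScaledValue(0) / (1 << 30)
	}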
|  | @ -261,35 +294,6 @@ func (c *Cluster) deletePersistentVolumeClaims() error { | ||||||
| 	return nil | 	return nil | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| func (c *Cluster) resizeVolumeClaims(newVolume acidv1.Volume) error { |  | ||||||
| 	c.logger.Debugln("resizing PVCs") |  | ||||||
| 	pvcs, err := c.listPersistentVolumeClaims() |  | ||||||
| 	if err != nil { |  | ||||||
| 		return err |  | ||||||
| 	} |  | ||||||
| 	newQuantity, err := resource.ParseQuantity(newVolume.Size) |  | ||||||
| 	if err != nil { |  | ||||||
| 		return fmt.Errorf("could not parse volume size: %v", err) |  | ||||||
| 	} |  | ||||||
| 	newSize := quantityToGigabyte(newQuantity) |  | ||||||
| 	for _, pvc := range pvcs { |  | ||||||
| 		volumeSize := quantityToGigabyte(pvc.Spec.Resources.Requests[v1.ResourceStorage]) |  | ||||||
| 		if volumeSize >= newSize { |  | ||||||
| 			if volumeSize > newSize { |  | ||||||
| 				c.logger.Warningf("cannot shrink persistent volume") |  | ||||||
| 			} |  | ||||||
| 			continue |  | ||||||
| 		} |  | ||||||
| 		pvc.Spec.Resources.Requests[v1.ResourceStorage] = newQuantity |  | ||||||
| 		c.logger.Debugf("updating persistent volume claim definition for volume %q", pvc.Name) |  | ||||||
| 		if _, err := c.KubeClient.PersistentVolumeClaims(pvc.Namespace).Update(context.TODO(), &pvc, metav1.UpdateOptions{}); err != nil { |  | ||||||
| 			return fmt.Errorf("could not update persistent volume claim: %q", err) |  | ||||||
| 		} |  | ||||||
| 		c.logger.Debugf("successfully updated persistent volume claim %q", pvc.Name) |  | ||||||
| 	} |  | ||||||
| 	return nil |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| func (c *Cluster) listPersistentVolumes() ([]*v1.PersistentVolume, error) { | func (c *Cluster) listPersistentVolumes() ([]*v1.PersistentVolume, error) { | ||||||
| 	result := make([]*v1.PersistentVolume, 0) | 	result := make([]*v1.PersistentVolume, 0) | ||||||
| 
 | 
 | ||||||
|  | @ -406,25 +410,6 @@ func (c *Cluster) resizeVolumes() error { | ||||||
| 	return nil | 	return nil | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| func (c *Cluster) volumeClaimsNeedResizing(newVolume acidv1.Volume) (bool, error) { |  | ||||||
| 	newSize, err := resource.ParseQuantity(newVolume.Size) |  | ||||||
| 	manifestSize := quantityToGigabyte(newSize) |  | ||||||
| 	if err != nil { |  | ||||||
| 		return false, fmt.Errorf("could not parse volume size from the manifest: %v", err) |  | ||||||
| 	} |  | ||||||
| 	pvcs, err := c.listPersistentVolumeClaims() |  | ||||||
| 	if err != nil { |  | ||||||
| 		return false, fmt.Errorf("could not receive persistent volume claims: %v", err) |  | ||||||
| 	} |  | ||||||
| 	for _, pvc := range pvcs { |  | ||||||
| 		currentSize := quantityToGigabyte(pvc.Spec.Resources.Requests[v1.ResourceStorage]) |  | ||||||
| 		if currentSize != manifestSize { |  | ||||||
| 			return true, nil |  | ||||||
| 		} |  | ||||||
| 	} |  | ||||||
| 	return false, nil |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| func (c *Cluster) volumesNeedResizing() (bool, error) { | func (c *Cluster) volumesNeedResizing() (bool, error) { | ||||||
| 	newQuantity, _ := resource.ParseQuantity(c.Spec.Volume.Size) | 	newQuantity, _ := resource.ParseQuantity(c.Spec.Volume.Size) | ||||||
| 	newSize := quantityToGigabyte(newQuantity) | 	newSize := quantityToGigabyte(newQuantity) | ||||||
|  |  | ||||||
|  | @ -74,6 +74,7 @@ func TestResizeVolumeClaim(t *testing.T) { | ||||||
| 	cluster.Name = clusterName | 	cluster.Name = clusterName | ||||||
| 	cluster.Namespace = namespace | 	cluster.Namespace = namespace | ||||||
| 	filterLabels := cluster.labelsSet(false) | 	filterLabels := cluster.labelsSet(false) | ||||||
|  | 	cluster.Spec.Volume.Size = newVolumeSize | ||||||
| 
 | 
 | ||||||
| 	// define and create PVCs for 1Gi volumes
 | 	// define and create PVCs for 1Gi volumes
 | ||||||
| 	pvcList := CreatePVCs(namespace, clusterName, filterLabels, 2, "1Gi") | 	pvcList := CreatePVCs(namespace, clusterName, filterLabels, 2, "1Gi") | ||||||
|  | @ -85,7 +86,7 @@ func TestResizeVolumeClaim(t *testing.T) { | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	// test resizing
 | 	// test resizing
 | ||||||
| 	cluster.resizeVolumeClaims(acidv1.Volume{Size: newVolumeSize}) | 	cluster.syncVolumes() | ||||||
| 
 | 
 | ||||||
| 	pvcs, err := cluster.listPersistentVolumeClaims() | 	pvcs, err := cluster.listPersistentVolumeClaims() | ||||||
| 	assert.NoError(t, err) | 	assert.NoError(t, err) | ||||||
|  |  | ||||||
|  | @ -39,7 +39,7 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur | ||||||
| 	result.EnableTeamIdClusternamePrefix = fromCRD.EnableTeamIdClusternamePrefix | 	result.EnableTeamIdClusternamePrefix = fromCRD.EnableTeamIdClusternamePrefix | ||||||
| 	result.EtcdHost = fromCRD.EtcdHost | 	result.EtcdHost = fromCRD.EtcdHost | ||||||
| 	result.KubernetesUseConfigMaps = fromCRD.KubernetesUseConfigMaps | 	result.KubernetesUseConfigMaps = fromCRD.KubernetesUseConfigMaps | ||||||
| 	result.DockerImage = util.Coalesce(fromCRD.DockerImage, "ghcr.io/zalando/spilo-16:3.2-p3") | 	result.DockerImage = util.Coalesce(fromCRD.DockerImage, "ghcr.io/zalando/spilo-16:3.3-p1") | ||||||
| 	result.Workers = util.CoalesceUInt32(fromCRD.Workers, 8) | 	result.Workers = util.CoalesceUInt32(fromCRD.Workers, 8) | ||||||
| 	result.MinInstances = fromCRD.MinInstances | 	result.MinInstances = fromCRD.MinInstances | ||||||
| 	result.MaxInstances = fromCRD.MaxInstances | 	result.MaxInstances = fromCRD.MaxInstances | ||||||
|  | @ -60,12 +60,13 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur | ||||||
| 	result.PasswordRotationUserRetention = util.CoalesceUInt32(fromCRD.PostgresUsersConfiguration.DeepCopy().PasswordRotationUserRetention, 180) | 	result.PasswordRotationUserRetention = util.CoalesceUInt32(fromCRD.PostgresUsersConfiguration.DeepCopy().PasswordRotationUserRetention, 180) | ||||||
| 
 | 
 | ||||||
| 	// major version upgrade config
 | 	// major version upgrade config
 | ||||||
| 	result.MajorVersionUpgradeMode = util.Coalesce(fromCRD.MajorVersionUpgrade.MajorVersionUpgradeMode, "off") | 	result.MajorVersionUpgradeMode = util.Coalesce(fromCRD.MajorVersionUpgrade.MajorVersionUpgradeMode, "manual") | ||||||
 	result.MajorVersionUpgradeTeamAllowList = fromCRD.MajorVersionUpgrade.MajorVersionUpgradeTeamAllowList
 	result.MinimalMajorVersion = util.Coalesce(fromCRD.MajorVersionUpgrade.MinimalMajorVersion, "12")
 	result.TargetMajorVersion = util.Coalesce(fromCRD.MajorVersionUpgrade.TargetMajorVersion, "16")
 
 	// kubernetes config
+	result.EnableOwnerReferences = util.CoalesceBool(fromCRD.Kubernetes.EnableOwnerReferences, util.False())
 	result.CustomPodAnnotations = fromCRD.Kubernetes.CustomPodAnnotations
 	result.PodServiceAccountName = util.Coalesce(fromCRD.Kubernetes.PodServiceAccountName, "postgres-pod")
 	result.PodServiceAccountDefinition = fromCRD.Kubernetes.PodServiceAccountDefinition
@@ -173,13 +174,13 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur
 	result.GCPCredentials = fromCRD.AWSGCP.GCPCredentials
 	result.WALAZStorageAccount = fromCRD.AWSGCP.WALAZStorageAccount
 	result.AdditionalSecretMount = fromCRD.AWSGCP.AdditionalSecretMount
-	result.AdditionalSecretMountPath = util.Coalesce(fromCRD.AWSGCP.AdditionalSecretMountPath, "/meta/credentials")
+	result.AdditionalSecretMountPath = fromCRD.AWSGCP.AdditionalSecretMountPath
 	result.EnableEBSGp3Migration = fromCRD.AWSGCP.EnableEBSGp3Migration
 	result.EnableEBSGp3MigrationMaxSize = util.CoalesceInt64(fromCRD.AWSGCP.EnableEBSGp3MigrationMaxSize, 1000)
 
 	// logical backup config
 	result.LogicalBackupSchedule = util.Coalesce(fromCRD.LogicalBackup.Schedule, "30 00 * * *")
-	result.LogicalBackupDockerImage = util.Coalesce(fromCRD.LogicalBackup.DockerImage, "ghcr.io/zalando/postgres-operator/logical-backup:v1.12.2")
+	result.LogicalBackupDockerImage = util.Coalesce(fromCRD.LogicalBackup.DockerImage, "ghcr.io/zalando/postgres-operator/logical-backup:v1.13.0")
 	result.LogicalBackupProvider = util.Coalesce(fromCRD.LogicalBackup.BackupProvider, "s3")
 	result.LogicalBackupAzureStorageAccountName = fromCRD.LogicalBackup.AzureStorageAccountName
 	result.LogicalBackupAzureStorageAccountKey = fromCRD.LogicalBackup.AzureStorageAccountKey
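
The `util.Coalesce*` helpers above substitute a hard-coded default when the CRD field is empty or unset. A minimal sketch of the assumed semantics (the real helpers live in `pkg/util`; the names match the calls above, the bodies are illustrative):

```go
package util

// Coalesce returns the default when the configured value is empty.
func Coalesce(val, defaultVal string) string {
	if val == "" {
		return defaultVal
	}
	return val
}

// CoalesceInt64 treats zero as "unset" and falls back to the default.
func CoalesceInt64(val, defaultVal int64) int64 {
	if val == 0 {
		return defaultVal
	}
	return val
}

// CoalesceBool keeps an explicit user setting (true or false) and only
// falls back when the CRD field was left nil.
func CoalesceBool(val, defaultVal *bool) *bool {
	if val == nil {
		return defaultVal
	}
	return val
}

// False returns a pointer to false, used as the default above.
func False() *bool {
	b := false
	return &b
}
```

Note the asymmetry this explains: `AdditionalSecretMountPath` no longer coalesces at all, so its effective default becomes the empty string, matching the struct-tag change further down.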
@@ -384,10 +384,6 @@ func (c *Controller) warnOnDeprecatedPostgreSQLSpecParameters(spec *acidv1.Postg
 		c.logger.Warningf("parameter %q is deprecated. Consider setting %q instead", deprecated, replacement)
 	}
 
-	noeffect := func(param string, explanation string) {
-		c.logger.Warningf("parameter %q takes no effect. %s", param, explanation)
-	}
-
 	if spec.UseLoadBalancer != nil {
 		deprecate("useLoadBalancer", "enableMasterLoadBalancer")
 	}
@@ -395,10 +391,6 @@ func (c *Controller) warnOnDeprecatedPostgreSQLSpecParameters(spec *acidv1.Postg
 		deprecate("replicaLoadBalancer", "enableReplicaLoadBalancer")
 	}
 
-	if len(spec.MaintenanceWindows) > 0 {
-		noeffect("maintenanceWindows", "Not implemented.")
-	}
-
 	if (spec.UseLoadBalancer != nil || spec.ReplicaLoadBalancer != nil) &&
 		(spec.EnableReplicaLoadBalancer != nil || spec.EnableMasterLoadBalancer != nil) {
 		c.logger.Warnf("both old and new load balancer parameters are present in the manifest, ignoring old ones")
@@ -454,8 +446,10 @@ func (c *Controller) queueClusterEvent(informerOldSpec, informerNewSpec *acidv1.
 		clusterError = informerNewSpec.Error
 	}
 
-	// only allow deletion if delete annotations are set and conditions are met
 	if eventType == EventDelete {
+		// when owner references are used operator cannot block deletion
+		if c.opConfig.EnableOwnerReferences == nil || !*c.opConfig.EnableOwnerReferences {
+			// only allow deletion if delete annotations are set and conditions are met
 			if err := c.meetsClusterDeleteAnnotations(informerOldSpec); err != nil {
 				c.logger.WithField("cluster-name", clusterName).Warnf(
 					"ignoring %q event for cluster %q - manifest does not fulfill delete requirements: %s", eventType, clusterName, err)
@@ -469,6 +463,7 @@ func (c *Controller) queueClusterEvent(informerOldSpec, informerNewSpec *acidv1.
 				return
 			}
 		}
+	}
 
 	if clusterError != "" && eventType != EventDelete {
 		c.logger.WithField("cluster-name", clusterName).Debugf("skipping %q event for the invalid cluster: %s", eventType, clusterError)
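
The new guard reads an optional `*bool`: when `enable_owner_references` is on, child objects carry an ownerReference to the `postgresql` manifest, deletion cascades through Kubernetes garbage collection, and the operator can no longer veto it via delete annotations. Hence the annotation check only runs when the flag is nil or false. A nil-safe sketch of that condition (`ownerReferencesEnabled` is a hypothetical helper, not operator code):

```go
package controller

// ownerReferencesEnabled mirrors the guard above: a nil pointer
// (option unset) is treated the same as an explicit false.
func ownerReferencesEnabled(flag *bool) bool {
	return flag != nil && *flag
}
```

With such a helper the guard would read `if !ownerReferencesEnabled(c.opConfig.EnableOwnerReferences) { /* enforce delete annotations */ }`.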
@@ -80,7 +80,7 @@ func (c *Controller) createOperatorCRD(desiredCrd *apiextv1.CustomResourceDefini
 		c.logger.Infof("customResourceDefinition %q has been registered", crd.Name)
 	}
 
-	return wait.Poll(c.config.CRDReadyWaitInterval, c.config.CRDReadyWaitTimeout, func() (bool, error) {
+	return wait.PollUntilContextTimeout(context.TODO(), c.config.CRDReadyWaitInterval, c.config.CRDReadyWaitTimeout, false, func(ctx context.Context) (bool, error) {
 		c, err := c.KubeClient.CustomResourceDefinitions().Get(context.TODO(), desiredCrd.Name, metav1.GetOptions{})
 		if err != nil {
 			return false, err
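
`wait.Poll` is deprecated in recent `k8s.io/apimachinery` releases in favour of context-aware polling. `PollUntilContextTimeout` takes a context, an `immediate` flag (`false` here preserves `wait.Poll`'s behaviour of sleeping one interval before the first probe), and a condition that receives the context. A self-contained usage sketch with a toy condition:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	start := time.Now()
	// Probe every 500ms for up to 5s; immediate=false waits one interval
	// before the first probe, matching the old wait.Poll semantics.
	err := wait.PollUntilContextTimeout(context.Background(), 500*time.Millisecond, 5*time.Second, false,
		func(ctx context.Context) (bool, error) {
			return time.Since(start) > 2*time.Second, nil // toy readiness check
		})
	fmt.Println("poll finished:", err) // nil once the condition returns true
}
```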
@@ -25,6 +25,7 @@ type CRD struct {
 
 // Resources describes kubernetes resource specific configuration parameters
 type Resources struct {
+	EnableOwnerReferences         *bool               `name:"enable_owner_references" default:"false"`
 	ResourceCheckInterval         time.Duration       `name:"resource_check_interval" default:"3s"`
 	ResourceCheckTimeout          time.Duration       `name:"resource_check_timeout" default:"10m"`
 	PodLabelWaitTimeout           time.Duration       `name:"pod_label_wait_timeout" default:"10m"`
@@ -126,7 +127,7 @@ type Scalyr struct {
 // LogicalBackup defines configuration for logical backup
 type LogicalBackup struct {
 	LogicalBackupSchedule                     string `name:"logical_backup_schedule" default:"30 00 * * *"`
-	LogicalBackupDockerImage                  string `name:"logical_backup_docker_image" default:"ghcr.io/zalando/postgres-operator/logical-backup:v1.12.2"`
+	LogicalBackupDockerImage                  string `name:"logical_backup_docker_image" default:"ghcr.io/zalando/postgres-operator/logical-backup:v1.13.0"`
 	LogicalBackupProvider                     string `name:"logical_backup_provider" default:"s3"`
 	LogicalBackupAzureStorageAccountName      string `name:"logical_backup_azure_storage_account_name" default:""`
 	LogicalBackupAzureStorageContainer        string `name:"logical_backup_azure_storage_container" default:""`
@@ -174,7 +175,7 @@ type Config struct {
 	WatchedNamespace        string            `name:"watched_namespace"` // special values: "*" means 'watch all namespaces', the empty string "" means 'watch a namespace where operator is deployed to'
 	KubernetesUseConfigMaps bool              `name:"kubernetes_use_configmaps" default:"false"`
 	EtcdHost                string            `name:"etcd_host" default:""` // special values: the empty string "" means Patroni will use K8s as a DCS
-	DockerImage             string            `name:"docker_image" default:"ghcr.io/zalando/spilo-16:3.2-p3"`
+	DockerImage             string            `name:"docker_image" default:"ghcr.io/zalando/spilo-16:3.3-p1"`
 	SidecarImages           map[string]string `name:"sidecar_docker_images"` // deprecated in favour of SidecarContainers
 	SidecarContainers       []v1.Container    `name:"sidecars"`
 	PodServiceAccountName   string            `name:"pod_service_account_name" default:"postgres-pod"`
@@ -191,7 +192,7 @@ type Config struct {
 	GCPCredentials                           string            `name:"gcp_credentials"`
 	WALAZStorageAccount                      string            `name:"wal_az_storage_account"`
 	AdditionalSecretMount                    string            `name:"additional_secret_mount"`
-	AdditionalSecretMountPath                string            `name:"additional_secret_mount_path" default:"/meta/credentials"`
+	AdditionalSecretMountPath                string            `name:"additional_secret_mount_path"`
 	EnableEBSGp3Migration                    bool              `name:"enable_ebs_gp3_migration" default:"false"`
 	EnableEBSGp3MigrationMaxSize             int64             `name:"enable_ebs_gp3_migration_max_size" default:"1000"`
 	DebugLogging                             bool              `name:"debug_logging" default:"true"`
@@ -243,7 +244,7 @@ type Config struct {
 	EnablePgVersionEnvVar                    bool              `name:"enable_pgversion_env_var" default:"true"`
 	EnableSpiloWalPathCompat                 bool              `name:"enable_spilo_wal_path_compat" default:"false"`
 	EnableTeamIdClusternamePrefix            bool              `name:"enable_team_id_clustername_prefix" default:"false"`
-	MajorVersionUpgradeMode                  string            `name:"major_version_upgrade_mode" default:"off"`
+	MajorVersionUpgradeMode                  string            `name:"major_version_upgrade_mode" default:"manual"`
 	MajorVersionUpgradeTeamAllowList         []string          `name:"major_version_upgrade_team_allow_list" default:""`
 	MinimalMajorVersion                      string            `name:"minimal_major_version" default:"12"`
 	TargetMajorVersion                       string            `name:"target_major_version" default:"16"`
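
The `name` and `default` struct tags drive the operator's config loader. The reflection sketch below shows the general mechanism, under the assumption that string defaults are simply copied into empty fields; `applyDefaults` and `demo` are illustrative names, not operator code (the real decoding lives in `pkg/util/config`):

```go
package main

import (
	"fmt"
	"reflect"
)

type demo struct {
	MajorVersionUpgradeMode string `name:"major_version_upgrade_mode" default:"manual"`
	TargetMajorVersion      string `name:"target_major_version" default:"16"`
}

// applyDefaults writes each field's `default` tag into fields that are
// still the empty string after loading the user configuration.
func applyDefaults(v interface{}) {
	rv := reflect.ValueOf(v).Elem()
	rt := rv.Type()
	for i := 0; i < rt.NumField(); i++ {
		tag, ok := rt.Field(i).Tag.Lookup("default")
		if ok && rv.Field(i).Kind() == reflect.String && rv.Field(i).String() == "" {
			rv.Field(i).SetString(tag)
		}
	}
}

func main() {
	c := demo{}
	applyDefaults(&c)
	fmt.Println(c.MajorVersionUpgradeMode) // -> manual, the new default above
}
```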
@@ -3,7 +3,6 @@ package k8sutil
 import (
 	"context"
 	"fmt"
-	"reflect"
 
 	b64 "encoding/base64"
 	"encoding/json"
@@ -17,9 +16,9 @@ import (
 	"github.com/zalando/postgres-operator/pkg/spec"
 	apiappsv1 "k8s.io/api/apps/v1"
 	v1 "k8s.io/api/core/v1"
-	apipolicyv1 "k8s.io/api/policy/v1"
+	apiextv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
 	apiextclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
-	apiextv1 "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1"
+	apiextv1client "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
@@ -61,7 +60,7 @@ type KubernetesClient struct {
 	appsv1.DeploymentsGetter
 	rbacv1.RoleBindingsGetter
 	policyv1.PodDisruptionBudgetsGetter
-	apiextv1.CustomResourceDefinitionsGetter
+	apiextv1client.CustomResourceDefinitionsGetter
 	clientbatchv1.CronJobsGetter
 	acidv1.OperatorConfigurationsGetter
 	acidv1.PostgresTeamsGetter
@@ -73,6 +72,13 @@ type KubernetesClient struct {
 	Zalandov1ClientSet *zalandoclient.Clientset
 }
 
+type mockCustomResourceDefinition struct {
+	apiextv1client.CustomResourceDefinitionInterface
+}
+
+type MockCustomResourceDefinitionsGetter struct {
+}
+
 type mockSecret struct {
 	corev1.SecretInterface
 }
@@ -242,15 +248,16 @@ func (client *KubernetesClient) SetFinalizer(clusterName spec.NamespacedName, pg
 	return updatedPg, nil
 }
 
-// SamePDB compares the PodDisruptionBudgets
-func SamePDB(cur, new *apipolicyv1.PodDisruptionBudget) (match bool, reason string) {
-	//TODO: improve comparison
-	match = reflect.DeepEqual(new.Spec, cur.Spec)
-	if !match {
-		reason = "new PDB spec does not match the current one"
-	}
-	return
+func (c *mockCustomResourceDefinition) Get(ctx context.Context, name string, options metav1.GetOptions) (*apiextv1.CustomResourceDefinition, error) {
+	return &apiextv1.CustomResourceDefinition{}, nil
+}
+
+func (c *mockCustomResourceDefinition) Create(ctx context.Context, crd *apiextv1.CustomResourceDefinition, options metav1.CreateOptions) (*apiextv1.CustomResourceDefinition, error) {
+	return &apiextv1.CustomResourceDefinition{}, nil
+}
+
+func (mock *MockCustomResourceDefinitionsGetter) CustomResourceDefinitions() apiextv1client.CustomResourceDefinitionInterface {
+	return &mockCustomResourceDefinition{}
 }
 
 func (c *mockSecret) Get(ctx context.Context, name string, options metav1.GetOptions) (*v1.Secret, error) {
@@ -457,6 +464,8 @@ func NewMockKubernetesClient() KubernetesClient {
 		ConfigMapsGetter:  &MockConfigMapsGetter{},
 		DeploymentsGetter: &MockDeploymentGetter{},
 		ServicesGetter:    &MockServiceGetter{},
+
+		CustomResourceDefinitionsGetter: &MockCustomResourceDefinitionsGetter{},
 	}
 }
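
Because `mockCustomResourceDefinition` embeds `CustomResourceDefinitionInterface`, the mock only needs to override the calls the controller actually exercises (`Get` and `Create`); anything else would panic if invoked, which keeps tests honest. A hypothetical test using the new getter (test name and assertion are illustrative, assuming it sits in the same `k8sutil` package):

```go
package k8sutil

import (
	"context"
	"testing"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func TestMockCRDGetter(t *testing.T) {
	client := NewMockKubernetesClient()
	// Get is stubbed to return an empty CRD and a nil error, which is
	// enough to drive createOperatorCRD's readiness poll in unit tests.
	if _, err := client.CustomResourceDefinitions().Get(context.TODO(), "postgresqls.acid.zalan.do", metav1.GetOptions{}); err != nil {
		t.Fatalf("unexpected error from mock: %v", err)
	}
}
```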
@@ -35,7 +35,7 @@ const (
 var passwordChars = []byte("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789")
 
 func init() {
-	rand.Seed(time.Now().Unix())
+	rand.New(rand.NewSource(time.Now().Unix()))
 }
 
 // helper function to get bool pointers
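
`rand.Seed` was deprecated in Go 1.20, hence the switch. Note that the replacement constructs a seeded generator and discards it, so the package-level `rand` functions are not reseeded by this `init`. If seeding were the goal, the generator would need to be kept, along the lines of this sketch (`pwdRand` and `randomPassword` are illustrative names, not the repository's code):

```go
package util

import (
	"math/rand"
	"time"
)

var passwordChars = []byte("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789")

// pwdRand keeps the seeded generator so password generation actually
// draws from it; the commit's one-liner drops the generator it creates.
var pwdRand = rand.New(rand.NewSource(time.Now().Unix()))

func randomPassword(n int) string {
	b := make([]byte, n)
	for i := range b {
		b[i] = passwordChars[pwdRand.Intn(len(passwordChars))]
	}
	return string(b)
}
```

For secrets, `crypto/rand` would be the stronger choice either way; `math/rand` merely preserves the existing behaviour.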
@@ -1,6 +1,6 @@
 {
   "name": "postgres-operator-ui",
-  "version": "1.12.2",
+  "version": "1.13.0",
   "description": "PostgreSQL Operator UI",
   "main": "src/app.js",
   "config": {
@@ -142,6 +142,7 @@ edit
         o.spec.enableReplicaConnectionPooler = i.spec.enableReplicaConnectionPooler || false
         o.spec.enableMasterPoolerLoadBalancer = i.spec.enableMasterPoolerLoadBalancer || false
         o.spec.enableReplicaPoolerLoadBalancer = i.spec.enableReplicaPoolerLoadBalancer || false
+        o.spec.maintenanceWindows = i.spec.maintenanceWindows || []
 
         o.spec.volume = {
           size: i.spec.volume.size,
@@ -594,6 +594,12 @@ new
         {{#if enableReplicaPoolerLoadBalancer}}
         enableReplicaPoolerLoadBalancer: true
         {{/if}}
+        {{#if maintenanceWindows}}
+        maintenanceWindows:
+        {{#each maintenanceWindows}}
+          - "{{ this }}"
+        {{/each}}
+        {{/if}}
         volume:
           size: "{{ volumeSize }}Gi"{{#if volumeStorageClass}}
           storageClass: "{{ volumeStorageClass }}"{{/if}}{{#if iops}}
@@ -651,6 +657,7 @@ new
         enableReplicaConnectionPooler: this.enableReplicaConnectionPooler,
         enableMasterPoolerLoadBalancer: this.enableMasterPoolerLoadBalancer,
         enableReplicaPoolerLoadBalancer: this.enableReplicaPoolerLoadBalancer,
+        maintenanceWindows: this.maintenanceWindows,
         volumeSize: this.volumeSize,
         volumeStorageClass: this.volumeStorageClass,
         iops: this.iops,
@@ -727,6 +734,10 @@ new
       this.enableReplicaPoolerLoadBalancer = !this.enableReplicaPoolerLoadBalancer
     }
 
+    this.maintenanceWindows = e => {
+      this.maintenanceWindows = e.target.value
+    }
+
    this.volumeChange = e => {
       this.volumeSize = +e.target.value
     }
@@ -1042,6 +1053,7 @@ new
      this.enableReplicaConnectionPooler = false
       this.enableMasterPoolerLoadBalancer = false
       this.enableReplicaPoolerLoadBalancer = false
+      this.maintenanceWindows = {}
 
       this.postgresqlVersion = this.postgresqlVersion = (
         this.config.postgresql_versions[0]
@@ -18,7 +18,7 @@ spec:
       serviceAccountName: postgres-operator-ui
       containers:
         - name: "service"
-          image: ghcr.io/zalando/postgres-operator-ui:v1.12.2
+          image: ghcr.io/zalando/postgres-operator-ui:v1.13.0
           ports:
             - containerPort: 8081
               protocol: "TCP"
@@ -465,6 +465,7 @@ def get_postgresqls():
             'status': status,
             'num_elb': spec.get('enableMasterLoadBalancer', 0) + spec.get('enableReplicaLoadBalancer', 0) + \
                        spec.get('enableMasterPoolerLoadBalancer', 0) + spec.get('enableReplicaPoolerLoadBalancer', 0),
+            'maintenance_windows': spec.get('maintenanceWindows', []),
         }
         for cluster in these(
             read_postgresqls(
@@ -566,6 +567,11 @@ def update_postgresql(namespace: str, cluster: str):
             return fail('allowedSourceRanges invalid')
         spec['allowedSourceRanges'] = postgresql['spec']['allowedSourceRanges']
 
+    if 'maintenanceWindows' in postgresql['spec']:
+        if not isinstance(postgresql['spec']['maintenanceWindows'], list):
+            return fail('maintenanceWindows invalid')
+        spec['maintenanceWindows'] = postgresql['spec']['maintenanceWindows']
+
     if 'numberOfInstances' in postgresql['spec']:
         if not isinstance(postgresql['spec']['numberOfInstances'], int):
             return fail('numberOfInstances invalid')
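
The UI only checks that `maintenanceWindows` is a list and passes it through to the manifest; the window strings themselves are validated operator-side. The upstream manifest reference documents entries of the form `HH:MM-HH:MM` or `Dow:HH:MM-HH:MM`, e.g. `01:00-06:00` or `Sat:00:00-04:00`.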
@@ -305,7 +305,7 @@ def read_versions(
         if uid == 'wal' or defaulting(lambda: UUID(uid))
     ]
 
-BACKUP_VERSION_PREFIXES = ['', '9.6/', '10/', '11/', '12/', '13/', '14/', '15/', '16/']
+BACKUP_VERSION_PREFIXES = ['', '10/', '11/', '12/', '13/', '14/', '15/', '16/']
 
 def read_basebackups(
     pg_cluster,