Full AWS gp3 support for iops and throughput config. (#1261)
Support new AWS EBS volume type `gp3` with `iops` and `throughput` in the manifest.

Co-authored-by: Felix Kunde <felix-kunde@gmx.de>
parent 4ea0b5f432
commit 4a88f00a3f
@@ -557,6 +557,8 @@ spec:
                 required:
                   - size
                 properties:
+                  iops:
+                    type: integer
                   size:
                     type: string
                     pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
@@ -565,6 +567,8 @@ spec:
                     type: string
                   subPath:
                     type: string
+                  throughput:
+                    type: integer
           status:
             type: object
             additionalProperties:

@@ -235,6 +235,24 @@ Then you can for example check the Patroni logs:
 kubectl logs acid-minimal-cluster-0
 ```
 
+## Unit tests with mocks and the K8s fake API
+
+Whenever possible, rely on mocks and the K8s fake client; they allow full-fledged testing of K8s objects in your unit tests.
+
+To enable mocks, a code annotation is needed:
+[Mock code gen annotation](https://github.com/zalando/postgres-operator/blob/master/pkg/util/volumes/volumes.go#L3)
+
+To generate mocks run:
+```bash
+make mocks
+```
+
+Examples for mocks can be found in:
+[Example mock usage](https://github.com/zalando/postgres-operator/blob/master/pkg/cluster/volumes_test.go#L248)
+
+Examples for fake K8s objects can be found in:
+[Example fake K8s client usage](https://github.com/zalando/postgres-operator/blob/master/pkg/cluster/volumes_test.go#L166)
+
 ## End-to-end tests
 
 The operator provides reference end-to-end (e2e) tests to

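As a minimal sketch of the mock workflow described above: the test below creates the generated `VolumeResizer` mock, declares an expectation and exercises it. The test name and canned values are illustrative; package name and import paths follow `pkg/cluster/volumes_test.go` from this commit, where the mock is normally injected via `cluster.VolumeResizer`.

```go
package cluster

import (
	"testing"

	"github.com/golang/mock/gomock"
	"github.com/stretchr/testify/assert"

	"github.com/zalando/postgres-operator/mocks"
)

func TestVolumeResizerMockSketch(t *testing.T) {
	// the gomock controller verifies that all declared expectations were met
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	// NewMockVolumeResizer is generated by `make mocks` from the annotated
	// VolumeResizer interface in pkg/util/volumes/volumes.go
	resizer := mocks.NewMockVolumeResizer(ctrl)

	// declare the expected call and its canned return value
	resizer.EXPECT().
		ExtractVolumeID(gomock.Eq("aws://eu-central-1b/ebs-volume-1")).
		Return("ebs-volume-1", nil)

	// real tests hand the mock to the code under test via cluster.VolumeResizer;
	// here it is called directly to keep the sketch short
	id, err := resizer.ExtractVolumeID("aws://eu-central-1b/ebs-volume-1")
	assert.NoError(t, err)
	assert.Equal(t, "ebs-volume-1", id)
}
```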
@@ -338,13 +338,13 @@ archive is supported.
   the url to S3 bucket containing the WAL archive of the remote primary.
   Required when the `standby` section is present.
 
-## EBS volume resizing
+## Volume properties
 
 Those parameters are grouped under the `volume` top-level key and define the
 properties of the persistent storage that stores Postgres data.
 
 * **size**
-  the size of the target EBS volume. Usual Kubernetes size modifiers, i.e. `Gi`
+  the size of the target volume. Usual Kubernetes size modifiers, i.e. `Gi`
   or `Mi`, apply. Required.
 
 * **storageClass**
@@ -356,6 +356,14 @@ properties of the persistent storage that stores Postgres data.
 * **subPath**
   Subpath to use when mounting volume into Spilo container. Optional.
 
+* **iops**
+  When running the operator on AWS, the latest generation of EBS volumes (`gp3`)
+  allows configuring the number of IOPS. Maximum is 16000. Optional.
+
+* **throughput**
+  When running the operator on AWS, the latest generation of EBS volumes (`gp3`)
+  allows configuring the throughput in MB/s. Maximum is 1000. Optional.
+
 ## Sidecar definitions
 
 Those parameters are defined under the `sidecars` key. They consist of a list

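To make the new `iops` and `throughput` options concrete: in the operator API they end up on the `Volume` type extended by this commit. Below is a hedged Go sketch of the corresponding spec fragment; the values are arbitrary examples rather than recommendations, and `aws.Int64` is simply the pointer helper from the AWS SDK that this commit already uses.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"

	acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1"
)

func main() {
	// mirrors the manifest's `volume` section: size is required,
	// iops and throughput are the new optional gp3 parameters
	volumeSpec := acidv1.Volume{
		Size:       "100Gi",         // usual Kubernetes size modifiers (Gi, Mi) apply
		Iops:       aws.Int64(3000), // up to 16000 for gp3
		Throughput: aws.Int64(125),  // MB/s, up to 1000 for gp3
	}
	fmt.Printf("%+v\n", volumeSpec)
}
```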
@@ -373,10 +373,13 @@ configuration they are grouped under the `kubernetes` key.
   possible value is `parallel`.
 
 * **storage_resize_mode**
-  defines how operator handels the difference between requested volume size and
-  actual size. Available options are: ebs - tries to resize EBS volume, pvc -
-  changes PVC definition, off - disables resize of the volumes. Default is "pvc".
-  When using OpenShift please use one of the other available options.
+  defines how the operator handles the difference between the requested volume
+  size and the actual size. Available options are:
+  1. `ebs`: the operator resizes EBS volumes directly and executes `resizefs` within a pod
+  2. `pvc`: the operator only changes the PVC definition
+  3. `off`: disables resizing of the volumes
+  4. `mixed`: the operator uses the AWS API to adjust size, throughput and IOPS, and changes the PVC for the file system resize
+  Default is "pvc".
 
 ## Kubernetes resource requests
 

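The `mixed` mode added here is what the new gp3 handling relies on: EBS size, IOPS and throughput are changed through the AWS API, while the PVC is updated for the file system resize. As a reference, a Go fragment mirroring how the unit tests in this commit configure it (the `config` package import and the surrounding cluster construction are elided):

```go
opConfig := config.Config{
	Resources: config.Resources{
		ClusterLabels:    map[string]string{"application": "spilo"},
		ClusterNameLabel: "cluster-name",
	},
	// one of "ebs", "pvc", "off" or "mixed"; "pvc" is the default
	StorageResizeMode: "mixed",
	// the automatic gp2 -> gp3 migration is a separate switch with its own size cap
	EnableEBSGp3Migration:        false,
	EnableEBSGp3MigrationMaxSize: 1000,
}
```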
							
								
								
									
go.mod (2 lines changed)
							|  | @ -3,7 +3,7 @@ module github.com/zalando/postgres-operator | ||||||
| go 1.15 | go 1.15 | ||||||
| 
 | 
 | ||||||
| require ( | require ( | ||||||
| 	github.com/aws/aws-sdk-go v1.36.3 | 	github.com/aws/aws-sdk-go v1.36.29 | ||||||
| 	github.com/golang/mock v1.4.4 | 	github.com/golang/mock v1.4.4 | ||||||
| 	github.com/lib/pq v1.9.0 | 	github.com/lib/pq v1.9.0 | ||||||
| 	github.com/motomux/pretty v0.0.0-20161209205251-b2aad2c9a95d | 	github.com/motomux/pretty v0.0.0-20161209205251-b2aad2c9a95d | ||||||
|  |  | ||||||
							
								
								
									
go.sum (8 lines changed)
							|  | @ -35,6 +35,8 @@ github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbt | ||||||
| github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= | github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= | ||||||
| github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= | github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= | ||||||
| github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= | github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= | ||||||
|  | github.com/Storytel/gomock-matchers v1.2.0 h1:VPsbL6c/9/eCa4rH13LOEXPsIsnA1z+INamGIx1lWQo= | ||||||
|  | github.com/Storytel/gomock-matchers v1.2.0/go.mod h1:7HEuwyU/eq/W3mrSqPSYETGXiTyU2um0Rrb+dh5KmKM= | ||||||
| github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM= | github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM= | ||||||
| github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= | github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= | ||||||
| github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= | github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= | ||||||
|  | @ -45,8 +47,8 @@ github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5 | ||||||
| github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= | github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= | ||||||
| github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a h1:idn718Q4B6AGu/h5Sxe66HYVdqdGu2l9Iebqhi/AEoA= | github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a h1:idn718Q4B6AGu/h5Sxe66HYVdqdGu2l9Iebqhi/AEoA= | ||||||
| github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= | github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= | ||||||
| github.com/aws/aws-sdk-go v1.36.3 h1:KYpG5OegwW3xgOsMxy01nj/Td281yxi1Ha2lJQJs4tI= | github.com/aws/aws-sdk-go v1.36.29 h1:lM1G3AF1+7vzFm0n7hfH8r2+750BTo+6Lo6FtPB7kzk= | ||||||
| github.com/aws/aws-sdk-go v1.36.3/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= | github.com/aws/aws-sdk-go v1.36.29/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= | ||||||
| github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= | github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= | ||||||
| github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= | github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= | ||||||
| github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= | github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= | ||||||
|  | @ -182,6 +184,7 @@ github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7 h1:5ZkaAPbicIKTF | ||||||
| github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= | github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= | ||||||
| github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= | github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= | ||||||
| github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= | github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= | ||||||
|  | github.com/golang/mock v1.2.1-0.20190311213431-837231f7bb37/go.mod h1:L3bP22mxdfCUHSUVMs+SPJMx55FrxQew7MSXT11Q86g= | ||||||
| github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= | github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= | ||||||
| github.com/golang/mock v1.4.4 h1:l75CXGRSwbaYNpl/Z2X1XIIAMSCquvXgpVZDhwEIJsc= | github.com/golang/mock v1.4.4 h1:l75CXGRSwbaYNpl/Z2X1XIIAMSCquvXgpVZDhwEIJsc= | ||||||
| github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= | github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= | ||||||
|  | @ -525,6 +528,7 @@ golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGm | ||||||
| golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= | golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= | ||||||
| golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= | golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= | ||||||
| golang.org/x/tools v0.0.0-20190125232054-d66bd3c5d5a6/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= | golang.org/x/tools v0.0.0-20190125232054-d66bd3c5d5a6/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= | ||||||
|  | golang.org/x/tools v0.0.0-20190221204921-83362c3779f5/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= | ||||||
| golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= | golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= | ||||||
| golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= | golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= | ||||||
| golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= | golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= | ||||||
|  |  | ||||||
@@ -44,6 +44,8 @@ spec:
   volume:
     size: 1Gi
 #    storageClass: my-sc
+#    iops: 1000  # for EBS gp3
+#    throughput: 250  # in MB/s for EBS gp3
   additionalVolumes:
     - name: empty
       mountPath: /opt/empty

@@ -553,6 +553,8 @@ spec:
                 required:
                   - size
                 properties:
+                  iops:
+                    type: integer
                   size:
                     type: string
                     pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
@@ -561,6 +563,8 @@ spec:
                     type: string
                   subPath:
                     type: string
+                  throughput:
+                    type: integer
           status:
             type: object
             additionalProperties:

|  | @ -835,6 +835,9 @@ var PostgresCRDResourceValidation = apiextv1.CustomResourceValidation{ | ||||||
| 						Type:     "object", | 						Type:     "object", | ||||||
| 						Required: []string{"size"}, | 						Required: []string{"size"}, | ||||||
| 						Properties: map[string]apiextv1.JSONSchemaProps{ | 						Properties: map[string]apiextv1.JSONSchemaProps{ | ||||||
|  | 							"iops": { | ||||||
|  | 								Type: "integer", | ||||||
|  | 							}, | ||||||
| 							"size": { | 							"size": { | ||||||
| 								Type:        "string", | 								Type:        "string", | ||||||
| 								Description: "Value must not be zero", | 								Description: "Value must not be zero", | ||||||
|  | @ -846,6 +849,9 @@ var PostgresCRDResourceValidation = apiextv1.CustomResourceValidation{ | ||||||
| 							"subPath": { | 							"subPath": { | ||||||
| 								Type: "string", | 								Type: "string", | ||||||
| 							}, | 							}, | ||||||
|  | 							"throughput": { | ||||||
|  | 								Type: "integer", | ||||||
|  | 							}, | ||||||
| 						}, | 						}, | ||||||
| 					}, | 					}, | ||||||
| 				}, | 				}, | ||||||
|  |  | ||||||
|  | @ -118,6 +118,7 @@ type Volume struct { | ||||||
| 	SubPath      string `json:"subPath,omitempty"` | 	SubPath      string `json:"subPath,omitempty"` | ||||||
| 	Iops         *int64 `json:"iops,omitempty"` | 	Iops         *int64 `json:"iops,omitempty"` | ||||||
| 	Throughput   *int64 `json:"throughput,omitempty"` | 	Throughput   *int64 `json:"throughput,omitempty"` | ||||||
|  | 	VolumeType   string `json:"type,omitempty"` | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| // AdditionalVolume specs additional optional volumes for statefulset
 | // AdditionalVolume specs additional optional volumes for statefulset
 | ||||||
|  |  | ||||||
|  | @ -53,8 +53,6 @@ func (c *Cluster) Sync(newSpec *acidv1.Postgresql) error { | ||||||
| 		return err | 		return err | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	c.logger.Debugf("syncing volumes using %q storage resize mode", c.OpConfig.StorageResizeMode) |  | ||||||
| 
 |  | ||||||
| 	if c.OpConfig.EnableEBSGp3Migration { | 	if c.OpConfig.EnableEBSGp3Migration { | ||||||
| 		err = c.executeEBSMigration() | 		err = c.executeEBSMigration() | ||||||
| 		if nil != err { | 		if nil != err { | ||||||
|  | @ -62,32 +60,8 @@ func (c *Cluster) Sync(newSpec *acidv1.Postgresql) error { | ||||||
| 		} | 		} | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	if c.OpConfig.StorageResizeMode == "mixed" { | 	if err = c.syncVolumes(); err != nil { | ||||||
| 		// mixed op uses AWS API to adjust size,throughput,iops and calls pvc chance for file system resize
 | 		return err | ||||||
| 
 |  | ||||||
| 		// resize pvc to adjust filesystem size until better K8s support
 |  | ||||||
| 		if err = c.syncVolumeClaims(); err != nil { |  | ||||||
| 			err = fmt.Errorf("could not sync persistent volume claims: %v", err) |  | ||||||
| 			return err |  | ||||||
| 		} |  | ||||||
| 	} else if c.OpConfig.StorageResizeMode == "pvc" { |  | ||||||
| 		if err = c.syncVolumeClaims(); err != nil { |  | ||||||
| 			err = fmt.Errorf("could not sync persistent volume claims: %v", err) |  | ||||||
| 			return err |  | ||||||
| 		} |  | ||||||
| 	} else if c.OpConfig.StorageResizeMode == "ebs" { |  | ||||||
| 		// potentially enlarge volumes before changing the statefulset. By doing that
 |  | ||||||
| 		// in this order we make sure the operator is not stuck waiting for a pod that
 |  | ||||||
| 		// cannot start because it ran out of disk space.
 |  | ||||||
| 		// TODO: handle the case of the cluster that is downsized and enlarged again
 |  | ||||||
| 		// (there will be a volume from the old pod for which we can't act before the
 |  | ||||||
| 		//  the statefulset modification is concluded)
 |  | ||||||
| 		if err = c.syncVolumes(); err != nil { |  | ||||||
| 			err = fmt.Errorf("could not sync persistent volumes: %v", err) |  | ||||||
| 			return err |  | ||||||
| 		} |  | ||||||
| 	} else { |  | ||||||
| 		c.logger.Infof("Storage resize is disabled (storage_resize_mode is off). Skipping volume sync.") |  | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	if err = c.enforceMinResourceLimits(&c.Spec); err != nil { | 	if err = c.enforceMinResourceLimits(&c.Spec); err != nil { | ||||||
|  | @ -590,48 +564,6 @@ func (c *Cluster) syncRoles() (err error) { | ||||||
| 	return nil | 	return nil | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| // syncVolumeClaims reads all persistent volume claims and checks that their size matches the one declared in the statefulset.
 |  | ||||||
| func (c *Cluster) syncVolumeClaims() error { |  | ||||||
| 	c.setProcessName("syncing volume claims") |  | ||||||
| 
 |  | ||||||
| 	act, err := c.volumeClaimsNeedResizing(c.Spec.Volume) |  | ||||||
| 	if err != nil { |  | ||||||
| 		return fmt.Errorf("could not compare size of the volume claims: %v", err) |  | ||||||
| 	} |  | ||||||
| 	if !act { |  | ||||||
| 		c.logger.Infof("volume claims do not require changes") |  | ||||||
| 		return nil |  | ||||||
| 	} |  | ||||||
| 	if err := c.resizeVolumeClaims(c.Spec.Volume); err != nil { |  | ||||||
| 		return fmt.Errorf("could not sync volume claims: %v", err) |  | ||||||
| 	} |  | ||||||
| 
 |  | ||||||
| 	c.logger.Infof("volume claims have been synced successfully") |  | ||||||
| 
 |  | ||||||
| 	return nil |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| // syncVolumes reads all persistent volumes and checks that their size matches the one declared in the statefulset.
 |  | ||||||
| func (c *Cluster) syncVolumes() error { |  | ||||||
| 	c.setProcessName("syncing volumes") |  | ||||||
| 
 |  | ||||||
| 	act, err := c.volumesNeedResizing(c.Spec.Volume) |  | ||||||
| 	if err != nil { |  | ||||||
| 		return fmt.Errorf("could not compare size of the volumes: %v", err) |  | ||||||
| 	} |  | ||||||
| 	if !act { |  | ||||||
| 		return nil |  | ||||||
| 	} |  | ||||||
| 
 |  | ||||||
| 	if err := c.resizeVolumes(); err != nil { |  | ||||||
| 		return fmt.Errorf("could not sync volumes: %v", err) |  | ||||||
| 	} |  | ||||||
| 
 |  | ||||||
| 	c.logger.Infof("volumes have been synced successfully") |  | ||||||
| 
 |  | ||||||
| 	return nil |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| func (c *Cluster) syncDatabases() error { | func (c *Cluster) syncDatabases() error { | ||||||
| 	c.setProcessName("syncing databases") | 	c.setProcessName("syncing databases") | ||||||
| 
 | 
 | ||||||
|  |  | ||||||
|  | @ -10,13 +10,215 @@ import ( | ||||||
| 	"k8s.io/apimachinery/pkg/api/resource" | 	"k8s.io/apimachinery/pkg/api/resource" | ||||||
| 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" | 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" | ||||||
| 
 | 
 | ||||||
|  | 	"github.com/aws/aws-sdk-go/aws" | ||||||
| 	acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1" | 	acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1" | ||||||
| 	"github.com/zalando/postgres-operator/pkg/spec" | 	"github.com/zalando/postgres-operator/pkg/spec" | ||||||
| 	"github.com/zalando/postgres-operator/pkg/util" | 	"github.com/zalando/postgres-operator/pkg/util" | ||||||
| 	"github.com/zalando/postgres-operator/pkg/util/constants" | 	"github.com/zalando/postgres-operator/pkg/util/constants" | ||||||
| 	"github.com/zalando/postgres-operator/pkg/util/filesystems" | 	"github.com/zalando/postgres-operator/pkg/util/filesystems" | ||||||
|  | 	"github.com/zalando/postgres-operator/pkg/util/volumes" | ||||||
| ) | ) | ||||||
| 
 | 
 | ||||||
|  | func (c *Cluster) syncVolumes() error { | ||||||
|  | 	c.logger.Debugf("syncing volumes using %q storage resize mode", c.OpConfig.StorageResizeMode) | ||||||
|  | 	var err error | ||||||
|  | 
 | ||||||
|  | 	// check quantity string once, and do not bother with it anymore anywhere else
 | ||||||
|  | 	_, err = resource.ParseQuantity(c.Spec.Volume.Size) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return fmt.Errorf("could not parse volume size from the manifest: %v", err) | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	if c.OpConfig.StorageResizeMode == "mixed" { | ||||||
|  | 		// mixed op uses AWS API to adjust size, throughput, iops, and calls pvc change for file system resize
 | ||||||
|  | 		// in case of errors we proceed to let K8s do its work, favoring disk space increase over other adjustments
 | ||||||
|  | 
 | ||||||
|  | 		err = c.populateVolumeMetaData() | ||||||
|  | 		if err != nil { | ||||||
|  | 			c.logger.Errorf("populating EBS meta data failed, skipping potential adjustments: %v", err) | ||||||
|  | 		} else { | ||||||
|  | 			err = c.syncUnderlyingEBSVolume() | ||||||
|  | 			if err != nil { | ||||||
|  | 				c.logger.Errorf("errors occurred during EBS volume adjustments: %v", err) | ||||||
|  | 			} | ||||||
|  | 		} | ||||||
|  | 
 | ||||||
|  | 		// resize pvc to adjust filesystem size until better K8s support
 | ||||||
|  | 		if err = c.syncVolumeClaims(); err != nil { | ||||||
|  | 			err = fmt.Errorf("could not sync persistent volume claims: %v", err) | ||||||
|  | 			return err | ||||||
|  | 		} | ||||||
|  | 	} else if c.OpConfig.StorageResizeMode == "pvc" { | ||||||
|  | 		if err = c.syncVolumeClaims(); err != nil { | ||||||
|  | 			err = fmt.Errorf("could not sync persistent volume claims: %v", err) | ||||||
|  | 			return err | ||||||
|  | 		} | ||||||
|  | 	} else if c.OpConfig.StorageResizeMode == "ebs" { | ||||||
|  | 		// potentially enlarge volumes before changing the statefulset. By doing that
 | ||||||
|  | 		// in this order we make sure the operator is not stuck waiting for a pod that
 | ||||||
|  | 		// cannot start because it ran out of disk space.
 | ||||||
|  | 		// TODO: handle the case of the cluster that is downsized and enlarged again
 | ||||||
|  | 		// (there will be a volume from the old pod for which we can't act before the
 | ||||||
|  | 		//  the statefulset modification is concluded)
 | ||||||
|  | 		if err = c.syncEbsVolumes(); err != nil { | ||||||
|  | 			err = fmt.Errorf("could not sync persistent volumes: %v", err) | ||||||
|  | 			return err | ||||||
|  | 		} | ||||||
|  | 	} else { | ||||||
|  | 		c.logger.Infof("Storage resize is disabled (storage_resize_mode is off). Skipping volume sync.") | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	return nil | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (c *Cluster) syncUnderlyingEBSVolume() error { | ||||||
|  | 	c.logger.Infof("starting to sync EBS volumes: type, iops, throughput, and size") | ||||||
|  | 
 | ||||||
|  | 	var err error | ||||||
|  | 
 | ||||||
|  | 	targetValue := c.Spec.Volume | ||||||
|  | 	newSize, err := resource.ParseQuantity(targetValue.Size) | ||||||
|  | 	targetSize := quantityToGigabyte(newSize) | ||||||
|  | 
 | ||||||
|  | 	awsGp3 := aws.String("gp3") | ||||||
|  | 	awsIo2 := aws.String("io2") | ||||||
|  | 
 | ||||||
|  | 	errors := []string{} | ||||||
|  | 
 | ||||||
|  | 	for _, volume := range c.EBSVolumes { | ||||||
|  | 		var modifyIops *int64 | ||||||
|  | 		var modifyThroughput *int64 | ||||||
|  | 		var modifySize *int64 | ||||||
|  | 		var modifyType *string | ||||||
|  | 
 | ||||||
|  | 		if targetValue.Iops != nil { | ||||||
|  | 			if volume.Iops != *targetValue.Iops { | ||||||
|  | 				modifyIops = targetValue.Iops | ||||||
|  | 			} | ||||||
|  | 		} | ||||||
|  | 
 | ||||||
|  | 		if targetValue.Throughput != nil { | ||||||
|  | 			if volume.Throughput != *targetValue.Throughput { | ||||||
|  | 				modifyThroughput = targetValue.Throughput | ||||||
|  | 			} | ||||||
|  | 		} | ||||||
|  | 
 | ||||||
|  | 		if targetSize > volume.Size { | ||||||
|  | 			modifySize = &targetSize | ||||||
|  | 		} | ||||||
|  | 
 | ||||||
|  | 		if modifyIops != nil || modifyThroughput != nil || modifySize != nil { | ||||||
|  | 			if modifyIops != nil || modifyThroughput != nil { | ||||||
|  | 				// we default to gp3 if iops and throughput are configured
 | ||||||
|  | 				modifyType = awsGp3 | ||||||
|  | 				if targetValue.VolumeType == "io2" { | ||||||
|  | 					modifyType = awsIo2 | ||||||
|  | 				} | ||||||
|  | 			} else if targetValue.VolumeType == "gp3" && volume.VolumeType != "gp3" { | ||||||
|  | 				modifyType = awsGp3 | ||||||
|  | 			} else { | ||||||
|  | 				// do not touch type
 | ||||||
|  | 				modifyType = nil | ||||||
|  | 			} | ||||||
|  | 
 | ||||||
|  | 			err = c.VolumeResizer.ModifyVolume(volume.VolumeID, modifyType, modifySize, modifyIops, modifyThroughput) | ||||||
|  | 			if err != nil { | ||||||
|  | 				errors = append(errors, fmt.Sprintf("modify volume failed: volume=%s size=%d iops=%d throughput=%d", volume.VolumeID, volume.Size, volume.Iops, volume.Throughput)) | ||||||
|  | 			} | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	if len(errors) > 0 { | ||||||
|  | 		for _, s := range errors { | ||||||
|  | 			c.logger.Warningf(s) | ||||||
|  | 		} | ||||||
|  | 		// c.logger.Errorf("failed to modify %d of %d volumes", len(c.EBSVolumes), len(errors))
 | ||||||
|  | 	} | ||||||
|  | 	return nil | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (c *Cluster) populateVolumeMetaData() error { | ||||||
|  | 	c.logger.Infof("starting to read EBS meta data") | ||||||
|  | 
 | ||||||
|  | 	pvs, err := c.listPersistentVolumes() | ||||||
|  | 	if err != nil { | ||||||
|  | 		return fmt.Errorf("could not list persistent volumes: %v", err) | ||||||
|  | 	} | ||||||
|  | 	c.logger.Debugf("found %d volumes, size of known volumes %d", len(pvs), len(c.EBSVolumes)) | ||||||
|  | 
 | ||||||
|  | 	volumeIds := []string{} | ||||||
|  | 	var volumeID string | ||||||
|  | 	for _, pv := range pvs { | ||||||
|  | 		volumeID, err = c.VolumeResizer.ExtractVolumeID(pv.Spec.AWSElasticBlockStore.VolumeID) | ||||||
|  | 		if err != nil { | ||||||
|  | 			continue | ||||||
|  | 		} | ||||||
|  | 
 | ||||||
|  | 		volumeIds = append(volumeIds, volumeID) | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	currentVolumes, err := c.VolumeResizer.DescribeVolumes(volumeIds) | ||||||
|  | 	if nil != err { | ||||||
|  | 		return err | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	if len(currentVolumes) != len(c.EBSVolumes) { | ||||||
|  | 		c.logger.Debugf("number of ebs volumes (%d) discovered differs from already known volumes (%d)", len(currentVolumes), len(c.EBSVolumes)) | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	// reset map, operator is not responsible for dangling ebs volumes
 | ||||||
|  | 	c.EBSVolumes = make(map[string]volumes.VolumeProperties) | ||||||
|  | 	for _, volume := range currentVolumes { | ||||||
|  | 		c.EBSVolumes[volume.VolumeID] = volume | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	return nil | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // syncVolumeClaims reads all persistent volume claims and checks that their size matches the one declared in the statefulset.
 | ||||||
|  | func (c *Cluster) syncVolumeClaims() error { | ||||||
|  | 	c.setProcessName("syncing volume claims") | ||||||
|  | 
 | ||||||
|  | 	needsResizing, err := c.volumeClaimsNeedResizing(c.Spec.Volume) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return fmt.Errorf("could not compare size of the volume claims: %v", err) | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	if !needsResizing { | ||||||
|  | 		c.logger.Infof("volume claims do not require changes") | ||||||
|  | 		return nil | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	if err := c.resizeVolumeClaims(c.Spec.Volume); err != nil { | ||||||
|  | 		return fmt.Errorf("could not sync volume claims: %v", err) | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	c.logger.Infof("volume claims have been synced successfully") | ||||||
|  | 
 | ||||||
|  | 	return nil | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // syncVolumes reads all persistent volumes and checks that their size matches the one declared in the statefulset.
 | ||||||
|  | func (c *Cluster) syncEbsVolumes() error { | ||||||
|  | 	c.setProcessName("syncing EBS and Claims volumes") | ||||||
|  | 
 | ||||||
|  | 	act, err := c.volumesNeedResizing() | ||||||
|  | 	if err != nil { | ||||||
|  | 		return fmt.Errorf("could not compare size of the volumes: %v", err) | ||||||
|  | 	} | ||||||
|  | 	if !act { | ||||||
|  | 		return nil | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	if err := c.resizeVolumes(); err != nil { | ||||||
|  | 		return fmt.Errorf("could not sync volumes: %v", err) | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	c.logger.Infof("volumes have been synced successfully") | ||||||
|  | 
 | ||||||
|  | 	return nil | ||||||
|  | } | ||||||
|  | 
 | ||||||
| func (c *Cluster) listPersistentVolumeClaims() ([]v1.PersistentVolumeClaim, error) { | func (c *Cluster) listPersistentVolumeClaims() ([]v1.PersistentVolumeClaim, error) { | ||||||
| 	ns := c.Namespace | 	ns := c.Namespace | ||||||
| 	listOptions := metav1.ListOptions{ | 	listOptions := metav1.ListOptions{ | ||||||
|  | @ -125,15 +327,16 @@ func (c *Cluster) resizeVolumes() error { | ||||||
| 
 | 
 | ||||||
| 	c.setProcessName("resizing EBS volumes") | 	c.setProcessName("resizing EBS volumes") | ||||||
| 
 | 
 | ||||||
| 	resizer := c.VolumeResizer |  | ||||||
| 	var totalIncompatible int |  | ||||||
| 
 |  | ||||||
| 	newQuantity, err := resource.ParseQuantity(c.Spec.Volume.Size) | 	newQuantity, err := resource.ParseQuantity(c.Spec.Volume.Size) | ||||||
| 	if err != nil { | 	if err != nil { | ||||||
| 		return fmt.Errorf("could not parse volume size: %v", err) | 		return fmt.Errorf("could not parse volume size: %v", err) | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	pvs, newSize, err := c.listVolumesWithManifestSize(c.Spec.Volume) | 	newSize := quantityToGigabyte(newQuantity) | ||||||
|  | 	resizer := c.VolumeResizer | ||||||
|  | 	var totalIncompatible int | ||||||
|  | 
 | ||||||
|  | 	pvs, err := c.listPersistentVolumes() | ||||||
| 	if err != nil { | 	if err != nil { | ||||||
| 		return fmt.Errorf("could not list persistent volumes: %v", err) | 		return fmt.Errorf("could not list persistent volumes: %v", err) | ||||||
| 	} | 	} | ||||||
|  | @ -214,33 +417,23 @@ func (c *Cluster) volumeClaimsNeedResizing(newVolume acidv1.Volume) (bool, error | ||||||
| 	return false, nil | 	return false, nil | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| func (c *Cluster) volumesNeedResizing(newVolume acidv1.Volume) (bool, error) { | func (c *Cluster) volumesNeedResizing() (bool, error) { | ||||||
| 	vols, manifestSize, err := c.listVolumesWithManifestSize(newVolume) | 	newQuantity, _ := resource.ParseQuantity(c.Spec.Volume.Size) | ||||||
|  | 	newSize := quantityToGigabyte(newQuantity) | ||||||
|  | 
 | ||||||
|  | 	vols, err := c.listPersistentVolumes() | ||||||
| 	if err != nil { | 	if err != nil { | ||||||
| 		return false, err | 		return false, err | ||||||
| 	} | 	} | ||||||
| 	for _, pv := range vols { | 	for _, pv := range vols { | ||||||
| 		currentSize := quantityToGigabyte(pv.Spec.Capacity[v1.ResourceStorage]) | 		currentSize := quantityToGigabyte(pv.Spec.Capacity[v1.ResourceStorage]) | ||||||
| 		if currentSize != manifestSize { | 		if currentSize != newSize { | ||||||
| 			return true, nil | 			return true, nil | ||||||
| 		} | 		} | ||||||
| 	} | 	} | ||||||
| 	return false, nil | 	return false, nil | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| func (c *Cluster) listVolumesWithManifestSize(newVolume acidv1.Volume) ([]*v1.PersistentVolume, int64, error) { |  | ||||||
| 	newSize, err := resource.ParseQuantity(newVolume.Size) |  | ||||||
| 	if err != nil { |  | ||||||
| 		return nil, 0, fmt.Errorf("could not parse volume size from the manifest: %v", err) |  | ||||||
| 	} |  | ||||||
| 	manifestSize := quantityToGigabyte(newSize) |  | ||||||
| 	vols, err := c.listPersistentVolumes() |  | ||||||
| 	if err != nil { |  | ||||||
| 		return nil, 0, fmt.Errorf("could not list persistent volumes: %v", err) |  | ||||||
| 	} |  | ||||||
| 	return vols, manifestSize, nil |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| // getPodNameFromPersistentVolume returns a pod name that it extracts from the volume claim ref.
 | // getPodNameFromPersistentVolume returns a pod name that it extracts from the volume claim ref.
 | ||||||
| func getPodNameFromPersistentVolume(pv *v1.PersistentVolume) *spec.NamespacedName { | func getPodNameFromPersistentVolume(pv *v1.PersistentVolume) *spec.NamespacedName { | ||||||
| 	namespace := pv.Spec.ClaimRef.Namespace | 	namespace := pv.Spec.ClaimRef.Namespace | ||||||
|  | @ -258,7 +451,7 @@ func (c *Cluster) executeEBSMigration() error { | ||||||
| 	} | 	} | ||||||
| 	c.logger.Infof("starting EBS gp2 to gp3 migration") | 	c.logger.Infof("starting EBS gp2 to gp3 migration") | ||||||
| 
 | 
 | ||||||
| 	pvs, _, err := c.listVolumesWithManifestSize(c.Spec.Volume) | 	pvs, err := c.listPersistentVolumes() | ||||||
| 	if err != nil { | 	if err != nil { | ||||||
| 		return fmt.Errorf("could not list persistent volumes: %v", err) | 		return fmt.Errorf("could not list persistent volumes: %v", err) | ||||||
| 	} | 	} | ||||||
|  | @ -294,10 +487,13 @@ func (c *Cluster) executeEBSMigration() error { | ||||||
| 		return err | 		return err | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
|  | 	var i3000 int64 = 3000 | ||||||
|  | 	var i125 int64 = 125 | ||||||
|  | 
 | ||||||
| 	for _, volume := range awsVolumes { | 	for _, volume := range awsVolumes { | ||||||
| 		if volume.VolumeType == "gp2" && volume.Size < c.OpConfig.EnableEBSGp3MigrationMaxSize { | 		if volume.VolumeType == "gp2" && volume.Size < c.OpConfig.EnableEBSGp3MigrationMaxSize { | ||||||
| 			c.logger.Infof("modifying EBS volume %s to type gp3 migration (%d)", volume.VolumeID, volume.Size) | 			c.logger.Infof("modifying EBS volume %s to type gp3 migration (%d)", volume.VolumeID, volume.Size) | ||||||
| 			err = c.VolumeResizer.ModifyVolume(volume.VolumeID, "gp3", volume.Size, 3000, 125) | 			err = c.VolumeResizer.ModifyVolume(volume.VolumeID, aws.String("gp3"), &volume.Size, &i3000, &i125) | ||||||
| 			if nil != err { | 			if nil != err { | ||||||
| 				c.logger.Warningf("modifying volume %s failed: %v", volume.VolumeID, err) | 				c.logger.Warningf("modifying volume %s failed: %v", volume.VolumeID, err) | ||||||
| 			} | 			} | ||||||
|  |  | ||||||
|  | @ -11,7 +11,9 @@ import ( | ||||||
| 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" | 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" | ||||||
| 	"k8s.io/apimachinery/pkg/labels" | 	"k8s.io/apimachinery/pkg/labels" | ||||||
| 
 | 
 | ||||||
|  | 	"github.com/aws/aws-sdk-go/aws" | ||||||
| 	"github.com/golang/mock/gomock" | 	"github.com/golang/mock/gomock" | ||||||
|  | 
 | ||||||
| 	"github.com/stretchr/testify/assert" | 	"github.com/stretchr/testify/assert" | ||||||
| 	"github.com/zalando/postgres-operator/mocks" | 	"github.com/zalando/postgres-operator/mocks" | ||||||
| 	acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1" | 	acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1" | ||||||
|  | @ -187,60 +189,16 @@ func TestMigrateEBS(t *testing.T) { | ||||||
| 	cluster.Namespace = namespace | 	cluster.Namespace = namespace | ||||||
| 	filterLabels := cluster.labelsSet(false) | 	filterLabels := cluster.labelsSet(false) | ||||||
| 
 | 
 | ||||||
| 	pvcList := CreatePVCs(namespace, clusterName, filterLabels, 2, "1Gi") | 	testVolumes := []testVolume{ | ||||||
| 
 | 		{ | ||||||
| 	ps := v1.PersistentVolumeSpec{} | 			size: 100, | ||||||
| 	ps.AWSElasticBlockStore = &v1.AWSElasticBlockStoreVolumeSource{} | 		}, | ||||||
| 	ps.AWSElasticBlockStore.VolumeID = "aws://eu-central-1b/ebs-volume-1" | 		{ | ||||||
| 
 | 			size: 100, | ||||||
| 	ps2 := v1.PersistentVolumeSpec{} |  | ||||||
| 	ps2.AWSElasticBlockStore = &v1.AWSElasticBlockStoreVolumeSource{} |  | ||||||
| 	ps2.AWSElasticBlockStore.VolumeID = "aws://eu-central-1b/ebs-volume-2" |  | ||||||
| 
 |  | ||||||
| 	pvList := &v1.PersistentVolumeList{ |  | ||||||
| 		Items: []v1.PersistentVolume{ |  | ||||||
| 			{ |  | ||||||
| 				ObjectMeta: metav1.ObjectMeta{ |  | ||||||
| 					Name: "persistent-volume-0", |  | ||||||
| 				}, |  | ||||||
| 				Spec: ps, |  | ||||||
| 			}, |  | ||||||
| 			{ |  | ||||||
| 				ObjectMeta: metav1.ObjectMeta{ |  | ||||||
| 					Name: "persistent-volume-1", |  | ||||||
| 				}, |  | ||||||
| 				Spec: ps2, |  | ||||||
| 			}, |  | ||||||
| 		}, | 		}, | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	for _, pvc := range pvcList.Items { | 	initTestVolumesAndPods(cluster.KubeClient, namespace, clusterName, filterLabels, testVolumes) | ||||||
| 		cluster.KubeClient.PersistentVolumeClaims(namespace).Create(context.TODO(), &pvc, metav1.CreateOptions{}) |  | ||||||
| 	} |  | ||||||
| 
 |  | ||||||
| 	for _, pv := range pvList.Items { |  | ||||||
| 		cluster.KubeClient.PersistentVolumes().Create(context.TODO(), &pv, metav1.CreateOptions{}) |  | ||||||
| 	} |  | ||||||
| 
 |  | ||||||
| 	pod := v1.Pod{ |  | ||||||
| 		ObjectMeta: metav1.ObjectMeta{ |  | ||||||
| 			Name:   clusterName + "-0", |  | ||||||
| 			Labels: filterLabels, |  | ||||||
| 		}, |  | ||||||
| 		Spec: v1.PodSpec{}, |  | ||||||
| 	} |  | ||||||
| 
 |  | ||||||
| 	cluster.KubeClient.Pods(namespace).Create(context.TODO(), &pod, metav1.CreateOptions{}) |  | ||||||
| 
 |  | ||||||
| 	pod = v1.Pod{ |  | ||||||
| 		ObjectMeta: metav1.ObjectMeta{ |  | ||||||
| 			Name:   clusterName + "-1", |  | ||||||
| 			Labels: filterLabels, |  | ||||||
| 		}, |  | ||||||
| 		Spec: v1.PodSpec{}, |  | ||||||
| 	} |  | ||||||
| 
 |  | ||||||
| 	cluster.KubeClient.Pods(namespace).Create(context.TODO(), &pod, metav1.CreateOptions{}) |  | ||||||
| 
 | 
 | ||||||
| 	ctrl := gomock.NewController(t) | 	ctrl := gomock.NewController(t) | ||||||
| 	defer ctrl.Finish() | 	defer ctrl.Finish() | ||||||
|  | @ -256,8 +214,251 @@ func TestMigrateEBS(t *testing.T) { | ||||||
| 			{VolumeID: "ebs-volume-2", VolumeType: "gp3", Size: 100}}, nil) | 			{VolumeID: "ebs-volume-2", VolumeType: "gp3", Size: 100}}, nil) | ||||||
| 
 | 
 | ||||||
| 	// expect only gp2 volume to be modified
 | 	// expect only gp2 volume to be modified
 | ||||||
| 	resizer.EXPECT().ModifyVolume(gomock.Eq("ebs-volume-1"), gomock.Eq("gp3"), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil) | 	resizer.EXPECT().ModifyVolume(gomock.Eq("ebs-volume-1"), gomock.Eq(aws.String("gp3")), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil) | ||||||
| 
 | 
 | ||||||
| 	cluster.VolumeResizer = resizer | 	cluster.VolumeResizer = resizer | ||||||
| 	cluster.executeEBSMigration() | 	cluster.executeEBSMigration() | ||||||
| } | } | ||||||
|  | 
 | ||||||
|  | type testVolume struct { | ||||||
|  | 	iops        int64 | ||||||
|  | 	throughput  int64 | ||||||
|  | 	size        int64 | ||||||
|  | 	volType     string | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func initTestVolumesAndPods(client k8sutil.KubernetesClient, namespace, clustername string, labels labels.Set, volumes []testVolume) { | ||||||
|  | 	i := 0 | ||||||
|  | 	for _, v := range volumes { | ||||||
|  | 		storage1Gi, _ := resource.ParseQuantity(fmt.Sprintf("%d", v.size)) | ||||||
|  | 
 | ||||||
|  | 		ps := v1.PersistentVolumeSpec{} | ||||||
|  | 		ps.AWSElasticBlockStore = &v1.AWSElasticBlockStoreVolumeSource{} | ||||||
|  | 		ps.AWSElasticBlockStore.VolumeID = fmt.Sprintf("aws://eu-central-1b/ebs-volume-%d", i+1) | ||||||
|  | 
 | ||||||
|  | 		pv := v1.PersistentVolume{ | ||||||
|  | 			ObjectMeta: metav1.ObjectMeta{ | ||||||
|  | 				Name: fmt.Sprintf("persistent-volume-%d", i), | ||||||
|  | 			}, | ||||||
|  | 			Spec: ps, | ||||||
|  | 		} | ||||||
|  | 
 | ||||||
|  | 		client.PersistentVolumes().Create(context.TODO(), &pv, metav1.CreateOptions{}) | ||||||
|  | 
 | ||||||
|  | 		pvc := v1.PersistentVolumeClaim{ | ||||||
|  | 			ObjectMeta: metav1.ObjectMeta{ | ||||||
|  | 				Name:      fmt.Sprintf("%s-%s-%d", constants.DataVolumeName, clustername, i), | ||||||
|  | 				Namespace: namespace, | ||||||
|  | 				Labels:    labels, | ||||||
|  | 			}, | ||||||
|  | 			Spec: v1.PersistentVolumeClaimSpec{ | ||||||
|  | 				Resources: v1.ResourceRequirements{ | ||||||
|  | 					Requests: v1.ResourceList{ | ||||||
|  | 						v1.ResourceStorage: storage1Gi, | ||||||
|  | 					}, | ||||||
|  | 				}, | ||||||
|  | 				VolumeName: fmt.Sprintf("persistent-volume-%d", i), | ||||||
|  | 			}, | ||||||
|  | 		} | ||||||
|  | 
 | ||||||
|  | 		client.PersistentVolumeClaims(namespace).Create(context.TODO(), &pvc, metav1.CreateOptions{}) | ||||||
|  | 
 | ||||||
|  | 		pod := v1.Pod{ | ||||||
|  | 			ObjectMeta: metav1.ObjectMeta{ | ||||||
|  | 				Name:   fmt.Sprintf("%s-%d", clustername, i), | ||||||
|  | 				Labels: labels, | ||||||
|  | 			}, | ||||||
|  | 			Spec: v1.PodSpec{}, | ||||||
|  | 		} | ||||||
|  | 
 | ||||||
|  | 		client.Pods(namespace).Create(context.TODO(), &pod, metav1.CreateOptions{}) | ||||||
|  | 
 | ||||||
|  | 		i = i + 1 | ||||||
|  | 	} | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func TestMigrateGp3Support(t *testing.T) { | ||||||
|  | 	client, _ := newFakeK8sPVCclient() | ||||||
|  | 	clusterName := "acid-test-cluster" | ||||||
|  | 	namespace := "default" | ||||||
|  | 
 | ||||||
|  | 	// new cluster with mixed storage resize mode and configured labels
 | ||||||
|  | 	var cluster = New( | ||||||
|  | 		Config{ | ||||||
|  | 			OpConfig: config.Config{ | ||||||
|  | 				Resources: config.Resources{ | ||||||
|  | 					ClusterLabels:    map[string]string{"application": "spilo"}, | ||||||
|  | 					ClusterNameLabel: "cluster-name", | ||||||
|  | 				}, | ||||||
|  | 				StorageResizeMode:            "mixed", | ||||||
|  | 				EnableEBSGp3Migration:        false, | ||||||
|  | 				EnableEBSGp3MigrationMaxSize: 1000, | ||||||
|  | 			}, | ||||||
|  | 		}, client, acidv1.Postgresql{}, logger, eventRecorder) | ||||||
|  | 
 | ||||||
|  | 	cluster.Spec.Volume.Size = "150Gi" | ||||||
|  | 	cluster.Spec.Volume.Iops = aws.Int64(6000) | ||||||
|  | 	cluster.Spec.Volume.Throughput = aws.Int64(275) | ||||||
|  | 
 | ||||||
|  | 	// set metadata, so that labels will get correct values
 | ||||||
|  | 	cluster.Name = clusterName | ||||||
|  | 	cluster.Namespace = namespace | ||||||
|  | 	filterLabels := cluster.labelsSet(false) | ||||||
|  | 
 | ||||||
|  | 	testVolumes := []testVolume{ | ||||||
|  | 		{ | ||||||
|  | 			size: 100, | ||||||
|  | 		}, | ||||||
|  | 		{ | ||||||
|  | 			size: 100, | ||||||
|  | 		}, | ||||||
|  | 		{ | ||||||
|  | 			size: 100, | ||||||
|  | 		}, | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	initTestVolumesAndPods(cluster.KubeClient, namespace, clusterName, filterLabels, testVolumes) | ||||||
|  | 
 | ||||||
|  | 	ctrl := gomock.NewController(t) | ||||||
|  | 	defer ctrl.Finish() | ||||||
|  | 
 | ||||||
|  | 	resizer := mocks.NewMockVolumeResizer(ctrl) | ||||||
|  | 
 | ||||||
|  | 	resizer.EXPECT().ExtractVolumeID(gomock.Eq("aws://eu-central-1b/ebs-volume-1")).Return("ebs-volume-1", nil) | ||||||
|  | 	resizer.EXPECT().ExtractVolumeID(gomock.Eq("aws://eu-central-1b/ebs-volume-2")).Return("ebs-volume-2", nil) | ||||||
|  | 	resizer.EXPECT().ExtractVolumeID(gomock.Eq("aws://eu-central-1b/ebs-volume-3")).Return("ebs-volume-3", nil) | ||||||
|  | 
 | ||||||
|  | 	resizer.EXPECT().DescribeVolumes(gomock.Eq([]string{"ebs-volume-1", "ebs-volume-2", "ebs-volume-3"})).Return( | ||||||
|  | 		[]volumes.VolumeProperties{ | ||||||
|  | 			{VolumeID: "ebs-volume-1", VolumeType: "gp3", Size: 100, Iops: 3000}, | ||||||
|  | 			{VolumeID: "ebs-volume-2", VolumeType: "gp3", Size: 105, Iops: 4000}, | ||||||
|  | 			{VolumeID: "ebs-volume-3", VolumeType: "gp3", Size: 151, Iops: 6000, Throughput: 275}}, nil) | ||||||
|  | 
 | ||||||
|  | 	// expect the two volumes that differ from the manifest (size, iops, throughput) to be modified; the third already matches
 | ||||||
|  | 	resizer.EXPECT().ModifyVolume(gomock.Eq("ebs-volume-1"), gomock.Eq(aws.String("gp3")), gomock.Eq(aws.Int64(150)), gomock.Eq(aws.Int64(6000)), gomock.Eq(aws.Int64(275))).Return(nil) | ||||||
|  | 	resizer.EXPECT().ModifyVolume(gomock.Eq("ebs-volume-2"), gomock.Eq(aws.String("gp3")), gomock.Eq(aws.Int64(150)), gomock.Eq(aws.Int64(6000)), gomock.Eq(aws.Int64(275))).Return(nil) | ||||||
|  | 	// resizer.EXPECT().ModifyVolume(gomock.Eq("ebs-volume-3"), gomock.Eq(aws.String("gp3")), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil)
 | ||||||
|  | 
 | ||||||
|  | 	cluster.VolumeResizer = resizer | ||||||
|  | 	cluster.syncVolumes() | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func TestManualGp2Gp3Support(t *testing.T) { | ||||||
|  | 	client, _ := newFakeK8sPVCclient() | ||||||
|  | 	clusterName := "acid-test-cluster" | ||||||
|  | 	namespace := "default" | ||||||
|  | 
 | ||||||
|  | 	// new cluster with mixed storage resize mode and configured labels
 | ||||||
|  | 	var cluster = New( | ||||||
|  | 		Config{ | ||||||
|  | 			OpConfig: config.Config{ | ||||||
|  | 				Resources: config.Resources{ | ||||||
|  | 					ClusterLabels:    map[string]string{"application": "spilo"}, | ||||||
|  | 					ClusterNameLabel: "cluster-name", | ||||||
|  | 				}, | ||||||
|  | 				StorageResizeMode:            "mixed", | ||||||
|  | 				EnableEBSGp3Migration:        false, | ||||||
|  | 				EnableEBSGp3MigrationMaxSize: 1000, | ||||||
|  | 			}, | ||||||
|  | 		}, client, acidv1.Postgresql{}, logger, eventRecorder) | ||||||
|  | 
 | ||||||
|  | 	cluster.Spec.Volume.Size = "150Gi" | ||||||
|  | 	cluster.Spec.Volume.Iops = aws.Int64(6000) | ||||||
|  | 	cluster.Spec.Volume.Throughput = aws.Int64(275) | ||||||
|  | 
 | ||||||
|  | 	// set metadata, so that labels will get correct values
 | ||||||
|  | 	cluster.Name = clusterName | ||||||
|  | 	cluster.Namespace = namespace | ||||||
|  | 	filterLabels := cluster.labelsSet(false) | ||||||
|  | 
 | ||||||
|  | 	testVolumes := []testVolume{ | ||||||
|  | 		{ | ||||||
|  | 			size: 100, | ||||||
|  | 		}, | ||||||
|  | 		{ | ||||||
|  | 			size: 100, | ||||||
|  | 		}, | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	initTestVolumesAndPods(cluster.KubeClient, namespace, clusterName, filterLabels, testVolumes) | ||||||
|  | 
 | ||||||
|  | 	ctrl := gomock.NewController(t) | ||||||
|  | 	defer ctrl.Finish() | ||||||
|  | 
 | ||||||
|  | 	resizer := mocks.NewMockVolumeResizer(ctrl) | ||||||
|  | 
 | ||||||
|  | 	resizer.EXPECT().ExtractVolumeID(gomock.Eq("aws://eu-central-1b/ebs-volume-1")).Return("ebs-volume-1", nil) | ||||||
|  | 	resizer.EXPECT().ExtractVolumeID(gomock.Eq("aws://eu-central-1b/ebs-volume-2")).Return("ebs-volume-2", nil) | ||||||
|  | 
 | ||||||
|  | 	resizer.EXPECT().DescribeVolumes(gomock.Eq([]string{"ebs-volume-1", "ebs-volume-2"})).Return( | ||||||
|  | 		[]volumes.VolumeProperties{ | ||||||
|  | 			{VolumeID: "ebs-volume-1", VolumeType: "gp2", Size: 150, Iops: 3000}, | ||||||
|  | 			{VolumeID: "ebs-volume-2", VolumeType: "gp2", Size: 150, Iops: 4000}, | ||||||
|  | 		}, nil) | ||||||
|  | 
 | ||||||
|  | 	// expect both gp2 volumes to be converted to gp3 with the requested iops and throughput (size already matches)
 | ||||||
|  | 	resizer.EXPECT().ModifyVolume(gomock.Eq("ebs-volume-1"), gomock.Eq(aws.String("gp3")), gomock.Nil(), gomock.Eq(aws.Int64(6000)), gomock.Eq(aws.Int64(275))).Return(nil) | ||||||
|  | 	resizer.EXPECT().ModifyVolume(gomock.Eq("ebs-volume-2"), gomock.Eq(aws.String("gp3")), gomock.Nil(), gomock.Eq(aws.Int64(6000)), gomock.Eq(aws.Int64(275))).Return(nil) | ||||||
|  | 
 | ||||||
|  | 	cluster.VolumeResizer = resizer | ||||||
|  | 	cluster.syncVolumes() | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func TestDontTouchType(t *testing.T) { | ||||||
|  | 	client, _ := newFakeK8sPVCclient() | ||||||
|  | 	clusterName := "acid-test-cluster" | ||||||
|  | 	namespace := "default" | ||||||
|  | 
 | ||||||
|  | 	// new cluster with mixed storage resize mode and configured labels
 | ||||||
|  | 	var cluster = New( | ||||||
|  | 		Config{ | ||||||
|  | 			OpConfig: config.Config{ | ||||||
|  | 				Resources: config.Resources{ | ||||||
|  | 					ClusterLabels:    map[string]string{"application": "spilo"}, | ||||||
|  | 					ClusterNameLabel: "cluster-name", | ||||||
|  | 				}, | ||||||
|  | 				StorageResizeMode:            "mixed", | ||||||
|  | 				EnableEBSGp3Migration:        false, | ||||||
|  | 				EnableEBSGp3MigrationMaxSize: 1000, | ||||||
|  | 			}, | ||||||
|  | 		}, client, acidv1.Postgresql{}, logger, eventRecorder) | ||||||
|  | 
 | ||||||
|  | 	cluster.Spec.Volume.Size = "177Gi" | ||||||
|  | 
 | ||||||
|  | 	// set metadata, so that labels will get correct values
 | ||||||
|  | 	cluster.Name = clusterName | ||||||
|  | 	cluster.Namespace = namespace | ||||||
|  | 	filterLabels := cluster.labelsSet(false) | ||||||
|  | 
 | ||||||
|  | 	testVolumes := []testVolume{ | ||||||
|  | 		{ | ||||||
|  | 			size: 150, | ||||||
|  | 		}, | ||||||
|  | 		{ | ||||||
|  | 			size: 150, | ||||||
|  | 		}, | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	initTestVolumesAndPods(cluster.KubeClient, namespace, clusterName, filterLabels, testVolumes) | ||||||
|  | 
 | ||||||
|  | 	ctrl := gomock.NewController(t) | ||||||
|  | 	defer ctrl.Finish() | ||||||
|  | 
 | ||||||
|  | 	resizer := mocks.NewMockVolumeResizer(ctrl) | ||||||
|  | 
 | ||||||
|  | 	resizer.EXPECT().ExtractVolumeID(gomock.Eq("aws://eu-central-1b/ebs-volume-1")).Return("ebs-volume-1", nil) | ||||||
|  | 	resizer.EXPECT().ExtractVolumeID(gomock.Eq("aws://eu-central-1b/ebs-volume-2")).Return("ebs-volume-2", nil) | ||||||
|  | 
 | ||||||
|  | 	resizer.EXPECT().DescribeVolumes(gomock.Eq([]string{"ebs-volume-1", "ebs-volume-2"})).Return( | ||||||
|  | 		[]volumes.VolumeProperties{ | ||||||
|  | 			{VolumeID: "ebs-volume-1", VolumeType: "gp2", Size: 150, Iops: 3000}, | ||||||
|  | 			{VolumeID: "ebs-volume-2", VolumeType: "gp2", Size: 150, Iops: 4000}, | ||||||
|  | 		}, nil) | ||||||
|  | 
 | ||||||
|  | 	// expect both volumes to be resized only; type, iops and throughput stay untouched
 | ||||||
|  | 	resizer.EXPECT().ModifyVolume(gomock.Eq("ebs-volume-1"), gomock.Nil(), gomock.Eq(aws.Int64(177)), gomock.Nil(), gomock.Nil()).Return(nil) | ||||||
|  | 	resizer.EXPECT().ModifyVolume(gomock.Eq("ebs-volume-2"), gomock.Nil(), gomock.Eq(aws.Int64(177)), gomock.Nil(), gomock.Nil()).Return(nil) | ||||||
|  | 
 | ||||||
|  | 	cluster.VolumeResizer = resizer | ||||||
|  | 	cluster.syncVolumes() | ||||||
|  | } | ||||||
|  |  | ||||||
|  | @ -141,18 +141,9 @@ func (r *EBSVolumeResizer) ResizeVolume(volumeID string, newSize int64) error { | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| // ModifyVolume Modify EBS volume
 | // ModifyVolume Modify EBS volume
 | ||||||
| func (r *EBSVolumeResizer) ModifyVolume(volumeID string, newType string, newSize int64, iops int64, throughput int64) error { | func (r *EBSVolumeResizer) ModifyVolume(volumeID string, newType *string, newSize *int64, iops *int64, throughput *int64) error { | ||||||
| 	/* first check if the volume is already of a requested size */ | 	/* first check if the volume is already of a requested size */ | ||||||
| 	volumeOutput, err := r.connection.DescribeVolumes(&ec2.DescribeVolumesInput{VolumeIds: []*string{&volumeID}}) | 	input := ec2.ModifyVolumeInput{Size: newSize, VolumeId: &volumeID, VolumeType: newType, Iops: iops, Throughput: throughput} | ||||||
| 	if err != nil { |  | ||||||
| 		return fmt.Errorf("could not get information about the volume: %v", err) |  | ||||||
| 	} |  | ||||||
| 	vol := volumeOutput.Volumes[0] |  | ||||||
| 	if *vol.VolumeId != volumeID { |  | ||||||
| 		return fmt.Errorf("describe volume %q returned information about a non-matching volume %q", volumeID, *vol.VolumeId) |  | ||||||
| 	} |  | ||||||
| 
 |  | ||||||
| 	input := ec2.ModifyVolumeInput{Size: &newSize, VolumeId: &volumeID, VolumeType: &newType, Iops: &iops, Throughput: &throughput} |  | ||||||
| 	output, err := r.connection.ModifyVolume(&input) | 	output, err := r.connection.ModifyVolume(&input) | ||||||
| 	if err != nil { | 	if err != nil { | ||||||
| 		return fmt.Errorf("could not modify persistent volume: %v", err) | 		return fmt.Errorf("could not modify persistent volume: %v", err) | ||||||
|  |  | ||||||
|  | @ -21,7 +21,7 @@ type VolumeResizer interface { | ||||||
| 	GetProviderVolumeID(pv *v1.PersistentVolume) (string, error) | 	GetProviderVolumeID(pv *v1.PersistentVolume) (string, error) | ||||||
| 	ExtractVolumeID(volumeID string) (string, error) | 	ExtractVolumeID(volumeID string) (string, error) | ||||||
| 	ResizeVolume(providerVolumeID string, newSize int64) error | 	ResizeVolume(providerVolumeID string, newSize int64) error | ||||||
| 	ModifyVolume(providerVolumeID string, newType string, newSize int64, iops int64, throughput int64) error | 	ModifyVolume(providerVolumeID string, newType *string, newSize *int64, iops *int64, throughput *int64) error | ||||||
| 	DisconnectFromProvider() error | 	DisconnectFromProvider() error | ||||||
| 	DescribeVolumes(providerVolumesID []string) ([]VolumeProperties, error) | 	DescribeVolumes(providerVolumesID []string) ([]VolumeProperties, error) | ||||||
| } | } | ||||||
|  |  | ||||||
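The switch from value to pointer parameters above is what allows callers to modify only a subset of volume properties: passing `nil` leaves that property untouched. A short sketch of the two call shapes used elsewhere in this commit (identifiers as in `pkg/cluster/volumes.go`; sizes and limits are example values, error handling elided):

```go
// gp2 -> gp3 migration: switch the type and apply the gp3 baseline of
// 3000 IOPS / 125 MB/s while keeping the current size
err := c.VolumeResizer.ModifyVolume(volume.VolumeID, aws.String("gp3"), &volume.Size, aws.Int64(3000), aws.Int64(125))

// plain resize: grow the volume and leave type, IOPS and throughput unchanged
err = c.VolumeResizer.ModifyVolume(volume.VolumeID, nil, aws.Int64(177), nil, nil)
```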