Merge branch 'master' into docs-backup-restore

Felix Kunde 2021-08-26 15:01:20 +02:00
commit bb474ec542
20 changed files with 315 additions and 31 deletions

View File

@ -395,6 +395,8 @@ spec:
type: string
wal_s3_bucket:
type: string
wal_az_storage_account:
type: string
logical_backup:
type: object
properties:

View File

@ -561,6 +561,24 @@ spec:
properties:
iops:
type: integer
selector:
type: object
properties:
matchExpressions:
type: array
items:
type: object
properties:
key:
type: string
operator:
type: string
values:
type: array
items:
type: string
matchLabels:
type: object
size:
type: string
pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'

View File

@ -268,6 +268,9 @@ configAwsOrGcp:
# GCS bucket to use for shipping WAL segments with WAL-E
# wal_gs_bucket: ""
# Azure Storage Account to use for shipping WAL segments with WAL-G
# wal_az_storage_account: ""
# configure K8s cron job managed by the operator
configLogicalBackup:
# image for pods of the logical backup job (example runs pg_dumpall)

View File

@ -836,6 +836,63 @@ pod_environment_configmap: "postgres-operator-system/pod-env-overrides"
...
```

### Azure setup

To configure the operator on Azure, the following prerequisite is needed:

* A storage account in the same region as the Kubernetes cluster.

The configuration parameters that we will be using are:

* `pod_environment_secret`
* `wal_az_storage_account`

1. Create the K8s Secret resource that will contain your storage account's
access key. You will need a copy of this Secret in every namespace in which
you want to create postgresql clusters.

The latest version of WAL-G (v1.0) supports the use of a SAS token, but until
the WAL-G version bundled with the postgres-operator is updated you will have
to make do with the primary or secondary access key.

```yaml
apiVersion: v1
kind: Secret
metadata:
  name: psql-backup-creds
  namespace: default
type: Opaque
stringData:
  AZURE_STORAGE_ACCESS_KEY: <primary or secondary access key>
```

2. Set up a pod environment ConfigMap that instructs the operator to use WAL-G,
instead of WAL-E, for backup and restore.

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: pod-env-overrides
  namespace: postgres-operator-system
data:
  # Any env variable used by spilo can be added
  USE_WALG_BACKUP: "true"
  USE_WALG_RESTORE: "true"
  CLONE_USE_WALG_RESTORE: "true"
```

3. Set up your operator configuration values. With the `psql-backup-creds`
Secret and the `pod-env-overrides` ConfigMap applied to your cluster, ensure
that the operator's configuration is set up as follows:

```yaml
...
aws_or_gcp:
  pod_environment_secret: "psql-backup-creds"
  pod_environment_configmap: "postgres-operator-system/pod-env-overrides"
  wal_az_storage_account: "postgresbackupsbucket28302F2"  # name of the storage account where WAL-G ships the WAL segments
...
```
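
Taken together, the Secret, the ConfigMap and the operator configuration above
result in the Postgres (Spilo) pods seeing roughly the following environment;
this is purely an illustrative sketch, and the values simply mirror the
examples above.

```yaml
# Illustrative sketch of the resulting container environment, not a manifest to apply.
AZURE_STORAGE_ACCOUNT: "postgresbackupsbucket28302F2"     # from wal_az_storage_account
AZURE_STORAGE_ACCESS_KEY: "<key from psql-backup-creds>"  # injected via pod_environment_secret
USE_WALG_BACKUP: "true"                                   # from the pod-env-overrides ConfigMap
USE_WALG_RESTORE: "true"
CLONE_USE_WALG_RESTORE: "true"
```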

### Restoring physical backups

If cluster members have to be (re)initialized restoring physical backups

View File

@ -399,6 +399,11 @@ properties of the persistent storage that stores Postgres data.
When running the operator on AWS the latest generation of EBS volumes (`gp3`)
allows for configuring the throughput in MB/s. Maximum is 1000. Optional.
* **selector**
A label query over persistent volumes (PVs) to consider for binding. See the
[Kubernetes documentation](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/)
for details on using `matchLabels` and `matchExpressions`; a short sketch is
shown below. Optional.
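
For illustration only, a minimal `volume` section that uses both selector forms
could look like the sketch below; the label keys and values are placeholders,
not required names.

```yaml
volume:
  size: 1Gi
  selector:
    # matchLabels and matchExpressions are both optional and are ANDed when combined
    matchLabels:
      environment: dev
    matchExpressions:
      - { key: flavour, operator: In, values: [ "banana", "chocolate" ] }
```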
## Sidecar definitions

Those parameters are defined under the `sidecars` key. They consist of a list

View File

@ -557,6 +557,12 @@ yet officially supported.
[service accounts](https://cloud.google.com/kubernetes-engine/docs/tutorials/authenticating-to-cloud-platform).
The default is empty
* **wal_az_storage_account**
Azure Storage Account to use for shipping WAL segments with WAL-G. The
storage account must exist and be accessible by Postgres pods. Note that only
the name of the storage account is required.
The default is empty.
* **log_s3_bucket**
S3 bucket to use for shipping Postgres daily logs. Works only with S3 on AWS.
The bucket has to be present and accessible by Postgres pods. The default is

View File

@ -46,6 +46,12 @@ spec:
# storageClass: my-sc
# iops: 1000 # for EBS gp3
# throughput: 250 # in MB/s for EBS gp3
# selector:
# matchExpressions:
# - { key: flavour, operator: In, values: [ "banana", "chocolate" ] }
# matchLabels:
# environment: dev
# service: postgres
additionalVolumes:
- name: empty
mountPath: /opt/empty

View File

@ -129,6 +129,7 @@ data:
# team_api_role_configuration: "log_statement:all"
# teams_api_url: http://fake-teams-api.default.svc.cluster.local
# toleration: ""
# wal_az_storage_account: ""
# wal_gs_bucket: ""
# wal_s3_bucket: ""
watched_namespace: "*" # listen to all namespaces

View File

@ -384,6 +384,8 @@ spec:
type: string
log_s3_bucket:
type: string
wal_az_storage_account:
type: string
wal_gs_bucket:
type: string
wal_s3_bucket:

View File

@ -121,6 +121,7 @@ configuration:
# gcp_credentials: ""
# kube_iam_role: ""
# log_s3_bucket: ""
# wal_az_storage_account: ""
# wal_gs_bucket: ""
# wal_s3_bucket: ""
logical_backup:

View File

@ -557,6 +557,24 @@ spec:
properties:
iops:
type: integer
selector:
type: object
properties:
matchExpressions:
type: array
items:
type: object
properties:
key:
type: string
operator:
type: string
values:
type: array
items:
type: string
matchLabels:
type: object
size:
type: string
pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'

View File

@ -841,6 +841,54 @@ var PostgresCRDResourceValidation = apiextv1.CustomResourceValidation{
"iops": { "iops": {
Type: "integer", Type: "integer",
}, },
"selector": {
Type: "object",
Properties: map[string]apiextv1.JSONSchemaProps{
"matchExpressions": {
Type: "array",
Items: &apiextv1.JSONSchemaPropsOrArray{
Schema: &apiextv1.JSONSchemaProps{
Type: "object",
Required: []string{"key", "operator", "values"},
Properties: map[string]apiextv1.JSONSchemaProps{
"key": {
Type: "string",
},
"operator": {
Type: "string",
Enum: []apiextv1.JSON{
{
Raw: []byte(`"In"`),
},
{
Raw: []byte(`"NotIn"`),
},
{
Raw: []byte(`"Exists"`),
},
{
Raw: []byte(`"DoesNotExist"`),
},
},
},
"values": {
Type: "array",
Items: &apiextv1.JSONSchemaPropsOrArray{
Schema: &apiextv1.JSONSchemaProps{
Type: "string",
},
},
},
},
},
},
},
"matchLabels": {
Type: "object",
XPreserveUnknownFields: util.True(),
},
},
},
"size": { "size": {
Type: "string", Type: "string",
Description: "Value must not be zero", Description: "Value must not be zero",

View File

@ -132,6 +132,7 @@ type AWSGCPConfiguration struct {
AWSRegion string `json:"aws_region,omitempty"`
WALGSBucket string `json:"wal_gs_bucket,omitempty"`
GCPCredentials string `json:"gcp_credentials,omitempty"`
WALAZStorageAccount string `json:"wal_az_storage_account,omitempty"`
LogS3Bucket string `json:"log_s3_bucket,omitempty"`
KubeIAMRole string `json:"kube_iam_role,omitempty"`
AdditionalSecretMount string `json:"additional_secret_mount,omitempty"`

View File

@ -114,12 +114,13 @@ type MaintenanceWindow struct {
// Volume describes a single volume in the manifest.
type Volume struct {
Selector *metav1.LabelSelector `json:"selector,omitempty"`
Size string `json:"size"`
StorageClass string `json:"storageClass,omitempty"`
SubPath string `json:"subPath,omitempty"`
Iops *int64 `json:"iops,omitempty"`
Throughput *int64 `json:"throughput,omitempty"`
VolumeType string `json:"type,omitempty"`
}
// AdditionalVolume specs additional optional volumes for statefulset

View File

@ -29,6 +29,7 @@ package v1
import (
config "github.com/zalando/postgres-operator/pkg/util/config"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime" runtime "k8s.io/apimachinery/pkg/runtime"
) )
@ -314,22 +315,6 @@ func (in *MaintenanceWindow) DeepCopy() *MaintenanceWindow {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MajorVersionUpgradeConfiguration) DeepCopyInto(out *MajorVersionUpgradeConfiguration) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MajorVersionUpgradeConfiguration.
func (in *MajorVersionUpgradeConfiguration) DeepCopy() *MajorVersionUpgradeConfiguration {
if in == nil {
return nil
}
out := new(MajorVersionUpgradeConfiguration)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *OperatorConfiguration) DeepCopyInto(out *OperatorConfiguration) {
*out = *in
@ -385,7 +370,6 @@ func (in *OperatorConfigurationData) DeepCopyInto(out *OperatorConfigurationData
}
}
out.PostgresUsersConfiguration = in.PostgresUsersConfiguration
out.MajorVersionUpgrade = in.MajorVersionUpgrade
in.Kubernetes.DeepCopyInto(&out.Kubernetes)
out.PostgresPodResources = in.PostgresPodResources
out.Timeouts = in.Timeouts
@ -1197,6 +1181,11 @@ func (in UserFlags) DeepCopy() UserFlags {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Volume) DeepCopyInto(out *Volume) {
*out = *in
if in.Selector != nil {
in, out := &in.Selector, &out.Selector
*out = new(metav1.LabelSelector)
(*in).DeepCopyInto(*out)
}
if in.Iops != nil {
in, out := &in.Iops, &out.Iops
*out = new(int64)

View File

@ -285,6 +285,8 @@ func (c *Cluster) generateConnectionPoolerPodTemplate(role PostgresRole) (
},
}
tolerationsSpec := tolerations(&spec.Tolerations, c.OpConfig.PodToleration)
podTemplate := &v1.PodTemplateSpec{ podTemplate := &v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Labels: c.connectionPoolerLabels(role, true).MatchLabels, Labels: c.connectionPoolerLabels(role, true).MatchLabels,
@ -294,12 +296,18 @@ func (c *Cluster) generateConnectionPoolerPodTemplate(role PostgresRole) (
Spec: v1.PodSpec{
TerminationGracePeriodSeconds: &gracePeriod,
Containers: []v1.Container{poolerContainer},
// TODO: add tolerations to scheduler pooler on the same node
// as database
//Tolerations: *tolerationsSpec,
Tolerations: tolerationsSpec,
},
}
nodeAffinity := nodeAffinity(c.OpConfig.NodeReadinessLabel, spec.NodeAffinity)
if c.OpConfig.EnablePodAntiAffinity {
labelsSet := labels.Set(c.connectionPoolerLabels(role, false).MatchLabels)
podTemplate.Spec.Affinity = generatePodAffinity(labelsSet, c.OpConfig.PodAntiAffinityTopologyKey, nodeAffinity)
} else if nodeAffinity != nil {
podTemplate.Spec.Affinity = nodeAffinity
}
return podTemplate, nil
}

View File

@ -798,6 +798,12 @@ func (c *Cluster) generateSpiloPodEnvVars(uid types.UID, spiloConfiguration stri
envVars = append(envVars, v1.EnvVar{Name: "WAL_BUCKET_SCOPE_PREFIX", Value: ""})
}
if c.OpConfig.WALAZStorageAccount != "" {
envVars = append(envVars, v1.EnvVar{Name: "AZURE_STORAGE_ACCOUNT", Value: c.OpConfig.WALAZStorageAccount})
envVars = append(envVars, v1.EnvVar{Name: "WAL_BUCKET_SCOPE_SUFFIX", Value: getBucketScopeSuffix(string(uid))})
envVars = append(envVars, v1.EnvVar{Name: "WAL_BUCKET_SCOPE_PREFIX", Value: ""})
}
if c.OpConfig.GCPCredentials != "" {
envVars = append(envVars, v1.EnvVar{Name: "GOOGLE_APPLICATION_CREDENTIALS", Value: c.OpConfig.GCPCredentials})
}
@ -1170,9 +1176,6 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
}
// generate the spilo container
c.logger.Debugf("Generating Spilo container, environment variables")
c.logger.Debugf("%v", spiloEnvVars)
spiloContainer := generateContainer(constants.PostgresContainerName,
&effectiveDockerImage,
resourceRequirements,
@ -1275,7 +1278,7 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
}
if volumeClaimTemplate, err = generatePersistentVolumeClaimTemplate(spec.Volume.Size,
spec.Volume.StorageClass, spec.Volume.Selector); err != nil {
return nil, fmt.Errorf("could not generate volume claim template: %v", err)
}
@ -1523,7 +1526,8 @@ func (c *Cluster) addAdditionalVolumes(podSpec *v1.PodSpec,
podSpec.Volumes = volumes
}
func generatePersistentVolumeClaimTemplate(volumeSize, volumeStorageClass string,
volumeSelector *metav1.LabelSelector) (*v1.PersistentVolumeClaim, error) {
var storageClassName *string
@ -1556,6 +1560,7 @@ func generatePersistentVolumeClaimTemplate(volumeSize, volumeStorageClass string
},
StorageClassName: storageClassName,
VolumeMode: &volumeMode,
Selector: volumeSelector,
},
}
@ -1806,6 +1811,14 @@ func (c *Cluster) generateCloneEnvironment(description *acidv1.CloneDescription)
},
}
result = append(result, envs...)
} else if c.OpConfig.WALAZStorageAccount != "" {
envs := []v1.EnvVar{
{
Name: "CLONE_AZURE_STORAGE_ACCOUNT",
Value: c.OpConfig.WALAZStorageAccount,
},
}
result = append(result, envs...)
} else {
c.logger.Error("Cannot figure out S3 or GS bucket. Both are empty.")
}

View File

@ -1509,3 +1509,106 @@ func TestGenerateCapabilities(t *testing.T) {
}
}
}
func TestVolumeSelector(t *testing.T) {
testName := "TestVolumeSelector"
makeSpec := func(volume acidv1.Volume) acidv1.PostgresSpec {
return acidv1.PostgresSpec{
TeamID: "myapp",
NumberOfInstances: 0,
Resources: acidv1.Resources{
ResourceRequests: acidv1.ResourceDescription{CPU: "1", Memory: "10"},
ResourceLimits: acidv1.ResourceDescription{CPU: "1", Memory: "10"},
},
Volume: volume,
}
}
tests := []struct {
subTest string
volume acidv1.Volume
wantSelector *metav1.LabelSelector
}{
{
subTest: "PVC template has no selector",
volume: acidv1.Volume{
Size: "1G",
},
wantSelector: nil,
},
{
subTest: "PVC template has simple label selector",
volume: acidv1.Volume{
Size: "1G",
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{"environment": "unittest"},
},
},
wantSelector: &metav1.LabelSelector{
MatchLabels: map[string]string{"environment": "unittest"},
},
},
{
subTest: "PVC template has full selector",
volume: acidv1.Volume{
Size: "1G",
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{"environment": "unittest"},
MatchExpressions: []metav1.LabelSelectorRequirement{
{
Key: "flavour",
Operator: metav1.LabelSelectorOpIn,
Values: []string{"banana", "chocolate"},
},
},
},
},
wantSelector: &metav1.LabelSelector{
MatchLabels: map[string]string{"environment": "unittest"},
MatchExpressions: []metav1.LabelSelectorRequirement{
{
Key: "flavour",
Operator: metav1.LabelSelectorOpIn,
Values: []string{"banana", "chocolate"},
},
},
},
},
}
cluster := New(
Config{
OpConfig: config.Config{
PodManagementPolicy: "ordered_ready",
ProtectedRoles: []string{"admin"},
Auth: config.Auth{
SuperUsername: superUserName,
ReplicationUsername: replicationUserName,
},
},
}, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger, eventRecorder)
for _, tt := range tests {
pgSpec := makeSpec(tt.volume)
sts, err := cluster.generateStatefulSet(&pgSpec)
if err != nil {
t.Fatalf("%s %s: no statefulset created %v", testName, tt.subTest, err)
}
volIdx := len(sts.Spec.VolumeClaimTemplates)
for i, ct := range sts.Spec.VolumeClaimTemplates {
if ct.ObjectMeta.Name == constants.DataVolumeName {
volIdx = i
break
}
}
if volIdx == len(sts.Spec.VolumeClaimTemplates) {
t.Errorf("%s %s: no datavolume found in sts", testName, tt.subTest)
}
selector := sts.Spec.VolumeClaimTemplates[volIdx].Spec.Selector
if !reflect.DeepEqual(selector, tt.wantSelector) {
t.Errorf("%s %s: expected: %#v but got: %#v", testName, tt.subTest, tt.wantSelector, selector)
}
}
}

View File

@ -146,6 +146,7 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur
result.KubeIAMRole = fromCRD.AWSGCP.KubeIAMRole
result.WALGSBucket = fromCRD.AWSGCP.WALGSBucket
result.GCPCredentials = fromCRD.AWSGCP.GCPCredentials
result.WALAZStorageAccount = fromCRD.AWSGCP.WALAZStorageAccount
result.AdditionalSecretMount = fromCRD.AWSGCP.AdditionalSecretMount
result.AdditionalSecretMountPath = util.Coalesce(fromCRD.AWSGCP.AdditionalSecretMountPath, "/meta/credentials")
result.EnableEBSGp3Migration = fromCRD.AWSGCP.EnableEBSGp3Migration

View File

@ -167,6 +167,7 @@ type Config struct {
KubeIAMRole string `name:"kube_iam_role"`
WALGSBucket string `name:"wal_gs_bucket"`
GCPCredentials string `name:"gcp_credentials"`
WALAZStorageAccount string `name:"wal_az_storage_account"`
AdditionalSecretMount string `name:"additional_secret_mount"`
AdditionalSecretMountPath string `name:"additional_secret_mount_path" default:"/meta/credentials"`
EnableEBSGp3Migration bool `name:"enable_ebs_gp3_migration" default:"false"`