diff --git a/charts/postgres-operator/templates/clusterrole.yaml b/charts/postgres-operator/templates/clusterrole.yaml
index cdd58f5ba..a24571cf1 100644
--- a/charts/postgres-operator/templates/clusterrole.yaml
+++ b/charts/postgres-operator/templates/clusterrole.yaml
@@ -13,6 +13,7 @@ rules:
   - acid.zalan.do
   resources:
   - postgresqls
+  - postgresqls/status
   - operatorconfigurations
   verbs:
   - "*"
@@ -23,6 +24,8 @@ rules:
   verbs:
   - create
   - get
+  - patch
+  - update
 - apiGroups:
   - ""
   resources:
diff --git a/docs/quickstart.md b/docs/quickstart.md
index 244f45b54..09072ea50 100644
--- a/docs/quickstart.md
+++ b/docs/quickstart.md
@@ -32,16 +32,19 @@ kubectl create -f manifests/postgres-operator.yaml # deployment
 
 ## Helm chart
 
-Another possibility is using a provided [Helm](https://helm.sh/) chart which
-saves you these steps. Therefore, you would need to install the helm CLI on your
-machine. After initializing helm (and its server component Tiller) in your local
-cluster you can install the operator chart.
+Alternatively, the operator can be installed using the provided [Helm](https://helm.sh/)
+chart, which saves you the manual steps. To do so, install the helm CLI on your
+machine. After initializing helm (and its server component Tiller) in your local
+cluster, you can install the operator chart.
+You can define a release name that is prepended to the names of the operator's
+resources. Use `--name zalando` to match the default service account name,
+as older operator versions do not support custom names for service accounts.
 
 ```bash
 # 1) initialize helm
 helm init
 # 2) install postgres-operator chart
-helm install --name postgres-operator ./charts/postgres-operator
+helm install --name zalando ./charts/postgres-operator
 ```
 
 ## Create a Postgres cluster
diff --git a/manifests/complete-postgres-manifest.yaml b/manifests/complete-postgres-manifest.yaml
index 7f52b2b0a..07e4f8308 100644
--- a/manifests/complete-postgres-manifest.yaml
+++ b/manifests/complete-postgres-manifest.yaml
@@ -63,10 +63,11 @@ spec:
 #   uid: "efd12e58-5786-11e8-b5a7-06148230260c"
 #   cluster: "acid-batman"
 #   timestamp: "2017-12-19T12:40:33+01:00" # timezone required (offset relative to UTC, see RFC 3339 section 5.6)
+#   s3_wal_path: "s3://custom/path/to/bucket"
 # run periodic backups with k8s cron jobs
 # enableLogicalBackup: true
 # logicalBackupSchedule: "30 00 * * *"
   maintenanceWindows:
   - 01:00-06:00 #UTC
   - Sat:00:00-04:00
diff --git a/manifests/operator-service-account-rbac.yaml b/manifests/operator-service-account-rbac.yaml
index c070f4064..9e0b6c52e 100644
--- a/manifests/operator-service-account-rbac.yaml
+++ b/manifests/operator-service-account-rbac.yaml
@@ -14,6 +14,7 @@ rules:
   - acid.zalan.do
   resources:
   - postgresqls
+  - postgresqls/status
   - operatorconfigurations
   verbs:
   - "*"
@@ -24,6 +25,8 @@ rules:
   verbs:
   - create
   - get
+  - patch
+  - update
 - apiGroups:
   - ""
   resources:
@@ -137,7 +140,7 @@ rules:
   - clusterroles
   verbs:
   - bind
-  resourceNames: 
+  resourceNames:
   - zalando-postgres-operator
 - apiGroups:
   - batch
diff --git a/pkg/apis/acid.zalan.do/v1/const.go b/pkg/apis/acid.zalan.do/v1/const.go
index 59d6c1406..3cb1c1ade 100644
--- a/pkg/apis/acid.zalan.do/v1/const.go
+++ b/pkg/apis/acid.zalan.do/v1/const.go
@@ -2,14 +2,14 @@ package v1
 
 // ClusterStatusUnknown etc : status of a Postgres cluster known to the operator
 const (
-	ClusterStatusUnknown      PostgresStatus = ""
-	ClusterStatusCreating     PostgresStatus = "Creating"
-	ClusterStatusUpdating     PostgresStatus = "Updating"
- ClusterStatusUpdateFailed PostgresStatus = "UpdateFailed" - ClusterStatusSyncFailed PostgresStatus = "SyncFailed" - ClusterStatusAddFailed PostgresStatus = "CreateFailed" - ClusterStatusRunning PostgresStatus = "Running" - ClusterStatusInvalid PostgresStatus = "Invalid" + ClusterStatusUnknown = "" + ClusterStatusCreating = "Creating" + ClusterStatusUpdating = "Updating" + ClusterStatusUpdateFailed = "UpdateFailed" + ClusterStatusSyncFailed = "SyncFailed" + ClusterStatusAddFailed = "CreateFailed" + ClusterStatusRunning = "Running" + ClusterStatusInvalid = "Invalid" ) const ( diff --git a/pkg/apis/acid.zalan.do/v1/marshal.go b/pkg/apis/acid.zalan.do/v1/marshal.go index 823ff0ef2..d180f784c 100644 --- a/pkg/apis/acid.zalan.do/v1/marshal.go +++ b/pkg/apis/acid.zalan.do/v1/marshal.go @@ -8,6 +8,7 @@ import ( ) type postgresqlCopy Postgresql +type postgresStatusCopy PostgresStatus // MarshalJSON converts a maintenance window definition to JSON. func (m *MaintenanceWindow) MarshalJSON() ([]byte, error) { @@ -69,6 +70,26 @@ func (m *MaintenanceWindow) UnmarshalJSON(data []byte) error { return nil } +// UnmarshalJSON converts a JSON to the status subresource definition. +func (ps *PostgresStatus) UnmarshalJSON(data []byte) error { + var ( + tmp postgresStatusCopy + status string + ) + + err := json.Unmarshal(data, &tmp) + if err != nil { + metaErr := json.Unmarshal(data, &status) + if metaErr != nil { + return fmt.Errorf("Could not parse status: %v; err %v", string(data), metaErr) + } + tmp.PostgresClusterStatus = status + } + *ps = PostgresStatus(tmp) + + return nil +} + // UnmarshalJSON converts a JSON into the PostgreSQL object. func (p *Postgresql) UnmarshalJSON(data []byte) error { var tmp postgresqlCopy @@ -81,7 +102,7 @@ func (p *Postgresql) UnmarshalJSON(data []byte) error { } tmp.Error = err.Error() - tmp.Status = ClusterStatusInvalid + tmp.Status = PostgresStatus{PostgresClusterStatus: ClusterStatusInvalid} *p = Postgresql(tmp) @@ -91,10 +112,10 @@ func (p *Postgresql) UnmarshalJSON(data []byte) error { if clusterName, err := extractClusterName(tmp2.ObjectMeta.Name, tmp2.Spec.TeamID); err != nil { tmp2.Error = err.Error() - tmp2.Status = ClusterStatusInvalid + tmp2.Status = PostgresStatus{PostgresClusterStatus: ClusterStatusInvalid} } else if err := validateCloneClusterDescription(&tmp2.Spec.Clone); err != nil { tmp2.Error = err.Error() - tmp2.Status = ClusterStatusInvalid + tmp2.Status = PostgresStatus{PostgresClusterStatus: ClusterStatusInvalid} } else { tmp2.Spec.ClusterName = clusterName } diff --git a/pkg/apis/acid.zalan.do/v1/postgresql_type.go b/pkg/apis/acid.zalan.do/v1/postgresql_type.go index 8f9118d52..87a079da9 100644 --- a/pkg/apis/acid.zalan.do/v1/postgresql_type.go +++ b/pkg/apis/acid.zalan.do/v1/postgresql_type.go @@ -16,7 +16,7 @@ type Postgresql struct { metav1.ObjectMeta `json:"metadata,omitempty"` Spec PostgresSpec `json:"spec"` - Status PostgresStatus `json:"status,omitempty"` + Status PostgresStatus `json:"status"` Error string `json:"-"` } @@ -116,6 +116,7 @@ type CloneDescription struct { ClusterName string `json:"cluster,omitempty"` UID string `json:"uid,omitempty"` EndTimestamp string `json:"timestamp,omitempty"` + S3WalPath string `json:"s3_wal_path,omitempty"` } // Sidecar defines a container to be run in the same pod as the Postgres container. @@ -131,4 +132,6 @@ type Sidecar struct { type UserFlags []string // PostgresStatus contains status of the PostgreSQL cluster (running, creation failed etc.) 
-type PostgresStatus string +type PostgresStatus struct { + PostgresClusterStatus string `json:"PostgresClusterStatus"` +} diff --git a/pkg/apis/acid.zalan.do/v1/util.go b/pkg/apis/acid.zalan.do/v1/util.go index 0a3267972..db6efcd71 100644 --- a/pkg/apis/acid.zalan.do/v1/util.go +++ b/pkg/apis/acid.zalan.do/v1/util.go @@ -85,12 +85,22 @@ func validateCloneClusterDescription(clone *CloneDescription) error { } // Success of the current Status -func (status PostgresStatus) Success() bool { - return status != ClusterStatusAddFailed && - status != ClusterStatusUpdateFailed && - status != ClusterStatusSyncFailed +func (postgresStatus PostgresStatus) Success() bool { + return postgresStatus.PostgresClusterStatus != ClusterStatusAddFailed && + postgresStatus.PostgresClusterStatus != ClusterStatusUpdateFailed && + postgresStatus.PostgresClusterStatus != ClusterStatusSyncFailed } -func (status PostgresStatus) String() string { - return string(status) +// Running status of cluster +func (postgresStatus PostgresStatus) Running() bool { + return postgresStatus.PostgresClusterStatus == ClusterStatusRunning +} + +// Creating status of cluster +func (postgresStatus PostgresStatus) Creating() bool { + return postgresStatus.PostgresClusterStatus == ClusterStatusCreating +} + +func (postgresStatus PostgresStatus) String() string { + return postgresStatus.PostgresClusterStatus } diff --git a/pkg/apis/acid.zalan.do/v1/util_test.go b/pkg/apis/acid.zalan.do/v1/util_test.go index 01be31e88..537619aaf 100644 --- a/pkg/apis/acid.zalan.do/v1/util_test.go +++ b/pkg/apis/acid.zalan.do/v1/util_test.go @@ -61,12 +61,12 @@ var cloneClusterDescriptions = []struct { in *CloneDescription err error }{ - {&CloneDescription{"foo+bar", "", "NotEmpty"}, nil}, - {&CloneDescription{"foo+bar", "", ""}, + {&CloneDescription{"foo+bar", "", "NotEmpty", ""}, nil}, + {&CloneDescription{"foo+bar", "", "", ""}, errors.New(`clone cluster name must confirm to DNS-1035, regex used for validation is "^[a-z]([-a-z0-9]*[a-z0-9])?$"`)}, - {&CloneDescription{"foobar123456789012345678901234567890123456789012345678901234567890", "", ""}, + {&CloneDescription{"foobar123456789012345678901234567890123456789012345678901234567890", "", "", ""}, errors.New("clone cluster name must be no longer than 63 characters")}, - {&CloneDescription{"foobar", "", ""}, nil}, + {&CloneDescription{"foobar", "", "", ""}, nil}, } var maintenanceWindows = []struct { @@ -111,101 +111,139 @@ var maintenanceWindows = []struct { {[]byte(`"Mon:00:00"`), MaintenanceWindow{}, errors.New("incorrect maintenance window format")}, {[]byte(`"Mon:00:00-00:00:00"`), MaintenanceWindow{}, errors.New("could not parse end time: incorrect time format")}} +var postgresStatus = []struct { + in []byte + out PostgresStatus + err error +}{ + {[]byte(`{"PostgresClusterStatus":"Running"}`), + PostgresStatus{PostgresClusterStatus: ClusterStatusRunning}, nil}, + {[]byte(`{"PostgresClusterStatus":""}`), + PostgresStatus{PostgresClusterStatus: ClusterStatusUnknown}, nil}, + {[]byte(`"Running"`), + PostgresStatus{PostgresClusterStatus: ClusterStatusRunning}, nil}, + {[]byte(`""`), + PostgresStatus{PostgresClusterStatus: ClusterStatusUnknown}, nil}} + var unmarshalCluster = []struct { in []byte out Postgresql marshal []byte err error -}{{ - []byte(`{ - "kind": "Postgresql","apiVersion": "acid.zalan.do/v1", - "metadata": {"name": "acid-testcluster1"}, "spec": {"teamId": 100}}`), - Postgresql{ - TypeMeta: metav1.TypeMeta{ - Kind: "Postgresql", - APIVersion: "acid.zalan.do/v1", +}{ + // example with simple 
status field + { + in: []byte(`{ + "kind": "Postgresql","apiVersion": "acid.zalan.do/v1", + "metadata": {"name": "acid-testcluster1"}, "spec": {"teamId": 100}}`), + out: Postgresql{ + TypeMeta: metav1.TypeMeta{ + Kind: "Postgresql", + APIVersion: "acid.zalan.do/v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "acid-testcluster1", + }, + Status: PostgresStatus{PostgresClusterStatus: ClusterStatusInvalid}, + // This error message can vary between Go versions, so compute it for the current version. + Error: json.Unmarshal([]byte(`{"teamId": 0}`), &PostgresSpec{}).Error(), }, - ObjectMeta: metav1.ObjectMeta{ - Name: "acid-testcluster1", + marshal: []byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"acid-testcluster1","creationTimestamp":null},"spec":{"postgresql":{"version":"","parameters":null},"volume":{"size":"","storageClass":""},"patroni":{"initdb":null,"pg_hba":null,"ttl":0,"loop_wait":0,"retry_timeout":0,"maximum_lag_on_failover":0,"slots":null},"resources":{"requests":{"cpu":"","memory":""},"limits":{"cpu":"","memory":""}},"teamId":"","allowedSourceRanges":null,"numberOfInstances":0,"users":null,"clone":{}},"status":"Invalid"}`), + err: nil}, + // example with /status subresource + { + in: []byte(`{ + "kind": "Postgresql","apiVersion": "acid.zalan.do/v1", + "metadata": {"name": "acid-testcluster1"}, "spec": {"teamId": 100}}`), + out: Postgresql{ + TypeMeta: metav1.TypeMeta{ + Kind: "Postgresql", + APIVersion: "acid.zalan.do/v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "acid-testcluster1", + }, + Status: PostgresStatus{PostgresClusterStatus: ClusterStatusInvalid}, + // This error message can vary between Go versions, so compute it for the current version. + Error: json.Unmarshal([]byte(`{"teamId": 0}`), &PostgresSpec{}).Error(), }, - Status: ClusterStatusInvalid, - // This error message can vary between Go versions, so compute it for the current version. 
- Error: json.Unmarshal([]byte(`{"teamId": 0}`), &PostgresSpec{}).Error(), - }, - []byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"acid-testcluster1","creationTimestamp":null},"spec":{"postgresql":{"version":"","parameters":null},"volume":{"size":"","storageClass":""},"patroni":{"initdb":null,"pg_hba":null,"ttl":0,"loop_wait":0,"retry_timeout":0,"maximum_lag_on_failover":0,"slots":null},"resources":{"requests":{"cpu":"","memory":""},"limits":{"cpu":"","memory":""}},"teamId":"","allowedSourceRanges":null,"numberOfInstances":0,"users":null,"clone":{}},"status":"Invalid"}`), nil}, - {[]byte(`{ - "kind": "Postgresql", - "apiVersion": "acid.zalan.do/v1", - "metadata": { - "name": "acid-testcluster1" - }, - "spec": { - "teamId": "ACID", - "volume": { - "size": "5Gi", - "storageClass": "SSD" - }, - "numberOfInstances": 2, - "users": { - "zalando": [ - "superuser", - "createdb" - ] - }, - "allowedSourceRanges": [ - "127.0.0.1/32" - ], - "postgresql": { - "version": "9.6", - "parameters": { - "shared_buffers": "32MB", - "max_connections": "10", - "log_statement": "all" - } - }, - "resources": { - "requests": { - "cpu": "10m", - "memory": "50Mi" - }, - "limits": { - "cpu": "300m", - "memory": "3000Mi" - } - }, - "clone" : { - "cluster": "acid-batman" - }, - "patroni": { - "initdb": { - "encoding": "UTF8", - "locale": "en_US.UTF-8", - "data-checksums": "true" - }, - "pg_hba": [ - "hostssl all all 0.0.0.0/0 md5", - "host all all 0.0.0.0/0 md5" - ], - "ttl": 30, - "loop_wait": 10, - "retry_timeout": 10, - "maximum_lag_on_failover": 33554432, - "slots" : { - "permanent_logical_1" : { - "type" : "logical", - "database" : "foo", - "plugin" : "pgoutput" - } + marshal: []byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"acid-testcluster1","creationTimestamp":null},"spec":{"postgresql":{"version":"","parameters":null},"volume":{"size":"","storageClass":""},"patroni":{"initdb":null,"pg_hba":null,"ttl":0,"loop_wait":0,"retry_timeout":0,"maximum_lag_on_failover":0,"slots":null},"resources":{"requests":{"cpu":"","memory":""},"limits":{"cpu":"","memory":""}},"teamId":"","allowedSourceRanges":null,"numberOfInstances":0,"users":null,"clone":{}},"status":{"PostgresClusterStatus":"Invalid"}}`), + err: nil}, + // example with detailed input manifest + { + in: []byte(`{ + "kind": "Postgresql", + "apiVersion": "acid.zalan.do/v1", + "metadata": { + "name": "acid-testcluster1" + }, + "spec": { + "teamId": "ACID", + "volume": { + "size": "5Gi", + "storageClass": "SSD" + }, + "numberOfInstances": 2, + "users": { + "zalando": [ + "superuser", + "createdb" + ] + }, + "allowedSourceRanges": [ + "127.0.0.1/32" + ], + "postgresql": { + "version": "9.6", + "parameters": { + "shared_buffers": "32MB", + "max_connections": "10", + "log_statement": "all" + } + }, + "resources": { + "requests": { + "cpu": "10m", + "memory": "50Mi" + }, + "limits": { + "cpu": "300m", + "memory": "3000Mi" + } + }, + "clone" : { + "cluster": "acid-batman" + }, + "patroni": { + "initdb": { + "encoding": "UTF8", + "locale": "en_US.UTF-8", + "data-checksums": "true" + }, + "pg_hba": [ + "hostssl all all 0.0.0.0/0 md5", + "host all all 0.0.0.0/0 md5" + ], + "ttl": 30, + "loop_wait": 10, + "retry_timeout": 10, + "maximum_lag_on_failover": 33554432, + "slots" : { + "permanent_logical_1" : { + "type" : "logical", + "database" : "foo", + "plugin" : "pgoutput" + } + } + }, + "maintenanceWindows": [ + "Mon:01:00-06:00", + "Sat:00:00-04:00", + "05:00-05:15" + ] } - }, - "maintenanceWindows": [ - 
"Mon:01:00-06:00", - "Sat:00:00-04:00", - "05:00-05:15" - ] - } -}`), - Postgresql{ + }`), + out: Postgresql{ TypeMeta: metav1.TypeMeta{ Kind: "Postgresql", APIVersion: "acid.zalan.do/v1", @@ -273,10 +311,12 @@ var unmarshalCluster = []struct { }, Error: "", }, - []byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"acid-testcluster1","creationTimestamp":null},"spec":{"postgresql":{"version":"9.6","parameters":{"log_statement":"all","max_connections":"10","shared_buffers":"32MB"}},"volume":{"size":"5Gi","storageClass":"SSD"},"patroni":{"initdb":{"data-checksums":"true","encoding":"UTF8","locale":"en_US.UTF-8"},"pg_hba":["hostssl all all 0.0.0.0/0 md5","host all all 0.0.0.0/0 md5"],"ttl":30,"loop_wait":10,"retry_timeout":10,"maximum_lag_on_failover":33554432,"slots":{"permanent_logical_1":{"database":"foo","plugin":"pgoutput","type":"logical"}}},"resources":{"requests":{"cpu":"10m","memory":"50Mi"},"limits":{"cpu":"300m","memory":"3000Mi"}},"teamId":"ACID","allowedSourceRanges":["127.0.0.1/32"],"numberOfInstances":2,"users":{"zalando":["superuser","createdb"]},"maintenanceWindows":["Mon:01:00-06:00","Sat:00:00-04:00","05:00-05:15"],"clone":{"cluster":"acid-batman"}}}`), nil}, + marshal: []byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"acid-testcluster1","creationTimestamp":null},"spec":{"postgresql":{"version":"9.6","parameters":{"log_statement":"all","max_connections":"10","shared_buffers":"32MB"}},"volume":{"size":"5Gi","storageClass":"SSD"},"patroni":{"initdb":{"data-checksums":"true","encoding":"UTF8","locale":"en_US.UTF-8"},"pg_hba":["hostssl all all 0.0.0.0/0 md5","host all all 0.0.0.0/0 md5"],"ttl":30,"loop_wait":10,"retry_timeout":10,"maximum_lag_on_failover":33554432,"slots":{"permanent_logical_1":{"database":"foo","plugin":"pgoutput","type":"logical"}}},"resources":{"requests":{"cpu":"10m","memory":"50Mi"},"limits":{"cpu":"300m","memory":"3000Mi"}},"teamId":"ACID","allowedSourceRanges":["127.0.0.1/32"],"numberOfInstances":2,"users":{"zalando":["superuser","createdb"]},"maintenanceWindows":["Mon:01:00-06:00","Sat:00:00-04:00","05:00-05:15"],"clone":{"cluster":"acid-batman"}},"status":{"PostgresClusterStatus":""}}`), + err: nil}, + // example with teamId set in input { - []byte(`{"kind": "Postgresql","apiVersion": "acid.zalan.do/v1","metadata": {"name": "teapot-testcluster1"}, "spec": {"teamId": "acid"}}`), - Postgresql{ + in: []byte(`{"kind": "Postgresql","apiVersion": "acid.zalan.do/v1","metadata": {"name": "teapot-testcluster1"}, "spec": {"teamId": "acid"}}`), + out: Postgresql{ TypeMeta: metav1.TypeMeta{ Kind: "Postgresql", APIVersion: "acid.zalan.do/v1", @@ -285,10 +325,12 @@ var unmarshalCluster = []struct { Name: "teapot-testcluster1", }, Spec: PostgresSpec{TeamID: "acid"}, - Status: ClusterStatusInvalid, + Status: PostgresStatus{PostgresClusterStatus: ClusterStatusInvalid}, Error: errors.New("name must match {TEAM}-{NAME} format").Error(), }, - []byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"teapot-testcluster1","creationTimestamp":null},"spec":{"postgresql":{"version":"","parameters":null},"volume":{"size":"","storageClass":""},"patroni":{"initdb":null,"pg_hba":null,"ttl":0,"loop_wait":0,"retry_timeout":0,"maximum_lag_on_failover":0,"slots":null},"resources":{"requests":{"cpu":"","memory":""},"limits":{"cpu":"","memory":""}},"teamId":"acid","allowedSourceRanges":null,"numberOfInstances":0,"users":null,"clone":{}},"status":"Invalid"}`), nil}, + marshal: 
[]byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"teapot-testcluster1","creationTimestamp":null},"spec":{"postgresql":{"version":"","parameters":null},"volume":{"size":"","storageClass":""},"patroni":{"initdb":null,"pg_hba":null,"ttl":0,"loop_wait":0,"retry_timeout":0,"maximum_lag_on_failover":0,"slots":null},"resources":{"requests":{"cpu":"","memory":""},"limits":{"cpu":"","memory":""}},"teamId":"acid","allowedSourceRanges":null,"numberOfInstances":0,"users":null,"clone":{}},"status":{"PostgresClusterStatus":"Invalid"}}`), + err: nil}, + // clone example { in: []byte(`{"kind": "Postgresql","apiVersion": "acid.zalan.do/v1","metadata": {"name": "acid-testcluster1"}, "spec": {"teamId": "acid", "clone": {"cluster": "team-batman"}}}`), out: Postgresql{ @@ -308,22 +350,26 @@ var unmarshalCluster = []struct { }, Error: "", }, - marshal: []byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"acid-testcluster1","creationTimestamp":null},"spec":{"postgresql":{"version":"","parameters":null},"volume":{"size":"","storageClass":""},"patroni":{"initdb":null,"pg_hba":null,"ttl":0,"loop_wait":0,"retry_timeout":0,"maximum_lag_on_failover":0,"slots":null},"resources":{"requests":{"cpu":"","memory":""},"limits":{"cpu":"","memory":""}},"teamId":"acid","allowedSourceRanges":null,"numberOfInstances":0,"users":null,"clone":{"cluster":"team-batman"}}}`), err: nil}, - {[]byte(`{"kind": "Postgresql","apiVersion": "acid.zalan.do/v1"`), - Postgresql{}, - []byte{}, - errors.New("unexpected end of JSON input")}, - {[]byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"acid-testcluster","creationTimestamp":qaz},"spec":{"postgresql":{"version":"","parameters":null},"volume":{"size":"","storageClass":""},"patroni":{"initdb":null,"pg_hba":null,"ttl":0,"loop_wait":0,"retry_timeout":0,"maximum_lag_on_failover":0,"slots":null},"resources":{"requests":{"cpu":"","memory":""},"limits":{"cpu":"","memory":""}},"teamId":"acid","allowedSourceRanges":null,"numberOfInstances":0,"users":null,"clone":{}},"status":"Invalid"}`), - Postgresql{}, - []byte{}, - errors.New("invalid character 'q' looking for beginning of value")}} + marshal: []byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"acid-testcluster1","creationTimestamp":null},"spec":{"postgresql":{"version":"","parameters":null},"volume":{"size":"","storageClass":""},"patroni":{"initdb":null,"pg_hba":null,"ttl":0,"loop_wait":0,"retry_timeout":0,"maximum_lag_on_failover":0,"slots":null},"resources":{"requests":{"cpu":"","memory":""},"limits":{"cpu":"","memory":""}},"teamId":"acid","allowedSourceRanges":null,"numberOfInstances":0,"users":null,"clone":{"cluster":"team-batman"}},"status":{"PostgresClusterStatus":""}}`), + err: nil}, + // erroneous examples + { + in: []byte(`{"kind": "Postgresql","apiVersion": "acid.zalan.do/v1"`), + out: Postgresql{}, + marshal: []byte{}, + err: errors.New("unexpected end of JSON input")}, + { + in: 
[]byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"acid-testcluster","creationTimestamp":qaz},"spec":{"postgresql":{"version":"","parameters":null},"volume":{"size":"","storageClass":""},"patroni":{"initdb":null,"pg_hba":null,"ttl":0,"loop_wait":0,"retry_timeout":0,"maximum_lag_on_failover":0,"slots":null},"resources":{"requests":{"cpu":"","memory":""},"limits":{"cpu":"","memory":""}},"teamId":"acid","allowedSourceRanges":null,"numberOfInstances":0,"users":null,"clone":{}},"status":{"PostgresClusterStatus":"Invalid"}}`), + out: Postgresql{}, + marshal: []byte{}, + err: errors.New("invalid character 'q' looking for beginning of value")}} var postgresqlList = []struct { in []byte out PostgresqlList err error }{ - {[]byte(`{"apiVersion":"v1","items":[{"apiVersion":"acid.zalan.do/v1","kind":"Postgresql","metadata":{"labels":{"team":"acid"},"name":"acid-testcluster42","namespace":"default","resourceVersion":"30446957","selfLink":"/apis/acid.zalan.do/v1/namespaces/default/postgresqls/acid-testcluster42","uid":"857cd208-33dc-11e7-b20a-0699041e4b03"},"spec":{"allowedSourceRanges":["185.85.220.0/22"],"numberOfInstances":1,"postgresql":{"version":"9.6"},"teamId":"acid","volume":{"size":"10Gi"}},"status":"Running"}],"kind":"List","metadata":{},"resourceVersion":"","selfLink":""}`), + {[]byte(`{"apiVersion":"v1","items":[{"apiVersion":"acid.zalan.do/v1","kind":"Postgresql","metadata":{"labels":{"team":"acid"},"name":"acid-testcluster42","namespace":"default","resourceVersion":"30446957","selfLink":"/apis/acid.zalan.do/v1/namespaces/default/postgresqls/acid-testcluster42","uid":"857cd208-33dc-11e7-b20a-0699041e4b03"},"spec":{"allowedSourceRanges":["185.85.220.0/22"],"numberOfInstances":1,"postgresql":{"version":"9.6"},"teamId":"acid","volume":{"size":"10Gi"}},"status":{"PostgresClusterStatus":"Running"}}],"kind":"List","metadata":{},"resourceVersion":"","selfLink":""}`), PostgresqlList{ TypeMeta: metav1.TypeMeta{ Kind: "List", @@ -350,8 +396,10 @@ var postgresqlList = []struct { AllowedSourceRanges: []string{"185.85.220.0/22"}, NumberOfInstances: 1, }, - Status: ClusterStatusRunning, - Error: "", + Status: PostgresStatus{ + PostgresClusterStatus: ClusterStatusRunning, + }, + Error: "", }}, }, nil}, @@ -469,6 +517,25 @@ func TestMarshalMaintenanceWindow(t *testing.T) { } } +func TestUnmarshalPostgresStatus(t *testing.T) { + for _, tt := range postgresStatus { + var ps PostgresStatus + err := ps.UnmarshalJSON(tt.in) + if err != nil { + if tt.err == nil || err.Error() != tt.err.Error() { + t.Errorf("CR status unmarshal expected error: %v, got %v", tt.err, err) + } + continue + //} else if tt.err != nil { + //t.Errorf("Expected error: %v", tt.err) + } + + if !reflect.DeepEqual(ps, tt.out) { + t.Errorf("Expected status: %#v, got: %#v", tt.out, ps) + } + } +} + func TestPostgresUnmarshal(t *testing.T) { for _, tt := range unmarshalCluster { var cluster Postgresql @@ -494,12 +561,26 @@ func TestMarshal(t *testing.T) { continue } + // Unmarshal and marshal example to capture api changes + var cluster Postgresql + err := cluster.UnmarshalJSON(tt.marshal) + if err != nil { + if tt.err == nil || err.Error() != tt.err.Error() { + t.Errorf("Backwards compatibility unmarshal expected error: %v, got: %v", tt.err, err) + } + continue + } + expected, err := json.Marshal(cluster) + if err != nil { + t.Errorf("Backwards compatibility marshal error: %v", err) + } + m, err := json.Marshal(tt.out) if err != nil { t.Errorf("Marshal error: %v", err) } - if !bytes.Equal(m, tt.marshal) { - 
t.Errorf("Marshal Postgresql \nexpected: %q, \ngot: %q", string(tt.marshal), string(m)) + if !bytes.Equal(m, expected) { + t.Errorf("Marshal Postgresql \nexpected: %q, \ngot: %q", string(expected), string(m)) } } } diff --git a/pkg/apis/acid.zalan.do/v1/zz_generated.deepcopy.go b/pkg/apis/acid.zalan.do/v1/zz_generated.deepcopy.go index 8b566bb42..7a27bb794 100644 --- a/pkg/apis/acid.zalan.do/v1/zz_generated.deepcopy.go +++ b/pkg/apis/acid.zalan.do/v1/zz_generated.deepcopy.go @@ -496,6 +496,22 @@ func (in *PostgresSpec) DeepCopy() *PostgresSpec { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PostgresStatus) DeepCopyInto(out *PostgresStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresStatus. +func (in *PostgresStatus) DeepCopy() *PostgresStatus { + if in == nil { + return nil + } + out := new(PostgresStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PostgresUsersConfiguration) DeepCopyInto(out *PostgresUsersConfiguration) { *out = *in @@ -518,6 +534,7 @@ func (in *Postgresql) DeepCopyInto(out *Postgresql) { out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status return } diff --git a/pkg/cluster/cluster.go b/pkg/cluster/cluster.go index 7921ac739..9cbc46e70 100644 --- a/pkg/cluster/cluster.go +++ b/pkg/cluster/cluster.go @@ -4,6 +4,7 @@ package cluster import ( "database/sql" + "encoding/json" "fmt" "reflect" "regexp" @@ -19,8 +20,6 @@ import ( "k8s.io/client-go/rest" "k8s.io/client-go/tools/cache" - "encoding/json" - acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1" "github.com/zalando/postgres-operator/pkg/spec" "github.com/zalando/postgres-operator/pkg/util" @@ -150,21 +149,24 @@ func (c *Cluster) setProcessName(procName string, args ...interface{}) { } } -func (c *Cluster) setStatus(status acidv1.PostgresStatus) { - // TODO: eventually switch to updateStatus() for kubernetes 1.11 and above - var ( - err error - b []byte - ) - if b, err = json.Marshal(status); err != nil { +// SetStatus of Postgres cluster +// TODO: eventually switch to updateStatus() for kubernetes 1.11 and above +func (c *Cluster) setStatus(status string) { + var pgStatus acidv1.PostgresStatus + pgStatus.PostgresClusterStatus = status + + patch, err := json.Marshal(struct { + PgStatus interface{} `json:"status"` + }{&pgStatus}) + + if err != nil { c.logger.Errorf("could not marshal status: %v", err) } - patch := []byte(fmt.Sprintf(`{"status": %s}`, string(b))) // we cannot do a full scale update here without fetching the previous manifest (as the resourceVersion may differ), // however, we could do patch without it. In the future, once /status subresource is there (starting Kubernets 1.11) // we should take advantage of it. 
- newspec, err := c.KubeClient.AcidV1ClientSet.AcidV1().Postgresqls(c.clusterNamespace()).Patch(c.Name, types.MergePatchType, patch) + newspec, err := c.KubeClient.AcidV1ClientSet.AcidV1().Postgresqls(c.clusterNamespace()).Patch(c.Name, types.MergePatchType, patch, "status") if err != nil { c.logger.Errorf("could not update status: %v", err) } @@ -173,7 +175,7 @@ func (c *Cluster) setStatus(status acidv1.PostgresStatus) { } func (c *Cluster) isNewCluster() bool { - return c.Status == acidv1.ClusterStatusCreating + return c.Status.Creating() } // initUsers populates c.systemUsers and c.pgUsers maps. diff --git a/pkg/cluster/cluster_test.go b/pkg/cluster/cluster_test.go index 5ff00c7b3..6f10aae22 100644 --- a/pkg/cluster/cluster_test.go +++ b/pkg/cluster/cluster_test.go @@ -20,10 +20,20 @@ const ( ) var logger = logrus.New().WithField("test", "cluster") -var cl = New(Config{OpConfig: config.Config{ProtectedRoles: []string{"admin"}, - Auth: config.Auth{SuperUsername: superUserName, - ReplicationUsername: replicationUserName}}}, - k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger) +var cl = New( + Config{ + OpConfig: config.Config{ + ProtectedRoles: []string{"admin"}, + Auth: config.Auth{ + SuperUsername: superUserName, + ReplicationUsername: replicationUserName, + }, + }, + }, + k8sutil.NewMockKubernetesClient(), + acidv1.Postgresql{}, + logger, +) func TestInitRobotUsers(t *testing.T) { testName := "TestInitRobotUsers" diff --git a/pkg/cluster/k8sres.go b/pkg/cluster/k8sres.go index 84693d9c3..fe1d3eb5d 100644 --- a/pkg/cluster/k8sres.go +++ b/pkg/cluster/k8sres.go @@ -1016,6 +1016,7 @@ func generatePersistentVolumeClaimTemplate(volumeSize, volumeStorageClass string return nil, fmt.Errorf("could not parse volume size: %v", err) } + volumeMode := v1.PersistentVolumeFilesystem volumeClaim := &v1.PersistentVolumeClaim{ ObjectMeta: metadata, Spec: v1.PersistentVolumeClaimSpec{ @@ -1026,6 +1027,7 @@ func generatePersistentVolumeClaimTemplate(volumeSize, volumeStorageClass string }, }, StorageClassName: storageClassName, + VolumeMode: &volumeMode, }, } @@ -1218,10 +1220,37 @@ func (c *Cluster) generateCloneEnvironment(description *acidv1.CloneDescription) }) } else { // cloning with S3, find out the bucket to clone + msg := "Clone from S3 bucket" + c.logger.Info(msg, description.S3WalPath) + + if description.S3WalPath == "" { + msg := "Figure out which S3 bucket to use from env" + c.logger.Info(msg, description.S3WalPath) + + envs := []v1.EnvVar{ + v1.EnvVar{ + Name: "CLONE_WAL_S3_BUCKET", + Value: c.OpConfig.WALES3Bucket, + }, + v1.EnvVar{ + Name: "CLONE_WAL_BUCKET_SCOPE_SUFFIX", + Value: getBucketScopeSuffix(description.UID), + }, + } + + result = append(result, envs...) 
+ } else { + msg := "Use custom parsed S3WalPath %s from the manifest" + c.logger.Warningf(msg, description.S3WalPath) + + result = append(result, v1.EnvVar{ + Name: "CLONE_WALE_S3_PREFIX", + Value: description.S3WalPath, + }) + } + result = append(result, v1.EnvVar{Name: "CLONE_METHOD", Value: "CLONE_WITH_WALE"}) - result = append(result, v1.EnvVar{Name: "CLONE_WAL_S3_BUCKET", Value: c.OpConfig.WALES3Bucket}) result = append(result, v1.EnvVar{Name: "CLONE_TARGET_TIME", Value: description.EndTimestamp}) - result = append(result, v1.EnvVar{Name: "CLONE_WAL_BUCKET_SCOPE_SUFFIX", Value: getBucketScopeSuffix(description.UID)}) result = append(result, v1.EnvVar{Name: "CLONE_WAL_BUCKET_SCOPE_PREFIX", Value: ""}) } diff --git a/pkg/cluster/k8sres_test.go b/pkg/cluster/k8sres_test.go index 6b96f8ec1..dc48c0389 100644 --- a/pkg/cluster/k8sres_test.go +++ b/pkg/cluster/k8sres_test.go @@ -129,3 +129,82 @@ func TestShmVolume(t *testing.T) { } } } + +func TestCloneEnv(t *testing.T) { + testName := "TestCloneEnv" + tests := []struct { + subTest string + cloneOpts *acidv1.CloneDescription + env v1.EnvVar + envPos int + }{ + { + subTest: "custom s3 path", + cloneOpts: &acidv1.CloneDescription{ + ClusterName: "test-cluster", + S3WalPath: "s3://some/path/", + EndTimestamp: "somewhen", + }, + env: v1.EnvVar{ + Name: "CLONE_WALE_S3_PREFIX", + Value: "s3://some/path/", + }, + envPos: 1, + }, + { + subTest: "generated s3 path, bucket", + cloneOpts: &acidv1.CloneDescription{ + ClusterName: "test-cluster", + EndTimestamp: "somewhen", + UID: "0000", + }, + env: v1.EnvVar{ + Name: "CLONE_WAL_S3_BUCKET", + Value: "wale-bucket", + }, + envPos: 1, + }, + { + subTest: "generated s3 path, target time", + cloneOpts: &acidv1.CloneDescription{ + ClusterName: "test-cluster", + EndTimestamp: "somewhen", + UID: "0000", + }, + env: v1.EnvVar{ + Name: "CLONE_TARGET_TIME", + Value: "somewhen", + }, + envPos: 4, + }, + } + + var cluster = New( + Config{ + OpConfig: config.Config{ + WALES3Bucket: "wale-bucket", + ProtectedRoles: []string{"admin"}, + Auth: config.Auth{ + SuperUsername: superUserName, + ReplicationUsername: replicationUserName, + }, + }, + }, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger) + + for _, tt := range tests { + envs := cluster.generateCloneEnvironment(tt.cloneOpts) + + env := envs[tt.envPos] + + if env.Name != tt.env.Name { + t.Errorf("%s %s: Expected env name %s, have %s instead", + testName, tt.subTest, tt.env.Name, env.Name) + } + + if env.Value != tt.env.Value { + t.Errorf("%s %s: Expected env value %s, have %s instead", + testName, tt.subTest, tt.env.Value, env.Value) + } + + } +} diff --git a/pkg/cluster/sync.go b/pkg/cluster/sync.go index 63e2c5c2d..f5ae30b81 100644 --- a/pkg/cluster/sync.go +++ b/pkg/cluster/sync.go @@ -29,7 +29,7 @@ func (c *Cluster) Sync(newSpec *acidv1.Postgresql) error { if err != nil { c.logger.Warningf("error while syncing cluster state: %v", err) c.setStatus(acidv1.ClusterStatusSyncFailed) - } else if c.Status != acidv1.ClusterStatusRunning { + } else if !c.Status.Running() { c.setStatus(acidv1.ClusterStatusRunning) } }() diff --git a/pkg/controller/util.go b/pkg/controller/util.go index 57633ae82..f9fc4468a 100644 --- a/pkg/controller/util.go +++ b/pkg/controller/util.go @@ -1,11 +1,13 @@ package controller import ( + "encoding/json" "fmt" "k8s.io/api/core/v1" apiextv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/wait" acidv1 
"github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1" @@ -52,7 +54,15 @@ func (c *Controller) createOperatorCRD(crd *apiextv1beta1.CustomResourceDefiniti if !k8sutil.ResourceAlreadyExists(err) { return fmt.Errorf("could not create customResourceDefinition: %v", err) } - c.logger.Infof("customResourceDefinition %q is already registered", crd.Name) + c.logger.Infof("customResourceDefinition %q is already registered and will only be updated", crd.Name) + + patch, err := json.Marshal(crd) + if err != nil { + return fmt.Errorf("could not marshal new customResourceDefintion: %v", err) + } + if _, err := c.KubeClient.CustomResourceDefinitions().Patch(crd.Name, types.MergePatchType, patch); err != nil { + return fmt.Errorf("could not update customResourceDefinition: %v", err) + } } else { c.logger.Infof("customResourceDefinition %q has been registered", crd.Name) } diff --git a/pkg/controller/util_test.go b/pkg/controller/util_test.go index cb782904c..c9e16cbd9 100644 --- a/pkg/controller/util_test.go +++ b/pkg/controller/util_test.go @@ -6,82 +6,24 @@ import ( "testing" b64 "encoding/base64" - "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - v1core "k8s.io/client-go/kubernetes/typed/core/v1" "github.com/zalando/postgres-operator/pkg/spec" "github.com/zalando/postgres-operator/pkg/util/k8sutil" + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) const ( testInfrastructureRolesSecretName = "infrastructureroles-test" ) -type mockSecret struct { - v1core.SecretInterface -} - -type mockConfigMap struct { - v1core.ConfigMapInterface -} - -func (c *mockSecret) Get(name string, options metav1.GetOptions) (*v1.Secret, error) { - if name != testInfrastructureRolesSecretName { - return nil, fmt.Errorf("NotFound") - } - secret := &v1.Secret{} - secret.Name = mockController.opConfig.ClusterNameLabel - secret.Data = map[string][]byte{ - "user1": []byte("testrole"), - "password1": []byte("testpassword"), - "inrole1": []byte("testinrole"), - "foobar": []byte(b64.StdEncoding.EncodeToString([]byte("password"))), - } - return secret, nil - -} - -func (c *mockConfigMap) Get(name string, options metav1.GetOptions) (*v1.ConfigMap, error) { - if name != testInfrastructureRolesSecretName { - return nil, fmt.Errorf("NotFound") - } - configmap := &v1.ConfigMap{} - configmap.Name = mockController.opConfig.ClusterNameLabel - configmap.Data = map[string]string{ - "foobar": "{}", - } - return configmap, nil -} - -type MockSecretGetter struct { -} - -type MockConfigMapsGetter struct { -} - -func (c *MockSecretGetter) Secrets(namespace string) v1core.SecretInterface { - return &mockSecret{} -} - -func (c *MockConfigMapsGetter) ConfigMaps(namespace string) v1core.ConfigMapInterface { - return &mockConfigMap{} -} - -func newMockKubernetesClient() k8sutil.KubernetesClient { - return k8sutil.KubernetesClient{ - SecretsGetter: &MockSecretGetter{}, - ConfigMapsGetter: &MockConfigMapsGetter{}, - } -} - func newMockController() *Controller { controller := NewController(&spec.ControllerConfig{}) controller.opConfig.ClusterNameLabel = "cluster-name" controller.opConfig.InfrastructureRolesSecretName = spec.NamespacedName{Namespace: v1.NamespaceDefault, Name: testInfrastructureRolesSecretName} controller.opConfig.Workers = 4 - controller.KubeClient = newMockKubernetesClient() + controller.KubeClient = k8sutil.NewMockKubernetesClient() return controller } diff --git a/pkg/util/k8sutil/k8sutil.go b/pkg/util/k8sutil/k8sutil.go index fc5dca1b8..bd10256e0 100644 --- 
a/pkg/util/k8sutil/k8sutil.go +++ b/pkg/util/k8sutil/k8sutil.go @@ -4,6 +4,8 @@ import ( "fmt" "reflect" + b64 "encoding/base64" + batchv1beta1 "k8s.io/api/batch/v1beta1" clientbatchv1beta1 "k8s.io/client-go/kubernetes/typed/batch/v1beta1" @@ -22,6 +24,7 @@ import ( "k8s.io/client-go/tools/clientcmd" acidv1client "github.com/zalando/postgres-operator/pkg/generated/clientset/versioned" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // KubernetesClient describes getters for Kubernetes objects @@ -46,6 +49,20 @@ type KubernetesClient struct { AcidV1ClientSet *acidv1client.Clientset } +type mockSecret struct { + v1core.SecretInterface +} + +type MockSecretGetter struct { +} + +type mockConfigMap struct { + v1core.ConfigMapInterface +} + +type MockConfigMapsGetter struct { +} + // RestConfig creates REST config func RestConfig(kubeConfig string, outOfCluster bool) (*rest.Config, error) { if outOfCluster { @@ -168,3 +185,49 @@ func SameLogicalBackupJob(cur, new *batchv1beta1.CronJob) (match bool, reason st return true, "" } + +func (c *mockSecret) Get(name string, options metav1.GetOptions) (*v1.Secret, error) { + if name != "infrastructureroles-test" { + return nil, fmt.Errorf("NotFound") + } + secret := &v1.Secret{} + secret.Name = "testcluster" + secret.Data = map[string][]byte{ + "user1": []byte("testrole"), + "password1": []byte("testpassword"), + "inrole1": []byte("testinrole"), + "foobar": []byte(b64.StdEncoding.EncodeToString([]byte("password"))), + } + return secret, nil + +} + +func (c *mockConfigMap) Get(name string, options metav1.GetOptions) (*v1.ConfigMap, error) { + if name != "infrastructureroles-test" { + return nil, fmt.Errorf("NotFound") + } + configmap := &v1.ConfigMap{} + configmap.Name = "testcluster" + configmap.Data = map[string]string{ + "foobar": "{}", + } + return configmap, nil +} + +// Secrets to be mocked +func (c *MockSecretGetter) Secrets(namespace string) v1core.SecretInterface { + return &mockSecret{} +} + +// ConfigMaps to be mocked +func (c *MockConfigMapsGetter) ConfigMaps(namespace string) v1core.ConfigMapInterface { + return &mockConfigMap{} +} + +// NewMockKubernetesClient for other tests +func NewMockKubernetesClient() KubernetesClient { + return KubernetesClient{ + SecretsGetter: &MockSecretGetter{}, + ConfigMapsGetter: &MockConfigMapsGetter{}, + } +}