From bbf28c4df70f9ba2ac0b1ccf6673bea9b975550f Mon Sep 17 00:00:00 2001
From: "teuto.net Netzdienste GmbH"
Date: Fri, 14 Jun 2019 12:28:00 +0200
Subject: [PATCH 1/6] Add additional S3 settings for cloning (#497)

---
 docs/reference/cluster_manifest.md            | 13 +++++++++++
 docs/user.md                                  | 18 +++++++++++++++
 pkg/apis/acid.zalan.do/v1/postgresql_type.go  | 12 ++++++----
 pkg/apis/acid.zalan.do/v1/util_test.go        |  8 +++----
 .../acid.zalan.do/v1/zz_generated.deepcopy.go |  7 +++++-
 pkg/cluster/k8sres.go                         | 23 +++++++++++++++++++
 6 files changed, 72 insertions(+), 9 deletions(-)

diff --git a/docs/reference/cluster_manifest.md b/docs/reference/cluster_manifest.md
index 2768f02aa..d2bbcbe88 100644
--- a/docs/reference/cluster_manifest.md
+++ b/docs/reference/cluster_manifest.md
@@ -254,6 +254,19 @@ under the `clone` top-level key and do not affect the already running cluster.
   timestamp. When this parameter is set the operator will not consider cloning
   from the live cluster, even if it is running, and instead goes to S3. Optional.
 
+* **s3_endpoint**
+  the URL of the S3-compatible service. Set this when cloning from non-AWS S3. Optional.
+
+* **s3_access_key_id**
+  the access key ID used for authentication with the S3 service. Optional.
+
+* **s3_secret_access_key**
+  the secret access key used for authentication with the S3 service. Optional.
+
+* **s3_force_path_style**
+  enable path-style addressing (i.e., http://s3.amazonaws.com/BUCKET/KEY) when connecting to an S3-compatible service
+  that lacks support for sub-domain-style bucket URLs (i.e., http://BUCKET.s3.amazonaws.com/KEY). Optional.
+
 ### EBS volume resizing
 
 Those parameters are grouped under the `volume` top-level key and define the

diff --git a/docs/user.md b/docs/user.md
index 0f643f4c5..f33409720 100644
--- a/docs/user.md
+++ b/docs/user.md
@@ -254,6 +254,24 @@ metadata:
 Note that timezone is required for `timestamp`. Otherwise, offset is relative
 to UTC, see [RFC 3339 section 5.6](https://www.ietf.org/rfc/rfc3339.txt).
 
+For non-AWS S3, the following settings can be set to support cloning from other S3 implementations:
+
+```yaml
+apiVersion: "acid.zalan.do/v1"
+kind: postgresql
+metadata:
+  name: acid-test-cluster
+spec:
+  clone:
+    uid: "efd12e58-5786-11e8-b5a7-06148230260c"
+    cluster: "acid-batman"
+    timestamp: "2017-12-19T12:40:33+01:00"
+    s3_endpoint: https://s3.acme.org
+    s3_access_key_id: 0123456789abcdef0123456789abcdef
+    s3_secret_access_key: 0123456789abcdef0123456789abcdef
+    s3_force_path_style: true
+```
+
 ## Sidecar Support
 
 Each cluster can specify arbitrary sidecars to run. These containers could be used for
diff --git a/pkg/apis/acid.zalan.do/v1/postgresql_type.go b/pkg/apis/acid.zalan.do/v1/postgresql_type.go
index 1a191786e..33c3a159b 100644
--- a/pkg/apis/acid.zalan.do/v1/postgresql_type.go
+++ b/pkg/apis/acid.zalan.do/v1/postgresql_type.go
@@ -115,10 +115,14 @@ type Patroni struct {
 
 // CloneDescription describes which cluster the new cluster should clone from and up to which point in time
 type CloneDescription struct {
-	ClusterName  string `json:"cluster,omitempty"`
-	UID          string `json:"uid,omitempty"`
-	EndTimestamp string `json:"timestamp,omitempty"`
-	S3WalPath    string `json:"s3_wal_path,omitempty"`
+	ClusterName       string `json:"cluster,omitempty"`
+	UID               string `json:"uid,omitempty"`
+	EndTimestamp      string `json:"timestamp,omitempty"`
+	S3WalPath         string `json:"s3_wal_path,omitempty"`
+	S3Endpoint        string `json:"s3_endpoint,omitempty"`
+	S3AccessKeyId     string `json:"s3_access_key_id,omitempty"`
+	S3SecretAccessKey string `json:"s3_secret_access_key,omitempty"`
+	S3ForcePathStyle  *bool  `json:"s3_force_path_style,omitempty" defaults:"false"`
 }
 
 // Sidecar defines a container to be run in the same pod as the Postgres container.

diff --git a/pkg/apis/acid.zalan.do/v1/util_test.go b/pkg/apis/acid.zalan.do/v1/util_test.go
index 537619aaf..02bdcca1c 100644
--- a/pkg/apis/acid.zalan.do/v1/util_test.go
+++ b/pkg/apis/acid.zalan.do/v1/util_test.go
@@ -61,12 +61,12 @@ var cloneClusterDescriptions = []struct {
 	in  *CloneDescription
 	err error
 }{
-	{&CloneDescription{"foo+bar", "", "NotEmpty", ""}, nil},
-	{&CloneDescription{"foo+bar", "", "", ""},
+	{&CloneDescription{"foo+bar", "", "NotEmpty", "", "", "", "", nil}, nil},
+	{&CloneDescription{"foo+bar", "", "", "", "", "", "", nil},
 		errors.New(`clone cluster name must confirm to DNS-1035, regex used for validation is "^[a-z]([-a-z0-9]*[a-z0-9])?$"`)},
-	{&CloneDescription{"foobar123456789012345678901234567890123456789012345678901234567890", "", "", ""},
+	{&CloneDescription{"foobar123456789012345678901234567890123456789012345678901234567890", "", "", "", "", "", "", nil},
 		errors.New("clone cluster name must be no longer than 63 characters")},
-	{&CloneDescription{"foobar", "", "", ""}, nil},
+	{&CloneDescription{"foobar", "", "", "", "", "", "", nil}, nil},
 }
 
 var maintenanceWindows = []struct {
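The new `S3ForcePathStyle` field is a `*bool` rather than a plain `bool`: a nil pointer lets the operator tell "not set in the manifest" apart from an explicit `false`, which is also why the extended test fixtures above pass `nil` for it. A minimal, self-contained sketch of the three states (not operator code; the env var name anticipates the k8sres.go hunk below):

```go
package main

import "fmt"

// describe distinguishes the three states a *bool field can encode;
// a plain bool would collapse "unset" and "false" into one.
func describe(s3ForcePathStyle *bool) string {
	switch {
	case s3ForcePathStyle == nil:
		return "unset: omit CLONE_AWS_S3_FORCE_PATH_STYLE entirely"
	case *s3ForcePathStyle:
		return "true: emit CLONE_AWS_S3_FORCE_PATH_STYLE=1"
	default:
		return "false: emit CLONE_AWS_S3_FORCE_PATH_STYLE=0"
	}
}

func main() {
	t, f := true, false
	fmt.Println(describe(nil))
	fmt.Println(describe(&t))
	fmt.Println(describe(&f))
}
```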
diff --git a/pkg/apis/acid.zalan.do/v1/zz_generated.deepcopy.go b/pkg/apis/acid.zalan.do/v1/zz_generated.deepcopy.go
index 282fb311f..65d8ee925 100644
--- a/pkg/apis/acid.zalan.do/v1/zz_generated.deepcopy.go
+++ b/pkg/apis/acid.zalan.do/v1/zz_generated.deepcopy.go
@@ -50,6 +50,11 @@ func (in *AWSGCPConfiguration) DeepCopy() *AWSGCPConfiguration {
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *CloneDescription) DeepCopyInto(out *CloneDescription) {
 	*out = *in
+	if in.S3ForcePathStyle != nil {
+		in, out := &in.S3ForcePathStyle, &out.S3ForcePathStyle
+		*out = new(bool)
+		**out = **in
+	}
 	return
 }
 
@@ -459,7 +464,7 @@ func (in *PostgresSpec) DeepCopyInto(out *PostgresSpec) {
 			(*in)[i].DeepCopyInto(&(*out)[i])
 		}
 	}
-	out.Clone = in.Clone
+	in.Clone.DeepCopyInto(&out.Clone)
 	if in.Databases != nil {
 		in, out := &in.Databases, &out.Databases
 		*out = make(map[string]string, len(*in))

diff --git a/pkg/cluster/k8sres.go b/pkg/cluster/k8sres.go
index a16e810ed..585cf2bed 100644
--- a/pkg/cluster/k8sres.go
+++ b/pkg/cluster/k8sres.go
@@ -1266,6 +1266,29 @@ func (c *Cluster) generateCloneEnvironment(description *acidv1.CloneDescription)
 		result = append(result, v1.EnvVar{Name: "CLONE_METHOD", Value: "CLONE_WITH_WALE"})
 		result = append(result, v1.EnvVar{Name: "CLONE_TARGET_TIME", Value: description.EndTimestamp})
 		result = append(result, v1.EnvVar{Name: "CLONE_WAL_BUCKET_SCOPE_PREFIX", Value: ""})
+
+		if description.S3Endpoint != "" {
+			result = append(result, v1.EnvVar{Name: "CLONE_AWS_ENDPOINT", Value: description.S3Endpoint})
+			result = append(result, v1.EnvVar{Name: "CLONE_WALE_S3_ENDPOINT", Value: description.S3Endpoint})
+		}
+
+		if description.S3AccessKeyId != "" {
+			result = append(result, v1.EnvVar{Name: "CLONE_AWS_ACCESS_KEY_ID", Value: description.S3AccessKeyId})
+		}
+
+		if description.S3SecretAccessKey != "" {
+			result = append(result, v1.EnvVar{Name: "CLONE_AWS_SECRET_ACCESS_KEY", Value: description.S3SecretAccessKey})
+		}
+
+		if description.S3ForcePathStyle != nil {
+			s3ForcePathStyle := "0"
+
+			if *description.S3ForcePathStyle {
+				s3ForcePathStyle = "1"
+			}
+
+			result = append(result, v1.EnvVar{Name: "CLONE_AWS_S3_FORCE_PATH_STYLE", Value: s3ForcePathStyle})
+		}
 	}
 
 	return result
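Putting the pieces together: for the example manifest in docs/user.md above, `generateCloneEnvironment` would add clone env vars along these lines (a sketch of the relevant subset only, not the complete environment):

```yaml
# Subset of the generated clone environment for the docs/user.md example.
- name: CLONE_AWS_ENDPOINT
  value: https://s3.acme.org
- name: CLONE_WALE_S3_ENDPOINT
  value: https://s3.acme.org
- name: CLONE_AWS_ACCESS_KEY_ID
  value: 0123456789abcdef0123456789abcdef
- name: CLONE_AWS_SECRET_ACCESS_KEY
  value: 0123456789abcdef0123456789abcdef
- name: CLONE_AWS_S3_FORCE_PATH_STYLE
  value: "1"
```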
From 6fbfee3903d74e604ebbe8077409810d68536991 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Erik=20Inge=20Bols=C3=B8?=
Date: Fri, 14 Jun 2019 14:24:23 +0200
Subject: [PATCH 2/6] decouple clusterrole name and serviceaccount name (#581)

Decouple clusterrole name and service account name.
---
 pkg/controller/controller.go | 13 +++++--------
 1 file changed, 5 insertions(+), 8 deletions(-)

diff --git a/pkg/controller/controller.go b/pkg/controller/controller.go
index 2d814fd14..a492a85e2 100644
--- a/pkg/controller/controller.go
+++ b/pkg/controller/controller.go
@@ -197,25 +197,25 @@ func (c *Controller) initRoleBinding() {
 	// operator binds it to the cluster role with sufficient privileges
 	// we assume the role is created by the k8s administrator
 	if c.opConfig.PodServiceAccountRoleBindingDefinition == "" {
-		c.opConfig.PodServiceAccountRoleBindingDefinition = `
+		c.opConfig.PodServiceAccountRoleBindingDefinition = fmt.Sprintf(`
 		{
 			"apiVersion": "rbac.authorization.k8s.io/v1beta1",
 			"kind": "RoleBinding",
 			"metadata": {
-				"name": "zalando-postgres-operator"
+				"name": "%s"
 			},
 			"roleRef": {
 				"apiGroup": "rbac.authorization.k8s.io",
 				"kind": "ClusterRole",
-				"name": "zalando-postgres-operator"
+				"name": "%s"
 			},
 			"subjects": [
 				{
 					"kind": "ServiceAccount",
-					"name": "operator"
+					"name": "%s"
 				}
 			]
-		}`
+		}`, c.PodServiceAccount.Name, c.PodServiceAccount.Name, c.PodServiceAccount.Name)
 	}
 	c.logger.Info("Parse role bindings")
 	// re-uses k8s internal parsing. See k8s client-go issue #193 for explanation
@@ -230,9 +230,6 @@ func (c *Controller) initRoleBinding() {
 	default:
 		c.PodServiceAccountRoleBinding = obj.(*rbacv1beta1.RoleBinding)
 		c.PodServiceAccountRoleBinding.Namespace = ""
-		c.PodServiceAccountRoleBinding.ObjectMeta.Name = c.PodServiceAccount.Name
-		c.PodServiceAccountRoleBinding.RoleRef.Name = c.PodServiceAccount.Name
-		c.PodServiceAccountRoleBinding.Subjects[0].Name = c.PodServiceAccount.Name
 		c.logger.Info("successfully parsed")
 	}
 
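For illustration (the service account name is an assumption, not part of the patch): if `c.PodServiceAccount.Name` resolves to `postgres-pod`, the `fmt.Sprintf` template above renders to the role binding below, so all three names now follow the configured service account instead of being hardcoded:

```json
{
  "apiVersion": "rbac.authorization.k8s.io/v1beta1",
  "kind": "RoleBinding",
  "metadata": {
    "name": "postgres-pod"
  },
  "roleRef": {
    "apiGroup": "rbac.authorization.k8s.io",
    "kind": "ClusterRole",
    "name": "postgres-pod"
  },
  "subjects": [
    {
      "kind": "ServiceAccount",
      "name": "postgres-pod"
    }
  ]
}
```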
From 35a1f2cff81b2b56425d2483637b6d4c2c31d2fd Mon Sep 17 00:00:00 2001
From: Felix Kunde
Date: Fri, 14 Jun 2019 14:56:40 +0200
Subject: [PATCH 3/6] Add Rafia to code owners (#588)

---
 CODEOWNERS | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/CODEOWNERS b/CODEOWNERS
index 98d9cd7bb..96fe74510 100644
--- a/CODEOWNERS
+++ b/CODEOWNERS
@@ -1,2 +1,2 @@
 # global owners
-* @alexeyklyukin @erthalion @sdudoladov @Jan-M @CyberDem0n @avaczi @FxKu
+* @alexeyklyukin @erthalion @sdudoladov @Jan-M @CyberDem0n @avaczi @FxKu @RafiaSabih

From 028b834ea6f4db8407e50e43cec1c40b779f7822 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Erik=20Inge=20Bols=C3=B8?=
Date: Fri, 14 Jun 2019 15:47:08 +0200
Subject: [PATCH 4/6] postgres-operator deployment template: run operator as
 non-root, and with readonly filesystem (#582)

---
 docker/Dockerfile                | 5 +++++
 manifests/postgres-operator.yaml | 4 ++++
 2 files changed, 9 insertions(+)

diff --git a/docker/Dockerfile b/docker/Dockerfile
index 66abb6c30..196ac93d3 100644
--- a/docker/Dockerfile
+++ b/docker/Dockerfile
@@ -6,4 +6,9 @@ RUN apk --no-cache add ca-certificates
 
 COPY build/* /
 
+RUN addgroup -g 1000 pgo
+RUN adduser -D -u 1000 -G pgo -g 'Postgres operator' pgo
+
+USER 1000:1000
+
 ENTRYPOINT ["/postgres-operator"]

diff --git a/manifests/postgres-operator.yaml b/manifests/postgres-operator.yaml
index d43c0f8a8..005f02521 100644
--- a/manifests/postgres-operator.yaml
+++ b/manifests/postgres-operator.yaml
@@ -21,6 +21,10 @@ spec:
         limits:
           cpu: 2000m
           memory: 500Mi
+      securityContext:
+        runAsUser: 1000
+        runAsNonRoot: true
+        readOnlyRootFilesystem: true
       env:
       # provided additional ENV vars can overwrite individual config map entries
       - name: CONFIG_MAP_NAME

From e1d93953384265d5c96f0270c21166ed6ecff433 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Erik=20Inge=20Bols=C3=B8?=
Date: Fri, 14 Jun 2019 15:59:51 +0200
Subject: [PATCH 5/6] rbac: add user-facing clusterroles (#585)

* rbac: add user-facing clusterroles
---
 docs/user.md                            |  9 +++++
 manifests/user-facing-clusterroles.yaml | 51 +++++++++++++++++++++++++
 2 files changed, 60 insertions(+)
 create mode 100644 manifests/user-facing-clusterroles.yaml

diff --git a/docs/user.md b/docs/user.md
index f33409720..4cce153aa 100644
--- a/docs/user.md
+++ b/docs/user.md
@@ -41,6 +41,15 @@ $ kubectl create -f manifests/minimal-postgres-manifest.yaml
 $ kubectl get pods -w --show-labels
 ```
 
+## Give K8S users access to create/list postgresqls
+
+```bash
+$ kubectl create -f manifests/user-facing-clusterroles.yaml
+```
+
+This creates the zalando-postgres-operator:users:view, :edit and :admin
+clusterroles, which are aggregated into the default roles.
+
 ## Connect to PostgreSQL
 
 With a `port-forward` on one of the database pods (e.g. the master) you can

diff --git a/manifests/user-facing-clusterroles.yaml b/manifests/user-facing-clusterroles.yaml
new file mode 100644
index 000000000..800aafdb9
--- /dev/null
+++ b/manifests/user-facing-clusterroles.yaml
@@ -0,0 +1,51 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  labels:
+    rbac.authorization.k8s.io/aggregate-to-admin: "true"
+  name: zalando-postgres-operator:users:admin
+rules:
+- apiGroups:
+  - acid.zalan.do
+  resources:
+  - postgresqls
+  - postgresqls/status
+  verbs:
+  - "*"
+
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  labels:
+    rbac.authorization.k8s.io/aggregate-to-edit: "true"
+  name: zalando-postgres-operator:users:edit
+rules:
+- apiGroups:
+  - acid.zalan.do
+  resources:
+  - postgresqls
+  verbs:
+  - create
+  - update
+  - patch
+  - delete
+
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  labels:
+    rbac.authorization.k8s.io/aggregate-to-view: "true"
+  name: zalando-postgres-operator:users:view
+rules:
+- apiGroups:
+  - acid.zalan.do
+  resources:
+  - postgresqls
+  - postgresqls/status
+  verbs:
+  - get
+  - list
+  - watch
+
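Thanks to the aggregation labels, users bound to the built-in view, edit or admin roles pick up the postgresql permissions automatically. To bind one of the new roles to a user directly instead, a sketch like the following works (the user name is hypothetical):

```yaml
# Grant a single user read-only access to postgresql resources.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: postgresql-viewers
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: zalando-postgres-operator:users:view
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: User
  name: jane.doe  # hypothetical user
```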
From 44acd7e4dbb5fb79b3ce83e357dc29ac4855e729 Mon Sep 17 00:00:00 2001
From: Maxim Ivanov
Date: Fri, 14 Jun 2019 15:08:29 +0100
Subject: [PATCH 6/6] Not being able to register CRD is not a fatal error
 (#444)

The operator proceeds to check whether the CRD is present and ready;
only if it is not does this become a fatal error.
---
 docs/reference/operator_parameters.md |  2 +-
 pkg/controller/util.go                | 21 +++++++++++----------
 2 files changed, 12 insertions(+), 11 deletions(-)

diff --git a/docs/reference/operator_parameters.md b/docs/reference/operator_parameters.md
index 9c14b8669..c9a9db753 100644
--- a/docs/reference/operator_parameters.md
+++ b/docs/reference/operator_parameters.md
@@ -32,7 +32,7 @@ configuration.
   kubectl create -f manifests/postgresql-operator-default-configuration.yaml
   kubectl get operatorconfigurations postgresql-operator-default-configuration -o yaml
   ```
-  Note that the operator first registers the CRD of the `OperatorConfiguration`
+  Note that the operator first attempts to register the CRD of the `OperatorConfiguration`
   and then waits for an instance to be created. In between these two events the
   operator pod may be failing since it cannot fetch the not-yet-existing
   `OperatorConfiguration` instance.

diff --git a/pkg/controller/util.go b/pkg/controller/util.go
index f9fc4468a..0adb85dbd 100644
--- a/pkg/controller/util.go
+++ b/pkg/controller/util.go
@@ -51,17 +51,18 @@ func (c *Controller) clusterWorkerID(clusterName spec.NamespacedName) uint32 {
 
 func (c *Controller) createOperatorCRD(crd *apiextv1beta1.CustomResourceDefinition) error {
 	if _, err := c.KubeClient.CustomResourceDefinitions().Create(crd); err != nil {
-		if !k8sutil.ResourceAlreadyExists(err) {
-			return fmt.Errorf("could not create customResourceDefinition: %v", err)
-		}
-		c.logger.Infof("customResourceDefinition %q is already registered and will only be updated", crd.Name)
+		if k8sutil.ResourceAlreadyExists(err) {
+			c.logger.Infof("customResourceDefinition %q is already registered and will only be updated", crd.Name)
 
-		patch, err := json.Marshal(crd)
-		if err != nil {
-			return fmt.Errorf("could not marshal new customResourceDefintion: %v", err)
-		}
-		if _, err := c.KubeClient.CustomResourceDefinitions().Patch(crd.Name, types.MergePatchType, patch); err != nil {
-			return fmt.Errorf("could not update customResourceDefinition: %v", err)
+			patch, err := json.Marshal(crd)
+			if err != nil {
+				return fmt.Errorf("could not marshal new customResourceDefinition: %v", err)
+			}
+			if _, err := c.KubeClient.CustomResourceDefinitions().Patch(crd.Name, types.MergePatchType, patch); err != nil {
+				return fmt.Errorf("could not update customResourceDefinition: %v", err)
+			}
+		} else {
+			c.logger.Errorf("could not create customResourceDefinition %q: %v", crd.Name, err)
 		}
 	} else {
 		c.logger.Infof("customResourceDefinition %q has been registered", crd.Name)
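One way to check the outcome of this logic from the outside (a sketch assuming the CRD names used by this repository): the CRDs should exist and report an Established condition even when the initial Create returned AlreadyExists.

```bash
# Verify the CRDs are registered and ready.
kubectl get crd postgresqls.acid.zalan.do operatorconfigurations.acid.zalan.do
kubectl get crd postgresqls.acid.zalan.do \
  -o jsonpath='{.status.conditions[?(@.type=="Established")].status}'
```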