Merge branch 'master' of https://github.com/zalando/postgres-operator into standby
commit cd2a713a97
@@ -1,2 +1,2 @@
 # global owners
-* @alexeyklyukin @erthalion @sdudoladov @Jan-M @CyberDem0n @avaczi @FxKu
+* @alexeyklyukin @erthalion @sdudoladov @Jan-M @CyberDem0n @avaczi @FxKu @RafiaSabih
@@ -6,4 +6,9 @@ RUN apk --no-cache add ca-certificates
 COPY build/* /
 
+RUN addgroup -g 1000 pgo
+RUN adduser -D -u 1000 -G pgo -g 'Postgres operator' pgo
+
+USER 1000:1000
+
 ENTRYPOINT ["/postgres-operator"]
@@ -257,6 +257,19 @@ under the `clone` top-level key and do not affect the already running cluster.
   timestamp. When this parameter is set the operator will not consider cloning
   from the live cluster, even if it is running, and instead goes to S3. Optional.
 
+* **s3_endpoint**
+  the URL of the S3-compatible service; should be set when cloning from a non-AWS S3. Optional.
+
+* **s3_access_key_id**
+  the access key ID, used for authentication on the S3 service. Optional.
+
+* **s3_secret_access_key**
+  the secret access key, used for authentication on the S3 service. Optional.
+
+* **s3_force_path_style**
+  enables path-style addressing (i.e., http://s3.amazonaws.com/BUCKET/KEY) when connecting to an S3-compatible service
+  that lacks support for sub-domain-style bucket URLs (i.e., http://BUCKET.s3.amazonaws.com/KEY). Optional.
+
 ### EBS volume resizing
 
 Those parameters are grouped under the `volume` top-level key and define the
@@ -32,7 +32,7 @@ configuration.
 kubectl create -f manifests/postgresql-operator-default-configuration.yaml
 kubectl get operatorconfigurations postgresql-operator-default-configuration -o yaml
 ```
-Note that the operator first registers the CRD of the `OperatorConfiguration`
+Note that the operator first attempts to register the CRD of the `OperatorConfiguration`
 and then waits for an instance to be created. In between these two events the
 operator pod may be failing since it cannot fetch the not-yet-existing
 `OperatorConfiguration` instance.
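For reference, the `OperatorConfiguration` instance the hunk above waits for is an ordinary custom resource. A minimal sketch, assuming the `acid.zalan.do/v1` API group used elsewhere in this commit; the fields under `configuration` are illustrative assumptions only, the shipped defaults live in manifests/postgresql-operator-default-configuration.yaml:

```yaml
apiVersion: "acid.zalan.do/v1"
kind: OperatorConfiguration
metadata:
  name: postgresql-operator-default-configuration
configuration:
  # illustrative values only; consult the bundled default configuration
  # manifest for the actual field names and defaults
  workers: 4
  docker_image: "<spilo image to use for Postgres pods>"
```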
docs/user.md (28 lines changed)
@@ -41,6 +41,15 @@ $ kubectl create -f manifests/minimal-postgres-manifest.yaml
 $ kubectl get pods -w --show-labels
 ```
 
+## Give K8S users access to create/list postgresqls
+
+```bash
+$ kubectl create -f manifests/user-facing-clusterroles.yaml
+```
+
+Creates zalando-postgres-operator:users:view, :edit and :admin clusterroles that are
+aggregated into the default roles.
+
 ## Connect to PostgreSQL
 
 With a `port-forward` on one of the database pods (e.g. the master) you can
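As a usage sketch for the aggregated cluster roles described above (not part of the commit itself): a cluster admin could bind one of them to a user inside a namespace with a plain RoleBinding. The role name comes from the manifest added later in this commit; the binding name, namespace and user below are hypothetical placeholders.

```yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: postgresql-editors        # hypothetical binding name
  namespace: acid-team            # hypothetical namespace
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: zalando-postgres-operator:users:edit
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: User
  name: jane.doe                  # hypothetical user
```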
@@ -254,6 +263,24 @@ metadata:
 Note that timezone is required for `timestamp`. Otherwise, offset is relative
 to UTC, see [RFC 3339 section 5.6](https://www.ietf.org/rfc/rfc3339.txt).
 
+For non-AWS S3, the following settings can be set to support cloning from other S3 implementations:
+
+```yaml
+apiVersion: "acid.zalan.do/v1"
+kind: postgresql
+metadata:
+  name: acid-test-cluster
+spec:
+  clone:
+    uid: "efd12e58-5786-11e8-b5a7-06148230260c"
+    cluster: "acid-batman"
+    timestamp: "2017-12-19T12:40:33+01:00"
+    s3_endpoint: https://s3.acme.org
+    s3_access_key_id: 0123456789abcdef0123456789abcdef
+    s3_secret_access_key: 0123456789abcdef0123456789abcdef
+    s3_force_path_style: true
+```
+
 ## Setting up a standby cluster
 
 Standby clusters are like normal clusters except that they stream from a remote cluster. Patroni supports this; [read this](https://github.com/zalando/patroni/blob/bd2c54581abb42a7d3a3da551edf0b8732eefd27/docs/replica_bootstrap.rst#standby-cluster) to learn more about them.
@@ -265,6 +292,7 @@ spec:
   standby:
     s3_wal_path: "s3 bucket path to the master"
 ```
 
 Things to note:
 
 - An empty string provided in s3_wal_path of the standby cluster will result in an error and no statefulset will be created.
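To make the standby snippet above concrete, a complete manifest would wrap the `standby` section in an ordinary postgresql resource. A minimal sketch, assuming typical fields from the minimal cluster manifest; only `standby.s3_wal_path` is taken from the docs hunk above, and the cluster name and bucket path shown are placeholders:

```yaml
apiVersion: "acid.zalan.do/v1"
kind: postgresql
metadata:
  name: acid-standby-cluster            # hypothetical cluster name
spec:
  teamId: "acid"
  numberOfInstances: 1
  volume:
    size: 1Gi
  postgresql:
    version: "11"
  standby:
    # S3 bucket path that the source (master) cluster archives its WAL to
    s3_wal_path: "s3://example-bucket/spilo/acid-batman/wal/"
```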
@@ -21,6 +21,10 @@ spec:
           limits:
             cpu: 2000m
             memory: 500Mi
+        securityContext:
+          runAsUser: 1000
+          runAsNonRoot: true
+          readOnlyRootFilesystem: true
         env:
         # provided additional ENV vars can overwrite individual config map entries
         - name: CONFIG_MAP_NAME
@@ -0,0 +1,51 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  labels:
+    rbac.authorization.k8s.io/aggregate-to-admin: "true"
+  name: zalando-postgres-operator:users:admin
+rules:
+- apiGroups:
+  - acid.zalan.do
+  resources:
+  - postgresqls
+  - postgresqls/status
+  verbs:
+  - "*"
+
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  labels:
+    rbac.authorization.k8s.io/aggregate-to-edit: "true"
+  name: zalando-postgres-operator:users:edit
+rules:
+- apiGroups:
+  - acid.zalan.do
+  resources:
+  - postgresqls
+  verbs:
+  - create
+  - update
+  - patch
+  - delete
+
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  labels:
+    rbac.authorization.k8s.io/aggregate-to-view: "true"
+  name: zalando-postgres-operator:users:view
+rules:
+- apiGroups:
+  - acid.zalan.do
+  resources:
+  - postgresqls
+  - postgresqls/status
+  verbs:
+  - get
+  - list
+  - watch
@@ -121,10 +121,14 @@ type StandbyDescription struct {
 
 // CloneDescription describes which cluster the new should clone and up to which point in time
 type CloneDescription struct {
 	ClusterName       string `json:"cluster,omitempty"`
 	UID               string `json:"uid,omitempty"`
 	EndTimestamp      string `json:"timestamp,omitempty"`
 	S3WalPath         string `json:"s3_wal_path,omitempty"`
+	S3Endpoint        string `json:"s3_endpoint,omitempty"`
+	S3AccessKeyId     string `json:"s3_access_key_id,omitempty"`
+	S3SecretAccessKey string `json:"s3_secret_access_key,omitempty"`
+	S3ForcePathStyle  *bool  `json:"s3_force_path_style,omitempty" defaults:"false"`
 }
 
 // Sidecar defines a container to be run in the same pod as the Postgres container.
@@ -61,12 +61,12 @@ var cloneClusterDescriptions = []struct {
 	in  *CloneDescription
 	err error
 }{
-	{&CloneDescription{"foo+bar", "", "NotEmpty", ""}, nil},
+	{&CloneDescription{"foo+bar", "", "NotEmpty", "", "", "", "", nil}, nil},
-	{&CloneDescription{"foo+bar", "", "", ""},
+	{&CloneDescription{"foo+bar", "", "", "", "", "", "", nil},
 		errors.New(`clone cluster name must confirm to DNS-1035, regex used for validation is "^[a-z]([-a-z0-9]*[a-z0-9])?$"`)},
-	{&CloneDescription{"foobar123456789012345678901234567890123456789012345678901234567890", "", "", ""},
+	{&CloneDescription{"foobar123456789012345678901234567890123456789012345678901234567890", "", "", "", "", "", "", nil},
 		errors.New("clone cluster name must be no longer than 63 characters")},
-	{&CloneDescription{"foobar", "", "", ""}, nil},
+	{&CloneDescription{"foobar", "", "", "", "", "", "", nil}, nil},
 }
 
 var maintenanceWindows = []struct {
@@ -50,6 +50,11 @@ func (in *AWSGCPConfiguration) DeepCopy() *AWSGCPConfiguration {
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *CloneDescription) DeepCopyInto(out *CloneDescription) {
 	*out = *in
+	if in.S3ForcePathStyle != nil {
+		in, out := &in.S3ForcePathStyle, &out.S3ForcePathStyle
+		*out = new(bool)
+		**out = **in
+	}
 	return
 }
 
@@ -459,7 +464,7 @@ func (in *PostgresSpec) DeepCopyInto(out *PostgresSpec) {
 			(*in)[i].DeepCopyInto(&(*out)[i])
 		}
 	}
-	out.Clone = in.Clone
+	in.Clone.DeepCopyInto(&out.Clone)
 	if in.Databases != nil {
 		in, out := &in.Databases, &out.Databases
 		*out = make(map[string]string, len(*in))
@@ -1278,6 +1278,29 @@ func (c *Cluster) generateCloneEnvironment(description *acidv1.CloneDescription)
 		result = append(result, v1.EnvVar{Name: "CLONE_METHOD", Value: "CLONE_WITH_WALE"})
 		result = append(result, v1.EnvVar{Name: "CLONE_TARGET_TIME", Value: description.EndTimestamp})
 		result = append(result, v1.EnvVar{Name: "CLONE_WAL_BUCKET_SCOPE_PREFIX", Value: ""})
+
+		if description.S3Endpoint != "" {
+			result = append(result, v1.EnvVar{Name: "CLONE_AWS_ENDPOINT", Value: description.S3Endpoint})
+			result = append(result, v1.EnvVar{Name: "CLONE_WALE_S3_ENDPOINT", Value: description.S3Endpoint})
+		}
+
+		if description.S3AccessKeyId != "" {
+			result = append(result, v1.EnvVar{Name: "CLONE_AWS_ACCESS_KEY_ID", Value: description.S3AccessKeyId})
+		}
+
+		if description.S3SecretAccessKey != "" {
+			result = append(result, v1.EnvVar{Name: "CLONE_AWS_SECRET_ACCESS_KEY", Value: description.S3SecretAccessKey})
+		}
+
+		if description.S3ForcePathStyle != nil {
+			s3ForcePathStyle := "0"
+
+			if *description.S3ForcePathStyle {
+				s3ForcePathStyle = "1"
+			}
+
+			result = append(result, v1.EnvVar{Name: "CLONE_AWS_S3_FORCE_PATH_STYLE", Value: s3ForcePathStyle})
+		}
 	}
 
 	return result
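For illustration only: with the clone spec from the docs example earlier in this commit (`s3_endpoint: https://s3.acme.org`, `s3_force_path_style: true`), the new branches above would append environment variables along these lines to the generated pod template (a sketch of just the added entries, not the full clone environment):

```yaml
env:
- name: CLONE_AWS_ENDPOINT
  value: https://s3.acme.org
- name: CLONE_WALE_S3_ENDPOINT
  value: https://s3.acme.org
- name: CLONE_AWS_ACCESS_KEY_ID
  value: 0123456789abcdef0123456789abcdef
- name: CLONE_AWS_SECRET_ACCESS_KEY
  value: 0123456789abcdef0123456789abcdef
- name: CLONE_AWS_S3_FORCE_PATH_STYLE
  value: "1"
```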
@@ -197,25 +197,25 @@ func (c *Controller) initRoleBinding() {
 	// operator binds it to the cluster role with sufficient privileges
 	// we assume the role is created by the k8s administrator
 	if c.opConfig.PodServiceAccountRoleBindingDefinition == "" {
-		c.opConfig.PodServiceAccountRoleBindingDefinition = `
+		c.opConfig.PodServiceAccountRoleBindingDefinition = fmt.Sprintf(`
 		{
 			"apiVersion": "rbac.authorization.k8s.io/v1beta1",
 			"kind": "RoleBinding",
 			"metadata": {
-				"name": "zalando-postgres-operator"
+				"name": "%s"
 			},
 			"roleRef": {
 				"apiGroup": "rbac.authorization.k8s.io",
 				"kind": "ClusterRole",
-				"name": "zalando-postgres-operator"
+				"name": "%s"
 			},
 			"subjects": [
 				{
 					"kind": "ServiceAccount",
-					"name": "operator"
+					"name": "%s"
 				}
 			]
-		}`
+		}`, c.PodServiceAccount.Name, c.PodServiceAccount.Name, c.PodServiceAccount.Name)
 	}
 	c.logger.Info("Parse role bindings")
 	// re-uses k8s internal parsing. See k8s client-go issue #193 for explanation
@@ -230,9 +230,6 @@ func (c *Controller) initRoleBinding() {
 	default:
 		c.PodServiceAccountRoleBinding = obj.(*rbacv1beta1.RoleBinding)
 		c.PodServiceAccountRoleBinding.Namespace = ""
-		c.PodServiceAccountRoleBinding.ObjectMeta.Name = c.PodServiceAccount.Name
-		c.PodServiceAccountRoleBinding.RoleRef.Name = c.PodServiceAccount.Name
-		c.PodServiceAccountRoleBinding.Subjects[0].Name = c.PodServiceAccount.Name
 		c.logger.Info("successfully parsed")
 
 	}
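Taken together, the two hunks above substitute the configured pod service account name into the default role binding template instead of patching hard-coded names afterwards. Rendered into equivalent YAML, and assuming a pod service account called `postgres-pod` (a placeholder; the real value comes from `c.PodServiceAccount.Name`), the filled-in template would read roughly:

```yaml
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: RoleBinding
metadata:
  name: postgres-pod          # c.PodServiceAccount.Name
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: postgres-pod          # c.PodServiceAccount.Name
subjects:
- kind: ServiceAccount
  name: postgres-pod          # c.PodServiceAccount.Name
```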
@@ -51,17 +51,18 @@ func (c *Controller) clusterWorkerID(clusterName spec.NamespacedName) uint32 {
 
 func (c *Controller) createOperatorCRD(crd *apiextv1beta1.CustomResourceDefinition) error {
 	if _, err := c.KubeClient.CustomResourceDefinitions().Create(crd); err != nil {
-		if !k8sutil.ResourceAlreadyExists(err) {
-			return fmt.Errorf("could not create customResourceDefinition: %v", err)
-		}
-		c.logger.Infof("customResourceDefinition %q is already registered and will only be updated", crd.Name)
+		if k8sutil.ResourceAlreadyExists(err) {
+			c.logger.Infof("customResourceDefinition %q is already registered and will only be updated", crd.Name)
 
 			patch, err := json.Marshal(crd)
 			if err != nil {
 				return fmt.Errorf("could not marshal new customResourceDefintion: %v", err)
 			}
 			if _, err := c.KubeClient.CustomResourceDefinitions().Patch(crd.Name, types.MergePatchType, patch); err != nil {
 				return fmt.Errorf("could not update customResourceDefinition: %v", err)
 			}
+		} else {
+			c.logger.Errorf("could not create customResourceDefinition %q: %v", crd.Name, err)
+		}
 	} else {
 		c.logger.Infof("customResourceDefinition %q has been registered", crd.Name)