Merge branch 'master' into coderanger-disruption-budget

commit 8285a3b7b7

@@ -1,2 +1,2 @@
 # global owners
-* @alexeyklyukin @erthalion @sdudoladov @Jan-M @CyberDem0n @avaczi @FxKu
+* @alexeyklyukin @erthalion @sdudoladov @Jan-M @CyberDem0n @avaczi @FxKu @RafiaSabih

@@ -6,4 +6,9 @@ RUN apk --no-cache add ca-certificates
 
 COPY build/* /
 
+RUN addgroup -g 1000 pgo
+RUN adduser -D -u 1000 -G pgo -g 'Postgres operator' pgo
+
+USER 1000:1000
+
 ENTRYPOINT ["/postgres-operator"]

@@ -254,6 +254,19 @@ under the `clone` top-level key and do not affect the already running cluster.
   timestamp. When this parameter is set the operator will not consider cloning
   from the live cluster, even if it is running, and instead goes to S3. Optional.
 
+* **s3_endpoint**
+  the url of the S3-compatible service should be set when cloning from non AWS S3. Optional.
+
+* **s3_access_key_id**
+  the access key id, used for authentication on S3 service. Optional.
+
+* **s3_secret_access_key**
+  the secret access key, used for authentication on S3 service. Optional.
+
+* **s3_force_path_style**
+  to enable path-style addressing(i.e., http://s3.amazonaws.com/BUCKET/KEY) when connecting to an S3-compatible service
+  that lack of support for sub-domain style bucket URLs (i.e., http://BUCKET.s3.amazonaws.com/KEY). Optional.
+
 ### EBS volume resizing
 
 Those parameters are grouped under the `volume` top-level key and define the

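The `s3_force_path_style` option mirrors the path-style flag of standard S3 clients. For background only, a minimal aws-sdk-go sketch of the same settings; the endpoint, region, and credentials are placeholder values, not part of this diff:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	// Placeholder endpoint and credentials, analogous to the manifest's
	// s3_endpoint, s3_access_key_id and s3_secret_access_key options.
	sess, err := session.NewSession(&aws.Config{
		Region:      aws.String("us-east-1"),
		Endpoint:    aws.String("https://s3.acme.org"),
		Credentials: credentials.NewStaticCredentials("ACCESS_KEY", "SECRET_KEY", ""),
		// Address objects as https://s3.acme.org/BUCKET/KEY rather than
		// the sub-domain form https://BUCKET.s3.acme.org/KEY.
		S3ForcePathStyle: aws.Bool(true),
	})
	if err != nil {
		panic(err)
	}
	svc := s3.New(sess)
	fmt.Println(svc.Endpoint)
}
```

Path-style addressing matters for S3-compatible services that serve every bucket from a single hostname, which is exactly the case these new manifest options target.
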
@@ -32,7 +32,7 @@ configuration.
 kubectl create -f manifests/postgresql-operator-default-configuration.yaml
 kubectl get operatorconfigurations postgresql-operator-default-configuration -o yaml
 ```
-Note that the operator first registers the CRD of the `OperatorConfiguration`
+Note that the operator first attempts to register the CRD of the `OperatorConfiguration`
 and then waits for an instance to be created. In between these two event the
 operator pod may be failing since it cannot fetch the not-yet-existing
 `OperatorConfiguration` instance.

docs/user.md (27 lines changed)

@@ -41,6 +41,15 @@ $ kubectl create -f manifests/minimal-postgres-manifest.yaml
 $ kubectl get pods -w --show-labels
 ```
 
+## Give K8S users access to create/list postgresqls
+
+```bash
+$ kubectl create -f manifests/user-facing-clusterroles.yaml
+```
+
+Creates zalando-postgres-operator:users:view, :edit and :admin clusterroles that are
+aggregated into the default roles.
+
 ## Connect to PostgreSQL
 
 With a `port-forward` on one of the database pods (e.g. the master) you can

@@ -254,6 +263,24 @@ metadata:
 Note that timezone is required for `timestamp`. Otherwise, offset is relative
 to UTC, see [RFC 3339 section 5.6](https://www.ietf.org/rfc/rfc3339.txt).
 
+For non AWS S3 following settings can be set to support cloning from other S3 implementations:
+
+```yaml
+apiVersion: "acid.zalan.do/v1"
+kind: postgresql
+metadata:
+  name: acid-test-cluster
+spec:
+  clone:
+    uid: "efd12e58-5786-11e8-b5a7-06148230260c"
+    cluster: "acid-batman"
+    timestamp: "2017-12-19T12:40:33+01:00"
+    s3_endpoint: https://s3.acme.org
+    s3_access_key_id: 0123456789abcdef0123456789abcdef
+    s3_secret_access_key: 0123456789abcdef0123456789abcdef
+    s3_force_path_style: true
+```
+
 ## Sidecar Support
 
 Each cluster can specify arbitrary sidecars to run. These containers could be used for

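To see why the docs insist on a timezone offset for the clone `timestamp`, here is an illustrative sketch with Go's `time` package (not part of this diff): the RFC 3339 layout rejects a bare local timestamp outright.

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// The manifest example above carries an explicit +01:00 offset;
	// RFC 3339 requires one, and time.RFC3339 enforces it.
	ts, err := time.Parse(time.RFC3339, "2017-12-19T12:40:33+01:00")
	if err != nil {
		panic(err)
	}
	fmt.Println(ts.UTC()) // 2017-12-19 11:40:33 +0000 UTC

	// Without an offset the parse fails, which is the ambiguity
	// the documentation warns about.
	_, err = time.Parse(time.RFC3339, "2017-12-19T12:40:33")
	fmt.Println(err != nil) // true
}
```
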
@@ -11,6 +11,7 @@ spec:
   teamId: "ACID"
   volume:
     size: 1Gi
+    #storageClass: my-sc
   numberOfInstances: 2
   users:  #Application/Robot users
     zalando:

@@ -46,13 +47,13 @@ spec:
     pg_hba:
     - hostssl all all 0.0.0.0/0 md5
     - host    all all 0.0.0.0/0 md5
-    slots:
-      permanent_physical_1:
-        type: physical
-      permanent_logical_1:
-        type: logical
-        database: foo
-        plugin: pgoutput
+    #slots:
+    #  permanent_physical_1:
+    #    type: physical
+    #  permanent_logical_1:
+    #    type: logical
+    #    database: foo
+    #    plugin: pgoutput
     ttl: 30
     loop_wait: &loop_wait 10
     retry_timeout: 10

@@ -21,6 +21,10 @@ spec:
         limits:
           cpu: 2000m
           memory: 500Mi
+      securityContext:
+        runAsUser: 1000
+        runAsNonRoot: true
+        readOnlyRootFilesystem: true
       env:
       # provided additional ENV vars can overwrite individual config map entries
       - name: CONFIG_MAP_NAME

@@ -0,0 +1,51 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  labels:
+    rbac.authorization.k8s.io/aggregate-to-admin: "true"
+  name: zalando-postgres-operator:users:admin
+rules:
+- apiGroups:
+  - acid.zalan.do
+  resources:
+  - postgresqls
+  - postgresqls/status
+  verbs:
+  - "*"
+
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  labels:
+    rbac.authorization.k8s.io/aggregate-to-edit: "true"
+  name: zalando-postgres-operator:users:edit
+rules:
+- apiGroups:
+  - acid.zalan.do
+  resources:
+  - postgresqls
+  verbs:
+  - create
+  - update
+  - patch
+  - delete
+
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  labels:
+    rbac.authorization.k8s.io/aggregate-to-view: "true"
+  name: zalando-postgres-operator:users:view
+rules:
+- apiGroups:
+  - acid.zalan.do
+  resources:
+  - postgresqls
+  - postgresqls/status
+  verbs:
+  - get
+  - list
+  - watch
+

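One way to verify that these aggregated roles took effect is an access review from inside the cluster. A sketch, not part of this diff; the in-cluster config and the pre-1.17 client-go `Create` signature (no context argument) are assumptions matching the era of this codebase:

```go
package main

import (
	"fmt"

	authorizationv1 "k8s.io/api/authorization/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

func main() {
	cfg, err := rest.InClusterConfig()
	if err != nil {
		panic(err)
	}
	clientset, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}

	// Ask the API server whether the current identity may create
	// postgresqls, which the :edit and :admin roles above should allow.
	sar := &authorizationv1.SelfSubjectAccessReview{
		Spec: authorizationv1.SelfSubjectAccessReviewSpec{
			ResourceAttributes: &authorizationv1.ResourceAttributes{
				Group:    "acid.zalan.do",
				Resource: "postgresqls",
				Verb:     "create",
			},
		},
	}
	res, err := clientset.AuthorizationV1().SelfSubjectAccessReviews().Create(sar)
	if err != nil {
		panic(err)
	}
	fmt.Println("allowed:", res.Status.Allowed)
}
```
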
@@ -115,10 +115,14 @@ type Patroni struct {
 
 // CloneDescription describes which cluster the new should clone and up to which point in time
 type CloneDescription struct {
-	ClusterName  string `json:"cluster,omitempty"`
-	UID          string `json:"uid,omitempty"`
-	EndTimestamp string `json:"timestamp,omitempty"`
-	S3WalPath    string `json:"s3_wal_path,omitempty"`
+	ClusterName       string `json:"cluster,omitempty"`
+	UID               string `json:"uid,omitempty"`
+	EndTimestamp      string `json:"timestamp,omitempty"`
+	S3WalPath         string `json:"s3_wal_path,omitempty"`
+	S3Endpoint        string `json:"s3_endpoint,omitempty"`
+	S3AccessKeyId     string `json:"s3_access_key_id,omitempty"`
+	S3SecretAccessKey string `json:"s3_secret_access_key,omitempty"`
+	S3ForcePathStyle  *bool  `json:"s3_force_path_style,omitempty" defaults:"false"`
 }
 
 // Sidecar defines a container to be run in the same pod as the Postgres container.

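The new fields round-trip through the `clone` section of the manifest shown earlier. A standalone sketch with a local copy of the struct (the real type lives in the acid.zalan.do/v1 package):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Local copy of the struct above, for illustration only.
type CloneDescription struct {
	ClusterName       string `json:"cluster,omitempty"`
	UID               string `json:"uid,omitempty"`
	EndTimestamp      string `json:"timestamp,omitempty"`
	S3WalPath         string `json:"s3_wal_path,omitempty"`
	S3Endpoint        string `json:"s3_endpoint,omitempty"`
	S3AccessKeyId     string `json:"s3_access_key_id,omitempty"`
	S3SecretAccessKey string `json:"s3_secret_access_key,omitempty"`
	S3ForcePathStyle  *bool  `json:"s3_force_path_style,omitempty"`
}

func main() {
	raw := []byte(`{
		"cluster": "acid-batman",
		"timestamp": "2017-12-19T12:40:33+01:00",
		"s3_endpoint": "https://s3.acme.org",
		"s3_force_path_style": true
	}`)

	var clone CloneDescription
	if err := json.Unmarshal(raw, &clone); err != nil {
		panic(err)
	}
	// The pointer distinguishes "not set" (nil) from an explicit false,
	// which is why S3ForcePathStyle is *bool rather than bool.
	fmt.Println(clone.S3Endpoint, *clone.S3ForcePathStyle)
}
```
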
@@ -61,12 +61,12 @@ var cloneClusterDescriptions = []struct {
 	in  *CloneDescription
 	err error
 }{
-	{&CloneDescription{"foo+bar", "", "NotEmpty", ""}, nil},
-	{&CloneDescription{"foo+bar", "", "", ""},
+	{&CloneDescription{"foo+bar", "", "NotEmpty", "", "", "", "", nil}, nil},
+	{&CloneDescription{"foo+bar", "", "", "", "", "", "", nil},
 		errors.New(`clone cluster name must confirm to DNS-1035, regex used for validation is "^[a-z]([-a-z0-9]*[a-z0-9])?$"`)},
-	{&CloneDescription{"foobar123456789012345678901234567890123456789012345678901234567890", "", "", ""},
+	{&CloneDescription{"foobar123456789012345678901234567890123456789012345678901234567890", "", "", "", "", "", "", nil},
 		errors.New("clone cluster name must be no longer than 63 characters")},
-	{&CloneDescription{"foobar", "", "", ""}, nil},
+	{&CloneDescription{"foobar", "", "", "", "", "", "", nil}, nil},
 }
 
 var maintenanceWindows = []struct {

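The churn in these fixtures is a side effect of positional composite literals, which must list every field and so break whenever one is added. With named fields only the relevant values appear; a sketch against an abbreviated local copy of the struct:

```go
package main

import "fmt"

// Abbreviated local copy of CloneDescription, for illustration only.
type CloneDescription struct {
	ClusterName       string
	UID               string
	EndTimestamp      string
	S3WalPath         string
	S3Endpoint        string
	S3AccessKeyId     string
	S3SecretAccessKey string
	S3ForcePathStyle  *bool
}

func main() {
	// Named fields leave unset members zero-valued, so adding a ninth
	// field later would not require touching this literal.
	c := CloneDescription{ClusterName: "foo+bar", EndTimestamp: "NotEmpty"}
	fmt.Println(c.ClusterName, c.EndTimestamp, c.S3ForcePathStyle == nil)
}
```
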
@@ -50,6 +50,11 @@ func (in *AWSGCPConfiguration) DeepCopy() *AWSGCPConfiguration {
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *CloneDescription) DeepCopyInto(out *CloneDescription) {
 	*out = *in
+	if in.S3ForcePathStyle != nil {
+		in, out := &in.S3ForcePathStyle, &out.S3ForcePathStyle
+		*out = new(bool)
+		**out = **in
+	}
 	return
 }
 

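This explicit branch exists because a plain struct assignment copies the `*bool` header, not the bool it points to. A minimal demonstration of the aliasing it prevents, using a stand-in type:

```go
package main

import "fmt"

type cloneDesc struct {
	S3ForcePathStyle *bool
}

func main() {
	v := true
	in := cloneDesc{S3ForcePathStyle: &v}

	shallow := in // both structs now share the same *bool
	deep := cloneDesc{}
	if in.S3ForcePathStyle != nil { // mirrors the generated DeepCopyInto
		b := *in.S3ForcePathStyle
		deep.S3ForcePathStyle = &b
	}

	*in.S3ForcePathStyle = false
	fmt.Println(*shallow.S3ForcePathStyle) // false: mutated through the alias
	fmt.Println(*deep.S3ForcePathStyle)    // true: unaffected
}
```
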
@@ -464,7 +469,7 @@ func (in *PostgresSpec) DeepCopyInto(out *PostgresSpec) {
 			(*in)[i].DeepCopyInto(&(*out)[i])
 		}
 	}
-	out.Clone = in.Clone
+	in.Clone.DeepCopyInto(&out.Clone)
 	if in.Databases != nil {
 		in, out := &in.Databases, &out.Databases
 		*out = make(map[string]string, len(*in))

@@ -342,7 +342,7 @@ func (c *Cluster) compareStatefulSetWith(statefulSet *v1beta1.StatefulSet) *comp
 	if c.Statefulset.Spec.Template.Spec.ServiceAccountName != statefulSet.Spec.Template.Spec.ServiceAccountName {
 		needsReplace = true
 		needsRollUpdate = true
-		reasons = append(reasons, "new statefulset's serviceAccountName service asccount name doesn't match the current one")
+		reasons = append(reasons, "new statefulset's serviceAccountName service account name doesn't match the current one")
 	}
 	if *c.Statefulset.Spec.Template.Spec.TerminationGracePeriodSeconds != *statefulSet.Spec.Template.Spec.TerminationGracePeriodSeconds {
 		needsReplace = true

@@ -462,16 +462,16 @@ func (c *Cluster) compareContainers(description string, setA, setB []v1.Containe
 func compareResources(a *v1.ResourceRequirements, b *v1.ResourceRequirements) bool {
 	equal := true
 	if a != nil {
-		equal = compareResoucesAssumeFirstNotNil(a, b)
+		equal = compareResourcesAssumeFirstNotNil(a, b)
 	}
 	if equal && (b != nil) {
-		equal = compareResoucesAssumeFirstNotNil(b, a)
+		equal = compareResourcesAssumeFirstNotNil(b, a)
 	}
 
 	return equal
 }
 
-func compareResoucesAssumeFirstNotNil(a *v1.ResourceRequirements, b *v1.ResourceRequirements) bool {
+func compareResourcesAssumeFirstNotNil(a *v1.ResourceRequirements, b *v1.ResourceRequirements) bool {
 	if b == nil || (len(b.Requests) == 0) {
 		return len(a.Requests) == 0
 	}

@@ -884,7 +884,7 @@ func (c *Cluster) initInfrastructureRoles() error {
 	return nil
 }
 
-// resolves naming conflicts between existing and new roles by chosing either of them.
+// resolves naming conflicts between existing and new roles by choosing either of them.
 func (c *Cluster) resolveNameConflict(currentRole, newRole *spec.PgUser) spec.PgUser {
 	var result spec.PgUser
 	if newRole.Origin >= currentRole.Origin {

@@ -978,7 +978,7 @@ func (c *Cluster) Switchover(curMaster *v1.Pod, candidate spec.NamespacedName) e
 	// signal the role label waiting goroutine to close the shop and go home
 	close(stopCh)
 	// wait until the goroutine terminates, since unregisterPodSubscriber
-	// must be called before the outer return; otherwsise we risk subscribing to the same pod twice.
+	// must be called before the outer return; otherwise we risk subscribing to the same pod twice.
 	wg.Wait()
 	// close the label waiting channel no sooner than the waiting goroutine terminates.
 	close(podLabelErr)

@@ -329,7 +329,7 @@ func tolerations(tolerationsSpec *[]v1.Toleration, podToleration map[string]stri
 	return []v1.Toleration{}
 }
 
-// isBootstrapOnlyParameter checks asgainst special Patroni bootstrap parameters.
+// isBootstrapOnlyParameter checks against special Patroni bootstrap parameters.
 // Those parameters must go to the bootstrap/dcs/postgresql/parameters section.
 // See http://patroni.readthedocs.io/en/latest/dynamic_configuration.html.
 func isBootstrapOnlyParameter(param string) bool {

@@ -1266,6 +1266,29 @@ func (c *Cluster) generateCloneEnvironment(description *acidv1.CloneDescription)
 		result = append(result, v1.EnvVar{Name: "CLONE_METHOD", Value: "CLONE_WITH_WALE"})
 		result = append(result, v1.EnvVar{Name: "CLONE_TARGET_TIME", Value: description.EndTimestamp})
 		result = append(result, v1.EnvVar{Name: "CLONE_WAL_BUCKET_SCOPE_PREFIX", Value: ""})
+
+		if description.S3Endpoint != "" {
+			result = append(result, v1.EnvVar{Name: "CLONE_AWS_ENDPOINT", Value: description.S3Endpoint})
+			result = append(result, v1.EnvVar{Name: "CLONE_WALE_S3_ENDPOINT", Value: description.S3Endpoint})
+		}
+
+		if description.S3AccessKeyId != "" {
+			result = append(result, v1.EnvVar{Name: "CLONE_AWS_ACCESS_KEY_ID", Value: description.S3AccessKeyId})
+		}
+
+		if description.S3SecretAccessKey != "" {
+			result = append(result, v1.EnvVar{Name: "CLONE_AWS_SECRET_ACCESS_KEY", Value: description.S3SecretAccessKey})
+		}
+
+		if description.S3ForcePathStyle != nil {
+			s3ForcePathStyle := "0"
+
+			if *description.S3ForcePathStyle {
+				s3ForcePathStyle = "1"
+			}
+
+			result = append(result, v1.EnvVar{Name: "CLONE_AWS_S3_FORCE_PATH_STYLE", Value: s3ForcePathStyle})
+		}
 	}
 
 	return result

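The pointer-typed flag gets three distinct behaviors here: unset (variable omitted), explicit true ("1"), and explicit false ("0"). A table-style check of that conversion; the helper is a stand-in for illustration, not the operator's actual test code:

```go
package main

import "fmt"

// boolToFlag mirrors the conversion in generateCloneEnvironment:
// nil means "omit the variable entirely", otherwise "1" or "0".
func boolToFlag(b *bool) (string, bool) {
	if b == nil {
		return "", false
	}
	if *b {
		return "1", true
	}
	return "0", true
}

func main() {
	t, f := true, false
	for _, b := range []*bool{nil, &t, &f} {
		v, set := boolToFlag(b)
		fmt.Printf("set=%v CLONE_AWS_S3_FORCE_PATH_STYLE=%q\n", set, v)
	}
}
```
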
@@ -1372,7 +1395,7 @@ func (c *Cluster) generateLogicalBackupJob() (*batchv1beta1.CronJob, error) {
 		return nil, fmt.Errorf("could not generate pod template for logical backup pod: %v", err)
 	}
 
-	// overwrite specifc params of logical backups pods
+	// overwrite specific params of logical backups pods
 	podTemplate.Spec.Affinity = &podAffinity
 	podTemplate.Spec.RestartPolicy = "Never" // affects containers within a pod
 

@@ -361,7 +361,7 @@ func (c *Cluster) updateService(role PostgresRole, newService *v1.Service) error
 	// TODO: check if it possible to change the service type with a patch in future versions of Kubernetes
 	if newService.Spec.Type != c.Services[role].Spec.Type {
 		// service type has changed, need to replace the service completely.
-		// we cannot use just pach the current service, since it may contain attributes incompatible with the new type.
+		// we cannot use just patch the current service, since it may contain attributes incompatible with the new type.
 		var (
 			currentEndpoint *v1.Endpoints
 			err             error

@@ -369,7 +369,7 @@ func (c *Cluster) updateService(role PostgresRole, newService *v1.Service) error
 
 		if role == Master {
 			// for the master service we need to re-create the endpoint as well. Get the up-to-date version of
-			// the addresses stored in it before the service is deleted (deletion of the service removes the endpooint)
+			// the addresses stored in it before the service is deleted (deletion of the service removes the endpoint)
 			currentEndpoint, err = c.KubeClient.Endpoints(c.Namespace).Get(c.endpointName(role), metav1.GetOptions{})
 			if err != nil {
 				return fmt.Errorf("could not get current cluster %s endpoints: %v", role, err)

@@ -197,25 +197,25 @@ func (c *Controller) initRoleBinding() {
 	// operator binds it to the cluster role with sufficient privileges
 	// we assume the role is created by the k8s administrator
 	if c.opConfig.PodServiceAccountRoleBindingDefinition == "" {
-		c.opConfig.PodServiceAccountRoleBindingDefinition = `
+		c.opConfig.PodServiceAccountRoleBindingDefinition = fmt.Sprintf(`
 		{
 			"apiVersion": "rbac.authorization.k8s.io/v1beta1",
 			"kind": "RoleBinding",
 			"metadata": {
-				"name": "zalando-postgres-operator"
+				"name": "%s"
 			},
 			"roleRef": {
 				"apiGroup": "rbac.authorization.k8s.io",
 				"kind": "ClusterRole",
-				"name": "zalando-postgres-operator"
+				"name": "%s"
 			},
 			"subjects": [
 				{
 					"kind": "ServiceAccount",
-					"name": "operator"
+					"name": "%s"
 				}
 			]
-		}`
+		}`, c.PodServiceAccount.Name, c.PodServiceAccount.Name, c.PodServiceAccount.Name)
 	}
 	c.logger.Info("Parse role bindings")
 	// re-uses k8s internal parsing. See k8s client-go issue #193 for explanation

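With the three `%s` placeholders, all names in the default role binding now track the configured pod service account instead of being hard-coded. A runnable sketch of the rendering; the name "operator" is just an example value:

```go
package main

import "fmt"

func main() {
	const tmpl = `
	{
		"apiVersion": "rbac.authorization.k8s.io/v1beta1",
		"kind": "RoleBinding",
		"metadata": {"name": "%s"},
		"roleRef": {
			"apiGroup": "rbac.authorization.k8s.io",
			"kind": "ClusterRole",
			"name": "%s"
		},
		"subjects": [{"kind": "ServiceAccount", "name": "%s"}]
	}`
	// Stand-in for c.PodServiceAccount.Name in the controller.
	name := "operator"
	fmt.Printf(tmpl, name, name, name)
}
```
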
@@ -230,9 +230,6 @@ func (c *Controller) initRoleBinding() {
 	default:
 		c.PodServiceAccountRoleBinding = obj.(*rbacv1beta1.RoleBinding)
 		c.PodServiceAccountRoleBinding.Namespace = ""
-		c.PodServiceAccountRoleBinding.ObjectMeta.Name = c.PodServiceAccount.Name
-		c.PodServiceAccountRoleBinding.RoleRef.Name = c.PodServiceAccount.Name
-		c.PodServiceAccountRoleBinding.Subjects[0].Name = c.PodServiceAccount.Name
 		c.logger.Info("successfully parsed")
 
 	}

@@ -51,17 +51,18 @@ func (c *Controller) clusterWorkerID(clusterName spec.NamespacedName) uint32 {
 
 func (c *Controller) createOperatorCRD(crd *apiextv1beta1.CustomResourceDefinition) error {
 	if _, err := c.KubeClient.CustomResourceDefinitions().Create(crd); err != nil {
-		if !k8sutil.ResourceAlreadyExists(err) {
-			return fmt.Errorf("could not create customResourceDefinition: %v", err)
-		}
-		c.logger.Infof("customResourceDefinition %q is already registered and will only be updated", crd.Name)
-
-		patch, err := json.Marshal(crd)
-		if err != nil {
-			return fmt.Errorf("could not marshal new customResourceDefintion: %v", err)
-		}
-		if _, err := c.KubeClient.CustomResourceDefinitions().Patch(crd.Name, types.MergePatchType, patch); err != nil {
-			return fmt.Errorf("could not update customResourceDefinition: %v", err)
+		if k8sutil.ResourceAlreadyExists(err) {
+			c.logger.Infof("customResourceDefinition %q is already registered and will only be updated", crd.Name)
+
+			patch, err := json.Marshal(crd)
+			if err != nil {
+				return fmt.Errorf("could not marshal new customResourceDefintion: %v", err)
+			}
+			if _, err := c.KubeClient.CustomResourceDefinitions().Patch(crd.Name, types.MergePatchType, patch); err != nil {
+				return fmt.Errorf("could not update customResourceDefinition: %v", err)
+			}
+		} else {
+			c.logger.Errorf("could not create customResourceDefinition %q: %v", crd.Name, err)
 		}
 	} else {
 		c.logger.Infof("customResourceDefinition %q has been registered", crd.Name)

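This restructured flow is the usual create-then-patch idempotency pattern behind the "first attempts to register the CRD" wording in the docs hunk above. A generic sketch using apimachinery's error helpers; `k8sutil.ResourceAlreadyExists` in the diff is presumably a thin wrapper over the same check:

```go
package main

import (
	"fmt"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

// createOrPatch sketches the control flow above: try to create, and when
// the object already exists, fall back to patching instead of failing.
func createOrPatch(create, patch func() error) error {
	if err := create(); err != nil {
		if apierrors.IsAlreadyExists(err) {
			return patch()
		}
		return fmt.Errorf("could not create resource: %v", err)
	}
	return nil
}

func main() {
	gr := schema.GroupResource{Group: "apiextensions.k8s.io", Resource: "customresourcedefinitions"}
	create := func() error { return apierrors.NewAlreadyExists(gr, "postgresqls.acid.zalan.do") }
	patch := func() error { fmt.Println("patching existing CRD"); return nil }
	fmt.Println(createOrPatch(create, patch)) // patching existing CRD, then <nil>
}
```
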