Multiple fixes

- update docs to be more descriptive and consistent with terminology
- Remove name and uid for the standby cluster
- Error out in case empty wal-path is provided for standby cluster
This commit is contained in:
Rafia Sabih 2019-06-13 12:27:19 +02:00
parent 742a43a879
commit f16f24d3b4
7 changed files with 32 additions and 28 deletions

View File

@ -176,7 +176,7 @@ explanation of `ttl` and `loop_wait` parameters.
permanent replication slots that Patroni preserves after failover by re-creating them on the new primary immediately after doing a promote. Slots could be reconfigured with the help of `patronictl edit-config`. It is the responsibility of a user to avoid clashes in names between replication slots automatically created by Patroni for cluster members and permanent replication slots. Optional.
* **standby_cluster**
initializes cluster as a standby creating a cascading replication, where elected master is streaming from specified remote location
initializes the cluster as a standby, creating a cascading replication where the standby leader streams from the specified remote location
## Postgres container resources

View File

@ -256,15 +256,20 @@ to UTC, see [RFC 3339 section 5.6) 3339 section 5.6](https://www.ietf.org/rfc/rf
## Setting up a standby cluster
Standby clusters are like normal cluster but they are streaming from a remote master. Patroni supports it, [read this](https://github.com/zalando/patroni/blob/bd2c54581abb42a7d3a3da551edf0b8732eefd27/docs/replica_bootstrap.rst#standby-cluster) to know more about them. Currently operator supports setting up standby only through an S3 bucket. For this you need to add a section standby in the YAML file as follows.
Standby clusters are like normal clusters, but they stream from a remote cluster. Patroni supports this; [read this](https://github.com/zalando/patroni/blob/bd2c54581abb42a7d3a3da551edf0b8732eefd27/docs/replica_bootstrap.rst#standby-cluster) to learn more about them.
In the first version of this feature, the only scenario covered by the operator is streaming from the WAL archive of the master. Following the popular infrastructure of Amazon's S3 buckets, it is specified as s3_wal_path here. To make a cluster a standby, add a standby section in the YAML file as follows.
```yaml
spec:
standby:
uid: "UID of the master cluster"
cluster: "Name of the master cluster"
s3_wal_path: "s3 bucket path to the master"
```
Things to note:
- Providing an empty string in s3_wal_path of the standby cluster will result in an error and no statefulset being created.
- If the standby is no longer needed, simply remove the standby_cluster section from Patroni via patronictl edit-config.
- There is no way to transform a non-standby cluster into a standby cluster through the operator. Hence, if a cluster is created without a standby section in the YAML and that section is added later, there will be no effect on the cluster. However, it can be done through Patroni by adding the standby_cluster section using patronictl edit-config. Note that the transformed standby cluster will not do any streaming; it will just be in standby mode and allow read-only transactions only.
## Sidecar Support

View File

@ -64,11 +64,9 @@ spec:
# cluster: "acid-batman"
# timestamp: "2017-12-19T12:40:33+01:00" # timezone required (offset relative to UTC, see RFC 3339 section 5.6)
# s3_wal_path: "s3://custom/path/to/bucket"
# Make this a standby cluster and mention the information of source cluster from where to do continuous streaming.
# Make this a standby cluster and provide the s3 bucket path of source cluster for continuous streaming.
# standby:
# uid: "uid of the master"
# cluster: "team-standby"
# s3_wal_path: "s3://custom/path/to/bucket"
# s3_wal_path: "s3://path/to/bucket/containing/wal/of/source/cluster/"
maintenanceWindows:
- 01:00-06:00 #UTC
- Sat:00:00-04:00

View File

@ -54,7 +54,7 @@ type PostgresSpec struct {
InitContainers []v1.Container `json:"init_containers,omitempty"`
PodPriorityClassName string `json:"pod_priority_class_name,omitempty"`
ShmVolume *bool `json:"enableShmVolume,omitempty"`
StandbyCluster StandbyDescription `json:"standby"`
StandbyCluster *StandbyDescription `json:"standby"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
@ -112,9 +112,7 @@ type Patroni struct {
// StandbyDescription configures a cluster as a standby of a remote
// source cluster. Per this commit's message, only s3_wal_path (the S3
// bucket path to the source cluster's WAL archive) is used going
// forward; cluster name and uid are removed.
// NOTE(review): this is a rendered diff — the ClusterName, UID, and
// first S3WalPath fields below are the lines removed by this commit;
// the final S3WalPath line is the one that remains. Verify against the
// actual file before editing.
type StandbyDescription struct {
	ClusterName string `json:"cluster,omitempty"`
	UID string `json:"uid,omitempty"`
	S3WalPath string `json:"s3_wal_path,omitempty"`
	S3WalPath string `json:"s3_wal_path,omitempty"`
}
// CloneDescription describes which cluster the new should clone and up to which point in time

View File

@ -354,7 +354,7 @@ var unmarshalCluster = []struct {
err: nil},
// standby example
{
in: []byte(`{"kind": "Postgresql","apiVersion": "acid.zalan.do/v1","metadata": {"name": "acid-testcluster1"}, "spec": {"teamId": "acid", "standby": {"cluster": "team-batman"}}}`),
in: []byte(`{"kind": "Postgresql","apiVersion": "acid.zalan.do/v1","metadata": {"name": "acid-testcluster1"}, "spec": {"teamId": "acid", "standby": {"s3_wal_path": "s3://custom/path/to/bucket/"}}}`),
out: Postgresql{
TypeMeta: metav1.TypeMeta{
Kind: "Postgresql",
@ -365,14 +365,14 @@ var unmarshalCluster = []struct {
},
Spec: PostgresSpec{
TeamID: "acid",
StandbyCluster: StandbyDescription{
ClusterName: "team-batman",
StandbyCluster: &StandbyDescription{
S3WalPath: "s3://custom/path/to/bucket/",
},
ClusterName: "testcluster1",
},
Error: "",
},
marshal: []byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"acid-testcluster1","creationTimestamp":null},"spec":{"postgresql":{"version":"","parameters":null},"volume":{"size":"","storageClass":""},"patroni":{"initdb":null,"pg_hba":null,"ttl":0,"loop_wait":0,"retry_timeout":0,"maximum_lag_on_failover":0,"slots":null},"resources":{"requests":{"cpu":"","memory":""},"limits":{"cpu":"","memory":""}},"teamId":"acid","allowedSourceRanges":null,"numberOfInstances":0,"users":null,"standby":{"cluster":"team-batman"}},"status":{"PostgresClusterStatus":""}}`),
marshal: []byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"acid-testcluster1","creationTimestamp":null},"spec":{"postgresql":{"version":"","parameters":null},"volume":{"size":"","storageClass":""},"patroni":{"initdb":null,"pg_hba":null,"ttl":0,"loop_wait":0,"retry_timeout":0,"maximum_lag_on_failover":0,"slots":null},"resources":{"requests":{"cpu":"","memory":""},"limits":{"cpu":"","memory":""}},"teamId":"acid","allowedSourceRanges":null,"numberOfInstances":0,"users":null,"standby":{"s3_wal_path":"s3://custom/path/to/bucket/"}},"status":{"PostgresClusterStatus":""}}`),
err: nil},
// erroneous examples
{

View File

@ -466,7 +466,11 @@ func (in *PostgresSpec) DeepCopyInto(out *PostgresSpec) {
*out = new(bool)
**out = **in
}
out.StandbyCluster = in.StandbyCluster
if in.StandbyCluster != nil {
in, out := &in.StandbyCluster, &out.StandbyCluster
*out = new(StandbyDescription)
**out = **in
}
return
}

View File

@ -588,7 +588,7 @@ func (c *Cluster) generateSpiloPodEnvVars(uid types.UID, spiloConfiguration stri
envVars = append(envVars, c.generateCloneEnvironment(cloneDescription)...)
}
if standbyDescription.ClusterName != "" {
if c.Spec.StandbyCluster != nil {
envVars = append(envVars, c.generateStandbyEnvironment(standbyDescription)...)
}
@ -781,13 +781,16 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*v1beta1.State
sort.Slice(customPodEnvVarsList,
func(i, j int) bool { return customPodEnvVarsList[i].Name < customPodEnvVarsList[j].Name })
}
if spec.StandbyCluster != nil && spec.StandbyCluster.S3WalPath == "" {
return nil, fmt.Errorf("s3_wal_path is empty for standby cluster")
}
spiloConfiguration := generateSpiloJSONConfiguration(&spec.PostgresqlParam, &spec.Patroni, c.OpConfig.PamRoleName, c.logger)
// generate environment variables for the spilo container
spiloEnvVars := deduplicateEnvVars(
c.generateSpiloPodEnvVars(c.Postgresql.GetUID(), spiloConfiguration, &spec.Clone,
&spec.StandbyCluster, customPodEnvVarsList), c.containerName(), c.logger)
spec.StandbyCluster, customPodEnvVarsList), c.containerName(), c.logger)
// pickup the docker image for the spilo container
effectiveDockerImage := util.Coalesce(spec.DockerImage, c.OpConfig.DockerImage)
@ -1261,12 +1264,8 @@ func (c *Cluster) generateStandbyEnvironment(description *acidv1.StandbyDescript
result := make([]v1.EnvVar, 0)
if description.S3WalPath == "" {
return result
return nil
}
cluster := description.ClusterName
result = append(result, v1.EnvVar{Name: "STANDBY_SCOPE", Value: cluster})
// standby with S3, find out the bucket to setup standby
msg := "Standby from S3 bucket"
c.logger.Info(msg, description.S3WalPath)
@ -1274,10 +1273,10 @@ func (c *Cluster) generateStandbyEnvironment(description *acidv1.StandbyDescript
msg = "Use custom parsed S3WalPath %s from the manifest"
c.logger.Warningf(msg, description.S3WalPath)
result = append(result, v1.EnvVar{
Name: "STANDBY_WALE_S3_PREFIX",
Value: description.S3WalPath,
})
result = append(result, v1.EnvVar{
Name: "STANDBY_WALE_S3_PREFIX",
Value: description.S3WalPath,
})
result = append(result, v1.EnvVar{Name: "STANDBY_METHOD", Value: "STANDBY_WITH_WALE"})
result = append(result, v1.EnvVar{Name: "STANDBY_WAL_BUCKET_SCOPE_PREFIX", Value: ""})