Merge branch 'master' into pending_rolling_updates

Oleksii Kliukin 2018-05-03 11:27:16 +02:00
commit 43a1db2128
13 changed files with 295 additions and 92 deletions

View File

@ -87,7 +87,16 @@ By default, the operator watches the namespace it is deployed to. You can change
Note that for an operator to manage pods in the watched namespace, the operator's service account (as specified in the operator deployment manifest) has to have appropriate privileges to access the watched namespace. The operator may not be able to function if it watches all namespaces but lacks access rights to any of them (except Kubernetes system namespaces like `kube-system`). The reason is that operations spanning multiple namespaces, such as 'list pods', execute at the cluster scope and fail at the first violation of access rights.
The watched namespace also needs to have a (possibly different) service account in the case database pods need to talk to the Kubernetes API (e.g. when using Kubernetes-native configuration of Patroni).
The watched namespace also needs to have a (possibly different) service account in case database pods need to talk to the Kubernetes API (e.g. when using the Kubernetes-native configuration of Patroni). The operator checks that the `pod_service_account_name` exists in the target namespace and, if it does not, deploys the `pod_service_account_definition` from the operator [`Config`](pkg/util/config/config.go) there, with the default value of:
```yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: operator
```
In this definition, the operator overwrites the account's name to match `pod_service_account_name` and sets the namespace to match the target namespace. The operator performs **no** further syncing of this account.
### Create ConfigMap

View File

@ -49,7 +49,7 @@ spec:
# with an empty/absent timestamp, clone from an existing alive cluster using pg_basebackup
# clone:
# cluster: "acid-batman"
# endTimestamp: "2017-12-19T12:40:33+01:00" # timezone required (offset relative to UTC, see RFC 3339 section 5.6)
# timestamp: "2017-12-19T12:40:33+01:00" # timezone required (offset relative to UTC, see RFC 3339 section 5.6)
maintenanceWindows:
- 01:00-06:00 #UTC
- Sat:00:00-04:00

View File

@ -7,7 +7,6 @@ data:
# if neither is set or evaluates to the empty string, listen to the operator's own namespace
# if set to the "*", listen to all namespaces
# watched_namespace: development
service_account_name: operator
cluster_labels: application:spilo
cluster_name_label: version
pod_role_label: spilo-role

View File

@ -42,6 +42,7 @@ type Config struct {
OpConfig config.Config
RestConfig *rest.Config
InfrastructureRoles map[string]spec.PgUser // inherited from the controller
PodServiceAccount *v1.ServiceAccount
}
type kubeResources struct {
@ -196,6 +197,39 @@ func (c *Cluster) initUsers() error {
return nil
}
/*
Ensures that the service account required by StatefulSets to create pods exists in the namespace before a PG cluster is created there, so that a user does not have to deploy the account manually.
The operator does not sync these accounts after creation.
*/
func (c *Cluster) createPodServiceAccounts() error {
podServiceAccountName := c.Config.OpConfig.PodServiceAccountName
_, err := c.KubeClient.ServiceAccounts(c.Namespace).Get(podServiceAccountName, metav1.GetOptions{})
if err != nil {
c.setProcessName(fmt.Sprintf("creating pod service account in the namespace %v", c.Namespace))
c.logger.Infof("the pod service account %q cannot be retrieved in the namespace %q. Trying to deploy the account.", podServiceAccountName, c.Namespace)
// get a separate copy of service account
// to prevent a race condition when setting a namespace for many clusters
sa := *c.PodServiceAccount
_, err = c.KubeClient.ServiceAccounts(c.Namespace).Create(&sa)
if err != nil {
return fmt.Errorf("cannot deploy the pod service account %q defined in the config map to the %q namespace: %v", podServiceAccountName, c.Namespace, err)
}
c.logger.Infof("successfully deployed the pod service account %q to the %q namespace", podServiceAccountName, c.Namespace)
} else {
c.logger.Infof("successfully found the service account %q used to create pods to the namespace %q", podServiceAccountName, c.Namespace)
}
return nil
}
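
Note that any error from the Get call above, not just "not found", takes the creation path. A minimal sketch of a stricter variant that distinguishes a genuine NotFound from transient API failures; this assumes the apimachinery error helpers and a clientset exposing `CoreV1()`, and `ensureServiceAccount` is a hypothetical helper, not part of this commit:

```go
package ensure // a sketch, not part of this commit

import (
	"fmt"

	k8serrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/pkg/api/v1"
)

// ensureServiceAccount distinguishes "absent" from other Get failures instead
// of treating every error as a missing account.
func ensureServiceAccount(client kubernetes.Interface, namespace string, template *v1.ServiceAccount) error {
	_, err := client.CoreV1().ServiceAccounts(namespace).Get(template.Name, metav1.GetOptions{})
	switch {
	case err == nil:
		return nil // already present, nothing to do
	case k8serrors.IsNotFound(err):
		sa := *template // copy the shared template before mutating it
		sa.Namespace = namespace
		_, err = client.CoreV1().ServiceAccounts(namespace).Create(&sa)
		return err
	default:
		// permission or connectivity problems should not be masked as "absent"
		return fmt.Errorf("could not check for service account %q: %v", template.Name, err)
	}
}
```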
// Create creates the new kubernetes objects associated with the cluster.
func (c *Cluster) Create() error {
c.mu.Lock()
@ -259,6 +293,11 @@ func (c *Cluster) Create() error {
}
c.logger.Infof("pod disruption budget %q has been successfully created", util.NameFromMeta(pdb.ObjectMeta))
if err = c.createPodServiceAccounts(); err != nil {
return fmt.Errorf("could not create pod service account %v : %v", c.OpConfig.PodServiceAccountName, err)
}
c.logger.Infof("pod service accounts have been successfully synced")
if c.Statefulset != nil {
return fmt.Errorf("statefulset already exists in the cluster")
}
@ -822,6 +861,7 @@ func (c *Cluster) GetStatus() *spec.ClusterStatus {
// ManualFailover does manual failover to a candidate pod
func (c *Cluster) ManualFailover(curMaster *v1.Pod, candidate spec.NamespacedName) error {
c.logger.Debugf("failing over from %q to %q", curMaster.Name, candidate)
podLabelErr := make(chan error)
stopCh := make(chan struct{})
defer close(podLabelErr)
@ -832,11 +872,12 @@ func (c *Cluster) ManualFailover(curMaster *v1.Pod, candidate spec.NamespacedNam
role := Master
_, err := c.waitForPodLabel(ch, &role)
select {
case <-stopCh:
case podLabelErr <- err:
case podLabelErr <- func() error {
_, err := c.waitForPodLabel(ch, stopCh, &role)
return err
}():
}
}()
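
The reworked goroutine evaluates `waitForPodLabel` inside the send case, so that closing `stopCh` can abandon the wait without leaking a goroutine blocked on the channel send. A self-contained sketch of the same cancellable-wait pattern (illustrative names only, not the operator's API):

```go
package main

import (
	"fmt"
	"time"
)

// waitCancellable runs op in a goroutine and lets the caller abandon the wait
// by closing stop. The inner select guarantees the goroutine never blocks
// forever on the result send. Note that op itself still runs to completion;
// in the operator, waitForPodLabel additionally receives stopCh so the wait
// inside op can end early too.
func waitCancellable(op func() error, stop <-chan struct{}) error {
	result := make(chan error)
	go func() {
		select {
		case <-stop: // caller gave up; drop the result
		case result <- op():
		}
	}()
	select {
	case <-stop:
		return fmt.Errorf("wait cancelled")
	case err := <-result:
		return err
	}
}

func main() {
	stop := make(chan struct{})
	go func() { time.Sleep(10 * time.Millisecond); close(stop) }()
	err := waitCancellable(func() error { time.Sleep(time.Second); return nil }, stop)
	fmt.Println(err) // wait cancelled
}
```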

View File

@ -360,10 +360,16 @@ func (c *Cluster) generatePodTemplate(
}
if c.OpConfig.WALES3Bucket != "" {
envVars = append(envVars, v1.EnvVar{Name: "WAL_S3_BUCKET", Value: c.OpConfig.WALES3Bucket})
envVars = append(envVars, v1.EnvVar{Name: "WAL_BUCKET_SCOPE_SUFFIX", Value: getWALBucketScopeSuffix(string(uid))})
envVars = append(envVars, v1.EnvVar{Name: "WAL_BUCKET_SCOPE_SUFFIX", Value: getBucketScopeSuffix(string(uid))})
envVars = append(envVars, v1.EnvVar{Name: "WAL_BUCKET_SCOPE_PREFIX", Value: ""})
}
if c.OpConfig.LogS3Bucket != "" {
envVars = append(envVars, v1.EnvVar{Name: "LOG_S3_BUCKET", Value: c.OpConfig.LogS3Bucket})
envVars = append(envVars, v1.EnvVar{Name: "LOG_BUCKET_SCOPE_SUFFIX", Value: getBucketScopeSuffix(string(uid))})
envVars = append(envVars, v1.EnvVar{Name: "LOG_BUCKET_SCOPE_PREFIX", Value: ""})
}
if c.patroniUsesKubernetes() {
envVars = append(envVars, v1.EnvVar{Name: "DCS_ENABLE_KUBERNETES_API", Value: "true"})
} else {
@ -435,7 +441,7 @@ func (c *Cluster) generatePodTemplate(
terminateGracePeriodSeconds := int64(c.OpConfig.PodTerminateGracePeriod.Seconds())
podSpec := v1.PodSpec{
ServiceAccountName: c.OpConfig.ServiceAccountName,
ServiceAccountName: c.OpConfig.PodServiceAccountName,
TerminationGracePeriodSeconds: &terminateGracePeriodSeconds,
Containers: []v1.Container{container},
Tolerations: c.tolerations(tolerationsSpec),
@ -504,7 +510,7 @@ func (c *Cluster) generatePodTemplate(
return &template
}
func getWALBucketScopeSuffix(uid string) string {
func getBucketScopeSuffix(uid string) string {
if uid != "" {
return fmt.Sprintf("/%s", uid)
}
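
Since the helper is now shared between the WAL and log bucket variables, both suffixes are derived identically from the cluster UID. A tiny standalone check of the behavior (the helper body is mirrored from above; the UID value is made up):

```go
package main

import "fmt"

// mirror of the helper above, for a quick behavioral check
func getBucketScopeSuffix(uid string) string {
	if uid != "" {
		return fmt.Sprintf("/%s", uid)
	}
	return ""
}

func main() {
	fmt.Println(getBucketScopeSuffix("6faf9b16")) // "/6faf9b16" (hypothetical UID)
	fmt.Println(getBucketScopeSuffix(""))         // "" (suffix omitted entirely)
}
```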
@ -701,7 +707,7 @@ func (c *Cluster) shouldCreateLoadBalancerForService(role PostgresRole, spec *sp
// `enable_load_balancer` governs LB for a master service
// there is no equivalent deprecated operator option for the replica LB
if c.OpConfig.EnableLoadBalancer != nil {
c.logger.Debugf("The operator configmap sets the deprecated `enable_load_balancer` param. Consider using the `enable_master_load_balancer` or `enable_replica_load_balancer` instead.", c.Name)
c.logger.Debugf("The operator configmap sets the deprecated `enable_load_balancer` param. Consider using the `enable_master_load_balancer` or `enable_replica_load_balancer` instead.")
return *c.OpConfig.EnableLoadBalancer
}
@ -819,7 +825,7 @@ func (c *Cluster) generateCloneEnvironment(description *spec.CloneDescription) [
result = append(result, v1.EnvVar{Name: "CLONE_METHOD", Value: "CLONE_WITH_WALE"})
result = append(result, v1.EnvVar{Name: "CLONE_WAL_S3_BUCKET", Value: c.OpConfig.WALES3Bucket})
result = append(result, v1.EnvVar{Name: "CLONE_TARGET_TIME", Value: description.EndTimestamp})
result = append(result, v1.EnvVar{Name: "CLONE_WAL_BUCKET_SCOPE_SUFFIX", Value: getWALBucketScopeSuffix(description.Uid)})
result = append(result, v1.EnvVar{Name: "CLONE_WAL_BUCKET_SCOPE_SUFFIX", Value: getBucketScopeSuffix(description.Uid)})
result = append(result, v1.EnvVar{Name: "CLONE_WAL_BUCKET_SCOPE_PREFIX", Value: ""})
}

View File

@ -149,13 +149,19 @@ func (c *Cluster) movePodFromEndOfLifeNode(pod *v1.Pod) (*v1.Pod, error) {
}
func (c *Cluster) masterCandidate(oldNodeName string) (*v1.Pod, error) {
// Wait until at least one replica pod comes up
if err := c.waitForAnyReplicaLabelReady(); err != nil {
c.logger.Warningf("could not find at least one ready replica: %v", err)
}
replicas, err := c.getRolePods(Replica)
if err != nil {
return nil, fmt.Errorf("could not get replica pods: %v", err)
}
if len(replicas) == 0 {
c.logger.Warningf("single master pod for cluster %q, migration will cause disruption of the service")
c.logger.Warningf("no available master candidates, migration will cause longer downtime of the master instance")
return nil, nil
}
@ -168,12 +174,16 @@ func (c *Cluster) masterCandidate(oldNodeName string) (*v1.Pod, error) {
}
}
}
c.logger.Debug("no available master candidates on live nodes")
c.logger.Warningf("no available master candidates on live nodes")
return &replicas[rand.Intn(len(replicas))], nil
}
// MigrateMasterPod migrates master pod via failover to a replica
func (c *Cluster) MigrateMasterPod(podName spec.NamespacedName) error {
var (
masterCandidatePod *v1.Pod
)
oldMaster, err := c.KubeClient.Pods(podName.Namespace).Get(podName.Name, metav1.GetOptions{})
if err != nil {
@ -193,11 +203,14 @@ func (c *Cluster) MigrateMasterPod(podName spec.NamespacedName) error {
c.logger.Warningf("pod %q is not a master", podName)
return nil
}
masterCandidatePod, err := c.masterCandidate(oldMaster.Spec.NodeName)
if *c.Statefulset.Spec.Replicas == 1 {
c.logger.Warningf("single master pod for cluster %q, migration will cause longer downtime of the master instance", c.clusterName())
} else {
masterCandidatePod, err = c.masterCandidate(oldMaster.Spec.NodeName)
if err != nil {
return fmt.Errorf("could not get new master candidate: %v", err)
}
}
// there are two cases for each postgres cluster that has its master pod on the node to migrate from:
// - the cluster has some replicas - migrate one of those if necessary and failover to it
@ -250,6 +263,7 @@ func (c *Cluster) MigrateReplicaPod(podName spec.NamespacedName, fromNodeName st
func (c *Cluster) recreatePod(podName spec.NamespacedName) (*v1.Pod, error) {
ch := c.registerPodSubscriber(podName)
defer c.unregisterPodSubscriber(podName)
stopChan := make(chan struct{})
if err := c.KubeClient.Pods(podName.Namespace).Delete(podName.Name, c.deleteOptions); err != nil {
return nil, fmt.Errorf("could not delete pod: %v", err)
@ -258,7 +272,7 @@ func (c *Cluster) recreatePod(podName spec.NamespacedName) (*v1.Pod, error) {
if err := c.waitForPodDeletion(ch); err != nil {
return nil, err
}
if pod, err := c.waitForPodLabel(ch, nil); err != nil {
if pod, err := c.waitForPodLabel(ch, stopChan, nil); err != nil {
return nil, err
} else {
c.logger.Infof("pod %q has been recreated", podName)

View File

@ -108,11 +108,10 @@ func (c *Cluster) syncService(role PostgresRole) error {
svc, err := c.KubeClient.Services(c.Namespace).Get(c.serviceName(role), metav1.GetOptions{})
if err == nil {
c.Services[role] = svc
desiredSvc := c.generateService(role, &c.Spec)
match, reason := k8sutil.SameService(svc, desiredSvc)
if match {
c.Services[role] = svc
return nil
}
c.logServiceChanges(role, svc, desiredSvc, false, reason)

View File

@ -216,18 +216,20 @@ func (c *Cluster) getTeamMembers() ([]string, error) {
token, err := c.oauthTokenGetter.getOAuthToken()
if err != nil {
return []string{}, fmt.Errorf("could not get oauth token: %v", err)
c.logger.Warnf("could not get oauth token to authenticate to team service API, returning empty list of team members: %v", err)
return []string{}, nil
}
teamInfo, err := c.teamsAPIClient.TeamInfo(c.Spec.TeamID, token)
if err != nil {
return nil, fmt.Errorf("could not get team info: %v", err)
c.logger.Warnf("could not get team info, returning empty list of team members: %v", err)
return []string{}, nil
}
return teamInfo.Members, nil
}
func (c *Cluster) waitForPodLabel(podEvents chan spec.PodEvent, role *PostgresRole) (*v1.Pod, error) {
func (c *Cluster) waitForPodLabel(podEvents chan spec.PodEvent, stopChan chan struct{}, role *PostgresRole) (*v1.Pod, error) {
timeout := time.After(c.OpConfig.PodLabelWaitTimeout)
for {
select {
@ -243,6 +245,8 @@ func (c *Cluster) waitForPodLabel(podEvents chan spec.PodEvent, role *PostgresRo
}
case <-timeout:
return nil, fmt.Errorf("pod label wait timeout")
case <-stopChan:
return nil, fmt.Errorf("pod label wait cancelled")
}
}
}
@ -280,7 +284,10 @@ func (c *Cluster) waitStatefulsetReady() error {
})
}
func (c *Cluster) waitPodLabelsReady() error {
func (c *Cluster) _waitPodLabelsReady(anyReplica bool) error {
var (
podsNumber int
)
ls := c.labelsSet(false)
namespace := c.Namespace
@ -297,35 +304,56 @@ func (c *Cluster) waitPodLabelsReady() error {
c.OpConfig.PodRoleLabel: string(Replica),
}).String(),
}
podsNumber = 1
if !anyReplica {
pods, err := c.KubeClient.Pods(namespace).List(listOptions)
if err != nil {
return err
}
podsNumber := len(pods.Items)
podsNumber = len(pods.Items)
c.logger.Debugf("Waiting for %d pods to become ready", podsNumber)
} else {
c.logger.Debugf("Waiting for any replica pod to become ready")
}
err = retryutil.Retry(c.OpConfig.ResourceCheckInterval, c.OpConfig.ResourceCheckTimeout,
err := retryutil.Retry(c.OpConfig.ResourceCheckInterval, c.OpConfig.ResourceCheckTimeout,
func() (bool, error) {
masterCount := 0
if !anyReplica {
masterPods, err2 := c.KubeClient.Pods(namespace).List(masterListOption)
if err2 != nil {
return false, err2
}
if len(masterPods.Items) > 1 {
return false, fmt.Errorf("too many masters (%d pods with the master label found)",
len(masterPods.Items))
}
masterCount = len(masterPods.Items)
}
replicaPods, err2 := c.KubeClient.Pods(namespace).List(replicaListOption)
if err2 != nil {
return false, err2
}
if len(masterPods.Items) > 1 {
return false, fmt.Errorf("too many masters")
}
if len(replicaPods.Items) == podsNumber {
replicaCount := len(replicaPods.Items)
if anyReplica && replicaCount > 0 {
c.logger.Debugf("Found %d running replica pods", replicaCount)
return true, nil
}
return len(masterPods.Items)+len(replicaPods.Items) == podsNumber, nil
return masterCount+replicaCount >= podsNumber, nil
})
return err
}
func (c *Cluster) waitForAnyReplicaLabelReady() error {
return c._waitPodLabelsReady(true)
}
func (c *Cluster) waitForAllPodsLabelReady() error {
return c._waitPodLabelsReady(false)
}
func (c *Cluster) waitStatefulsetPodsReady() error {
c.setProcessName("waiting for the pods of the statefulset")
// TODO: wait for the first Pod only
@ -334,7 +362,7 @@ func (c *Cluster) waitStatefulsetPodsReady() error {
}
// TODO: wait only for master
if err := c.waitPodLabelsReady(); err != nil {
if err := c.waitForAllPodsLabelReady(); err != nil {
return fmt.Errorf("pod labels error: %v", err)
}

View File

@ -8,6 +8,7 @@ import (
"github.com/Sirupsen/logrus"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/pkg/api/v1"
"k8s.io/client-go/tools/cache"
@ -50,6 +51,8 @@ type Controller struct {
lastClusterSyncTime int64
workerLogs map[uint32]ringlog.RingLogger
PodServiceAccount *v1.ServiceAccount
}
// NewController creates a new controller
@ -113,11 +116,46 @@ func (c *Controller) initOperatorConfig() {
if scalyrAPIKey != "" {
c.opConfig.ScalyrAPIKey = scalyrAPIKey
}
}
func (c *Controller) initPodServiceAccount() {
if c.opConfig.PodServiceAccountDefinition == "" {
c.opConfig.PodServiceAccountDefinition = `
{ "apiVersion": "v1",
"kind": "ServiceAccount",
"metadata": {
"name": "operator"
}
}`
}
// re-uses k8s internal parsing. See k8s client-go issue #193 for explanation
decode := scheme.Codecs.UniversalDeserializer().Decode
obj, groupVersionKind, err := decode([]byte(c.opConfig.PodServiceAccountDefinition), nil, nil)
switch {
case err != nil:
panic(fmt.Errorf("Unable to parse pod service account definiton from the operator config map: %v", err))
case groupVersionKind.Kind != "ServiceAccount":
panic(fmt.Errorf("pod service account definiton in the operator config map defines another type of resource: %v", groupVersionKind.Kind))
default:
c.PodServiceAccount = obj.(*v1.ServiceAccount)
if c.PodServiceAccount.Name != c.opConfig.PodServiceAccountName {
c.logger.Warnf("in the operator config map, the pod service account name %v does not match the name %v given in the account definition; using the former for consistency", c.opConfig.PodServiceAccountName, c.PodServiceAccount.Name)
c.PodServiceAccount.Name = c.opConfig.PodServiceAccountName
}
c.PodServiceAccount.Namespace = ""
}
// actual service accounts are deployed at the time of Postgres/Spilo cluster creation
}
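
For illustration, a custom `pod_service_account_definition` passes through the same deserializer; in this sketch the account name and annotation key are invented, and the real normalization of name and namespace happens in initPodServiceAccount above:

```go
package main

import (
	"fmt"

	"k8s.io/client-go/kubernetes/scheme"
	"k8s.io/client-go/pkg/api/v1"
)

func main() {
	// a custom definition an administrator might put into the operator config
	// map; the account name and annotation key are invented for illustration
	def := `{
	"apiVersion": "v1",
	"kind": "ServiceAccount",
	"metadata": {
		"name": "postgres-pod",
		"annotations": {"example.com/role": "database"}
	}
	}`
	obj, gvk, err := scheme.Codecs.UniversalDeserializer().Decode([]byte(def), nil, nil)
	if err != nil || gvk.Kind != "ServiceAccount" {
		panic(fmt.Errorf("invalid pod service account definition: %v", err))
	}
	sa := obj.(*v1.ServiceAccount)
	fmt.Println(sa.Name, sa.Annotations) // name/namespace get normalized later
}
```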
func (c *Controller) initController() {
c.initClients()
c.initOperatorConfig()
c.initPodServiceAccount()
c.initSharedInformers()

View File

@ -26,6 +26,7 @@ func (c *Controller) makeClusterConfig() cluster.Config {
RestConfig: c.config.RestConfig,
OpConfig: config.Copy(c.opConfig),
InfrastructureRoles: infrastructureRoles,
PodServiceAccount: c.PodServiceAccount,
}
}

View File

@ -3,6 +3,7 @@ package spec
import (
"encoding/json"
"fmt"
"regexp"
"strings"
"time"
@ -76,6 +77,12 @@ const (
ClusterStatusInvalid PostgresStatus = "Invalid"
)
const (
serviceNameMaxLength = 63 // a DNS-1035 label is limited to 63 characters
clusterNameMaxLength = serviceNameMaxLength - len("-repl") // leave room for the replica service suffix
serviceNameRegexString = `^[a-z]([-a-z0-9]*[a-z0-9])?$`
)
// Postgresql defines PostgreSQL Custom Resource Definition Object.
type Postgresql struct {
metav1.TypeMeta `json:",inline"`
@ -126,7 +133,10 @@ type PostgresqlList struct {
Items []Postgresql `json:"items"`
}
var weekdays = map[string]int{"Sun": 0, "Mon": 1, "Tue": 2, "Wed": 3, "Thu": 4, "Fri": 5, "Sat": 6}
var (
weekdays = map[string]int{"Sun": 0, "Mon": 1, "Tue": 2, "Wed": 3, "Thu": 4, "Fri": 5, "Sat": 6}
serviceNameRegex = regexp.MustCompile(serviceNameRegexString)
)
func parseTime(s string) (time.Time, error) {
parts := strings.Split(s, ":")
@ -225,10 +235,31 @@ func extractClusterName(clusterName string, teamName string) (string, error) {
if strings.ToLower(clusterName[:teamNameLen+1]) != strings.ToLower(teamName)+"-" {
return "", fmt.Errorf("name must match {TEAM}-{NAME} format")
}
if len(clusterName) > clusterNameMaxLength {
return "", fmt.Errorf("name cannot be longer than %d characters", clusterNameMaxLength)
}
if !serviceNameRegex.MatchString(clusterName) {
return "", fmt.Errorf("name must confirm to DNS-1035, regex used for validation is %q",
serviceNameRegexString)
}
return clusterName[teamNameLen+1:], nil
}
func validateCloneClusterDescription(clone *CloneDescription) error {
// when cloning from a basebackup (no end timestamp), check that the cluster name is a valid service name
if clone.ClusterName != "" && clone.EndTimestamp == "" {
if !serviceNameRegex.MatchString(clone.ClusterName) {
return fmt.Errorf("clone cluster name must confirm to DNS-1035, regex used for validation is %q",
serviceNameRegexString)
}
if len(clone.ClusterName) > serviceNameMaxLength {
return fmt.Errorf("clone cluster name must be no longer than %d characters", serviceNameMaxLength)
}
}
return nil
}
type postgresqlListCopy PostgresqlList
type postgresqlCopy Postgresql
@ -252,22 +283,16 @@ func (p *Postgresql) UnmarshalJSON(data []byte) error {
}
tmp2 := Postgresql(tmp)
clusterName, err := extractClusterName(tmp2.ObjectMeta.Name, tmp2.Spec.TeamID)
if err == nil {
tmp2.Spec.ClusterName = clusterName
} else {
if clusterName, err := extractClusterName(tmp2.ObjectMeta.Name, tmp2.Spec.TeamID); err != nil {
tmp2.Error = err
tmp2.Status = ClusterStatusInvalid
}
// The assumption below is that a cluster to clone, if any, belongs to the same team
if tmp2.Spec.Clone.ClusterName != "" {
_, err := extractClusterName(tmp2.Spec.Clone.ClusterName, tmp2.Spec.TeamID)
if err != nil {
tmp2.Error = fmt.Errorf("%s for the cluster to clone", err)
tmp2.Spec.Clone = CloneDescription{}
} else if err := validateCloneClusterDescription(&tmp2.Spec.Clone); err != nil {
tmp2.Error = err
tmp2.Status = ClusterStatusInvalid
} else {
tmp2.Spec.ClusterName = clusterName
}
}
*p = tmp2
return nil

View File

@ -43,7 +43,10 @@ var clusterNames = []struct {
{"acid-test", "acid", "test", nil},
{"test-my-name", "test", "my-name", nil},
{"my-team-another-test", "my-team", "another-test", nil},
{"------strange-team-cluster", "-----", "strange-team-cluster", nil},
{"------strange-team-cluster", "-----", "strange-team-cluster",
errors.New(`name must conform to DNS-1035, regex used for validation is "^[a-z]([-a-z0-9]*[a-z0-9])?$"`)},
{"fooobar-fooobarfooobarfooobarfooobarfooobarfooobarfooobarfooobar", "fooobar", "",
errors.New("name cannot be longer than 58 characters")},
{"acid-test", "test", "", errors.New("name must match {TEAM}-{NAME} format")},
{"-test", "", "", errors.New("team name is empty")},
{"-test", "-", "", errors.New("name must match {TEAM}-{NAME} format")},
@ -51,6 +54,18 @@ var clusterNames = []struct {
{"-", "-", "", errors.New("name is too short")},
}
var cloneClusterDescriptions = []struct {
in *CloneDescription
err error
}{
{&CloneDescription{"foo+bar", "", "NotEmpty"}, nil},
{&CloneDescription{"foo+bar", "", ""},
errors.New(`clone cluster name must conform to DNS-1035, regex used for validation is "^[a-z]([-a-z0-9]*[a-z0-9])?$"`)},
{&CloneDescription{"foobar123456789012345678901234567890123456789012345678901234567890", "", ""},
errors.New("clone cluster name must be no longer than 63 characters")},
{&CloneDescription{"foobar", "", ""}, nil},
}
var maintenanceWindows = []struct {
in []byte
out MaintenanceWindow
@ -280,13 +295,14 @@ var unmarshalCluster = []struct {
},
Spec: PostgresSpec{
TeamID: "acid",
Clone: CloneDescription{},
Clone: CloneDescription{
ClusterName: "team-batman",
},
ClusterName: "testcluster1",
},
Status: ClusterStatusInvalid,
Error: errors.New("name must match {TEAM}-{NAME} format for the cluster to clone"),
Error: nil,
},
marshal: []byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"acid-testcluster1","creationTimestamp":null},"spec":{"postgresql":{"version":"","parameters":null},"volume":{"size":"","storageClass":""},"patroni":{"initdb":null,"pg_hba":null,"ttl":0,"loop_wait":0,"retry_timeout":0,"maximum_lag_on_failover":0},"resources":{"requests":{"cpu":"","memory":""},"limits":{"cpu":"","memory":""}},"teamId":"acid","allowedSourceRanges":null,"numberOfInstances":0,"users":null,"clone":{}},"status":"Invalid"}`), err: nil},
marshal: []byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"acid-testcluster1","creationTimestamp":null},"spec":{"postgresql":{"version":"","parameters":null},"volume":{"size":"","storageClass":""},"patroni":{"initdb":null,"pg_hba":null,"ttl":0,"loop_wait":0,"retry_timeout":0,"maximum_lag_on_failover":0},"resources":{"requests":{"cpu":"","memory":""},"limits":{"cpu":"","memory":""}},"teamId":"acid","allowedSourceRanges":null,"numberOfInstances":0,"users":null,"clone":{"cluster":"team-batman"}}}`), err: nil},
{[]byte(`{"kind": "Postgresql","apiVersion": "acid.zalan.do/v1"`),
Postgresql{},
[]byte{},
@ -350,11 +366,12 @@ func TestParseTime(t *testing.T) {
for _, tt := range parseTimeTests {
aTime, err := parseTime(tt.in)
if err != nil {
if err.Error() != tt.err.Error() {
if tt.err == nil || err.Error() != tt.err.Error() {
t.Errorf("ParseTime expected error: %v, got: %v", tt.err, err)
}
continue
} else if tt.err != nil {
t.Errorf("Expected error: %v", tt.err)
}
if aTime != tt.out {
@ -367,11 +384,12 @@ func TestWeekdayTime(t *testing.T) {
for _, tt := range parseWeekdayTests {
aTime, err := parseWeekday(tt.in)
if err != nil {
if err.Error() != tt.err.Error() {
if tt.err == nil || err.Error() != tt.err.Error() {
t.Errorf("ParseWeekday expected error: %v, got: %v", tt.err, err)
}
continue
} else if tt.err != nil {
t.Errorf("Expected error: %v", tt.err)
}
if aTime != tt.out {
@ -383,9 +401,13 @@ func TestWeekdayTime(t *testing.T) {
func TestClusterName(t *testing.T) {
for _, tt := range clusterNames {
name, err := extractClusterName(tt.in, tt.inTeam)
if err != nil && err.Error() != tt.err.Error() {
if err != nil {
if tt.err == nil || err.Error() != tt.err.Error() {
t.Errorf("extractClusterName expected error: %v, got: %v", tt.err, err)
}
continue
} else if tt.err != nil {
t.Errorf("Expected error: %v", tt.err)
}
if name != tt.clusterName {
t.Errorf("Expected cluserName: %q, got: %q", tt.clusterName, name)
@ -393,17 +415,29 @@ func TestClusterName(t *testing.T) {
}
}
func TestCloneClusterDescription(t *testing.T) {
for _, tt := range cloneClusterDescriptions {
if err := validateCloneClusterDescription(tt.in); err != nil {
if tt.err == nil || err.Error() != tt.err.Error() {
t.Errorf("testCloneClusterDescription expected error: %v, got: %v", tt.err, err)
}
} else if tt.err != nil {
t.Errorf("Expected error: %v", tt.err)
}
}
}
func TestUnmarshalMaintenanceWindow(t *testing.T) {
for _, tt := range maintenanceWindows {
var m MaintenanceWindow
err := m.UnmarshalJSON(tt.in)
if err != nil && err.Error() != tt.err.Error() {
if err != nil {
if tt.err == nil || err.Error() != tt.err.Error() {
t.Errorf("MaintenanceWindow unmarshal expected error: %v, got %v", tt.err, err)
continue
}
if tt.err != nil && err == nil {
t.Errorf("Expected error")
continue
} else if tt.err != nil {
t.Errorf("Expected error: %v", tt.err)
}
if !reflect.DeepEqual(m, tt.out) {
@ -421,7 +455,6 @@ func TestMarshalMaintenanceWindow(t *testing.T) {
s, err := tt.out.MarshalJSON()
if err != nil {
t.Errorf("Marshal Error: %v", err)
continue
}
if !bytes.Equal(s, tt.in) {
@ -435,11 +468,12 @@ func TestPostgresUnmarshal(t *testing.T) {
var cluster Postgresql
err := cluster.UnmarshalJSON(tt.in)
if err != nil {
if err.Error() != tt.err.Error() {
if tt.err == nil || err.Error() != tt.err.Error() {
t.Errorf("Unmarshal expected error: %v, got: %v", tt.err, err)
}
continue
} else if tt.err != nil {
t.Errorf("Expected error: %v", tt.err)
}
if !reflect.DeepEqual(cluster, tt.out) {
@ -457,7 +491,6 @@ func TestMarshal(t *testing.T) {
m, err := json.Marshal(tt.out)
if err != nil {
t.Errorf("Marshal error: %v", err)
continue
}
if !bytes.Equal(m, tt.marshal) {
t.Errorf("Marshal Postgresql expected: %q, got: %q", string(tt.marshal), string(m))
@ -481,10 +514,15 @@ func TestUnmarshalPostgresList(t *testing.T) {
for _, tt := range postgresqlList {
var list PostgresqlList
err := list.UnmarshalJSON(tt.in)
if err != nil && err.Error() != tt.err.Error() {
if err != nil {
if tt.err == nil || err.Error() != tt.err.Error() {
t.Errorf("PostgresqlList unmarshal expected error: %v, got: %v", tt.err, err)
return
}
continue
} else if tt.err != nil {
t.Errorf("Expected error: %v", tt.err)
}
if !reflect.DeepEqual(list, tt.out) {
t.Errorf("Postgresql list unmarshall expected: %#v, got: %#v", tt.out, list)
}

View File

@ -67,13 +67,18 @@ type Config struct {
Resources
Auth
Scalyr
WatchedNamespace string `name:"watched_namespace"` // special values: "*" means 'watch all namespaces', the empty string "" means 'watch the namespace the operator is deployed to'
EtcdHost string `name:"etcd_host" default:"etcd-client.default.svc.cluster.local:2379"`
DockerImage string `name:"docker_image" default:"registry.opensource.zalan.do/acid/spiloprivate-9.6:1.2-p4"`
ServiceAccountName string `name:"service_account_name" default:"operator"`
// default name `operator` enables backward compatibility with the older ServiceAccountName field
PodServiceAccountName string `name:"pod_service_account_name" default:"operator"`
// value of this string must be valid JSON or YAML; see initPodServiceAccount
PodServiceAccountDefinition string `name:"pod_service_account_definition" default:""`
DbHostedZone string `name:"db_hosted_zone" default:"db.example.com"`
EtcdScope string `name:"etcd_scope" default:"service"`
WALES3Bucket string `name:"wal_s3_bucket"`
LogS3Bucket string `name:"log_s3_bucket"`
KubeIAMRole string `name:"kube_iam_role"`
DebugLogging bool `name:"debug_logging" default:"true"`
EnableDBAccess bool `name:"enable_database_access" default:"true"`
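
The `name:"…"` struct tags are what tie ConfigMap keys to these fields. A minimal, self-contained illustration of such tag-driven decoding via reflection; this is not the operator's actual config parser, just a sketch of the idea:

```go
package main

import (
	"fmt"
	"reflect"
)

type Config struct {
	PodServiceAccountName string `name:"pod_service_account_name"`
	WALES3Bucket          string `name:"wal_s3_bucket"`
}

// fillFromMap copies values from a ConfigMap-like map into fields whose
// `name` tag matches a key; unknown keys and untagged fields are skipped.
func fillFromMap(cfg interface{}, data map[string]string) {
	v := reflect.ValueOf(cfg).Elem()
	t := v.Type()
	for i := 0; i < t.NumField(); i++ {
		key := t.Field(i).Tag.Get("name")
		if val, ok := data[key]; ok && key != "" {
			v.Field(i).SetString(val)
		}
	}
}

func main() {
	var cfg Config
	fillFromMap(&cfg, map[string]string{"pod_service_account_name": "operator"})
	fmt.Println(cfg.PodServiceAccountName) // operator
}
```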