From 74b19b449e5c77b15cc4ccae0d96ce537392ca57 Mon Sep 17 00:00:00 2001 From: Oleksii Kliukin Date: Wed, 27 Jun 2018 12:30:24 +0200 Subject: [PATCH 01/30] Update travis configuration. (#332) - explicitely set sudo to false, since we don't need it and it slows-down builds. - use the newest go toolchain. --- .travis.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index f22275d9c..faeb2cc8b 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,4 +1,5 @@ dist: trusty +sudo: false branches: only: @@ -7,7 +8,7 @@ branches: language: go go: - - 1.9 + - "1.10.x" before_install: - go get github.com/Masterminds/glide From 7394c15d0a6271f561f458a18633699e73ec08ac Mon Sep 17 00:00:00 2001 From: zerg-junior Date: Wed, 27 Jun 2018 17:29:02 +0200 Subject: [PATCH 02/30] Make AWS region configurable in the operator cofig map (#333) --- docs/reference/operator_parameters.md | 3 +++ manifests/configmap.yaml | 1 + pkg/cluster/sync.go | 2 +- pkg/util/config/config.go | 1 + pkg/util/constants/aws.go | 2 -- pkg/util/volumes/ebs.go | 3 ++- 6 files changed, 8 insertions(+), 4 deletions(-) diff --git a/docs/reference/operator_parameters.md b/docs/reference/operator_parameters.md index 115cb055e..4e22847dd 100644 --- a/docs/reference/operator_parameters.md +++ b/docs/reference/operator_parameters.md @@ -212,6 +212,9 @@ words. pods. Only used when combined with [kube2iam](https://github.com/jtblin/kube2iam) project on AWS. The default is empty. +* **aws_region** + AWS region used to store ESB volumes. + ## Debugging the operator * **debug_logging** boolean parameter that toggles verbose debug logs from the operator. 
The diff --git a/manifests/configmap.yaml b/manifests/configmap.yaml index c0295f4ca..7725c3630 100644 --- a/manifests/configmap.yaml +++ b/manifests/configmap.yaml @@ -25,6 +25,7 @@ data: # pam_role_name: zalandos # pam_configuration: | # https://info.example.com/oauth2/tokeninfo?access_token= uid realm=/employees + aws_region: eu-central-1 db_hosted_zone: db.example.com master_dns_name_format: '{cluster}.{team}.staging.{hostedzone}' replica_dns_name_format: '{cluster}-repl.{team}.staging.{hostedzone}' diff --git a/pkg/cluster/sync.go b/pkg/cluster/sync.go index cf38a6d4f..bd2823b5b 100644 --- a/pkg/cluster/sync.go +++ b/pkg/cluster/sync.go @@ -447,7 +447,7 @@ func (c *Cluster) syncVolumes() error { if !act { return nil } - if err := c.resizeVolumes(c.Spec.Volume, []volumes.VolumeResizer{&volumes.EBSVolumeResizer{}}); err != nil { + if err := c.resizeVolumes(c.Spec.Volume, []volumes.VolumeResizer{&volumes.EBSVolumeResizer{AWSRegion: c.OpConfig.AWSRegion}}); err != nil { return fmt.Errorf("could not sync volumes: %v", err) } diff --git a/pkg/util/config/config.go b/pkg/util/config/config.go index 182bb077c..d41b63e6e 100644 --- a/pkg/util/config/config.go +++ b/pkg/util/config/config.go @@ -76,6 +76,7 @@ type Config struct { // value of this string must be valid JSON or YAML; see initPodServiceAccount PodServiceAccountDefinition string `name:"pod_service_account_definition" default:""` DbHostedZone string `name:"db_hosted_zone" default:"db.example.com"` + AWSRegion string `name:"aws_region" default:"eu-central-1"` WALES3Bucket string `name:"wal_s3_bucket"` LogS3Bucket string `name:"log_s3_bucket"` KubeIAMRole string `name:"kube_iam_role"` diff --git a/pkg/util/constants/aws.go b/pkg/util/constants/aws.go index fb12cdd61..f1cfd5975 100644 --- a/pkg/util/constants/aws.go +++ b/pkg/util/constants/aws.go @@ -4,8 +4,6 @@ import "time" // AWS specific constants used by other modules const ( - // default region for AWS. 
TODO: move it to the operator configuration - AWSRegion = "eu-central-1" // EBS related constants EBSVolumeIDStart = "/vol-" EBSProvisioner = "kubernetes.io/aws-ebs" diff --git a/pkg/util/volumes/ebs.go b/pkg/util/volumes/ebs.go index 12cb405b1..57a334258 100644 --- a/pkg/util/volumes/ebs.go +++ b/pkg/util/volumes/ebs.go @@ -16,11 +16,12 @@ import ( // EBSVolumeResizer implements volume resizing interface for AWS EBS volumes. type EBSVolumeResizer struct { connection *ec2.EC2 + AWSRegion string } // ConnectToProvider connects to AWS. func (c *EBSVolumeResizer) ConnectToProvider() error { - sess, err := session.NewSession(&aws.Config{Region: aws.String(constants.AWSRegion)}) + sess, err := session.NewSession(&aws.Config{Region: aws.String(c.AWSRegion)}) if err != nil { return fmt.Errorf("could not establish AWS session: %v", err) } From 25a306244f69eb3f25c0a780816122a99cc30e3b Mon Sep 17 00:00:00 2001 From: Oleksii Kliukin Date: Mon, 2 Jul 2018 16:25:27 +0200 Subject: [PATCH 03/30] Support for per-cluster and operator global sidecars (#331) * Define sidecars in the operator configuration. Right now only the name and the docker image can be defined, but with the help of the pod_environment_configmap parameter arbitrary environment variables can be passed to the sidecars. * Refactoring around generatePodTemplate. 
Original implementation of per-cluster sidecars by @theRealWardo Per review by @zerg-junior and @Jan-M --- docs/reference/cluster_manifest.md | 18 + docs/reference/operator_parameters.md | 5 + docs/user.md | 32 ++ pkg/cluster/k8sres.go | 521 ++++++++++++++++++-------- pkg/spec/postgresql.go | 10 + pkg/util/config/config.go | 7 +- 6 files changed, 432 insertions(+), 161 deletions(-) diff --git a/docs/reference/cluster_manifest.md b/docs/reference/cluster_manifest.md index c349e1631..b046f0493 100644 --- a/docs/reference/cluster_manifest.md +++ b/docs/reference/cluster_manifest.md @@ -213,3 +213,21 @@ properties of the persistent storage that stores postgres data. See [Kubernetes documentation](https://kubernetes.io/docs/concepts/storage/storage-classes/) for the details on storage classes. Optional. + +### Sidecar definitions + +Those parameters are defined under the `sidecars` key. They consist of a list +of dictionaries, each defining one sidecar (an extra container running +along the main postgres container on the same pod). The following keys can be +defined in the sidecar dictionary: + +* **name** + name of the sidecar. Required. + +* **image** + docker image of the sidecar. Required. + +* **env** + a dictionary of environment variables. Use usual Kubernetes definition + (https://kubernetes.io/docs/tasks/inject-data-application/environment-variable-expose-pod-information/) + for environment variables. Optional. diff --git a/docs/reference/operator_parameters.md b/docs/reference/operator_parameters.md index 4e22847dd..e9e220119 100644 --- a/docs/reference/operator_parameters.md +++ b/docs/reference/operator_parameters.md @@ -15,6 +15,11 @@ words. your own Spilo image from the [github repository](https://github.com/zalando/spilo). +* **sidecar_docker_images** + a map of sidecar names to docker images for the containers to run alongside + Spilo. In case of the name conflict with the definition in the cluster + manifest the cluster-specific one is preferred. 
+ * **workers** number of working routines the operator spawns to process requests to create/update/delete/sync clusters concurrently. The default is `4`. diff --git a/docs/user.md b/docs/user.md index b17cfc784..523943446 100644 --- a/docs/user.md +++ b/docs/user.md @@ -241,6 +241,38 @@ metadata: Note that timezone required for `timestamp` (offset relative to UTC, see RFC 3339 section 5.6) + +## Sidecar Support + +Each cluster can specify arbitrary sidecars to run. These containers could be used for +log aggregation, monitoring, backups or other tasks. A sidecar can be specified like this: + +```yaml +apiVersion: "acid.zalan.do/v1" +kind: postgresql + +metadata: + name: acid-minimal-cluster +spec: + ... + sidecars: + - name: "container-name" + image: "company/image:tag" + env: + - name: "ENV_VAR_NAME" + value: "any-k8s-env-things" +``` + +In addition to any environment variables you specify, the following environment variables +are always passed to sidecars: + + - `POD_NAME` - field reference to `metadata.name` + - `POD_NAMESPACE` - field reference to `metadata.namespace` + - `POSTGRES_USER` - the superuser that can be used to connect to the database + - `POSTGRES_PASSWORD` - the password for the superuser + +The PostgreSQL volume is shared with sidecars and is mounted at `/home/postgres/pgdata`. 
+ ## Increase volume size PostgreSQL operator supports statefulset volume resize if you're using the diff --git a/pkg/cluster/k8sres.go b/pkg/cluster/k8sres.go index f216797c0..49bd9fe84 100644 --- a/pkg/cluster/k8sres.go +++ b/pkg/cluster/k8sres.go @@ -5,6 +5,7 @@ import ( "fmt" "sort" + "github.com/Sirupsen/logrus" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" @@ -15,6 +16,7 @@ import ( "github.com/zalando-incubator/postgres-operator/pkg/spec" "github.com/zalando-incubator/postgres-operator/pkg/util/constants" + "k8s.io/apimachinery/pkg/labels" ) const ( @@ -79,25 +81,30 @@ func (c *Cluster) podDisruptionBudgetName() string { return c.OpConfig.PDBNameFormat.Format("cluster", c.Name) } -func (c *Cluster) resourceRequirements(resources spec.Resources) (*v1.ResourceRequirements, error) { - var err error - - specRequests := resources.ResourceRequest - specLimits := resources.ResourceLimits +func (c *Cluster) makeDefaultResources() spec.Resources { config := c.OpConfig defaultRequests := spec.ResourceDescription{CPU: config.DefaultCPURequest, Memory: config.DefaultMemoryRequest} defaultLimits := spec.ResourceDescription{CPU: config.DefaultCPULimit, Memory: config.DefaultMemoryLimit} + return spec.Resources{defaultRequests, defaultLimits} +} + +func generateResourceRequirements(resources spec.Resources, defaultResources spec.Resources) (*v1.ResourceRequirements, error) { + var err error + + specRequests := resources.ResourceRequest + specLimits := resources.ResourceLimits + result := v1.ResourceRequirements{} - result.Requests, err = fillResourceList(specRequests, defaultRequests) + result.Requests, err = fillResourceList(specRequests, defaultResources.ResourceRequest) if err != nil { return nil, fmt.Errorf("could not fill resource requests: %v", err) } - result.Limits, err = fillResourceList(specLimits, defaultLimits) + result.Limits, err = fillResourceList(specLimits, 
defaultResources.ResourceLimits) if err != nil { return nil, fmt.Errorf("could not fill resource limits: %v", err) } @@ -135,7 +142,7 @@ func fillResourceList(spec spec.ResourceDescription, defaults spec.ResourceDescr return requests, nil } -func (c *Cluster) generateSpiloJSONConfiguration(pg *spec.PostgresqlParam, patroni *spec.Patroni) string { +func generateSpiloJSONConfiguration(pg *spec.PostgresqlParam, patroni *spec.Patroni, pamRoleName string, logger *logrus.Entry) string { config := spiloConfiguration{} config.Bootstrap = pgBootstrap{} @@ -178,7 +185,7 @@ PatroniInitDBParams: } } default: - c.logger.Warningf("unsupported type for initdb configuration item %s: %T", defaultParam, defaultParam) + logger.Warningf("unsupported type for initdb configuration item %s: %T", defaultParam, defaultParam) continue PatroniInitDBParams } } @@ -201,7 +208,7 @@ PatroniInitDBParams: } else { config.Bootstrap.PgHBA = []string{ "hostnossl all all all reject", - fmt.Sprintf("hostssl all +%s all pam", c.OpConfig.PamRoleName), + fmt.Sprintf("hostssl all +%s all pam", pamRoleName), "hostssl all all all md5", } } @@ -240,25 +247,25 @@ PatroniInitDBParams: } } config.Bootstrap.Users = map[string]pgUser{ - c.OpConfig.PamRoleName: { + pamRoleName: { Password: "", Options: []string{constants.RoleFlagCreateDB, constants.RoleFlagNoLogin}, }, } result, err := json.Marshal(config) if err != nil { - c.logger.Errorf("cannot convert spilo configuration into JSON: %v", err) + logger.Errorf("cannot convert spilo configuration into JSON: %v", err) return "" } return string(result) } -func (c *Cluster) nodeAffinity() *v1.Affinity { +func nodeAffinity(nodeReadinessLabel map[string]string) *v1.Affinity { matchExpressions := make([]v1.NodeSelectorRequirement, 0) - if len(c.OpConfig.NodeReadinessLabel) == 0 { + if len(nodeReadinessLabel) == 0 { return nil } - for k, v := range c.OpConfig.NodeReadinessLabel { + for k, v := range nodeReadinessLabel { matchExpressions = append(matchExpressions, 
v1.NodeSelectorRequirement{ Key: k, Operator: v1.NodeSelectorOpIn, @@ -275,13 +282,12 @@ func (c *Cluster) nodeAffinity() *v1.Affinity { } } -func (c *Cluster) tolerations(tolerationsSpec *[]v1.Toleration) []v1.Toleration { +func tolerations(tolerationsSpec *[]v1.Toleration, podToleration map[string]string) []v1.Toleration { // allow to override tolerations by postgresql manifest if len(*tolerationsSpec) > 0 { return *tolerationsSpec } - podToleration := c.Config.OpConfig.PodToleration if len(podToleration["key"]) > 0 || len(podToleration["operator"]) > 0 || len(podToleration["value"]) > 0 || len(podToleration["effect"]) > 0 { return []v1.Toleration{ { @@ -309,19 +315,123 @@ func isBootstrapOnlyParameter(param string) bool { param == "track_commit_timestamp" } -func (c *Cluster) generatePodTemplate( - uid types.UID, - resourceRequirements *v1.ResourceRequirements, - resourceRequirementsScalyrSidecar *v1.ResourceRequirements, - tolerationsSpec *[]v1.Toleration, - pgParameters *spec.PostgresqlParam, - patroniParameters *spec.Patroni, - cloneDescription *spec.CloneDescription, - dockerImage *string, - customPodEnvVars map[string]string, -) *v1.PodTemplateSpec { - spiloConfiguration := c.generateSpiloJSONConfiguration(pgParameters, patroniParameters) +func generateVolumeMounts() []v1.VolumeMount { + return []v1.VolumeMount{ + { + Name: constants.DataVolumeName, + MountPath: constants.PostgresDataMount, //TODO: fetch from manifest + }, + } +} +func generateSpiloContainer( + name string, + dockerImage *string, + resourceRequirements *v1.ResourceRequirements, + envVars []v1.EnvVar, + volumeMounts []v1.VolumeMount, +) *v1.Container { + + privilegedMode := true + return &v1.Container{ + Name: name, + Image: *dockerImage, + ImagePullPolicy: v1.PullIfNotPresent, + Resources: *resourceRequirements, + Ports: []v1.ContainerPort{ + { + ContainerPort: 8008, + Protocol: v1.ProtocolTCP, + }, + { + ContainerPort: 5432, + Protocol: v1.ProtocolTCP, + }, + { + ContainerPort: 8080, + 
Protocol: v1.ProtocolTCP, + }, + }, + VolumeMounts: volumeMounts, + Env: envVars, + SecurityContext: &v1.SecurityContext{ + Privileged: &privilegedMode, + }, + } +} + +func generateSidecarContainers(sidecars []spec.Sidecar, + volumeMounts []v1.VolumeMount, defaultResources spec.Resources, + superUserName string, credentialsSecretName string, logger *logrus.Entry) ([]v1.Container, error) { + + if sidecars != nil && len(sidecars) > 0 { + result := make([]v1.Container, 0) + for index, sidecar := range sidecars { + + resources, err := generateResourceRequirements( + makeResources( + sidecar.Resources.ResourceRequest.CPU, + sidecar.Resources.ResourceRequest.Memory, + sidecar.Resources.ResourceLimits.CPU, + sidecar.Resources.ResourceLimits.Memory, + ), + defaultResources, + ) + if err != nil { + return nil, err + } + + sc := getSidecarContainer(sidecar, index, volumeMounts, resources, superUserName, credentialsSecretName, logger) + result = append(result, *sc) + } + return result, nil + } + return nil, nil +} + +func generatePodTemplate( + namespace string, + labels labels.Set, + spiloContainer *v1.Container, + sidecarContainers []v1.Container, + tolerationsSpec *[]v1.Toleration, + nodeAffinity *v1.Affinity, + terminateGracePeriod int64, + podServiceAccountName string, + kubeIAMRole string, +) (*v1.PodTemplateSpec, error) { + + terminateGracePeriodSeconds := terminateGracePeriod + containers := []v1.Container{*spiloContainer} + containers = append(containers, sidecarContainers...) 
+ + podSpec := v1.PodSpec{ + ServiceAccountName: podServiceAccountName, + TerminationGracePeriodSeconds: &terminateGracePeriodSeconds, + Containers: containers, + Tolerations: *tolerationsSpec, + } + + if nodeAffinity != nil { + podSpec.Affinity = nodeAffinity + } + + template := v1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: labels, + Namespace: namespace, + }, + Spec: podSpec, + } + if kubeIAMRole != "" { + template.Annotations = map[string]string{constants.KubeIAmAnnotation: kubeIAMRole} + } + + return &template, nil +} + +// generatePodEnvVars generates environment variables for the Spilo Pod +func (c *Cluster) generateSpiloPodEnvVars(uid types.UID, spiloConfiguration string, cloneDescription *spec.CloneDescription, customPodEnvVarsList []v1.EnvVar) []v1.EnvVar { envVars := []v1.EnvVar{ { Name: "SCOPE", @@ -409,134 +519,89 @@ func (c *Cluster) generatePodTemplate( envVars = append(envVars, c.generateCloneEnvironment(cloneDescription)...) } - var names []string - // handle environment variables from the PodEnvironmentConfigMap. We don't use envSource here as it is impossible - // to track any changes to the object envSource points to. In order to emulate the envSource behavior, however, we - // need to make sure that PodConfigMap variables doesn't override those we set explicitly from the configuration - // parameters - envVarsMap := make(map[string]string) - for _, envVar := range envVars { - envVarsMap[envVar.Name] = envVar.Value + if len(customPodEnvVarsList) > 0 { + envVars = append(envVars, customPodEnvVarsList...) } - for name := range customPodEnvVars { - if _, ok := envVarsMap[name]; !ok { - names = append(names, name) - } else { - c.logger.Warningf("variable %q value from %q is ignored: conflict with the definition from the operator", - name, c.OpConfig.PodEnvironmentConfigMap) + + return envVars +} + +// deduplicateEnvVars makes sure there are no duplicate in the target envVar array. 
While Kubernetes already +// deduplicates variables defined in a container, it leaves the last definition in the list and this behavior is not +// well-documented, which means that the behavior can be reversed at some point (it may also start producing an error). +// Therefore, the merge is done by the operator, the entries that are ahead in the passed list take priority over those +// that are behind, and only the name is considered in order to eliminate duplicates. +func deduplicateEnvVars(input []v1.EnvVar, containerName string, logger *logrus.Entry) []v1.EnvVar { + result := make([]v1.EnvVar, 0) + names := make(map[string]int) + + for i, va := range input { + if names[va.Name] == 0 { + names[va.Name] += 1 + result = append(result, input[i]) + } else if names[va.Name] == 1 { + names[va.Name] += 1 + logger.Warningf("variable %q is defined in %q more than once, the subsequent definitions are ignored", + va.Name, containerName) } } - sort.Strings(names) - for _, name := range names { - envVars = append(envVars, v1.EnvVar{Name: name, Value: customPodEnvVars[name]}) + return result +} + +func getSidecarContainer(sidecar spec.Sidecar, index int, volumeMounts []v1.VolumeMount, + resources *v1.ResourceRequirements, superUserName string, credentialsSecretName string, logger *logrus.Entry) *v1.Container { + name := sidecar.Name + if name == "" { + name = fmt.Sprintf("sidecar-%d", index) } - privilegedMode := true - containerImage := c.OpConfig.DockerImage - if dockerImage != nil && *dockerImage != "" { - containerImage = *dockerImage - } - volumeMounts := []v1.VolumeMount{ + env := []v1.EnvVar{ { - Name: constants.DataVolumeName, - MountPath: constants.PostgresDataMount, //TODO: fetch from manifest - }, - } - container := v1.Container{ - Name: c.containerName(), - Image: containerImage, - ImagePullPolicy: v1.PullIfNotPresent, - Resources: *resourceRequirements, - Ports: []v1.ContainerPort{ - { - ContainerPort: 8008, - Protocol: v1.ProtocolTCP, - }, - { - ContainerPort: 
5432, - Protocol: v1.ProtocolTCP, - }, - { - ContainerPort: 8080, - Protocol: v1.ProtocolTCP, - }, - }, - VolumeMounts: volumeMounts, - Env: envVars, - SecurityContext: &v1.SecurityContext{ - Privileged: &privilegedMode, - }, - } - terminateGracePeriodSeconds := int64(c.OpConfig.PodTerminateGracePeriod.Seconds()) - - podSpec := v1.PodSpec{ - ServiceAccountName: c.OpConfig.PodServiceAccountName, - TerminationGracePeriodSeconds: &terminateGracePeriodSeconds, - Containers: []v1.Container{container}, - Tolerations: c.tolerations(tolerationsSpec), - } - - if affinity := c.nodeAffinity(); affinity != nil { - podSpec.Affinity = affinity - } - - if c.OpConfig.ScalyrAPIKey != "" && c.OpConfig.ScalyrImage != "" { - podSpec.Containers = append( - podSpec.Containers, - v1.Container{ - Name: "scalyr-sidecar", - Image: c.OpConfig.ScalyrImage, - ImagePullPolicy: v1.PullIfNotPresent, - Resources: *resourceRequirementsScalyrSidecar, - VolumeMounts: volumeMounts, - Env: []v1.EnvVar{ - { - Name: "POD_NAME", - ValueFrom: &v1.EnvVarSource{ - FieldRef: &v1.ObjectFieldSelector{ - APIVersion: "v1", - FieldPath: "metadata.name", - }, - }, - }, - { - Name: "POD_NAMESPACE", - ValueFrom: &v1.EnvVarSource{ - FieldRef: &v1.ObjectFieldSelector{ - APIVersion: "v1", - FieldPath: "metadata.namespace", - }, - }, - }, - { - Name: "SCALYR_API_KEY", - Value: c.OpConfig.ScalyrAPIKey, - }, - { - Name: "SCALYR_SERVER_HOST", - Value: c.Name, - }, - { - Name: "SCALYR_SERVER_URL", - Value: c.OpConfig.ScalyrServerURL, - }, + Name: "POD_NAME", + ValueFrom: &v1.EnvVarSource{ + FieldRef: &v1.ObjectFieldSelector{ + APIVersion: "v1", + FieldPath: "metadata.name", + }, + }, + }, + { + Name: "POD_NAMESPACE", + ValueFrom: &v1.EnvVarSource{ + FieldRef: &v1.ObjectFieldSelector{ + APIVersion: "v1", + FieldPath: "metadata.namespace", + }, + }, + }, + { + Name: "POSTGRES_USER", + Value: superUserName, + }, + { + Name: "POSTGRES_PASSWORD", + ValueFrom: &v1.EnvVarSource{ + SecretKeyRef: &v1.SecretKeySelector{ + 
LocalObjectReference: v1.LocalObjectReference{ + Name: credentialsSecretName, + }, + Key: "password", }, }, - ) - } - - template := v1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: c.labelsSet(true), - Namespace: c.Namespace, }, - Spec: podSpec, } - if c.OpConfig.KubeIAMRole != "" { - template.Annotations = map[string]string{constants.KubeIAmAnnotation: c.OpConfig.KubeIAMRole} + if len(sidecar.Env) > 0 { + env = append(env, sidecar.Env...) + } + return &v1.Container{ + Name: name, + Image: sidecar.DockerImage, + ImagePullPolicy: v1.PullIfNotPresent, + Resources: *resources, + VolumeMounts: volumeMounts, + Env: deduplicateEnvVars(env, name, logger), + Ports: sidecar.Ports, } - - return &template } func getBucketScopeSuffix(uid string) string { @@ -560,30 +625,90 @@ func makeResources(cpuRequest, memoryRequest, cpuLimit, memoryLimit string) spec } func (c *Cluster) generateStatefulSet(spec *spec.PostgresSpec) (*v1beta1.StatefulSet, error) { - resourceRequirements, err := c.resourceRequirements(spec.Resources) + + defaultResources := c.makeDefaultResources() + + resourceRequirements, err := generateResourceRequirements(spec.Resources, defaultResources) if err != nil { return nil, fmt.Errorf("could not generate resource requirements: %v", err) } - resourceRequirementsScalyrSidecar, err := c.resourceRequirements( - makeResources( - c.OpConfig.ScalyrCPURequest, - c.OpConfig.ScalyrMemoryRequest, - c.OpConfig.ScalyrCPULimit, - c.OpConfig.ScalyrMemoryLimit, - ), - ) + if err != nil { return nil, fmt.Errorf("could not generate Scalyr sidecar resource requirements: %v", err) } - var customPodEnvVars map[string]string + customPodEnvVarsList := make([]v1.EnvVar, 0) + if c.OpConfig.PodEnvironmentConfigMap != "" { if cm, err := c.KubeClient.ConfigMaps(c.Namespace).Get(c.OpConfig.PodEnvironmentConfigMap, metav1.GetOptions{}); err != nil { return nil, fmt.Errorf("could not read PodEnvironmentConfigMap: %v", err) } else { - customPodEnvVars = cm.Data + for k, v := 
range cm.Data { + customPodEnvVarsList = append(customPodEnvVarsList, v1.EnvVar{Name: k, Value: v}) + } + sort.Slice(customPodEnvVarsList, + func(i, j int) bool { return customPodEnvVarsList[i].Name < customPodEnvVarsList[j].Name }) } } - podTemplate := c.generatePodTemplate(c.Postgresql.GetUID(), resourceRequirements, resourceRequirementsScalyrSidecar, &spec.Tolerations, &spec.PostgresqlParam, &spec.Patroni, &spec.Clone, &spec.DockerImage, customPodEnvVars) + + spiloConfiguration := generateSpiloJSONConfiguration(&spec.PostgresqlParam, &spec.Patroni, c.OpConfig.PamRoleName, c.logger) + + // generate environment variables for the spilo container + spiloEnvVars := deduplicateEnvVars( + c.generateSpiloPodEnvVars(c.Postgresql.GetUID(), spiloConfiguration, &spec.Clone, customPodEnvVarsList), + c.containerName(), c.logger) + + // pickup the docker image for the spilo container + effectiveDockerImage := getEffectiveDockerImage(c.OpConfig.DockerImage, spec.DockerImage) + + volumeMounts := generateVolumeMounts() + + // generate the spilo container + spiloContainer := generateSpiloContainer(c.containerName(), &effectiveDockerImage, resourceRequirements, spiloEnvVars, volumeMounts) + + // resolve conflicts between operator-global and per-cluster sidecards + sideCars := c.mergeSidecars(spec.Sidecars) + + resourceRequirementsScalyrSidecar := makeResources( + c.OpConfig.ScalyrCPURequest, + c.OpConfig.ScalyrMemoryRequest, + c.OpConfig.ScalyrCPULimit, + c.OpConfig.ScalyrMemoryLimit, + ) + + // generate scalyr sidecar container + if scalyrSidecar := + generateScalyrSidecarSpec(c.Name, + c.OpConfig.ScalyrAPIKey, + c.OpConfig.ScalyrServerURL, + c.OpConfig.ScalyrImage, + &resourceRequirementsScalyrSidecar, c.logger); scalyrSidecar != nil { + sideCars = append(sideCars, *scalyrSidecar) + } + + // generate sidecar containers + sidecarContainers, err := generateSidecarContainers(sideCars, volumeMounts, defaultResources, + c.OpConfig.SuperUsername, 
c.credentialSecretName(c.OpConfig.SuperUsername), c.logger) + if err != nil { + return nil, fmt.Errorf("could not generate sidecar containers: %v", err) + } + + tolerationSpec := tolerations(&spec.Tolerations, c.OpConfig.PodToleration) + + // generate pod template for the statefulset, based on the spilo container and sidecards + podTemplate, err := generatePodTemplate( + c.Namespace, + c.labelsSet(true), + spiloContainer, + sidecarContainers, + &tolerationSpec, + nodeAffinity(c.OpConfig.NodeReadinessLabel), + int64(c.OpConfig.PodTerminateGracePeriod.Seconds()), + c.OpConfig.PodServiceAccountName, + c.OpConfig.KubeIAMRole) + + if err != nil { + return nil, fmt.Errorf("could not generate pod template: %v", err) + } volumeClaimTemplate, err := generatePersistentVolumeClaimTemplate(spec.Volume.Size, spec.Volume.StorageClass) if err != nil { return nil, fmt.Errorf("could not generate volume claim template: %v", err) @@ -610,6 +735,86 @@ func (c *Cluster) generateStatefulSet(spec *spec.PostgresSpec) (*v1beta1.Statefu return statefulSet, nil } +func getEffectiveDockerImage(globalDockerImage, clusterDockerImage string) string { + if clusterDockerImage == "" { + return globalDockerImage + } + return clusterDockerImage +} + +func generateScalyrSidecarSpec(clusterName, APIKey, serverURL, dockerImage string, + containerResources *spec.Resources, logger *logrus.Entry) *spec.Sidecar { + if APIKey == "" || serverURL == "" || dockerImage == "" { + if APIKey != "" || serverURL != "" || dockerImage != "" { + logger.Warningf("Incomplete configuration for the Scalyr sidecar: " + + "all of SCALYR_API_KEY, SCALYR_SERVER_HOST and SCALYR_SERVER_URL must be defined") + } + return nil + } + return &spec.Sidecar{ + Name: "scalyr-sidecar", + DockerImage: dockerImage, + Env: []v1.EnvVar{ + { + Name: "POD_NAME", + ValueFrom: &v1.EnvVarSource{ + FieldRef: &v1.ObjectFieldSelector{ + APIVersion: "v1", + FieldPath: "metadata.name", + }, + }, + }, + { + Name: "POD_NAMESPACE", + ValueFrom: 
&v1.EnvVarSource{ + FieldRef: &v1.ObjectFieldSelector{ + APIVersion: "v1", + FieldPath: "metadata.namespace", + }, + }, + }, + { + Name: "SCALYR_API_KEY", + Value: APIKey, + }, + { + Name: "SCALYR_SERVER_HOST", + Value: clusterName, + }, + { + Name: "SCALYR_SERVER_URL", + Value: serverURL, + }, + }, + Resources: *containerResources, + } +} + +// mergeSidecar merges globally-defined sidecars with those defined in the cluster manifest +func (c *Cluster) mergeSidecars(sidecars []spec.Sidecar) []spec.Sidecar { + globalSidecarsToSkip := map[string]bool{} + result := make([]spec.Sidecar, 0) + + for i, sidecar := range sidecars { + dockerImage, ok := c.OpConfig.Sidecars[sidecar.Name] + if ok { + if dockerImage != sidecar.DockerImage { + c.logger.Warningf("merging definitions for sidecar %q: "+ + "ignoring %q in the global scope in favor of %q defined in the cluster", + sidecar.Name, dockerImage, sidecar.DockerImage) + } + globalSidecarsToSkip[sidecar.Name] = true + } + result = append(result, sidecars[i]) + } + for name, dockerImage := range c.OpConfig.Sidecars { + if !globalSidecarsToSkip[name] { + result = append(result, spec.Sidecar{Name: name, DockerImage: dockerImage}) + } + } + return result +} + func (c *Cluster) getNumberOfInstances(spec *spec.PostgresSpec) (newcur int32) { min := c.OpConfig.MinInstances max := c.OpConfig.MaxInstances diff --git a/pkg/spec/postgresql.go b/pkg/spec/postgresql.go index bd5d06127..d59ccd22c 100644 --- a/pkg/spec/postgresql.go +++ b/pkg/spec/postgresql.go @@ -61,6 +61,15 @@ type CloneDescription struct { EndTimestamp string `json:"timestamp,omitempty"` } +// Sidecar defines a container to be run in the same pod as the Postgres container. 
+type Sidecar struct { + Resources `json:"resources,omitempty"` + Name string `json:"name,omitempty"` + DockerImage string `json:"image,omitempty"` + Ports []v1.ContainerPort `json:"ports,omitempty"` + Env []v1.EnvVar `json:"env,omitempty"` +} + type UserFlags []string // PostgresStatus contains status of the PostgreSQL cluster (running, creation failed etc.) @@ -124,6 +133,7 @@ type PostgresSpec struct { ClusterName string `json:"-"` Databases map[string]string `json:"databases,omitempty"` Tolerations []v1.Toleration `json:"tolerations,omitempty"` + Sidecars []Sidecar `json:"sidecars,omitempty"` } // PostgresqlList defines a list of PostgreSQL clusters. diff --git a/pkg/util/config/config.go b/pkg/util/config/config.go index d41b63e6e..26b4d378b 100644 --- a/pkg/util/config/config.go +++ b/pkg/util/config/config.go @@ -68,9 +68,10 @@ type Config struct { Auth Scalyr - WatchedNamespace string `name:"watched_namespace"` // special values: "*" means 'watch all namespaces', the empty string "" means 'watch a namespace where operator is deployed to' - EtcdHost string `name:"etcd_host" default:""` // special values: the empty string "" means Patroni will use k8s as a DCS - DockerImage string `name:"docker_image" default:"registry.opensource.zalan.do/acid/spilo-cdp-10:1.4-p8"` + WatchedNamespace string `name:"watched_namespace"` // special values: "*" means 'watch all namespaces', the empty string "" means 'watch a namespace where operator is deployed to' + EtcdHost string `name:"etcd_host" default:""` // special values: the empty string "" means Patroni will use k8s as a DCS + DockerImage string `name:"docker_image" default:"registry.opensource.zalan.do/acid/spilo-cdp-10:1.4-p8"` + Sidecars map[string]string `name:"sidecar_docker_images"` // default name `operator` enables backward compatibility with the older ServiceAccountName field PodServiceAccountName string `name:"pod_service_account_name" default:"operator"` // value of this string must be valid JSON or YAML; see 
initPodServiceAccount From b7b950eb28cfeae34b39db592d5880334049f0a7 Mon Sep 17 00:00:00 2001 From: Oleksii Kliukin Date: Mon, 16 Jul 2018 11:49:58 +0200 Subject: [PATCH 04/30] Use the StorageClassName field of the volumeClaimTemplate. (#338) The old way of specifying it with the annotation is deprecated and not available in recent Kubernetes versions. We will keep it there anyway until upgrading to the new go-client that is incompatible with those versions. Per report from @schmitch --- pkg/cluster/k8sres.go | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/pkg/cluster/k8sres.go b/pkg/cluster/k8sres.go index 49bd9fe84..162f2ef45 100644 --- a/pkg/cluster/k8sres.go +++ b/pkg/cluster/k8sres.go @@ -835,14 +835,19 @@ func (c *Cluster) getNumberOfInstances(spec *spec.PostgresSpec) (newcur int32) { } func generatePersistentVolumeClaimTemplate(volumeSize, volumeStorageClass string) (*v1.PersistentVolumeClaim, error) { + + var storageClassName *string + metadata := metav1.ObjectMeta{ Name: constants.DataVolumeName, } if volumeStorageClass != "" { - // TODO: check if storage class exists + // TODO: remove the old annotation, switching completely to the StorageClassName field. metadata.Annotations = map[string]string{"volume.beta.kubernetes.io/storage-class": volumeStorageClass} + storageClassName = &volumeStorageClass } else { metadata.Annotations = map[string]string{"volume.alpha.kubernetes.io/storage-class": "default"} + storageClassName = nil } quantity, err := resource.ParseQuantity(volumeSize) @@ -859,6 +864,7 @@ func generatePersistentVolumeClaimTemplate(volumeSize, volumeStorageClass string v1.ResourceStorage: quantity, }, }, + StorageClassName: storageClassName, }, } From e90a01050c68fef2e7e095b4c7b443a25730c25f Mon Sep 17 00:00:00 2001 From: Oleksii Kliukin Date: Mon, 16 Jul 2018 11:50:35 +0200 Subject: [PATCH 05/30] Switchover must wait for the inner goroutine before it returns. 
(#343) * Switchover must wait for the inner goroutine before it returns. Otherwise, two corner cases may happen: - waitForPodLabel writes to the podLabelErr channel that has been already closed by the outer routine - the outer routine exists and the caller subscribes to the pod the inner goroutine has already subscribed to, resulting in panic. The previous commit https://github.com/zalando-incubator/postgres-operator/commit/fe47f9ebeadd54639913296735158b42d17ee012 that touched that code added the cancellation channel, but didn't bother to actually wait for the goroutine to be cancelled. Per report and review from @valer-cara. Original issue: https://github.com/zalando-incubator/postgres-operator/issues/342 --- pkg/cluster/cluster.go | 43 ++++++++++++++++++++++++++---------------- 1 file changed, 27 insertions(+), 16 deletions(-) diff --git a/pkg/cluster/cluster.go b/pkg/cluster/cluster.go index 2662cd521..1dd5fd6b1 100644 --- a/pkg/cluster/cluster.go +++ b/pkg/cluster/cluster.go @@ -867,14 +867,19 @@ func (c *Cluster) GetStatus() *spec.ClusterStatus { } // Switchover does a switchover (via Patroni) to a candidate pod -func (c *Cluster) Switchover(curMaster *v1.Pod, candidate spec.NamespacedName) error { +func (c *Cluster) Switchover(curMaster *v1.Pod, candidate spec.NamespacedName) (err error) { + c.logger.Debugf("failing over from %q to %q", curMaster.Name, candidate) + var wg sync.WaitGroup + podLabelErr := make(chan error) stopCh := make(chan struct{}) - defer close(podLabelErr) + + wg.Add(1) go func() { + defer wg.Done() ch := c.registerPodSubscriber(candidate) defer c.unregisterPodSubscriber(candidate) @@ -882,26 +887,32 @@ func (c *Cluster) Switchover(curMaster *v1.Pod, candidate spec.NamespacedName) e select { case <-stopCh: - case podLabelErr <- func() error { - _, err := c.waitForPodLabel(ch, stopCh, &role) - return err + case podLabelErr <- func() (err error) { + _, err = c.waitForPodLabel(ch, stopCh, &role) + return }(): } }() - if err := 
c.patroni.Switchover(curMaster, candidate.Name); err != nil { - close(stopCh) - return fmt.Errorf("could not failover: %v", err) - } - c.logger.Debugf("successfully failed over from %q to %q", curMaster.Name, candidate) - - defer close(stopCh) - - if err := <-podLabelErr; err != nil { - return fmt.Errorf("could not get master pod label: %v", err) + if err = c.patroni.Switchover(curMaster, candidate.Name); err == nil { + c.logger.Debugf("successfully failed over from %q to %q", curMaster.Name, candidate) + if err = <-podLabelErr; err != nil { + err = fmt.Errorf("could not get master pod label: %v", err) + } + } else { + err = fmt.Errorf("could not failover: %v", err) } - return nil + // signal the role label waiting goroutine to close the shop and go home + close(stopCh) + // wait until the goroutine terminates, since unregisterPodSubscriber + // must be called before the outer return; otherwsise we risk subscribing to the same pod twice. + wg.Wait() + // close the label waiting channel no sooner than the waiting goroutine terminates. + close(podLabelErr) + + return + } // Lock locks the cluster From 3a9378d3b8edef1789e062e1f446018b9f8e6528 Mon Sep 17 00:00:00 2001 From: Oleksii Kliukin Date: Mon, 16 Jul 2018 16:20:46 +0200 Subject: [PATCH 06/30] Allow configuring the operator via the YAML manifest. (#326) * Up until now, the operator read its own configuration from the configmap. That has a number of limitations, i.e. when the configuration value is not a scalar, but a map or a list. We use a custom code based on github.com/kelseyhightower/envconfig to decode non-scalar values out of plain text keys, but that breaks when the data inside the keys contains both YAML-special elememtns (i.e. commas) and complex quotes, one good example for that is search_path inside `team_api_role_configuration`. 
In addition, reliance on the configmap forced a flag structure on the configuration, making it hard to write and to read (see https://github.com/zalando-incubator/postgres-operator/pull/308#issuecomment-395131778). The changes allow supplying the operator configuration in a proper YAML file. That required registering a custom CRD to support the operator configuration and provide an example at manifests/postgresql-operator-default-configuration.yaml. At the moment, both the old configmap and the new CRD configuration are supported, so no compatibility issues, however, in the future I'd like to deprecate the configmap-based configuration altogether. Contrary to the configmap-based configuration, the CRD one doesn't embed defaults into the operator code, however, one can use the manifests/postgresql-operator-default-configuration.yaml as a starting point in order to build a custom configuration. Since previously `ReadyWaitInterval` and `ReadyWaitTimeout` parameters used to create the CRD were taken from the operator configuration, which is not possible if the configuration itself is stored in the CRD object, I've added the ability to specify them as environment variables `CRD_READY_WAIT_INTERVAL` and `CRD_READY_WAIT_TIMEOUT` respectively. Per review by @zerg-junior and @Jan-M. 
--- cmd/main.go | 20 +++ .../reference/command_line_and_environment.md | 8 + docs/reference/operator_parameters.md | 102 ++++++++++- ...gresql-operator-default-configuration.yaml | 81 +++++++++ pkg/cluster/cluster.go | 2 +- pkg/cluster/util.go | 2 +- pkg/controller/controller.go | 49 ++++-- pkg/controller/operator_config.go | 108 ++++++++++++ pkg/controller/postgresql.go | 4 +- pkg/controller/util.go | 24 ++- pkg/spec/postgresql.go | 4 +- pkg/spec/types.go | 52 +++++- pkg/util/config/crd_config.go | 160 ++++++++++++++++++ pkg/util/constants/crd.go | 13 +- 14 files changed, 584 insertions(+), 45 deletions(-) create mode 100644 manifests/postgresql-operator-default-configuration.yaml create mode 100644 pkg/controller/operator_config.go create mode 100644 pkg/util/config/crd_config.go diff --git a/cmd/main.go b/cmd/main.go index 5c8aadd8f..b400630f6 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -7,6 +7,7 @@ import ( "os/signal" "sync" "syscall" + "time" "github.com/zalando-incubator/postgres-operator/pkg/controller" "github.com/zalando-incubator/postgres-operator/pkg/spec" @@ -20,6 +21,14 @@ var ( config spec.ControllerConfig ) +func mustParseDuration(d string) time.Duration { + duration, err := time.ParseDuration(d) + if err != nil { + panic(err) + } + return duration +} + func init() { flag.StringVar(&kubeConfigFile, "kubeconfig", "", "Path to kubeconfig file with authorization and master location information.") flag.BoolVar(&outOfCluster, "outofcluster", false, "Whether the operator runs in- our outside of the Kubernetes cluster.") @@ -38,6 +47,17 @@ func init() { log.Printf("Fully qualified configmap name: %v", config.ConfigMapName) } + if crd_interval := os.Getenv("CRD_READY_WAIT_INTERVAL"); crd_interval != "" { + config.CRDReadyWaitInterval = mustParseDuration(crd_interval) + } else { + config.CRDReadyWaitInterval = 4 * time.Second + } + + if crd_timeout := os.Getenv("CRD_READY_WAIT_TIMEOUT"); crd_timeout != "" { + config.CRDReadyWaitTimeout = 
mustParseDuration(crd_timeout) + } else { + config.CRDReadyWaitTimeout = 30 * time.Second + } } func main() { diff --git a/docs/reference/command_line_and_environment.md b/docs/reference/command_line_and_environment.md index 1c4a6e8d7..7324e8a39 100644 --- a/docs/reference/command_line_and_environment.md +++ b/docs/reference/command_line_and_environment.md @@ -48,3 +48,11 @@ The following environment variables are accepted by the operator: * **SCALYR_API_KEY** the value of the Scalyr API key to supply to the pods. Overrides the `scalyr_api_key` operator parameter. + +* **CRD_READY_WAIT_TIMEOUT** + defines the timeout for the complete postgres CRD creation. When not set + default is 30s. + +* **CRD_READY_WAIT_INTERVAL** + defines the interval between consecutive attempts waiting for the postgres + CRD to be created. The default is 4s. diff --git a/docs/reference/operator_parameters.md b/docs/reference/operator_parameters.md index e9e220119..331a77dbd 100644 --- a/docs/reference/operator_parameters.md +++ b/docs/reference/operator_parameters.md @@ -1,9 +1,54 @@ +There are two mutually-exclusive methods to set the Postgres Operator +configuration. -Postgres operator is configured via a ConfigMap defined by the -`CONFIG_MAP_NAME` environment variable. Variable names are underscore-separated -words. +* ConfigMaps-based, the legacy one. The configuration is supplied in a + key-value configmap, defined by the `CONFIG_MAP_NAME` environment variable. + Non-scalar values, i.e. lists or maps, are encoded in the value strings using + the comma-based syntax for lists and comma-separated `key:value` syntax for + maps. String values containing ':' should be enclosed in quotes. The + configuration is flat, parameter group names below are not reflected in the + configuration structure. There is an + [example](https://github.com/zalando-incubator/postgres-operator/blob/master/manifests/configmap.yaml) + +* CRD-based configuration.
The configuration is stored in the custom YAML + manifest, an instance of the custom resource definition (CRD) called + `postgresql-operator-configuration`. This CRD is registered by the operator + during the start when `POSTGRES_OPERATOR_CONFIGURATION_OBJECT` variable is + set to a non-empty value. The CRD-based configuration is a regular YAML + document; non-scalar keys are simply represented in the usual YAML way. The + usage of the CRD-based configuration is triggered by setting the + `POSTGRES_OPERATOR_CONFIGURATION_OBJECT` variable, which should point to the + `postgresql-operator-configuration` object name in the operators namespace. + There are no default values built-in in the operator, each parameter that is + not supplied in the configuration receives an empty value. In order to + create your own configuration just copy the [default + one](https://github.com/zalando-incubator/postgres-operator/blob/wip/operator_configuration_via_crd/manifests/postgresql-operator-default-configuration.yaml) + and change it. + +CRD-based configuration is more natural and powerful than the one based on +ConfigMaps and should be used unless there is a compatibility requirement to +use an already existing configuration. Even in that case, it should be rather +straightforward to convert the configmap based configuration into the CRD-based +one and restart the operator. The ConfigMaps-based configuration will be +deprecated and subsequently removed in future releases. + +Note that for the CRD-based configuration, configuration groups below correspond +to the non-leaf keys in the target YAML (i.e. for the Kubernetes resources the +key is `kubernetes`). The key is mentioned alongside the group description. The +ConfigMap-based configuration is flat and does not allow non-leaf keys. 
+ +Since in the CRD-based case the operator needs to create a CRD first, which is +controlled by the `resource_check_interval` and `resource_check_timeout` +parameters, those parameters have no effect and are replaced by the +`CRD_READY_WAIT_INTERVAL` and `CRD_READY_WAIT_TIMEOUT` environment variables. +They will be deprecated and removed in the future. + +Variable names are underscore-separated words. ## General + +Those are top-level keys, containing both leaf keys and groups. + * **etcd_host** Etcd connection string for Patroni defined as `host:port`. Not required when Patroni native Kubernetes support is used. The default is empty (use @@ -38,6 +83,10 @@ words. period between consecutive sync requests. The default is `5m`. ## Postgres users + +Parameters describing Postgres users. In a CRD-configuration, they are grouped +under the `users` key. + * **super_username** postgres `superuser` name to be created by `initdb`. The default is `postgres`. @@ -47,6 +96,11 @@ words. `standby`. ## Kubernetes resources + +Parameters to configure cluster-related Kubernetes objects created by the +operator, as well as some timeouts associated with them. In a CRD-based +configuration they are grouped under the `kubernetes` key. + * **pod_service_account_name** service account used by Patroni running on individual Pods to communicate with the operator. Required even if native Kubernetes support in Patroni is @@ -127,6 +181,11 @@ words. operator. The default is empty. ## Kubernetes resource requests + +This group allows you to configure resource requests for the Postgres pods. +Those parameters are grouped under the `postgres_pod_resources` key in a +CRD-based configuration. + * **default_cpu_request** CPU request value for the postgres containers, unless overridden by cluster-specific settings. The default is `100m`. @@ -144,6 +203,13 @@ words. settings. The default is `1Gi`. 
## Operator timeouts + +This set of parameters define various timeouts related to some operator +actions, affecting pod operations and CRD creation. In the CRD-based +configuration `resource_check_interval` and `resource_check_timeout` have no +effect, and the parameters are grouped under the `timeouts` key in the +CRD-based configuration. + * **resource_check_interval** interval to wait between consecutive attempts to check for the presence of some Kubernetes resource (i.e. `StatefulSet` or `PodDisruptionBudget`). The @@ -171,6 +237,10 @@ words. the timeout for the complete postgres CRD creation. The default is `30s`. ## Load balancer related options + +Those options affect the behavior of load balancers created by the operator. +In the CRD-based configuration they are grouped under the `load_balancer` key. + * **db_hosted_zone** DNS zone for the cluster DNS name when the load balancer is configured for the cluster. Only used when combined with @@ -202,6 +272,12 @@ words. No other placeholders are allowed. ## AWS or GSC interaction + +The options in this group configure operator interactions with non-Kubernetes +objects from AWS or Google cloud. They have no effect unless you are using +either. In the CRD-based configuration those options are grouped under the +`aws_or_gcp` key. + * **wal_s3_bucket** S3 bucket to use for shipping WAL segments with WAL-E. A bucket has to be present and accessible by Patroni managed pods. At the moment, supported @@ -218,9 +294,12 @@ words. [kube2iam](https://github.com/jtblin/kube2iam) project on AWS. The default is empty. * **aws_region** - AWS region used to store ESB volumes. + AWS region used to store ESB volumes. The default is `eu-central-1`. ## Debugging the operator + +Options to aid debugging of the operator itself. Grouped under the `debug` key. + * **debug_logging** boolean parameter that toggles verbose debug logs from the operator. The default is `true`. @@ -230,7 +309,12 @@ words. access to the postgres database, i.e. 
creating databases and users. The default is `true`. -### Automatic creation of human users in the database +## Automatic creation of human users in the database + +Options to automate creation of human users with the aid of the teams API +service. In the CRD-based configuration those are grouped under the `teams_api` +key. + * **enable_teams_api** boolean parameter that toggles usage of the Teams API by the operator. The default is `true`. @@ -276,6 +360,9 @@ words. infrastructure role. The default is `admin`. ## Logging and REST API + +Parameters affecting logging and REST API listener. In the CRD-based configuration they are grouped under the `logging_rest_api` key. + * **api_port** REST API listener listens to this port. The default is `8080`. @@ -286,6 +373,11 @@ words. number of entries in the cluster history ring buffer. The default is `1000`. ## Scalyr options + +Those parameters define the resource requests/limits and properties of the +scalyr sidecar. In the CRD-based configuration they are grouped under the +`scalyr` key. + * **scalyr_api_key** API key for the Scalyr sidecar. The default is empty. 
diff --git a/manifests/postgresql-operator-default-configuration.yaml b/manifests/postgresql-operator-default-configuration.yaml new file mode 100644 index 000000000..7dcf75091 --- /dev/null +++ b/manifests/postgresql-operator-default-configuration.yaml @@ -0,0 +1,81 @@ +apiVersion: "acid.zalan.do/v1" +kind: postgresql-operator-configuration +metadata: + name: postgresql-operator-default-configuration +configuration: + etcd_host: "" + docker_image: registry.opensource.zalan.do/acid/spilo-cdp-10:1.4-p8 + workers: 4 + min_instances: -1 + max_instances: -1 + resync_period: 5m + #sidecar_docker_images: + # example: "exampleimage:exampletag" + users: + super_username: postgres + replication_username: standby + kubernetes: + pod_service_account_name: operator + pod_terminate_grace_period: 5m + pdb_name_format: "postgres-{cluster}-pdb" + secret_name_template: "{username}.{cluster}.credentials.{tprkind}.{tprgroup}" + oauth_token_secret_name: postgresql-operator + pod_role_label: spilo-role + cluster_labels: + application: spilo + cluster_name_label: cluster-name + # watched_namespace:"" + # node_readiness_label: "" + # toleration: {} + # infrastructure_roles_secret_name: "" + # pod_environment_configmap: "" + postgres_pod_resources: + default_cpu_request: 100m + default_memory_request: 100Mi + default_cpu_limit: "3" + default_memory_limit: 1Gi + timeouts: + resource_check_interval: 3s + resource_check_timeout: 10m + pod_label_wait_timeout: 10m + pod_deletion_wait_timeout: 10m + ready_wait_interval: 4s + ready_wait_timeout: 30s + load_balancer: + enable_master_load_balancer: false + enable_replica_load_balancer: false + master_dns_name_format: "{cluster}.{team}.{hostedzone}" + replica_dns_name_format: "{cluster}-repl.{team}.{hostedzone}" + aws_or_gcp: + # db_hosted_zone: "" + # wal_s3_bucket: "" + # log_s3_bucket: "" + # kube_iam_role: "" + aws_region: eu-central-1 + debug: + debug_logging: true + enable_database_access: true + teams_api: + enable_teams_api: false + 
team_api_role_configuration: + log_statement: all + enable_team_superuser: false + team_admin_role: admin + pam_role_name: zalandos + # pam_configuration: "" + protected_role_names: + - admin + # teams_api_url: "" + logging_rest_api: + api_port: 8008 + ring_log_lines: 100 + cluster_history_entries: 1000 + scalyr: + scalyr_cpu_request: 100m + scalyr_memory_request: 50Mi + scalyr_cpu_limit: "1" + scalyr_memory_limit: 1Gi + # scalyr_api_key: "" + # scalyr_image: "" + # scalyr_server_url: "" + diff --git a/pkg/cluster/cluster.go b/pkg/cluster/cluster.go index 1dd5fd6b1..1f97aae0d 100644 --- a/pkg/cluster/cluster.go +++ b/pkg/cluster/cluster.go @@ -155,7 +155,7 @@ func (c *Cluster) setStatus(status spec.PostgresStatus) { _, err = c.KubeClient.CRDREST.Patch(types.MergePatchType). Namespace(c.Namespace). - Resource(constants.CRDResource). + Resource(constants.PostgresCRDResource). Name(c.Name). Body(request). DoRaw() diff --git a/pkg/cluster/util.go b/pkg/cluster/util.go index e7db26d82..ab8189d96 100644 --- a/pkg/cluster/util.go +++ b/pkg/cluster/util.go @@ -424,7 +424,7 @@ func (c *Cluster) credentialSecretNameForCluster(username string, clusterName st return c.OpConfig.SecretNameTemplate.Format( "username", strings.Replace(username, "_", "-", -1), "cluster", clusterName, - "tprkind", constants.CRDKind, + "tprkind", constants.PostgresCRDKind, "tprgroup", constants.CRDGroup) } diff --git a/pkg/controller/controller.go b/pkg/controller/controller.go index 84a07811b..d02d5ea8a 100644 --- a/pkg/controller/controller.go +++ b/pkg/controller/controller.go @@ -101,23 +101,24 @@ func (c *Controller) initOperatorConfig() { c.logger.Infoln("no ConfigMap specified. 
Loading default values") } - configMapData["watched_namespace"] = c.getEffectiveNamespace(os.Getenv("WATCHED_NAMESPACE"), configMapData["watched_namespace"]) - - if c.config.NoDatabaseAccess { - configMapData["enable_database_access"] = "false" - } - if c.config.NoTeamsAPI { - configMapData["enable_teams_api"] = "false" - } - c.opConfig = config.NewFromMap(configMapData) c.warnOnDeprecatedOperatorParameters() +} + +func (c *Controller) modifyConfigFromEnvironment() { + c.opConfig.WatchedNamespace = c.getEffectiveNamespace(os.Getenv("WATCHED_NAMESPACE"), c.opConfig.WatchedNamespace) + + if c.config.NoDatabaseAccess { + c.opConfig.EnableDBAccess = c.config.NoDatabaseAccess + } + if c.config.NoTeamsAPI { + c.opConfig.EnableTeamsAPI = c.config.NoTeamsAPI + } scalyrAPIKey := os.Getenv("SCALYR_API_KEY") if scalyrAPIKey != "" { c.opConfig.ScalyrAPIKey = scalyrAPIKey } - } // warningOnDeprecatedParameters emits warnings upon finding deprecated parmaters @@ -163,20 +164,34 @@ func (c *Controller) initPodServiceAccount() { func (c *Controller) initController() { c.initClients() - c.initOperatorConfig() + + if configObjectName := os.Getenv("POSTGRES_OPERATOR_CONFIGURATION_OBJECT"); configObjectName != "" { + if err := c.createConfigurationCRD(); err != nil { + c.logger.Fatalf("could not register Operator Configuration CustomResourceDefinition: %v", err) + } + if cfg, err := c.readOperatorConfigurationFromCRD(spec.GetOperatorNamespace(), configObjectName); err != nil { + c.logger.Fatalf("unable to read operator configuration: %v", err) + } else { + c.opConfig = c.importConfigurationFromCRD(&cfg.Configuration) + } + } else { + c.initOperatorConfig() + } + + c.modifyConfigFromEnvironment() + + if err := c.createPostgresCRD(); err != nil { + c.logger.Fatalf("could not register Postgres CustomResourceDefinition: %v", err) + } + c.initPodServiceAccount() - c.initSharedInformers() - c.logger.Infof("config: %s", c.opConfig.MustMarshal()) - if c.opConfig.DebugLogging { 
c.logger.Logger.Level = logrus.DebugLevel } - if err := c.createCRD(); err != nil { - c.logger.Fatalf("could not register CustomResourceDefinition: %v", err) - } + c.logger.Infof("config: %s", c.opConfig.MustMarshal()) if infraRoles, err := c.getInfrastructureRoles(&c.opConfig.InfrastructureRolesSecretName); err != nil { c.logger.Warningf("could not get infrastructure roles: %v", err) diff --git a/pkg/controller/operator_config.go b/pkg/controller/operator_config.go new file mode 100644 index 000000000..11ad32959 --- /dev/null +++ b/pkg/controller/operator_config.go @@ -0,0 +1,108 @@ +package controller + +import ( + "encoding/json" + "fmt" + + "github.com/zalando-incubator/postgres-operator/pkg/util/config" + "github.com/zalando-incubator/postgres-operator/pkg/util/constants" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "time" +) + +func (c *Controller) readOperatorConfigurationFromCRD(configObjectNamespace, configObjectName string) (*config.OperatorConfiguration, error) { + var ( + opConfig config.OperatorConfiguration + ) + + req := c.KubeClient.CRDREST.Get(). + Name(configObjectName). + Namespace(configObjectNamespace). + Resource(constants.OperatorConfigCRDResource). 
+ VersionedParams(&metav1.ListOptions{ResourceVersion: "0"}, metav1.ParameterCodec) + + data, err := req.DoRaw() + if err != nil { + return nil, fmt.Errorf("could not get operator configuration object %s: %v", configObjectName, err) + } + if err = json.Unmarshal(data, &opConfig); err != nil { + return nil, fmt.Errorf("could not unmarshal operator configuration object %s, %v", configObjectName, err) + } + + return &opConfig, nil +} + +// importConfigurationFromCRD is a transitional function that converts CRD configuration to the one based on the configmap +func (c *Controller) importConfigurationFromCRD(fromCRD *config.OperatorConfigurationData) *config.Config { + result := &config.Config{} + + result.EtcdHost = fromCRD.EtcdHost + result.DockerImage = fromCRD.DockerImage + result.Workers = fromCRD.Workers + result.MinInstances = fromCRD.MinInstances + result.MaxInstances = fromCRD.MaxInstances + result.ResyncPeriod = time.Duration(fromCRD.ResyncPeriod) + result.Sidecars = fromCRD.Sidecars + + result.SuperUsername = fromCRD.PostgresUsersConfiguration.SuperUsername + result.ReplicationUsername = fromCRD.PostgresUsersConfiguration.ReplicationUsername + + result.PodServiceAccountName = fromCRD.Kubernetes.PodServiceAccountName + result.PodServiceAccountDefinition = fromCRD.Kubernetes.PodServiceAccountDefinition + result.PodTerminateGracePeriod = time.Duration(fromCRD.Kubernetes.PodTerminateGracePeriod) + result.WatchedNamespace = fromCRD.Kubernetes.WatchedNamespace + result.PDBNameFormat = fromCRD.Kubernetes.PDBNameFormat + result.SecretNameTemplate = fromCRD.Kubernetes.SecretNameTemplate + result.OAuthTokenSecretName = fromCRD.Kubernetes.OAuthTokenSecretName + result.InfrastructureRolesSecretName = fromCRD.Kubernetes.InfrastructureRolesSecretName + result.PodRoleLabel = fromCRD.Kubernetes.PodRoleLabel + result.ClusterLabels = fromCRD.Kubernetes.ClusterLabels + result.ClusterNameLabel = fromCRD.Kubernetes.ClusterNameLabel + result.NodeReadinessLabel = 
fromCRD.Kubernetes.NodeReadinessLabel + + result.DefaultCPURequest = fromCRD.PostgresPodResources.DefaultCPURequest + result.DefaultMemoryRequest = fromCRD.PostgresPodResources.DefaultMemoryRequest + result.DefaultCPULimit = fromCRD.PostgresPodResources.DefaultCPULimit + result.DefaultMemoryLimit = fromCRD.PostgresPodResources.DefaultMemoryLimit + + result.ResourceCheckInterval = time.Duration(fromCRD.Timeouts.ResourceCheckInterval) + result.ResourceCheckTimeout = time.Duration(fromCRD.Timeouts.ResourceCheckTimeout) + result.PodLabelWaitTimeout = time.Duration(fromCRD.Timeouts.PodLabelWaitTimeout) + result.PodDeletionWaitTimeout = time.Duration(fromCRD.Timeouts.PodDeletionWaitTimeout) + result.ReadyWaitInterval = time.Duration(fromCRD.Timeouts.ReadyWaitInterval) + result.ReadyWaitTimeout = time.Duration(fromCRD.Timeouts.ReadyWaitTimeout) + + result.DbHostedZone = fromCRD.LoadBalancer.DbHostedZone + result.EnableMasterLoadBalancer = fromCRD.LoadBalancer.EnableMasterLoadBalancer + result.EnableReplicaLoadBalancer = fromCRD.LoadBalancer.EnableReplicaLoadBalancer + result.MasterDNSNameFormat = fromCRD.LoadBalancer.MasterDNSNameFormat + result.ReplicaDNSNameFormat = fromCRD.LoadBalancer.ReplicaDNSNameFormat + + result.WALES3Bucket = fromCRD.AWSGCP.WALES3Bucket + result.AWSRegion = fromCRD.AWSGCP.AWSRegion + result.LogS3Bucket = fromCRD.AWSGCP.LogS3Bucket + result.KubeIAMRole = fromCRD.AWSGCP.KubeIAMRole + + result.DebugLogging = fromCRD.OperatorDebug.DebugLogging + result.EnableDBAccess = fromCRD.OperatorDebug.EnableDBAccess + result.EnableTeamsAPI = fromCRD.TeamsAPI.EnableTeamsAPI + result.TeamsAPIUrl = fromCRD.TeamsAPI.TeamsAPIUrl + result.TeamAPIRoleConfiguration = fromCRD.TeamsAPI.TeamAPIRoleConfiguration + result.EnableTeamSuperuser = fromCRD.TeamsAPI.EnableTeamSuperuser + result.TeamAdminRole = fromCRD.TeamsAPI.TeamAdminRole + result.PamRoleName = fromCRD.TeamsAPI.PamRoleName + + result.APIPort = fromCRD.LoggingRESTAPI.APIPort + result.RingLogLines = 
fromCRD.LoggingRESTAPI.RingLogLines + result.ClusterHistoryEntries = fromCRD.LoggingRESTAPI.ClusterHistoryEntries + + result.ScalyrAPIKey = fromCRD.Scalyr.ScalyrAPIKey + result.ScalyrImage = fromCRD.Scalyr.ScalyrImage + result.ScalyrServerURL = fromCRD.Scalyr.ScalyrServerURL + result.ScalyrCPURequest = fromCRD.Scalyr.ScalyrCPURequest + result.ScalyrMemoryRequest = fromCRD.Scalyr.ScalyrMemoryRequest + result.ScalyrCPULimit = fromCRD.Scalyr.ScalyrCPULimit + result.ScalyrMemoryLimit = fromCRD.Scalyr.ScalyrMemoryLimit + + return result +} diff --git a/pkg/controller/postgresql.go b/pkg/controller/postgresql.go index cb689b70a..bf7fe8889 100644 --- a/pkg/controller/postgresql.go +++ b/pkg/controller/postgresql.go @@ -48,7 +48,7 @@ func (c *Controller) clusterListFunc(options metav1.ListOptions) (runtime.Object req := c.KubeClient.CRDREST. Get(). Namespace(c.opConfig.WatchedNamespace). - Resource(constants.CRDResource). + Resource(constants.PostgresCRDResource). VersionedParams(&options, metav1.ParameterCodec) b, err := req.DoRaw() @@ -117,7 +117,7 @@ func (c *Controller) clusterWatchFunc(options metav1.ListOptions) (watch.Interfa r, err := c.KubeClient.CRDREST. Get(). Namespace(c.opConfig.WatchedNamespace). - Resource(constants.CRDResource). + Resource(constants.PostgresCRDResource). VersionedParams(&options, metav1.ParameterCodec). FieldsSelectorParam(nil). Stream() diff --git a/pkg/controller/util.go b/pkg/controller/util.go index 5e46e93eb..ef1472a2a 100644 --- a/pkg/controller/util.go +++ b/pkg/controller/util.go @@ -47,20 +47,20 @@ func (c *Controller) clusterWorkerID(clusterName spec.NamespacedName) uint32 { return c.clusterWorkers[clusterName] } -func (c *Controller) createCRD() error { +func (c *Controller) createOperatorCRD(plural, singular, short string) error { crd := &apiextv1beta1.CustomResourceDefinition{ ObjectMeta: metav1.ObjectMeta{ - Name: constants.CRDResource + "." + constants.CRDGroup, + Name: plural + "." 
+ constants.CRDGroup, }, Spec: apiextv1beta1.CustomResourceDefinitionSpec{ Group: constants.CRDGroup, Version: constants.CRDApiVersion, Names: apiextv1beta1.CustomResourceDefinitionNames{ - Plural: constants.CRDResource, - Singular: constants.CRDKind, - ShortNames: []string{constants.CRDShort}, - Kind: constants.CRDKind, - ListKind: constants.CRDKind + "List", + Plural: plural, + Singular: singular, + ShortNames: []string{short}, + Kind: singular, + ListKind: singular + "List", }, Scope: apiextv1beta1.NamespaceScoped, }, @@ -75,7 +75,7 @@ func (c *Controller) createCRD() error { c.logger.Infof("customResourceDefinition %q has been registered", crd.Name) } - return wait.Poll(c.opConfig.CRD.ReadyWaitInterval, c.opConfig.CRD.ReadyWaitTimeout, func() (bool, error) { + return wait.Poll(c.config.CRDReadyWaitInterval, c.config.CRDReadyWaitTimeout, func() (bool, error) { c, err := c.KubeClient.CustomResourceDefinitions().Get(crd.Name, metav1.GetOptions{}) if err != nil { return false, err @@ -98,6 +98,14 @@ func (c *Controller) createCRD() error { }) } +func (c *Controller) createPostgresCRD() error { + return c.createOperatorCRD(constants.PostgresCRDResource, constants.PostgresCRDKind, constants.PostgresCRDShort) +} + +func (c *Controller) createConfigurationCRD() error { + return c.createOperatorCRD(constants.OperatorConfigCRDResource, constants.OperatorConfigCRDKind, constants.OperatorConfigCRDShort) +} + func readDecodedRole(s string) (*spec.PgUser, error) { var result spec.PgUser if err := yaml.Unmarshal([]byte(s), &result); err != nil { diff --git a/pkg/spec/postgresql.go b/pkg/spec/postgresql.go index d59ccd22c..55713632a 100644 --- a/pkg/spec/postgresql.go +++ b/pkg/spec/postgresql.go @@ -150,7 +150,7 @@ var ( ) // Clone makes a deepcopy of the Postgresql structure. 
The Error field is nulled-out, -// as there is no guaratee that the actual implementation of the error interface +// as there is no guarantee that the actual implementation of the error interface // will not contain any private fields not-reachable to deepcopy. This should be ok, // since Error is never read from a Kubernetes object. func (p *Postgresql) Clone() *Postgresql { @@ -200,7 +200,7 @@ func (m *MaintenanceWindow) MarshalJSON() ([]byte, error) { m.EndTime.Format("15:04"))), nil } -// UnmarshalJSON convets a JSON to the maintenance window definition. +// UnmarshalJSON converts a JSON to the maintenance window definition. func (m *MaintenanceWindow) UnmarshalJSON(data []byte) error { var ( got MaintenanceWindow diff --git a/pkg/spec/types.go b/pkg/spec/types.go index 204d16aa7..32d709811 100644 --- a/pkg/spec/types.go +++ b/pkg/spec/types.go @@ -2,6 +2,7 @@ package spec import ( "database/sql" + "encoding/json" "fmt" "io/ioutil" "log" @@ -162,10 +163,12 @@ type ControllerConfig struct { RestConfig *rest.Config `json:"-"` InfrastructureRoles map[string]PgUser - NoDatabaseAccess bool - NoTeamsAPI bool - ConfigMapName NamespacedName - Namespace string + NoDatabaseAccess bool + NoTeamsAPI bool + CRDReadyWaitInterval time.Duration + CRDReadyWaitTimeout time.Duration + ConfigMapName NamespacedName + Namespace string } // cached value for the GetOperatorNamespace @@ -185,6 +188,19 @@ func (n *NamespacedName) Decode(value string) error { return n.DecodeWorker(value, GetOperatorNamespace()) } +func (n *NamespacedName) UnmarshalJSON(data []byte) error { + result := NamespacedName{} + var tmp string + if err := json.Unmarshal(data, &tmp); err != nil { + return err + } + if err := result.Decode(tmp); err != nil { + return err + } + *n = result + return nil +} + // DecodeWorker separates the decode logic to (unit) test // from obtaining the operator namespace that depends on k8s mounting files at runtime func (n *NamespacedName) DecodeWorker(value, operatorNamespace 
string) error { @@ -235,3 +251,31 @@ func GetOperatorNamespace() string { } return operatorNamespace } + +type Duration time.Duration + +func (d *Duration) UnmarshalJSON(b []byte) error { + var ( + v interface{} + err error + ) + if err = json.Unmarshal(b, &v); err != nil { + return err + } + switch val := v.(type) { + case string: + t, err := time.ParseDuration(val) + if err != nil { + return err + } + *d = Duration(t) + return nil + case float64: + t := time.Duration(val) + *d = Duration(t) + return nil + default: + return fmt.Errorf("could not recognize type %T as a valid type to unmarshal to Duration", val) + } + return nil +} diff --git a/pkg/util/config/crd_config.go b/pkg/util/config/crd_config.go new file mode 100644 index 000000000..e268c41f6 --- /dev/null +++ b/pkg/util/config/crd_config.go @@ -0,0 +1,160 @@ +package config + +import ( + "encoding/json" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/zalando-incubator/postgres-operator/pkg/spec" +) + +type OperatorConfiguration struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata"` + + Configuration OperatorConfigurationData `json:"configuration"` + Error error `json:"-"` +} + +type OperatorConfigurationList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + Items []OperatorConfiguration `json:"items"` +} + +type PostgresUsersConfiguration struct { + SuperUsername string `json:"super_username,omitempty"` + ReplicationUsername string `json:"replication_username,omitempty"` +} + +type KubernetesMetaConfiguration struct { + PodServiceAccountName string `json:"pod_service_account_name,omitempty"` + // TODO: change it to the proper json + PodServiceAccountDefinition string `json:"pod_service_account_definition,omitempty"` + PodTerminateGracePeriod spec.Duration `json:"pod_terminate_grace_period,omitempty"` + WatchedNamespace string `json:"watched_namespace,omitempty"` + PDBNameFormat stringTemplate `json:"pdb_name_format,omitempty"` 
+ SecretNameTemplate stringTemplate `json:"secret_name_template,omitempty"` + OAuthTokenSecretName spec.NamespacedName `json:"oauth_token_secret_name,omitempty"` + InfrastructureRolesSecretName spec.NamespacedName `json:"infrastructure_roles_secret_name,omitempty"` + PodRoleLabel string `json:"pod_role_label,omitempty"` + ClusterLabels map[string]string `json:"cluster_labels,omitempty"` + ClusterNameLabel string `json:"cluster_name_label,omitempty"` + NodeReadinessLabel map[string]string `json:"node_readiness_label,omitempty"` + // TODO: use a proper toleration structure? + PodToleration map[string]string `json:"toleration,omitempty"` + // TODO: use namespacedname + PodEnvironmentConfigMap string `json:"pod_environment_configmap,omitempty"` +} + +type PostgresPodResourcesDefaults struct { + DefaultCPURequest string `json:"default_cpu_request,omitempty"` + DefaultMemoryRequest string `json:"default_memory_request,omitempty"` + DefaultCPULimit string `json:"default_cpu_limit,omitempty"` + DefaultMemoryLimit string `json:"default_memory_limit,omitempty"` +} + +type OperatorTimeouts struct { + ResourceCheckInterval spec.Duration `json:"resource_check_interval,omitempty"` + ResourceCheckTimeout spec.Duration `json:"resource_check_timeout,omitempty"` + PodLabelWaitTimeout spec.Duration `json:"pod_label_wait_timeout,omitempty"` + PodDeletionWaitTimeout spec.Duration `json:"pod_deletion_wait_timeout,omitempty"` + ReadyWaitInterval spec.Duration `json:"ready_wait_interval,omitempty"` + ReadyWaitTimeout spec.Duration `json:"ready_wait_timeout,omitempty"` +} + +type LoadBalancerConfiguration struct { + DbHostedZone string `json:"db_hosted_zone,omitempty"` + EnableMasterLoadBalancer bool `json:"enable_master_load_balancer,omitempty"` + EnableReplicaLoadBalancer bool `json:"enable_replica_load_balancer,omitempty"` + MasterDNSNameFormat stringTemplate `json:"master_dns_name_format,omitempty"` + ReplicaDNSNameFormat stringTemplate `json:"replica_dns_name_format,omitempty"` +} + 
+type AWSGCPConfiguration struct { + WALES3Bucket string `json:"wal_s3_bucket,omitempty"` + AWSRegion string `json:"aws_region,omitempty"` + LogS3Bucket string `json:"log_s3_bucket,omitempty"` + KubeIAMRole string `json:"kube_iam_role,omitempty"` +} + +type OperatorDebugConfiguration struct { + DebugLogging bool `json:"debug_logging,omitempty"` + EnableDBAccess bool `json:"enable_database_access,omitempty"` +} + +type TeamsAPIConfiguration struct { + EnableTeamsAPI bool `json:"enable_teams_api,omitempty"` + TeamsAPIUrl string `json:"teams_api_url,omitempty"` + TeamAPIRoleConfiguration map[string]string `json:"team_api_role_configuration,omitempty"` + EnableTeamSuperuser bool `json:"enable_team_superuser,omitempty"` + TeamAdminRole string `json:"team_admin_role,omitempty"` + PamRoleName string `json:"pam_role_name,omitempty"` + PamConfiguration string `json:"pam_configuration,omitempty"` + ProtectedRoles []string `json:"protected_role_names,omitempty"` +} + +type LoggingRESTAPIConfiguration struct { + APIPort int `json:"api_port,omitempty"` + RingLogLines int `json:"ring_log_lines,omitempty"` + ClusterHistoryEntries int `json:"cluster_history_entries,omitempty"` +} + +type ScalyrConfiguration struct { + ScalyrAPIKey string `json:"scalyr_api_key,omitempty"` + ScalyrImage string `json:"scalyr_image,omitempty"` + ScalyrServerURL string `json:"scalyr_server_url,omitempty"` + ScalyrCPURequest string `json:"scalyr_cpu_request,omitempty"` + ScalyrMemoryRequest string `json:"scalyr_memory_request,omitempty"` + ScalyrCPULimit string `json:"scalyr_cpu_limit,omitempty"` + ScalyrMemoryLimit string `json:"scalyr_memory_limit,omitempty"` +} + +type OperatorConfigurationData struct { + EtcdHost string `json:"etcd_host,omitempty"` + DockerImage string `json:"docker_image,omitempty"` + Workers uint32 `json:"workers,omitempty"` + MinInstances int32 `json:"min_instances,omitempty"` + MaxInstances int32 `json:"max_instances,omitempty"` + ResyncPeriod spec.Duration 
`json:"resync_period,omitempty"` + Sidecars map[string]string `json:"sidecar_docker_images,omitempty"` + PostgresUsersConfiguration PostgresUsersConfiguration `json:"users"` + Kubernetes KubernetesMetaConfiguration `json:"kubernetes"` + PostgresPodResources PostgresPodResourcesDefaults `json:"postgres_pod_resources"` + Timeouts OperatorTimeouts `json:"timeouts"` + LoadBalancer LoadBalancerConfiguration `json:"load_balancer"` + AWSGCP AWSGCPConfiguration `json:"aws_or_gcp"` + OperatorDebug OperatorDebugConfiguration `json:"debug"` + TeamsAPI TeamsAPIConfiguration `json:"teams_api"` + LoggingRESTAPI LoggingRESTAPIConfiguration `json:"logging_rest_api"` + Scalyr ScalyrConfiguration `json:"scalyr"` +} + +type OperatorConfigurationUsers struct { + SuperUserName string `json:"superuser_name,omitempty"` + Replication string `json:"replication_user_name,omitempty"` + ProtectedRoles []string `json:"protected_roles,omitempty"` + TeamAPIRoleConfiguration map[string]string `json:"team_api_role_configuration,omitempty"` +} + +type OperatorConfigurationCopy OperatorConfiguration +type OperatorConfigurationListCopy OperatorConfigurationList + +func (opc *OperatorConfiguration) UnmarshalJSON(data []byte) error { + var ref OperatorConfigurationCopy + if err := json.Unmarshal(data, &ref); err != nil { + return err + } + *opc = OperatorConfiguration(ref) + return nil +} + +func (opcl *OperatorConfigurationList) UnmarshalJSON(data []byte) error { + var ref OperatorConfigurationListCopy + if err := json.Unmarshal(data, &ref); err != nil { + return nil + } + *opcl = OperatorConfigurationList(ref) + return nil +} diff --git a/pkg/util/constants/crd.go b/pkg/util/constants/crd.go index 94db5033a..113264f01 100644 --- a/pkg/util/constants/crd.go +++ b/pkg/util/constants/crd.go @@ -2,9 +2,12 @@ package constants // Different properties of the PostgreSQL Custom Resource Definition const ( - CRDKind = "postgresql" - CRDResource = "postgresqls" - CRDShort = "pg" - CRDGroup = "acid.zalan.do" - 
CRDApiVersion = "v1" + PostgresCRDKind = "postgresql" + PostgresCRDResource = "postgresqls" + PostgresCRDShort = "pg" + CRDGroup = "acid.zalan.do" + CRDApiVersion = "v1" + OperatorConfigCRDKind = "postgresql-operator-configuration" + OperatorConfigCRDResource = "postgresql-operator-configurations" + OperatorConfigCRDShort = "pgopconfig" ) From 417f13c0bdda2fb67c1f5ce4c5cb7640e33ecadb Mon Sep 17 00:00:00 2001 From: zerg-junior Date: Thu, 19 Jul 2018 16:40:40 +0200 Subject: [PATCH 07/30] Submit RBAC credentials during initial Event processing (#344) * During initial Event processing submit the service account for pods and bind it to a cluster role that allows Patroni to successfully start. The cluster role is assumed to be created by the k8s cluster administrator. --- docs/administrator.md | 4 +- docs/reference/operator_parameters.md | 6 +- manifests/operator-service-account-rbac.yaml | 15 ++++ pkg/cluster/cluster.go | 48 ++---------- pkg/controller/controller.go | 54 ++++++++++++- pkg/controller/operator_config.go | 4 +- pkg/controller/postgresql.go | 81 ++++++++++++++++++++ pkg/util/config/config.go | 27 +++---- pkg/util/config/crd_config.go | 23 +++--- pkg/util/k8sutil/k8sutil.go | 3 + 10 files changed, 193 insertions(+), 72 deletions(-) diff --git a/docs/administrator.md b/docs/administrator.md index bb775ed02..5fbae8fe4 100644 --- a/docs/administrator.md +++ b/docs/administrator.md @@ -90,13 +90,13 @@ namespace. The operator performs **no** further syncing of this account. ## Role-based access control for the operator -The `manifests/operator-rbac.yaml` defines cluster roles and bindings needed +The `manifests/operator-service-account-rbac.yaml` defines cluster roles and bindings needed for the operator to function under access control restrictions. 
To deploy the operator with this RBAC policy use: ```bash $ kubectl create -f manifests/configmap.yaml - $ kubectl create -f manifests/operator-rbac.yaml + $ kubectl create -f manifests/operator-service-account-rbac.yaml $ kubectl create -f manifests/postgres-operator.yaml $ kubectl create -f manifests/minimal-postgres-manifest.yaml ``` diff --git a/docs/reference/operator_parameters.md b/docs/reference/operator_parameters.md index 331a77dbd..fd8e797b3 100644 --- a/docs/reference/operator_parameters.md +++ b/docs/reference/operator_parameters.md @@ -110,8 +110,10 @@ configuration they are grouped under the `kubernetes` key. * **pod_service_account_definition** The operator tries to create the pod Service Account in the namespace that doesn't define such an account using the YAML definition provided by this - option. If not defined, a simple definition that contains only the name will - be used. The default is empty. + option. If not defined, a simple definition that contains only the name will be used. The default is empty. + +* **pod_service_account_role_binding_definition** + This definition must bind pod service account to a role with permission sufficient for the pods to start and for Patroni to access k8s endpoints; service account on its own lacks any such rights starting with k8s v1.8. If not explicitly defined by the user, a simple definition that binds the account to the operator's own 'zalando-postgres-operator' cluster role will be used. The default is empty. 
* **pod_terminate_grace_period** Patroni pods are [terminated diff --git a/manifests/operator-service-account-rbac.yaml b/manifests/operator-service-account-rbac.yaml index 2be3cc4d2..8a1bfb857 100644 --- a/manifests/operator-service-account-rbac.yaml +++ b/manifests/operator-service-account-rbac.yaml @@ -123,6 +123,21 @@ rules: verbs: - get - create +- apiGroups: + - "rbac.authorization.k8s.io" + resources: + - rolebindings + verbs: + - get + - create +- apiGroups: + - "rbac.authorization.k8s.io" + resources: + - clusterroles + verbs: + - bind + resourceNames: + - zalando-postgres-operator --- apiVersion: rbac.authorization.k8s.io/v1 diff --git a/pkg/cluster/cluster.go b/pkg/cluster/cluster.go index 1f97aae0d..f1979ab8a 100644 --- a/pkg/cluster/cluster.go +++ b/pkg/cluster/cluster.go @@ -28,6 +28,7 @@ import ( "github.com/zalando-incubator/postgres-operator/pkg/util/patroni" "github.com/zalando-incubator/postgres-operator/pkg/util/teams" "github.com/zalando-incubator/postgres-operator/pkg/util/users" + rbacv1beta1 "k8s.io/client-go/pkg/apis/rbac/v1beta1" ) var ( @@ -39,10 +40,11 @@ var ( // Config contains operator-wide clients and configuration used from a cluster. TODO: remove struct duplication. type Config struct { - OpConfig config.Config - RestConfig *rest.Config - InfrastructureRoles map[string]spec.PgUser // inherited from the controller - PodServiceAccount *v1.ServiceAccount + OpConfig config.Config + RestConfig *rest.Config + InfrastructureRoles map[string]spec.PgUser // inherited from the controller + PodServiceAccount *v1.ServiceAccount + PodServiceAccountRoleBinding *rbacv1beta1.RoleBinding } type kubeResources struct { @@ -199,39 +201,6 @@ func (c *Cluster) initUsers() error { return nil } -/* - Ensures the service account required by StatefulSets to create pods exists in a namespace before a PG cluster is created there so that a user does not have to deploy the account manually. - - The operator does not sync these accounts after creation. 
-*/ -func (c *Cluster) createPodServiceAccounts() error { - - podServiceAccountName := c.Config.OpConfig.PodServiceAccountName - _, err := c.KubeClient.ServiceAccounts(c.Namespace).Get(podServiceAccountName, metav1.GetOptions{}) - - if err != nil { - - c.setProcessName(fmt.Sprintf("creating pod service account in the namespace %v", c.Namespace)) - - c.logger.Infof("the pod service account %q cannot be retrieved in the namespace %q. Trying to deploy the account.", podServiceAccountName, c.Namespace) - - // get a separate copy of service account - // to prevent a race condition when setting a namespace for many clusters - sa := *c.PodServiceAccount - _, err = c.KubeClient.ServiceAccounts(c.Namespace).Create(&sa) - if err != nil { - return fmt.Errorf("cannot deploy the pod service account %q defined in the config map to the %q namespace: %v", podServiceAccountName, c.Namespace, err) - } - - c.logger.Infof("successfully deployed the pod service account %q to the %q namespace", podServiceAccountName, c.Namespace) - - } else { - c.logger.Infof("successfully found the service account %q used to create pods to the namespace %q", podServiceAccountName, c.Namespace) - } - - return nil -} - // Create creates the new kubernetes objects associated with the cluster. 
func (c *Cluster) Create() error { c.mu.Lock() @@ -298,11 +267,6 @@ func (c *Cluster) Create() error { } c.logger.Infof("pod disruption budget %q has been successfully created", util.NameFromMeta(pdb.ObjectMeta)) - if err = c.createPodServiceAccounts(); err != nil { - return fmt.Errorf("could not create pod service account %v : %v", c.OpConfig.PodServiceAccountName, err) - } - c.logger.Infof("pod service accounts have been successfully synced") - if c.Statefulset != nil { return fmt.Errorf("statefulset already exists in the cluster") } diff --git a/pkg/controller/controller.go b/pkg/controller/controller.go index d02d5ea8a..7d1a6ed2f 100644 --- a/pkg/controller/controller.go +++ b/pkg/controller/controller.go @@ -10,6 +10,7 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/pkg/api/v1" + rbacv1beta1 "k8s.io/client-go/pkg/apis/rbac/v1beta1" "k8s.io/client-go/tools/cache" "github.com/zalando-incubator/postgres-operator/pkg/apiserver" @@ -52,7 +53,9 @@ type Controller struct { workerLogs map[uint32]ringlog.RingLogger - PodServiceAccount *v1.ServiceAccount + PodServiceAccount *v1.ServiceAccount + PodServiceAccountRoleBinding *rbacv1beta1.RoleBinding + namespacesWithDefinedRBAC sync.Map } // NewController creates a new controller @@ -162,6 +165,53 @@ func (c *Controller) initPodServiceAccount() { // actual service accounts are deployed at the time of Postgres/Spilo cluster creation } +func (c *Controller) initRoleBinding() { + + // service account on its own lacks any rights starting with k8s v1.8 + // operator binds it to the cluster role with sufficient priviliges + // we assume the role is created by the k8s administrator + if c.opConfig.PodServiceAccountRoleBindingDefinition == "" { + c.opConfig.PodServiceAccountRoleBindingDefinition = ` + { + "apiVersion": "rbac.authorization.k8s.io/v1beta1", + "kind": "RoleBinding", + "metadata": { + "name": "zalando-postgres-operator" + }, + "roleRef": { + "apiGroup": 
"rbac.authorization.k8s.io", + "kind": "ClusterRole", + "name": "zalando-postgres-operator" + }, + "subjects": [ + { + "kind": "ServiceAccount", + "name": "operator" + } + ] + }` + } + c.logger.Info("Parse role bindings") + // re-uses k8s internal parsing. See k8s client-go issue #193 for explanation + decode := scheme.Codecs.UniversalDeserializer().Decode + obj, groupVersionKind, err := decode([]byte(c.opConfig.PodServiceAccountRoleBindingDefinition), nil, nil) + + switch { + case err != nil: + panic(fmt.Errorf("Unable to parse the definiton of the role binding for the pod service account definiton from the operator config map: %v", err)) + case groupVersionKind.Kind != "RoleBinding": + panic(fmt.Errorf("role binding definiton in the operator config map defines another type of resource: %v", groupVersionKind.Kind)) + default: + c.PodServiceAccountRoleBinding = obj.(*rbacv1beta1.RoleBinding) + c.PodServiceAccountRoleBinding.Namespace = "" + c.PodServiceAccountRoleBinding.Subjects[0].Name = c.PodServiceAccount.Name + c.logger.Info("successfully parsed") + + } + + // actual roles bindings are deployed at the time of Postgres/Spilo cluster creation +} + func (c *Controller) initController() { c.initClients() @@ -176,6 +226,8 @@ func (c *Controller) initController() { } } else { c.initOperatorConfig() + c.initPodServiceAccount() + c.initRoleBinding() } c.modifyConfigFromEnvironment() diff --git a/pkg/controller/operator_config.go b/pkg/controller/operator_config.go index 11ad32959..fb448105b 100644 --- a/pkg/controller/operator_config.go +++ b/pkg/controller/operator_config.go @@ -4,10 +4,11 @@ import ( "encoding/json" "fmt" + "time" + "github.com/zalando-incubator/postgres-operator/pkg/util/config" "github.com/zalando-incubator/postgres-operator/pkg/util/constants" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "time" ) func (c *Controller) readOperatorConfigurationFromCRD(configObjectNamespace, configObjectName string) (*config.OperatorConfiguration, error) { @@ 
-49,6 +50,7 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *config.OperatorConfigur result.PodServiceAccountName = fromCRD.Kubernetes.PodServiceAccountName result.PodServiceAccountDefinition = fromCRD.Kubernetes.PodServiceAccountDefinition + result.PodServiceAccountRoleBindingDefinition = fromCRD.Kubernetes.PodServiceAccountRoleBindingDefinition result.PodTerminateGracePeriod = time.Duration(fromCRD.Kubernetes.PodTerminateGracePeriod) result.WatchedNamespace = fromCRD.Kubernetes.WatchedNamespace result.PDBNameFormat = fromCRD.Kubernetes.PDBNameFormat diff --git a/pkg/controller/postgresql.go b/pkg/controller/postgresql.go index bf7fe8889..9f42075ed 100644 --- a/pkg/controller/postgresql.go +++ b/pkg/controller/postgresql.go @@ -20,6 +20,7 @@ import ( "github.com/zalando-incubator/postgres-operator/pkg/spec" "github.com/zalando-incubator/postgres-operator/pkg/util" "github.com/zalando-incubator/postgres-operator/pkg/util/constants" + "github.com/zalando-incubator/postgres-operator/pkg/util/k8sutil" "github.com/zalando-incubator/postgres-operator/pkg/util/ringlog" ) @@ -179,6 +180,11 @@ func (c *Controller) processEvent(event spec.ClusterEvent) { c.warnOnDeprecatedPostgreSQLSpecParameters(&event.NewSpec.Spec) c.mergeDeprecatedPostgreSQLSpecParameters(&event.NewSpec.Spec) } + + if err := c.submitRBACCredentials(event); err != nil { + c.logger.Warnf("Pods and/or Patroni may misfunction due to the lack of permissions: %v", err) + } + } switch event.EventType { @@ -457,3 +463,78 @@ func (c *Controller) postgresqlDelete(obj interface{}) { c.queueClusterEvent(pg, nil, spec.EventDelete) } + +/* + Ensures the pod service account and role bindings exists in a namespace before a PG cluster is created there so that a user does not have to deploy these credentials manually. + StatefulSets require the service account to create pods; Patroni requires relevant RBAC bindings to access endpoints. + + The operator does not sync accounts/role bindings after creation. 
+*/ +func (c *Controller) submitRBACCredentials(event spec.ClusterEvent) error { + + namespace := event.NewSpec.GetNamespace() + if _, ok := c.namespacesWithDefinedRBAC.Load(namespace); ok { + return nil + } + + if err := c.createPodServiceAccount(namespace); err != nil { + return fmt.Errorf("could not create pod service account %v : %v", c.opConfig.PodServiceAccountName, err) + } + + if err := c.createRoleBindings(namespace); err != nil { + return fmt.Errorf("could not create role binding %v : %v", c.PodServiceAccountRoleBinding.Name, err) + } + + c.namespacesWithDefinedRBAC.Store(namespace, true) + return nil +} + +func (c *Controller) createPodServiceAccount(namespace string) error { + + podServiceAccountName := c.opConfig.PodServiceAccountName + _, err := c.KubeClient.ServiceAccounts(namespace).Get(podServiceAccountName, metav1.GetOptions{}) + if k8sutil.ResourceNotFound(err) { + + c.logger.Infof(fmt.Sprintf("creating pod service account in the namespace %v", namespace)) + + // get a separate copy of service account + // to prevent a race condition when setting a namespace for many clusters + sa := *c.PodServiceAccount + if _, err = c.KubeClient.ServiceAccounts(namespace).Create(&sa); err != nil { + return fmt.Errorf("cannot deploy the pod service account %v defined in the config map to the %v namespace: %v", podServiceAccountName, namespace, err) + } + + c.logger.Infof("successfully deployed the pod service account %v to the %v namespace", podServiceAccountName, namespace) + } else if k8sutil.ResourceAlreadyExists(err) { + return nil + } + + return err +} + +func (c *Controller) createRoleBindings(namespace string) error { + + podServiceAccountName := c.opConfig.PodServiceAccountName + podServiceAccountRoleBindingName := c.PodServiceAccountRoleBinding.Name + + _, err := c.KubeClient.RoleBindings(namespace).Get(podServiceAccountRoleBindingName, metav1.GetOptions{}) + if k8sutil.ResourceNotFound(err) { + + c.logger.Infof("Creating the role binding %v in the 
namespace %v", podServiceAccountRoleBindingName, namespace) + + // get a separate copy of role binding + // to prevent a race condition when setting a namespace for many clusters + rb := *c.PodServiceAccountRoleBinding + _, err = c.KubeClient.RoleBindings(namespace).Create(&rb) + if err != nil { + return fmt.Errorf("cannot bind the pod service account %q defined in the config map to the cluster role in the %q namespace: %v", podServiceAccountName, namespace, err) + } + + c.logger.Infof("successfully deployed the role binding for the pod service account %q to the %q namespace", podServiceAccountName, namespace) + + } else if k8sutil.ResourceAlreadyExists(err) { + return nil + } + + return err +} diff --git a/pkg/util/config/config.go b/pkg/util/config/config.go index 26b4d378b..e9017bfab 100644 --- a/pkg/util/config/config.go +++ b/pkg/util/config/config.go @@ -75,19 +75,20 @@ type Config struct { // default name `operator` enables backward compatibility with the older ServiceAccountName field PodServiceAccountName string `name:"pod_service_account_name" default:"operator"` // value of this string must be valid JSON or YAML; see initPodServiceAccount - PodServiceAccountDefinition string `name:"pod_service_account_definition" default:""` - DbHostedZone string `name:"db_hosted_zone" default:"db.example.com"` - AWSRegion string `name:"aws_region" default:"eu-central-1"` - WALES3Bucket string `name:"wal_s3_bucket"` - LogS3Bucket string `name:"log_s3_bucket"` - KubeIAMRole string `name:"kube_iam_role"` - DebugLogging bool `name:"debug_logging" default:"true"` - EnableDBAccess bool `name:"enable_database_access" default:"true"` - EnableTeamsAPI bool `name:"enable_teams_api" default:"true"` - EnableTeamSuperuser bool `name:"enable_team_superuser" default:"false"` - TeamAdminRole string `name:"team_admin_role" default:"admin"` - EnableMasterLoadBalancer bool `name:"enable_master_load_balancer" default:"true"` - EnableReplicaLoadBalancer bool 
`name:"enable_replica_load_balancer" default:"false"` + PodServiceAccountDefinition string `name:"pod_service_account_definition" default:""` + PodServiceAccountRoleBindingDefinition string `name:"pod_service_account_role_binding_definition" default:""` + DbHostedZone string `name:"db_hosted_zone" default:"db.example.com"` + AWSRegion string `name:"aws_region" default:"eu-central-1"` + WALES3Bucket string `name:"wal_s3_bucket"` + LogS3Bucket string `name:"log_s3_bucket"` + KubeIAMRole string `name:"kube_iam_role"` + DebugLogging bool `name:"debug_logging" default:"true"` + EnableDBAccess bool `name:"enable_database_access" default:"true"` + EnableTeamsAPI bool `name:"enable_teams_api" default:"true"` + EnableTeamSuperuser bool `name:"enable_team_superuser" default:"false"` + TeamAdminRole string `name:"team_admin_role" default:"admin"` + EnableMasterLoadBalancer bool `name:"enable_master_load_balancer" default:"true"` + EnableReplicaLoadBalancer bool `name:"enable_replica_load_balancer" default:"false"` // deprecated and kept for backward compatibility EnableLoadBalancer *bool `name:"enable_load_balancer"` MasterDNSNameFormat stringTemplate `name:"master_dns_name_format" default:"{cluster}.{team}.{hostedzone}"` diff --git a/pkg/util/config/crd_config.go b/pkg/util/config/crd_config.go index e268c41f6..ee3c4b712 100644 --- a/pkg/util/config/crd_config.go +++ b/pkg/util/config/crd_config.go @@ -31,17 +31,18 @@ type PostgresUsersConfiguration struct { type KubernetesMetaConfiguration struct { PodServiceAccountName string `json:"pod_service_account_name,omitempty"` // TODO: change it to the proper json - PodServiceAccountDefinition string `json:"pod_service_account_definition,omitempty"` - PodTerminateGracePeriod spec.Duration `json:"pod_terminate_grace_period,omitempty"` - WatchedNamespace string `json:"watched_namespace,omitempty"` - PDBNameFormat stringTemplate `json:"pdb_name_format,omitempty"` - SecretNameTemplate stringTemplate 
`json:"secret_name_template,omitempty"` - OAuthTokenSecretName spec.NamespacedName `json:"oauth_token_secret_name,omitempty"` - InfrastructureRolesSecretName spec.NamespacedName `json:"infrastructure_roles_secret_name,omitempty"` - PodRoleLabel string `json:"pod_role_label,omitempty"` - ClusterLabels map[string]string `json:"cluster_labels,omitempty"` - ClusterNameLabel string `json:"cluster_name_label,omitempty"` - NodeReadinessLabel map[string]string `json:"node_readiness_label,omitempty"` + PodServiceAccountDefinition string `json:"pod_service_account_definition,omitempty"` + PodServiceAccountRoleBindingDefinition string `json:"pod_service_account_role_binding_definition,omitempty"` + PodTerminateGracePeriod spec.Duration `json:"pod_terminate_grace_period,omitempty"` + WatchedNamespace string `json:"watched_namespace,omitempty"` + PDBNameFormat stringTemplate `json:"pdb_name_format,omitempty"` + SecretNameTemplate stringTemplate `json:"secret_name_template,omitempty"` + OAuthTokenSecretName spec.NamespacedName `json:"oauth_token_secret_name,omitempty"` + InfrastructureRolesSecretName spec.NamespacedName `json:"infrastructure_roles_secret_name,omitempty"` + PodRoleLabel string `json:"pod_role_label,omitempty"` + ClusterLabels map[string]string `json:"cluster_labels,omitempty"` + ClusterNameLabel string `json:"cluster_name_label,omitempty"` + NodeReadinessLabel map[string]string `json:"node_readiness_label,omitempty"` // TODO: use a proper toleration structure? 
PodToleration map[string]string `json:"toleration,omitempty"` // TODO: use namespacedname diff --git a/pkg/util/k8sutil/k8sutil.go b/pkg/util/k8sutil/k8sutil.go index 142d4f822..dd96aa5a7 100644 --- a/pkg/util/k8sutil/k8sutil.go +++ b/pkg/util/k8sutil/k8sutil.go @@ -13,6 +13,7 @@ import ( "k8s.io/client-go/kubernetes/typed/apps/v1beta1" v1core "k8s.io/client-go/kubernetes/typed/core/v1" policyv1beta1 "k8s.io/client-go/kubernetes/typed/policy/v1beta1" + rbacv1beta1 "k8s.io/client-go/kubernetes/typed/rbac/v1beta1" "k8s.io/client-go/pkg/api" "k8s.io/client-go/pkg/api/v1" policybeta1 "k8s.io/client-go/pkg/apis/policy/v1beta1" @@ -35,6 +36,7 @@ type KubernetesClient struct { v1core.NamespacesGetter v1core.ServiceAccountsGetter v1beta1.StatefulSetsGetter + rbacv1beta1.RoleBindingsGetter policyv1beta1.PodDisruptionBudgetsGetter apiextbeta1.CustomResourceDefinitionsGetter @@ -83,6 +85,7 @@ func NewFromConfig(cfg *rest.Config) (KubernetesClient, error) { kubeClient.StatefulSetsGetter = client.AppsV1beta1() kubeClient.PodDisruptionBudgetsGetter = client.PolicyV1beta1() kubeClient.RESTClient = client.CoreV1().RESTClient() + kubeClient.RoleBindingsGetter = client.RbacV1beta1() cfg2 := *cfg cfg2.GroupVersion = &schema.GroupVersion{ From accbe20804b4e193d02ea2adbb8279acd93cebbf Mon Sep 17 00:00:00 2001 From: zerg-junior Date: Thu, 19 Jul 2018 18:22:30 +0200 Subject: [PATCH 08/30] Upgrade version to enable RBAC in multiple namespace (#348) --- manifests/postgres-operator.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/manifests/postgres-operator.yaml b/manifests/postgres-operator.yaml index bea8efeaa..d361c0b88 100644 --- a/manifests/postgres-operator.yaml +++ b/manifests/postgres-operator.yaml @@ -12,7 +12,7 @@ spec: serviceAccountName: zalando-postgres-operator containers: - name: postgres-operator - image: registry.opensource.zalan.do/acid/postgres-operator:1352c4a + image: registry.opensource.zalan.do/acid/postgres-operator:417f13c imagePullPolicy: 
IfNotPresent env: # provided additional ENV vars can overwrite individual config map entries From 12871aad1ab641fbb7adafe2328b4acb7117a878 Mon Sep 17 00:00:00 2001 From: Oleksii Kliukin Date: Fri, 20 Jul 2018 14:12:25 +0200 Subject: [PATCH 09/30] Avoid showing an extra error when resizing volume fails (#350) Do not show 'persistent volumes are not compatible' errors for the volumes that failed to be resized because of the other reasons (i.e. the new size is smaller than the existing one). --- pkg/cluster/volumes.go | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/pkg/cluster/volumes.go b/pkg/cluster/volumes.go index 6f539f4ee..2b7537071 100644 --- a/pkg/cluster/volumes.go +++ b/pkg/cluster/volumes.go @@ -91,7 +91,8 @@ func (c *Cluster) listPersistentVolumes() ([]*v1.PersistentVolume, error) { func (c *Cluster) resizeVolumes(newVolume spec.Volume, resizers []volumes.VolumeResizer) error { c.setProcessName("resizing volumes") - totalCompatible := 0 + var totalIncompatible int + newQuantity, err := resource.ParseQuantity(newVolume.Size) if err != nil { return fmt.Errorf("could not parse volume size: %v", err) @@ -100,7 +101,6 @@ func (c *Cluster) resizeVolumes(newVolume spec.Volume, resizers []volumes.Volume if err != nil { return fmt.Errorf("could not list persistent volumes: %v", err) } - for _, pv := range pvs { volumeSize := quantityToGigabyte(pv.Spec.Capacity[v1.ResourceStorage]) if volumeSize >= newSize { @@ -109,11 +109,12 @@ func (c *Cluster) resizeVolumes(newVolume spec.Volume, resizers []volumes.Volume } continue } + compatible := false for _, resizer := range resizers { if !resizer.VolumeBelongsToProvider(pv) { continue } - totalCompatible++ + compatible = true if !resizer.IsConnectedToProvider() { err := resizer.ConnectToProvider() if err != nil { @@ -146,9 +147,13 @@ func (c *Cluster) resizeVolumes(newVolume spec.Volume, resizers []volumes.Volume } c.logger.Debugf("successfully updated persistent volume %q", pv.Name) } + 
if !compatible { + c.logger.Warningf("volume %q is incompatible with all available resizing providers", pv.Name) + totalIncompatible++ + } } - if len(pvs) > 0 && totalCompatible == 0 { - return fmt.Errorf("could not resize EBS volumes: persistent volumes are not compatible with existing resizing providers") + if totalIncompatible > 0 { + return fmt.Errorf("could not resize EBS volumes: some persistent volumes are not compatible with existing resizing providers") } return nil } From 1a0e5357dca5b00571637b1db487e909a1b49ad4 Mon Sep 17 00:00:00 2001 From: Oleksii Kliukin Date: Tue, 24 Jul 2018 11:16:24 +0200 Subject: [PATCH 10/30] Improve generation of Scalyr container environment. (#346) * Improve generating of Scalyr container environment. Avoid duplicating POD_NAME and POD_NAMESPACE that are already bundled with every sidecar. Do not complain on the lack of SCALYR_SERVER_HOST, since it is set to https://upload.eu.scalyr.com in the container we use. Do not mention SCALYR_SERVER_HOST in the error messages, since it is derived from the cluster name automatically. 
--- pkg/cluster/k8sres.go | 35 ++++++++--------------------------- 1 file changed, 8 insertions(+), 27 deletions(-) diff --git a/pkg/cluster/k8sres.go b/pkg/cluster/k8sres.go index 162f2ef45..35b6d471e 100644 --- a/pkg/cluster/k8sres.go +++ b/pkg/cluster/k8sres.go @@ -744,35 +744,16 @@ func getEffectiveDockerImage(globalDockerImage, clusterDockerImage string) strin func generateScalyrSidecarSpec(clusterName, APIKey, serverURL, dockerImage string, containerResources *spec.Resources, logger *logrus.Entry) *spec.Sidecar { - if APIKey == "" || serverURL == "" || dockerImage == "" { - if APIKey != "" || serverURL != "" || dockerImage != "" { - logger.Warningf("Incomplete configuration for the Scalyr sidecar: " + - "all of SCALYR_API_KEY, SCALYR_SERVER_HOST and SCALYR_SERVER_URL must be defined") + if APIKey == "" || dockerImage == "" { + if APIKey == "" && dockerImage != "" { + logger.Warning("Not running Scalyr sidecar: SCALYR_API_KEY must be defined") } return nil } - return &spec.Sidecar{ + scalarSpec := &spec.Sidecar{ Name: "scalyr-sidecar", DockerImage: dockerImage, Env: []v1.EnvVar{ - { - Name: "POD_NAME", - ValueFrom: &v1.EnvVarSource{ - FieldRef: &v1.ObjectFieldSelector{ - APIVersion: "v1", - FieldPath: "metadata.name", - }, - }, - }, - { - Name: "POD_NAMESPACE", - ValueFrom: &v1.EnvVarSource{ - FieldRef: &v1.ObjectFieldSelector{ - APIVersion: "v1", - FieldPath: "metadata.namespace", - }, - }, - }, { Name: "SCALYR_API_KEY", Value: APIKey, @@ -781,13 +762,13 @@ func generateScalyrSidecarSpec(clusterName, APIKey, serverURL, dockerImage strin Name: "SCALYR_SERVER_HOST", Value: clusterName, }, - { - Name: "SCALYR_SERVER_URL", - Value: serverURL, - }, }, Resources: *containerResources, } + if serverURL != "" { + scalarSpec.Env = append(scalarSpec.Env, v1.EnvVar{Name: "SCALYR_SERVER_URL", Value: serverURL}) + } + return scalarSpec } // mergeSidecar merges globally-defined sidecars with those defined in the cluster manifest From 0181a1b5b1e0f454ccb05b15258171c4811b0da8 
Mon Sep 17 00:00:00 2001 From: Oleksii Kliukin Date: Tue, 24 Jul 2018 11:21:45 +0200 Subject: [PATCH 11/30] Introduce a repair scan to fix failing clusters (#304) A repair is a sync scan that acts only on those clusters that indicate that the last add, update or sync operation on them has failed. It is supposed to kick in more frequently than the sync scan. The sync scan still remains to be useful to fix the consequences of external actions (i.e. someone deletes a postgres-related service by mistake) unbeknownst to the operator. The repair scan is controlled by the new repair_period parameter in the operator configuration. It has to be at least 2 times more frequent than a sync scan to have any effect (a normal sync scan will update both last synced and last repaired attributes of the controller, since repair is just a sync underneath). A repair scan could be queued for a cluster that is already being synced if the sync period exceeds the interval between repairs. In that case a repair event will be discarded once the corresponding worker finds out that the cluster is not failing anymore. Review by @zerg-junior --- docs/administrator.md | 9 +++ docs/reference/operator_parameters.md | 5 +- ...gresql-operator-default-configuration.yaml | 4 +- pkg/cluster/cluster.go | 7 ++ pkg/controller/controller.go | 5 +- pkg/controller/operator_config.go | 1 + pkg/controller/postgresql.go | 70 +++++++++++++++---- pkg/spec/postgresql.go | 10 +++ pkg/spec/types.go | 1 + pkg/util/config/config.go | 3 +- pkg/util/config/crd_config.go | 1 + 11 files changed, 97 insertions(+), 19 deletions(-) diff --git a/docs/administrator.md b/docs/administrator.md index 5fbae8fe4..a7dff68ef 100644 --- a/docs/administrator.md +++ b/docs/administrator.md @@ -199,3 +199,12 @@ cluster manifest. In the case any of these variables are omitted from the manifest, the operator configmap's settings `enable_master_load_balancer` and `enable_replica_load_balancer` apply. 
Note that the operator settings affect all Postgresql services running in a namespace watched by the operator. + +## Running periodic 'autorepair' scans of Kubernetes objects + +The Postgres operator periodically scans all Kubernetes objects belonging to +each cluster and repairs all discrepancies between them and the definitions +generated from the current cluster manifest. There are two types of scans: a +`sync scan`, running every `resync_period` seconds for every cluster, and the +`repair scan`, coming every `repair_period` only for those clusters that didn't +report success as a result of the last operation applied to them. diff --git a/docs/reference/operator_parameters.md b/docs/reference/operator_parameters.md index fd8e797b3..dce1df4cc 100644 --- a/docs/reference/operator_parameters.md +++ b/docs/reference/operator_parameters.md @@ -80,7 +80,10 @@ Those are top-level keys, containing both leaf keys and groups. are applied. The default is `-1`. * **resync_period** - period between consecutive sync requests. The default is `5m`. + period between consecutive sync requests. The default is `30m`. + +* **repair_period** + period between consecutive repair requests. The default is `5m`. 
## Postgres users diff --git a/manifests/postgresql-operator-default-configuration.yaml b/manifests/postgresql-operator-default-configuration.yaml index 7dcf75091..05fa935e9 100644 --- a/manifests/postgresql-operator-default-configuration.yaml +++ b/manifests/postgresql-operator-default-configuration.yaml @@ -8,7 +8,9 @@ configuration: workers: 4 min_instances: -1 max_instances: -1 - resync_period: 5m + resync_period: 30m + repair_period: 5m + #sidecar_docker_images: # example: "exampleimage:exampletag" users: diff --git a/pkg/cluster/cluster.go b/pkg/cluster/cluster.go index f1979ab8a..2548ebc22 100644 --- a/pkg/cluster/cluster.go +++ b/pkg/cluster/cluster.go @@ -630,6 +630,13 @@ func (c *Cluster) Delete() { } } +func (c *Cluster) NeedsRepair() (bool, spec.PostgresStatus) { + c.specMu.RLock() + defer c.specMu.RUnlock() + return !c.Status.Success(), c.Status + +} + // ReceivePodEvent is called back by the controller in order to add the cluster's pod event to the queue. func (c *Cluster) ReceivePodEvent(event spec.PodEvent) { if err := c.podEventsQueue.Add(event); err != nil { diff --git a/pkg/controller/controller.go b/pkg/controller/controller.go index 7d1a6ed2f..6615be677 100644 --- a/pkg/controller/controller.go +++ b/pkg/controller/controller.go @@ -48,8 +48,9 @@ type Controller struct { nodesInformer cache.SharedIndexInformer podCh chan spec.PodEvent - clusterEventQueues []*cache.FIFO // [workerID]Queue - lastClusterSyncTime int64 + clusterEventQueues []*cache.FIFO // [workerID]Queue + lastClusterSyncTime int64 + lastClusterRepairTime int64 workerLogs map[uint32]ringlog.RingLogger diff --git a/pkg/controller/operator_config.go b/pkg/controller/operator_config.go index fb448105b..1b7318d1e 100644 --- a/pkg/controller/operator_config.go +++ b/pkg/controller/operator_config.go @@ -43,6 +43,7 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *config.OperatorConfigur result.MinInstances = fromCRD.MinInstances result.MaxInstances = fromCRD.MaxInstances 
result.ResyncPeriod = time.Duration(fromCRD.ResyncPeriod) + result.RepairPeriod = time.Duration(fromCRD.RepairPeriod) result.Sidecars = fromCRD.Sidecars result.SuperUsername = fromCRD.PostgresUsersConfiguration.SuperUsername diff --git a/pkg/controller/postgresql.go b/pkg/controller/postgresql.go index 9f42075ed..c037260ab 100644 --- a/pkg/controller/postgresql.go +++ b/pkg/controller/postgresql.go @@ -42,9 +42,14 @@ func (c *Controller) clusterResync(stopCh <-chan struct{}, wg *sync.WaitGroup) { // TODO: make a separate function to be called from InitSharedInformers // clusterListFunc obtains a list of all PostgreSQL clusters and runs sync when necessary +// NB: as this function is called directly by the informer, it needs to avoid acquiring locks +// on individual cluster structures. Therefore, it acts on the manifests obtained from Kubernetes +// and not on the internal state of the clusters. func (c *Controller) clusterListFunc(options metav1.ListOptions) (runtime.Object, error) { - var list spec.PostgresqlList - var activeClustersCnt, failedClustersCnt int + var ( + list spec.PostgresqlList + event spec.EventType + ) req := c.KubeClient.CRDREST. Get(). 
@@ -61,19 +66,41 @@ func (c *Controller) clusterListFunc(options metav1.ListOptions) (runtime.Object c.logger.Warningf("could not unmarshal list of clusters: %v", err) } - timeFromPreviousSync := time.Now().Unix() - atomic.LoadInt64(&c.lastClusterSyncTime) - if timeFromPreviousSync < int64(c.opConfig.ResyncPeriod.Seconds()) { - c.logger.Infof("not running SYNC, previous sync happened %d seconds ago", timeFromPreviousSync) - return &list, err + currentTime := time.Now().Unix() + timeFromPreviousSync := currentTime - atomic.LoadInt64(&c.lastClusterSyncTime) + timeFromPreviousRepair := currentTime - atomic.LoadInt64(&c.lastClusterRepairTime) + if timeFromPreviousSync >= int64(c.opConfig.ResyncPeriod.Seconds()) { + event = spec.EventSync + } else if timeFromPreviousRepair >= int64(c.opConfig.RepairPeriod.Seconds()) { + event = spec.EventRepair } + if event != "" { + c.queueEvents(&list, event) + } else { + c.logger.Infof("not enough time passed since the last sync (%s seconds) or repair (%s seconds)", + timeFromPreviousSync, timeFromPreviousRepair) + } + return &list, err +} +// queueEvents queues a sync or repair event for every cluster with a valid manifest +func (c *Controller) queueEvents(list *spec.PostgresqlList, event spec.EventType) { + var activeClustersCnt, failedClustersCnt, clustersToRepair int for i, pg := range list.Items { if pg.Error != nil { failedClustersCnt++ continue } - c.queueClusterEvent(nil, &list.Items[i], spec.EventSync) activeClustersCnt++ + // check if that cluster needs repair + if event == spec.EventRepair { + if pg.Status.Success() { + continue + } else { + clustersToRepair++ + } + } + c.queueClusterEvent(nil, &list.Items[i], event) } if len(list.Items) > 0 { if failedClustersCnt > 0 && activeClustersCnt == 0 { @@ -83,13 +110,18 @@ func (c *Controller) clusterListFunc(options metav1.ListOptions) (runtime.Object } else { c.logger.Infof("there are %d clusters running and %d are in the failed state", activeClustersCnt, failedClustersCnt) } + 
if clustersToRepair > 0 { + c.logger.Infof("%d clusters are scheduled for a repair scan", clustersToRepair) + } } else { c.logger.Infof("no clusters running") } - - atomic.StoreInt64(&c.lastClusterSyncTime, time.Now().Unix()) - - return &list, err + if event == spec.EventRepair || event == spec.EventSync { + atomic.StoreInt64(&c.lastClusterRepairTime, time.Now().Unix()) + if event == spec.EventSync { + atomic.StoreInt64(&c.lastClusterSyncTime, time.Now().Unix()) + } + } } type crdDecoder struct { @@ -155,7 +187,7 @@ func (c *Controller) processEvent(event spec.ClusterEvent) { lg := c.logger.WithField("worker", event.WorkerID) - if event.EventType == spec.EventAdd || event.EventType == spec.EventSync { + if event.EventType == spec.EventAdd || event.EventType == spec.EventSync || event.EventType == spec.EventRepair { clusterName = util.NameFromMeta(event.NewSpec.ObjectMeta) } else { clusterName = util.NameFromMeta(event.OldSpec.ObjectMeta) @@ -171,6 +203,16 @@ func (c *Controller) processEvent(event spec.ClusterEvent) { defer c.curWorkerCluster.Store(event.WorkerID, nil) + if event.EventType == spec.EventRepair { + runRepair, lastOperationStatus := cl.NeedsRepair() + if !runRepair { + lg.Debugf("Observed cluster status %s, repair is not required", lastOperationStatus) + return + } + lg.Debugf("Observed cluster status %s, running sync scan to repair the cluster", lastOperationStatus) + event.EventType = spec.EventSync + } + if event.EventType == spec.EventAdd || event.EventType == spec.EventUpdate || event.EventType == spec.EventSync { // handle deprecated parameters by possibly assigning their values to the new ones. if event.OldSpec != nil { @@ -406,8 +448,8 @@ func (c *Controller) queueClusterEvent(informerOldSpec, informerNewSpec *spec.Po if eventType != spec.EventDelete { return } - - for _, evType := range []spec.EventType{spec.EventAdd, spec.EventSync, spec.EventUpdate} { + // A delete event discards all prior requests for that cluster. 
+ for _, evType := range []spec.EventType{spec.EventAdd, spec.EventSync, spec.EventUpdate, spec.EventRepair} { obj, exists, err := c.clusterEventQueues[workerID].GetByKey(queueClusterKey(evType, uid)) if err != nil { lg.Warningf("could not get event from the queue: %v", err) diff --git a/pkg/spec/postgresql.go b/pkg/spec/postgresql.go index 55713632a..4b973e503 100644 --- a/pkg/spec/postgresql.go +++ b/pkg/spec/postgresql.go @@ -335,3 +335,13 @@ func (pl *PostgresqlList) UnmarshalJSON(data []byte) error { return nil } + +func (status PostgresStatus) Success() bool { + return status != ClusterStatusAddFailed && + status != ClusterStatusUpdateFailed && + status != ClusterStatusSyncFailed +} + +func (status PostgresStatus) String() string { + return string(status) +} diff --git a/pkg/spec/types.go b/pkg/spec/types.go index 32d709811..98490e1b0 100644 --- a/pkg/spec/types.go +++ b/pkg/spec/types.go @@ -30,6 +30,7 @@ const ( EventUpdate EventType = "UPDATE" EventDelete EventType = "DELETE" EventSync EventType = "SYNC" + EventRepair EventType = "REPAIR" fileWithNamespace = "/var/run/secrets/kubernetes.io/serviceaccount/namespace" ) diff --git a/pkg/util/config/config.go b/pkg/util/config/config.go index e9017bfab..683f98a17 100644 --- a/pkg/util/config/config.go +++ b/pkg/util/config/config.go @@ -14,7 +14,8 @@ import ( type CRD struct { ReadyWaitInterval time.Duration `name:"ready_wait_interval" default:"4s"` ReadyWaitTimeout time.Duration `name:"ready_wait_timeout" default:"30s"` - ResyncPeriod time.Duration `name:"resync_period" default:"5m"` + ResyncPeriod time.Duration `name:"resync_period" default:"30m"` + RepairPeriod time.Duration `name:"repair_period" default:"5m"` } // Resources describes kubernetes resource specific configuration parameters diff --git a/pkg/util/config/crd_config.go b/pkg/util/config/crd_config.go index ee3c4b712..cd08ea14e 100644 --- a/pkg/util/config/crd_config.go +++ b/pkg/util/config/crd_config.go @@ -119,6 +119,7 @@ type 
OperatorConfigurationData struct { MinInstances int32 `json:"min_instances,omitempty"` MaxInstances int32 `json:"max_instances,omitempty"` ResyncPeriod spec.Duration `json:"resync_period,omitempty"` + RepairPeriod spec.Duration `json:"repair_period,omitempty"` Sidecars map[string]string `json:"sidecar_docker_images,omitempty"` PostgresUsersConfiguration PostgresUsersConfiguration `json:"users"` Kubernetes KubernetesMetaConfiguration `json:"kubernetes"` From f27833b5eb2d73f8faeb34447446c09b384a1b68 Mon Sep 17 00:00:00 2001 From: Oleksii Kliukin Date: Fri, 27 Jul 2018 10:24:05 +0200 Subject: [PATCH 12/30] Fix disabling database access and teams API via command-line options. (#351) --- pkg/controller/controller.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/controller/controller.go b/pkg/controller/controller.go index 6615be677..96f1d2087 100644 --- a/pkg/controller/controller.go +++ b/pkg/controller/controller.go @@ -114,10 +114,10 @@ func (c *Controller) modifyConfigFromEnvironment() { c.opConfig.WatchedNamespace = c.getEffectiveNamespace(os.Getenv("WATCHED_NAMESPACE"), c.opConfig.WatchedNamespace) if c.config.NoDatabaseAccess { - c.opConfig.EnableDBAccess = c.config.NoDatabaseAccess + c.opConfig.EnableDBAccess = false } if c.config.NoTeamsAPI { - c.opConfig.EnableTeamsAPI = c.config.NoTeamsAPI + c.opConfig.EnableTeamsAPI = false } scalyrAPIKey := os.Getenv("SCALYR_API_KEY") if scalyrAPIKey != "" { From d2d3f21dc2a7fbf6c8abe3aa8fc26a7bdf83d20b Mon Sep 17 00:00:00 2001 From: Oleksii Kliukin Date: Wed, 1 Aug 2018 11:08:01 +0200 Subject: [PATCH 13/30] Client go upgrade v6 (#352) There are shortcuts in this code, i.e. we created the deepcopy function by using the deepcopy package instead of the generated code, that will be addressed once migrated to client-go v8. Also, some objects, particularly statefulsets, are still taken from v1beta, this will also be addressed in further commits once the changes are stabilized. 
--- glide.lock | 185 +++++++++++++++++++--------------- glide.yaml | 38 ++----- pkg/cluster/cluster.go | 8 +- pkg/cluster/cluster_test.go | 2 +- pkg/cluster/exec.go | 7 +- pkg/cluster/k8sres.go | 6 +- pkg/cluster/pod.go | 2 +- pkg/cluster/resources.go | 6 +- pkg/cluster/sync.go | 2 +- pkg/cluster/util.go | 6 +- pkg/cluster/volumes.go | 2 +- pkg/controller/controller.go | 4 +- pkg/controller/node.go | 2 +- pkg/controller/node_test.go | 2 +- pkg/controller/pod.go | 2 +- pkg/controller/postgresql.go | 2 +- pkg/controller/util.go | 2 +- pkg/controller/util_test.go | 2 +- pkg/spec/postgresql.go | 52 +++++++++- pkg/spec/types.go | 6 +- pkg/util/config/crd_config.go | 45 +++++++++ pkg/util/k8sutil/k8sutil.go | 9 +- pkg/util/patroni/patroni.go | 2 +- pkg/util/volumes/ebs.go | 2 +- pkg/util/volumes/volumes.go | 2 +- 25 files changed, 244 insertions(+), 154 deletions(-) diff --git a/glide.lock b/glide.lock index 27ee9beba..2b51833d5 100644 --- a/glide.lock +++ b/glide.lock @@ -1,8 +1,8 @@ -hash: 688e15147f1217da635b83ee33f20a3741a400a493787d79992d1650f6e4c514 -updated: 2018-05-17T10:46:49.090929+02:00 +hash: f2f7f9d5d3c6f0f370fcec00e6c4a7c8fe84c0e75579d9bf7e40f19fe837b7c2 +updated: 2018-07-25T15:45:34.017577+02:00 imports: - name: github.com/aws/aws-sdk-go - version: ee7b4b1162937cba700de23bd90acb742982e626 + version: 468b9714c11f10b22e533253b35eb9c28f4be691 subpackages: - aws - aws/awserr @@ -14,6 +14,7 @@ imports: - aws/credentials/ec2rolecreds - aws/credentials/endpointcreds - aws/credentials/stscreds + - aws/csm - aws/defaults - aws/ec2metadata - aws/endpoints @@ -22,6 +23,7 @@ imports: - aws/signer/v4 - internal/sdkio - internal/sdkrand + - internal/sdkuri - internal/shareddefaults - private/protocol - private/protocol/ec2query @@ -32,14 +34,9 @@ imports: - service/ec2 - service/sts - name: github.com/davecgh/go-spew - version: 5215b55f46b2b919f50a1df0eaa5886afe4e3b3d + version: 782f4967f2dc4564575ca782fe2d04090b5faca8 subpackages: - spew -- name: 
github.com/docker/distribution - version: cd27f179f2c10c5d300e6d09025b538c475b0d51 - subpackages: - - digest - - reference - name: github.com/docker/spdystream version: 449fdfce4d962303d702fec724ef0ad181c92528 subpackages: @@ -48,24 +45,18 @@ imports: version: ff4f55a206334ef123e4f79bbf348980da81ca46 subpackages: - log -- name: github.com/emicklei/go-restful-swagger12 - version: dcef7f55730566d41eae5db10e7d6981829720f6 - name: github.com/ghodss/yaml version: 73d445a93680fa1a78ae23a5839bad48f32ba1ee - name: github.com/go-ini/ini - version: c787282c39ac1fc618827141a1f762240def08a3 -- name: github.com/go-openapi/analysis - version: b44dc874b601d9e4e2f6e19140e794ba24bead3b + version: d58d458bec3cb5adec4b7ddb41131855eac0b33f - name: github.com/go-openapi/jsonpointer version: 46af16f9f7b149af66e5d1bd010e3574dc06de98 - name: github.com/go-openapi/jsonreference version: 13c6e3589ad90f49bd3e3bbe2c2cb3d7a4142272 -- name: github.com/go-openapi/loads - version: 18441dfa706d924a39a030ee2c3b1d8d81917b38 - name: github.com/go-openapi/spec - version: 6aced65f8501fe1217321abf0749d354824ba2ff + version: 7abd5745472fff5eb3685386d5fb8bf38683154d - name: github.com/go-openapi/swag - version: 1d0bd113de87027671077d3c71eb3ac5d7dbba72 + version: f3f9494671f93fcff853e3c6e9e948b3eb71e590 - name: github.com/gogo/protobuf version: c0656edd0d9eab7c66d1eb0c568f9039345796f7 subpackages: @@ -73,8 +64,28 @@ imports: - sortkeys - name: github.com/golang/glog version: 44145f04b68cf362d9c4df2182967c2275eaefed +- name: github.com/golang/protobuf + version: 1643683e1b54a9e88ad26d98f81400c8c9d9f4f9 + subpackages: + - proto + - ptypes + - ptypes/any + - ptypes/duration + - ptypes/timestamp +- name: github.com/google/btree + version: 7d79101e329e5a3adf994758c578dab82b90c017 - name: github.com/google/gofuzz version: 44d81051d367757e1c7c6a5a86423ece9afcf63c +- name: github.com/googleapis/gnostic + version: 0c5108395e2debce0d731cf0287ddf7242066aba + subpackages: + - OpenAPIv2 + - compiler + - extensions +- 
name: github.com/gregjones/httpcache + version: 787624de3eb7bd915c329cba748687a3b22666a6 + subpackages: + - diskcache - name: github.com/hashicorp/golang-lru version: a0d98a5f288019575c6d1f4bb1573fef2d1fcdc4 subpackages: @@ -84,57 +95,63 @@ imports: - name: github.com/imdario/mergo version: 6633656539c1639d9d78127b7d47c622b5d7b6dc - name: github.com/jmespath/go-jmespath - version: bd40a432e4c76585ef6b72d3fd96fb9b6dc7b68d + version: c2b33e8439af944379acbdd9c3a5fe0bc44bd8a5 +- name: github.com/json-iterator/go + version: f2b4162afba35581b6d4a50d3b8f34e33c144682 - name: github.com/juju/ratelimit version: 5b9ff866471762aa2ab2dced63c9fb6f53921342 - name: github.com/kr/text - version: 7cafcd837844e784b526369c9bce262804aebc60 + version: e2ffdb16a802fe2bb95e2e35ff34f0e53aeef34f - name: github.com/lib/pq - version: b77235e3890a962fe8a6f8c4c7198679ca7814e7 + version: 90697d60dd844d5ef6ff15135d0203f65d2f53b8 subpackages: - oid - name: github.com/mailru/easyjson - version: d5b7844b561a7bc640052f1b935f7b800330d7e0 + version: 2f5df55504ebc322e4d52d34df6a1f5b503bf26d subpackages: - buffer - jlexer - jwriter +- name: github.com/modern-go/concurrent + version: bacd9c7ef1dd9b15be4a9909b8ac7a4e313eec94 +- name: github.com/modern-go/reflect2 + version: 05fbef0ca5da472bbf96c9322b84a53edc03c9fd - name: github.com/mohae/deepcopy version: c48cc78d482608239f6c4c92a4abd87eb8761c90 - name: github.com/motomux/pretty version: b2aad2c9a95d14eb978f29baa6e3a5c3c20eef30 +- name: github.com/peterbourgon/diskv + version: 5f041e8faa004a95c88a202771f4cc3e991971e6 - name: github.com/PuerkitoBio/purell version: 8a290539e2e8629dbc4e6bad948158f790ec31f4 - name: github.com/PuerkitoBio/urlesc version: 5bd2802263f21d8788851d5305584c82a5c75d7e - name: github.com/Sirupsen/logrus - version: c155da19408a8799da419ed3eeb0cb5db0ad5dbc + version: 3e01752db0189b9157070a0e1668a620f9a85da2 - name: github.com/spf13/pflag version: 9ff6c6923cfffbcd502984b8e0c80539a94968b7 -- name: github.com/ugorji/go - version: 
ded73eae5db7e7a0ef6f55aace87a2873c5d2b74 - subpackages: - - codec - name: golang.org/x/crypto - version: 9419663f5a44be8b34ca85f08abc5fe1be11f8a3 + version: c126467f60eb25f8f27e5a981f32a87e3965053f subpackages: - ssh/terminal - name: golang.org/x/net - version: f2499483f923065a842d38eb4c7f1927e6fc6e6d + version: 1c05540f6879653db88113bc4a2b70aec4bd491f subpackages: + - context - http2 - http2/hpack - idna - lex/httplex - name: golang.org/x/sys - version: 8f0908ab3b2457e2e15403d3697c9ef5cb4b57a9 + version: 95c6576299259db960f6c5b9b69ea52422860fce subpackages: - unix - windows - name: golang.org/x/text - version: 2910a502d2bf9e43193af9d68ca516529614eed3 + version: b19bf474d317b857955b12035d2c5acb57ce8b01 subpackages: - cases + - internal - internal/tag - language - runes @@ -147,9 +164,40 @@ imports: - name: gopkg.in/inf.v0 version: 3887ee99ecf07df5b447e9b00d9c0b2adaa9f3e4 - name: gopkg.in/yaml.v2 - version: 53feefa2559fb8dfa8d81baad31be332c97d6c77 + version: 5420a8b6744d3b0345ab293f6fcba19c978f1183 +- name: k8s.io/api + version: 11147472b7c934c474a2c484af3c0c5210b7a3af + subpackages: + - admissionregistration/v1alpha1 + - admissionregistration/v1beta1 + - apps/v1 + - apps/v1beta1 + - apps/v1beta2 + - authentication/v1 + - authentication/v1beta1 + - authorization/v1 + - authorization/v1beta1 + - autoscaling/v1 + - autoscaling/v2beta1 + - batch/v1 + - batch/v1beta1 + - batch/v2alpha1 + - certificates/v1beta1 + - core/v1 + - events/v1beta1 + - extensions/v1beta1 + - networking/v1 + - policy/v1beta1 + - rbac/v1 + - rbac/v1alpha1 + - rbac/v1beta1 + - scheduling/v1alpha1 + - settings/v1alpha1 + - storage/v1 + - storage/v1alpha1 + - storage/v1beta1 - name: k8s.io/apiextensions-apiserver - version: fcd622fe88a4a6efcb5aea9e94ee87324ac1b036 + version: 913221cf6cd1c328ae50ba5f25027268f6be38cf subpackages: - pkg/apis/apiextensions - pkg/apis/apiextensions/v1beta1 @@ -157,24 +205,19 @@ imports: - pkg/client/clientset/clientset/scheme - 
pkg/client/clientset/clientset/typed/apiextensions/v1beta1 - name: k8s.io/apimachinery - version: 1cb2cdd78d38df243e686d1b572b76e190469842 + version: fb40df2b502912cbe3a93aa61c2b2487f39cb42f subpackages: - - pkg/api/equality - pkg/api/errors - pkg/api/meta - pkg/api/resource - - pkg/apimachinery - - pkg/apimachinery/announced - - pkg/apimachinery/registered + - pkg/apis/meta/internalversion - pkg/apis/meta/v1 - pkg/apis/meta/v1/unstructured - pkg/apis/meta/v1alpha1 - pkg/conversion - pkg/conversion/queryparams - - pkg/conversion/unstructured - pkg/fields - pkg/labels - - pkg/openapi - pkg/runtime - pkg/runtime/schema - pkg/runtime/serializer @@ -195,7 +238,6 @@ imports: - pkg/util/intstr - pkg/util/json - pkg/util/net - - pkg/util/rand - pkg/util/remotecommand - pkg/util/runtime - pkg/util/sets @@ -208,68 +250,39 @@ imports: - third_party/forked/golang/netutil - third_party/forked/golang/reflect - name: k8s.io/client-go - version: d92e8497f71b7b4e0494e5bd204b48d34bd6f254 + version: 78700dec6369ba22221b72770783300f143df150 subpackages: - discovery - kubernetes - kubernetes/scheme - kubernetes/typed/admissionregistration/v1alpha1 + - kubernetes/typed/admissionregistration/v1beta1 + - kubernetes/typed/apps/v1 - kubernetes/typed/apps/v1beta1 + - kubernetes/typed/apps/v1beta2 - kubernetes/typed/authentication/v1 - kubernetes/typed/authentication/v1beta1 - kubernetes/typed/authorization/v1 - kubernetes/typed/authorization/v1beta1 - kubernetes/typed/autoscaling/v1 - - kubernetes/typed/autoscaling/v2alpha1 + - kubernetes/typed/autoscaling/v2beta1 - kubernetes/typed/batch/v1 + - kubernetes/typed/batch/v1beta1 - kubernetes/typed/batch/v2alpha1 - kubernetes/typed/certificates/v1beta1 - kubernetes/typed/core/v1 + - kubernetes/typed/events/v1beta1 - kubernetes/typed/extensions/v1beta1 - kubernetes/typed/networking/v1 - kubernetes/typed/policy/v1beta1 + - kubernetes/typed/rbac/v1 - kubernetes/typed/rbac/v1alpha1 - kubernetes/typed/rbac/v1beta1 + - 
kubernetes/typed/scheduling/v1alpha1 - kubernetes/typed/settings/v1alpha1 - kubernetes/typed/storage/v1 + - kubernetes/typed/storage/v1alpha1 - kubernetes/typed/storage/v1beta1 - - pkg/api - - pkg/api/v1 - - pkg/api/v1/ref - - pkg/apis/admissionregistration - - pkg/apis/admissionregistration/v1alpha1 - - pkg/apis/apps - - pkg/apis/apps/v1beta1 - - pkg/apis/authentication - - pkg/apis/authentication/v1 - - pkg/apis/authentication/v1beta1 - - pkg/apis/authorization - - pkg/apis/authorization/v1 - - pkg/apis/authorization/v1beta1 - - pkg/apis/autoscaling - - pkg/apis/autoscaling/v1 - - pkg/apis/autoscaling/v2alpha1 - - pkg/apis/batch - - pkg/apis/batch/v1 - - pkg/apis/batch/v2alpha1 - - pkg/apis/certificates - - pkg/apis/certificates/v1beta1 - - pkg/apis/extensions - - pkg/apis/extensions/v1beta1 - - pkg/apis/networking - - pkg/apis/networking/v1 - - pkg/apis/policy - - pkg/apis/policy/v1beta1 - - pkg/apis/rbac - - pkg/apis/rbac/v1alpha1 - - pkg/apis/rbac/v1beta1 - - pkg/apis/settings - - pkg/apis/settings/v1alpha1 - - pkg/apis/storage - - pkg/apis/storage/v1 - - pkg/apis/storage/v1beta1 - - pkg/util - - pkg/util/parsers - pkg/version - rest - rest/watch @@ -280,11 +293,23 @@ imports: - tools/clientcmd/api/latest - tools/clientcmd/api/v1 - tools/metrics + - tools/pager + - tools/reference - tools/remotecommand - transport + - transport/spdy + - util/buffer - util/cert - util/exec - util/flowcontrol - util/homedir - util/integer +- name: k8s.io/code-generator + version: 0ab89e584187c20cc7c1a3f30db69f3b4ab64196 +- name: k8s.io/gengo + version: 906d99f89cd644eecf75ab547b29bf9f876f0b59 +- name: k8s.io/kube-openapi + version: 39a7bf85c140f972372c2a0d1ee40adbf0c8bfe1 + subpackages: + - pkg/common testImports: [] diff --git a/glide.yaml b/glide.yaml index 3d79d8f68..1b7b5b827 100644 --- a/glide.yaml +++ b/glide.yaml @@ -10,38 +10,14 @@ import: - service/ec2 - package: github.com/lib/pq - package: github.com/motomux/pretty -- package: k8s.io/apiextensions-apiserver - 
subpackages: - - pkg/client/clientset/clientset - package: k8s.io/apimachinery - version: release-1.7 - subpackages: - - pkg/api/errors - - pkg/api/resource - - pkg/apis/meta/v1 - - pkg/labels - - pkg/runtime - - pkg/runtime/schema - - pkg/runtime/serializer - - pkg/types - - pkg/util/intstr - - pkg/util/remotecommand - - pkg/watch + version: kubernetes-1.9.9 +- package: k8s.io/apiextensions-apiserver + version: kubernetes-1.9.9 - package: k8s.io/client-go - version: ^4.0.0 - subpackages: - - kubernetes - - kubernetes/scheme - - kubernetes/typed/apps/v1beta1 - - kubernetes/typed/core/v1 - - kubernetes/typed/extensions/v1beta1 - - pkg/api - - pkg/api/v1 - - pkg/apis/apps/v1beta1 - - pkg/apis/extensions/v1beta1 - - rest - - tools/cache - - tools/clientcmd - - tools/remotecommand + version: ^6.0.0 +- package: k8s.io/code-generator + version: kubernetes-1.9.9 +- package: k8s.io/gengo - package: gopkg.in/yaml.v2 - package: github.com/mohae/deepcopy diff --git a/pkg/cluster/cluster.go b/pkg/cluster/cluster.go index 2548ebc22..e15f5a63f 100644 --- a/pkg/cluster/cluster.go +++ b/pkg/cluster/cluster.go @@ -14,9 +14,9 @@ import ( "github.com/Sirupsen/logrus" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/pkg/api/v1" - "k8s.io/client-go/pkg/apis/apps/v1beta1" - policybeta1 "k8s.io/client-go/pkg/apis/policy/v1beta1" + "k8s.io/api/core/v1" + "k8s.io/api/apps/v1beta1" + policybeta1 "k8s.io/api/policy/v1beta1" "k8s.io/client-go/rest" "k8s.io/client-go/tools/cache" @@ -28,7 +28,7 @@ import ( "github.com/zalando-incubator/postgres-operator/pkg/util/patroni" "github.com/zalando-incubator/postgres-operator/pkg/util/teams" "github.com/zalando-incubator/postgres-operator/pkg/util/users" - rbacv1beta1 "k8s.io/client-go/pkg/apis/rbac/v1beta1" + rbacv1beta1 "k8s.io/api/rbac/v1beta1" ) var ( diff --git a/pkg/cluster/cluster_test.go b/pkg/cluster/cluster_test.go index 34f64e655..7786e4563 100644 --- a/pkg/cluster/cluster_test.go +++ 
b/pkg/cluster/cluster_test.go @@ -7,7 +7,7 @@ import ( "github.com/zalando-incubator/postgres-operator/pkg/util/config" "github.com/zalando-incubator/postgres-operator/pkg/util/k8sutil" "github.com/zalando-incubator/postgres-operator/pkg/util/teams" - "k8s.io/client-go/pkg/api/v1" + "k8s.io/api/core/v1" "reflect" "testing" ) diff --git a/pkg/cluster/exec.go b/pkg/cluster/exec.go index 81843aca6..36d8a884c 100644 --- a/pkg/cluster/exec.go +++ b/pkg/cluster/exec.go @@ -6,9 +6,8 @@ import ( "strings" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - remotecommandconsts "k8s.io/apimachinery/pkg/util/remotecommand" "k8s.io/client-go/kubernetes/scheme" - "k8s.io/client-go/pkg/api/v1" + "k8s.io/api/core/v1" "k8s.io/client-go/tools/remotecommand" "github.com/zalando-incubator/postgres-operator/pkg/spec" @@ -54,15 +53,15 @@ func (c *Cluster) ExecCommand(podName *spec.NamespacedName, command ...string) ( Stderr: true, }, scheme.ParameterCodec) - exec, err := remotecommand.NewExecutor(c.RestConfig, "POST", req.URL()) + exec, err := remotecommand.NewSPDYExecutor(c.RestConfig, "POST", req.URL()) if err != nil { return "", fmt.Errorf("failed to init executor: %v", err) } err = exec.Stream(remotecommand.StreamOptions{ - SupportedProtocols: remotecommandconsts.SupportedStreamingProtocols, Stdout: &execOut, Stderr: &execErr, + Tty: false, }) if err != nil { diff --git a/pkg/cluster/k8sres.go b/pkg/cluster/k8sres.go index 35b6d471e..26308cd30 100644 --- a/pkg/cluster/k8sres.go +++ b/pkg/cluster/k8sres.go @@ -10,9 +10,9 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/intstr" - "k8s.io/client-go/pkg/api/v1" - "k8s.io/client-go/pkg/apis/apps/v1beta1" - policybeta1 "k8s.io/client-go/pkg/apis/policy/v1beta1" + "k8s.io/api/core/v1" + "k8s.io/api/apps/v1beta1" + policybeta1 "k8s.io/api/policy/v1beta1" "github.com/zalando-incubator/postgres-operator/pkg/spec" 
"github.com/zalando-incubator/postgres-operator/pkg/util/constants" diff --git a/pkg/cluster/pod.go b/pkg/cluster/pod.go index 66c2950bc..b0b73315d 100644 --- a/pkg/cluster/pod.go +++ b/pkg/cluster/pod.go @@ -5,7 +5,7 @@ import ( "math/rand" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/pkg/api/v1" + "k8s.io/api/core/v1" "github.com/zalando-incubator/postgres-operator/pkg/spec" "github.com/zalando-incubator/postgres-operator/pkg/util" diff --git a/pkg/cluster/resources.go b/pkg/cluster/resources.go index cc3c4e6f9..c965d65d5 100644 --- a/pkg/cluster/resources.go +++ b/pkg/cluster/resources.go @@ -7,9 +7,9 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/pkg/api/v1" - "k8s.io/client-go/pkg/apis/apps/v1beta1" - policybeta1 "k8s.io/client-go/pkg/apis/policy/v1beta1" + "k8s.io/api/core/v1" + "k8s.io/api/apps/v1beta1" + policybeta1 "k8s.io/api/policy/v1beta1" "github.com/zalando-incubator/postgres-operator/pkg/util" "github.com/zalando-incubator/postgres-operator/pkg/util/constants" diff --git a/pkg/cluster/sync.go b/pkg/cluster/sync.go index bd2823b5b..d56dd9a9a 100644 --- a/pkg/cluster/sync.go +++ b/pkg/cluster/sync.go @@ -5,7 +5,7 @@ import ( "reflect" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - policybeta1 "k8s.io/client-go/pkg/apis/policy/v1beta1" + policybeta1 "k8s.io/api/policy/v1beta1" "github.com/zalando-incubator/postgres-operator/pkg/spec" "github.com/zalando-incubator/postgres-operator/pkg/util" diff --git a/pkg/cluster/util.go b/pkg/cluster/util.go index ab8189d96..8f2c6c389 100644 --- a/pkg/cluster/util.go +++ b/pkg/cluster/util.go @@ -13,9 +13,9 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/pkg/api/v1" - "k8s.io/client-go/pkg/apis/apps/v1beta1" - policybeta1 "k8s.io/client-go/pkg/apis/policy/v1beta1" + "k8s.io/api/core/v1" + "k8s.io/api/apps/v1beta1" + policybeta1 "k8s.io/api/policy/v1beta1" 
"github.com/zalando-incubator/postgres-operator/pkg/spec" "github.com/zalando-incubator/postgres-operator/pkg/util" diff --git a/pkg/cluster/volumes.go b/pkg/cluster/volumes.go index 2b7537071..71f35cf05 100644 --- a/pkg/cluster/volumes.go +++ b/pkg/cluster/volumes.go @@ -7,7 +7,7 @@ import ( "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/pkg/api/v1" + "k8s.io/api/core/v1" "github.com/zalando-incubator/postgres-operator/pkg/spec" "github.com/zalando-incubator/postgres-operator/pkg/util" diff --git a/pkg/controller/controller.go b/pkg/controller/controller.go index 96f1d2087..df2fa9d04 100644 --- a/pkg/controller/controller.go +++ b/pkg/controller/controller.go @@ -9,8 +9,8 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/kubernetes/scheme" - "k8s.io/client-go/pkg/api/v1" - rbacv1beta1 "k8s.io/client-go/pkg/apis/rbac/v1beta1" + "k8s.io/api/core/v1" + rbacv1beta1 "k8s.io/api/rbac/v1beta1" "k8s.io/client-go/tools/cache" "github.com/zalando-incubator/postgres-operator/pkg/apiserver" diff --git a/pkg/controller/node.go b/pkg/controller/node.go index a4a558a2c..c18f6f0ad 100644 --- a/pkg/controller/node.go +++ b/pkg/controller/node.go @@ -5,7 +5,7 @@ import ( "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/watch" - "k8s.io/client-go/pkg/api/v1" + "k8s.io/api/core/v1" "github.com/zalando-incubator/postgres-operator/pkg/cluster" "github.com/zalando-incubator/postgres-operator/pkg/util" diff --git a/pkg/controller/node_test.go b/pkg/controller/node_test.go index 0f86628bd..6cb4d2b0d 100644 --- a/pkg/controller/node_test.go +++ b/pkg/controller/node_test.go @@ -5,7 +5,7 @@ import ( "github.com/zalando-incubator/postgres-operator/pkg/spec" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/pkg/api/v1" + "k8s.io/api/core/v1" ) const ( diff --git a/pkg/controller/pod.go b/pkg/controller/pod.go index 
070e6da2d..0d16523fc 100644 --- a/pkg/controller/pod.go +++ b/pkg/controller/pod.go @@ -4,7 +4,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/watch" - "k8s.io/client-go/pkg/api/v1" + "k8s.io/api/core/v1" "github.com/zalando-incubator/postgres-operator/pkg/spec" "github.com/zalando-incubator/postgres-operator/pkg/util" diff --git a/pkg/controller/postgresql.go b/pkg/controller/postgresql.go index c037260ab..f4d56ad6d 100644 --- a/pkg/controller/postgresql.go +++ b/pkg/controller/postgresql.go @@ -147,12 +147,12 @@ func (d *crdDecoder) Decode() (action watch.EventType, object runtime.Object, er func (c *Controller) clusterWatchFunc(options metav1.ListOptions) (watch.Interface, error) { options.Watch = true + // MIGRATION: FieldsSelectorParam(nil) r, err := c.KubeClient.CRDREST. Get(). Namespace(c.opConfig.WatchedNamespace). Resource(constants.PostgresCRDResource). VersionedParams(&options, metav1.ParameterCodec). - FieldsSelectorParam(nil). 
Stream() if err != nil { diff --git a/pkg/controller/util.go b/pkg/controller/util.go index ef1472a2a..fceea3ba8 100644 --- a/pkg/controller/util.go +++ b/pkg/controller/util.go @@ -6,7 +6,7 @@ import ( apiextv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/client-go/pkg/api/v1" + "k8s.io/api/core/v1" "github.com/zalando-incubator/postgres-operator/pkg/cluster" "github.com/zalando-incubator/postgres-operator/pkg/spec" diff --git a/pkg/controller/util_test.go b/pkg/controller/util_test.go index 693383143..c29240566 100644 --- a/pkg/controller/util_test.go +++ b/pkg/controller/util_test.go @@ -8,7 +8,7 @@ import ( b64 "encoding/base64" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" v1core "k8s.io/client-go/kubernetes/typed/core/v1" - "k8s.io/client-go/pkg/api/v1" + "k8s.io/api/core/v1" "github.com/zalando-incubator/postgres-operator/pkg/spec" "github.com/zalando-incubator/postgres-operator/pkg/util/k8sutil" diff --git a/pkg/spec/postgresql.go b/pkg/spec/postgresql.go index 4b973e503..a164da75a 100644 --- a/pkg/spec/postgresql.go +++ b/pkg/spec/postgresql.go @@ -9,7 +9,8 @@ import ( "time" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/pkg/api/v1" + "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" ) // MaintenanceWindow describes the time window when the operator is allowed to do maintenance on a cluster. @@ -154,14 +155,34 @@ var ( // will not contain any private fields not-reachable to deepcopy. This should be ok, // since Error is never read from a Kubernetes object. 
func (p *Postgresql) Clone() *Postgresql { - if p == nil { - return nil - } + if p == nil {return nil} c := deepcopy.Copy(p).(*Postgresql) c.Error = nil return c } +func (in *Postgresql) DeepCopyInto(out *Postgresql) { + if in != nil { + out = deepcopy.Copy(in).(*Postgresql) + } + return +} + +func (in *Postgresql) DeepCopy() *Postgresql { + if in == nil { return nil } + out := new(Postgresql) + in.DeepCopyInto(out) + return out +} + +func (in *Postgresql) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + + func parseTime(s string) (time.Time, error) { parts := strings.Split(s, ":") if len(parts) != 2 { @@ -287,6 +308,29 @@ func validateCloneClusterDescription(clone *CloneDescription) error { type postgresqlListCopy PostgresqlList type postgresqlCopy Postgresql + +func (in *PostgresqlList) DeepCopy() *PostgresqlList { + if in == nil { return nil } + out := new(PostgresqlList) + in.DeepCopyInto(out) + return out +} + +func (in *PostgresqlList) DeepCopyInto(out *PostgresqlList) { + if in != nil { + out = deepcopy.Copy(in).(*PostgresqlList) + } + return +} + +func (in *PostgresqlList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + + // UnmarshalJSON converts a JSON into the PostgreSQL object. 
func (p *Postgresql) UnmarshalJSON(data []byte) error { var tmp postgresqlCopy diff --git a/pkg/spec/types.go b/pkg/spec/types.go index 98490e1b0..37d01fc9a 100644 --- a/pkg/spec/types.go +++ b/pkg/spec/types.go @@ -12,9 +12,9 @@ import ( "github.com/Sirupsen/logrus" "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/pkg/api/v1" - "k8s.io/client-go/pkg/apis/apps/v1beta1" - policyv1beta1 "k8s.io/client-go/pkg/apis/policy/v1beta1" + "k8s.io/api/core/v1" + "k8s.io/api/apps/v1beta1" + policyv1beta1 "k8s.io/api/policy/v1beta1" "k8s.io/client-go/rest" ) diff --git a/pkg/util/config/crd_config.go b/pkg/util/config/crd_config.go index cd08ea14e..465c27637 100644 --- a/pkg/util/config/crd_config.go +++ b/pkg/util/config/crd_config.go @@ -6,6 +6,8 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "github.com/zalando-incubator/postgres-operator/pkg/spec" + "github.com/mohae/deepcopy" + "k8s.io/apimachinery/pkg/runtime" ) type OperatorConfiguration struct { @@ -160,3 +162,46 @@ func (opcl *OperatorConfigurationList) UnmarshalJSON(data []byte) error { *opcl = OperatorConfigurationList(ref) return nil } + +func (in *OperatorConfiguration) DeepCopyInto(out *OperatorConfiguration) { + if in != nil { + out = deepcopy.Copy(in).(*OperatorConfiguration) + } + return +} + +func (in *OperatorConfiguration) DeepCopy() *OperatorConfiguration { + if in == nil { return nil } + out := new(OperatorConfiguration) + in.DeepCopyInto(out) + return out +} + +func (in *OperatorConfiguration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +func (in *OperatorConfigurationList) DeepCopyInto(out *OperatorConfigurationList) { + if in != nil { + out = deepcopy.Copy(in).(*OperatorConfigurationList) + } + return +} + +func (in *OperatorConfigurationList) DeepCopy() *OperatorConfigurationList { + if in == nil { return nil } + out := new(OperatorConfigurationList) + in.DeepCopyInto(out) + return out +} + +func (in *OperatorConfigurationList) 
DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + diff --git a/pkg/util/k8sutil/k8sutil.go b/pkg/util/k8sutil/k8sutil.go index dd96aa5a7..f4af5fea1 100644 --- a/pkg/util/k8sutil/k8sutil.go +++ b/pkg/util/k8sutil/k8sutil.go @@ -4,19 +4,19 @@ import ( "fmt" "reflect" + "k8s.io/api/core/v1" + policybeta1 "k8s.io/api/policy/v1beta1" apiextclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" apiextbeta1 "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes" + "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/kubernetes/typed/apps/v1beta1" v1core "k8s.io/client-go/kubernetes/typed/core/v1" policyv1beta1 "k8s.io/client-go/kubernetes/typed/policy/v1beta1" rbacv1beta1 "k8s.io/client-go/kubernetes/typed/rbac/v1beta1" - "k8s.io/client-go/pkg/api" - "k8s.io/client-go/pkg/api/v1" - policybeta1 "k8s.io/client-go/pkg/apis/policy/v1beta1" "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" @@ -93,7 +93,8 @@ func NewFromConfig(cfg *rest.Config) (KubernetesClient, error) { Version: constants.CRDApiVersion, } cfg2.APIPath = constants.K8sAPIPath - cfg2.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: api.Codecs} + // MIGRATION: api.codecs -> scheme.Codecs? 
+ cfg2.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} crd, err := rest.RESTClientFor(&cfg2) if err != nil { diff --git a/pkg/util/patroni/patroni.go b/pkg/util/patroni/patroni.go index 5e5dc4aeb..28011c858 100644 --- a/pkg/util/patroni/patroni.go +++ b/pkg/util/patroni/patroni.go @@ -9,7 +9,7 @@ import ( "time" "github.com/Sirupsen/logrus" - "k8s.io/client-go/pkg/api/v1" + "k8s.io/api/core/v1" ) const ( diff --git a/pkg/util/volumes/ebs.go b/pkg/util/volumes/ebs.go index 57a334258..00801fa9c 100644 --- a/pkg/util/volumes/ebs.go +++ b/pkg/util/volumes/ebs.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/ec2" - "k8s.io/client-go/pkg/api/v1" + "k8s.io/api/core/v1" "github.com/zalando-incubator/postgres-operator/pkg/util/constants" "github.com/zalando-incubator/postgres-operator/pkg/util/retryutil" diff --git a/pkg/util/volumes/volumes.go b/pkg/util/volumes/volumes.go index 3680733d9..94c0fffc8 100644 --- a/pkg/util/volumes/volumes.go +++ b/pkg/util/volumes/volumes.go @@ -1,7 +1,7 @@ package volumes import ( - "k8s.io/client-go/pkg/api/v1" + "k8s.io/api/core/v1" ) // VolumeResizer defines the set of methods used to implememnt provider-specific resizing of persistent volumes. From d0f4148cd3d9ecfcb45f706f88386f4d87271f59 Mon Sep 17 00:00:00 2001 From: Oleksii Kliukin Date: Thu, 2 Aug 2018 11:13:23 +0200 Subject: [PATCH 14/30] Fix a link to the CRD manifest. (#356) Per a gripe from @angapov: https://github.com/zalando-incubator/postgres-operator/issues/355 --- docs/reference/operator_parameters.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/operator_parameters.md b/docs/reference/operator_parameters.md index dce1df4cc..06dabadec 100644 --- a/docs/reference/operator_parameters.md +++ b/docs/reference/operator_parameters.md @@ -22,7 +22,7 @@ configuration. 
There are no default values built-in in the operator, each parameter that is not supplied in the configuration receives an empty value. In order to create your own configuration just copy the [default - one](https://github.com/zalando-incubator/postgres-operator/blob/wip/operator_configuration_via_crd/manifests/postgresql-operator-default-configuration.yaml) + one](https://github.com/zalando-incubator/postgres-operator/blob/master/manifests/postgresql-operator-default-configuration.yaml) and change it. CRD-based configuration is more natural and powerful then the one based on From ac7b13231466556266e730dcabda5b9d0ea8c965 Mon Sep 17 00:00:00 2001 From: Oleksii Kliukin Date: Fri, 3 Aug 2018 11:09:45 +0200 Subject: [PATCH 15/30] Refactoring inspired by gometalinter. (#357) Among other things, fix a few issues with deepcopy implementation. --- pkg/apiserver/apiserver.go | 66 +++++---- pkg/cluster/cluster.go | 2 +- pkg/cluster/k8sres.go | 54 ++++---- pkg/cluster/pg.go | 17 ++- pkg/cluster/pod.go | 55 ++++---- pkg/cluster/resources.go | 4 +- pkg/cluster/sync.go | 216 ++++++++++++++++-------------- pkg/cluster/util.go | 4 +- pkg/controller/controller.go | 4 +- pkg/controller/postgresql_test.go | 4 +- pkg/controller/util.go | 32 ++--- pkg/spec/postgresql.go | 64 ++++----- pkg/spec/types.go | 1 - pkg/util/config/crd_config.go | 58 ++++---- pkg/util/patroni/patroni.go | 4 +- 15 files changed, 312 insertions(+), 273 deletions(-) diff --git a/pkg/apiserver/apiserver.go b/pkg/apiserver/apiserver.go index 1dceefd52..82eb7ba9c 100644 --- a/pkg/apiserver/apiserver.go +++ b/pkg/apiserver/apiserver.go @@ -92,12 +92,14 @@ func New(controller controllerInformer, port int, logger *logrus.Logger) *Server // Run starts the HTTP server func (s *Server) Run(stopCh <-chan struct{}, wg *sync.WaitGroup) { + + var err error + defer wg.Done() go func() { - err := s.http.ListenAndServe() - if err != http.ErrServerClosed { - s.logger.Fatalf("Could not start http server: %v", err) + if err2 := 
s.http.ListenAndServe(); err2 != http.ErrServerClosed { + s.logger.Fatalf("Could not start http server: %v", err2) } }() s.logger.Infof("listening on %s", s.http.Addr) @@ -106,21 +108,27 @@ func (s *Server) Run(stopCh <-chan struct{}, wg *sync.WaitGroup) { ctx, cancel := context.WithTimeout(context.Background(), shutdownTimeout) defer cancel() - err := s.http.Shutdown(ctx) + if err = s.http.Shutdown(ctx); err == nil { + s.logger.Infoln("Http server shut down") + return + } if err == context.DeadlineExceeded { s.logger.Warningf("Shutdown timeout exceeded. closing http server") - s.http.Close() - } else if err != nil { - s.logger.Errorf("Could not shutdown http server: %v", err) + if err = s.http.Close(); err != nil { + s.logger.Errorf("could not close http connection: %v", err) + } + return } - s.logger.Infoln("Http server shut down") + s.logger.Errorf("Could not shutdown http server: %v", err) } func (s *Server) respond(obj interface{}, err error, w http.ResponseWriter) { w.Header().Set("Content-Type", "application/json") if err != nil { w.WriteHeader(http.StatusInternalServerError) - json.NewEncoder(w).Encode(map[string]interface{}{"error": err.Error()}) + if err2 := json.NewEncoder(w).Encode(map[string]interface{}{"error": err.Error()}); err2 != nil { + s.logger.Errorf("could not encode error response %q: %v", err, err2) + } return } @@ -186,6 +194,14 @@ func (s *Server) clusters(w http.ResponseWriter, req *http.Request) { s.respond(resp, err, w) } +func mustConvertToUint32(s string) uint32{ + result, err := strconv.Atoi(s); + if err != nil { + panic(fmt.Errorf("mustConvertToUint32 called for %s: %v", s, err)) + } + return uint32(result) +} + func (s *Server) workers(w http.ResponseWriter, req *http.Request) { var ( resp interface{} @@ -195,30 +211,30 @@ func (s *Server) workers(w http.ResponseWriter, req *http.Request) { if workerAllQueue.MatchString(req.URL.Path) { s.allQueues(w, req) return - } else if matches := util.FindNamedStringSubmatch(workerLogsURL, 
req.URL.Path); matches != nil { - workerID, _ := strconv.Atoi(matches["id"]) + } + if workerAllStatus.MatchString(req.URL.Path) { + s.allWorkers(w, req) + return + } + + err = fmt.Errorf("page not found") + + if matches := util.FindNamedStringSubmatch(workerLogsURL, req.URL.Path); matches != nil { + workerID := mustConvertToUint32(matches["id"]) + resp, err = s.controller.WorkerLogs(workerID) - resp, err = s.controller.WorkerLogs(uint32(workerID)) } else if matches := util.FindNamedStringSubmatch(workerEventsQueueURL, req.URL.Path); matches != nil { - workerID, _ := strconv.Atoi(matches["id"]) + workerID := mustConvertToUint32(matches["id"]) + resp, err = s.controller.ListQueue(workerID) - resp, err = s.controller.ListQueue(uint32(workerID)) } else if matches := util.FindNamedStringSubmatch(workerStatusURL, req.URL.Path); matches != nil { var workerStatus *spec.WorkerStatus - workerID, _ := strconv.Atoi(matches["id"]) - workerStatus, err = s.controller.WorkerStatus(uint32(workerID)) - if workerStatus == nil { - resp = "idle" - } else { + workerID := mustConvertToUint32(matches["id"]) + resp = "idle" + if workerStatus, err = s.controller.WorkerStatus(workerID); workerStatus != nil { resp = workerStatus } - } else if workerAllStatus.MatchString(req.URL.Path) { - s.allWorkers(w, req) - return - } else { - s.respond(nil, fmt.Errorf("page not found"), w) - return } s.respond(resp, err, w) diff --git a/pkg/cluster/cluster.go b/pkg/cluster/cluster.go index e15f5a63f..c2af3f9c8 100644 --- a/pkg/cluster/cluster.go +++ b/pkg/cluster/cluster.go @@ -284,7 +284,7 @@ func (c *Cluster) Create() error { } c.logger.Infof("pods are ready") - // create database objects unless we are running without pods or disabled that feature explicitely + // create database objects unless we are running without pods or disabled that feature explicitly if !(c.databaseAccessDisabled() || c.getNumberOfInstances(&c.Spec) <= 0) { if err = c.createRoles(); err != nil { return fmt.Errorf("could not 
create users: %v", err) diff --git a/pkg/cluster/k8sres.go b/pkg/cluster/k8sres.go index 26308cd30..917be763f 100644 --- a/pkg/cluster/k8sres.go +++ b/pkg/cluster/k8sres.go @@ -88,7 +88,7 @@ func (c *Cluster) makeDefaultResources() spec.Resources { defaultRequests := spec.ResourceDescription{CPU: config.DefaultCPURequest, Memory: config.DefaultMemoryRequest} defaultLimits := spec.ResourceDescription{CPU: config.DefaultCPULimit, Memory: config.DefaultMemoryLimit} - return spec.Resources{defaultRequests, defaultLimits} + return spec.Resources{ResourceRequest:defaultRequests, ResourceLimits:defaultLimits} } func generateResourceRequirements(resources spec.Resources, defaultResources spec.Resources) (*v1.ResourceRequirements, error) { @@ -537,10 +537,10 @@ func deduplicateEnvVars(input []v1.EnvVar, containerName string, logger *logrus. for i, va := range input { if names[va.Name] == 0 { - names[va.Name] += 1 + names[va.Name]++ result = append(result, input[i]) } else if names[va.Name] == 1 { - names[va.Name] += 1 + names[va.Name]++ logger.Warningf("variable %q is defined in %q more than once, the subsequent definitions are ignored", va.Name, containerName) } @@ -626,6 +626,12 @@ func makeResources(cpuRequest, memoryRequest, cpuLimit, memoryLimit string) spec func (c *Cluster) generateStatefulSet(spec *spec.PostgresSpec) (*v1beta1.StatefulSet, error) { + var ( + err error + sidecarContainers []v1.Container + podTemplate *v1.PodTemplateSpec + volumeClaimTemplate *v1.PersistentVolumeClaim + ) defaultResources := c.makeDefaultResources() resourceRequirements, err := generateResourceRequirements(spec.Resources, defaultResources) @@ -633,21 +639,19 @@ func (c *Cluster) generateStatefulSet(spec *spec.PostgresSpec) (*v1beta1.Statefu return nil, fmt.Errorf("could not generate resource requirements: %v", err) } - if err != nil { - return nil, fmt.Errorf("could not generate Scalyr sidecar resource requirements: %v", err) - } customPodEnvVarsList := make([]v1.EnvVar, 0) if 
c.OpConfig.PodEnvironmentConfigMap != "" { - if cm, err := c.KubeClient.ConfigMaps(c.Namespace).Get(c.OpConfig.PodEnvironmentConfigMap, metav1.GetOptions{}); err != nil { + var cm *v1.ConfigMap + cm, err = c.KubeClient.ConfigMaps(c.Namespace).Get(c.OpConfig.PodEnvironmentConfigMap, metav1.GetOptions{}) + if err != nil { return nil, fmt.Errorf("could not read PodEnvironmentConfigMap: %v", err) - } else { - for k, v := range cm.Data { - customPodEnvVarsList = append(customPodEnvVarsList, v1.EnvVar{Name: k, Value: v}) - } - sort.Slice(customPodEnvVarsList, - func(i, j int) bool { return customPodEnvVarsList[i].Name < customPodEnvVarsList[j].Name }) } + for k, v := range cm.Data { + customPodEnvVarsList = append(customPodEnvVarsList, v1.EnvVar{Name: k, Value: v}) + } + sort.Slice(customPodEnvVarsList, + func(i, j int) bool { return customPodEnvVarsList[i].Name < customPodEnvVarsList[j].Name }) } spiloConfiguration := generateSpiloJSONConfiguration(&spec.PostgresqlParam, &spec.Patroni, c.OpConfig.PamRoleName, c.logger) @@ -686,16 +690,15 @@ func (c *Cluster) generateStatefulSet(spec *spec.PostgresSpec) (*v1beta1.Statefu } // generate sidecar containers - sidecarContainers, err := generateSidecarContainers(sideCars, volumeMounts, defaultResources, - c.OpConfig.SuperUsername, c.credentialSecretName(c.OpConfig.SuperUsername), c.logger) - if err != nil { - return nil, fmt.Errorf("could not generate sidecar containers: %v", err) + if sidecarContainers, err = generateSidecarContainers(sideCars, volumeMounts, defaultResources, + c.OpConfig.SuperUsername, c.credentialSecretName(c.OpConfig.SuperUsername), c.logger); err != nil { + return nil, fmt.Errorf("could not generate sidecar containers: %v", err) } tolerationSpec := tolerations(&spec.Tolerations, c.OpConfig.PodToleration) // generate pod template for the statefulset, based on the spilo container and sidecards - podTemplate, err := generatePodTemplate( + if podTemplate, err = generatePodTemplate( c.Namespace, 
c.labelsSet(true), spiloContainer, @@ -704,14 +707,13 @@ func (c *Cluster) generateStatefulSet(spec *spec.PostgresSpec) (*v1beta1.Statefu nodeAffinity(c.OpConfig.NodeReadinessLabel), int64(c.OpConfig.PodTerminateGracePeriod.Seconds()), c.OpConfig.PodServiceAccountName, - c.OpConfig.KubeIAMRole) - - if err != nil { - return nil, fmt.Errorf("could not generate pod template: %v", err) + c.OpConfig.KubeIAMRole); err != nil { + return nil, fmt.Errorf("could not generate pod template: %v", err) } - volumeClaimTemplate, err := generatePersistentVolumeClaimTemplate(spec.Volume.Size, spec.Volume.StorageClass) - if err != nil { - return nil, fmt.Errorf("could not generate volume claim template: %v", err) + + if volumeClaimTemplate, err = generatePersistentVolumeClaimTemplate(spec.Volume.Size, + spec.Volume.StorageClass); err != nil { + return nil, fmt.Errorf("could not generate volume claim template: %v", err) } numberOfInstances := c.getNumberOfInstances(spec) @@ -1033,7 +1035,7 @@ func (c *Cluster) generateCloneEnvironment(description *spec.CloneDescription) [ result = append(result, v1.EnvVar{Name: "CLONE_METHOD", Value: "CLONE_WITH_WALE"}) result = append(result, v1.EnvVar{Name: "CLONE_WAL_S3_BUCKET", Value: c.OpConfig.WALES3Bucket}) result = append(result, v1.EnvVar{Name: "CLONE_TARGET_TIME", Value: description.EndTimestamp}) - result = append(result, v1.EnvVar{Name: "CLONE_WAL_BUCKET_SCOPE_SUFFIX", Value: getBucketScopeSuffix(description.Uid)}) + result = append(result, v1.EnvVar{Name: "CLONE_WAL_BUCKET_SCOPE_SUFFIX", Value: getBucketScopeSuffix(description.UID)}) result = append(result, v1.EnvVar{Name: "CLONE_WAL_BUCKET_SCOPE_PREFIX", Value: ""}) } diff --git a/pkg/cluster/pg.go b/pkg/cluster/pg.go index 09c2e16c1..f570ac81c 100644 --- a/pkg/cluster/pg.go +++ b/pkg/cluster/pg.go @@ -153,12 +153,10 @@ func (c *Cluster) readPgUsersFromDatabase(userNames []string) (users spec.PgUser // getDatabases returns the map of current databases with owners // The caller is 
responsible for opening and closing the database connection -func (c *Cluster) getDatabases() (map[string]string, error) { +func (c *Cluster) getDatabases() (dbs map[string]string, err error) { var ( rows *sql.Rows - err error ) - dbs := make(map[string]string) if rows, err = c.pgDb.Query(getDatabasesSQL); err != nil { return nil, fmt.Errorf("could not query database: %v", err) @@ -166,21 +164,26 @@ func (c *Cluster) getDatabases() (map[string]string, error) { defer func() { if err2 := rows.Close(); err2 != nil { - err = fmt.Errorf("error when closing query cursor: %v", err2) + if err != nil { + err = fmt.Errorf("error when closing query cursor: %v, previous error: %v", err2, err) + } else { + err = fmt.Errorf("error when closing query cursor: %v", err2) + } } }() + dbs = make(map[string]string) + for rows.Next() { var datname, owner string - err := rows.Scan(&datname, &owner) - if err != nil { + if err = rows.Scan(&datname, &owner); err != nil { return nil, fmt.Errorf("error when processing row: %v", err) } dbs[datname] = owner } - return dbs, err + return } // executeCreateDatabase creates new database with the given owner. 
diff --git a/pkg/cluster/pod.go b/pkg/cluster/pod.go index b0b73315d..8d15e6a9a 100644 --- a/pkg/cluster/pod.go +++ b/pkg/cluster/pod.go @@ -9,6 +9,7 @@ import ( "github.com/zalando-incubator/postgres-operator/pkg/spec" "github.com/zalando-incubator/postgres-operator/pkg/util" + "k8s.io/api/apps/v1beta1" ) func (c *Cluster) listPods() ([]v1.Pod, error) { @@ -182,6 +183,8 @@ func (c *Cluster) masterCandidate(oldNodeName string) (*v1.Pod, error) { func (c *Cluster) MigrateMasterPod(podName spec.NamespacedName) error { var ( masterCandidatePod *v1.Pod + err error + eol bool ) oldMaster, err := c.KubeClient.Pods(podName.Namespace).Get(podName.Name, metav1.GetOptions{}) @@ -192,9 +195,10 @@ func (c *Cluster) MigrateMasterPod(podName spec.NamespacedName) error { c.logger.Infof("migrating master pod %q", podName) - if eol, err := c.podIsEndOfLife(oldMaster); err != nil { + if eol, err = c.podIsEndOfLife(oldMaster); err != nil { return fmt.Errorf("could not get node %q: %v", oldMaster.Spec.NodeName, err) - } else if !eol { + } + if !eol { c.logger.Debugf("pod is already on a live node") return nil } @@ -205,41 +209,44 @@ func (c *Cluster) MigrateMasterPod(podName spec.NamespacedName) error { } // we must have a statefulset in the cluster for the migration to work if c.Statefulset == nil { - sset, err := c.KubeClient.StatefulSets(c.Namespace).Get(c.statefulSetName(), metav1.GetOptions{}) - if err != nil { - return fmt.Errorf("could not retrieve cluster statefulset: %v", err) + var sset *v1beta1.StatefulSet + if sset, err = c.KubeClient.StatefulSets(c.Namespace).Get(c.statefulSetName(), + metav1.GetOptions{}); err != nil { + return fmt.Errorf("could not retrieve cluster statefulset: %v", err) } c.Statefulset = sset } // We may not have a cached statefulset if the initial cluster sync has aborted, revert to the spec in that case. 
- if *c.Statefulset.Spec.Replicas == 1 { - c.logger.Warningf("single master pod for cluster %q, migration will cause longer downtime of the master instance", c.clusterName()) - } else { - masterCandidatePod, err = c.masterCandidate(oldMaster.Spec.NodeName) - if err != nil { + if *c.Statefulset.Spec.Replicas > 1 { + if masterCandidatePod, err = c.masterCandidate(oldMaster.Spec.NodeName); err != nil { return fmt.Errorf("could not get new master candidate: %v", err) } + } else { + c.logger.Warningf("single master pod for cluster %q, migration will cause longer downtime of the master instance", c.clusterName()) } + // there are two cases for each postgres cluster that has its master pod on the node to migrate from: // - the cluster has some replicas - migrate one of those if necessary and failover to it // - there are no replicas - just terminate the master and wait until it respawns // in both cases the result is the new master up and running on a new node. - if masterCandidatePod != nil { - pod, err := c.movePodFromEndOfLifeNode(masterCandidatePod) - if err != nil { - return fmt.Errorf("could not move pod: %v", err) - } - masterCandidateName := util.NameFromMeta(pod.ObjectMeta) - if err := c.Switchover(oldMaster, masterCandidateName); err != nil { - return fmt.Errorf("could not failover to pod %q: %v", masterCandidateName, err) - } - } else { + if masterCandidatePod == nil { if _, err = c.movePodFromEndOfLifeNode(oldMaster); err != nil { return fmt.Errorf("could not move pod: %v", err) } + return nil } + + if masterCandidatePod, err = c.movePodFromEndOfLifeNode(masterCandidatePod); err != nil { + return fmt.Errorf("could not move pod: %v", err) + } + + masterCandidateName := util.NameFromMeta(masterCandidatePod.ObjectMeta) + if err := c.Switchover(oldMaster, masterCandidateName); err != nil { + return fmt.Errorf("could not failover to pod %q: %v", masterCandidateName, err) + } + return nil } @@ -281,12 +288,12 @@ func (c *Cluster) recreatePod(podName 
spec.NamespacedName) (*v1.Pod, error) { if err := c.waitForPodDeletion(ch); err != nil { return nil, err } - if pod, err := c.waitForPodLabel(ch, stopChan, nil); err != nil { + pod, err := c.waitForPodLabel(ch, stopChan, nil) + if err != nil { return nil, err - } else { - c.logger.Infof("pod %q has been recreated", podName) - return pod, nil } + c.logger.Infof("pod %q has been recreated", podName) + return pod, nil } func (c *Cluster) recreatePods() error { diff --git a/pkg/cluster/resources.go b/pkg/cluster/resources.go index c965d65d5..6aab9d3fc 100644 --- a/pkg/cluster/resources.go +++ b/pkg/cluster/resources.go @@ -168,7 +168,7 @@ func (c *Cluster) getRollingUpdateFlagFromStatefulSet(sset *v1beta1.StatefulSet, if flag, err = strconv.ParseBool(stringFlag); err != nil { c.logger.Warnf("error when parsing %q annotation for the statefulset %q: expected boolean value, got %q\n", RollingUpdateStatefulsetAnnotationKey, - types.NamespacedName{sset.Namespace, sset.Name}, + types.NamespacedName{Namespace: sset.Namespace, Name: sset.Name}, stringFlag) flag = defaultValue } @@ -491,7 +491,7 @@ func (c *Cluster) generateEndpointSubsets(role PostgresRole) []v1.EndpointSubset if len(endPointAddresses) > 0 { result = append(result, v1.EndpointSubset{ Addresses: endPointAddresses, - Ports: []v1.EndpointPort{{"postgresql", 5432, "TCP"}}, + Ports: []v1.EndpointPort{{Name: "postgresql", Port: 5432, Protocol: "TCP"}}, }) } else if role == Master { c.logger.Warningf("master is not running, generated master endpoint does not contain any addresses") diff --git a/pkg/cluster/sync.go b/pkg/cluster/sync.go index d56dd9a9a..d487695a5 100644 --- a/pkg/cluster/sync.go +++ b/pkg/cluster/sync.go @@ -6,6 +6,8 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" policybeta1 "k8s.io/api/policy/v1beta1" + "k8s.io/api/policy/v1beta1" + "k8s.io/api/core/v1" "github.com/zalando-incubator/postgres-operator/pkg/spec" "github.com/zalando-incubator/postgres-operator/pkg/util" @@ -70,7 +72,7 @@ func 
(c *Cluster) Sync(newSpec *spec.Postgresql) (err error) { } } - // create database objects unless we are running without pods or disabled that feature explicitely + // create database objects unless we are running without pods or disabled that feature explicitly if !(c.databaseAccessDisabled() || c.getNumberOfInstances(&newSpec.Spec) <= 0) { c.logger.Debugf("syncing roles") if err = c.syncRoles(); err != nil { @@ -110,118 +112,119 @@ func (c *Cluster) syncServices() error { } func (c *Cluster) syncService(role PostgresRole) error { + var ( + svc *v1.Service + err error + ) c.setProcessName("syncing %s service", role) - svc, err := c.KubeClient.Services(c.Namespace).Get(c.serviceName(role), metav1.GetOptions{}) - if err == nil { + if svc, err = c.KubeClient.Services(c.Namespace).Get(c.serviceName(role), metav1.GetOptions{}); err == nil { c.Services[role] = svc desiredSvc := c.generateService(role, &c.Spec) - match, reason := k8sutil.SameService(svc, desiredSvc) - if match { - return nil + if match, reason := k8sutil.SameService(svc, desiredSvc); !match { + c.logServiceChanges(role, svc, desiredSvc, false, reason) + if err = c.updateService(role, desiredSvc); err != nil { + return fmt.Errorf("could not update %s service to match desired state: %v", role, err) + } + c.logger.Infof("%s service %q is in the desired state now", role, util.NameFromMeta(desiredSvc.ObjectMeta)) } - c.logServiceChanges(role, svc, desiredSvc, false, reason) - - if err := c.updateService(role, desiredSvc); err != nil { - return fmt.Errorf("could not update %s service to match desired state: %v", role, err) - } - c.logger.Infof("%s service %q is in the desired state now", role, util.NameFromMeta(desiredSvc.ObjectMeta)) - return nil - } else if !k8sutil.ResourceNotFound(err) { + } + if !k8sutil.ResourceNotFound(err) { return fmt.Errorf("could not get %s service: %v", role, err) } + // no existing service, create new one c.Services[role] = nil - c.logger.Infof("could not find the cluster's %s 
service", role) - if svc, err := c.createService(role); err != nil { - if k8sutil.ResourceAlreadyExists(err) { - c.logger.Infof("%s service %q already exists", role, util.NameFromMeta(svc.ObjectMeta)) - svc, err := c.KubeClient.Services(c.Namespace).Get(c.serviceName(role), metav1.GetOptions{}) - if err == nil { - c.Services[role] = svc - } else { - c.logger.Infof("could not fetch existing %s service: %v", role, err) - } - } else { + if svc, err = c.createService(role); err == nil { + c.logger.Infof("created missing %s service %q", role, util.NameFromMeta(svc.ObjectMeta)) + } else { + if !k8sutil.ResourceAlreadyExists(err) { return fmt.Errorf("could not create missing %s service: %v", role, err) } - } else { - c.logger.Infof("created missing %s service %q", role, util.NameFromMeta(svc.ObjectMeta)) - c.Services[role] = svc + c.logger.Infof("%s service %q already exists", role, util.NameFromMeta(svc.ObjectMeta)) + if svc, err = c.KubeClient.Services(c.Namespace).Get(c.serviceName(role), metav1.GetOptions{}); err != nil { + return fmt.Errorf("could not fetch existing %s service: %v", role, err) + } } - + c.Services[role] = svc return nil } func (c *Cluster) syncEndpoint(role PostgresRole) error { + var ( + ep *v1.Endpoints + err error + ) c.setProcessName("syncing %s endpoint", role) - ep, err := c.KubeClient.Endpoints(c.Namespace).Get(c.endpointName(role), metav1.GetOptions{}) - if err == nil { - + if ep, err = c.KubeClient.Endpoints(c.Namespace).Get(c.endpointName(role), metav1.GetOptions{}); err == nil { + // TODO: No syncing of endpoints here, is this covered completely by updateService? 
c.Endpoints[role] = ep return nil - } else if !k8sutil.ResourceNotFound(err) { + } + if !k8sutil.ResourceNotFound(err) { return fmt.Errorf("could not get %s endpoint: %v", role, err) } + // no existing endpoint, create new one c.Endpoints[role] = nil - c.logger.Infof("could not find the cluster's %s endpoint", role) - if ep, err := c.createEndpoint(role); err != nil { - if k8sutil.ResourceAlreadyExists(err) { - c.logger.Infof("%s endpoint %q already exists", role, util.NameFromMeta(ep.ObjectMeta)) - ep, err := c.KubeClient.Endpoints(c.Namespace).Get(c.endpointName(role), metav1.GetOptions{}) - if err == nil { - c.Endpoints[role] = ep - } else { - c.logger.Infof("could not fetch existing %s endpoint: %v", role, err) - } - } else { + if ep, err = c.createEndpoint(role); err == nil { + c.logger.Infof("created missing %s endpoint %q", role, util.NameFromMeta(ep.ObjectMeta)) + } else { + if !k8sutil.ResourceAlreadyExists(err) { return fmt.Errorf("could not create missing %s endpoint: %v", role, err) } - } else { - c.logger.Infof("created missing %s endpoint %q", role, util.NameFromMeta(ep.ObjectMeta)) - c.Endpoints[role] = ep + c.logger.Infof("%s endpoint %q already exists", role, util.NameFromMeta(ep.ObjectMeta)) + if ep, err = c.KubeClient.Endpoints(c.Namespace).Get(c.endpointName(role), metav1.GetOptions{}); err != nil { + return fmt.Errorf("could not fetch existing %s endpoint: %v", role, err) + } } - + c.Endpoints[role] = ep return nil } func (c *Cluster) syncPodDisruptionBudget(isUpdate bool) error { - pdb, err := c.KubeClient.PodDisruptionBudgets(c.Namespace).Get(c.podDisruptionBudgetName(), metav1.GetOptions{}) - if err == nil { + var ( + pdb *v1beta1.PodDisruptionBudget + err error + ) + if pdb, err = c.KubeClient.PodDisruptionBudgets(c.Namespace).Get(c.podDisruptionBudgetName(), metav1.GetOptions{}); err == nil { c.PodDisruptionBudget = pdb newPDB := c.generatePodDisruptionBudget() if match, reason := k8sutil.SamePDB(pdb, newPDB); !match { c.logPDBChanges(pdb, 
newPDB, isUpdate, reason) - if err := c.updatePodDisruptionBudget(newPDB); err != nil { + if err = c.updatePodDisruptionBudget(newPDB); err != nil { return err } } else { c.PodDisruptionBudget = pdb } - return nil - } else if !k8sutil.ResourceNotFound(err) { + + } + if !k8sutil.ResourceNotFound(err) { return fmt.Errorf("could not get pod disruption budget: %v", err) } + // no existing pod disruption budget, create new one c.PodDisruptionBudget = nil - c.logger.Infof("could not find the cluster's pod disruption budget") + if pdb, err = c.createPodDisruptionBudget(); err != nil { - if k8sutil.ResourceAlreadyExists(err) { - c.logger.Infof("pod disruption budget %q already exists", util.NameFromMeta(pdb.ObjectMeta)) - } else { + if !k8sutil.ResourceAlreadyExists(err) { return fmt.Errorf("could not create pod disruption budget: %v", err) } - } else { - c.logger.Infof("created missing pod disruption budget %q", util.NameFromMeta(pdb.ObjectMeta)) - c.PodDisruptionBudget = pdb + c.logger.Infof("pod disruption budget %q already exists", util.NameFromMeta(pdb.ObjectMeta)) + if pdb, err = c.KubeClient.PodDisruptionBudgets(c.Namespace).Get(c.podDisruptionBudgetName(), metav1.GetOptions{}); err != nil { + return fmt.Errorf("could not fetch existing %q pod disruption budget", util.NameFromMeta(pdb.ObjectMeta)) + } } + c.logger.Infof("created missing pod disruption budget %q", util.NameFromMeta(pdb.ObjectMeta)) + c.PodDisruptionBudget = pdb + return nil } @@ -315,6 +318,11 @@ func (c *Cluster) syncStatefulSet() error { // checkAndSetGlobalPostgreSQLConfiguration checks whether cluster-wide API parameters // (like max_connections) has changed and if necessary sets it via the Patroni API func (c *Cluster) checkAndSetGlobalPostgreSQLConfiguration() error { + var ( + err error + pods []v1.Pod + ) + // we need to extract those options from the cluster manifest. 
optionsToSet := make(map[string]string) pgOptions := c.Spec.Parameters @@ -325,47 +333,55 @@ func (c *Cluster) checkAndSetGlobalPostgreSQLConfiguration() error { } } - if len(optionsToSet) > 0 { - pods, err := c.listPods() - if err != nil { - return err - } - if len(pods) == 0 { - return fmt.Errorf("could not call Patroni API: cluster has no pods") - } - for _, pod := range pods { - podName := util.NameFromMeta(pod.ObjectMeta) - c.logger.Debugf("calling Patroni API on a pod %s to set the following Postgres options: %v", - podName, optionsToSet) - if err := c.patroni.SetPostgresParameters(&pod, optionsToSet); err == nil { - return nil - } else { - c.logger.Warningf("could not patch postgres parameters with a pod %s: %v", podName, err) - } - } - return fmt.Errorf("could not reach Patroni API to set Postgres options: failed on every pod (%d total)", - len(pods)) + if len(optionsToSet) == 0 { + return nil } - return nil + + if pods, err = c.listPods(); err != nil { + return err + } + if len(pods) == 0 { + return fmt.Errorf("could not call Patroni API: cluster has no pods") + } + // try all pods until the first one that is successful, as it doesn't matter which pod + // carries the request to change configuration through + for _, pod := range pods { + podName := util.NameFromMeta(pod.ObjectMeta) + c.logger.Debugf("calling Patroni API on a pod %s to set the following Postgres options: %v", + podName, optionsToSet) + if err = c.patroni.SetPostgresParameters(&pod, optionsToSet); err == nil { + return nil + } + c.logger.Warningf("could not patch postgres parameters with a pod %s: %v", podName, err) + } + return fmt.Errorf("could not reach Patroni API to set Postgres options: failed on every pod (%d total)", + len(pods)) } func (c *Cluster) syncSecrets() error { + var ( + err error + secret *v1.Secret + ) c.setProcessName("syncing secrets") secrets := c.generateUserSecrets() for secretUsername, secretSpec := range secrets { - secret, err := 
c.KubeClient.Secrets(secretSpec.Namespace).Create(secretSpec) + if secret, err = c.KubeClient.Secrets(secretSpec.Namespace).Create(secretSpec); err == nil { + c.Secrets[secret.UID] = secret + c.logger.Debugf("created new secret %q, uid: %q", util.NameFromMeta(secret.ObjectMeta), secret.UID) + continue + } if k8sutil.ResourceAlreadyExists(err) { var userMap map[string]spec.PgUser - curSecret, err2 := c.KubeClient.Secrets(secretSpec.Namespace).Get(secretSpec.Name, metav1.GetOptions{}) - if err2 != nil { - return fmt.Errorf("could not get current secret: %v", err2) + if secret, err = c.KubeClient.Secrets(secretSpec.Namespace).Get(secretSpec.Name, metav1.GetOptions{}); err != nil { + return fmt.Errorf("could not get current secret: %v", err) } - if secretUsername != string(curSecret.Data["username"]) { + if secretUsername != string(secret.Data["username"]) { c.logger.Warningf("secret %q does not contain the role %q", secretSpec.Name, secretUsername) continue } - c.logger.Debugf("secret %q already exists, fetching its password", util.NameFromMeta(curSecret.ObjectMeta)) + c.logger.Debugf("secret %q already exists, fetching its password", util.NameFromMeta(secret.ObjectMeta)) if secretUsername == c.systemUsers[constants.SuperuserKeyName].Name { secretUsername = constants.SuperuserKeyName userMap = c.systemUsers @@ -377,35 +393,28 @@ func (c *Cluster) syncSecrets() error { } pwdUser := userMap[secretUsername] // if this secret belongs to the infrastructure role and the password has changed - replace it in the secret - if pwdUser.Password != string(curSecret.Data["password"]) && pwdUser.Origin == spec.RoleOriginInfrastructure { + if pwdUser.Password != string(secret.Data["password"]) && pwdUser.Origin == spec.RoleOriginInfrastructure { c.logger.Debugf("updating the secret %q from the infrastructure roles", secretSpec.Name) - if _, err := c.KubeClient.Secrets(secretSpec.Namespace).Update(secretSpec); err != nil { + if secret, err = 
c.KubeClient.Secrets(secretSpec.Namespace).Update(secretSpec); err != nil { return fmt.Errorf("could not update infrastructure role secret for role %q: %v", secretUsername, err) } } else { // for non-infrastructure role - update the role with the password from the secret - pwdUser.Password = string(curSecret.Data["password"]) + pwdUser.Password = string(secret.Data["password"]) userMap[secretUsername] = pwdUser } - - continue } else { - if err != nil { - return fmt.Errorf("could not create secret for user %q: %v", secretUsername, err) - } - c.Secrets[secret.UID] = secret - c.logger.Debugf("created new secret %q, uid: %q", util.NameFromMeta(secret.ObjectMeta), secret.UID) + return fmt.Errorf("could not create secret for user %q: %v", secretUsername, err) } } return nil } -func (c *Cluster) syncRoles() error { +func (c *Cluster) syncRoles() (err error) { c.setProcessName("syncing roles") var ( - err error dbUsers spec.PgUserMap userNames []string ) @@ -414,9 +423,14 @@ func (c *Cluster) syncRoles() error { if err != nil { return fmt.Errorf("could not init db connection: %v", err) } + defer func() { - if err := c.closeDbConn(); err != nil { - c.logger.Errorf("could not close db connection: %v", err) + if err2 := c.closeDbConn(); err2 != nil { + if err == nil { + err = fmt.Errorf("could not close database connection: %v", err2) + } else { + err = fmt.Errorf("could not close database connection: %v (prior error: %v)", err2, err) + } } }() diff --git a/pkg/cluster/util.go b/pkg/cluster/util.go index 8f2c6c389..3d474e01c 100644 --- a/pkg/cluster/util.go +++ b/pkg/cluster/util.go @@ -372,7 +372,7 @@ func (c *Cluster) waitStatefulsetPodsReady() error { } // Returns labels used to create or list k8s objects such as pods -// For backward compatability, shouldAddExtraLabels must be false +// For backward compatibility, shouldAddExtraLabels must be false // when listing k8s objects. 
See operator PR #252 func (c *Cluster) labelsSet(shouldAddExtraLabels bool) labels.Set { lbls := make(map[string]string) @@ -390,7 +390,7 @@ func (c *Cluster) labelsSet(shouldAddExtraLabels bool) labels.Set { } func (c *Cluster) labelsSelector() *metav1.LabelSelector { - return &metav1.LabelSelector{c.labelsSet(false), nil} + return &metav1.LabelSelector{MatchLabels: c.labelsSet(false), MatchExpressions: nil} } func (c *Cluster) roleLabelsSet(role PostgresRole) labels.Set { diff --git a/pkg/controller/controller.go b/pkg/controller/controller.go index df2fa9d04..01a8e60da 100644 --- a/pkg/controller/controller.go +++ b/pkg/controller/controller.go @@ -151,9 +151,9 @@ func (c *Controller) initPodServiceAccount() { switch { case err != nil: - panic(fmt.Errorf("Unable to parse pod service account definiton from the operator config map: %v", err)) + panic(fmt.Errorf("Unable to parse pod service account definition from the operator config map: %v", err)) case groupVersionKind.Kind != "ServiceAccount": - panic(fmt.Errorf("pod service account definiton in the operator config map defines another type of resource: %v", groupVersionKind.Kind)) + panic(fmt.Errorf("pod service account definition in the operator config map defines another type of resource: %v", groupVersionKind.Kind)) default: c.PodServiceAccount = obj.(*v1.ServiceAccount) if c.PodServiceAccount.Name != c.opConfig.PodServiceAccountName { diff --git a/pkg/controller/postgresql_test.go b/pkg/controller/postgresql_test.go index 7fa7d842f..d5d5669af 100644 --- a/pkg/controller/postgresql_test.go +++ b/pkg/controller/postgresql_test.go @@ -7,8 +7,8 @@ import ( ) var ( - True bool = true - False bool = false + True = true + False = false ) func TestMergeDeprecatedPostgreSQLSpecParameters(t *testing.T) { diff --git a/pkg/controller/util.go b/pkg/controller/util.go index fceea3ba8..4479fe718 100644 --- a/pkg/controller/util.go +++ b/pkg/controller/util.go @@ -114,7 +114,7 @@ func readDecodedRole(s string) 
(*spec.PgUser, error) { return &result, nil } -func (c *Controller) getInfrastructureRoles(rolesSecret *spec.NamespacedName) (result map[string]spec.PgUser, err error) { +func (c *Controller) getInfrastructureRoles(rolesSecret *spec.NamespacedName) (map[string]spec.PgUser, error) { if *rolesSecret == (spec.NamespacedName{}) { // we don't have infrastructure roles defined, bail out return nil, nil @@ -129,7 +129,7 @@ func (c *Controller) getInfrastructureRoles(rolesSecret *spec.NamespacedName) (r } secretData := infraRolesSecret.Data - result = make(map[string]spec.PgUser) + result := make(map[string]spec.PgUser) Users: // in worst case we would have one line per user for i := 1; i <= len(secretData); i++ { @@ -171,22 +171,22 @@ Users: if infraRolesMap, err := c.KubeClient.ConfigMaps(rolesSecret.Namespace).Get(rolesSecret.Name, metav1.GetOptions{}); err == nil { // we have a configmap with username - json description, let's read and decode it for role, s := range infraRolesMap.Data { - if roleDescr, err := readDecodedRole(s); err != nil { + roleDescr, err := readDecodedRole(s) + if err != nil { return nil, fmt.Errorf("could not decode role description: %v", err) - } else { - // check if we have a a password in a configmap - c.logger.Debugf("found role description for role %q: %+v", role, roleDescr) - if passwd, ok := secretData[role]; ok { - roleDescr.Password = string(passwd) - delete(secretData, role) - } else { - c.logger.Warningf("infrastructure role %q has no password defined and is ignored", role) - continue - } - roleDescr.Name = role - roleDescr.Origin = spec.RoleOriginInfrastructure - result[role] = *roleDescr } + // check if we have a password in a configmap + c.logger.Debugf("found role description for role %q: %+v", role, roleDescr) + if passwd, ok := secretData[role]; ok { + roleDescr.Password = string(passwd) + delete(secretData, role) + } else { + c.logger.Warningf("infrastructure role %q has no password defined and is ignored", role) + continue + } 
+ roleDescr.Name = role + roleDescr.Origin = spec.RoleOriginInfrastructure + result[role] = *roleDescr } } diff --git a/pkg/spec/postgresql.go b/pkg/spec/postgresql.go index a164da75a..61dbf6f8c 100644 --- a/pkg/spec/postgresql.go +++ b/pkg/spec/postgresql.go @@ -58,7 +58,7 @@ type Patroni struct { // CloneDescription describes which cluster the new should clone and up to which point in time type CloneDescription struct { ClusterName string `json:"cluster,omitempty"` - Uid string `json:"uid,omitempty"` + UID string `json:"uid,omitempty"` EndTimestamp string `json:"timestamp,omitempty"` } @@ -119,7 +119,7 @@ type PostgresSpec struct { EnableMasterLoadBalancer *bool `json:"enableMasterLoadBalancer,omitempty"` EnableReplicaLoadBalancer *bool `json:"enableReplicaLoadBalancer,omitempty"` - // deprecated load balancer settings mantained for backward compatibility + // deprecated load balancer settings maintained for backward compatibility // see "Load balancers" operator docs UseLoadBalancer *bool `json:"useLoadBalancer,omitempty"` ReplicaLoadBalancer *bool `json:"replicaLoadBalancer,omitempty"` @@ -161,22 +161,22 @@ func (p *Postgresql) Clone() *Postgresql { return c } -func (in *Postgresql) DeepCopyInto(out *Postgresql) { - if in != nil { - out = deepcopy.Copy(in).(*Postgresql) +func (p *Postgresql) DeepCopyInto(out *Postgresql) { + if p != nil { + *out = deepcopy.Copy(*p).(Postgresql) } return } -func (in *Postgresql) DeepCopy() *Postgresql { - if in == nil { return nil } +func (p *Postgresql) DeepCopy() *Postgresql { + if p == nil { return nil } out := new(Postgresql) - in.DeepCopyInto(out) + p.DeepCopyInto(out) return out } -func (in *Postgresql) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { +func (p *Postgresql) DeepCopyObject() runtime.Object { + if c := p.DeepCopy(); c != nil { return c } return nil @@ -309,28 +309,6 @@ type postgresqlListCopy PostgresqlList type postgresqlCopy Postgresql -func (in *PostgresqlList) DeepCopy() 
*PostgresqlList { - if in == nil { return nil } - out := new(PostgresqlList) - in.DeepCopyInto(out) - return out -} - -func (in *PostgresqlList) DeepCopyInto(out *PostgresqlList) { - if in != nil { - out = deepcopy.Copy(in).(*PostgresqlList) - } - return -} - -func (in *PostgresqlList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - - // UnmarshalJSON converts a JSON into the PostgreSQL object. func (p *Postgresql) UnmarshalJSON(data []byte) error { var tmp postgresqlCopy @@ -380,6 +358,28 @@ func (pl *PostgresqlList) UnmarshalJSON(data []byte) error { return nil } +func (pl *PostgresqlList) DeepCopy() *PostgresqlList { + if pl == nil { return nil } + out := new(PostgresqlList) + pl.DeepCopyInto(out) + return out +} + +func (pl *PostgresqlList) DeepCopyInto(out *PostgresqlList) { + if pl != nil { + *out = deepcopy.Copy(*pl).(PostgresqlList) + } + return +} + +func (pl *PostgresqlList) DeepCopyObject() runtime.Object { + if c := pl.DeepCopy(); c != nil { + return c + } + return nil +} + + func (status PostgresStatus) Success() bool { return status != ClusterStatusAddFailed && status != ClusterStatusUpdateFailed && diff --git a/pkg/spec/types.go b/pkg/spec/types.go index 37d01fc9a..fc0dfe237 100644 --- a/pkg/spec/types.go +++ b/pkg/spec/types.go @@ -278,5 +278,4 @@ func (d *Duration) UnmarshalJSON(b []byte) error { default: return fmt.Errorf("could not recognize type %T as a valid type to unmarshal to Duration", val) } - return nil } diff --git a/pkg/util/config/crd_config.go b/pkg/util/config/crd_config.go index 465c27637..b8b2e13c3 100644 --- a/pkg/util/config/crd_config.go +++ b/pkg/util/config/crd_config.go @@ -154,6 +154,27 @@ func (opc *OperatorConfiguration) UnmarshalJSON(data []byte) error { return nil } +func (opc *OperatorConfiguration) DeepCopyInto(out *OperatorConfiguration) { + if opc != nil { + *out = deepcopy.Copy(*opc).(OperatorConfiguration) + } + return +} + +func (opc *OperatorConfiguration) 
DeepCopy() *OperatorConfiguration { + if opc == nil { return nil } + out := new(OperatorConfiguration) + opc.DeepCopyInto(out) + return out +} + +func (opc *OperatorConfiguration) DeepCopyObject() runtime.Object { + if c := opc.DeepCopy(); c != nil { + return c + } + return nil +} + func (opcl *OperatorConfigurationList) UnmarshalJSON(data []byte) error { var ref OperatorConfigurationListCopy if err := json.Unmarshal(data, &ref); err != nil { @@ -163,43 +184,22 @@ func (opcl *OperatorConfigurationList) UnmarshalJSON(data []byte) error { return nil } -func (in *OperatorConfiguration) DeepCopyInto(out *OperatorConfiguration) { - if in != nil { - out = deepcopy.Copy(in).(*OperatorConfiguration) +func (opcl *OperatorConfigurationList) DeepCopyInto(out *OperatorConfigurationList) { + if opcl != nil { + *out = deepcopy.Copy(*opcl).(OperatorConfigurationList) } return } -func (in *OperatorConfiguration) DeepCopy() *OperatorConfiguration { - if in == nil { return nil } - out := new(OperatorConfiguration) - in.DeepCopyInto(out) - return out -} - -func (in *OperatorConfiguration) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -func (in *OperatorConfigurationList) DeepCopyInto(out *OperatorConfigurationList) { - if in != nil { - out = deepcopy.Copy(in).(*OperatorConfigurationList) - } - return -} - -func (in *OperatorConfigurationList) DeepCopy() *OperatorConfigurationList { - if in == nil { return nil } +func (opcl *OperatorConfigurationList) DeepCopy() *OperatorConfigurationList { + if opcl == nil { return nil } out := new(OperatorConfigurationList) - in.DeepCopyInto(out) + opcl.DeepCopyInto(out) return out } -func (in *OperatorConfigurationList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { +func (opcl *OperatorConfigurationList) DeepCopyObject() runtime.Object { + if c := opcl.DeepCopy(); c != nil { return c } return nil diff --git a/pkg/util/patroni/patroni.go b/pkg/util/patroni/patroni.go 
index 28011c858..f2de75ed3 100644 --- a/pkg/util/patroni/patroni.go +++ b/pkg/util/patroni/patroni.go @@ -80,12 +80,10 @@ func (p *Patroni) Switchover(master *v1.Pod, candidate string) error { return fmt.Errorf("could not encode json: %v", err) } return p.httpPostOrPatch(http.MethodPost, apiURL(master)+failoverPath, buf) - - return nil } //TODO: add an option call /patroni to check if it is necessary to restart the server -// SetPostgresParameters sets Postgres options via Patroni patch API call. +// SetPostgresParameters sets Postgres options via Patroni patch API call. func (p *Patroni) SetPostgresParameters(server *v1.Pod, parameters map[string]string) error { buf := &bytes.Buffer{} err := json.NewEncoder(buf).Encode(map[string]map[string]interface{}{"postgresql": {"parameters": parameters}}) From 59f0c5551ecf858c320504d524fcdf92aea87295 Mon Sep 17 00:00:00 2001 From: Oleksii Kliukin Date: Fri, 3 Aug 2018 14:03:37 +0200 Subject: [PATCH 16/30] Allow configuring pod priority globally and per cluster. (#353) * Allow configuring pod priority globally and per cluster. Allow to specify pod priority class for all pods managed by the operator, as well as for those belonging to individual clusters. Controlled by the pod_priority_class_name operator configuration parameter and the podPriorityClassName manifest option. See https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass for the explanation on how to define priority classes since Kubernetes 1.8. Some import order changes are due to go fmt. Removal of OrphanDependents deprecated field. 
Code review by @zerg-junior --- docs/reference/cluster_manifest.md | 9 ++++++- docs/reference/operator_parameters.md | 37 ++++++++++++++++++--------- pkg/cluster/cluster.go | 20 +++++++-------- pkg/cluster/exec.go | 8 +++--- pkg/cluster/k8sres.go | 26 +++++++++++-------- pkg/cluster/pod.go | 2 +- pkg/cluster/resources.go | 10 ++++---- pkg/cluster/sync.go | 3 +-- pkg/cluster/util.go | 6 ++--- pkg/cluster/volumes.go | 2 +- pkg/controller/controller.go | 4 +-- pkg/controller/node.go | 2 +- pkg/controller/node_test.go | 2 +- pkg/controller/operator_config.go | 1 + pkg/controller/pod.go | 2 +- pkg/controller/util.go | 2 +- pkg/controller/util_test.go | 2 +- pkg/spec/postgresql.go | 20 +++++++-------- pkg/spec/types.go | 4 +-- pkg/util/config/config.go | 1 + pkg/util/config/crd_config.go | 1 + 21 files changed, 96 insertions(+), 68 deletions(-) diff --git a/docs/reference/cluster_manifest.md b/docs/reference/cluster_manifest.md index b046f0493..b26fc3661 100644 --- a/docs/reference/cluster_manifest.md +++ b/docs/reference/cluster_manifest.md @@ -89,7 +89,14 @@ Those are parameters grouped directly under the `spec` key in the manifest. examples](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/) for details on tolerations and possible values of those keys. When set, this value overrides the `pod_toleration` setting from the operator. Optional. - + +* **podPriorityClassName** + a name of the [priority + class](https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass) + that should be assigned to the cluster pods. When not specified, the value + is taken from the `pod_priority_class_name` operator parameter, if not set + then the default priority class is taken. The priority class itself must be defined in advance. + ## Postgres parameters Those parameters are grouped under the `postgresql` top-level key. 
diff --git a/docs/reference/operator_parameters.md b/docs/reference/operator_parameters.md index 06dabadec..d14d3d9d7 100644 --- a/docs/reference/operator_parameters.md +++ b/docs/reference/operator_parameters.md @@ -116,10 +116,15 @@ configuration they are grouped under the `kubernetes` key. option. If not defined, a simple definition that contains only the name will be used. The default is empty. * **pod_service_account_role_binding_definition** - This definition must bind pod service account to a role with permission sufficient for the pods to start and for Patroni to access k8s endpoints; service account on its own lacks any such rights starting with k8s v1.8. If not excplicitly defined by the user, a simple definition that binds the account to the operator's own 'zalando-postgres-operator' cluster role will be used. The default is empty. + This definition must bind pod service account to a role with permission + sufficient for the pods to start and for Patroni to access k8s endpoints; + service account on its own lacks any such rights starting with k8s v1.8. If + not explicitly defined by the user, a simple definition that binds the + account to the operator's own 'zalando-postgres-operator' cluster role will + be used. The default is empty. * **pod_terminate_grace_period** - Patroni pods are [terminated + Postgres pods are [terminated forcefully](https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods) after this timeout. The default is `5m`. @@ -151,7 +156,7 @@ configuration they are grouped under the `kubernetes` key. name of the secret containing infrastructure roles names and passwords. * **pod_role_label** - name of the label assigned to the postgres pods (and services/endpoints) by + name of the label assigned to the Postgres pods (and services/endpoints) by the operator. The default is `spilo-role`. * **cluster_labels** @@ -168,7 +173,7 @@ configuration they are grouped under the `kubernetes` key. considered `ready`. 
The operator uses values of those labels to detect the start of the Kubernetes cluster upgrade procedure and move master pods off the nodes to be decommissioned. When the set is not empty, the operator also - assigns the `Affinity` clause to the postgres pods to be scheduled only on + assigns the `Affinity` clause to the Postgres pods to be scheduled only on `ready` nodes. The default is empty. * **toleration** @@ -184,6 +189,13 @@ configuration they are grouped under the `kubernetes` key. All variables from that ConfigMap are injected to the pod's environment, on conflicts they are overridden by the environment variables generated by the operator. The default is empty. + +* **pod_priority_class_name** + a name of the [priority + class](https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass) + that should be assigned to the Postgres pods. The priority class itself must be defined in advance. + Default is empty (use the default priority class). + ## Kubernetes resource requests @@ -231,8 +243,8 @@ CRD-based configuration. possible issues faster. The default is `10m`. * **pod_deletion_wait_timeout** - timeout when waiting for the pods to be deleted when removing the cluster or - recreating pods. The default is `10m`. + timeout when waiting for the Postgres pods to be deleted when removing the + cluster or recreating pods. The default is `10m`. * **ready_wait_interval** the interval between consecutive attempts waiting for the postgres CRD to be @@ -285,18 +297,19 @@ either. In the CRD-based configuration those options are grouped under the * **wal_s3_bucket** S3 bucket to use for shipping WAL segments with WAL-E. A bucket has to be - present and accessible by Patroni managed pods. At the moment, supported - services by Spilo are S3 and GCS. The default is empty. + present and accessible by Postgres pods. At the moment, supported services by + Spilo are S3 and GCS. The default is empty. 
* **log_s3_bucket** S3 bucket to use for shipping postgres daily logs. Works only with S3 on AWS. - The bucket has to be present and accessible by Patroni managed pods. At the - moment Spilo does not yet support this. The default is empty. + The bucket has to be present and accessible by Postgres pods. At the moment + Spilo does not yet support this. The default is empty. * **kube_iam_role** - AWS IAM role to supply in the `iam.amazonaws.com/role` annotation of Patroni + AWS IAM role to supply in the `iam.amazonaws.com/role` annotation of Postgres pods. Only used when combined with - [kube2iam](https://github.com/jtblin/kube2iam) project on AWS. The default is empty. + [kube2iam](https://github.com/jtblin/kube2iam) project on AWS. The default is + empty. * **aws_region** AWS region used to store ESB volumes. The default is `eu-central-1`. diff --git a/pkg/cluster/cluster.go b/pkg/cluster/cluster.go index c2af3f9c8..9ac43e513 100644 --- a/pkg/cluster/cluster.go +++ b/pkg/cluster/cluster.go @@ -12,11 +12,11 @@ import ( "time" "github.com/Sirupsen/logrus" + "k8s.io/api/apps/v1beta1" + "k8s.io/api/core/v1" + policybeta1 "k8s.io/api/policy/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" - "k8s.io/api/core/v1" - "k8s.io/api/apps/v1beta1" - policybeta1 "k8s.io/api/policy/v1beta1" "k8s.io/client-go/rest" "k8s.io/client-go/tools/cache" @@ -91,7 +91,7 @@ type compareStatefulsetResult struct { // New creates a new cluster. This function should be called from a controller. 
func New(cfg Config, kubeClient k8sutil.KubernetesClient, pgSpec spec.Postgresql, logger *logrus.Entry) *Cluster { - orphanDependents := true + deletePropagationPolicy := metav1.DeletePropagationOrphan podEventsQueue := cache.NewFIFO(func(obj interface{}) (string, error) { e, ok := obj.(spec.PodEvent) @@ -113,7 +113,7 @@ func New(cfg Config, kubeClient k8sutil.KubernetesClient, pgSpec spec.Postgresql Services: make(map[PostgresRole]*v1.Service), Endpoints: make(map[PostgresRole]*v1.Endpoints)}, userSyncStrategy: users.DefaultUserSyncStrategy{}, - deleteOptions: &metav1.DeleteOptions{OrphanDependents: &orphanDependents}, + deleteOptions: &metav1.DeleteOptions{PropagationPolicy: &deletePropagationPolicy}, podEventsQueue: podEventsQueue, KubeClient: kubeClient, } @@ -601,7 +601,7 @@ func (c *Cluster) Delete() { } for _, obj := range c.Secrets { - if delete, user := c.shouldDeleteSecret(obj); !delete { + if doDelete, user := c.shouldDeleteSecret(obj); !doDelete { c.logger.Warningf("not removing secret %q for the system user %q", obj.GetName(), user) continue } @@ -951,11 +951,11 @@ func (c *Cluster) deletePatroniClusterEndpoints() error { return util.NameFromMeta(ep.ObjectMeta), err } - delete := func(name string) error { + deleteEndpointFn := func(name string) error { return c.KubeClient.Endpoints(c.Namespace).Delete(name, c.deleteOptions) } - return c.deleteClusterObject(get, delete, "endpoint") + return c.deleteClusterObject(get, deleteEndpointFn, "endpoint") } func (c *Cluster) deletePatroniClusterConfigMaps() error { @@ -964,9 +964,9 @@ func (c *Cluster) deletePatroniClusterConfigMaps() error { return util.NameFromMeta(cm.ObjectMeta), err } - delete := func(name string) error { + deleteConfigMapFn := func(name string) error { return c.KubeClient.ConfigMaps(c.Namespace).Delete(name, c.deleteOptions) } - return c.deleteClusterObject(get, delete, "configmap") + return c.deleteClusterObject(get, deleteConfigMapFn, "configmap") } diff --git a/pkg/cluster/exec.go 
b/pkg/cluster/exec.go index 36d8a884c..ef1ad0795 100644 --- a/pkg/cluster/exec.go +++ b/pkg/cluster/exec.go @@ -5,9 +5,9 @@ import ( "fmt" "strings" + "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes/scheme" - "k8s.io/api/core/v1" "k8s.io/client-go/tools/remotecommand" "github.com/zalando-incubator/postgres-operator/pkg/spec" @@ -59,9 +59,9 @@ func (c *Cluster) ExecCommand(podName *spec.NamespacedName, command ...string) ( } err = exec.Stream(remotecommand.StreamOptions{ - Stdout: &execOut, - Stderr: &execErr, - Tty: false, + Stdout: &execOut, + Stderr: &execErr, + Tty: false, }) if err != nil { diff --git a/pkg/cluster/k8sres.go b/pkg/cluster/k8sres.go index 917be763f..e34c81138 100644 --- a/pkg/cluster/k8sres.go +++ b/pkg/cluster/k8sres.go @@ -6,6 +6,7 @@ import ( "sort" "github.com/Sirupsen/logrus" + "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" @@ -15,6 +16,7 @@ import ( policybeta1 "k8s.io/api/policy/v1beta1" "github.com/zalando-incubator/postgres-operator/pkg/spec" + "github.com/zalando-incubator/postgres-operator/pkg/util" "github.com/zalando-incubator/postgres-operator/pkg/util/constants" "k8s.io/apimachinery/pkg/labels" ) @@ -399,6 +401,7 @@ func generatePodTemplate( terminateGracePeriod int64, podServiceAccountName string, kubeIAMRole string, + priorityClassName string, ) (*v1.PodTemplateSpec, error) { terminateGracePeriodSeconds := terminateGracePeriod @@ -416,6 +419,10 @@ func generatePodTemplate( podSpec.Affinity = nodeAffinity } + if priorityClassName != "" { + podSpec.PriorityClassName = priorityClassName + } + template := v1.PodTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ Labels: labels, @@ -662,7 +669,7 @@ func (c *Cluster) generateStatefulSet(spec *spec.PostgresSpec) (*v1beta1.Statefu c.containerName(), c.logger) // pickup the docker image for the spilo container - effectiveDockerImage := 
getEffectiveDockerImage(c.OpConfig.DockerImage, spec.DockerImage) + effectiveDockerImage := util.Coalesce(spec.DockerImage, c.OpConfig.DockerImage) volumeMounts := generateVolumeMounts() @@ -696,6 +703,7 @@ func (c *Cluster) generateStatefulSet(spec *spec.PostgresSpec) (*v1beta1.Statefu } tolerationSpec := tolerations(&spec.Tolerations, c.OpConfig.PodToleration) + effectivePodPriorityClassName := util.Coalesce(spec.PodPriorityClassName, c.OpConfig.PodPriorityClassName) // generate pod template for the statefulset, based on the spilo container and sidecards if podTemplate, err = generatePodTemplate( @@ -707,8 +715,13 @@ func (c *Cluster) generateStatefulSet(spec *spec.PostgresSpec) (*v1beta1.Statefu nodeAffinity(c.OpConfig.NodeReadinessLabel), int64(c.OpConfig.PodTerminateGracePeriod.Seconds()), c.OpConfig.PodServiceAccountName, - c.OpConfig.KubeIAMRole); err != nil { - return nil, fmt.Errorf("could not generate pod template: %v", err) + c.OpConfig.KubeIAMRole, + effectivePodPriorityClassName); err != nil{ + return nil, fmt.Errorf("could not generate pod template: %v", err) + } + + if err != nil { + return nil, fmt.Errorf("could not generate pod template: %v", err) } if volumeClaimTemplate, err = generatePersistentVolumeClaimTemplate(spec.Volume.Size, @@ -737,13 +750,6 @@ func (c *Cluster) generateStatefulSet(spec *spec.PostgresSpec) (*v1beta1.Statefu return statefulSet, nil } -func getEffectiveDockerImage(globalDockerImage, clusterDockerImage string) string { - if clusterDockerImage == "" { - return globalDockerImage - } - return clusterDockerImage -} - func generateScalyrSidecarSpec(clusterName, APIKey, serverURL, dockerImage string, containerResources *spec.Resources, logger *logrus.Entry) *spec.Sidecar { if APIKey == "" || dockerImage == "" { diff --git a/pkg/cluster/pod.go b/pkg/cluster/pod.go index 8d15e6a9a..de6770faa 100644 --- a/pkg/cluster/pod.go +++ b/pkg/cluster/pod.go @@ -4,8 +4,8 @@ import ( "fmt" "math/rand" - metav1 
"k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "github.com/zalando-incubator/postgres-operator/pkg/spec" "github.com/zalando-incubator/postgres-operator/pkg/util" diff --git a/pkg/cluster/resources.go b/pkg/cluster/resources.go index 6aab9d3fc..f15c4f40a 100644 --- a/pkg/cluster/resources.go +++ b/pkg/cluster/resources.go @@ -5,11 +5,11 @@ import ( "strconv" "strings" + "k8s.io/api/apps/v1beta1" + "k8s.io/api/core/v1" + policybeta1 "k8s.io/api/policy/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" - "k8s.io/api/core/v1" - "k8s.io/api/apps/v1beta1" - policybeta1 "k8s.io/api/policy/v1beta1" "github.com/zalando-incubator/postgres-operator/pkg/util" "github.com/zalando-incubator/postgres-operator/pkg/util/constants" @@ -272,10 +272,10 @@ func (c *Cluster) replaceStatefulSet(newStatefulSet *v1beta1.StatefulSet) error c.logger.Debugf("replacing statefulset") // Delete the current statefulset without deleting the pods - orphanDepencies := true + deletePropagationPolicy := metav1.DeletePropagationOrphan oldStatefulset := c.Statefulset - options := metav1.DeleteOptions{OrphanDependents: &orphanDepencies} + options := metav1.DeleteOptions{PropagationPolicy: &deletePropagationPolicy} if err := c.KubeClient.StatefulSets(oldStatefulset.Namespace).Delete(oldStatefulset.Name, &options); err != nil { return fmt.Errorf("could not delete statefulset %q: %v", statefulSetName, err) } diff --git a/pkg/cluster/sync.go b/pkg/cluster/sync.go index d487695a5..3b5c3b3d3 100644 --- a/pkg/cluster/sync.go +++ b/pkg/cluster/sync.go @@ -6,7 +6,6 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" policybeta1 "k8s.io/api/policy/v1beta1" - "k8s.io/api/policy/v1beta1" "k8s.io/api/core/v1" "github.com/zalando-incubator/postgres-operator/pkg/spec" @@ -188,7 +187,7 @@ func (c *Cluster) syncEndpoint(role PostgresRole) error { func (c *Cluster) syncPodDisruptionBudget(isUpdate bool) error { var ( - 
pdb *v1beta1.PodDisruptionBudget + pdb *policybeta1.PodDisruptionBudget err error ) if pdb, err = c.KubeClient.PodDisruptionBudgets(c.Namespace).Get(c.podDisruptionBudgetName(), metav1.GetOptions{}); err == nil { diff --git a/pkg/cluster/util.go b/pkg/cluster/util.go index 3d474e01c..e38b8ef73 100644 --- a/pkg/cluster/util.go +++ b/pkg/cluster/util.go @@ -11,11 +11,11 @@ import ( "strings" "time" + "k8s.io/api/apps/v1beta1" + "k8s.io/api/core/v1" + policybeta1 "k8s.io/api/policy/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" - "k8s.io/api/core/v1" - "k8s.io/api/apps/v1beta1" - policybeta1 "k8s.io/api/policy/v1beta1" "github.com/zalando-incubator/postgres-operator/pkg/spec" "github.com/zalando-incubator/postgres-operator/pkg/util" diff --git a/pkg/cluster/volumes.go b/pkg/cluster/volumes.go index 71f35cf05..c44ac4a03 100644 --- a/pkg/cluster/volumes.go +++ b/pkg/cluster/volumes.go @@ -5,9 +5,9 @@ import ( "strconv" "strings" + "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/api/core/v1" "github.com/zalando-incubator/postgres-operator/pkg/spec" "github.com/zalando-incubator/postgres-operator/pkg/util" diff --git a/pkg/controller/controller.go b/pkg/controller/controller.go index 01a8e60da..484485b1b 100644 --- a/pkg/controller/controller.go +++ b/pkg/controller/controller.go @@ -6,11 +6,11 @@ import ( "sync" "github.com/Sirupsen/logrus" + "k8s.io/api/core/v1" + rbacv1beta1 "k8s.io/api/rbac/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/kubernetes/scheme" - "k8s.io/api/core/v1" - rbacv1beta1 "k8s.io/api/rbac/v1beta1" "k8s.io/client-go/tools/cache" "github.com/zalando-incubator/postgres-operator/pkg/apiserver" diff --git a/pkg/controller/node.go b/pkg/controller/node.go index c18f6f0ad..dc919c450 100644 --- a/pkg/controller/node.go +++ b/pkg/controller/node.go @@ -1,11 +1,11 @@ package controller import ( 
+ "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/watch" - "k8s.io/api/core/v1" "github.com/zalando-incubator/postgres-operator/pkg/cluster" "github.com/zalando-incubator/postgres-operator/pkg/util" diff --git a/pkg/controller/node_test.go b/pkg/controller/node_test.go index 6cb4d2b0d..06e20093b 100644 --- a/pkg/controller/node_test.go +++ b/pkg/controller/node_test.go @@ -4,8 +4,8 @@ import ( "testing" "github.com/zalando-incubator/postgres-operator/pkg/spec" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) const ( diff --git a/pkg/controller/operator_config.go b/pkg/controller/operator_config.go index 1b7318d1e..bdac26a19 100644 --- a/pkg/controller/operator_config.go +++ b/pkg/controller/operator_config.go @@ -62,6 +62,7 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *config.OperatorConfigur result.ClusterLabels = fromCRD.Kubernetes.ClusterLabels result.ClusterNameLabel = fromCRD.Kubernetes.ClusterNameLabel result.NodeReadinessLabel = fromCRD.Kubernetes.NodeReadinessLabel + result.PodPriorityClassName = fromCRD.Kubernetes.PodPriorityClassName result.DefaultCPURequest = fromCRD.PostgresPodResources.DefaultCPURequest result.DefaultMemoryRequest = fromCRD.PostgresPodResources.DefaultMemoryRequest diff --git a/pkg/controller/pod.go b/pkg/controller/pod.go index 0d16523fc..c801f6b21 100644 --- a/pkg/controller/pod.go +++ b/pkg/controller/pod.go @@ -1,10 +1,10 @@ package controller import ( + "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/watch" - "k8s.io/api/core/v1" "github.com/zalando-incubator/postgres-operator/pkg/spec" "github.com/zalando-incubator/postgres-operator/pkg/util" diff --git a/pkg/controller/util.go b/pkg/controller/util.go index 4479fe718..46c369fdd 100644 --- 
a/pkg/controller/util.go +++ b/pkg/controller/util.go @@ -3,10 +3,10 @@ package controller import ( "fmt" + "k8s.io/api/core/v1" apiextv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/api/core/v1" "github.com/zalando-incubator/postgres-operator/pkg/cluster" "github.com/zalando-incubator/postgres-operator/pkg/spec" diff --git a/pkg/controller/util_test.go b/pkg/controller/util_test.go index c29240566..58253633b 100644 --- a/pkg/controller/util_test.go +++ b/pkg/controller/util_test.go @@ -6,9 +6,9 @@ import ( "testing" b64 "encoding/base64" + "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" v1core "k8s.io/client-go/kubernetes/typed/core/v1" - "k8s.io/api/core/v1" "github.com/zalando-incubator/postgres-operator/pkg/spec" "github.com/zalando-incubator/postgres-operator/pkg/util/k8sutil" diff --git a/pkg/spec/postgresql.go b/pkg/spec/postgresql.go index 61dbf6f8c..d2ce1efa2 100644 --- a/pkg/spec/postgresql.go +++ b/pkg/spec/postgresql.go @@ -8,8 +8,8 @@ import ( "strings" "time" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" ) @@ -127,14 +127,15 @@ type PostgresSpec struct { // load balancers' source ranges are the same for master and replica services AllowedSourceRanges []string `json:"allowedSourceRanges"` - NumberOfInstances int32 `json:"numberOfInstances"` - Users map[string]UserFlags `json:"users"` - MaintenanceWindows []MaintenanceWindow `json:"maintenanceWindows,omitempty"` - Clone CloneDescription `json:"clone"` - ClusterName string `json:"-"` - Databases map[string]string `json:"databases,omitempty"` - Tolerations []v1.Toleration `json:"tolerations,omitempty"` - Sidecars []Sidecar `json:"sidecars,omitempty"` + NumberOfInstances int32 `json:"numberOfInstances"` + Users map[string]UserFlags `json:"users"` + MaintenanceWindows 
[]MaintenanceWindow `json:"maintenanceWindows,omitempty"` + Clone CloneDescription `json:"clone"` + ClusterName string `json:"-"` + Databases map[string]string `json:"databases,omitempty"` + Tolerations []v1.Toleration `json:"tolerations,omitempty"` + Sidecars []Sidecar `json:"sidecars,omitempty"` + PodPriorityClassName string `json:"pod_priority_class_name,omitempty"` } // PostgresqlList defines a list of PostgreSQL clusters. @@ -182,7 +183,6 @@ func (p *Postgresql) DeepCopyObject() runtime.Object { return nil } - func parseTime(s string) (time.Time, error) { parts := strings.Split(s, ":") if len(parts) != 2 { diff --git a/pkg/spec/types.go b/pkg/spec/types.go index fc0dfe237..7474ca952 100644 --- a/pkg/spec/types.go +++ b/pkg/spec/types.go @@ -11,10 +11,10 @@ import ( "time" "github.com/Sirupsen/logrus" - "k8s.io/apimachinery/pkg/types" - "k8s.io/api/core/v1" "k8s.io/api/apps/v1beta1" + "k8s.io/api/core/v1" policyv1beta1 "k8s.io/api/policy/v1beta1" + "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/rest" ) diff --git a/pkg/util/config/config.go b/pkg/util/config/config.go index 683f98a17..34d282a96 100644 --- a/pkg/util/config/config.go +++ b/pkg/util/config/config.go @@ -25,6 +25,7 @@ type Resources struct { PodLabelWaitTimeout time.Duration `name:"pod_label_wait_timeout" default:"10m"` PodDeletionWaitTimeout time.Duration `name:"pod_deletion_wait_timeout" default:"10m"` PodTerminateGracePeriod time.Duration `name:"pod_terminate_grace_period" default:"5m"` + PodPriorityClassName string `name:"pod_priority_class_name"` ClusterLabels map[string]string `name:"cluster_labels" default:"application:spilo"` ClusterNameLabel string `name:"cluster_name_label" default:"cluster-name"` PodRoleLabel string `name:"pod_role_label" default:"spilo-role"` diff --git a/pkg/util/config/crd_config.go b/pkg/util/config/crd_config.go index b8b2e13c3..10817f1e3 100644 --- a/pkg/util/config/crd_config.go +++ b/pkg/util/config/crd_config.go @@ -49,6 +49,7 @@ type 
KubernetesMetaConfiguration struct { PodToleration map[string]string `json:"toleration,omitempty"` // TODO: use namespacedname PodEnvironmentConfigMap string `json:"pod_environment_configmap,omitempty"` + PodPriorityClassName string `json:"pod_priority_class_name,omitempty"` } type PostgresPodResourcesDefaults struct { From 50f079c6330ae6e151cb6b3397a4be1d8371a8cf Mon Sep 17 00:00:00 2001 From: zerg-junior Date: Mon, 6 Aug 2018 08:59:00 +0200 Subject: [PATCH 17/30] [WIP] Draft codeowners, update maintainers (#358) * Draft codeowners, update maintainers * Minor reformatting --- CODEOWNERS | 2 ++ MAINTAINERS | 3 ++- 2 files changed, 4 insertions(+), 1 deletion(-) create mode 100644 CODEOWNERS diff --git a/CODEOWNERS b/CODEOWNERS new file mode 100644 index 000000000..c7016c764 --- /dev/null +++ b/CODEOWNERS @@ -0,0 +1,2 @@ +# global owners +* @alexeyklyukin @erthalion @zerg-junior diff --git a/MAINTAINERS b/MAINTAINERS index c713d6570..4f4ca87ba 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -1,2 +1,3 @@ -Murat Kabilov Oleksii Kliukin +Dmitrii Dolgov +Sergey Dudoladov From b06186eb417d3dbada4d52ce878a5dba0477d3d8 Mon Sep 17 00:00:00 2001 From: Oleksii Kliukin Date: Mon, 6 Aug 2018 12:09:19 +0200 Subject: [PATCH 18/30] Linter-induced code refactoring, run round 2. (#360) Run more linters in the gometalinter, i.e. deadcode, megacheck, nakedret, dup. More consistent code formatting, remove two dead functions, eliminate a bunch of naked returns, refactor a few functions to avoid code duplication. 
--- pkg/apiserver/apiserver.go | 12 ++++----- pkg/cluster/cluster.go | 20 +++++++------- pkg/cluster/k8sres.go | 28 ++++++++++---------- pkg/cluster/pg.go | 24 ++++++++--------- pkg/cluster/pod.go | 7 +++-- pkg/cluster/resources.go | 24 ----------------- pkg/cluster/sync.go | 50 +++++++++++++++-------------------- pkg/controller/controller.go | 6 ++--- pkg/controller/pod.go | 45 +++++++++++-------------------- pkg/controller/postgresql.go | 6 +++-- pkg/spec/postgresql.go | 16 ++++++----- pkg/util/config/crd_config.go | 17 ++++++------ pkg/util/patroni/patroni.go | 14 ++++++++-- pkg/util/teams/teams.go | 29 +++++++------------- pkg/util/users/users.go | 38 ++++++++++++-------------- 15 files changed, 144 insertions(+), 192 deletions(-) diff --git a/pkg/apiserver/apiserver.go b/pkg/apiserver/apiserver.go index 82eb7ba9c..ac26a9114 100644 --- a/pkg/apiserver/apiserver.go +++ b/pkg/apiserver/apiserver.go @@ -157,7 +157,7 @@ func (s *Server) clusters(w http.ResponseWriter, req *http.Request) { ) if matches := util.FindNamedStringSubmatch(clusterStatusURL, req.URL.Path); matches != nil { - namespace, _ := matches["namespace"] + namespace := matches["namespace"] resp, err = s.controller.ClusterStatus(matches["team"], namespace, matches["cluster"]) } else if matches := util.FindNamedStringSubmatch(teamURL, req.URL.Path); matches != nil { teamClusters := s.controller.TeamClusterList() @@ -174,10 +174,10 @@ func (s *Server) clusters(w http.ResponseWriter, req *http.Request) { resp, err = clusterNames, nil } else if matches := util.FindNamedStringSubmatch(clusterLogsURL, req.URL.Path); matches != nil { - namespace, _ := matches["namespace"] + namespace := matches["namespace"] resp, err = s.controller.ClusterLogs(matches["team"], namespace, matches["cluster"]) } else if matches := util.FindNamedStringSubmatch(clusterHistoryURL, req.URL.Path); matches != nil { - namespace, _ := matches["namespace"] + namespace := matches["namespace"] resp, err = 
s.controller.ClusterHistory(matches["team"], namespace, matches["cluster"]) } else if req.URL.Path == clustersURL { clusterNamesPerTeam := make(map[string][]string) @@ -194,8 +194,8 @@ func (s *Server) clusters(w http.ResponseWriter, req *http.Request) { s.respond(resp, err, w) } -func mustConvertToUint32(s string) uint32{ - result, err := strconv.Atoi(s); +func mustConvertToUint32(s string) uint32 { + result, err := strconv.Atoi(s) if err != nil { panic(fmt.Errorf("mustConvertToUint32 called for %s: %v", s, err)) } @@ -244,8 +244,6 @@ func (s *Server) databases(w http.ResponseWriter, req *http.Request) { databaseNamesPerCluster := s.controller.ClusterDatabasesMap() s.respond(databaseNamesPerCluster, nil, w) - return - } func (s *Server) allQueues(w http.ResponseWriter, r *http.Request) { diff --git a/pkg/cluster/cluster.go b/pkg/cluster/cluster.go index 9ac43e513..8414da8d1 100644 --- a/pkg/cluster/cluster.go +++ b/pkg/cluster/cluster.go @@ -450,8 +450,8 @@ func (c *Cluster) compareContainers(setA, setB *v1beta1.StatefulSet) (bool, []st return needsRollUpdate, reasons } -func compareResources(a *v1.ResourceRequirements, b *v1.ResourceRequirements) (equal bool) { - equal = true +func compareResources(a *v1.ResourceRequirements, b *v1.ResourceRequirements) bool { + equal := true if a != nil { equal = compareResoucesAssumeFirstNotNil(a, b) } @@ -459,7 +459,7 @@ func compareResources(a *v1.ResourceRequirements, b *v1.ResourceRequirements) (e equal = compareResoucesAssumeFirstNotNil(b, a) } - return + return equal } func compareResoucesAssumeFirstNotNil(a *v1.ResourceRequirements, b *v1.ResourceRequirements) bool { @@ -786,7 +786,8 @@ func (c *Cluster) initInfrastructureRoles() error { } // resolves naming conflicts between existing and new roles by chosing either of them. 
-func (c *Cluster) resolveNameConflict(currentRole, newRole *spec.PgUser) (result spec.PgUser) { +func (c *Cluster) resolveNameConflict(currentRole, newRole *spec.PgUser) spec.PgUser { + var result spec.PgUser if newRole.Origin >= currentRole.Origin { result = *newRole } else { @@ -794,7 +795,7 @@ func (c *Cluster) resolveNameConflict(currentRole, newRole *spec.PgUser) (result } c.logger.Debugf("resolved a conflict of role %q between %s and %s to %s", newRole.Name, newRole.Origin, currentRole.Origin, result.Origin) - return + return result } func (c *Cluster) shouldAvoidProtectedOrSystemRole(username, purpose string) bool { @@ -838,8 +839,9 @@ func (c *Cluster) GetStatus() *spec.ClusterStatus { } // Switchover does a switchover (via Patroni) to a candidate pod -func (c *Cluster) Switchover(curMaster *v1.Pod, candidate spec.NamespacedName) (err error) { +func (c *Cluster) Switchover(curMaster *v1.Pod, candidate spec.NamespacedName) error { + var err error c.logger.Debugf("failing over from %q to %q", curMaster.Name, candidate) var wg sync.WaitGroup @@ -858,8 +860,8 @@ func (c *Cluster) Switchover(curMaster *v1.Pod, candidate spec.NamespacedName) ( select { case <-stopCh: - case podLabelErr <- func() (err error) { - _, err = c.waitForPodLabel(ch, stopCh, &role) + case podLabelErr <- func() (err2 error) { + _, err2 = c.waitForPodLabel(ch, stopCh, &role) return }(): } @@ -882,7 +884,7 @@ func (c *Cluster) Switchover(curMaster *v1.Pod, candidate spec.NamespacedName) ( // close the label waiting channel no sooner than the waiting goroutine terminates. 
close(podLabelErr) - return + return err } diff --git a/pkg/cluster/k8sres.go b/pkg/cluster/k8sres.go index e34c81138..a71fa5e35 100644 --- a/pkg/cluster/k8sres.go +++ b/pkg/cluster/k8sres.go @@ -7,13 +7,13 @@ import ( "github.com/Sirupsen/logrus" + "k8s.io/api/apps/v1beta1" + "k8s.io/api/core/v1" + policybeta1 "k8s.io/api/policy/v1beta1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/intstr" - "k8s.io/api/core/v1" - "k8s.io/api/apps/v1beta1" - policybeta1 "k8s.io/api/policy/v1beta1" "github.com/zalando-incubator/postgres-operator/pkg/spec" "github.com/zalando-incubator/postgres-operator/pkg/util" @@ -90,7 +90,7 @@ func (c *Cluster) makeDefaultResources() spec.Resources { defaultRequests := spec.ResourceDescription{CPU: config.DefaultCPURequest, Memory: config.DefaultMemoryRequest} defaultLimits := spec.ResourceDescription{CPU: config.DefaultCPULimit, Memory: config.DefaultMemoryLimit} - return spec.Resources{ResourceRequest:defaultRequests, ResourceLimits:defaultLimits} + return spec.Resources{ResourceRequest: defaultRequests, ResourceLimits: defaultLimits} } func generateResourceRequirements(resources spec.Resources, defaultResources spec.Resources) (*v1.ResourceRequirements, error) { @@ -366,7 +366,7 @@ func generateSidecarContainers(sidecars []spec.Sidecar, volumeMounts []v1.VolumeMount, defaultResources spec.Resources, superUserName string, credentialsSecretName string, logger *logrus.Entry) ([]v1.Container, error) { - if sidecars != nil && len(sidecars) > 0 { + if len(sidecars) > 0 { result := make([]v1.Container, 0) for index, sidecar := range sidecars { @@ -699,7 +699,7 @@ func (c *Cluster) generateStatefulSet(spec *spec.PostgresSpec) (*v1beta1.Statefu // generate sidecar containers if sidecarContainers, err = generateSidecarContainers(sideCars, volumeMounts, defaultResources, c.OpConfig.SuperUsername, c.credentialSecretName(c.OpConfig.SuperUsername), 
c.logger); err != nil { - return nil, fmt.Errorf("could not generate sidecar containers: %v", err) + return nil, fmt.Errorf("could not generate sidecar containers: %v", err) } tolerationSpec := tolerations(&spec.Tolerations, c.OpConfig.PodToleration) @@ -716,7 +716,7 @@ func (c *Cluster) generateStatefulSet(spec *spec.PostgresSpec) (*v1beta1.Statefu int64(c.OpConfig.PodTerminateGracePeriod.Seconds()), c.OpConfig.PodServiceAccountName, c.OpConfig.KubeIAMRole, - effectivePodPriorityClassName); err != nil{ + effectivePodPriorityClassName); err != nil { return nil, fmt.Errorf("could not generate pod template: %v", err) } @@ -726,7 +726,7 @@ func (c *Cluster) generateStatefulSet(spec *spec.PostgresSpec) (*v1beta1.Statefu if volumeClaimTemplate, err = generatePersistentVolumeClaimTemplate(spec.Volume.Size, spec.Volume.StorageClass); err != nil { - return nil, fmt.Errorf("could not generate volume claim template: %v", err) + return nil, fmt.Errorf("could not generate volume claim template: %v", err) } numberOfInstances := c.getNumberOfInstances(spec) @@ -804,11 +804,11 @@ func (c *Cluster) mergeSidecars(sidecars []spec.Sidecar) []spec.Sidecar { return result } -func (c *Cluster) getNumberOfInstances(spec *spec.PostgresSpec) (newcur int32) { +func (c *Cluster) getNumberOfInstances(spec *spec.PostgresSpec) int32 { min := c.OpConfig.MinInstances max := c.OpConfig.MaxInstances cur := spec.NumberOfInstances - newcur = cur + newcur := cur if max >= 0 && newcur > max { newcur = max @@ -820,7 +820,7 @@ func (c *Cluster) getNumberOfInstances(spec *spec.PostgresSpec) (newcur int32) { c.logger.Infof("adjusted number of instances from %d to %d (min: %d, max: %d)", cur, newcur, min, max) } - return + return newcur } func generatePersistentVolumeClaimTemplate(volumeSize, volumeStorageClass string) (*v1.PersistentVolumeClaim, error) { @@ -860,8 +860,8 @@ func generatePersistentVolumeClaimTemplate(volumeSize, volumeStorageClass string return volumeClaim, nil } -func (c *Cluster) 
generateUserSecrets() (secrets map[string]*v1.Secret) { - secrets = make(map[string]*v1.Secret, len(c.pgUsers)) +func (c *Cluster) generateUserSecrets() map[string]*v1.Secret { + secrets := make(map[string]*v1.Secret, len(c.pgUsers)) namespace := c.Namespace for username, pgUser := range c.pgUsers { //Skip users with no password i.e. human users (they'll be authenticated using pam) @@ -878,7 +878,7 @@ func (c *Cluster) generateUserSecrets() (secrets map[string]*v1.Secret) { } } - return + return secrets } func (c *Cluster) generateSingleUserSecret(namespace string, pgUser spec.PgUser) *v1.Secret { diff --git a/pkg/cluster/pg.go b/pkg/cluster/pg.go index f570ac81c..9441a4933 100644 --- a/pkg/cluster/pg.go +++ b/pkg/cluster/pg.go @@ -183,32 +183,30 @@ func (c *Cluster) getDatabases() (dbs map[string]string, err error) { dbs[datname] = owner } - return + return dbs, err } // executeCreateDatabase creates new database with the given owner. // The caller is responsible for openinging and closing the database connection. func (c *Cluster) executeCreateDatabase(datname, owner string) error { - if !c.databaseNameOwnerValid(datname, owner) { - return nil - } - c.logger.Infof("creating database %q with owner %q", datname, owner) - - if _, err := c.pgDb.Exec(fmt.Sprintf(createDatabaseSQL, datname, owner)); err != nil { - return fmt.Errorf("could not execute create database: %v", err) - } - return nil + return c.execCreateOrAlterDatabase(datname, owner, createDatabaseSQL, + "creating database", "create database") } // executeCreateDatabase changes the owner of the given database. // The caller is responsible for openinging and closing the database connection. 
func (c *Cluster) executeAlterDatabaseOwner(datname string, owner string) error { + return c.execCreateOrAlterDatabase(datname, owner, alterDatabaseOwnerSQL, + "changing owner for database", "alter database owner") +} + +func (c *Cluster) execCreateOrAlterDatabase(datname, owner, statement, doing, operation string) error { if !c.databaseNameOwnerValid(datname, owner) { return nil } - c.logger.Infof("changing database %q owner to %q", datname, owner) - if _, err := c.pgDb.Exec(fmt.Sprintf(alterDatabaseOwnerSQL, datname, owner)); err != nil { - return fmt.Errorf("could not execute alter database owner: %v", err) + c.logger.Infof("%s %q owner %q", doing, datname, owner) + if _, err := c.pgDb.Exec(fmt.Sprintf(statement, datname, owner)); err != nil { + return fmt.Errorf("could not execute %s: %v", operation, err) } return nil } diff --git a/pkg/cluster/pod.go b/pkg/cluster/pod.go index de6770faa..beb433fa0 100644 --- a/pkg/cluster/pod.go +++ b/pkg/cluster/pod.go @@ -183,8 +183,8 @@ func (c *Cluster) masterCandidate(oldNodeName string) (*v1.Pod, error) { func (c *Cluster) MigrateMasterPod(podName spec.NamespacedName) error { var ( masterCandidatePod *v1.Pod - err error - eol bool + err error + eol bool ) oldMaster, err := c.KubeClient.Pods(podName.Namespace).Get(podName.Name, metav1.GetOptions{}) @@ -212,7 +212,7 @@ func (c *Cluster) MigrateMasterPod(podName spec.NamespacedName) error { var sset *v1beta1.StatefulSet if sset, err = c.KubeClient.StatefulSets(c.Namespace).Get(c.statefulSetName(), metav1.GetOptions{}); err != nil { - return fmt.Errorf("could not retrieve cluster statefulset: %v", err) + return fmt.Errorf("could not retrieve cluster statefulset: %v", err) } c.Statefulset = sset } @@ -225,7 +225,6 @@ func (c *Cluster) MigrateMasterPod(podName spec.NamespacedName) error { c.logger.Warningf("single master pod for cluster %q, migration will cause longer downtime of the master instance", c.clusterName()) } - // there are two cases for each postgres cluster that 
has its master pod on the node to migrate from: // - the cluster has some replicas - migrate one of those if necessary and failover to it // - there are no replicas - just terminate the master and wait until it respawns diff --git a/pkg/cluster/resources.go b/pkg/cluster/resources.go index f15c4f40a..764dc22e5 100644 --- a/pkg/cluster/resources.go +++ b/pkg/cluster/resources.go @@ -633,27 +633,3 @@ func (c *Cluster) GetStatefulSet() *v1beta1.StatefulSet { func (c *Cluster) GetPodDisruptionBudget() *policybeta1.PodDisruptionBudget { return c.PodDisruptionBudget } - -func (c *Cluster) createDatabases() error { - c.setProcessName("creating databases") - - if len(c.Spec.Databases) == 0 { - return nil - } - - if err := c.initDbConn(); err != nil { - return fmt.Errorf("could not init database connection") - } - defer func() { - if err := c.closeDbConn(); err != nil { - c.logger.Errorf("could not close database connection: %v", err) - } - }() - - for datname, owner := range c.Spec.Databases { - if err := c.executeCreateDatabase(datname, owner); err != nil { - return err - } - } - return nil -} diff --git a/pkg/cluster/sync.go b/pkg/cluster/sync.go index 3b5c3b3d3..ad42eeac5 100644 --- a/pkg/cluster/sync.go +++ b/pkg/cluster/sync.go @@ -2,11 +2,9 @@ package cluster import ( "fmt" - "reflect" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - policybeta1 "k8s.io/api/policy/v1beta1" "k8s.io/api/core/v1" + policybeta1 "k8s.io/api/policy/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "github.com/zalando-incubator/postgres-operator/pkg/spec" "github.com/zalando-incubator/postgres-operator/pkg/util" @@ -17,7 +15,8 @@ import ( // Sync syncs the cluster, making sure the actual Kubernetes objects correspond to what is defined in the manifest. // Unlike the update, sync does not error out if some objects do not exist and takes care of creating them. 
-func (c *Cluster) Sync(newSpec *spec.Postgresql) (err error) { +func (c *Cluster) Sync(newSpec *spec.Postgresql) error { + var err error c.mu.Lock() defer c.mu.Unlock() @@ -34,7 +33,7 @@ func (c *Cluster) Sync(newSpec *spec.Postgresql) (err error) { if err = c.initUsers(); err != nil { err = fmt.Errorf("could not init users: %v", err) - return + return err } c.logger.Debugf("syncing secrets") @@ -42,13 +41,13 @@ func (c *Cluster) Sync(newSpec *spec.Postgresql) (err error) { //TODO: mind the secrets of the deleted/new users if err = c.syncSecrets(); err != nil { err = fmt.Errorf("could not sync secrets: %v", err) - return + return err } c.logger.Debugf("syncing services") if err = c.syncServices(); err != nil { err = fmt.Errorf("could not sync services: %v", err) - return + return err } // potentially enlarge volumes before changing the statefulset. By doing that @@ -60,14 +59,14 @@ func (c *Cluster) Sync(newSpec *spec.Postgresql) (err error) { c.logger.Debugf("syncing persistent volumes") if err = c.syncVolumes(); err != nil { err = fmt.Errorf("could not sync persistent volumes: %v", err) - return + return err } c.logger.Debugf("syncing statefulsets") if err = c.syncStatefulSet(); err != nil { if !k8sutil.ResourceAlreadyExists(err) { err = fmt.Errorf("could not sync statefulsets: %v", err) - return + return err } } @@ -76,22 +75,22 @@ func (c *Cluster) Sync(newSpec *spec.Postgresql) (err error) { c.logger.Debugf("syncing roles") if err = c.syncRoles(); err != nil { err = fmt.Errorf("could not sync roles: %v", err) - return + return err } c.logger.Debugf("syncing databases") if err = c.syncDatabases(); err != nil { err = fmt.Errorf("could not sync databases: %v", err) - return + return err } } c.logger.Debug("syncing pod disruption budgets") if err = c.syncPodDisruptionBudget(false); err != nil { err = fmt.Errorf("could not sync pod disruption budget: %v", err) - return + return err } - return + return err } func (c *Cluster) syncServices() error { @@ -153,7 +152,7 
@@ func (c *Cluster) syncService(role PostgresRole) error { func (c *Cluster) syncEndpoint(role PostgresRole) error { var ( - ep *v1.Endpoints + ep *v1.Endpoints err error ) c.setProcessName("syncing %s endpoint", role) @@ -187,8 +186,8 @@ func (c *Cluster) syncEndpoint(role PostgresRole) error { func (c *Cluster) syncPodDisruptionBudget(isUpdate bool) error { var ( - pdb *policybeta1.PodDisruptionBudget - err error + pdb *policybeta1.PodDisruptionBudget + err error ) if pdb, err = c.KubeClient.PodDisruptionBudgets(c.Namespace).Get(c.podDisruptionBudgetName(), metav1.GetOptions{}); err == nil { c.PodDisruptionBudget = pdb @@ -257,7 +256,9 @@ func (c *Cluster) syncStatefulSet() error { podsRollingUpdateRequired = (len(pods) > 0) if podsRollingUpdateRequired { c.logger.Warningf("found pods from the previous statefulset: trigger rolling update") - c.applyRollingUpdateFlagforStatefulSet(podsRollingUpdateRequired) + if err := c.applyRollingUpdateFlagforStatefulSet(podsRollingUpdateRequired); err != nil { + return fmt.Errorf("could not set rolling update flag for the statefulset: %v", err) + } } c.logger.Infof("created missing statefulset %q", util.NameFromMeta(sset.ObjectMeta)) @@ -318,7 +319,7 @@ func (c *Cluster) syncStatefulSet() error { // (like max_connections) has changed and if necessary sets it via the Patroni API func (c *Cluster) checkAndSetGlobalPostgreSQLConfiguration() error { var ( - err error + err error pods []v1.Pod ) @@ -394,7 +395,7 @@ func (c *Cluster) syncSecrets() error { // if this secret belongs to the infrastructure role and the password has changed - replace it in the secret if pwdUser.Password != string(secret.Data["password"]) && pwdUser.Origin == spec.RoleOriginInfrastructure { c.logger.Debugf("updating the secret %q from the infrastructure roles", secretSpec.Name) - if secret, err = c.KubeClient.Secrets(secretSpec.Namespace).Update(secretSpec); err != nil { + if _, err = c.KubeClient.Secrets(secretSpec.Namespace).Update(secretSpec); err != 
nil { return fmt.Errorf("could not update infrastructure role secret for role %q: %v", secretUsername, err) } } else { @@ -469,15 +470,6 @@ func (c *Cluster) syncVolumes() error { return nil } -func (c *Cluster) samePDBWith(pdb *policybeta1.PodDisruptionBudget) (match bool, reason string) { - match = reflect.DeepEqual(pdb.Spec, c.PodDisruptionBudget.Spec) - if !match { - reason = "new service spec doesn't match the current one" - } - - return -} - func (c *Cluster) syncDatabases() error { c.setProcessName("syncing databases") diff --git a/pkg/controller/controller.go b/pkg/controller/controller.go index 484485b1b..e48375dfd 100644 --- a/pkg/controller/controller.go +++ b/pkg/controller/controller.go @@ -169,7 +169,7 @@ func (c *Controller) initPodServiceAccount() { func (c *Controller) initRoleBinding() { // service account on its own lacks any rights starting with k8s v1.8 - // operator binds it to the cluster role with sufficient priviliges + // operator binds it to the cluster role with sufficient privileges // we assume the role is created by the k8s administrator if c.opConfig.PodServiceAccountRoleBindingDefinition == "" { c.opConfig.PodServiceAccountRoleBindingDefinition = ` @@ -199,9 +199,9 @@ func (c *Controller) initRoleBinding() { switch { case err != nil: - panic(fmt.Errorf("Unable to parse the definiton of the role binding for the pod service account definiton from the operator config map: %v", err)) + panic(fmt.Errorf("Unable to parse the definition of the role binding for the pod service account definition from the operator config map: %v", err)) case groupVersionKind.Kind != "RoleBinding": - panic(fmt.Errorf("role binding definiton in the operator config map defines another type of resource: %v", groupVersionKind.Kind)) + panic(fmt.Errorf("role binding definition in the operator config map defines another type of resource: %v", groupVersionKind.Kind)) default: c.PodServiceAccountRoleBinding = obj.(*rbacv1beta1.RoleBinding) 
c.PodServiceAccountRoleBinding.Namespace = "" diff --git a/pkg/controller/pod.go b/pkg/controller/pod.go index c801f6b21..d3634ff27 100644 --- a/pkg/controller/pod.go +++ b/pkg/controller/pod.go @@ -40,19 +40,9 @@ func (c *Controller) dispatchPodEvent(clusterName spec.NamespacedName, event spe } func (c *Controller) podAdd(obj interface{}) { - pod, ok := obj.(*v1.Pod) - if !ok { - return + if pod, ok := obj.(*v1.Pod); ok { + c.preparePodEventForDispatch(pod, nil, spec.EventAdd) } - - podEvent := spec.PodEvent{ - PodName: util.NameFromMeta(pod.ObjectMeta), - CurPod: pod, - EventType: spec.EventAdd, - ResourceVersion: pod.ResourceVersion, - } - - c.dispatchPodEvent(c.podClusterName(pod), podEvent) } func (c *Controller) podUpdate(prev, cur interface{}) { @@ -66,29 +56,24 @@ func (c *Controller) podUpdate(prev, cur interface{}) { return } + c.preparePodEventForDispatch(curPod, prevPod, spec.EventUpdate) +} + +func (c *Controller) podDelete(obj interface{}) { + + if pod, ok := obj.(*v1.Pod); ok { + c.preparePodEventForDispatch(pod, nil, spec.EventDelete) + } +} + +func (c *Controller) preparePodEventForDispatch(curPod, prevPod *v1.Pod, event spec.EventType) { podEvent := spec.PodEvent{ PodName: util.NameFromMeta(curPod.ObjectMeta), - PrevPod: prevPod, CurPod: curPod, - EventType: spec.EventUpdate, + PrevPod: prevPod, + EventType: event, ResourceVersion: curPod.ResourceVersion, } c.dispatchPodEvent(c.podClusterName(curPod), podEvent) } - -func (c *Controller) podDelete(obj interface{}) { - pod, ok := obj.(*v1.Pod) - if !ok { - return - } - - podEvent := spec.PodEvent{ - PodName: util.NameFromMeta(pod.ObjectMeta), - CurPod: pod, - EventType: spec.EventDelete, - ResourceVersion: pod.ResourceVersion, - } - - c.dispatchPodEvent(c.podClusterName(pod), podEvent) -} diff --git a/pkg/controller/postgresql.go b/pkg/controller/postgresql.go index f4d56ad6d..58a7afcb2 100644 --- a/pkg/controller/postgresql.go +++ b/pkg/controller/postgresql.go @@ -10,6 +10,7 @@ import ( "time" 
"github.com/Sirupsen/logrus" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" @@ -130,7 +131,9 @@ type crdDecoder struct { } func (d *crdDecoder) Close() { - d.close() + if err := d.close(); err != nil { + fmt.Printf("error when closing CRDDecorer: %v\n", err) + } } func (d *crdDecoder) Decode() (action watch.EventType, object runtime.Object, err error) { @@ -526,7 +529,6 @@ func (c *Controller) submitRBACCredentials(event spec.ClusterEvent) error { if err := c.createRoleBindings(namespace); err != nil { return fmt.Errorf("could not create role binding %v : %v", c.PodServiceAccountRoleBinding.Name, err) } - c.namespacesWithDefinedRBAC.Store(namespace, true) return nil } diff --git a/pkg/spec/postgresql.go b/pkg/spec/postgresql.go index d2ce1efa2..b56b74d23 100644 --- a/pkg/spec/postgresql.go +++ b/pkg/spec/postgresql.go @@ -156,7 +156,9 @@ var ( // will not contain any private fields not-reachable to deepcopy. This should be ok, // since Error is never read from a Kubernetes object. func (p *Postgresql) Clone() *Postgresql { - if p == nil {return nil} + if p == nil { + return nil + } c := deepcopy.Copy(p).(*Postgresql) c.Error = nil return c @@ -166,11 +168,12 @@ func (p *Postgresql) DeepCopyInto(out *Postgresql) { if p != nil { *out = deepcopy.Copy(*p).(Postgresql) } - return } func (p *Postgresql) DeepCopy() *Postgresql { - if p == nil { return nil } + if p == nil { + return nil + } out := new(Postgresql) p.DeepCopyInto(out) return out @@ -308,7 +311,6 @@ func validateCloneClusterDescription(clone *CloneDescription) error { type postgresqlListCopy PostgresqlList type postgresqlCopy Postgresql - // UnmarshalJSON converts a JSON into the PostgreSQL object. 
func (p *Postgresql) UnmarshalJSON(data []byte) error { var tmp postgresqlCopy @@ -359,7 +361,9 @@ func (pl *PostgresqlList) UnmarshalJSON(data []byte) error { } func (pl *PostgresqlList) DeepCopy() *PostgresqlList { - if pl == nil { return nil } + if pl == nil { + return nil + } out := new(PostgresqlList) pl.DeepCopyInto(out) return out @@ -369,7 +373,6 @@ func (pl *PostgresqlList) DeepCopyInto(out *PostgresqlList) { if pl != nil { *out = deepcopy.Copy(*pl).(PostgresqlList) } - return } func (pl *PostgresqlList) DeepCopyObject() runtime.Object { @@ -379,7 +382,6 @@ func (pl *PostgresqlList) DeepCopyObject() runtime.Object { return nil } - func (status PostgresStatus) Success() bool { return status != ClusterStatusAddFailed && status != ClusterStatusUpdateFailed && diff --git a/pkg/util/config/crd_config.go b/pkg/util/config/crd_config.go index 10817f1e3..2a9090514 100644 --- a/pkg/util/config/crd_config.go +++ b/pkg/util/config/crd_config.go @@ -3,11 +3,11 @@ package config import ( "encoding/json" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "github.com/zalando-incubator/postgres-operator/pkg/spec" - "github.com/mohae/deepcopy" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" + + "github.com/mohae/deepcopy" ) type OperatorConfiguration struct { @@ -159,11 +159,12 @@ func (opc *OperatorConfiguration) DeepCopyInto(out *OperatorConfiguration) { if opc != nil { *out = deepcopy.Copy(*opc).(OperatorConfiguration) } - return } func (opc *OperatorConfiguration) DeepCopy() *OperatorConfiguration { - if opc == nil { return nil } + if opc == nil { + return nil + } out := new(OperatorConfiguration) opc.DeepCopyInto(out) return out @@ -189,11 +190,12 @@ func (opcl *OperatorConfigurationList) DeepCopyInto(out *OperatorConfigurationLi if opcl != nil { *out = deepcopy.Copy(*opcl).(OperatorConfigurationList) } - return } func (opcl *OperatorConfigurationList) DeepCopy() *OperatorConfigurationList { - if opcl == nil { return nil } + if opcl == 
nil { + return nil + } out := new(OperatorConfigurationList) opcl.DeepCopyInto(out) return out @@ -205,4 +207,3 @@ func (opcl *OperatorConfigurationList) DeepCopyObject() runtime.Object { } return nil } - diff --git a/pkg/util/patroni/patroni.go b/pkg/util/patroni/patroni.go index f2de75ed3..a16fd2dfb 100644 --- a/pkg/util/patroni/patroni.go +++ b/pkg/util/patroni/patroni.go @@ -47,7 +47,7 @@ func apiURL(masterPod *v1.Pod) string { return fmt.Sprintf("http://%s:%d", masterPod.Status.PodIP, apiPort) } -func (p *Patroni) httpPostOrPatch(method string, url string, body *bytes.Buffer) error { +func (p *Patroni) httpPostOrPatch(method string, url string, body *bytes.Buffer) (err error) { request, err := http.NewRequest(method, url, body) if err != nil { return fmt.Errorf("could not create request: %v", err) @@ -59,7 +59,16 @@ func (p *Patroni) httpPostOrPatch(method string, url string, body *bytes.Buffer) if err != nil { return fmt.Errorf("could not make request: %v", err) } - defer resp.Body.Close() + defer func() { + if err2 := resp.Body.Close(); err2 != nil { + if err != nil { + err = fmt.Errorf("could not close request: %v, prior error: %v", err2, err) + } else { + err = fmt.Errorf("could not close request: %v", err2) + } + return + } + }() if resp.StatusCode != http.StatusOK { bodyBytes, err := ioutil.ReadAll(resp.Body) @@ -83,6 +92,7 @@ func (p *Patroni) Switchover(master *v1.Pod, candidate string) error { } //TODO: add an option call /patroni to check if it is necessary to restart the server + //SetPostgresParameters sets Postgres options via Patroni patch API call. 
func (p *Patroni) SetPostgresParameters(server *v1.Pod, parameters map[string]string) error { buf := &bytes.Buffer{} diff --git a/pkg/util/teams/teams.go b/pkg/util/teams/teams.go index 8645871ba..2223d2063 100644 --- a/pkg/util/teams/teams.go +++ b/pkg/util/teams/teams.go @@ -76,17 +76,15 @@ func (t *API) TeamInfo(teamID, token string) (tm *Team, err error) { t.logger.Debugf("request url: %s", url) req, err = http.NewRequest("GET", url, nil) if err != nil { - return + return nil, err } req.Header.Add("Authorization", "Bearer "+token) - resp, err = t.httpClient.Do(req) - if err != nil { - return + if resp, err = t.httpClient.Do(req); err != nil { + return nil, err } defer func() { - closeErr := resp.Body.Close() - if closeErr != nil { + if closeErr := resp.Body.Close(); closeErr != nil { err = fmt.Errorf("error when closing response: %v", closeErr) } }() @@ -95,27 +93,20 @@ func (t *API) TeamInfo(teamID, token string) (tm *Team, err error) { d := json.NewDecoder(resp.Body) err = d.Decode(&raw) if err != nil { - err = fmt.Errorf("team API query failed with status code %d and malformed response: %v", resp.StatusCode, err) - return + return nil, fmt.Errorf("team API query failed with status code %d and malformed response: %v", resp.StatusCode, err) } if errMessage, ok := raw["error"]; ok { - err = fmt.Errorf("team API query failed with status code %d and message: '%v'", resp.StatusCode, string(errMessage)) - return + return nil, fmt.Errorf("team API query failed with status code %d and message: '%v'", resp.StatusCode, string(errMessage)) } - err = fmt.Errorf("team API query failed with status code %d", resp.StatusCode) - - return + return nil, fmt.Errorf("team API query failed with status code %d", resp.StatusCode) } tm = &Team{} d := json.NewDecoder(resp.Body) - err = d.Decode(tm) - if err != nil { - err = fmt.Errorf("could not parse team API response: %v", err) - tm = nil - return + if err = d.Decode(tm); err != nil { + return nil, fmt.Errorf("could not parse team 
API response: %v", err) } - return + return tm, nil } diff --git a/pkg/util/users/users.go b/pkg/util/users/users.go index 9d435fd53..cd76c621d 100644 --- a/pkg/util/users/users.go +++ b/pkg/util/users/users.go @@ -30,8 +30,9 @@ type DefaultUserSyncStrategy struct { // ProduceSyncRequests figures out the types of changes that need to happen with the given users. func (strategy DefaultUserSyncStrategy) ProduceSyncRequests(dbUsers spec.PgUserMap, - newUsers spec.PgUserMap) (reqs []spec.PgSyncUserRequest) { + newUsers spec.PgUserMap) []spec.PgSyncUserRequest { + var reqs []spec.PgSyncUserRequest // No existing roles are deleted or stripped of role memebership/flags for name, newUser := range newUsers { dbUser, exists := dbUsers[name] @@ -66,7 +67,7 @@ func (strategy DefaultUserSyncStrategy) ProduceSyncRequests(dbUsers spec.PgUserM } } - return + return reqs } // ExecuteSyncRequests makes actual database changes from the requests passed in its arguments. @@ -102,7 +103,7 @@ func (strategy DefaultUserSyncStrategy) alterPgUserSet(user spec.PgUser, db *sql return } -func (strategy DefaultUserSyncStrategy) createPgUser(user spec.PgUser, db *sql.DB) (err error) { +func (strategy DefaultUserSyncStrategy) createPgUser(user spec.PgUser, db *sql.DB) error { var userFlags []string var userPassword string @@ -120,16 +121,14 @@ func (strategy DefaultUserSyncStrategy) createPgUser(user spec.PgUser, db *sql.D } query := fmt.Sprintf(createUserSQL, user.Name, strings.Join(userFlags, " "), userPassword) - _, err = db.Exec(query) // TODO: Try several times - if err != nil { - err = fmt.Errorf("dB error: %v, query: %s", err, query) - return + if _, err := db.Exec(query); err != nil { // TODO: Try several times + return fmt.Errorf("dB error: %v, query: %s", err, query) } - return + return nil } -func (strategy DefaultUserSyncStrategy) alterPgUser(user spec.PgUser, db *sql.DB) (err error) { +func (strategy DefaultUserSyncStrategy) alterPgUser(user spec.PgUser, db *sql.DB) error { var 
resultStmt []string if user.Password != "" || len(user.Flags) > 0 { @@ -140,19 +139,16 @@ func (strategy DefaultUserSyncStrategy) alterPgUser(user spec.PgUser, db *sql.DB grantStmt := produceGrantStmt(user) resultStmt = append(resultStmt, grantStmt) } - if len(resultStmt) == 0 { - return nil + + if len(resultStmt) > 0 { + query := fmt.Sprintf(doBlockStmt, strings.Join(resultStmt, ";")) + + if _, err := db.Exec(query); err != nil { // TODO: Try several times + return fmt.Errorf("dB error: %v query %s", err, query) + } } - query := fmt.Sprintf(doBlockStmt, strings.Join(resultStmt, ";")) - - _, err = db.Exec(query) // TODO: Try several times - if err != nil { - err = fmt.Errorf("dB error: %v query %s", err, query) - return - } - - return + return nil } func produceAlterStmt(user spec.PgUser) string { @@ -205,7 +201,7 @@ func quoteParameterValue(name, val string) string { // containing spaces (but something more complex, like double quotes inside double quotes or spaces // in the schema name would break the parsing code in the operator.) if start == '\'' && end == '\'' { - return fmt.Sprintf("%s", val[1:len(val)-1]) + return val[1 : len(val)-1] } return val From 14050588eee7157ae67676ad05aa15ab5b84804d Mon Sep 17 00:00:00 2001 From: Oleksii Kliukin Date: Tue, 7 Aug 2018 12:31:08 +0200 Subject: [PATCH 19/30] Move to client-go 8. (#362) Not much changes, except for one function that has been deprecated. However, unless we find a way to use semantic version comparisons like '^' on a branch name, we would have to update the apimachinery, apiextensions-apiserver and code-generator dependencies manually. Also, slash a linter warning about RoleOriginUnknown being not used. 
--- glide.lock | 62 +++++++++++++++++------------------------------ glide.yaml | 8 +++--- pkg/spec/types.go | 24 ++++++++++++------ 3 files changed, 42 insertions(+), 52 deletions(-) diff --git a/glide.lock b/glide.lock index 2b51833d5..f87d10955 100644 --- a/glide.lock +++ b/glide.lock @@ -1,8 +1,8 @@ -hash: f2f7f9d5d3c6f0f370fcec00e6c4a7c8fe84c0e75579d9bf7e40f19fe837b7c2 -updated: 2018-07-25T15:45:34.017577+02:00 +hash: ff2f80192f85899fb70880aabc4851672673f8ac3be257c6d9ff46ad33db94ca +updated: 2018-08-06T15:28:34.096941+02:00 imports: - name: github.com/aws/aws-sdk-go - version: 468b9714c11f10b22e533253b35eb9c28f4be691 + version: f70339bb6af843c8ab1974381b3f4fcaee2b1a41 subpackages: - aws - aws/awserr @@ -41,22 +41,10 @@ imports: version: 449fdfce4d962303d702fec724ef0ad181c92528 subpackages: - spdy -- name: github.com/emicklei/go-restful - version: ff4f55a206334ef123e4f79bbf348980da81ca46 - subpackages: - - log - name: github.com/ghodss/yaml version: 73d445a93680fa1a78ae23a5839bad48f32ba1ee - name: github.com/go-ini/ini version: d58d458bec3cb5adec4b7ddb41131855eac0b33f -- name: github.com/go-openapi/jsonpointer - version: 46af16f9f7b149af66e5d1bd010e3574dc06de98 -- name: github.com/go-openapi/jsonreference - version: 13c6e3589ad90f49bd3e3bbe2c2cb3d7a4142272 -- name: github.com/go-openapi/spec - version: 7abd5745472fff5eb3685386d5fb8bf38683154d -- name: github.com/go-openapi/swag - version: f3f9494671f93fcff853e3c6e9e948b3eb71e590 - name: github.com/gogo/protobuf version: c0656edd0d9eab7c66d1eb0c568f9039345796f7 subpackages: @@ -65,7 +53,7 @@ imports: - name: github.com/golang/glog version: 44145f04b68cf362d9c4df2182967c2275eaefed - name: github.com/golang/protobuf - version: 1643683e1b54a9e88ad26d98f81400c8c9d9f4f9 + version: b4deda0973fb4c70b50d226b1af49f3da59f5265 subpackages: - proto - ptypes @@ -90,28 +78,18 @@ imports: version: a0d98a5f288019575c6d1f4bb1573fef2d1fcdc4 subpackages: - simplelru -- name: github.com/howeyc/gopass - version: 
bf9dde6d0d2c004a008c27aaee91170c786f6db8 - name: github.com/imdario/mergo version: 6633656539c1639d9d78127b7d47c622b5d7b6dc - name: github.com/jmespath/go-jmespath version: c2b33e8439af944379acbdd9c3a5fe0bc44bd8a5 - name: github.com/json-iterator/go version: f2b4162afba35581b6d4a50d3b8f34e33c144682 -- name: github.com/juju/ratelimit - version: 5b9ff866471762aa2ab2dced63c9fb6f53921342 - name: github.com/kr/text version: e2ffdb16a802fe2bb95e2e35ff34f0e53aeef34f - name: github.com/lib/pq version: 90697d60dd844d5ef6ff15135d0203f65d2f53b8 subpackages: - oid -- name: github.com/mailru/easyjson - version: 2f5df55504ebc322e4d52d34df6a1f5b503bf26d - subpackages: - - buffer - - jlexer - - jwriter - name: github.com/modern-go/concurrent version: bacd9c7ef1dd9b15be4a9909b8ac7a4e313eec94 - name: github.com/modern-go/reflect2 @@ -122,10 +100,6 @@ imports: version: b2aad2c9a95d14eb978f29baa6e3a5c3c20eef30 - name: github.com/peterbourgon/diskv version: 5f041e8faa004a95c88a202771f4cc3e991971e6 -- name: github.com/PuerkitoBio/purell - version: 8a290539e2e8629dbc4e6bad948158f790ec31f4 -- name: github.com/PuerkitoBio/urlesc - version: 5bd2802263f21d8788851d5305584c82a5c75d7e - name: github.com/Sirupsen/logrus version: 3e01752db0189b9157070a0e1668a620f9a85da2 - name: github.com/spf13/pflag @@ -161,12 +135,16 @@ imports: - unicode/bidi - unicode/norm - width +- name: golang.org/x/time + version: f51c12702a4d776e4c1fa9b0fabab841babae631 + subpackages: + - rate - name: gopkg.in/inf.v0 version: 3887ee99ecf07df5b447e9b00d9c0b2adaa9f3e4 - name: gopkg.in/yaml.v2 version: 5420a8b6744d3b0345ab293f6fcba19c978f1183 - name: k8s.io/api - version: 11147472b7c934c474a2c484af3c0c5210b7a3af + version: 072894a440bdee3a891dea811fe42902311cd2a3 subpackages: - admissionregistration/v1alpha1 - admissionregistration/v1beta1 @@ -192,12 +170,13 @@ imports: - rbac/v1alpha1 - rbac/v1beta1 - scheduling/v1alpha1 + - scheduling/v1beta1 - settings/v1alpha1 - storage/v1 - storage/v1alpha1 - storage/v1beta1 - name: 
k8s.io/apiextensions-apiserver - version: 913221cf6cd1c328ae50ba5f25027268f6be38cf + version: 06dfdaae5c2bd89e1243151ff65b9bf8ee050f28 subpackages: - pkg/apis/apiextensions - pkg/apis/apiextensions/v1beta1 @@ -205,7 +184,7 @@ imports: - pkg/client/clientset/clientset/scheme - pkg/client/clientset/clientset/typed/apiextensions/v1beta1 - name: k8s.io/apimachinery - version: fb40df2b502912cbe3a93aa61c2b2487f39cb42f + version: 103fd098999dc9c0c88536f5c9ad2e5da39373ae subpackages: - pkg/api/errors - pkg/api/meta @@ -213,7 +192,7 @@ imports: - pkg/apis/meta/internalversion - pkg/apis/meta/v1 - pkg/apis/meta/v1/unstructured - - pkg/apis/meta/v1alpha1 + - pkg/apis/meta/v1beta1 - pkg/conversion - pkg/conversion/queryparams - pkg/fields @@ -250,7 +229,7 @@ imports: - third_party/forked/golang/netutil - third_party/forked/golang/reflect - name: k8s.io/client-go - version: 78700dec6369ba22221b72770783300f143df150 + version: 7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65 subpackages: - discovery - kubernetes @@ -279,11 +258,16 @@ imports: - kubernetes/typed/rbac/v1alpha1 - kubernetes/typed/rbac/v1beta1 - kubernetes/typed/scheduling/v1alpha1 + - kubernetes/typed/scheduling/v1beta1 - kubernetes/typed/settings/v1alpha1 - kubernetes/typed/storage/v1 - kubernetes/typed/storage/v1alpha1 - kubernetes/typed/storage/v1beta1 + - pkg/apis/clientauthentication + - pkg/apis/clientauthentication/v1alpha1 + - pkg/apis/clientauthentication/v1beta1 - pkg/version + - plugin/pkg/client/auth/exec - rest - rest/watch - tools/auth @@ -300,16 +284,14 @@ imports: - transport/spdy - util/buffer - util/cert + - util/connrotation - util/exec - util/flowcontrol - util/homedir - util/integer + - util/retry - name: k8s.io/code-generator - version: 0ab89e584187c20cc7c1a3f30db69f3b4ab64196 + version: 6702109cc68eb6fe6350b83e14407c8d7309fd1a - name: k8s.io/gengo version: 906d99f89cd644eecf75ab547b29bf9f876f0b59 -- name: k8s.io/kube-openapi - version: 39a7bf85c140f972372c2a0d1ee40adbf0c8bfe1 - subpackages: - - 
pkg/common testImports: [] diff --git a/glide.yaml b/glide.yaml index 1b7b5b827..625a06d62 100644 --- a/glide.yaml +++ b/glide.yaml @@ -11,13 +11,13 @@ import: - package: github.com/lib/pq - package: github.com/motomux/pretty - package: k8s.io/apimachinery - version: kubernetes-1.9.9 + version: kubernetes-1.11.1 - package: k8s.io/apiextensions-apiserver - version: kubernetes-1.9.9 + version: kubernetes-1.11.1 - package: k8s.io/client-go - version: ^6.0.0 + version: ^8.0.0 - package: k8s.io/code-generator - version: kubernetes-1.9.9 + version: kubernetes-1.11.1 - package: k8s.io/gengo - package: gopkg.in/yaml.v2 - package: github.com/mohae/deepcopy diff --git a/pkg/spec/types.go b/pkg/spec/types.go index 7474ca952..8e11f34ac 100644 --- a/pkg/spec/types.go +++ b/pkg/spec/types.go @@ -205,18 +205,23 @@ func (n *NamespacedName) UnmarshalJSON(data []byte) error { // DecodeWorker separates the decode logic to (unit) test // from obtaining the operator namespace that depends on k8s mounting files at runtime func (n *NamespacedName) DecodeWorker(value, operatorNamespace string) error { - name := types.NewNamespacedNameFromString(value) + var ( + name types.NamespacedName + ) - if strings.Trim(value, string(types.Separator)) != "" && name == (types.NamespacedName{}) { - name.Name = value - name.Namespace = operatorNamespace - } else if name.Namespace == "" { - name.Namespace = operatorNamespace + result := strings.SplitN(value, string(types.Separator), 2) + if len(result) < 2 { + name.Name = result[0] + } else { + name.Name = strings.TrimLeft(result[1], string(types.Separator)) + name.Namespace = result[0] } - if name.Name == "" { return fmt.Errorf("incorrect namespaced name: %v", value) } + if name.Namespace == "" { + name.Namespace = operatorNamespace + } *n = NamespacedName(name) @@ -225,6 +230,8 @@ func (n *NamespacedName) DecodeWorker(value, operatorNamespace string) error { func (r RoleOrigin) String() string { switch r { + case RoleOriginUnknown: + return "unknown" 
case RoleOriginManifest: return "manifest role" case RoleOriginInfrastructure: @@ -233,8 +240,9 @@ func (r RoleOrigin) String() string { return "teams API role" case RoleOriginSystem: return "system role" + default: + panic(fmt.Sprintf("bogus role origin value %d", r)) } - return "unknown" } // GetOperatorNamespace assumes serviceaccount secret is mounted by kubernetes From acf46bfa6237cfc8f0df33cb461bb3f5e9646a9e Mon Sep 17 00:00:00 2001 From: Oleksii Kliukin Date: Wed, 8 Aug 2018 10:53:08 +0200 Subject: [PATCH 20/30] Include CREATEROLE to the list of allowed flags. (#365) Previously it has been supported by the operator, but the validity check excluded it for no reason. --- pkg/cluster/util.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pkg/cluster/util.go b/pkg/cluster/util.go index e38b8ef73..853f99ec0 100644 --- a/pkg/cluster/util.go +++ b/pkg/cluster/util.go @@ -78,7 +78,8 @@ func (c *Cluster) isSystemUsername(username string) bool { func isValidFlag(flag string) bool { for _, validFlag := range []string{constants.RoleFlagSuperuser, constants.RoleFlagLogin, constants.RoleFlagCreateDB, - constants.RoleFlagInherit, constants.RoleFlagReplication, constants.RoleFlagByPassRLS} { + constants.RoleFlagInherit, constants.RoleFlagReplication, constants.RoleFlagByPassRLS, + constants.RoleFlagCreateRole} { if flag == validFlag || flag == "NO"+validFlag { return true } From 199aa6508cd94a1001e9a9353e675c2e0c772bea Mon Sep 17 00:00:00 2001 From: Oleksii Kliukin Date: Wed, 8 Aug 2018 11:00:56 +0200 Subject: [PATCH 21/30] Populate list of clusters in the controller at startup. (#364) Assign the list of clusters in the controller with the up-to-date list of Postgres manifests on Kubernetes during the startup. Node migration routines launched asynchronously to the cluster processing rely on an up-to-date list of clusters in the controller to detect clusters affected by the migration of the node and lock them when doing migration of master pods. 
Without the initial list the operator was subject to race conditions like the one described at https://github.com/zalando-incubator/postgres-operator/issues/363 Restructure the code to decouple list cluster function required by the postgresql informer from the one that emits cluster sync events. No extra work is introduced, since cluster sync already runs in a separate goroutine (clusterResync). Introduce explicit initial cluster sync at the end of acquireInitialListOfClusters instead of relying on an implicit one coming from list function of the PostgreSQL informer. Some minor refactoring. Review by @zerg-junior --- pkg/controller/controller.go | 17 +++++++--- pkg/controller/postgresql.go | 62 +++++++++++++++++++++++++++++------- pkg/util/config/util.go | 29 ++++++++--------- 3 files changed, 77 insertions(+), 31 deletions(-) diff --git a/pkg/controller/controller.go b/pkg/controller/controller.go index e48375dfd..7e340abb3 100644 --- a/pkg/controller/controller.go +++ b/pkg/controller/controller.go @@ -326,6 +326,18 @@ func (c *Controller) initSharedInformers() { func (c *Controller) Run(stopCh <-chan struct{}, wg *sync.WaitGroup) { c.initController() + // start workers reading from the events queue to prevent the initial sync from blocking on it. 
+ for i := range c.clusterEventQueues { + wg.Add(1) + c.workerLogs[uint32(i)] = ringlog.New(c.opConfig.RingLogLines) + go c.processClusterEventsQueue(i, stopCh, wg) + } + + // populate clusters before starting nodeInformer that relies on it and run the initial sync + if err := c.acquireInitialListOfClusters(); err != nil { + panic("could not acquire initial list of clusters") + } + wg.Add(5) go c.runPodInformer(stopCh, wg) go c.runPostgresqlInformer(stopCh, wg) @@ -333,11 +345,6 @@ func (c *Controller) Run(stopCh <-chan struct{}, wg *sync.WaitGroup) { go c.apiserver.Run(stopCh, wg) go c.kubeNodesInformer(stopCh, wg) - for i := range c.clusterEventQueues { - wg.Add(1) - c.workerLogs[uint32(i)] = ringlog.New(c.opConfig.RingLogLines) - go c.processClusterEventsQueue(i, stopCh, wg) - } c.logger.Info("started working in background") } diff --git a/pkg/controller/postgresql.go b/pkg/controller/postgresql.go index 58a7afcb2..4e5df42a7 100644 --- a/pkg/controller/postgresql.go +++ b/pkg/controller/postgresql.go @@ -32,7 +32,7 @@ func (c *Controller) clusterResync(stopCh <-chan struct{}, wg *sync.WaitGroup) { for { select { case <-ticker.C: - if _, err := c.clusterListFunc(metav1.ListOptions{ResourceVersion: "0"}); err != nil { + if err := c.clusterListAndSync(); err != nil { c.logger.Errorf("could not list clusters: %v", err) } case <-stopCh: @@ -41,15 +41,10 @@ func (c *Controller) clusterResync(stopCh <-chan struct{}, wg *sync.WaitGroup) { } } -// TODO: make a separate function to be called from InitSharedInformers -// clusterListFunc obtains a list of all PostgreSQL clusters and runs sync when necessary -// NB: as this function is called directly by the informer, it needs to avoid acquiring locks -// on individual cluster structures. Therefore, it acts on the manifests obtained from Kubernetes -// and not on the internal state of the clusters. 
-func (c *Controller) clusterListFunc(options metav1.ListOptions) (runtime.Object, error) { +// clusterListFunc obtains a list of all PostgreSQL clusters +func (c *Controller) listClusters(options metav1.ListOptions) (*spec.PostgresqlList, error) { var ( - list spec.PostgresqlList - event spec.EventType + list spec.PostgresqlList ) req := c.KubeClient.CRDREST. @@ -67,21 +62,42 @@ func (c *Controller) clusterListFunc(options metav1.ListOptions) (runtime.Object c.logger.Warningf("could not unmarshal list of clusters: %v", err) } + return &list, err + +} + +// A separate function to be called from InitSharedInformers +func (c *Controller) clusterListFunc(options metav1.ListOptions) (runtime.Object, error) { + return c.listClusters(options) +} + +// clusterListAndSync lists all manifests and decides whether to run the sync or repair. +func (c *Controller) clusterListAndSync() error { + var ( + err error + event spec.EventType + ) + currentTime := time.Now().Unix() timeFromPreviousSync := currentTime - atomic.LoadInt64(&c.lastClusterSyncTime) timeFromPreviousRepair := currentTime - atomic.LoadInt64(&c.lastClusterRepairTime) + if timeFromPreviousSync >= int64(c.opConfig.ResyncPeriod.Seconds()) { event = spec.EventSync } else if timeFromPreviousRepair >= int64(c.opConfig.RepairPeriod.Seconds()) { event = spec.EventRepair } if event != "" { - c.queueEvents(&list, event) + var list *spec.PostgresqlList + if list, err = c.listClusters(metav1.ListOptions{ResourceVersion: "0"}); err != nil { + return err + } + c.queueEvents(list, event) } else { c.logger.Infof("not enough time passed since the last sync (%s seconds) or repair (%s seconds)", timeFromPreviousSync, timeFromPreviousRepair) } - return &list, err + return nil } // queueEvents queues a sync or repair event for every cluster with a valid manifest @@ -125,6 +141,30 @@ func (c *Controller) queueEvents(list *spec.PostgresqlList, event spec.EventType } } +func (c *Controller) acquireInitialListOfClusters() error { + var ( 
+ list *spec.PostgresqlList + err error + clusterName spec.NamespacedName + ) + + if list, err = c.listClusters(metav1.ListOptions{ResourceVersion: "0"}); err != nil { + return err + } + c.logger.Debugf("acquiring initial list of clusters") + for _, pg := range list.Items { + if pg.Error != nil { + continue + } + clusterName = util.NameFromMeta(pg.ObjectMeta) + c.addCluster(c.logger, clusterName, &pg) + c.logger.Debugf("added new cluster: %q", clusterName) + } + // initiate initial sync of all clusters. + c.queueEvents(list, spec.EventSync) + return nil +} + type crdDecoder struct { dec *json.Decoder close func() error diff --git a/pkg/util/config/util.go b/pkg/util/config/util.go index dbe411045..aef333ce7 100644 --- a/pkg/util/config/util.go +++ b/pkg/util/config/util.go @@ -172,10 +172,9 @@ func processField(value string, field reflect.Value) error { type parserState int const ( - Plain parserState = iota - DoubleQuoted - SingleQuoted - Escape + plain parserState = iota + doubleQuoted + singleQuoted ) // Split the pair candidates by commas not located inside open quotes @@ -183,7 +182,7 @@ const ( // expect to find them inside the map values for our use cases func getMapPairsFromString(value string) (pairs []string, err error) { pairs = make([]string, 0) - state := Plain + state := plain var start, quote int for i, ch := range strings.Split(value, "") { @@ -191,29 +190,29 @@ func getMapPairsFromString(value string) (pairs []string, err error) { fmt.Printf("Parser warning: ecape character '\\' have no effect on quotes inside the configuration value %s\n", value) } if ch == `"` { - if state == Plain { - state = DoubleQuoted + if state == plain { + state = doubleQuoted quote = i - } else if state == DoubleQuoted { - state = Plain + } else if state == doubleQuoted { + state = plain quote = 0 } } if ch == "'" { - if state == Plain { - state = SingleQuoted + if state == plain { + state = singleQuoted quote = i - } else if state == SingleQuoted { - state = Plain + } 
else if state == singleQuoted { + state = plain quote = 0 } } - if ch == "," && state == Plain { + if ch == "," && state == plain { pairs = append(pairs, strings.Trim(value[start:i], " \t")) start = i + 1 } } - if state != Plain { + if state != plain { err = fmt.Errorf("unmatched quote starting at position %d", quote+1) pairs = nil } else { From e93390808407965d98d4444c3ad5c6fefa88141e Mon Sep 17 00:00:00 2001 From: Oleksii Kliukin Date: Wed, 8 Aug 2018 11:01:26 +0200 Subject: [PATCH 22/30] Configure pg_hba in the local postgresql configuration of Patroni. (#361) Previously, the operator put pg_hba into the bootstrap/pg_hba key of Patroni. That had 2 adverse effects: - pg_hba.conf was shadowed by Spilo default section in the local postgresql configuration - when updating pg_hba in the cluster manifest, the updated lines were not propagated to DCS, since the key was defined in the boostrap section of Patroni. Include some minor refactoring, moving methods to unexported when possible and commenting out usage of md5, so that gosec won't complain. 
Per https://github.com/zalando-incubator/postgres-operator/issues/330 Review by @zerg-junior --- pkg/cluster/cluster.go | 35 +++++++++++++------------ pkg/cluster/k8sres.go | 56 ++++++++++++++++++++-------------------- pkg/cluster/resources.go | 8 +++--- pkg/cluster/util.go | 2 +- pkg/spec/postgresql.go | 1 + pkg/util/util.go | 5 ++-- 6 files changed, 55 insertions(+), 52 deletions(-) diff --git a/pkg/cluster/cluster.go b/pkg/cluster/cluster.go index 8414da8d1..49bab8599 100644 --- a/pkg/cluster/cluster.go +++ b/pkg/cluster/cluster.go @@ -119,7 +119,7 @@ func New(cfg Config, kubeClient k8sutil.KubernetesClient, pgSpec spec.Postgresql } cluster.logger = logger.WithField("pkg", "cluster").WithField("cluster-name", cluster.clusterName()) cluster.teamsAPIClient = teams.NewTeamsAPI(cfg.OpConfig.TeamsAPIUrl, logger) - cluster.oauthTokenGetter = NewSecretOauthTokenGetter(&kubeClient, cfg.OpConfig.OAuthTokenSecretName) + cluster.oauthTokenGetter = newSecretOauthTokenGetter(&kubeClient, cfg.OpConfig.OAuthTokenSecretName) cluster.patroni = patroni.New(cluster.logger) return cluster @@ -404,15 +404,15 @@ func (c *Cluster) compareStatefulSetWith(statefulSet *v1beta1.StatefulSet) *comp return &compareStatefulsetResult{match: match, reasons: reasons, rollingUpdate: needsRollUpdate, replace: needsReplace} } -type ContainerCondition func(a, b v1.Container) bool +type containerCondition func(a, b v1.Container) bool -type ContainerCheck struct { - condition ContainerCondition +type containerCheck struct { + condition containerCondition reason string } -func NewCheck(msg string, cond ContainerCondition) ContainerCheck { - return ContainerCheck{reason: msg, condition: cond} +func newCheck(msg string, cond containerCondition) containerCheck { + return containerCheck{reason: msg, condition: cond} } // compareContainers: compare containers from two stateful sets @@ -422,18 +422,18 @@ func NewCheck(msg string, cond ContainerCondition) ContainerCheck { func (c *Cluster) 
compareContainers(setA, setB *v1beta1.StatefulSet) (bool, []string) { reasons := make([]string, 0) needsRollUpdate := false - checks := []ContainerCheck{ - NewCheck("new statefulset's container %d name doesn't match the current one", + checks := []containerCheck{ + newCheck("new statefulset's container %d name doesn't match the current one", func(a, b v1.Container) bool { return a.Name != b.Name }), - NewCheck("new statefulset's container %d image doesn't match the current one", + newCheck("new statefulset's container %d image doesn't match the current one", func(a, b v1.Container) bool { return a.Image != b.Image }), - NewCheck("new statefulset's container %d ports don't match the current one", + newCheck("new statefulset's container %d ports don't match the current one", func(a, b v1.Container) bool { return !reflect.DeepEqual(a.Ports, b.Ports) }), - NewCheck("new statefulset's container %d resources don't match the current ones", + newCheck("new statefulset's container %d resources don't match the current ones", func(a, b v1.Container) bool { return !compareResources(&a.Resources, &b.Resources) }), - NewCheck("new statefulset's container %d environment doesn't match the current one", + newCheck("new statefulset's container %d environment doesn't match the current one", func(a, b v1.Container) bool { return !reflect.DeepEqual(a.Env, b.Env) }), - NewCheck("new statefulset's container %d environment sources don't match the current one", + newCheck("new statefulset's container %d environment sources don't match the current one", func(a, b v1.Container) bool { return !reflect.DeepEqual(a.EnvFrom, b.EnvFrom) }), } @@ -630,6 +630,7 @@ func (c *Cluster) Delete() { } } +//NeedsRepair returns true if the cluster should be included in the repair scan (based on its in-memory status). 
func (c *Cluster) NeedsRepair() (bool, spec.PostgresStatus) { c.specMu.RLock() defer c.specMu.RUnlock() @@ -905,9 +906,9 @@ func (c *Cluster) shouldDeleteSecret(secret *v1.Secret) (delete bool, userName s type simpleActionWithResult func() error -type ClusterObjectGet func(name string) (spec.NamespacedName, error) +type clusterObjectGet func(name string) (spec.NamespacedName, error) -type ClusterObjectDelete func(name string) error +type clusterObjectDelete func(name string) error func (c *Cluster) deletePatroniClusterObjects() error { // TODO: figure out how to remove leftover patroni objects in other cases @@ -924,8 +925,8 @@ func (c *Cluster) deletePatroniClusterObjects() error { } func (c *Cluster) deleteClusterObject( - get ClusterObjectGet, - del ClusterObjectDelete, + get clusterObjectGet, + del clusterObjectDelete, objType string) error { for _, suffix := range patroniObjectSuffixes { name := fmt.Sprintf("%s-%s", c.Name, suffix) diff --git a/pkg/cluster/k8sres.go b/pkg/cluster/k8sres.go index a71fa5e35..b620082b5 100644 --- a/pkg/cluster/k8sres.go +++ b/pkg/cluster/k8sres.go @@ -25,6 +25,7 @@ const ( pgBinariesLocationTemplate = "/usr/lib/postgresql/%s/bin" patroniPGBinariesParameterName = "bin_dir" patroniPGParametersParameterName = "parameters" + patroniPGHBAConfParameterName = "pg_hba" localHost = "127.0.0.1/32" ) @@ -44,7 +45,6 @@ type patroniDCS struct { type pgBootstrap struct { Initdb []interface{} `json:"initdb"` Users map[string]pgUser `json:"users"` - PgHBA []string `json:"pg_hba"` DCS patroniDCS `json:"dcs,omitempty"` } @@ -202,19 +202,6 @@ PatroniInitDBParams: config.Bootstrap.Initdb = append(config.Bootstrap.Initdb, map[string]string{k: v}) } - // pg_hba parameters in the manifest replace the default ones. We cannot - // reasonably merge them automatically, because pg_hba parsing stops on - // a first successfully matched rule. 
- if len(patroni.PgHba) > 0 { - config.Bootstrap.PgHBA = patroni.PgHba - } else { - config.Bootstrap.PgHBA = []string{ - "hostnossl all all all reject", - fmt.Sprintf("hostssl all +%s all pam", pamRoleName), - "hostssl all all all md5", - } - } - if patroni.MaximumLagOnFailover >= 0 { config.Bootstrap.DCS.MaximumLagOnFailover = patroni.MaximumLagOnFailover } @@ -231,23 +218,23 @@ PatroniInitDBParams: config.PgLocalConfiguration = make(map[string]interface{}) config.PgLocalConfiguration[patroniPGBinariesParameterName] = fmt.Sprintf(pgBinariesLocationTemplate, pg.PgVersion) if len(pg.Parameters) > 0 { - localParameters := make(map[string]string) - bootstrapParameters := make(map[string]string) - for param, val := range pg.Parameters { - if isBootstrapOnlyParameter(param) { - bootstrapParameters[param] = val - } else { - localParameters[param] = val - } + local, bootstrap := getLocalAndBoostrapPostgreSQLParameters(pg.Parameters) + + if len(local) > 0 { + config.PgLocalConfiguration[patroniPGParametersParameterName] = local } - if len(localParameters) > 0 { - config.PgLocalConfiguration[patroniPGParametersParameterName] = localParameters - } - if len(bootstrapParameters) > 0 { + if len(bootstrap) > 0 { config.Bootstrap.DCS.PGBootstrapConfiguration = make(map[string]interface{}) - config.Bootstrap.DCS.PGBootstrapConfiguration[patroniPGParametersParameterName] = bootstrapParameters + config.Bootstrap.DCS.PGBootstrapConfiguration[patroniPGParametersParameterName] = bootstrap } } + // Patroni gives us a choice of writing pg_hba.conf to either the bootstrap section or to the local postgresql one. + // We choose the local one, because we need Patroni to change pg_hba.conf in PostgreSQL after the user changes the + // relevant section in the manifest. 
+ if len(patroni.PgHba) > 0 { + config.PgLocalConfiguration[patroniPGHBAConfParameterName] = patroni.PgHba + } + config.Bootstrap.Users = map[string]pgUser{ pamRoleName: { Password: "", @@ -262,6 +249,19 @@ PatroniInitDBParams: return string(result) } +func getLocalAndBoostrapPostgreSQLParameters(parameters map[string]string) (local, bootstrap map[string]string) { + local = make(map[string]string) + bootstrap = make(map[string]string) + for param, val := range parameters { + if isBootstrapOnlyParameter(param) { + bootstrap[param] = val + } else { + local[param] = val + } + } + return +} + func nodeAffinity(nodeReadinessLabel map[string]string) *v1.Affinity { matchExpressions := make([]v1.NodeSelectorRequirement, 0) if len(nodeReadinessLabel) == 0 { @@ -736,7 +736,7 @@ func (c *Cluster) generateStatefulSet(spec *spec.PostgresSpec) (*v1beta1.Statefu Name: c.statefulSetName(), Namespace: c.Namespace, Labels: c.labelsSet(true), - Annotations: map[string]string{RollingUpdateStatefulsetAnnotationKey: "false"}, + Annotations: map[string]string{rollingUpdateStatefulsetAnnotationKey: "false"}, }, Spec: v1beta1.StatefulSetSpec{ Replicas: &numberOfInstances, diff --git a/pkg/cluster/resources.go b/pkg/cluster/resources.go index 764dc22e5..22b34b7f4 100644 --- a/pkg/cluster/resources.go +++ b/pkg/cluster/resources.go @@ -18,7 +18,7 @@ import ( ) const ( - RollingUpdateStatefulsetAnnotationKey = "zalando-postgres-operator-rolling-update-required" + rollingUpdateStatefulsetAnnotationKey = "zalando-postgres-operator-rolling-update-required" ) func (c *Cluster) listResources() error { @@ -140,7 +140,7 @@ func (c *Cluster) setRollingUpdateFlagForStatefulSet(sset *v1beta1.StatefulSet, if anno == nil { anno = make(map[string]string) } - anno[RollingUpdateStatefulsetAnnotationKey] = strconv.FormatBool(val) + anno[rollingUpdateStatefulsetAnnotationKey] = strconv.FormatBool(val) sset.SetAnnotations(anno) } @@ -162,12 +162,12 @@ func (c *Cluster) getRollingUpdateFlagFromStatefulSet(sset 
*v1beta1.StatefulSet, anno := sset.GetAnnotations() flag = defaultValue - stringFlag, exists := anno[RollingUpdateStatefulsetAnnotationKey] + stringFlag, exists := anno[rollingUpdateStatefulsetAnnotationKey] if exists { var err error if flag, err = strconv.ParseBool(stringFlag); err != nil { c.logger.Warnf("error when parsing %q annotation for the statefulset %q: expected boolean value, got %q\n", - RollingUpdateStatefulsetAnnotationKey, + rollingUpdateStatefulsetAnnotationKey, types.NamespacedName{Namespace: sset.Namespace, Name: sset.Name}, stringFlag) flag = defaultValue diff --git a/pkg/cluster/util.go b/pkg/cluster/util.go index 853f99ec0..85a61d969 100644 --- a/pkg/cluster/util.go +++ b/pkg/cluster/util.go @@ -35,7 +35,7 @@ type SecretOauthTokenGetter struct { OAuthTokenSecretName spec.NamespacedName } -func NewSecretOauthTokenGetter(kubeClient *k8sutil.KubernetesClient, +func newSecretOauthTokenGetter(kubeClient *k8sutil.KubernetesClient, OAuthTokenSecretName spec.NamespacedName) *SecretOauthTokenGetter { return &SecretOauthTokenGetter{kubeClient, OAuthTokenSecretName} } diff --git a/pkg/spec/postgresql.go b/pkg/spec/postgresql.go index b56b74d23..889ebe5bc 100644 --- a/pkg/spec/postgresql.go +++ b/pkg/spec/postgresql.go @@ -71,6 +71,7 @@ type Sidecar struct { Env []v1.EnvVar `json:"env,omitempty"` } +// UserFlags defines flags (such as superuser, nologin) that could be assigned to individual users type UserFlags []string // PostgresStatus contains status of the PostgreSQL cluster (running, creation failed etc.) 
diff --git a/pkg/util/util.go b/pkg/util/util.go index b1b3d91b3..7b7b58fc4 100644 --- a/pkg/util/util.go +++ b/pkg/util/util.go @@ -1,7 +1,7 @@ package util import ( - "crypto/md5" + "crypto/md5" // #nosec we need it to for PostgreSQL md5 passwords "encoding/hex" "math/rand" "regexp" @@ -48,7 +48,7 @@ func PGUserPassword(user spec.PgUser) string { // Avoid processing already encrypted or empty passwords return user.Password } - s := md5.Sum([]byte(user.Password + user.Name)) + s := md5.Sum([]byte(user.Password + user.Name)) // #nosec, using md5 since PostgreSQL uses it for hashing passwords. return md5prefix + hex.EncodeToString(s[:]) } @@ -120,6 +120,7 @@ func MapContains(haystack, needle map[string]string) bool { return true } +// Coalesce returns the first argument if it is not null, otherwise the second one. func Coalesce(val, defaultVal string) string { if val == "" { return defaultVal From 6e8dcabac74c97db7dce7e0be460a8c12f0fbaeb Mon Sep 17 00:00:00 2001 From: Jan Mussler Date: Fri, 10 Aug 2018 14:17:44 +0200 Subject: [PATCH 23/30] Update postgres-operator.yaml Bump manifest to use v1.0.0 operator --- manifests/postgres-operator.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/manifests/postgres-operator.yaml b/manifests/postgres-operator.yaml index d361c0b88..0c4cf84cb 100644 --- a/manifests/postgres-operator.yaml +++ b/manifests/postgres-operator.yaml @@ -12,7 +12,7 @@ spec: serviceAccountName: zalando-postgres-operator containers: - name: postgres-operator - image: registry.opensource.zalan.do/acid/postgres-operator:417f13c + image: registry.opensource.zalan.do/acid/postgres-operator:v1.0.0 imagePullPolicy: IfNotPresent env: # provided additional ENV vars can overwrite individual config map entries From e1ed4b847d527104fab7edc6b246cf6d67d77fd8 Mon Sep 17 00:00:00 2001 From: Oleksii Kliukin Date: Wed, 15 Aug 2018 17:22:25 +0200 Subject: [PATCH 24/30] Use code-generation for CRD API and deepcopy methods (#369) Client-go provides a 
https://github.com/kubernetes/code-generator package in order to provide the API to work with CRDs similar to the one available for built-in types, i.e. Pods, Statefulsets and so on. Use this package to generate deepcopy methods (required for CRDs), instead of using an external deepcopy package; we also generate APIs used to manipulate both Postgres and OperatorConfiguration CRDs, as well as informers and listers for the Postgres CRD, instead of using generic informers and CRD REST API; by using generated code we can get rid of some custom and obscure CRD-related code and use a better API. All generated code resides in /pkg/generated, with an exception of zz_deepcopy.go in apis/acid.zalan.do/v1 Rename postgres-operator-configuration CRD to OperatorConfiguration, since the former broke naming convention in the code-generator. Moved Postgresql, PostgresqlList, OperatorConfiguration and OperatorConfigurationList and other types used by them into Change the type of the Error field in the Postgresql crd to a string, so that client-go could generate a deepcopy for it. Use generated code to set status of CRD objects as well. Right now this is done with patch, however, Kubernetes 1.11 introduces the /status subresources, allowing us to set the status with the special updateStatus call in the future. For now, we keep the code that is compatible with earlier versions of Kubernetes. Rename postgresql.go to database.go and status.go to logs_and_api.go to reflect the purpose of each of those files. Update client-go dependencies. Minor reformatting and renaming. 
--- docs/reference/operator_parameters.md | 2 +- glide.lock | 31 +- glide.yaml | 8 +- hack/custom-boilerplate.go.txt | 21 + hack/update-codegen.sh | 13 + hack/verify-codegen.sh | 33 + ...gresql-operator-default-configuration.yaml | 2 +- pkg/apis/acid.zalan.do/register.go | 6 + pkg/apis/acid.zalan.do/v1/const.go | 16 + pkg/apis/acid.zalan.do/v1/doc.go | 6 + pkg/apis/acid.zalan.do/v1/marshal.go | 130 ++++ .../v1/operator_configuration_type.go} | 129 +--- pkg/apis/acid.zalan.do/v1/postgresql_type.go | 127 ++++ pkg/apis/acid.zalan.do/v1/register.go | 58 ++ pkg/apis/acid.zalan.do/v1/util.go | 93 +++ .../acid.zalan.do/v1/util_test.go} | 45 +- .../acid.zalan.do/v1/zz_generated.deepcopy.go | 681 ++++++++++++++++++ pkg/apiserver/apiserver.go | 7 +- pkg/cluster/cluster.go | 90 ++- pkg/cluster/cluster_test.go | 17 +- pkg/cluster/{pg.go => database.go} | 0 pkg/cluster/k8sres.go | 51 +- pkg/cluster/k8sres_test.go | 14 +- pkg/cluster/pod.go | 4 +- pkg/cluster/sync.go | 9 +- pkg/cluster/types.go | 57 ++ pkg/cluster/util.go | 22 +- pkg/cluster/volumes.go | 7 +- pkg/controller/controller.go | 26 +- pkg/controller/{status.go => logs_and_api.go} | 9 +- pkg/controller/operator_config.go | 25 +- pkg/controller/pod.go | 16 +- pkg/controller/postgresql.go | 175 ++--- pkg/controller/postgresql_test.go | 13 +- pkg/controller/types.go | 30 + pkg/controller/util.go | 28 +- .../clientset/versioned/clientset.go | 104 +++ pkg/generated/clientset/versioned/doc.go | 26 + .../versioned/fake/clientset_generated.go | 88 +++ pkg/generated/clientset/versioned/fake/doc.go | 26 + .../clientset/versioned/fake/register.go | 60 ++ .../clientset/versioned/scheme/doc.go | 26 + .../clientset/versioned/scheme/register.go | 60 ++ .../acid.zalan.do/v1/acid.zalan.do_client.go | 101 +++ .../versioned/typed/acid.zalan.do/v1/doc.go | 26 + .../typed/acid.zalan.do/v1/fake/doc.go | 26 + .../v1/fake/fake_acid.zalan.do_client.go | 50 ++ .../v1/fake/fake_operatorconfiguration.go | 53 ++ 
.../acid.zalan.do/v1/fake/fake_postgresql.go | 146 ++++ .../acid.zalan.do/v1/generated_expansion.go | 29 + .../acid.zalan.do/v1/operatorconfiguration.go | 71 ++ .../typed/acid.zalan.do/v1/postgresql.go | 180 +++++ .../acid.zalan.do/interface.go | 52 ++ .../acid.zalan.do/v1/interface.go | 51 ++ .../acid.zalan.do/v1/postgresql.go | 95 +++ .../informers/externalversions/factory.go | 186 +++++ .../informers/externalversions/generic.go | 68 ++ .../internalinterfaces/factory_interfaces.go | 44 ++ .../acid.zalan.do/v1/expansion_generated.go | 33 + .../listers/acid.zalan.do/v1/postgresql.go | 100 +++ pkg/spec/postgresql.go | 394 ---------- pkg/spec/types.go | 93 +-- pkg/util/config/config.go | 8 +- pkg/util/config/util.go | 12 +- pkg/util/constants/crd.go | 13 - pkg/util/k8sutil/k8sutil.go | 29 +- 66 files changed, 3285 insertions(+), 966 deletions(-) create mode 100644 hack/custom-boilerplate.go.txt create mode 100755 hack/update-codegen.sh create mode 100755 hack/verify-codegen.sh create mode 100644 pkg/apis/acid.zalan.do/register.go create mode 100644 pkg/apis/acid.zalan.do/v1/const.go create mode 100644 pkg/apis/acid.zalan.do/v1/doc.go create mode 100644 pkg/apis/acid.zalan.do/v1/marshal.go rename pkg/{util/config/crd_config.go => apis/acid.zalan.do/v1/operator_configuration_type.go} (56%) create mode 100644 pkg/apis/acid.zalan.do/v1/postgresql_type.go create mode 100644 pkg/apis/acid.zalan.do/v1/register.go create mode 100644 pkg/apis/acid.zalan.do/v1/util.go rename pkg/{spec/postgresql_test.go => apis/acid.zalan.do/v1/util_test.go} (95%) create mode 100644 pkg/apis/acid.zalan.do/v1/zz_generated.deepcopy.go rename pkg/cluster/{pg.go => database.go} (100%) rename pkg/controller/{status.go => logs_and_api.go} (95%) create mode 100644 pkg/controller/types.go create mode 100644 pkg/generated/clientset/versioned/clientset.go create mode 100644 pkg/generated/clientset/versioned/doc.go create mode 100644 pkg/generated/clientset/versioned/fake/clientset_generated.go create 
mode 100644 pkg/generated/clientset/versioned/fake/doc.go create mode 100644 pkg/generated/clientset/versioned/fake/register.go create mode 100644 pkg/generated/clientset/versioned/scheme/doc.go create mode 100644 pkg/generated/clientset/versioned/scheme/register.go create mode 100644 pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/acid.zalan.do_client.go create mode 100644 pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/doc.go create mode 100644 pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/fake/doc.go create mode 100644 pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/fake/fake_acid.zalan.do_client.go create mode 100644 pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/fake/fake_operatorconfiguration.go create mode 100644 pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/fake/fake_postgresql.go create mode 100644 pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/generated_expansion.go create mode 100644 pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/operatorconfiguration.go create mode 100644 pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/postgresql.go create mode 100644 pkg/generated/informers/externalversions/acid.zalan.do/interface.go create mode 100644 pkg/generated/informers/externalversions/acid.zalan.do/v1/interface.go create mode 100644 pkg/generated/informers/externalversions/acid.zalan.do/v1/postgresql.go create mode 100644 pkg/generated/informers/externalversions/factory.go create mode 100644 pkg/generated/informers/externalversions/generic.go create mode 100644 pkg/generated/informers/externalversions/internalinterfaces/factory_interfaces.go create mode 100644 pkg/generated/listers/acid.zalan.do/v1/expansion_generated.go create mode 100644 pkg/generated/listers/acid.zalan.do/v1/postgresql.go delete mode 100644 pkg/spec/postgresql.go delete mode 100644 pkg/util/constants/crd.go diff --git a/docs/reference/operator_parameters.md b/docs/reference/operator_parameters.md index 
d14d3d9d7..76ddb9ff9 100644 --- a/docs/reference/operator_parameters.md +++ b/docs/reference/operator_parameters.md @@ -12,7 +12,7 @@ configuration. * CRD-based configuration. The configuration is stored in the custom YAML manifest, an instance of the custom resource definition (CRD) called - `postgresql-operator-configuration`. This CRD is registered by the operator + `OperatorConfiguration`. This CRD is registered by the operator during the start when `POSTGRES_OPERATOR_CONFIGURATION_OBJECT` variable is set to a non-empty value. The CRD-based configuration is a regular YAML document; non-scalar keys are simply represented in the usual YAML way. The diff --git a/glide.lock b/glide.lock index f87d10955..19cb6c41f 100644 --- a/glide.lock +++ b/glide.lock @@ -1,8 +1,8 @@ -hash: ff2f80192f85899fb70880aabc4851672673f8ac3be257c6d9ff46ad33db94ca -updated: 2018-08-06T15:28:34.096941+02:00 +hash: bd5394acf101795aac9da20c104a57344a6c4fd71080bf1b16845367e6360578 +updated: 2018-08-14T15:18:08.144086+02:00 imports: - name: github.com/aws/aws-sdk-go - version: f70339bb6af843c8ab1974381b3f4fcaee2b1a41 + version: f831d5a0822a1ad72420ab18c6269bca1ddaf490 subpackages: - aws - aws/awserr @@ -103,7 +103,7 @@ imports: - name: github.com/Sirupsen/logrus version: 3e01752db0189b9157070a0e1668a620f9a85da2 - name: github.com/spf13/pflag - version: 9ff6c6923cfffbcd502984b8e0c80539a94968b7 + version: 583c0c0531f06d5278b7d917446061adc344b5cd - name: golang.org/x/crypto version: c126467f60eb25f8f27e5a981f32a87e3965053f subpackages: @@ -124,17 +124,10 @@ imports: - name: golang.org/x/text version: b19bf474d317b857955b12035d2c5acb57ce8b01 subpackages: - - cases - - internal - - internal/tag - - language - - runes - secure/bidirule - - secure/precis - transform - unicode/bidi - unicode/norm - - width - name: golang.org/x/time version: f51c12702a4d776e4c1fa9b0fabab841babae631 subpackages: @@ -144,7 +137,7 @@ imports: - name: gopkg.in/yaml.v2 version: 5420a8b6744d3b0345ab293f6fcba19c978f1183 - 
name: k8s.io/api - version: 072894a440bdee3a891dea811fe42902311cd2a3 + version: 2d6f90ab1293a1fb871cf149423ebb72aa7423aa subpackages: - admissionregistration/v1alpha1 - admissionregistration/v1beta1 @@ -164,6 +157,7 @@ imports: - core/v1 - events/v1beta1 - extensions/v1beta1 + - imagepolicy/v1alpha1 - networking/v1 - policy/v1beta1 - rbac/v1 @@ -176,7 +170,7 @@ imports: - storage/v1alpha1 - storage/v1beta1 - name: k8s.io/apiextensions-apiserver - version: 06dfdaae5c2bd89e1243151ff65b9bf8ee050f28 + version: cc9cd5d998df84cc405d398e9030d29c95acff18 subpackages: - pkg/apis/apiextensions - pkg/apis/apiextensions/v1beta1 @@ -216,22 +210,26 @@ imports: - pkg/util/httpstream/spdy - pkg/util/intstr - pkg/util/json + - pkg/util/mergepatch - pkg/util/net - pkg/util/remotecommand - pkg/util/runtime - pkg/util/sets + - pkg/util/strategicpatch - pkg/util/validation - pkg/util/validation/field - pkg/util/wait - pkg/util/yaml - pkg/version - pkg/watch + - third_party/forked/golang/json - third_party/forked/golang/netutil - third_party/forked/golang/reflect - name: k8s.io/client-go - version: 7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65 + version: 1f13a808da65775f22cbf47862c4e5898d8f4ca1 subpackages: - discovery + - discovery/fake - kubernetes - kubernetes/scheme - kubernetes/typed/admissionregistration/v1alpha1 @@ -270,6 +268,7 @@ imports: - plugin/pkg/client/auth/exec - rest - rest/watch + - testing - tools/auth - tools/cache - tools/clientcmd @@ -294,4 +293,8 @@ imports: version: 6702109cc68eb6fe6350b83e14407c8d7309fd1a - name: k8s.io/gengo version: 906d99f89cd644eecf75ab547b29bf9f876f0b59 +- name: k8s.io/kube-openapi + version: 91cfa479c814065e420cee7ed227db0f63a5854e + subpackages: + - pkg/util/proto testImports: [] diff --git a/glide.yaml b/glide.yaml index 625a06d62..74e837877 100644 --- a/glide.yaml +++ b/glide.yaml @@ -11,13 +11,13 @@ import: - package: github.com/lib/pq - package: github.com/motomux/pretty - package: k8s.io/apimachinery - version: kubernetes-1.11.1 + 
version: kubernetes-1.11.3-beta.0 - package: k8s.io/apiextensions-apiserver - version: kubernetes-1.11.1 + version: kubernetes-1.11.3-beta.0 - package: k8s.io/client-go - version: ^8.0.0 + version: kubernetes-1.11.3-beta.0 - package: k8s.io/code-generator - version: kubernetes-1.11.1 + version: kubernetes-1.11.3-beta.0 - package: k8s.io/gengo - package: gopkg.in/yaml.v2 - package: github.com/mohae/deepcopy diff --git a/hack/custom-boilerplate.go.txt b/hack/custom-boilerplate.go.txt new file mode 100644 index 000000000..8fca2b20f --- /dev/null +++ b/hack/custom-boilerplate.go.txt @@ -0,0 +1,21 @@ +/* +Copyright YEAR Compose, Zalando SE + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +*/ diff --git a/hack/update-codegen.sh b/hack/update-codegen.sh new file mode 100755 index 000000000..a1c1555c9 --- /dev/null +++ b/hack/update-codegen.sh @@ -0,0 +1,13 @@ +#!/bin/bash + +set -o errexit +set -o nounset +set -o pipefail + +SCRIPT_ROOT=$(dirname ${BASH_SOURCE})/.. 
+CODEGEN_PKG=${CODEGEN_PKG:-$(cd ${SCRIPT_ROOT}; ls -d -1 ./vendor/k8s.io/code-generator 2>/dev/null || echo ${GOPATH}/src/k8s.io/code-generator)} + +vendor/k8s.io/code-generator/generate-groups.sh all \ + github.com/zalando-incubator/postgres-operator/pkg/generated github.com/zalando-incubator/postgres-operator/pkg/apis \ + acid.zalan.do:v1 \ + --go-header-file ${SCRIPT_ROOT}/hack/custom-boilerplate.go.txt diff --git a/hack/verify-codegen.sh b/hack/verify-codegen.sh new file mode 100755 index 000000000..904586d05 --- /dev/null +++ b/hack/verify-codegen.sh @@ -0,0 +1,33 @@ +#!/bin/bash + +set -o errexit +set -o nounset +set -o pipefail + +SCRIPT_ROOT=$(dirname "${BASH_SOURCE}")/.. +DIFFROOT="${SCRIPT_ROOT}/pkg" +TMP_DIFFROOT="${SCRIPT_ROOT}/_tmp/pkg" +_tmp="${SCRIPT_ROOT}/_tmp" + +cleanup() { + rm -rf "${_tmp}" +} +trap "cleanup" EXIT SIGINT + +cleanup + +mkdir -p "${TMP_DIFFROOT}" +cp -a "${DIFFROOT}"/* "${TMP_DIFFROOT}" + +"${SCRIPT_ROOT}/hack/update-codegen.sh" +echo "diffing ${DIFFROOT} against freshly generated codegen" +ret=0 +diff -Naupr "${DIFFROOT}" "${TMP_DIFFROOT}" || ret=$? +cp -a "${TMP_DIFFROOT}"/* "${DIFFROOT}" +if [[ $ret -eq 0 ]] +then + echo "${DIFFROOT} up to date." +else + echo "${DIFFROOT} is out of date. 
Please run hack/update-codegen.sh" + exit 1 +fi diff --git a/manifests/postgresql-operator-default-configuration.yaml b/manifests/postgresql-operator-default-configuration.yaml index 05fa935e9..d2a1307f8 100644 --- a/manifests/postgresql-operator-default-configuration.yaml +++ b/manifests/postgresql-operator-default-configuration.yaml @@ -1,5 +1,5 @@ apiVersion: "acid.zalan.do/v1" -kind: postgresql-operator-configuration +kind: OperatorConfiguration metadata: name: postgresql-operator-default-configuration configuration: diff --git a/pkg/apis/acid.zalan.do/register.go b/pkg/apis/acid.zalan.do/register.go new file mode 100644 index 000000000..f99396656 --- /dev/null +++ b/pkg/apis/acid.zalan.do/register.go @@ -0,0 +1,6 @@ +package acidzalando + +const ( + // GroupName is the group name for the operator CRDs + GroupName = "acid.zalan.do" +) diff --git a/pkg/apis/acid.zalan.do/v1/const.go b/pkg/apis/acid.zalan.do/v1/const.go new file mode 100644 index 000000000..4592a2d68 --- /dev/null +++ b/pkg/apis/acid.zalan.do/v1/const.go @@ -0,0 +1,16 @@ +package v1 + +const ( + serviceNameMaxLength = 63 + clusterNameMaxLength = serviceNameMaxLength - len("-repl") + serviceNameRegexString = `^[a-z]([-a-z0-9]*[a-z0-9])?$` + + ClusterStatusUnknown PostgresStatus = "" + ClusterStatusCreating PostgresStatus = "Creating" + ClusterStatusUpdating PostgresStatus = "Updating" + ClusterStatusUpdateFailed PostgresStatus = "UpdateFailed" + ClusterStatusSyncFailed PostgresStatus = "SyncFailed" + ClusterStatusAddFailed PostgresStatus = "CreateFailed" + ClusterStatusRunning PostgresStatus = "Running" + ClusterStatusInvalid PostgresStatus = "Invalid" +) diff --git a/pkg/apis/acid.zalan.do/v1/doc.go b/pkg/apis/acid.zalan.do/v1/doc.go new file mode 100644 index 000000000..5accd806d --- /dev/null +++ b/pkg/apis/acid.zalan.do/v1/doc.go @@ -0,0 +1,6 @@ +// +k8s:deepcopy-gen=package,register + +// Package v1 is the v1 version of the API. 
+// +groupName=acid.zalan.do + +package v1 diff --git a/pkg/apis/acid.zalan.do/v1/marshal.go b/pkg/apis/acid.zalan.do/v1/marshal.go new file mode 100644 index 000000000..b24c4e49d --- /dev/null +++ b/pkg/apis/acid.zalan.do/v1/marshal.go @@ -0,0 +1,130 @@ +package v1 + +import ( + "encoding/json" + "fmt" + "strings" + "time" +) + +type postgresqlCopy Postgresql + +// MarshalJSON converts a maintenance window definition to JSON. +func (m *MaintenanceWindow) MarshalJSON() ([]byte, error) { + if m.Everyday { + return []byte(fmt.Sprintf("\"%s-%s\"", + m.StartTime.Format("15:04"), + m.EndTime.Format("15:04"))), nil + } + + return []byte(fmt.Sprintf("\"%s:%s-%s\"", + m.Weekday.String()[:3], + m.StartTime.Format("15:04"), + m.EndTime.Format("15:04"))), nil +} + +// UnmarshalJSON converts a JSON to the maintenance window definition. +func (m *MaintenanceWindow) UnmarshalJSON(data []byte) error { + var ( + got MaintenanceWindow + err error + ) + + parts := strings.Split(string(data[1:len(data)-1]), "-") + if len(parts) != 2 { + return fmt.Errorf("incorrect maintenance window format") + } + + fromParts := strings.Split(parts[0], ":") + switch len(fromParts) { + case 3: + got.Everyday = false + got.Weekday, err = parseWeekday(fromParts[0]) + if err != nil { + return fmt.Errorf("could not parse weekday: %v", err) + } + + got.StartTime, err = parseTime(fromParts[1] + ":" + fromParts[2]) + case 2: + got.Everyday = true + got.StartTime, err = parseTime(fromParts[0] + ":" + fromParts[1]) + default: + return fmt.Errorf("incorrect maintenance window format") + } + if err != nil { + return fmt.Errorf("could not parse start time: %v", err) + } + + got.EndTime, err = parseTime(parts[1]) + if err != nil { + return fmt.Errorf("could not parse end time: %v", err) + } + + if got.EndTime.Before(&got.StartTime) { + return fmt.Errorf("'From' time must be prior to the 'To' time") + } + + *m = got + + return nil +} + +// UnmarshalJSON converts a JSON into the PostgreSQL object. 
+func (p *Postgresql) UnmarshalJSON(data []byte) error { + var tmp postgresqlCopy + + err := json.Unmarshal(data, &tmp) + if err != nil { + metaErr := json.Unmarshal(data, &tmp.ObjectMeta) + if metaErr != nil { + return err + } + + tmp.Error = err.Error() + tmp.Status = ClusterStatusInvalid + + *p = Postgresql(tmp) + + return nil + } + tmp2 := Postgresql(tmp) + + if clusterName, err := extractClusterName(tmp2.ObjectMeta.Name, tmp2.Spec.TeamID); err != nil { + tmp2.Error = err.Error() + tmp2.Status = ClusterStatusInvalid + } else if err := validateCloneClusterDescription(&tmp2.Spec.Clone); err != nil { + tmp2.Error = err.Error() + tmp2.Status = ClusterStatusInvalid + } else { + tmp2.Spec.ClusterName = clusterName + } + + *p = tmp2 + + return nil +} + +func (d *Duration) UnmarshalJSON(b []byte) error { + var ( + v interface{} + err error + ) + if err = json.Unmarshal(b, &v); err != nil { + return err + } + switch val := v.(type) { + case string: + t, err := time.ParseDuration(val) + if err != nil { + return err + } + *d = Duration(t) + return nil + case float64: + t := time.Duration(val) + *d = Duration(t) + return nil + default: + return fmt.Errorf("could not recognize type %T as a valid type to unmarshal to Duration", val) + } +} diff --git a/pkg/util/config/crd_config.go b/pkg/apis/acid.zalan.do/v1/operator_configuration_type.go similarity index 56% rename from pkg/util/config/crd_config.go rename to pkg/apis/acid.zalan.do/v1/operator_configuration_type.go index 2a9090514..cd70d76d9 100644 --- a/pkg/util/config/crd_config.go +++ b/pkg/apis/acid.zalan.do/v1/operator_configuration_type.go @@ -1,23 +1,25 @@ -package config +package v1 import ( - "encoding/json" + "github.com/zalando-incubator/postgres-operator/pkg/util/config" "github.com/zalando-incubator/postgres-operator/pkg/spec" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - - "github.com/mohae/deepcopy" + "time" ) +// +genclient +// +genclient:onlyVerbs=get +// 
+genclient:noStatus +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object type OperatorConfiguration struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata"` Configuration OperatorConfigurationData `json:"configuration"` - Error error `json:"-"` } +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object type OperatorConfigurationList struct { metav1.TypeMeta `json:",inline"` metav1.ListMeta `json:"metadata"` @@ -33,18 +35,18 @@ type PostgresUsersConfiguration struct { type KubernetesMetaConfiguration struct { PodServiceAccountName string `json:"pod_service_account_name,omitempty"` // TODO: change it to the proper json - PodServiceAccountDefinition string `json:"pod_service_account_definition,omitempty"` - PodServiceAccountRoleBindingDefinition string `json:"pod_service_account_role_binding_definition,omitempty"` - PodTerminateGracePeriod spec.Duration `json:"pod_terminate_grace_period,omitempty"` - WatchedNamespace string `json:"watched_namespace,omitempty"` - PDBNameFormat stringTemplate `json:"pdb_name_format,omitempty"` - SecretNameTemplate stringTemplate `json:"secret_name_template,omitempty"` - OAuthTokenSecretName spec.NamespacedName `json:"oauth_token_secret_name,omitempty"` - InfrastructureRolesSecretName spec.NamespacedName `json:"infrastructure_roles_secret_name,omitempty"` - PodRoleLabel string `json:"pod_role_label,omitempty"` - ClusterLabels map[string]string `json:"cluster_labels,omitempty"` - ClusterNameLabel string `json:"cluster_name_label,omitempty"` - NodeReadinessLabel map[string]string `json:"node_readiness_label,omitempty"` + PodServiceAccountDefinition string `json:"pod_service_account_definition,omitempty"` + PodServiceAccountRoleBindingDefinition string `json:"pod_service_account_role_binding_definition,omitempty"` + PodTerminateGracePeriod Duration `json:"pod_terminate_grace_period,omitempty"` + WatchedNamespace string `json:"watched_namespace,omitempty"` + PDBNameFormat 
config.StringTemplate `json:"pdb_name_format,omitempty"` + SecretNameTemplate config.StringTemplate `json:"secret_name_template,omitempty"` + OAuthTokenSecretName spec.NamespacedName `json:"oauth_token_secret_name,omitempty"` + InfrastructureRolesSecretName spec.NamespacedName `json:"infrastructure_roles_secret_name,omitempty"` + PodRoleLabel string `json:"pod_role_label,omitempty"` + ClusterLabels map[string]string `json:"cluster_labels,omitempty"` + ClusterNameLabel string `json:"cluster_name_label,omitempty"` + NodeReadinessLabel map[string]string `json:"node_readiness_label,omitempty"` // TODO: use a proper toleration structure? PodToleration map[string]string `json:"toleration,omitempty"` // TODO: use namespacedname @@ -60,20 +62,20 @@ type PostgresPodResourcesDefaults struct { } type OperatorTimeouts struct { - ResourceCheckInterval spec.Duration `json:"resource_check_interval,omitempty"` - ResourceCheckTimeout spec.Duration `json:"resource_check_timeout,omitempty"` - PodLabelWaitTimeout spec.Duration `json:"pod_label_wait_timeout,omitempty"` - PodDeletionWaitTimeout spec.Duration `json:"pod_deletion_wait_timeout,omitempty"` - ReadyWaitInterval spec.Duration `json:"ready_wait_interval,omitempty"` - ReadyWaitTimeout spec.Duration `json:"ready_wait_timeout,omitempty"` + ResourceCheckInterval Duration `json:"resource_check_interval,omitempty"` + ResourceCheckTimeout Duration `json:"resource_check_timeout,omitempty"` + PodLabelWaitTimeout Duration `json:"pod_label_wait_timeout,omitempty"` + PodDeletionWaitTimeout Duration `json:"pod_deletion_wait_timeout,omitempty"` + ReadyWaitInterval Duration `json:"ready_wait_interval,omitempty"` + ReadyWaitTimeout Duration `json:"ready_wait_timeout,omitempty"` } type LoadBalancerConfiguration struct { - DbHostedZone string `json:"db_hosted_zone,omitempty"` - EnableMasterLoadBalancer bool `json:"enable_master_load_balancer,omitempty"` - EnableReplicaLoadBalancer bool `json:"enable_replica_load_balancer,omitempty"` - 
MasterDNSNameFormat stringTemplate `json:"master_dns_name_format,omitempty"` - ReplicaDNSNameFormat stringTemplate `json:"replica_dns_name_format,omitempty"` + DbHostedZone string `json:"db_hosted_zone,omitempty"` + EnableMasterLoadBalancer bool `json:"enable_master_load_balancer,omitempty"` + EnableReplicaLoadBalancer bool `json:"enable_replica_load_balancer,omitempty"` + MasterDNSNameFormat config.StringTemplate `json:"master_dns_name_format,omitempty"` + ReplicaDNSNameFormat config.StringTemplate `json:"replica_dns_name_format,omitempty"` } type AWSGCPConfiguration struct { @@ -121,8 +123,8 @@ type OperatorConfigurationData struct { Workers uint32 `json:"workers,omitempty"` MinInstances int32 `json:"min_instances,omitempty"` MaxInstances int32 `json:"max_instances,omitempty"` - ResyncPeriod spec.Duration `json:"resync_period,omitempty"` - RepairPeriod spec.Duration `json:"repair_period,omitempty"` + ResyncPeriod Duration `json:"resync_period,omitempty"` + RepairPeriod Duration `json:"repair_period,omitempty"` Sidecars map[string]string `json:"sidecar_docker_images,omitempty"` PostgresUsersConfiguration PostgresUsersConfiguration `json:"users"` Kubernetes KubernetesMetaConfiguration `json:"kubernetes"` @@ -143,67 +145,4 @@ type OperatorConfigurationUsers struct { TeamAPIRoleConfiguration map[string]string `json:"team_api_role_configuration,omitempty"` } -type OperatorConfigurationCopy OperatorConfiguration -type OperatorConfigurationListCopy OperatorConfigurationList - -func (opc *OperatorConfiguration) UnmarshalJSON(data []byte) error { - var ref OperatorConfigurationCopy - if err := json.Unmarshal(data, &ref); err != nil { - return err - } - *opc = OperatorConfiguration(ref) - return nil -} - -func (opc *OperatorConfiguration) DeepCopyInto(out *OperatorConfiguration) { - if opc != nil { - *out = deepcopy.Copy(*opc).(OperatorConfiguration) - } -} - -func (opc *OperatorConfiguration) DeepCopy() *OperatorConfiguration { - if opc == nil { - return nil - } - out := 
new(OperatorConfiguration) - opc.DeepCopyInto(out) - return out -} - -func (opc *OperatorConfiguration) DeepCopyObject() runtime.Object { - if c := opc.DeepCopy(); c != nil { - return c - } - return nil -} - -func (opcl *OperatorConfigurationList) UnmarshalJSON(data []byte) error { - var ref OperatorConfigurationListCopy - if err := json.Unmarshal(data, &ref); err != nil { - return nil - } - *opcl = OperatorConfigurationList(ref) - return nil -} - -func (opcl *OperatorConfigurationList) DeepCopyInto(out *OperatorConfigurationList) { - if opcl != nil { - *out = deepcopy.Copy(*opcl).(OperatorConfigurationList) - } -} - -func (opcl *OperatorConfigurationList) DeepCopy() *OperatorConfigurationList { - if opcl == nil { - return nil - } - out := new(OperatorConfigurationList) - opcl.DeepCopyInto(out) - return out -} - -func (opcl *OperatorConfigurationList) DeepCopyObject() runtime.Object { - if c := opcl.DeepCopy(); c != nil { - return c - } - return nil -} +type Duration time.Duration diff --git a/pkg/apis/acid.zalan.do/v1/postgresql_type.go b/pkg/apis/acid.zalan.do/v1/postgresql_type.go new file mode 100644 index 000000000..3f6d34165 --- /dev/null +++ b/pkg/apis/acid.zalan.do/v1/postgresql_type.go @@ -0,0 +1,127 @@ +package v1 + +import ( + "time" + + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +//Postgresql defines PostgreSQL Custom Resource Definition Object. +type Postgresql struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec PostgresSpec `json:"spec"` + Status PostgresStatus `json:"status,omitempty"` + Error string `json:"-"` +} + +// PostgresSpec defines the specification for the PostgreSQL TPR. 
+type PostgresSpec struct { + PostgresqlParam `json:"postgresql"` + Volume `json:"volume,omitempty"` + Patroni `json:"patroni,omitempty"` + Resources `json:"resources,omitempty"` + + TeamID string `json:"teamId"` + DockerImage string `json:"dockerImage,omitempty"` + + // vars that enable load balancers are pointers because it is important to know if any of them is omitted from the Postgres manifest + // in that case the var evaluates to nil and the value is taken from the operator config + EnableMasterLoadBalancer *bool `json:"enableMasterLoadBalancer,omitempty"` + EnableReplicaLoadBalancer *bool `json:"enableReplicaLoadBalancer,omitempty"` + + // deprecated load balancer settings maintained for backward compatibility + // see "Load balancers" operator docs + UseLoadBalancer *bool `json:"useLoadBalancer,omitempty"` + ReplicaLoadBalancer *bool `json:"replicaLoadBalancer,omitempty"` + + // load balancers' source ranges are the same for master and replica services + AllowedSourceRanges []string `json:"allowedSourceRanges"` + + NumberOfInstances int32 `json:"numberOfInstances"` + Users map[string]UserFlags `json:"users"` + MaintenanceWindows []MaintenanceWindow `json:"maintenanceWindows,omitempty"` + Clone CloneDescription `json:"clone"` + ClusterName string `json:"-"` + Databases map[string]string `json:"databases,omitempty"` + Tolerations []v1.Toleration `json:"tolerations,omitempty"` + Sidecars []Sidecar `json:"sidecars,omitempty"` + PodPriorityClassName string `json:"pod_priority_class_name,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// PostgresqlList defines a list of PostgreSQL clusters. +type PostgresqlList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + Items []Postgresql `json:"items"` +} + +// MaintenanceWindow describes the time window when the operator is allowed to do maintenance on a cluster. 
+type MaintenanceWindow struct { + Everyday bool + Weekday time.Weekday + StartTime metav1.Time // Start time + EndTime metav1.Time // End time +} + +// Volume describes a single volume in the manifest. +type Volume struct { + Size string `json:"size"` + StorageClass string `json:"storageClass"` +} + +// PostgresqlParam describes PostgreSQL version and pairs of configuration parameter name - values. +type PostgresqlParam struct { + PgVersion string `json:"version"` + Parameters map[string]string `json:"parameters"` +} + +// ResourceDescription describes CPU and memory resources defined for a cluster. +type ResourceDescription struct { + CPU string `json:"cpu"` + Memory string `json:"memory"` +} + +// Resources describes requests and limits for the cluster resources. +type Resources struct { + ResourceRequest ResourceDescription `json:"requests,omitempty"` + ResourceLimits ResourceDescription `json:"limits,omitempty"` +} + +// Patroni contains Patroni-specific configuration +type Patroni struct { + InitDB map[string]string `json:"initdb"` + PgHba []string `json:"pg_hba"` + TTL uint32 `json:"ttl"` + LoopWait uint32 `json:"loop_wait"` + RetryTimeout uint32 `json:"retry_timeout"` + MaximumLagOnFailover float32 `json:"maximum_lag_on_failover"` // float32 because https://github.com/kubernetes/kubernetes/issues/30213 +} + +// CloneDescription describes which cluster the new cluster should clone and up to which point in time +type CloneDescription struct { + ClusterName string `json:"cluster,omitempty"` + UID string `json:"uid,omitempty"` + EndTimestamp string `json:"timestamp,omitempty"` +} + +// Sidecar defines a container to be run in the same pod as the Postgres container. 
+type Sidecar struct { + Resources `json:"resources,omitempty"` + Name string `json:"name,omitempty"` + DockerImage string `json:"image,omitempty"` + Ports []v1.ContainerPort `json:"ports,omitempty"` + Env []v1.EnvVar `json:"env,omitempty"` +} + +// UserFlags defines flags (such as superuser, nologin) that could be assigned to individual users +type UserFlags []string + +// PostgresStatus contains status of the PostgreSQL cluster (running, creation failed etc.) +type PostgresStatus string diff --git a/pkg/apis/acid.zalan.do/v1/register.go b/pkg/apis/acid.zalan.do/v1/register.go new file mode 100644 index 000000000..210bf782e --- /dev/null +++ b/pkg/apis/acid.zalan.do/v1/register.go @@ -0,0 +1,58 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + + "github.com/zalando-incubator/postgres-operator/pkg/apis/acid.zalan.do" +) + +const ( + PostgresCRDResourceKind = "postgresql" + PostgresCRDResourcePlural = "postgresqls" + PostgresCRDResouceName = PostgresCRDResourcePlural + "." + acidzalando.GroupName + PostgresCRDResourceShort = "pg" + + OperatorConfigCRDResouceKind = "OperatorConfiguration" + OperatorConfigCRDResourcePlural = "operatorconfigurations" + OperatorConfigCRDResourceName = OperatorConfigCRDResourcePlural + "." + acidzalando.GroupName + OperatorConfigCRDResourceShort = "opconfig" + + APIVersion = "v1" +) + +var ( + // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. + SchemeBuilder runtime.SchemeBuilder + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme + SchemeGroupVersion = schema.GroupVersion{Group: acidzalando.GroupName, Version: APIVersion} +) + +func init() { + // We only register manually written functions here. The registration of the + // generated functions takes place in the generated files. The separation + // makes the code compile even when the generated files are missing. 
+ localSchemeBuilder.Register(addKnownTypes) +} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + // AddKnownType derives the type kind from the type name, which is always uppercase. + // For our CRDs we use lowercase names historically, therefore we have to supply the name separately. + // TODO: Use uppercase CRDResourceKind of our types in the next major API version + scheme.AddKnownTypeWithName(SchemeGroupVersion.WithKind("postgresql"), &Postgresql{}) + scheme.AddKnownTypeWithName(SchemeGroupVersion.WithKind("postgresqlList"), &PostgresqlList{}) + scheme.AddKnownTypeWithName(SchemeGroupVersion.WithKind("OperatorConfiguration"), + &OperatorConfiguration{}) + scheme.AddKnownTypeWithName(SchemeGroupVersion.WithKind("OperatorConfigurationList"), + &OperatorConfigurationList{}) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/pkg/apis/acid.zalan.do/v1/util.go b/pkg/apis/acid.zalan.do/v1/util.go new file mode 100644 index 000000000..7d071ce22 --- /dev/null +++ b/pkg/apis/acid.zalan.do/v1/util.go @@ -0,0 +1,93 @@ +package v1 + +import ( + "fmt" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "regexp" + "strings" + "time" +) + +var ( + weekdays = map[string]int{"Sun": 0, "Mon": 1, "Tue": 2, "Wed": 3, "Thu": 4, "Fri": 5, "Sat": 6} + serviceNameRegex = regexp.MustCompile(serviceNameRegexString) +) + +func (p *Postgresql) Clone() *Postgresql { + if p == nil { + return nil + } + return p.DeepCopy() +} + +func parseTime(s string) (metav1.Time, error) { + parts := strings.Split(s, ":") + if len(parts) != 2 { + return metav1.Time{}, fmt.Errorf("incorrect time format") + } + timeLayout := "15:04" + + tp, err := time.Parse(timeLayout, s) + if err != nil { + return 
metav1.Time{}, err + } + + return metav1.Time{Time: tp.UTC()}, nil +} + +func parseWeekday(s string) (time.Weekday, error) { + weekday, ok := weekdays[s] + if !ok { + return time.Weekday(0), fmt.Errorf("incorrect weekday") + } + + return time.Weekday(weekday), nil +} + +func extractClusterName(clusterName string, teamName string) (string, error) { + teamNameLen := len(teamName) + if len(clusterName) < teamNameLen+2 { + return "", fmt.Errorf("name is too short") + } + + if teamNameLen == 0 { + return "", fmt.Errorf("team name is empty") + } + + if strings.ToLower(clusterName[:teamNameLen+1]) != strings.ToLower(teamName)+"-" { + return "", fmt.Errorf("name must match {TEAM}-{NAME} format") + } + if len(clusterName) > clusterNameMaxLength { + return "", fmt.Errorf("name cannot be longer than %d characters", clusterNameMaxLength) + } + if !serviceNameRegex.MatchString(clusterName) { + return "", fmt.Errorf("name must confirm to DNS-1035, regex used for validation is %q", + serviceNameRegexString) + } + + return clusterName[teamNameLen+1:], nil +} + +func validateCloneClusterDescription(clone *CloneDescription) error { + // when cloning from the basebackup (no end timestamp) check that the cluster name is a valid service name + if clone.ClusterName != "" && clone.EndTimestamp == "" { + if !serviceNameRegex.MatchString(clone.ClusterName) { + return fmt.Errorf("clone cluster name must confirm to DNS-1035, regex used for validation is %q", + serviceNameRegexString) + } + if len(clone.ClusterName) > serviceNameMaxLength { + return fmt.Errorf("clone cluster name must be no longer than %d characters", serviceNameMaxLength) + } + } + return nil +} + +func (status PostgresStatus) Success() bool { + return status != ClusterStatusAddFailed && + status != ClusterStatusUpdateFailed && + status != ClusterStatusSyncFailed +} + +func (status PostgresStatus) String() string { + return string(status) +} diff --git a/pkg/spec/postgresql_test.go b/pkg/apis/acid.zalan.do/v1/util_test.go 
similarity index 95% rename from pkg/spec/postgresql_test.go rename to pkg/apis/acid.zalan.do/v1/util_test.go index aef698782..d1f06a2cc 100644 --- a/pkg/spec/postgresql_test.go +++ b/pkg/apis/acid.zalan.do/v1/util_test.go @@ -1,4 +1,4 @@ -package spec +package v1 import ( "bytes" @@ -12,15 +12,15 @@ import ( var parseTimeTests = []struct { in string - out time.Time + out metav1.Time err error }{ {"16:08", mustParseTime("16:08"), nil}, {"11:00", mustParseTime("11:00"), nil}, {"23:59", mustParseTime("23:59"), nil}, - {"26:09", time.Now(), errors.New(`parsing time "26:09": hour out of range`)}, - {"23:69", time.Now(), errors.New(`parsing time "23:69": minute out of range`)}, + {"26:09", metav1.Now(), errors.New(`parsing time "26:09": hour out of range`)}, + {"23:69", metav1.Now(), errors.New(`parsing time "23:69": minute out of range`)}, } var parseWeekdayTests = []struct { @@ -125,13 +125,13 @@ var unmarshalCluster = []struct { Name: "acid-testcluster1", }, Status: ClusterStatusInvalid, - Error: &json.UnmarshalTypeError{ + Error: (&json.UnmarshalTypeError{ Value: "number", Type: reflect.TypeOf(""), Offset: 126, Struct: "PostgresSpec", Field: "teamId", - }, + }).Error(), }, []byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"acid-testcluster1","creationTimestamp":null},"spec":{"postgresql":{"version":"","parameters":null},"volume":{"size":"","storageClass":""},"patroni":{"initdb":null,"pg_hba":null,"ttl":0,"loop_wait":0,"retry_timeout":0,"maximum_lag_on_failover":0},"resources":{"requests":{"cpu":"","memory":""},"limits":{"cpu":"","memory":""}},"teamId":"","allowedSourceRanges":null,"numberOfInstances":0,"users":null,"clone":{}},"status":"Invalid"}`), nil}, {[]byte(`{ @@ -264,7 +264,7 @@ var unmarshalCluster = []struct { }, ClusterName: "testcluster1", }, - Error: nil, + Error: "", }, 
[]byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"acid-testcluster1","creationTimestamp":null},"spec":{"postgresql":{"version":"9.6","parameters":{"log_statement":"all","max_connections":"10","shared_buffers":"32MB"}},"volume":{"size":"5Gi","storageClass":"SSD"},"patroni":{"initdb":{"data-checksums":"true","encoding":"UTF8","locale":"en_US.UTF-8"},"pg_hba":["hostssl all all 0.0.0.0/0 md5","host all all 0.0.0.0/0 md5"],"ttl":30,"loop_wait":10,"retry_timeout":10,"maximum_lag_on_failover":33554432},"resources":{"requests":{"cpu":"10m","memory":"50Mi"},"limits":{"cpu":"300m","memory":"3000Mi"}},"teamId":"ACID","allowedSourceRanges":["127.0.0.1/32"],"numberOfInstances":2,"users":{"zalando":["superuser","createdb"]},"maintenanceWindows":["Mon:01:00-06:00","Sat:00:00-04:00","05:00-05:15"],"clone":{"cluster":"acid-batman"}}}`), nil}, { @@ -279,7 +279,7 @@ var unmarshalCluster = []struct { }, Spec: PostgresSpec{TeamID: "acid"}, Status: ClusterStatusInvalid, - Error: errors.New("name must match {TEAM}-{NAME} format"), + Error: errors.New("name must match {TEAM}-{NAME} format").Error(), }, []byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"teapot-testcluster1","creationTimestamp":null},"spec":{"postgresql":{"version":"","parameters":null},"volume":{"size":"","storageClass":""},"patroni":{"initdb":null,"pg_hba":null,"ttl":0,"loop_wait":0,"retry_timeout":0,"maximum_lag_on_failover":0},"resources":{"requests":{"cpu":"","memory":""},"limits":{"cpu":"","memory":""}},"teamId":"acid","allowedSourceRanges":null,"numberOfInstances":0,"users":null,"clone":{}},"status":"Invalid"}`), nil}, { @@ -299,7 +299,7 @@ var unmarshalCluster = []struct { }, ClusterName: "testcluster1", }, - Error: nil, + Error: "", }, marshal: 
[]byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"acid-testcluster1","creationTimestamp":null},"spec":{"postgresql":{"version":"","parameters":null},"volume":{"size":"","storageClass":""},"patroni":{"initdb":null,"pg_hba":null,"ttl":0,"loop_wait":0,"retry_timeout":0,"maximum_lag_on_failover":0},"resources":{"requests":{"cpu":"","memory":""},"limits":{"cpu":"","memory":""}},"teamId":"acid","allowedSourceRanges":null,"numberOfInstances":0,"users":null,"clone":{"cluster":"team-batman"}}}`), err: nil}, {[]byte(`{"kind": "Postgresql","apiVersion": "acid.zalan.do/v1"`), @@ -344,7 +344,7 @@ var postgresqlList = []struct { NumberOfInstances: 1, }, Status: ClusterStatusRunning, - Error: nil, + Error: "", }}, }, nil}, @@ -352,13 +352,13 @@ var postgresqlList = []struct { PostgresqlList{}, errors.New("unexpected end of JSON input")}} -func mustParseTime(s string) time.Time { +func mustParseTime(s string) metav1.Time { v, err := time.Parse("15:04", s) if err != nil { panic(err) } - return v.UTC() + return metav1.Time{Time: v.UTC()} } func TestParseTime(t *testing.T) { @@ -509,25 +509,6 @@ func TestPostgresMeta(t *testing.T) { } } -func TestUnmarshalPostgresList(t *testing.T) { - for _, tt := range postgresqlList { - var list PostgresqlList - err := list.UnmarshalJSON(tt.in) - if err != nil { - if tt.err == nil || err.Error() != tt.err.Error() { - t.Errorf("PostgresqlList unmarshal expected error: %v, got: %v", tt.err, err) - } - continue - } else if tt.err != nil { - t.Errorf("Expected error: %v", tt.err) - } - - if !reflect.DeepEqual(list, tt.out) { - t.Errorf("Postgresql list unmarshall expected: %#v, got: %#v", tt.out, list) - } - } -} - func TestPostgresListMeta(t *testing.T) { for _, tt := range postgresqlList { if tt.err != nil { @@ -549,7 +530,7 @@ func TestPostgresListMeta(t *testing.T) { func TestPostgresqlClone(t *testing.T) { for _, tt := range unmarshalCluster { cp := &tt.out - cp.Error = nil + cp.Error = "" clone := cp.Clone() if 
!reflect.DeepEqual(clone, cp) { t.Errorf("TestPostgresqlClone expected: \n%#v\n, got \n%#v", cp, clone) diff --git a/pkg/apis/acid.zalan.do/v1/zz_generated.deepcopy.go b/pkg/apis/acid.zalan.do/v1/zz_generated.deepcopy.go new file mode 100644 index 000000000..01280e548 --- /dev/null +++ b/pkg/apis/acid.zalan.do/v1/zz_generated.deepcopy.go @@ -0,0 +1,681 @@ +// +build !ignore_autogenerated + +/* +Copyright 2018 Compose, Zalando SE + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSGCPConfiguration) DeepCopyInto(out *AWSGCPConfiguration) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSGCPConfiguration. 
+func (in *AWSGCPConfiguration) DeepCopy() *AWSGCPConfiguration { + if in == nil { + return nil + } + out := new(AWSGCPConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloneDescription) DeepCopyInto(out *CloneDescription) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloneDescription. +func (in *CloneDescription) DeepCopy() *CloneDescription { + if in == nil { + return nil + } + out := new(CloneDescription) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubernetesMetaConfiguration) DeepCopyInto(out *KubernetesMetaConfiguration) { + *out = *in + out.OAuthTokenSecretName = in.OAuthTokenSecretName + out.InfrastructureRolesSecretName = in.InfrastructureRolesSecretName + if in.ClusterLabels != nil { + in, out := &in.ClusterLabels, &out.ClusterLabels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.NodeReadinessLabel != nil { + in, out := &in.NodeReadinessLabel, &out.NodeReadinessLabel + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.PodToleration != nil { + in, out := &in.PodToleration, &out.PodToleration + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubernetesMetaConfiguration. +func (in *KubernetesMetaConfiguration) DeepCopy() *KubernetesMetaConfiguration { + if in == nil { + return nil + } + out := new(KubernetesMetaConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *LoadBalancerConfiguration) DeepCopyInto(out *LoadBalancerConfiguration) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoadBalancerConfiguration. +func (in *LoadBalancerConfiguration) DeepCopy() *LoadBalancerConfiguration { + if in == nil { + return nil + } + out := new(LoadBalancerConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LoggingRESTAPIConfiguration) DeepCopyInto(out *LoggingRESTAPIConfiguration) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoggingRESTAPIConfiguration. +func (in *LoggingRESTAPIConfiguration) DeepCopy() *LoggingRESTAPIConfiguration { + if in == nil { + return nil + } + out := new(LoggingRESTAPIConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MaintenanceWindow) DeepCopyInto(out *MaintenanceWindow) { + *out = *in + in.StartTime.DeepCopyInto(&out.StartTime) + in.EndTime.DeepCopyInto(&out.EndTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MaintenanceWindow. +func (in *MaintenanceWindow) DeepCopy() *MaintenanceWindow { + if in == nil { + return nil + } + out := new(MaintenanceWindow) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OperatorConfiguration) DeepCopyInto(out *OperatorConfiguration) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Configuration.DeepCopyInto(&out.Configuration) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorConfiguration. +func (in *OperatorConfiguration) DeepCopy() *OperatorConfiguration { + if in == nil { + return nil + } + out := new(OperatorConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OperatorConfiguration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OperatorConfigurationData) DeepCopyInto(out *OperatorConfigurationData) { + *out = *in + if in.Sidecars != nil { + in, out := &in.Sidecars, &out.Sidecars + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + out.PostgresUsersConfiguration = in.PostgresUsersConfiguration + in.Kubernetes.DeepCopyInto(&out.Kubernetes) + out.PostgresPodResources = in.PostgresPodResources + out.Timeouts = in.Timeouts + out.LoadBalancer = in.LoadBalancer + out.AWSGCP = in.AWSGCP + out.OperatorDebug = in.OperatorDebug + in.TeamsAPI.DeepCopyInto(&out.TeamsAPI) + out.LoggingRESTAPI = in.LoggingRESTAPI + out.Scalyr = in.Scalyr + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorConfigurationData. +func (in *OperatorConfigurationData) DeepCopy() *OperatorConfigurationData { + if in == nil { + return nil + } + out := new(OperatorConfigurationData) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OperatorConfigurationList) DeepCopyInto(out *OperatorConfigurationList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]OperatorConfiguration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorConfigurationList. +func (in *OperatorConfigurationList) DeepCopy() *OperatorConfigurationList { + if in == nil { + return nil + } + out := new(OperatorConfigurationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OperatorConfigurationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OperatorConfigurationUsers) DeepCopyInto(out *OperatorConfigurationUsers) { + *out = *in + if in.ProtectedRoles != nil { + in, out := &in.ProtectedRoles, &out.ProtectedRoles + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.TeamAPIRoleConfiguration != nil { + in, out := &in.TeamAPIRoleConfiguration, &out.TeamAPIRoleConfiguration + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorConfigurationUsers. +func (in *OperatorConfigurationUsers) DeepCopy() *OperatorConfigurationUsers { + if in == nil { + return nil + } + out := new(OperatorConfigurationUsers) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OperatorDebugConfiguration) DeepCopyInto(out *OperatorDebugConfiguration) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorDebugConfiguration. +func (in *OperatorDebugConfiguration) DeepCopy() *OperatorDebugConfiguration { + if in == nil { + return nil + } + out := new(OperatorDebugConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OperatorTimeouts) DeepCopyInto(out *OperatorTimeouts) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorTimeouts. +func (in *OperatorTimeouts) DeepCopy() *OperatorTimeouts { + if in == nil { + return nil + } + out := new(OperatorTimeouts) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Patroni) DeepCopyInto(out *Patroni) { + *out = *in + if in.InitDB != nil { + in, out := &in.InitDB, &out.InitDB + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.PgHba != nil { + in, out := &in.PgHba, &out.PgHba + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Patroni. +func (in *Patroni) DeepCopy() *Patroni { + if in == nil { + return nil + } + out := new(Patroni) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PostgresPodResourcesDefaults) DeepCopyInto(out *PostgresPodResourcesDefaults) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresPodResourcesDefaults. 
+func (in *PostgresPodResourcesDefaults) DeepCopy() *PostgresPodResourcesDefaults { + if in == nil { + return nil + } + out := new(PostgresPodResourcesDefaults) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PostgresSpec) DeepCopyInto(out *PostgresSpec) { + *out = *in + in.PostgresqlParam.DeepCopyInto(&out.PostgresqlParam) + out.Volume = in.Volume + in.Patroni.DeepCopyInto(&out.Patroni) + out.Resources = in.Resources + if in.EnableMasterLoadBalancer != nil { + in, out := &in.EnableMasterLoadBalancer, &out.EnableMasterLoadBalancer + *out = new(bool) + **out = **in + } + if in.EnableReplicaLoadBalancer != nil { + in, out := &in.EnableReplicaLoadBalancer, &out.EnableReplicaLoadBalancer + *out = new(bool) + **out = **in + } + if in.UseLoadBalancer != nil { + in, out := &in.UseLoadBalancer, &out.UseLoadBalancer + *out = new(bool) + **out = **in + } + if in.ReplicaLoadBalancer != nil { + in, out := &in.ReplicaLoadBalancer, &out.ReplicaLoadBalancer + *out = new(bool) + **out = **in + } + if in.AllowedSourceRanges != nil { + in, out := &in.AllowedSourceRanges, &out.AllowedSourceRanges + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Users != nil { + in, out := &in.Users, &out.Users + *out = make(map[string]UserFlags, len(*in)) + for key, val := range *in { + var outVal []string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = make(UserFlags, len(*in)) + copy(*out, *in) + } + (*out)[key] = outVal + } + } + if in.MaintenanceWindows != nil { + in, out := &in.MaintenanceWindows, &out.MaintenanceWindows + *out = make([]MaintenanceWindow, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + out.Clone = in.Clone + if in.Databases != nil { + in, out := &in.Databases, &out.Databases + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if 
in.Tolerations != nil { + in, out := &in.Tolerations, &out.Tolerations + *out = make([]corev1.Toleration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Sidecars != nil { + in, out := &in.Sidecars, &out.Sidecars + *out = make([]Sidecar, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresSpec. +func (in *PostgresSpec) DeepCopy() *PostgresSpec { + if in == nil { + return nil + } + out := new(PostgresSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PostgresUsersConfiguration) DeepCopyInto(out *PostgresUsersConfiguration) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresUsersConfiguration. +func (in *PostgresUsersConfiguration) DeepCopy() *PostgresUsersConfiguration { + if in == nil { + return nil + } + out := new(PostgresUsersConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Postgresql) DeepCopyInto(out *Postgresql) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Postgresql. +func (in *Postgresql) DeepCopy() *Postgresql { + if in == nil { + return nil + } + out := new(Postgresql) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *Postgresql) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PostgresqlList) DeepCopyInto(out *PostgresqlList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Postgresql, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresqlList. +func (in *PostgresqlList) DeepCopy() *PostgresqlList { + if in == nil { + return nil + } + out := new(PostgresqlList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PostgresqlList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PostgresqlParam) DeepCopyInto(out *PostgresqlParam) { + *out = *in + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresqlParam. +func (in *PostgresqlParam) DeepCopy() *PostgresqlParam { + if in == nil { + return nil + } + out := new(PostgresqlParam) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResourceDescription) DeepCopyInto(out *ResourceDescription) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceDescription. +func (in *ResourceDescription) DeepCopy() *ResourceDescription { + if in == nil { + return nil + } + out := new(ResourceDescription) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Resources) DeepCopyInto(out *Resources) { + *out = *in + out.ResourceRequest = in.ResourceRequest + out.ResourceLimits = in.ResourceLimits + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Resources. +func (in *Resources) DeepCopy() *Resources { + if in == nil { + return nil + } + out := new(Resources) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScalyrConfiguration) DeepCopyInto(out *ScalyrConfiguration) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScalyrConfiguration. +func (in *ScalyrConfiguration) DeepCopy() *ScalyrConfiguration { + if in == nil { + return nil + } + out := new(ScalyrConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Sidecar) DeepCopyInto(out *Sidecar) { + *out = *in + out.Resources = in.Resources + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]corev1.ContainerPort, len(*in)) + copy(*out, *in) + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]corev1.EnvVar, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Sidecar. +func (in *Sidecar) DeepCopy() *Sidecar { + if in == nil { + return nil + } + out := new(Sidecar) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TeamsAPIConfiguration) DeepCopyInto(out *TeamsAPIConfiguration) { + *out = *in + if in.TeamAPIRoleConfiguration != nil { + in, out := &in.TeamAPIRoleConfiguration, &out.TeamAPIRoleConfiguration + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.ProtectedRoles != nil { + in, out := &in.ProtectedRoles, &out.ProtectedRoles + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TeamsAPIConfiguration. +func (in *TeamsAPIConfiguration) DeepCopy() *TeamsAPIConfiguration { + if in == nil { + return nil + } + out := new(TeamsAPIConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in UserFlags) DeepCopyInto(out *UserFlags) { + { + in := &in + *out = make(UserFlags, len(*in)) + copy(*out, *in) + return + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserFlags. 
+func (in UserFlags) DeepCopy() UserFlags { + if in == nil { + return nil + } + out := new(UserFlags) + in.DeepCopyInto(out) + return *out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Volume) DeepCopyInto(out *Volume) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Volume. +func (in *Volume) DeepCopy() *Volume { + if in == nil { + return nil + } + out := new(Volume) + in.DeepCopyInto(out) + return out +} diff --git a/pkg/apiserver/apiserver.go b/pkg/apiserver/apiserver.go index ac26a9114..54c870287 100644 --- a/pkg/apiserver/apiserver.go +++ b/pkg/apiserver/apiserver.go @@ -13,6 +13,7 @@ import ( "github.com/Sirupsen/logrus" + "github.com/zalando-incubator/postgres-operator/pkg/cluster" "github.com/zalando-incubator/postgres-operator/pkg/spec" "github.com/zalando-incubator/postgres-operator/pkg/util" "github.com/zalando-incubator/postgres-operator/pkg/util/config" @@ -30,14 +31,14 @@ type controllerInformer interface { GetOperatorConfig() *config.Config GetStatus() *spec.ControllerStatus TeamClusterList() map[string][]spec.NamespacedName - ClusterStatus(team, namespace, cluster string) (*spec.ClusterStatus, error) + ClusterStatus(team, namespace, cluster string) (*cluster.ClusterStatus, error) ClusterLogs(team, namespace, cluster string) ([]*spec.LogEntry, error) ClusterHistory(team, namespace, cluster string) ([]*spec.Diff, error) ClusterDatabasesMap() map[string][]string WorkerLogs(workerID uint32) ([]*spec.LogEntry, error) ListQueue(workerID uint32) (*spec.QueueDump, error) GetWorkersCnt() uint32 - WorkerStatus(workerID uint32) (*spec.WorkerStatus, error) + WorkerStatus(workerID uint32) (*cluster.WorkerStatus, error) } // Server describes HTTP API server @@ -228,7 +229,7 @@ func (s *Server) workers(w http.ResponseWriter, req *http.Request) { resp, err = s.controller.ListQueue(workerID) } else if matches := 
util.FindNamedStringSubmatch(workerStatusURL, req.URL.Path); matches != nil { - var workerStatus *spec.WorkerStatus + var workerStatus *cluster.WorkerStatus workerID := mustConvertToUint32(matches["id"]) resp = "idle" diff --git a/pkg/cluster/cluster.go b/pkg/cluster/cluster.go index 49bab8599..da266041e 100644 --- a/pkg/cluster/cluster.go +++ b/pkg/cluster/cluster.go @@ -4,7 +4,6 @@ package cluster import ( "database/sql" - "encoding/json" "fmt" "reflect" "regexp" @@ -20,6 +19,8 @@ import ( "k8s.io/client-go/rest" "k8s.io/client-go/tools/cache" + "encoding/json" + acidv1 "github.com/zalando-incubator/postgres-operator/pkg/apis/acid.zalan.do/v1" "github.com/zalando-incubator/postgres-operator/pkg/spec" "github.com/zalando-incubator/postgres-operator/pkg/util" "github.com/zalando-incubator/postgres-operator/pkg/util/config" @@ -60,13 +61,13 @@ type kubeResources struct { // Cluster describes postgresql cluster type Cluster struct { kubeResources - spec.Postgresql + acidv1.Postgresql Config logger *logrus.Entry patroni patroni.Interface pgUsers map[string]spec.PgUser systemUsers map[string]spec.PgUser - podSubscribers map[spec.NamespacedName]chan spec.PodEvent + podSubscribers map[spec.NamespacedName]chan PodEvent podSubscribersMu sync.RWMutex pgDb *sql.DB mu sync.Mutex @@ -77,7 +78,7 @@ type Cluster struct { teamsAPIClient teams.Interface oauthTokenGetter OAuthTokenGetter KubeClient k8sutil.KubernetesClient //TODO: move clients to the better place? - currentProcess spec.Process + currentProcess Process processMu sync.RWMutex // protects the current operation for reporting, no need to hold the master mutex specMu sync.RWMutex // protects the spec for reporting, no need to hold the master mutex } @@ -90,11 +91,11 @@ type compareStatefulsetResult struct { } // New creates a new cluster. This function should be called from a controller. 
-func New(cfg Config, kubeClient k8sutil.KubernetesClient, pgSpec spec.Postgresql, logger *logrus.Entry) *Cluster { +func New(cfg Config, kubeClient k8sutil.KubernetesClient, pgSpec acidv1.Postgresql, logger *logrus.Entry) *Cluster { deletePropagationPolicy := metav1.DeletePropagationOrphan podEventsQueue := cache.NewFIFO(func(obj interface{}) (string, error) { - e, ok := obj.(spec.PodEvent) + e, ok := obj.(PodEvent) if !ok { return "", fmt.Errorf("could not cast to PodEvent") } @@ -107,7 +108,7 @@ func New(cfg Config, kubeClient k8sutil.KubernetesClient, pgSpec spec.Postgresql Postgresql: pgSpec, pgUsers: make(map[string]spec.PgUser), systemUsers: make(map[string]spec.PgUser), - podSubscribers: make(map[spec.NamespacedName]chan spec.PodEvent), + podSubscribers: make(map[spec.NamespacedName]chan PodEvent), kubeResources: kubeResources{ Secrets: make(map[types.UID]*v1.Secret), Services: make(map[PostgresRole]*v1.Service), @@ -141,39 +142,36 @@ func (c *Cluster) teamName() string { func (c *Cluster) setProcessName(procName string, args ...interface{}) { c.processMu.Lock() defer c.processMu.Unlock() - c.currentProcess = spec.Process{ + c.currentProcess = Process{ Name: fmt.Sprintf(procName, args...), StartTime: time.Now(), } } -func (c *Cluster) setStatus(status spec.PostgresStatus) { - c.Status = status - b, err := json.Marshal(status) +func (c *Cluster) setStatus(status acidv1.PostgresStatus) { + // TODO: eventually switch to updateStatus() for kubernetes 1.11 and above + var ( + err error + b []byte + ) + if b, err = json.Marshal(status); err != nil { + c.logger.Errorf("could not marshal status: %v", err) + } + + patch := []byte(fmt.Sprintf(`{"status": %s}`, string(b))) + // we cannot do a full scale update here without fetching the previous manifest (as the resourceVersion may differ), + // however, we could do patch without it. In the future, once /status subresource is there (starting Kubernets 1.11) + // we should take advantage of it. 
+ newspec, err := c.KubeClient.AcidV1ClientSet.AcidV1().Postgresqls(c.OpConfig.WatchedNamespace).Patch(c.Name, types.MergePatchType, patch) if err != nil { - c.logger.Fatalf("could not marshal status: %v", err) - } - request := []byte(fmt.Sprintf(`{"status": %s}`, string(b))) //TODO: Look into/wait for k8s go client methods - - _, err = c.KubeClient.CRDREST.Patch(types.MergePatchType). - Namespace(c.Namespace). - Resource(constants.PostgresCRDResource). - Name(c.Name). - Body(request). - DoRaw() - - if k8sutil.ResourceNotFound(err) { - c.logger.Warningf("could not set %q status for the non-existing cluster", status) - return - } - - if err != nil { - c.logger.Warningf("could not set %q status for the cluster: %v", status, err) + c.logger.Errorf("could not update status: %v", err) } + // update the spec, maintaining the new resourceVersion. + c.setSpec(newspec) } func (c *Cluster) isNewCluster() bool { - return c.Status == spec.ClusterStatusCreating + return c.Status == acidv1.ClusterStatusCreating } // initUsers populates c.systemUsers and c.pgUsers maps. @@ -215,13 +213,13 @@ func (c *Cluster) Create() error { defer func() { if err == nil { - c.setStatus(spec.ClusterStatusRunning) //TODO: are you sure it's running? + c.setStatus(acidv1.ClusterStatusRunning) //TODO: are you sure it's running? } else { - c.setStatus(spec.ClusterStatusAddFailed) + c.setStatus(acidv1.ClusterStatusAddFailed) } }() - c.setStatus(spec.ClusterStatusCreating) + c.setStatus(acidv1.ClusterStatusCreating) for _, role := range []PostgresRole{Master, Replica} { @@ -482,20 +480,20 @@ func compareResoucesAssumeFirstNotNil(a *v1.ResourceRequirements, b *v1.Resource // Update changes Kubernetes objects according to the new specification. Unlike the sync case, the missing object. // (i.e. service) is treated as an error. 
-func (c *Cluster) Update(oldSpec, newSpec *spec.Postgresql) error { +func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error { updateFailed := false c.mu.Lock() defer c.mu.Unlock() - c.setStatus(spec.ClusterStatusUpdating) + c.setStatus(acidv1.ClusterStatusUpdating) c.setSpec(newSpec) defer func() { if updateFailed { - c.setStatus(spec.ClusterStatusUpdateFailed) - } else if c.Status != spec.ClusterStatusRunning { - c.setStatus(spec.ClusterStatusRunning) + c.setStatus(acidv1.ClusterStatusUpdateFailed) + } else if c.Status != acidv1.ClusterStatusRunning { + c.setStatus(acidv1.ClusterStatusRunning) } }() @@ -631,7 +629,7 @@ func (c *Cluster) Delete() { } //NeedsRepair returns true if the cluster should be included in the repair scan (based on its in-memory status). -func (c *Cluster) NeedsRepair() (bool, spec.PostgresStatus) { +func (c *Cluster) NeedsRepair() (bool, acidv1.PostgresStatus) { c.specMu.RLock() defer c.specMu.RUnlock() return !c.Status.Success(), c.Status @@ -639,20 +637,20 @@ func (c *Cluster) NeedsRepair() (bool, spec.PostgresStatus) { } // ReceivePodEvent is called back by the controller in order to add the cluster's pod event to the queue. 
-func (c *Cluster) ReceivePodEvent(event spec.PodEvent) { +func (c *Cluster) ReceivePodEvent(event PodEvent) { if err := c.podEventsQueue.Add(event); err != nil { c.logger.Errorf("error when receiving pod events: %v", err) } } func (c *Cluster) processPodEvent(obj interface{}) error { - event, ok := obj.(spec.PodEvent) + event, ok := obj.(PodEvent) if !ok { return fmt.Errorf("could not cast to PodEvent") } c.podSubscribersMu.RLock() - subscriber, ok := c.podSubscribers[event.PodName] + subscriber, ok := c.podSubscribers[spec.NamespacedName(event.PodName)] c.podSubscribersMu.RUnlock() if ok { subscriber <- event @@ -812,7 +810,7 @@ func (c *Cluster) shouldAvoidProtectedOrSystemRole(username, purpose string) boo } // GetCurrentProcess provides name of the last process of the cluster -func (c *Cluster) GetCurrentProcess() spec.Process { +func (c *Cluster) GetCurrentProcess() Process { c.processMu.RLock() defer c.processMu.RUnlock() @@ -820,8 +818,8 @@ func (c *Cluster) GetCurrentProcess() spec.Process { } // GetStatus provides status of the cluster -func (c *Cluster) GetStatus() *spec.ClusterStatus { - return &spec.ClusterStatus{ +func (c *Cluster) GetStatus() *ClusterStatus { + return &ClusterStatus{ Cluster: c.Spec.ClusterName, Team: c.Spec.TeamID, Status: c.Status, @@ -835,7 +833,7 @@ func (c *Cluster) GetStatus() *spec.ClusterStatus { PodDisruptionBudget: c.GetPodDisruptionBudget(), CurrentProcess: c.GetCurrentProcess(), - Error: c.Error, + Error: fmt.Errorf("error: %s", c.Error), } } diff --git a/pkg/cluster/cluster_test.go b/pkg/cluster/cluster_test.go index 7786e4563..82400344f 100644 --- a/pkg/cluster/cluster_test.go +++ b/pkg/cluster/cluster_test.go @@ -3,6 +3,7 @@ package cluster import ( "fmt" "github.com/Sirupsen/logrus" + acidv1 "github.com/zalando-incubator/postgres-operator/pkg/apis/acid.zalan.do/v1" "github.com/zalando-incubator/postgres-operator/pkg/spec" "github.com/zalando-incubator/postgres-operator/pkg/util/config" 
"github.com/zalando-incubator/postgres-operator/pkg/util/k8sutil" @@ -21,43 +22,43 @@ var logger = logrus.New().WithField("test", "cluster") var cl = New(Config{OpConfig: config.Config{ProtectedRoles: []string{"admin"}, Auth: config.Auth{SuperUsername: superUserName, ReplicationUsername: replicationUserName}}}, - k8sutil.KubernetesClient{}, spec.Postgresql{}, logger) + k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger) func TestInitRobotUsers(t *testing.T) { testName := "TestInitRobotUsers" tests := []struct { - manifestUsers map[string]spec.UserFlags + manifestUsers map[string]acidv1.UserFlags infraRoles map[string]spec.PgUser result map[string]spec.PgUser err error }{ { - manifestUsers: map[string]spec.UserFlags{"foo": {"superuser", "createdb"}}, + manifestUsers: map[string]acidv1.UserFlags{"foo": {"superuser", "createdb"}}, infraRoles: map[string]spec.PgUser{"foo": {Origin: spec.RoleOriginInfrastructure, Name: "foo", Password: "bar"}}, result: map[string]spec.PgUser{"foo": {Origin: spec.RoleOriginInfrastructure, Name: "foo", Password: "bar"}}, err: nil, }, { - manifestUsers: map[string]spec.UserFlags{"!fooBar": {"superuser", "createdb"}}, + manifestUsers: map[string]acidv1.UserFlags{"!fooBar": {"superuser", "createdb"}}, err: fmt.Errorf(`invalid username: "!fooBar"`), }, { - manifestUsers: map[string]spec.UserFlags{"foobar": {"!superuser", "createdb"}}, + manifestUsers: map[string]acidv1.UserFlags{"foobar": {"!superuser", "createdb"}}, err: fmt.Errorf(`invalid flags for user "foobar": ` + `user flag "!superuser" is not alphanumeric`), }, { - manifestUsers: map[string]spec.UserFlags{"foobar": {"superuser1", "createdb"}}, + manifestUsers: map[string]acidv1.UserFlags{"foobar": {"superuser1", "createdb"}}, err: fmt.Errorf(`invalid flags for user "foobar": ` + `user flag "SUPERUSER1" is not valid`), }, { - manifestUsers: map[string]spec.UserFlags{"foobar": {"inherit", "noinherit"}}, + manifestUsers: map[string]acidv1.UserFlags{"foobar": {"inherit", 
"noinherit"}}, err: fmt.Errorf(`invalid flags for user "foobar": ` + `conflicting user flags: "NOINHERIT" and "INHERIT"`), }, { - manifestUsers: map[string]spec.UserFlags{"admin": {"superuser"}, superUserName: {"createdb"}}, + manifestUsers: map[string]acidv1.UserFlags{"admin": {"superuser"}, superUserName: {"createdb"}}, infraRoles: map[string]spec.PgUser{}, result: map[string]spec.PgUser{}, err: nil, diff --git a/pkg/cluster/pg.go b/pkg/cluster/database.go similarity index 100% rename from pkg/cluster/pg.go rename to pkg/cluster/database.go diff --git a/pkg/cluster/k8sres.go b/pkg/cluster/k8sres.go index b620082b5..c2049ff29 100644 --- a/pkg/cluster/k8sres.go +++ b/pkg/cluster/k8sres.go @@ -15,6 +15,7 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/intstr" + acidv1 "github.com/zalando-incubator/postgres-operator/pkg/apis/acid.zalan.do/v1" "github.com/zalando-incubator/postgres-operator/pkg/spec" "github.com/zalando-incubator/postgres-operator/pkg/util" "github.com/zalando-incubator/postgres-operator/pkg/util/constants" @@ -83,17 +84,17 @@ func (c *Cluster) podDisruptionBudgetName() string { return c.OpConfig.PDBNameFormat.Format("cluster", c.Name) } -func (c *Cluster) makeDefaultResources() spec.Resources { +func (c *Cluster) makeDefaultResources() acidv1.Resources { config := c.OpConfig - defaultRequests := spec.ResourceDescription{CPU: config.DefaultCPURequest, Memory: config.DefaultMemoryRequest} - defaultLimits := spec.ResourceDescription{CPU: config.DefaultCPULimit, Memory: config.DefaultMemoryLimit} + defaultRequests := acidv1.ResourceDescription{CPU: config.DefaultCPURequest, Memory: config.DefaultMemoryRequest} + defaultLimits := acidv1.ResourceDescription{CPU: config.DefaultCPULimit, Memory: config.DefaultMemoryLimit} - return spec.Resources{ResourceRequest: defaultRequests, ResourceLimits: defaultLimits} + return acidv1.Resources{ResourceRequest: defaultRequests, ResourceLimits: defaultLimits} } -func 
generateResourceRequirements(resources spec.Resources, defaultResources spec.Resources) (*v1.ResourceRequirements, error) { +func generateResourceRequirements(resources acidv1.Resources, defaultResources acidv1.Resources) (*v1.ResourceRequirements, error) { var err error specRequests := resources.ResourceRequest @@ -114,7 +115,7 @@ func generateResourceRequirements(resources spec.Resources, defaultResources spe return &result, nil } -func fillResourceList(spec spec.ResourceDescription, defaults spec.ResourceDescription) (v1.ResourceList, error) { +func fillResourceList(spec acidv1.ResourceDescription, defaults acidv1.ResourceDescription) (v1.ResourceList, error) { var err error requests := v1.ResourceList{} @@ -144,7 +145,7 @@ func fillResourceList(spec spec.ResourceDescription, defaults spec.ResourceDescr return requests, nil } -func generateSpiloJSONConfiguration(pg *spec.PostgresqlParam, patroni *spec.Patroni, pamRoleName string, logger *logrus.Entry) string { +func generateSpiloJSONConfiguration(pg *acidv1.PostgresqlParam, patroni *acidv1.Patroni, pamRoleName string, logger *logrus.Entry) string { config := spiloConfiguration{} config.Bootstrap = pgBootstrap{} @@ -362,8 +363,8 @@ func generateSpiloContainer( } } -func generateSidecarContainers(sidecars []spec.Sidecar, - volumeMounts []v1.VolumeMount, defaultResources spec.Resources, +func generateSidecarContainers(sidecars []acidv1.Sidecar, + volumeMounts []v1.VolumeMount, defaultResources acidv1.Resources, superUserName string, credentialsSecretName string, logger *logrus.Entry) ([]v1.Container, error) { if len(sidecars) > 0 { @@ -438,7 +439,7 @@ func generatePodTemplate( } // generatePodEnvVars generates environment variables for the Spilo Pod -func (c *Cluster) generateSpiloPodEnvVars(uid types.UID, spiloConfiguration string, cloneDescription *spec.CloneDescription, customPodEnvVarsList []v1.EnvVar) []v1.EnvVar { +func (c *Cluster) generateSpiloPodEnvVars(uid types.UID, spiloConfiguration string, 
cloneDescription *acidv1.CloneDescription, customPodEnvVarsList []v1.EnvVar) []v1.EnvVar { envVars := []v1.EnvVar{ { Name: "SCOPE", @@ -555,7 +556,7 @@ func deduplicateEnvVars(input []v1.EnvVar, containerName string, logger *logrus. return result } -func getSidecarContainer(sidecar spec.Sidecar, index int, volumeMounts []v1.VolumeMount, +func getSidecarContainer(sidecar acidv1.Sidecar, index int, volumeMounts []v1.VolumeMount, resources *v1.ResourceRequirements, superUserName string, credentialsSecretName string, logger *logrus.Entry) *v1.Container { name := sidecar.Name if name == "" { @@ -618,20 +619,20 @@ func getBucketScopeSuffix(uid string) string { return "" } -func makeResources(cpuRequest, memoryRequest, cpuLimit, memoryLimit string) spec.Resources { - return spec.Resources{ - ResourceRequest: spec.ResourceDescription{ +func makeResources(cpuRequest, memoryRequest, cpuLimit, memoryLimit string) acidv1.Resources { + return acidv1.Resources{ + ResourceRequest: acidv1.ResourceDescription{ CPU: cpuRequest, Memory: memoryRequest, }, - ResourceLimits: spec.ResourceDescription{ + ResourceLimits: acidv1.ResourceDescription{ CPU: cpuLimit, Memory: memoryLimit, }, } } -func (c *Cluster) generateStatefulSet(spec *spec.PostgresSpec) (*v1beta1.StatefulSet, error) { +func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*v1beta1.StatefulSet, error) { var ( err error @@ -751,14 +752,14 @@ func (c *Cluster) generateStatefulSet(spec *spec.PostgresSpec) (*v1beta1.Statefu } func generateScalyrSidecarSpec(clusterName, APIKey, serverURL, dockerImage string, - containerResources *spec.Resources, logger *logrus.Entry) *spec.Sidecar { + containerResources *acidv1.Resources, logger *logrus.Entry) *acidv1.Sidecar { if APIKey == "" || dockerImage == "" { if APIKey == "" && dockerImage != "" { logger.Warning("Not running Scalyr sidecar: SCALYR_API_KEY must be defined") } return nil } - scalarSpec := &spec.Sidecar{ + scalarSpec := &acidv1.Sidecar{ Name: "scalyr-sidecar", 
DockerImage: dockerImage, Env: []v1.EnvVar{ @@ -780,9 +781,9 @@ func generateScalyrSidecarSpec(clusterName, APIKey, serverURL, dockerImage strin } // mergeSidecar merges globally-defined sidecars with those defined in the cluster manifest -func (c *Cluster) mergeSidecars(sidecars []spec.Sidecar) []spec.Sidecar { +func (c *Cluster) mergeSidecars(sidecars []acidv1.Sidecar) []acidv1.Sidecar { globalSidecarsToSkip := map[string]bool{} - result := make([]spec.Sidecar, 0) + result := make([]acidv1.Sidecar, 0) for i, sidecar := range sidecars { dockerImage, ok := c.OpConfig.Sidecars[sidecar.Name] @@ -798,13 +799,13 @@ func (c *Cluster) mergeSidecars(sidecars []spec.Sidecar) []spec.Sidecar { } for name, dockerImage := range c.OpConfig.Sidecars { if !globalSidecarsToSkip[name] { - result = append(result, spec.Sidecar{Name: name, DockerImage: dockerImage}) + result = append(result, acidv1.Sidecar{Name: name, DockerImage: dockerImage}) } } return result } -func (c *Cluster) getNumberOfInstances(spec *spec.PostgresSpec) int32 { +func (c *Cluster) getNumberOfInstances(spec *acidv1.PostgresSpec) int32 { min := c.OpConfig.MinInstances max := c.OpConfig.MaxInstances cur := spec.NumberOfInstances @@ -907,7 +908,7 @@ func (c *Cluster) generateSingleUserSecret(namespace string, pgUser spec.PgUser) return &secret } -func (c *Cluster) shouldCreateLoadBalancerForService(role PostgresRole, spec *spec.PostgresSpec) bool { +func (c *Cluster) shouldCreateLoadBalancerForService(role PostgresRole, spec *acidv1.PostgresSpec) bool { switch role { @@ -935,7 +936,7 @@ func (c *Cluster) shouldCreateLoadBalancerForService(role PostgresRole, spec *sp } -func (c *Cluster) generateService(role PostgresRole, spec *spec.PostgresSpec) *v1.Service { +func (c *Cluster) generateService(role PostgresRole, spec *acidv1.PostgresSpec) *v1.Service { var dnsName string if role == Master { @@ -1006,7 +1007,7 @@ func (c *Cluster) generateEndpoint(role PostgresRole, subsets []v1.EndpointSubse return endpoints } 
-func (c *Cluster) generateCloneEnvironment(description *spec.CloneDescription) []v1.EnvVar { +func (c *Cluster) generateCloneEnvironment(description *acidv1.CloneDescription) []v1.EnvVar { result := make([]v1.EnvVar, 0) if description.ClusterName == "" { diff --git a/pkg/cluster/k8sres_test.go b/pkg/cluster/k8sres_test.go index 54890660c..12e145c04 100644 --- a/pkg/cluster/k8sres_test.go +++ b/pkg/cluster/k8sres_test.go @@ -1,7 +1,7 @@ package cluster import ( - "github.com/zalando-incubator/postgres-operator/pkg/spec" + acidv1 "github.com/zalando-incubator/postgres-operator/pkg/apis/acid.zalan.do/v1" "github.com/zalando-incubator/postgres-operator/pkg/util/config" "github.com/zalando-incubator/postgres-operator/pkg/util/k8sutil" "testing" @@ -27,41 +27,41 @@ func TestCreateLoadBalancerLogic(t *testing.T) { ReplicationUsername: replicationUserName, }, }, - }, k8sutil.KubernetesClient{}, spec.Postgresql{}, logger) + }, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger) testName := "TestCreateLoadBalancerLogic" tests := []struct { subtest string role PostgresRole - spec *spec.PostgresSpec + spec *acidv1.PostgresSpec opConfig config.Config result bool }{ { subtest: "new format, load balancer is enabled for replica", role: Replica, - spec: &spec.PostgresSpec{EnableReplicaLoadBalancer: True()}, + spec: &acidv1.PostgresSpec{EnableReplicaLoadBalancer: True()}, opConfig: config.Config{}, result: true, }, { subtest: "new format, load balancer is disabled for replica", role: Replica, - spec: &spec.PostgresSpec{EnableReplicaLoadBalancer: False()}, + spec: &acidv1.PostgresSpec{EnableReplicaLoadBalancer: False()}, opConfig: config.Config{}, result: false, }, { subtest: "new format, load balancer isn't specified for replica", role: Replica, - spec: &spec.PostgresSpec{EnableReplicaLoadBalancer: nil}, + spec: &acidv1.PostgresSpec{EnableReplicaLoadBalancer: nil}, opConfig: config.Config{EnableReplicaLoadBalancer: true}, result: true, }, { subtest: "new format, load balancer 
isn't specified for replica", role: Replica, - spec: &spec.PostgresSpec{EnableReplicaLoadBalancer: nil}, + spec: &acidv1.PostgresSpec{EnableReplicaLoadBalancer: nil}, opConfig: config.Config{EnableReplicaLoadBalancer: false}, result: false, }, diff --git a/pkg/cluster/pod.go b/pkg/cluster/pod.go index beb433fa0..ab282b6b9 100644 --- a/pkg/cluster/pod.go +++ b/pkg/cluster/pod.go @@ -97,12 +97,12 @@ func (c *Cluster) unregisterPodSubscriber(podName spec.NamespacedName) { delete(c.podSubscribers, podName) } -func (c *Cluster) registerPodSubscriber(podName spec.NamespacedName) chan spec.PodEvent { +func (c *Cluster) registerPodSubscriber(podName spec.NamespacedName) chan PodEvent { c.logger.Debugf("subscribing to pod %q", podName) c.podSubscribersMu.Lock() defer c.podSubscribersMu.Unlock() - ch := make(chan spec.PodEvent) + ch := make(chan PodEvent) if _, ok := c.podSubscribers[podName]; ok { panic("pod '" + podName.String() + "' is already subscribed") } diff --git a/pkg/cluster/sync.go b/pkg/cluster/sync.go index ad42eeac5..bbe158dd5 100644 --- a/pkg/cluster/sync.go +++ b/pkg/cluster/sync.go @@ -6,6 +6,7 @@ import ( policybeta1 "k8s.io/api/policy/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + acidv1 "github.com/zalando-incubator/postgres-operator/pkg/apis/acid.zalan.do/v1" "github.com/zalando-incubator/postgres-operator/pkg/spec" "github.com/zalando-incubator/postgres-operator/pkg/util" "github.com/zalando-incubator/postgres-operator/pkg/util/constants" @@ -15,7 +16,7 @@ import ( // Sync syncs the cluster, making sure the actual Kubernetes objects correspond to what is defined in the manifest. // Unlike the update, sync does not error out if some objects do not exist and takes care of creating them. 
-func (c *Cluster) Sync(newSpec *spec.Postgresql) error { +func (c *Cluster) Sync(newSpec *acidv1.Postgresql) error { var err error c.mu.Lock() defer c.mu.Unlock() @@ -25,9 +26,9 @@ func (c *Cluster) Sync(newSpec *spec.Postgresql) error { defer func() { if err != nil { c.logger.Warningf("error while syncing cluster state: %v", err) - c.setStatus(spec.ClusterStatusSyncFailed) - } else if c.Status != spec.ClusterStatusRunning { - c.setStatus(spec.ClusterStatusRunning) + c.setStatus(acidv1.ClusterStatusSyncFailed) + } else if c.Status != acidv1.ClusterStatusRunning { + c.setStatus(acidv1.ClusterStatusRunning) } }() diff --git a/pkg/cluster/types.go b/pkg/cluster/types.go index fbaa39fb7..83b7e73fb 100644 --- a/pkg/cluster/types.go +++ b/pkg/cluster/types.go @@ -1,5 +1,14 @@ package cluster +import ( + acidv1 "github.com/zalando-incubator/postgres-operator/pkg/apis/acid.zalan.do/v1" + "k8s.io/api/apps/v1beta1" + "k8s.io/api/core/v1" + policybeta1 "k8s.io/api/policy/v1beta1" + "k8s.io/apimachinery/pkg/types" + "time" +) + // PostgresRole describes role of the node type PostgresRole string @@ -10,3 +19,51 @@ const ( // Replica role Replica PostgresRole = "replica" ) + +type PodEventType string + +// Possible values for the EventType +const ( + PodEventAdd PodEventType = "ADD" + PodEventUpdate PodEventType = "UPDATE" + PodEventDelete PodEventType = "DELETE" +) + +// PodEvent describes the event for a single Pod +type PodEvent struct { + ResourceVersion string + PodName types.NamespacedName + PrevPod *v1.Pod + CurPod *v1.Pod + EventType PodEventType +} + +// Process describes process of the cluster +type Process struct { + Name string + StartTime time.Time +} + +// WorkerStatus describes status of the worker +type WorkerStatus struct { + CurrentCluster types.NamespacedName + CurrentProcess Process +} + +// ClusterStatus describes status of the cluster +type ClusterStatus struct { + Team string + Cluster string + MasterService *v1.Service + ReplicaService *v1.Service + 
MasterEndpoint *v1.Endpoints + ReplicaEndpoint *v1.Endpoints + StatefulSet *v1beta1.StatefulSet + PodDisruptionBudget *policybeta1.PodDisruptionBudget + + CurrentProcess Process + Worker uint32 + Status acidv1.PostgresStatus + Spec acidv1.PostgresSpec + Error error +} diff --git a/pkg/cluster/util.go b/pkg/cluster/util.go index 85a61d969..911bf74f8 100644 --- a/pkg/cluster/util.go +++ b/pkg/cluster/util.go @@ -17,6 +17,8 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" + acidzalando "github.com/zalando-incubator/postgres-operator/pkg/apis/acid.zalan.do" + acidv1 "github.com/zalando-incubator/postgres-operator/pkg/apis/acid.zalan.do/v1" "github.com/zalando-incubator/postgres-operator/pkg/spec" "github.com/zalando-incubator/postgres-operator/pkg/util" "github.com/zalando-incubator/postgres-operator/pkg/util/constants" @@ -203,7 +205,7 @@ func (c *Cluster) logServiceChanges(role PostgresRole, old, new *v1.Service, isU } } -func (c *Cluster) logVolumeChanges(old, new spec.Volume) { +func (c *Cluster) logVolumeChanges(old, new acidv1.Volume) { c.logger.Infof("volume specification has been changed") c.logger.Debugf("diff\n%s\n", util.PrettyDiff(old, new)) } @@ -232,7 +234,7 @@ func (c *Cluster) getTeamMembers() ([]string, error) { return teamInfo.Members, nil } -func (c *Cluster) waitForPodLabel(podEvents chan spec.PodEvent, stopChan chan struct{}, role *PostgresRole) (*v1.Pod, error) { +func (c *Cluster) waitForPodLabel(podEvents chan PodEvent, stopChan chan struct{}, role *PostgresRole) (*v1.Pod, error) { timeout := time.After(c.OpConfig.PodLabelWaitTimeout) for { select { @@ -254,12 +256,12 @@ func (c *Cluster) waitForPodLabel(podEvents chan spec.PodEvent, stopChan chan st } } -func (c *Cluster) waitForPodDeletion(podEvents chan spec.PodEvent) error { +func (c *Cluster) waitForPodDeletion(podEvents chan PodEvent) error { timeout := time.After(c.OpConfig.PodDeletionWaitTimeout) for { select { case podEvent := <-podEvents: - if 
podEvent.EventType == spec.EventDelete { + if podEvent.EventType == PodEventDelete { return nil } case <-timeout: @@ -425,18 +427,18 @@ func (c *Cluster) credentialSecretNameForCluster(username string, clusterName st return c.OpConfig.SecretNameTemplate.Format( "username", strings.Replace(username, "_", "-", -1), "cluster", clusterName, - "tprkind", constants.PostgresCRDKind, - "tprgroup", constants.CRDGroup) + "tprkind", acidv1.PostgresCRDResourceKind, + "tprgroup", acidzalando.GroupName) } func masterCandidate(replicas []spec.NamespacedName) spec.NamespacedName { return replicas[rand.Intn(len(replicas))] } -func cloneSpec(from *spec.Postgresql) (*spec.Postgresql, error) { +func cloneSpec(from *acidv1.Postgresql) (*acidv1.Postgresql, error) { var ( buf bytes.Buffer - result *spec.Postgresql + result *acidv1.Postgresql err error ) enc := gob.NewEncoder(&buf) @@ -450,13 +452,13 @@ func cloneSpec(from *spec.Postgresql) (*spec.Postgresql, error) { return result, nil } -func (c *Cluster) setSpec(newSpec *spec.Postgresql) { +func (c *Cluster) setSpec(newSpec *acidv1.Postgresql) { c.specMu.Lock() c.Postgresql = *newSpec c.specMu.Unlock() } -func (c *Cluster) GetSpec() (*spec.Postgresql, error) { +func (c *Cluster) GetSpec() (*acidv1.Postgresql, error) { c.specMu.RLock() defer c.specMu.RUnlock() return cloneSpec(&c.Postgresql) diff --git a/pkg/cluster/volumes.go b/pkg/cluster/volumes.go index c44ac4a03..9816116c2 100644 --- a/pkg/cluster/volumes.go +++ b/pkg/cluster/volumes.go @@ -9,6 +9,7 @@ import ( "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + acidv1 "github.com/zalando-incubator/postgres-operator/pkg/apis/acid.zalan.do/v1" "github.com/zalando-incubator/postgres-operator/pkg/spec" "github.com/zalando-incubator/postgres-operator/pkg/util" "github.com/zalando-incubator/postgres-operator/pkg/util/constants" @@ -88,7 +89,7 @@ func (c *Cluster) listPersistentVolumes() ([]*v1.PersistentVolume, error) { } // resizeVolumes resize 
persistent volumes compatible with the given resizer interface -func (c *Cluster) resizeVolumes(newVolume spec.Volume, resizers []volumes.VolumeResizer) error { +func (c *Cluster) resizeVolumes(newVolume acidv1.Volume, resizers []volumes.VolumeResizer) error { c.setProcessName("resizing volumes") var totalIncompatible int @@ -158,7 +159,7 @@ func (c *Cluster) resizeVolumes(newVolume spec.Volume, resizers []volumes.Volume return nil } -func (c *Cluster) volumesNeedResizing(newVolume spec.Volume) (bool, error) { +func (c *Cluster) volumesNeedResizing(newVolume acidv1.Volume) (bool, error) { vols, manifestSize, err := c.listVolumesWithManifestSize(newVolume) if err != nil { return false, err @@ -172,7 +173,7 @@ func (c *Cluster) volumesNeedResizing(newVolume spec.Volume) (bool, error) { return false, nil } -func (c *Cluster) listVolumesWithManifestSize(newVolume spec.Volume) ([]*v1.PersistentVolume, int64, error) { +func (c *Cluster) listVolumesWithManifestSize(newVolume acidv1.Volume) ([]*v1.PersistentVolume, int64, error) { newSize, err := resource.ParseQuantity(newVolume.Size) if err != nil { return nil, 0, fmt.Errorf("could not parse volume size from the manifest: %v", err) diff --git a/pkg/controller/controller.go b/pkg/controller/controller.go index 7e340abb3..f99d836b8 100644 --- a/pkg/controller/controller.go +++ b/pkg/controller/controller.go @@ -21,6 +21,8 @@ import ( "github.com/zalando-incubator/postgres-operator/pkg/util/constants" "github.com/zalando-incubator/postgres-operator/pkg/util/k8sutil" "github.com/zalando-incubator/postgres-operator/pkg/util/ringlog" + + acidv1informer "github.com/zalando-incubator/postgres-operator/pkg/generated/informers/externalversions/acid.zalan.do/v1" ) // Controller represents operator controller @@ -46,7 +48,7 @@ type Controller struct { postgresqlInformer cache.SharedIndexInformer podInformer cache.SharedIndexInformer nodesInformer cache.SharedIndexInformer - podCh chan spec.PodEvent + podCh chan cluster.PodEvent 
clusterEventQueues []*cache.FIFO // [workerID]Queue lastClusterSyncTime int64 @@ -74,7 +76,7 @@ func NewController(controllerConfig *spec.ControllerConfig) *Controller { clusterHistory: make(map[spec.NamespacedName]ringlog.RingLogger), teamClusters: make(map[string][]spec.NamespacedName), stopCh: make(chan struct{}), - podCh: make(chan spec.PodEvent), + podCh: make(chan cluster.PodEvent), } logger.Hooks.Add(c) @@ -227,9 +229,9 @@ func (c *Controller) initController() { } } else { c.initOperatorConfig() - c.initPodServiceAccount() - c.initRoleBinding() } + c.initPodServiceAccount() + c.initRoleBinding() c.modifyConfigFromEnvironment() @@ -256,7 +258,7 @@ func (c *Controller) initController() { c.workerLogs = make(map[uint32]ringlog.RingLogger, c.opConfig.Workers) for i := range c.clusterEventQueues { c.clusterEventQueues[i] = cache.NewFIFO(func(obj interface{}) (string, error) { - e, ok := obj.(spec.ClusterEvent) + e, ok := obj.(ClusterEvent) if !ok { return "", fmt.Errorf("could not cast to ClusterEvent") } @@ -269,13 +271,10 @@ func (c *Controller) initController() { } func (c *Controller) initSharedInformers() { - // Postgresqls - c.postgresqlInformer = cache.NewSharedIndexInformer( - &cache.ListWatch{ - ListFunc: c.clusterListFunc, - WatchFunc: c.clusterWatchFunc, - }, - &spec.Postgresql{}, + + c.postgresqlInformer = acidv1informer.NewPostgresqlInformer( + c.KubeClient.AcidV1ClientSet, + c.opConfig.WatchedNamespace, constants.QueueResyncPeriodTPR, cache.Indexers{}) @@ -345,7 +344,6 @@ func (c *Controller) Run(stopCh <-chan struct{}, wg *sync.WaitGroup) { go c.apiserver.Run(stopCh, wg) go c.kubeNodesInformer(stopCh, wg) - c.logger.Info("started working in background") } @@ -361,7 +359,7 @@ func (c *Controller) runPostgresqlInformer(stopCh <-chan struct{}, wg *sync.Wait c.postgresqlInformer.Run(stopCh) } -func queueClusterKey(eventType spec.EventType, uid types.UID) string { +func queueClusterKey(eventType EventType, uid types.UID) string { return 
fmt.Sprintf("%s-%s", eventType, uid) } diff --git a/pkg/controller/status.go b/pkg/controller/logs_and_api.go similarity index 95% rename from pkg/controller/status.go rename to pkg/controller/logs_and_api.go index 73f25f697..7a6619203 100644 --- a/pkg/controller/status.go +++ b/pkg/controller/logs_and_api.go @@ -11,10 +11,11 @@ import ( "github.com/zalando-incubator/postgres-operator/pkg/spec" "github.com/zalando-incubator/postgres-operator/pkg/util" "github.com/zalando-incubator/postgres-operator/pkg/util/config" + "k8s.io/apimachinery/pkg/types" ) // ClusterStatus provides status of the cluster -func (c *Controller) ClusterStatus(team, namespace, cluster string) (*spec.ClusterStatus, error) { +func (c *Controller) ClusterStatus(team, namespace, cluster string) (*cluster.ClusterStatus, error) { clusterName := spec.NamespacedName{ Namespace: namespace, @@ -196,7 +197,7 @@ func (c *Controller) GetWorkersCnt() uint32 { } //WorkerStatus provides status of the worker -func (c *Controller) WorkerStatus(workerID uint32) (*spec.WorkerStatus, error) { +func (c *Controller) WorkerStatus(workerID uint32) (*cluster.WorkerStatus, error) { obj, ok := c.curWorkerCluster.Load(workerID) if !ok || obj == nil { return nil, nil @@ -207,8 +208,8 @@ func (c *Controller) WorkerStatus(workerID uint32) (*spec.WorkerStatus, error) { return nil, fmt.Errorf("could not cast to Cluster struct") } - return &spec.WorkerStatus{ - CurrentCluster: util.NameFromMeta(cl.ObjectMeta), + return &cluster.WorkerStatus{ + CurrentCluster: types.NamespacedName(util.NameFromMeta(cl.ObjectMeta)), CurrentProcess: cl.GetCurrentProcess(), }, nil } diff --git a/pkg/controller/operator_config.go b/pkg/controller/operator_config.go index bdac26a19..251828b32 100644 --- a/pkg/controller/operator_config.go +++ b/pkg/controller/operator_config.go @@ -1,40 +1,27 @@ package controller import ( - "encoding/json" "fmt" "time" + acidv1 "github.com/zalando-incubator/postgres-operator/pkg/apis/acid.zalan.do/v1" 
"github.com/zalando-incubator/postgres-operator/pkg/util/config" - "github.com/zalando-incubator/postgres-operator/pkg/util/constants" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -func (c *Controller) readOperatorConfigurationFromCRD(configObjectNamespace, configObjectName string) (*config.OperatorConfiguration, error) { - var ( - opConfig config.OperatorConfiguration - ) +func (c *Controller) readOperatorConfigurationFromCRD(configObjectNamespace, configObjectName string) (*acidv1.OperatorConfiguration, error) { - req := c.KubeClient.CRDREST.Get(). - Name(configObjectName). - Namespace(configObjectNamespace). - Resource(constants.OperatorConfigCRDResource). - VersionedParams(&metav1.ListOptions{ResourceVersion: "0"}, metav1.ParameterCodec) - - data, err := req.DoRaw() + config, err := c.KubeClient.AcidV1ClientSet.AcidV1().OperatorConfigurations(configObjectNamespace).Get(configObjectName, metav1.GetOptions{}) if err != nil { - return nil, fmt.Errorf("could not get operator configuration object %s: %v", configObjectName, err) - } - if err = json.Unmarshal(data, &opConfig); err != nil { - return nil, fmt.Errorf("could not unmarshal operator configuration object %s, %v", configObjectName, err) + return nil, fmt.Errorf("could not get operator configuration object %q: %v", configObjectName, err) } - return &opConfig, nil + return config, nil } // importConfigurationFromCRD is a transitional function that converts CRD configuration to the one based on the configmap -func (c *Controller) importConfigurationFromCRD(fromCRD *config.OperatorConfigurationData) *config.Config { +func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigurationData) *config.Config { result := &config.Config{} result.EtcdHost = fromCRD.EtcdHost diff --git a/pkg/controller/pod.go b/pkg/controller/pod.go index d3634ff27..d2e12eb30 100644 --- a/pkg/controller/pod.go +++ b/pkg/controller/pod.go @@ -6,8 +6,10 @@ import ( "k8s.io/apimachinery/pkg/runtime" 
"k8s.io/apimachinery/pkg/watch" + "github.com/zalando-incubator/postgres-operator/pkg/cluster" "github.com/zalando-incubator/postgres-operator/pkg/spec" "github.com/zalando-incubator/postgres-operator/pkg/util" + "k8s.io/apimachinery/pkg/types" ) func (c *Controller) podListFunc(options metav1.ListOptions) (runtime.Object, error) { @@ -30,7 +32,7 @@ func (c *Controller) podWatchFunc(options metav1.ListOptions) (watch.Interface, return c.KubeClient.Pods(c.opConfig.WatchedNamespace).Watch(opts) } -func (c *Controller) dispatchPodEvent(clusterName spec.NamespacedName, event spec.PodEvent) { +func (c *Controller) dispatchPodEvent(clusterName spec.NamespacedName, event cluster.PodEvent) { c.clustersMu.RLock() cluster, ok := c.clusters[clusterName] c.clustersMu.RUnlock() @@ -41,7 +43,7 @@ func (c *Controller) dispatchPodEvent(clusterName spec.NamespacedName, event spe func (c *Controller) podAdd(obj interface{}) { if pod, ok := obj.(*v1.Pod); ok { - c.preparePodEventForDispatch(pod, nil, spec.EventAdd) + c.preparePodEventForDispatch(pod, nil, cluster.PodEventAdd) } } @@ -56,19 +58,19 @@ func (c *Controller) podUpdate(prev, cur interface{}) { return } - c.preparePodEventForDispatch(curPod, prevPod, spec.EventUpdate) + c.preparePodEventForDispatch(curPod, prevPod, cluster.PodEventUpdate) } func (c *Controller) podDelete(obj interface{}) { if pod, ok := obj.(*v1.Pod); ok { - c.preparePodEventForDispatch(pod, nil, spec.EventDelete) + c.preparePodEventForDispatch(pod, nil, cluster.PodEventDelete) } } -func (c *Controller) preparePodEventForDispatch(curPod, prevPod *v1.Pod, event spec.EventType) { - podEvent := spec.PodEvent{ - PodName: util.NameFromMeta(curPod.ObjectMeta), +func (c *Controller) preparePodEventForDispatch(curPod, prevPod *v1.Pod, event cluster.PodEventType) { + podEvent := cluster.PodEvent{ + PodName: types.NamespacedName(util.NameFromMeta(curPod.ObjectMeta)), CurPod: curPod, PrevPod: prevPod, EventType: event, diff --git a/pkg/controller/postgresql.go 
b/pkg/controller/postgresql.go index 4e5df42a7..379cec842 100644 --- a/pkg/controller/postgresql.go +++ b/pkg/controller/postgresql.go @@ -1,7 +1,6 @@ package controller import ( - "encoding/json" "fmt" "reflect" "strings" @@ -12,15 +11,13 @@ import ( "github.com/Sirupsen/logrus" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/watch" "k8s.io/client-go/tools/cache" + acidv1 "github.com/zalando-incubator/postgres-operator/pkg/apis/acid.zalan.do/v1" "github.com/zalando-incubator/postgres-operator/pkg/cluster" "github.com/zalando-incubator/postgres-operator/pkg/spec" "github.com/zalando-incubator/postgres-operator/pkg/util" - "github.com/zalando-incubator/postgres-operator/pkg/util/constants" "github.com/zalando-incubator/postgres-operator/pkg/util/k8sutil" "github.com/zalando-incubator/postgres-operator/pkg/util/ringlog" ) @@ -42,40 +39,20 @@ func (c *Controller) clusterResync(stopCh <-chan struct{}, wg *sync.WaitGroup) { } // clusterListFunc obtains a list of all PostgreSQL clusters -func (c *Controller) listClusters(options metav1.ListOptions) (*spec.PostgresqlList, error) { - var ( - list spec.PostgresqlList - ) - - req := c.KubeClient.CRDREST. - Get(). - Namespace(c.opConfig.WatchedNamespace). - Resource(constants.PostgresCRDResource). - VersionedParams(&options, metav1.ParameterCodec) - - b, err := req.DoRaw() +func (c *Controller) listClusters(options metav1.ListOptions) (*acidv1.PostgresqlList, error) { + // TODO: use the SharedInformer cache instead of quering Kubernetes API directly. 
+ list, err := c.KubeClient.AcidV1ClientSet.AcidV1().Postgresqls(c.opConfig.WatchedNamespace).List(options) if err != nil { - c.logger.Errorf("could not get the list of postgresql CRD objects: %v", err) - return nil, err + c.logger.Errorf("could not list postgresql objects: %v", err) } - if err = json.Unmarshal(b, &list); err != nil { - c.logger.Warningf("could not unmarshal list of clusters: %v", err) - } - - return &list, err - -} - -// A separate function to be called from InitSharedInformers -func (c *Controller) clusterListFunc(options metav1.ListOptions) (runtime.Object, error) { - return c.listClusters(options) + return list, err } // clusterListAndSync lists all manifests and decides whether to run the sync or repair. func (c *Controller) clusterListAndSync() error { var ( err error - event spec.EventType + event EventType ) currentTime := time.Now().Unix() @@ -83,12 +60,12 @@ func (c *Controller) clusterListAndSync() error { timeFromPreviousRepair := currentTime - atomic.LoadInt64(&c.lastClusterRepairTime) if timeFromPreviousSync >= int64(c.opConfig.ResyncPeriod.Seconds()) { - event = spec.EventSync + event = EventSync } else if timeFromPreviousRepair >= int64(c.opConfig.RepairPeriod.Seconds()) { - event = spec.EventRepair + event = EventRepair } if event != "" { - var list *spec.PostgresqlList + var list *acidv1.PostgresqlList if list, err = c.listClusters(metav1.ListOptions{ResourceVersion: "0"}); err != nil { return err } @@ -101,16 +78,17 @@ func (c *Controller) clusterListAndSync() error { } // queueEvents queues a sync or repair event for every cluster with a valid manifest -func (c *Controller) queueEvents(list *spec.PostgresqlList, event spec.EventType) { +func (c *Controller) queueEvents(list *acidv1.PostgresqlList, event EventType) { var activeClustersCnt, failedClustersCnt, clustersToRepair int for i, pg := range list.Items { - if pg.Error != nil { + // XXX: check the cluster status field instead + if pg.Error != "" { failedClustersCnt++ 
continue } activeClustersCnt++ // check if that cluster needs repair - if event == spec.EventRepair { + if event == EventRepair { if pg.Status.Success() { continue } else { @@ -133,9 +111,9 @@ func (c *Controller) queueEvents(list *spec.PostgresqlList, event spec.EventType } else { c.logger.Infof("no clusters running") } - if event == spec.EventRepair || event == spec.EventSync { + if event == EventRepair || event == EventSync { atomic.StoreInt64(&c.lastClusterRepairTime, time.Now().Unix()) - if event == spec.EventSync { + if event == EventSync { atomic.StoreInt64(&c.lastClusterSyncTime, time.Now().Unix()) } } @@ -143,7 +121,7 @@ func (c *Controller) queueEvents(list *spec.PostgresqlList, event spec.EventType func (c *Controller) acquireInitialListOfClusters() error { var ( - list *spec.PostgresqlList + list *acidv1.PostgresqlList err error clusterName spec.NamespacedName ) @@ -153,7 +131,8 @@ func (c *Controller) acquireInitialListOfClusters() error { } c.logger.Debugf("acquiring initial list of clusters") for _, pg := range list.Items { - if pg.Error != nil { + // XXX: check the cluster status field instead + if pg.Error != "" { continue } clusterName = util.NameFromMeta(pg.ObjectMeta) @@ -161,54 +140,11 @@ func (c *Controller) acquireInitialListOfClusters() error { c.logger.Debugf("added new cluster: %q", clusterName) } // initiate initial sync of all clusters. 
- c.queueEvents(list, spec.EventSync) + c.queueEvents(list, EventSync) return nil } -type crdDecoder struct { - dec *json.Decoder - close func() error -} - -func (d *crdDecoder) Close() { - if err := d.close(); err != nil { - fmt.Printf("error when closing CRDDecorer: %v\n", err) - } -} - -func (d *crdDecoder) Decode() (action watch.EventType, object runtime.Object, err error) { - var e struct { - Type watch.EventType - Object spec.Postgresql - } - if err := d.dec.Decode(&e); err != nil { - return watch.Error, nil, err - } - - return e.Type, &e.Object, nil -} - -func (c *Controller) clusterWatchFunc(options metav1.ListOptions) (watch.Interface, error) { - options.Watch = true - // MIGRATION: FieldsSelectorParam(nil) - r, err := c.KubeClient.CRDREST. - Get(). - Namespace(c.opConfig.WatchedNamespace). - Resource(constants.PostgresCRDResource). - VersionedParams(&options, metav1.ParameterCodec). - Stream() - - if err != nil { - return nil, err - } - - return watch.NewStreamWatcher(&crdDecoder{ - dec: json.NewDecoder(r), - close: r.Close, - }), nil -} - -func (c *Controller) addCluster(lg *logrus.Entry, clusterName spec.NamespacedName, pgSpec *spec.Postgresql) *cluster.Cluster { +func (c *Controller) addCluster(lg *logrus.Entry, clusterName spec.NamespacedName, pgSpec *acidv1.Postgresql) *cluster.Cluster { cl := cluster.New(c.makeClusterConfig(), c.KubeClient, *pgSpec, lg) cl.Run(c.stopCh) teamName := strings.ToLower(cl.Spec.TeamID) @@ -224,13 +160,13 @@ func (c *Controller) addCluster(lg *logrus.Entry, clusterName spec.NamespacedNam return cl } -func (c *Controller) processEvent(event spec.ClusterEvent) { +func (c *Controller) processEvent(event ClusterEvent) { var clusterName spec.NamespacedName var clHistory ringlog.RingLogger lg := c.logger.WithField("worker", event.WorkerID) - if event.EventType == spec.EventAdd || event.EventType == spec.EventSync || event.EventType == spec.EventRepair { + if event.EventType == EventAdd || event.EventType == EventSync || 
event.EventType == EventRepair { clusterName = util.NameFromMeta(event.NewSpec.ObjectMeta) } else { clusterName = util.NameFromMeta(event.OldSpec.ObjectMeta) @@ -246,17 +182,17 @@ func (c *Controller) processEvent(event spec.ClusterEvent) { defer c.curWorkerCluster.Store(event.WorkerID, nil) - if event.EventType == spec.EventRepair { + if event.EventType == EventRepair { runRepair, lastOperationStatus := cl.NeedsRepair() if !runRepair { lg.Debugf("Observed cluster status %s, repair is not required", lastOperationStatus) return } lg.Debugf("Observed cluster status %s, running sync scan to repair the cluster", lastOperationStatus) - event.EventType = spec.EventSync + event.EventType = EventSync } - if event.EventType == spec.EventAdd || event.EventType == spec.EventUpdate || event.EventType == spec.EventSync { + if event.EventType == EventAdd || event.EventType == EventUpdate || event.EventType == EventSync { // handle deprecated parameters by possibly assigning their values to the new ones. 
if event.OldSpec != nil { c.mergeDeprecatedPostgreSQLSpecParameters(&event.OldSpec.Spec) @@ -273,7 +209,7 @@ func (c *Controller) processEvent(event spec.ClusterEvent) { } switch event.EventType { - case spec.EventAdd: + case EventAdd: if clusterFound { lg.Debugf("cluster already exists") return @@ -286,14 +222,14 @@ func (c *Controller) processEvent(event spec.ClusterEvent) { c.curWorkerCluster.Store(event.WorkerID, cl) if err := cl.Create(); err != nil { - cl.Error = fmt.Errorf("could not create cluster: %v", err) + cl.Error = fmt.Sprintf("could not create cluster: %v", err) lg.Error(cl.Error) return } lg.Infoln("cluster has been created") - case spec.EventUpdate: + case EventUpdate: lg.Infoln("update of the cluster started") if !clusterFound { @@ -302,12 +238,12 @@ func (c *Controller) processEvent(event spec.ClusterEvent) { } c.curWorkerCluster.Store(event.WorkerID, cl) if err := cl.Update(event.OldSpec, event.NewSpec); err != nil { - cl.Error = fmt.Errorf("could not update cluster: %v", err) + cl.Error = fmt.Sprintf("could not update cluster: %v", err) lg.Error(cl.Error) return } - cl.Error = nil + cl.Error = "" lg.Infoln("cluster has been updated") clHistory.Insert(&spec.Diff{ @@ -315,7 +251,7 @@ func (c *Controller) processEvent(event spec.ClusterEvent) { ProcessTime: time.Now(), Diff: util.Diff(event.OldSpec, event.NewSpec), }) - case spec.EventDelete: + case EventDelete: if !clusterFound { lg.Errorf("unknown cluster: %q", clusterName) return @@ -345,7 +281,7 @@ func (c *Controller) processEvent(event spec.ClusterEvent) { }() lg.Infof("cluster has been deleted") - case spec.EventSync: + case EventSync: lg.Infof("syncing of the cluster started") // no race condition because a cluster is always processed by single worker @@ -355,11 +291,11 @@ func (c *Controller) processEvent(event spec.ClusterEvent) { c.curWorkerCluster.Store(event.WorkerID, cl) if err := cl.Sync(event.NewSpec); err != nil { - cl.Error = fmt.Errorf("could not sync cluster: %v", err) + 
cl.Error = fmt.Sprintf("could not sync cluster: %v", err) lg.Error(cl.Error) return } - cl.Error = nil + cl.Error = "" lg.Infof("cluster has been synced") } @@ -382,7 +318,7 @@ func (c *Controller) processClusterEventsQueue(idx int, stopCh <-chan struct{}, c.logger.Errorf("error when processing cluster events queue: %v", err) continue } - event, ok := obj.(spec.ClusterEvent) + event, ok := obj.(ClusterEvent) if !ok { c.logger.Errorf("could not cast to ClusterEvent") } @@ -391,7 +327,7 @@ func (c *Controller) processClusterEventsQueue(idx int, stopCh <-chan struct{}, } } -func (c *Controller) warnOnDeprecatedPostgreSQLSpecParameters(spec *spec.PostgresSpec) { +func (c *Controller) warnOnDeprecatedPostgreSQLSpecParameters(spec *acidv1.PostgresSpec) { deprecate := func(deprecated, replacement string) { c.logger.Warningf("Parameter %q is deprecated. Consider setting %q instead", deprecated, replacement) @@ -421,7 +357,7 @@ func (c *Controller) warnOnDeprecatedPostgreSQLSpecParameters(spec *spec.Postgre // mergeDeprecatedPostgreSQLSpecParameters modifies the spec passed to the cluster by setting current parameter // values from the obsolete ones. Note: while the spec that is modified is a copy made in queueClusterEvent, it is // still a shallow copy, so be extra careful not to modify values pointer fields point to, but copy them instead. 
-func (c *Controller) mergeDeprecatedPostgreSQLSpecParameters(spec *spec.PostgresSpec) *spec.PostgresSpec { +func (c *Controller) mergeDeprecatedPostgreSQLSpecParameters(spec *acidv1.PostgresSpec) *acidv1.PostgresSpec { if (spec.UseLoadBalancer != nil || spec.ReplicaLoadBalancer != nil) && (spec.EnableReplicaLoadBalancer == nil && spec.EnableMasterLoadBalancer == nil) { if spec.UseLoadBalancer != nil { @@ -439,18 +375,18 @@ func (c *Controller) mergeDeprecatedPostgreSQLSpecParameters(spec *spec.Postgres return spec } -func (c *Controller) queueClusterEvent(informerOldSpec, informerNewSpec *spec.Postgresql, eventType spec.EventType) { +func (c *Controller) queueClusterEvent(informerOldSpec, informerNewSpec *acidv1.Postgresql, eventType EventType) { var ( uid types.UID clusterName spec.NamespacedName - clusterError error + clusterError string ) if informerOldSpec != nil { //update, delete uid = informerOldSpec.GetUID() clusterName = util.NameFromMeta(informerOldSpec.ObjectMeta) - if eventType == spec.EventUpdate && informerNewSpec.Error == nil && informerOldSpec.Error != nil { - eventType = spec.EventSync + if eventType == EventUpdate && informerNewSpec.Error == "" && informerOldSpec.Error != "" { + eventType = EventSync clusterError = informerNewSpec.Error } else { clusterError = informerOldSpec.Error @@ -461,10 +397,10 @@ func (c *Controller) queueClusterEvent(informerOldSpec, informerNewSpec *spec.Po clusterError = informerNewSpec.Error } - if clusterError != nil && eventType != spec.EventDelete { + if clusterError != "" && eventType != EventDelete { c.logger. WithField("cluster-name", clusterName). - Debugf("skipping %q event for the invalid cluster: %v", eventType, clusterError) + Debugf("skipping %q event for the invalid cluster: %s", eventType, clusterError) return } @@ -473,7 +409,7 @@ func (c *Controller) queueClusterEvent(informerOldSpec, informerNewSpec *spec.Po // effect, the modified state will be returned together with subsequent events). 
workerID := c.clusterWorkerID(clusterName) - clusterEvent := spec.ClusterEvent{ + clusterEvent := ClusterEvent{ EventTime: time.Now(), EventType: eventType, UID: uid, @@ -488,11 +424,11 @@ func (c *Controller) queueClusterEvent(informerOldSpec, informerNewSpec *spec.Po } lg.Infof("%q event has been queued", eventType) - if eventType != spec.EventDelete { + if eventType != EventDelete { return } // A delete event discards all prior requests for that cluster. - for _, evType := range []spec.EventType{spec.EventAdd, spec.EventSync, spec.EventUpdate, spec.EventRepair} { + for _, evType := range []EventType{EventAdd, EventSync, EventUpdate, EventRepair} { obj, exists, err := c.clusterEventQueues[workerID].GetByKey(queueClusterKey(evType, uid)) if err != nil { lg.Warningf("could not get event from the queue: %v", err) @@ -513,40 +449,41 @@ func (c *Controller) queueClusterEvent(informerOldSpec, informerNewSpec *spec.Po } func (c *Controller) postgresqlAdd(obj interface{}) { - pg, ok := obj.(*spec.Postgresql) + pg, ok := obj.(*acidv1.Postgresql) if !ok { c.logger.Errorf("could not cast to postgresql spec") return } // We will not get multiple Add events for the same cluster - c.queueClusterEvent(nil, pg, spec.EventAdd) + c.queueClusterEvent(nil, pg, EventAdd) } func (c *Controller) postgresqlUpdate(prev, cur interface{}) { - pgOld, ok := prev.(*spec.Postgresql) + pgOld, ok := prev.(*acidv1.Postgresql) if !ok { c.logger.Errorf("could not cast to postgresql spec") } - pgNew, ok := cur.(*spec.Postgresql) + pgNew, ok := cur.(*acidv1.Postgresql) if !ok { c.logger.Errorf("could not cast to postgresql spec") } + // Avoid the inifinite recursion for status updates if reflect.DeepEqual(pgOld.Spec, pgNew.Spec) { return } - c.queueClusterEvent(pgOld, pgNew, spec.EventUpdate) + c.queueClusterEvent(pgOld, pgNew, EventUpdate) } func (c *Controller) postgresqlDelete(obj interface{}) { - pg, ok := obj.(*spec.Postgresql) + pg, ok := obj.(*acidv1.Postgresql) if !ok { c.logger.Errorf("could 
not cast to postgresql spec") return } - c.queueClusterEvent(pg, nil, spec.EventDelete) + c.queueClusterEvent(pg, nil, EventDelete) } /* @@ -555,7 +492,7 @@ func (c *Controller) postgresqlDelete(obj interface{}) { The operator does not sync accounts/role bindings after creation. */ -func (c *Controller) submitRBACCredentials(event spec.ClusterEvent) error { +func (c *Controller) submitRBACCredentials(event ClusterEvent) error { namespace := event.NewSpec.GetNamespace() if _, ok := c.namespacesWithDefinedRBAC.Load(namespace); ok { diff --git a/pkg/controller/postgresql_test.go b/pkg/controller/postgresql_test.go index d5d5669af..c3a74be74 100644 --- a/pkg/controller/postgresql_test.go +++ b/pkg/controller/postgresql_test.go @@ -1,6 +1,7 @@ package controller import ( + acidv1 "github.com/zalando-incubator/postgres-operator/pkg/apis/acid.zalan.do/v1" "github.com/zalando-incubator/postgres-operator/pkg/spec" "reflect" "testing" @@ -16,21 +17,21 @@ func TestMergeDeprecatedPostgreSQLSpecParameters(t *testing.T) { tests := []struct { name string - in *spec.PostgresSpec - out *spec.PostgresSpec + in *acidv1.PostgresSpec + out *acidv1.PostgresSpec error string }{ { "Check that old parameters propagate values to the new ones", - &spec.PostgresSpec{UseLoadBalancer: &True, ReplicaLoadBalancer: &True}, - &spec.PostgresSpec{UseLoadBalancer: nil, ReplicaLoadBalancer: nil, + &acidv1.PostgresSpec{UseLoadBalancer: &True, ReplicaLoadBalancer: &True}, + &acidv1.PostgresSpec{UseLoadBalancer: nil, ReplicaLoadBalancer: nil, EnableMasterLoadBalancer: &True, EnableReplicaLoadBalancer: &True}, "New parameters should be set from the values of old ones", }, { "Check that new parameters are not set when both old and new ones are present", - &spec.PostgresSpec{UseLoadBalancer: &True, EnableMasterLoadBalancer: &False}, - &spec.PostgresSpec{UseLoadBalancer: nil, EnableMasterLoadBalancer: &False}, + &acidv1.PostgresSpec{UseLoadBalancer: &True, EnableMasterLoadBalancer: &False}, + 
&acidv1.PostgresSpec{UseLoadBalancer: nil, EnableMasterLoadBalancer: &False}, "New parameters should remain unchanged when both old and new are present", }, } diff --git a/pkg/controller/types.go b/pkg/controller/types.go new file mode 100644 index 000000000..c412eb66e --- /dev/null +++ b/pkg/controller/types.go @@ -0,0 +1,30 @@ +package controller + +import ( + "k8s.io/apimachinery/pkg/types" + "time" + + acidv1 "github.com/zalando-incubator/postgres-operator/pkg/apis/acid.zalan.do/v1" +) + +// EventType contains type of the events for the TPRs and Pods received from Kubernetes +type EventType string + +// Possible values for the EventType +const ( + EventAdd EventType = "ADD" + EventUpdate EventType = "UPDATE" + EventDelete EventType = "DELETE" + EventSync EventType = "SYNC" + EventRepair EventType = "REPAIR" +) + +// ClusterEvent carries the payload of the Cluster TPR events. +type ClusterEvent struct { + EventTime time.Time + UID types.UID + EventType EventType + OldSpec *acidv1.Postgresql + NewSpec *acidv1.Postgresql + WorkerID uint32 +} diff --git a/pkg/controller/util.go b/pkg/controller/util.go index 46c369fdd..e6d7f972e 100644 --- a/pkg/controller/util.go +++ b/pkg/controller/util.go @@ -8,10 +8,10 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" + acidv1 "github.com/zalando-incubator/postgres-operator/pkg/apis/acid.zalan.do/v1" "github.com/zalando-incubator/postgres-operator/pkg/cluster" "github.com/zalando-incubator/postgres-operator/pkg/spec" "github.com/zalando-incubator/postgres-operator/pkg/util/config" - "github.com/zalando-incubator/postgres-operator/pkg/util/constants" "github.com/zalando-incubator/postgres-operator/pkg/util/k8sutil" "gopkg.in/yaml.v2" ) @@ -47,22 +47,24 @@ func (c *Controller) clusterWorkerID(clusterName spec.NamespacedName) uint32 { return c.clusterWorkers[clusterName] } -func (c *Controller) createOperatorCRD(plural, singular, short string) error { +func (c *Controller) 
createOperatorCRD(name, kind, plural, short string) error { + subResourceStatus := apiextv1beta1.CustomResourceSubresourceStatus{} crd := &apiextv1beta1.CustomResourceDefinition{ ObjectMeta: metav1.ObjectMeta{ - Name: plural + "." + constants.CRDGroup, + Name: name, }, Spec: apiextv1beta1.CustomResourceDefinitionSpec{ - Group: constants.CRDGroup, - Version: constants.CRDApiVersion, + Group: acidv1.SchemeGroupVersion.Group, + Version: acidv1.SchemeGroupVersion.Version, Names: apiextv1beta1.CustomResourceDefinitionNames{ Plural: plural, - Singular: singular, ShortNames: []string{short}, - Kind: singular, - ListKind: singular + "List", + Kind: kind, }, Scope: apiextv1beta1.NamespaceScoped, + Subresources: &apiextv1beta1.CustomResourceSubresources{ + Status: &subResourceStatus, + }, }, } @@ -99,11 +101,17 @@ func (c *Controller) createOperatorCRD(plural, singular, short string) error { } func (c *Controller) createPostgresCRD() error { - return c.createOperatorCRD(constants.PostgresCRDResource, constants.PostgresCRDKind, constants.PostgresCRDShort) + return c.createOperatorCRD(acidv1.PostgresCRDResouceName, + acidv1.PostgresCRDResourceKind, + acidv1.PostgresCRDResourcePlural, + acidv1.PostgresCRDResourceShort) } func (c *Controller) createConfigurationCRD() error { - return c.createOperatorCRD(constants.OperatorConfigCRDResource, constants.OperatorConfigCRDKind, constants.OperatorConfigCRDShort) + return c.createOperatorCRD(acidv1.OperatorConfigCRDResourceName, + acidv1.OperatorConfigCRDResouceKind, + acidv1.OperatorConfigCRDResourcePlural, + acidv1.OperatorConfigCRDResourceShort) } func readDecodedRole(s string) (*spec.PgUser, error) { diff --git a/pkg/generated/clientset/versioned/clientset.go b/pkg/generated/clientset/versioned/clientset.go new file mode 100644 index 000000000..d42fa3b21 --- /dev/null +++ b/pkg/generated/clientset/versioned/clientset.go @@ -0,0 +1,104 @@ +/* +Copyright 2018 Compose, Zalando SE + +Permission is hereby granted, free of charge, to any 
person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package versioned + +import ( + acidv1 "github.com/zalando-incubator/postgres-operator/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1" + discovery "k8s.io/client-go/discovery" + rest "k8s.io/client-go/rest" + flowcontrol "k8s.io/client-go/util/flowcontrol" +) + +type Interface interface { + Discovery() discovery.DiscoveryInterface + AcidV1() acidv1.AcidV1Interface + // Deprecated: please explicitly pick a version if possible. + Acid() acidv1.AcidV1Interface +} + +// Clientset contains the clients for groups. Each group has exactly one +// version included in a Clientset. +type Clientset struct { + *discovery.DiscoveryClient + acidV1 *acidv1.AcidV1Client +} + +// AcidV1 retrieves the AcidV1Client +func (c *Clientset) AcidV1() acidv1.AcidV1Interface { + return c.acidV1 +} + +// Deprecated: Acid retrieves the default version of AcidClient. +// Please explicitly pick a version. 
+func (c *Clientset) Acid() acidv1.AcidV1Interface { + return c.acidV1 +} + +// Discovery retrieves the DiscoveryClient +func (c *Clientset) Discovery() discovery.DiscoveryInterface { + if c == nil { + return nil + } + return c.DiscoveryClient +} + +// NewForConfig creates a new Clientset for the given config. +func NewForConfig(c *rest.Config) (*Clientset, error) { + configShallowCopy := *c + if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 { + configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst) + } + var cs Clientset + var err error + cs.acidV1, err = acidv1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + + cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + return &cs, nil +} + +// NewForConfigOrDie creates a new Clientset for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *Clientset { + var cs Clientset + cs.acidV1 = acidv1.NewForConfigOrDie(c) + + cs.DiscoveryClient = discovery.NewDiscoveryClientForConfigOrDie(c) + return &cs +} + +// New creates a new Clientset for the given RESTClient. 
+func New(c rest.Interface) *Clientset { + var cs Clientset + cs.acidV1 = acidv1.New(c) + + cs.DiscoveryClient = discovery.NewDiscoveryClient(c) + return &cs +} diff --git a/pkg/generated/clientset/versioned/doc.go b/pkg/generated/clientset/versioned/doc.go new file mode 100644 index 000000000..4ef8c1bb8 --- /dev/null +++ b/pkg/generated/clientset/versioned/doc.go @@ -0,0 +1,26 @@ +/* +Copyright 2018 Compose, Zalando SE + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated clientset. 
+package versioned diff --git a/pkg/generated/clientset/versioned/fake/clientset_generated.go b/pkg/generated/clientset/versioned/fake/clientset_generated.go new file mode 100644 index 000000000..19d9ab805 --- /dev/null +++ b/pkg/generated/clientset/versioned/fake/clientset_generated.go @@ -0,0 +1,88 @@ +/* +Copyright 2018 Compose, Zalando SE + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + clientset "github.com/zalando-incubator/postgres-operator/pkg/generated/clientset/versioned" + acidv1 "github.com/zalando-incubator/postgres-operator/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1" + fakeacidv1 "github.com/zalando-incubator/postgres-operator/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/fake" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/discovery" + fakediscovery "k8s.io/client-go/discovery/fake" + "k8s.io/client-go/testing" +) + +// NewSimpleClientset returns a clientset that will respond with the provided objects. +// It's backed by a very simple object tracker that processes creates, updates and deletions as-is, +// without applying any validations and/or defaults. It shouldn't be considered a replacement +// for a real clientset and is mostly useful in simple unit tests. +func NewSimpleClientset(objects ...runtime.Object) *Clientset { + o := testing.NewObjectTracker(scheme, codecs.UniversalDecoder()) + for _, obj := range objects { + if err := o.Add(obj); err != nil { + panic(err) + } + } + + cs := &Clientset{} + cs.discovery = &fakediscovery.FakeDiscovery{Fake: &cs.Fake} + cs.AddReactor("*", "*", testing.ObjectReaction(o)) + cs.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) { + gvr := action.GetResource() + ns := action.GetNamespace() + watch, err := o.Watch(gvr, ns) + if err != nil { + return false, nil, err + } + return true, watch, nil + }) + + return cs +} + +// Clientset implements clientset.Interface. Meant to be embedded into a +// struct to get a default implementation. This makes faking out just the method +// you want to test easier. 
+type Clientset struct { + testing.Fake + discovery *fakediscovery.FakeDiscovery +} + +func (c *Clientset) Discovery() discovery.DiscoveryInterface { + return c.discovery +} + +var _ clientset.Interface = &Clientset{} + +// AcidV1 retrieves the AcidV1Client +func (c *Clientset) AcidV1() acidv1.AcidV1Interface { + return &fakeacidv1.FakeAcidV1{Fake: &c.Fake} +} + +// Acid retrieves the AcidV1Client +func (c *Clientset) Acid() acidv1.AcidV1Interface { + return &fakeacidv1.FakeAcidV1{Fake: &c.Fake} +} diff --git a/pkg/generated/clientset/versioned/fake/doc.go b/pkg/generated/clientset/versioned/fake/doc.go new file mode 100644 index 000000000..c249c43fa --- /dev/null +++ b/pkg/generated/clientset/versioned/fake/doc.go @@ -0,0 +1,26 @@ +/* +Copyright 2018 Compose, Zalando SE + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated fake clientset. 
+package fake diff --git a/pkg/generated/clientset/versioned/fake/register.go b/pkg/generated/clientset/versioned/fake/register.go new file mode 100644 index 000000000..5269b757a --- /dev/null +++ b/pkg/generated/clientset/versioned/fake/register.go @@ -0,0 +1,60 @@ +/* +Copyright 2018 Compose, Zalando SE + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + acidv1 "github.com/zalando-incubator/postgres-operator/pkg/apis/acid.zalan.do/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" +) + +var scheme = runtime.NewScheme() +var codecs = serializer.NewCodecFactory(scheme) +var parameterCodec = runtime.NewParameterCodec(scheme) + +func init() { + v1.AddToGroupVersion(scheme, schema.GroupVersion{Version: "v1"}) + AddToScheme(scheme) +} + +// AddToScheme adds all types of this clientset into the given scheme. This allows composition +// of clientsets, like in: +// +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) +// +// kclientset, _ := kubernetes.NewForConfig(c) +// aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// +// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types +// correctly. 
+func AddToScheme(scheme *runtime.Scheme) { + acidv1.AddToScheme(scheme) +} diff --git a/pkg/generated/clientset/versioned/scheme/doc.go b/pkg/generated/clientset/versioned/scheme/doc.go new file mode 100644 index 000000000..d17209947 --- /dev/null +++ b/pkg/generated/clientset/versioned/scheme/doc.go @@ -0,0 +1,26 @@ +/* +Copyright 2018 Compose, Zalando SE + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package contains the scheme of the automatically generated clientset. 
+package scheme diff --git a/pkg/generated/clientset/versioned/scheme/register.go b/pkg/generated/clientset/versioned/scheme/register.go new file mode 100644 index 000000000..346cd4b16 --- /dev/null +++ b/pkg/generated/clientset/versioned/scheme/register.go @@ -0,0 +1,60 @@ +/* +Copyright 2018 Compose, Zalando SE + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package scheme + +import ( + acidv1 "github.com/zalando-incubator/postgres-operator/pkg/apis/acid.zalan.do/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" +) + +var Scheme = runtime.NewScheme() +var Codecs = serializer.NewCodecFactory(Scheme) +var ParameterCodec = runtime.NewParameterCodec(Scheme) + +func init() { + v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"}) + AddToScheme(Scheme) +} + +// AddToScheme adds all types of this clientset into the given scheme. This allows composition +// of clientsets, like in: +// +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) +// +// kclientset, _ := kubernetes.NewForConfig(c) +// aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// +// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types +// correctly. 
+func AddToScheme(scheme *runtime.Scheme) { + acidv1.AddToScheme(scheme) +} diff --git a/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/acid.zalan.do_client.go b/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/acid.zalan.do_client.go new file mode 100644 index 000000000..4e73a425f --- /dev/null +++ b/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/acid.zalan.do_client.go @@ -0,0 +1,101 @@ +/* +Copyright 2018 Compose, Zalando SE + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1 + +import ( + v1 "github.com/zalando-incubator/postgres-operator/pkg/apis/acid.zalan.do/v1" + "github.com/zalando-incubator/postgres-operator/pkg/generated/clientset/versioned/scheme" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + rest "k8s.io/client-go/rest" +) + +type AcidV1Interface interface { + RESTClient() rest.Interface + OperatorConfigurationsGetter + PostgresqlsGetter +} + +// AcidV1Client is used to interact with features provided by the acid.zalan.do group. +type AcidV1Client struct { + restClient rest.Interface +} + +func (c *AcidV1Client) OperatorConfigurations(namespace string) OperatorConfigurationInterface { + return newOperatorConfigurations(c, namespace) +} + +func (c *AcidV1Client) Postgresqls(namespace string) PostgresqlInterface { + return newPostgresqls(c, namespace) +} + +// NewForConfig creates a new AcidV1Client for the given config. +func NewForConfig(c *rest.Config) (*AcidV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &AcidV1Client{client}, nil +} + +// NewForConfigOrDie creates a new AcidV1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *AcidV1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new AcidV1Client for the given RESTClient. 
+func New(c rest.Interface) *AcidV1Client { + return &AcidV1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *AcidV1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/doc.go b/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/doc.go new file mode 100644 index 000000000..97d91a36a --- /dev/null +++ b/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/doc.go @@ -0,0 +1,26 @@ +/* +Copyright 2018 Compose, Zalando SE + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1 diff --git a/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/fake/doc.go b/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/fake/doc.go new file mode 100644 index 000000000..58640649f --- /dev/null +++ b/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/fake/doc.go @@ -0,0 +1,26 @@ +/* +Copyright 2018 Compose, Zalando SE + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// Package fake has the automatically generated clients. 
+package fake diff --git a/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/fake/fake_acid.zalan.do_client.go b/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/fake/fake_acid.zalan.do_client.go new file mode 100644 index 000000000..9d401ef7c --- /dev/null +++ b/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/fake/fake_acid.zalan.do_client.go @@ -0,0 +1,50 @@ +/* +Copyright 2018 Compose, Zalando SE + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + v1 "github.com/zalando-incubator/postgres-operator/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1" + rest "k8s.io/client-go/rest" + testing "k8s.io/client-go/testing" +) + +type FakeAcidV1 struct { + *testing.Fake +} + +func (c *FakeAcidV1) OperatorConfigurations(namespace string) v1.OperatorConfigurationInterface { + return &FakeOperatorConfigurations{c, namespace} +} + +func (c *FakeAcidV1) Postgresqls(namespace string) v1.PostgresqlInterface { + return &FakePostgresqls{c, namespace} +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *FakeAcidV1) RESTClient() rest.Interface { + var ret *rest.RESTClient + return ret +} diff --git a/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/fake/fake_operatorconfiguration.go b/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/fake/fake_operatorconfiguration.go new file mode 100644 index 000000000..b2fada626 --- /dev/null +++ b/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/fake/fake_operatorconfiguration.go @@ -0,0 +1,53 @@ +/* +Copyright 2018 Compose, Zalando SE + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + acidzalandov1 "github.com/zalando-incubator/postgres-operator/pkg/apis/acid.zalan.do/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + schema "k8s.io/apimachinery/pkg/runtime/schema" + testing "k8s.io/client-go/testing" +) + +// FakeOperatorConfigurations implements OperatorConfigurationInterface +type FakeOperatorConfigurations struct { + Fake *FakeAcidV1 + ns string +} + +var operatorconfigurationsResource = schema.GroupVersionResource{Group: "acid.zalan.do", Version: "v1", Resource: "operatorconfigurations"} + +var operatorconfigurationsKind = schema.GroupVersionKind{Group: "acid.zalan.do", Version: "v1", Kind: "OperatorConfiguration"} + +// Get takes name of the operatorConfiguration, and returns the corresponding operatorConfiguration object, and an error if there is any. +func (c *FakeOperatorConfigurations) Get(name string, options v1.GetOptions) (result *acidzalandov1.OperatorConfiguration, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewGetAction(operatorconfigurationsResource, c.ns, name), &acidzalandov1.OperatorConfiguration{}) + + if obj == nil { + return nil, err + } + return obj.(*acidzalandov1.OperatorConfiguration), err +} diff --git a/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/fake/fake_postgresql.go b/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/fake/fake_postgresql.go new file mode 100644 index 000000000..6feb72eb8 --- /dev/null +++ b/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/fake/fake_postgresql.go @@ -0,0 +1,146 @@ +/* +Copyright 2018 Compose, Zalando SE + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + acidzalandov1 "github.com/zalando-incubator/postgres-operator/pkg/apis/acid.zalan.do/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakePostgresqls implements PostgresqlInterface +type FakePostgresqls struct { + Fake *FakeAcidV1 + ns string +} + +var postgresqlsResource = schema.GroupVersionResource{Group: "acid.zalan.do", Version: "v1", Resource: "postgresqls"} + +var postgresqlsKind = schema.GroupVersionKind{Group: "acid.zalan.do", Version: "v1", Kind: "Postgresql"} + +// Get takes name of the postgresql, and returns the corresponding postgresql object, and an error if there is any. +func (c *FakePostgresqls) Get(name string, options v1.GetOptions) (result *acidzalandov1.Postgresql, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(postgresqlsResource, c.ns, name), &acidzalandov1.Postgresql{}) + + if obj == nil { + return nil, err + } + return obj.(*acidzalandov1.Postgresql), err +} + +// List takes label and field selectors, and returns the list of Postgresqls that match those selectors. +func (c *FakePostgresqls) List(opts v1.ListOptions) (result *acidzalandov1.PostgresqlList, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewListAction(postgresqlsResource, postgresqlsKind, c.ns, opts), &acidzalandov1.PostgresqlList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &acidzalandov1.PostgresqlList{ListMeta: obj.(*acidzalandov1.PostgresqlList).ListMeta} + for _, item := range obj.(*acidzalandov1.PostgresqlList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested postgresqls. +func (c *FakePostgresqls) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(postgresqlsResource, c.ns, opts)) + +} + +// Create takes the representation of a postgresql and creates it. Returns the server's representation of the postgresql, and an error, if there is any. +func (c *FakePostgresqls) Create(postgresql *acidzalandov1.Postgresql) (result *acidzalandov1.Postgresql, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(postgresqlsResource, c.ns, postgresql), &acidzalandov1.Postgresql{}) + + if obj == nil { + return nil, err + } + return obj.(*acidzalandov1.Postgresql), err +} + +// Update takes the representation of a postgresql and updates it. Returns the server's representation of the postgresql, and an error, if there is any. +func (c *FakePostgresqls) Update(postgresql *acidzalandov1.Postgresql) (result *acidzalandov1.Postgresql, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(postgresqlsResource, c.ns, postgresql), &acidzalandov1.Postgresql{}) + + if obj == nil { + return nil, err + } + return obj.(*acidzalandov1.Postgresql), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
+func (c *FakePostgresqls) UpdateStatus(postgresql *acidzalandov1.Postgresql) (*acidzalandov1.Postgresql, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(postgresqlsResource, "status", c.ns, postgresql), &acidzalandov1.Postgresql{}) + + if obj == nil { + return nil, err + } + return obj.(*acidzalandov1.Postgresql), err +} + +// Delete takes name of the postgresql and deletes it. Returns an error if one occurs. +func (c *FakePostgresqls) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(postgresqlsResource, c.ns, name), &acidzalandov1.Postgresql{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakePostgresqls) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(postgresqlsResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &acidzalandov1.PostgresqlList{}) + return err +} + +// Patch applies the patch and returns the patched postgresql. +func (c *FakePostgresqls) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *acidzalandov1.Postgresql, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewPatchSubresourceAction(postgresqlsResource, c.ns, name, data, subresources...), &acidzalandov1.Postgresql{}) + + if obj == nil { + return nil, err + } + return obj.(*acidzalandov1.Postgresql), err +} diff --git a/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/generated_expansion.go b/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/generated_expansion.go new file mode 100644 index 000000000..775a4b21f --- /dev/null +++ b/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/generated_expansion.go @@ -0,0 +1,29 @@ +/* +Copyright 2018 Compose, Zalando SE + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1 + +type OperatorConfigurationExpansion interface{} + +type PostgresqlExpansion interface{} diff --git a/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/operatorconfiguration.go b/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/operatorconfiguration.go new file mode 100644 index 000000000..2541f0e3f --- /dev/null +++ b/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/operatorconfiguration.go @@ -0,0 +1,71 @@ +/* +Copyright 2018 Compose, Zalando SE + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + acidzalandov1 "github.com/zalando-incubator/postgres-operator/pkg/apis/acid.zalan.do/v1" + scheme "github.com/zalando-incubator/postgres-operator/pkg/generated/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + rest "k8s.io/client-go/rest" +) + +// OperatorConfigurationsGetter has a method to return a OperatorConfigurationInterface. 
+// A group's client should implement this interface. +type OperatorConfigurationsGetter interface { + OperatorConfigurations(namespace string) OperatorConfigurationInterface +} + +// OperatorConfigurationInterface has methods to work with OperatorConfiguration resources. +type OperatorConfigurationInterface interface { + Get(name string, options v1.GetOptions) (*acidzalandov1.OperatorConfiguration, error) + OperatorConfigurationExpansion +} + +// operatorConfigurations implements OperatorConfigurationInterface +type operatorConfigurations struct { + client rest.Interface + ns string +} + +// newOperatorConfigurations returns a OperatorConfigurations +func newOperatorConfigurations(c *AcidV1Client, namespace string) *operatorConfigurations { + return &operatorConfigurations{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the operatorConfiguration, and returns the corresponding operatorConfiguration object, and an error if there is any. +func (c *operatorConfigurations) Get(name string, options v1.GetOptions) (result *acidzalandov1.OperatorConfiguration, err error) { + result = &acidzalandov1.OperatorConfiguration{} + err = c.client.Get(). + Namespace(c.ns). + Resource("operatorconfigurations"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). 
+ Into(result) + return +} diff --git a/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/postgresql.go b/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/postgresql.go new file mode 100644 index 000000000..df1045ee3 --- /dev/null +++ b/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/postgresql.go @@ -0,0 +1,180 @@ +/* +Copyright 2018 Compose, Zalando SE + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/zalando-incubator/postgres-operator/pkg/apis/acid.zalan.do/v1" + scheme "github.com/zalando-incubator/postgres-operator/pkg/generated/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// PostgresqlsGetter has a method to return a PostgresqlInterface. +// A group's client should implement this interface. 
+type PostgresqlsGetter interface { + Postgresqls(namespace string) PostgresqlInterface +} + +// PostgresqlInterface has methods to work with Postgresql resources. +type PostgresqlInterface interface { + Create(*v1.Postgresql) (*v1.Postgresql, error) + Update(*v1.Postgresql) (*v1.Postgresql, error) + UpdateStatus(*v1.Postgresql) (*v1.Postgresql, error) + Delete(name string, options *metav1.DeleteOptions) error + DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error + Get(name string, options metav1.GetOptions) (*v1.Postgresql, error) + List(opts metav1.ListOptions) (*v1.PostgresqlList, error) + Watch(opts metav1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Postgresql, err error) + PostgresqlExpansion +} + +// postgresqls implements PostgresqlInterface +type postgresqls struct { + client rest.Interface + ns string +} + +// newPostgresqls returns a Postgresqls +func newPostgresqls(c *AcidV1Client, namespace string) *postgresqls { + return &postgresqls{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the postgresql, and returns the corresponding postgresql object, and an error if there is any. +func (c *postgresqls) Get(name string, options metav1.GetOptions) (result *v1.Postgresql, err error) { + result = &v1.Postgresql{} + err = c.client.Get(). + Namespace(c.ns). + Resource("postgresqls"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Postgresqls that match those selectors. +func (c *postgresqls) List(opts metav1.ListOptions) (result *v1.PostgresqlList, err error) { + result = &v1.PostgresqlList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("postgresqls"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). 
+ Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested postgresqls. +func (c *postgresqls) Watch(opts metav1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("postgresqls"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Create takes the representation of a postgresql and creates it. Returns the server's representation of the postgresql, and an error, if there is any. +func (c *postgresqls) Create(postgresql *v1.Postgresql) (result *v1.Postgresql, err error) { + result = &v1.Postgresql{} + err = c.client.Post(). + Namespace(c.ns). + Resource("postgresqls"). + Body(postgresql). + Do(). + Into(result) + return +} + +// Update takes the representation of a postgresql and updates it. Returns the server's representation of the postgresql, and an error, if there is any. +func (c *postgresqls) Update(postgresql *v1.Postgresql) (result *v1.Postgresql, err error) { + result = &v1.Postgresql{} + err = c.client.Put(). + Namespace(c.ns). + Resource("postgresqls"). + Name(postgresql.Name). + Body(postgresql). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + +func (c *postgresqls) UpdateStatus(postgresql *v1.Postgresql) (result *v1.Postgresql, err error) { + result = &v1.Postgresql{} + err = c.client.Put(). + Namespace(c.ns). + Resource("postgresqls"). + Name(postgresql.Name). + SubResource("status"). + Body(postgresql). + Do(). + Into(result) + return +} + +// Delete takes name of the postgresql and deletes it. Returns an error if one occurs. +func (c *postgresqls) Delete(name string, options *metav1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("postgresqls"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. 
+func (c *postgresqls) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("postgresqls"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched postgresql. +func (c *postgresqls) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Postgresql, err error) { + result = &v1.Postgresql{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("postgresqls"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/pkg/generated/informers/externalversions/acid.zalan.do/interface.go b/pkg/generated/informers/externalversions/acid.zalan.do/interface.go new file mode 100644 index 000000000..9dfa60021 --- /dev/null +++ b/pkg/generated/informers/externalversions/acid.zalan.do/interface.go @@ -0,0 +1,52 @@ +/* +Copyright 2018 Compose, Zalando SE + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package acid + +import ( + v1 "github.com/zalando-incubator/postgres-operator/pkg/generated/informers/externalversions/acid.zalan.do/v1" + internalinterfaces "github.com/zalando-incubator/postgres-operator/pkg/generated/informers/externalversions/internalinterfaces" +) + +// Interface provides access to each of this group's versions. +type Interface interface { + // V1 provides access to shared informers for resources in V1. + V1() v1.Interface +} + +type group struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// V1 returns a new v1.Interface. 
+func (g *group) V1() v1.Interface { + return v1.New(g.factory, g.namespace, g.tweakListOptions) +} diff --git a/pkg/generated/informers/externalversions/acid.zalan.do/v1/interface.go b/pkg/generated/informers/externalversions/acid.zalan.do/v1/interface.go new file mode 100644 index 000000000..f0f35b65c --- /dev/null +++ b/pkg/generated/informers/externalversions/acid.zalan.do/v1/interface.go @@ -0,0 +1,51 @@ +/* +Copyright 2018 Compose, Zalando SE + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + internalinterfaces "github.com/zalando-incubator/postgres-operator/pkg/generated/informers/externalversions/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // Postgresqls returns a PostgresqlInformer. 
+ Postgresqls() PostgresqlInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// Postgresqls returns a PostgresqlInformer. +func (v *version) Postgresqls() PostgresqlInformer { + return &postgresqlInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} diff --git a/pkg/generated/informers/externalversions/acid.zalan.do/v1/postgresql.go b/pkg/generated/informers/externalversions/acid.zalan.do/v1/postgresql.go new file mode 100644 index 000000000..50f3126cf --- /dev/null +++ b/pkg/generated/informers/externalversions/acid.zalan.do/v1/postgresql.go @@ -0,0 +1,95 @@ +/* +Copyright 2018 Compose, Zalando SE + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + time "time" + + acidzalandov1 "github.com/zalando-incubator/postgres-operator/pkg/apis/acid.zalan.do/v1" + versioned "github.com/zalando-incubator/postgres-operator/pkg/generated/clientset/versioned" + internalinterfaces "github.com/zalando-incubator/postgres-operator/pkg/generated/informers/externalversions/internalinterfaces" + v1 "github.com/zalando-incubator/postgres-operator/pkg/generated/listers/acid.zalan.do/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// PostgresqlInformer provides access to a shared informer and lister for +// Postgresqls. +type PostgresqlInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.PostgresqlLister +} + +type postgresqlInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewPostgresqlInformer constructs a new informer for Postgresql type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewPostgresqlInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredPostgresqlInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredPostgresqlInformer constructs a new informer for Postgresql type. 
+// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredPostgresqlInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AcidV1().Postgresqls(namespace).List(options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AcidV1().Postgresqls(namespace).Watch(options) + }, + }, + &acidzalandov1.Postgresql{}, + resyncPeriod, + indexers, + ) +} + +func (f *postgresqlInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredPostgresqlInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *postgresqlInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&acidzalandov1.Postgresql{}, f.defaultInformer) +} + +func (f *postgresqlInformer) Lister() v1.PostgresqlLister { + return v1.NewPostgresqlLister(f.Informer().GetIndexer()) +} diff --git a/pkg/generated/informers/externalversions/factory.go b/pkg/generated/informers/externalversions/factory.go new file mode 100644 index 000000000..395bc25b5 --- /dev/null +++ b/pkg/generated/informers/externalversions/factory.go @@ -0,0 +1,186 @@ +/* +Copyright 2018 Compose, Zalando SE + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without 
restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package externalversions + +import ( + reflect "reflect" + sync "sync" + time "time" + + versioned "github.com/zalando-incubator/postgres-operator/pkg/generated/clientset/versioned" + acidzalando "github.com/zalando-incubator/postgres-operator/pkg/generated/informers/externalversions/acid.zalan.do" + internalinterfaces "github.com/zalando-incubator/postgres-operator/pkg/generated/informers/externalversions/internalinterfaces" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + cache "k8s.io/client-go/tools/cache" +) + +// SharedInformerOption defines the functional option type for SharedInformerFactory. 
+type SharedInformerOption func(*sharedInformerFactory) *sharedInformerFactory + +type sharedInformerFactory struct { + client versioned.Interface + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc + lock sync.Mutex + defaultResync time.Duration + customResync map[reflect.Type]time.Duration + + informers map[reflect.Type]cache.SharedIndexInformer + // startedInformers is used for tracking which informers have been started. + // This allows Start() to be called multiple times safely. + startedInformers map[reflect.Type]bool +} + +// WithCustomResyncConfig sets a custom resync period for the specified informer types. +func WithCustomResyncConfig(resyncConfig map[v1.Object]time.Duration) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + for k, v := range resyncConfig { + factory.customResync[reflect.TypeOf(k)] = v + } + return factory + } +} + +// WithTweakListOptions sets a custom filter on all listers of the configured SharedInformerFactory. +func WithTweakListOptions(tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + factory.tweakListOptions = tweakListOptions + return factory + } +} + +// WithNamespace limits the SharedInformerFactory to the specified namespace. +func WithNamespace(namespace string) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + factory.namespace = namespace + return factory + } +} + +// NewSharedInformerFactory constructs a new instance of sharedInformerFactory for all namespaces. +func NewSharedInformerFactory(client versioned.Interface, defaultResync time.Duration) SharedInformerFactory { + return NewSharedInformerFactoryWithOptions(client, defaultResync) +} + +// NewFilteredSharedInformerFactory constructs a new instance of sharedInformerFactory. 
+// Listers obtained via this SharedInformerFactory will be subject to the same filters +// as specified here. +// Deprecated: Please use NewSharedInformerFactoryWithOptions instead +func NewFilteredSharedInformerFactory(client versioned.Interface, defaultResync time.Duration, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerFactory { + return NewSharedInformerFactoryWithOptions(client, defaultResync, WithNamespace(namespace), WithTweakListOptions(tweakListOptions)) +} + +// NewSharedInformerFactoryWithOptions constructs a new instance of a SharedInformerFactory with additional options. +func NewSharedInformerFactoryWithOptions(client versioned.Interface, defaultResync time.Duration, options ...SharedInformerOption) SharedInformerFactory { + factory := &sharedInformerFactory{ + client: client, + namespace: v1.NamespaceAll, + defaultResync: defaultResync, + informers: make(map[reflect.Type]cache.SharedIndexInformer), + startedInformers: make(map[reflect.Type]bool), + customResync: make(map[reflect.Type]time.Duration), + } + + // Apply all options + for _, opt := range options { + factory = opt(factory) + } + + return factory +} + +// Start initializes all requested informers. +func (f *sharedInformerFactory) Start(stopCh <-chan struct{}) { + f.lock.Lock() + defer f.lock.Unlock() + + for informerType, informer := range f.informers { + if !f.startedInformers[informerType] { + go informer.Run(stopCh) + f.startedInformers[informerType] = true + } + } +} + +// WaitForCacheSync waits for all started informers' cache were synced. 
+func (f *sharedInformerFactory) WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool { + informers := func() map[reflect.Type]cache.SharedIndexInformer { + f.lock.Lock() + defer f.lock.Unlock() + + informers := map[reflect.Type]cache.SharedIndexInformer{} + for informerType, informer := range f.informers { + if f.startedInformers[informerType] { + informers[informerType] = informer + } + } + return informers + }() + + res := map[reflect.Type]bool{} + for informType, informer := range informers { + res[informType] = cache.WaitForCacheSync(stopCh, informer.HasSynced) + } + return res +} + +// InternalInformerFor returns the SharedIndexInformer for obj using an internal +// client. +func (f *sharedInformerFactory) InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer { + f.lock.Lock() + defer f.lock.Unlock() + + informerType := reflect.TypeOf(obj) + informer, exists := f.informers[informerType] + if exists { + return informer + } + + resyncPeriod, exists := f.customResync[informerType] + if !exists { + resyncPeriod = f.defaultResync + } + + informer = newFunc(f.client, resyncPeriod) + f.informers[informerType] = informer + + return informer +} + +// SharedInformerFactory provides shared informers for resources in all known +// API group versions. 
+type SharedInformerFactory interface { + internalinterfaces.SharedInformerFactory + ForResource(resource schema.GroupVersionResource) (GenericInformer, error) + WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool + + Acid() acidzalando.Interface +} + +func (f *sharedInformerFactory) Acid() acidzalando.Interface { + return acidzalando.New(f, f.namespace, f.tweakListOptions) +} diff --git a/pkg/generated/informers/externalversions/generic.go b/pkg/generated/informers/externalversions/generic.go new file mode 100644 index 000000000..1b1988212 --- /dev/null +++ b/pkg/generated/informers/externalversions/generic.go @@ -0,0 +1,68 @@ +/* +Copyright 2018 Compose, Zalando SE + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +*/ + +// Code generated by informer-gen. DO NOT EDIT. 
+ +package externalversions + +import ( + "fmt" + + v1 "github.com/zalando-incubator/postgres-operator/pkg/apis/acid.zalan.do/v1" + schema "k8s.io/apimachinery/pkg/runtime/schema" + cache "k8s.io/client-go/tools/cache" +) + +// GenericInformer is type of SharedIndexInformer which will locate and delegate to other +// sharedInformers based on type +type GenericInformer interface { + Informer() cache.SharedIndexInformer + Lister() cache.GenericLister +} + +type genericInformer struct { + informer cache.SharedIndexInformer + resource schema.GroupResource +} + +// Informer returns the SharedIndexInformer. +func (f *genericInformer) Informer() cache.SharedIndexInformer { + return f.informer +} + +// Lister returns the GenericLister. +func (f *genericInformer) Lister() cache.GenericLister { + return cache.NewGenericLister(f.Informer().GetIndexer(), f.resource) +} + +// ForResource gives generic access to a shared informer of the matching type +// TODO extend this to unknown resources with a client pool +func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource) (GenericInformer, error) { + switch resource { + // Group=acid.zalan.do, Version=v1 + case v1.SchemeGroupVersion.WithResource("postgresqls"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Acid().V1().Postgresqls().Informer()}, nil + + } + + return nil, fmt.Errorf("no informer found for %v", resource) +} diff --git a/pkg/generated/informers/externalversions/internalinterfaces/factory_interfaces.go b/pkg/generated/informers/externalversions/internalinterfaces/factory_interfaces.go new file mode 100644 index 000000000..f3b4ab9fa --- /dev/null +++ b/pkg/generated/informers/externalversions/internalinterfaces/factory_interfaces.go @@ -0,0 +1,44 @@ +/* +Copyright 2018 Compose, Zalando SE + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without 
restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package internalinterfaces + +import ( + time "time" + + versioned "github.com/zalando-incubator/postgres-operator/pkg/generated/clientset/versioned" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + cache "k8s.io/client-go/tools/cache" +) + +type NewInformerFunc func(versioned.Interface, time.Duration) cache.SharedIndexInformer + +// SharedInformerFactory a small interface to allow for adding an informer without an import cycle +type SharedInformerFactory interface { + Start(stopCh <-chan struct{}) + InformerFor(obj runtime.Object, newFunc NewInformerFunc) cache.SharedIndexInformer +} + +type TweakListOptionsFunc func(*v1.ListOptions) diff --git a/pkg/generated/listers/acid.zalan.do/v1/expansion_generated.go b/pkg/generated/listers/acid.zalan.do/v1/expansion_generated.go new file mode 100644 index 000000000..071a413d6 --- /dev/null +++ b/pkg/generated/listers/acid.zalan.do/v1/expansion_generated.go @@ -0,0 +1,33 @@ +/* +Copyright 2018 Compose, Zalando SE + +Permission is hereby granted, free of charge, to any person obtaining 
a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +// PostgresqlListerExpansion allows custom methods to be added to +// PostgresqlLister. +type PostgresqlListerExpansion interface{} + +// PostgresqlNamespaceListerExpansion allows custom methods to be added to +// PostgresqlNamespaceLister. 
+type PostgresqlNamespaceListerExpansion interface{} diff --git a/pkg/generated/listers/acid.zalan.do/v1/postgresql.go b/pkg/generated/listers/acid.zalan.do/v1/postgresql.go new file mode 100644 index 000000000..c8603bc79 --- /dev/null +++ b/pkg/generated/listers/acid.zalan.do/v1/postgresql.go @@ -0,0 +1,100 @@ +/* +Copyright 2018 Compose, Zalando SE + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/zalando-incubator/postgres-operator/pkg/apis/acid.zalan.do/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// PostgresqlLister helps list Postgresqls. +type PostgresqlLister interface { + // List lists all Postgresqls in the indexer. + List(selector labels.Selector) (ret []*v1.Postgresql, err error) + // Postgresqls returns an object that can list and get Postgresqls. 
+ Postgresqls(namespace string) PostgresqlNamespaceLister + PostgresqlListerExpansion +} + +// postgresqlLister implements the PostgresqlLister interface. +type postgresqlLister struct { + indexer cache.Indexer +} + +// NewPostgresqlLister returns a new PostgresqlLister. +func NewPostgresqlLister(indexer cache.Indexer) PostgresqlLister { + return &postgresqlLister{indexer: indexer} +} + +// List lists all Postgresqls in the indexer. +func (s *postgresqlLister) List(selector labels.Selector) (ret []*v1.Postgresql, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.Postgresql)) + }) + return ret, err +} + +// Postgresqls returns an object that can list and get Postgresqls. +func (s *postgresqlLister) Postgresqls(namespace string) PostgresqlNamespaceLister { + return postgresqlNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// PostgresqlNamespaceLister helps list and get Postgresqls. +type PostgresqlNamespaceLister interface { + // List lists all Postgresqls in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1.Postgresql, err error) + // Get retrieves the Postgresql from the indexer for a given namespace and name. + Get(name string) (*v1.Postgresql, error) + PostgresqlNamespaceListerExpansion +} + +// postgresqlNamespaceLister implements the PostgresqlNamespaceLister +// interface. +type postgresqlNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all Postgresqls in the indexer for a given namespace. +func (s postgresqlNamespaceLister) List(selector labels.Selector) (ret []*v1.Postgresql, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.Postgresql)) + }) + return ret, err +} + +// Get retrieves the Postgresql from the indexer for a given namespace and name. 
+func (s postgresqlNamespaceLister) Get(name string) (*v1.Postgresql, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("postgresql"), name) + } + return obj.(*v1.Postgresql), nil +} diff --git a/pkg/spec/postgresql.go b/pkg/spec/postgresql.go deleted file mode 100644 index 889ebe5bc..000000000 --- a/pkg/spec/postgresql.go +++ /dev/null @@ -1,394 +0,0 @@ -package spec - -import ( - "encoding/json" - "fmt" - "github.com/mohae/deepcopy" - "regexp" - "strings" - "time" - - "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" -) - -// MaintenanceWindow describes the time window when the operator is allowed to do maintenance on a cluster. -type MaintenanceWindow struct { - Everyday bool - Weekday time.Weekday - StartTime time.Time // Start time - EndTime time.Time // End time -} - -// Volume describes a single volume in the manifest. -type Volume struct { - Size string `json:"size"` - StorageClass string `json:"storageClass"` -} - -// PostgresqlParam describes PostgreSQL version and pairs of configuration parameter name - values. -type PostgresqlParam struct { - PgVersion string `json:"version"` - Parameters map[string]string `json:"parameters"` -} - -// ResourceDescription describes CPU and memory resources defined for a cluster. -type ResourceDescription struct { - CPU string `json:"cpu"` - Memory string `json:"memory"` -} - -// Resources describes requests and limits for the cluster resouces. 
-type Resources struct { - ResourceRequest ResourceDescription `json:"requests,omitempty"` - ResourceLimits ResourceDescription `json:"limits,omitempty"` -} - -// Patroni contains Patroni-specific configuration -type Patroni struct { - InitDB map[string]string `json:"initdb"` - PgHba []string `json:"pg_hba"` - TTL uint32 `json:"ttl"` - LoopWait uint32 `json:"loop_wait"` - RetryTimeout uint32 `json:"retry_timeout"` - MaximumLagOnFailover float32 `json:"maximum_lag_on_failover"` // float32 because https://github.com/kubernetes/kubernetes/issues/30213 -} - -// CloneDescription describes which cluster the new should clone and up to which point in time -type CloneDescription struct { - ClusterName string `json:"cluster,omitempty"` - UID string `json:"uid,omitempty"` - EndTimestamp string `json:"timestamp,omitempty"` -} - -// Sidecar defines a container to be run in the same pod as the Postgres container. -type Sidecar struct { - Resources `json:"resources,omitempty"` - Name string `json:"name,omitempty"` - DockerImage string `json:"image,omitempty"` - Ports []v1.ContainerPort `json:"ports,omitempty"` - Env []v1.EnvVar `json:"env,omitempty"` -} - -// UserFlags defines flags (such as superuser, nologin) that could be assigned to individual users -type UserFlags []string - -// PostgresStatus contains status of the PostgreSQL cluster (running, creation failed etc.) 
-type PostgresStatus string - -// possible values for PostgreSQL cluster statuses -const ( - ClusterStatusUnknown PostgresStatus = "" - ClusterStatusCreating PostgresStatus = "Creating" - ClusterStatusUpdating PostgresStatus = "Updating" - ClusterStatusUpdateFailed PostgresStatus = "UpdateFailed" - ClusterStatusSyncFailed PostgresStatus = "SyncFailed" - ClusterStatusAddFailed PostgresStatus = "CreateFailed" - ClusterStatusRunning PostgresStatus = "Running" - ClusterStatusInvalid PostgresStatus = "Invalid" -) - -const ( - serviceNameMaxLength = 63 - clusterNameMaxLength = serviceNameMaxLength - len("-repl") - serviceNameRegexString = `^[a-z]([-a-z0-9]*[a-z0-9])?$` -) - -// Postgresql defines PostgreSQL Custom Resource Definition Object. -type Postgresql struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata"` - - Spec PostgresSpec `json:"spec"` - Status PostgresStatus `json:"status,omitempty"` - Error error `json:"-"` -} - -// PostgresSpec defines the specification for the PostgreSQL TPR. 
-type PostgresSpec struct { - PostgresqlParam `json:"postgresql"` - Volume `json:"volume,omitempty"` - Patroni `json:"patroni,omitempty"` - Resources `json:"resources,omitempty"` - - TeamID string `json:"teamId"` - DockerImage string `json:"dockerImage,omitempty"` - - // vars that enable load balancers are pointers because it is important to know if any of them is omitted from the Postgres manifest - // in that case the var evaluates to nil and the value is taken from the operator config - EnableMasterLoadBalancer *bool `json:"enableMasterLoadBalancer,omitempty"` - EnableReplicaLoadBalancer *bool `json:"enableReplicaLoadBalancer,omitempty"` - - // deprecated load balancer settings maintained for backward compatibility - // see "Load balancers" operator docs - UseLoadBalancer *bool `json:"useLoadBalancer,omitempty"` - ReplicaLoadBalancer *bool `json:"replicaLoadBalancer,omitempty"` - - // load balancers' source ranges are the same for master and replica services - AllowedSourceRanges []string `json:"allowedSourceRanges"` - - NumberOfInstances int32 `json:"numberOfInstances"` - Users map[string]UserFlags `json:"users"` - MaintenanceWindows []MaintenanceWindow `json:"maintenanceWindows,omitempty"` - Clone CloneDescription `json:"clone"` - ClusterName string `json:"-"` - Databases map[string]string `json:"databases,omitempty"` - Tolerations []v1.Toleration `json:"tolerations,omitempty"` - Sidecars []Sidecar `json:"sidecars,omitempty"` - PodPriorityClassName string `json:"pod_priority_class_name,omitempty"` -} - -// PostgresqlList defines a list of PostgreSQL clusters. -type PostgresqlList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata"` - - Items []Postgresql `json:"items"` -} - -var ( - weekdays = map[string]int{"Sun": 0, "Mon": 1, "Tue": 2, "Wed": 3, "Thu": 4, "Fri": 5, "Sat": 6} - serviceNameRegex = regexp.MustCompile(serviceNameRegexString) -) - -// Clone makes a deepcopy of the Postgresql structure. 
The Error field is nulled-out, -// as there is no guarantee that the actual implementation of the error interface -// will not contain any private fields not-reachable to deepcopy. This should be ok, -// since Error is never read from a Kubernetes object. -func (p *Postgresql) Clone() *Postgresql { - if p == nil { - return nil - } - c := deepcopy.Copy(p).(*Postgresql) - c.Error = nil - return c -} - -func (p *Postgresql) DeepCopyInto(out *Postgresql) { - if p != nil { - *out = deepcopy.Copy(*p).(Postgresql) - } -} - -func (p *Postgresql) DeepCopy() *Postgresql { - if p == nil { - return nil - } - out := new(Postgresql) - p.DeepCopyInto(out) - return out -} - -func (p *Postgresql) DeepCopyObject() runtime.Object { - if c := p.DeepCopy(); c != nil { - return c - } - return nil -} - -func parseTime(s string) (time.Time, error) { - parts := strings.Split(s, ":") - if len(parts) != 2 { - return time.Time{}, fmt.Errorf("incorrect time format") - } - timeLayout := "15:04" - - tp, err := time.Parse(timeLayout, s) - if err != nil { - return time.Time{}, err - } - - return tp.UTC(), nil -} - -func parseWeekday(s string) (time.Weekday, error) { - weekday, ok := weekdays[s] - if !ok { - return time.Weekday(0), fmt.Errorf("incorrect weekday") - } - - return time.Weekday(weekday), nil -} - -// MarshalJSON converts a maintenance window definition to JSON. -func (m *MaintenanceWindow) MarshalJSON() ([]byte, error) { - if m.Everyday { - return []byte(fmt.Sprintf("\"%s-%s\"", - m.StartTime.Format("15:04"), - m.EndTime.Format("15:04"))), nil - } - - return []byte(fmt.Sprintf("\"%s:%s-%s\"", - m.Weekday.String()[:3], - m.StartTime.Format("15:04"), - m.EndTime.Format("15:04"))), nil -} - -// UnmarshalJSON converts a JSON to the maintenance window definition. 
-func (m *MaintenanceWindow) UnmarshalJSON(data []byte) error { - var ( - got MaintenanceWindow - err error - ) - - parts := strings.Split(string(data[1:len(data)-1]), "-") - if len(parts) != 2 { - return fmt.Errorf("incorrect maintenance window format") - } - - fromParts := strings.Split(parts[0], ":") - switch len(fromParts) { - case 3: - got.Everyday = false - got.Weekday, err = parseWeekday(fromParts[0]) - if err != nil { - return fmt.Errorf("could not parse weekday: %v", err) - } - - got.StartTime, err = parseTime(fromParts[1] + ":" + fromParts[2]) - case 2: - got.Everyday = true - got.StartTime, err = parseTime(fromParts[0] + ":" + fromParts[1]) - default: - return fmt.Errorf("incorrect maintenance window format") - } - if err != nil { - return fmt.Errorf("could not parse start time: %v", err) - } - - got.EndTime, err = parseTime(parts[1]) - if err != nil { - return fmt.Errorf("could not parse end time: %v", err) - } - - if got.EndTime.Before(got.StartTime) { - return fmt.Errorf("'From' time must be prior to the 'To' time") - } - - *m = got - - return nil -} - -func extractClusterName(clusterName string, teamName string) (string, error) { - teamNameLen := len(teamName) - if len(clusterName) < teamNameLen+2 { - return "", fmt.Errorf("name is too short") - } - - if teamNameLen == 0 { - return "", fmt.Errorf("team name is empty") - } - - if strings.ToLower(clusterName[:teamNameLen+1]) != strings.ToLower(teamName)+"-" { - return "", fmt.Errorf("name must match {TEAM}-{NAME} format") - } - if len(clusterName) > clusterNameMaxLength { - return "", fmt.Errorf("name cannot be longer than %d characters", clusterNameMaxLength) - } - if !serviceNameRegex.MatchString(clusterName) { - return "", fmt.Errorf("name must confirm to DNS-1035, regex used for validation is %q", - serviceNameRegexString) - } - - return clusterName[teamNameLen+1:], nil -} - -func validateCloneClusterDescription(clone *CloneDescription) error { - // when cloning from the basebackup (no end 
timestamp) check that the cluster name is a valid service name - if clone.ClusterName != "" && clone.EndTimestamp == "" { - if !serviceNameRegex.MatchString(clone.ClusterName) { - return fmt.Errorf("clone cluster name must confirm to DNS-1035, regex used for validation is %q", - serviceNameRegexString) - } - if len(clone.ClusterName) > serviceNameMaxLength { - return fmt.Errorf("clone cluster name must be no longer than %d characters", serviceNameMaxLength) - } - } - return nil -} - -type postgresqlListCopy PostgresqlList -type postgresqlCopy Postgresql - -// UnmarshalJSON converts a JSON into the PostgreSQL object. -func (p *Postgresql) UnmarshalJSON(data []byte) error { - var tmp postgresqlCopy - - err := json.Unmarshal(data, &tmp) - if err != nil { - metaErr := json.Unmarshal(data, &tmp.ObjectMeta) - if metaErr != nil { - return err - } - - tmp.Error = err - tmp.Status = ClusterStatusInvalid - - *p = Postgresql(tmp) - - return nil - } - tmp2 := Postgresql(tmp) - - if clusterName, err := extractClusterName(tmp2.ObjectMeta.Name, tmp2.Spec.TeamID); err != nil { - tmp2.Error = err - tmp2.Status = ClusterStatusInvalid - } else if err := validateCloneClusterDescription(&tmp2.Spec.Clone); err != nil { - tmp2.Error = err - tmp2.Status = ClusterStatusInvalid - } else { - tmp2.Spec.ClusterName = clusterName - } - - *p = tmp2 - - return nil -} - -// UnmarshalJSON converts a JSON into the PostgreSQL List object. 
-func (pl *PostgresqlList) UnmarshalJSON(data []byte) error { - var tmp postgresqlListCopy - - err := json.Unmarshal(data, &tmp) - if err != nil { - return err - } - tmp2 := PostgresqlList(tmp) - *pl = tmp2 - - return nil -} - -func (pl *PostgresqlList) DeepCopy() *PostgresqlList { - if pl == nil { - return nil - } - out := new(PostgresqlList) - pl.DeepCopyInto(out) - return out -} - -func (pl *PostgresqlList) DeepCopyInto(out *PostgresqlList) { - if pl != nil { - *out = deepcopy.Copy(*pl).(PostgresqlList) - } -} - -func (pl *PostgresqlList) DeepCopyObject() runtime.Object { - if c := pl.DeepCopy(); c != nil { - return c - } - return nil -} - -func (status PostgresStatus) Success() bool { - return status != ClusterStatusAddFailed && - status != ClusterStatusUpdateFailed && - status != ClusterStatusSyncFailed -} - -func (status PostgresStatus) String() string { - return string(status) -} diff --git a/pkg/spec/types.go b/pkg/spec/types.go index 8e11f34ac..c683a1cef 100644 --- a/pkg/spec/types.go +++ b/pkg/spec/types.go @@ -11,29 +11,14 @@ import ( "time" "github.com/Sirupsen/logrus" - "k8s.io/api/apps/v1beta1" - "k8s.io/api/core/v1" - policyv1beta1 "k8s.io/api/policy/v1beta1" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/rest" ) -// EventType contains type of the events for the TPRs and Pods received from Kubernetes -type EventType string - // NamespacedName describes the namespace/name pairs used in Kubernetes names. 
type NamespacedName types.NamespacedName -// Possible values for the EventType -const ( - EventAdd EventType = "ADD" - EventUpdate EventType = "UPDATE" - EventDelete EventType = "DELETE" - EventSync EventType = "SYNC" - EventRepair EventType = "REPAIR" - - fileWithNamespace = "/var/run/secrets/kubernetes.io/serviceaccount/namespace" -) +const fileWithNamespace = "/var/run/secrets/kubernetes.io/serviceaccount/namespace" // RoleOrigin contains the code of the origin of a role type RoleOrigin int @@ -47,16 +32,6 @@ const ( RoleOriginSystem ) -// ClusterEvent carries the payload of the Cluster TPR events. -type ClusterEvent struct { - EventTime time.Time - UID types.UID - EventType EventType - OldSpec *Postgresql - NewSpec *Postgresql - WorkerID uint32 -} - type syncUserOperation int // Possible values for the sync user operation (removal of users is not supported yet) @@ -66,15 +41,6 @@ const ( PGSyncAlterSet // handle ALTER ROLE SET parameter = value ) -// PodEvent describes the event for a single Pod -type PodEvent struct { - ResourceVersion string - PodName NamespacedName - PrevPod *v1.Pod - CurPod *v1.Pod - EventType EventType -} - // PgUser contains information about a single user. 
type PgUser struct { Origin RoleOrigin `yaml:"-"` @@ -109,36 +75,6 @@ type LogEntry struct { Message string } -// Process describes process of the cluster -type Process struct { - Name string - StartTime time.Time -} - -// ClusterStatus describes status of the cluster -type ClusterStatus struct { - Team string - Cluster string - MasterService *v1.Service - ReplicaService *v1.Service - MasterEndpoint *v1.Endpoints - ReplicaEndpoint *v1.Endpoints - StatefulSet *v1beta1.StatefulSet - PodDisruptionBudget *policyv1beta1.PodDisruptionBudget - - CurrentProcess Process - Worker uint32 - Status PostgresStatus - Spec PostgresSpec - Error error -} - -// WorkerStatus describes status of the worker -type WorkerStatus struct { - CurrentCluster NamespacedName - CurrentProcess Process -} - // Diff describes diff type Diff struct { EventTime time.Time @@ -260,30 +196,3 @@ func GetOperatorNamespace() string { } return operatorNamespace } - -type Duration time.Duration - -func (d *Duration) UnmarshalJSON(b []byte) error { - var ( - v interface{} - err error - ) - if err = json.Unmarshal(b, &v); err != nil { - return err - } - switch val := v.(type) { - case string: - t, err := time.ParseDuration(val) - if err != nil { - return err - } - *d = Duration(t) - return nil - case float64: - t := time.Duration(val) - *d = Duration(t) - return nil - default: - return fmt.Errorf("could not recognize type %T as a valid type to unmarshal to Duration", val) - } -} diff --git a/pkg/util/config/config.go b/pkg/util/config/config.go index 34d282a96..bcfea0647 100644 --- a/pkg/util/config/config.go +++ b/pkg/util/config/config.go @@ -42,7 +42,7 @@ type Resources struct { // Auth describes authentication specific configuration parameters type Auth struct { - SecretNameTemplate stringTemplate `name:"secret_name_template" default:"{username}.{cluster}.credentials.{tprkind}.{tprgroup}"` + SecretNameTemplate StringTemplate `name:"secret_name_template" 
default:"{username}.{cluster}.credentials.{tprkind}.{tprgroup}"` PamRoleName string `name:"pam_role_name" default:"zalandos"` PamConfiguration string `name:"pam_configuration" default:"https://info.example.com/oauth2/tokeninfo?access_token= uid realm=/employees"` TeamsAPIUrl string `name:"teams_api_url" default:"https://teams.example.com/api/"` @@ -93,9 +93,9 @@ type Config struct { EnableReplicaLoadBalancer bool `name:"enable_replica_load_balancer" default:"false"` // deprecated and kept for backward compatibility EnableLoadBalancer *bool `name:"enable_load_balancer"` - MasterDNSNameFormat stringTemplate `name:"master_dns_name_format" default:"{cluster}.{team}.{hostedzone}"` - ReplicaDNSNameFormat stringTemplate `name:"replica_dns_name_format" default:"{cluster}-repl.{team}.{hostedzone}"` - PDBNameFormat stringTemplate `name:"pdb_name_format" default:"postgres-{cluster}-pdb"` + MasterDNSNameFormat StringTemplate `name:"master_dns_name_format" default:"{cluster}.{team}.{hostedzone}"` + ReplicaDNSNameFormat StringTemplate `name:"replica_dns_name_format" default:"{cluster}-repl.{team}.{hostedzone}"` + PDBNameFormat StringTemplate `name:"pdb_name_format" default:"postgres-{cluster}-pdb"` Workers uint32 `name:"workers" default:"4"` APIPort int `name:"api_port" default:"8080"` RingLogLines int `name:"ring_log_lines" default:"100"` diff --git a/pkg/util/config/util.go b/pkg/util/config/util.go index aef333ce7..498810bb7 100644 --- a/pkg/util/config/util.go +++ b/pkg/util/config/util.go @@ -19,7 +19,7 @@ type fieldInfo struct { Field reflect.Value } -type stringTemplate string +type StringTemplate string func decoderFrom(field reflect.Value) (d decoder) { // it may be impossible for a struct field to fail this check @@ -172,7 +172,7 @@ func processField(value string, field reflect.Value) error { type parserState int const ( - plain parserState = iota + plain parserState = iota doubleQuoted singleQuoted ) @@ -221,13 +221,13 @@ func getMapPairsFromString(value string) 
(pairs []string, err error) { return } -func (f *stringTemplate) Decode(value string) error { - *f = stringTemplate(value) +func (f *StringTemplate) Decode(value string) error { + *f = StringTemplate(value) return nil } -func (f *stringTemplate) Format(a ...string) string { +func (f *StringTemplate) Format(a ...string) string { res := string(*f) for i := 0; i < len(a); i += 2 { @@ -237,6 +237,6 @@ func (f *stringTemplate) Format(a ...string) string { return res } -func (f stringTemplate) MarshalJSON() ([]byte, error) { +func (f StringTemplate) MarshalJSON() ([]byte, error) { return json.Marshal(string(f)) } diff --git a/pkg/util/constants/crd.go b/pkg/util/constants/crd.go deleted file mode 100644 index 113264f01..000000000 --- a/pkg/util/constants/crd.go +++ /dev/null @@ -1,13 +0,0 @@ -package constants - -// Different properties of the PostgreSQL Custom Resource Definition -const ( - PostgresCRDKind = "postgresql" - PostgresCRDResource = "postgresqls" - PostgresCRDShort = "pg" - CRDGroup = "acid.zalan.do" - CRDApiVersion = "v1" - OperatorConfigCRDKind = "postgresql-operator-configuration" - OperatorConfigCRDResource = "postgresql-operator-configurations" - OperatorConfigCRDShort = "pgopconfig" -) diff --git a/pkg/util/k8sutil/k8sutil.go b/pkg/util/k8sutil/k8sutil.go index f4af5fea1..9ab5cb876 100644 --- a/pkg/util/k8sutil/k8sutil.go +++ b/pkg/util/k8sutil/k8sutil.go @@ -2,25 +2,22 @@ package k8sutil import ( "fmt" - "reflect" - + "github.com/zalando-incubator/postgres-operator/pkg/util/constants" "k8s.io/api/core/v1" policybeta1 "k8s.io/api/policy/v1beta1" apiextclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" apiextbeta1 "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1" apierrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes" - "k8s.io/client-go/kubernetes/scheme" 
"k8s.io/client-go/kubernetes/typed/apps/v1beta1" v1core "k8s.io/client-go/kubernetes/typed/core/v1" policyv1beta1 "k8s.io/client-go/kubernetes/typed/policy/v1beta1" rbacv1beta1 "k8s.io/client-go/kubernetes/typed/rbac/v1beta1" "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" + "reflect" - "github.com/zalando-incubator/postgres-operator/pkg/util/constants" + acidv1client "github.com/zalando-incubator/postgres-operator/pkg/generated/clientset/versioned" ) // KubernetesClient describes getters for Kubernetes objects @@ -40,8 +37,8 @@ type KubernetesClient struct { policyv1beta1.PodDisruptionBudgetsGetter apiextbeta1.CustomResourceDefinitionsGetter - RESTClient rest.Interface - CRDREST rest.Interface + RESTClient rest.Interface + AcidV1ClientSet *acidv1client.Clientset } // RestConfig creates REST config @@ -87,27 +84,13 @@ func NewFromConfig(cfg *rest.Config) (KubernetesClient, error) { kubeClient.RESTClient = client.CoreV1().RESTClient() kubeClient.RoleBindingsGetter = client.RbacV1beta1() - cfg2 := *cfg - cfg2.GroupVersion = &schema.GroupVersion{ - Group: constants.CRDGroup, - Version: constants.CRDApiVersion, - } - cfg2.APIPath = constants.K8sAPIPath - // MIGRATION: api.codecs -> scheme.Codecs? 
- cfg2.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} - - crd, err := rest.RESTClientFor(&cfg2) - if err != nil { - return kubeClient, fmt.Errorf("could not get rest client: %v", err) - } - kubeClient.CRDREST = crd - apiextClient, err := apiextclient.NewForConfig(cfg) if err != nil { return kubeClient, fmt.Errorf("could not create api client:%v", err) } kubeClient.CustomResourceDefinitionsGetter = apiextClient.ApiextensionsV1beta1() + kubeClient.AcidV1ClientSet = acidv1client.NewForConfigOrDie(cfg) return kubeClient, nil } From aeae0a6ef296bfebd14f277be6646d2fa5a0e0f8 Mon Sep 17 00:00:00 2001 From: zerg-junior Date: Wed, 22 Aug 2018 11:07:12 +0200 Subject: [PATCH 25/30] Use cluster's own namespace to patch the cluster manifest (#373) --- pkg/cluster/cluster.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pkg/cluster/cluster.go b/pkg/cluster/cluster.go index da266041e..f5d514950 100644 --- a/pkg/cluster/cluster.go +++ b/pkg/cluster/cluster.go @@ -20,6 +20,7 @@ import ( "k8s.io/client-go/tools/cache" "encoding/json" + acidv1 "github.com/zalando-incubator/postgres-operator/pkg/apis/acid.zalan.do/v1" "github.com/zalando-incubator/postgres-operator/pkg/spec" "github.com/zalando-incubator/postgres-operator/pkg/util" @@ -162,7 +163,7 @@ func (c *Cluster) setStatus(status acidv1.PostgresStatus) { // we cannot do a full scale update here without fetching the previous manifest (as the resourceVersion may differ), // however, we could do patch without it. In the future, once /status subresource is there (starting Kubernets 1.11) // we should take advantage of it. 
- newspec, err := c.KubeClient.AcidV1ClientSet.AcidV1().Postgresqls(c.OpConfig.WatchedNamespace).Patch(c.Name, types.MergePatchType, patch) + newspec, err := c.KubeClient.AcidV1ClientSet.AcidV1().Postgresqls(c.clusterNamespace()).Patch(c.Name, types.MergePatchType, patch) if err != nil { c.logger.Errorf("could not update status: %v", err) } From 5ed109678ce7becd90459863ce1cbf8c653da8a6 Mon Sep 17 00:00:00 2001 From: Valer Cara Date: Wed, 22 Aug 2018 12:33:17 +0300 Subject: [PATCH 26/30] Add GoDoc badge to readme (#372) --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index e7b486087..595dca6d5 100644 --- a/README.md +++ b/README.md @@ -3,6 +3,7 @@ [![Build Status](https://travis-ci.org/zalando-incubator/postgres-operator.svg?branch=master)](https://travis-ci.org/zalando-incubator/postgres-operator) [![Coverage Status](https://coveralls.io/repos/github/zalando-incubator/postgres-operator/badge.svg)](https://coveralls.io/github/zalando-incubator/postgres-operator) [![Go Report Card](https://goreportcard.com/badge/github.com/zalando-incubator/postgres-operator)](https://goreportcard.com/report/github.com/zalando-incubator/postgres-operator) +[![GoDoc](https://godoc.org/github.com/zalando-incubator/postgres-operator?status.svg)](https://godoc.org/github.com/zalando-incubator/postgres-operator) ## Introduction From 4543bfde96aac406240ee2f1faa591bae7c6b83d Mon Sep 17 00:00:00 2001 From: zerg-junior Date: Wed, 22 Aug 2018 11:34:15 +0200 Subject: [PATCH 27/30] Document code generation (#370) * Document code generation --- docs/developer.md | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/docs/developer.md b/docs/developer.md index 407af6439..dba627149 100644 --- a/docs/developer.md +++ b/docs/developer.md @@ -151,6 +151,20 @@ minikube. The following steps will get you the docker image built and deployed. 
 $ sed -e "s/\(image\:.*\:\).*$/\1$TAG/" manifests/postgres-operator.yaml|kubectl --context minikube create -f - ``` +# Code generation + +The operator employs k8s-provided code generation to obtain deep copy methods and Kubernetes-like APIs for its custom resource definitions, namely the Postgres CRD and the operator CRD. The usage of the code generation follows conventions from the k8s community. Relevant scripts live in the `hack` directory: the `update-codegen.sh` triggers code generation for the APIs defined in `pkg/apis/acid.zalan.do/`, +the `verify-codegen.sh` checks if the generated code is up-to-date (to be used within CI). The `/pkg/generated/` contains the resultant code. To make these scripts work, you may need to `export GOPATH=$(go env GOPATH)`. + +References for code generation are: +* [Relevant pull request](https://github.com/zalando-incubator/postgres-operator/pull/369) +See comments there for minor issues that can sometimes break the generation process. +* [Code generator source code](https://github.com/kubernetes/code-generator) +* [Code Generation for CustomResources](https://blog.openshift.com/kubernetes-deep-dive-code-generation-customresources/) - intro post on the topic +* Code generation in [Prometheus](https://github.com/coreos/prometheus-operator) and [etcd](https://github.com/coreos/etcd-operator) operators + +To debug the generated API locally, use the [kubectl proxy](https://kubernetes.io/docs/tasks/access-kubernetes-api/http-proxy-access-api/) and `kubectl --v=8` log level to display contents of HTTP requests (run the operator itself with `--v=8` to log all REST API requests). To attach a debugger to the operator, use the `-outofcluster` option to run the operator locally on the developer's laptop (and not in a docker container). + # Debugging the operator There is a web interface in the operator to observe its internal state.
The From 75a1d782b0d533a8db279dce5319af1a8dd0e1fa Mon Sep 17 00:00:00 2001 From: zerg-junior Date: Wed, 29 Aug 2018 14:00:00 +0200 Subject: [PATCH 28/30] Update CODEOWNERS (#376) Add @Jan-M @CyberDem0n @avaczi as codeowners --- CODEOWNERS | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CODEOWNERS b/CODEOWNERS index c7016c764..7e6db5933 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -1,2 +1,2 @@ # global owners -* @alexeyklyukin @erthalion @zerg-junior +* @alexeyklyukin @erthalion @zerg-junior @Jan-M @CyberDem0n @avaczi From 1e53e22773e7ed2a0acc1dc4c9365c7da84fadf9 Mon Sep 17 00:00:00 2001 From: zerg-junior Date: Wed, 29 Aug 2018 17:08:59 +0200 Subject: [PATCH 29/30] Improve error reporting for short cluster names (#377) * Improve error reporting for short cluster names * Revert to clusterName --- pkg/apis/acid.zalan.do/v1/util.go | 5 +++-- pkg/apis/acid.zalan.do/v1/util_test.go | 10 +++++++--- 2 files changed, 10 insertions(+), 5 deletions(-) diff --git a/pkg/apis/acid.zalan.do/v1/util.go b/pkg/apis/acid.zalan.do/v1/util.go index 7d071ce22..2d3c90db8 100644 --- a/pkg/apis/acid.zalan.do/v1/util.go +++ b/pkg/apis/acid.zalan.do/v1/util.go @@ -2,10 +2,11 @@ package v1 import ( "fmt" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "regexp" "strings" "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) var ( @@ -47,7 +48,7 @@ func parseWeekday(s string) (time.Weekday, error) { func extractClusterName(clusterName string, teamName string) (string, error) { teamNameLen := len(teamName) if len(clusterName) < teamNameLen+2 { - return "", fmt.Errorf("name is too short") + return "", fmt.Errorf("cluster name must match {TEAM}-{NAME} format. 
Got cluster name '%v', team name '%v'", clusterName, teamName) } if teamNameLen == 0 { diff --git a/pkg/apis/acid.zalan.do/v1/util_test.go b/pkg/apis/acid.zalan.do/v1/util_test.go index d1f06a2cc..ab5a39e9e 100644 --- a/pkg/apis/acid.zalan.do/v1/util_test.go +++ b/pkg/apis/acid.zalan.do/v1/util_test.go @@ -4,10 +4,11 @@ import ( "bytes" "encoding/json" "errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "reflect" "testing" "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) var parseTimeTests = []struct { @@ -49,8 +50,11 @@ var clusterNames = []struct { {"acid-test", "test", "", errors.New("name must match {TEAM}-{NAME} format")}, {"-test", "", "", errors.New("team name is empty")}, {"-test", "-", "", errors.New("name must match {TEAM}-{NAME} format")}, - {"", "-", "", errors.New("name is too short")}, - {"-", "-", "", errors.New("name is too short")}, + {"", "-", "", errors.New("cluster name must match {TEAM}-{NAME} format. Got cluster name '', team name '-'")}, + {"-", "-", "", errors.New("cluster name must match {TEAM}-{NAME} format. Got cluster name '-', team name '-'")}, + // user may specify the team part of the full cluster name differently from the team name returned by the Teams API + // in the case the actual Teams API name is long enough, this will fail the check + {"foo-bar", "qwerty", "", errors.New("cluster name must match {TEAM}-{NAME} format. Got cluster name 'foo-bar', team name 'qwerty'")}, } var cloneClusterDescriptions = []struct { From 25fa45fd580bb20d5b41b1f373a6907ad5670f00 Mon Sep 17 00:00:00 2001 From: zerg-junior Date: Thu, 30 Aug 2018 10:51:37 +0200 Subject: [PATCH 30/30] [WIP] Grant 'superuser' to the members of Postgres admin teams (#371) Added support for superuser team in addition to the admin team that owns the postgres cluster. 
--- docs/administrator.md | 12 ++ docs/reference/operator_parameters.md | 3 + .../v1/operator_configuration_type.go | 4 +- .../acid.zalan.do/v1/zz_generated.deepcopy.go | 5 + pkg/cluster/cluster.go | 37 ++++- pkg/cluster/cluster_test.go | 147 +++++++++++++++++- pkg/cluster/util.go | 12 +- pkg/controller/operator_config.go | 1 + pkg/spec/types.go | 2 +- pkg/util/config/config.go | 1 + 10 files changed, 211 insertions(+), 13 deletions(-) diff --git a/docs/administrator.md b/docs/administrator.md index a7dff68ef..1b360cd00 100644 --- a/docs/administrator.md +++ b/docs/administrator.md @@ -208,3 +208,15 @@ generated from the current cluster manifest. There are two types of scans: a `sync scan`, running every `resync_period` seconds for every cluster, and the `repair scan`, coming every `repair_period` only for those clusters that didn't report success as a result of the last operation applied to them. + +## Postgres roles supported by the operator + +The operator is capable of maintaining roles of multiple kinds within a Postgres database cluster: + +1. **System roles** are roles necessary for the proper work of Postgres itself such as a replication role or the initial superuser role. The operator delegates creating such roles to Patroni and only establishes relevant secrets. + +2. **Infrastructure roles** are roles for processes originating from external systems, e.g. monitoring robots. The operator creates such roles in all PG clusters it manages assuming k8s secrets with the relevant credentials exist beforehand. + +3. **Per-cluster robot users** are also roles for processes originating from external systems but defined for an individual Postgres cluster in its manifest. A typical example is a role for connections from an application that uses the database. + +4. **Human users** originate from the Teams API that returns list of the team members given a team id. 
Operator differentiates between (a) product teams that own a particular Postgres cluster and are granted admin rights to maintain it, and (b) Postgres superuser teams that get the superuser access to all PG databases running in a k8s cluster for the purposes of maintaining and troubleshooting. \ No newline at end of file diff --git a/docs/reference/operator_parameters.md b/docs/reference/operator_parameters.md index 76ddb9ff9..7d8e243bb 100644 --- a/docs/reference/operator_parameters.md +++ b/docs/reference/operator_parameters.md @@ -377,6 +377,9 @@ key. List of roles that cannot be overwritten by an application, team or infrastructure role. The default is `admin`. +* **postgres_superuser_teams** + List of teams whose members need the superuser role in each PG database cluster to administer Postgres and maintain infrastructure built around it. The default is `postgres_superuser`. + ## Logging and REST API Parameters affecting logging and REST API listener. In the CRD-based configuration they are grouped under the `logging_rest_api` key. 
diff --git a/pkg/apis/acid.zalan.do/v1/operator_configuration_type.go b/pkg/apis/acid.zalan.do/v1/operator_configuration_type.go index cd70d76d9..de7681db4 100644 --- a/pkg/apis/acid.zalan.do/v1/operator_configuration_type.go +++ b/pkg/apis/acid.zalan.do/v1/operator_configuration_type.go @@ -3,9 +3,10 @@ package v1 import ( "github.com/zalando-incubator/postgres-operator/pkg/util/config" + "time" + "github.com/zalando-incubator/postgres-operator/pkg/spec" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "time" ) // +genclient @@ -99,6 +100,7 @@ type TeamsAPIConfiguration struct { PamRoleName string `json:"pam_role_name,omitempty"` PamConfiguration string `json:"pam_configuration,omitempty"` ProtectedRoles []string `json:"protected_role_names,omitempty"` + PostgresSuperuserTeams []string `json:"postgres_superuser_teams,omitempty"` } type LoggingRESTAPIConfiguration struct { diff --git a/pkg/apis/acid.zalan.do/v1/zz_generated.deepcopy.go b/pkg/apis/acid.zalan.do/v1/zz_generated.deepcopy.go index 01280e548..d58668054 100644 --- a/pkg/apis/acid.zalan.do/v1/zz_generated.deepcopy.go +++ b/pkg/apis/acid.zalan.do/v1/zz_generated.deepcopy.go @@ -631,6 +631,11 @@ func (in *TeamsAPIConfiguration) DeepCopyInto(out *TeamsAPIConfiguration) { *out = make([]string, len(*in)) copy(*out, *in) } + if in.PostgresSuperuserTeams != nil { + in, out := &in.PostgresSuperuserTeams, &out.PostgresSuperuserTeams + *out = make([]string, len(*in)) + copy(*out, *in) + } return } diff --git a/pkg/cluster/cluster.go b/pkg/cluster/cluster.go index f5d514950..7e73bd97c 100644 --- a/pkg/cluster/cluster.go +++ b/pkg/cluster/cluster.go @@ -723,11 +723,13 @@ func (c *Cluster) initRobotUsers() error { return nil } -func (c *Cluster) initHumanUsers() error { - teamMembers, err := c.getTeamMembers() +func (c *Cluster) initTeamMembers(teamID string, isPostgresSuperuserTeam bool) error { + teamMembers, err := c.getTeamMembers(teamID) + if err != nil { - return fmt.Errorf("could not get list of team members: 
%v", err) + return fmt.Errorf("could not get list of team members for team %q: %v", teamID, err) } + for _, username := range teamMembers { flags := []string{constants.RoleFlagLogin} memberOf := []string{c.OpConfig.PamRoleName} @@ -735,7 +737,7 @@ func (c *Cluster) initHumanUsers() error { if c.shouldAvoidProtectedOrSystemRole(username, "API role") { continue } - if c.OpConfig.EnableTeamSuperuser { + if c.OpConfig.EnableTeamSuperuser || isPostgresSuperuserTeam { flags = append(flags, constants.RoleFlagSuperuser) } else { if c.OpConfig.TeamAdminRole != "" { @@ -761,6 +763,33 @@ func (c *Cluster) initHumanUsers() error { return nil } +func (c *Cluster) initHumanUsers() error { + + var clusterIsOwnedBySuperuserTeam bool + + for _, postgresSuperuserTeam := range c.OpConfig.PostgresSuperuserTeams { + err := c.initTeamMembers(postgresSuperuserTeam, true) + if err != nil { + return fmt.Errorf("Cannot create a team %q of Postgres superusers: %v", postgresSuperuserTeam, err) + } + if postgresSuperuserTeam == c.Spec.TeamID { + clusterIsOwnedBySuperuserTeam = true + } + } + + if clusterIsOwnedBySuperuserTeam { + c.logger.Infof("Team %q owning the cluster is also a team of superusers. 
Created superuser roles for its members instead of admin roles.", c.Spec.TeamID) + return nil + } + + err := c.initTeamMembers(c.Spec.TeamID, false) + if err != nil { + return fmt.Errorf("Cannot create a team %q of admins owning the PG cluster: %v", c.Spec.TeamID, err) + } + + return nil +} + func (c *Cluster) initInfrastructureRoles() error { // add infrastructure roles from the operator's definition for username, newRole := range c.InfrastructureRoles { diff --git a/pkg/cluster/cluster_test.go b/pkg/cluster/cluster_test.go index 82400344f..c89874e99 100644 --- a/pkg/cluster/cluster_test.go +++ b/pkg/cluster/cluster_test.go @@ -2,6 +2,9 @@ package cluster import ( "fmt" + "reflect" + "testing" + "github.com/Sirupsen/logrus" acidv1 "github.com/zalando-incubator/postgres-operator/pkg/apis/acid.zalan.do/v1" "github.com/zalando-incubator/postgres-operator/pkg/spec" @@ -9,8 +12,6 @@ import ( "github.com/zalando-incubator/postgres-operator/pkg/util/k8sutil" "github.com/zalando-incubator/postgres-operator/pkg/util/teams" "k8s.io/api/core/v1" - "reflect" - "testing" ) const ( @@ -101,6 +102,7 @@ func (m *mockTeamsAPIClient) setMembers(members []string) { m.members = members } +// Test adding a member of a product team owning a particular DB cluster func TestInitHumanUsers(t *testing.T) { var mockTeamsAPI mockTeamsAPIClient @@ -108,7 +110,9 @@ func TestInitHumanUsers(t *testing.T) { cl.teamsAPIClient = &mockTeamsAPI testName := "TestInitHumanUsers" + // members of a product team are granted superuser rights for DBs of their team cl.OpConfig.EnableTeamSuperuser = true + cl.OpConfig.EnableTeamsAPI = true cl.OpConfig.PamRoleName = "zalandos" cl.Spec.TeamID = "test" @@ -146,6 +150,145 @@ func TestInitHumanUsers(t *testing.T) { } } +type mockTeam struct { + teamID string + members []string + isPostgresSuperuserTeam bool +} + +type mockTeamsAPIClientMultipleTeams struct { + teams []mockTeam +} + +func (m *mockTeamsAPIClientMultipleTeams) TeamInfo(teamID, token string) (tm 
*teams.Team, err error) { + for _, team := range m.teams { + if team.teamID == teamID { + return &teams.Team{Members: team.members}, nil + } + } + + // should not be reached if a slice with teams is populated correctly + return nil, nil +} + +// Test adding members of maintenance teams that get superuser rights for all PG databases +func TestInitHumanUsersWithSuperuserTeams(t *testing.T) { + + var mockTeamsAPI mockTeamsAPIClientMultipleTeams + cl.oauthTokenGetter = &mockOAuthTokenGetter{} + cl.teamsAPIClient = &mockTeamsAPI + cl.OpConfig.EnableTeamSuperuser = false + testName := "TestInitHumanUsersWithSuperuserTeams" + + cl.OpConfig.EnableTeamsAPI = true + cl.OpConfig.PamRoleName = "zalandos" + + teamA := mockTeam{ + teamID: "postgres_superusers", + members: []string{"postgres_superuser"}, + isPostgresSuperuserTeam: true, + } + + userA := spec.PgUser{ + Name: "postgres_superuser", + Origin: spec.RoleOriginTeamsAPI, + MemberOf: []string{cl.OpConfig.PamRoleName}, + Flags: []string{"LOGIN", "SUPERUSER"}, + } + + teamB := mockTeam{ + teamID: "postgres_admins", + members: []string{"postgres_admin"}, + isPostgresSuperuserTeam: true, + } + + userB := spec.PgUser{ + Name: "postgres_admin", + Origin: spec.RoleOriginTeamsAPI, + MemberOf: []string{cl.OpConfig.PamRoleName}, + Flags: []string{"LOGIN", "SUPERUSER"}, + } + + teamTest := mockTeam{ + teamID: "test", + members: []string{"test_user"}, + isPostgresSuperuserTeam: false, + } + + userTest := spec.PgUser{ + Name: "test_user", + Origin: spec.RoleOriginTeamsAPI, + MemberOf: []string{cl.OpConfig.PamRoleName}, + Flags: []string{"LOGIN"}, + } + + tests := []struct { + ownerTeam string + existingRoles map[string]spec.PgUser + superuserTeams []string + teams []mockTeam + result map[string]spec.PgUser + }{ + // case 1: there are two different teams of PG maintainers and one product team + { + ownerTeam: "test", + existingRoles: map[string]spec.PgUser{}, + superuserTeams: []string{"postgres_superusers", "postgres_admins"}, + 
teams: []mockTeam{teamA, teamB, teamTest}, + result: map[string]spec.PgUser{ + "postgres_superuser": userA, + "postgres_admin": userB, + "test_user": userTest, + }, + }, + // case 2: the team of superusers creates a new PG cluster + { + ownerTeam: "postgres_superusers", + existingRoles: map[string]spec.PgUser{}, + superuserTeams: []string{"postgres_superusers"}, + teams: []mockTeam{teamA}, + result: map[string]spec.PgUser{ + "postgres_superuser": userA, + }, + }, + // case 3: the team owning the cluster is promoted to the maintainers' status + { + ownerTeam: "postgres_superusers", + existingRoles: map[string]spec.PgUser{ + // role with the name exists before w/o superuser privilege + "postgres_superuser": spec.PgUser{ + Origin: spec.RoleOriginTeamsAPI, + Name: "postgres_superuser", + Password: "", + Flags: []string{"LOGIN"}, + MemberOf: []string{cl.OpConfig.PamRoleName}, + Parameters: map[string]string(nil)}}, + superuserTeams: []string{"postgres_superusers"}, + teams: []mockTeam{teamA}, + result: map[string]spec.PgUser{ + "postgres_superuser": userA, + }, + }, + } + + for _, tt := range tests { + + mockTeamsAPI.teams = tt.teams + + cl.Spec.TeamID = tt.ownerTeam + cl.pgUsers = tt.existingRoles + cl.OpConfig.PostgresSuperuserTeams = tt.superuserTeams + + if err := cl.initHumanUsers(); err != nil { + t.Errorf("%s got an unexpected error %v", testName, err) + } + + if !reflect.DeepEqual(cl.pgUsers, tt.result) { + t.Errorf("%s expects %#v, got %#v", testName, tt.result, cl.pgUsers) + } + } +} + func TestShouldDeleteSecret(t *testing.T) { testName := "TestShouldDeleteSecret" diff --git a/pkg/cluster/util.go b/pkg/cluster/util.go index 911bf74f8..9bfcd19c5 100644 --- a/pkg/cluster/util.go +++ b/pkg/cluster/util.go @@ -210,12 +210,14 @@ func (c *Cluster) logVolumeChanges(old, new acidv1.Volume) { c.logger.Debugf("diff\n%s\n", util.PrettyDiff(old, new)) } -func (c *Cluster) getTeamMembers() ([]string, error) { - if c.Spec.TeamID == "" { +func (c *Cluster) 
getTeamMembers(teamID string) ([]string, error) { + + if teamID == "" { return nil, fmt.Errorf("no teamId specified") } + if !c.OpConfig.EnableTeamsAPI { - c.logger.Debug("team API is disabled, returning empty list of members") + c.logger.Debugf("team API is disabled, returning empty list of members for team %q", teamID) return []string{}, nil } @@ -225,9 +227,9 @@ func (c *Cluster) getTeamMembers() ([]string, error) { return []string{}, nil } - teamInfo, err := c.teamsAPIClient.TeamInfo(c.Spec.TeamID, token) + teamInfo, err := c.teamsAPIClient.TeamInfo(teamID, token) if err != nil { - c.logger.Warnf("could not get team info, returning empty list of team members: %v", err) + c.logger.Warnf("could not get team info for team %q, returning empty list of team members: %v", teamID, err) return []string{}, nil } diff --git a/pkg/controller/operator_config.go b/pkg/controller/operator_config.go index 251828b32..93ba1a0f4 100644 --- a/pkg/controller/operator_config.go +++ b/pkg/controller/operator_config.go @@ -82,6 +82,7 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur result.EnableTeamSuperuser = fromCRD.TeamsAPI.EnableTeamSuperuser result.TeamAdminRole = fromCRD.TeamsAPI.TeamAdminRole result.PamRoleName = fromCRD.TeamsAPI.PamRoleName + result.PostgresSuperuserTeams = fromCRD.TeamsAPI.PostgresSuperuserTeams result.APIPort = fromCRD.LoggingRESTAPI.APIPort result.RingLogLines = fromCRD.LoggingRESTAPI.RingLogLines diff --git a/pkg/spec/types.go b/pkg/spec/types.go index c683a1cef..1607dbd9b 100644 --- a/pkg/spec/types.go +++ b/pkg/spec/types.go @@ -23,7 +23,7 @@ const fileWithNamespace = "/var/run/secrets/kubernetes.io/serviceaccount/namespa // RoleOrigin contains the code of the origin of a role type RoleOrigin int -// The rolesOrigin constant values should be sorted by the role priority. +// The rolesOrigin constant values must be sorted by the role priority for resolveNameConflict(...) to work. 
const ( RoleOriginUnknown RoleOrigin = iota RoleOriginManifest diff --git a/pkg/util/config/config.go b/pkg/util/config/config.go index bcfea0647..92fd3fd73 100644 --- a/pkg/util/config/config.go +++ b/pkg/util/config/config.go @@ -103,6 +103,7 @@ type Config struct { TeamAPIRoleConfiguration map[string]string `name:"team_api_role_configuration" default:"log_statement:all"` PodTerminateGracePeriod time.Duration `name:"pod_terminate_grace_period" default:"5m"` ProtectedRoles []string `name:"protected_role_names" default:"admin"` + PostgresSuperuserTeams []string `name:"postgres_superuser_teams" default:""` } // MustMarshal marshals the config or panics