From 692c721854e4667102778e8e69e3dd12e47984b5 Mon Sep 17 00:00:00 2001 From: Alex Stockinger Date: Thu, 8 Oct 2020 15:32:15 +0200 Subject: [PATCH 01/12] Introduce ENABLE_JSON_LOGGING env variable (#1158) --- charts/postgres-operator/templates/deployment.yaml | 4 ++++ charts/postgres-operator/values.yaml | 3 +++ cmd/main.go | 7 ++++++- docs/reference/command_line_and_environment.md | 4 ++++ pkg/controller/controller.go | 3 +++ pkg/spec/types.go | 2 ++ 6 files changed, 22 insertions(+), 1 deletion(-) diff --git a/charts/postgres-operator/templates/deployment.yaml b/charts/postgres-operator/templates/deployment.yaml index 2d8eebcb3..9841bf1bc 100644 --- a/charts/postgres-operator/templates/deployment.yaml +++ b/charts/postgres-operator/templates/deployment.yaml @@ -37,6 +37,10 @@ spec: image: "{{ .Values.image.registry }}/{{ .Values.image.repository }}:{{ .Values.image.tag }}" imagePullPolicy: {{ .Values.image.pullPolicy }} env: + {{- if .Values.enableJsonLogging }} + - name: ENABLE_JSON_LOGGING + value: "true" + {{- end }} {{- if eq .Values.configTarget "ConfigMap" }} - name: CONFIG_MAP_NAME value: {{ template "postgres-operator.fullname" . }} diff --git a/charts/postgres-operator/values.yaml b/charts/postgres-operator/values.yaml index 37eac4254..d4acfe1aa 100644 --- a/charts/postgres-operator/values.yaml +++ b/charts/postgres-operator/values.yaml @@ -15,6 +15,9 @@ podLabels: {} configTarget: "ConfigMap" +# JSON logging format +enableJsonLogging: false + # general configuration parameters configGeneral: # choose if deployment creates/updates CRDs with OpenAPIV3Validation diff --git a/cmd/main.go b/cmd/main.go index a178c187e..376df0bad 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -2,7 +2,7 @@ package main import ( "flag" - "log" + log "github.com/sirupsen/logrus" "os" "os/signal" "sync" @@ -36,6 +36,8 @@ func init() { flag.BoolVar(&config.NoTeamsAPI, "noteamsapi", false, "Disable all access to the teams API") flag.Parse() + config.EnableJsonLogging = os.Getenv("ENABLE_JSON_LOGGING") == "true" + configMapRawName := os.Getenv("CONFIG_MAP_NAME") if configMapRawName != "" { @@ -63,6 +65,9 @@ func init() { func main() { var err error + if config.EnableJsonLogging { + log.SetFormatter(&log.JSONFormatter{}) + } log.SetOutput(os.Stdout) log.Printf("Spilo operator %s\n", version) diff --git a/docs/reference/command_line_and_environment.md b/docs/reference/command_line_and_environment.md index ece29b094..35f47cabf 100644 --- a/docs/reference/command_line_and_environment.md +++ b/docs/reference/command_line_and_environment.md @@ -56,3 +56,7 @@ The following environment variables are accepted by the operator: * **CRD_READY_WAIT_INTERVAL** defines the interval between consecutive attempts waiting for the `postgresql` CRD to be created. The default is 5s. + +* **ENABLE_JSON_LOGGING** + Set to `true` for JSON formatted logging output. + The default is false. diff --git a/pkg/controller/controller.go b/pkg/controller/controller.go index aa996288c..8e9f02029 100644 --- a/pkg/controller/controller.go +++ b/pkg/controller/controller.go @@ -71,6 +71,9 @@ type Controller struct { // NewController creates a new controller func NewController(controllerConfig *spec.ControllerConfig, controllerId string) *Controller { logger := logrus.New() + if controllerConfig.EnableJsonLogging { + logger.SetFormatter(&logrus.JSONFormatter{}) + } var myComponentName = "postgres-operator" if controllerId != "" { diff --git a/pkg/spec/types.go b/pkg/spec/types.go index 7a2c0ddac..78c79e1b3 100644 --- a/pkg/spec/types.go +++ b/pkg/spec/types.go @@ -114,6 +114,8 @@ type ControllerConfig struct { CRDReadyWaitTimeout time.Duration ConfigMapName NamespacedName Namespace string + + EnableJsonLogging bool } // cached value for the GetOperatorNamespace From d15f2d339205baddcaea7aa7f6f81bdcc846a5dc Mon Sep 17 00:00:00 2001 From: Dmitry Dolgov <9erthalion6@gmail.com> Date: Thu, 15 Oct 2020 10:16:42 +0200 Subject: [PATCH 02/12] Readiness probe (#1169) Right now there are no readiness probes defined for connection pooler, which means after a pod restart there is a short time window (between a container start and connection pooler starting listening to a socket) when a service can send queries to a new pod, but connection will be refused. The pooler container is rather lightweight and it start to listen immediately, so the time window is small, but still. To fix this add a readiness probe for tcp socket opened by connection pooler. --- pkg/cluster/k8sres.go | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/pkg/cluster/k8sres.go b/pkg/cluster/k8sres.go index fef202538..88eb33efb 100644 --- a/pkg/cluster/k8sres.go +++ b/pkg/cluster/k8sres.go @@ -2216,6 +2216,13 @@ func (c *Cluster) generateConnectionPoolerPodTemplate(spec *acidv1.PostgresSpec) }, }, Env: envVars, + ReadinessProbe: &v1.Probe{ + Handler: v1.Handler{ + TCPSocket: &v1.TCPSocketAction{ + Port: intstr.IntOrString{IntVal: pgPort}, + }, + }, + }, } podTemplate := &v1.PodTemplateSpec{ From 1f5d0995a58b8df0973deb3fbb90b162d6c982a2 Mon Sep 17 00:00:00 2001 From: Dmitry Dolgov <9erthalion6@gmail.com> Date: Mon, 19 Oct 2020 16:18:58 +0200 Subject: [PATCH 03/12] Lookup function installation (#1171) * Lookup function installation Due to reusing a previous database connection without closing it, lookup function installation process was skipping the first database in the list, installing twice into postgres db instead. To prevent that, make internal initDbConnWithName to overwrite a connection object, and return the same object only from initDbConn, which is sort of public interface. Another solution for this would be to modify initDbConnWithName to return a connection object and then generate one temporary connection for each db. It sound feasible but after one attempt it seems it requires a bit more changes around (init, close connections) and doesn't bring anything significantly better on the table. In case if some future changes will prove this wrong, do not hesitate to refactor. Change retry strategy to more insistive one, namely: * retry on the next sync even if we failed to process one database and install pooler appliance. * perform the whole installation unconditionally on update, since the list of target databases could be changed. And for the sake of making it even more robust, also log the case when operator decides to skip installation. Extend connection pooler e2e test with verification that all dbs have required schema installed. --- e2e/exec.sh | 2 +- e2e/tests/test_e2e.py | 78 +++++++++++++++++++++++++++++++++++++- pkg/cluster/cluster.go | 8 +++- pkg/cluster/database.go | 84 ++++++++++++++++++++++++++++------------- pkg/cluster/sync.go | 9 ++++- 5 files changed, 151 insertions(+), 30 deletions(-) diff --git a/e2e/exec.sh b/e2e/exec.sh index 56276bc3c..1ab666e5e 100755 --- a/e2e/exec.sh +++ b/e2e/exec.sh @@ -1,2 +1,2 @@ #!/usr/bin/env bash -kubectl exec -it $1 -- sh -c "$2" +kubectl exec -i $1 -- sh -c "$2" diff --git a/e2e/tests/test_e2e.py b/e2e/tests/test_e2e.py index 550d3ced8..fc251c430 100644 --- a/e2e/tests/test_e2e.py +++ b/e2e/tests/test_e2e.py @@ -15,6 +15,14 @@ def to_selector(labels): return ",".join(["=".join(l) for l in labels.items()]) +def clean_list(values): + # value is not stripped bytes, strip and convert to a string + clean = lambda v: v.strip().decode() + notNone = lambda v: v + + return list(filter(notNone, map(clean, values))) + + class EndToEndTestCase(unittest.TestCase): ''' Test interaction of the operator with multiple K8s components. @@ -140,6 +148,58 @@ class EndToEndTestCase(unittest.TestCase): k8s.wait_for_running_pods(pod_selector, 2) + # Verify that all the databases have pooler schema installed. + # Do this via psql, since otherwise we need to deal with + # credentials. + dbList = [] + + leader = k8s.get_cluster_leader_pod('acid-minimal-cluster') + dbListQuery = "select datname from pg_database" + schemasQuery = """ + select schema_name + from information_schema.schemata + where schema_name = 'pooler' + """ + exec_query = r"psql -tAq -c \"{}\" -d {}" + + if leader: + try: + q = exec_query.format(dbListQuery, "postgres") + q = "su postgres -c \"{}\"".format(q) + print('Get databases: {}'.format(q)) + result = k8s.exec_with_kubectl(leader.metadata.name, q) + dbList = clean_list(result.stdout.split(b'\n')) + print('dbList: {}, stdout: {}, stderr {}'.format( + dbList, result.stdout, result.stderr + )) + except Exception as ex: + print('Could not get databases: {}'.format(ex)) + print('Stdout: {}'.format(result.stdout)) + print('Stderr: {}'.format(result.stderr)) + + for db in dbList: + if db in ('template0', 'template1'): + continue + + schemas = [] + try: + q = exec_query.format(schemasQuery, db) + q = "su postgres -c \"{}\"".format(q) + print('Get schemas: {}'.format(q)) + result = k8s.exec_with_kubectl(leader.metadata.name, q) + schemas = clean_list(result.stdout.split(b'\n')) + print('schemas: {}, stdout: {}, stderr {}'.format( + schemas, result.stdout, result.stderr + )) + except Exception as ex: + print('Could not get databases: {}'.format(ex)) + print('Stdout: {}'.format(result.stdout)) + print('Stderr: {}'.format(result.stderr)) + + self.assertNotEqual(len(schemas), 0) + else: + print('Could not find leader pod') + # turn it off, keeping configuration section k8s.api.custom_objects_api.patch_namespaced_custom_object( 'acid.zalan.do', 'v1', 'default', @@ -235,7 +295,10 @@ class EndToEndTestCase(unittest.TestCase): # operator configuration via API operator_pod = k8s.get_operator_pod() get_config_cmd = "wget --quiet -O - localhost:8080/config" - result = k8s.exec_with_kubectl(operator_pod.metadata.name, get_config_cmd) + result = k8s.exec_with_kubectl( + operator_pod.metadata.name, + get_config_cmd, + ) roles_dict = (json.loads(result.stdout) .get("controller", {}) .get("InfrastructureRoles")) @@ -862,6 +925,19 @@ class K8s: return master_pod_node, replica_pod_nodes + def get_cluster_leader_pod(self, pg_cluster_name, namespace='default'): + labels = { + 'application': 'spilo', + 'cluster-name': pg_cluster_name, + 'spilo-role': 'master', + } + + pods = self.api.core_v1.list_namespaced_pod( + namespace, label_selector=to_selector(labels)).items + + if pods: + return pods[0] + def wait_for_operator_pod_start(self): self. wait_for_pod_start("name=postgres-operator") # HACK operator must register CRD and/or Sync existing PG clusters after start up diff --git a/pkg/cluster/cluster.go b/pkg/cluster/cluster.go index 9b8b51eb0..6aa1f6fa4 100644 --- a/pkg/cluster/cluster.go +++ b/pkg/cluster/cluster.go @@ -780,7 +780,13 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error { } } - // sync connection pooler + // Sync connection pooler. Before actually doing sync reset lookup + // installation flag, since manifest updates could add another db which we + // need to process. In the future we may want to do this more careful and + // check which databases we need to process, but even repeating the whole + // installation process should be good enough. + c.ConnectionPooler.LookupFunction = false + if _, err := c.syncConnectionPooler(oldSpec, newSpec, c.installLookupFunction); err != nil { c.logger.Errorf("could not sync connection pooler: %v", err) diff --git a/pkg/cluster/database.go b/pkg/cluster/database.go index 75e2d2097..f51b58a89 100644 --- a/pkg/cluster/database.go +++ b/pkg/cluster/database.go @@ -101,15 +101,20 @@ func (c *Cluster) databaseAccessDisabled() bool { } func (c *Cluster) initDbConn() error { - return c.initDbConnWithName("") -} - -func (c *Cluster) initDbConnWithName(dbname string) error { - c.setProcessName("initializing db connection") if c.pgDb != nil { return nil } + return c.initDbConnWithName("") +} + +// Worker function for connection initialization. This function does not check +// if the connection is already open, if it is then it will be overwritten. +// Callers need to make sure no connection is open, otherwise we could leak +// connections +func (c *Cluster) initDbConnWithName(dbname string) error { + c.setProcessName("initializing db connection") + var conn *sql.DB connstring := c.pgConnectionString(dbname) @@ -145,6 +150,12 @@ func (c *Cluster) initDbConnWithName(dbname string) error { conn.SetMaxOpenConns(1) conn.SetMaxIdleConns(-1) + if c.pgDb != nil { + msg := "Closing an existing connection before opening a new one to %s" + c.logger.Warningf(msg, dbname) + c.closeDbConn() + } + c.pgDb = conn return nil @@ -465,8 +476,11 @@ func (c *Cluster) execCreateOrAlterExtension(extName, schemaName, statement, doi // perform remote authentification. func (c *Cluster) installLookupFunction(poolerSchema, poolerUser string) error { var stmtBytes bytes.Buffer + c.logger.Info("Installing lookup function") + // Open a new connection if not yet done. This connection will be used only + // to get the list of databases, not for the actuall installation. if err := c.initDbConn(); err != nil { return fmt.Errorf("could not init database connection") } @@ -480,37 +494,41 @@ func (c *Cluster) installLookupFunction(poolerSchema, poolerUser string) error { } }() + // List of databases we failed to process. At the moment it function just + // like a flag to retry on the next sync, but in the future we may want to + // retry only necessary parts, so let's keep the list. + failedDatabases := []string{} currentDatabases, err := c.getDatabases() if err != nil { msg := "could not get databases to install pooler lookup function: %v" return fmt.Errorf(msg, err) } + // We've got the list of target databases, now close this connection to + // open a new one to every each of them. + if err := c.closeDbConn(); err != nil { + c.logger.Errorf("could not close database connection: %v", err) + } + templater := template.Must(template.New("sql").Parse(connectionPoolerLookup)) + params := TemplateParams{ + "pooler_schema": poolerSchema, + "pooler_user": poolerUser, + } + + if err := templater.Execute(&stmtBytes, params); err != nil { + msg := "could not prepare sql statement %+v: %v" + return fmt.Errorf(msg, params, err) + } for dbname := range currentDatabases { + if dbname == "template0" || dbname == "template1" { continue } - if err := c.initDbConnWithName(dbname); err != nil { - return fmt.Errorf("could not init database connection to %s", dbname) - } - c.logger.Infof("Install pooler lookup function into %s", dbname) - params := TemplateParams{ - "pooler_schema": poolerSchema, - "pooler_user": poolerUser, - } - - if err := templater.Execute(&stmtBytes, params); err != nil { - c.logger.Errorf("could not prepare sql statement %+v: %v", - params, err) - // process other databases - continue - } - // golang sql will do retries couple of times if pq driver reports // connections issues (driver.ErrBadConn), but since our query is // idempotent, we can retry in a view of other errors (e.g. due to @@ -520,7 +538,20 @@ func (c *Cluster) installLookupFunction(poolerSchema, poolerUser string) error { constants.PostgresConnectTimeout, constants.PostgresConnectRetryTimeout, func() (bool, error) { - if _, err := c.pgDb.Exec(stmtBytes.String()); err != nil { + + // At this moment we are not connected to any database + if err := c.initDbConnWithName(dbname); err != nil { + msg := "could not init database connection to %s" + return false, fmt.Errorf(msg, dbname) + } + defer func() { + if err := c.closeDbConn(); err != nil { + msg := "could not close database connection: %v" + c.logger.Errorf(msg, err) + } + }() + + if _, err = c.pgDb.Exec(stmtBytes.String()); err != nil { msg := fmt.Errorf("could not execute sql statement %s: %v", stmtBytes.String(), err) return false, msg @@ -533,15 +564,16 @@ func (c *Cluster) installLookupFunction(poolerSchema, poolerUser string) error { c.logger.Errorf("could not execute after retries %s: %v", stmtBytes.String(), err) // process other databases + failedDatabases = append(failedDatabases, dbname) continue } c.logger.Infof("pooler lookup function installed into %s", dbname) - if err := c.closeDbConn(); err != nil { - c.logger.Errorf("could not close database connection: %v", err) - } } - c.ConnectionPooler.LookupFunction = true + if len(failedDatabases) == 0 { + c.ConnectionPooler.LookupFunction = true + } + return nil } diff --git a/pkg/cluster/sync.go b/pkg/cluster/sync.go index fef5b7b66..2a3959b1a 100644 --- a/pkg/cluster/sync.go +++ b/pkg/cluster/sync.go @@ -847,7 +847,9 @@ func (c *Cluster) syncConnectionPooler(oldSpec, var err error if c.ConnectionPooler == nil { - c.ConnectionPooler = &ConnectionPoolerObjects{} + c.ConnectionPooler = &ConnectionPoolerObjects{ + LookupFunction: false, + } } newNeedConnectionPooler := c.needConnectionPoolerWorker(&newSpec.Spec) @@ -885,6 +887,11 @@ func (c *Cluster) syncConnectionPooler(oldSpec, if err = lookup(schema, user); err != nil { return NoSync, err } + } else { + // Lookup function installation seems to be a fragile point, so + // let's log for debugging if we skip it + msg := "Skip lookup function installation, old: %d, already installed %d" + c.logger.Debug(msg, oldNeedConnectionPooler, c.ConnectionPooler.LookupFunction) } if reason, err = c.syncConnectionPoolerWorker(oldSpec, newSpec); err != nil { From a8bfe4eb874f829785ada30085af34853a31f790 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=88=98=E6=96=B0?= <1148576125@qq.com> Date: Tue, 20 Oct 2020 20:18:22 +0800 Subject: [PATCH 04/12] Remove repeated initialization of Pod ServiceAccount (#1164) Co-authored-by: xin.liu --- pkg/controller/controller.go | 1 - 1 file changed, 1 deletion(-) diff --git a/pkg/controller/controller.go b/pkg/controller/controller.go index 8e9f02029..cc08f1587 100644 --- a/pkg/controller/controller.go +++ b/pkg/controller/controller.go @@ -295,7 +295,6 @@ func (c *Controller) initController() { c.logger.Fatalf("could not register Postgres CustomResourceDefinition: %v", err) } - c.initPodServiceAccount() c.initSharedInformers() if c.opConfig.DebugLogging { From 22fa0875e2378466aed60eab6c94fe0f807caf2e Mon Sep 17 00:00:00 2001 From: Felix Kunde Date: Thu, 22 Oct 2020 08:44:04 +0200 Subject: [PATCH 05/12] add maxLength constraint for CRD (#1175) * add maxLength constraint for CRD --- charts/postgres-operator/crds/postgresqls.yaml | 9 +++++++++ docs/user.md | 2 +- manifests/postgresql.crd.yaml | 9 +++++++++ pkg/apis/acid.zalan.do/v1/crds.go | 13 ++++++++++++- 4 files changed, 31 insertions(+), 2 deletions(-) diff --git a/charts/postgres-operator/crds/postgresqls.yaml b/charts/postgres-operator/crds/postgresqls.yaml index 0d444e568..488f17c2b 100644 --- a/charts/postgres-operator/crds/postgresqls.yaml +++ b/charts/postgres-operator/crds/postgresqls.yaml @@ -57,6 +57,7 @@ spec: required: - kind - apiVersion + - metadata - spec properties: kind: @@ -67,6 +68,14 @@ spec: type: string enum: - acid.zalan.do/v1 + metadata: + type: object + required: + - name + properties: + name: + type: string + maxLength: 53 spec: type: object required: diff --git a/docs/user.md b/docs/user.md index a4b1424b8..9a9e01b9a 100644 --- a/docs/user.md +++ b/docs/user.md @@ -49,7 +49,7 @@ Note, that the name of the cluster must start with the `teamId` and `-`. At Zalando we use team IDs (nicknames) to lower the chance of duplicate cluster names and colliding entities. The team ID would also be used to query an API to get all members of a team and create [database roles](#teams-api-roles) for -them. +them. Besides, the maximum cluster name length is 53 characters. ## Watch pods being created diff --git a/manifests/postgresql.crd.yaml b/manifests/postgresql.crd.yaml index 97b72a8ca..56c010739 100644 --- a/manifests/postgresql.crd.yaml +++ b/manifests/postgresql.crd.yaml @@ -53,6 +53,7 @@ spec: required: - kind - apiVersion + - metadata - spec properties: kind: @@ -63,6 +64,14 @@ spec: type: string enum: - acid.zalan.do/v1 + metadata: + type: object + required: + - name + properties: + name: + type: string + maxLength: 53 spec: type: object required: diff --git a/pkg/apis/acid.zalan.do/v1/crds.go b/pkg/apis/acid.zalan.do/v1/crds.go index 2cfc28856..a7d9bccf0 100644 --- a/pkg/apis/acid.zalan.do/v1/crds.go +++ b/pkg/apis/acid.zalan.do/v1/crds.go @@ -107,12 +107,13 @@ var min0 = 0.0 var min1 = 1.0 var min2 = 2.0 var minDisable = -1.0 +var maxLength = int64(53) // PostgresCRDResourceValidation to check applied manifest parameters var PostgresCRDResourceValidation = apiextv1beta1.CustomResourceValidation{ OpenAPIV3Schema: &apiextv1beta1.JSONSchemaProps{ Type: "object", - Required: []string{"kind", "apiVersion", "spec"}, + Required: []string{"kind", "apiVersion", "metadata", "spec"}, Properties: map[string]apiextv1beta1.JSONSchemaProps{ "kind": { Type: "string", @@ -130,6 +131,16 @@ var PostgresCRDResourceValidation = apiextv1beta1.CustomResourceValidation{ }, }, }, + "metadata": { + Type: "object", + Required: []string{"name"}, + Properties: map[string]apiextv1beta1.JSONSchemaProps{ + "name": { + Type: "string", + MaxLength: &maxLength, + }, + }, + }, "spec": { Type: "object", Required: []string{"numberOfInstances", "teamId", "postgresql", "volume"}, From d9f5d1c9dfef1a7e9e00b216553ef77cf6904da4 Mon Sep 17 00:00:00 2001 From: preved911 Date: Thu, 22 Oct 2020 09:49:30 +0300 Subject: [PATCH 06/12] changed PodEnvironmentSecret location namespace (#1177) Signed-off-by: Ildar Valiullin --- pkg/cluster/k8sres.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/cluster/k8sres.go b/pkg/cluster/k8sres.go index 88eb33efb..ba22f24c3 100644 --- a/pkg/cluster/k8sres.go +++ b/pkg/cluster/k8sres.go @@ -841,7 +841,7 @@ func (c *Cluster) getPodEnvironmentSecretVariables() ([]v1.EnvVar, error) { return secretPodEnvVarsList, nil } - secret, err := c.KubeClient.Secrets(c.OpConfig.PodEnvironmentSecret).Get( + secret, err := c.KubeClient.Secrets(c.Namespace).Get( context.TODO(), c.OpConfig.PodEnvironmentSecret, metav1.GetOptions{}) From e97235aa398e6e3e561fe04fffcaa2b19deb7103 Mon Sep 17 00:00:00 2001 From: Felix Kunde Date: Tue, 27 Oct 2020 16:59:26 +0100 Subject: [PATCH 07/12] update dependencies oct 2020 (#1184) * update dependencies oct 2020 * update codegen --- Makefile | 2 +- go.mod | 16 +- go.sum | 322 +++++++++++++----- .../clientset/versioned/fake/register.go | 2 +- .../listers/acid.zalan.do/v1/postgresql.go | 5 + 5 files changed, 249 insertions(+), 98 deletions(-) diff --git a/Makefile b/Makefile index 29bbb47e6..1a676adad 100644 --- a/Makefile +++ b/Makefile @@ -79,7 +79,7 @@ scm-source.json: .git tools: GO111MODULE=on go get -u honnef.co/go/tools/cmd/staticcheck - GO111MODULE=on go get k8s.io/client-go@kubernetes-1.18.8 + GO111MODULE=on go get k8s.io/client-go@kubernetes-1.19.3 GO111MODULE=on go mod tidy fmt: diff --git a/go.mod b/go.mod index 79c3b9be9..341af771c 100644 --- a/go.mod +++ b/go.mod @@ -3,18 +3,18 @@ module github.com/zalando/postgres-operator go 1.14 require ( - github.com/aws/aws-sdk-go v1.34.10 + github.com/aws/aws-sdk-go v1.35.15 github.com/lib/pq v1.8.0 github.com/motomux/pretty v0.0.0-20161209205251-b2aad2c9a95d github.com/r3labs/diff v1.1.0 - github.com/sirupsen/logrus v1.6.0 + github.com/sirupsen/logrus v1.7.0 github.com/stretchr/testify v1.5.1 golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 - golang.org/x/tools v0.0.0-20200928201943-a0ef9b62deab // indirect + golang.org/x/tools v0.0.0-20201026223136-e84cfc6dd5ca // indirect gopkg.in/yaml.v2 v2.2.8 - k8s.io/api v0.18.8 - k8s.io/apiextensions-apiserver v0.18.0 - k8s.io/apimachinery v0.18.8 - k8s.io/client-go v0.18.8 - k8s.io/code-generator v0.18.8 + k8s.io/api v0.19.3 + k8s.io/apiextensions-apiserver v0.19.3 + k8s.io/apimachinery v0.19.3 + k8s.io/client-go v0.19.3 + k8s.io/code-generator v0.19.3 ) diff --git a/go.sum b/go.sum index 2d76a94ee..1f2e5f1d8 100644 --- a/go.sum +++ b/go.sum @@ -1,17 +1,33 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.51.0/go.mod h1:hWtGJ6gnXH+KgDv+V0zFGDvpi07n3z8ZNj3T1RW0Gcw= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= +github.com/Azure/go-autorest/autorest v0.9.6/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630= github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= +github.com/Azure/go-autorest/autorest/adal v0.8.2/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= +github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g= github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM= github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI= @@ -21,40 +37,49 @@ github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= -github.com/aws/aws-sdk-go v1.34.10 h1:VU78gcf/3wA4HNEDCHidK738l7K0Bals4SJnfnvXOtY= -github.com/aws/aws-sdk-go v1.34.10/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= +github.com/aws/aws-sdk-go v1.35.15 h1:JdQNM8hJe+9N9xP53S54NDmX8GCaZn8CCJ4LBHfom4U= +github.com/aws/aws-sdk-go v1.35.15/go.mod h1:tlPOdRjfxPBpNIwqDj61rmsnA85v9jc0Ps9+muhnW+k= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= +github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= +github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= -github.com/docker/docker v0.7.3-0.20190327010347-be7ac8be2ae0/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96 h1:cenwrSVm+Z7QLSV/BsnenAOcDXdX4cMv4wP0B/5QbPg= github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= +github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153 h1:yUdfgN0XgIJw7foRItutHYUIhlcKzcSf5vDpdhQAKTc= @@ -64,19 +89,24 @@ github.com/emicklei/go-restful v2.9.5+incompatible h1:spTtZBk5DYEvbxMVutUuTyh1Ao github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/evanphx/json-patch v0.0.0-20200808040245-162e5629780b/go.mod h1:NAJj0yf/KaRKURN6nyi7A9IZydMivZEm9oQLWNjfKDc= -github.com/evanphx/json-patch v4.2.0+incompatible h1:fUDGZCv/7iAN7u0puUVhvKCcsR6vRfwrJatElLBEf0I= -github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch v4.9.0+incompatible h1:kLcOMZeuLAJvL2BPWLMIj5oaZQobrkAqrL+WFZwQses= +github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= +github.com/go-logr/logr v0.2.0 h1:QvGt2nLcHH0WK9orKa+ppBPAxREcH364nPUedEpK0TY= +github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI= github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= github.com/go-openapi/analysis v0.18.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= @@ -124,45 +154,61 @@ github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4= github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA= github.com/go-openapi/validate v0.19.5/go.mod h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85nY1c2z52x1Gk4= -github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls= github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903 h1:LbsanbbD6LieFkXbj9YNNBupiGHJgFeLpO0j0Fza1h8= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7 h1:5ZkaAPbicIKTF2I64qf5Fh8Aa83Q/dnOafMYV0OMwjA= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= -github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= -github.com/googleapis/gnostic v0.1.0 h1:rVsPeBmXbYv4If/cumu1AzZPwV58q433hvONV1UEZoI= -github.com/googleapis/gnostic v0.1.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= -github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/gnostic v0.4.1 h1:DLJCy1n/vrD4HPjOvYcT8aYQXpPIzoRZONaYwyycI+I= +github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU= @@ -170,28 +216,31 @@ github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.3.5 h1:JboBksRwiiAJWvIYJVo46AfV+IAIKZpfrSzVKj42R4Q= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= -github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= -github.com/jmespath/go-jmespath v0.3.0 h1:OS12ieG61fsCg5+qLJ+SsW9NicxNkg3b25OyT2yCeUc= -github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik= +github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= +github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= +github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= +github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.8 h1:QiWkFLKq0T7mpzwOTu6BzNDbfTE8OLrYhVKYMLF46Ok= -github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.10 h1:Kz6Cvnvv2wGdaG/V8yMvfkmNiXq9Ya2KUv4rouJJr68= +github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= -github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs= +github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= @@ -210,8 +259,10 @@ github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaO github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -224,6 +275,7 @@ github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8m github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= @@ -237,85 +289,120 @@ github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/9 github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/r3labs/diff v1.1.0 h1:V53xhrbTHrWFWq3gI4b94AjgEJOerO1+1l0xyHOBi8M= github.com/r3labs/diff v1.1.0/go.mod h1:7WjXasNzi0vJetRcB/RqNl5dlIsmXcTTLmF5IoH6Xig= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= -github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/sirupsen/logrus v1.7.0 h1:ShrD1U9pZB12TX0cVy0DtePoCH97K8EtX+mg7ZARUtM= +github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= -github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= +github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= +github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= +github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= +go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= +go.etcd.io/etcd v0.5.0-alpha.5.0.20200819165624-17cef6e3e9d5/go.mod h1:skWido08r9w6Lq/w70DO5XYIKMu4QFu1+4VsqLQuJy8= go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.mongodb.org/mongo-driver v1.1.2/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190320223903-b7391e95e576/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -327,51 +414,71 @@ golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73r golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190320064053-1272bf9dcd53/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200822124328-c89045814202 h1:VvcQYSHwXgi7W+TpUR6A9g6Up98WAHf3f/ulnJ62IyA= -golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974 h1:IX6qOQeG5uLjB/hjjwjedwfjND0hgjPMMyO1RoIXQNI= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6 h1:pE8b58s1HRDMi8RDc79m0HISf9D4TzseP40cEA6IGfs= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190321052220-f7bb7a8bee54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd h1:xhmwyvizuTgC2qz7ZlMluP20uW+C3Rm0FD/WLDX8884= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f h1:+Nyd8tzPX9R7BWHguqsrbFdRx3WQ/1ib8I44HXV5yTA= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0 h1:/5xXl8Y5W96D+TtHSlonuFqGHIWVuyCkGJLwGh9JJFs= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -380,36 +487,77 @@ golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGm golang.org/x/tools v0.0.0-20190125232054-d66bd3c5d5a6/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190617190820-da514acc4774/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190920225731-5eefd052ad72/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20200928201943-a0ef9b62deab h1:CyH2SDm5ATQiX9gtbMYfvNNed97A9v+TJFnUX/fTaJY= -golang.org/x/tools v0.0.0-20200928201943-a0ef9b62deab/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200616133436-c1934b75d054/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20201026223136-e84cfc6dd5ca h1:vL6Mv8VrSxz8azdgLrH/zO/Rd1Bzdk89ZfMVW39gD0Q= +golang.org/x/tools v0.0.0-20201026223136-e84cfc6dd5ca/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.5.0 h1:KxkO13IPW4Lslp2bz+KHP2E3gtFlrIGNThxkZQ3g+4c= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5 h1:tycE03LOZYQNhDpS27tcQdAzLCVMaj7QT2SXxebnpCM= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0 h1:UhZDfRO8JRQru4/+LlLE0BRKGF8L+PICnvYZmx/fEGA= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= @@ -423,45 +571,43 @@ gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bl gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= +gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc h1:/hemPrYIhOhy8zYrNj+069zDB68us2sMGsfkFJO0iZs= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -k8s.io/api v0.18.0/go.mod h1:q2HRQkfDzHMBZL9l/y9rH63PkQl4vae0xRT+8prbrK8= -k8s.io/api v0.18.8 h1:aIKUzJPb96f3fKec2lxtY7acZC9gQNDLVhfSGpxBAC4= -k8s.io/api v0.18.8/go.mod h1:d/CXqwWv+Z2XEG1LgceeDmHQwpUJhROPx16SlxJgERY= -k8s.io/apiextensions-apiserver v0.18.0 h1:HN4/P8vpGZFvB5SOMuPPH2Wt9Y/ryX+KRvIyAkchu1Q= -k8s.io/apiextensions-apiserver v0.18.0/go.mod h1:18Cwn1Xws4xnWQNC00FLq1E350b9lUF+aOdIWDOZxgo= -k8s.io/apimachinery v0.18.0/go.mod h1:9SnR/e11v5IbyPCGbvJViimtJ0SwHG4nfZFjU77ftcA= -k8s.io/apimachinery v0.18.8 h1:jimPrycCqgx2QPearX3to1JePz7wSbVLq+7PdBTTwQ0= -k8s.io/apimachinery v0.18.8/go.mod h1:6sQd+iHEqmOtALqOFjSWp2KZ9F0wlU/nWm0ZgsYWMig= -k8s.io/apiserver v0.18.0/go.mod h1:3S2O6FeBBd6XTo0njUrLxiqk8GNy6wWOftjhJcXYnjw= -k8s.io/client-go v0.18.0/go.mod h1:uQSYDYs4WhVZ9i6AIoEZuwUggLVEF64HOD37boKAtF8= -k8s.io/client-go v0.18.8 h1:SdbLpIxk5j5YbFr1b7fq8S7mDgDjYmUxSbszyoesoDM= -k8s.io/client-go v0.18.8/go.mod h1:HqFqMllQ5NnQJNwjro9k5zMyfhZlOwpuTLVrxjkYSxU= -k8s.io/code-generator v0.18.0/go.mod h1:+UHX5rSbxmR8kzS+FAv7um6dtYrZokQvjHpDSYRVkTc= -k8s.io/code-generator v0.18.8 h1:lgO1P1wjikEtzNvj7ia+x1VC4svJ28a/r0wnOLhhOTU= -k8s.io/code-generator v0.18.8/go.mod h1:TgNEVx9hCyPGpdtCWA34olQYLkh3ok9ar7XfSsr8b6c= -k8s.io/component-base v0.18.0/go.mod h1:u3BCg0z1uskkzrnAKFzulmYaEpZF7XC9Pf/uFyb1v2c= -k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= -k8s.io/gengo v0.0.0-20200114144118-36b2048a9120 h1:RPscN6KhmG54S33L+lr3GS+oD1jmchIU0ll519K6FA4= -k8s.io/gengo v0.0.0-20200114144118-36b2048a9120/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= -k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= -k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= -k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= -k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= -k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E= -k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6 h1:Oh3Mzx5pJ+yIumsAD0MOECPVeXsVot0UkiaCGVyfGQY= -k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E= -k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89 h1:d4vVOjXm687F1iLSP2q3lyPPuyvTUt3aVoBpi2DqRsU= -k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.7/go.mod h1:PHgbrJT7lCHcxMU+mDHEm+nx46H4zuuHZkDP6icnhu0= -sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= -sigs.k8s.io/structured-merge-diff/v3 v3.0.0 h1:dOmIZBMfhcHS09XZkMyUgkq5trg3/jRyJYFZUiaOp8E= -sigs.k8s.io/structured-merge-diff/v3 v3.0.0/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= +honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +k8s.io/api v0.19.3 h1:GN6ntFnv44Vptj/b+OnMW7FmzkpDoIDLZRvKX3XH9aU= +k8s.io/api v0.19.3/go.mod h1:VF+5FT1B74Pw3KxMdKyinLo+zynBaMBiAfGMuldcNDs= +k8s.io/apiextensions-apiserver v0.19.3 h1:WZxBypSHW4SdXHbdPTS/Jy7L2la6Niggs8BuU5o+avo= +k8s.io/apiextensions-apiserver v0.19.3/go.mod h1:igVEkrE9TzInc1tYE7qSqxaLg/rEAp6B5+k9Q7+IC8Q= +k8s.io/apimachinery v0.19.3 h1:bpIQXlKjB4cB/oNpnNnV+BybGPR7iP5oYpsOTEJ4hgc= +k8s.io/apimachinery v0.19.3/go.mod h1:DnPGDnARWFvYa3pMHgSxtbZb7gpzzAZ1pTfaUNDVlmA= +k8s.io/apiserver v0.19.3/go.mod h1:bx6dMm+H6ifgKFpCQT/SAhPwhzoeIMlHIaibomUDec0= +k8s.io/client-go v0.19.3 h1:ctqR1nQ52NUs6LpI0w+a5U+xjYwflFwA13OJKcicMxg= +k8s.io/client-go v0.19.3/go.mod h1:+eEMktZM+MG0KO+PTkci8xnbCZHvj9TqR6Q1XDUIJOM= +k8s.io/code-generator v0.19.3 h1:fTrTpJ8PZog5oo6MmeZtveo89emjQZHiw0ieybz1RSs= +k8s.io/code-generator v0.19.3/go.mod h1:moqLn7w0t9cMs4+5CQyxnfA/HV8MF6aAVENF+WZZhgk= +k8s.io/component-base v0.19.3/go.mod h1:WhLWSIefQn8W8jxSLl5WNiR6z8oyMe/8Zywg7alOkRc= +k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/gengo v0.0.0-20200428234225-8167cfdcfc14 h1:t4L10Qfx/p7ASH3gXCdIUtPbbIuegCoUJf3TMSFekjw= +k8s.io/gengo v0.0.0-20200428234225-8167cfdcfc14/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= +k8s.io/klog/v2 v2.2.0 h1:XRvcwJozkgZ1UQJmfMGpvRthQHOvihEhYtDfAaxMz/A= +k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= +k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6 h1:+WnxoVtG8TMiudHBSEtrVL1egv36TkkJm+bA8AxicmQ= +k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o= +k8s.io/utils v0.0.0-20200729134348-d5654de09c73 h1:uJmqzgNWG7XyClnU/mLPBWwfKKF1K8Hf8whTseBgJcg= +k8s.io/utils v0.0.0-20200729134348-d5654de09c73/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.9/go.mod h1:dzAXnQbTRyDlZPJX2SUPEqvnB+j7AJjtlox7PEwigU0= +sigs.k8s.io/structured-merge-diff/v4 v4.0.1 h1:YXTMot5Qz/X1iBRJhAt+vI+HVttY0WkSqqhKxQ0xVbA= +sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= diff --git a/pkg/generated/clientset/versioned/fake/register.go b/pkg/generated/clientset/versioned/fake/register.go index 5363e8cc4..c5a24f7da 100644 --- a/pkg/generated/clientset/versioned/fake/register.go +++ b/pkg/generated/clientset/versioned/fake/register.go @@ -35,7 +35,7 @@ import ( var scheme = runtime.NewScheme() var codecs = serializer.NewCodecFactory(scheme) -var parameterCodec = runtime.NewParameterCodec(scheme) + var localSchemeBuilder = runtime.SchemeBuilder{ acidv1.AddToScheme, } diff --git a/pkg/generated/listers/acid.zalan.do/v1/postgresql.go b/pkg/generated/listers/acid.zalan.do/v1/postgresql.go index 9a60c8281..ee3efbdfe 100644 --- a/pkg/generated/listers/acid.zalan.do/v1/postgresql.go +++ b/pkg/generated/listers/acid.zalan.do/v1/postgresql.go @@ -32,8 +32,10 @@ import ( ) // PostgresqlLister helps list Postgresqls. +// All objects returned here must be treated as read-only. type PostgresqlLister interface { // List lists all Postgresqls in the indexer. + // Objects returned here must be treated as read-only. List(selector labels.Selector) (ret []*v1.Postgresql, err error) // Postgresqls returns an object that can list and get Postgresqls. Postgresqls(namespace string) PostgresqlNamespaceLister @@ -64,10 +66,13 @@ func (s *postgresqlLister) Postgresqls(namespace string) PostgresqlNamespaceList } // PostgresqlNamespaceLister helps list and get Postgresqls. +// All objects returned here must be treated as read-only. type PostgresqlNamespaceLister interface { // List lists all Postgresqls in the indexer for a given namespace. + // Objects returned here must be treated as read-only. List(selector labels.Selector) (ret []*v1.Postgresql, err error) // Get retrieves the Postgresql from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. Get(name string) (*v1.Postgresql, error) PostgresqlNamespaceListerExpansion } From 7730ecfdeceb6e447232801a4efccff5b60fa3be Mon Sep 17 00:00:00 2001 From: arminfelder Date: Wed, 28 Oct 2020 09:33:52 +0100 Subject: [PATCH 08/12] fixed case where, no ready label is defined, but node is unscheduable (#1162) * fixed case where, no ready label is defined, but node is unscheduable --- pkg/controller/node.go | 2 +- pkg/controller/node_test.go | 52 +++++++++++++++++++++++++++++-------- 2 files changed, 42 insertions(+), 12 deletions(-) diff --git a/pkg/controller/node.go b/pkg/controller/node.go index be41b79ab..4ffe7e26c 100644 --- a/pkg/controller/node.go +++ b/pkg/controller/node.go @@ -76,7 +76,7 @@ func (c *Controller) nodeUpdate(prev, cur interface{}) { } func (c *Controller) nodeIsReady(node *v1.Node) bool { - return (!node.Spec.Unschedulable || util.MapContains(node.Labels, c.opConfig.NodeReadinessLabel) || + return (!node.Spec.Unschedulable || (len(c.opConfig.NodeReadinessLabel) > 0 && util.MapContains(node.Labels, c.opConfig.NodeReadinessLabel)) || util.MapContains(node.Labels, map[string]string{"master": "true"})) } diff --git a/pkg/controller/node_test.go b/pkg/controller/node_test.go index 28e178bfb..919f30f39 100644 --- a/pkg/controller/node_test.go +++ b/pkg/controller/node_test.go @@ -15,7 +15,6 @@ const ( func newNodeTestController() *Controller { var controller = NewController(&spec.ControllerConfig{}, "node-test") - controller.opConfig.NodeReadinessLabel = map[string]string{readyLabel: readyValue} return controller } @@ -36,27 +35,58 @@ var nodeTestController = newNodeTestController() func TestNodeIsReady(t *testing.T) { testName := "TestNodeIsReady" var testTable = []struct { - in *v1.Node - out bool + in *v1.Node + out bool + readinessLabel map[string]string }{ { - in: makeNode(map[string]string{"foo": "bar"}, true), - out: true, + in: makeNode(map[string]string{"foo": "bar"}, true), + out: true, + readinessLabel: map[string]string{readyLabel: readyValue}, }, { - in: makeNode(map[string]string{"foo": "bar"}, false), - out: false, + in: makeNode(map[string]string{"foo": "bar"}, false), + out: false, + readinessLabel: map[string]string{readyLabel: readyValue}, }, { - in: makeNode(map[string]string{readyLabel: readyValue}, false), - out: true, + in: makeNode(map[string]string{readyLabel: readyValue}, false), + out: true, + readinessLabel: map[string]string{readyLabel: readyValue}, }, { - in: makeNode(map[string]string{"foo": "bar", "master": "true"}, false), - out: true, + in: makeNode(map[string]string{"foo": "bar", "master": "true"}, false), + out: true, + readinessLabel: map[string]string{readyLabel: readyValue}, + }, + { + in: makeNode(map[string]string{"foo": "bar", "master": "true"}, false), + out: true, + readinessLabel: map[string]string{readyLabel: readyValue}, + }, + { + in: makeNode(map[string]string{"foo": "bar"}, true), + out: true, + readinessLabel: map[string]string{}, + }, + { + in: makeNode(map[string]string{"foo": "bar"}, false), + out: false, + readinessLabel: map[string]string{}, + }, + { + in: makeNode(map[string]string{readyLabel: readyValue}, false), + out: false, + readinessLabel: map[string]string{}, + }, + { + in: makeNode(map[string]string{"foo": "bar", "master": "true"}, false), + out: true, + readinessLabel: map[string]string{}, }, } for _, tt := range testTable { + nodeTestController.opConfig.NodeReadinessLabel = tt.readinessLabel if isReady := nodeTestController.nodeIsReady(tt.in); isReady != tt.out { t.Errorf("%s: expected response %t doesn't match the actual %t for the node %#v", testName, tt.out, isReady, tt.in) From 3a86dfc8bbab3bf286e77ce225459adc71dc919d Mon Sep 17 00:00:00 2001 From: Jan Mussler Date: Wed, 28 Oct 2020 10:04:33 +0100 Subject: [PATCH 09/12] End 2 End tests speedup (#1180) * Improving end 2 end tests, especially speed of execution and error, by implementing proper eventual asserts and timeouts. * Add documentation for running individual tests * Fixed String encoding in Patorni state check and error case * Printing config as multi log line entity, makes it readable and grepable on startup * Cosmetic changes to logs. Removed quotes from diff. Move all object diffs to text diff. Enabled padding for log level. * Mount script with tools for easy logaccess and watching objects. * Set proper update strategy for Postgres operator deployment. * Move long running test to end. Move pooler test to new functions. * Remove quote from valid K8s identifiers. --- Makefile | 10 +- e2e/Dockerfile | 6 +- e2e/README.md | 44 ++ e2e/exec_into_env.sh | 14 + e2e/run.sh | 43 +- e2e/scripts/cleanup.sh | 7 + e2e/scripts/get_logs.sh | 2 + e2e/scripts/watch_objects.sh | 19 + e2e/tests/k8s_api.py | 522 ++++++++++++++++++ e2e/tests/test_e2e.py | 880 +++++++++++++------------------ manifests/postgres-operator.yaml | 2 + pkg/cluster/cluster.go | 30 +- pkg/cluster/database.go | 2 +- pkg/cluster/k8sres.go | 7 +- pkg/cluster/pod.go | 9 +- pkg/cluster/resources.go | 9 +- pkg/cluster/sync.go | 16 +- pkg/cluster/util.go | 41 +- pkg/controller/controller.go | 39 +- pkg/controller/node.go | 2 +- pkg/controller/postgresql.go | 2 +- pkg/util/config/config.go | 2 +- pkg/util/nicediff/diff.go | 191 +++++++ pkg/util/util_test.go | 10 + 24 files changed, 1317 insertions(+), 592 deletions(-) create mode 100755 e2e/exec_into_env.sh create mode 100755 e2e/scripts/cleanup.sh create mode 100755 e2e/scripts/get_logs.sh create mode 100755 e2e/scripts/watch_objects.sh create mode 100644 e2e/tests/k8s_api.py create mode 100644 pkg/util/nicediff/diff.go diff --git a/Makefile b/Makefile index 1a676adad..2b2d2668f 100644 --- a/Makefile +++ b/Makefile @@ -24,12 +24,16 @@ PKG := `go list ./... | grep -v /vendor/` ifeq ($(DEBUG),1) DOCKERFILE = DebugDockerfile - DEBUG_POSTFIX := -debug + DEBUG_POSTFIX := -debug-$(shell date hhmmss) BUILD_FLAGS += -gcflags "-N -l" else DOCKERFILE = Dockerfile endif +ifeq ($(FRESH),1) + DEBUG_FRESH=$(shell date +"%H-%M-%S") +endif + ifdef CDP_PULL_REQUEST_NUMBER CDP_TAG := -${CDP_BUILD_VERSION} endif @@ -66,7 +70,7 @@ docker: ${DOCKERDIR}/${DOCKERFILE} docker-context echo "Version ${VERSION}" echo "CDP tag ${CDP_TAG}" echo "git describe $(shell git describe --tags --always --dirty)" - cd "${DOCKERDIR}" && docker build --rm -t "$(IMAGE):$(TAG)$(CDP_TAG)$(DEBUG_POSTFIX)" -f "${DOCKERFILE}" . + cd "${DOCKERDIR}" && docker build --rm -t "$(IMAGE):$(TAG)$(CDP_TAG)$(DEBUG_FRESH)$(DEBUG_POSTFIX)" -f "${DOCKERFILE}" . indocker-race: docker run --rm -v "${GOPATH}":"${GOPATH}" -e GOPATH="${GOPATH}" -e RACE=1 -w ${PWD} golang:1.8.1 bash -c "make linux" @@ -97,4 +101,4 @@ test: GO111MODULE=on go test ./... e2e: docker # build operator image to be tested - cd e2e; make e2etest + cd e2e; make e2etest \ No newline at end of file diff --git a/e2e/Dockerfile b/e2e/Dockerfile index 70e6f0a84..3eb8c9d70 100644 --- a/e2e/Dockerfile +++ b/e2e/Dockerfile @@ -14,6 +14,7 @@ RUN apt-get update \ python3-setuptools \ python3-pip \ curl \ + vim \ && pip3 install --no-cache-dir -r requirements.txt \ && curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.18.0/bin/linux/amd64/kubectl \ && chmod +x ./kubectl \ @@ -21,4 +22,7 @@ RUN apt-get update \ && apt-get clean \ && rm -rf /var/lib/apt/lists/* -ENTRYPOINT ["python3", "-m", "unittest", "discover", "--start-directory", ".", "-v"] +# working line +# python3 -m unittest discover -v --failfast -k test_e2e.EndToEndTestCase.test_lazy_spilo_upgrade --start-directory tests +ENTRYPOINT ["python3", "-m", "unittest"] +CMD ["discover","-v","--failfast","--start-directory","/tests"] \ No newline at end of file diff --git a/e2e/README.md b/e2e/README.md index f1bc5f9ed..92a1fc731 100644 --- a/e2e/README.md +++ b/e2e/README.md @@ -12,6 +12,10 @@ Docker. Docker Go +# Notice + +The `manifest` folder in e2e tests folder is not commited to git, it comes from `/manifests` + ## Build test runner In the directory of the cloned Postgres Operator repository change to the e2e @@ -35,6 +39,46 @@ In the e2e folder you can invoke tests either with `make test` or with: To run both the build and test step you can invoke `make e2e` from the parent directory. +To run the end 2 end test and keep the kind state execute: +```bash +NOCLEANUP=True ./run.sh +``` + +## Run indidual test + +After having executed a normal E2E run with `NOCLEANUP=True` Kind still continues to run, allowing you subsequent test runs. + +To run an individual test, run the following command in the `e2e` directory + +```bash +NOCLEANUP=True ./run.sh main tests.test_e2e.EndToEndTestCase.test_lazy_spilo_upgrade +``` + +## Inspecting Kind + +If you want to inspect Kind/Kubernetes cluster, use the following script to exec into the K8s setup and then use `kubectl` + +```bash +./exec_into_env.sh + +# use kube ctl +kubectl get pods + +# watch relevant objects +./scripts/watch_objects.sh + +# get operator logs +./scripts/get_logs.sh +``` + +## Cleaning up Kind + +To cleanup kind and start fresh + +```bash +e2e/run.sh cleanup +``` + ## Covered use cases The current tests are all bundled in [`test_e2e.py`](tests/test_e2e.py): diff --git a/e2e/exec_into_env.sh b/e2e/exec_into_env.sh new file mode 100755 index 000000000..ef12ba18a --- /dev/null +++ b/e2e/exec_into_env.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +export cluster_name="postgres-operator-e2e-tests" +export kubeconfig_path="/tmp/kind-config-${cluster_name}" +export operator_image="registry.opensource.zalan.do/acid/postgres-operator:latest" +export e2e_test_runner_image="registry.opensource.zalan.do/acid/postgres-operator-e2e-tests-runner:0.3" + +docker run -it --entrypoint /bin/bash --network=host -e "TERM=xterm-256color" \ + --mount type=bind,source="$(readlink -f ${kubeconfig_path})",target=/root/.kube/config \ + --mount type=bind,source="$(readlink -f manifests)",target=/manifests \ + --mount type=bind,source="$(readlink -f tests)",target=/tests \ + --mount type=bind,source="$(readlink -f exec.sh)",target=/exec.sh \ + --mount type=bind,source="$(readlink -f scripts)",target=/scripts \ + -e OPERATOR_IMAGE="${operator_image}" "${e2e_test_runner_image}" diff --git a/e2e/run.sh b/e2e/run.sh index 74d842879..0024a2569 100755 --- a/e2e/run.sh +++ b/e2e/run.sh @@ -9,6 +9,10 @@ IFS=$'\n\t' readonly cluster_name="postgres-operator-e2e-tests" readonly kubeconfig_path="/tmp/kind-config-${cluster_name}" readonly spilo_image="registry.opensource.zalan.do/acid/spilo-12:1.6-p5" +readonly e2e_test_runner_image="registry.opensource.zalan.do/acid/postgres-operator-e2e-tests-runner:0.3" + +export GOPATH=${GOPATH-~/go} +export PATH=${GOPATH}/bin:$PATH echo "Clustername: ${cluster_name}" echo "Kubeconfig path: ${kubeconfig_path}" @@ -19,12 +23,7 @@ function pull_images(){ then docker pull registry.opensource.zalan.do/acid/postgres-operator:latest fi - operator_image=$(docker images --filter=reference="registry.opensource.zalan.do/acid/postgres-operator" --format "{{.Repository}}:{{.Tag}}" | head -1) - - # this image does not contain the tests; a container mounts them from a local "./tests" dir at start time - e2e_test_runner_image="registry.opensource.zalan.do/acid/postgres-operator-e2e-tests-runner:latest" - docker pull ${e2e_test_runner_image} } function start_kind(){ @@ -36,12 +35,17 @@ function start_kind(){ fi export KUBECONFIG="${kubeconfig_path}" - kind create cluster --name ${cluster_name} --config kind-cluster-postgres-operator-e2e-tests.yaml - kind load docker-image "${operator_image}" --name ${cluster_name} + kind create cluster --name ${cluster_name} --config kind-cluster-postgres-operator-e2e-tests.yaml docker pull "${spilo_image}" kind load docker-image "${spilo_image}" --name ${cluster_name} } +function load_operator_image() { + echo "Loading operator image" + export KUBECONFIG="${kubeconfig_path}" + kind load docker-image "${operator_image}" --name ${cluster_name} +} + function set_kind_api_server_ip(){ echo "Setting up kind API server ip" # use the actual kubeconfig to connect to the 'kind' API server @@ -52,8 +56,7 @@ function set_kind_api_server_ip(){ } function run_tests(){ - echo "Running tests..." - + echo "Running tests... image: ${e2e_test_runner_image}" # tests modify files in ./manifests, so we mount a copy of this directory done by the e2e Makefile docker run --rm --network=host -e "TERM=xterm-256color" \ @@ -61,11 +64,11 @@ function run_tests(){ --mount type=bind,source="$(readlink -f manifests)",target=/manifests \ --mount type=bind,source="$(readlink -f tests)",target=/tests \ --mount type=bind,source="$(readlink -f exec.sh)",target=/exec.sh \ - -e OPERATOR_IMAGE="${operator_image}" "${e2e_test_runner_image}" - + --mount type=bind,source="$(readlink -f scripts)",target=/scripts \ + -e OPERATOR_IMAGE="${operator_image}" "${e2e_test_runner_image}" ${E2E_TEST_CASE-} $@ } -function clean_up(){ +function cleanup(){ echo "Executing cleanup" unset KUBECONFIG kind delete cluster --name ${cluster_name} @@ -73,14 +76,16 @@ function clean_up(){ } function main(){ + echo "Entering main function..." + [[ -z ${NOCLEANUP-} ]] && trap "cleanup" QUIT TERM EXIT + pull_images + [[ ! -f ${kubeconfig_path} ]] && start_kind + load_operator_image + set_kind_api_server_ip - trap "clean_up" QUIT TERM EXIT - - time pull_images - time start_kind - time set_kind_api_server_ip - run_tests + shift + run_tests $@ exit 0 } -"$@" +"$1" $@ diff --git a/e2e/scripts/cleanup.sh b/e2e/scripts/cleanup.sh new file mode 100755 index 000000000..2c82388ae --- /dev/null +++ b/e2e/scripts/cleanup.sh @@ -0,0 +1,7 @@ +#!/bin/bash +kubectl delete postgresql acid-minimal-cluster +kubectl delete deployments -l application=db-connection-pooler,cluster-name=acid-minimal-cluster +kubectl delete statefulsets -l application=spilo,cluster-name=acid-minimal-cluster +kubectl delete services -l application=spilo,cluster-name=acid-minimal-cluster +kubectl delete configmap postgres-operator +kubectl delete deployment postgres-operator \ No newline at end of file diff --git a/e2e/scripts/get_logs.sh b/e2e/scripts/get_logs.sh new file mode 100755 index 000000000..1639f3995 --- /dev/null +++ b/e2e/scripts/get_logs.sh @@ -0,0 +1,2 @@ +#!/bin/bash +kubectl logs $(kubectl get pods -l name=postgres-operator --field-selector status.phase=Running -o jsonpath='{.items..metadata.name}') diff --git a/e2e/scripts/watch_objects.sh b/e2e/scripts/watch_objects.sh new file mode 100755 index 000000000..c866fbd45 --- /dev/null +++ b/e2e/scripts/watch_objects.sh @@ -0,0 +1,19 @@ +#!/bin/bash + +watch -c " +kubectl get postgresql +echo +echo -n 'Rolling upgrade pending: ' +kubectl get statefulset -o jsonpath='{.items..metadata.annotations.zalando-postgres-operator-rolling-update-required}' +echo +echo +kubectl get pods -o wide +echo +kubectl get statefulsets +echo +kubectl get deployments +echo +kubectl get pods -l name=postgres-operator -o jsonpath='{.items..metadata.annotations.step}' +echo +kubectl get pods -l application=spilo -o jsonpath='{.items..spec.containers..image}' +" \ No newline at end of file diff --git a/e2e/tests/k8s_api.py b/e2e/tests/k8s_api.py new file mode 100644 index 000000000..371fa8e0d --- /dev/null +++ b/e2e/tests/k8s_api.py @@ -0,0 +1,522 @@ +import json +import unittest +import time +import timeout_decorator +import subprocess +import warnings +import os +import yaml + +from datetime import datetime +from kubernetes import client, config +from kubernetes.client.rest import ApiException + +def to_selector(labels): + return ",".join(["=".join(l) for l in labels.items()]) + +class K8sApi: + + def __init__(self): + + # https://github.com/kubernetes-client/python/issues/309 + warnings.simplefilter("ignore", ResourceWarning) + + self.config = config.load_kube_config() + self.k8s_client = client.ApiClient() + + self.core_v1 = client.CoreV1Api() + self.apps_v1 = client.AppsV1Api() + self.batch_v1_beta1 = client.BatchV1beta1Api() + self.custom_objects_api = client.CustomObjectsApi() + self.policy_v1_beta1 = client.PolicyV1beta1Api() + self.storage_v1_api = client.StorageV1Api() + + +class K8s: + ''' + Wraps around K8s api client and helper methods. + ''' + + RETRY_TIMEOUT_SEC = 1 + + def __init__(self, labels='x=y', namespace='default'): + self.api = K8sApi() + self.labels=labels + self.namespace=namespace + + def get_pg_nodes(self, pg_cluster_name, namespace='default'): + master_pod_node = '' + replica_pod_nodes = [] + podsList = self.api.core_v1.list_namespaced_pod(namespace, label_selector=pg_cluster_name) + for pod in podsList.items: + if pod.metadata.labels.get('spilo-role') == 'master': + master_pod_node = pod.spec.node_name + elif pod.metadata.labels.get('spilo-role') == 'replica': + replica_pod_nodes.append(pod.spec.node_name) + + return master_pod_node, replica_pod_nodes + + def get_cluster_nodes(self, cluster_labels='cluster-name=acid-minimal-cluster', namespace='default'): + m = [] + r = [] + podsList = self.api.core_v1.list_namespaced_pod(namespace, label_selector=cluster_labels) + for pod in podsList.items: + if pod.metadata.labels.get('spilo-role') == 'master' and pod.status.phase == 'Running': + m.append(pod.spec.node_name) + elif pod.metadata.labels.get('spilo-role') == 'replica' and pod.status.phase == 'Running': + r.append(pod.spec.node_name) + + return m, r + + def wait_for_operator_pod_start(self): + self.wait_for_pod_start("name=postgres-operator") + # give operator time to subscribe to objects + time.sleep(1) + return True + + def get_operator_pod(self): + pods = self.api.core_v1.list_namespaced_pod( + 'default', label_selector='name=postgres-operator' + ).items + + pods = list(filter(lambda x: x.status.phase=='Running', pods)) + + if len(pods): + return pods[0] + + return None + + def get_operator_log(self): + operator_pod = self.get_operator_pod() + pod_name = operator_pod.metadata.name + return self.api.core_v1.read_namespaced_pod_log( + name=pod_name, + namespace='default' + ) + + def pg_get_status(self, name="acid-minimal-cluster", namespace="default"): + pg = self.api.custom_objects_api.get_namespaced_custom_object( + "acid.zalan.do", "v1", namespace, "postgresqls", name) + return pg.get("status", {}).get("PostgresClusterStatus", None) + + def wait_for_pod_start(self, pod_labels, namespace='default'): + pod_phase = 'No pod running' + while pod_phase != 'Running': + pods = self.api.core_v1.list_namespaced_pod(namespace, label_selector=pod_labels).items + if pods: + pod_phase = pods[0].status.phase + + time.sleep(self.RETRY_TIMEOUT_SEC) + + + def get_service_type(self, svc_labels, namespace='default'): + svc_type = '' + svcs = self.api.core_v1.list_namespaced_service(namespace, label_selector=svc_labels, limit=1).items + for svc in svcs: + svc_type = svc.spec.type + return svc_type + + def check_service_annotations(self, svc_labels, annotations, namespace='default'): + svcs = self.api.core_v1.list_namespaced_service(namespace, label_selector=svc_labels, limit=1).items + for svc in svcs: + for key, value in annotations.items(): + if not svc.metadata.annotations or key not in svc.metadata.annotations or svc.metadata.annotations[key] != value: + print("Expected key {} not found in annotations {}".format(key, svc.metadata.annotations)) + return False + return True + + def check_statefulset_annotations(self, sset_labels, annotations, namespace='default'): + ssets = self.api.apps_v1.list_namespaced_stateful_set(namespace, label_selector=sset_labels, limit=1).items + for sset in ssets: + for key, value in annotations.items(): + if key not in sset.metadata.annotations or sset.metadata.annotations[key] != value: + print("Expected key {} not found in annotations {}".format(key, sset.metadata.annotations)) + return False + return True + + def scale_cluster(self, number_of_instances, name="acid-minimal-cluster", namespace="default"): + body = { + "spec": { + "numberOfInstances": number_of_instances + } + } + self.api.custom_objects_api.patch_namespaced_custom_object( + "acid.zalan.do", "v1", namespace, "postgresqls", name, body) + + def wait_for_running_pods(self, labels, number, namespace=''): + while self.count_pods_with_label(labels) != number: + time.sleep(self.RETRY_TIMEOUT_SEC) + + def wait_for_pods_to_stop(self, labels, namespace=''): + while self.count_pods_with_label(labels) != 0: + time.sleep(self.RETRY_TIMEOUT_SEC) + + def wait_for_service(self, labels, namespace='default'): + def get_services(): + return self.api.core_v1.list_namespaced_service( + namespace, label_selector=labels + ).items + + while not get_services(): + time.sleep(self.RETRY_TIMEOUT_SEC) + + def count_pods_with_label(self, labels, namespace='default'): + return len(self.api.core_v1.list_namespaced_pod(namespace, label_selector=labels).items) + + def count_services_with_label(self, labels, namespace='default'): + return len(self.api.core_v1.list_namespaced_service(namespace, label_selector=labels).items) + + def count_endpoints_with_label(self, labels, namespace='default'): + return len(self.api.core_v1.list_namespaced_endpoints(namespace, label_selector=labels).items) + + def count_secrets_with_label(self, labels, namespace='default'): + return len(self.api.core_v1.list_namespaced_secret(namespace, label_selector=labels).items) + + def count_statefulsets_with_label(self, labels, namespace='default'): + return len(self.api.apps_v1.list_namespaced_stateful_set(namespace, label_selector=labels).items) + + def count_deployments_with_label(self, labels, namespace='default'): + return len(self.api.apps_v1.list_namespaced_deployment(namespace, label_selector=labels).items) + + def count_pdbs_with_label(self, labels, namespace='default'): + return len(self.api.policy_v1_beta1.list_namespaced_pod_disruption_budget( + namespace, label_selector=labels).items) + + def count_running_pods(self, labels='application=spilo,cluster-name=acid-minimal-cluster', namespace='default'): + pods = self.api.core_v1.list_namespaced_pod(namespace, label_selector=labels).items + return len(list(filter(lambda x: x.status.phase=='Running', pods))) + + def wait_for_pod_failover(self, failover_targets, labels, namespace='default'): + pod_phase = 'Failing over' + new_pod_node = '' + + while (pod_phase != 'Running') or (new_pod_node not in failover_targets): + pods = self.api.core_v1.list_namespaced_pod(namespace, label_selector=labels).items + if pods: + new_pod_node = pods[0].spec.node_name + pod_phase = pods[0].status.phase + time.sleep(self.RETRY_TIMEOUT_SEC) + + def get_logical_backup_job(self, namespace='default'): + return self.api.batch_v1_beta1.list_namespaced_cron_job(namespace, label_selector="application=spilo") + + def wait_for_logical_backup_job(self, expected_num_of_jobs): + while (len(self.get_logical_backup_job().items) != expected_num_of_jobs): + time.sleep(self.RETRY_TIMEOUT_SEC) + + def wait_for_logical_backup_job_deletion(self): + self.wait_for_logical_backup_job(expected_num_of_jobs=0) + + def wait_for_logical_backup_job_creation(self): + self.wait_for_logical_backup_job(expected_num_of_jobs=1) + + def delete_operator_pod(self, step="Delete operator deplyment"): + operator_pod = self.api.core_v1.list_namespaced_pod('default', label_selector="name=postgres-operator").items[0].metadata.name + self.api.apps_v1.patch_namespaced_deployment("postgres-operator","default", {"spec":{"template":{"metadata":{"annotations":{"step":"{}-{}".format(step, time.time())}}}}}) + self.wait_for_operator_pod_start() + + def update_config(self, config_map_patch, step="Updating operator deployment"): + self.api.core_v1.patch_namespaced_config_map("postgres-operator", "default", config_map_patch) + self.delete_operator_pod(step=step) + + def patch_statefulset(self, data, name="acid-minimal-cluster", namespace="default"): + self.api.apps_v1.patch_namespaced_stateful_set(name, namespace, data) + + def create_with_kubectl(self, path): + return subprocess.run( + ["kubectl", "apply", "-f", path], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + + def exec_with_kubectl(self, pod, cmd): + return subprocess.run(["./exec.sh", pod, cmd], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + + def get_patroni_state(self, pod): + r = self.exec_with_kubectl(pod, "patronictl list -f json") + if not r.returncode == 0 or not r.stdout.decode()[0:1]=="[": + return [] + return json.loads(r.stdout.decode()) + + def get_patroni_running_members(self, pod="acid-minimal-cluster-0"): + result = self.get_patroni_state(pod) + return list(filter(lambda x: "State" in x and x["State"] == "running", result)) + + def get_deployment_replica_count(self, name="acid-minimal-cluster-pooler", namespace="default"): + try: + deployment = self.api.apps_v1.read_namespaced_deployment(name, namespace) + return deployment.spec.replicas + except ApiException as e: + return None + + def get_statefulset_image(self, label_selector="application=spilo,cluster-name=acid-minimal-cluster", namespace='default'): + ssets = self.api.apps_v1.list_namespaced_stateful_set(namespace, label_selector=label_selector, limit=1) + if len(ssets.items) == 0: + return None + return ssets.items[0].spec.template.spec.containers[0].image + + def get_effective_pod_image(self, pod_name, namespace='default'): + ''' + Get the Spilo image pod currently uses. In case of lazy rolling updates + it may differ from the one specified in the stateful set. + ''' + pod = self.api.core_v1.list_namespaced_pod( + namespace, label_selector="statefulset.kubernetes.io/pod-name=" + pod_name) + + if len(pod.items) == 0: + return None + return pod.items[0].spec.containers[0].image + + def get_cluster_leader_pod(self, pg_cluster_name, namespace='default'): + labels = { + 'application': 'spilo', + 'cluster-name': pg_cluster_name, + 'spilo-role': 'master', + } + + pods = self.api.core_v1.list_namespaced_pod( + namespace, label_selector=to_selector(labels)).items + + if pods: + return pods[0] + + +class K8sBase: + ''' + K8s basic API wrapper class supposed to be inherited by other more specific classes for e2e tests + ''' + + RETRY_TIMEOUT_SEC = 1 + + def __init__(self, labels='x=y', namespace='default'): + self.api = K8sApi() + self.labels=labels + self.namespace=namespace + + def get_pg_nodes(self, pg_cluster_labels='cluster-name=acid-minimal-cluster', namespace='default'): + master_pod_node = '' + replica_pod_nodes = [] + podsList = self.api.core_v1.list_namespaced_pod(namespace, label_selector=pg_cluster_labels) + for pod in podsList.items: + if pod.metadata.labels.get('spilo-role') == 'master': + master_pod_node = pod.spec.node_name + elif pod.metadata.labels.get('spilo-role') == 'replica': + replica_pod_nodes.append(pod.spec.node_name) + + return master_pod_node, replica_pod_nodes + + def get_cluster_nodes(self, cluster_labels='cluster-name=acid-minimal-cluster', namespace='default'): + m = [] + r = [] + podsList = self.api.core_v1.list_namespaced_pod(namespace, label_selector=cluster_labels) + for pod in podsList.items: + if pod.metadata.labels.get('spilo-role') == 'master' and pod.status.phase == 'Running': + m.append(pod.spec.node_name) + elif pod.metadata.labels.get('spilo-role') == 'replica' and pod.status.phase == 'Running': + r.append(pod.spec.node_name) + + return m, r + + def wait_for_operator_pod_start(self): + self.wait_for_pod_start("name=postgres-operator") + + def get_operator_pod(self): + pods = self.api.core_v1.list_namespaced_pod( + 'default', label_selector='name=postgres-operator' + ).items + + if pods: + return pods[0] + + return None + + def get_operator_log(self): + operator_pod = self.get_operator_pod() + pod_name = operator_pod.metadata.name + return self.api.core_v1.read_namespaced_pod_log( + name=pod_name, + namespace='default' + ) + + def wait_for_pod_start(self, pod_labels, namespace='default'): + pod_phase = 'No pod running' + while pod_phase != 'Running': + pods = self.api.core_v1.list_namespaced_pod(namespace, label_selector=pod_labels).items + if pods: + pod_phase = pods[0].status.phase + + time.sleep(self.RETRY_TIMEOUT_SEC) + + def get_service_type(self, svc_labels, namespace='default'): + svc_type = '' + svcs = self.api.core_v1.list_namespaced_service(namespace, label_selector=svc_labels, limit=1).items + for svc in svcs: + svc_type = svc.spec.type + return svc_type + + def check_service_annotations(self, svc_labels, annotations, namespace='default'): + svcs = self.api.core_v1.list_namespaced_service(namespace, label_selector=svc_labels, limit=1).items + for svc in svcs: + for key, value in annotations.items(): + if key not in svc.metadata.annotations or svc.metadata.annotations[key] != value: + print("Expected key {} not found in annotations {}".format(key, svc.metadata.annotation)) + return False + return True + + def check_statefulset_annotations(self, sset_labels, annotations, namespace='default'): + ssets = self.api.apps_v1.list_namespaced_stateful_set(namespace, label_selector=sset_labels, limit=1).items + for sset in ssets: + for key, value in annotations.items(): + if key not in sset.metadata.annotations or sset.metadata.annotations[key] != value: + print("Expected key {} not found in annotations {}".format(key, sset.metadata.annotation)) + return False + return True + + def scale_cluster(self, number_of_instances, name="acid-minimal-cluster", namespace="default"): + body = { + "spec": { + "numberOfInstances": number_of_instances + } + } + self.api.custom_objects_api.patch_namespaced_custom_object( + "acid.zalan.do", "v1", namespace, "postgresqls", name, body) + + def wait_for_running_pods(self, labels, number, namespace=''): + while self.count_pods_with_label(labels) != number: + time.sleep(self.RETRY_TIMEOUT_SEC) + + def wait_for_pods_to_stop(self, labels, namespace=''): + while self.count_pods_with_label(labels) != 0: + time.sleep(self.RETRY_TIMEOUT_SEC) + + def wait_for_service(self, labels, namespace='default'): + def get_services(): + return self.api.core_v1.list_namespaced_service( + namespace, label_selector=labels + ).items + + while not get_services(): + time.sleep(self.RETRY_TIMEOUT_SEC) + + def count_pods_with_label(self, labels, namespace='default'): + return len(self.api.core_v1.list_namespaced_pod(namespace, label_selector=labels).items) + + def count_services_with_label(self, labels, namespace='default'): + return len(self.api.core_v1.list_namespaced_service(namespace, label_selector=labels).items) + + def count_endpoints_with_label(self, labels, namespace='default'): + return len(self.api.core_v1.list_namespaced_endpoints(namespace, label_selector=labels).items) + + def count_secrets_with_label(self, labels, namespace='default'): + return len(self.api.core_v1.list_namespaced_secret(namespace, label_selector=labels).items) + + def count_statefulsets_with_label(self, labels, namespace='default'): + return len(self.api.apps_v1.list_namespaced_stateful_set(namespace, label_selector=labels).items) + + def count_deployments_with_label(self, labels, namespace='default'): + return len(self.api.apps_v1.list_namespaced_deployment(namespace, label_selector=labels).items) + + def count_pdbs_with_label(self, labels, namespace='default'): + return len(self.api.policy_v1_beta1.list_namespaced_pod_disruption_budget( + namespace, label_selector=labels).items) + + def count_running_pods(self, labels='application=spilo,cluster-name=acid-minimal-cluster', namespace='default'): + pods = self.api.core_v1.list_namespaced_pod(namespace, label_selector=labels).items + return len(list(filter(lambda x: x.status.phase=='Running', pods))) + + def wait_for_pod_failover(self, failover_targets, labels, namespace='default'): + pod_phase = 'Failing over' + new_pod_node = '' + + while (pod_phase != 'Running') or (new_pod_node not in failover_targets): + pods = self.api.core_v1.list_namespaced_pod(namespace, label_selector=labels).items + if pods: + new_pod_node = pods[0].spec.node_name + pod_phase = pods[0].status.phase + time.sleep(self.RETRY_TIMEOUT_SEC) + + def get_logical_backup_job(self, namespace='default'): + return self.api.batch_v1_beta1.list_namespaced_cron_job(namespace, label_selector="application=spilo") + + def wait_for_logical_backup_job(self, expected_num_of_jobs): + while (len(self.get_logical_backup_job().items) != expected_num_of_jobs): + time.sleep(self.RETRY_TIMEOUT_SEC) + + def wait_for_logical_backup_job_deletion(self): + self.wait_for_logical_backup_job(expected_num_of_jobs=0) + + def wait_for_logical_backup_job_creation(self): + self.wait_for_logical_backup_job(expected_num_of_jobs=1) + + def delete_operator_pod(self, step="Delete operator deplyment"): + operator_pod = self.api.core_v1.list_namespaced_pod('default', label_selector="name=postgres-operator").items[0].metadata.name + self.api.apps_v1.patch_namespaced_deployment("postgres-operator","default", {"spec":{"template":{"metadata":{"annotations":{"step":"{}-{}".format(step, time.time())}}}}}) + self.wait_for_operator_pod_start() + + def update_config(self, config_map_patch, step="Updating operator deployment"): + self.api.core_v1.patch_namespaced_config_map("postgres-operator", "default", config_map_patch) + self.delete_operator_pod(step=step) + + def create_with_kubectl(self, path): + return subprocess.run( + ["kubectl", "apply", "-f", path], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + + def exec_with_kubectl(self, pod, cmd): + return subprocess.run(["./exec.sh", pod, cmd], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + + def get_patroni_state(self, pod): + r = self.exec_with_kubectl(pod, "patronictl list -f json") + if not r.returncode == 0 or not r.stdout.decode()[0:1]=="[": + return [] + return json.loads(r.stdout.decode()) + + def get_patroni_running_members(self, pod): + result = self.get_patroni_state(pod) + return list(filter(lambda x: x["State"]=="running", result)) + + def get_statefulset_image(self, label_selector="application=spilo,cluster-name=acid-minimal-cluster", namespace='default'): + ssets = self.api.apps_v1.list_namespaced_stateful_set(namespace, label_selector=label_selector, limit=1) + if len(ssets.items) == 0: + return None + return ssets.items[0].spec.template.spec.containers[0].image + + def get_effective_pod_image(self, pod_name, namespace='default'): + ''' + Get the Spilo image pod currently uses. In case of lazy rolling updates + it may differ from the one specified in the stateful set. + ''' + pod = self.api.core_v1.list_namespaced_pod( + namespace, label_selector="statefulset.kubernetes.io/pod-name=" + pod_name) + + if len(pod.items) == 0: + return None + return pod.items[0].spec.containers[0].image + + +""" + Inspiriational classes towards easier writing of end to end tests with one cluster per test case +""" +class K8sOperator(K8sBase): + def __init__(self, labels="name=postgres-operator", namespace="default"): + super().__init__(labels, namespace) + +class K8sPostgres(K8sBase): + def __init__(self, labels="cluster-name=acid-minimal-cluster", namespace="default"): + super().__init__(labels, namespace) + + def get_pg_nodes(self): + master_pod_node = '' + replica_pod_nodes = [] + podsList = self.api.core_v1.list_namespaced_pod(self.namespace, label_selector=self.labels) + for pod in podsList.items: + if pod.metadata.labels.get('spilo-role') == 'master': + master_pod_node = pod.spec.node_name + elif pod.metadata.labels.get('spilo-role') == 'replica': + replica_pod_nodes.append(pod.spec.node_name) + + return master_pod_node, replica_pod_nodes \ No newline at end of file diff --git a/e2e/tests/test_e2e.py b/e2e/tests/test_e2e.py index fc251c430..888fc2eaa 100644 --- a/e2e/tests/test_e2e.py +++ b/e2e/tests/test_e2e.py @@ -10,6 +10,10 @@ import yaml from datetime import datetime from kubernetes import client, config +from tests.k8s_api import K8s + +SPILO_CURRENT = "registry.opensource.zalan.do/acid/spilo-12:1.6-p5" +SPILO_LAZY = "registry.opensource.zalan.do/acid/spilo-cdp-12:1.6-p114" def to_selector(labels): return ",".join(["=".join(l) for l in labels.items()]) @@ -31,6 +35,41 @@ class EndToEndTestCase(unittest.TestCase): # `kind` pods may stuck in the `Terminating` phase for a few minutes; hence high test timeout TEST_TIMEOUT_SEC = 600 + def eventuallyEqual(self, f, x, m, retries=60, interval=2): + while True: + try: + y = f() + self.assertEqual(y, x, m.format(y)) + return True + except AssertionError: + retries = retries -1 + if not retries > 0: + raise + time.sleep(interval) + + def eventuallyNotEqual(self, f, x, m, retries=60, interval=2): + while True: + try: + y = f() + self.assertNotEqual(y, x, m.format(y)) + return True + except AssertionError: + retries = retries -1 + if not retries > 0: + raise + time.sleep(interval) + + def eventuallyTrue(self, f, m, retries=60, interval=2): + while True: + try: + self.assertTrue(f(), m) + return True + except AssertionError: + retries = retries -1 + if not retries > 0: + raise + time.sleep(interval) + @classmethod @timeout_decorator.timeout(TEST_TIMEOUT_SEC) def setUpClass(cls): @@ -48,18 +87,26 @@ class EndToEndTestCase(unittest.TestCase): k8s = cls.k8s = K8s() # remove existing local storage class and create hostpath class - k8s.api.storage_v1_api.delete_storage_class("standard") + try: + k8s.api.storage_v1_api.delete_storage_class("standard") + except: + print("Storage class has already been remove") # operator deploys pod service account there on start up # needed for test_multi_namespace_support() cls.namespace = "test" - v1_namespace = client.V1Namespace(metadata=client.V1ObjectMeta(name=cls.namespace)) - k8s.api.core_v1.create_namespace(v1_namespace) + try: + v1_namespace = client.V1Namespace(metadata=client.V1ObjectMeta(name=cls.namespace)) + k8s.api.core_v1.create_namespace(v1_namespace) + except: + print("Namespace already present") # submit the most recent operator image built on the Docker host with open("manifests/postgres-operator.yaml", 'r+') as f: operator_deployment = yaml.safe_load(f) operator_deployment["spec"]["template"]["spec"]["containers"][0]["image"] = os.environ['OPERATOR_IMAGE'] + + with open("manifests/postgres-operator.yaml", 'w') as f: yaml.dump(operator_deployment, f, Dumper=yaml.Dumper) for filename in ["operator-service-account-rbac.yaml", @@ -73,6 +120,18 @@ class EndToEndTestCase(unittest.TestCase): k8s.wait_for_operator_pod_start() + # reset taints and tolerations + k8s.api.core_v1.patch_node("postgres-operator-e2e-tests-worker",{"spec":{"taints":[]}}) + k8s.api.core_v1.patch_node("postgres-operator-e2e-tests-worker2",{"spec":{"taints":[]}}) + + # make sure we start a new operator on every new run, + # this tackles the problem when kind is reused + # and the Docker image is infact changed (dirty one) + + # patch resync period, this can catch some problems with hanging e2e tests + # k8s.update_config({"data": {"resync_period":"30s"}},step="TestSuite setup") + k8s.update_config({}, step="TestSuite Startup") + actual_operator_image = k8s.api.core_v1.list_namespaced_pod( 'default', label_selector='name=postgres-operator').items[0].spec.containers[0].image print("Tested operator image: {}".format(actual_operator_image)) # shows up after tests finish @@ -105,124 +164,122 @@ class EndToEndTestCase(unittest.TestCase): pod_selector = to_selector(pod_labels) service_selector = to_selector(service_labels) - try: - # enable connection pooler - k8s.api.custom_objects_api.patch_namespaced_custom_object( - 'acid.zalan.do', 'v1', 'default', - 'postgresqls', 'acid-minimal-cluster', - { - 'spec': { - 'enableConnectionPooler': True, - } - }) - k8s.wait_for_pod_start(pod_selector) + # enable connection pooler + k8s.api.custom_objects_api.patch_namespaced_custom_object( + 'acid.zalan.do', 'v1', 'default', + 'postgresqls', 'acid-minimal-cluster', + { + 'spec': { + 'enableConnectionPooler': True, + } + }) - pods = k8s.api.core_v1.list_namespaced_pod( - 'default', label_selector=pod_selector - ).items + self.eventuallyEqual(lambda: k8s.get_deployment_replica_count(), 2, "Deployment replicas is 2 default") + self.eventuallyEqual(lambda: k8s.count_running_pods("connection-pooler=acid-minimal-cluster-pooler"), 2, "No pooler pods found") + self.eventuallyEqual(lambda: k8s.count_services_with_label('application=db-connection-pooler,cluster-name=acid-minimal-cluster'), 1, "No pooler service found") - self.assertTrue(pods, 'No connection pooler pods') + # scale up connection pooler deployment + k8s.api.custom_objects_api.patch_namespaced_custom_object( + 'acid.zalan.do', 'v1', 'default', + 'postgresqls', 'acid-minimal-cluster', + { + 'spec': { + 'connectionPooler': { + 'numberOfInstances': 3, + }, + } + }) - k8s.wait_for_service(service_selector) - services = k8s.api.core_v1.list_namespaced_service( - 'default', label_selector=service_selector - ).items - services = [ - s for s in services - if s.metadata.name.endswith('pooler') - ] + self.eventuallyEqual(lambda: k8s.get_deployment_replica_count(), 3, "Deployment replicas is scaled to 3") + self.eventuallyEqual(lambda: k8s.count_running_pods("connection-pooler=acid-minimal-cluster-pooler"), 3, "Scale up of pooler pods does not work") - self.assertTrue(services, 'No connection pooler service') + # turn it off, keeping config should be overwritten by false + k8s.api.custom_objects_api.patch_namespaced_custom_object( + 'acid.zalan.do', 'v1', 'default', + 'postgresqls', 'acid-minimal-cluster', + { + 'spec': { + 'enableConnectionPooler': False + } + }) - # scale up connection pooler deployment - k8s.api.custom_objects_api.patch_namespaced_custom_object( - 'acid.zalan.do', 'v1', 'default', - 'postgresqls', 'acid-minimal-cluster', - { - 'spec': { - 'connectionPooler': { - 'numberOfInstances': 2, - }, - } - }) + self.eventuallyEqual(lambda: k8s.count_running_pods("connection-pooler=acid-minimal-cluster-pooler"), 0, "Pooler pods not scaled down") + self.eventuallyEqual(lambda: k8s.count_services_with_label('application=db-connection-pooler,cluster-name=acid-minimal-cluster'), 0, "Pooler service not removed") - k8s.wait_for_running_pods(pod_selector, 2) + # Verify that all the databases have pooler schema installed. + # Do this via psql, since otherwise we need to deal with + # credentials. + dbList = [] - # Verify that all the databases have pooler schema installed. - # Do this via psql, since otherwise we need to deal with - # credentials. - dbList = [] + leader = k8s.get_cluster_leader_pod('acid-minimal-cluster') + dbListQuery = "select datname from pg_database" + schemasQuery = """ + select schema_name + from information_schema.schemata + where schema_name = 'pooler' + """ + exec_query = r"psql -tAq -c \"{}\" -d {}" - leader = k8s.get_cluster_leader_pod('acid-minimal-cluster') - dbListQuery = "select datname from pg_database" - schemasQuery = """ - select schema_name - from information_schema.schemata - where schema_name = 'pooler' - """ - exec_query = r"psql -tAq -c \"{}\" -d {}" + if leader: + try: + q = exec_query.format(dbListQuery, "postgres") + q = "su postgres -c \"{}\"".format(q) + print('Get databases: {}'.format(q)) + result = k8s.exec_with_kubectl(leader.metadata.name, q) + dbList = clean_list(result.stdout.split(b'\n')) + print('dbList: {}, stdout: {}, stderr {}'.format( + dbList, result.stdout, result.stderr + )) + except Exception as ex: + print('Could not get databases: {}'.format(ex)) + print('Stdout: {}'.format(result.stdout)) + print('Stderr: {}'.format(result.stderr)) - if leader: + for db in dbList: + if db in ('template0', 'template1'): + continue + + schemas = [] try: - q = exec_query.format(dbListQuery, "postgres") + q = exec_query.format(schemasQuery, db) q = "su postgres -c \"{}\"".format(q) - print('Get databases: {}'.format(q)) + print('Get schemas: {}'.format(q)) result = k8s.exec_with_kubectl(leader.metadata.name, q) - dbList = clean_list(result.stdout.split(b'\n')) - print('dbList: {}, stdout: {}, stderr {}'.format( - dbList, result.stdout, result.stderr + schemas = clean_list(result.stdout.split(b'\n')) + print('schemas: {}, stdout: {}, stderr {}'.format( + schemas, result.stdout, result.stderr )) except Exception as ex: print('Could not get databases: {}'.format(ex)) print('Stdout: {}'.format(result.stdout)) print('Stderr: {}'.format(result.stderr)) - for db in dbList: - if db in ('template0', 'template1'): - continue + self.assertNotEqual(len(schemas), 0) + else: + print('Could not find leader pod') - schemas = [] - try: - q = exec_query.format(schemasQuery, db) - q = "su postgres -c \"{}\"".format(q) - print('Get schemas: {}'.format(q)) - result = k8s.exec_with_kubectl(leader.metadata.name, q) - schemas = clean_list(result.stdout.split(b'\n')) - print('schemas: {}, stdout: {}, stderr {}'.format( - schemas, result.stdout, result.stderr - )) - except Exception as ex: - print('Could not get databases: {}'.format(ex)) - print('Stdout: {}'.format(result.stdout)) - print('Stderr: {}'.format(result.stderr)) - - self.assertNotEqual(len(schemas), 0) - else: - print('Could not find leader pod') - - # turn it off, keeping configuration section - k8s.api.custom_objects_api.patch_namespaced_custom_object( - 'acid.zalan.do', 'v1', 'default', - 'postgresqls', 'acid-minimal-cluster', - { - 'spec': { - 'enableConnectionPooler': False, - } - }) - k8s.wait_for_pods_to_stop(pod_selector) - - except timeout_decorator.TimeoutError: - print('Operator log: {}'.format(k8s.get_operator_log())) - raise + # remove config section to make test work next time + k8s.api.custom_objects_api.patch_namespaced_custom_object( + 'acid.zalan.do', 'v1', 'default', + 'postgresqls', 'acid-minimal-cluster', + { + 'spec': { + 'connectionPooler': None + } + }) @timeout_decorator.timeout(TEST_TIMEOUT_SEC) def test_enable_load_balancer(self): ''' - Test if services are updated when enabling/disabling load balancers + Test if services are updated when enabling/disabling load balancers in Postgres manifest ''' k8s = self.k8s - cluster_label = 'application=spilo,cluster-name=acid-minimal-cluster' + cluster_label = 'application=spilo,cluster-name=acid-minimal-cluster,spilo-role={}' + + self.eventuallyEqual(lambda: k8s.get_service_type(cluster_label.format("master")), + 'ClusterIP', + "Expected ClusterIP type initially, found {}") try: # enable load balancer services @@ -234,16 +291,14 @@ class EndToEndTestCase(unittest.TestCase): } k8s.api.custom_objects_api.patch_namespaced_custom_object( "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_patch_enable_lbs) - # wait for service recreation - time.sleep(60) + + self.eventuallyEqual(lambda: k8s.get_service_type(cluster_label.format("master")), + 'LoadBalancer', + "Expected LoadBalancer service type for master, found {}") - master_svc_type = k8s.get_service_type(cluster_label + ',spilo-role=master') - self.assertEqual(master_svc_type, 'LoadBalancer', - "Expected LoadBalancer service type for master, found {}".format(master_svc_type)) - - repl_svc_type = k8s.get_service_type(cluster_label + ',spilo-role=replica') - self.assertEqual(repl_svc_type, 'LoadBalancer', - "Expected LoadBalancer service type for replica, found {}".format(repl_svc_type)) + self.eventuallyEqual(lambda: k8s.get_service_type(cluster_label.format("replica")), + 'LoadBalancer', + "Expected LoadBalancer service type for master, found {}") # disable load balancer services again pg_patch_disable_lbs = { @@ -254,16 +309,14 @@ class EndToEndTestCase(unittest.TestCase): } k8s.api.custom_objects_api.patch_namespaced_custom_object( "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_patch_disable_lbs) - # wait for service recreation - time.sleep(60) + + self.eventuallyEqual(lambda: k8s.get_service_type(cluster_label.format("master")), + 'ClusterIP', + "Expected LoadBalancer service type for master, found {}") - master_svc_type = k8s.get_service_type(cluster_label + ',spilo-role=master') - self.assertEqual(master_svc_type, 'ClusterIP', - "Expected ClusterIP service type for master, found {}".format(master_svc_type)) - - repl_svc_type = k8s.get_service_type(cluster_label + ',spilo-role=replica') - self.assertEqual(repl_svc_type, 'ClusterIP', - "Expected ClusterIP service type for replica, found {}".format(repl_svc_type)) + self.eventuallyEqual(lambda: k8s.get_service_type(cluster_label.format("replica")), + 'ClusterIP', + "Expected LoadBalancer service type for master, found {}") except timeout_decorator.TimeoutError: print('Operator log: {}'.format(k8s.get_operator_log())) @@ -277,8 +330,7 @@ class EndToEndTestCase(unittest.TestCase): k8s = self.k8s # update infrastructure roles description secret_name = "postgresql-infrastructure-roles" - roles = "secretname: postgresql-infrastructure-roles-new, \ - userkey: user, rolekey: memberof, passwordkey: password, defaultrolevalue: robot_zmon" + roles = "secretname: postgresql-infrastructure-roles-new, userkey: user, rolekey: memberof, passwordkey: password, defaultrolevalue: robot_zmon" patch_infrastructure_roles = { "data": { "infrastructure_roles_secret_name": secret_name, @@ -287,33 +339,41 @@ class EndToEndTestCase(unittest.TestCase): } k8s.update_config(patch_infrastructure_roles) - # wait a little before proceeding - time.sleep(30) - try: # check that new roles are represented in the config by requesting the # operator configuration via API - operator_pod = k8s.get_operator_pod() - get_config_cmd = "wget --quiet -O - localhost:8080/config" - result = k8s.exec_with_kubectl( - operator_pod.metadata.name, - get_config_cmd, - ) - roles_dict = (json.loads(result.stdout) - .get("controller", {}) - .get("InfrastructureRoles")) - self.assertTrue("robot_zmon_acid_monitoring_new" in roles_dict) - role = roles_dict["robot_zmon_acid_monitoring_new"] - role.pop("Password", None) - self.assertDictEqual(role, { - "Name": "robot_zmon_acid_monitoring_new", - "Flags": None, - "MemberOf": ["robot_zmon"], - "Parameters": None, - "AdminRole": "", - "Origin": 2, - }) + def verify_role(): + try: + operator_pod = k8s.get_operator_pod() + get_config_cmd = "wget --quiet -O - localhost:8080/config" + result = k8s.exec_with_kubectl(operator_pod.metadata.name, get_config_cmd) + try: + roles_dict = (json.loads(result.stdout) + .get("controller", {}) + .get("InfrastructureRoles")) + except: + return False + + if "robot_zmon_acid_monitoring_new" in roles_dict: + role = roles_dict["robot_zmon_acid_monitoring_new"] + role.pop("Password", None) + self.assertDictEqual(role, { + "Name": "robot_zmon_acid_monitoring_new", + "Flags": None, + "MemberOf": ["robot_zmon"], + "Parameters": None, + "AdminRole": "", + "Origin": 2, + }) + return True + except: + pass + + return False + + self.eventuallyTrue(verify_role, "infrastructure role setup is not loaded") + except timeout_decorator.TimeoutError: print('Operator log: {}'.format(k8s.get_operator_log())) @@ -333,33 +393,47 @@ class EndToEndTestCase(unittest.TestCase): k8s = self.k8s + pod0 = 'acid-minimal-cluster-0' + pod1 = 'acid-minimal-cluster-1' + + self.eventuallyEqual(lambda: k8s.count_running_pods(), 2, "No 2 pods running") + self.eventuallyEqual(lambda: len(k8s.get_patroni_running_members(pod0)), 2, "Postgres status did not enter running") + + patch_lazy_spilo_upgrade = { + "data": { + "docker_image": SPILO_CURRENT, + "enable_lazy_spilo_upgrade": "false" + } + } + k8s.update_config(patch_lazy_spilo_upgrade, step="Init baseline image version") + + self.eventuallyEqual(lambda: k8s.get_statefulset_image(), SPILO_CURRENT, "Stagefulset not updated initially") + self.eventuallyEqual(lambda: k8s.count_running_pods(), 2, "No 2 pods running") + self.eventuallyEqual(lambda: len(k8s.get_patroni_running_members(pod0)), 2, "Postgres status did not enter running") + + self.eventuallyEqual(lambda: k8s.get_effective_pod_image(pod0), SPILO_CURRENT, "Rolling upgrade was not executed") + self.eventuallyEqual(lambda: k8s.get_effective_pod_image(pod1), SPILO_CURRENT, "Rolling upgrade was not executed") + # update docker image in config and enable the lazy upgrade - conf_image = "registry.opensource.zalan.do/acid/spilo-cdp-12:1.6-p114" + conf_image = SPILO_LAZY patch_lazy_spilo_upgrade = { "data": { "docker_image": conf_image, "enable_lazy_spilo_upgrade": "true" } } - k8s.update_config(patch_lazy_spilo_upgrade) - - pod0 = 'acid-minimal-cluster-0' - pod1 = 'acid-minimal-cluster-1' + k8s.update_config(patch_lazy_spilo_upgrade,step="patch image and lazy upgrade") + self.eventuallyEqual(lambda: k8s.get_statefulset_image(), conf_image, "Statefulset not updated to next Docker image") try: # restart the pod to get a container with the new image - k8s.api.core_v1.delete_namespaced_pod(pod0, 'default') - time.sleep(60) - - # lazy update works if the restarted pod and older pods run different Spilo versions - new_image = k8s.get_effective_pod_image(pod0) - old_image = k8s.get_effective_pod_image(pod1) - self.assertNotEqual(new_image, old_image, - "Lazy updated failed: pods have the same image {}".format(new_image)) - - # sanity check - assert_msg = "Image {} of a new pod differs from {} in operator conf".format(new_image, conf_image) - self.assertEqual(new_image, conf_image, assert_msg) + k8s.api.core_v1.delete_namespaced_pod(pod0, 'default') + + # verify only pod-0 which was deleted got new image from statefulset + self.eventuallyEqual(lambda: k8s.get_effective_pod_image(pod0), conf_image, "Delete pod-0 did not get new spilo image") + self.eventuallyEqual(lambda: k8s.count_running_pods(), 2, "No two pods running after lazy rolling upgrade") + self.eventuallyEqual(lambda: len(k8s.get_patroni_running_members(pod0)), 2, "Postgres status did not enter running") + self.assertNotEqual(lambda: k8s.get_effective_pod_image(pod1), SPILO_CURRENT, "pod-1 should not have change Docker image to {}".format(SPILO_CURRENT)) # clean up unpatch_lazy_spilo_upgrade = { @@ -367,20 +441,12 @@ class EndToEndTestCase(unittest.TestCase): "enable_lazy_spilo_upgrade": "false", } } - k8s.update_config(unpatch_lazy_spilo_upgrade) + k8s.update_config(unpatch_lazy_spilo_upgrade, step="patch lazy upgrade") # at this point operator will complete the normal rolling upgrade # so we additonally test if disabling the lazy upgrade - forcing the normal rolling upgrade - works - - # XXX there is no easy way to wait until the end of Sync() - time.sleep(60) - - image0 = k8s.get_effective_pod_image(pod0) - image1 = k8s.get_effective_pod_image(pod1) - - assert_msg = "Disabling lazy upgrade failed: pods still have different \ - images {} and {}".format(image0, image1) - self.assertEqual(image0, image1, assert_msg) + self.eventuallyEqual(lambda: k8s.get_effective_pod_image(pod0), conf_image, "Rolling upgrade was not executed", 50, 3) + self.eventuallyEqual(lambda: k8s.get_effective_pod_image(pod1), conf_image, "Rolling upgrade was not executed", 50, 3) except timeout_decorator.TimeoutError: print('Operator log: {}'.format(k8s.get_operator_log())) @@ -412,12 +478,9 @@ class EndToEndTestCase(unittest.TestCase): "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_patch_enable_backup) try: - k8s.wait_for_logical_backup_job_creation() + self.eventuallyEqual(lambda: len(k8s.get_logical_backup_job().items), 1, "failed to create logical backup job") - jobs = k8s.get_logical_backup_job().items - self.assertEqual(1, len(jobs), "Expected 1 logical backup job, found {}".format(len(jobs))) - - job = jobs[0] + job = k8s.get_logical_backup_job().items[0] self.assertEqual(job.metadata.name, "logical-backup-acid-minimal-cluster", "Expected job name {}, found {}" .format("logical-backup-acid-minimal-cluster", job.metadata.name)) @@ -432,12 +495,14 @@ class EndToEndTestCase(unittest.TestCase): "logical_backup_docker_image": image, } } - k8s.update_config(patch_logical_backup_image) + k8s.update_config(patch_logical_backup_image, step="patch logical backup image") - jobs = k8s.get_logical_backup_job().items - actual_image = jobs[0].spec.job_template.spec.template.spec.containers[0].image - self.assertEqual(actual_image, image, - "Expected job image {}, found {}".format(image, actual_image)) + def get_docker_image(): + jobs = k8s.get_logical_backup_job().items + return jobs[0].spec.job_template.spec.template.spec.containers[0].image + + self.eventuallyEqual(get_docker_image, image, + "Expected job image {}, found {}".format(image, "{}")) # delete the logical backup cron job pg_patch_disable_backup = { @@ -447,10 +512,8 @@ class EndToEndTestCase(unittest.TestCase): } k8s.api.custom_objects_api.patch_namespaced_custom_object( "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_patch_disable_backup) - k8s.wait_for_logical_backup_job_deletion() - jobs = k8s.get_logical_backup_job().items - self.assertEqual(0, len(jobs), - "Expected 0 logical backup jobs, found {}".format(len(jobs))) + + self.eventuallyEqual(lambda: len(k8s.get_logical_backup_job().items), 0, "failed to create logical backup job") except timeout_decorator.TimeoutError: print('Operator log: {}'.format(k8s.get_operator_log())) @@ -462,20 +525,18 @@ class EndToEndTestCase(unittest.TestCase): Lower resource limits below configured minimum and let operator fix it ''' k8s = self.k8s - cluster_label = 'application=spilo,cluster-name=acid-minimal-cluster' - labels = 'spilo-role=master,' + cluster_label - _, failover_targets = k8s.get_pg_nodes(cluster_label) + # self.eventuallyEqual(lambda: k8s.pg_get_status(), "Running", "Cluster not healthy at start") # configure minimum boundaries for CPU and memory limits - minCPULimit = '500m' - minMemoryLimit = '500Mi' + minCPULimit = '503m' + minMemoryLimit = '502Mi' + patch_min_resource_limits = { "data": { "min_cpu_limit": minCPULimit, "min_memory_limit": minMemoryLimit } } - k8s.update_config(patch_min_resource_limits) # lower resource limits below minimum pg_patch_resources = { @@ -494,26 +555,31 @@ class EndToEndTestCase(unittest.TestCase): } k8s.api.custom_objects_api.patch_namespaced_custom_object( "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_patch_resources) + + k8s.patch_statefulset({"metadata":{"annotations":{"zalando-postgres-operator-rolling-update-required": "False"}}}) + k8s.update_config(patch_min_resource_limits, "Minimum resource test") - try: - k8s.wait_for_pod_failover(failover_targets, labels) - k8s.wait_for_pod_start('spilo-role=replica') + self.eventuallyEqual(lambda: k8s.count_running_pods(), 2, "No two pods running after lazy rolling upgrade") + self.eventuallyEqual(lambda: len(k8s.get_patroni_running_members()), 2, "Postgres status did not enter running") + + def verify_pod_limits(): + pods = k8s.api.core_v1.list_namespaced_pod('default', label_selector="cluster-name=acid-minimal-cluster,application=spilo").items + if len(pods)<2: + return False - pods = k8s.api.core_v1.list_namespaced_pod( - 'default', label_selector=labels).items - self.assert_master_is_unique() - masterPod = pods[0] + r = pods[0].spec.containers[0].resources.limits['memory']==minMemoryLimit + r = r and pods[0].spec.containers[0].resources.limits['cpu'] == minCPULimit + r = r and pods[1].spec.containers[0].resources.limits['memory']==minMemoryLimit + r = r and pods[1].spec.containers[0].resources.limits['cpu'] == minCPULimit + return r - self.assertEqual(masterPod.spec.containers[0].resources.limits['cpu'], minCPULimit, - "Expected CPU limit {}, found {}" - .format(minCPULimit, masterPod.spec.containers[0].resources.limits['cpu'])) - self.assertEqual(masterPod.spec.containers[0].resources.limits['memory'], minMemoryLimit, - "Expected memory limit {}, found {}" - .format(minMemoryLimit, masterPod.spec.containers[0].resources.limits['memory'])) + self.eventuallyTrue(verify_pod_limits, "Pod limits where not adjusted") - except timeout_decorator.TimeoutError: - print('Operator log: {}'.format(k8s.get_operator_log())) - raise + @classmethod + def setUp(cls): + # cls.k8s.update_config({}, step="Setup") + cls.k8s.patch_statefulset({"meta":{"annotations":{"zalando-postgres-operator-rolling-update-required": False}}}) + pass @timeout_decorator.timeout(TEST_TIMEOUT_SEC) def test_multi_namespace_support(self): @@ -537,7 +603,7 @@ class EndToEndTestCase(unittest.TestCase): raise @timeout_decorator.timeout(TEST_TIMEOUT_SEC) - def test_node_readiness_label(self): + def test_zz_node_readiness_label(self): ''' Remove node readiness label from master node. This must cause a failover. ''' @@ -560,6 +626,7 @@ class EndToEndTestCase(unittest.TestCase): } } } + self.assertTrue(len(failover_targets)>0, "No failover targets available") for failover_target in failover_targets: k8s.api.core_v1.patch_node(failover_target, patch_readiness_label) @@ -569,18 +636,15 @@ class EndToEndTestCase(unittest.TestCase): "node_readiness_label": readiness_label + ':' + readiness_value, } } - k8s.update_config(patch_readiness_label_config) + k8s.update_config(patch_readiness_label_config, "setting readiness label") new_master_node, new_replica_nodes = self.assert_failover( current_master_node, num_replicas, failover_targets, cluster_label) # patch also node where master ran before k8s.api.core_v1.patch_node(current_master_node, patch_readiness_label) - # wait a little before proceeding with the pod distribution test - time.sleep(30) - # toggle pod anti affinity to move replica away from master node - self.assert_distributed_pods(new_master_node, new_replica_nodes, cluster_label) + self.eventuallyTrue(lambda: self.assert_distributed_pods(new_master_node, new_replica_nodes, cluster_label), "Pods are redistributed") except timeout_decorator.TimeoutError: print('Operator log: {}'.format(k8s.get_operator_log())) @@ -592,25 +656,20 @@ class EndToEndTestCase(unittest.TestCase): Scale up from 2 to 3 and back to 2 pods by updating the Postgres manifest at runtime. ''' k8s = self.k8s - labels = "application=spilo,cluster-name=acid-minimal-cluster" + pod="acid-minimal-cluster-0" - try: - k8s.wait_for_pg_to_scale(3) - self.assertEqual(3, k8s.count_pods_with_label(labels)) - self.assert_master_is_unique() - - k8s.wait_for_pg_to_scale(2) - self.assertEqual(2, k8s.count_pods_with_label(labels)) - self.assert_master_is_unique() - - except timeout_decorator.TimeoutError: - print('Operator log: {}'.format(k8s.get_operator_log())) - raise + k8s.scale_cluster(3) + self.eventuallyEqual(lambda: k8s.count_running_pods(), 3, "Scale up to 3 failed") + self.eventuallyEqual(lambda: len(k8s.get_patroni_running_members(pod)), 3, "Not all 3 nodes healthy") + + k8s.scale_cluster(2) + self.eventuallyEqual(lambda: k8s.count_running_pods(), 2, "Scale down to 2 failed") + self.eventuallyEqual(lambda: len(k8s.get_patroni_running_members(pod)), 2, "Not all members 2 healthy") @timeout_decorator.timeout(TEST_TIMEOUT_SEC) def test_service_annotations(self): ''' - Create a Postgres cluster with service annotations and check them. + Create a Postgres cluster with service annotations and check them. ''' k8s = self.k8s patch_custom_service_annotations = { @@ -620,32 +679,25 @@ class EndToEndTestCase(unittest.TestCase): } k8s.update_config(patch_custom_service_annotations) - try: - pg_patch_custom_annotations = { - "spec": { - "serviceAnnotations": { - "annotation.key": "value", - "foo": "bar", - } + pg_patch_custom_annotations = { + "spec": { + "serviceAnnotations": { + "annotation.key": "value", + "alice": "bob", } } - k8s.api.custom_objects_api.patch_namespaced_custom_object( - "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_patch_custom_annotations) + } + k8s.api.custom_objects_api.patch_namespaced_custom_object( + "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_patch_custom_annotations) - # wait a little before proceeding - time.sleep(30) - annotations = { - "annotation.key": "value", - "foo": "bar", - } - self.assertTrue(k8s.check_service_annotations( - "cluster-name=acid-minimal-cluster,spilo-role=master", annotations)) - self.assertTrue(k8s.check_service_annotations( - "cluster-name=acid-minimal-cluster,spilo-role=replica", annotations)) + annotations = { + "annotation.key": "value", + "foo": "bar", + "alice": "bob" + } - except timeout_decorator.TimeoutError: - print('Operator log: {}'.format(k8s.get_operator_log())) - raise + self.eventuallyTrue(lambda: k8s.check_service_annotations("cluster-name=acid-minimal-cluster,spilo-role=master", annotations), "Wrong annotations") + self.eventuallyTrue(lambda: k8s.check_service_annotations("cluster-name=acid-minimal-cluster,spilo-role=replica", annotations), "Wrong annotations") # clean up unpatch_custom_service_annotations = { @@ -670,42 +722,43 @@ class EndToEndTestCase(unittest.TestCase): } k8s.update_config(patch_sset_propagate_annotations) - try: - pg_crd_annotations = { - "metadata": { - "annotations": { - "deployment-time": "2020-04-30 12:00:00", - "downscaler/downtime_replicas": "0", - }, - } + pg_crd_annotations = { + "metadata": { + "annotations": { + "deployment-time": "2020-04-30 12:00:00", + "downscaler/downtime_replicas": "0", + }, } - k8s.api.custom_objects_api.patch_namespaced_custom_object( - "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_crd_annotations) + } + k8s.api.custom_objects_api.patch_namespaced_custom_object( + "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_crd_annotations) + + annotations = { + "deployment-time": "2020-04-30 12:00:00", + "downscaler/downtime_replicas": "0", + } + + self.eventuallyTrue(lambda: k8s.check_statefulset_annotations(cluster_label, annotations), "Annotations missing") - # wait a little before proceeding - time.sleep(60) - annotations = { - "deployment-time": "2020-04-30 12:00:00", - "downscaler/downtime_replicas": "0", - } - self.assertTrue(k8s.check_statefulset_annotations(cluster_label, annotations)) - - except timeout_decorator.TimeoutError: - print('Operator log: {}'.format(k8s.get_operator_log())) - raise @timeout_decorator.timeout(TEST_TIMEOUT_SEC) - def test_taint_based_eviction(self): + @unittest.skip("Skipping this test until fixed") + def test_zzz_taint_based_eviction(self): ''' Add taint "postgres=:NoExecute" to node with master. This must cause a failover. ''' k8s = self.k8s cluster_label = 'application=spilo,cluster-name=acid-minimal-cluster' + # verify we are in good state from potential previous tests + self.eventuallyEqual(lambda: k8s.count_running_pods(), 2, "No 2 pods running") + self.eventuallyEqual(lambda: len(k8s.get_patroni_running_members("acid-minimal-cluster-0")), 2, "Postgres status did not enter running") + # get nodes of master and replica(s) (expected target of new master) - current_master_node, current_replica_nodes = k8s.get_pg_nodes(cluster_label) - num_replicas = len(current_replica_nodes) - failover_targets = self.get_failover_targets(current_master_node, current_replica_nodes) + master_nodes, replica_nodes = k8s.get_cluster_nodes() + + self.assertNotEqual(master_nodes, []) + self.assertNotEqual(replica_nodes, []) # taint node with postgres=:NoExecute to force failover body = { @@ -719,32 +772,29 @@ class EndToEndTestCase(unittest.TestCase): } } - try: - # patch node and test if master is failing over to one of the expected nodes - k8s.api.core_v1.patch_node(current_master_node, body) - new_master_node, new_replica_nodes = self.assert_failover( - current_master_node, num_replicas, failover_targets, cluster_label) + k8s.api.core_v1.patch_node(master_nodes[0], body) + self.eventuallyTrue(lambda: k8s.get_cluster_nodes()[0], replica_nodes) + self.assertNotEqual(lambda: k8s.get_cluster_nodes()[0], master_nodes) - # add toleration to pods - patch_toleration_config = { - "data": { - "toleration": "key:postgres,operator:Exists,effect:NoExecute" - } + # add toleration to pods + patch_toleration_config = { + "data": { + "toleration": "key:postgres,operator:Exists,effect:NoExecute" } - k8s.update_config(patch_toleration_config) + } + + k8s.update_config(patch_toleration_config, step="allow tainted nodes") - # wait a little before proceeding with the pod distribution test - time.sleep(30) + self.eventuallyEqual(lambda: k8s.count_running_pods(), 2, "No 2 pods running") + self.eventuallyEqual(lambda: len(k8s.get_patroni_running_members("acid-minimal-cluster-0")), 2, "Postgres status did not enter running") - # toggle pod anti affinity to move replica away from master node - self.assert_distributed_pods(new_master_node, new_replica_nodes, cluster_label) - - except timeout_decorator.TimeoutError: - print('Operator log: {}'.format(k8s.get_operator_log())) - raise + # toggle pod anti affinity to move replica away from master node + nm, new_replica_nodes = k8s.get_cluster_nodes() + new_master_node = nm[0] + self.assert_distributed_pods(new_master_node, new_replica_nodes, cluster_label) @timeout_decorator.timeout(TEST_TIMEOUT_SEC) - def test_x_cluster_deletion(self): + def test_zzzz_cluster_deletion(self): ''' Test deletion with configured protection ''' @@ -764,6 +814,7 @@ class EndToEndTestCase(unittest.TestCase): # this delete attempt should be omitted because of missing annotations k8s.api.custom_objects_api.delete_namespaced_custom_object( "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster") + time.sleep(5) # check that pods and services are still there k8s.wait_for_running_pods(cluster_label, 2) @@ -789,7 +840,7 @@ class EndToEndTestCase(unittest.TestCase): "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_patch_delete_annotations) # wait a little before proceeding - time.sleep(10) + time.sleep(20) k8s.wait_for_running_pods(cluster_label, 2) k8s.wait_for_service(cluster_label) @@ -797,22 +848,31 @@ class EndToEndTestCase(unittest.TestCase): k8s.api.custom_objects_api.delete_namespaced_custom_object( "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster") - # wait until cluster is deleted - time.sleep(120) + self.eventuallyEqual(lambda: len(k8s.api.custom_objects_api.list_namespaced_custom_object( + "acid.zalan.do", "v1", "default", "postgresqls", label_selector="cluster-name=acid-minimal-cluster")["items"]), 0, "Manifest not deleted") # check if everything has been deleted - self.assertEqual(0, k8s.count_pods_with_label(cluster_label)) - self.assertEqual(0, k8s.count_services_with_label(cluster_label)) - self.assertEqual(0, k8s.count_endpoints_with_label(cluster_label)) - self.assertEqual(0, k8s.count_statefulsets_with_label(cluster_label)) - self.assertEqual(0, k8s.count_deployments_with_label(cluster_label)) - self.assertEqual(0, k8s.count_pdbs_with_label(cluster_label)) - self.assertEqual(0, k8s.count_secrets_with_label(cluster_label)) + self.eventuallyEqual(lambda: k8s.count_pods_with_label(cluster_label), 0, "Pods not deleted") + self.eventuallyEqual(lambda: k8s.count_services_with_label(cluster_label), 0, "Service not deleted") + self.eventuallyEqual(lambda: k8s.count_endpoints_with_label(cluster_label), 0, "Endpoints not deleted") + self.eventuallyEqual(lambda: k8s.count_statefulsets_with_label(cluster_label), 0, "Statefulset not deleted") + self.eventuallyEqual(lambda: k8s.count_deployments_with_label(cluster_label), 0, "Deployments not deleted") + self.eventuallyEqual(lambda: k8s.count_pdbs_with_label(cluster_label), 0, "Pod disruption budget not deleted") + self.eventuallyEqual(lambda: k8s.count_secrets_with_label(cluster_label), 0, "Secrets not deleted") except timeout_decorator.TimeoutError: print('Operator log: {}'.format(k8s.get_operator_log())) raise + #reset configmap + patch_delete_annotations = { + "data": { + "delete_annotation_date_key": "", + "delete_annotation_name_key": "" + } + } + k8s.update_config(patch_delete_annotations) + def get_failover_targets(self, master_node, replica_nodes): ''' If all pods live on the same node, failover will happen to other worker(s) @@ -871,7 +931,7 @@ class EndToEndTestCase(unittest.TestCase): "enable_pod_antiaffinity": "true" } } - k8s.update_config(patch_enable_antiaffinity) + k8s.update_config(patch_enable_antiaffinity, "enable antiaffinity") self.assert_failover(master_node, len(replica_nodes), failover_targets, cluster_label) # now disable pod anti affintiy again which will cause yet another failover @@ -880,229 +940,11 @@ class EndToEndTestCase(unittest.TestCase): "enable_pod_antiaffinity": "false" } } - k8s.update_config(patch_disable_antiaffinity) + k8s.update_config(patch_disable_antiaffinity, "disalbe antiaffinity") k8s.wait_for_pod_start('spilo-role=master') k8s.wait_for_pod_start('spilo-role=replica') - - -class K8sApi: - - def __init__(self): - - # https://github.com/kubernetes-client/python/issues/309 - warnings.simplefilter("ignore", ResourceWarning) - - self.config = config.load_kube_config() - self.k8s_client = client.ApiClient() - - self.core_v1 = client.CoreV1Api() - self.apps_v1 = client.AppsV1Api() - self.batch_v1_beta1 = client.BatchV1beta1Api() - self.custom_objects_api = client.CustomObjectsApi() - self.policy_v1_beta1 = client.PolicyV1beta1Api() - self.storage_v1_api = client.StorageV1Api() - - -class K8s: - ''' - Wraps around K8s api client and helper methods. - ''' - - RETRY_TIMEOUT_SEC = 10 - - def __init__(self): - self.api = K8sApi() - - def get_pg_nodes(self, pg_cluster_name, namespace='default'): - master_pod_node = '' - replica_pod_nodes = [] - podsList = self.api.core_v1.list_namespaced_pod(namespace, label_selector=pg_cluster_name) - for pod in podsList.items: - if pod.metadata.labels.get('spilo-role') == 'master': - master_pod_node = pod.spec.node_name - elif pod.metadata.labels.get('spilo-role') == 'replica': - replica_pod_nodes.append(pod.spec.node_name) - - return master_pod_node, replica_pod_nodes - - def get_cluster_leader_pod(self, pg_cluster_name, namespace='default'): - labels = { - 'application': 'spilo', - 'cluster-name': pg_cluster_name, - 'spilo-role': 'master', - } - - pods = self.api.core_v1.list_namespaced_pod( - namespace, label_selector=to_selector(labels)).items - - if pods: - return pods[0] - - def wait_for_operator_pod_start(self): - self. wait_for_pod_start("name=postgres-operator") - # HACK operator must register CRD and/or Sync existing PG clusters after start up - # for local execution ~ 10 seconds suffices - time.sleep(60) - - def get_operator_pod(self): - pods = self.api.core_v1.list_namespaced_pod( - 'default', label_selector='name=postgres-operator' - ).items - - if pods: - return pods[0] - - return None - - def get_operator_log(self): - operator_pod = self.get_operator_pod() - pod_name = operator_pod.metadata.name - return self.api.core_v1.read_namespaced_pod_log( - name=pod_name, - namespace='default' - ) - - def wait_for_pod_start(self, pod_labels, namespace='default'): - pod_phase = 'No pod running' - while pod_phase != 'Running': - pods = self.api.core_v1.list_namespaced_pod(namespace, label_selector=pod_labels).items - if pods: - pod_phase = pods[0].status.phase - - time.sleep(self.RETRY_TIMEOUT_SEC) - - def get_service_type(self, svc_labels, namespace='default'): - svc_type = '' - svcs = self.api.core_v1.list_namespaced_service(namespace, label_selector=svc_labels, limit=1).items - for svc in svcs: - svc_type = svc.spec.type - return svc_type - - def check_service_annotations(self, svc_labels, annotations, namespace='default'): - svcs = self.api.core_v1.list_namespaced_service(namespace, label_selector=svc_labels, limit=1).items - for svc in svcs: - for key, value in annotations.items(): - if key not in svc.metadata.annotations or svc.metadata.annotations[key] != value: - print("Expected key {} not found in annotations {}".format(key, svc.metadata.annotation)) - return False return True - def check_statefulset_annotations(self, sset_labels, annotations, namespace='default'): - ssets = self.api.apps_v1.list_namespaced_stateful_set(namespace, label_selector=sset_labels, limit=1).items - for sset in ssets: - for key, value in annotations.items(): - if key not in sset.metadata.annotations or sset.metadata.annotations[key] != value: - print("Expected key {} not found in annotations {}".format(key, sset.metadata.annotation)) - return False - return True - - def wait_for_pg_to_scale(self, number_of_instances, namespace='default'): - - body = { - "spec": { - "numberOfInstances": number_of_instances - } - } - _ = self.api.custom_objects_api.patch_namespaced_custom_object( - "acid.zalan.do", "v1", namespace, "postgresqls", "acid-minimal-cluster", body) - - labels = 'application=spilo,cluster-name=acid-minimal-cluster' - while self.count_pods_with_label(labels) != number_of_instances: - time.sleep(self.RETRY_TIMEOUT_SEC) - - def wait_for_running_pods(self, labels, number, namespace=''): - while self.count_pods_with_label(labels) != number: - time.sleep(self.RETRY_TIMEOUT_SEC) - - def wait_for_pods_to_stop(self, labels, namespace=''): - while self.count_pods_with_label(labels) != 0: - time.sleep(self.RETRY_TIMEOUT_SEC) - - def wait_for_service(self, labels, namespace='default'): - def get_services(): - return self.api.core_v1.list_namespaced_service( - namespace, label_selector=labels - ).items - - while not get_services(): - time.sleep(self.RETRY_TIMEOUT_SEC) - - def count_pods_with_label(self, labels, namespace='default'): - return len(self.api.core_v1.list_namespaced_pod(namespace, label_selector=labels).items) - - def count_services_with_label(self, labels, namespace='default'): - return len(self.api.core_v1.list_namespaced_service(namespace, label_selector=labels).items) - - def count_endpoints_with_label(self, labels, namespace='default'): - return len(self.api.core_v1.list_namespaced_endpoints(namespace, label_selector=labels).items) - - def count_secrets_with_label(self, labels, namespace='default'): - return len(self.api.core_v1.list_namespaced_secret(namespace, label_selector=labels).items) - - def count_statefulsets_with_label(self, labels, namespace='default'): - return len(self.api.apps_v1.list_namespaced_stateful_set(namespace, label_selector=labels).items) - - def count_deployments_with_label(self, labels, namespace='default'): - return len(self.api.apps_v1.list_namespaced_deployment(namespace, label_selector=labels).items) - - def count_pdbs_with_label(self, labels, namespace='default'): - return len(self.api.policy_v1_beta1.list_namespaced_pod_disruption_budget( - namespace, label_selector=labels).items) - - def wait_for_pod_failover(self, failover_targets, labels, namespace='default'): - pod_phase = 'Failing over' - new_pod_node = '' - - while (pod_phase != 'Running') or (new_pod_node not in failover_targets): - pods = self.api.core_v1.list_namespaced_pod(namespace, label_selector=labels).items - if pods: - new_pod_node = pods[0].spec.node_name - pod_phase = pods[0].status.phase - time.sleep(self.RETRY_TIMEOUT_SEC) - - def get_logical_backup_job(self, namespace='default'): - return self.api.batch_v1_beta1.list_namespaced_cron_job(namespace, label_selector="application=spilo") - - def wait_for_logical_backup_job(self, expected_num_of_jobs): - while (len(self.get_logical_backup_job().items) != expected_num_of_jobs): - time.sleep(self.RETRY_TIMEOUT_SEC) - - def wait_for_logical_backup_job_deletion(self): - self.wait_for_logical_backup_job(expected_num_of_jobs=0) - - def wait_for_logical_backup_job_creation(self): - self.wait_for_logical_backup_job(expected_num_of_jobs=1) - - def delete_operator_pod(self): - operator_pod = self.api.core_v1.list_namespaced_pod( - 'default', label_selector="name=postgres-operator").items[0].metadata.name - self.api.core_v1.delete_namespaced_pod(operator_pod, "default") # restart reloads the conf - self.wait_for_operator_pod_start() - - def update_config(self, config_map_patch): - self.api.core_v1.patch_namespaced_config_map("postgres-operator", "default", config_map_patch) - self.delete_operator_pod() - - def create_with_kubectl(self, path): - return subprocess.run( - ["kubectl", "create", "-f", path], - stdout=subprocess.PIPE, - stderr=subprocess.PIPE) - - def exec_with_kubectl(self, pod, cmd): - return subprocess.run(["./exec.sh", pod, cmd], - stdout=subprocess.PIPE, - stderr=subprocess.PIPE) - - def get_effective_pod_image(self, pod_name, namespace='default'): - ''' - Get the Spilo image pod currently uses. In case of lazy rolling updates - it may differ from the one specified in the stateful set. - ''' - pod = self.api.core_v1.list_namespaced_pod( - namespace, label_selector="statefulset.kubernetes.io/pod-name=" + pod_name) - return pod.items[0].spec.containers[0].image - if __name__ == '__main__': unittest.main() diff --git a/manifests/postgres-operator.yaml b/manifests/postgres-operator.yaml index e7a604a2d..16719a5d9 100644 --- a/manifests/postgres-operator.yaml +++ b/manifests/postgres-operator.yaml @@ -4,6 +4,8 @@ metadata: name: postgres-operator spec: replicas: 1 + strategy: + type: "Recreate" selector: matchLabels: name: postgres-operator diff --git a/pkg/cluster/cluster.go b/pkg/cluster/cluster.go index 6aa1f6fa4..8636083c2 100644 --- a/pkg/cluster/cluster.go +++ b/pkg/cluster/cluster.go @@ -371,11 +371,11 @@ func (c *Cluster) compareStatefulSetWith(statefulSet *appsv1.StatefulSet) *compa //TODO: improve me if *c.Statefulset.Spec.Replicas != *statefulSet.Spec.Replicas { match = false - reasons = append(reasons, "new statefulset's number of replicas doesn't match the current one") + reasons = append(reasons, "new statefulset's number of replicas does not match the current one") } if !reflect.DeepEqual(c.Statefulset.Annotations, statefulSet.Annotations) { match = false - reasons = append(reasons, "new statefulset's annotations doesn't match the current one") + reasons = append(reasons, "new statefulset's annotations does not match the current one") } needsRollUpdate, reasons = c.compareContainers("initContainers", c.Statefulset.Spec.Template.Spec.InitContainers, statefulSet.Spec.Template.Spec.InitContainers, needsRollUpdate, reasons) @@ -392,24 +392,24 @@ func (c *Cluster) compareStatefulSetWith(statefulSet *appsv1.StatefulSet) *compa if c.Statefulset.Spec.Template.Spec.ServiceAccountName != statefulSet.Spec.Template.Spec.ServiceAccountName { needsReplace = true needsRollUpdate = true - reasons = append(reasons, "new statefulset's serviceAccountName service account name doesn't match the current one") + reasons = append(reasons, "new statefulset's serviceAccountName service account name does not match the current one") } if *c.Statefulset.Spec.Template.Spec.TerminationGracePeriodSeconds != *statefulSet.Spec.Template.Spec.TerminationGracePeriodSeconds { needsReplace = true needsRollUpdate = true - reasons = append(reasons, "new statefulset's terminationGracePeriodSeconds doesn't match the current one") + reasons = append(reasons, "new statefulset's terminationGracePeriodSeconds does not match the current one") } if !reflect.DeepEqual(c.Statefulset.Spec.Template.Spec.Affinity, statefulSet.Spec.Template.Spec.Affinity) { needsReplace = true needsRollUpdate = true - reasons = append(reasons, "new statefulset's pod affinity doesn't match the current one") + reasons = append(reasons, "new statefulset's pod affinity does not match the current one") } // Some generated fields like creationTimestamp make it not possible to use DeepCompare on Spec.Template.ObjectMeta if !reflect.DeepEqual(c.Statefulset.Spec.Template.Labels, statefulSet.Spec.Template.Labels) { needsReplace = true needsRollUpdate = true - reasons = append(reasons, "new statefulset's metadata labels doesn't match the current one") + reasons = append(reasons, "new statefulset's metadata labels does not match the current one") } if (c.Statefulset.Spec.Selector != nil) && (statefulSet.Spec.Selector != nil) { if !reflect.DeepEqual(c.Statefulset.Spec.Selector.MatchLabels, statefulSet.Spec.Selector.MatchLabels) { @@ -420,7 +420,7 @@ func (c *Cluster) compareStatefulSetWith(statefulSet *appsv1.StatefulSet) *compa return &compareStatefulsetResult{} } needsReplace = true - reasons = append(reasons, "new statefulset's selector doesn't match the current one") + reasons = append(reasons, "new statefulset's selector does not match the current one") } } @@ -434,7 +434,7 @@ func (c *Cluster) compareStatefulSetWith(statefulSet *appsv1.StatefulSet) *compa match = false needsReplace = true needsRollUpdate = true - reasons = append(reasons, "new statefulset's pod template security context in spec doesn't match the current one") + reasons = append(reasons, "new statefulset's pod template security context in spec does not match the current one") } if len(c.Statefulset.Spec.VolumeClaimTemplates) != len(statefulSet.Spec.VolumeClaimTemplates) { needsReplace = true @@ -445,17 +445,17 @@ func (c *Cluster) compareStatefulSetWith(statefulSet *appsv1.StatefulSet) *compa // Some generated fields like creationTimestamp make it not possible to use DeepCompare on ObjectMeta if name != statefulSet.Spec.VolumeClaimTemplates[i].Name { needsReplace = true - reasons = append(reasons, fmt.Sprintf("new statefulset's name for volume %d doesn't match the current one", i)) + reasons = append(reasons, fmt.Sprintf("new statefulset's name for volume %d does not match the current one", i)) continue } if !reflect.DeepEqual(c.Statefulset.Spec.VolumeClaimTemplates[i].Annotations, statefulSet.Spec.VolumeClaimTemplates[i].Annotations) { needsReplace = true - reasons = append(reasons, fmt.Sprintf("new statefulset's annotations for volume %q doesn't match the current one", name)) + reasons = append(reasons, fmt.Sprintf("new statefulset's annotations for volume %q does not match the current one", name)) } if !reflect.DeepEqual(c.Statefulset.Spec.VolumeClaimTemplates[i].Spec, statefulSet.Spec.VolumeClaimTemplates[i].Spec) { name := c.Statefulset.Spec.VolumeClaimTemplates[i].Name needsReplace = true - reasons = append(reasons, fmt.Sprintf("new statefulset's volumeClaimTemplates specification for volume %q doesn't match the current one", name)) + reasons = append(reasons, fmt.Sprintf("new statefulset's volumeClaimTemplates specification for volume %q does not match the current one", name)) } } @@ -465,14 +465,14 @@ func (c *Cluster) compareStatefulSetWith(statefulSet *appsv1.StatefulSet) *compa match = false needsReplace = true needsRollUpdate = true - reasons = append(reasons, "new statefulset's pod priority class in spec doesn't match the current one") + reasons = append(reasons, "new statefulset's pod priority class in spec does not match the current one") } // lazy Spilo update: modify the image in the statefulset itself but let its pods run with the old image // until they are re-created for other reasons, for example node rotation if c.OpConfig.EnableLazySpiloUpgrade && !reflect.DeepEqual(c.Statefulset.Spec.Template.Spec.Containers[0].Image, statefulSet.Spec.Template.Spec.Containers[0].Image) { needsReplace = true - reasons = append(reasons, "lazy Spilo update: new statefulset's pod image doesn't match the current one") + reasons = append(reasons, "lazy Spilo update: new statefulset's pod image does not match the current one") } if needsRollUpdate || needsReplace { @@ -582,7 +582,7 @@ func (c *Cluster) enforceMinResourceLimits(spec *acidv1.PostgresSpec) error { return fmt.Errorf("could not compare defined CPU limit %s with configured minimum value %s: %v", cpuLimit, minCPULimit, err) } if isSmaller { - c.logger.Warningf("defined CPU limit %s is below required minimum %s and will be set to it", cpuLimit, minCPULimit) + c.logger.Warningf("defined CPU limit %s is below required minimum %s and will be increased", cpuLimit, minCPULimit) c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeWarning, "ResourceLimits", "defined CPU limit %s is below required minimum %s and will be set to it", cpuLimit, minCPULimit) spec.Resources.ResourceLimits.CPU = minCPULimit } @@ -595,7 +595,7 @@ func (c *Cluster) enforceMinResourceLimits(spec *acidv1.PostgresSpec) error { return fmt.Errorf("could not compare defined memory limit %s with configured minimum value %s: %v", memoryLimit, minMemoryLimit, err) } if isSmaller { - c.logger.Warningf("defined memory limit %s is below required minimum %s and will be set to it", memoryLimit, minMemoryLimit) + c.logger.Warningf("defined memory limit %s is below required minimum %s and will be increased", memoryLimit, minMemoryLimit) c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeWarning, "ResourceLimits", "defined memory limit %s is below required minimum %s and will be set to it", memoryLimit, minMemoryLimit) spec.Resources.ResourceLimits.Memory = minMemoryLimit } diff --git a/pkg/cluster/database.go b/pkg/cluster/database.go index f51b58a89..57758c6aa 100644 --- a/pkg/cluster/database.go +++ b/pkg/cluster/database.go @@ -527,7 +527,7 @@ func (c *Cluster) installLookupFunction(poolerSchema, poolerUser string) error { continue } - c.logger.Infof("Install pooler lookup function into %s", dbname) + c.logger.Infof("install pooler lookup function into database '%s'", dbname) // golang sql will do retries couple of times if pq driver reports // connections issues (driver.ErrBadConn), but since our query is diff --git a/pkg/cluster/k8sres.go b/pkg/cluster/k8sres.go index ba22f24c3..9a328b7df 100644 --- a/pkg/cluster/k8sres.go +++ b/pkg/cluster/k8sres.go @@ -1157,7 +1157,9 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef } // generate the spilo container - c.logger.Debugf("Generating Spilo container, environment variables: %v", spiloEnvVars) + c.logger.Debugf("Generating Spilo container, environment variables") + c.logger.Debugf("%v", spiloEnvVars) + spiloContainer := generateContainer(c.containerName(), &effectiveDockerImage, resourceRequirements, @@ -2055,7 +2057,8 @@ func (c *Cluster) generateLogicalBackupPodEnvVars() []v1.EnvVar { envVars = append(envVars, v1.EnvVar{Name: "AWS_SECRET_ACCESS_KEY", Value: c.OpConfig.LogicalBackup.LogicalBackupS3SecretAccessKey}) } - c.logger.Debugf("Generated logical backup env vars %v", envVars) + c.logger.Debugf("Generated logical backup env vars") + c.logger.Debugf("%v", envVars) return envVars } diff --git a/pkg/cluster/pod.go b/pkg/cluster/pod.go index 44b2222e0..a13eb479c 100644 --- a/pkg/cluster/pod.go +++ b/pkg/cluster/pod.go @@ -304,9 +304,16 @@ func (c *Cluster) isSafeToRecreatePods(pods *v1.PodList) bool { after this check succeeds but before a pod is re-created */ + for _, pod := range pods.Items { + c.logger.Debugf("name=%s phase=%s ip=%s", pod.Name, pod.Status.Phase, pod.Status.PodIP) + } + for _, pod := range pods.Items { state, err := c.patroni.GetPatroniMemberState(&pod) - if err != nil || state == "creating replica" { + if err != nil { + c.logger.Errorf("failed to get Patroni state for pod: %s", err) + return false + } else if state == "creating replica" { c.logger.Warningf("cannot re-create replica %s: it is currently being initialized", pod.Name) return false } diff --git a/pkg/cluster/resources.go b/pkg/cluster/resources.go index a9d13c124..4fb2c13c6 100644 --- a/pkg/cluster/resources.go +++ b/pkg/cluster/resources.go @@ -293,7 +293,7 @@ func (c *Cluster) preScaleDown(newStatefulSet *appsv1.StatefulSet) error { // setRollingUpdateFlagForStatefulSet sets the indicator or the rolling update requirement // in the StatefulSet annotation. -func (c *Cluster) setRollingUpdateFlagForStatefulSet(sset *appsv1.StatefulSet, val bool) { +func (c *Cluster) setRollingUpdateFlagForStatefulSet(sset *appsv1.StatefulSet, val bool, msg string) { anno := sset.GetAnnotations() if anno == nil { anno = make(map[string]string) @@ -301,13 +301,13 @@ func (c *Cluster) setRollingUpdateFlagForStatefulSet(sset *appsv1.StatefulSet, v anno[rollingUpdateStatefulsetAnnotationKey] = strconv.FormatBool(val) sset.SetAnnotations(anno) - c.logger.Debugf("statefulset's rolling update annotation has been set to %t", val) + c.logger.Debugf("set statefulset's rolling update annotation to %t: caller/reason %s", val, msg) } // applyRollingUpdateFlagforStatefulSet sets the rolling update flag for the cluster's StatefulSet // and applies that setting to the actual running cluster. func (c *Cluster) applyRollingUpdateFlagforStatefulSet(val bool) error { - c.setRollingUpdateFlagForStatefulSet(c.Statefulset, val) + c.setRollingUpdateFlagForStatefulSet(c.Statefulset, val, "applyRollingUpdateFlag") sset, err := c.updateStatefulSetAnnotations(c.Statefulset.GetAnnotations()) if err != nil { return err @@ -359,14 +359,13 @@ func (c *Cluster) mergeRollingUpdateFlagUsingCache(runningStatefulSet *appsv1.St podsRollingUpdateRequired = false } else { c.logger.Infof("found a statefulset with an unfinished rolling update of the pods") - } } return podsRollingUpdateRequired } func (c *Cluster) updateStatefulSetAnnotations(annotations map[string]string) (*appsv1.StatefulSet, error) { - c.logger.Debugf("updating statefulset annotations") + c.logger.Debugf("patching statefulset annotations") patchData, err := metaAnnotationsPatch(annotations) if err != nil { return nil, fmt.Errorf("could not form patch for the statefulset metadata: %v", err) diff --git a/pkg/cluster/sync.go b/pkg/cluster/sync.go index 2a3959b1a..dced69461 100644 --- a/pkg/cluster/sync.go +++ b/pkg/cluster/sync.go @@ -348,13 +348,13 @@ func (c *Cluster) syncStatefulSet() error { if err != nil { return fmt.Errorf("could not generate statefulset: %v", err) } - c.setRollingUpdateFlagForStatefulSet(desiredSS, podsRollingUpdateRequired) + c.setRollingUpdateFlagForStatefulSet(desiredSS, podsRollingUpdateRequired, "from cache") cmp := c.compareStatefulSetWith(desiredSS) if !cmp.match { if cmp.rollingUpdate && !podsRollingUpdateRequired { podsRollingUpdateRequired = true - c.setRollingUpdateFlagForStatefulSet(desiredSS, podsRollingUpdateRequired) + c.setRollingUpdateFlagForStatefulSet(desiredSS, podsRollingUpdateRequired, "statefulset changes") } c.logStatefulSetChanges(c.Statefulset, desiredSS, false, cmp.reasons) @@ -497,11 +497,11 @@ func (c *Cluster) syncSecrets() error { return fmt.Errorf("could not get current secret: %v", err) } if secretUsername != string(secret.Data["username"]) { - c.logger.Warningf("secret %q does not contain the role %q", secretSpec.Name, secretUsername) + c.logger.Warningf("secret %s does not contain the role %q", secretSpec.Name, secretUsername) continue } c.Secrets[secret.UID] = secret - c.logger.Debugf("secret %q already exists, fetching its password", util.NameFromMeta(secret.ObjectMeta)) + c.logger.Debugf("secret %s already exists, fetching its password", util.NameFromMeta(secret.ObjectMeta)) if secretUsername == c.systemUsers[constants.SuperuserKeyName].Name { secretUsername = constants.SuperuserKeyName userMap = c.systemUsers @@ -804,7 +804,7 @@ func (c *Cluster) syncLogicalBackupJob() error { return fmt.Errorf("could not generate the desired logical backup job state: %v", err) } if match, reason := k8sutil.SameLogicalBackupJob(job, desiredJob); !match { - c.logger.Infof("logical job %q is not in the desired state and needs to be updated", + c.logger.Infof("logical job %s is not in the desired state and needs to be updated", c.getLogicalBackupJobName(), ) if reason != "" { @@ -825,12 +825,12 @@ func (c *Cluster) syncLogicalBackupJob() error { c.logger.Info("could not find the cluster's logical backup job") if err = c.createLogicalBackupJob(); err == nil { - c.logger.Infof("created missing logical backup job %q", jobName) + c.logger.Infof("created missing logical backup job %s", jobName) } else { if !k8sutil.ResourceAlreadyExists(err) { return fmt.Errorf("could not create missing logical backup job: %v", err) } - c.logger.Infof("logical backup job %q already exists", jobName) + c.logger.Infof("logical backup job %s already exists", jobName) if _, err = c.KubeClient.CronJobsGetter.CronJobs(c.Namespace).Get(context.TODO(), jobName, metav1.GetOptions{}); err != nil { return fmt.Errorf("could not fetch existing logical backup job: %v", err) } @@ -975,7 +975,7 @@ func (c *Cluster) syncConnectionPoolerWorker(oldSpec, newSpec *acidv1.Postgresql newConnectionPooler = &acidv1.ConnectionPooler{} } - c.logger.Infof("Old: %+v, New %+v", oldConnectionPooler, newConnectionPooler) + logNiceDiff(c.logger, oldConnectionPooler, newConnectionPooler) specSync, specReason := c.needSyncConnectionPoolerSpecs(oldConnectionPooler, newConnectionPooler) defaultsSync, defaultsReason := c.needSyncConnectionPoolerDefaults(newConnectionPooler, deployment) diff --git a/pkg/cluster/util.go b/pkg/cluster/util.go index 7559ce3d4..d227ce155 100644 --- a/pkg/cluster/util.go +++ b/pkg/cluster/util.go @@ -18,12 +18,14 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" + "github.com/sirupsen/logrus" acidzalando "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do" acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1" "github.com/zalando/postgres-operator/pkg/spec" "github.com/zalando/postgres-operator/pkg/util" "github.com/zalando/postgres-operator/pkg/util/constants" "github.com/zalando/postgres-operator/pkg/util/k8sutil" + "github.com/zalando/postgres-operator/pkg/util/nicediff" "github.com/zalando/postgres-operator/pkg/util/retryutil" ) @@ -166,40 +168,59 @@ func (c *Cluster) logPDBChanges(old, new *policybeta1.PodDisruptionBudget, isUpd ) } - c.logger.Debugf("diff\n%s\n", util.PrettyDiff(old.Spec, new.Spec)) + logNiceDiff(c.logger, old.Spec, new.Spec) +} + +func logNiceDiff(log *logrus.Entry, old, new interface{}) { + o, erro := json.MarshalIndent(old, "", " ") + n, errn := json.MarshalIndent(new, "", " ") + + if erro != nil || errn != nil { + panic("could not marshal API objects, should not happen") + } + + nice := nicediff.Diff(string(o), string(n), true) + for _, s := range strings.Split(nice, "\n") { + // " is not needed in the value to understand + log.Debugf(strings.ReplaceAll(s, "\"", "")) + } } func (c *Cluster) logStatefulSetChanges(old, new *appsv1.StatefulSet, isUpdate bool, reasons []string) { if isUpdate { - c.logger.Infof("statefulset %q has been changed", util.NameFromMeta(old.ObjectMeta)) + c.logger.Infof("statefulset %s has been changed", util.NameFromMeta(old.ObjectMeta)) } else { - c.logger.Infof("statefulset %q is not in the desired state and needs to be updated", + c.logger.Infof("statefulset %s is not in the desired state and needs to be updated", util.NameFromMeta(old.ObjectMeta), ) } + + logNiceDiff(c.logger, old.Spec, new.Spec) + if !reflect.DeepEqual(old.Annotations, new.Annotations) { - c.logger.Debugf("metadata.annotation diff\n%s\n", util.PrettyDiff(old.Annotations, new.Annotations)) + c.logger.Debugf("metadata.annotation are different") + logNiceDiff(c.logger, old.Annotations, new.Annotations) } - c.logger.Debugf("spec diff between old and new statefulsets: \n%s\n", util.PrettyDiff(old.Spec, new.Spec)) if len(reasons) > 0 { for _, reason := range reasons { - c.logger.Infof("reason: %q", reason) + c.logger.Infof("reason: %s", reason) } } } func (c *Cluster) logServiceChanges(role PostgresRole, old, new *v1.Service, isUpdate bool, reason string) { if isUpdate { - c.logger.Infof("%s service %q has been changed", + c.logger.Infof("%s service %s has been changed", role, util.NameFromMeta(old.ObjectMeta), ) } else { - c.logger.Infof("%s service %q is not in the desired state and needs to be updated", + c.logger.Infof("%s service %s is not in the desired state and needs to be updated", role, util.NameFromMeta(old.ObjectMeta), ) } - c.logger.Debugf("diff\n%s\n", util.PrettyDiff(old.Spec, new.Spec)) + + logNiceDiff(c.logger, old.Spec, new.Spec) if reason != "" { c.logger.Infof("reason: %s", reason) @@ -208,7 +229,7 @@ func (c *Cluster) logServiceChanges(role PostgresRole, old, new *v1.Service, isU func (c *Cluster) logVolumeChanges(old, new acidv1.Volume) { c.logger.Infof("volume specification has been changed") - c.logger.Debugf("diff\n%s\n", util.PrettyDiff(old, new)) + logNiceDiff(c.logger, old, new) } func (c *Cluster) getTeamMembers(teamID string) ([]string, error) { diff --git a/pkg/controller/controller.go b/pkg/controller/controller.go index cc08f1587..3442bfcfe 100644 --- a/pkg/controller/controller.go +++ b/pkg/controller/controller.go @@ -1,9 +1,12 @@ package controller import ( + "bytes" "context" + "encoding/json" "fmt" "os" + "strings" "sync" "time" @@ -73,6 +76,10 @@ func NewController(controllerConfig *spec.ControllerConfig, controllerId string) logger := logrus.New() if controllerConfig.EnableJsonLogging { logger.SetFormatter(&logrus.JSONFormatter{}) + } else { + if os.Getenv("LOG_NOQUOTE") != "" { + logger.SetFormatter(&logrus.TextFormatter{PadLevelText: true, DisableQuote: true}) + } } var myComponentName = "postgres-operator" @@ -81,7 +88,10 @@ func NewController(controllerConfig *spec.ControllerConfig, controllerId string) } eventBroadcaster := record.NewBroadcaster() - eventBroadcaster.StartLogging(logger.Infof) + + // disabling the sending of events also to the logoutput + // the operator currently duplicates a lot of log entries with this setup + // eventBroadcaster.StartLogging(logger.Infof) recorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: myComponentName}) c := &Controller{ @@ -190,10 +200,18 @@ func (c *Controller) warnOnDeprecatedOperatorParameters() { } } +func compactValue(v string) string { + var compact bytes.Buffer + if err := json.Compact(&compact, []byte(v)); err != nil { + panic("Hard coded json strings broken!") + } + return compact.String() +} + func (c *Controller) initPodServiceAccount() { if c.opConfig.PodServiceAccountDefinition == "" { - c.opConfig.PodServiceAccountDefinition = ` + stringValue := ` { "apiVersion": "v1", "kind": "ServiceAccount", @@ -201,6 +219,9 @@ func (c *Controller) initPodServiceAccount() { "name": "postgres-pod" } }` + + c.opConfig.PodServiceAccountDefinition = compactValue(stringValue) + } // re-uses k8s internal parsing. See k8s client-go issue #193 for explanation @@ -230,7 +251,7 @@ func (c *Controller) initRoleBinding() { // operator binds it to the cluster role with sufficient privileges // we assume the role is created by the k8s administrator if c.opConfig.PodServiceAccountRoleBindingDefinition == "" { - c.opConfig.PodServiceAccountRoleBindingDefinition = fmt.Sprintf(` + stringValue := fmt.Sprintf(` { "apiVersion": "rbac.authorization.k8s.io/v1", "kind": "RoleBinding", @@ -249,6 +270,7 @@ func (c *Controller) initRoleBinding() { } ] }`, c.PodServiceAccount.Name, c.PodServiceAccount.Name, c.PodServiceAccount.Name) + c.opConfig.PodServiceAccountRoleBindingDefinition = compactValue(stringValue) } c.logger.Info("Parse role bindings") // re-uses k8s internal parsing. See k8s client-go issue #193 for explanation @@ -267,7 +289,14 @@ func (c *Controller) initRoleBinding() { } - // actual roles bindings are deployed at the time of Postgres/Spilo cluster creation + // actual roles bindings ar*logrus.Entrye deployed at the time of Postgres/Spilo cluster creation +} + +func logMultiLineConfig(log *logrus.Entry, config string) { + lines := strings.Split(config, "\n") + for _, l := range lines { + log.Infof("%s", l) + } } func (c *Controller) initController() { @@ -301,7 +330,7 @@ func (c *Controller) initController() { c.logger.Logger.Level = logrus.DebugLevel } - c.logger.Infof("config: %s", c.opConfig.MustMarshal()) + logMultiLineConfig(c.logger, c.opConfig.MustMarshal()) roleDefs := c.getInfrastructureRoleDefinitions() if infraRoles, err := c.getInfrastructureRoles(roleDefs); err != nil { diff --git a/pkg/controller/node.go b/pkg/controller/node.go index 4ffe7e26c..2836b4f7f 100644 --- a/pkg/controller/node.go +++ b/pkg/controller/node.go @@ -42,7 +42,7 @@ func (c *Controller) nodeAdd(obj interface{}) { return } - c.logger.Debugf("new node has been added: %q (%s)", util.NameFromMeta(node.ObjectMeta), node.Spec.ProviderID) + c.logger.Debugf("new node has been added: %s (%s)", util.NameFromMeta(node.ObjectMeta), node.Spec.ProviderID) // check if the node became not ready while the operator was down (otherwise we would have caught it in nodeUpdate) if !c.nodeIsReady(node) { diff --git a/pkg/controller/postgresql.go b/pkg/controller/postgresql.go index c7074c7e4..23e9356e6 100644 --- a/pkg/controller/postgresql.go +++ b/pkg/controller/postgresql.go @@ -225,7 +225,7 @@ func (c *Controller) processEvent(event ClusterEvent) { switch event.EventType { case EventAdd: if clusterFound { - lg.Debugf("cluster already exists") + lg.Debugf("Recieved add event for existing cluster") return } diff --git a/pkg/util/config/config.go b/pkg/util/config/config.go index 7a1ae8a41..35991248b 100644 --- a/pkg/util/config/config.go +++ b/pkg/util/config/config.go @@ -199,7 +199,7 @@ type Config struct { // MustMarshal marshals the config or panics func (c Config) MustMarshal() string { - b, err := json.MarshalIndent(c, "", "\t") + b, err := json.MarshalIndent(c, "", " ") if err != nil { panic(err) } diff --git a/pkg/util/nicediff/diff.go b/pkg/util/nicediff/diff.go new file mode 100644 index 000000000..e2793f2c7 --- /dev/null +++ b/pkg/util/nicediff/diff.go @@ -0,0 +1,191 @@ +// Copyright 2013 Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package diff implements a linewise diff algorithm. +package nicediff + +import ( + "fmt" + "strings" +) + +// Chunk represents a piece of the diff. A chunk will not have both added and +// deleted lines. Equal lines are always after any added or deleted lines. +// A Chunk may or may not have any lines in it, especially for the first or last +// chunk in a computation. +type Chunk struct { + Added []string + Deleted []string + Equal []string +} + +func (c *Chunk) empty() bool { + return len(c.Added) == 0 && len(c.Deleted) == 0 && len(c.Equal) == 0 +} + +// Diff returns a string containing a line-by-line unified diff of the linewise +// changes required to make A into B. Each line is prefixed with '+', '-', or +// ' ' to indicate if it should be added, removed, or is correct respectively. +func Diff(A, B string, skipEqual bool) string { + aLines := strings.Split(A, "\n") + bLines := strings.Split(B, "\n") + return Render(DiffChunks(aLines, bLines), skipEqual) +} + +// Render renders the slice of chunks into a representation that prefixes +// the lines with '+', '-', or ' ' depending on whether the line was added, +// removed, or equal (respectively). +func Render(chunks []Chunk, skipEqual bool) string { + buf := new(strings.Builder) + for _, c := range chunks { + for _, line := range c.Added { + fmt.Fprintf(buf, "+%s\n", line) + } + for _, line := range c.Deleted { + fmt.Fprintf(buf, "-%s\n", line) + } + if !skipEqual { + for _, line := range c.Equal { + fmt.Fprintf(buf, " %s\n", line) + } + } + } + return strings.TrimRight(buf.String(), "\n") +} + +// DiffChunks uses an O(D(N+M)) shortest-edit-script algorithm +// to compute the edits required from A to B and returns the +// edit chunks. +func DiffChunks(a, b []string) []Chunk { + // algorithm: http://www.xmailserver.org/diff2.pdf + + // We'll need these quantities a lot. + alen, blen := len(a), len(b) // M, N + + // At most, it will require len(a) deletions and len(b) additions + // to transform a into b. + maxPath := alen + blen // MAX + if maxPath == 0 { + // degenerate case: two empty lists are the same + return nil + } + + // Store the endpoint of the path for diagonals. + // We store only the a index, because the b index on any diagonal + // (which we know during the loop below) is aidx-diag. + // endpoint[maxPath] represents the 0 diagonal. + // + // Stated differently: + // endpoint[d] contains the aidx of a furthest reaching path in diagonal d + endpoint := make([]int, 2*maxPath+1) // V + + saved := make([][]int, 0, 8) // Vs + save := func() { + dup := make([]int, len(endpoint)) + copy(dup, endpoint) + saved = append(saved, dup) + } + + var editDistance int // D +dLoop: + for editDistance = 0; editDistance <= maxPath; editDistance++ { + // The 0 diag(onal) represents equality of a and b. Each diagonal to + // the left is numbered one lower, to the right is one higher, from + // -alen to +blen. Negative diagonals favor differences from a, + // positive diagonals favor differences from b. The edit distance to a + // diagonal d cannot be shorter than d itself. + // + // The iterations of this loop cover either odds or evens, but not both, + // If odd indices are inputs, even indices are outputs and vice versa. + for diag := -editDistance; diag <= editDistance; diag += 2 { // k + var aidx int // x + switch { + case diag == -editDistance: + // This is a new diagonal; copy from previous iter + aidx = endpoint[maxPath-editDistance+1] + 0 + case diag == editDistance: + // This is a new diagonal; copy from previous iter + aidx = endpoint[maxPath+editDistance-1] + 1 + case endpoint[maxPath+diag+1] > endpoint[maxPath+diag-1]: + // diagonal d+1 was farther along, so use that + aidx = endpoint[maxPath+diag+1] + 0 + default: + // diagonal d-1 was farther (or the same), so use that + aidx = endpoint[maxPath+diag-1] + 1 + } + // On diagonal d, we can compute bidx from aidx. + bidx := aidx - diag // y + // See how far we can go on this diagonal before we find a difference. + for aidx < alen && bidx < blen && a[aidx] == b[bidx] { + aidx++ + bidx++ + } + // Store the end of the current edit chain. + endpoint[maxPath+diag] = aidx + // If we've found the end of both inputs, we're done! + if aidx >= alen && bidx >= blen { + save() // save the final path + break dLoop + } + } + save() // save the current path + } + if editDistance == 0 { + return nil + } + chunks := make([]Chunk, editDistance+1) + + x, y := alen, blen + for d := editDistance; d > 0; d-- { + endpoint := saved[d] + diag := x - y + insert := diag == -d || (diag != d && endpoint[maxPath+diag-1] < endpoint[maxPath+diag+1]) + + x1 := endpoint[maxPath+diag] + var x0, xM, kk int + if insert { + kk = diag + 1 + x0 = endpoint[maxPath+kk] + xM = x0 + } else { + kk = diag - 1 + x0 = endpoint[maxPath+kk] + xM = x0 + 1 + } + y0 := x0 - kk + + var c Chunk + if insert { + c.Added = b[y0:][:1] + } else { + c.Deleted = a[x0:][:1] + } + if xM < x1 { + c.Equal = a[xM:][:x1-xM] + } + + x, y = x0, y0 + chunks[d] = c + } + if x > 0 { + chunks[0].Equal = a[:x] + } + if chunks[0].empty() { + chunks = chunks[1:] + } + if len(chunks) == 0 { + return nil + } + return chunks +} diff --git a/pkg/util/util_test.go b/pkg/util/util_test.go index a9d25112b..ee5b1ac39 100644 --- a/pkg/util/util_test.go +++ b/pkg/util/util_test.go @@ -180,3 +180,13 @@ func TestIsSmallerQuantity(t *testing.T) { } } } + +/* +func TestNiceDiff(t *testing.T) { + o := "a\nb\nc\n" + n := "b\nd\n" + d := nicediff.Diff(o, n, true) + t.Log(d) + // t.Errorf("Lets see output") +} +*/ From d658b9672ea3c13c6e3c3a0fcc94820c46451a84 Mon Sep 17 00:00:00 2001 From: Felix Kunde Date: Wed, 28 Oct 2020 10:40:10 +0100 Subject: [PATCH 10/12] PostgresTeam CRD for advanced team management (#1165) * PostgresTeamCRD for advanced team management * rework internal structure to be closer to CRD * superusers instead of admin * add more util functions and unit tests * fix initHumanUsers * check for superusers when creating normal teams * polishing and fixes * adding the essential missing pieces * add documentation and update rbac * reflect some feedback * reflect more feedback * fixing debug logs and raise QueueResyncPeriodTPR * add two more flags to disable CRD and its superuser support * fix chart * update go modules * move to client 1.19.3 and update codegen --- .../crds/operatorconfigurations.yaml | 4 + .../postgres-operator/crds/postgresteams.yaml | 67 ++++++ .../templates/clusterrole.yaml | 9 + charts/postgres-operator/values-crd.yaml | 5 + charts/postgres-operator/values.yaml | 7 +- docs/administrator.md | 9 +- docs/reference/operator_parameters.md | 14 +- docs/user.md | 61 ++++++ manifests/configmap.yaml | 2 + manifests/custom-team-membership.yaml | 13 ++ manifests/operator-service-account-rbac.yaml | 9 + manifests/operatorconfiguration.crd.yaml | 4 + ...gresql-operator-default-configuration.yaml | 2 + manifests/postgresteam.crd.yaml | 63 ++++++ pkg/apis/acid.zalan.do/v1/crds.go | 6 + .../v1/operator_configuration_type.go | 22 +- .../acid.zalan.do/v1/postgres_team_type.go | 33 +++ pkg/apis/acid.zalan.do/v1/register.go | 5 +- .../acid.zalan.do/v1/zz_generated.deepcopy.go | 126 ++++++++++++ pkg/cluster/cluster.go | 54 +++-- pkg/cluster/util.go | 27 ++- pkg/controller/controller.go | 46 ++++- pkg/controller/operator_config.go | 2 + pkg/controller/util.go | 33 +++ .../acid.zalan.do/v1/acid.zalan.do_client.go | 5 + .../v1/fake/fake_acid.zalan.do_client.go | 4 + .../v1/fake/fake_postgresteam.go | 136 ++++++++++++ .../acid.zalan.do/v1/generated_expansion.go | 2 + .../typed/acid.zalan.do/v1/postgresteam.go | 184 +++++++++++++++++ .../acid.zalan.do/v1/interface.go | 7 + .../acid.zalan.do/v1/postgresteam.go | 96 +++++++++ .../informers/externalversions/generic.go | 2 + .../acid.zalan.do/v1/expansion_generated.go | 8 + .../listers/acid.zalan.do/v1/postgresteam.go | 105 ++++++++++ pkg/teams/postgres_team.go | 118 +++++++++++ pkg/teams/postgres_team_test.go | 194 ++++++++++++++++++ pkg/util/config/config.go | 2 + pkg/util/util.go | 31 +++ pkg/util/util_test.go | 39 ++++ 39 files changed, 1509 insertions(+), 47 deletions(-) create mode 100644 charts/postgres-operator/crds/postgresteams.yaml create mode 100644 manifests/custom-team-membership.yaml create mode 100644 manifests/postgresteam.crd.yaml create mode 100644 pkg/apis/acid.zalan.do/v1/postgres_team_type.go create mode 100644 pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/fake/fake_postgresteam.go create mode 100644 pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/postgresteam.go create mode 100644 pkg/generated/informers/externalversions/acid.zalan.do/v1/postgresteam.go create mode 100644 pkg/generated/listers/acid.zalan.do/v1/postgresteam.go create mode 100644 pkg/teams/postgres_team.go create mode 100644 pkg/teams/postgres_team_test.go diff --git a/charts/postgres-operator/crds/operatorconfigurations.yaml b/charts/postgres-operator/crds/operatorconfigurations.yaml index 8b576822c..05b5090c2 100644 --- a/charts/postgres-operator/crds/operatorconfigurations.yaml +++ b/charts/postgres-operator/crds/operatorconfigurations.yaml @@ -319,6 +319,10 @@ spec: properties: enable_admin_role_for_users: type: boolean + enable_postgres_team_crd: + type: boolean + enable_postgres_team_crd_superusers: + type: boolean enable_team_superuser: type: boolean enable_teams_api: diff --git a/charts/postgres-operator/crds/postgresteams.yaml b/charts/postgres-operator/crds/postgresteams.yaml new file mode 100644 index 000000000..4f2e74034 --- /dev/null +++ b/charts/postgres-operator/crds/postgresteams.yaml @@ -0,0 +1,67 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: postgresteams.acid.zalan.do + labels: + app.kubernetes.io/name: postgres-operator + annotations: + "helm.sh/hook": crd-install +spec: + group: acid.zalan.do + names: + kind: PostgresTeam + listKind: PostgresTeamList + plural: postgresteams + singular: postgresteam + shortNames: + - pgteam + scope: Namespaced + subresources: + status: {} + version: v1 + validation: + openAPIV3Schema: + type: object + required: + - kind + - apiVersion + - spec + properties: + kind: + type: string + enum: + - PostgresTeam + apiVersion: + type: string + enum: + - acid.zalan.do/v1 + spec: + type: object + properties: + additionalSuperuserTeams: + type: object + description: "Map for teamId and associated additional superuser teams" + additionalProperties: + type: array + nullable: true + description: "List of teams to become Postgres superusers" + items: + type: string + additionalTeams: + type: object + description: "Map for teamId and associated additional teams" + additionalProperties: + type: array + nullable: true + description: "List of teams whose members will also be added to the Postgres cluster" + items: + type: string + additionalMembers: + type: object + description: "Map for teamId and associated additional users" + additionalProperties: + type: array + nullable: true + description: "List of users who will also be added to the Postgres cluster" + items: + type: string diff --git a/charts/postgres-operator/templates/clusterrole.yaml b/charts/postgres-operator/templates/clusterrole.yaml index bd34e803e..84da313d9 100644 --- a/charts/postgres-operator/templates/clusterrole.yaml +++ b/charts/postgres-operator/templates/clusterrole.yaml @@ -25,6 +25,15 @@ rules: - patch - update - watch +# operator only reads PostgresTeams +- apiGroups: + - acid.zalan.do + resources: + - postgresteams + verbs: + - get + - list + - watch # to create or get/update CRDs when starting up - apiGroups: - apiextensions.k8s.io diff --git a/charts/postgres-operator/values-crd.yaml b/charts/postgres-operator/values-crd.yaml index ffa8b7f51..52892c22c 100644 --- a/charts/postgres-operator/values-crd.yaml +++ b/charts/postgres-operator/values-crd.yaml @@ -256,6 +256,11 @@ configTeamsApi: # team_admin_role will have the rights to grant roles coming from PG manifests # enable_admin_role_for_users: true + # operator watches for PostgresTeam CRs to assign additional teams and members to clusters + enable_postgres_team_crd: true + # toogle to create additional superuser teams from PostgresTeam CRs + # enable_postgres_team_crd_superusers: "false" + # toggle to grant superuser to team members created from the Teams API enable_team_superuser: false # toggles usage of the Teams API by the operator diff --git a/charts/postgres-operator/values.yaml b/charts/postgres-operator/values.yaml index d4acfe1aa..ba5c7458c 100644 --- a/charts/postgres-operator/values.yaml +++ b/charts/postgres-operator/values.yaml @@ -1,7 +1,7 @@ image: registry: registry.opensource.zalan.do repository: acid/postgres-operator - tag: v1.5.0 + tag: v1.5.0-61-ged2b3239-dirty pullPolicy: "IfNotPresent" # Optionally specify an array of imagePullSecrets. @@ -248,6 +248,11 @@ configTeamsApi: # team_admin_role will have the rights to grant roles coming from PG manifests # enable_admin_role_for_users: "true" + # operator watches for PostgresTeam CRs to assign additional teams and members to clusters + enable_postgres_team_crd: "true" + # toogle to create additional superuser teams from PostgresTeam CRs + # enable_postgres_team_crd_superusers: "false" + # toggle to grant superuser to team members created from the Teams API # enable_team_superuser: "false" diff --git a/docs/administrator.md b/docs/administrator.md index 1a1b5e8f9..5357ddb74 100644 --- a/docs/administrator.md +++ b/docs/administrator.md @@ -561,9 +561,12 @@ database. * **Human users** originate from the [Teams API](user.md#teams-api-roles) that returns a list of the team members given a team id. The operator differentiates between (a) product teams that own a particular Postgres cluster and are granted -admin rights to maintain it, and (b) Postgres superuser teams that get the -superuser access to all Postgres databases running in a K8s cluster for the -purposes of maintaining and troubleshooting. +admin rights to maintain it, (b) Postgres superuser teams that get superuser +access to all Postgres databases running in a K8s cluster for the purposes of +maintaining and troubleshooting, and (c) additional teams, superuser teams or +members associated with the owning team. The latter is managed via the +[PostgresTeam CRD](user.md#additional-teams-and-members-per-cluster). + ## Understanding rolling update of Spilo pods diff --git a/docs/reference/operator_parameters.md b/docs/reference/operator_parameters.md index 465465432..bd12eb922 100644 --- a/docs/reference/operator_parameters.md +++ b/docs/reference/operator_parameters.md @@ -598,8 +598,8 @@ key. The default is `"log_statement:all"` * **enable_team_superuser** - whether to grant superuser to team members created from the Teams API. - The default is `false`. + whether to grant superuser to members of the cluster's owning team created + from the Teams API. The default is `false`. * **team_admin_role** role name to grant to team members created from the Teams API. The default is @@ -632,6 +632,16 @@ key. cluster to administer Postgres and maintain infrastructure built around it. The default is empty. +* **enable_postgres_team_crd** + toggle to make the operator watch for created or updated `PostgresTeam` CRDs + and create roles for specified additional teams and members. + The default is `true`. + +* **enable_postgres_team_crd_superusers** + in a `PostgresTeam` CRD additional superuser teams can assigned to teams that + own clusters. With this flag set to `false`, it will be ignored. + The default is `false`. + ## Logging and REST API Parameters affecting logging and REST API listener. In the CRD-based diff --git a/docs/user.md b/docs/user.md index 9a9e01b9a..db107dccb 100644 --- a/docs/user.md +++ b/docs/user.md @@ -269,6 +269,67 @@ to choose superusers, group roles, [PAM configuration](https://github.com/CyberD etc. An OAuth2 token can be passed to the Teams API via a secret. The name for this secret is configurable with the `oauth_token_secret_name` parameter. +### Additional teams and members per cluster + +Postgres clusters are associated with one team by providing the `teamID` in +the manifest. Additional superuser teams can be configured as mentioned in +the previous paragraph. However, this is a global setting. To assign +additional teams, superuser teams and single users to clusters of a given +team, use the [PostgresTeam CRD](../manifests/postgresteam.yaml). It provides +a simple mapping structure. + + +```yaml +apiVersion: "acid.zalan.do/v1" +kind: PostgresTeam +metadata: + name: custom-team-membership +spec: + additionalSuperuserTeams: + acid: + - "postgres_superusers" + additionalTeams: + acid: [] + additionalMembers: + acid: + - "elephant" +``` + +One `PostgresTeam` resource could contain mappings of multiple teams but you +can choose to create separate CRDs, alternatively. On each CRD creation or +update the operator will gather all mappings to create additional human users +in databases the next time they are synced. Additional teams are resolved +transitively, meaning you will also add users for their `additionalTeams` +or (not and) `additionalSuperuserTeams`. + +For each additional team the Teams API would be queried. Additional members +will be added either way. There can be "virtual teams" that do not exists in +your Teams API but users of associated teams as well as members will get +created. With `PostgresTeams` it's also easy to cover team name changes. Just +add the mapping between old and new team name and the rest can stay the same. + +```yaml +apiVersion: "acid.zalan.do/v1" +kind: PostgresTeam +metadata: + name: virtualteam-membership +spec: + additionalSuperuserTeams: + acid: + - "virtual_superusers" + virtual_superusers: + - "real_teamA" + - "real_teamB" + real_teamA: + - "real_teamA_renamed" + additionalTeams: + real_teamA: + - "real_teamA_renamed" + additionalMembers: + virtual_superusers: + - "foo" +``` + ## Prepared databases with roles and default privileges The `users` section in the manifests only allows for creating database roles diff --git a/manifests/configmap.yaml b/manifests/configmap.yaml index 970f845bf..ce20dfa58 100644 --- a/manifests/configmap.yaml +++ b/manifests/configmap.yaml @@ -41,6 +41,8 @@ data: enable_master_load_balancer: "false" # enable_pod_antiaffinity: "false" # enable_pod_disruption_budget: "true" + # enable_postgres_team_crd: "true" + # enable_postgres_team_crd_superusers: "false" enable_replica_load_balancer: "false" # enable_shm_volume: "true" # enable_sidecars: "true" diff --git a/manifests/custom-team-membership.yaml b/manifests/custom-team-membership.yaml new file mode 100644 index 000000000..9af153962 --- /dev/null +++ b/manifests/custom-team-membership.yaml @@ -0,0 +1,13 @@ +apiVersion: "acid.zalan.do/v1" +kind: PostgresTeam +metadata: + name: custom-team-membership +spec: + additionalSuperuserTeams: + acid: + - "postgres_superusers" + additionalTeams: + acid: [] + additionalMembers: + acid: + - "elephant" diff --git a/manifests/operator-service-account-rbac.yaml b/manifests/operator-service-account-rbac.yaml index 266df30c5..15ed7f53b 100644 --- a/manifests/operator-service-account-rbac.yaml +++ b/manifests/operator-service-account-rbac.yaml @@ -26,6 +26,15 @@ rules: - patch - update - watch +# operator only reads PostgresTeams +- apiGroups: + - acid.zalan.do + resources: + - postgresteams + verbs: + - get + - list + - watch # to create or get/update CRDs when starting up - apiGroups: - apiextensions.k8s.io diff --git a/manifests/operatorconfiguration.crd.yaml b/manifests/operatorconfiguration.crd.yaml index 515f87438..d0f020f52 100644 --- a/manifests/operatorconfiguration.crd.yaml +++ b/manifests/operatorconfiguration.crd.yaml @@ -325,6 +325,10 @@ spec: properties: enable_admin_role_for_users: type: boolean + enable_postgres_team_crd: + type: boolean + enable_postgres_team_crd_superusers: + type: boolean enable_team_superuser: type: boolean enable_teams_api: diff --git a/manifests/postgresql-operator-default-configuration.yaml b/manifests/postgresql-operator-default-configuration.yaml index 5fb77bf76..71408ac43 100644 --- a/manifests/postgresql-operator-default-configuration.yaml +++ b/manifests/postgresql-operator-default-configuration.yaml @@ -122,6 +122,8 @@ configuration: enable_database_access: true teams_api: # enable_admin_role_for_users: true + # enable_postgres_team_crd: true + # enable_postgres_team_crd_superusers: false enable_team_superuser: false enable_teams_api: false # pam_configuration: "" diff --git a/manifests/postgresteam.crd.yaml b/manifests/postgresteam.crd.yaml new file mode 100644 index 000000000..5f55bdfcb --- /dev/null +++ b/manifests/postgresteam.crd.yaml @@ -0,0 +1,63 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: postgresteams.acid.zalan.do +spec: + group: acid.zalan.do + names: + kind: PostgresTeam + listKind: PostgresTeamList + plural: postgresteams + singular: postgresteam + shortNames: + - pgteam + scope: Namespaced + subresources: + status: {} + version: v1 + validation: + openAPIV3Schema: + type: object + required: + - kind + - apiVersion + - spec + properties: + kind: + type: string + enum: + - PostgresTeam + apiVersion: + type: string + enum: + - acid.zalan.do/v1 + spec: + type: object + properties: + additionalSuperuserTeams: + type: object + description: "Map for teamId and associated additional superuser teams" + additionalProperties: + type: array + nullable: true + description: "List of teams to become Postgres superusers" + items: + type: string + additionalTeams: + type: object + description: "Map for teamId and associated additional teams" + additionalProperties: + type: array + nullable: true + description: "List of teams whose members will also be added to the Postgres cluster" + items: + type: string + additionalMembers: + type: object + description: "Map for teamId and associated additional users" + additionalProperties: + type: array + nullable: true + description: "List of users who will also be added to the Postgres cluster" + items: + type: string diff --git a/pkg/apis/acid.zalan.do/v1/crds.go b/pkg/apis/acid.zalan.do/v1/crds.go index a7d9bccf0..0dca0c94b 100644 --- a/pkg/apis/acid.zalan.do/v1/crds.go +++ b/pkg/apis/acid.zalan.do/v1/crds.go @@ -1235,6 +1235,12 @@ var OperatorConfigCRDResourceValidation = apiextv1beta1.CustomResourceValidation "enable_admin_role_for_users": { Type: "boolean", }, + "enable_postgres_team_crd": { + Type: "boolean", + }, + "enable_postgres_team_crd_superusers": { + Type: "boolean", + }, "enable_team_superuser": { Type: "boolean", }, diff --git a/pkg/apis/acid.zalan.do/v1/operator_configuration_type.go b/pkg/apis/acid.zalan.do/v1/operator_configuration_type.go index 179b7e751..9dae0089b 100644 --- a/pkg/apis/acid.zalan.do/v1/operator_configuration_type.go +++ b/pkg/apis/acid.zalan.do/v1/operator_configuration_type.go @@ -135,16 +135,18 @@ type OperatorDebugConfiguration struct { // TeamsAPIConfiguration defines the configuration of TeamsAPI type TeamsAPIConfiguration struct { - EnableTeamsAPI bool `json:"enable_teams_api,omitempty"` - TeamsAPIUrl string `json:"teams_api_url,omitempty"` - TeamAPIRoleConfiguration map[string]string `json:"team_api_role_configuration,omitempty"` - EnableTeamSuperuser bool `json:"enable_team_superuser,omitempty"` - EnableAdminRoleForUsers bool `json:"enable_admin_role_for_users,omitempty"` - TeamAdminRole string `json:"team_admin_role,omitempty"` - PamRoleName string `json:"pam_role_name,omitempty"` - PamConfiguration string `json:"pam_configuration,omitempty"` - ProtectedRoles []string `json:"protected_role_names,omitempty"` - PostgresSuperuserTeams []string `json:"postgres_superuser_teams,omitempty"` + EnableTeamsAPI bool `json:"enable_teams_api,omitempty"` + TeamsAPIUrl string `json:"teams_api_url,omitempty"` + TeamAPIRoleConfiguration map[string]string `json:"team_api_role_configuration,omitempty"` + EnableTeamSuperuser bool `json:"enable_team_superuser,omitempty"` + EnableAdminRoleForUsers bool `json:"enable_admin_role_for_users,omitempty"` + TeamAdminRole string `json:"team_admin_role,omitempty"` + PamRoleName string `json:"pam_role_name,omitempty"` + PamConfiguration string `json:"pam_configuration,omitempty"` + ProtectedRoles []string `json:"protected_role_names,omitempty"` + PostgresSuperuserTeams []string `json:"postgres_superuser_teams,omitempty"` + EnablePostgresTeamCRD *bool `json:"enable_postgres_team_crd,omitempty"` + EnablePostgresTeamCRDSuperusers bool `json:"enable_postgres_team_crd_superusers,omitempty"` } // LoggingRESTAPIConfiguration defines Logging API conf diff --git a/pkg/apis/acid.zalan.do/v1/postgres_team_type.go b/pkg/apis/acid.zalan.do/v1/postgres_team_type.go new file mode 100644 index 000000000..5697c193e --- /dev/null +++ b/pkg/apis/acid.zalan.do/v1/postgres_team_type.go @@ -0,0 +1,33 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PostgresTeam defines Custom Resource Definition Object for team management. +type PostgresTeam struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec PostgresTeamSpec `json:"spec"` +} + +// PostgresTeamSpec defines the specification for the PostgresTeam TPR. +type PostgresTeamSpec struct { + AdditionalSuperuserTeams map[string][]string `json:"additionalSuperuserTeams,omitempty"` + AdditionalTeams map[string][]string `json:"additionalTeams,omitempty"` + AdditionalMembers map[string][]string `json:"additionalMembers,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PostgresTeamList defines a list of PostgresTeam definitions. +type PostgresTeamList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + Items []PostgresTeam `json:"items"` +} diff --git a/pkg/apis/acid.zalan.do/v1/register.go b/pkg/apis/acid.zalan.do/v1/register.go index 1c30e35fb..9dcbf2baf 100644 --- a/pkg/apis/acid.zalan.do/v1/register.go +++ b/pkg/apis/acid.zalan.do/v1/register.go @@ -1,11 +1,10 @@ package v1 import ( + acidzalando "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" - - "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do" ) // APIVersion of the `postgresql` and `operator` CRDs @@ -44,6 +43,8 @@ func addKnownTypes(scheme *runtime.Scheme) error { // TODO: User uppercase CRDResourceKind of our types in the next major API version scheme.AddKnownTypeWithName(SchemeGroupVersion.WithKind("postgresql"), &Postgresql{}) scheme.AddKnownTypeWithName(SchemeGroupVersion.WithKind("postgresqlList"), &PostgresqlList{}) + scheme.AddKnownTypeWithName(SchemeGroupVersion.WithKind("PostgresTeam"), &PostgresTeam{}) + scheme.AddKnownTypeWithName(SchemeGroupVersion.WithKind("PostgresTeamList"), &PostgresTeamList{}) scheme.AddKnownTypeWithName(SchemeGroupVersion.WithKind("OperatorConfiguration"), &OperatorConfiguration{}) scheme.AddKnownTypeWithName(SchemeGroupVersion.WithKind("OperatorConfigurationList"), diff --git a/pkg/apis/acid.zalan.do/v1/zz_generated.deepcopy.go b/pkg/apis/acid.zalan.do/v1/zz_generated.deepcopy.go index 34e6b46e8..80a00f491 100644 --- a/pkg/apis/acid.zalan.do/v1/zz_generated.deepcopy.go +++ b/pkg/apis/acid.zalan.do/v1/zz_generated.deepcopy.go @@ -711,6 +711,127 @@ func (in *PostgresStatus) DeepCopy() *PostgresStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PostgresTeam) DeepCopyInto(out *PostgresTeam) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresTeam. +func (in *PostgresTeam) DeepCopy() *PostgresTeam { + if in == nil { + return nil + } + out := new(PostgresTeam) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PostgresTeam) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PostgresTeamList) DeepCopyInto(out *PostgresTeamList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PostgresTeam, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresTeamList. +func (in *PostgresTeamList) DeepCopy() *PostgresTeamList { + if in == nil { + return nil + } + out := new(PostgresTeamList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PostgresTeamList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PostgresTeamSpec) DeepCopyInto(out *PostgresTeamSpec) { + *out = *in + if in.AdditionalSuperuserTeams != nil { + in, out := &in.AdditionalSuperuserTeams, &out.AdditionalSuperuserTeams + *out = make(map[string][]string, len(*in)) + for key, val := range *in { + var outVal []string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = make([]string, len(*in)) + copy(*out, *in) + } + (*out)[key] = outVal + } + } + if in.AdditionalTeams != nil { + in, out := &in.AdditionalTeams, &out.AdditionalTeams + *out = make(map[string][]string, len(*in)) + for key, val := range *in { + var outVal []string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = make([]string, len(*in)) + copy(*out, *in) + } + (*out)[key] = outVal + } + } + if in.AdditionalMembers != nil { + in, out := &in.AdditionalMembers, &out.AdditionalMembers + *out = make(map[string][]string, len(*in)) + for key, val := range *in { + var outVal []string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = make([]string, len(*in)) + copy(*out, *in) + } + (*out)[key] = outVal + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresTeamSpec. +func (in *PostgresTeamSpec) DeepCopy() *PostgresTeamSpec { + if in == nil { + return nil + } + out := new(PostgresTeamSpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PostgresUsersConfiguration) DeepCopyInto(out *PostgresUsersConfiguration) { *out = *in @@ -993,6 +1114,11 @@ func (in *TeamsAPIConfiguration) DeepCopyInto(out *TeamsAPIConfiguration) { *out = make([]string, len(*in)) copy(*out, *in) } + if in.EnablePostgresTeamCRD != nil { + in, out := &in.EnablePostgresTeamCRD, &out.EnablePostgresTeamCRD + *out = new(bool) + **out = **in + } return } diff --git a/pkg/cluster/cluster.go b/pkg/cluster/cluster.go index 8636083c2..06388d731 100644 --- a/pkg/cluster/cluster.go +++ b/pkg/cluster/cluster.go @@ -14,19 +14,10 @@ import ( "github.com/r3labs/diff" "github.com/sirupsen/logrus" - appsv1 "k8s.io/api/apps/v1" - v1 "k8s.io/api/core/v1" - policybeta1 "k8s.io/api/policy/v1beta1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/rest" - "k8s.io/client-go/tools/cache" - "k8s.io/client-go/tools/record" - "k8s.io/client-go/tools/reference" - acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1" "github.com/zalando/postgres-operator/pkg/generated/clientset/versioned/scheme" "github.com/zalando/postgres-operator/pkg/spec" + pgteams "github.com/zalando/postgres-operator/pkg/teams" "github.com/zalando/postgres-operator/pkg/util" "github.com/zalando/postgres-operator/pkg/util/config" "github.com/zalando/postgres-operator/pkg/util/constants" @@ -34,7 +25,16 @@ import ( "github.com/zalando/postgres-operator/pkg/util/patroni" "github.com/zalando/postgres-operator/pkg/util/teams" "github.com/zalando/postgres-operator/pkg/util/users" + appsv1 "k8s.io/api/apps/v1" + v1 "k8s.io/api/core/v1" + policybeta1 "k8s.io/api/policy/v1beta1" rbacv1 "k8s.io/api/rbac/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/tools/record" + "k8s.io/client-go/tools/reference" ) var ( @@ -48,6 +48,7 @@ var ( type Config struct { OpConfig config.Config RestConfig *rest.Config + PgTeamMap pgteams.PostgresTeamMap InfrastructureRoles map[string]spec.PgUser // inherited from the controller PodServiceAccount *v1.ServiceAccount PodServiceAccountRoleBinding *rbacv1.RoleBinding @@ -1107,7 +1108,7 @@ func (c *Cluster) initTeamMembers(teamID string, isPostgresSuperuserTeam bool) e if c.shouldAvoidProtectedOrSystemRole(username, "API role") { continue } - if c.OpConfig.EnableTeamSuperuser || isPostgresSuperuserTeam { + if (c.OpConfig.EnableTeamSuperuser && teamID == c.Spec.TeamID) || isPostgresSuperuserTeam { flags = append(flags, constants.RoleFlagSuperuser) } else { if c.OpConfig.TeamAdminRole != "" { @@ -1136,17 +1137,38 @@ func (c *Cluster) initTeamMembers(teamID string, isPostgresSuperuserTeam bool) e func (c *Cluster) initHumanUsers() error { var clusterIsOwnedBySuperuserTeam bool + superuserTeams := []string{} + + if c.OpConfig.EnablePostgresTeamCRDSuperusers { + superuserTeams = c.PgTeamMap.GetAdditionalSuperuserTeams(c.Spec.TeamID, true) + } for _, postgresSuperuserTeam := range c.OpConfig.PostgresSuperuserTeams { - err := c.initTeamMembers(postgresSuperuserTeam, true) - if err != nil { - return fmt.Errorf("Cannot create a team %q of Postgres superusers: %v", postgresSuperuserTeam, err) + if !(util.SliceContains(superuserTeams, postgresSuperuserTeam)) { + superuserTeams = append(superuserTeams, postgresSuperuserTeam) } - if postgresSuperuserTeam == c.Spec.TeamID { + } + + for _, superuserTeam := range superuserTeams { + err := c.initTeamMembers(superuserTeam, true) + if err != nil { + return fmt.Errorf("Cannot initialize members for team %q of Postgres superusers: %v", superuserTeam, err) + } + if superuserTeam == c.Spec.TeamID { clusterIsOwnedBySuperuserTeam = true } } + additionalTeams := c.PgTeamMap.GetAdditionalTeams(c.Spec.TeamID, true) + for _, additionalTeam := range additionalTeams { + if !(util.SliceContains(superuserTeams, additionalTeam)) { + err := c.initTeamMembers(additionalTeam, false) + if err != nil { + return fmt.Errorf("Cannot initialize members for additional team %q for cluster owned by %q: %v", additionalTeam, c.Spec.TeamID, err) + } + } + } + if clusterIsOwnedBySuperuserTeam { c.logger.Infof("Team %q owning the cluster is also a team of superusers. Created superuser roles for its members instead of admin roles.", c.Spec.TeamID) return nil @@ -1154,7 +1176,7 @@ func (c *Cluster) initHumanUsers() error { err := c.initTeamMembers(c.Spec.TeamID, false) if err != nil { - return fmt.Errorf("Cannot create a team %q of admins owning the PG cluster: %v", c.Spec.TeamID, err) + return fmt.Errorf("Cannot initialize members for team %q who owns the Postgres cluster: %v", c.Spec.TeamID, err) } return nil diff --git a/pkg/cluster/util.go b/pkg/cluster/util.go index d227ce155..b8ddb7087 100644 --- a/pkg/cluster/util.go +++ b/pkg/cluster/util.go @@ -238,24 +238,37 @@ func (c *Cluster) getTeamMembers(teamID string) ([]string, error) { return nil, fmt.Errorf("no teamId specified") } + c.logger.Debugf("fetching possible additional team members for team %q", teamID) + members := []string{} + additionalMembers := c.PgTeamMap[c.Spec.TeamID].AdditionalMembers + for _, member := range additionalMembers { + members = append(members, member) + } + if !c.OpConfig.EnableTeamsAPI { - c.logger.Debugf("team API is disabled, returning empty list of members for team %q", teamID) - return []string{}, nil + c.logger.Debugf("team API is disabled, only returning %d members for team %q", len(members), teamID) + return members, nil } token, err := c.oauthTokenGetter.getOAuthToken() if err != nil { - c.logger.Warnf("could not get oauth token to authenticate to team service API, returning empty list of team members: %v", err) - return []string{}, nil + c.logger.Warnf("could not get oauth token to authenticate to team service API, only returning %d members for team %q: %v", len(members), teamID, err) + return members, nil } teamInfo, err := c.teamsAPIClient.TeamInfo(teamID, token) if err != nil { - c.logger.Warnf("could not get team info for team %q, returning empty list of team members: %v", teamID, err) - return []string{}, nil + c.logger.Warnf("could not get team info for team %q, only returning %d members: %v", teamID, len(members), err) + return members, nil } - return teamInfo.Members, nil + for _, member := range teamInfo.Members { + if !(util.SliceContains(members, member)) { + members = append(members, member) + } + } + + return members, nil } func (c *Cluster) waitForPodLabel(podEvents chan PodEvent, stopChan chan struct{}, role *PostgresRole) (*v1.Pod, error) { diff --git a/pkg/controller/controller.go b/pkg/controller/controller.go index 3442bfcfe..2169beb76 100644 --- a/pkg/controller/controller.go +++ b/pkg/controller/controller.go @@ -16,6 +16,7 @@ import ( "github.com/zalando/postgres-operator/pkg/cluster" acidv1informer "github.com/zalando/postgres-operator/pkg/generated/informers/externalversions/acid.zalan.do/v1" "github.com/zalando/postgres-operator/pkg/spec" + "github.com/zalando/postgres-operator/pkg/teams" "github.com/zalando/postgres-operator/pkg/util" "github.com/zalando/postgres-operator/pkg/util/config" "github.com/zalando/postgres-operator/pkg/util/constants" @@ -34,8 +35,9 @@ import ( // Controller represents operator controller type Controller struct { - config spec.ControllerConfig - opConfig *config.Config + config spec.ControllerConfig + opConfig *config.Config + pgTeamMap teams.PostgresTeamMap logger *logrus.Entry KubeClient k8sutil.KubernetesClient @@ -56,10 +58,11 @@ type Controller struct { clusterHistory map[spec.NamespacedName]ringlog.RingLogger // history of the cluster changes teamClusters map[string][]spec.NamespacedName - postgresqlInformer cache.SharedIndexInformer - podInformer cache.SharedIndexInformer - nodesInformer cache.SharedIndexInformer - podCh chan cluster.PodEvent + postgresqlInformer cache.SharedIndexInformer + postgresTeamInformer cache.SharedIndexInformer + podInformer cache.SharedIndexInformer + nodesInformer cache.SharedIndexInformer + podCh chan cluster.PodEvent clusterEventQueues []*cache.FIFO // [workerID]Queue lastClusterSyncTime int64 @@ -326,6 +329,12 @@ func (c *Controller) initController() { c.initSharedInformers() + if c.opConfig.EnablePostgresTeamCRD != nil && *c.opConfig.EnablePostgresTeamCRD { + c.loadPostgresTeams() + } else { + c.pgTeamMap = teams.PostgresTeamMap{} + } + if c.opConfig.DebugLogging { c.logger.Logger.Level = logrus.DebugLevel } @@ -357,6 +366,7 @@ func (c *Controller) initController() { func (c *Controller) initSharedInformers() { + // Postgresqls c.postgresqlInformer = acidv1informer.NewPostgresqlInformer( c.KubeClient.AcidV1ClientSet, c.opConfig.WatchedNamespace, @@ -369,6 +379,20 @@ func (c *Controller) initSharedInformers() { DeleteFunc: c.postgresqlDelete, }) + // PostgresTeams + if c.opConfig.EnablePostgresTeamCRD != nil && *c.opConfig.EnablePostgresTeamCRD { + c.postgresTeamInformer = acidv1informer.NewPostgresTeamInformer( + c.KubeClient.AcidV1ClientSet, + c.opConfig.WatchedNamespace, + constants.QueueResyncPeriodTPR*6, // 30 min + cache.Indexers{}) + + c.postgresTeamInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{ + AddFunc: c.postgresTeamAdd, + UpdateFunc: c.postgresTeamUpdate, + }) + } + // Pods podLw := &cache.ListWatch{ ListFunc: c.podListFunc, @@ -429,6 +453,10 @@ func (c *Controller) Run(stopCh <-chan struct{}, wg *sync.WaitGroup) { go c.apiserver.Run(stopCh, wg) go c.kubeNodesInformer(stopCh, wg) + if c.opConfig.EnablePostgresTeamCRD != nil && *c.opConfig.EnablePostgresTeamCRD { + go c.runPostgresTeamInformer(stopCh, wg) + } + c.logger.Info("started working in background") } @@ -444,6 +472,12 @@ func (c *Controller) runPostgresqlInformer(stopCh <-chan struct{}, wg *sync.Wait c.postgresqlInformer.Run(stopCh) } +func (c *Controller) runPostgresTeamInformer(stopCh <-chan struct{}, wg *sync.WaitGroup) { + defer wg.Done() + + c.postgresTeamInformer.Run(stopCh) +} + func queueClusterKey(eventType EventType, uid types.UID) string { return fmt.Sprintf("%s-%s", eventType, uid) } diff --git a/pkg/controller/operator_config.go b/pkg/controller/operator_config.go index 7e4880712..3ad09ad28 100644 --- a/pkg/controller/operator_config.go +++ b/pkg/controller/operator_config.go @@ -163,6 +163,8 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur result.PamConfiguration = util.Coalesce(fromCRD.TeamsAPI.PamConfiguration, "https://info.example.com/oauth2/tokeninfo?access_token= uid realm=/employees") result.ProtectedRoles = util.CoalesceStrArr(fromCRD.TeamsAPI.ProtectedRoles, []string{"admin"}) result.PostgresSuperuserTeams = fromCRD.TeamsAPI.PostgresSuperuserTeams + result.EnablePostgresTeamCRD = util.CoalesceBool(fromCRD.TeamsAPI.EnablePostgresTeamCRD, util.True()) + result.EnablePostgresTeamCRDSuperusers = fromCRD.TeamsAPI.EnablePostgresTeamCRDSuperusers // logging REST API config result.APIPort = util.CoalesceInt(fromCRD.LoggingRESTAPI.APIPort, 8080) diff --git a/pkg/controller/util.go b/pkg/controller/util.go index e460db2a5..57196d371 100644 --- a/pkg/controller/util.go +++ b/pkg/controller/util.go @@ -15,6 +15,7 @@ import ( acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1" "github.com/zalando/postgres-operator/pkg/cluster" "github.com/zalando/postgres-operator/pkg/spec" + "github.com/zalando/postgres-operator/pkg/teams" "github.com/zalando/postgres-operator/pkg/util" "github.com/zalando/postgres-operator/pkg/util/config" "github.com/zalando/postgres-operator/pkg/util/k8sutil" @@ -30,6 +31,7 @@ func (c *Controller) makeClusterConfig() cluster.Config { return cluster.Config{ RestConfig: c.config.RestConfig, OpConfig: config.Copy(c.opConfig), + PgTeamMap: c.pgTeamMap, InfrastructureRoles: infrastructureRoles, PodServiceAccount: c.PodServiceAccount, } @@ -394,6 +396,37 @@ func (c *Controller) getInfrastructureRole( return roles, nil } +func (c *Controller) loadPostgresTeams() { + // reset team map + c.pgTeamMap = teams.PostgresTeamMap{} + + pgTeams, err := c.KubeClient.AcidV1ClientSet.AcidV1().PostgresTeams(c.opConfig.WatchedNamespace).List(context.TODO(), metav1.ListOptions{}) + if err != nil { + c.logger.Errorf("could not list postgres team objects: %v", err) + } + + c.pgTeamMap.Load(pgTeams) + c.logger.Debugf("Internal Postgres Team Cache: %#v", c.pgTeamMap) +} + +func (c *Controller) postgresTeamAdd(obj interface{}) { + pgTeam, ok := obj.(*acidv1.PostgresTeam) + if !ok { + c.logger.Errorf("could not cast to PostgresTeam spec") + } + c.logger.Debugf("PostgreTeam %q added. Reloading postgres team CRDs and overwriting cached map", pgTeam.Name) + c.loadPostgresTeams() +} + +func (c *Controller) postgresTeamUpdate(prev, obj interface{}) { + pgTeam, ok := obj.(*acidv1.PostgresTeam) + if !ok { + c.logger.Errorf("could not cast to PostgresTeam spec") + } + c.logger.Debugf("PostgreTeam %q updated. Reloading postgres team CRDs and overwriting cached map", pgTeam.Name) + c.loadPostgresTeams() +} + func (c *Controller) podClusterName(pod *v1.Pod) spec.NamespacedName { if name, ok := pod.Labels[c.opConfig.ClusterNameLabel]; ok { return spec.NamespacedName{ diff --git a/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/acid.zalan.do_client.go b/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/acid.zalan.do_client.go index 1879b9514..e48e2d2a7 100644 --- a/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/acid.zalan.do_client.go +++ b/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/acid.zalan.do_client.go @@ -33,6 +33,7 @@ import ( type AcidV1Interface interface { RESTClient() rest.Interface OperatorConfigurationsGetter + PostgresTeamsGetter PostgresqlsGetter } @@ -45,6 +46,10 @@ func (c *AcidV1Client) OperatorConfigurations(namespace string) OperatorConfigur return newOperatorConfigurations(c, namespace) } +func (c *AcidV1Client) PostgresTeams(namespace string) PostgresTeamInterface { + return newPostgresTeams(c, namespace) +} + func (c *AcidV1Client) Postgresqls(namespace string) PostgresqlInterface { return newPostgresqls(c, namespace) } diff --git a/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/fake/fake_acid.zalan.do_client.go b/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/fake/fake_acid.zalan.do_client.go index 8cd4dc9da..9e31f5192 100644 --- a/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/fake/fake_acid.zalan.do_client.go +++ b/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/fake/fake_acid.zalan.do_client.go @@ -38,6 +38,10 @@ func (c *FakeAcidV1) OperatorConfigurations(namespace string) v1.OperatorConfigu return &FakeOperatorConfigurations{c, namespace} } +func (c *FakeAcidV1) PostgresTeams(namespace string) v1.PostgresTeamInterface { + return &FakePostgresTeams{c, namespace} +} + func (c *FakeAcidV1) Postgresqls(namespace string) v1.PostgresqlInterface { return &FakePostgresqls{c, namespace} } diff --git a/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/fake/fake_postgresteam.go b/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/fake/fake_postgresteam.go new file mode 100644 index 000000000..20c8ec809 --- /dev/null +++ b/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/fake/fake_postgresteam.go @@ -0,0 +1,136 @@ +/* +Copyright 2020 Compose, Zalando SE + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + acidzalandov1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakePostgresTeams implements PostgresTeamInterface +type FakePostgresTeams struct { + Fake *FakeAcidV1 + ns string +} + +var postgresteamsResource = schema.GroupVersionResource{Group: "acid.zalan.do", Version: "v1", Resource: "postgresteams"} + +var postgresteamsKind = schema.GroupVersionKind{Group: "acid.zalan.do", Version: "v1", Kind: "PostgresTeam"} + +// Get takes name of the postgresTeam, and returns the corresponding postgresTeam object, and an error if there is any. +func (c *FakePostgresTeams) Get(ctx context.Context, name string, options v1.GetOptions) (result *acidzalandov1.PostgresTeam, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(postgresteamsResource, c.ns, name), &acidzalandov1.PostgresTeam{}) + + if obj == nil { + return nil, err + } + return obj.(*acidzalandov1.PostgresTeam), err +} + +// List takes label and field selectors, and returns the list of PostgresTeams that match those selectors. +func (c *FakePostgresTeams) List(ctx context.Context, opts v1.ListOptions) (result *acidzalandov1.PostgresTeamList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(postgresteamsResource, postgresteamsKind, c.ns, opts), &acidzalandov1.PostgresTeamList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &acidzalandov1.PostgresTeamList{ListMeta: obj.(*acidzalandov1.PostgresTeamList).ListMeta} + for _, item := range obj.(*acidzalandov1.PostgresTeamList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested postgresTeams. +func (c *FakePostgresTeams) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(postgresteamsResource, c.ns, opts)) + +} + +// Create takes the representation of a postgresTeam and creates it. Returns the server's representation of the postgresTeam, and an error, if there is any. +func (c *FakePostgresTeams) Create(ctx context.Context, postgresTeam *acidzalandov1.PostgresTeam, opts v1.CreateOptions) (result *acidzalandov1.PostgresTeam, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(postgresteamsResource, c.ns, postgresTeam), &acidzalandov1.PostgresTeam{}) + + if obj == nil { + return nil, err + } + return obj.(*acidzalandov1.PostgresTeam), err +} + +// Update takes the representation of a postgresTeam and updates it. Returns the server's representation of the postgresTeam, and an error, if there is any. +func (c *FakePostgresTeams) Update(ctx context.Context, postgresTeam *acidzalandov1.PostgresTeam, opts v1.UpdateOptions) (result *acidzalandov1.PostgresTeam, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(postgresteamsResource, c.ns, postgresTeam), &acidzalandov1.PostgresTeam{}) + + if obj == nil { + return nil, err + } + return obj.(*acidzalandov1.PostgresTeam), err +} + +// Delete takes name of the postgresTeam and deletes it. Returns an error if one occurs. +func (c *FakePostgresTeams) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(postgresteamsResource, c.ns, name), &acidzalandov1.PostgresTeam{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakePostgresTeams) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(postgresteamsResource, c.ns, listOpts) + + _, err := c.Fake.Invokes(action, &acidzalandov1.PostgresTeamList{}) + return err +} + +// Patch applies the patch and returns the patched postgresTeam. +func (c *FakePostgresTeams) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *acidzalandov1.PostgresTeam, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(postgresteamsResource, c.ns, name, pt, data, subresources...), &acidzalandov1.PostgresTeam{}) + + if obj == nil { + return nil, err + } + return obj.(*acidzalandov1.PostgresTeam), err +} diff --git a/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/generated_expansion.go b/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/generated_expansion.go index fd5707c75..bcd80f922 100644 --- a/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/generated_expansion.go +++ b/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/generated_expansion.go @@ -26,4 +26,6 @@ package v1 type OperatorConfigurationExpansion interface{} +type PostgresTeamExpansion interface{} + type PostgresqlExpansion interface{} diff --git a/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/postgresteam.go b/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/postgresteam.go new file mode 100644 index 000000000..82157dceb --- /dev/null +++ b/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/postgresteam.go @@ -0,0 +1,184 @@ +/* +Copyright 2020 Compose, Zalando SE + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + "time" + + v1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1" + scheme "github.com/zalando/postgres-operator/pkg/generated/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// PostgresTeamsGetter has a method to return a PostgresTeamInterface. +// A group's client should implement this interface. +type PostgresTeamsGetter interface { + PostgresTeams(namespace string) PostgresTeamInterface +} + +// PostgresTeamInterface has methods to work with PostgresTeam resources. +type PostgresTeamInterface interface { + Create(ctx context.Context, postgresTeam *v1.PostgresTeam, opts metav1.CreateOptions) (*v1.PostgresTeam, error) + Update(ctx context.Context, postgresTeam *v1.PostgresTeam, opts metav1.UpdateOptions) (*v1.PostgresTeam, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.PostgresTeam, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.PostgresTeamList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PostgresTeam, err error) + PostgresTeamExpansion +} + +// postgresTeams implements PostgresTeamInterface +type postgresTeams struct { + client rest.Interface + ns string +} + +// newPostgresTeams returns a PostgresTeams +func newPostgresTeams(c *AcidV1Client, namespace string) *postgresTeams { + return &postgresTeams{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the postgresTeam, and returns the corresponding postgresTeam object, and an error if there is any. +func (c *postgresTeams) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.PostgresTeam, err error) { + result = &v1.PostgresTeam{} + err = c.client.Get(). + Namespace(c.ns). + Resource("postgresteams"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of PostgresTeams that match those selectors. +func (c *postgresTeams) List(ctx context.Context, opts metav1.ListOptions) (result *v1.PostgresTeamList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.PostgresTeamList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("postgresteams"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested postgresTeams. +func (c *postgresTeams) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("postgresteams"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a postgresTeam and creates it. Returns the server's representation of the postgresTeam, and an error, if there is any. +func (c *postgresTeams) Create(ctx context.Context, postgresTeam *v1.PostgresTeam, opts metav1.CreateOptions) (result *v1.PostgresTeam, err error) { + result = &v1.PostgresTeam{} + err = c.client.Post(). + Namespace(c.ns). + Resource("postgresteams"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(postgresTeam). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a postgresTeam and updates it. Returns the server's representation of the postgresTeam, and an error, if there is any. +func (c *postgresTeams) Update(ctx context.Context, postgresTeam *v1.PostgresTeam, opts metav1.UpdateOptions) (result *v1.PostgresTeam, err error) { + result = &v1.PostgresTeam{} + err = c.client.Put(). + Namespace(c.ns). + Resource("postgresteams"). + Name(postgresTeam.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(postgresTeam). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the postgresTeam and deletes it. Returns an error if one occurs. +func (c *postgresTeams) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("postgresteams"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *postgresTeams) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("postgresteams"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched postgresTeam. +func (c *postgresTeams) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PostgresTeam, err error) { + result = &v1.PostgresTeam{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("postgresteams"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/pkg/generated/informers/externalversions/acid.zalan.do/v1/interface.go b/pkg/generated/informers/externalversions/acid.zalan.do/v1/interface.go index 30090afee..b83d4d0f0 100644 --- a/pkg/generated/informers/externalversions/acid.zalan.do/v1/interface.go +++ b/pkg/generated/informers/externalversions/acid.zalan.do/v1/interface.go @@ -30,6 +30,8 @@ import ( // Interface provides access to all the informers in this group version. type Interface interface { + // PostgresTeams returns a PostgresTeamInformer. + PostgresTeams() PostgresTeamInformer // Postgresqls returns a PostgresqlInformer. Postgresqls() PostgresqlInformer } @@ -45,6 +47,11 @@ func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakList return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} } +// PostgresTeams returns a PostgresTeamInformer. +func (v *version) PostgresTeams() PostgresTeamInformer { + return &postgresTeamInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + // Postgresqls returns a PostgresqlInformer. func (v *version) Postgresqls() PostgresqlInformer { return &postgresqlInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} diff --git a/pkg/generated/informers/externalversions/acid.zalan.do/v1/postgresteam.go b/pkg/generated/informers/externalversions/acid.zalan.do/v1/postgresteam.go new file mode 100644 index 000000000..7ae532cbd --- /dev/null +++ b/pkg/generated/informers/externalversions/acid.zalan.do/v1/postgresteam.go @@ -0,0 +1,96 @@ +/* +Copyright 2020 Compose, Zalando SE + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + time "time" + + acidzalandov1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1" + versioned "github.com/zalando/postgres-operator/pkg/generated/clientset/versioned" + internalinterfaces "github.com/zalando/postgres-operator/pkg/generated/informers/externalversions/internalinterfaces" + v1 "github.com/zalando/postgres-operator/pkg/generated/listers/acid.zalan.do/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// PostgresTeamInformer provides access to a shared informer and lister for +// PostgresTeams. +type PostgresTeamInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.PostgresTeamLister +} + +type postgresTeamInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewPostgresTeamInformer constructs a new informer for PostgresTeam type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewPostgresTeamInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredPostgresTeamInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredPostgresTeamInformer constructs a new informer for PostgresTeam type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredPostgresTeamInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AcidV1().PostgresTeams(namespace).List(context.TODO(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AcidV1().PostgresTeams(namespace).Watch(context.TODO(), options) + }, + }, + &acidzalandov1.PostgresTeam{}, + resyncPeriod, + indexers, + ) +} + +func (f *postgresTeamInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredPostgresTeamInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *postgresTeamInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&acidzalandov1.PostgresTeam{}, f.defaultInformer) +} + +func (f *postgresTeamInformer) Lister() v1.PostgresTeamLister { + return v1.NewPostgresTeamLister(f.Informer().GetIndexer()) +} diff --git a/pkg/generated/informers/externalversions/generic.go b/pkg/generated/informers/externalversions/generic.go index 562dec419..7dff3e4e5 100644 --- a/pkg/generated/informers/externalversions/generic.go +++ b/pkg/generated/informers/externalversions/generic.go @@ -59,6 +59,8 @@ func (f *genericInformer) Lister() cache.GenericLister { func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource) (GenericInformer, error) { switch resource { // Group=acid.zalan.do, Version=v1 + case v1.SchemeGroupVersion.WithResource("postgresteams"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Acid().V1().PostgresTeams().Informer()}, nil case v1.SchemeGroupVersion.WithResource("postgresqls"): return &genericInformer{resource: resource.GroupResource(), informer: f.Acid().V1().Postgresqls().Informer()}, nil diff --git a/pkg/generated/listers/acid.zalan.do/v1/expansion_generated.go b/pkg/generated/listers/acid.zalan.do/v1/expansion_generated.go index 1b96a7c76..81e829926 100644 --- a/pkg/generated/listers/acid.zalan.do/v1/expansion_generated.go +++ b/pkg/generated/listers/acid.zalan.do/v1/expansion_generated.go @@ -24,6 +24,14 @@ SOFTWARE. package v1 +// PostgresTeamListerExpansion allows custom methods to be added to +// PostgresTeamLister. +type PostgresTeamListerExpansion interface{} + +// PostgresTeamNamespaceListerExpansion allows custom methods to be added to +// PostgresTeamNamespaceLister. +type PostgresTeamNamespaceListerExpansion interface{} + // PostgresqlListerExpansion allows custom methods to be added to // PostgresqlLister. type PostgresqlListerExpansion interface{} diff --git a/pkg/generated/listers/acid.zalan.do/v1/postgresteam.go b/pkg/generated/listers/acid.zalan.do/v1/postgresteam.go new file mode 100644 index 000000000..102dae832 --- /dev/null +++ b/pkg/generated/listers/acid.zalan.do/v1/postgresteam.go @@ -0,0 +1,105 @@ +/* +Copyright 2020 Compose, Zalando SE + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// PostgresTeamLister helps list PostgresTeams. +// All objects returned here must be treated as read-only. +type PostgresTeamLister interface { + // List lists all PostgresTeams in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.PostgresTeam, err error) + // PostgresTeams returns an object that can list and get PostgresTeams. + PostgresTeams(namespace string) PostgresTeamNamespaceLister + PostgresTeamListerExpansion +} + +// postgresTeamLister implements the PostgresTeamLister interface. +type postgresTeamLister struct { + indexer cache.Indexer +} + +// NewPostgresTeamLister returns a new PostgresTeamLister. +func NewPostgresTeamLister(indexer cache.Indexer) PostgresTeamLister { + return &postgresTeamLister{indexer: indexer} +} + +// List lists all PostgresTeams in the indexer. +func (s *postgresTeamLister) List(selector labels.Selector) (ret []*v1.PostgresTeam, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.PostgresTeam)) + }) + return ret, err +} + +// PostgresTeams returns an object that can list and get PostgresTeams. +func (s *postgresTeamLister) PostgresTeams(namespace string) PostgresTeamNamespaceLister { + return postgresTeamNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// PostgresTeamNamespaceLister helps list and get PostgresTeams. +// All objects returned here must be treated as read-only. +type PostgresTeamNamespaceLister interface { + // List lists all PostgresTeams in the indexer for a given namespace. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.PostgresTeam, err error) + // Get retrieves the PostgresTeam from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1.PostgresTeam, error) + PostgresTeamNamespaceListerExpansion +} + +// postgresTeamNamespaceLister implements the PostgresTeamNamespaceLister +// interface. +type postgresTeamNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all PostgresTeams in the indexer for a given namespace. +func (s postgresTeamNamespaceLister) List(selector labels.Selector) (ret []*v1.PostgresTeam, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.PostgresTeam)) + }) + return ret, err +} + +// Get retrieves the PostgresTeam from the indexer for a given namespace and name. +func (s postgresTeamNamespaceLister) Get(name string) (*v1.PostgresTeam, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("postgresteam"), name) + } + return obj.(*v1.PostgresTeam), nil +} diff --git a/pkg/teams/postgres_team.go b/pkg/teams/postgres_team.go new file mode 100644 index 000000000..7fb725765 --- /dev/null +++ b/pkg/teams/postgres_team.go @@ -0,0 +1,118 @@ +package teams + +import ( + acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1" + "github.com/zalando/postgres-operator/pkg/util" +) + +// PostgresTeamMap is the operator's internal representation of all PostgresTeam CRDs +type PostgresTeamMap map[string]postgresTeamMembership + +type postgresTeamMembership struct { + AdditionalSuperuserTeams []string + AdditionalTeams []string + AdditionalMembers []string +} + +type teamHashSet map[string]map[string]struct{} + +func (ths *teamHashSet) has(team string) bool { + _, ok := (*ths)[team] + return ok +} + +func (ths *teamHashSet) add(newTeam string, newSet []string) { + set := make(map[string]struct{}) + if ths.has(newTeam) { + set = (*ths)[newTeam] + } + for _, t := range newSet { + set[t] = struct{}{} + } + (*ths)[newTeam] = set +} + +func (ths *teamHashSet) toMap() map[string][]string { + newTeamMap := make(map[string][]string) + for team, items := range *ths { + list := []string{} + for item := range items { + list = append(list, item) + } + newTeamMap[team] = list + } + return newTeamMap +} + +func (ths *teamHashSet) mergeCrdMap(crdTeamMap map[string][]string) { + for t, at := range crdTeamMap { + ths.add(t, at) + } +} + +func fetchTeams(teamset *map[string]struct{}, set teamHashSet) { + for key := range set { + (*teamset)[key] = struct{}{} + } +} + +func (ptm *PostgresTeamMap) fetchAdditionalTeams(team string, superuserTeams bool, transitive bool, exclude []string) []string { + + var teams []string + + if superuserTeams { + teams = (*ptm)[team].AdditionalSuperuserTeams + } else { + teams = (*ptm)[team].AdditionalTeams + } + if transitive { + exclude = append(exclude, team) + for _, additionalTeam := range teams { + if !(util.SliceContains(exclude, additionalTeam)) { + transitiveTeams := (*ptm).fetchAdditionalTeams(additionalTeam, superuserTeams, transitive, exclude) + for _, transitiveTeam := range transitiveTeams { + if !(util.SliceContains(exclude, transitiveTeam)) && !(util.SliceContains(teams, transitiveTeam)) { + teams = append(teams, transitiveTeam) + } + } + } + } + } + + return teams +} + +// GetAdditionalTeams function to retrieve list of additional teams +func (ptm *PostgresTeamMap) GetAdditionalTeams(team string, transitive bool) []string { + return ptm.fetchAdditionalTeams(team, false, transitive, []string{}) +} + +// GetAdditionalSuperuserTeams function to retrieve list of additional superuser teams +func (ptm *PostgresTeamMap) GetAdditionalSuperuserTeams(team string, transitive bool) []string { + return ptm.fetchAdditionalTeams(team, true, transitive, []string{}) +} + +// Load function to import data from PostgresTeam CRD +func (ptm *PostgresTeamMap) Load(pgTeams *acidv1.PostgresTeamList) { + superuserTeamSet := teamHashSet{} + teamSet := teamHashSet{} + teamMemberSet := teamHashSet{} + teamIDs := make(map[string]struct{}) + + for _, pgTeam := range pgTeams.Items { + superuserTeamSet.mergeCrdMap(pgTeam.Spec.AdditionalSuperuserTeams) + teamSet.mergeCrdMap(pgTeam.Spec.AdditionalTeams) + teamMemberSet.mergeCrdMap(pgTeam.Spec.AdditionalMembers) + } + fetchTeams(&teamIDs, superuserTeamSet) + fetchTeams(&teamIDs, teamSet) + fetchTeams(&teamIDs, teamMemberSet) + + for teamID := range teamIDs { + (*ptm)[teamID] = postgresTeamMembership{ + AdditionalSuperuserTeams: util.CoalesceStrArr(superuserTeamSet.toMap()[teamID], []string{}), + AdditionalTeams: util.CoalesceStrArr(teamSet.toMap()[teamID], []string{}), + AdditionalMembers: util.CoalesceStrArr(teamMemberSet.toMap()[teamID], []string{}), + } + } +} diff --git a/pkg/teams/postgres_team_test.go b/pkg/teams/postgres_team_test.go new file mode 100644 index 000000000..f8c3a21d8 --- /dev/null +++ b/pkg/teams/postgres_team_test.go @@ -0,0 +1,194 @@ +package teams + +import ( + "testing" + + acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1" + "github.com/zalando/postgres-operator/pkg/util" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +var ( + True = true + False = false + pgTeamList = acidv1.PostgresTeamList{ + TypeMeta: metav1.TypeMeta{ + Kind: "List", + APIVersion: "v1", + }, + Items: []acidv1.PostgresTeam{ + { + TypeMeta: metav1.TypeMeta{ + Kind: "PostgresTeam", + APIVersion: "acid.zalan.do/v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "teamAB", + }, + Spec: acidv1.PostgresTeamSpec{ + AdditionalSuperuserTeams: map[string][]string{"teamA": []string{"teamB", "team24x7"}, "teamB": []string{"teamA", "teamC", "team24x7"}}, + AdditionalTeams: map[string][]string{"teamA": []string{"teamC"}, "teamB": []string{}}, + AdditionalMembers: map[string][]string{"team24x7": []string{"optimusprime"}, "teamB": []string{"drno"}}, + }, + }, { + TypeMeta: metav1.TypeMeta{ + Kind: "PostgresTeam", + APIVersion: "acid.zalan.do/v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "teamC", + }, + Spec: acidv1.PostgresTeamSpec{ + AdditionalSuperuserTeams: map[string][]string{"teamC": []string{"team24x7"}}, + AdditionalTeams: map[string][]string{"teamA": []string{"teamC"}, "teamC": []string{"teamA", "teamB", "acid"}}, + AdditionalMembers: map[string][]string{"acid": []string{"batman"}}, + }, + }, + }, + } +) + +// PostgresTeamMap is the operator's internal representation of all PostgresTeam CRDs +func TestLoadingPostgresTeamCRD(t *testing.T) { + tests := []struct { + name string + crd acidv1.PostgresTeamList + ptm PostgresTeamMap + error string + }{ + { + "Check that CRD is imported correctly into the internal format", + pgTeamList, + PostgresTeamMap{ + "teamA": { + AdditionalSuperuserTeams: []string{"teamB", "team24x7"}, + AdditionalTeams: []string{"teamC"}, + AdditionalMembers: []string{}, + }, + "teamB": { + AdditionalSuperuserTeams: []string{"teamA", "teamC", "team24x7"}, + AdditionalTeams: []string{}, + AdditionalMembers: []string{"drno"}, + }, + "teamC": { + AdditionalSuperuserTeams: []string{"team24x7"}, + AdditionalTeams: []string{"teamA", "teamB", "acid"}, + AdditionalMembers: []string{}, + }, + "team24x7": { + AdditionalSuperuserTeams: []string{}, + AdditionalTeams: []string{}, + AdditionalMembers: []string{"optimusprime"}, + }, + "acid": { + AdditionalSuperuserTeams: []string{}, + AdditionalTeams: []string{}, + AdditionalMembers: []string{"batman"}, + }, + }, + "Mismatch between PostgresTeam CRD and internal map", + }, + } + + for _, tt := range tests { + postgresTeamMap := PostgresTeamMap{} + postgresTeamMap.Load(&tt.crd) + for team, ptmeamMembership := range postgresTeamMap { + if !util.IsEqualIgnoreOrder(ptmeamMembership.AdditionalSuperuserTeams, tt.ptm[team].AdditionalSuperuserTeams) { + t.Errorf("%s: %v: expected additional members %#v, got %#v", tt.name, tt.error, tt.ptm, postgresTeamMap) + } + if !util.IsEqualIgnoreOrder(ptmeamMembership.AdditionalTeams, tt.ptm[team].AdditionalTeams) { + t.Errorf("%s: %v: expected additional teams %#v, got %#v", tt.name, tt.error, tt.ptm, postgresTeamMap) + } + if !util.IsEqualIgnoreOrder(ptmeamMembership.AdditionalMembers, tt.ptm[team].AdditionalMembers) { + t.Errorf("%s: %v: expected additional superuser teams %#v, got %#v", tt.name, tt.error, tt.ptm, postgresTeamMap) + } + } + } +} + +// TestGetAdditionalTeams if returns teams with and without transitive dependencies +func TestGetAdditionalTeams(t *testing.T) { + tests := []struct { + name string + team string + transitive bool + teams []string + error string + }{ + { + "Check that additional teams are returned", + "teamA", + false, + []string{"teamC"}, + "GetAdditionalTeams returns wrong list", + }, + { + "Check that additional teams are returned incl. transitive teams", + "teamA", + true, + []string{"teamC", "teamB", "acid"}, + "GetAdditionalTeams returns wrong list", + }, + { + "Check that empty list is returned", + "teamB", + false, + []string{}, + "GetAdditionalTeams returns wrong list", + }, + } + + postgresTeamMap := PostgresTeamMap{} + postgresTeamMap.Load(&pgTeamList) + + for _, tt := range tests { + additionalTeams := postgresTeamMap.GetAdditionalTeams(tt.team, tt.transitive) + if !util.IsEqualIgnoreOrder(additionalTeams, tt.teams) { + t.Errorf("%s: %v: expected additional teams %#v, got %#v", tt.name, tt.error, tt.teams, additionalTeams) + } + } +} + +// TestGetAdditionalSuperuserTeams if returns teams with and without transitive dependencies +func TestGetAdditionalSuperuserTeams(t *testing.T) { + tests := []struct { + name string + team string + transitive bool + teams []string + error string + }{ + { + "Check that additional superuser teams are returned", + "teamA", + false, + []string{"teamB", "team24x7"}, + "GetAdditionalSuperuserTeams returns wrong list", + }, + { + "Check that additional superuser teams are returned incl. transitive superuser teams", + "teamA", + true, + []string{"teamB", "teamC", "team24x7"}, + "GetAdditionalSuperuserTeams returns wrong list", + }, + { + "Check that empty list is returned", + "team24x7", + false, + []string{}, + "GetAdditionalSuperuserTeams returns wrong list", + }, + } + + postgresTeamMap := PostgresTeamMap{} + postgresTeamMap.Load(&pgTeamList) + + for _, tt := range tests { + additionalTeams := postgresTeamMap.GetAdditionalSuperuserTeams(tt.team, tt.transitive) + if !util.IsEqualIgnoreOrder(additionalTeams, tt.teams) { + t.Errorf("%s: %v: expected additional teams %#v, got %#v", tt.name, tt.error, tt.teams, additionalTeams) + } + } +} diff --git a/pkg/util/config/config.go b/pkg/util/config/config.go index 35991248b..b6c583399 100644 --- a/pkg/util/config/config.go +++ b/pkg/util/config/config.go @@ -169,6 +169,8 @@ type Config struct { EnableTeamSuperuser bool `name:"enable_team_superuser" default:"false"` TeamAdminRole string `name:"team_admin_role" default:"admin"` EnableAdminRoleForUsers bool `name:"enable_admin_role_for_users" default:"true"` + EnablePostgresTeamCRD *bool `name:"enable_postgres_team_crd" default:"true"` + EnablePostgresTeamCRDSuperusers bool `name:"enable_postgres_team_crd_superusers" default:"false"` EnableMasterLoadBalancer bool `name:"enable_master_load_balancer" default:"true"` EnableReplicaLoadBalancer bool `name:"enable_replica_load_balancer" default:"false"` CustomServiceAnnotations map[string]string `name:"custom_service_annotations"` diff --git a/pkg/util/util.go b/pkg/util/util.go index abb9be01f..20e2915ba 100644 --- a/pkg/util/util.go +++ b/pkg/util/util.go @@ -10,7 +10,9 @@ import ( "fmt" "math/big" "math/rand" + "reflect" "regexp" + "sort" "strings" "time" @@ -134,6 +136,21 @@ func PrettyDiff(a, b interface{}) string { return strings.Join(Diff(a, b), "\n") } +// Compare two string slices while ignoring the order of elements +func IsEqualIgnoreOrder(a, b []string) bool { + if len(a) != len(b) { + return false + } + a_copy := make([]string, len(a)) + b_copy := make([]string, len(b)) + copy(a_copy, a) + copy(b_copy, b) + sort.Strings(a_copy) + sort.Strings(b_copy) + + return reflect.DeepEqual(a_copy, b_copy) +} + // SubstractStringSlices finds elements in a that are not in b and return them as a result slice. func SubstractStringSlices(a []string, b []string) (result []string, equal bool) { // Slices are assumed to contain unique elements only @@ -176,6 +193,20 @@ func FindNamedStringSubmatch(r *regexp.Regexp, s string) map[string]string { return res } +// SliceContains +func SliceContains(slice interface{}, item interface{}) bool { + s := reflect.ValueOf(slice) + if s.Kind() != reflect.Slice { + panic("Invalid data-type") + } + for i := 0; i < s.Len(); i++ { + if s.Index(i).Interface() == item { + return true + } + } + return false +} + // MapContains returns true if and only if haystack contains all the keys from the needle with matching corresponding values func MapContains(haystack, needle map[string]string) bool { if len(haystack) < len(needle) { diff --git a/pkg/util/util_test.go b/pkg/util/util_test.go index ee5b1ac39..c02d2c075 100644 --- a/pkg/util/util_test.go +++ b/pkg/util/util_test.go @@ -43,6 +43,17 @@ var prettyDiffTest = []struct { {[]int{1, 2, 3, 4}, []int{1, 2, 3, 4}, ""}, } +var isEqualIgnoreOrderTest = []struct { + inA []string + inB []string + outEqual bool +}{ + {[]string{"a", "b", "c"}, []string{"a", "b", "c"}, true}, + {[]string{"a", "b", "c"}, []string{"a", "c", "b"}, true}, + {[]string{"a", "b"}, []string{"a", "c", "b"}, false}, + {[]string{"a", "b", "c"}, []string{"a", "d", "c"}, false}, +} + var substractTest = []struct { inA []string inB []string @@ -53,6 +64,16 @@ var substractTest = []struct { {[]string{"a", "b", "c", "d"}, []string{"a", "bb", "c", "d"}, []string{"b"}, false}, } +var sliceContaintsTest = []struct { + slice []string + item string + out bool +}{ + {[]string{"a", "b", "c"}, "a", true}, + {[]string{"a", "b", "c"}, "d", false}, + {[]string{}, "d", false}, +} + var mapContaintsTest = []struct { inA map[string]string inB map[string]string @@ -136,6 +157,15 @@ func TestPrettyDiff(t *testing.T) { } } +func TestIsEqualIgnoreOrder(t *testing.T) { + for _, tt := range isEqualIgnoreOrderTest { + actualEqual := IsEqualIgnoreOrder(tt.inA, tt.inB) + if actualEqual != tt.outEqual { + t.Errorf("IsEqualIgnoreOrder expected: %t, got: %t", tt.outEqual, actualEqual) + } + } +} + func TestSubstractSlices(t *testing.T) { for _, tt := range substractTest { actualRes, actualEqual := SubstractStringSlices(tt.inA, tt.inB) @@ -160,6 +190,15 @@ func TestFindNamedStringSubmatch(t *testing.T) { } } +func TestSliceContains(t *testing.T) { + for _, tt := range sliceContaintsTest { + res := SliceContains(tt.slice, tt.item) + if res != tt.out { + t.Errorf("SliceContains expected: %#v, got: %#v", tt.out, res) + } + } +} + func TestMapContains(t *testing.T) { for _, tt := range mapContaintsTest { res := MapContains(tt.inA, tt.inB) From e10e0fec9e41d4f6c25aa6ef3f06f8115c82d14d Mon Sep 17 00:00:00 2001 From: Jakub Warczarek Date: Wed, 28 Oct 2020 10:56:50 +0100 Subject: [PATCH 11/12] Add support in UI for custom S3 endpoints for backups (#1152) * Support custom S3 endpoint for backups * Log info about AWS S3 endpoint during start up --- ui/operator_ui/main.py | 3 +++ ui/operator_ui/spiloutils.py | 6 ++++-- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/ui/operator_ui/main.py b/ui/operator_ui/main.py index dc2450b9f..d159bee2d 100644 --- a/ui/operator_ui/main.py +++ b/ui/operator_ui/main.py @@ -104,6 +104,8 @@ USE_AWS_INSTANCE_PROFILE = ( getenv('USE_AWS_INSTANCE_PROFILE', 'false').lower() != 'false' ) +AWS_ENDPOINT = getenv('AWS_ENDPOINT') + tokens.configure() tokens.manage('read-only') tokens.start() @@ -1055,6 +1057,7 @@ def main(port, secret_key, debug, clusters: list): logger.info(f'Tokeninfo URL: {TOKENINFO_URL}') logger.info(f'Use AWS instance_profile: {USE_AWS_INSTANCE_PROFILE}') logger.info(f'WAL-E S3 endpoint: {WALE_S3_ENDPOINT}') + logger.info(f'AWS S3 endpoint: {AWS_ENDPOINT}') if TARGET_NAMESPACE is None: @on_exception( diff --git a/ui/operator_ui/spiloutils.py b/ui/operator_ui/spiloutils.py index ea347a84d..7a71f6dab 100644 --- a/ui/operator_ui/spiloutils.py +++ b/ui/operator_ui/spiloutils.py @@ -16,6 +16,8 @@ logger = getLogger(__name__) session = Session() +AWS_ENDPOINT = getenv('AWS_ENDPOINT') + OPERATOR_CLUSTER_NAME_LABEL = getenv('OPERATOR_CLUSTER_NAME_LABEL', 'cluster-name') COMMON_CLUSTER_LABEL = getenv('COMMON_CLUSTER_LABEL', '{"application":"spilo"}') @@ -266,7 +268,7 @@ def read_stored_clusters(bucket, prefix, delimiter='/'): return [ prefix['Prefix'].split('/')[-2] for prefix in these( - client('s3').list_objects( + client('s3', endpoint_url=AWS_ENDPOINT).list_objects( Bucket=bucket, Delimiter=delimiter, Prefix=prefix, @@ -287,7 +289,7 @@ def read_versions( return [ 'base' if uid == 'wal' else uid for prefix in these( - client('s3').list_objects( + client('s3', endpoint_url=AWS_ENDPOINT).list_objects( Bucket=bucket, Delimiter=delimiter, Prefix=prefix + pg_cluster + delimiter, From 9a11e85d57392916ad2b9b80aabcda02ad7c7a09 Mon Sep 17 00:00:00 2001 From: Felix Kunde Date: Wed, 28 Oct 2020 17:51:37 +0100 Subject: [PATCH 12/12] disable PostgresTeam by default (#1186) * disable PostgresTeam by default * fix version in chart --- charts/postgres-operator/values-crd.yaml | 2 +- charts/postgres-operator/values.yaml | 4 ++-- docs/reference/operator_parameters.md | 2 +- docs/user.md | 5 +++++ manifests/configmap.yaml | 2 +- manifests/postgresql-operator-default-configuration.yaml | 2 +- pkg/apis/acid.zalan.do/v1/operator_configuration_type.go | 2 +- pkg/apis/acid.zalan.do/v1/zz_generated.deepcopy.go | 5 ----- pkg/controller/controller.go | 6 +++--- pkg/controller/operator_config.go | 2 +- pkg/util/config/config.go | 2 +- 11 files changed, 17 insertions(+), 17 deletions(-) diff --git a/charts/postgres-operator/values-crd.yaml b/charts/postgres-operator/values-crd.yaml index 52892c22c..6196c6fb2 100644 --- a/charts/postgres-operator/values-crd.yaml +++ b/charts/postgres-operator/values-crd.yaml @@ -257,7 +257,7 @@ configTeamsApi: # enable_admin_role_for_users: true # operator watches for PostgresTeam CRs to assign additional teams and members to clusters - enable_postgres_team_crd: true + enable_postgres_team_crd: false # toogle to create additional superuser teams from PostgresTeam CRs # enable_postgres_team_crd_superusers: "false" diff --git a/charts/postgres-operator/values.yaml b/charts/postgres-operator/values.yaml index ba5c7458c..231b0b9ac 100644 --- a/charts/postgres-operator/values.yaml +++ b/charts/postgres-operator/values.yaml @@ -1,7 +1,7 @@ image: registry: registry.opensource.zalan.do repository: acid/postgres-operator - tag: v1.5.0-61-ged2b3239-dirty + tag: v1.5.0 pullPolicy: "IfNotPresent" # Optionally specify an array of imagePullSecrets. @@ -249,7 +249,7 @@ configTeamsApi: # enable_admin_role_for_users: "true" # operator watches for PostgresTeam CRs to assign additional teams and members to clusters - enable_postgres_team_crd: "true" + enable_postgres_team_crd: "false" # toogle to create additional superuser teams from PostgresTeam CRs # enable_postgres_team_crd_superusers: "false" diff --git a/docs/reference/operator_parameters.md b/docs/reference/operator_parameters.md index bd12eb922..bd8c80d9c 100644 --- a/docs/reference/operator_parameters.md +++ b/docs/reference/operator_parameters.md @@ -635,7 +635,7 @@ key. * **enable_postgres_team_crd** toggle to make the operator watch for created or updated `PostgresTeam` CRDs and create roles for specified additional teams and members. - The default is `true`. + The default is `false`. * **enable_postgres_team_crd_superusers** in a `PostgresTeam` CRD additional superuser teams can assigned to teams that diff --git a/docs/user.md b/docs/user.md index db107dccb..8cacad0e8 100644 --- a/docs/user.md +++ b/docs/user.md @@ -330,6 +330,11 @@ spec: - "foo" ``` +Note, by default the `PostgresTeam` support is disabled in the configuration. +Switch `enable_postgres_team_crd` flag to `true` and the operator will start to +watch for this CRD. Make sure, the cluster role is up to date and contains a +section for [PostgresTeam](../manifests/operator-service-account-rbac.yaml#L30). + ## Prepared databases with roles and default privileges The `users` section in the manifests only allows for creating database roles diff --git a/manifests/configmap.yaml b/manifests/configmap.yaml index ce20dfa58..e59bfcea0 100644 --- a/manifests/configmap.yaml +++ b/manifests/configmap.yaml @@ -41,7 +41,7 @@ data: enable_master_load_balancer: "false" # enable_pod_antiaffinity: "false" # enable_pod_disruption_budget: "true" - # enable_postgres_team_crd: "true" + # enable_postgres_team_crd: "false" # enable_postgres_team_crd_superusers: "false" enable_replica_load_balancer: "false" # enable_shm_volume: "true" diff --git a/manifests/postgresql-operator-default-configuration.yaml b/manifests/postgresql-operator-default-configuration.yaml index 71408ac43..14acc4356 100644 --- a/manifests/postgresql-operator-default-configuration.yaml +++ b/manifests/postgresql-operator-default-configuration.yaml @@ -122,7 +122,7 @@ configuration: enable_database_access: true teams_api: # enable_admin_role_for_users: true - # enable_postgres_team_crd: true + # enable_postgres_team_crd: false # enable_postgres_team_crd_superusers: false enable_team_superuser: false enable_teams_api: false diff --git a/pkg/apis/acid.zalan.do/v1/operator_configuration_type.go b/pkg/apis/acid.zalan.do/v1/operator_configuration_type.go index 9dae0089b..a9abcf0ee 100644 --- a/pkg/apis/acid.zalan.do/v1/operator_configuration_type.go +++ b/pkg/apis/acid.zalan.do/v1/operator_configuration_type.go @@ -145,7 +145,7 @@ type TeamsAPIConfiguration struct { PamConfiguration string `json:"pam_configuration,omitempty"` ProtectedRoles []string `json:"protected_role_names,omitempty"` PostgresSuperuserTeams []string `json:"postgres_superuser_teams,omitempty"` - EnablePostgresTeamCRD *bool `json:"enable_postgres_team_crd,omitempty"` + EnablePostgresTeamCRD bool `json:"enable_postgres_team_crd,omitempty"` EnablePostgresTeamCRDSuperusers bool `json:"enable_postgres_team_crd_superusers,omitempty"` } diff --git a/pkg/apis/acid.zalan.do/v1/zz_generated.deepcopy.go b/pkg/apis/acid.zalan.do/v1/zz_generated.deepcopy.go index 80a00f491..364b3e161 100644 --- a/pkg/apis/acid.zalan.do/v1/zz_generated.deepcopy.go +++ b/pkg/apis/acid.zalan.do/v1/zz_generated.deepcopy.go @@ -1114,11 +1114,6 @@ func (in *TeamsAPIConfiguration) DeepCopyInto(out *TeamsAPIConfiguration) { *out = make([]string, len(*in)) copy(*out, *in) } - if in.EnablePostgresTeamCRD != nil { - in, out := &in.EnablePostgresTeamCRD, &out.EnablePostgresTeamCRD - *out = new(bool) - **out = **in - } return } diff --git a/pkg/controller/controller.go b/pkg/controller/controller.go index 2169beb76..0c29275e6 100644 --- a/pkg/controller/controller.go +++ b/pkg/controller/controller.go @@ -329,7 +329,7 @@ func (c *Controller) initController() { c.initSharedInformers() - if c.opConfig.EnablePostgresTeamCRD != nil && *c.opConfig.EnablePostgresTeamCRD { + if c.opConfig.EnablePostgresTeamCRD { c.loadPostgresTeams() } else { c.pgTeamMap = teams.PostgresTeamMap{} @@ -380,7 +380,7 @@ func (c *Controller) initSharedInformers() { }) // PostgresTeams - if c.opConfig.EnablePostgresTeamCRD != nil && *c.opConfig.EnablePostgresTeamCRD { + if c.opConfig.EnablePostgresTeamCRD { c.postgresTeamInformer = acidv1informer.NewPostgresTeamInformer( c.KubeClient.AcidV1ClientSet, c.opConfig.WatchedNamespace, @@ -453,7 +453,7 @@ func (c *Controller) Run(stopCh <-chan struct{}, wg *sync.WaitGroup) { go c.apiserver.Run(stopCh, wg) go c.kubeNodesInformer(stopCh, wg) - if c.opConfig.EnablePostgresTeamCRD != nil && *c.opConfig.EnablePostgresTeamCRD { + if c.opConfig.EnablePostgresTeamCRD { go c.runPostgresTeamInformer(stopCh, wg) } diff --git a/pkg/controller/operator_config.go b/pkg/controller/operator_config.go index 3ad09ad28..9b2713da8 100644 --- a/pkg/controller/operator_config.go +++ b/pkg/controller/operator_config.go @@ -163,7 +163,7 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur result.PamConfiguration = util.Coalesce(fromCRD.TeamsAPI.PamConfiguration, "https://info.example.com/oauth2/tokeninfo?access_token= uid realm=/employees") result.ProtectedRoles = util.CoalesceStrArr(fromCRD.TeamsAPI.ProtectedRoles, []string{"admin"}) result.PostgresSuperuserTeams = fromCRD.TeamsAPI.PostgresSuperuserTeams - result.EnablePostgresTeamCRD = util.CoalesceBool(fromCRD.TeamsAPI.EnablePostgresTeamCRD, util.True()) + result.EnablePostgresTeamCRD = fromCRD.TeamsAPI.EnablePostgresTeamCRD result.EnablePostgresTeamCRDSuperusers = fromCRD.TeamsAPI.EnablePostgresTeamCRDSuperusers // logging REST API config diff --git a/pkg/util/config/config.go b/pkg/util/config/config.go index b6c583399..47a120227 100644 --- a/pkg/util/config/config.go +++ b/pkg/util/config/config.go @@ -169,7 +169,7 @@ type Config struct { EnableTeamSuperuser bool `name:"enable_team_superuser" default:"false"` TeamAdminRole string `name:"team_admin_role" default:"admin"` EnableAdminRoleForUsers bool `name:"enable_admin_role_for_users" default:"true"` - EnablePostgresTeamCRD *bool `name:"enable_postgres_team_crd" default:"true"` + EnablePostgresTeamCRD bool `name:"enable_postgres_team_crd" default:"false"` EnablePostgresTeamCRDSuperusers bool `name:"enable_postgres_team_crd_superusers" default:"false"` EnableMasterLoadBalancer bool `name:"enable_master_load_balancer" default:"true"` EnableReplicaLoadBalancer bool `name:"enable_replica_load_balancer" default:"false"`