Merge branch 'master' into feature/actions
This commit is contained in:
commit
87eee1ed72
|
|
@ -1,4 +1,5 @@
|
|||
dist: trusty
|
||||
sudo: false
|
||||
|
||||
branches:
|
||||
only:
|
||||
|
|
@ -7,7 +8,7 @@ branches:
|
|||
language: go
|
||||
|
||||
go:
|
||||
- 1.9
|
||||
- "1.10.x"
|
||||
|
||||
before_install:
|
||||
- go get github.com/Masterminds/glide
|
||||
|
|
|
|||
|
|
@ -0,0 +1,2 @@
|
|||
# global owners
|
||||
* @alexeyklyukin @erthalion @zerg-junior @Jan-M @CyberDem0n @avaczi
|
||||
|
|
@ -1,2 +1,3 @@
|
|||
Murat Kabilov <murat.kabilov@zalando.de>
|
||||
Oleksii Kliukin <oleksii.kliukin@zalando.de>
|
||||
Dmitrii Dolgov <dmitrii.dolgov@zalando.de>
|
||||
Sergey Dudoladov <sergey.dudoladov@zalando.de>
|
||||
|
|
|
|||
|
|
@ -3,6 +3,7 @@
|
|||
[](https://travis-ci.org/zalando-incubator/postgres-operator)
|
||||
[](https://coveralls.io/github/zalando-incubator/postgres-operator)
|
||||
[](https://goreportcard.com/report/github.com/zalando-incubator/postgres-operator)
|
||||
[](https://godoc.org/github.com/zalando-incubator/postgres-operator)
|
||||
|
||||
## Introduction
|
||||
|
||||
|
|
|
|||
20
cmd/main.go
20
cmd/main.go
|
|
@ -7,6 +7,7 @@ import (
|
|||
"os/signal"
|
||||
"sync"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/zalando-incubator/postgres-operator/pkg/controller"
|
||||
"github.com/zalando-incubator/postgres-operator/pkg/spec"
|
||||
|
|
@ -20,6 +21,14 @@ var (
|
|||
config spec.ControllerConfig
|
||||
)
|
||||
|
||||
func mustParseDuration(d string) time.Duration {
|
||||
duration, err := time.ParseDuration(d)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return duration
|
||||
}
|
||||
|
||||
func init() {
|
||||
flag.StringVar(&kubeConfigFile, "kubeconfig", "", "Path to kubeconfig file with authorization and master location information.")
|
||||
flag.BoolVar(&outOfCluster, "outofcluster", false, "Whether the operator runs in- our outside of the Kubernetes cluster.")
|
||||
|
|
@ -38,6 +47,17 @@ func init() {
|
|||
log.Printf("Fully qualified configmap name: %v", config.ConfigMapName)
|
||||
|
||||
}
|
||||
if crd_interval := os.Getenv("CRD_READY_WAIT_INTERVAL"); crd_interval != "" {
|
||||
config.CRDReadyWaitInterval = mustParseDuration(crd_interval)
|
||||
} else {
|
||||
config.CRDReadyWaitInterval = 4 * time.Second
|
||||
}
|
||||
|
||||
if crd_timeout := os.Getenv("CRD_READY_WAIT_TIMEOUT"); crd_timeout != "" {
|
||||
config.CRDReadyWaitTimeout = mustParseDuration(crd_timeout)
|
||||
} else {
|
||||
config.CRDReadyWaitTimeout = 30 * time.Second
|
||||
}
|
||||
}
|
||||
|
||||
func main() {
|
||||
|
|
|
|||
|
|
@ -90,13 +90,13 @@ namespace. The operator performs **no** further syncing of this account.
|
|||
|
||||
## Role-based access control for the operator
|
||||
|
||||
The `manifests/operator-rbac.yaml` defines cluster roles and bindings needed
|
||||
The `manifests/operator-service-account-rbac.yaml` defines cluster roles and bindings needed
|
||||
for the operator to function under access control restrictions. To deploy the
|
||||
operator with this RBAC policy use:
|
||||
|
||||
```bash
|
||||
$ kubectl create -f manifests/configmap.yaml
|
||||
$ kubectl create -f manifests/operator-rbac.yaml
|
||||
$ kubectl create -f manifests/operator-service-account-rbac.yaml
|
||||
$ kubectl create -f manifests/postgres-operator.yaml
|
||||
$ kubectl create -f manifests/minimal-postgres-manifest.yaml
|
||||
```
|
||||
|
|
@ -199,3 +199,24 @@ cluster manifest. In the case any of these variables are omitted from the
|
|||
manifest, the operator configmap's settings `enable_master_load_balancer` and
|
||||
`enable_replica_load_balancer` apply. Note that the operator settings affect
|
||||
all Postgresql services running in a namespace watched by the operator.
|
||||
|
||||
## Running periodic 'autorepair' scans of Kubernetes objects
|
||||
|
||||
The Postgres operator periodically scans all Kubernetes objects belonging to
|
||||
each cluster and repairs all discrepancies between them and the definitions
|
||||
generated from the current cluster manifest. There are two types of scans: a
|
||||
`sync scan`, running every `resync_period` seconds for every cluster, and the
|
||||
`repair scan`, coming every `repair_period` only for those clusters that didn't
|
||||
report success as a result of the last operation applied to them.
|
||||
|
||||
## Postgres roles supported by the operator
|
||||
|
||||
The operator is capable of maintaining roles of multiple kinds within a Postgres database cluster:
|
||||
|
||||
1. **System roles** are roles necessary for the proper work of Postgres itself such as a replication role or the initial superuser role. The operator delegates creating such roles to Patroni and only establishes relevant secrets.
|
||||
|
||||
2. **Infrastructure roles** are roles for processes originating from external systems, e.g. monitoring robots. The operator creates such roles in all PG clusters it manages assuming k8s secrets with the relevant credentials exist beforehand.
|
||||
|
||||
3. **Per-cluster robot users** are also roles for processes originating from external systems but defined for an individual Postgres cluster in its manifest. A typical example is a role for connections from an application that uses the database.
|
||||
|
||||
4. **Human users** originate from the Teams API that returns list of the team members given a team id. Operator differentiates between (a) product teams that own a particular Postgres cluster and are granted admin rights to maintain it, and (b) Postgres superuser teams that get the superuser access to all PG databases running in a k8s cluster for the purposes of maintaining and troubleshooting.
|
||||
|
|
@ -151,6 +151,20 @@ minikube. The following steps will get you the docker image built and deployed.
|
|||
$ sed -e "s/\(image\:.*\:\).*$/\1$TAG/" manifests/postgres-operator.yaml|kubectl --context minikube create -f -
|
||||
```
|
||||
|
||||
# Code generation
|
||||
|
||||
The operator employs k8s-provided code generation to obtain deep copy methods and Kubernetes-like APIs for its custom resource definitons, namely the Postgres CRD and the operator CRD. The usage of the code generation follows conventions from the k8s community. Relevant scripts live in the `hack` directory: the `update-codegen.sh` triggers code generation for the APIs defined in `pkg/apis/acid.zalan.do/`,
|
||||
the `verify-codegen.sh` checks if the generated code is up-to-date (to be used within CI). The `/pkg/generated/` contains the resultant code. To make these scripts work, you may need to `export GOPATH=$(go env GOPATH)`
|
||||
|
||||
References for code generation are:
|
||||
* [Relevant pull request](https://github.com/zalando-incubator/postgres-operator/pull/369)
|
||||
See comments there for minor issues that can sometimes broke the generation process.
|
||||
* [Code generator source code](https://github.com/kubernetes/code-generator)
|
||||
* [Code Generation for CustomResources](https://blog.openshift.com/kubernetes-deep-dive-code-generation-customresources/) - intro post on the topic
|
||||
* Code generation in [Prometheus](https://github.com/coreos/prometheus-operator) and [etcd](https://github.com/coreos/etcd-operator) operators
|
||||
|
||||
To debug the generated API locally, use the [kubectl proxy](https://kubernetes.io/docs/tasks/access-kubernetes-api/http-proxy-access-api/) and `kubectl --v=8` log level to display contents of HTTP requests (run the operator itself with `--v=8` to log all REST API requests). To attach a debugger to the operator, use the `-outofcluster` option to run the operator locally on the developer's laptop (and not in a docker container).
|
||||
|
||||
# Debugging the operator
|
||||
|
||||
There is a web interface in the operator to observe its internal state. The
|
||||
|
|
|
|||
|
|
@ -89,7 +89,14 @@ Those are parameters grouped directly under the `spec` key in the manifest.
|
|||
examples](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/)
|
||||
for details on tolerations and possible values of those keys. When set, this
|
||||
value overrides the `pod_toleration` setting from the operator. Optional.
|
||||
|
||||
|
||||
* **podPriorityClassName**
|
||||
a name of the [priority
|
||||
class](https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass)
|
||||
that should be assigned to the cluster pods. When not specified, the value
|
||||
is taken from the `pod_priority_class_name` operator parameter, if not set
|
||||
then the default priority class is taken. The priority class itself must be defined in advance.
|
||||
|
||||
## Postgres parameters
|
||||
|
||||
Those parameters are grouped under the `postgresql` top-level key.
|
||||
|
|
@ -213,3 +220,21 @@ properties of the persistent storage that stores postgres data.
|
|||
See [Kubernetes
|
||||
documentation](https://kubernetes.io/docs/concepts/storage/storage-classes/)
|
||||
for the details on storage classes. Optional.
|
||||
|
||||
### Sidecar definitions
|
||||
|
||||
Those parameters are defined under the `sidecars` key. They consist of a list
|
||||
of dictionaries, each defining one sidecar (an extra container running
|
||||
along the main postgres container on the same pod). The following keys can be
|
||||
defined in the sidecar dictionary:
|
||||
|
||||
* **name**
|
||||
name of the sidecar. Required.
|
||||
|
||||
* **image**
|
||||
docker image of the sidecar. Required.
|
||||
|
||||
* **env**
|
||||
a dictionary of environment variables. Use usual Kubernetes definition
|
||||
(https://kubernetes.io/docs/tasks/inject-data-application/environment-variable-expose-pod-information/)
|
||||
for environment variables. Optional.
|
||||
|
|
|
|||
|
|
@ -48,3 +48,11 @@ The following environment variables are accepted by the operator:
|
|||
* **SCALYR_API_KEY**
|
||||
the value of the Scalyr API key to supply to the pods. Overrides the
|
||||
`scalyr_api_key` operator parameter.
|
||||
|
||||
* **CRD_READY_WAIT_TIMEOUT**
|
||||
defines the timeout for the complete postgres CRD creation. When not set
|
||||
default is 30s.
|
||||
|
||||
* **CRD_READY_WAIT_INTERVAL**
|
||||
defines the interval between consecutive attempts waiting for the postgres
|
||||
CRD to be created. The default is 5s.
|
||||
|
|
|
|||
|
|
@ -1,9 +1,54 @@
|
|||
There are two mutually-exclusive methods to set the Postgres Operator
|
||||
configuration.
|
||||
|
||||
Postgres operator is configured via a ConfigMap defined by the
|
||||
`CONFIG_MAP_NAME` environment variable. Variable names are underscore-separated
|
||||
words.
|
||||
* ConfigMaps-based, the legacy one. The configuration is supplied in a
|
||||
key-value configmap, defined by the `CONFIG_MAP_NAME` environment variable.
|
||||
Non-scalar values, i.e. lists or maps, are encoded in the value strings using
|
||||
the comma-based syntax for lists and coma-separated `key:value` syntax for
|
||||
maps. String values containing ':' should be enclosed in quotes. The
|
||||
configuration is flat, parameter group names below are not reflected in the
|
||||
configuration structure. There is an
|
||||
[example](https://github.com/zalando-incubator/postgres-operator/blob/master/manifests/configmap.yaml)
|
||||
|
||||
* CRD-based configuration. The configuration is stored in the custom YAML
|
||||
manifest, an instance of the custom resource definition (CRD) called
|
||||
`OperatorConfiguration`. This CRD is registered by the operator
|
||||
during the start when `POSTGRES_OPERATOR_CONFIGURATION_OBJECT` variable is
|
||||
set to a non-empty value. The CRD-based configuration is a regular YAML
|
||||
document; non-scalar keys are simply represented in the usual YAML way. The
|
||||
usage of the CRD-based configuration is triggered by setting the
|
||||
`POSTGRES_OPERATOR_CONFIGURATION_OBJECT` variable, which should point to the
|
||||
`postgresql-operator-configuration` object name in the operators namespace.
|
||||
There are no default values built-in in the operator, each parameter that is
|
||||
not supplied in the configuration receives an empty value. In order to
|
||||
create your own configuration just copy the [default
|
||||
one](https://github.com/zalando-incubator/postgres-operator/blob/master/manifests/postgresql-operator-default-configuration.yaml)
|
||||
and change it.
|
||||
|
||||
CRD-based configuration is more natural and powerful then the one based on
|
||||
ConfigMaps and should be used unless there is a compatibility requirement to
|
||||
use an already existing configuration. Even in that case, it should be rather
|
||||
straightforward to convert the configmap based configuration into the CRD-based
|
||||
one and restart the operator. The ConfigMaps-based configuration will be
|
||||
deprecated and subsequently removed in future releases.
|
||||
|
||||
Note that for the CRD-based configuration configuration groups below correspond
|
||||
to the non-leaf keys in the target YAML (i.e. for the Kubernetes resources the
|
||||
key is `kubernetes`). The key is mentioned alongside the group description. The
|
||||
ConfigMap-based configuration is flat and does not allow non-leaf keys.
|
||||
|
||||
Since in the CRD-based case the operator needs to create a CRD first, which is
|
||||
controlled by the `resource_check_interval` and `resource_check_timeout`
|
||||
parameters, those parameters have no effect and are replaced by the
|
||||
`CRD_READY_WAIT_INTERVAL` and `CRD_READY_WAIT_TIMEOUT` environment variables.
|
||||
They will be deprecated and removed in the future.
|
||||
|
||||
Variable names are underscore-separated words.
|
||||
|
||||
## General
|
||||
|
||||
Those are top-level keys, containing both leaf keys and groups.
|
||||
|
||||
* **etcd_host**
|
||||
Etcd connection string for Patroni defined as `host:port`. Not required when
|
||||
Patroni native Kubernetes support is used. The default is empty (use
|
||||
|
|
@ -15,6 +60,11 @@ words.
|
|||
your own Spilo image from the [github
|
||||
repository](https://github.com/zalando/spilo).
|
||||
|
||||
* **sidecar_docker_images**
|
||||
a map of sidecar names to docker images for the containers to run alongside
|
||||
Spilo. In case of the name conflict with the definition in the cluster
|
||||
manifest the cluster-specific one is preferred.
|
||||
|
||||
* **workers**
|
||||
number of working routines the operator spawns to process requests to
|
||||
create/update/delete/sync clusters concurrently. The default is `4`.
|
||||
|
|
@ -30,9 +80,16 @@ words.
|
|||
are applied. The default is `-1`.
|
||||
|
||||
* **resync_period**
|
||||
period between consecutive sync requests. The default is `5m`.
|
||||
period between consecutive sync requests. The default is `30m`.
|
||||
|
||||
* **repair_period**
|
||||
period between consecutive repair requests. The default is `5m`.
|
||||
|
||||
## Postgres users
|
||||
|
||||
Parameters describing Postgres users. In a CRD-configuration, they are grouped
|
||||
under the `users` key.
|
||||
|
||||
* **super_username**
|
||||
postgres `superuser` name to be created by `initdb`. The default is
|
||||
`postgres`.
|
||||
|
|
@ -42,6 +99,11 @@ words.
|
|||
`standby`.
|
||||
|
||||
## Kubernetes resources
|
||||
|
||||
Parameters to configure cluster-related Kubernetes objects created by the
|
||||
operator, as well as some timeouts associated with them. In a CRD-based
|
||||
configuration they are grouped under the `kubernetes` key.
|
||||
|
||||
* **pod_service_account_name**
|
||||
service account used by Patroni running on individual Pods to communicate
|
||||
with the operator. Required even if native Kubernetes support in Patroni is
|
||||
|
|
@ -51,11 +113,18 @@ words.
|
|||
* **pod_service_account_definition**
|
||||
The operator tries to create the pod Service Account in the namespace that
|
||||
doesn't define such an account using the YAML definition provided by this
|
||||
option. If not defined, a simple definition that contains only the name will
|
||||
option. If not defined, a simple definition that contains only the name will be used. The default is empty.
|
||||
|
||||
* **pod_service_account_role_binding_definition**
|
||||
This definition must bind pod service account to a role with permission
|
||||
sufficient for the pods to start and for Patroni to access k8s endpoints;
|
||||
service account on its own lacks any such rights starting with k8s v1.8. If
|
||||
not excplicitly defined by the user, a simple definition that binds the
|
||||
account to the operator's own 'zalando-postgres-operator' cluster role will
|
||||
be used. The default is empty.
|
||||
|
||||
* **pod_terminate_grace_period**
|
||||
Patroni pods are [terminated
|
||||
Postgres pods are [terminated
|
||||
forcefully](https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods)
|
||||
after this timeout. The default is `5m`.
|
||||
|
||||
|
|
@ -87,7 +156,7 @@ words.
|
|||
name of the secret containing infrastructure roles names and passwords.
|
||||
|
||||
* **pod_role_label**
|
||||
name of the label assigned to the postgres pods (and services/endpoints) by
|
||||
name of the label assigned to the Postgres pods (and services/endpoints) by
|
||||
the operator. The default is `spilo-role`.
|
||||
|
||||
* **cluster_labels**
|
||||
|
|
@ -104,7 +173,7 @@ words.
|
|||
considered `ready`. The operator uses values of those labels to detect the
|
||||
start of the Kubernetes cluster upgrade procedure and move master pods off
|
||||
the nodes to be decommissioned. When the set is not empty, the operator also
|
||||
assigns the `Affinity` clause to the postgres pods to be scheduled only on
|
||||
assigns the `Affinity` clause to the Postgres pods to be scheduled only on
|
||||
`ready` nodes. The default is empty.
|
||||
|
||||
* **toleration**
|
||||
|
|
@ -120,8 +189,20 @@ words.
|
|||
All variables from that ConfigMap are injected to the pod's environment, on
|
||||
conflicts they are overridden by the environment variables generated by the
|
||||
operator. The default is empty.
|
||||
|
||||
* **pod_priority_class_name**
|
||||
a name of the [priority
|
||||
class](https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass)
|
||||
that should be assigned to the Postgres pods. The priority class itself must be defined in advance.
|
||||
Default is empty (use the default priority class).
|
||||
|
||||
|
||||
## Kubernetes resource requests
|
||||
|
||||
This group allows you to configure resource requests for the Postgres pods.
|
||||
Those parameters are grouped under the `postgres_pod_resources` key in a
|
||||
CRD-based configuration.
|
||||
|
||||
* **default_cpu_request**
|
||||
CPU request value for the postgres containers, unless overridden by
|
||||
cluster-specific settings. The default is `100m`.
|
||||
|
|
@ -139,6 +220,13 @@ words.
|
|||
settings. The default is `1Gi`.
|
||||
|
||||
## Operator timeouts
|
||||
|
||||
This set of parameters define various timeouts related to some operator
|
||||
actions, affecting pod operations and CRD creation. In the CRD-based
|
||||
configuration `resource_check_interval` and `resource_check_timeout` have no
|
||||
effect, and the parameters are grouped under the `timeouts` key in the
|
||||
CRD-based configuration.
|
||||
|
||||
* **resource_check_interval**
|
||||
interval to wait between consecutive attempts to check for the presence of
|
||||
some Kubernetes resource (i.e. `StatefulSet` or `PodDisruptionBudget`). The
|
||||
|
|
@ -155,8 +243,8 @@ words.
|
|||
possible issues faster. The default is `10m`.
|
||||
|
||||
* **pod_deletion_wait_timeout**
|
||||
timeout when waiting for the pods to be deleted when removing the cluster or
|
||||
recreating pods. The default is `10m`.
|
||||
timeout when waiting for the Postgres pods to be deleted when removing the
|
||||
cluster or recreating pods. The default is `10m`.
|
||||
|
||||
* **ready_wait_interval**
|
||||
the interval between consecutive attempts waiting for the postgres CRD to be
|
||||
|
|
@ -166,6 +254,10 @@ words.
|
|||
the timeout for the complete postgres CRD creation. The default is `30s`.
|
||||
|
||||
## Load balancer related options
|
||||
|
||||
Those options affect the behavior of load balancers created by the operator.
|
||||
In the CRD-based configuration they are grouped under the `load_balancer` key.
|
||||
|
||||
* **db_hosted_zone**
|
||||
DNS zone for the cluster DNS name when the load balancer is configured for
|
||||
the cluster. Only used when combined with
|
||||
|
|
@ -197,22 +289,35 @@ words.
|
|||
No other placeholders are allowed.
|
||||
|
||||
## AWS or GSC interaction
|
||||
|
||||
The options in this group configure operator interactions with non-Kubernetes
|
||||
objects from AWS or Google cloud. They have no effect unless you are using
|
||||
either. In the CRD-based configuration those options are grouped under the
|
||||
`aws_or_gcp` key.
|
||||
|
||||
* **wal_s3_bucket**
|
||||
S3 bucket to use for shipping WAL segments with WAL-E. A bucket has to be
|
||||
present and accessible by Patroni managed pods. At the moment, supported
|
||||
services by Spilo are S3 and GCS. The default is empty.
|
||||
present and accessible by Postgres pods. At the moment, supported services by
|
||||
Spilo are S3 and GCS. The default is empty.
|
||||
|
||||
* **log_s3_bucket**
|
||||
S3 bucket to use for shipping postgres daily logs. Works only with S3 on AWS.
|
||||
The bucket has to be present and accessible by Patroni managed pods. At the
|
||||
moment Spilo does not yet support this. The default is empty.
|
||||
The bucket has to be present and accessible by Postgres pods. At the moment
|
||||
Spilo does not yet support this. The default is empty.
|
||||
|
||||
* **kube_iam_role**
|
||||
AWS IAM role to supply in the `iam.amazonaws.com/role` annotation of Patroni
|
||||
AWS IAM role to supply in the `iam.amazonaws.com/role` annotation of Postgres
|
||||
pods. Only used when combined with
|
||||
[kube2iam](https://github.com/jtblin/kube2iam) project on AWS. The default is empty.
|
||||
[kube2iam](https://github.com/jtblin/kube2iam) project on AWS. The default is
|
||||
empty.
|
||||
|
||||
* **aws_region**
|
||||
AWS region used to store ESB volumes. The default is `eu-central-1`.
|
||||
|
||||
## Debugging the operator
|
||||
|
||||
Options to aid debugging of the operator itself. Grouped under the `debug` key.
|
||||
|
||||
* **debug_logging**
|
||||
boolean parameter that toggles verbose debug logs from the operator. The
|
||||
default is `true`.
|
||||
|
|
@ -222,7 +327,12 @@ words.
|
|||
access to the postgres database, i.e. creating databases and users. The default
|
||||
is `true`.
|
||||
|
||||
### Automatic creation of human users in the database
|
||||
## Automatic creation of human users in the database
|
||||
|
||||
Options to automate creation of human users with the aid of the teams API
|
||||
service. In the CRD-based configuration those are grouped under the `teams_api`
|
||||
key.
|
||||
|
||||
* **enable_teams_api**
|
||||
boolean parameter that toggles usage of the Teams API by the operator.
|
||||
The default is `true`.
|
||||
|
|
@ -267,7 +377,13 @@ words.
|
|||
List of roles that cannot be overwritten by an application, team or
|
||||
infrastructure role. The default is `admin`.
|
||||
|
||||
* **postgres_superuser_teams**
|
||||
List of teams which members need the superuser role in each PG database cluster to administer Postgres and maintain infrastructure built around it. The default is `postgres_superuser`.
|
||||
|
||||
## Logging and REST API
|
||||
|
||||
Parameters affecting logging and REST API listener. In the CRD-based configuration they are grouped under the `logging_rest_api` key.
|
||||
|
||||
* **api_port**
|
||||
REST API listener listens to this port. The default is `8080`.
|
||||
|
||||
|
|
@ -278,6 +394,11 @@ words.
|
|||
number of entries in the cluster history ring buffer. The default is `1000`.
|
||||
|
||||
## Scalyr options
|
||||
|
||||
Those parameters define the resource requests/limits and properties of the
|
||||
scalyr sidecar. In the CRD-based configuration they are grouped under the
|
||||
`scalyr` key.
|
||||
|
||||
* **scalyr_api_key**
|
||||
API key for the Scalyr sidecar. The default is empty.
|
||||
|
||||
|
|
|
|||
32
docs/user.md
32
docs/user.md
|
|
@ -241,6 +241,38 @@ metadata:
|
|||
Note that timezone required for `timestamp` (offset relative to UTC, see RFC
|
||||
3339 section 5.6)
|
||||
|
||||
|
||||
## Sidecar Support
|
||||
|
||||
Each cluster can specify arbitrary sidecars to run. These containers could be used for
|
||||
log aggregation, monitoring, backups or other tasks. A sidecar can be specified like this:
|
||||
|
||||
```yaml
|
||||
apiVersion: "acid.zalan.do/v1"
|
||||
kind: postgresql
|
||||
|
||||
metadata:
|
||||
name: acid-minimal-cluster
|
||||
spec:
|
||||
...
|
||||
sidecars:
|
||||
- name: "container-name"
|
||||
image: "company/image:tag"
|
||||
env:
|
||||
- name: "ENV_VAR_NAME"
|
||||
value: "any-k8s-env-things"
|
||||
```
|
||||
|
||||
In addition to any environment variables you specify, the following environment variables
|
||||
are always passed to sidecars:
|
||||
|
||||
- `POD_NAME` - field reference to `metadata.name`
|
||||
- `POD_NAMESPACE` - field reference to `metadata.namespace`
|
||||
- `POSTGRES_USER` - the superuser that can be used to connect to the database
|
||||
- `POSTGRES_PASSWORD` - the password for the superuser
|
||||
|
||||
The PostgreSQL volume is shared with sidecars and is mounted at `/home/postgres/pgdata`.
|
||||
|
||||
## Increase volume size
|
||||
|
||||
PostgreSQL operator supports statefulset volume resize if you're using the
|
||||
|
|
|
|||
|
|
@ -1,8 +1,8 @@
|
|||
hash: 688e15147f1217da635b83ee33f20a3741a400a493787d79992d1650f6e4c514
|
||||
updated: 2018-05-17T10:46:49.090929+02:00
|
||||
hash: bd5394acf101795aac9da20c104a57344a6c4fd71080bf1b16845367e6360578
|
||||
updated: 2018-08-14T15:18:08.144086+02:00
|
||||
imports:
|
||||
- name: github.com/aws/aws-sdk-go
|
||||
version: ee7b4b1162937cba700de23bd90acb742982e626
|
||||
version: f831d5a0822a1ad72420ab18c6269bca1ddaf490
|
||||
subpackages:
|
||||
- aws
|
||||
- aws/awserr
|
||||
|
|
@ -14,6 +14,7 @@ imports:
|
|||
- aws/credentials/ec2rolecreds
|
||||
- aws/credentials/endpointcreds
|
||||
- aws/credentials/stscreds
|
||||
- aws/csm
|
||||
- aws/defaults
|
||||
- aws/ec2metadata
|
||||
- aws/endpoints
|
||||
|
|
@ -22,6 +23,7 @@ imports:
|
|||
- aws/signer/v4
|
||||
- internal/sdkio
|
||||
- internal/sdkrand
|
||||
- internal/sdkuri
|
||||
- internal/shareddefaults
|
||||
- private/protocol
|
||||
- private/protocol/ec2query
|
||||
|
|
@ -32,40 +34,17 @@ imports:
|
|||
- service/ec2
|
||||
- service/sts
|
||||
- name: github.com/davecgh/go-spew
|
||||
version: 5215b55f46b2b919f50a1df0eaa5886afe4e3b3d
|
||||
version: 782f4967f2dc4564575ca782fe2d04090b5faca8
|
||||
subpackages:
|
||||
- spew
|
||||
- name: github.com/docker/distribution
|
||||
version: cd27f179f2c10c5d300e6d09025b538c475b0d51
|
||||
subpackages:
|
||||
- digest
|
||||
- reference
|
||||
- name: github.com/docker/spdystream
|
||||
version: 449fdfce4d962303d702fec724ef0ad181c92528
|
||||
subpackages:
|
||||
- spdy
|
||||
- name: github.com/emicklei/go-restful
|
||||
version: ff4f55a206334ef123e4f79bbf348980da81ca46
|
||||
subpackages:
|
||||
- log
|
||||
- name: github.com/emicklei/go-restful-swagger12
|
||||
version: dcef7f55730566d41eae5db10e7d6981829720f6
|
||||
- name: github.com/ghodss/yaml
|
||||
version: 73d445a93680fa1a78ae23a5839bad48f32ba1ee
|
||||
- name: github.com/go-ini/ini
|
||||
version: c787282c39ac1fc618827141a1f762240def08a3
|
||||
- name: github.com/go-openapi/analysis
|
||||
version: b44dc874b601d9e4e2f6e19140e794ba24bead3b
|
||||
- name: github.com/go-openapi/jsonpointer
|
||||
version: 46af16f9f7b149af66e5d1bd010e3574dc06de98
|
||||
- name: github.com/go-openapi/jsonreference
|
||||
version: 13c6e3589ad90f49bd3e3bbe2c2cb3d7a4142272
|
||||
- name: github.com/go-openapi/loads
|
||||
version: 18441dfa706d924a39a030ee2c3b1d8d81917b38
|
||||
- name: github.com/go-openapi/spec
|
||||
version: 6aced65f8501fe1217321abf0749d354824ba2ff
|
||||
- name: github.com/go-openapi/swag
|
||||
version: 1d0bd113de87027671077d3c71eb3ac5d7dbba72
|
||||
version: d58d458bec3cb5adec4b7ddb41131855eac0b33f
|
||||
- name: github.com/gogo/protobuf
|
||||
version: c0656edd0d9eab7c66d1eb0c568f9039345796f7
|
||||
subpackages:
|
||||
|
|
@ -73,83 +52,125 @@ imports:
|
|||
- sortkeys
|
||||
- name: github.com/golang/glog
|
||||
version: 44145f04b68cf362d9c4df2182967c2275eaefed
|
||||
- name: github.com/golang/protobuf
|
||||
version: b4deda0973fb4c70b50d226b1af49f3da59f5265
|
||||
subpackages:
|
||||
- proto
|
||||
- ptypes
|
||||
- ptypes/any
|
||||
- ptypes/duration
|
||||
- ptypes/timestamp
|
||||
- name: github.com/google/btree
|
||||
version: 7d79101e329e5a3adf994758c578dab82b90c017
|
||||
- name: github.com/google/gofuzz
|
||||
version: 44d81051d367757e1c7c6a5a86423ece9afcf63c
|
||||
- name: github.com/googleapis/gnostic
|
||||
version: 0c5108395e2debce0d731cf0287ddf7242066aba
|
||||
subpackages:
|
||||
- OpenAPIv2
|
||||
- compiler
|
||||
- extensions
|
||||
- name: github.com/gregjones/httpcache
|
||||
version: 787624de3eb7bd915c329cba748687a3b22666a6
|
||||
subpackages:
|
||||
- diskcache
|
||||
- name: github.com/hashicorp/golang-lru
|
||||
version: a0d98a5f288019575c6d1f4bb1573fef2d1fcdc4
|
||||
subpackages:
|
||||
- simplelru
|
||||
- name: github.com/howeyc/gopass
|
||||
version: bf9dde6d0d2c004a008c27aaee91170c786f6db8
|
||||
- name: github.com/imdario/mergo
|
||||
version: 6633656539c1639d9d78127b7d47c622b5d7b6dc
|
||||
- name: github.com/jmespath/go-jmespath
|
||||
version: bd40a432e4c76585ef6b72d3fd96fb9b6dc7b68d
|
||||
- name: github.com/juju/ratelimit
|
||||
version: 5b9ff866471762aa2ab2dced63c9fb6f53921342
|
||||
version: c2b33e8439af944379acbdd9c3a5fe0bc44bd8a5
|
||||
- name: github.com/json-iterator/go
|
||||
version: f2b4162afba35581b6d4a50d3b8f34e33c144682
|
||||
- name: github.com/kr/text
|
||||
version: 7cafcd837844e784b526369c9bce262804aebc60
|
||||
version: e2ffdb16a802fe2bb95e2e35ff34f0e53aeef34f
|
||||
- name: github.com/lib/pq
|
||||
version: b77235e3890a962fe8a6f8c4c7198679ca7814e7
|
||||
version: 90697d60dd844d5ef6ff15135d0203f65d2f53b8
|
||||
subpackages:
|
||||
- oid
|
||||
- name: github.com/mailru/easyjson
|
||||
version: d5b7844b561a7bc640052f1b935f7b800330d7e0
|
||||
subpackages:
|
||||
- buffer
|
||||
- jlexer
|
||||
- jwriter
|
||||
- name: github.com/modern-go/concurrent
|
||||
version: bacd9c7ef1dd9b15be4a9909b8ac7a4e313eec94
|
||||
- name: github.com/modern-go/reflect2
|
||||
version: 05fbef0ca5da472bbf96c9322b84a53edc03c9fd
|
||||
- name: github.com/mohae/deepcopy
|
||||
version: c48cc78d482608239f6c4c92a4abd87eb8761c90
|
||||
- name: github.com/motomux/pretty
|
||||
version: b2aad2c9a95d14eb978f29baa6e3a5c3c20eef30
|
||||
- name: github.com/PuerkitoBio/purell
|
||||
version: 8a290539e2e8629dbc4e6bad948158f790ec31f4
|
||||
- name: github.com/PuerkitoBio/urlesc
|
||||
version: 5bd2802263f21d8788851d5305584c82a5c75d7e
|
||||
- name: github.com/peterbourgon/diskv
|
||||
version: 5f041e8faa004a95c88a202771f4cc3e991971e6
|
||||
- name: github.com/Sirupsen/logrus
|
||||
version: c155da19408a8799da419ed3eeb0cb5db0ad5dbc
|
||||
version: 3e01752db0189b9157070a0e1668a620f9a85da2
|
||||
- name: github.com/spf13/pflag
|
||||
version: 9ff6c6923cfffbcd502984b8e0c80539a94968b7
|
||||
- name: github.com/ugorji/go
|
||||
version: ded73eae5db7e7a0ef6f55aace87a2873c5d2b74
|
||||
subpackages:
|
||||
- codec
|
||||
version: 583c0c0531f06d5278b7d917446061adc344b5cd
|
||||
- name: golang.org/x/crypto
|
||||
version: 9419663f5a44be8b34ca85f08abc5fe1be11f8a3
|
||||
version: c126467f60eb25f8f27e5a981f32a87e3965053f
|
||||
subpackages:
|
||||
- ssh/terminal
|
||||
- name: golang.org/x/net
|
||||
version: f2499483f923065a842d38eb4c7f1927e6fc6e6d
|
||||
version: 1c05540f6879653db88113bc4a2b70aec4bd491f
|
||||
subpackages:
|
||||
- context
|
||||
- http2
|
||||
- http2/hpack
|
||||
- idna
|
||||
- lex/httplex
|
||||
- name: golang.org/x/sys
|
||||
version: 8f0908ab3b2457e2e15403d3697c9ef5cb4b57a9
|
||||
version: 95c6576299259db960f6c5b9b69ea52422860fce
|
||||
subpackages:
|
||||
- unix
|
||||
- windows
|
||||
- name: golang.org/x/text
|
||||
version: 2910a502d2bf9e43193af9d68ca516529614eed3
|
||||
version: b19bf474d317b857955b12035d2c5acb57ce8b01
|
||||
subpackages:
|
||||
- cases
|
||||
- internal/tag
|
||||
- language
|
||||
- runes
|
||||
- secure/bidirule
|
||||
- secure/precis
|
||||
- transform
|
||||
- unicode/bidi
|
||||
- unicode/norm
|
||||
- width
|
||||
- name: golang.org/x/time
|
||||
version: f51c12702a4d776e4c1fa9b0fabab841babae631
|
||||
subpackages:
|
||||
- rate
|
||||
- name: gopkg.in/inf.v0
|
||||
version: 3887ee99ecf07df5b447e9b00d9c0b2adaa9f3e4
|
||||
- name: gopkg.in/yaml.v2
|
||||
version: 53feefa2559fb8dfa8d81baad31be332c97d6c77
|
||||
version: 5420a8b6744d3b0345ab293f6fcba19c978f1183
|
||||
- name: k8s.io/api
|
||||
version: 2d6f90ab1293a1fb871cf149423ebb72aa7423aa
|
||||
subpackages:
|
||||
- admissionregistration/v1alpha1
|
||||
- admissionregistration/v1beta1
|
||||
- apps/v1
|
||||
- apps/v1beta1
|
||||
- apps/v1beta2
|
||||
- authentication/v1
|
||||
- authentication/v1beta1
|
||||
- authorization/v1
|
||||
- authorization/v1beta1
|
||||
- autoscaling/v1
|
||||
- autoscaling/v2beta1
|
||||
- batch/v1
|
||||
- batch/v1beta1
|
||||
- batch/v2alpha1
|
||||
- certificates/v1beta1
|
||||
- core/v1
|
||||
- events/v1beta1
|
||||
- extensions/v1beta1
|
||||
- imagepolicy/v1alpha1
|
||||
- networking/v1
|
||||
- policy/v1beta1
|
||||
- rbac/v1
|
||||
- rbac/v1alpha1
|
||||
- rbac/v1beta1
|
||||
- scheduling/v1alpha1
|
||||
- scheduling/v1beta1
|
||||
- settings/v1alpha1
|
||||
- storage/v1
|
||||
- storage/v1alpha1
|
||||
- storage/v1beta1
|
||||
- name: k8s.io/apiextensions-apiserver
|
||||
version: fcd622fe88a4a6efcb5aea9e94ee87324ac1b036
|
||||
version: cc9cd5d998df84cc405d398e9030d29c95acff18
|
||||
subpackages:
|
||||
- pkg/apis/apiextensions
|
||||
- pkg/apis/apiextensions/v1beta1
|
||||
|
|
@ -157,24 +178,19 @@ imports:
|
|||
- pkg/client/clientset/clientset/scheme
|
||||
- pkg/client/clientset/clientset/typed/apiextensions/v1beta1
|
||||
- name: k8s.io/apimachinery
|
||||
version: 1cb2cdd78d38df243e686d1b572b76e190469842
|
||||
version: 103fd098999dc9c0c88536f5c9ad2e5da39373ae
|
||||
subpackages:
|
||||
- pkg/api/equality
|
||||
- pkg/api/errors
|
||||
- pkg/api/meta
|
||||
- pkg/api/resource
|
||||
- pkg/apimachinery
|
||||
- pkg/apimachinery/announced
|
||||
- pkg/apimachinery/registered
|
||||
- pkg/apis/meta/internalversion
|
||||
- pkg/apis/meta/v1
|
||||
- pkg/apis/meta/v1/unstructured
|
||||
- pkg/apis/meta/v1alpha1
|
||||
- pkg/apis/meta/v1beta1
|
||||
- pkg/conversion
|
||||
- pkg/conversion/queryparams
|
||||
- pkg/conversion/unstructured
|
||||
- pkg/fields
|
||||
- pkg/labels
|
||||
- pkg/openapi
|
||||
- pkg/runtime
|
||||
- pkg/runtime/schema
|
||||
- pkg/runtime/serializer
|
||||
|
|
@ -194,85 +210,65 @@ imports:
|
|||
- pkg/util/httpstream/spdy
|
||||
- pkg/util/intstr
|
||||
- pkg/util/json
|
||||
- pkg/util/mergepatch
|
||||
- pkg/util/net
|
||||
- pkg/util/rand
|
||||
- pkg/util/remotecommand
|
||||
- pkg/util/runtime
|
||||
- pkg/util/sets
|
||||
- pkg/util/strategicpatch
|
||||
- pkg/util/validation
|
||||
- pkg/util/validation/field
|
||||
- pkg/util/wait
|
||||
- pkg/util/yaml
|
||||
- pkg/version
|
||||
- pkg/watch
|
||||
- third_party/forked/golang/json
|
||||
- third_party/forked/golang/netutil
|
||||
- third_party/forked/golang/reflect
|
||||
- name: k8s.io/client-go
|
||||
version: d92e8497f71b7b4e0494e5bd204b48d34bd6f254
|
||||
version: 1f13a808da65775f22cbf47862c4e5898d8f4ca1
|
||||
subpackages:
|
||||
- discovery
|
||||
- discovery/fake
|
||||
- kubernetes
|
||||
- kubernetes/scheme
|
||||
- kubernetes/typed/admissionregistration/v1alpha1
|
||||
- kubernetes/typed/admissionregistration/v1beta1
|
||||
- kubernetes/typed/apps/v1
|
||||
- kubernetes/typed/apps/v1beta1
|
||||
- kubernetes/typed/apps/v1beta2
|
||||
- kubernetes/typed/authentication/v1
|
||||
- kubernetes/typed/authentication/v1beta1
|
||||
- kubernetes/typed/authorization/v1
|
||||
- kubernetes/typed/authorization/v1beta1
|
||||
- kubernetes/typed/autoscaling/v1
|
||||
- kubernetes/typed/autoscaling/v2alpha1
|
||||
- kubernetes/typed/autoscaling/v2beta1
|
||||
- kubernetes/typed/batch/v1
|
||||
- kubernetes/typed/batch/v1beta1
|
||||
- kubernetes/typed/batch/v2alpha1
|
||||
- kubernetes/typed/certificates/v1beta1
|
||||
- kubernetes/typed/core/v1
|
||||
- kubernetes/typed/events/v1beta1
|
||||
- kubernetes/typed/extensions/v1beta1
|
||||
- kubernetes/typed/networking/v1
|
||||
- kubernetes/typed/policy/v1beta1
|
||||
- kubernetes/typed/rbac/v1
|
||||
- kubernetes/typed/rbac/v1alpha1
|
||||
- kubernetes/typed/rbac/v1beta1
|
||||
- kubernetes/typed/scheduling/v1alpha1
|
||||
- kubernetes/typed/scheduling/v1beta1
|
||||
- kubernetes/typed/settings/v1alpha1
|
||||
- kubernetes/typed/storage/v1
|
||||
- kubernetes/typed/storage/v1alpha1
|
||||
- kubernetes/typed/storage/v1beta1
|
||||
- pkg/api
|
||||
- pkg/api/v1
|
||||
- pkg/api/v1/ref
|
||||
- pkg/apis/admissionregistration
|
||||
- pkg/apis/admissionregistration/v1alpha1
|
||||
- pkg/apis/apps
|
||||
- pkg/apis/apps/v1beta1
|
||||
- pkg/apis/authentication
|
||||
- pkg/apis/authentication/v1
|
||||
- pkg/apis/authentication/v1beta1
|
||||
- pkg/apis/authorization
|
||||
- pkg/apis/authorization/v1
|
||||
- pkg/apis/authorization/v1beta1
|
||||
- pkg/apis/autoscaling
|
||||
- pkg/apis/autoscaling/v1
|
||||
- pkg/apis/autoscaling/v2alpha1
|
||||
- pkg/apis/batch
|
||||
- pkg/apis/batch/v1
|
||||
- pkg/apis/batch/v2alpha1
|
||||
- pkg/apis/certificates
|
||||
- pkg/apis/certificates/v1beta1
|
||||
- pkg/apis/extensions
|
||||
- pkg/apis/extensions/v1beta1
|
||||
- pkg/apis/networking
|
||||
- pkg/apis/networking/v1
|
||||
- pkg/apis/policy
|
||||
- pkg/apis/policy/v1beta1
|
||||
- pkg/apis/rbac
|
||||
- pkg/apis/rbac/v1alpha1
|
||||
- pkg/apis/rbac/v1beta1
|
||||
- pkg/apis/settings
|
||||
- pkg/apis/settings/v1alpha1
|
||||
- pkg/apis/storage
|
||||
- pkg/apis/storage/v1
|
||||
- pkg/apis/storage/v1beta1
|
||||
- pkg/util
|
||||
- pkg/util/parsers
|
||||
- pkg/apis/clientauthentication
|
||||
- pkg/apis/clientauthentication/v1alpha1
|
||||
- pkg/apis/clientauthentication/v1beta1
|
||||
- pkg/version
|
||||
- plugin/pkg/client/auth/exec
|
||||
- rest
|
||||
- rest/watch
|
||||
- testing
|
||||
- tools/auth
|
||||
- tools/cache
|
||||
- tools/clientcmd
|
||||
|
|
@ -280,11 +276,25 @@ imports:
|
|||
- tools/clientcmd/api/latest
|
||||
- tools/clientcmd/api/v1
|
||||
- tools/metrics
|
||||
- tools/pager
|
||||
- tools/reference
|
||||
- tools/remotecommand
|
||||
- transport
|
||||
- transport/spdy
|
||||
- util/buffer
|
||||
- util/cert
|
||||
- util/connrotation
|
||||
- util/exec
|
||||
- util/flowcontrol
|
||||
- util/homedir
|
||||
- util/integer
|
||||
- util/retry
|
||||
- name: k8s.io/code-generator
|
||||
version: 6702109cc68eb6fe6350b83e14407c8d7309fd1a
|
||||
- name: k8s.io/gengo
|
||||
version: 906d99f89cd644eecf75ab547b29bf9f876f0b59
|
||||
- name: k8s.io/kube-openapi
|
||||
version: 91cfa479c814065e420cee7ed227db0f63a5854e
|
||||
subpackages:
|
||||
- pkg/util/proto
|
||||
testImports: []
|
||||
|
|
|
|||
38
glide.yaml
38
glide.yaml
|
|
@ -10,38 +10,14 @@ import:
|
|||
- service/ec2
|
||||
- package: github.com/lib/pq
|
||||
- package: github.com/motomux/pretty
|
||||
- package: k8s.io/apiextensions-apiserver
|
||||
subpackages:
|
||||
- pkg/client/clientset/clientset
|
||||
- package: k8s.io/apimachinery
|
||||
version: release-1.7
|
||||
subpackages:
|
||||
- pkg/api/errors
|
||||
- pkg/api/resource
|
||||
- pkg/apis/meta/v1
|
||||
- pkg/labels
|
||||
- pkg/runtime
|
||||
- pkg/runtime/schema
|
||||
- pkg/runtime/serializer
|
||||
- pkg/types
|
||||
- pkg/util/intstr
|
||||
- pkg/util/remotecommand
|
||||
- pkg/watch
|
||||
version: kubernetes-1.11.3-beta.0
|
||||
- package: k8s.io/apiextensions-apiserver
|
||||
version: kubernetes-1.11.3-beta.0
|
||||
- package: k8s.io/client-go
|
||||
version: ^4.0.0
|
||||
subpackages:
|
||||
- kubernetes
|
||||
- kubernetes/scheme
|
||||
- kubernetes/typed/apps/v1beta1
|
||||
- kubernetes/typed/core/v1
|
||||
- kubernetes/typed/extensions/v1beta1
|
||||
- pkg/api
|
||||
- pkg/api/v1
|
||||
- pkg/apis/apps/v1beta1
|
||||
- pkg/apis/extensions/v1beta1
|
||||
- rest
|
||||
- tools/cache
|
||||
- tools/clientcmd
|
||||
- tools/remotecommand
|
||||
version: kubernetes-1.11.3-beta.0
|
||||
- package: k8s.io/code-generator
|
||||
version: kubernetes-1.11.3-beta.0
|
||||
- package: k8s.io/gengo
|
||||
- package: gopkg.in/yaml.v2
|
||||
- package: github.com/mohae/deepcopy
|
||||
|
|
|
|||
|
|
@ -0,0 +1,21 @@
|
|||
/*
|
||||
Copyright YEAR Compose, Zalando SE
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
*/
|
||||
|
|
@ -0,0 +1,13 @@
|
|||
#!/bin/bash
|
||||
|
||||
set -o errexit
|
||||
set -o nounset
|
||||
set -o pipefail
|
||||
|
||||
SCRIPT_ROOT=$(dirname ${BASH_SOURCE})/..
|
||||
CODEGEN_PKG=${CODEGEN_PKG:-$(cd ${SCRIPT_ROOT}; ls -d -1 ./vendor/k8s.io/code-generator 2>/dev/null || echo ${GOPATH}/src/k8s.io/code-generator)}
|
||||
|
||||
vendor/k8s.io/code-generator/generate-groups.sh all \
|
||||
github.com/zalando-incubator/postgres-operator/pkg/generated github.com/zalando-incubator/postgres-operator/pkg/apis \
|
||||
acid.zalan.do:v1 \
|
||||
--go-header-file ${SCRIPT_ROOT}/hack/custom-boilerplate.go.txt
|
||||
|
|
@ -0,0 +1,33 @@
|
|||
#!/bin/bash
|
||||
|
||||
set -o errexit
|
||||
set -o nounset
|
||||
set -o pipefail
|
||||
|
||||
SCRIPT_ROOT=$(dirname "${BASH_SOURCE}")/..
|
||||
DIFFROOT="${SCRIPT_ROOT}/pkg"
|
||||
TMP_DIFFROOT="${SCRIPT_ROOT}/_tmp/pkg"
|
||||
_tmp="${SCRIPT_ROOT}/_tmp"
|
||||
|
||||
cleanup() {
|
||||
rm -rf "${_tmp}"
|
||||
}
|
||||
trap "cleanup" EXIT SIGINT
|
||||
|
||||
cleanup
|
||||
|
||||
mkdir -p "${TMP_DIFFROOT}"
|
||||
cp -a "${DIFFROOT}"/* "${TMP_DIFFROOT}"
|
||||
|
||||
"${SCRIPT_ROOT}/hack/update-codegen.sh"
|
||||
echo "diffing ${DIFFROOT} against freshly generated codegen"
|
||||
ret=0
|
||||
diff -Naupr "${DIFFROOT}" "${TMP_DIFFROOT}" || ret=$?
|
||||
cp -a "${TMP_DIFFROOT}"/* "${DIFFROOT}"
|
||||
if [[ $ret -eq 0 ]]
|
||||
then
|
||||
echo "${DIFFROOT} up to date."
|
||||
else
|
||||
echo "${DIFFROOT} is out of date. Please run hack/update-codegen.sh"
|
||||
exit 1
|
||||
fi
|
||||
|
|
@ -25,6 +25,7 @@ data:
|
|||
# pam_role_name: zalandos
|
||||
# pam_configuration: |
|
||||
# https://info.example.com/oauth2/tokeninfo?access_token= uid realm=/employees
|
||||
aws_region: eu-central-1
|
||||
db_hosted_zone: db.example.com
|
||||
master_dns_name_format: '{cluster}.{team}.staging.{hostedzone}'
|
||||
replica_dns_name_format: '{cluster}-repl.{team}.staging.{hostedzone}'
|
||||
|
|
|
|||
|
|
@ -123,6 +123,21 @@ rules:
|
|||
verbs:
|
||||
- get
|
||||
- create
|
||||
- apiGroups:
|
||||
- "rbac.authorization.k8s.io"
|
||||
resources:
|
||||
- rolebindings
|
||||
verbs:
|
||||
- get
|
||||
- create
|
||||
- apiGroups:
|
||||
- "rbac.authorization.k8s.io"
|
||||
resources:
|
||||
- clusterroles
|
||||
verbs:
|
||||
- bind
|
||||
resourceNames:
|
||||
- zalando-postgres-operator
|
||||
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
|
|
|
|||
|
|
@ -12,7 +12,7 @@ spec:
|
|||
serviceAccountName: zalando-postgres-operator
|
||||
containers:
|
||||
- name: postgres-operator
|
||||
image: registry.opensource.zalan.do/acid/postgres-operator:1352c4a
|
||||
image: registry.opensource.zalan.do/acid/postgres-operator:v1.0.0
|
||||
imagePullPolicy: IfNotPresent
|
||||
env:
|
||||
# provided additional ENV vars can overwrite individual config map entries
|
||||
|
|
|
|||
|
|
@ -0,0 +1,83 @@
|
|||
apiVersion: "acid.zalan.do/v1"
|
||||
kind: OperatorConfiguration
|
||||
metadata:
|
||||
name: postgresql-operator-default-configuration
|
||||
configuration:
|
||||
etcd_host: ""
|
||||
docker_image: registry.opensource.zalan.do/acid/spilo-cdp-10:1.4-p8
|
||||
workers: 4
|
||||
min_instances: -1
|
||||
max_instances: -1
|
||||
resync_period: 30m
|
||||
repair_period: 5m
|
||||
|
||||
#sidecar_docker_images:
|
||||
# example: "exampleimage:exampletag"
|
||||
users:
|
||||
super_username: postgres
|
||||
replication_username: standby
|
||||
kubernetes:
|
||||
pod_service_account_name: operator
|
||||
pod_terminate_grace_period: 5m
|
||||
pdb_name_format: "postgres-{cluster}-pdb"
|
||||
secret_name_template: "{username}.{cluster}.credentials.{tprkind}.{tprgroup}"
|
||||
oauth_token_secret_name: postgresql-operator
|
||||
pod_role_label: spilo-role
|
||||
cluster_labels:
|
||||
application: spilo
|
||||
cluster_name_label: cluster-name
|
||||
# watched_namespace:""
|
||||
# node_readiness_label: ""
|
||||
# toleration: {}
|
||||
# infrastructure_roles_secret_name: ""
|
||||
# pod_environment_configmap: ""
|
||||
postgres_pod_resources:
|
||||
default_cpu_request: 100m
|
||||
default_memory_request: 100Mi
|
||||
default_cpu_limit: "3"
|
||||
default_memory_limit: 1Gi
|
||||
timeouts:
|
||||
resource_check_interval: 3s
|
||||
resource_check_timeout: 10m
|
||||
pod_label_wait_timeout: 10m
|
||||
pod_deletion_wait_timeout: 10m
|
||||
ready_wait_interval: 4s
|
||||
ready_wait_timeout: 30s
|
||||
load_balancer:
|
||||
enable_master_load_balancer: false
|
||||
enable_replica_load_balancer: false
|
||||
master_dns_name_format: "{cluster}.{team}.{hostedzone}"
|
||||
replica_dns_name_format: "{cluster}-repl.{team}.{hostedzone}"
|
||||
aws_or_gcp:
|
||||
# db_hosted_zone: ""
|
||||
# wal_s3_bucket: ""
|
||||
# log_s3_bucket: ""
|
||||
# kube_iam_role: ""
|
||||
aws_region: eu-central-1
|
||||
debug:
|
||||
debug_logging: true
|
||||
enable_database_access: true
|
||||
teams_api:
|
||||
enable_teams_api: false
|
||||
team_api_role_configuration:
|
||||
log_statement: all
|
||||
enable_team_superuser: false
|
||||
team_admin_role: admin
|
||||
pam_role_name: zalandos
|
||||
# pam_configuration: ""
|
||||
protected_role_names:
|
||||
- admin
|
||||
# teams_api_url: ""
|
||||
logging_rest_api:
|
||||
api_port: 8008
|
||||
ring_log_lines: 100
|
||||
cluster_history_entries: 1000
|
||||
scalyr:
|
||||
scalyr_cpu_request: 100m
|
||||
scalyr_memory_request: 50Mi
|
||||
scalyr_cpu_limit: "1"
|
||||
scalyr_memory_limit: 1Gi
|
||||
# scalyr_api_key: ""
|
||||
# scalyr_image: ""
|
||||
# scalyr_server_url: ""
|
||||
|
||||
|
|
@ -0,0 +1,6 @@
|
|||
package acidzalando
|
||||
|
||||
const (
|
||||
// GroupName is the group name for the operator CRDs
|
||||
GroupName = "acid.zalan.do"
|
||||
)
|
||||
|
|
@ -0,0 +1,16 @@
|
|||
package v1
|
||||
|
||||
const (
|
||||
serviceNameMaxLength = 63
|
||||
clusterNameMaxLength = serviceNameMaxLength - len("-repl")
|
||||
serviceNameRegexString = `^[a-z]([-a-z0-9]*[a-z0-9])?$`
|
||||
|
||||
ClusterStatusUnknown PostgresStatus = ""
|
||||
ClusterStatusCreating PostgresStatus = "Creating"
|
||||
ClusterStatusUpdating PostgresStatus = "Updating"
|
||||
ClusterStatusUpdateFailed PostgresStatus = "UpdateFailed"
|
||||
ClusterStatusSyncFailed PostgresStatus = "SyncFailed"
|
||||
ClusterStatusAddFailed PostgresStatus = "CreateFailed"
|
||||
ClusterStatusRunning PostgresStatus = "Running"
|
||||
ClusterStatusInvalid PostgresStatus = "Invalid"
|
||||
)
|
||||
|
|
@ -0,0 +1,6 @@
|
|||
// +k8s:deepcopy-gen=package,register
|
||||
|
||||
// Package v1 is the v1 version of the API.
|
||||
// +groupName=acid.zalan.do
|
||||
|
||||
package v1
|
||||
|
|
@ -0,0 +1,130 @@
|
|||
package v1
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
type postgresqlCopy Postgresql
|
||||
|
||||
// MarshalJSON converts a maintenance window definition to JSON.
|
||||
func (m *MaintenanceWindow) MarshalJSON() ([]byte, error) {
|
||||
if m.Everyday {
|
||||
return []byte(fmt.Sprintf("\"%s-%s\"",
|
||||
m.StartTime.Format("15:04"),
|
||||
m.EndTime.Format("15:04"))), nil
|
||||
}
|
||||
|
||||
return []byte(fmt.Sprintf("\"%s:%s-%s\"",
|
||||
m.Weekday.String()[:3],
|
||||
m.StartTime.Format("15:04"),
|
||||
m.EndTime.Format("15:04"))), nil
|
||||
}
|
||||
|
||||
// UnmarshalJSON converts a JSON to the maintenance window definition.
|
||||
func (m *MaintenanceWindow) UnmarshalJSON(data []byte) error {
|
||||
var (
|
||||
got MaintenanceWindow
|
||||
err error
|
||||
)
|
||||
|
||||
parts := strings.Split(string(data[1:len(data)-1]), "-")
|
||||
if len(parts) != 2 {
|
||||
return fmt.Errorf("incorrect maintenance window format")
|
||||
}
|
||||
|
||||
fromParts := strings.Split(parts[0], ":")
|
||||
switch len(fromParts) {
|
||||
case 3:
|
||||
got.Everyday = false
|
||||
got.Weekday, err = parseWeekday(fromParts[0])
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not parse weekday: %v", err)
|
||||
}
|
||||
|
||||
got.StartTime, err = parseTime(fromParts[1] + ":" + fromParts[2])
|
||||
case 2:
|
||||
got.Everyday = true
|
||||
got.StartTime, err = parseTime(fromParts[0] + ":" + fromParts[1])
|
||||
default:
|
||||
return fmt.Errorf("incorrect maintenance window format")
|
||||
}
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not parse start time: %v", err)
|
||||
}
|
||||
|
||||
got.EndTime, err = parseTime(parts[1])
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not parse end time: %v", err)
|
||||
}
|
||||
|
||||
if got.EndTime.Before(&got.StartTime) {
|
||||
return fmt.Errorf("'From' time must be prior to the 'To' time")
|
||||
}
|
||||
|
||||
*m = got
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// UnmarshalJSON converts a JSON into the PostgreSQL object.
|
||||
func (p *Postgresql) UnmarshalJSON(data []byte) error {
|
||||
var tmp postgresqlCopy
|
||||
|
||||
err := json.Unmarshal(data, &tmp)
|
||||
if err != nil {
|
||||
metaErr := json.Unmarshal(data, &tmp.ObjectMeta)
|
||||
if metaErr != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
tmp.Error = err.Error()
|
||||
tmp.Status = ClusterStatusInvalid
|
||||
|
||||
*p = Postgresql(tmp)
|
||||
|
||||
return nil
|
||||
}
|
||||
tmp2 := Postgresql(tmp)
|
||||
|
||||
if clusterName, err := extractClusterName(tmp2.ObjectMeta.Name, tmp2.Spec.TeamID); err != nil {
|
||||
tmp2.Error = err.Error()
|
||||
tmp2.Status = ClusterStatusInvalid
|
||||
} else if err := validateCloneClusterDescription(&tmp2.Spec.Clone); err != nil {
|
||||
tmp2.Error = err.Error()
|
||||
tmp2.Status = ClusterStatusInvalid
|
||||
} else {
|
||||
tmp2.Spec.ClusterName = clusterName
|
||||
}
|
||||
|
||||
*p = tmp2
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *Duration) UnmarshalJSON(b []byte) error {
|
||||
var (
|
||||
v interface{}
|
||||
err error
|
||||
)
|
||||
if err = json.Unmarshal(b, &v); err != nil {
|
||||
return err
|
||||
}
|
||||
switch val := v.(type) {
|
||||
case string:
|
||||
t, err := time.ParseDuration(val)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*d = Duration(t)
|
||||
return nil
|
||||
case float64:
|
||||
t := time.Duration(val)
|
||||
*d = Duration(t)
|
||||
return nil
|
||||
default:
|
||||
return fmt.Errorf("could not recognize type %T as a valid type to unmarshal to Duration", val)
|
||||
}
|
||||
}
|
||||
|
|
@ -0,0 +1,150 @@
|
|||
package v1
|
||||
|
||||
import (
|
||||
"github.com/zalando-incubator/postgres-operator/pkg/util/config"
|
||||
|
||||
"time"
|
||||
|
||||
"github.com/zalando-incubator/postgres-operator/pkg/spec"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
// +genclient
|
||||
// +genclient:onlyVerbs=get
|
||||
// +genclient:noStatus
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
type OperatorConfiguration struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.ObjectMeta `json:"metadata"`
|
||||
|
||||
Configuration OperatorConfigurationData `json:"configuration"`
|
||||
}
|
||||
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
type OperatorConfigurationList struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.ListMeta `json:"metadata"`
|
||||
|
||||
Items []OperatorConfiguration `json:"items"`
|
||||
}
|
||||
|
||||
type PostgresUsersConfiguration struct {
|
||||
SuperUsername string `json:"super_username,omitempty"`
|
||||
ReplicationUsername string `json:"replication_username,omitempty"`
|
||||
}
|
||||
|
||||
type KubernetesMetaConfiguration struct {
|
||||
PodServiceAccountName string `json:"pod_service_account_name,omitempty"`
|
||||
// TODO: change it to the proper json
|
||||
PodServiceAccountDefinition string `json:"pod_service_account_definition,omitempty"`
|
||||
PodServiceAccountRoleBindingDefinition string `json:"pod_service_account_role_binding_definition,omitempty"`
|
||||
PodTerminateGracePeriod Duration `json:"pod_terminate_grace_period,omitempty"`
|
||||
WatchedNamespace string `json:"watched_namespace,omitempty"`
|
||||
PDBNameFormat config.StringTemplate `json:"pdb_name_format,omitempty"`
|
||||
SecretNameTemplate config.StringTemplate `json:"secret_name_template,omitempty"`
|
||||
OAuthTokenSecretName spec.NamespacedName `json:"oauth_token_secret_name,omitempty"`
|
||||
InfrastructureRolesSecretName spec.NamespacedName `json:"infrastructure_roles_secret_name,omitempty"`
|
||||
PodRoleLabel string `json:"pod_role_label,omitempty"`
|
||||
ClusterLabels map[string]string `json:"cluster_labels,omitempty"`
|
||||
ClusterNameLabel string `json:"cluster_name_label,omitempty"`
|
||||
NodeReadinessLabel map[string]string `json:"node_readiness_label,omitempty"`
|
||||
// TODO: use a proper toleration structure?
|
||||
PodToleration map[string]string `json:"toleration,omitempty"`
|
||||
// TODO: use namespacedname
|
||||
PodEnvironmentConfigMap string `json:"pod_environment_configmap,omitempty"`
|
||||
PodPriorityClassName string `json:"pod_priority_class_name,omitempty"`
|
||||
}
|
||||
|
||||
type PostgresPodResourcesDefaults struct {
|
||||
DefaultCPURequest string `json:"default_cpu_request,omitempty"`
|
||||
DefaultMemoryRequest string `json:"default_memory_request,omitempty"`
|
||||
DefaultCPULimit string `json:"default_cpu_limit,omitempty"`
|
||||
DefaultMemoryLimit string `json:"default_memory_limit,omitempty"`
|
||||
}
|
||||
|
||||
type OperatorTimeouts struct {
|
||||
ResourceCheckInterval Duration `json:"resource_check_interval,omitempty"`
|
||||
ResourceCheckTimeout Duration `json:"resource_check_timeout,omitempty"`
|
||||
PodLabelWaitTimeout Duration `json:"pod_label_wait_timeout,omitempty"`
|
||||
PodDeletionWaitTimeout Duration `json:"pod_deletion_wait_timeout,omitempty"`
|
||||
ReadyWaitInterval Duration `json:"ready_wait_interval,omitempty"`
|
||||
ReadyWaitTimeout Duration `json:"ready_wait_timeout,omitempty"`
|
||||
}
|
||||
|
||||
type LoadBalancerConfiguration struct {
|
||||
DbHostedZone string `json:"db_hosted_zone,omitempty"`
|
||||
EnableMasterLoadBalancer bool `json:"enable_master_load_balancer,omitempty"`
|
||||
EnableReplicaLoadBalancer bool `json:"enable_replica_load_balancer,omitempty"`
|
||||
MasterDNSNameFormat config.StringTemplate `json:"master_dns_name_format,omitempty"`
|
||||
ReplicaDNSNameFormat config.StringTemplate `json:"replica_dns_name_format,omitempty"`
|
||||
}
|
||||
|
||||
type AWSGCPConfiguration struct {
|
||||
WALES3Bucket string `json:"wal_s3_bucket,omitempty"`
|
||||
AWSRegion string `json:"aws_region,omitempty"`
|
||||
LogS3Bucket string `json:"log_s3_bucket,omitempty"`
|
||||
KubeIAMRole string `json:"kube_iam_role,omitempty"`
|
||||
}
|
||||
|
||||
type OperatorDebugConfiguration struct {
|
||||
DebugLogging bool `json:"debug_logging,omitempty"`
|
||||
EnableDBAccess bool `json:"enable_database_access,omitempty"`
|
||||
}
|
||||
|
||||
type TeamsAPIConfiguration struct {
|
||||
EnableTeamsAPI bool `json:"enable_teams_api,omitempty"`
|
||||
TeamsAPIUrl string `json:"teams_api_url,omitempty"`
|
||||
TeamAPIRoleConfiguration map[string]string `json:"team_api_role_configuration,omitempty"`
|
||||
EnableTeamSuperuser bool `json:"enable_team_superuser,omitempty"`
|
||||
TeamAdminRole string `json:"team_admin_role,omitempty"`
|
||||
PamRoleName string `json:"pam_role_name,omitempty"`
|
||||
PamConfiguration string `json:"pam_configuration,omitempty"`
|
||||
ProtectedRoles []string `json:"protected_role_names,omitempty"`
|
||||
PostgresSuperuserTeams []string `json:"postgres_superuser_teams,omitempty"`
|
||||
}
|
||||
|
||||
type LoggingRESTAPIConfiguration struct {
|
||||
APIPort int `json:"api_port,omitempty"`
|
||||
RingLogLines int `json:"ring_log_lines,omitempty"`
|
||||
ClusterHistoryEntries int `json:"cluster_history_entries,omitempty"`
|
||||
}
|
||||
|
||||
type ScalyrConfiguration struct {
|
||||
ScalyrAPIKey string `json:"scalyr_api_key,omitempty"`
|
||||
ScalyrImage string `json:"scalyr_image,omitempty"`
|
||||
ScalyrServerURL string `json:"scalyr_server_url,omitempty"`
|
||||
ScalyrCPURequest string `json:"scalyr_cpu_request,omitempty"`
|
||||
ScalyrMemoryRequest string `json:"scalyr_memory_request,omitempty"`
|
||||
ScalyrCPULimit string `json:"scalyr_cpu_limit,omitempty"`
|
||||
ScalyrMemoryLimit string `json:"scalyr_memory_limit,omitempty"`
|
||||
}
|
||||
|
||||
type OperatorConfigurationData struct {
|
||||
EtcdHost string `json:"etcd_host,omitempty"`
|
||||
DockerImage string `json:"docker_image,omitempty"`
|
||||
Workers uint32 `json:"workers,omitempty"`
|
||||
MinInstances int32 `json:"min_instances,omitempty"`
|
||||
MaxInstances int32 `json:"max_instances,omitempty"`
|
||||
ResyncPeriod Duration `json:"resync_period,omitempty"`
|
||||
RepairPeriod Duration `json:"repair_period,omitempty"`
|
||||
Sidecars map[string]string `json:"sidecar_docker_images,omitempty"`
|
||||
PostgresUsersConfiguration PostgresUsersConfiguration `json:"users"`
|
||||
Kubernetes KubernetesMetaConfiguration `json:"kubernetes"`
|
||||
PostgresPodResources PostgresPodResourcesDefaults `json:"postgres_pod_resources"`
|
||||
Timeouts OperatorTimeouts `json:"timeouts"`
|
||||
LoadBalancer LoadBalancerConfiguration `json:"load_balancer"`
|
||||
AWSGCP AWSGCPConfiguration `json:"aws_or_gcp"`
|
||||
OperatorDebug OperatorDebugConfiguration `json:"debug"`
|
||||
TeamsAPI TeamsAPIConfiguration `json:"teams_api"`
|
||||
LoggingRESTAPI LoggingRESTAPIConfiguration `json:"logging_rest_api"`
|
||||
Scalyr ScalyrConfiguration `json:"scalyr"`
|
||||
}
|
||||
|
||||
type OperatorConfigurationUsers struct {
|
||||
SuperUserName string `json:"superuser_name,omitempty"`
|
||||
Replication string `json:"replication_user_name,omitempty"`
|
||||
ProtectedRoles []string `json:"protected_roles,omitempty"`
|
||||
TeamAPIRoleConfiguration map[string]string `json:"team_api_role_configuration,omitempty"`
|
||||
}
|
||||
|
||||
type Duration time.Duration
|
||||
|
|
@ -0,0 +1,127 @@
|
|||
package v1
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
// +genclient
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
//Postgresql defines PostgreSQL Custom Resource Definition Object.
|
||||
type Postgresql struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.ObjectMeta `json:"metadata,omitempty"`
|
||||
|
||||
Spec PostgresSpec `json:"spec"`
|
||||
Status PostgresStatus `json:"status,omitempty"`
|
||||
Error string `json:"-"`
|
||||
}
|
||||
|
||||
// PostgresSpec defines the specification for the PostgreSQL TPR.
|
||||
type PostgresSpec struct {
|
||||
PostgresqlParam `json:"postgresql"`
|
||||
Volume `json:"volume,omitempty"`
|
||||
Patroni `json:"patroni,omitempty"`
|
||||
Resources `json:"resources,omitempty"`
|
||||
|
||||
TeamID string `json:"teamId"`
|
||||
DockerImage string `json:"dockerImage,omitempty"`
|
||||
|
||||
// vars that enable load balancers are pointers because it is important to know if any of them is omitted from the Postgres manifest
|
||||
// in that case the var evaluates to nil and the value is taken from the operator config
|
||||
EnableMasterLoadBalancer *bool `json:"enableMasterLoadBalancer,omitempty"`
|
||||
EnableReplicaLoadBalancer *bool `json:"enableReplicaLoadBalancer,omitempty"`
|
||||
|
||||
// deprecated load balancer settings maintained for backward compatibility
|
||||
// see "Load balancers" operator docs
|
||||
UseLoadBalancer *bool `json:"useLoadBalancer,omitempty"`
|
||||
ReplicaLoadBalancer *bool `json:"replicaLoadBalancer,omitempty"`
|
||||
|
||||
// load balancers' source ranges are the same for master and replica services
|
||||
AllowedSourceRanges []string `json:"allowedSourceRanges"`
|
||||
|
||||
NumberOfInstances int32 `json:"numberOfInstances"`
|
||||
Users map[string]UserFlags `json:"users"`
|
||||
MaintenanceWindows []MaintenanceWindow `json:"maintenanceWindows,omitempty"`
|
||||
Clone CloneDescription `json:"clone"`
|
||||
ClusterName string `json:"-"`
|
||||
Databases map[string]string `json:"databases,omitempty"`
|
||||
Tolerations []v1.Toleration `json:"tolerations,omitempty"`
|
||||
Sidecars []Sidecar `json:"sidecars,omitempty"`
|
||||
PodPriorityClassName string `json:"pod_priority_class_name,omitempty"`
|
||||
}
|
||||
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
// PostgresqlList defines a list of PostgreSQL clusters.
|
||||
type PostgresqlList struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.ListMeta `json:"metadata"`
|
||||
|
||||
Items []Postgresql `json:"items"`
|
||||
}
|
||||
|
||||
// MaintenanceWindow describes the time window when the operator is allowed to do maintenance on a cluster.
|
||||
type MaintenanceWindow struct {
|
||||
Everyday bool
|
||||
Weekday time.Weekday
|
||||
StartTime metav1.Time // Start time
|
||||
EndTime metav1.Time // End time
|
||||
}
|
||||
|
||||
// Volume describes a single volume in the manifest.
|
||||
type Volume struct {
|
||||
Size string `json:"size"`
|
||||
StorageClass string `json:"storageClass"`
|
||||
}
|
||||
|
||||
// PostgresqlParam describes PostgreSQL version and pairs of configuration parameter name - values.
|
||||
type PostgresqlParam struct {
|
||||
PgVersion string `json:"version"`
|
||||
Parameters map[string]string `json:"parameters"`
|
||||
}
|
||||
|
||||
// ResourceDescription describes CPU and memory resources defined for a cluster.
|
||||
type ResourceDescription struct {
|
||||
CPU string `json:"cpu"`
|
||||
Memory string `json:"memory"`
|
||||
}
|
||||
|
||||
// Resources describes requests and limits for the cluster resouces.
|
||||
type Resources struct {
|
||||
ResourceRequest ResourceDescription `json:"requests,omitempty"`
|
||||
ResourceLimits ResourceDescription `json:"limits,omitempty"`
|
||||
}
|
||||
|
||||
// Patroni contains Patroni-specific configuration
|
||||
type Patroni struct {
|
||||
InitDB map[string]string `json:"initdb"`
|
||||
PgHba []string `json:"pg_hba"`
|
||||
TTL uint32 `json:"ttl"`
|
||||
LoopWait uint32 `json:"loop_wait"`
|
||||
RetryTimeout uint32 `json:"retry_timeout"`
|
||||
MaximumLagOnFailover float32 `json:"maximum_lag_on_failover"` // float32 because https://github.com/kubernetes/kubernetes/issues/30213
|
||||
}
|
||||
|
||||
// CloneDescription describes which cluster the new should clone and up to which point in time
|
||||
type CloneDescription struct {
|
||||
ClusterName string `json:"cluster,omitempty"`
|
||||
UID string `json:"uid,omitempty"`
|
||||
EndTimestamp string `json:"timestamp,omitempty"`
|
||||
}
|
||||
|
||||
// Sidecar defines a container to be run in the same pod as the Postgres container.
|
||||
type Sidecar struct {
|
||||
Resources `json:"resources,omitempty"`
|
||||
Name string `json:"name,omitempty"`
|
||||
DockerImage string `json:"image,omitempty"`
|
||||
Ports []v1.ContainerPort `json:"ports,omitempty"`
|
||||
Env []v1.EnvVar `json:"env,omitempty"`
|
||||
}
|
||||
|
||||
// UserFlags defines flags (such as superuser, nologin) that could be assigned to individual users
|
||||
type UserFlags []string
|
||||
|
||||
// PostgresStatus contains status of the PostgreSQL cluster (running, creation failed etc.)
|
||||
type PostgresStatus string
|
||||
|
|
@ -0,0 +1,58 @@
|
|||
package v1
|
||||
|
||||
import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
|
||||
"github.com/zalando-incubator/postgres-operator/pkg/apis/acid.zalan.do"
|
||||
)
|
||||
|
||||
const (
|
||||
PostgresCRDResourceKind = "postgresql"
|
||||
PostgresCRDResourcePlural = "postgresqls"
|
||||
PostgresCRDResouceName = PostgresCRDResourcePlural + "." + acidzalando.GroupName
|
||||
PostgresCRDResourceShort = "pg"
|
||||
|
||||
OperatorConfigCRDResouceKind = "OperatorConfiguration"
|
||||
OperatorConfigCRDResourcePlural = "operatorconfigurations"
|
||||
OperatorConfigCRDResourceName = OperatorConfigCRDResourcePlural + "." + acidzalando.GroupName
|
||||
OperatorConfigCRDResourceShort = "opconfig"
|
||||
|
||||
APIVersion = "v1"
|
||||
)
|
||||
|
||||
var (
	// localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes.
	SchemeBuilder      runtime.SchemeBuilder
	localSchemeBuilder = &SchemeBuilder
	// AddToScheme registers this group/version's types with a runtime.Scheme.
	AddToScheme = localSchemeBuilder.AddToScheme
	// SchemeGroupVersion identifies the acid.zalan.do/v1 API group served here.
	SchemeGroupVersion = schema.GroupVersion{Group: acidzalando.GroupName, Version: APIVersion}
)
|
||||
|
||||
// init wires the hand-written type registrations into the scheme builder.
func init() {
	// We only register manually written functions here. The registration of the
	// generated functions takes place in the generated files. The separation
	// makes the code compile even when the generated files are missing.
	localSchemeBuilder.Register(addKnownTypes)
}
|
||||
|
||||
// Resource takes an unqualified resource and returns a Group qualified GroupResource
|
||||
func Resource(resource string) schema.GroupResource {
|
||||
return SchemeGroupVersion.WithResource(resource).GroupResource()
|
||||
}
|
||||
|
||||
// addKnownTypes adds the list of known types to the given api scheme.
func addKnownTypes(scheme *runtime.Scheme) error {
	// AddKnownType assumes derives the type kind from the type name, which is always uppercase.
	// For our CRDs we use lowercase names historically, therefore we have to supply the name separately.
	// TODO: User uppercase CRDResourceKind of our types in the next major API version
	scheme.AddKnownTypeWithName(SchemeGroupVersion.WithKind("postgresql"), &Postgresql{})
	scheme.AddKnownTypeWithName(SchemeGroupVersion.WithKind("postgresqlList"), &PostgresqlList{})
	scheme.AddKnownTypeWithName(SchemeGroupVersion.WithKind("OperatorConfiguration"),
		&OperatorConfiguration{})
	scheme.AddKnownTypeWithName(SchemeGroupVersion.WithKind("OperatorConfigurationList"),
		&OperatorConfigurationList{})
	// Register the group/version itself (adds standard meta types such as Status).
	metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
	return nil
}
|
||||
|
|
@ -0,0 +1,94 @@
|
|||
package v1
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"regexp"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
var (
	// weekdays maps three-letter English weekday abbreviations to time.Weekday ordinals (Sun == 0).
	weekdays = map[string]int{"Sun": 0, "Mon": 1, "Tue": 2, "Wed": 3, "Thu": 4, "Fri": 5, "Sat": 6}
	// serviceNameRegex validates DNS-1035 service names; the pattern string
	// (serviceNameRegexString) is declared elsewhere in this package.
	serviceNameRegex = regexp.MustCompile(serviceNameRegexString)
)
|
||||
|
||||
// Clone returns a deep copy of the Postgresql object; a nil receiver yields nil.
func (p *Postgresql) Clone() *Postgresql {
	if p == nil {
		return nil
	}
	return p.DeepCopy()
}
|
||||
|
||||
func parseTime(s string) (metav1.Time, error) {
|
||||
parts := strings.Split(s, ":")
|
||||
if len(parts) != 2 {
|
||||
return metav1.Time{}, fmt.Errorf("incorrect time format")
|
||||
}
|
||||
timeLayout := "15:04"
|
||||
|
||||
tp, err := time.Parse(timeLayout, s)
|
||||
if err != nil {
|
||||
return metav1.Time{}, err
|
||||
}
|
||||
|
||||
return metav1.Time{Time: tp.UTC()}, nil
|
||||
}
|
||||
|
||||
func parseWeekday(s string) (time.Weekday, error) {
|
||||
weekday, ok := weekdays[s]
|
||||
if !ok {
|
||||
return time.Weekday(0), fmt.Errorf("incorrect weekday")
|
||||
}
|
||||
|
||||
return time.Weekday(weekday), nil
|
||||
}
|
||||
|
||||
func extractClusterName(clusterName string, teamName string) (string, error) {
|
||||
teamNameLen := len(teamName)
|
||||
if len(clusterName) < teamNameLen+2 {
|
||||
return "", fmt.Errorf("cluster name must match {TEAM}-{NAME} format. Got cluster name '%v', team name '%v'", clusterName, teamName)
|
||||
}
|
||||
|
||||
if teamNameLen == 0 {
|
||||
return "", fmt.Errorf("team name is empty")
|
||||
}
|
||||
|
||||
if strings.ToLower(clusterName[:teamNameLen+1]) != strings.ToLower(teamName)+"-" {
|
||||
return "", fmt.Errorf("name must match {TEAM}-{NAME} format")
|
||||
}
|
||||
if len(clusterName) > clusterNameMaxLength {
|
||||
return "", fmt.Errorf("name cannot be longer than %d characters", clusterNameMaxLength)
|
||||
}
|
||||
if !serviceNameRegex.MatchString(clusterName) {
|
||||
return "", fmt.Errorf("name must confirm to DNS-1035, regex used for validation is %q",
|
||||
serviceNameRegexString)
|
||||
}
|
||||
|
||||
return clusterName[teamNameLen+1:], nil
|
||||
}
|
||||
|
||||
func validateCloneClusterDescription(clone *CloneDescription) error {
|
||||
// when cloning from the basebackup (no end timestamp) check that the cluster name is a valid service name
|
||||
if clone.ClusterName != "" && clone.EndTimestamp == "" {
|
||||
if !serviceNameRegex.MatchString(clone.ClusterName) {
|
||||
return fmt.Errorf("clone cluster name must confirm to DNS-1035, regex used for validation is %q",
|
||||
serviceNameRegexString)
|
||||
}
|
||||
if len(clone.ClusterName) > serviceNameMaxLength {
|
||||
return fmt.Errorf("clone cluster name must be no longer than %d characters", serviceNameMaxLength)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (status PostgresStatus) Success() bool {
|
||||
return status != ClusterStatusAddFailed &&
|
||||
status != ClusterStatusUpdateFailed &&
|
||||
status != ClusterStatusSyncFailed
|
||||
}
|
||||
|
||||
// String implements fmt.Stringer for PostgresStatus.
func (status PostgresStatus) String() string {
	return string(status)
}
|
||||
|
|
@ -1,26 +1,27 @@
|
|||
package spec
|
||||
package v1
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"reflect"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
var parseTimeTests = []struct {
|
||||
in string
|
||||
out time.Time
|
||||
out metav1.Time
|
||||
err error
|
||||
}{
|
||||
{"16:08", mustParseTime("16:08"), nil},
|
||||
{"11:00", mustParseTime("11:00"), nil},
|
||||
{"23:59", mustParseTime("23:59"), nil},
|
||||
|
||||
{"26:09", time.Now(), errors.New(`parsing time "26:09": hour out of range`)},
|
||||
{"23:69", time.Now(), errors.New(`parsing time "23:69": minute out of range`)},
|
||||
{"26:09", metav1.Now(), errors.New(`parsing time "26:09": hour out of range`)},
|
||||
{"23:69", metav1.Now(), errors.New(`parsing time "23:69": minute out of range`)},
|
||||
}
|
||||
|
||||
var parseWeekdayTests = []struct {
|
||||
|
|
@ -49,8 +50,11 @@ var clusterNames = []struct {
|
|||
{"acid-test", "test", "", errors.New("name must match {TEAM}-{NAME} format")},
|
||||
{"-test", "", "", errors.New("team name is empty")},
|
||||
{"-test", "-", "", errors.New("name must match {TEAM}-{NAME} format")},
|
||||
{"", "-", "", errors.New("name is too short")},
|
||||
{"-", "-", "", errors.New("name is too short")},
|
||||
{"", "-", "", errors.New("cluster name must match {TEAM}-{NAME} format. Got cluster name '', team name '-'")},
|
||||
{"-", "-", "", errors.New("cluster name must match {TEAM}-{NAME} format. Got cluster name '-', team name '-'")},
|
||||
// user may specify the team part of the full cluster name differently from the team name returned by the Teams API
|
||||
// in the case the actual Teams API name is long enough, this will fail the check
|
||||
{"foo-bar", "qwerty", "", errors.New("cluster name must match {TEAM}-{NAME} format. Got cluster name 'foo-bar', team name 'qwerty'")},
|
||||
}
|
||||
|
||||
var cloneClusterDescriptions = []struct {
|
||||
|
|
@ -125,13 +129,13 @@ var unmarshalCluster = []struct {
|
|||
Name: "acid-testcluster1",
|
||||
},
|
||||
Status: ClusterStatusInvalid,
|
||||
Error: &json.UnmarshalTypeError{
|
||||
Error: (&json.UnmarshalTypeError{
|
||||
Value: "number",
|
||||
Type: reflect.TypeOf(""),
|
||||
Offset: 126,
|
||||
Struct: "PostgresSpec",
|
||||
Field: "teamId",
|
||||
},
|
||||
}).Error(),
|
||||
},
|
||||
[]byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"acid-testcluster1","creationTimestamp":null},"spec":{"postgresql":{"version":"","parameters":null},"volume":{"size":"","storageClass":""},"patroni":{"initdb":null,"pg_hba":null,"ttl":0,"loop_wait":0,"retry_timeout":0,"maximum_lag_on_failover":0},"resources":{"requests":{"cpu":"","memory":""},"limits":{"cpu":"","memory":""}},"teamId":"","allowedSourceRanges":null,"numberOfInstances":0,"users":null,"clone":{}},"status":"Invalid"}`), nil},
|
||||
{[]byte(`{
|
||||
|
|
@ -264,7 +268,7 @@ var unmarshalCluster = []struct {
|
|||
},
|
||||
ClusterName: "testcluster1",
|
||||
},
|
||||
Error: nil,
|
||||
Error: "",
|
||||
},
|
||||
[]byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"acid-testcluster1","creationTimestamp":null},"spec":{"postgresql":{"version":"9.6","parameters":{"log_statement":"all","max_connections":"10","shared_buffers":"32MB"}},"volume":{"size":"5Gi","storageClass":"SSD"},"patroni":{"initdb":{"data-checksums":"true","encoding":"UTF8","locale":"en_US.UTF-8"},"pg_hba":["hostssl all all 0.0.0.0/0 md5","host all all 0.0.0.0/0 md5"],"ttl":30,"loop_wait":10,"retry_timeout":10,"maximum_lag_on_failover":33554432},"resources":{"requests":{"cpu":"10m","memory":"50Mi"},"limits":{"cpu":"300m","memory":"3000Mi"}},"teamId":"ACID","allowedSourceRanges":["127.0.0.1/32"],"numberOfInstances":2,"users":{"zalando":["superuser","createdb"]},"maintenanceWindows":["Mon:01:00-06:00","Sat:00:00-04:00","05:00-05:15"],"clone":{"cluster":"acid-batman"}}}`), nil},
|
||||
{
|
||||
|
|
@ -279,7 +283,7 @@ var unmarshalCluster = []struct {
|
|||
},
|
||||
Spec: PostgresSpec{TeamID: "acid"},
|
||||
Status: ClusterStatusInvalid,
|
||||
Error: errors.New("name must match {TEAM}-{NAME} format"),
|
||||
Error: errors.New("name must match {TEAM}-{NAME} format").Error(),
|
||||
},
|
||||
[]byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"teapot-testcluster1","creationTimestamp":null},"spec":{"postgresql":{"version":"","parameters":null},"volume":{"size":"","storageClass":""},"patroni":{"initdb":null,"pg_hba":null,"ttl":0,"loop_wait":0,"retry_timeout":0,"maximum_lag_on_failover":0},"resources":{"requests":{"cpu":"","memory":""},"limits":{"cpu":"","memory":""}},"teamId":"acid","allowedSourceRanges":null,"numberOfInstances":0,"users":null,"clone":{}},"status":"Invalid"}`), nil},
|
||||
{
|
||||
|
|
@ -299,7 +303,7 @@ var unmarshalCluster = []struct {
|
|||
},
|
||||
ClusterName: "testcluster1",
|
||||
},
|
||||
Error: nil,
|
||||
Error: "",
|
||||
},
|
||||
marshal: []byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"acid-testcluster1","creationTimestamp":null},"spec":{"postgresql":{"version":"","parameters":null},"volume":{"size":"","storageClass":""},"patroni":{"initdb":null,"pg_hba":null,"ttl":0,"loop_wait":0,"retry_timeout":0,"maximum_lag_on_failover":0},"resources":{"requests":{"cpu":"","memory":""},"limits":{"cpu":"","memory":""}},"teamId":"acid","allowedSourceRanges":null,"numberOfInstances":0,"users":null,"clone":{"cluster":"team-batman"}}}`), err: nil},
|
||||
{[]byte(`{"kind": "Postgresql","apiVersion": "acid.zalan.do/v1"`),
|
||||
|
|
@ -344,7 +348,7 @@ var postgresqlList = []struct {
|
|||
NumberOfInstances: 1,
|
||||
},
|
||||
Status: ClusterStatusRunning,
|
||||
Error: nil,
|
||||
Error: "",
|
||||
}},
|
||||
},
|
||||
nil},
|
||||
|
|
@ -352,13 +356,13 @@ var postgresqlList = []struct {
|
|||
PostgresqlList{},
|
||||
errors.New("unexpected end of JSON input")}}
|
||||
|
||||
// mustParseTime is a test helper that parses an "HH:MM" string into a
// metav1.Time (UTC), panicking on invalid input.
func mustParseTime(s string) metav1.Time {
	v, err := time.Parse("15:04", s)
	if err != nil {
		panic(err)
	}

	return metav1.Time{Time: v.UTC()}
}
|
||||
|
||||
func TestParseTime(t *testing.T) {
|
||||
|
|
@ -509,25 +513,6 @@ func TestPostgresMeta(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
// TestUnmarshalPostgresList checks PostgresqlList.UnmarshalJSON against the
// table-driven postgresqlList fixtures: expected errors must match by message,
// successful decodes must deep-equal the expected list.
func TestUnmarshalPostgresList(t *testing.T) {
	for _, tt := range postgresqlList {
		var list PostgresqlList
		err := list.UnmarshalJSON(tt.in)
		if err != nil {
			if tt.err == nil || err.Error() != tt.err.Error() {
				t.Errorf("PostgresqlList unmarshal expected error: %v, got: %v", tt.err, err)
			}
			// error matched the expectation; nothing further to compare
			continue
		} else if tt.err != nil {
			t.Errorf("Expected error: %v", tt.err)
		}

		if !reflect.DeepEqual(list, tt.out) {
			t.Errorf("Postgresql list unmarshall expected: %#v, got: %#v", tt.out, list)
		}
	}
}
|
||||
|
||||
func TestPostgresListMeta(t *testing.T) {
|
||||
for _, tt := range postgresqlList {
|
||||
if tt.err != nil {
|
||||
|
|
@ -549,7 +534,7 @@ func TestPostgresListMeta(t *testing.T) {
|
|||
func TestPostgresqlClone(t *testing.T) {
|
||||
for _, tt := range unmarshalCluster {
|
||||
cp := &tt.out
|
||||
cp.Error = nil
|
||||
cp.Error = ""
|
||||
clone := cp.Clone()
|
||||
if !reflect.DeepEqual(clone, cp) {
|
||||
t.Errorf("TestPostgresqlClone expected: \n%#v\n, got \n%#v", cp, clone)
|
||||
|
|
@ -0,0 +1,686 @@
|
|||
// +build !ignore_autogenerated
|
||||
|
||||
/*
|
||||
Copyright 2018 Compose, Zalando SE
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
*/
|
||||
|
||||
// Code generated by deepcopy-gen. DO NOT EDIT.
|
||||
|
||||
package v1
|
||||
|
||||
import (
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
runtime "k8s.io/apimachinery/pkg/runtime"
|
||||
)
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *AWSGCPConfiguration) DeepCopyInto(out *AWSGCPConfiguration) {
|
||||
*out = *in
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSGCPConfiguration.
|
||||
func (in *AWSGCPConfiguration) DeepCopy() *AWSGCPConfiguration {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(AWSGCPConfiguration)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *CloneDescription) DeepCopyInto(out *CloneDescription) {
|
||||
*out = *in
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloneDescription.
|
||||
func (in *CloneDescription) DeepCopy() *CloneDescription {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(CloneDescription)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *KubernetesMetaConfiguration) DeepCopyInto(out *KubernetesMetaConfiguration) {
|
||||
*out = *in
|
||||
out.OAuthTokenSecretName = in.OAuthTokenSecretName
|
||||
out.InfrastructureRolesSecretName = in.InfrastructureRolesSecretName
|
||||
if in.ClusterLabels != nil {
|
||||
in, out := &in.ClusterLabels, &out.ClusterLabels
|
||||
*out = make(map[string]string, len(*in))
|
||||
for key, val := range *in {
|
||||
(*out)[key] = val
|
||||
}
|
||||
}
|
||||
if in.NodeReadinessLabel != nil {
|
||||
in, out := &in.NodeReadinessLabel, &out.NodeReadinessLabel
|
||||
*out = make(map[string]string, len(*in))
|
||||
for key, val := range *in {
|
||||
(*out)[key] = val
|
||||
}
|
||||
}
|
||||
if in.PodToleration != nil {
|
||||
in, out := &in.PodToleration, &out.PodToleration
|
||||
*out = make(map[string]string, len(*in))
|
||||
for key, val := range *in {
|
||||
(*out)[key] = val
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubernetesMetaConfiguration.
|
||||
func (in *KubernetesMetaConfiguration) DeepCopy() *KubernetesMetaConfiguration {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(KubernetesMetaConfiguration)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *LoadBalancerConfiguration) DeepCopyInto(out *LoadBalancerConfiguration) {
|
||||
*out = *in
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoadBalancerConfiguration.
|
||||
func (in *LoadBalancerConfiguration) DeepCopy() *LoadBalancerConfiguration {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(LoadBalancerConfiguration)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *LoggingRESTAPIConfiguration) DeepCopyInto(out *LoggingRESTAPIConfiguration) {
|
||||
*out = *in
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoggingRESTAPIConfiguration.
|
||||
func (in *LoggingRESTAPIConfiguration) DeepCopy() *LoggingRESTAPIConfiguration {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(LoggingRESTAPIConfiguration)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *MaintenanceWindow) DeepCopyInto(out *MaintenanceWindow) {
|
||||
*out = *in
|
||||
in.StartTime.DeepCopyInto(&out.StartTime)
|
||||
in.EndTime.DeepCopyInto(&out.EndTime)
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MaintenanceWindow.
|
||||
func (in *MaintenanceWindow) DeepCopy() *MaintenanceWindow {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(MaintenanceWindow)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *OperatorConfiguration) DeepCopyInto(out *OperatorConfiguration) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
|
||||
in.Configuration.DeepCopyInto(&out.Configuration)
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorConfiguration.
|
||||
func (in *OperatorConfiguration) DeepCopy() *OperatorConfiguration {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(OperatorConfiguration)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||
func (in *OperatorConfiguration) DeepCopyObject() runtime.Object {
|
||||
if c := in.DeepCopy(); c != nil {
|
||||
return c
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *OperatorConfigurationData) DeepCopyInto(out *OperatorConfigurationData) {
|
||||
*out = *in
|
||||
if in.Sidecars != nil {
|
||||
in, out := &in.Sidecars, &out.Sidecars
|
||||
*out = make(map[string]string, len(*in))
|
||||
for key, val := range *in {
|
||||
(*out)[key] = val
|
||||
}
|
||||
}
|
||||
out.PostgresUsersConfiguration = in.PostgresUsersConfiguration
|
||||
in.Kubernetes.DeepCopyInto(&out.Kubernetes)
|
||||
out.PostgresPodResources = in.PostgresPodResources
|
||||
out.Timeouts = in.Timeouts
|
||||
out.LoadBalancer = in.LoadBalancer
|
||||
out.AWSGCP = in.AWSGCP
|
||||
out.OperatorDebug = in.OperatorDebug
|
||||
in.TeamsAPI.DeepCopyInto(&out.TeamsAPI)
|
||||
out.LoggingRESTAPI = in.LoggingRESTAPI
|
||||
out.Scalyr = in.Scalyr
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorConfigurationData.
|
||||
func (in *OperatorConfigurationData) DeepCopy() *OperatorConfigurationData {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(OperatorConfigurationData)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *OperatorConfigurationList) DeepCopyInto(out *OperatorConfigurationList) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
out.ListMeta = in.ListMeta
|
||||
if in.Items != nil {
|
||||
in, out := &in.Items, &out.Items
|
||||
*out = make([]OperatorConfiguration, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorConfigurationList.
|
||||
func (in *OperatorConfigurationList) DeepCopy() *OperatorConfigurationList {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(OperatorConfigurationList)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||
func (in *OperatorConfigurationList) DeepCopyObject() runtime.Object {
|
||||
if c := in.DeepCopy(); c != nil {
|
||||
return c
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *OperatorConfigurationUsers) DeepCopyInto(out *OperatorConfigurationUsers) {
|
||||
*out = *in
|
||||
if in.ProtectedRoles != nil {
|
||||
in, out := &in.ProtectedRoles, &out.ProtectedRoles
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.TeamAPIRoleConfiguration != nil {
|
||||
in, out := &in.TeamAPIRoleConfiguration, &out.TeamAPIRoleConfiguration
|
||||
*out = make(map[string]string, len(*in))
|
||||
for key, val := range *in {
|
||||
(*out)[key] = val
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorConfigurationUsers.
|
||||
func (in *OperatorConfigurationUsers) DeepCopy() *OperatorConfigurationUsers {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(OperatorConfigurationUsers)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *OperatorDebugConfiguration) DeepCopyInto(out *OperatorDebugConfiguration) {
|
||||
*out = *in
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorDebugConfiguration.
|
||||
func (in *OperatorDebugConfiguration) DeepCopy() *OperatorDebugConfiguration {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(OperatorDebugConfiguration)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *OperatorTimeouts) DeepCopyInto(out *OperatorTimeouts) {
|
||||
*out = *in
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorTimeouts.
|
||||
func (in *OperatorTimeouts) DeepCopy() *OperatorTimeouts {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(OperatorTimeouts)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *Patroni) DeepCopyInto(out *Patroni) {
|
||||
*out = *in
|
||||
if in.InitDB != nil {
|
||||
in, out := &in.InitDB, &out.InitDB
|
||||
*out = make(map[string]string, len(*in))
|
||||
for key, val := range *in {
|
||||
(*out)[key] = val
|
||||
}
|
||||
}
|
||||
if in.PgHba != nil {
|
||||
in, out := &in.PgHba, &out.PgHba
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Patroni.
|
||||
func (in *Patroni) DeepCopy() *Patroni {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(Patroni)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *PostgresPodResourcesDefaults) DeepCopyInto(out *PostgresPodResourcesDefaults) {
|
||||
*out = *in
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresPodResourcesDefaults.
|
||||
func (in *PostgresPodResourcesDefaults) DeepCopy() *PostgresPodResourcesDefaults {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(PostgresPodResourcesDefaults)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *PostgresSpec) DeepCopyInto(out *PostgresSpec) {
|
||||
*out = *in
|
||||
in.PostgresqlParam.DeepCopyInto(&out.PostgresqlParam)
|
||||
out.Volume = in.Volume
|
||||
in.Patroni.DeepCopyInto(&out.Patroni)
|
||||
out.Resources = in.Resources
|
||||
if in.EnableMasterLoadBalancer != nil {
|
||||
in, out := &in.EnableMasterLoadBalancer, &out.EnableMasterLoadBalancer
|
||||
*out = new(bool)
|
||||
**out = **in
|
||||
}
|
||||
if in.EnableReplicaLoadBalancer != nil {
|
||||
in, out := &in.EnableReplicaLoadBalancer, &out.EnableReplicaLoadBalancer
|
||||
*out = new(bool)
|
||||
**out = **in
|
||||
}
|
||||
if in.UseLoadBalancer != nil {
|
||||
in, out := &in.UseLoadBalancer, &out.UseLoadBalancer
|
||||
*out = new(bool)
|
||||
**out = **in
|
||||
}
|
||||
if in.ReplicaLoadBalancer != nil {
|
||||
in, out := &in.ReplicaLoadBalancer, &out.ReplicaLoadBalancer
|
||||
*out = new(bool)
|
||||
**out = **in
|
||||
}
|
||||
if in.AllowedSourceRanges != nil {
|
||||
in, out := &in.AllowedSourceRanges, &out.AllowedSourceRanges
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.Users != nil {
|
||||
in, out := &in.Users, &out.Users
|
||||
*out = make(map[string]UserFlags, len(*in))
|
||||
for key, val := range *in {
|
||||
var outVal []string
|
||||
if val == nil {
|
||||
(*out)[key] = nil
|
||||
} else {
|
||||
in, out := &val, &outVal
|
||||
*out = make(UserFlags, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
(*out)[key] = outVal
|
||||
}
|
||||
}
|
||||
if in.MaintenanceWindows != nil {
|
||||
in, out := &in.MaintenanceWindows, &out.MaintenanceWindows
|
||||
*out = make([]MaintenanceWindow, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
out.Clone = in.Clone
|
||||
if in.Databases != nil {
|
||||
in, out := &in.Databases, &out.Databases
|
||||
*out = make(map[string]string, len(*in))
|
||||
for key, val := range *in {
|
||||
(*out)[key] = val
|
||||
}
|
||||
}
|
||||
if in.Tolerations != nil {
|
||||
in, out := &in.Tolerations, &out.Tolerations
|
||||
*out = make([]corev1.Toleration, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
if in.Sidecars != nil {
|
||||
in, out := &in.Sidecars, &out.Sidecars
|
||||
*out = make([]Sidecar, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresSpec.
|
||||
func (in *PostgresSpec) DeepCopy() *PostgresSpec {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(PostgresSpec)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *PostgresUsersConfiguration) DeepCopyInto(out *PostgresUsersConfiguration) {
|
||||
*out = *in
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresUsersConfiguration.
|
||||
func (in *PostgresUsersConfiguration) DeepCopy() *PostgresUsersConfiguration {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(PostgresUsersConfiguration)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *Postgresql) DeepCopyInto(out *Postgresql) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
|
||||
in.Spec.DeepCopyInto(&out.Spec)
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Postgresql.
|
||||
func (in *Postgresql) DeepCopy() *Postgresql {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(Postgresql)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||
func (in *Postgresql) DeepCopyObject() runtime.Object {
|
||||
if c := in.DeepCopy(); c != nil {
|
||||
return c
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *PostgresqlList) DeepCopyInto(out *PostgresqlList) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
out.ListMeta = in.ListMeta
|
||||
if in.Items != nil {
|
||||
in, out := &in.Items, &out.Items
|
||||
*out = make([]Postgresql, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresqlList.
|
||||
func (in *PostgresqlList) DeepCopy() *PostgresqlList {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(PostgresqlList)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||
func (in *PostgresqlList) DeepCopyObject() runtime.Object {
|
||||
if c := in.DeepCopy(); c != nil {
|
||||
return c
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *PostgresqlParam) DeepCopyInto(out *PostgresqlParam) {
|
||||
*out = *in
|
||||
if in.Parameters != nil {
|
||||
in, out := &in.Parameters, &out.Parameters
|
||||
*out = make(map[string]string, len(*in))
|
||||
for key, val := range *in {
|
||||
(*out)[key] = val
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresqlParam.
|
||||
func (in *PostgresqlParam) DeepCopy() *PostgresqlParam {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(PostgresqlParam)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ResourceDescription) DeepCopyInto(out *ResourceDescription) {
|
||||
*out = *in
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceDescription.
|
||||
func (in *ResourceDescription) DeepCopy() *ResourceDescription {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(ResourceDescription)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *Resources) DeepCopyInto(out *Resources) {
|
||||
*out = *in
|
||||
out.ResourceRequest = in.ResourceRequest
|
||||
out.ResourceLimits = in.ResourceLimits
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Resources.
|
||||
func (in *Resources) DeepCopy() *Resources {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(Resources)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ScalyrConfiguration) DeepCopyInto(out *ScalyrConfiguration) {
|
||||
*out = *in
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScalyrConfiguration.
|
||||
func (in *ScalyrConfiguration) DeepCopy() *ScalyrConfiguration {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(ScalyrConfiguration)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *Sidecar) DeepCopyInto(out *Sidecar) {
|
||||
*out = *in
|
||||
out.Resources = in.Resources
|
||||
if in.Ports != nil {
|
||||
in, out := &in.Ports, &out.Ports
|
||||
*out = make([]corev1.ContainerPort, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.Env != nil {
|
||||
in, out := &in.Env, &out.Env
|
||||
*out = make([]corev1.EnvVar, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Sidecar.
|
||||
func (in *Sidecar) DeepCopy() *Sidecar {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(Sidecar)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *TeamsAPIConfiguration) DeepCopyInto(out *TeamsAPIConfiguration) {
|
||||
*out = *in
|
||||
if in.TeamAPIRoleConfiguration != nil {
|
||||
in, out := &in.TeamAPIRoleConfiguration, &out.TeamAPIRoleConfiguration
|
||||
*out = make(map[string]string, len(*in))
|
||||
for key, val := range *in {
|
||||
(*out)[key] = val
|
||||
}
|
||||
}
|
||||
if in.ProtectedRoles != nil {
|
||||
in, out := &in.ProtectedRoles, &out.ProtectedRoles
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.PostgresSuperuserTeams != nil {
|
||||
in, out := &in.PostgresSuperuserTeams, &out.PostgresSuperuserTeams
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TeamsAPIConfiguration.
|
||||
func (in *TeamsAPIConfiguration) DeepCopy() *TeamsAPIConfiguration {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(TeamsAPIConfiguration)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in UserFlags) DeepCopyInto(out *UserFlags) {
|
||||
{
|
||||
in := &in
|
||||
*out = make(UserFlags, len(*in))
|
||||
copy(*out, *in)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserFlags.
|
||||
func (in UserFlags) DeepCopy() UserFlags {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(UserFlags)
|
||||
in.DeepCopyInto(out)
|
||||
return *out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *Volume) DeepCopyInto(out *Volume) {
|
||||
*out = *in
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Volume.
|
||||
func (in *Volume) DeepCopy() *Volume {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(Volume)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
|
@ -13,6 +13,7 @@ import (
|
|||
|
||||
"github.com/Sirupsen/logrus"
|
||||
|
||||
"github.com/zalando-incubator/postgres-operator/pkg/cluster"
|
||||
"github.com/zalando-incubator/postgres-operator/pkg/spec"
|
||||
"github.com/zalando-incubator/postgres-operator/pkg/util"
|
||||
"github.com/zalando-incubator/postgres-operator/pkg/util/config"
|
||||
|
|
@ -30,14 +31,14 @@ type controllerInformer interface {
|
|||
GetOperatorConfig() *config.Config
|
||||
GetStatus() *spec.ControllerStatus
|
||||
TeamClusterList() map[string][]spec.NamespacedName
|
||||
ClusterStatus(team, namespace, cluster string) (*spec.ClusterStatus, error)
|
||||
ClusterStatus(team, namespace, cluster string) (*cluster.ClusterStatus, error)
|
||||
ClusterLogs(team, namespace, cluster string) ([]*spec.LogEntry, error)
|
||||
ClusterHistory(team, namespace, cluster string) ([]*spec.Diff, error)
|
||||
ClusterDatabasesMap() map[string][]string
|
||||
WorkerLogs(workerID uint32) ([]*spec.LogEntry, error)
|
||||
ListQueue(workerID uint32) (*spec.QueueDump, error)
|
||||
GetWorkersCnt() uint32
|
||||
WorkerStatus(workerID uint32) (*spec.WorkerStatus, error)
|
||||
WorkerStatus(workerID uint32) (*cluster.WorkerStatus, error)
|
||||
}
|
||||
|
||||
// Server describes HTTP API server
|
||||
|
|
@ -92,12 +93,14 @@ func New(controller controllerInformer, port int, logger *logrus.Logger) *Server
|
|||
|
||||
// Run starts the HTTP server
|
||||
func (s *Server) Run(stopCh <-chan struct{}, wg *sync.WaitGroup) {
|
||||
|
||||
var err error
|
||||
|
||||
defer wg.Done()
|
||||
|
||||
go func() {
|
||||
err := s.http.ListenAndServe()
|
||||
if err != http.ErrServerClosed {
|
||||
s.logger.Fatalf("Could not start http server: %v", err)
|
||||
if err2 := s.http.ListenAndServe(); err2 != http.ErrServerClosed {
|
||||
s.logger.Fatalf("Could not start http server: %v", err2)
|
||||
}
|
||||
}()
|
||||
s.logger.Infof("listening on %s", s.http.Addr)
|
||||
|
|
@ -106,21 +109,27 @@ func (s *Server) Run(stopCh <-chan struct{}, wg *sync.WaitGroup) {
|
|||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), shutdownTimeout)
|
||||
defer cancel()
|
||||
err := s.http.Shutdown(ctx)
|
||||
if err = s.http.Shutdown(ctx); err == nil {
|
||||
s.logger.Infoln("Http server shut down")
|
||||
return
|
||||
}
|
||||
if err == context.DeadlineExceeded {
|
||||
s.logger.Warningf("Shutdown timeout exceeded. closing http server")
|
||||
s.http.Close()
|
||||
} else if err != nil {
|
||||
s.logger.Errorf("Could not shutdown http server: %v", err)
|
||||
if err = s.http.Close(); err != nil {
|
||||
s.logger.Errorf("could not close http connection: %v", err)
|
||||
}
|
||||
return
|
||||
}
|
||||
s.logger.Infoln("Http server shut down")
|
||||
s.logger.Errorf("Could not shutdown http server: %v", err)
|
||||
}
|
||||
|
||||
func (s *Server) respond(obj interface{}, err error, w http.ResponseWriter) {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
if err != nil {
|
||||
w.WriteHeader(http.StatusInternalServerError)
|
||||
json.NewEncoder(w).Encode(map[string]interface{}{"error": err.Error()})
|
||||
if err2 := json.NewEncoder(w).Encode(map[string]interface{}{"error": err.Error()}); err2 != nil {
|
||||
s.logger.Errorf("could not encode error response %q: %v", err, err2)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
|
|
@ -149,7 +158,7 @@ func (s *Server) clusters(w http.ResponseWriter, req *http.Request) {
|
|||
)
|
||||
|
||||
if matches := util.FindNamedStringSubmatch(clusterStatusURL, req.URL.Path); matches != nil {
|
||||
namespace, _ := matches["namespace"]
|
||||
namespace := matches["namespace"]
|
||||
resp, err = s.controller.ClusterStatus(matches["team"], namespace, matches["cluster"])
|
||||
} else if matches := util.FindNamedStringSubmatch(teamURL, req.URL.Path); matches != nil {
|
||||
teamClusters := s.controller.TeamClusterList()
|
||||
|
|
@ -166,10 +175,10 @@ func (s *Server) clusters(w http.ResponseWriter, req *http.Request) {
|
|||
|
||||
resp, err = clusterNames, nil
|
||||
} else if matches := util.FindNamedStringSubmatch(clusterLogsURL, req.URL.Path); matches != nil {
|
||||
namespace, _ := matches["namespace"]
|
||||
namespace := matches["namespace"]
|
||||
resp, err = s.controller.ClusterLogs(matches["team"], namespace, matches["cluster"])
|
||||
} else if matches := util.FindNamedStringSubmatch(clusterHistoryURL, req.URL.Path); matches != nil {
|
||||
namespace, _ := matches["namespace"]
|
||||
namespace := matches["namespace"]
|
||||
resp, err = s.controller.ClusterHistory(matches["team"], namespace, matches["cluster"])
|
||||
} else if req.URL.Path == clustersURL {
|
||||
clusterNamesPerTeam := make(map[string][]string)
|
||||
|
|
@ -186,6 +195,14 @@ func (s *Server) clusters(w http.ResponseWriter, req *http.Request) {
|
|||
s.respond(resp, err, w)
|
||||
}
|
||||
|
||||
func mustConvertToUint32(s string) uint32 {
|
||||
result, err := strconv.Atoi(s)
|
||||
if err != nil {
|
||||
panic(fmt.Errorf("mustConvertToUint32 called for %s: %v", s, err))
|
||||
}
|
||||
return uint32(result)
|
||||
}
|
||||
|
||||
func (s *Server) workers(w http.ResponseWriter, req *http.Request) {
|
||||
var (
|
||||
resp interface{}
|
||||
|
|
@ -195,30 +212,30 @@ func (s *Server) workers(w http.ResponseWriter, req *http.Request) {
|
|||
if workerAllQueue.MatchString(req.URL.Path) {
|
||||
s.allQueues(w, req)
|
||||
return
|
||||
} else if matches := util.FindNamedStringSubmatch(workerLogsURL, req.URL.Path); matches != nil {
|
||||
workerID, _ := strconv.Atoi(matches["id"])
|
||||
|
||||
resp, err = s.controller.WorkerLogs(uint32(workerID))
|
||||
} else if matches := util.FindNamedStringSubmatch(workerEventsQueueURL, req.URL.Path); matches != nil {
|
||||
workerID, _ := strconv.Atoi(matches["id"])
|
||||
|
||||
resp, err = s.controller.ListQueue(uint32(workerID))
|
||||
} else if matches := util.FindNamedStringSubmatch(workerStatusURL, req.URL.Path); matches != nil {
|
||||
var workerStatus *spec.WorkerStatus
|
||||
|
||||
workerID, _ := strconv.Atoi(matches["id"])
|
||||
workerStatus, err = s.controller.WorkerStatus(uint32(workerID))
|
||||
if workerStatus == nil {
|
||||
resp = "idle"
|
||||
} else {
|
||||
resp = workerStatus
|
||||
}
|
||||
} else if workerAllStatus.MatchString(req.URL.Path) {
|
||||
}
|
||||
if workerAllStatus.MatchString(req.URL.Path) {
|
||||
s.allWorkers(w, req)
|
||||
return
|
||||
} else {
|
||||
s.respond(nil, fmt.Errorf("page not found"), w)
|
||||
return
|
||||
}
|
||||
|
||||
err = fmt.Errorf("page not found")
|
||||
|
||||
if matches := util.FindNamedStringSubmatch(workerLogsURL, req.URL.Path); matches != nil {
|
||||
workerID := mustConvertToUint32(matches["id"])
|
||||
resp, err = s.controller.WorkerLogs(workerID)
|
||||
|
||||
} else if matches := util.FindNamedStringSubmatch(workerEventsQueueURL, req.URL.Path); matches != nil {
|
||||
workerID := mustConvertToUint32(matches["id"])
|
||||
resp, err = s.controller.ListQueue(workerID)
|
||||
|
||||
} else if matches := util.FindNamedStringSubmatch(workerStatusURL, req.URL.Path); matches != nil {
|
||||
var workerStatus *cluster.WorkerStatus
|
||||
|
||||
workerID := mustConvertToUint32(matches["id"])
|
||||
resp = "idle"
|
||||
if workerStatus, err = s.controller.WorkerStatus(workerID); workerStatus != nil {
|
||||
resp = workerStatus
|
||||
}
|
||||
}
|
||||
|
||||
s.respond(resp, err, w)
|
||||
|
|
@ -228,8 +245,6 @@ func (s *Server) databases(w http.ResponseWriter, req *http.Request) {
|
|||
|
||||
databaseNamesPerCluster := s.controller.ClusterDatabasesMap()
|
||||
s.respond(databaseNamesPerCluster, nil, w)
|
||||
return
|
||||
|
||||
}
|
||||
|
||||
func (s *Server) allQueues(w http.ResponseWriter, r *http.Request) {
|
||||
|
|
|
|||
|
|
@ -4,7 +4,6 @@ package cluster
|
|||
|
||||
import (
|
||||
"database/sql"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"regexp"
|
||||
|
|
@ -12,14 +11,17 @@ import (
|
|||
"time"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
"k8s.io/api/apps/v1beta1"
|
||||
"k8s.io/api/core/v1"
|
||||
policybeta1 "k8s.io/api/policy/v1beta1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/client-go/pkg/api/v1"
|
||||
"k8s.io/client-go/pkg/apis/apps/v1beta1"
|
||||
policybeta1 "k8s.io/client-go/pkg/apis/policy/v1beta1"
|
||||
"k8s.io/client-go/rest"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
|
||||
"encoding/json"
|
||||
|
||||
acidv1 "github.com/zalando-incubator/postgres-operator/pkg/apis/acid.zalan.do/v1"
|
||||
"github.com/zalando-incubator/postgres-operator/pkg/spec"
|
||||
"github.com/zalando-incubator/postgres-operator/pkg/util"
|
||||
"github.com/zalando-incubator/postgres-operator/pkg/util/config"
|
||||
|
|
@ -28,6 +30,7 @@ import (
|
|||
"github.com/zalando-incubator/postgres-operator/pkg/util/patroni"
|
||||
"github.com/zalando-incubator/postgres-operator/pkg/util/teams"
|
||||
"github.com/zalando-incubator/postgres-operator/pkg/util/users"
|
||||
rbacv1beta1 "k8s.io/api/rbac/v1beta1"
|
||||
)
|
||||
|
||||
var (
|
||||
|
|
@ -39,10 +42,11 @@ var (
|
|||
|
||||
// Config contains operator-wide clients and configuration used from a cluster. TODO: remove struct duplication.
|
||||
type Config struct {
|
||||
OpConfig config.Config
|
||||
RestConfig *rest.Config
|
||||
InfrastructureRoles map[string]spec.PgUser // inherited from the controller
|
||||
PodServiceAccount *v1.ServiceAccount
|
||||
OpConfig config.Config
|
||||
RestConfig *rest.Config
|
||||
InfrastructureRoles map[string]spec.PgUser // inherited from the controller
|
||||
PodServiceAccount *v1.ServiceAccount
|
||||
PodServiceAccountRoleBinding *rbacv1beta1.RoleBinding
|
||||
}
|
||||
|
||||
type kubeResources struct {
|
||||
|
|
@ -58,13 +62,13 @@ type kubeResources struct {
|
|||
// Cluster describes postgresql cluster
|
||||
type Cluster struct {
|
||||
kubeResources
|
||||
spec.Postgresql
|
||||
acidv1.Postgresql
|
||||
Config
|
||||
logger *logrus.Entry
|
||||
patroni patroni.Interface
|
||||
pgUsers map[string]spec.PgUser
|
||||
systemUsers map[string]spec.PgUser
|
||||
podSubscribers map[spec.NamespacedName]chan spec.PodEvent
|
||||
podSubscribers map[spec.NamespacedName]chan PodEvent
|
||||
podSubscribersMu sync.RWMutex
|
||||
pgDb *sql.DB
|
||||
mu sync.Mutex
|
||||
|
|
@ -75,7 +79,7 @@ type Cluster struct {
|
|||
teamsAPIClient teams.Interface
|
||||
oauthTokenGetter OAuthTokenGetter
|
||||
KubeClient k8sutil.KubernetesClient //TODO: move clients to the better place?
|
||||
currentProcess spec.Process
|
||||
currentProcess Process
|
||||
processMu sync.RWMutex // protects the current operation for reporting, no need to hold the master mutex
|
||||
specMu sync.RWMutex // protects the spec for reporting, no need to hold the master mutex
|
||||
dryRunMode bool
|
||||
|
|
@ -89,11 +93,11 @@ type compareStatefulsetResult struct {
|
|||
}
|
||||
|
||||
// New creates a new cluster. This function should be called from a controller.
|
||||
func New(cfg Config, kubeClient k8sutil.KubernetesClient, pgSpec spec.Postgresql, logger *logrus.Entry) *Cluster {
|
||||
orphanDependents := true
|
||||
func New(cfg Config, kubeClient k8sutil.KubernetesClient, pgSpec acidv1.Postgresql, logger *logrus.Entry) *Cluster {
|
||||
deletePropagationPolicy := metav1.DeletePropagationOrphan
|
||||
|
||||
podEventsQueue := cache.NewFIFO(func(obj interface{}) (string, error) {
|
||||
e, ok := obj.(spec.PodEvent)
|
||||
e, ok := obj.(PodEvent)
|
||||
if !ok {
|
||||
return "", fmt.Errorf("could not cast to PodEvent")
|
||||
}
|
||||
|
|
@ -106,20 +110,20 @@ func New(cfg Config, kubeClient k8sutil.KubernetesClient, pgSpec spec.Postgresql
|
|||
Postgresql: pgSpec,
|
||||
pgUsers: make(map[string]spec.PgUser),
|
||||
systemUsers: make(map[string]spec.PgUser),
|
||||
podSubscribers: make(map[spec.NamespacedName]chan spec.PodEvent),
|
||||
podSubscribers: make(map[spec.NamespacedName]chan PodEvent),
|
||||
kubeResources: kubeResources{
|
||||
Secrets: make(map[types.UID]*v1.Secret),
|
||||
Services: make(map[PostgresRole]*v1.Service),
|
||||
Endpoints: make(map[PostgresRole]*v1.Endpoints)},
|
||||
userSyncStrategy: users.DefaultUserSyncStrategy{},
|
||||
deleteOptions: &metav1.DeleteOptions{OrphanDependents: &orphanDependents},
|
||||
deleteOptions: &metav1.DeleteOptions{PropagationPolicy: &deletePropagationPolicy},
|
||||
podEventsQueue: podEventsQueue,
|
||||
KubeClient: kubeClient,
|
||||
dryRunMode: false,
|
||||
}
|
||||
cluster.logger = logger.WithField("pkg", "cluster").WithField("cluster-name", cluster.clusterName())
|
||||
cluster.teamsAPIClient = teams.NewTeamsAPI(cfg.OpConfig.TeamsAPIUrl, logger)
|
||||
cluster.oauthTokenGetter = NewSecretOauthTokenGetter(&kubeClient, cfg.OpConfig.OAuthTokenSecretName)
|
||||
cluster.oauthTokenGetter = newSecretOauthTokenGetter(&kubeClient, cfg.OpConfig.OAuthTokenSecretName)
|
||||
cluster.patroni = patroni.New(cluster.logger)
|
||||
|
||||
return cluster
|
||||
|
|
@ -141,39 +145,36 @@ func (c *Cluster) teamName() string {
|
|||
func (c *Cluster) setProcessName(procName string, args ...interface{}) {
|
||||
c.processMu.Lock()
|
||||
defer c.processMu.Unlock()
|
||||
c.currentProcess = spec.Process{
|
||||
c.currentProcess = Process{
|
||||
Name: fmt.Sprintf(procName, args...),
|
||||
StartTime: time.Now(),
|
||||
}
|
||||
}
|
||||
|
||||
func (c *Cluster) setStatus(status spec.PostgresStatus) {
|
||||
c.Status = status
|
||||
b, err := json.Marshal(status)
|
||||
func (c *Cluster) setStatus(status acidv1.PostgresStatus) {
|
||||
// TODO: eventually switch to updateStatus() for kubernetes 1.11 and above
|
||||
var (
|
||||
err error
|
||||
b []byte
|
||||
)
|
||||
if b, err = json.Marshal(status); err != nil {
|
||||
c.logger.Errorf("could not marshal status: %v", err)
|
||||
}
|
||||
|
||||
patch := []byte(fmt.Sprintf(`{"status": %s}`, string(b)))
|
||||
// we cannot do a full scale update here without fetching the previous manifest (as the resourceVersion may differ),
|
||||
// however, we could do patch without it. In the future, once /status subresource is there (starting Kubernets 1.11)
|
||||
// we should take advantage of it.
|
||||
newspec, err := c.KubeClient.AcidV1ClientSet.AcidV1().Postgresqls(c.clusterNamespace()).Patch(c.Name, types.MergePatchType, patch)
|
||||
if err != nil {
|
||||
c.logger.Fatalf("could not marshal status: %v", err)
|
||||
}
|
||||
request := []byte(fmt.Sprintf(`{"status": %s}`, string(b))) //TODO: Look into/wait for k8s go client methods
|
||||
|
||||
_, err = c.KubeClient.CRDREST.Patch(types.MergePatchType).
|
||||
Namespace(c.Namespace).
|
||||
Resource(constants.CRDResource).
|
||||
Name(c.Name).
|
||||
Body(request).
|
||||
DoRaw()
|
||||
|
||||
if k8sutil.ResourceNotFound(err) {
|
||||
c.logger.Warningf("could not set %q status for the non-existing cluster", status)
|
||||
return
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
c.logger.Warningf("could not set %q status for the cluster: %v", status, err)
|
||||
c.logger.Errorf("could not update status: %v", err)
|
||||
}
|
||||
// update the spec, maintaining the new resourceVersion.
|
||||
c.setSpec(newspec)
|
||||
}
|
||||
|
||||
func (c *Cluster) isNewCluster() bool {
|
||||
return c.Status == spec.ClusterStatusCreating
|
||||
return c.Status == acidv1.ClusterStatusCreating
|
||||
}
|
||||
|
||||
// initUsers populates c.systemUsers and c.pgUsers maps.
|
||||
|
|
@ -201,39 +202,6 @@ func (c *Cluster) initUsers() error {
|
|||
return nil
|
||||
}
|
||||
|
||||
/*
|
||||
Ensures the service account required by StatefulSets to create pods exists in a namespace before a PG cluster is created there so that a user does not have to deploy the account manually.
|
||||
|
||||
The operator does not sync these accounts after creation.
|
||||
*/
|
||||
func (c *Cluster) createPodServiceAccounts() error {
|
||||
|
||||
podServiceAccountName := c.Config.OpConfig.PodServiceAccountName
|
||||
_, err := c.KubeClient.ServiceAccounts(c.Namespace).Get(podServiceAccountName, metav1.GetOptions{})
|
||||
|
||||
if err != nil {
|
||||
|
||||
c.setProcessName(fmt.Sprintf("creating pod service account in the namespace %v", c.Namespace))
|
||||
|
||||
c.logger.Infof("the pod service account %q cannot be retrieved in the namespace %q. Trying to deploy the account.", podServiceAccountName, c.Namespace)
|
||||
|
||||
// get a separate copy of service account
|
||||
// to prevent a race condition when setting a namespace for many clusters
|
||||
sa := *c.PodServiceAccount
|
||||
_, err = c.KubeClient.ServiceAccounts(c.Namespace).Create(&sa)
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot deploy the pod service account %q defined in the config map to the %q namespace: %v", podServiceAccountName, c.Namespace, err)
|
||||
}
|
||||
|
||||
c.logger.Infof("successfully deployed the pod service account %q to the %q namespace", podServiceAccountName, c.Namespace)
|
||||
|
||||
} else {
|
||||
c.logger.Infof("successfully found the service account %q used to create pods to the namespace %q", podServiceAccountName, c.Namespace)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Create creates the new kubernetes objects associated with the cluster.
|
||||
func (c *Cluster) Create() error {
|
||||
c.mu.Lock()
|
||||
|
|
@ -248,13 +216,13 @@ func (c *Cluster) Create() error {
|
|||
|
||||
defer func() {
|
||||
if err == nil {
|
||||
c.setStatus(spec.ClusterStatusRunning) //TODO: are you sure it's running?
|
||||
c.setStatus(acidv1.ClusterStatusRunning) //TODO: are you sure it's running?
|
||||
} else {
|
||||
c.setStatus(spec.ClusterStatusAddFailed)
|
||||
c.setStatus(acidv1.ClusterStatusAddFailed)
|
||||
}
|
||||
}()
|
||||
|
||||
c.setStatus(spec.ClusterStatusCreating)
|
||||
c.setStatus(acidv1.ClusterStatusCreating)
|
||||
|
||||
for _, role := range []PostgresRole{Master, Replica} {
|
||||
|
||||
|
|
@ -302,11 +270,6 @@ func (c *Cluster) Create() error {
|
|||
}
|
||||
c.logger.Infof("pod disruption budget %q has been successfully created", util.NameFromMeta(pdb.ObjectMeta))
|
||||
|
||||
if err = c.createPodServiceAccounts(); err != nil {
|
||||
return fmt.Errorf("could not create pod service account %v : %v", c.OpConfig.PodServiceAccountName, err)
|
||||
}
|
||||
c.logger.Infof("pod service accounts have been successfully synced")
|
||||
|
||||
if c.Statefulset != nil {
|
||||
return fmt.Errorf("statefulset already exists in the cluster")
|
||||
}
|
||||
|
|
@ -324,7 +287,7 @@ func (c *Cluster) Create() error {
|
|||
}
|
||||
c.logger.Infof("pods are ready")
|
||||
|
||||
// create database objects unless we are running without pods or disabled that feature explicitely
|
||||
// create database objects unless we are running without pods or disabled that feature explicitly
|
||||
if !(c.databaseAccessDisabled() || c.getNumberOfInstances(&c.Spec) <= 0) {
|
||||
if err = c.createRoles(); err != nil {
|
||||
return fmt.Errorf("could not create users: %v", err)
|
||||
|
|
@ -439,7 +402,6 @@ func (c *Cluster) compareVolumeClaimTemplates(setA, setB *v1beta1.StatefulSet) (
|
|||
func (c *Cluster) compareContainers(setA, setB *v1beta1.StatefulSet) (bool, []string) {
|
||||
reasons := make([]string, 0)
|
||||
needsRollUpdate := false
|
||||
|
||||
for index, containerA := range setA.Spec.Template.Spec.Containers {
|
||||
containerB := setB.Spec.Template.Spec.Containers[index]
|
||||
for _, check := range c.getContainerChecks() {
|
||||
|
|
@ -461,8 +423,8 @@ func (c *Cluster) compareContainers(setA, setB *v1beta1.StatefulSet) (bool, []st
|
|||
return needsRollUpdate, reasons
|
||||
}
|
||||
|
||||
func compareResources(a *v1.ResourceRequirements, b *v1.ResourceRequirements) (equal bool) {
|
||||
equal = true
|
||||
func compareResources(a *v1.ResourceRequirements, b *v1.ResourceRequirements) bool {
|
||||
equal := true
|
||||
if a != nil {
|
||||
equal = compareResoucesAssumeFirstNotNil(a, b)
|
||||
}
|
||||
|
|
@ -470,7 +432,7 @@ func compareResources(a *v1.ResourceRequirements, b *v1.ResourceRequirements) (e
|
|||
equal = compareResoucesAssumeFirstNotNil(b, a)
|
||||
}
|
||||
|
||||
return
|
||||
return equal
|
||||
}
|
||||
|
||||
func compareResoucesAssumeFirstNotNil(a *v1.ResourceRequirements, b *v1.ResourceRequirements) bool {
|
||||
|
|
@ -493,20 +455,20 @@ func compareResoucesAssumeFirstNotNil(a *v1.ResourceRequirements, b *v1.Resource
|
|||
|
||||
// Update changes Kubernetes objects according to the new specification. Unlike the sync case, the missing object.
|
||||
// (i.e. service) is treated as an error.
|
||||
func (c *Cluster) Update(oldSpec, newSpec *spec.Postgresql) error {
|
||||
func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error {
|
||||
updateFailed := false
|
||||
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
|
||||
c.setStatus(spec.ClusterStatusUpdating)
|
||||
c.setStatus(acidv1.ClusterStatusUpdating)
|
||||
c.setSpec(newSpec)
|
||||
|
||||
defer func() {
|
||||
if updateFailed {
|
||||
c.setStatus(spec.ClusterStatusUpdateFailed)
|
||||
} else if c.Status != spec.ClusterStatusRunning {
|
||||
c.setStatus(spec.ClusterStatusRunning)
|
||||
c.setStatus(acidv1.ClusterStatusUpdateFailed)
|
||||
} else if c.Status != acidv1.ClusterStatusRunning {
|
||||
c.setStatus(acidv1.ClusterStatusRunning)
|
||||
}
|
||||
}()
|
||||
|
||||
|
|
@ -616,7 +578,7 @@ func (c *Cluster) Delete() {
|
|||
}
|
||||
|
||||
for _, obj := range c.Secrets {
|
||||
if delete, user := c.shouldDeleteSecret(obj); !delete {
|
||||
if doDelete, user := c.shouldDeleteSecret(obj); !doDelete {
|
||||
c.logger.Warningf("not removing secret %q for the system user %q", obj.GetName(), user)
|
||||
continue
|
||||
}
|
||||
|
|
@ -647,21 +609,29 @@ func (c *Cluster) Delete() {
|
|||
}
|
||||
}
|
||||
|
||||
//NeedsRepair returns true if the cluster should be included in the repair scan (based on its in-memory status).
|
||||
func (c *Cluster) NeedsRepair() (bool, acidv1.PostgresStatus) {
|
||||
c.specMu.RLock()
|
||||
defer c.specMu.RUnlock()
|
||||
return !c.Status.Success(), c.Status
|
||||
|
||||
}
|
||||
|
||||
// ReceivePodEvent is called back by the controller in order to add the cluster's pod event to the queue.
|
||||
func (c *Cluster) ReceivePodEvent(event spec.PodEvent) {
|
||||
func (c *Cluster) ReceivePodEvent(event PodEvent) {
|
||||
if err := c.podEventsQueue.Add(event); err != nil {
|
||||
c.logger.Errorf("error when receiving pod events: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func (c *Cluster) processPodEvent(obj interface{}) error {
|
||||
event, ok := obj.(spec.PodEvent)
|
||||
event, ok := obj.(PodEvent)
|
||||
if !ok {
|
||||
return fmt.Errorf("could not cast to PodEvent")
|
||||
}
|
||||
|
||||
c.podSubscribersMu.RLock()
|
||||
subscriber, ok := c.podSubscribers[event.PodName]
|
||||
subscriber, ok := c.podSubscribers[spec.NamespacedName(event.PodName)]
|
||||
c.podSubscribersMu.RUnlock()
|
||||
if ok {
|
||||
subscriber <- event
|
||||
|
|
@ -733,11 +703,13 @@ func (c *Cluster) initRobotUsers() error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (c *Cluster) initHumanUsers() error {
|
||||
teamMembers, err := c.getTeamMembers()
|
||||
func (c *Cluster) initTeamMembers(teamID string, isPostgresSuperuserTeam bool) error {
|
||||
teamMembers, err := c.getTeamMembers(teamID)
|
||||
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not get list of team members: %v", err)
|
||||
return fmt.Errorf("could not get list of team members for team %q: %v", teamID, err)
|
||||
}
|
||||
|
||||
for _, username := range teamMembers {
|
||||
flags := []string{constants.RoleFlagLogin}
|
||||
memberOf := []string{c.OpConfig.PamRoleName}
|
||||
|
|
@ -745,7 +717,7 @@ func (c *Cluster) initHumanUsers() error {
|
|||
if c.shouldAvoidProtectedOrSystemRole(username, "API role") {
|
||||
continue
|
||||
}
|
||||
if c.OpConfig.EnableTeamSuperuser {
|
||||
if c.OpConfig.EnableTeamSuperuser || isPostgresSuperuserTeam {
|
||||
flags = append(flags, constants.RoleFlagSuperuser)
|
||||
} else {
|
||||
if c.OpConfig.TeamAdminRole != "" {
|
||||
|
|
@ -771,6 +743,33 @@ func (c *Cluster) initHumanUsers() error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (c *Cluster) initHumanUsers() error {
|
||||
|
||||
var clusterIsOwnedBySuperuserTeam bool
|
||||
|
||||
for _, postgresSuperuserTeam := range c.OpConfig.PostgresSuperuserTeams {
|
||||
err := c.initTeamMembers(postgresSuperuserTeam, true)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Cannot create a team %q of Postgres superusers: %v", postgresSuperuserTeam, err)
|
||||
}
|
||||
if postgresSuperuserTeam == c.Spec.TeamID {
|
||||
clusterIsOwnedBySuperuserTeam = true
|
||||
}
|
||||
}
|
||||
|
||||
if clusterIsOwnedBySuperuserTeam {
|
||||
c.logger.Infof("Team %q owning the cluster is also a team of superusers. Created superuser roles for its members instead of admin roles.", c.Spec.TeamID)
|
||||
return nil
|
||||
}
|
||||
|
||||
err := c.initTeamMembers(c.Spec.TeamID, false)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Cannot create a team %q of admins owning the PG cluster: %v", c.Spec.TeamID, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Cluster) initInfrastructureRoles() error {
|
||||
// add infrastructure roles from the operator's definition
|
||||
for username, newRole := range c.InfrastructureRoles {
|
||||
|
|
@ -796,7 +795,8 @@ func (c *Cluster) initInfrastructureRoles() error {
|
|||
}
|
||||
|
||||
// resolves naming conflicts between existing and new roles by chosing either of them.
|
||||
func (c *Cluster) resolveNameConflict(currentRole, newRole *spec.PgUser) (result spec.PgUser) {
|
||||
func (c *Cluster) resolveNameConflict(currentRole, newRole *spec.PgUser) spec.PgUser {
|
||||
var result spec.PgUser
|
||||
if newRole.Origin >= currentRole.Origin {
|
||||
result = *newRole
|
||||
} else {
|
||||
|
|
@ -804,7 +804,7 @@ func (c *Cluster) resolveNameConflict(currentRole, newRole *spec.PgUser) (result
|
|||
}
|
||||
c.logger.Debugf("resolved a conflict of role %q between %s and %s to %s",
|
||||
newRole.Name, newRole.Origin, currentRole.Origin, result.Origin)
|
||||
return
|
||||
return result
|
||||
}
|
||||
|
||||
func (c *Cluster) shouldAvoidProtectedOrSystemRole(username, purpose string) bool {
|
||||
|
|
@ -820,7 +820,7 @@ func (c *Cluster) shouldAvoidProtectedOrSystemRole(username, purpose string) boo
|
|||
}
|
||||
|
||||
// GetCurrentProcess provides name of the last process of the cluster
|
||||
func (c *Cluster) GetCurrentProcess() spec.Process {
|
||||
func (c *Cluster) GetCurrentProcess() Process {
|
||||
c.processMu.RLock()
|
||||
defer c.processMu.RUnlock()
|
||||
|
||||
|
|
@ -828,8 +828,8 @@ func (c *Cluster) GetCurrentProcess() spec.Process {
|
|||
}
|
||||
|
||||
// GetStatus provides status of the cluster
|
||||
func (c *Cluster) GetStatus() *spec.ClusterStatus {
|
||||
return &spec.ClusterStatus{
|
||||
func (c *Cluster) GetStatus() *ClusterStatus {
|
||||
return &ClusterStatus{
|
||||
Cluster: c.Spec.ClusterName,
|
||||
Team: c.Spec.TeamID,
|
||||
Status: c.Status,
|
||||
|
|
@ -843,19 +843,25 @@ func (c *Cluster) GetStatus() *spec.ClusterStatus {
|
|||
PodDisruptionBudget: c.GetPodDisruptionBudget(),
|
||||
CurrentProcess: c.GetCurrentProcess(),
|
||||
|
||||
Error: c.Error,
|
||||
Error: fmt.Errorf("error: %s", c.Error),
|
||||
}
|
||||
}
|
||||
|
||||
// Switchover does a switchover (via Patroni) to a candidate pod
|
||||
func (c *Cluster) Switchover(curMaster *v1.Pod, candidate spec.NamespacedName) error {
|
||||
|
||||
var err error
|
||||
c.logger.Debugf("failing over from %q to %q", curMaster.Name, candidate)
|
||||
|
||||
var wg sync.WaitGroup
|
||||
|
||||
podLabelErr := make(chan error)
|
||||
stopCh := make(chan struct{})
|
||||
defer close(podLabelErr)
|
||||
|
||||
wg.Add(1)
|
||||
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
ch := c.registerPodSubscriber(candidate)
|
||||
defer c.unregisterPodSubscriber(candidate)
|
||||
|
||||
|
|
@ -863,26 +869,32 @@ func (c *Cluster) Switchover(curMaster *v1.Pod, candidate spec.NamespacedName) e
|
|||
|
||||
select {
|
||||
case <-stopCh:
|
||||
case podLabelErr <- func() error {
|
||||
_, err := c.waitForPodLabel(ch, stopCh, &role)
|
||||
return err
|
||||
case podLabelErr <- func() (err2 error) {
|
||||
_, err2 = c.waitForPodLabel(ch, stopCh, &role)
|
||||
return
|
||||
}():
|
||||
}
|
||||
}()
|
||||
|
||||
if err := c.patroni.Switchover(curMaster, candidate.Name); err != nil {
|
||||
close(stopCh)
|
||||
return fmt.Errorf("could not failover: %v", err)
|
||||
}
|
||||
c.logger.Debugf("successfully failed over from %q to %q", curMaster.Name, candidate)
|
||||
|
||||
defer close(stopCh)
|
||||
|
||||
if err := <-podLabelErr; err != nil {
|
||||
return fmt.Errorf("could not get master pod label: %v", err)
|
||||
if err = c.patroni.Switchover(curMaster, candidate.Name); err == nil {
|
||||
c.logger.Debugf("successfully failed over from %q to %q", curMaster.Name, candidate)
|
||||
if err = <-podLabelErr; err != nil {
|
||||
err = fmt.Errorf("could not get master pod label: %v", err)
|
||||
}
|
||||
} else {
|
||||
err = fmt.Errorf("could not failover: %v", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
// signal the role label waiting goroutine to close the shop and go home
|
||||
close(stopCh)
|
||||
// wait until the goroutine terminates, since unregisterPodSubscriber
|
||||
// must be called before the outer return; otherwsise we risk subscribing to the same pod twice.
|
||||
wg.Wait()
|
||||
// close the label waiting channel no sooner than the waiting goroutine terminates.
|
||||
close(podLabelErr)
|
||||
|
||||
return err
|
||||
|
||||
}
|
||||
|
||||
// Lock locks the cluster
|
||||
|
|
@ -902,9 +914,9 @@ func (c *Cluster) shouldDeleteSecret(secret *v1.Secret) (delete bool, userName s
|
|||
|
||||
type simpleActionWithResult func() error
|
||||
|
||||
type ClusterObjectGet func(name string) (spec.NamespacedName, error)
|
||||
type clusterObjectGet func(name string) (spec.NamespacedName, error)
|
||||
|
||||
type ClusterObjectDelete func(name string) error
|
||||
type clusterObjectDelete func(name string) error
|
||||
|
||||
func (c *Cluster) deletePatroniClusterObjects() error {
|
||||
// TODO: figure out how to remove leftover patroni objects in other cases
|
||||
|
|
@ -921,8 +933,8 @@ func (c *Cluster) deletePatroniClusterObjects() error {
|
|||
}
|
||||
|
||||
func (c *Cluster) deleteClusterObject(
|
||||
get ClusterObjectGet,
|
||||
del ClusterObjectDelete,
|
||||
get clusterObjectGet,
|
||||
del clusterObjectDelete,
|
||||
objType string) error {
|
||||
for _, suffix := range patroniObjectSuffixes {
|
||||
name := fmt.Sprintf("%s-%s", c.Name, suffix)
|
||||
|
|
@ -950,11 +962,11 @@ func (c *Cluster) deletePatroniClusterEndpoints() error {
|
|||
return util.NameFromMeta(ep.ObjectMeta), err
|
||||
}
|
||||
|
||||
delete := func(name string) error {
|
||||
deleteEndpointFn := func(name string) error {
|
||||
return c.KubeClient.Endpoints(c.Namespace).Delete(name, c.deleteOptions)
|
||||
}
|
||||
|
||||
return c.deleteClusterObject(get, delete, "endpoint")
|
||||
return c.deleteClusterObject(get, deleteEndpointFn, "endpoint")
|
||||
}
|
||||
|
||||
func (c *Cluster) deletePatroniClusterConfigMaps() error {
|
||||
|
|
@ -963,9 +975,9 @@ func (c *Cluster) deletePatroniClusterConfigMaps() error {
|
|||
return util.NameFromMeta(cm.ObjectMeta), err
|
||||
}
|
||||
|
||||
delete := func(name string) error {
|
||||
deleteConfigMapFn := func(name string) error {
|
||||
return c.KubeClient.ConfigMaps(c.Namespace).Delete(name, c.deleteOptions)
|
||||
}
|
||||
|
||||
return c.deleteClusterObject(get, delete, "configmap")
|
||||
return c.deleteClusterObject(get, deleteConfigMapFn, "configmap")
|
||||
}
|
||||
|
|
|
|||
|
|
@ -2,14 +2,16 @@ package cluster
|
|||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
acidv1 "github.com/zalando-incubator/postgres-operator/pkg/apis/acid.zalan.do/v1"
|
||||
"github.com/zalando-incubator/postgres-operator/pkg/spec"
|
||||
"github.com/zalando-incubator/postgres-operator/pkg/util/config"
|
||||
"github.com/zalando-incubator/postgres-operator/pkg/util/k8sutil"
|
||||
"github.com/zalando-incubator/postgres-operator/pkg/util/teams"
|
||||
"k8s.io/client-go/pkg/api/v1"
|
||||
"reflect"
|
||||
"testing"
|
||||
"k8s.io/api/core/v1"
|
||||
)
|
||||
|
||||
const (
|
||||
|
|
@ -21,43 +23,43 @@ var logger = logrus.New().WithField("test", "cluster")
|
|||
var cl = New(Config{OpConfig: config.Config{ProtectedRoles: []string{"admin"},
|
||||
Auth: config.Auth{SuperUsername: superUserName,
|
||||
ReplicationUsername: replicationUserName}}},
|
||||
k8sutil.KubernetesClient{}, spec.Postgresql{}, logger)
|
||||
k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger)
|
||||
|
||||
func TestInitRobotUsers(t *testing.T) {
|
||||
testName := "TestInitRobotUsers"
|
||||
tests := []struct {
|
||||
manifestUsers map[string]spec.UserFlags
|
||||
manifestUsers map[string]acidv1.UserFlags
|
||||
infraRoles map[string]spec.PgUser
|
||||
result map[string]spec.PgUser
|
||||
err error
|
||||
}{
|
||||
{
|
||||
manifestUsers: map[string]spec.UserFlags{"foo": {"superuser", "createdb"}},
|
||||
manifestUsers: map[string]acidv1.UserFlags{"foo": {"superuser", "createdb"}},
|
||||
infraRoles: map[string]spec.PgUser{"foo": {Origin: spec.RoleOriginInfrastructure, Name: "foo", Password: "bar"}},
|
||||
result: map[string]spec.PgUser{"foo": {Origin: spec.RoleOriginInfrastructure, Name: "foo", Password: "bar"}},
|
||||
err: nil,
|
||||
},
|
||||
{
|
||||
manifestUsers: map[string]spec.UserFlags{"!fooBar": {"superuser", "createdb"}},
|
||||
manifestUsers: map[string]acidv1.UserFlags{"!fooBar": {"superuser", "createdb"}},
|
||||
err: fmt.Errorf(`invalid username: "!fooBar"`),
|
||||
},
|
||||
{
|
||||
manifestUsers: map[string]spec.UserFlags{"foobar": {"!superuser", "createdb"}},
|
||||
manifestUsers: map[string]acidv1.UserFlags{"foobar": {"!superuser", "createdb"}},
|
||||
err: fmt.Errorf(`invalid flags for user "foobar": ` +
|
||||
`user flag "!superuser" is not alphanumeric`),
|
||||
},
|
||||
{
|
||||
manifestUsers: map[string]spec.UserFlags{"foobar": {"superuser1", "createdb"}},
|
||||
manifestUsers: map[string]acidv1.UserFlags{"foobar": {"superuser1", "createdb"}},
|
||||
err: fmt.Errorf(`invalid flags for user "foobar": ` +
|
||||
`user flag "SUPERUSER1" is not valid`),
|
||||
},
|
||||
{
|
||||
manifestUsers: map[string]spec.UserFlags{"foobar": {"inherit", "noinherit"}},
|
||||
manifestUsers: map[string]acidv1.UserFlags{"foobar": {"inherit", "noinherit"}},
|
||||
err: fmt.Errorf(`invalid flags for user "foobar": ` +
|
||||
`conflicting user flags: "NOINHERIT" and "INHERIT"`),
|
||||
},
|
||||
{
|
||||
manifestUsers: map[string]spec.UserFlags{"admin": {"superuser"}, superUserName: {"createdb"}},
|
||||
manifestUsers: map[string]acidv1.UserFlags{"admin": {"superuser"}, superUserName: {"createdb"}},
|
||||
infraRoles: map[string]spec.PgUser{},
|
||||
result: map[string]spec.PgUser{},
|
||||
err: nil,
|
||||
|
|
@ -100,6 +102,7 @@ func (m *mockTeamsAPIClient) setMembers(members []string) {
|
|||
m.members = members
|
||||
}
|
||||
|
||||
// Test adding a member of a product team owning a particular DB cluster
|
||||
func TestInitHumanUsers(t *testing.T) {
|
||||
|
||||
var mockTeamsAPI mockTeamsAPIClient
|
||||
|
|
@ -107,7 +110,9 @@ func TestInitHumanUsers(t *testing.T) {
|
|||
cl.teamsAPIClient = &mockTeamsAPI
|
||||
testName := "TestInitHumanUsers"
|
||||
|
||||
// members of a product team are granted superuser rights for DBs of their team
|
||||
cl.OpConfig.EnableTeamSuperuser = true
|
||||
|
||||
cl.OpConfig.EnableTeamsAPI = true
|
||||
cl.OpConfig.PamRoleName = "zalandos"
|
||||
cl.Spec.TeamID = "test"
|
||||
|
|
@ -145,6 +150,145 @@ func TestInitHumanUsers(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
type mockTeam struct {
|
||||
teamID string
|
||||
members []string
|
||||
isPostgresSuperuserTeam bool
|
||||
}
|
||||
|
||||
type mockTeamsAPIClientMultipleTeams struct {
|
||||
teams []mockTeam
|
||||
}
|
||||
|
||||
func (m *mockTeamsAPIClientMultipleTeams) TeamInfo(teamID, token string) (tm *teams.Team, err error) {
|
||||
for _, team := range m.teams {
|
||||
if team.teamID == teamID {
|
||||
return &teams.Team{Members: team.members}, nil
|
||||
}
|
||||
}
|
||||
|
||||
// should not be reached if a slice with teams is populated correctly
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Test adding members of maintenance teams that get superuser rights for all PG databases
|
||||
func TestInitHumanUsersWithSuperuserTeams(t *testing.T) {
|
||||
|
||||
var mockTeamsAPI mockTeamsAPIClientMultipleTeams
|
||||
cl.oauthTokenGetter = &mockOAuthTokenGetter{}
|
||||
cl.teamsAPIClient = &mockTeamsAPI
|
||||
cl.OpConfig.EnableTeamSuperuser = false
|
||||
testName := "TestInitHumanUsersWithSuperuserTeams"
|
||||
|
||||
cl.OpConfig.EnableTeamsAPI = true
|
||||
cl.OpConfig.PamRoleName = "zalandos"
|
||||
|
||||
teamA := mockTeam{
|
||||
teamID: "postgres_superusers",
|
||||
members: []string{"postgres_superuser"},
|
||||
isPostgresSuperuserTeam: true,
|
||||
}
|
||||
|
||||
userA := spec.PgUser{
|
||||
Name: "postgres_superuser",
|
||||
Origin: spec.RoleOriginTeamsAPI,
|
||||
MemberOf: []string{cl.OpConfig.PamRoleName},
|
||||
Flags: []string{"LOGIN", "SUPERUSER"},
|
||||
}
|
||||
|
||||
teamB := mockTeam{
|
||||
teamID: "postgres_admins",
|
||||
members: []string{"postgres_admin"},
|
||||
isPostgresSuperuserTeam: true,
|
||||
}
|
||||
|
||||
userB := spec.PgUser{
|
||||
Name: "postgres_admin",
|
||||
Origin: spec.RoleOriginTeamsAPI,
|
||||
MemberOf: []string{cl.OpConfig.PamRoleName},
|
||||
Flags: []string{"LOGIN", "SUPERUSER"},
|
||||
}
|
||||
|
||||
teamTest := mockTeam{
|
||||
teamID: "test",
|
||||
members: []string{"test_user"},
|
||||
isPostgresSuperuserTeam: false,
|
||||
}
|
||||
|
||||
userTest := spec.PgUser{
|
||||
Name: "test_user",
|
||||
Origin: spec.RoleOriginTeamsAPI,
|
||||
MemberOf: []string{cl.OpConfig.PamRoleName},
|
||||
Flags: []string{"LOGIN"},
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
ownerTeam string
|
||||
existingRoles map[string]spec.PgUser
|
||||
superuserTeams []string
|
||||
teams []mockTeam
|
||||
result map[string]spec.PgUser
|
||||
}{
|
||||
// case 1: there are two different teams of PG maintainers and one product team
|
||||
{
|
||||
ownerTeam: "test",
|
||||
existingRoles: map[string]spec.PgUser{},
|
||||
superuserTeams: []string{"postgres_superusers", "postgres_admins"},
|
||||
teams: []mockTeam{teamA, teamB, teamTest},
|
||||
result: map[string]spec.PgUser{
|
||||
"postgres_superuser": userA,
|
||||
"postgres_admin": userB,
|
||||
"test_user": userTest,
|
||||
},
|
||||
},
|
||||
// case 2: the team of superusers creates a new PG cluster
|
||||
{
|
||||
ownerTeam: "postgres_superusers",
|
||||
existingRoles: map[string]spec.PgUser{},
|
||||
superuserTeams: []string{"postgres_superusers"},
|
||||
teams: []mockTeam{teamA},
|
||||
result: map[string]spec.PgUser{
|
||||
"postgres_superuser": userA,
|
||||
},
|
||||
},
|
||||
// case 3: the team owning the cluster is promoted to the maintainers' status
|
||||
{
|
||||
ownerTeam: "postgres_superusers",
|
||||
existingRoles: map[string]spec.PgUser{
|
||||
// role with the name exists before w/o superuser privilege
|
||||
"postgres_superuser": spec.PgUser{
|
||||
Origin: spec.RoleOriginTeamsAPI,
|
||||
Name: "postgres_superuser",
|
||||
Password: "",
|
||||
Flags: []string{"LOGIN"},
|
||||
MemberOf: []string{cl.OpConfig.PamRoleName},
|
||||
Parameters: map[string]string(nil)}},
|
||||
superuserTeams: []string{"postgres_superusers"},
|
||||
teams: []mockTeam{teamA},
|
||||
result: map[string]spec.PgUser{
|
||||
"postgres_superuser": userA,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
|
||||
mockTeamsAPI.teams = tt.teams
|
||||
|
||||
cl.Spec.TeamID = tt.ownerTeam
|
||||
cl.pgUsers = tt.existingRoles
|
||||
cl.OpConfig.PostgresSuperuserTeams = tt.superuserTeams
|
||||
|
||||
if err := cl.initHumanUsers(); err != nil {
|
||||
t.Errorf("%s got an unexpected error %v", testName, err)
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(cl.pgUsers, tt.result) {
|
||||
t.Errorf("%s expects %#v, got %#v", testName, tt.result, cl.pgUsers)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestShouldDeleteSecret(t *testing.T) {
|
||||
testName := "TestShouldDeleteSecret"
|
||||
|
||||
|
|
|
|||
|
|
@ -153,12 +153,10 @@ func (c *Cluster) readPgUsersFromDatabase(userNames []string) (users spec.PgUser
|
|||
|
||||
// getDatabases returns the map of current databases with owners
|
||||
// The caller is responsible for opening and closing the database connection
|
||||
func (c *Cluster) getDatabases() (map[string]string, error) {
|
||||
func (c *Cluster) getDatabases() (dbs map[string]string, err error) {
|
||||
var (
|
||||
rows *sql.Rows
|
||||
err error
|
||||
)
|
||||
dbs := make(map[string]string)
|
||||
|
||||
if rows, err = c.pgDb.Query(getDatabasesSQL); err != nil {
|
||||
return nil, fmt.Errorf("could not query database: %v", err)
|
||||
|
|
@ -166,15 +164,20 @@ func (c *Cluster) getDatabases() (map[string]string, error) {
|
|||
|
||||
defer func() {
|
||||
if err2 := rows.Close(); err2 != nil {
|
||||
err = fmt.Errorf("error when closing query cursor: %v", err2)
|
||||
if err != nil {
|
||||
err = fmt.Errorf("error when closing query cursor: %v, previous error: %v", err2, err)
|
||||
} else {
|
||||
err = fmt.Errorf("error when closing query cursor: %v", err2)
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
dbs = make(map[string]string)
|
||||
|
||||
for rows.Next() {
|
||||
var datname, owner string
|
||||
|
||||
err := rows.Scan(&datname, &owner)
|
||||
if err != nil {
|
||||
if err = rows.Scan(&datname, &owner); err != nil {
|
||||
return nil, fmt.Errorf("error when processing row: %v", err)
|
||||
}
|
||||
dbs[datname] = owner
|
||||
|
|
@ -186,26 +189,24 @@ func (c *Cluster) getDatabases() (map[string]string, error) {
|
|||
// executeCreateDatabase creates new database with the given owner.
|
||||
// The caller is responsible for openinging and closing the database connection.
|
||||
func (c *Cluster) executeCreateDatabase(datname, owner string) error {
|
||||
if !c.databaseNameOwnerValid(datname, owner) {
|
||||
return nil
|
||||
}
|
||||
c.logger.Infof("creating database %q with owner %q", datname, owner)
|
||||
|
||||
if _, err := c.pgDb.Exec(fmt.Sprintf(createDatabaseSQL, datname, owner)); err != nil {
|
||||
return fmt.Errorf("could not execute create database: %v", err)
|
||||
}
|
||||
return nil
|
||||
return c.execCreateOrAlterDatabase(datname, owner, createDatabaseSQL,
|
||||
"creating database", "create database")
|
||||
}
|
||||
|
||||
// executeCreateDatabase changes the owner of the given database.
|
||||
// The caller is responsible for openinging and closing the database connection.
|
||||
func (c *Cluster) executeAlterDatabaseOwner(datname string, owner string) error {
|
||||
return c.execCreateOrAlterDatabase(datname, owner, alterDatabaseOwnerSQL,
|
||||
"changing owner for database", "alter database owner")
|
||||
}
|
||||
|
||||
func (c *Cluster) execCreateOrAlterDatabase(datname, owner, statement, doing, operation string) error {
|
||||
if !c.databaseNameOwnerValid(datname, owner) {
|
||||
return nil
|
||||
}
|
||||
c.logger.Infof("changing database %q owner to %q", datname, owner)
|
||||
if _, err := c.pgDb.Exec(fmt.Sprintf(alterDatabaseOwnerSQL, datname, owner)); err != nil {
|
||||
return fmt.Errorf("could not execute alter database owner: %v", err)
|
||||
c.logger.Infof("%s %q owner %q", doing, datname, owner)
|
||||
if _, err := c.pgDb.Exec(fmt.Sprintf(statement, datname, owner)); err != nil {
|
||||
return fmt.Errorf("could not execute %s: %v", operation, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
|
@ -5,10 +5,9 @@ import (
|
|||
"fmt"
|
||||
"strings"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
remotecommandconsts "k8s.io/apimachinery/pkg/util/remotecommand"
|
||||
"k8s.io/client-go/kubernetes/scheme"
|
||||
"k8s.io/client-go/pkg/api/v1"
|
||||
"k8s.io/client-go/tools/remotecommand"
|
||||
|
||||
"github.com/zalando-incubator/postgres-operator/pkg/spec"
|
||||
|
|
@ -54,15 +53,15 @@ func (c *Cluster) ExecCommand(podName *spec.NamespacedName, command ...string) (
|
|||
Stderr: true,
|
||||
}, scheme.ParameterCodec)
|
||||
|
||||
exec, err := remotecommand.NewExecutor(c.RestConfig, "POST", req.URL())
|
||||
exec, err := remotecommand.NewSPDYExecutor(c.RestConfig, "POST", req.URL())
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to init executor: %v", err)
|
||||
}
|
||||
|
||||
err = exec.Stream(remotecommand.StreamOptions{
|
||||
SupportedProtocols: remotecommandconsts.SupportedStreamingProtocols,
|
||||
Stdout: &execOut,
|
||||
Stderr: &execErr,
|
||||
Stdout: &execOut,
|
||||
Stderr: &execErr,
|
||||
Tty: false,
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
|
|
|
|||
|
|
@ -5,22 +5,28 @@ import (
|
|||
"fmt"
|
||||
"sort"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
|
||||
"k8s.io/api/apps/v1beta1"
|
||||
"k8s.io/api/core/v1"
|
||||
policybeta1 "k8s.io/api/policy/v1beta1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apimachinery/pkg/util/intstr"
|
||||
"k8s.io/client-go/pkg/api/v1"
|
||||
"k8s.io/client-go/pkg/apis/apps/v1beta1"
|
||||
policybeta1 "k8s.io/client-go/pkg/apis/policy/v1beta1"
|
||||
|
||||
acidv1 "github.com/zalando-incubator/postgres-operator/pkg/apis/acid.zalan.do/v1"
|
||||
"github.com/zalando-incubator/postgres-operator/pkg/spec"
|
||||
"github.com/zalando-incubator/postgres-operator/pkg/util"
|
||||
"github.com/zalando-incubator/postgres-operator/pkg/util/constants"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
)
|
||||
|
||||
const (
|
||||
pgBinariesLocationTemplate = "/usr/lib/postgresql/%s/bin"
|
||||
patroniPGBinariesParameterName = "bin_dir"
|
||||
patroniPGParametersParameterName = "parameters"
|
||||
patroniPGHBAConfParameterName = "pg_hba"
|
||||
localHost = "127.0.0.1/32"
|
||||
)
|
||||
|
||||
|
|
@ -40,7 +46,6 @@ type patroniDCS struct {
|
|||
type pgBootstrap struct {
|
||||
Initdb []interface{} `json:"initdb"`
|
||||
Users map[string]pgUser `json:"users"`
|
||||
PgHBA []string `json:"pg_hba"`
|
||||
DCS patroniDCS `json:"dcs,omitempty"`
|
||||
}
|
||||
|
||||
|
|
@ -79,25 +84,30 @@ func (c *Cluster) podDisruptionBudgetName() string {
|
|||
return c.OpConfig.PDBNameFormat.Format("cluster", c.Name)
|
||||
}
|
||||
|
||||
func (c *Cluster) resourceRequirements(resources spec.Resources) (*v1.ResourceRequirements, error) {
|
||||
func (c *Cluster) makeDefaultResources() acidv1.Resources {
|
||||
|
||||
config := c.OpConfig
|
||||
|
||||
defaultRequests := acidv1.ResourceDescription{CPU: config.DefaultCPURequest, Memory: config.DefaultMemoryRequest}
|
||||
defaultLimits := acidv1.ResourceDescription{CPU: config.DefaultCPULimit, Memory: config.DefaultMemoryLimit}
|
||||
|
||||
return acidv1.Resources{ResourceRequest: defaultRequests, ResourceLimits: defaultLimits}
|
||||
}
|
||||
|
||||
func generateResourceRequirements(resources acidv1.Resources, defaultResources acidv1.Resources) (*v1.ResourceRequirements, error) {
|
||||
var err error
|
||||
|
||||
specRequests := resources.ResourceRequest
|
||||
specLimits := resources.ResourceLimits
|
||||
|
||||
config := c.OpConfig
|
||||
|
||||
defaultRequests := spec.ResourceDescription{CPU: config.DefaultCPURequest, Memory: config.DefaultMemoryRequest}
|
||||
defaultLimits := spec.ResourceDescription{CPU: config.DefaultCPULimit, Memory: config.DefaultMemoryLimit}
|
||||
|
||||
result := v1.ResourceRequirements{}
|
||||
|
||||
result.Requests, err = fillResourceList(specRequests, defaultRequests)
|
||||
result.Requests, err = fillResourceList(specRequests, defaultResources.ResourceRequest)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not fill resource requests: %v", err)
|
||||
}
|
||||
|
||||
result.Limits, err = fillResourceList(specLimits, defaultLimits)
|
||||
result.Limits, err = fillResourceList(specLimits, defaultResources.ResourceLimits)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not fill resource limits: %v", err)
|
||||
}
|
||||
|
|
@ -105,7 +115,7 @@ func (c *Cluster) resourceRequirements(resources spec.Resources) (*v1.ResourceRe
|
|||
return &result, nil
|
||||
}
|
||||
|
||||
func fillResourceList(spec spec.ResourceDescription, defaults spec.ResourceDescription) (v1.ResourceList, error) {
|
||||
func fillResourceList(spec acidv1.ResourceDescription, defaults acidv1.ResourceDescription) (v1.ResourceList, error) {
|
||||
var err error
|
||||
requests := v1.ResourceList{}
|
||||
|
||||
|
|
@ -135,7 +145,7 @@ func fillResourceList(spec spec.ResourceDescription, defaults spec.ResourceDescr
|
|||
return requests, nil
|
||||
}
|
||||
|
||||
func (c *Cluster) generateSpiloJSONConfiguration(pg *spec.PostgresqlParam, patroni *spec.Patroni) string {
|
||||
func generateSpiloJSONConfiguration(pg *acidv1.PostgresqlParam, patroni *acidv1.Patroni, pamRoleName string, logger *logrus.Entry) string {
|
||||
config := spiloConfiguration{}
|
||||
|
||||
config.Bootstrap = pgBootstrap{}
|
||||
|
|
@ -178,7 +188,7 @@ PatroniInitDBParams:
|
|||
}
|
||||
}
|
||||
default:
|
||||
c.logger.Warningf("unsupported type for initdb configuration item %s: %T", defaultParam, defaultParam)
|
||||
logger.Warningf("unsupported type for initdb configuration item %s: %T", defaultParam, defaultParam)
|
||||
continue PatroniInitDBParams
|
||||
}
|
||||
}
|
||||
|
|
@ -193,19 +203,6 @@ PatroniInitDBParams:
|
|||
config.Bootstrap.Initdb = append(config.Bootstrap.Initdb, map[string]string{k: v})
|
||||
}
|
||||
|
||||
// pg_hba parameters in the manifest replace the default ones. We cannot
|
||||
// reasonably merge them automatically, because pg_hba parsing stops on
|
||||
// a first successfully matched rule.
|
||||
if len(patroni.PgHba) > 0 {
|
||||
config.Bootstrap.PgHBA = patroni.PgHba
|
||||
} else {
|
||||
config.Bootstrap.PgHBA = []string{
|
||||
"hostnossl all all all reject",
|
||||
fmt.Sprintf("hostssl all +%s all pam", c.OpConfig.PamRoleName),
|
||||
"hostssl all all all md5",
|
||||
}
|
||||
}
|
||||
|
||||
if patroni.MaximumLagOnFailover >= 0 {
|
||||
config.Bootstrap.DCS.MaximumLagOnFailover = patroni.MaximumLagOnFailover
|
||||
}
|
||||
|
|
@ -222,43 +219,56 @@ PatroniInitDBParams:
|
|||
config.PgLocalConfiguration = make(map[string]interface{})
|
||||
config.PgLocalConfiguration[patroniPGBinariesParameterName] = fmt.Sprintf(pgBinariesLocationTemplate, pg.PgVersion)
|
||||
if len(pg.Parameters) > 0 {
|
||||
localParameters := make(map[string]string)
|
||||
bootstrapParameters := make(map[string]string)
|
||||
for param, val := range pg.Parameters {
|
||||
if isBootstrapOnlyParameter(param) {
|
||||
bootstrapParameters[param] = val
|
||||
} else {
|
||||
localParameters[param] = val
|
||||
}
|
||||
local, bootstrap := getLocalAndBoostrapPostgreSQLParameters(pg.Parameters)
|
||||
|
||||
if len(local) > 0 {
|
||||
config.PgLocalConfiguration[patroniPGParametersParameterName] = local
|
||||
}
|
||||
if len(localParameters) > 0 {
|
||||
config.PgLocalConfiguration[patroniPGParametersParameterName] = localParameters
|
||||
}
|
||||
if len(bootstrapParameters) > 0 {
|
||||
if len(bootstrap) > 0 {
|
||||
config.Bootstrap.DCS.PGBootstrapConfiguration = make(map[string]interface{})
|
||||
config.Bootstrap.DCS.PGBootstrapConfiguration[patroniPGParametersParameterName] = bootstrapParameters
|
||||
config.Bootstrap.DCS.PGBootstrapConfiguration[patroniPGParametersParameterName] = bootstrap
|
||||
}
|
||||
}
|
||||
// Patroni gives us a choice of writing pg_hba.conf to either the bootstrap section or to the local postgresql one.
|
||||
// We choose the local one, because we need Patroni to change pg_hba.conf in PostgreSQL after the user changes the
|
||||
// relevant section in the manifest.
|
||||
if len(patroni.PgHba) > 0 {
|
||||
config.PgLocalConfiguration[patroniPGHBAConfParameterName] = patroni.PgHba
|
||||
}
|
||||
|
||||
config.Bootstrap.Users = map[string]pgUser{
|
||||
c.OpConfig.PamRoleName: {
|
||||
pamRoleName: {
|
||||
Password: "",
|
||||
Options: []string{constants.RoleFlagCreateDB, constants.RoleFlagNoLogin},
|
||||
},
|
||||
}
|
||||
result, err := json.Marshal(config)
|
||||
if err != nil {
|
||||
c.logger.Errorf("cannot convert spilo configuration into JSON: %v", err)
|
||||
logger.Errorf("cannot convert spilo configuration into JSON: %v", err)
|
||||
return ""
|
||||
}
|
||||
return string(result)
|
||||
}
|
||||
|
||||
func (c *Cluster) nodeAffinity() *v1.Affinity {
|
||||
func getLocalAndBoostrapPostgreSQLParameters(parameters map[string]string) (local, bootstrap map[string]string) {
|
||||
local = make(map[string]string)
|
||||
bootstrap = make(map[string]string)
|
||||
for param, val := range parameters {
|
||||
if isBootstrapOnlyParameter(param) {
|
||||
bootstrap[param] = val
|
||||
} else {
|
||||
local[param] = val
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func nodeAffinity(nodeReadinessLabel map[string]string) *v1.Affinity {
|
||||
matchExpressions := make([]v1.NodeSelectorRequirement, 0)
|
||||
if len(c.OpConfig.NodeReadinessLabel) == 0 {
|
||||
if len(nodeReadinessLabel) == 0 {
|
||||
return nil
|
||||
}
|
||||
for k, v := range c.OpConfig.NodeReadinessLabel {
|
||||
for k, v := range nodeReadinessLabel {
|
||||
matchExpressions = append(matchExpressions, v1.NodeSelectorRequirement{
|
||||
Key: k,
|
||||
Operator: v1.NodeSelectorOpIn,
|
||||
|
|
@ -275,13 +285,12 @@ func (c *Cluster) nodeAffinity() *v1.Affinity {
|
|||
}
|
||||
}
|
||||
|
||||
func (c *Cluster) tolerations(tolerationsSpec *[]v1.Toleration) []v1.Toleration {
|
||||
func tolerations(tolerationsSpec *[]v1.Toleration, podToleration map[string]string) []v1.Toleration {
|
||||
// allow to override tolerations by postgresql manifest
|
||||
if len(*tolerationsSpec) > 0 {
|
||||
return *tolerationsSpec
|
||||
}
|
||||
|
||||
podToleration := c.Config.OpConfig.PodToleration
|
||||
if len(podToleration["key"]) > 0 || len(podToleration["operator"]) > 0 || len(podToleration["value"]) > 0 || len(podToleration["effect"]) > 0 {
|
||||
return []v1.Toleration{
|
||||
{
|
||||
|
|
@ -309,19 +318,128 @@ func isBootstrapOnlyParameter(param string) bool {
|
|||
param == "track_commit_timestamp"
|
||||
}
|
||||
|
||||
func (c *Cluster) generatePodTemplate(
|
||||
uid types.UID,
|
||||
resourceRequirements *v1.ResourceRequirements,
|
||||
resourceRequirementsScalyrSidecar *v1.ResourceRequirements,
|
||||
tolerationsSpec *[]v1.Toleration,
|
||||
pgParameters *spec.PostgresqlParam,
|
||||
patroniParameters *spec.Patroni,
|
||||
cloneDescription *spec.CloneDescription,
|
||||
dockerImage *string,
|
||||
customPodEnvVars map[string]string,
|
||||
) *v1.PodTemplateSpec {
|
||||
spiloConfiguration := c.generateSpiloJSONConfiguration(pgParameters, patroniParameters)
|
||||
func generateVolumeMounts() []v1.VolumeMount {
|
||||
return []v1.VolumeMount{
|
||||
{
|
||||
Name: constants.DataVolumeName,
|
||||
MountPath: constants.PostgresDataMount, //TODO: fetch from manifest
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func generateSpiloContainer(
|
||||
name string,
|
||||
dockerImage *string,
|
||||
resourceRequirements *v1.ResourceRequirements,
|
||||
envVars []v1.EnvVar,
|
||||
volumeMounts []v1.VolumeMount,
|
||||
) *v1.Container {
|
||||
|
||||
privilegedMode := true
|
||||
return &v1.Container{
|
||||
Name: name,
|
||||
Image: *dockerImage,
|
||||
ImagePullPolicy: v1.PullIfNotPresent,
|
||||
Resources: *resourceRequirements,
|
||||
Ports: []v1.ContainerPort{
|
||||
{
|
||||
ContainerPort: 8008,
|
||||
Protocol: v1.ProtocolTCP,
|
||||
},
|
||||
{
|
||||
ContainerPort: 5432,
|
||||
Protocol: v1.ProtocolTCP,
|
||||
},
|
||||
{
|
||||
ContainerPort: 8080,
|
||||
Protocol: v1.ProtocolTCP,
|
||||
},
|
||||
},
|
||||
VolumeMounts: volumeMounts,
|
||||
Env: envVars,
|
||||
SecurityContext: &v1.SecurityContext{
|
||||
Privileged: &privilegedMode,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func generateSidecarContainers(sidecars []acidv1.Sidecar,
|
||||
volumeMounts []v1.VolumeMount, defaultResources acidv1.Resources,
|
||||
superUserName string, credentialsSecretName string, logger *logrus.Entry) ([]v1.Container, error) {
|
||||
|
||||
if len(sidecars) > 0 {
|
||||
result := make([]v1.Container, 0)
|
||||
for index, sidecar := range sidecars {
|
||||
|
||||
resources, err := generateResourceRequirements(
|
||||
makeResources(
|
||||
sidecar.Resources.ResourceRequest.CPU,
|
||||
sidecar.Resources.ResourceRequest.Memory,
|
||||
sidecar.Resources.ResourceLimits.CPU,
|
||||
sidecar.Resources.ResourceLimits.Memory,
|
||||
),
|
||||
defaultResources,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
sc := getSidecarContainer(sidecar, index, volumeMounts, resources, superUserName, credentialsSecretName, logger)
|
||||
result = append(result, *sc)
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func generatePodTemplate(
|
||||
namespace string,
|
||||
labels labels.Set,
|
||||
spiloContainer *v1.Container,
|
||||
sidecarContainers []v1.Container,
|
||||
tolerationsSpec *[]v1.Toleration,
|
||||
nodeAffinity *v1.Affinity,
|
||||
terminateGracePeriod int64,
|
||||
podServiceAccountName string,
|
||||
kubeIAMRole string,
|
||||
priorityClassName string,
|
||||
) (*v1.PodTemplateSpec, error) {
|
||||
|
||||
terminateGracePeriodSeconds := terminateGracePeriod
|
||||
containers := []v1.Container{*spiloContainer}
|
||||
containers = append(containers, sidecarContainers...)
|
||||
|
||||
podSpec := v1.PodSpec{
|
||||
ServiceAccountName: podServiceAccountName,
|
||||
TerminationGracePeriodSeconds: &terminateGracePeriodSeconds,
|
||||
Containers: containers,
|
||||
Tolerations: *tolerationsSpec,
|
||||
}
|
||||
|
||||
if nodeAffinity != nil {
|
||||
podSpec.Affinity = nodeAffinity
|
||||
}
|
||||
|
||||
if priorityClassName != "" {
|
||||
podSpec.PriorityClassName = priorityClassName
|
||||
}
|
||||
|
||||
template := v1.PodTemplateSpec{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: labels,
|
||||
Namespace: namespace,
|
||||
},
|
||||
Spec: podSpec,
|
||||
}
|
||||
if kubeIAMRole != "" {
|
||||
template.Annotations = map[string]string{constants.KubeIAmAnnotation: kubeIAMRole}
|
||||
}
|
||||
|
||||
return &template, nil
|
||||
}
|
||||
|
||||
// generatePodEnvVars generates environment variables for the Spilo Pod
|
||||
func (c *Cluster) generateSpiloPodEnvVars(uid types.UID, spiloConfiguration string, cloneDescription *acidv1.CloneDescription, customPodEnvVarsList []v1.EnvVar) []v1.EnvVar {
|
||||
envVars := []v1.EnvVar{
|
||||
{
|
||||
Name: "SCOPE",
|
||||
|
|
@ -409,134 +527,89 @@ func (c *Cluster) generatePodTemplate(
|
|||
envVars = append(envVars, c.generateCloneEnvironment(cloneDescription)...)
|
||||
}
|
||||
|
||||
var names []string
|
||||
// handle environment variables from the PodEnvironmentConfigMap. We don't use envSource here as it is impossible
|
||||
// to track any changes to the object envSource points to. In order to emulate the envSource behavior, however, we
|
||||
// need to make sure that PodConfigMap variables doesn't override those we set explicitly from the configuration
|
||||
// parameters
|
||||
envVarsMap := make(map[string]string)
|
||||
for _, envVar := range envVars {
|
||||
envVarsMap[envVar.Name] = envVar.Value
|
||||
if len(customPodEnvVarsList) > 0 {
|
||||
envVars = append(envVars, customPodEnvVarsList...)
|
||||
}
|
||||
for name := range customPodEnvVars {
|
||||
if _, ok := envVarsMap[name]; !ok {
|
||||
names = append(names, name)
|
||||
} else {
|
||||
c.logger.Warningf("variable %q value from %q is ignored: conflict with the definition from the operator",
|
||||
name, c.OpConfig.PodEnvironmentConfigMap)
|
||||
|
||||
return envVars
|
||||
}
|
||||
|
||||
// deduplicateEnvVars makes sure there are no duplicate in the target envVar array. While Kubernetes already
|
||||
// deduplicates variables defined in a container, it leaves the last definition in the list and this behavior is not
|
||||
// well-documented, which means that the behavior can be reversed at some point (it may also start producing an error).
|
||||
// Therefore, the merge is done by the operator, the entries that are ahead in the passed list take priority over those
|
||||
// that are behind, and only the name is considered in order to eliminate duplicates.
|
||||
func deduplicateEnvVars(input []v1.EnvVar, containerName string, logger *logrus.Entry) []v1.EnvVar {
|
||||
result := make([]v1.EnvVar, 0)
|
||||
names := make(map[string]int)
|
||||
|
||||
for i, va := range input {
|
||||
if names[va.Name] == 0 {
|
||||
names[va.Name]++
|
||||
result = append(result, input[i])
|
||||
} else if names[va.Name] == 1 {
|
||||
names[va.Name]++
|
||||
logger.Warningf("variable %q is defined in %q more than once, the subsequent definitions are ignored",
|
||||
va.Name, containerName)
|
||||
}
|
||||
}
|
||||
sort.Strings(names)
|
||||
for _, name := range names {
|
||||
envVars = append(envVars, v1.EnvVar{Name: name, Value: customPodEnvVars[name]})
|
||||
return result
|
||||
}
|
||||
|
||||
func getSidecarContainer(sidecar acidv1.Sidecar, index int, volumeMounts []v1.VolumeMount,
|
||||
resources *v1.ResourceRequirements, superUserName string, credentialsSecretName string, logger *logrus.Entry) *v1.Container {
|
||||
name := sidecar.Name
|
||||
if name == "" {
|
||||
name = fmt.Sprintf("sidecar-%d", index)
|
||||
}
|
||||
|
||||
privilegedMode := true
|
||||
containerImage := c.OpConfig.DockerImage
|
||||
if dockerImage != nil && *dockerImage != "" {
|
||||
containerImage = *dockerImage
|
||||
}
|
||||
volumeMounts := []v1.VolumeMount{
|
||||
env := []v1.EnvVar{
|
||||
{
|
||||
Name: constants.DataVolumeName,
|
||||
MountPath: constants.PostgresDataMount, //TODO: fetch from manifest
|
||||
},
|
||||
}
|
||||
container := v1.Container{
|
||||
Name: c.containerName(),
|
||||
Image: containerImage,
|
||||
ImagePullPolicy: v1.PullIfNotPresent,
|
||||
Resources: *resourceRequirements,
|
||||
Ports: []v1.ContainerPort{
|
||||
{
|
||||
ContainerPort: 8008,
|
||||
Protocol: v1.ProtocolTCP,
|
||||
},
|
||||
{
|
||||
ContainerPort: 5432,
|
||||
Protocol: v1.ProtocolTCP,
|
||||
},
|
||||
{
|
||||
ContainerPort: 8080,
|
||||
Protocol: v1.ProtocolTCP,
|
||||
},
|
||||
},
|
||||
VolumeMounts: volumeMounts,
|
||||
Env: envVars,
|
||||
SecurityContext: &v1.SecurityContext{
|
||||
Privileged: &privilegedMode,
|
||||
},
|
||||
}
|
||||
terminateGracePeriodSeconds := int64(c.OpConfig.PodTerminateGracePeriod.Seconds())
|
||||
|
||||
podSpec := v1.PodSpec{
|
||||
ServiceAccountName: c.OpConfig.PodServiceAccountName,
|
||||
TerminationGracePeriodSeconds: &terminateGracePeriodSeconds,
|
||||
Containers: []v1.Container{container},
|
||||
Tolerations: c.tolerations(tolerationsSpec),
|
||||
}
|
||||
|
||||
if affinity := c.nodeAffinity(); affinity != nil {
|
||||
podSpec.Affinity = affinity
|
||||
}
|
||||
|
||||
if c.OpConfig.ScalyrAPIKey != "" && c.OpConfig.ScalyrImage != "" {
|
||||
podSpec.Containers = append(
|
||||
podSpec.Containers,
|
||||
v1.Container{
|
||||
Name: "scalyr-sidecar",
|
||||
Image: c.OpConfig.ScalyrImage,
|
||||
ImagePullPolicy: v1.PullIfNotPresent,
|
||||
Resources: *resourceRequirementsScalyrSidecar,
|
||||
VolumeMounts: volumeMounts,
|
||||
Env: []v1.EnvVar{
|
||||
{
|
||||
Name: "POD_NAME",
|
||||
ValueFrom: &v1.EnvVarSource{
|
||||
FieldRef: &v1.ObjectFieldSelector{
|
||||
APIVersion: "v1",
|
||||
FieldPath: "metadata.name",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "POD_NAMESPACE",
|
||||
ValueFrom: &v1.EnvVarSource{
|
||||
FieldRef: &v1.ObjectFieldSelector{
|
||||
APIVersion: "v1",
|
||||
FieldPath: "metadata.namespace",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "SCALYR_API_KEY",
|
||||
Value: c.OpConfig.ScalyrAPIKey,
|
||||
},
|
||||
{
|
||||
Name: "SCALYR_SERVER_HOST",
|
||||
Value: c.Name,
|
||||
},
|
||||
{
|
||||
Name: "SCALYR_SERVER_URL",
|
||||
Value: c.OpConfig.ScalyrServerURL,
|
||||
},
|
||||
Name: "POD_NAME",
|
||||
ValueFrom: &v1.EnvVarSource{
|
||||
FieldRef: &v1.ObjectFieldSelector{
|
||||
APIVersion: "v1",
|
||||
FieldPath: "metadata.name",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "POD_NAMESPACE",
|
||||
ValueFrom: &v1.EnvVarSource{
|
||||
FieldRef: &v1.ObjectFieldSelector{
|
||||
APIVersion: "v1",
|
||||
FieldPath: "metadata.namespace",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "POSTGRES_USER",
|
||||
Value: superUserName,
|
||||
},
|
||||
{
|
||||
Name: "POSTGRES_PASSWORD",
|
||||
ValueFrom: &v1.EnvVarSource{
|
||||
SecretKeyRef: &v1.SecretKeySelector{
|
||||
LocalObjectReference: v1.LocalObjectReference{
|
||||
Name: credentialsSecretName,
|
||||
},
|
||||
Key: "password",
|
||||
},
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
template := v1.PodTemplateSpec{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: c.labelsSet(true),
|
||||
Namespace: c.Namespace,
|
||||
},
|
||||
Spec: podSpec,
|
||||
}
|
||||
if c.OpConfig.KubeIAMRole != "" {
|
||||
template.Annotations = map[string]string{constants.KubeIAmAnnotation: c.OpConfig.KubeIAMRole}
|
||||
if len(sidecar.Env) > 0 {
|
||||
env = append(env, sidecar.Env...)
|
||||
}
|
||||
return &v1.Container{
|
||||
Name: name,
|
||||
Image: sidecar.DockerImage,
|
||||
ImagePullPolicy: v1.PullIfNotPresent,
|
||||
Resources: *resources,
|
||||
VolumeMounts: volumeMounts,
|
||||
Env: deduplicateEnvVars(env, name, logger),
|
||||
Ports: sidecar.Ports,
|
||||
}
|
||||
|
||||
return &template
|
||||
}
|
||||
|
||||
func getBucketScopeSuffix(uid string) string {
|
||||
|
|
@ -546,46 +619,114 @@ func getBucketScopeSuffix(uid string) string {
|
|||
return ""
|
||||
}
|
||||
|
||||
func makeResources(cpuRequest, memoryRequest, cpuLimit, memoryLimit string) spec.Resources {
|
||||
return spec.Resources{
|
||||
ResourceRequest: spec.ResourceDescription{
|
||||
func makeResources(cpuRequest, memoryRequest, cpuLimit, memoryLimit string) acidv1.Resources {
|
||||
return acidv1.Resources{
|
||||
ResourceRequest: acidv1.ResourceDescription{
|
||||
CPU: cpuRequest,
|
||||
Memory: memoryRequest,
|
||||
},
|
||||
ResourceLimits: spec.ResourceDescription{
|
||||
ResourceLimits: acidv1.ResourceDescription{
|
||||
CPU: cpuLimit,
|
||||
Memory: memoryLimit,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func (c *Cluster) generateStatefulSet(spec *spec.PostgresSpec) (*v1beta1.StatefulSet, error) {
|
||||
resourceRequirements, err := c.resourceRequirements(spec.Resources)
|
||||
func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*v1beta1.StatefulSet, error) {
|
||||
|
||||
var (
|
||||
err error
|
||||
sidecarContainers []v1.Container
|
||||
podTemplate *v1.PodTemplateSpec
|
||||
volumeClaimTemplate *v1.PersistentVolumeClaim
|
||||
)
|
||||
defaultResources := c.makeDefaultResources()
|
||||
|
||||
resourceRequirements, err := generateResourceRequirements(spec.Resources, defaultResources)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not generate resource requirements: %v", err)
|
||||
}
|
||||
resourceRequirementsScalyrSidecar, err := c.resourceRequirements(
|
||||
makeResources(
|
||||
c.OpConfig.ScalyrCPURequest,
|
||||
c.OpConfig.ScalyrMemoryRequest,
|
||||
c.OpConfig.ScalyrCPULimit,
|
||||
c.OpConfig.ScalyrMemoryLimit,
|
||||
),
|
||||
)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not generate Scalyr sidecar resource requirements: %v", err)
|
||||
}
|
||||
var customPodEnvVars map[string]string
|
||||
|
||||
customPodEnvVarsList := make([]v1.EnvVar, 0)
|
||||
|
||||
if c.OpConfig.PodEnvironmentConfigMap != "" {
|
||||
if cm, err := c.KubeClient.ConfigMaps(c.Namespace).Get(c.OpConfig.PodEnvironmentConfigMap, metav1.GetOptions{}); err != nil {
|
||||
var cm *v1.ConfigMap
|
||||
cm, err = c.KubeClient.ConfigMaps(c.Namespace).Get(c.OpConfig.PodEnvironmentConfigMap, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not read PodEnvironmentConfigMap: %v", err)
|
||||
} else {
|
||||
customPodEnvVars = cm.Data
|
||||
}
|
||||
for k, v := range cm.Data {
|
||||
customPodEnvVarsList = append(customPodEnvVarsList, v1.EnvVar{Name: k, Value: v})
|
||||
}
|
||||
sort.Slice(customPodEnvVarsList,
|
||||
func(i, j int) bool { return customPodEnvVarsList[i].Name < customPodEnvVarsList[j].Name })
|
||||
}
|
||||
podTemplate := c.generatePodTemplate(c.Postgresql.GetUID(), resourceRequirements, resourceRequirementsScalyrSidecar, &spec.Tolerations, &spec.PostgresqlParam, &spec.Patroni, &spec.Clone, &spec.DockerImage, customPodEnvVars)
|
||||
volumeClaimTemplate, err := generatePersistentVolumeClaimTemplate(spec.Volume.Size, spec.Volume.StorageClass)
|
||||
|
||||
spiloConfiguration := generateSpiloJSONConfiguration(&spec.PostgresqlParam, &spec.Patroni, c.OpConfig.PamRoleName, c.logger)
|
||||
|
||||
// generate environment variables for the spilo container
|
||||
spiloEnvVars := deduplicateEnvVars(
|
||||
c.generateSpiloPodEnvVars(c.Postgresql.GetUID(), spiloConfiguration, &spec.Clone, customPodEnvVarsList),
|
||||
c.containerName(), c.logger)
|
||||
|
||||
// pickup the docker image for the spilo container
|
||||
effectiveDockerImage := util.Coalesce(spec.DockerImage, c.OpConfig.DockerImage)
|
||||
|
||||
volumeMounts := generateVolumeMounts()
|
||||
|
||||
// generate the spilo container
|
||||
spiloContainer := generateSpiloContainer(c.containerName(), &effectiveDockerImage, resourceRequirements, spiloEnvVars, volumeMounts)
|
||||
|
||||
// resolve conflicts between operator-global and per-cluster sidecards
|
||||
sideCars := c.mergeSidecars(spec.Sidecars)
|
||||
|
||||
resourceRequirementsScalyrSidecar := makeResources(
|
||||
c.OpConfig.ScalyrCPURequest,
|
||||
c.OpConfig.ScalyrMemoryRequest,
|
||||
c.OpConfig.ScalyrCPULimit,
|
||||
c.OpConfig.ScalyrMemoryLimit,
|
||||
)
|
||||
|
||||
// generate scalyr sidecar container
|
||||
if scalyrSidecar :=
|
||||
generateScalyrSidecarSpec(c.Name,
|
||||
c.OpConfig.ScalyrAPIKey,
|
||||
c.OpConfig.ScalyrServerURL,
|
||||
c.OpConfig.ScalyrImage,
|
||||
&resourceRequirementsScalyrSidecar, c.logger); scalyrSidecar != nil {
|
||||
sideCars = append(sideCars, *scalyrSidecar)
|
||||
}
|
||||
|
||||
// generate sidecar containers
|
||||
if sidecarContainers, err = generateSidecarContainers(sideCars, volumeMounts, defaultResources,
|
||||
c.OpConfig.SuperUsername, c.credentialSecretName(c.OpConfig.SuperUsername), c.logger); err != nil {
|
||||
return nil, fmt.Errorf("could not generate sidecar containers: %v", err)
|
||||
}
|
||||
|
||||
tolerationSpec := tolerations(&spec.Tolerations, c.OpConfig.PodToleration)
|
||||
effectivePodPriorityClassName := util.Coalesce(spec.PodPriorityClassName, c.OpConfig.PodPriorityClassName)
|
||||
|
||||
// generate pod template for the statefulset, based on the spilo container and sidecards
|
||||
if podTemplate, err = generatePodTemplate(
|
||||
c.Namespace,
|
||||
c.labelsSet(true),
|
||||
spiloContainer,
|
||||
sidecarContainers,
|
||||
&tolerationSpec,
|
||||
nodeAffinity(c.OpConfig.NodeReadinessLabel),
|
||||
int64(c.OpConfig.PodTerminateGracePeriod.Seconds()),
|
||||
c.OpConfig.PodServiceAccountName,
|
||||
c.OpConfig.KubeIAMRole,
|
||||
effectivePodPriorityClassName); err != nil {
|
||||
return nil, fmt.Errorf("could not generate pod template: %v", err)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not generate pod template: %v", err)
|
||||
}
|
||||
|
||||
if volumeClaimTemplate, err = generatePersistentVolumeClaimTemplate(spec.Volume.Size,
|
||||
spec.Volume.StorageClass); err != nil {
|
||||
return nil, fmt.Errorf("could not generate volume claim template: %v", err)
|
||||
}
|
||||
|
||||
|
|
@ -596,7 +737,7 @@ func (c *Cluster) generateStatefulSet(spec *spec.PostgresSpec) (*v1beta1.Statefu
|
|||
Name: c.statefulSetName(),
|
||||
Namespace: c.Namespace,
|
||||
Labels: c.labelsSet(true),
|
||||
Annotations: map[string]string{RollingUpdateStatefulsetAnnotationKey: "false"},
|
||||
Annotations: map[string]string{rollingUpdateStatefulsetAnnotationKey: "false"},
|
||||
},
|
||||
Spec: v1beta1.StatefulSetSpec{
|
||||
Replicas: &numberOfInstances,
|
||||
|
|
@ -610,11 +751,65 @@ func (c *Cluster) generateStatefulSet(spec *spec.PostgresSpec) (*v1beta1.Statefu
|
|||
return statefulSet, nil
|
||||
}
|
||||
|
||||
func (c *Cluster) getNumberOfInstances(spec *spec.PostgresSpec) (newcur int32) {
|
||||
func generateScalyrSidecarSpec(clusterName, APIKey, serverURL, dockerImage string,
|
||||
containerResources *acidv1.Resources, logger *logrus.Entry) *acidv1.Sidecar {
|
||||
if APIKey == "" || dockerImage == "" {
|
||||
if APIKey == "" && dockerImage != "" {
|
||||
logger.Warning("Not running Scalyr sidecar: SCALYR_API_KEY must be defined")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
scalarSpec := &acidv1.Sidecar{
|
||||
Name: "scalyr-sidecar",
|
||||
DockerImage: dockerImage,
|
||||
Env: []v1.EnvVar{
|
||||
{
|
||||
Name: "SCALYR_API_KEY",
|
||||
Value: APIKey,
|
||||
},
|
||||
{
|
||||
Name: "SCALYR_SERVER_HOST",
|
||||
Value: clusterName,
|
||||
},
|
||||
},
|
||||
Resources: *containerResources,
|
||||
}
|
||||
if serverURL != "" {
|
||||
scalarSpec.Env = append(scalarSpec.Env, v1.EnvVar{Name: "SCALYR_SERVER_URL", Value: serverURL})
|
||||
}
|
||||
return scalarSpec
|
||||
}
|
||||
|
||||
// mergeSidecar merges globally-defined sidecars with those defined in the cluster manifest
|
||||
func (c *Cluster) mergeSidecars(sidecars []acidv1.Sidecar) []acidv1.Sidecar {
|
||||
globalSidecarsToSkip := map[string]bool{}
|
||||
result := make([]acidv1.Sidecar, 0)
|
||||
|
||||
for i, sidecar := range sidecars {
|
||||
dockerImage, ok := c.OpConfig.Sidecars[sidecar.Name]
|
||||
if ok {
|
||||
if dockerImage != sidecar.DockerImage {
|
||||
c.logger.Warningf("merging definitions for sidecar %q: "+
|
||||
"ignoring %q in the global scope in favor of %q defined in the cluster",
|
||||
sidecar.Name, dockerImage, sidecar.DockerImage)
|
||||
}
|
||||
globalSidecarsToSkip[sidecar.Name] = true
|
||||
}
|
||||
result = append(result, sidecars[i])
|
||||
}
|
||||
for name, dockerImage := range c.OpConfig.Sidecars {
|
||||
if !globalSidecarsToSkip[name] {
|
||||
result = append(result, acidv1.Sidecar{Name: name, DockerImage: dockerImage})
|
||||
}
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
func (c *Cluster) getNumberOfInstances(spec *acidv1.PostgresSpec) int32 {
|
||||
min := c.OpConfig.MinInstances
|
||||
max := c.OpConfig.MaxInstances
|
||||
cur := spec.NumberOfInstances
|
||||
newcur = cur
|
||||
newcur := cur
|
||||
|
||||
if max >= 0 && newcur > max {
|
||||
newcur = max
|
||||
|
|
@ -626,18 +821,23 @@ func (c *Cluster) getNumberOfInstances(spec *spec.PostgresSpec) (newcur int32) {
|
|||
c.logger.Infof("adjusted number of instances from %d to %d (min: %d, max: %d)", cur, newcur, min, max)
|
||||
}
|
||||
|
||||
return
|
||||
return newcur
|
||||
}
|
||||
|
||||
func generatePersistentVolumeClaimTemplate(volumeSize, volumeStorageClass string) (*v1.PersistentVolumeClaim, error) {
|
||||
|
||||
var storageClassName *string
|
||||
|
||||
metadata := metav1.ObjectMeta{
|
||||
Name: constants.DataVolumeName,
|
||||
}
|
||||
if volumeStorageClass != "" {
|
||||
// TODO: check if storage class exists
|
||||
// TODO: remove the old annotation, switching completely to the StorageClassName field.
|
||||
metadata.Annotations = map[string]string{"volume.beta.kubernetes.io/storage-class": volumeStorageClass}
|
||||
storageClassName = &volumeStorageClass
|
||||
} else {
|
||||
metadata.Annotations = map[string]string{"volume.alpha.kubernetes.io/storage-class": "default"}
|
||||
storageClassName = nil
|
||||
}
|
||||
|
||||
quantity, err := resource.ParseQuantity(volumeSize)
|
||||
|
|
@ -654,14 +854,15 @@ func generatePersistentVolumeClaimTemplate(volumeSize, volumeStorageClass string
|
|||
v1.ResourceStorage: quantity,
|
||||
},
|
||||
},
|
||||
StorageClassName: storageClassName,
|
||||
},
|
||||
}
|
||||
|
||||
return volumeClaim, nil
|
||||
}
|
||||
|
||||
func (c *Cluster) generateUserSecrets() (secrets map[string]*v1.Secret) {
|
||||
secrets = make(map[string]*v1.Secret, len(c.pgUsers))
|
||||
func (c *Cluster) generateUserSecrets() map[string]*v1.Secret {
|
||||
secrets := make(map[string]*v1.Secret, len(c.pgUsers))
|
||||
namespace := c.Namespace
|
||||
for username, pgUser := range c.pgUsers {
|
||||
//Skip users with no password i.e. human users (they'll be authenticated using pam)
|
||||
|
|
@ -678,7 +879,7 @@ func (c *Cluster) generateUserSecrets() (secrets map[string]*v1.Secret) {
|
|||
}
|
||||
}
|
||||
|
||||
return
|
||||
return secrets
|
||||
}
|
||||
|
||||
func (c *Cluster) generateSingleUserSecret(namespace string, pgUser spec.PgUser) *v1.Secret {
|
||||
|
|
@ -707,7 +908,7 @@ func (c *Cluster) generateSingleUserSecret(namespace string, pgUser spec.PgUser)
|
|||
return &secret
|
||||
}
|
||||
|
||||
func (c *Cluster) shouldCreateLoadBalancerForService(role PostgresRole, spec *spec.PostgresSpec) bool {
|
||||
func (c *Cluster) shouldCreateLoadBalancerForService(role PostgresRole, spec *acidv1.PostgresSpec) bool {
|
||||
|
||||
switch role {
|
||||
|
||||
|
|
@ -735,7 +936,7 @@ func (c *Cluster) shouldCreateLoadBalancerForService(role PostgresRole, spec *sp
|
|||
|
||||
}
|
||||
|
||||
func (c *Cluster) generateService(role PostgresRole, spec *spec.PostgresSpec) *v1.Service {
|
||||
func (c *Cluster) generateService(role PostgresRole, spec *acidv1.PostgresSpec) *v1.Service {
|
||||
var dnsName string
|
||||
|
||||
if role == Master {
|
||||
|
|
@ -806,7 +1007,7 @@ func (c *Cluster) generateEndpoint(role PostgresRole, subsets []v1.EndpointSubse
|
|||
return endpoints
|
||||
}
|
||||
|
||||
func (c *Cluster) generateCloneEnvironment(description *spec.CloneDescription) []v1.EnvVar {
|
||||
func (c *Cluster) generateCloneEnvironment(description *acidv1.CloneDescription) []v1.EnvVar {
|
||||
result := make([]v1.EnvVar, 0)
|
||||
|
||||
if description.ClusterName == "" {
|
||||
|
|
@ -841,7 +1042,7 @@ func (c *Cluster) generateCloneEnvironment(description *spec.CloneDescription) [
|
|||
result = append(result, v1.EnvVar{Name: "CLONE_METHOD", Value: "CLONE_WITH_WALE"})
|
||||
result = append(result, v1.EnvVar{Name: "CLONE_WAL_S3_BUCKET", Value: c.OpConfig.WALES3Bucket})
|
||||
result = append(result, v1.EnvVar{Name: "CLONE_TARGET_TIME", Value: description.EndTimestamp})
|
||||
result = append(result, v1.EnvVar{Name: "CLONE_WAL_BUCKET_SCOPE_SUFFIX", Value: getBucketScopeSuffix(description.Uid)})
|
||||
result = append(result, v1.EnvVar{Name: "CLONE_WAL_BUCKET_SCOPE_SUFFIX", Value: getBucketScopeSuffix(description.UID)})
|
||||
result = append(result, v1.EnvVar{Name: "CLONE_WAL_BUCKET_SCOPE_PREFIX", Value: ""})
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -1,8 +1,8 @@
|
|||
package cluster
|
||||
|
||||
import (
|
||||
"github.com/zalando-incubator/postgres-operator/pkg/spec"
|
||||
u "github.com/zalando-incubator/postgres-operator/pkg/util"
|
||||
acidv1 "github.com/zalando-incubator/postgres-operator/pkg/apis/acid.zalan.do/v1"
|
||||
"github.com/zalando-incubator/postgres-operator/pkg/util/config"
|
||||
"github.com/zalando-incubator/postgres-operator/pkg/util/k8sutil"
|
||||
"testing"
|
||||
|
|
@ -18,41 +18,41 @@ func TestCreateLoadBalancerLogic(t *testing.T) {
|
|||
ReplicationUsername: replicationUserName,
|
||||
},
|
||||
},
|
||||
}, k8sutil.KubernetesClient{}, spec.Postgresql{}, logger)
|
||||
}, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger)
|
||||
|
||||
testName := "TestCreateLoadBalancerLogic"
|
||||
tests := []struct {
|
||||
subtest string
|
||||
role PostgresRole
|
||||
spec *spec.PostgresSpec
|
||||
spec *acidv1.PostgresSpec
|
||||
opConfig config.Config
|
||||
result bool
|
||||
}{
|
||||
{
|
||||
subtest: "new format, load balancer is enabled for replica",
|
||||
role: Replica,
|
||||
spec: &spec.PostgresSpec{EnableReplicaLoadBalancer: u.True()},
|
||||
spec: &acidv1.PostgresSpec{EnableReplicaLoadBalancer: u.True()},
|
||||
opConfig: config.Config{},
|
||||
result: true,
|
||||
},
|
||||
{
|
||||
subtest: "new format, load balancer is disabled for replica",
|
||||
role: Replica,
|
||||
spec: &spec.PostgresSpec{EnableReplicaLoadBalancer: u.False()},
|
||||
spec: &acidv1.PostgresSpec{EnableReplicaLoadBalancer: u.False()},
|
||||
opConfig: config.Config{},
|
||||
result: false,
|
||||
},
|
||||
{
|
||||
subtest: "new format, load balancer isn't specified for replica",
|
||||
role: Replica,
|
||||
spec: &spec.PostgresSpec{EnableReplicaLoadBalancer: nil},
|
||||
spec: &acidv1.PostgresSpec{EnableReplicaLoadBalancer: nil},
|
||||
opConfig: config.Config{EnableReplicaLoadBalancer: true},
|
||||
result: true,
|
||||
},
|
||||
{
|
||||
subtest: "new format, load balancer isn't specified for replica",
|
||||
role: Replica,
|
||||
spec: &spec.PostgresSpec{EnableReplicaLoadBalancer: nil},
|
||||
spec: &acidv1.PostgresSpec{EnableReplicaLoadBalancer: nil},
|
||||
opConfig: config.Config{EnableReplicaLoadBalancer: false},
|
||||
result: false,
|
||||
},
|
||||
|
|
|
|||
|
|
@ -4,11 +4,12 @@ import (
|
|||
"fmt"
|
||||
"math/rand"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/client-go/pkg/api/v1"
|
||||
|
||||
"github.com/zalando-incubator/postgres-operator/pkg/spec"
|
||||
"github.com/zalando-incubator/postgres-operator/pkg/util"
|
||||
"k8s.io/api/apps/v1beta1"
|
||||
)
|
||||
|
||||
func (c *Cluster) listPods() ([]v1.Pod, error) {
|
||||
|
|
@ -96,12 +97,12 @@ func (c *Cluster) unregisterPodSubscriber(podName spec.NamespacedName) {
|
|||
delete(c.podSubscribers, podName)
|
||||
}
|
||||
|
||||
func (c *Cluster) registerPodSubscriber(podName spec.NamespacedName) chan spec.PodEvent {
|
||||
func (c *Cluster) registerPodSubscriber(podName spec.NamespacedName) chan PodEvent {
|
||||
c.logger.Debugf("subscribing to pod %q", podName)
|
||||
c.podSubscribersMu.Lock()
|
||||
defer c.podSubscribersMu.Unlock()
|
||||
|
||||
ch := make(chan spec.PodEvent)
|
||||
ch := make(chan PodEvent)
|
||||
if _, ok := c.podSubscribers[podName]; ok {
|
||||
panic("pod '" + podName.String() + "' is already subscribed")
|
||||
}
|
||||
|
|
@ -182,6 +183,8 @@ func (c *Cluster) masterCandidate(oldNodeName string) (*v1.Pod, error) {
|
|||
func (c *Cluster) MigrateMasterPod(podName spec.NamespacedName) error {
|
||||
var (
|
||||
masterCandidatePod *v1.Pod
|
||||
err error
|
||||
eol bool
|
||||
)
|
||||
|
||||
oldMaster, err := c.KubeClient.Pods(podName.Namespace).Get(podName.Name, metav1.GetOptions{})
|
||||
|
|
@ -192,9 +195,10 @@ func (c *Cluster) MigrateMasterPod(podName spec.NamespacedName) error {
|
|||
|
||||
c.logger.Infof("migrating master pod %q", podName)
|
||||
|
||||
if eol, err := c.podIsEndOfLife(oldMaster); err != nil {
|
||||
if eol, err = c.podIsEndOfLife(oldMaster); err != nil {
|
||||
return fmt.Errorf("could not get node %q: %v", oldMaster.Spec.NodeName, err)
|
||||
} else if !eol {
|
||||
}
|
||||
if !eol {
|
||||
c.logger.Debugf("pod is already on a live node")
|
||||
return nil
|
||||
}
|
||||
|
|
@ -205,41 +209,43 @@ func (c *Cluster) MigrateMasterPod(podName spec.NamespacedName) error {
|
|||
}
|
||||
// we must have a statefulset in the cluster for the migration to work
|
||||
if c.Statefulset == nil {
|
||||
sset, err := c.KubeClient.StatefulSets(c.Namespace).Get(c.statefulSetName(), metav1.GetOptions{})
|
||||
if err != nil {
|
||||
var sset *v1beta1.StatefulSet
|
||||
if sset, err = c.KubeClient.StatefulSets(c.Namespace).Get(c.statefulSetName(),
|
||||
metav1.GetOptions{}); err != nil {
|
||||
return fmt.Errorf("could not retrieve cluster statefulset: %v", err)
|
||||
}
|
||||
c.Statefulset = sset
|
||||
}
|
||||
// We may not have a cached statefulset if the initial cluster sync has aborted, revert to the spec in that case.
|
||||
if *c.Statefulset.Spec.Replicas == 1 {
|
||||
c.logger.Warningf("single master pod for cluster %q, migration will cause longer downtime of the master instance", c.clusterName())
|
||||
} else {
|
||||
masterCandidatePod, err = c.masterCandidate(oldMaster.Spec.NodeName)
|
||||
if err != nil {
|
||||
if *c.Statefulset.Spec.Replicas > 1 {
|
||||
if masterCandidatePod, err = c.masterCandidate(oldMaster.Spec.NodeName); err != nil {
|
||||
return fmt.Errorf("could not get new master candidate: %v", err)
|
||||
}
|
||||
} else {
|
||||
c.logger.Warningf("single master pod for cluster %q, migration will cause longer downtime of the master instance", c.clusterName())
|
||||
}
|
||||
|
||||
// there are two cases for each postgres cluster that has its master pod on the node to migrate from:
|
||||
// - the cluster has some replicas - migrate one of those if necessary and failover to it
|
||||
// - there are no replicas - just terminate the master and wait until it respawns
|
||||
// in both cases the result is the new master up and running on a new node.
|
||||
if masterCandidatePod != nil {
|
||||
pod, err := c.movePodFromEndOfLifeNode(masterCandidatePod)
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not move pod: %v", err)
|
||||
}
|
||||
|
||||
masterCandidateName := util.NameFromMeta(pod.ObjectMeta)
|
||||
if err := c.Switchover(oldMaster, masterCandidateName); err != nil {
|
||||
return fmt.Errorf("could not failover to pod %q: %v", masterCandidateName, err)
|
||||
}
|
||||
} else {
|
||||
if masterCandidatePod == nil {
|
||||
if _, err = c.movePodFromEndOfLifeNode(oldMaster); err != nil {
|
||||
return fmt.Errorf("could not move pod: %v", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
if masterCandidatePod, err = c.movePodFromEndOfLifeNode(masterCandidatePod); err != nil {
|
||||
return fmt.Errorf("could not move pod: %v", err)
|
||||
}
|
||||
|
||||
masterCandidateName := util.NameFromMeta(masterCandidatePod.ObjectMeta)
|
||||
if err := c.Switchover(oldMaster, masterCandidateName); err != nil {
|
||||
return fmt.Errorf("could not failover to pod %q: %v", masterCandidateName, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
|
|
@ -281,12 +287,12 @@ func (c *Cluster) recreatePod(podName spec.NamespacedName) (*v1.Pod, error) {
|
|||
if err := c.waitForPodDeletion(ch); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if pod, err := c.waitForPodLabel(ch, stopChan, nil); err != nil {
|
||||
pod, err := c.waitForPodLabel(ch, stopChan, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
} else {
|
||||
c.logger.Infof("pod %q has been recreated", podName)
|
||||
return pod, nil
|
||||
}
|
||||
c.logger.Infof("pod %q has been recreated", podName)
|
||||
return pod, nil
|
||||
}
|
||||
|
||||
func (c *Cluster) recreatePods() error {
|
||||
|
|
|
|||
|
|
@ -5,11 +5,11 @@ import (
|
|||
"strconv"
|
||||
"strings"
|
||||
|
||||
"k8s.io/api/apps/v1beta1"
|
||||
"k8s.io/api/core/v1"
|
||||
policybeta1 "k8s.io/api/policy/v1beta1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/client-go/pkg/api/v1"
|
||||
"k8s.io/client-go/pkg/apis/apps/v1beta1"
|
||||
policybeta1 "k8s.io/client-go/pkg/apis/policy/v1beta1"
|
||||
|
||||
"github.com/zalando-incubator/postgres-operator/pkg/util"
|
||||
"github.com/zalando-incubator/postgres-operator/pkg/util/constants"
|
||||
|
|
@ -18,7 +18,7 @@ import (
|
|||
)
|
||||
|
||||
const (
|
||||
RollingUpdateStatefulsetAnnotationKey = "zalando-postgres-operator-rolling-update-required"
|
||||
rollingUpdateStatefulsetAnnotationKey = "zalando-postgres-operator-rolling-update-required"
|
||||
)
|
||||
|
||||
func (c *Cluster) listResources() error {
|
||||
|
|
@ -140,7 +140,7 @@ func (c *Cluster) setRollingUpdateFlagForStatefulSet(sset *v1beta1.StatefulSet,
|
|||
if anno == nil {
|
||||
anno = make(map[string]string)
|
||||
}
|
||||
anno[RollingUpdateStatefulsetAnnotationKey] = strconv.FormatBool(val)
|
||||
anno[rollingUpdateStatefulsetAnnotationKey] = strconv.FormatBool(val)
|
||||
sset.SetAnnotations(anno)
|
||||
}
|
||||
|
||||
|
|
@ -162,13 +162,13 @@ func (c *Cluster) getRollingUpdateFlagFromStatefulSet(sset *v1beta1.StatefulSet,
|
|||
anno := sset.GetAnnotations()
|
||||
flag = defaultValue
|
||||
|
||||
stringFlag, exists := anno[RollingUpdateStatefulsetAnnotationKey]
|
||||
stringFlag, exists := anno[rollingUpdateStatefulsetAnnotationKey]
|
||||
if exists {
|
||||
var err error
|
||||
if flag, err = strconv.ParseBool(stringFlag); err != nil {
|
||||
c.logger.Warnf("error when parsing %q annotation for the statefulset %q: expected boolean value, got %q\n",
|
||||
RollingUpdateStatefulsetAnnotationKey,
|
||||
types.NamespacedName{sset.Namespace, sset.Name},
|
||||
rollingUpdateStatefulsetAnnotationKey,
|
||||
types.NamespacedName{Namespace: sset.Namespace, Name: sset.Name},
|
||||
stringFlag)
|
||||
flag = defaultValue
|
||||
}
|
||||
|
|
@ -272,10 +272,10 @@ func (c *Cluster) replaceStatefulSet(newStatefulSet *v1beta1.StatefulSet) error
|
|||
c.logger.Debugf("replacing statefulset")
|
||||
|
||||
// Delete the current statefulset without deleting the pods
|
||||
orphanDepencies := true
|
||||
deletePropagationPolicy := metav1.DeletePropagationOrphan
|
||||
oldStatefulset := c.Statefulset
|
||||
|
||||
options := metav1.DeleteOptions{OrphanDependents: &orphanDepencies}
|
||||
options := metav1.DeleteOptions{PropagationPolicy: &deletePropagationPolicy}
|
||||
if err := c.KubeClient.StatefulSets(oldStatefulset.Namespace).Delete(oldStatefulset.Name, &options); err != nil {
|
||||
return fmt.Errorf("could not delete statefulset %q: %v", statefulSetName, err)
|
||||
}
|
||||
|
|
@ -483,7 +483,7 @@ func (c *Cluster) generateEndpointSubsets(role PostgresRole) []v1.EndpointSubset
|
|||
if len(endPointAddresses) > 0 {
|
||||
result = append(result, v1.EndpointSubset{
|
||||
Addresses: endPointAddresses,
|
||||
Ports: []v1.EndpointPort{{"postgresql", 5432, "TCP"}},
|
||||
Ports: []v1.EndpointPort{{Name: "postgresql", Port: 5432, Protocol: "TCP"}},
|
||||
})
|
||||
} else if role == Master {
|
||||
c.logger.Warningf("master is not running, generated master endpoint does not contain any addresses")
|
||||
|
|
@ -625,27 +625,3 @@ func (c *Cluster) GetStatefulSet() *v1beta1.StatefulSet {
|
|||
func (c *Cluster) GetPodDisruptionBudget() *policybeta1.PodDisruptionBudget {
|
||||
return c.PodDisruptionBudget
|
||||
}
|
||||
|
||||
func (c *Cluster) createDatabases() error {
|
||||
c.setProcessName("creating databases")
|
||||
|
||||
if len(c.Spec.Databases) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
if err := c.initDbConn(); err != nil {
|
||||
return fmt.Errorf("could not init database connection")
|
||||
}
|
||||
defer func() {
|
||||
if err := c.closeDbConn(); err != nil {
|
||||
c.logger.Errorf("could not close database connection: %v", err)
|
||||
}
|
||||
}()
|
||||
|
||||
for datname, owner := range c.Spec.Databases {
|
||||
if err := c.executeCreateDatabase(datname, owner); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
|
|
|||
|
|
@ -2,11 +2,11 @@ package cluster
|
|||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
policybeta1 "k8s.io/api/policy/v1beta1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
policybeta1 "k8s.io/client-go/pkg/apis/policy/v1beta1"
|
||||
|
||||
acidv1 "github.com/zalando-incubator/postgres-operator/pkg/apis/acid.zalan.do/v1"
|
||||
"github.com/zalando-incubator/postgres-operator/pkg/spec"
|
||||
"github.com/zalando-incubator/postgres-operator/pkg/util"
|
||||
"github.com/zalando-incubator/postgres-operator/pkg/util/constants"
|
||||
|
|
@ -16,7 +16,8 @@ import (
|
|||
|
||||
// Sync syncs the cluster, making sure the actual Kubernetes objects correspond to what is defined in the manifest.
|
||||
// Unlike the update, sync does not error out if some objects do not exist and takes care of creating them.
|
||||
func (c *Cluster) Sync(newSpec *spec.Postgresql) (err error) {
|
||||
func (c *Cluster) Sync(newSpec *acidv1.Postgresql) error {
|
||||
var err error
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
var actions []Action = NoActions
|
||||
|
|
@ -26,15 +27,15 @@ func (c *Cluster) Sync(newSpec *spec.Postgresql) (err error) {
|
|||
defer func() {
|
||||
if err != nil {
|
||||
c.logger.Warningf("error while syncing cluster state: %v", err)
|
||||
c.setStatus(spec.ClusterStatusSyncFailed)
|
||||
} else if c.Status != spec.ClusterStatusRunning {
|
||||
c.setStatus(spec.ClusterStatusRunning)
|
||||
c.setStatus(acidv1.ClusterStatusSyncFailed)
|
||||
} else if c.Status != acidv1.ClusterStatusRunning {
|
||||
c.setStatus(acidv1.ClusterStatusRunning)
|
||||
}
|
||||
}()
|
||||
|
||||
if err = c.initUsers(); err != nil {
|
||||
err = fmt.Errorf("could not init users: %v", err)
|
||||
return
|
||||
return err
|
||||
}
|
||||
|
||||
c.logger.Debugf("syncing secrets")
|
||||
|
|
@ -42,18 +43,18 @@ func (c *Cluster) Sync(newSpec *spec.Postgresql) (err error) {
|
|||
//TODO: mind the secrets of the deleted/new users
|
||||
if err = c.syncSecrets(); err != nil {
|
||||
err = fmt.Errorf("could not sync secrets: %v", err)
|
||||
return
|
||||
return err
|
||||
}
|
||||
|
||||
c.logger.Debugf("syncing services")
|
||||
if actions, err = c.syncServices(); err != nil {
|
||||
err = fmt.Errorf("could not resolve actions to sync services: %v", err)
|
||||
return
|
||||
return err
|
||||
}
|
||||
|
||||
if err = c.applyActions(actions); err != nil {
|
||||
err = fmt.Errorf("could not apply actions to sync services: %v", err)
|
||||
return
|
||||
return err
|
||||
}
|
||||
|
||||
// potentially enlarge volumes before changing the statefulset. By doing that
|
||||
|
|
@ -65,38 +66,38 @@ func (c *Cluster) Sync(newSpec *spec.Postgresql) (err error) {
|
|||
c.logger.Debugf("syncing persistent volumes")
|
||||
if err = c.syncVolumes(); err != nil {
|
||||
err = fmt.Errorf("could not sync persistent volumes: %v", err)
|
||||
return
|
||||
return err
|
||||
}
|
||||
|
||||
c.logger.Debugf("syncing statefulsets")
|
||||
if err = c.syncStatefulSet(); err != nil {
|
||||
if !k8sutil.ResourceAlreadyExists(err) {
|
||||
err = fmt.Errorf("could not sync statefulsets: %v", err)
|
||||
return
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// create database objects unless we are running without pods or disabled that feature explicitely
|
||||
// create database objects unless we are running without pods or disabled that feature explicitly
|
||||
if !(c.databaseAccessDisabled() || c.getNumberOfInstances(&newSpec.Spec) <= 0) {
|
||||
c.logger.Debugf("syncing roles")
|
||||
if err = c.syncRoles(); err != nil {
|
||||
err = fmt.Errorf("could not sync roles: %v", err)
|
||||
return
|
||||
return err
|
||||
}
|
||||
c.logger.Debugf("syncing databases")
|
||||
if err = c.syncDatabases(); err != nil {
|
||||
err = fmt.Errorf("could not sync databases: %v", err)
|
||||
return
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
c.logger.Debug("syncing pod disruption budgets")
|
||||
if err = c.syncPodDisruptionBudget(false); err != nil {
|
||||
err = fmt.Errorf("could not sync pod disruption budget: %v", err)
|
||||
return
|
||||
return err
|
||||
}
|
||||
|
||||
return
|
||||
return err
|
||||
}
|
||||
|
||||
func (c *Cluster) syncServices() (actions []Action, err error) {
|
||||
|
|
@ -135,10 +136,13 @@ func (c *Cluster) applyActions(actions []Action) (err error) {
|
|||
}
|
||||
|
||||
func (c *Cluster) syncService(role PostgresRole) ([]Action, error) {
|
||||
var (
|
||||
svc *v1.Service
|
||||
err error
|
||||
)
|
||||
c.setProcessName("syncing %s service", role)
|
||||
|
||||
svc, err := c.KubeClient.Services(c.Namespace).Get(c.serviceName(role), metav1.GetOptions{})
|
||||
if err == nil {
|
||||
if svc, err = c.KubeClient.Services(c.Namespace).Get(c.serviceName(role), metav1.GetOptions{}); err == nil {
|
||||
c.Services[role] = svc
|
||||
desiredSvc := c.generateService(role, &c.Spec)
|
||||
match, reason := k8sutil.SameService(svc, desiredSvc)
|
||||
|
|
@ -156,8 +160,8 @@ func (c *Cluster) syncService(role PostgresRole) ([]Action, error) {
|
|||
} else if !k8sutil.ResourceNotFound(err) {
|
||||
return NoActions, fmt.Errorf("could not get %s service: %v", role, err)
|
||||
}
|
||||
// no existing service, create new one
|
||||
c.Services[role] = nil
|
||||
|
||||
c.logger.Infof("could not find the cluster's %s service", role)
|
||||
|
||||
actions, err := c.createService(role)
|
||||
|
|
@ -171,72 +175,78 @@ func (c *Cluster) syncService(role PostgresRole) ([]Action, error) {
|
|||
}
|
||||
|
||||
func (c *Cluster) syncEndpoint(role PostgresRole) error {
|
||||
var (
|
||||
ep *v1.Endpoints
|
||||
err error
|
||||
)
|
||||
c.setProcessName("syncing %s endpoint", role)
|
||||
|
||||
ep, err := c.KubeClient.Endpoints(c.Namespace).Get(c.endpointName(role), metav1.GetOptions{})
|
||||
if err == nil {
|
||||
|
||||
if ep, err = c.KubeClient.Endpoints(c.Namespace).Get(c.endpointName(role), metav1.GetOptions{}); err == nil {
|
||||
// TODO: No syncing of endpoints here, is this covered completely by updateService?
|
||||
c.Endpoints[role] = ep
|
||||
return nil
|
||||
} else if !k8sutil.ResourceNotFound(err) {
|
||||
}
|
||||
if !k8sutil.ResourceNotFound(err) {
|
||||
return fmt.Errorf("could not get %s endpoint: %v", role, err)
|
||||
}
|
||||
// no existing endpoint, create new one
|
||||
c.Endpoints[role] = nil
|
||||
|
||||
c.logger.Infof("could not find the cluster's %s endpoint", role)
|
||||
|
||||
if ep, err := c.createEndpoint(role); err != nil {
|
||||
if k8sutil.ResourceAlreadyExists(err) {
|
||||
c.logger.Infof("%s endpoint %q already exists", role, util.NameFromMeta(ep.ObjectMeta))
|
||||
ep, err := c.KubeClient.Endpoints(c.Namespace).Get(c.endpointName(role), metav1.GetOptions{})
|
||||
if err == nil {
|
||||
c.Endpoints[role] = ep
|
||||
} else {
|
||||
c.logger.Infof("could not fetch existing %s endpoint: %v", role, err)
|
||||
}
|
||||
} else {
|
||||
if ep, err = c.createEndpoint(role); err == nil {
|
||||
c.logger.Infof("created missing %s endpoint %q", role, util.NameFromMeta(ep.ObjectMeta))
|
||||
} else {
|
||||
if !k8sutil.ResourceAlreadyExists(err) {
|
||||
return fmt.Errorf("could not create missing %s endpoint: %v", role, err)
|
||||
}
|
||||
} else {
|
||||
c.logger.Infof("created missing %s endpoint %q", role, util.NameFromMeta(ep.ObjectMeta))
|
||||
c.Endpoints[role] = ep
|
||||
c.logger.Infof("%s endpoint %q already exists", role, util.NameFromMeta(ep.ObjectMeta))
|
||||
if ep, err = c.KubeClient.Endpoints(c.Namespace).Get(c.endpointName(role), metav1.GetOptions{}); err != nil {
|
||||
return fmt.Errorf("could not fetch existing %s endpoint: %v", role, err)
|
||||
}
|
||||
}
|
||||
|
||||
c.Endpoints[role] = ep
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Cluster) syncPodDisruptionBudget(isUpdate bool) error {
|
||||
pdb, err := c.KubeClient.PodDisruptionBudgets(c.Namespace).Get(c.podDisruptionBudgetName(), metav1.GetOptions{})
|
||||
if err == nil {
|
||||
var (
|
||||
pdb *policybeta1.PodDisruptionBudget
|
||||
err error
|
||||
)
|
||||
if pdb, err = c.KubeClient.PodDisruptionBudgets(c.Namespace).Get(c.podDisruptionBudgetName(), metav1.GetOptions{}); err == nil {
|
||||
c.PodDisruptionBudget = pdb
|
||||
newPDB := c.generatePodDisruptionBudget()
|
||||
if match, reason := k8sutil.SamePDB(pdb, newPDB); !match {
|
||||
c.logPDBChanges(pdb, newPDB, isUpdate, reason)
|
||||
if err := c.updatePodDisruptionBudget(newPDB); err != nil {
|
||||
if err = c.updatePodDisruptionBudget(newPDB); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
c.PodDisruptionBudget = pdb
|
||||
}
|
||||
|
||||
return nil
|
||||
} else if !k8sutil.ResourceNotFound(err) {
|
||||
|
||||
}
|
||||
if !k8sutil.ResourceNotFound(err) {
|
||||
return fmt.Errorf("could not get pod disruption budget: %v", err)
|
||||
}
|
||||
// no existing pod disruption budget, create new one
|
||||
c.PodDisruptionBudget = nil
|
||||
|
||||
c.logger.Infof("could not find the cluster's pod disruption budget")
|
||||
|
||||
if pdb, err = c.createPodDisruptionBudget(); err != nil {
|
||||
if k8sutil.ResourceAlreadyExists(err) {
|
||||
c.logger.Infof("pod disruption budget %q already exists", util.NameFromMeta(pdb.ObjectMeta))
|
||||
} else {
|
||||
if !k8sutil.ResourceAlreadyExists(err) {
|
||||
return fmt.Errorf("could not create pod disruption budget: %v", err)
|
||||
}
|
||||
} else {
|
||||
c.logger.Infof("created missing pod disruption budget %q", util.NameFromMeta(pdb.ObjectMeta))
|
||||
c.PodDisruptionBudget = pdb
|
||||
c.logger.Infof("pod disruption budget %q already exists", util.NameFromMeta(pdb.ObjectMeta))
|
||||
if pdb, err = c.KubeClient.PodDisruptionBudgets(c.Namespace).Get(c.podDisruptionBudgetName(), metav1.GetOptions{}); err != nil {
|
||||
return fmt.Errorf("could not fetch existing %q pod disruption budget", util.NameFromMeta(pdb.ObjectMeta))
|
||||
}
|
||||
}
|
||||
|
||||
c.logger.Infof("created missing pod disruption budget %q", util.NameFromMeta(pdb.ObjectMeta))
|
||||
c.PodDisruptionBudget = pdb
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
|
|
@ -270,7 +280,9 @@ func (c *Cluster) syncStatefulSet() error {
|
|||
podsRollingUpdateRequired = (len(pods) > 0)
|
||||
if podsRollingUpdateRequired {
|
||||
c.logger.Warningf("found pods from the previous statefulset: trigger rolling update")
|
||||
c.applyRollingUpdateFlagforStatefulSet(podsRollingUpdateRequired)
|
||||
if err := c.applyRollingUpdateFlagforStatefulSet(podsRollingUpdateRequired); err != nil {
|
||||
return fmt.Errorf("could not set rolling update flag for the statefulset: %v", err)
|
||||
}
|
||||
}
|
||||
c.logger.Infof("created missing statefulset %q", util.NameFromMeta(sset.ObjectMeta))
|
||||
|
||||
|
|
@ -330,6 +342,11 @@ func (c *Cluster) syncStatefulSet() error {
|
|||
// checkAndSetGlobalPostgreSQLConfiguration checks whether cluster-wide API parameters
|
||||
// (like max_connections) has changed and if necessary sets it via the Patroni API
|
||||
func (c *Cluster) checkAndSetGlobalPostgreSQLConfiguration() error {
|
||||
var (
|
||||
err error
|
||||
pods []v1.Pod
|
||||
)
|
||||
|
||||
// we need to extract those options from the cluster manifest.
|
||||
optionsToSet := make(map[string]string)
|
||||
pgOptions := c.Spec.Parameters
|
||||
|
|
@ -340,47 +357,55 @@ func (c *Cluster) checkAndSetGlobalPostgreSQLConfiguration() error {
|
|||
}
|
||||
}
|
||||
|
||||
if len(optionsToSet) > 0 {
|
||||
pods, err := c.listPods()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(pods) == 0 {
|
||||
return fmt.Errorf("could not call Patroni API: cluster has no pods")
|
||||
}
|
||||
for _, pod := range pods {
|
||||
podName := util.NameFromMeta(pod.ObjectMeta)
|
||||
c.logger.Debugf("calling Patroni API on a pod %s to set the following Postgres options: %v",
|
||||
podName, optionsToSet)
|
||||
if err := c.patroni.SetPostgresParameters(&pod, optionsToSet); err == nil {
|
||||
return nil
|
||||
} else {
|
||||
c.logger.Warningf("could not patch postgres parameters with a pod %s: %v", podName, err)
|
||||
}
|
||||
}
|
||||
return fmt.Errorf("could not reach Patroni API to set Postgres options: failed on every pod (%d total)",
|
||||
len(pods))
|
||||
if len(optionsToSet) == 0 {
|
||||
return nil
|
||||
}
|
||||
return nil
|
||||
|
||||
if pods, err = c.listPods(); err != nil {
|
||||
return err
|
||||
}
|
||||
if len(pods) == 0 {
|
||||
return fmt.Errorf("could not call Patroni API: cluster has no pods")
|
||||
}
|
||||
// try all pods until the first one that is successful, as it doesn't matter which pod
|
||||
// carries the request to change configuration through
|
||||
for _, pod := range pods {
|
||||
podName := util.NameFromMeta(pod.ObjectMeta)
|
||||
c.logger.Debugf("calling Patroni API on a pod %s to set the following Postgres options: %v",
|
||||
podName, optionsToSet)
|
||||
if err = c.patroni.SetPostgresParameters(&pod, optionsToSet); err == nil {
|
||||
return nil
|
||||
}
|
||||
c.logger.Warningf("could not patch postgres parameters with a pod %s: %v", podName, err)
|
||||
}
|
||||
return fmt.Errorf("could not reach Patroni API to set Postgres options: failed on every pod (%d total)",
|
||||
len(pods))
|
||||
}
|
||||
|
||||
func (c *Cluster) syncSecrets() error {
|
||||
var (
|
||||
err error
|
||||
secret *v1.Secret
|
||||
)
|
||||
c.setProcessName("syncing secrets")
|
||||
secrets := c.generateUserSecrets()
|
||||
|
||||
for secretUsername, secretSpec := range secrets {
|
||||
secret, err := c.KubeClient.Secrets(secretSpec.Namespace).Create(secretSpec)
|
||||
if secret, err = c.KubeClient.Secrets(secretSpec.Namespace).Create(secretSpec); err == nil {
|
||||
c.Secrets[secret.UID] = secret
|
||||
c.logger.Debugf("created new secret %q, uid: %q", util.NameFromMeta(secret.ObjectMeta), secret.UID)
|
||||
continue
|
||||
}
|
||||
if k8sutil.ResourceAlreadyExists(err) {
|
||||
var userMap map[string]spec.PgUser
|
||||
curSecret, err2 := c.KubeClient.Secrets(secretSpec.Namespace).Get(secretSpec.Name, metav1.GetOptions{})
|
||||
if err2 != nil {
|
||||
return fmt.Errorf("could not get current secret: %v", err2)
|
||||
if secret, err = c.KubeClient.Secrets(secretSpec.Namespace).Get(secretSpec.Name, metav1.GetOptions{}); err != nil {
|
||||
return fmt.Errorf("could not get current secret: %v", err)
|
||||
}
|
||||
if secretUsername != string(curSecret.Data["username"]) {
|
||||
if secretUsername != string(secret.Data["username"]) {
|
||||
c.logger.Warningf("secret %q does not contain the role %q", secretSpec.Name, secretUsername)
|
||||
continue
|
||||
}
|
||||
c.logger.Debugf("secret %q already exists, fetching its password", util.NameFromMeta(curSecret.ObjectMeta))
|
||||
c.logger.Debugf("secret %q already exists, fetching its password", util.NameFromMeta(secret.ObjectMeta))
|
||||
if secretUsername == c.systemUsers[constants.SuperuserKeyName].Name {
|
||||
secretUsername = constants.SuperuserKeyName
|
||||
userMap = c.systemUsers
|
||||
|
|
@ -392,35 +417,28 @@ func (c *Cluster) syncSecrets() error {
|
|||
}
|
||||
pwdUser := userMap[secretUsername]
|
||||
// if this secret belongs to the infrastructure role and the password has changed - replace it in the secret
|
||||
if pwdUser.Password != string(curSecret.Data["password"]) && pwdUser.Origin == spec.RoleOriginInfrastructure {
|
||||
if pwdUser.Password != string(secret.Data["password"]) && pwdUser.Origin == spec.RoleOriginInfrastructure {
|
||||
c.logger.Debugf("updating the secret %q from the infrastructure roles", secretSpec.Name)
|
||||
if _, err := c.KubeClient.Secrets(secretSpec.Namespace).Update(secretSpec); err != nil {
|
||||
if _, err = c.KubeClient.Secrets(secretSpec.Namespace).Update(secretSpec); err != nil {
|
||||
return fmt.Errorf("could not update infrastructure role secret for role %q: %v", secretUsername, err)
|
||||
}
|
||||
} else {
|
||||
// for non-infrastructure role - update the role with the password from the secret
|
||||
pwdUser.Password = string(curSecret.Data["password"])
|
||||
pwdUser.Password = string(secret.Data["password"])
|
||||
userMap[secretUsername] = pwdUser
|
||||
}
|
||||
|
||||
continue
|
||||
} else {
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not create secret for user %q: %v", secretUsername, err)
|
||||
}
|
||||
c.Secrets[secret.UID] = secret
|
||||
c.logger.Debugf("created new secret %q, uid: %q", util.NameFromMeta(secret.ObjectMeta), secret.UID)
|
||||
return fmt.Errorf("could not create secret for user %q: %v", secretUsername, err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Cluster) syncRoles() error {
|
||||
func (c *Cluster) syncRoles() (err error) {
|
||||
c.setProcessName("syncing roles")
|
||||
|
||||
var (
|
||||
err error
|
||||
dbUsers spec.PgUserMap
|
||||
userNames []string
|
||||
)
|
||||
|
|
@ -429,9 +447,14 @@ func (c *Cluster) syncRoles() error {
|
|||
if err != nil {
|
||||
return fmt.Errorf("could not init db connection: %v", err)
|
||||
}
|
||||
|
||||
defer func() {
|
||||
if err := c.closeDbConn(); err != nil {
|
||||
c.logger.Errorf("could not close db connection: %v", err)
|
||||
if err2 := c.closeDbConn(); err2 != nil {
|
||||
if err == nil {
|
||||
err = fmt.Errorf("could not close database connection: %v", err2)
|
||||
} else {
|
||||
err = fmt.Errorf("could not close database connection: %v (prior error: %v)", err2, err)
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
|
|
@ -462,7 +485,7 @@ func (c *Cluster) syncVolumes() error {
|
|||
if !act {
|
||||
return nil
|
||||
}
|
||||
if err := c.resizeVolumes(c.Spec.Volume, []volumes.VolumeResizer{&volumes.EBSVolumeResizer{}}); err != nil {
|
||||
if err := c.resizeVolumes(c.Spec.Volume, []volumes.VolumeResizer{&volumes.EBSVolumeResizer{AWSRegion: c.OpConfig.AWSRegion}}); err != nil {
|
||||
return fmt.Errorf("could not sync volumes: %v", err)
|
||||
}
|
||||
|
||||
|
|
@ -471,15 +494,6 @@ func (c *Cluster) syncVolumes() error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (c *Cluster) samePDBWith(pdb *policybeta1.PodDisruptionBudget) (match bool, reason string) {
|
||||
match = reflect.DeepEqual(pdb.Spec, c.PodDisruptionBudget.Spec)
|
||||
if !match {
|
||||
reason = "new service spec doesn't match the current one"
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (c *Cluster) syncDatabases() error {
|
||||
c.setProcessName("syncing databases")
|
||||
|
||||
|
|
|
|||
|
|
@ -1,5 +1,14 @@
|
|||
package cluster
|
||||
|
||||
import (
|
||||
acidv1 "github.com/zalando-incubator/postgres-operator/pkg/apis/acid.zalan.do/v1"
|
||||
"k8s.io/api/apps/v1beta1"
|
||||
"k8s.io/api/core/v1"
|
||||
policybeta1 "k8s.io/api/policy/v1beta1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"time"
|
||||
)
|
||||
|
||||
// PostgresRole describes role of the node
|
||||
type PostgresRole string
|
||||
|
||||
|
|
@ -10,3 +19,51 @@ const (
|
|||
// Replica role
|
||||
Replica PostgresRole = "replica"
|
||||
)
|
||||
|
||||
type PodEventType string
|
||||
|
||||
// Possible values for the EventType
|
||||
const (
|
||||
PodEventAdd PodEventType = "ADD"
|
||||
PodEventUpdate PodEventType = "UPDATE"
|
||||
PodEventDelete PodEventType = "DELETE"
|
||||
)
|
||||
|
||||
// PodEvent describes the event for a single Pod
|
||||
type PodEvent struct {
|
||||
ResourceVersion string
|
||||
PodName types.NamespacedName
|
||||
PrevPod *v1.Pod
|
||||
CurPod *v1.Pod
|
||||
EventType PodEventType
|
||||
}
|
||||
|
||||
// Process describes process of the cluster
|
||||
type Process struct {
|
||||
Name string
|
||||
StartTime time.Time
|
||||
}
|
||||
|
||||
// WorkerStatus describes status of the worker
|
||||
type WorkerStatus struct {
|
||||
CurrentCluster types.NamespacedName
|
||||
CurrentProcess Process
|
||||
}
|
||||
|
||||
// ClusterStatus describes status of the cluster
|
||||
type ClusterStatus struct {
|
||||
Team string
|
||||
Cluster string
|
||||
MasterService *v1.Service
|
||||
ReplicaService *v1.Service
|
||||
MasterEndpoint *v1.Endpoints
|
||||
ReplicaEndpoint *v1.Endpoints
|
||||
StatefulSet *v1beta1.StatefulSet
|
||||
PodDisruptionBudget *policybeta1.PodDisruptionBudget
|
||||
|
||||
CurrentProcess Process
|
||||
Worker uint32
|
||||
Status acidv1.PostgresStatus
|
||||
Spec acidv1.PostgresSpec
|
||||
Error error
|
||||
}
|
||||
|
|
|
|||
|
|
@ -11,12 +11,14 @@ import (
|
|||
"strings"
|
||||
"time"
|
||||
|
||||
"k8s.io/api/apps/v1beta1"
|
||||
"k8s.io/api/core/v1"
|
||||
policybeta1 "k8s.io/api/policy/v1beta1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/client-go/pkg/api/v1"
|
||||
"k8s.io/client-go/pkg/apis/apps/v1beta1"
|
||||
policybeta1 "k8s.io/client-go/pkg/apis/policy/v1beta1"
|
||||
|
||||
acidzalando "github.com/zalando-incubator/postgres-operator/pkg/apis/acid.zalan.do"
|
||||
acidv1 "github.com/zalando-incubator/postgres-operator/pkg/apis/acid.zalan.do/v1"
|
||||
"github.com/zalando-incubator/postgres-operator/pkg/spec"
|
||||
"github.com/zalando-incubator/postgres-operator/pkg/util"
|
||||
"github.com/zalando-incubator/postgres-operator/pkg/util/constants"
|
||||
|
|
@ -37,7 +39,7 @@ type SecretOauthTokenGetter struct {
|
|||
OAuthTokenSecretName spec.NamespacedName
|
||||
}
|
||||
|
||||
func NewSecretOauthTokenGetter(kubeClient *k8sutil.KubernetesClient,
|
||||
func newSecretOauthTokenGetter(kubeClient *k8sutil.KubernetesClient,
|
||||
OAuthTokenSecretName spec.NamespacedName) *SecretOauthTokenGetter {
|
||||
return &SecretOauthTokenGetter{kubeClient, OAuthTokenSecretName}
|
||||
}
|
||||
|
|
@ -80,7 +82,8 @@ func (c *Cluster) isSystemUsername(username string) bool {
|
|||
|
||||
func isValidFlag(flag string) bool {
|
||||
for _, validFlag := range []string{constants.RoleFlagSuperuser, constants.RoleFlagLogin, constants.RoleFlagCreateDB,
|
||||
constants.RoleFlagInherit, constants.RoleFlagReplication, constants.RoleFlagByPassRLS} {
|
||||
constants.RoleFlagInherit, constants.RoleFlagReplication, constants.RoleFlagByPassRLS,
|
||||
constants.RoleFlagCreateRole} {
|
||||
if flag == validFlag || flag == "NO"+validFlag {
|
||||
return true
|
||||
}
|
||||
|
|
@ -215,17 +218,19 @@ func (c *Cluster) logServiceChanges(role PostgresRole, old, new *v1.Service, isU
|
|||
}
|
||||
}
|
||||
|
||||
func (c *Cluster) logVolumeChanges(old, new spec.Volume) {
|
||||
func (c *Cluster) logVolumeChanges(old, new acidv1.Volume) {
|
||||
c.logger.Infof("volume specification has been changed")
|
||||
c.logger.Debugf("diff\n%s\n", util.PrettyDiff(old, new))
|
||||
}
|
||||
|
||||
func (c *Cluster) getTeamMembers() ([]string, error) {
|
||||
if c.Spec.TeamID == "" {
|
||||
func (c *Cluster) getTeamMembers(teamID string) ([]string, error) {
|
||||
|
||||
if teamID == "" {
|
||||
return nil, fmt.Errorf("no teamId specified")
|
||||
}
|
||||
|
||||
if !c.OpConfig.EnableTeamsAPI {
|
||||
c.logger.Debug("team API is disabled, returning empty list of members")
|
||||
c.logger.Debugf("team API is disabled, returning empty list of members for team %q", teamID)
|
||||
return []string{}, nil
|
||||
}
|
||||
|
||||
|
|
@ -235,16 +240,16 @@ func (c *Cluster) getTeamMembers() ([]string, error) {
|
|||
return []string{}, nil
|
||||
}
|
||||
|
||||
teamInfo, err := c.teamsAPIClient.TeamInfo(c.Spec.TeamID, token)
|
||||
teamInfo, err := c.teamsAPIClient.TeamInfo(teamID, token)
|
||||
if err != nil {
|
||||
c.logger.Warnf("could not get team info, returning empty list of team members: %v", err)
|
||||
c.logger.Warnf("could not get team info for team %q, returning empty list of team members: %v", teamID, err)
|
||||
return []string{}, nil
|
||||
}
|
||||
|
||||
return teamInfo.Members, nil
|
||||
}
|
||||
|
||||
func (c *Cluster) waitForPodLabel(podEvents chan spec.PodEvent, stopChan chan struct{}, role *PostgresRole) (*v1.Pod, error) {
|
||||
func (c *Cluster) waitForPodLabel(podEvents chan PodEvent, stopChan chan struct{}, role *PostgresRole) (*v1.Pod, error) {
|
||||
timeout := time.After(c.OpConfig.PodLabelWaitTimeout)
|
||||
for {
|
||||
select {
|
||||
|
|
@ -266,12 +271,12 @@ func (c *Cluster) waitForPodLabel(podEvents chan spec.PodEvent, stopChan chan st
|
|||
}
|
||||
}
|
||||
|
||||
func (c *Cluster) waitForPodDeletion(podEvents chan spec.PodEvent) error {
|
||||
func (c *Cluster) waitForPodDeletion(podEvents chan PodEvent) error {
|
||||
timeout := time.After(c.OpConfig.PodDeletionWaitTimeout)
|
||||
for {
|
||||
select {
|
||||
case podEvent := <-podEvents:
|
||||
if podEvent.EventType == spec.EventDelete {
|
||||
if podEvent.EventType == PodEventDelete {
|
||||
return nil
|
||||
}
|
||||
case <-timeout:
|
||||
|
|
@ -385,7 +390,7 @@ func (c *Cluster) waitStatefulsetPodsReady() error {
|
|||
}
|
||||
|
||||
// Returns labels used to create or list k8s objects such as pods
|
||||
// For backward compatability, shouldAddExtraLabels must be false
|
||||
// For backward compatibility, shouldAddExtraLabels must be false
|
||||
// when listing k8s objects. See operator PR #252
|
||||
func (c *Cluster) labelsSet(shouldAddExtraLabels bool) labels.Set {
|
||||
lbls := make(map[string]string)
|
||||
|
|
@ -403,7 +408,7 @@ func (c *Cluster) labelsSet(shouldAddExtraLabels bool) labels.Set {
|
|||
}
|
||||
|
||||
func (c *Cluster) labelsSelector() *metav1.LabelSelector {
|
||||
return &metav1.LabelSelector{c.labelsSet(false), nil}
|
||||
return &metav1.LabelSelector{MatchLabels: c.labelsSet(false), MatchExpressions: nil}
|
||||
}
|
||||
|
||||
func (c *Cluster) roleLabelsSet(role PostgresRole) labels.Set {
|
||||
|
|
@ -437,18 +442,18 @@ func (c *Cluster) credentialSecretNameForCluster(username string, clusterName st
|
|||
return c.OpConfig.SecretNameTemplate.Format(
|
||||
"username", strings.Replace(username, "_", "-", -1),
|
||||
"cluster", clusterName,
|
||||
"tprkind", constants.CRDKind,
|
||||
"tprgroup", constants.CRDGroup)
|
||||
"tprkind", acidv1.PostgresCRDResourceKind,
|
||||
"tprgroup", acidzalando.GroupName)
|
||||
}
|
||||
|
||||
func masterCandidate(replicas []spec.NamespacedName) spec.NamespacedName {
|
||||
return replicas[rand.Intn(len(replicas))]
|
||||
}
|
||||
|
||||
func cloneSpec(from *spec.Postgresql) (*spec.Postgresql, error) {
|
||||
func cloneSpec(from *acidv1.Postgresql) (*acidv1.Postgresql, error) {
|
||||
var (
|
||||
buf bytes.Buffer
|
||||
result *spec.Postgresql
|
||||
result *acidv1.Postgresql
|
||||
err error
|
||||
)
|
||||
enc := gob.NewEncoder(&buf)
|
||||
|
|
@ -462,13 +467,13 @@ func cloneSpec(from *spec.Postgresql) (*spec.Postgresql, error) {
|
|||
return result, nil
|
||||
}
|
||||
|
||||
func (c *Cluster) setSpec(newSpec *spec.Postgresql) {
|
||||
func (c *Cluster) setSpec(newSpec *acidv1.Postgresql) {
|
||||
c.specMu.Lock()
|
||||
c.Postgresql = *newSpec
|
||||
c.specMu.Unlock()
|
||||
}
|
||||
|
||||
func (c *Cluster) GetSpec() (*spec.Postgresql, error) {
|
||||
func (c *Cluster) GetSpec() (*acidv1.Postgresql, error) {
|
||||
c.specMu.RLock()
|
||||
defer c.specMu.RUnlock()
|
||||
return cloneSpec(&c.Postgresql)
|
||||
|
|
|
|||
|
|
@ -5,10 +5,11 @@ import (
|
|||
"strconv"
|
||||
"strings"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/client-go/pkg/api/v1"
|
||||
|
||||
acidv1 "github.com/zalando-incubator/postgres-operator/pkg/apis/acid.zalan.do/v1"
|
||||
"github.com/zalando-incubator/postgres-operator/pkg/spec"
|
||||
"github.com/zalando-incubator/postgres-operator/pkg/util"
|
||||
"github.com/zalando-incubator/postgres-operator/pkg/util/constants"
|
||||
|
|
@ -88,10 +89,11 @@ func (c *Cluster) listPersistentVolumes() ([]*v1.PersistentVolume, error) {
|
|||
}
|
||||
|
||||
// resizeVolumes resize persistent volumes compatible with the given resizer interface
|
||||
func (c *Cluster) resizeVolumes(newVolume spec.Volume, resizers []volumes.VolumeResizer) error {
|
||||
func (c *Cluster) resizeVolumes(newVolume acidv1.Volume, resizers []volumes.VolumeResizer) error {
|
||||
c.setProcessName("resizing volumes")
|
||||
|
||||
totalCompatible := 0
|
||||
var totalIncompatible int
|
||||
|
||||
newQuantity, err := resource.ParseQuantity(newVolume.Size)
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not parse volume size: %v", err)
|
||||
|
|
@ -100,7 +102,6 @@ func (c *Cluster) resizeVolumes(newVolume spec.Volume, resizers []volumes.Volume
|
|||
if err != nil {
|
||||
return fmt.Errorf("could not list persistent volumes: %v", err)
|
||||
}
|
||||
|
||||
for _, pv := range pvs {
|
||||
volumeSize := quantityToGigabyte(pv.Spec.Capacity[v1.ResourceStorage])
|
||||
if volumeSize >= newSize {
|
||||
|
|
@ -109,11 +110,12 @@ func (c *Cluster) resizeVolumes(newVolume spec.Volume, resizers []volumes.Volume
|
|||
}
|
||||
continue
|
||||
}
|
||||
compatible := false
|
||||
for _, resizer := range resizers {
|
||||
if !resizer.VolumeBelongsToProvider(pv) {
|
||||
continue
|
||||
}
|
||||
totalCompatible++
|
||||
compatible = true
|
||||
if !resizer.IsConnectedToProvider() {
|
||||
err := resizer.ConnectToProvider()
|
||||
if err != nil {
|
||||
|
|
@ -146,14 +148,18 @@ func (c *Cluster) resizeVolumes(newVolume spec.Volume, resizers []volumes.Volume
|
|||
}
|
||||
c.logger.Debugf("successfully updated persistent volume %q", pv.Name)
|
||||
}
|
||||
if !compatible {
|
||||
c.logger.Warningf("volume %q is incompatible with all available resizing providers", pv.Name)
|
||||
totalIncompatible++
|
||||
}
|
||||
}
|
||||
if len(pvs) > 0 && totalCompatible == 0 {
|
||||
return fmt.Errorf("could not resize EBS volumes: persistent volumes are not compatible with existing resizing providers")
|
||||
if totalIncompatible > 0 {
|
||||
return fmt.Errorf("could not resize EBS volumes: some persistent volumes are not compatible with existing resizing providers")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Cluster) volumesNeedResizing(newVolume spec.Volume) (bool, error) {
|
||||
func (c *Cluster) volumesNeedResizing(newVolume acidv1.Volume) (bool, error) {
|
||||
vols, manifestSize, err := c.listVolumesWithManifestSize(newVolume)
|
||||
if err != nil {
|
||||
return false, err
|
||||
|
|
@ -167,7 +173,7 @@ func (c *Cluster) volumesNeedResizing(newVolume spec.Volume) (bool, error) {
|
|||
return false, nil
|
||||
}
|
||||
|
||||
func (c *Cluster) listVolumesWithManifestSize(newVolume spec.Volume) ([]*v1.PersistentVolume, int64, error) {
|
||||
func (c *Cluster) listVolumesWithManifestSize(newVolume acidv1.Volume) ([]*v1.PersistentVolume, int64, error) {
|
||||
newSize, err := resource.ParseQuantity(newVolume.Size)
|
||||
if err != nil {
|
||||
return nil, 0, fmt.Errorf("could not parse volume size from the manifest: %v", err)
|
||||
|
|
|
|||
|
|
@ -6,10 +6,11 @@ import (
|
|||
"sync"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
"k8s.io/api/core/v1"
|
||||
rbacv1beta1 "k8s.io/api/rbac/v1beta1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/client-go/kubernetes/scheme"
|
||||
"k8s.io/client-go/pkg/api/v1"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
|
||||
"github.com/zalando-incubator/postgres-operator/pkg/apiserver"
|
||||
|
|
@ -20,6 +21,8 @@ import (
|
|||
"github.com/zalando-incubator/postgres-operator/pkg/util/constants"
|
||||
"github.com/zalando-incubator/postgres-operator/pkg/util/k8sutil"
|
||||
"github.com/zalando-incubator/postgres-operator/pkg/util/ringlog"
|
||||
|
||||
acidv1informer "github.com/zalando-incubator/postgres-operator/pkg/generated/informers/externalversions/acid.zalan.do/v1"
|
||||
)
|
||||
|
||||
// Controller represents operator controller
|
||||
|
|
@ -45,14 +48,17 @@ type Controller struct {
|
|||
postgresqlInformer cache.SharedIndexInformer
|
||||
podInformer cache.SharedIndexInformer
|
||||
nodesInformer cache.SharedIndexInformer
|
||||
podCh chan spec.PodEvent
|
||||
podCh chan cluster.PodEvent
|
||||
|
||||
clusterEventQueues []*cache.FIFO // [workerID]Queue
|
||||
lastClusterSyncTime int64
|
||||
clusterEventQueues []*cache.FIFO // [workerID]Queue
|
||||
lastClusterSyncTime int64
|
||||
lastClusterRepairTime int64
|
||||
|
||||
workerLogs map[uint32]ringlog.RingLogger
|
||||
|
||||
PodServiceAccount *v1.ServiceAccount
|
||||
PodServiceAccount *v1.ServiceAccount
|
||||
PodServiceAccountRoleBinding *rbacv1beta1.RoleBinding
|
||||
namespacesWithDefinedRBAC sync.Map
|
||||
}
|
||||
|
||||
// NewController creates a new controller
|
||||
|
|
@ -70,7 +76,7 @@ func NewController(controllerConfig *spec.ControllerConfig) *Controller {
|
|||
clusterHistory: make(map[spec.NamespacedName]ringlog.RingLogger),
|
||||
teamClusters: make(map[string][]spec.NamespacedName),
|
||||
stopCh: make(chan struct{}),
|
||||
podCh: make(chan spec.PodEvent),
|
||||
podCh: make(chan cluster.PodEvent),
|
||||
}
|
||||
logger.Hooks.Add(c)
|
||||
|
||||
|
|
@ -101,23 +107,24 @@ func (c *Controller) initOperatorConfig() {
|
|||
c.logger.Infoln("no ConfigMap specified. Loading default values")
|
||||
}
|
||||
|
||||
configMapData["watched_namespace"] = c.getEffectiveNamespace(os.Getenv("WATCHED_NAMESPACE"), configMapData["watched_namespace"])
|
||||
|
||||
if c.config.NoDatabaseAccess {
|
||||
configMapData["enable_database_access"] = "false"
|
||||
}
|
||||
if c.config.NoTeamsAPI {
|
||||
configMapData["enable_teams_api"] = "false"
|
||||
}
|
||||
|
||||
c.opConfig = config.NewFromMap(configMapData)
|
||||
c.warnOnDeprecatedOperatorParameters()
|
||||
|
||||
}
|
||||
|
||||
func (c *Controller) modifyConfigFromEnvironment() {
|
||||
c.opConfig.WatchedNamespace = c.getEffectiveNamespace(os.Getenv("WATCHED_NAMESPACE"), c.opConfig.WatchedNamespace)
|
||||
|
||||
if c.config.NoDatabaseAccess {
|
||||
c.opConfig.EnableDBAccess = false
|
||||
}
|
||||
if c.config.NoTeamsAPI {
|
||||
c.opConfig.EnableTeamsAPI = false
|
||||
}
|
||||
scalyrAPIKey := os.Getenv("SCALYR_API_KEY")
|
||||
if scalyrAPIKey != "" {
|
||||
c.opConfig.ScalyrAPIKey = scalyrAPIKey
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// warningOnDeprecatedParameters emits warnings upon finding deprecated parmaters
|
||||
|
|
@ -146,9 +153,9 @@ func (c *Controller) initPodServiceAccount() {
|
|||
|
||||
switch {
|
||||
case err != nil:
|
||||
panic(fmt.Errorf("Unable to parse pod service account definiton from the operator config map: %v", err))
|
||||
panic(fmt.Errorf("Unable to parse pod service account definition from the operator config map: %v", err))
|
||||
case groupVersionKind.Kind != "ServiceAccount":
|
||||
panic(fmt.Errorf("pod service account definiton in the operator config map defines another type of resource: %v", groupVersionKind.Kind))
|
||||
panic(fmt.Errorf("pod service account definition in the operator config map defines another type of resource: %v", groupVersionKind.Kind))
|
||||
default:
|
||||
c.PodServiceAccount = obj.(*v1.ServiceAccount)
|
||||
if c.PodServiceAccount.Name != c.opConfig.PodServiceAccountName {
|
||||
|
|
@ -161,22 +168,85 @@ func (c *Controller) initPodServiceAccount() {
|
|||
// actual service accounts are deployed at the time of Postgres/Spilo cluster creation
|
||||
}
|
||||
|
||||
func (c *Controller) initRoleBinding() {
|
||||
|
||||
// service account on its own lacks any rights starting with k8s v1.8
|
||||
// operator binds it to the cluster role with sufficient privileges
|
||||
// we assume the role is created by the k8s administrator
|
||||
if c.opConfig.PodServiceAccountRoleBindingDefinition == "" {
|
||||
c.opConfig.PodServiceAccountRoleBindingDefinition = `
|
||||
{
|
||||
"apiVersion": "rbac.authorization.k8s.io/v1beta1",
|
||||
"kind": "RoleBinding",
|
||||
"metadata": {
|
||||
"name": "zalando-postgres-operator"
|
||||
},
|
||||
"roleRef": {
|
||||
"apiGroup": "rbac.authorization.k8s.io",
|
||||
"kind": "ClusterRole",
|
||||
"name": "zalando-postgres-operator"
|
||||
},
|
||||
"subjects": [
|
||||
{
|
||||
"kind": "ServiceAccount",
|
||||
"name": "operator"
|
||||
}
|
||||
]
|
||||
}`
|
||||
}
|
||||
c.logger.Info("Parse role bindings")
|
||||
// re-uses k8s internal parsing. See k8s client-go issue #193 for explanation
|
||||
decode := scheme.Codecs.UniversalDeserializer().Decode
|
||||
obj, groupVersionKind, err := decode([]byte(c.opConfig.PodServiceAccountRoleBindingDefinition), nil, nil)
|
||||
|
||||
switch {
|
||||
case err != nil:
|
||||
panic(fmt.Errorf("Unable to parse the definition of the role binding for the pod service account definition from the operator config map: %v", err))
|
||||
case groupVersionKind.Kind != "RoleBinding":
|
||||
panic(fmt.Errorf("role binding definition in the operator config map defines another type of resource: %v", groupVersionKind.Kind))
|
||||
default:
|
||||
c.PodServiceAccountRoleBinding = obj.(*rbacv1beta1.RoleBinding)
|
||||
c.PodServiceAccountRoleBinding.Namespace = ""
|
||||
c.PodServiceAccountRoleBinding.Subjects[0].Name = c.PodServiceAccount.Name
|
||||
c.logger.Info("successfully parsed")
|
||||
|
||||
}
|
||||
|
||||
// actual roles bindings are deployed at the time of Postgres/Spilo cluster creation
|
||||
}
|
||||
|
||||
func (c *Controller) initController() {
|
||||
c.initClients()
|
||||
c.initOperatorConfig()
|
||||
|
||||
if configObjectName := os.Getenv("POSTGRES_OPERATOR_CONFIGURATION_OBJECT"); configObjectName != "" {
|
||||
if err := c.createConfigurationCRD(); err != nil {
|
||||
c.logger.Fatalf("could not register Operator Configuration CustomResourceDefinition: %v", err)
|
||||
}
|
||||
if cfg, err := c.readOperatorConfigurationFromCRD(spec.GetOperatorNamespace(), configObjectName); err != nil {
|
||||
c.logger.Fatalf("unable to read operator configuration: %v", err)
|
||||
} else {
|
||||
c.opConfig = c.importConfigurationFromCRD(&cfg.Configuration)
|
||||
}
|
||||
} else {
|
||||
c.initOperatorConfig()
|
||||
}
|
||||
c.initPodServiceAccount()
|
||||
c.initRoleBinding()
|
||||
|
||||
c.modifyConfigFromEnvironment()
|
||||
|
||||
if err := c.createPostgresCRD(); err != nil {
|
||||
c.logger.Fatalf("could not register Postgres CustomResourceDefinition: %v", err)
|
||||
}
|
||||
|
||||
c.initPodServiceAccount()
|
||||
c.initSharedInformers()
|
||||
|
||||
c.logger.Infof("config: %s", c.opConfig.MustMarshal())
|
||||
|
||||
if c.opConfig.DebugLogging {
|
||||
c.logger.Logger.Level = logrus.DebugLevel
|
||||
}
|
||||
|
||||
if err := c.createCRD(); err != nil {
|
||||
c.logger.Fatalf("could not register CustomResourceDefinition: %v", err)
|
||||
}
|
||||
c.logger.Infof("config: %s", c.opConfig.MustMarshal())
|
||||
|
||||
if infraRoles, err := c.getInfrastructureRoles(&c.opConfig.InfrastructureRolesSecretName); err != nil {
|
||||
c.logger.Warningf("could not get infrastructure roles: %v", err)
|
||||
|
|
@ -188,7 +258,7 @@ func (c *Controller) initController() {
|
|||
c.workerLogs = make(map[uint32]ringlog.RingLogger, c.opConfig.Workers)
|
||||
for i := range c.clusterEventQueues {
|
||||
c.clusterEventQueues[i] = cache.NewFIFO(func(obj interface{}) (string, error) {
|
||||
e, ok := obj.(spec.ClusterEvent)
|
||||
e, ok := obj.(ClusterEvent)
|
||||
if !ok {
|
||||
return "", fmt.Errorf("could not cast to ClusterEvent")
|
||||
}
|
||||
|
|
@ -201,13 +271,10 @@ func (c *Controller) initController() {
|
|||
}
|
||||
|
||||
func (c *Controller) initSharedInformers() {
|
||||
// Postgresqls
|
||||
c.postgresqlInformer = cache.NewSharedIndexInformer(
|
||||
&cache.ListWatch{
|
||||
ListFunc: c.clusterListFunc,
|
||||
WatchFunc: c.clusterWatchFunc,
|
||||
},
|
||||
&spec.Postgresql{},
|
||||
|
||||
c.postgresqlInformer = acidv1informer.NewPostgresqlInformer(
|
||||
c.KubeClient.AcidV1ClientSet,
|
||||
c.opConfig.WatchedNamespace,
|
||||
constants.QueueResyncPeriodTPR,
|
||||
cache.Indexers{})
|
||||
|
||||
|
|
@ -258,6 +325,18 @@ func (c *Controller) initSharedInformers() {
|
|||
func (c *Controller) Run(stopCh <-chan struct{}, wg *sync.WaitGroup) {
|
||||
c.initController()
|
||||
|
||||
// start workers reading from the events queue to prevent the initial sync from blocking on it.
|
||||
for i := range c.clusterEventQueues {
|
||||
wg.Add(1)
|
||||
c.workerLogs[uint32(i)] = ringlog.New(c.opConfig.RingLogLines)
|
||||
go c.processClusterEventsQueue(i, stopCh, wg)
|
||||
}
|
||||
|
||||
// populate clusters before starting nodeInformer that relies on it and run the initial sync
|
||||
if err := c.acquireInitialListOfClusters(); err != nil {
|
||||
panic("could not acquire initial list of clusters")
|
||||
}
|
||||
|
||||
wg.Add(5)
|
||||
go c.runPodInformer(stopCh, wg)
|
||||
go c.runPostgresqlInformer(stopCh, wg)
|
||||
|
|
@ -265,12 +344,6 @@ func (c *Controller) Run(stopCh <-chan struct{}, wg *sync.WaitGroup) {
|
|||
go c.apiserver.Run(stopCh, wg)
|
||||
go c.kubeNodesInformer(stopCh, wg)
|
||||
|
||||
for i := range c.clusterEventQueues {
|
||||
wg.Add(1)
|
||||
c.workerLogs[uint32(i)] = ringlog.New(c.opConfig.RingLogLines)
|
||||
go c.processClusterEventsQueue(i, stopCh, wg)
|
||||
}
|
||||
|
||||
c.logger.Info("started working in background")
|
||||
}
|
||||
|
||||
|
|
@ -286,7 +359,7 @@ func (c *Controller) runPostgresqlInformer(stopCh <-chan struct{}, wg *sync.Wait
|
|||
c.postgresqlInformer.Run(stopCh)
|
||||
}
|
||||
|
||||
func queueClusterKey(eventType spec.EventType, uid types.UID) string {
|
||||
func queueClusterKey(eventType EventType, uid types.UID) string {
|
||||
return fmt.Sprintf("%s-%s", eventType, uid)
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -11,10 +11,11 @@ import (
|
|||
"github.com/zalando-incubator/postgres-operator/pkg/spec"
|
||||
"github.com/zalando-incubator/postgres-operator/pkg/util"
|
||||
"github.com/zalando-incubator/postgres-operator/pkg/util/config"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
)
|
||||
|
||||
// ClusterStatus provides status of the cluster
|
||||
func (c *Controller) ClusterStatus(team, namespace, cluster string) (*spec.ClusterStatus, error) {
|
||||
func (c *Controller) ClusterStatus(team, namespace, cluster string) (*cluster.ClusterStatus, error) {
|
||||
|
||||
clusterName := spec.NamespacedName{
|
||||
Namespace: namespace,
|
||||
|
|
@ -196,7 +197,7 @@ func (c *Controller) GetWorkersCnt() uint32 {
|
|||
}
|
||||
|
||||
//WorkerStatus provides status of the worker
|
||||
func (c *Controller) WorkerStatus(workerID uint32) (*spec.WorkerStatus, error) {
|
||||
func (c *Controller) WorkerStatus(workerID uint32) (*cluster.WorkerStatus, error) {
|
||||
obj, ok := c.curWorkerCluster.Load(workerID)
|
||||
if !ok || obj == nil {
|
||||
return nil, nil
|
||||
|
|
@ -207,8 +208,8 @@ func (c *Controller) WorkerStatus(workerID uint32) (*spec.WorkerStatus, error) {
|
|||
return nil, fmt.Errorf("could not cast to Cluster struct")
|
||||
}
|
||||
|
||||
return &spec.WorkerStatus{
|
||||
CurrentCluster: util.NameFromMeta(cl.ObjectMeta),
|
||||
return &cluster.WorkerStatus{
|
||||
CurrentCluster: types.NamespacedName(util.NameFromMeta(cl.ObjectMeta)),
|
||||
CurrentProcess: cl.GetCurrentProcess(),
|
||||
}, nil
|
||||
}
|
||||
|
|
@ -1,11 +1,11 @@
|
|||
package controller
|
||||
|
||||
import (
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/watch"
|
||||
"k8s.io/client-go/pkg/api/v1"
|
||||
|
||||
"github.com/zalando-incubator/postgres-operator/pkg/cluster"
|
||||
"github.com/zalando-incubator/postgres-operator/pkg/util"
|
||||
|
|
|
|||
|
|
@ -4,8 +4,8 @@ import (
|
|||
"testing"
|
||||
|
||||
"github.com/zalando-incubator/postgres-operator/pkg/spec"
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/client-go/pkg/api/v1"
|
||||
)
|
||||
|
||||
const (
|
||||
|
|
|
|||
|
|
@ -0,0 +1,100 @@
|
|||
package controller
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"time"
|
||||
|
||||
acidv1 "github.com/zalando-incubator/postgres-operator/pkg/apis/acid.zalan.do/v1"
|
||||
"github.com/zalando-incubator/postgres-operator/pkg/util/config"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
func (c *Controller) readOperatorConfigurationFromCRD(configObjectNamespace, configObjectName string) (*acidv1.OperatorConfiguration, error) {
|
||||
|
||||
config, err := c.KubeClient.AcidV1ClientSet.AcidV1().OperatorConfigurations(configObjectNamespace).Get(configObjectName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not get operator configuration object %q: %v", configObjectName, err)
|
||||
}
|
||||
|
||||
return config, nil
|
||||
}
|
||||
|
||||
// importConfigurationFromCRD is a transitional function that converts CRD configuration to the one based on the configmap
|
||||
func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigurationData) *config.Config {
|
||||
result := &config.Config{}
|
||||
|
||||
result.EtcdHost = fromCRD.EtcdHost
|
||||
result.DockerImage = fromCRD.DockerImage
|
||||
result.Workers = fromCRD.Workers
|
||||
result.MinInstances = fromCRD.MinInstances
|
||||
result.MaxInstances = fromCRD.MaxInstances
|
||||
result.ResyncPeriod = time.Duration(fromCRD.ResyncPeriod)
|
||||
result.RepairPeriod = time.Duration(fromCRD.RepairPeriod)
|
||||
result.Sidecars = fromCRD.Sidecars
|
||||
|
||||
result.SuperUsername = fromCRD.PostgresUsersConfiguration.SuperUsername
|
||||
result.ReplicationUsername = fromCRD.PostgresUsersConfiguration.ReplicationUsername
|
||||
|
||||
result.PodServiceAccountName = fromCRD.Kubernetes.PodServiceAccountName
|
||||
result.PodServiceAccountDefinition = fromCRD.Kubernetes.PodServiceAccountDefinition
|
||||
result.PodServiceAccountRoleBindingDefinition = fromCRD.Kubernetes.PodServiceAccountRoleBindingDefinition
|
||||
result.PodTerminateGracePeriod = time.Duration(fromCRD.Kubernetes.PodTerminateGracePeriod)
|
||||
result.WatchedNamespace = fromCRD.Kubernetes.WatchedNamespace
|
||||
result.PDBNameFormat = fromCRD.Kubernetes.PDBNameFormat
|
||||
result.SecretNameTemplate = fromCRD.Kubernetes.SecretNameTemplate
|
||||
result.OAuthTokenSecretName = fromCRD.Kubernetes.OAuthTokenSecretName
|
||||
result.InfrastructureRolesSecretName = fromCRD.Kubernetes.InfrastructureRolesSecretName
|
||||
result.PodRoleLabel = fromCRD.Kubernetes.PodRoleLabel
|
||||
result.ClusterLabels = fromCRD.Kubernetes.ClusterLabels
|
||||
result.ClusterNameLabel = fromCRD.Kubernetes.ClusterNameLabel
|
||||
result.NodeReadinessLabel = fromCRD.Kubernetes.NodeReadinessLabel
|
||||
result.PodPriorityClassName = fromCRD.Kubernetes.PodPriorityClassName
|
||||
|
||||
result.DefaultCPURequest = fromCRD.PostgresPodResources.DefaultCPURequest
|
||||
result.DefaultMemoryRequest = fromCRD.PostgresPodResources.DefaultMemoryRequest
|
||||
result.DefaultCPULimit = fromCRD.PostgresPodResources.DefaultCPULimit
|
||||
result.DefaultMemoryLimit = fromCRD.PostgresPodResources.DefaultMemoryLimit
|
||||
|
||||
result.ResourceCheckInterval = time.Duration(fromCRD.Timeouts.ResourceCheckInterval)
|
||||
result.ResourceCheckTimeout = time.Duration(fromCRD.Timeouts.ResourceCheckTimeout)
|
||||
result.PodLabelWaitTimeout = time.Duration(fromCRD.Timeouts.PodLabelWaitTimeout)
|
||||
result.PodDeletionWaitTimeout = time.Duration(fromCRD.Timeouts.PodDeletionWaitTimeout)
|
||||
result.ReadyWaitInterval = time.Duration(fromCRD.Timeouts.ReadyWaitInterval)
|
||||
result.ReadyWaitTimeout = time.Duration(fromCRD.Timeouts.ReadyWaitTimeout)
|
||||
|
||||
result.DbHostedZone = fromCRD.LoadBalancer.DbHostedZone
|
||||
result.EnableMasterLoadBalancer = fromCRD.LoadBalancer.EnableMasterLoadBalancer
|
||||
result.EnableReplicaLoadBalancer = fromCRD.LoadBalancer.EnableReplicaLoadBalancer
|
||||
result.MasterDNSNameFormat = fromCRD.LoadBalancer.MasterDNSNameFormat
|
||||
result.ReplicaDNSNameFormat = fromCRD.LoadBalancer.ReplicaDNSNameFormat
|
||||
|
||||
result.WALES3Bucket = fromCRD.AWSGCP.WALES3Bucket
|
||||
result.AWSRegion = fromCRD.AWSGCP.AWSRegion
|
||||
result.LogS3Bucket = fromCRD.AWSGCP.LogS3Bucket
|
||||
result.KubeIAMRole = fromCRD.AWSGCP.KubeIAMRole
|
||||
|
||||
result.DebugLogging = fromCRD.OperatorDebug.DebugLogging
|
||||
result.EnableDBAccess = fromCRD.OperatorDebug.EnableDBAccess
|
||||
result.EnableTeamsAPI = fromCRD.TeamsAPI.EnableTeamsAPI
|
||||
result.TeamsAPIUrl = fromCRD.TeamsAPI.TeamsAPIUrl
|
||||
result.TeamAPIRoleConfiguration = fromCRD.TeamsAPI.TeamAPIRoleConfiguration
|
||||
result.EnableTeamSuperuser = fromCRD.TeamsAPI.EnableTeamSuperuser
|
||||
result.TeamAdminRole = fromCRD.TeamsAPI.TeamAdminRole
|
||||
result.PamRoleName = fromCRD.TeamsAPI.PamRoleName
|
||||
result.PostgresSuperuserTeams = fromCRD.TeamsAPI.PostgresSuperuserTeams
|
||||
|
||||
result.APIPort = fromCRD.LoggingRESTAPI.APIPort
|
||||
result.RingLogLines = fromCRD.LoggingRESTAPI.RingLogLines
|
||||
result.ClusterHistoryEntries = fromCRD.LoggingRESTAPI.ClusterHistoryEntries
|
||||
|
||||
result.ScalyrAPIKey = fromCRD.Scalyr.ScalyrAPIKey
|
||||
result.ScalyrImage = fromCRD.Scalyr.ScalyrImage
|
||||
result.ScalyrServerURL = fromCRD.Scalyr.ScalyrServerURL
|
||||
result.ScalyrCPURequest = fromCRD.Scalyr.ScalyrCPURequest
|
||||
result.ScalyrMemoryRequest = fromCRD.Scalyr.ScalyrMemoryRequest
|
||||
result.ScalyrCPULimit = fromCRD.Scalyr.ScalyrCPULimit
|
||||
result.ScalyrMemoryLimit = fromCRD.Scalyr.ScalyrMemoryLimit
|
||||
|
||||
return result
|
||||
}
|
||||
|
|
@ -1,13 +1,15 @@
|
|||
package controller
|
||||
|
||||
import (
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/watch"
|
||||
"k8s.io/client-go/pkg/api/v1"
|
||||
|
||||
"github.com/zalando-incubator/postgres-operator/pkg/cluster"
|
||||
"github.com/zalando-incubator/postgres-operator/pkg/spec"
|
||||
"github.com/zalando-incubator/postgres-operator/pkg/util"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
)
|
||||
|
||||
func (c *Controller) podListFunc(options metav1.ListOptions) (runtime.Object, error) {
|
||||
|
|
@ -30,7 +32,7 @@ func (c *Controller) podWatchFunc(options metav1.ListOptions) (watch.Interface,
|
|||
return c.KubeClient.Pods(c.opConfig.WatchedNamespace).Watch(opts)
|
||||
}
|
||||
|
||||
func (c *Controller) dispatchPodEvent(clusterName spec.NamespacedName, event spec.PodEvent) {
|
||||
func (c *Controller) dispatchPodEvent(clusterName spec.NamespacedName, event cluster.PodEvent) {
|
||||
c.clustersMu.RLock()
|
||||
cluster, ok := c.clusters[clusterName]
|
||||
c.clustersMu.RUnlock()
|
||||
|
|
@ -40,19 +42,9 @@ func (c *Controller) dispatchPodEvent(clusterName spec.NamespacedName, event spe
|
|||
}
|
||||
|
||||
func (c *Controller) podAdd(obj interface{}) {
|
||||
pod, ok := obj.(*v1.Pod)
|
||||
if !ok {
|
||||
return
|
||||
if pod, ok := obj.(*v1.Pod); ok {
|
||||
c.preparePodEventForDispatch(pod, nil, cluster.PodEventAdd)
|
||||
}
|
||||
|
||||
podEvent := spec.PodEvent{
|
||||
PodName: util.NameFromMeta(pod.ObjectMeta),
|
||||
CurPod: pod,
|
||||
EventType: spec.EventAdd,
|
||||
ResourceVersion: pod.ResourceVersion,
|
||||
}
|
||||
|
||||
c.dispatchPodEvent(c.podClusterName(pod), podEvent)
|
||||
}
|
||||
|
||||
func (c *Controller) podUpdate(prev, cur interface{}) {
|
||||
|
|
@ -66,29 +58,24 @@ func (c *Controller) podUpdate(prev, cur interface{}) {
|
|||
return
|
||||
}
|
||||
|
||||
podEvent := spec.PodEvent{
|
||||
PodName: util.NameFromMeta(curPod.ObjectMeta),
|
||||
PrevPod: prevPod,
|
||||
c.preparePodEventForDispatch(curPod, prevPod, cluster.PodEventUpdate)
|
||||
}
|
||||
|
||||
func (c *Controller) podDelete(obj interface{}) {
|
||||
|
||||
if pod, ok := obj.(*v1.Pod); ok {
|
||||
c.preparePodEventForDispatch(pod, nil, cluster.PodEventDelete)
|
||||
}
|
||||
}
|
||||
|
||||
func (c *Controller) preparePodEventForDispatch(curPod, prevPod *v1.Pod, event cluster.PodEventType) {
|
||||
podEvent := cluster.PodEvent{
|
||||
PodName: types.NamespacedName(util.NameFromMeta(curPod.ObjectMeta)),
|
||||
CurPod: curPod,
|
||||
EventType: spec.EventUpdate,
|
||||
PrevPod: prevPod,
|
||||
EventType: event,
|
||||
ResourceVersion: curPod.ResourceVersion,
|
||||
}
|
||||
|
||||
c.dispatchPodEvent(c.podClusterName(curPod), podEvent)
|
||||
}
|
||||
|
||||
func (c *Controller) podDelete(obj interface{}) {
|
||||
pod, ok := obj.(*v1.Pod)
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
|
||||
podEvent := spec.PodEvent{
|
||||
PodName: util.NameFromMeta(pod.ObjectMeta),
|
||||
CurPod: pod,
|
||||
EventType: spec.EventDelete,
|
||||
ResourceVersion: pod.ResourceVersion,
|
||||
}
|
||||
|
||||
c.dispatchPodEvent(c.podClusterName(pod), podEvent)
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,7 +1,6 @@
|
|||
package controller
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
|
|
@ -10,16 +9,16 @@ import (
|
|||
"time"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apimachinery/pkg/watch"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
|
||||
acidv1 "github.com/zalando-incubator/postgres-operator/pkg/apis/acid.zalan.do/v1"
|
||||
"github.com/zalando-incubator/postgres-operator/pkg/cluster"
|
||||
"github.com/zalando-incubator/postgres-operator/pkg/spec"
|
||||
"github.com/zalando-incubator/postgres-operator/pkg/util"
|
||||
"github.com/zalando-incubator/postgres-operator/pkg/util/constants"
|
||||
"github.com/zalando-incubator/postgres-operator/pkg/util/k8sutil"
|
||||
"github.com/zalando-incubator/postgres-operator/pkg/util/ringlog"
|
||||
)
|
||||
|
||||
|
|
@ -30,7 +29,7 @@ func (c *Controller) clusterResync(stopCh <-chan struct{}, wg *sync.WaitGroup) {
|
|||
for {
|
||||
select {
|
||||
case <-ticker.C:
|
||||
if _, err := c.clusterListFunc(metav1.ListOptions{ResourceVersion: "0"}); err != nil {
|
||||
if err := c.clusterListAndSync(); err != nil {
|
||||
c.logger.Errorf("could not list clusters: %v", err)
|
||||
}
|
||||
case <-stopCh:
|
||||
|
|
@ -39,40 +38,64 @@ func (c *Controller) clusterResync(stopCh <-chan struct{}, wg *sync.WaitGroup) {
|
|||
}
|
||||
}
|
||||
|
||||
// TODO: make a separate function to be called from InitSharedInformers
|
||||
// clusterListFunc obtains a list of all PostgreSQL clusters and runs sync when necessary
|
||||
func (c *Controller) clusterListFunc(options metav1.ListOptions) (runtime.Object, error) {
|
||||
var list spec.PostgresqlList
|
||||
var activeClustersCnt, failedClustersCnt int
|
||||
|
||||
req := c.KubeClient.CRDREST.
|
||||
Get().
|
||||
Namespace(c.opConfig.WatchedNamespace).
|
||||
Resource(constants.CRDResource).
|
||||
VersionedParams(&options, metav1.ParameterCodec)
|
||||
|
||||
b, err := req.DoRaw()
|
||||
// clusterListFunc obtains a list of all PostgreSQL clusters
|
||||
func (c *Controller) listClusters(options metav1.ListOptions) (*acidv1.PostgresqlList, error) {
|
||||
// TODO: use the SharedInformer cache instead of quering Kubernetes API directly.
|
||||
list, err := c.KubeClient.AcidV1ClientSet.AcidV1().Postgresqls(c.opConfig.WatchedNamespace).List(options)
|
||||
if err != nil {
|
||||
c.logger.Errorf("could not get the list of postgresql CRD objects: %v", err)
|
||||
return nil, err
|
||||
}
|
||||
if err = json.Unmarshal(b, &list); err != nil {
|
||||
c.logger.Warningf("could not unmarshal list of clusters: %v", err)
|
||||
c.logger.Errorf("could not list postgresql objects: %v", err)
|
||||
}
|
||||
return list, err
|
||||
}
|
||||
|
||||
timeFromPreviousSync := time.Now().Unix() - atomic.LoadInt64(&c.lastClusterSyncTime)
|
||||
if timeFromPreviousSync < int64(c.opConfig.ResyncPeriod.Seconds()) {
|
||||
c.logger.Infof("not running SYNC, previous sync happened %d seconds ago", timeFromPreviousSync)
|
||||
return &list, err
|
||||
}
|
||||
// clusterListAndSync lists all manifests and decides whether to run the sync or repair.
|
||||
func (c *Controller) clusterListAndSync() error {
|
||||
var (
|
||||
err error
|
||||
event EventType
|
||||
)
|
||||
|
||||
currentTime := time.Now().Unix()
|
||||
timeFromPreviousSync := currentTime - atomic.LoadInt64(&c.lastClusterSyncTime)
|
||||
timeFromPreviousRepair := currentTime - atomic.LoadInt64(&c.lastClusterRepairTime)
|
||||
|
||||
if timeFromPreviousSync >= int64(c.opConfig.ResyncPeriod.Seconds()) {
|
||||
event = EventSync
|
||||
} else if timeFromPreviousRepair >= int64(c.opConfig.RepairPeriod.Seconds()) {
|
||||
event = EventRepair
|
||||
}
|
||||
if event != "" {
|
||||
var list *acidv1.PostgresqlList
|
||||
if list, err = c.listClusters(metav1.ListOptions{ResourceVersion: "0"}); err != nil {
|
||||
return err
|
||||
}
|
||||
c.queueEvents(list, event)
|
||||
} else {
|
||||
c.logger.Infof("not enough time passed since the last sync (%s seconds) or repair (%s seconds)",
|
||||
timeFromPreviousSync, timeFromPreviousRepair)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// queueEvents queues a sync or repair event for every cluster with a valid manifest
|
||||
func (c *Controller) queueEvents(list *acidv1.PostgresqlList, event EventType) {
|
||||
var activeClustersCnt, failedClustersCnt, clustersToRepair int
|
||||
for i, pg := range list.Items {
|
||||
if pg.Error != nil {
|
||||
// XXX: check the cluster status field instead
|
||||
if pg.Error != "" {
|
||||
failedClustersCnt++
|
||||
continue
|
||||
}
|
||||
c.queueClusterEvent(nil, &list.Items[i], spec.EventSync)
|
||||
activeClustersCnt++
|
||||
// check if that cluster needs repair
|
||||
if event == EventRepair {
|
||||
if pg.Status.Success() {
|
||||
continue
|
||||
} else {
|
||||
clustersToRepair++
|
||||
}
|
||||
}
|
||||
c.queueClusterEvent(nil, &list.Items[i], event)
|
||||
}
|
||||
if len(list.Items) > 0 {
|
||||
if failedClustersCnt > 0 && activeClustersCnt == 0 {
|
||||
|
|
@ -82,57 +105,46 @@ func (c *Controller) clusterListFunc(options metav1.ListOptions) (runtime.Object
|
|||
} else {
|
||||
c.logger.Infof("there are %d clusters running and %d are in the failed state", activeClustersCnt, failedClustersCnt)
|
||||
}
|
||||
if clustersToRepair > 0 {
|
||||
c.logger.Infof("%d clusters are scheduled for a repair scan", clustersToRepair)
|
||||
}
|
||||
} else {
|
||||
c.logger.Infof("no clusters running")
|
||||
}
|
||||
|
||||
atomic.StoreInt64(&c.lastClusterSyncTime, time.Now().Unix())
|
||||
|
||||
return &list, err
|
||||
}
|
||||
|
||||
type crdDecoder struct {
|
||||
dec *json.Decoder
|
||||
close func() error
|
||||
}
|
||||
|
||||
func (d *crdDecoder) Close() {
|
||||
d.close()
|
||||
}
|
||||
|
||||
func (d *crdDecoder) Decode() (action watch.EventType, object runtime.Object, err error) {
|
||||
var e struct {
|
||||
Type watch.EventType
|
||||
Object spec.Postgresql
|
||||
if event == EventRepair || event == EventSync {
|
||||
atomic.StoreInt64(&c.lastClusterRepairTime, time.Now().Unix())
|
||||
if event == EventSync {
|
||||
atomic.StoreInt64(&c.lastClusterSyncTime, time.Now().Unix())
|
||||
}
|
||||
}
|
||||
if err := d.dec.Decode(&e); err != nil {
|
||||
return watch.Error, nil, err
|
||||
}
|
||||
|
||||
return e.Type, &e.Object, nil
|
||||
}
|
||||
|
||||
func (c *Controller) clusterWatchFunc(options metav1.ListOptions) (watch.Interface, error) {
|
||||
options.Watch = true
|
||||
r, err := c.KubeClient.CRDREST.
|
||||
Get().
|
||||
Namespace(c.opConfig.WatchedNamespace).
|
||||
Resource(constants.CRDResource).
|
||||
VersionedParams(&options, metav1.ParameterCodec).
|
||||
FieldsSelectorParam(nil).
|
||||
Stream()
|
||||
func (c *Controller) acquireInitialListOfClusters() error {
|
||||
var (
|
||||
list *acidv1.PostgresqlList
|
||||
err error
|
||||
clusterName spec.NamespacedName
|
||||
)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
if list, err = c.listClusters(metav1.ListOptions{ResourceVersion: "0"}); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return watch.NewStreamWatcher(&crdDecoder{
|
||||
dec: json.NewDecoder(r),
|
||||
close: r.Close,
|
||||
}), nil
|
||||
c.logger.Debugf("acquiring initial list of clusters")
|
||||
for _, pg := range list.Items {
|
||||
// XXX: check the cluster status field instead
|
||||
if pg.Error != "" {
|
||||
continue
|
||||
}
|
||||
clusterName = util.NameFromMeta(pg.ObjectMeta)
|
||||
c.addCluster(c.logger, clusterName, &pg)
|
||||
c.logger.Debugf("added new cluster: %q", clusterName)
|
||||
}
|
||||
// initiate initial sync of all clusters.
|
||||
c.queueEvents(list, EventSync)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Controller) addCluster(lg *logrus.Entry, clusterName spec.NamespacedName, pgSpec *spec.Postgresql) *cluster.Cluster {
|
||||
func (c *Controller) addCluster(lg *logrus.Entry, clusterName spec.NamespacedName, pgSpec *acidv1.Postgresql) *cluster.Cluster {
|
||||
cl := cluster.New(c.makeClusterConfig(), c.KubeClient, *pgSpec, lg)
|
||||
cl.Run(c.stopCh)
|
||||
teamName := strings.ToLower(cl.Spec.TeamID)
|
||||
|
|
@ -148,13 +160,13 @@ func (c *Controller) addCluster(lg *logrus.Entry, clusterName spec.NamespacedNam
|
|||
return cl
|
||||
}
|
||||
|
||||
func (c *Controller) processEvent(event spec.ClusterEvent) {
|
||||
func (c *Controller) processEvent(event ClusterEvent) {
|
||||
var clusterName spec.NamespacedName
|
||||
var clHistory ringlog.RingLogger
|
||||
|
||||
lg := c.logger.WithField("worker", event.WorkerID)
|
||||
|
||||
if event.EventType == spec.EventAdd || event.EventType == spec.EventSync {
|
||||
if event.EventType == EventAdd || event.EventType == EventSync || event.EventType == EventRepair {
|
||||
clusterName = util.NameFromMeta(event.NewSpec.ObjectMeta)
|
||||
} else {
|
||||
clusterName = util.NameFromMeta(event.OldSpec.ObjectMeta)
|
||||
|
|
@ -170,7 +182,17 @@ func (c *Controller) processEvent(event spec.ClusterEvent) {
|
|||
|
||||
defer c.curWorkerCluster.Store(event.WorkerID, nil)
|
||||
|
||||
if event.EventType == spec.EventAdd || event.EventType == spec.EventUpdate || event.EventType == spec.EventSync {
|
||||
if event.EventType == EventRepair {
|
||||
runRepair, lastOperationStatus := cl.NeedsRepair()
|
||||
if !runRepair {
|
||||
lg.Debugf("Observed cluster status %s, repair is not required", lastOperationStatus)
|
||||
return
|
||||
}
|
||||
lg.Debugf("Observed cluster status %s, running sync scan to repair the cluster", lastOperationStatus)
|
||||
event.EventType = EventSync
|
||||
}
|
||||
|
||||
if event.EventType == EventAdd || event.EventType == EventUpdate || event.EventType == EventSync {
|
||||
// handle deprecated parameters by possibly assigning their values to the new ones.
|
||||
if event.OldSpec != nil {
|
||||
c.mergeDeprecatedPostgreSQLSpecParameters(&event.OldSpec.Spec)
|
||||
|
|
@ -179,10 +201,15 @@ func (c *Controller) processEvent(event spec.ClusterEvent) {
|
|||
c.warnOnDeprecatedPostgreSQLSpecParameters(&event.NewSpec.Spec)
|
||||
c.mergeDeprecatedPostgreSQLSpecParameters(&event.NewSpec.Spec)
|
||||
}
|
||||
|
||||
if err := c.submitRBACCredentials(event); err != nil {
|
||||
c.logger.Warnf("Pods and/or Patroni may misfunction due to the lack of permissions: %v", err)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
switch event.EventType {
|
||||
case spec.EventAdd:
|
||||
case EventAdd:
|
||||
if clusterFound {
|
||||
lg.Debugf("cluster already exists")
|
||||
return
|
||||
|
|
@ -195,14 +222,14 @@ func (c *Controller) processEvent(event spec.ClusterEvent) {
|
|||
c.curWorkerCluster.Store(event.WorkerID, cl)
|
||||
|
||||
if err := cl.Create(); err != nil {
|
||||
cl.Error = fmt.Errorf("could not create cluster: %v", err)
|
||||
cl.Error = fmt.Sprintf("could not create cluster: %v", err)
|
||||
lg.Error(cl.Error)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
lg.Infoln("cluster has been created")
|
||||
case spec.EventUpdate:
|
||||
case EventUpdate:
|
||||
lg.Infoln("update of the cluster started")
|
||||
|
||||
if !clusterFound {
|
||||
|
|
@ -211,12 +238,12 @@ func (c *Controller) processEvent(event spec.ClusterEvent) {
|
|||
}
|
||||
c.curWorkerCluster.Store(event.WorkerID, cl)
|
||||
if err := cl.Update(event.OldSpec, event.NewSpec); err != nil {
|
||||
cl.Error = fmt.Errorf("could not update cluster: %v", err)
|
||||
cl.Error = fmt.Sprintf("could not update cluster: %v", err)
|
||||
lg.Error(cl.Error)
|
||||
|
||||
return
|
||||
}
|
||||
cl.Error = nil
|
||||
cl.Error = ""
|
||||
lg.Infoln("cluster has been updated")
|
||||
|
||||
clHistory.Insert(&spec.Diff{
|
||||
|
|
@ -224,7 +251,7 @@ func (c *Controller) processEvent(event spec.ClusterEvent) {
|
|||
ProcessTime: time.Now(),
|
||||
Diff: util.Diff(event.OldSpec, event.NewSpec),
|
||||
})
|
||||
case spec.EventDelete:
|
||||
case EventDelete:
|
||||
if !clusterFound {
|
||||
lg.Errorf("unknown cluster: %q", clusterName)
|
||||
return
|
||||
|
|
@ -254,7 +281,7 @@ func (c *Controller) processEvent(event spec.ClusterEvent) {
|
|||
}()
|
||||
|
||||
lg.Infof("cluster has been deleted")
|
||||
case spec.EventSync:
|
||||
case EventSync:
|
||||
lg.Infof("syncing of the cluster started")
|
||||
|
||||
// no race condition because a cluster is always processed by single worker
|
||||
|
|
@ -264,11 +291,11 @@ func (c *Controller) processEvent(event spec.ClusterEvent) {
|
|||
|
||||
c.curWorkerCluster.Store(event.WorkerID, cl)
|
||||
if err := cl.Sync(event.NewSpec); err != nil {
|
||||
cl.Error = fmt.Errorf("could not sync cluster: %v", err)
|
||||
cl.Error = fmt.Sprintf("could not sync cluster: %v", err)
|
||||
lg.Error(cl.Error)
|
||||
return
|
||||
}
|
||||
cl.Error = nil
|
||||
cl.Error = ""
|
||||
|
||||
lg.Infof("cluster has been synced")
|
||||
}
|
||||
|
|
@ -291,7 +318,7 @@ func (c *Controller) processClusterEventsQueue(idx int, stopCh <-chan struct{},
|
|||
c.logger.Errorf("error when processing cluster events queue: %v", err)
|
||||
continue
|
||||
}
|
||||
event, ok := obj.(spec.ClusterEvent)
|
||||
event, ok := obj.(ClusterEvent)
|
||||
if !ok {
|
||||
c.logger.Errorf("could not cast to ClusterEvent")
|
||||
}
|
||||
|
|
@ -300,7 +327,7 @@ func (c *Controller) processClusterEventsQueue(idx int, stopCh <-chan struct{},
|
|||
}
|
||||
}
|
||||
|
||||
func (c *Controller) warnOnDeprecatedPostgreSQLSpecParameters(spec *spec.PostgresSpec) {
|
||||
func (c *Controller) warnOnDeprecatedPostgreSQLSpecParameters(spec *acidv1.PostgresSpec) {
|
||||
|
||||
deprecate := func(deprecated, replacement string) {
|
||||
c.logger.Warningf("Parameter %q is deprecated. Consider setting %q instead", deprecated, replacement)
|
||||
|
|
@ -330,7 +357,7 @@ func (c *Controller) warnOnDeprecatedPostgreSQLSpecParameters(spec *spec.Postgre
|
|||
// mergeDeprecatedPostgreSQLSpecParameters modifies the spec passed to the cluster by setting current parameter
|
||||
// values from the obsolete ones. Note: while the spec that is modified is a copy made in queueClusterEvent, it is
|
||||
// still a shallow copy, so be extra careful not to modify values pointer fields point to, but copy them instead.
|
||||
func (c *Controller) mergeDeprecatedPostgreSQLSpecParameters(spec *spec.PostgresSpec) *spec.PostgresSpec {
|
||||
func (c *Controller) mergeDeprecatedPostgreSQLSpecParameters(spec *acidv1.PostgresSpec) *acidv1.PostgresSpec {
|
||||
if (spec.UseLoadBalancer != nil || spec.ReplicaLoadBalancer != nil) &&
|
||||
(spec.EnableReplicaLoadBalancer == nil && spec.EnableMasterLoadBalancer == nil) {
|
||||
if spec.UseLoadBalancer != nil {
|
||||
|
|
@ -348,18 +375,18 @@ func (c *Controller) mergeDeprecatedPostgreSQLSpecParameters(spec *spec.Postgres
|
|||
return spec
|
||||
}
|
||||
|
||||
func (c *Controller) queueClusterEvent(informerOldSpec, informerNewSpec *spec.Postgresql, eventType spec.EventType) {
|
||||
func (c *Controller) queueClusterEvent(informerOldSpec, informerNewSpec *acidv1.Postgresql, eventType EventType) {
|
||||
var (
|
||||
uid types.UID
|
||||
clusterName spec.NamespacedName
|
||||
clusterError error
|
||||
clusterError string
|
||||
)
|
||||
|
||||
if informerOldSpec != nil { //update, delete
|
||||
uid = informerOldSpec.GetUID()
|
||||
clusterName = util.NameFromMeta(informerOldSpec.ObjectMeta)
|
||||
if eventType == spec.EventUpdate && informerNewSpec.Error == nil && informerOldSpec.Error != nil {
|
||||
eventType = spec.EventSync
|
||||
if eventType == EventUpdate && informerNewSpec.Error == "" && informerOldSpec.Error != "" {
|
||||
eventType = EventSync
|
||||
clusterError = informerNewSpec.Error
|
||||
} else {
|
||||
clusterError = informerOldSpec.Error
|
||||
|
|
@ -370,10 +397,10 @@ func (c *Controller) queueClusterEvent(informerOldSpec, informerNewSpec *spec.Po
|
|||
clusterError = informerNewSpec.Error
|
||||
}
|
||||
|
||||
if clusterError != nil && eventType != spec.EventDelete {
|
||||
if clusterError != "" && eventType != EventDelete {
|
||||
c.logger.
|
||||
WithField("cluster-name", clusterName).
|
||||
Debugf("skipping %q event for the invalid cluster: %v", eventType, clusterError)
|
||||
Debugf("skipping %q event for the invalid cluster: %s", eventType, clusterError)
|
||||
return
|
||||
}
|
||||
|
||||
|
|
@ -382,7 +409,7 @@ func (c *Controller) queueClusterEvent(informerOldSpec, informerNewSpec *spec.Po
|
|||
// effect, the modified state will be returned together with subsequent events).
|
||||
|
||||
workerID := c.clusterWorkerID(clusterName)
|
||||
clusterEvent := spec.ClusterEvent{
|
||||
clusterEvent := ClusterEvent{
|
||||
EventTime: time.Now(),
|
||||
EventType: eventType,
|
||||
UID: uid,
|
||||
|
|
@ -397,11 +424,11 @@ func (c *Controller) queueClusterEvent(informerOldSpec, informerNewSpec *spec.Po
|
|||
}
|
||||
lg.Infof("%q event has been queued", eventType)
|
||||
|
||||
if eventType != spec.EventDelete {
|
||||
if eventType != EventDelete {
|
||||
return
|
||||
}
|
||||
|
||||
for _, evType := range []spec.EventType{spec.EventAdd, spec.EventSync, spec.EventUpdate} {
|
||||
// A delete event discards all prior requests for that cluster.
|
||||
for _, evType := range []EventType{EventAdd, EventSync, EventUpdate, EventRepair} {
|
||||
obj, exists, err := c.clusterEventQueues[workerID].GetByKey(queueClusterKey(evType, uid))
|
||||
if err != nil {
|
||||
lg.Warningf("could not get event from the queue: %v", err)
|
||||
|
|
@ -422,38 +449,113 @@ func (c *Controller) queueClusterEvent(informerOldSpec, informerNewSpec *spec.Po
|
|||
}
|
||||
|
||||
func (c *Controller) postgresqlAdd(obj interface{}) {
|
||||
pg, ok := obj.(*spec.Postgresql)
|
||||
pg, ok := obj.(*acidv1.Postgresql)
|
||||
if !ok {
|
||||
c.logger.Errorf("could not cast to postgresql spec")
|
||||
return
|
||||
}
|
||||
|
||||
// We will not get multiple Add events for the same cluster
|
||||
c.queueClusterEvent(nil, pg, spec.EventAdd)
|
||||
c.queueClusterEvent(nil, pg, EventAdd)
|
||||
}
|
||||
|
||||
func (c *Controller) postgresqlUpdate(prev, cur interface{}) {
|
||||
pgOld, ok := prev.(*spec.Postgresql)
|
||||
pgOld, ok := prev.(*acidv1.Postgresql)
|
||||
if !ok {
|
||||
c.logger.Errorf("could not cast to postgresql spec")
|
||||
}
|
||||
pgNew, ok := cur.(*spec.Postgresql)
|
||||
pgNew, ok := cur.(*acidv1.Postgresql)
|
||||
if !ok {
|
||||
c.logger.Errorf("could not cast to postgresql spec")
|
||||
}
|
||||
// Avoid the inifinite recursion for status updates
|
||||
if reflect.DeepEqual(pgOld.Spec, pgNew.Spec) {
|
||||
return
|
||||
}
|
||||
|
||||
c.queueClusterEvent(pgOld, pgNew, spec.EventUpdate)
|
||||
c.queueClusterEvent(pgOld, pgNew, EventUpdate)
|
||||
}
|
||||
|
||||
func (c *Controller) postgresqlDelete(obj interface{}) {
|
||||
pg, ok := obj.(*spec.Postgresql)
|
||||
pg, ok := obj.(*acidv1.Postgresql)
|
||||
if !ok {
|
||||
c.logger.Errorf("could not cast to postgresql spec")
|
||||
return
|
||||
}
|
||||
|
||||
c.queueClusterEvent(pg, nil, spec.EventDelete)
|
||||
c.queueClusterEvent(pg, nil, EventDelete)
|
||||
}
|
||||
|
||||
/*
|
||||
Ensures the pod service account and role bindings exists in a namespace before a PG cluster is created there so that a user does not have to deploy these credentials manually.
|
||||
StatefulSets require the service account to create pods; Patroni requires relevant RBAC bindings to access endpoints.
|
||||
|
||||
The operator does not sync accounts/role bindings after creation.
|
||||
*/
|
||||
func (c *Controller) submitRBACCredentials(event ClusterEvent) error {
|
||||
|
||||
namespace := event.NewSpec.GetNamespace()
|
||||
if _, ok := c.namespacesWithDefinedRBAC.Load(namespace); ok {
|
||||
return nil
|
||||
}
|
||||
|
||||
if err := c.createPodServiceAccount(namespace); err != nil {
|
||||
return fmt.Errorf("could not create pod service account %v : %v", c.opConfig.PodServiceAccountName, err)
|
||||
}
|
||||
|
||||
if err := c.createRoleBindings(namespace); err != nil {
|
||||
return fmt.Errorf("could not create role binding %v : %v", c.PodServiceAccountRoleBinding.Name, err)
|
||||
}
|
||||
c.namespacesWithDefinedRBAC.Store(namespace, true)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Controller) createPodServiceAccount(namespace string) error {
|
||||
|
||||
podServiceAccountName := c.opConfig.PodServiceAccountName
|
||||
_, err := c.KubeClient.ServiceAccounts(namespace).Get(podServiceAccountName, metav1.GetOptions{})
|
||||
if k8sutil.ResourceNotFound(err) {
|
||||
|
||||
c.logger.Infof(fmt.Sprintf("creating pod service account in the namespace %v", namespace))
|
||||
|
||||
// get a separate copy of service account
|
||||
// to prevent a race condition when setting a namespace for many clusters
|
||||
sa := *c.PodServiceAccount
|
||||
if _, err = c.KubeClient.ServiceAccounts(namespace).Create(&sa); err != nil {
|
||||
return fmt.Errorf("cannot deploy the pod service account %v defined in the config map to the %v namespace: %v", podServiceAccountName, namespace, err)
|
||||
}
|
||||
|
||||
c.logger.Infof("successfully deployed the pod service account %v to the %v namespace", podServiceAccountName, namespace)
|
||||
} else if k8sutil.ResourceAlreadyExists(err) {
|
||||
return nil
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func (c *Controller) createRoleBindings(namespace string) error {
|
||||
|
||||
podServiceAccountName := c.opConfig.PodServiceAccountName
|
||||
podServiceAccountRoleBindingName := c.PodServiceAccountRoleBinding.Name
|
||||
|
||||
_, err := c.KubeClient.RoleBindings(namespace).Get(podServiceAccountRoleBindingName, metav1.GetOptions{})
|
||||
if k8sutil.ResourceNotFound(err) {
|
||||
|
||||
c.logger.Infof("Creating the role binding %v in the namespace %v", podServiceAccountRoleBindingName, namespace)
|
||||
|
||||
// get a separate copy of role binding
|
||||
// to prevent a race condition when setting a namespace for many clusters
|
||||
rb := *c.PodServiceAccountRoleBinding
|
||||
_, err = c.KubeClient.RoleBindings(namespace).Create(&rb)
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot bind the pod service account %q defined in the config map to the cluster role in the %q namespace: %v", podServiceAccountName, namespace, err)
|
||||
}
|
||||
|
||||
c.logger.Infof("successfully deployed the role binding for the pod service account %q to the %q namespace", podServiceAccountName, namespace)
|
||||
|
||||
} else if k8sutil.ResourceAlreadyExists(err) {
|
||||
return nil
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,14 +1,15 @@
|
|||
package controller
|
||||
|
||||
import (
|
||||
acidv1 "github.com/zalando-incubator/postgres-operator/pkg/apis/acid.zalan.do/v1"
|
||||
"github.com/zalando-incubator/postgres-operator/pkg/spec"
|
||||
"reflect"
|
||||
"testing"
|
||||
)
|
||||
|
||||
var (
|
||||
True bool = true
|
||||
False bool = false
|
||||
True = true
|
||||
False = false
|
||||
)
|
||||
|
||||
func TestMergeDeprecatedPostgreSQLSpecParameters(t *testing.T) {
|
||||
|
|
@ -16,21 +17,21 @@ func TestMergeDeprecatedPostgreSQLSpecParameters(t *testing.T) {
|
|||
|
||||
tests := []struct {
|
||||
name string
|
||||
in *spec.PostgresSpec
|
||||
out *spec.PostgresSpec
|
||||
in *acidv1.PostgresSpec
|
||||
out *acidv1.PostgresSpec
|
||||
error string
|
||||
}{
|
||||
{
|
||||
"Check that old parameters propagate values to the new ones",
|
||||
&spec.PostgresSpec{UseLoadBalancer: &True, ReplicaLoadBalancer: &True},
|
||||
&spec.PostgresSpec{UseLoadBalancer: nil, ReplicaLoadBalancer: nil,
|
||||
&acidv1.PostgresSpec{UseLoadBalancer: &True, ReplicaLoadBalancer: &True},
|
||||
&acidv1.PostgresSpec{UseLoadBalancer: nil, ReplicaLoadBalancer: nil,
|
||||
EnableMasterLoadBalancer: &True, EnableReplicaLoadBalancer: &True},
|
||||
"New parameters should be set from the values of old ones",
|
||||
},
|
||||
{
|
||||
"Check that new parameters are not set when both old and new ones are present",
|
||||
&spec.PostgresSpec{UseLoadBalancer: &True, EnableMasterLoadBalancer: &False},
|
||||
&spec.PostgresSpec{UseLoadBalancer: nil, EnableMasterLoadBalancer: &False},
|
||||
&acidv1.PostgresSpec{UseLoadBalancer: &True, EnableMasterLoadBalancer: &False},
|
||||
&acidv1.PostgresSpec{UseLoadBalancer: nil, EnableMasterLoadBalancer: &False},
|
||||
"New parameters should remain unchanged when both old and new are present",
|
||||
},
|
||||
}
|
||||
|
|
|
|||
|
|
@ -0,0 +1,30 @@
|
|||
package controller
|
||||
|
||||
import (
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"time"
|
||||
|
||||
acidv1 "github.com/zalando-incubator/postgres-operator/pkg/apis/acid.zalan.do/v1"
|
||||
)
|
||||
|
||||
// EventType contains type of the events for the TPRs and Pods received from Kubernetes
|
||||
type EventType string
|
||||
|
||||
// Possible values for the EventType
|
||||
const (
|
||||
EventAdd EventType = "ADD"
|
||||
EventUpdate EventType = "UPDATE"
|
||||
EventDelete EventType = "DELETE"
|
||||
EventSync EventType = "SYNC"
|
||||
EventRepair EventType = "REPAIR"
|
||||
)
|
||||
|
||||
// ClusterEvent carries the payload of the Cluster TPR events.
|
||||
type ClusterEvent struct {
|
||||
EventTime time.Time
|
||||
UID types.UID
|
||||
EventType EventType
|
||||
OldSpec *acidv1.Postgresql
|
||||
NewSpec *acidv1.Postgresql
|
||||
WorkerID uint32
|
||||
}
|
||||
|
|
@ -3,15 +3,15 @@ package controller
|
|||
import (
|
||||
"fmt"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
apiextv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/client-go/pkg/api/v1"
|
||||
|
||||
acidv1 "github.com/zalando-incubator/postgres-operator/pkg/apis/acid.zalan.do/v1"
|
||||
"github.com/zalando-incubator/postgres-operator/pkg/cluster"
|
||||
"github.com/zalando-incubator/postgres-operator/pkg/spec"
|
||||
"github.com/zalando-incubator/postgres-operator/pkg/util/config"
|
||||
"github.com/zalando-incubator/postgres-operator/pkg/util/constants"
|
||||
"github.com/zalando-incubator/postgres-operator/pkg/util/k8sutil"
|
||||
"gopkg.in/yaml.v2"
|
||||
)
|
||||
|
|
@ -47,22 +47,24 @@ func (c *Controller) clusterWorkerID(clusterName spec.NamespacedName) uint32 {
|
|||
return c.clusterWorkers[clusterName]
|
||||
}
|
||||
|
||||
func (c *Controller) createCRD() error {
|
||||
func (c *Controller) createOperatorCRD(name, kind, plural, short string) error {
|
||||
subResourceStatus := apiextv1beta1.CustomResourceSubresourceStatus{}
|
||||
crd := &apiextv1beta1.CustomResourceDefinition{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: constants.CRDResource + "." + constants.CRDGroup,
|
||||
Name: name,
|
||||
},
|
||||
Spec: apiextv1beta1.CustomResourceDefinitionSpec{
|
||||
Group: constants.CRDGroup,
|
||||
Version: constants.CRDApiVersion,
|
||||
Group: acidv1.SchemeGroupVersion.Group,
|
||||
Version: acidv1.SchemeGroupVersion.Version,
|
||||
Names: apiextv1beta1.CustomResourceDefinitionNames{
|
||||
Plural: constants.CRDResource,
|
||||
Singular: constants.CRDKind,
|
||||
ShortNames: []string{constants.CRDShort},
|
||||
Kind: constants.CRDKind,
|
||||
ListKind: constants.CRDKind + "List",
|
||||
Plural: plural,
|
||||
ShortNames: []string{short},
|
||||
Kind: kind,
|
||||
},
|
||||
Scope: apiextv1beta1.NamespaceScoped,
|
||||
Subresources: &apiextv1beta1.CustomResourceSubresources{
|
||||
Status: &subResourceStatus,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
|
|
@ -75,7 +77,7 @@ func (c *Controller) createCRD() error {
|
|||
c.logger.Infof("customResourceDefinition %q has been registered", crd.Name)
|
||||
}
|
||||
|
||||
return wait.Poll(c.opConfig.CRD.ReadyWaitInterval, c.opConfig.CRD.ReadyWaitTimeout, func() (bool, error) {
|
||||
return wait.Poll(c.config.CRDReadyWaitInterval, c.config.CRDReadyWaitTimeout, func() (bool, error) {
|
||||
c, err := c.KubeClient.CustomResourceDefinitions().Get(crd.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return false, err
|
||||
|
|
@ -98,6 +100,20 @@ func (c *Controller) createCRD() error {
|
|||
})
|
||||
}
|
||||
|
||||
func (c *Controller) createPostgresCRD() error {
|
||||
return c.createOperatorCRD(acidv1.PostgresCRDResouceName,
|
||||
acidv1.PostgresCRDResourceKind,
|
||||
acidv1.PostgresCRDResourcePlural,
|
||||
acidv1.PostgresCRDResourceShort)
|
||||
}
|
||||
|
||||
func (c *Controller) createConfigurationCRD() error {
|
||||
return c.createOperatorCRD(acidv1.OperatorConfigCRDResourceName,
|
||||
acidv1.OperatorConfigCRDResouceKind,
|
||||
acidv1.OperatorConfigCRDResourcePlural,
|
||||
acidv1.OperatorConfigCRDResourceShort)
|
||||
}
|
||||
|
||||
func readDecodedRole(s string) (*spec.PgUser, error) {
|
||||
var result spec.PgUser
|
||||
if err := yaml.Unmarshal([]byte(s), &result); err != nil {
|
||||
|
|
@ -106,7 +122,7 @@ func readDecodedRole(s string) (*spec.PgUser, error) {
|
|||
return &result, nil
|
||||
}
|
||||
|
||||
func (c *Controller) getInfrastructureRoles(rolesSecret *spec.NamespacedName) (result map[string]spec.PgUser, err error) {
|
||||
func (c *Controller) getInfrastructureRoles(rolesSecret *spec.NamespacedName) (map[string]spec.PgUser, error) {
|
||||
if *rolesSecret == (spec.NamespacedName{}) {
|
||||
// we don't have infrastructure roles defined, bail out
|
||||
return nil, nil
|
||||
|
|
@ -121,7 +137,7 @@ func (c *Controller) getInfrastructureRoles(rolesSecret *spec.NamespacedName) (r
|
|||
}
|
||||
|
||||
secretData := infraRolesSecret.Data
|
||||
result = make(map[string]spec.PgUser)
|
||||
result := make(map[string]spec.PgUser)
|
||||
Users:
|
||||
// in worst case we would have one line per user
|
||||
for i := 1; i <= len(secretData); i++ {
|
||||
|
|
@ -163,22 +179,22 @@ Users:
|
|||
if infraRolesMap, err := c.KubeClient.ConfigMaps(rolesSecret.Namespace).Get(rolesSecret.Name, metav1.GetOptions{}); err == nil {
|
||||
// we have a configmap with username - json description, let's read and decode it
|
||||
for role, s := range infraRolesMap.Data {
|
||||
if roleDescr, err := readDecodedRole(s); err != nil {
|
||||
roleDescr, err := readDecodedRole(s)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not decode role description: %v", err)
|
||||
} else {
|
||||
// check if we have a a password in a configmap
|
||||
c.logger.Debugf("found role description for role %q: %+v", role, roleDescr)
|
||||
if passwd, ok := secretData[role]; ok {
|
||||
roleDescr.Password = string(passwd)
|
||||
delete(secretData, role)
|
||||
} else {
|
||||
c.logger.Warningf("infrastructure role %q has no password defined and is ignored", role)
|
||||
continue
|
||||
}
|
||||
roleDescr.Name = role
|
||||
roleDescr.Origin = spec.RoleOriginInfrastructure
|
||||
result[role] = *roleDescr
|
||||
}
|
||||
// check if we have a a password in a configmap
|
||||
c.logger.Debugf("found role description for role %q: %+v", role, roleDescr)
|
||||
if passwd, ok := secretData[role]; ok {
|
||||
roleDescr.Password = string(passwd)
|
||||
delete(secretData, role)
|
||||
} else {
|
||||
c.logger.Warningf("infrastructure role %q has no password defined and is ignored", role)
|
||||
continue
|
||||
}
|
||||
roleDescr.Name = role
|
||||
roleDescr.Origin = spec.RoleOriginInfrastructure
|
||||
result[role] = *roleDescr
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -6,9 +6,9 @@ import (
|
|||
"testing"
|
||||
|
||||
b64 "encoding/base64"
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
v1core "k8s.io/client-go/kubernetes/typed/core/v1"
|
||||
"k8s.io/client-go/pkg/api/v1"
|
||||
|
||||
"github.com/zalando-incubator/postgres-operator/pkg/spec"
|
||||
"github.com/zalando-incubator/postgres-operator/pkg/util/k8sutil"
|
||||
|
|
|
|||
|
|
@ -0,0 +1,104 @@
|
|||
/*
|
||||
Copyright 2018 Compose, Zalando SE
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
*/
|
||||
|
||||
// Code generated by client-gen. DO NOT EDIT.
|
||||
|
||||
package versioned
|
||||
|
||||
import (
|
||||
acidv1 "github.com/zalando-incubator/postgres-operator/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1"
|
||||
discovery "k8s.io/client-go/discovery"
|
||||
rest "k8s.io/client-go/rest"
|
||||
flowcontrol "k8s.io/client-go/util/flowcontrol"
|
||||
)
|
||||
|
||||
type Interface interface {
|
||||
Discovery() discovery.DiscoveryInterface
|
||||
AcidV1() acidv1.AcidV1Interface
|
||||
// Deprecated: please explicitly pick a version if possible.
|
||||
Acid() acidv1.AcidV1Interface
|
||||
}
|
||||
|
||||
// Clientset contains the clients for groups. Each group has exactly one
|
||||
// version included in a Clientset.
|
||||
type Clientset struct {
|
||||
*discovery.DiscoveryClient
|
||||
acidV1 *acidv1.AcidV1Client
|
||||
}
|
||||
|
||||
// AcidV1 retrieves the AcidV1Client
|
||||
func (c *Clientset) AcidV1() acidv1.AcidV1Interface {
|
||||
return c.acidV1
|
||||
}
|
||||
|
||||
// Deprecated: Acid retrieves the default version of AcidClient.
|
||||
// Please explicitly pick a version.
|
||||
func (c *Clientset) Acid() acidv1.AcidV1Interface {
|
||||
return c.acidV1
|
||||
}
|
||||
|
||||
// Discovery retrieves the DiscoveryClient
|
||||
func (c *Clientset) Discovery() discovery.DiscoveryInterface {
|
||||
if c == nil {
|
||||
return nil
|
||||
}
|
||||
return c.DiscoveryClient
|
||||
}
|
||||
|
||||
// NewForConfig creates a new Clientset for the given config.
|
||||
func NewForConfig(c *rest.Config) (*Clientset, error) {
|
||||
configShallowCopy := *c
|
||||
if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 {
|
||||
configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst)
|
||||
}
|
||||
var cs Clientset
|
||||
var err error
|
||||
cs.acidV1, err = acidv1.NewForConfig(&configShallowCopy)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(&configShallowCopy)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &cs, nil
|
||||
}
|
||||
|
||||
// NewForConfigOrDie creates a new Clientset for the given config and
|
||||
// panics if there is an error in the config.
|
||||
func NewForConfigOrDie(c *rest.Config) *Clientset {
|
||||
var cs Clientset
|
||||
cs.acidV1 = acidv1.NewForConfigOrDie(c)
|
||||
|
||||
cs.DiscoveryClient = discovery.NewDiscoveryClientForConfigOrDie(c)
|
||||
return &cs
|
||||
}
|
||||
|
||||
// New creates a new Clientset for the given RESTClient.
|
||||
func New(c rest.Interface) *Clientset {
|
||||
var cs Clientset
|
||||
cs.acidV1 = acidv1.New(c)
|
||||
|
||||
cs.DiscoveryClient = discovery.NewDiscoveryClient(c)
|
||||
return &cs
|
||||
}
|
||||
|
|
@ -0,0 +1,26 @@
|
|||
/*
|
||||
Copyright 2018 Compose, Zalando SE
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
*/
|
||||
|
||||
// Code generated by client-gen. DO NOT EDIT.
|
||||
|
||||
// This package has the automatically generated clientset.
|
||||
package versioned
|
||||
|
|
@ -0,0 +1,88 @@
|
|||
/*
|
||||
Copyright 2018 Compose, Zalando SE
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
*/
|
||||
|
||||
// Code generated by client-gen. DO NOT EDIT.
|
||||
|
||||
package fake
|
||||
|
||||
import (
|
||||
clientset "github.com/zalando-incubator/postgres-operator/pkg/generated/clientset/versioned"
|
||||
acidv1 "github.com/zalando-incubator/postgres-operator/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1"
|
||||
fakeacidv1 "github.com/zalando-incubator/postgres-operator/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/fake"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/watch"
|
||||
"k8s.io/client-go/discovery"
|
||||
fakediscovery "k8s.io/client-go/discovery/fake"
|
||||
"k8s.io/client-go/testing"
|
||||
)
|
||||
|
||||
// NewSimpleClientset returns a clientset that will respond with the provided objects.
|
||||
// It's backed by a very simple object tracker that processes creates, updates and deletions as-is,
|
||||
// without applying any validations and/or defaults. It shouldn't be considered a replacement
|
||||
// for a real clientset and is mostly useful in simple unit tests.
|
||||
func NewSimpleClientset(objects ...runtime.Object) *Clientset {
|
||||
o := testing.NewObjectTracker(scheme, codecs.UniversalDecoder())
|
||||
for _, obj := range objects {
|
||||
if err := o.Add(obj); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
cs := &Clientset{}
|
||||
cs.discovery = &fakediscovery.FakeDiscovery{Fake: &cs.Fake}
|
||||
cs.AddReactor("*", "*", testing.ObjectReaction(o))
|
||||
cs.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) {
|
||||
gvr := action.GetResource()
|
||||
ns := action.GetNamespace()
|
||||
watch, err := o.Watch(gvr, ns)
|
||||
if err != nil {
|
||||
return false, nil, err
|
||||
}
|
||||
return true, watch, nil
|
||||
})
|
||||
|
||||
return cs
|
||||
}
|
||||
|
||||
// Clientset implements clientset.Interface. Meant to be embedded into a
|
||||
// struct to get a default implementation. This makes faking out just the method
|
||||
// you want to test easier.
|
||||
type Clientset struct {
|
||||
testing.Fake
|
||||
discovery *fakediscovery.FakeDiscovery
|
||||
}
|
||||
|
||||
func (c *Clientset) Discovery() discovery.DiscoveryInterface {
|
||||
return c.discovery
|
||||
}
|
||||
|
||||
var _ clientset.Interface = &Clientset{}
|
||||
|
||||
// AcidV1 retrieves the AcidV1Client
|
||||
func (c *Clientset) AcidV1() acidv1.AcidV1Interface {
|
||||
return &fakeacidv1.FakeAcidV1{Fake: &c.Fake}
|
||||
}
|
||||
|
||||
// Acid retrieves the AcidV1Client
|
||||
func (c *Clientset) Acid() acidv1.AcidV1Interface {
|
||||
return &fakeacidv1.FakeAcidV1{Fake: &c.Fake}
|
||||
}
|
||||
|
|
@ -0,0 +1,26 @@
|
|||
/*
|
||||
Copyright 2018 Compose, Zalando SE
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
*/
|
||||
|
||||
// Code generated by client-gen. DO NOT EDIT.
|
||||
|
||||
// This package has the automatically generated fake clientset.
|
||||
package fake
|
||||
|
|
@ -0,0 +1,60 @@
|
|||
/*
|
||||
Copyright 2018 Compose, Zalando SE
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
*/
|
||||
|
||||
// Code generated by client-gen. DO NOT EDIT.
|
||||
|
||||
package fake
|
||||
|
||||
import (
|
||||
acidv1 "github.com/zalando-incubator/postgres-operator/pkg/apis/acid.zalan.do/v1"
|
||||
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
runtime "k8s.io/apimachinery/pkg/runtime"
|
||||
schema "k8s.io/apimachinery/pkg/runtime/schema"
|
||||
serializer "k8s.io/apimachinery/pkg/runtime/serializer"
|
||||
)
|
||||
|
||||
var scheme = runtime.NewScheme()
|
||||
var codecs = serializer.NewCodecFactory(scheme)
|
||||
var parameterCodec = runtime.NewParameterCodec(scheme)
|
||||
|
||||
func init() {
|
||||
v1.AddToGroupVersion(scheme, schema.GroupVersion{Version: "v1"})
|
||||
AddToScheme(scheme)
|
||||
}
|
||||
|
||||
// AddToScheme adds all types of this clientset into the given scheme. This allows composition
|
||||
// of clientsets, like in:
|
||||
//
|
||||
// import (
|
||||
// "k8s.io/client-go/kubernetes"
|
||||
// clientsetscheme "k8s.io/client-go/kubernetes/scheme"
|
||||
// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme"
|
||||
// )
|
||||
//
|
||||
// kclientset, _ := kubernetes.NewForConfig(c)
|
||||
// aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme)
|
||||
//
|
||||
// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types
|
||||
// correctly.
|
||||
func AddToScheme(scheme *runtime.Scheme) {
|
||||
acidv1.AddToScheme(scheme)
|
||||
}
|
||||
|
|
@ -0,0 +1,26 @@
|
|||
/*
|
||||
Copyright 2018 Compose, Zalando SE
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
*/
|
||||
|
||||
// Code generated by client-gen. DO NOT EDIT.
|
||||
|
||||
// This package contains the scheme of the automatically generated clientset.
|
||||
package scheme
|
||||
|
|
@ -0,0 +1,60 @@
|
|||
/*
|
||||
Copyright 2018 Compose, Zalando SE
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
*/
|
||||
|
||||
// Code generated by client-gen. DO NOT EDIT.
|
||||
|
||||
package scheme
|
||||
|
||||
import (
|
||||
acidv1 "github.com/zalando-incubator/postgres-operator/pkg/apis/acid.zalan.do/v1"
|
||||
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
runtime "k8s.io/apimachinery/pkg/runtime"
|
||||
schema "k8s.io/apimachinery/pkg/runtime/schema"
|
||||
serializer "k8s.io/apimachinery/pkg/runtime/serializer"
|
||||
)
|
||||
|
||||
var Scheme = runtime.NewScheme()
|
||||
var Codecs = serializer.NewCodecFactory(Scheme)
|
||||
var ParameterCodec = runtime.NewParameterCodec(Scheme)
|
||||
|
||||
func init() {
|
||||
v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"})
|
||||
AddToScheme(Scheme)
|
||||
}
|
||||
|
||||
// AddToScheme adds all types of this clientset into the given scheme. This allows composition
|
||||
// of clientsets, like in:
|
||||
//
|
||||
// import (
|
||||
// "k8s.io/client-go/kubernetes"
|
||||
// clientsetscheme "k8s.io/client-go/kubernetes/scheme"
|
||||
// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme"
|
||||
// )
|
||||
//
|
||||
// kclientset, _ := kubernetes.NewForConfig(c)
|
||||
// aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme)
|
||||
//
|
||||
// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types
|
||||
// correctly.
|
||||
func AddToScheme(scheme *runtime.Scheme) {
|
||||
acidv1.AddToScheme(scheme)
|
||||
}
|
||||
|
|
@ -0,0 +1,101 @@
|
|||
/*
|
||||
Copyright 2018 Compose, Zalando SE
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
*/
|
||||
|
||||
// Code generated by client-gen. DO NOT EDIT.
|
||||
|
||||
package v1
|
||||
|
||||
import (
|
||||
v1 "github.com/zalando-incubator/postgres-operator/pkg/apis/acid.zalan.do/v1"
|
||||
"github.com/zalando-incubator/postgres-operator/pkg/generated/clientset/versioned/scheme"
|
||||
serializer "k8s.io/apimachinery/pkg/runtime/serializer"
|
||||
rest "k8s.io/client-go/rest"
|
||||
)
|
||||
|
||||
type AcidV1Interface interface {
|
||||
RESTClient() rest.Interface
|
||||
OperatorConfigurationsGetter
|
||||
PostgresqlsGetter
|
||||
}
|
||||
|
||||
// AcidV1Client is used to interact with features provided by the acid.zalan.do group.
|
||||
type AcidV1Client struct {
|
||||
restClient rest.Interface
|
||||
}
|
||||
|
||||
func (c *AcidV1Client) OperatorConfigurations(namespace string) OperatorConfigurationInterface {
|
||||
return newOperatorConfigurations(c, namespace)
|
||||
}
|
||||
|
||||
func (c *AcidV1Client) Postgresqls(namespace string) PostgresqlInterface {
|
||||
return newPostgresqls(c, namespace)
|
||||
}
|
||||
|
||||
// NewForConfig creates a new AcidV1Client for the given config.
|
||||
func NewForConfig(c *rest.Config) (*AcidV1Client, error) {
|
||||
config := *c
|
||||
if err := setConfigDefaults(&config); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
client, err := rest.RESTClientFor(&config)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &AcidV1Client{client}, nil
|
||||
}
|
||||
|
||||
// NewForConfigOrDie creates a new AcidV1Client for the given config and
|
||||
// panics if there is an error in the config.
|
||||
func NewForConfigOrDie(c *rest.Config) *AcidV1Client {
|
||||
client, err := NewForConfig(c)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return client
|
||||
}
|
||||
|
||||
// New creates a new AcidV1Client for the given RESTClient.
|
||||
func New(c rest.Interface) *AcidV1Client {
|
||||
return &AcidV1Client{c}
|
||||
}
|
||||
|
||||
func setConfigDefaults(config *rest.Config) error {
|
||||
gv := v1.SchemeGroupVersion
|
||||
config.GroupVersion = &gv
|
||||
config.APIPath = "/apis"
|
||||
config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs}
|
||||
|
||||
if config.UserAgent == "" {
|
||||
config.UserAgent = rest.DefaultKubernetesUserAgent()
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// RESTClient returns a RESTClient that is used to communicate
|
||||
// with API server by this client implementation.
|
||||
func (c *AcidV1Client) RESTClient() rest.Interface {
|
||||
if c == nil {
|
||||
return nil
|
||||
}
|
||||
return c.restClient
|
||||
}
|
||||
|
|
@ -0,0 +1,26 @@
|
|||
/*
|
||||
Copyright 2018 Compose, Zalando SE
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
*/
|
||||
|
||||
// Code generated by client-gen. DO NOT EDIT.
|
||||
|
||||
// This package has the automatically generated typed clients.
|
||||
package v1
|
||||
|
|
@ -0,0 +1,26 @@
|
|||
/*
|
||||
Copyright 2018 Compose, Zalando SE
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
*/
|
||||
|
||||
// Code generated by client-gen. DO NOT EDIT.
|
||||
|
||||
// Package fake has the automatically generated clients.
|
||||
package fake
|
||||
|
|
@ -0,0 +1,50 @@
|
|||
/*
|
||||
Copyright 2018 Compose, Zalando SE
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
*/
|
||||
|
||||
// Code generated by client-gen. DO NOT EDIT.
|
||||
|
||||
package fake
|
||||
|
||||
import (
|
||||
v1 "github.com/zalando-incubator/postgres-operator/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1"
|
||||
rest "k8s.io/client-go/rest"
|
||||
testing "k8s.io/client-go/testing"
|
||||
)
|
||||
|
||||
type FakeAcidV1 struct {
|
||||
*testing.Fake
|
||||
}
|
||||
|
||||
func (c *FakeAcidV1) OperatorConfigurations(namespace string) v1.OperatorConfigurationInterface {
|
||||
return &FakeOperatorConfigurations{c, namespace}
|
||||
}
|
||||
|
||||
func (c *FakeAcidV1) Postgresqls(namespace string) v1.PostgresqlInterface {
|
||||
return &FakePostgresqls{c, namespace}
|
||||
}
|
||||
|
||||
// RESTClient returns a RESTClient that is used to communicate
|
||||
// with API server by this client implementation.
|
||||
func (c *FakeAcidV1) RESTClient() rest.Interface {
|
||||
var ret *rest.RESTClient
|
||||
return ret
|
||||
}
|
||||
|
|
@ -0,0 +1,53 @@
|
|||
/*
|
||||
Copyright 2018 Compose, Zalando SE
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
*/
|
||||
|
||||
// Code generated by client-gen. DO NOT EDIT.
|
||||
|
||||
package fake
|
||||
|
||||
import (
|
||||
acidzalandov1 "github.com/zalando-incubator/postgres-operator/pkg/apis/acid.zalan.do/v1"
|
||||
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
schema "k8s.io/apimachinery/pkg/runtime/schema"
|
||||
testing "k8s.io/client-go/testing"
|
||||
)
|
||||
|
||||
// FakeOperatorConfigurations implements OperatorConfigurationInterface
|
||||
type FakeOperatorConfigurations struct {
|
||||
Fake *FakeAcidV1
|
||||
ns string
|
||||
}
|
||||
|
||||
var operatorconfigurationsResource = schema.GroupVersionResource{Group: "acid.zalan.do", Version: "v1", Resource: "operatorconfigurations"}
|
||||
|
||||
var operatorconfigurationsKind = schema.GroupVersionKind{Group: "acid.zalan.do", Version: "v1", Kind: "OperatorConfiguration"}
|
||||
|
||||
// Get takes name of the operatorConfiguration, and returns the corresponding operatorConfiguration object, and an error if there is any.
|
||||
func (c *FakeOperatorConfigurations) Get(name string, options v1.GetOptions) (result *acidzalandov1.OperatorConfiguration, err error) {
|
||||
obj, err := c.Fake.
|
||||
Invokes(testing.NewGetAction(operatorconfigurationsResource, c.ns, name), &acidzalandov1.OperatorConfiguration{})
|
||||
|
||||
if obj == nil {
|
||||
return nil, err
|
||||
}
|
||||
return obj.(*acidzalandov1.OperatorConfiguration), err
|
||||
}
|
||||
|
|
@ -0,0 +1,146 @@
|
|||
/*
|
||||
Copyright 2018 Compose, Zalando SE
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
*/
|
||||
|
||||
// Code generated by client-gen. DO NOT EDIT.
|
||||
|
||||
package fake
|
||||
|
||||
import (
|
||||
acidzalandov1 "github.com/zalando-incubator/postgres-operator/pkg/apis/acid.zalan.do/v1"
|
||||
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
labels "k8s.io/apimachinery/pkg/labels"
|
||||
schema "k8s.io/apimachinery/pkg/runtime/schema"
|
||||
types "k8s.io/apimachinery/pkg/types"
|
||||
watch "k8s.io/apimachinery/pkg/watch"
|
||||
testing "k8s.io/client-go/testing"
|
||||
)
|
||||
|
||||
// FakePostgresqls implements PostgresqlInterface
|
||||
type FakePostgresqls struct {
|
||||
Fake *FakeAcidV1
|
||||
ns string
|
||||
}
|
||||
|
||||
var postgresqlsResource = schema.GroupVersionResource{Group: "acid.zalan.do", Version: "v1", Resource: "postgresqls"}
|
||||
|
||||
var postgresqlsKind = schema.GroupVersionKind{Group: "acid.zalan.do", Version: "v1", Kind: "Postgresql"}
|
||||
|
||||
// Get takes name of the postgresql, and returns the corresponding postgresql object, and an error if there is any.
|
||||
func (c *FakePostgresqls) Get(name string, options v1.GetOptions) (result *acidzalandov1.Postgresql, err error) {
|
||||
obj, err := c.Fake.
|
||||
Invokes(testing.NewGetAction(postgresqlsResource, c.ns, name), &acidzalandov1.Postgresql{})
|
||||
|
||||
if obj == nil {
|
||||
return nil, err
|
||||
}
|
||||
return obj.(*acidzalandov1.Postgresql), err
|
||||
}
|
||||
|
||||
// List takes label and field selectors, and returns the list of Postgresqls that match those selectors.
|
||||
func (c *FakePostgresqls) List(opts v1.ListOptions) (result *acidzalandov1.PostgresqlList, err error) {
|
||||
obj, err := c.Fake.
|
||||
Invokes(testing.NewListAction(postgresqlsResource, postgresqlsKind, c.ns, opts), &acidzalandov1.PostgresqlList{})
|
||||
|
||||
if obj == nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
label, _, _ := testing.ExtractFromListOptions(opts)
|
||||
if label == nil {
|
||||
label = labels.Everything()
|
||||
}
|
||||
list := &acidzalandov1.PostgresqlList{ListMeta: obj.(*acidzalandov1.PostgresqlList).ListMeta}
|
||||
for _, item := range obj.(*acidzalandov1.PostgresqlList).Items {
|
||||
if label.Matches(labels.Set(item.Labels)) {
|
||||
list.Items = append(list.Items, item)
|
||||
}
|
||||
}
|
||||
return list, err
|
||||
}
|
||||
|
||||
// Watch returns a watch.Interface that watches the requested postgresqls.
|
||||
func (c *FakePostgresqls) Watch(opts v1.ListOptions) (watch.Interface, error) {
|
||||
return c.Fake.
|
||||
InvokesWatch(testing.NewWatchAction(postgresqlsResource, c.ns, opts))
|
||||
|
||||
}
|
||||
|
||||
// Create takes the representation of a postgresql and creates it. Returns the server's representation of the postgresql, and an error, if there is any.
|
||||
func (c *FakePostgresqls) Create(postgresql *acidzalandov1.Postgresql) (result *acidzalandov1.Postgresql, err error) {
|
||||
obj, err := c.Fake.
|
||||
Invokes(testing.NewCreateAction(postgresqlsResource, c.ns, postgresql), &acidzalandov1.Postgresql{})
|
||||
|
||||
if obj == nil {
|
||||
return nil, err
|
||||
}
|
||||
return obj.(*acidzalandov1.Postgresql), err
|
||||
}
|
||||
|
||||
// Update takes the representation of a postgresql and updates it. Returns the server's representation of the postgresql, and an error, if there is any.
|
||||
func (c *FakePostgresqls) Update(postgresql *acidzalandov1.Postgresql) (result *acidzalandov1.Postgresql, err error) {
|
||||
obj, err := c.Fake.
|
||||
Invokes(testing.NewUpdateAction(postgresqlsResource, c.ns, postgresql), &acidzalandov1.Postgresql{})
|
||||
|
||||
if obj == nil {
|
||||
return nil, err
|
||||
}
|
||||
return obj.(*acidzalandov1.Postgresql), err
|
||||
}
|
||||
|
||||
// UpdateStatus was generated because the type contains a Status member.
|
||||
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
|
||||
func (c *FakePostgresqls) UpdateStatus(postgresql *acidzalandov1.Postgresql) (*acidzalandov1.Postgresql, error) {
|
||||
obj, err := c.Fake.
|
||||
Invokes(testing.NewUpdateSubresourceAction(postgresqlsResource, "status", c.ns, postgresql), &acidzalandov1.Postgresql{})
|
||||
|
||||
if obj == nil {
|
||||
return nil, err
|
||||
}
|
||||
return obj.(*acidzalandov1.Postgresql), err
|
||||
}
|
||||
|
||||
// Delete takes name of the postgresql and deletes it. Returns an error if one occurs.
|
||||
func (c *FakePostgresqls) Delete(name string, options *v1.DeleteOptions) error {
|
||||
_, err := c.Fake.
|
||||
Invokes(testing.NewDeleteAction(postgresqlsResource, c.ns, name), &acidzalandov1.Postgresql{})
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// DeleteCollection deletes a collection of objects.
|
||||
func (c *FakePostgresqls) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
|
||||
action := testing.NewDeleteCollectionAction(postgresqlsResource, c.ns, listOptions)
|
||||
|
||||
_, err := c.Fake.Invokes(action, &acidzalandov1.PostgresqlList{})
|
||||
return err
|
||||
}
|
||||
|
||||
// Patch applies the patch and returns the patched postgresql.
|
||||
func (c *FakePostgresqls) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *acidzalandov1.Postgresql, err error) {
|
||||
obj, err := c.Fake.
|
||||
Invokes(testing.NewPatchSubresourceAction(postgresqlsResource, c.ns, name, data, subresources...), &acidzalandov1.Postgresql{})
|
||||
|
||||
if obj == nil {
|
||||
return nil, err
|
||||
}
|
||||
return obj.(*acidzalandov1.Postgresql), err
|
||||
}
|
||||
|
|
@ -0,0 +1,29 @@
|
|||
/*
|
||||
Copyright 2018 Compose, Zalando SE
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
*/
|
||||
|
||||
// Code generated by client-gen. DO NOT EDIT.
|
||||
|
||||
package v1
|
||||
|
||||
type OperatorConfigurationExpansion interface{}
|
||||
|
||||
type PostgresqlExpansion interface{}
|
||||
|
|
@ -0,0 +1,71 @@
|
|||
/*
|
||||
Copyright 2018 Compose, Zalando SE
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
*/
|
||||
|
||||
// Code generated by client-gen. DO NOT EDIT.
|
||||
|
||||
package v1
|
||||
|
||||
import (
|
||||
acidzalandov1 "github.com/zalando-incubator/postgres-operator/pkg/apis/acid.zalan.do/v1"
|
||||
scheme "github.com/zalando-incubator/postgres-operator/pkg/generated/clientset/versioned/scheme"
|
||||
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
rest "k8s.io/client-go/rest"
|
||||
)
|
||||
|
||||
// OperatorConfigurationsGetter has a method to return a OperatorConfigurationInterface.
|
||||
// A group's client should implement this interface.
|
||||
type OperatorConfigurationsGetter interface {
|
||||
OperatorConfigurations(namespace string) OperatorConfigurationInterface
|
||||
}
|
||||
|
||||
// OperatorConfigurationInterface has methods to work with OperatorConfiguration resources.
|
||||
type OperatorConfigurationInterface interface {
|
||||
Get(name string, options v1.GetOptions) (*acidzalandov1.OperatorConfiguration, error)
|
||||
OperatorConfigurationExpansion
|
||||
}
|
||||
|
||||
// operatorConfigurations implements OperatorConfigurationInterface
|
||||
type operatorConfigurations struct {
|
||||
client rest.Interface
|
||||
ns string
|
||||
}
|
||||
|
||||
// newOperatorConfigurations returns a OperatorConfigurations
|
||||
func newOperatorConfigurations(c *AcidV1Client, namespace string) *operatorConfigurations {
|
||||
return &operatorConfigurations{
|
||||
client: c.RESTClient(),
|
||||
ns: namespace,
|
||||
}
|
||||
}
|
||||
|
||||
// Get takes name of the operatorConfiguration, and returns the corresponding operatorConfiguration object, and an error if there is any.
|
||||
func (c *operatorConfigurations) Get(name string, options v1.GetOptions) (result *acidzalandov1.OperatorConfiguration, err error) {
|
||||
result = &acidzalandov1.OperatorConfiguration{}
|
||||
err = c.client.Get().
|
||||
Namespace(c.ns).
|
||||
Resource("operatorconfigurations").
|
||||
Name(name).
|
||||
VersionedParams(&options, scheme.ParameterCodec).
|
||||
Do().
|
||||
Into(result)
|
||||
return
|
||||
}
|
||||
|
|
@ -0,0 +1,180 @@
|
|||
/*
|
||||
Copyright 2018 Compose, Zalando SE
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
*/
|
||||
|
||||
// Code generated by client-gen. DO NOT EDIT.
|
||||
|
||||
package v1
|
||||
|
||||
import (
|
||||
v1 "github.com/zalando-incubator/postgres-operator/pkg/apis/acid.zalan.do/v1"
|
||||
scheme "github.com/zalando-incubator/postgres-operator/pkg/generated/clientset/versioned/scheme"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
types "k8s.io/apimachinery/pkg/types"
|
||||
watch "k8s.io/apimachinery/pkg/watch"
|
||||
rest "k8s.io/client-go/rest"
|
||||
)
|
||||
|
||||
// PostgresqlsGetter has a method to return a PostgresqlInterface.
|
||||
// A group's client should implement this interface.
|
||||
type PostgresqlsGetter interface {
|
||||
Postgresqls(namespace string) PostgresqlInterface
|
||||
}
|
||||
|
||||
// PostgresqlInterface has methods to work with Postgresql resources.
|
||||
type PostgresqlInterface interface {
|
||||
Create(*v1.Postgresql) (*v1.Postgresql, error)
|
||||
Update(*v1.Postgresql) (*v1.Postgresql, error)
|
||||
UpdateStatus(*v1.Postgresql) (*v1.Postgresql, error)
|
||||
Delete(name string, options *metav1.DeleteOptions) error
|
||||
DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error
|
||||
Get(name string, options metav1.GetOptions) (*v1.Postgresql, error)
|
||||
List(opts metav1.ListOptions) (*v1.PostgresqlList, error)
|
||||
Watch(opts metav1.ListOptions) (watch.Interface, error)
|
||||
Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Postgresql, err error)
|
||||
PostgresqlExpansion
|
||||
}
|
||||
|
||||
// postgresqls implements PostgresqlInterface
|
||||
type postgresqls struct {
|
||||
client rest.Interface
|
||||
ns string
|
||||
}
|
||||
|
||||
// newPostgresqls returns a Postgresqls
|
||||
func newPostgresqls(c *AcidV1Client, namespace string) *postgresqls {
|
||||
return &postgresqls{
|
||||
client: c.RESTClient(),
|
||||
ns: namespace,
|
||||
}
|
||||
}
|
||||
|
||||
// Get takes name of the postgresql, and returns the corresponding postgresql object, and an error if there is any.
|
||||
func (c *postgresqls) Get(name string, options metav1.GetOptions) (result *v1.Postgresql, err error) {
|
||||
result = &v1.Postgresql{}
|
||||
err = c.client.Get().
|
||||
Namespace(c.ns).
|
||||
Resource("postgresqls").
|
||||
Name(name).
|
||||
VersionedParams(&options, scheme.ParameterCodec).
|
||||
Do().
|
||||
Into(result)
|
||||
return
|
||||
}
|
||||
|
||||
// List takes label and field selectors, and returns the list of Postgresqls that match those selectors.
|
||||
func (c *postgresqls) List(opts metav1.ListOptions) (result *v1.PostgresqlList, err error) {
|
||||
result = &v1.PostgresqlList{}
|
||||
err = c.client.Get().
|
||||
Namespace(c.ns).
|
||||
Resource("postgresqls").
|
||||
VersionedParams(&opts, scheme.ParameterCodec).
|
||||
Do().
|
||||
Into(result)
|
||||
return
|
||||
}
|
||||
|
||||
// Watch returns a watch.Interface that watches the requested postgresqls.
|
||||
func (c *postgresqls) Watch(opts metav1.ListOptions) (watch.Interface, error) {
|
||||
opts.Watch = true
|
||||
return c.client.Get().
|
||||
Namespace(c.ns).
|
||||
Resource("postgresqls").
|
||||
VersionedParams(&opts, scheme.ParameterCodec).
|
||||
Watch()
|
||||
}
|
||||
|
||||
// Create takes the representation of a postgresql and creates it. Returns the server's representation of the postgresql, and an error, if there is any.
|
||||
func (c *postgresqls) Create(postgresql *v1.Postgresql) (result *v1.Postgresql, err error) {
|
||||
result = &v1.Postgresql{}
|
||||
err = c.client.Post().
|
||||
Namespace(c.ns).
|
||||
Resource("postgresqls").
|
||||
Body(postgresql).
|
||||
Do().
|
||||
Into(result)
|
||||
return
|
||||
}
|
||||
|
||||
// Update takes the representation of a postgresql and updates it. Returns the server's representation of the postgresql, and an error, if there is any.
|
||||
func (c *postgresqls) Update(postgresql *v1.Postgresql) (result *v1.Postgresql, err error) {
|
||||
result = &v1.Postgresql{}
|
||||
err = c.client.Put().
|
||||
Namespace(c.ns).
|
||||
Resource("postgresqls").
|
||||
Name(postgresql.Name).
|
||||
Body(postgresql).
|
||||
Do().
|
||||
Into(result)
|
||||
return
|
||||
}
|
||||
|
||||
// UpdateStatus was generated because the type contains a Status member.
|
||||
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
|
||||
|
||||
func (c *postgresqls) UpdateStatus(postgresql *v1.Postgresql) (result *v1.Postgresql, err error) {
|
||||
result = &v1.Postgresql{}
|
||||
err = c.client.Put().
|
||||
Namespace(c.ns).
|
||||
Resource("postgresqls").
|
||||
Name(postgresql.Name).
|
||||
SubResource("status").
|
||||
Body(postgresql).
|
||||
Do().
|
||||
Into(result)
|
||||
return
|
||||
}
|
||||
|
||||
// Delete takes name of the postgresql and deletes it. Returns an error if one occurs.
|
||||
func (c *postgresqls) Delete(name string, options *metav1.DeleteOptions) error {
|
||||
return c.client.Delete().
|
||||
Namespace(c.ns).
|
||||
Resource("postgresqls").
|
||||
Name(name).
|
||||
Body(options).
|
||||
Do().
|
||||
Error()
|
||||
}
|
||||
|
||||
// DeleteCollection deletes a collection of objects.
|
||||
func (c *postgresqls) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error {
|
||||
return c.client.Delete().
|
||||
Namespace(c.ns).
|
||||
Resource("postgresqls").
|
||||
VersionedParams(&listOptions, scheme.ParameterCodec).
|
||||
Body(options).
|
||||
Do().
|
||||
Error()
|
||||
}
|
||||
|
||||
// Patch applies the patch and returns the patched postgresql.
|
||||
func (c *postgresqls) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Postgresql, err error) {
|
||||
result = &v1.Postgresql{}
|
||||
err = c.client.Patch(pt).
|
||||
Namespace(c.ns).
|
||||
Resource("postgresqls").
|
||||
SubResource(subresources...).
|
||||
Name(name).
|
||||
Body(data).
|
||||
Do().
|
||||
Into(result)
|
||||
return
|
||||
}
|
||||
|
|
@ -0,0 +1,52 @@
|
|||
/*
|
||||
Copyright 2018 Compose, Zalando SE
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
*/
|
||||
|
||||
// Code generated by informer-gen. DO NOT EDIT.
|
||||
|
||||
package acid
|
||||
|
||||
import (
|
||||
v1 "github.com/zalando-incubator/postgres-operator/pkg/generated/informers/externalversions/acid.zalan.do/v1"
|
||||
internalinterfaces "github.com/zalando-incubator/postgres-operator/pkg/generated/informers/externalversions/internalinterfaces"
|
||||
)
|
||||
|
||||
// Interface provides access to each of this group's versions.
|
||||
type Interface interface {
|
||||
// V1 provides access to shared informers for resources in V1.
|
||||
V1() v1.Interface
|
||||
}
|
||||
|
||||
type group struct {
|
||||
factory internalinterfaces.SharedInformerFactory
|
||||
namespace string
|
||||
tweakListOptions internalinterfaces.TweakListOptionsFunc
|
||||
}
|
||||
|
||||
// New returns a new Interface.
|
||||
func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
|
||||
return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
|
||||
}
|
||||
|
||||
// V1 returns a new v1.Interface.
|
||||
func (g *group) V1() v1.Interface {
|
||||
return v1.New(g.factory, g.namespace, g.tweakListOptions)
|
||||
}
|
||||
|
|
@ -0,0 +1,51 @@
|
|||
/*
|
||||
Copyright 2018 Compose, Zalando SE
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
*/
|
||||
|
||||
// Code generated by informer-gen. DO NOT EDIT.
|
||||
|
||||
package v1
|
||||
|
||||
import (
|
||||
internalinterfaces "github.com/zalando-incubator/postgres-operator/pkg/generated/informers/externalversions/internalinterfaces"
|
||||
)
|
||||
|
||||
// Interface provides access to all the informers in this group version.
|
||||
type Interface interface {
|
||||
// Postgresqls returns a PostgresqlInformer.
|
||||
Postgresqls() PostgresqlInformer
|
||||
}
|
||||
|
||||
type version struct {
|
||||
factory internalinterfaces.SharedInformerFactory
|
||||
namespace string
|
||||
tweakListOptions internalinterfaces.TweakListOptionsFunc
|
||||
}
|
||||
|
||||
// New returns a new Interface.
|
||||
func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
|
||||
return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
|
||||
}
|
||||
|
||||
// Postgresqls returns a PostgresqlInformer.
|
||||
func (v *version) Postgresqls() PostgresqlInformer {
|
||||
return &postgresqlInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
|
||||
}
|
||||
|
|
@ -0,0 +1,95 @@
|
|||
/*
|
||||
Copyright 2018 Compose, Zalando SE
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
*/
|
||||
|
||||
// Code generated by informer-gen. DO NOT EDIT.
|
||||
|
||||
package v1
|
||||
|
||||
import (
|
||||
time "time"
|
||||
|
||||
acidzalandov1 "github.com/zalando-incubator/postgres-operator/pkg/apis/acid.zalan.do/v1"
|
||||
versioned "github.com/zalando-incubator/postgres-operator/pkg/generated/clientset/versioned"
|
||||
internalinterfaces "github.com/zalando-incubator/postgres-operator/pkg/generated/informers/externalversions/internalinterfaces"
|
||||
v1 "github.com/zalando-incubator/postgres-operator/pkg/generated/listers/acid.zalan.do/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
runtime "k8s.io/apimachinery/pkg/runtime"
|
||||
watch "k8s.io/apimachinery/pkg/watch"
|
||||
cache "k8s.io/client-go/tools/cache"
|
||||
)
|
||||
|
||||
// PostgresqlInformer provides access to a shared informer and lister for
|
||||
// Postgresqls.
|
||||
type PostgresqlInformer interface {
|
||||
Informer() cache.SharedIndexInformer
|
||||
Lister() v1.PostgresqlLister
|
||||
}
|
||||
|
||||
type postgresqlInformer struct {
|
||||
factory internalinterfaces.SharedInformerFactory
|
||||
tweakListOptions internalinterfaces.TweakListOptionsFunc
|
||||
namespace string
|
||||
}
|
||||
|
||||
// NewPostgresqlInformer constructs a new informer for Postgresql type.
|
||||
// Always prefer using an informer factory to get a shared informer instead of getting an independent
|
||||
// one. This reduces memory footprint and number of connections to the server.
|
||||
func NewPostgresqlInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
|
||||
return NewFilteredPostgresqlInformer(client, namespace, resyncPeriod, indexers, nil)
|
||||
}
|
||||
|
||||
// NewFilteredPostgresqlInformer constructs a new informer for Postgresql type.
|
||||
// Always prefer using an informer factory to get a shared informer instead of getting an independent
|
||||
// one. This reduces memory footprint and number of connections to the server.
|
||||
func NewFilteredPostgresqlInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
|
||||
return cache.NewSharedIndexInformer(
|
||||
&cache.ListWatch{
|
||||
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
|
||||
if tweakListOptions != nil {
|
||||
tweakListOptions(&options)
|
||||
}
|
||||
return client.AcidV1().Postgresqls(namespace).List(options)
|
||||
},
|
||||
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
|
||||
if tweakListOptions != nil {
|
||||
tweakListOptions(&options)
|
||||
}
|
||||
return client.AcidV1().Postgresqls(namespace).Watch(options)
|
||||
},
|
||||
},
|
||||
&acidzalandov1.Postgresql{},
|
||||
resyncPeriod,
|
||||
indexers,
|
||||
)
|
||||
}
|
||||
|
||||
func (f *postgresqlInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
|
||||
return NewFilteredPostgresqlInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
|
||||
}
|
||||
|
||||
func (f *postgresqlInformer) Informer() cache.SharedIndexInformer {
|
||||
return f.factory.InformerFor(&acidzalandov1.Postgresql{}, f.defaultInformer)
|
||||
}
|
||||
|
||||
func (f *postgresqlInformer) Lister() v1.PostgresqlLister {
|
||||
return v1.NewPostgresqlLister(f.Informer().GetIndexer())
|
||||
}
|
||||
|
|
@ -0,0 +1,186 @@
|
|||
/*
|
||||
Copyright 2018 Compose, Zalando SE
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
*/
|
||||
|
||||
// Code generated by informer-gen. DO NOT EDIT.
|
||||
|
||||
package externalversions
|
||||
|
||||
import (
|
||||
reflect "reflect"
|
||||
sync "sync"
|
||||
time "time"
|
||||
|
||||
versioned "github.com/zalando-incubator/postgres-operator/pkg/generated/clientset/versioned"
|
||||
acidzalando "github.com/zalando-incubator/postgres-operator/pkg/generated/informers/externalversions/acid.zalan.do"
|
||||
internalinterfaces "github.com/zalando-incubator/postgres-operator/pkg/generated/informers/externalversions/internalinterfaces"
|
||||
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
runtime "k8s.io/apimachinery/pkg/runtime"
|
||||
schema "k8s.io/apimachinery/pkg/runtime/schema"
|
||||
cache "k8s.io/client-go/tools/cache"
|
||||
)
|
||||
|
||||
// SharedInformerOption defines the functional option type for SharedInformerFactory.
|
||||
type SharedInformerOption func(*sharedInformerFactory) *sharedInformerFactory
|
||||
|
||||
type sharedInformerFactory struct {
|
||||
client versioned.Interface
|
||||
namespace string
|
||||
tweakListOptions internalinterfaces.TweakListOptionsFunc
|
||||
lock sync.Mutex
|
||||
defaultResync time.Duration
|
||||
customResync map[reflect.Type]time.Duration
|
||||
|
||||
informers map[reflect.Type]cache.SharedIndexInformer
|
||||
// startedInformers is used for tracking which informers have been started.
|
||||
// This allows Start() to be called multiple times safely.
|
||||
startedInformers map[reflect.Type]bool
|
||||
}
|
||||
|
||||
// WithCustomResyncConfig sets a custom resync period for the specified informer types.
|
||||
func WithCustomResyncConfig(resyncConfig map[v1.Object]time.Duration) SharedInformerOption {
|
||||
return func(factory *sharedInformerFactory) *sharedInformerFactory {
|
||||
for k, v := range resyncConfig {
|
||||
factory.customResync[reflect.TypeOf(k)] = v
|
||||
}
|
||||
return factory
|
||||
}
|
||||
}
|
||||
|
||||
// WithTweakListOptions sets a custom filter on all listers of the configured SharedInformerFactory.
|
||||
func WithTweakListOptions(tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerOption {
|
||||
return func(factory *sharedInformerFactory) *sharedInformerFactory {
|
||||
factory.tweakListOptions = tweakListOptions
|
||||
return factory
|
||||
}
|
||||
}
|
||||
|
||||
// WithNamespace limits the SharedInformerFactory to the specified namespace.
|
||||
func WithNamespace(namespace string) SharedInformerOption {
|
||||
return func(factory *sharedInformerFactory) *sharedInformerFactory {
|
||||
factory.namespace = namespace
|
||||
return factory
|
||||
}
|
||||
}
|
||||
|
||||
// NewSharedInformerFactory constructs a new instance of sharedInformerFactory for all namespaces.
|
||||
func NewSharedInformerFactory(client versioned.Interface, defaultResync time.Duration) SharedInformerFactory {
|
||||
return NewSharedInformerFactoryWithOptions(client, defaultResync)
|
||||
}
|
||||
|
||||
// NewFilteredSharedInformerFactory constructs a new instance of sharedInformerFactory.
|
||||
// Listers obtained via this SharedInformerFactory will be subject to the same filters
|
||||
// as specified here.
|
||||
// Deprecated: Please use NewSharedInformerFactoryWithOptions instead
|
||||
func NewFilteredSharedInformerFactory(client versioned.Interface, defaultResync time.Duration, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerFactory {
|
||||
return NewSharedInformerFactoryWithOptions(client, defaultResync, WithNamespace(namespace), WithTweakListOptions(tweakListOptions))
|
||||
}
|
||||
|
||||
// NewSharedInformerFactoryWithOptions constructs a new instance of a SharedInformerFactory with additional options.
|
||||
func NewSharedInformerFactoryWithOptions(client versioned.Interface, defaultResync time.Duration, options ...SharedInformerOption) SharedInformerFactory {
|
||||
factory := &sharedInformerFactory{
|
||||
client: client,
|
||||
namespace: v1.NamespaceAll,
|
||||
defaultResync: defaultResync,
|
||||
informers: make(map[reflect.Type]cache.SharedIndexInformer),
|
||||
startedInformers: make(map[reflect.Type]bool),
|
||||
customResync: make(map[reflect.Type]time.Duration),
|
||||
}
|
||||
|
||||
// Apply all options
|
||||
for _, opt := range options {
|
||||
factory = opt(factory)
|
||||
}
|
||||
|
||||
return factory
|
||||
}
|
||||
|
||||
// Start initializes all requested informers.
|
||||
func (f *sharedInformerFactory) Start(stopCh <-chan struct{}) {
|
||||
f.lock.Lock()
|
||||
defer f.lock.Unlock()
|
||||
|
||||
for informerType, informer := range f.informers {
|
||||
if !f.startedInformers[informerType] {
|
||||
go informer.Run(stopCh)
|
||||
f.startedInformers[informerType] = true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// WaitForCacheSync waits for all started informers' cache were synced.
|
||||
func (f *sharedInformerFactory) WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool {
|
||||
informers := func() map[reflect.Type]cache.SharedIndexInformer {
|
||||
f.lock.Lock()
|
||||
defer f.lock.Unlock()
|
||||
|
||||
informers := map[reflect.Type]cache.SharedIndexInformer{}
|
||||
for informerType, informer := range f.informers {
|
||||
if f.startedInformers[informerType] {
|
||||
informers[informerType] = informer
|
||||
}
|
||||
}
|
||||
return informers
|
||||
}()
|
||||
|
||||
res := map[reflect.Type]bool{}
|
||||
for informType, informer := range informers {
|
||||
res[informType] = cache.WaitForCacheSync(stopCh, informer.HasSynced)
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
// InternalInformerFor returns the SharedIndexInformer for obj using an internal
|
||||
// client.
|
||||
func (f *sharedInformerFactory) InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer {
|
||||
f.lock.Lock()
|
||||
defer f.lock.Unlock()
|
||||
|
||||
informerType := reflect.TypeOf(obj)
|
||||
informer, exists := f.informers[informerType]
|
||||
if exists {
|
||||
return informer
|
||||
}
|
||||
|
||||
resyncPeriod, exists := f.customResync[informerType]
|
||||
if !exists {
|
||||
resyncPeriod = f.defaultResync
|
||||
}
|
||||
|
||||
informer = newFunc(f.client, resyncPeriod)
|
||||
f.informers[informerType] = informer
|
||||
|
||||
return informer
|
||||
}
|
||||
|
||||
// SharedInformerFactory provides shared informers for resources in all known
|
||||
// API group versions.
|
||||
type SharedInformerFactory interface {
|
||||
internalinterfaces.SharedInformerFactory
|
||||
ForResource(resource schema.GroupVersionResource) (GenericInformer, error)
|
||||
WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool
|
||||
|
||||
Acid() acidzalando.Interface
|
||||
}
|
||||
|
||||
func (f *sharedInformerFactory) Acid() acidzalando.Interface {
|
||||
return acidzalando.New(f, f.namespace, f.tweakListOptions)
|
||||
}
|
||||
|
|
@ -0,0 +1,68 @@
|
|||
/*
|
||||
Copyright 2018 Compose, Zalando SE
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
*/
|
||||
|
||||
// Code generated by informer-gen. DO NOT EDIT.
|
||||
|
||||
package externalversions
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
v1 "github.com/zalando-incubator/postgres-operator/pkg/apis/acid.zalan.do/v1"
|
||||
schema "k8s.io/apimachinery/pkg/runtime/schema"
|
||||
cache "k8s.io/client-go/tools/cache"
|
||||
)
|
||||
|
||||
// GenericInformer is type of SharedIndexInformer which will locate and delegate to other
|
||||
// sharedInformers based on type
|
||||
type GenericInformer interface {
|
||||
Informer() cache.SharedIndexInformer
|
||||
Lister() cache.GenericLister
|
||||
}
|
||||
|
||||
type genericInformer struct {
|
||||
informer cache.SharedIndexInformer
|
||||
resource schema.GroupResource
|
||||
}
|
||||
|
||||
// Informer returns the SharedIndexInformer.
|
||||
func (f *genericInformer) Informer() cache.SharedIndexInformer {
|
||||
return f.informer
|
||||
}
|
||||
|
||||
// Lister returns the GenericLister.
|
||||
func (f *genericInformer) Lister() cache.GenericLister {
|
||||
return cache.NewGenericLister(f.Informer().GetIndexer(), f.resource)
|
||||
}
|
||||
|
||||
// ForResource gives generic access to a shared informer of the matching type
|
||||
// TODO extend this to unknown resources with a client pool
|
||||
func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource) (GenericInformer, error) {
|
||||
switch resource {
|
||||
// Group=acid.zalan.do, Version=v1
|
||||
case v1.SchemeGroupVersion.WithResource("postgresqls"):
|
||||
return &genericInformer{resource: resource.GroupResource(), informer: f.Acid().V1().Postgresqls().Informer()}, nil
|
||||
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("no informer found for %v", resource)
|
||||
}
|
||||
|
|
@ -0,0 +1,44 @@
|
|||
/*
|
||||
Copyright 2018 Compose, Zalando SE
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
*/
|
||||
|
||||
// Code generated by informer-gen. DO NOT EDIT.
|
||||
|
||||
package internalinterfaces
|
||||
|
||||
import (
|
||||
time "time"
|
||||
|
||||
versioned "github.com/zalando-incubator/postgres-operator/pkg/generated/clientset/versioned"
|
||||
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
runtime "k8s.io/apimachinery/pkg/runtime"
|
||||
cache "k8s.io/client-go/tools/cache"
|
||||
)
|
||||
|
||||
type NewInformerFunc func(versioned.Interface, time.Duration) cache.SharedIndexInformer
|
||||
|
||||
// SharedInformerFactory a small interface to allow for adding an informer without an import cycle
|
||||
type SharedInformerFactory interface {
|
||||
Start(stopCh <-chan struct{})
|
||||
InformerFor(obj runtime.Object, newFunc NewInformerFunc) cache.SharedIndexInformer
|
||||
}
|
||||
|
||||
type TweakListOptionsFunc func(*v1.ListOptions)
|
||||
|
|
@ -0,0 +1,33 @@
|
|||
/*
|
||||
Copyright 2018 Compose, Zalando SE
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
*/
|
||||
|
||||
// Code generated by lister-gen. DO NOT EDIT.
|
||||
|
||||
package v1
|
||||
|
||||
// PostgresqlListerExpansion allows custom methods to be added to
|
||||
// PostgresqlLister.
|
||||
type PostgresqlListerExpansion interface{}
|
||||
|
||||
// PostgresqlNamespaceListerExpansion allows custom methods to be added to
|
||||
// PostgresqlNamespaceLister.
|
||||
type PostgresqlNamespaceListerExpansion interface{}
|
||||
|
|
@ -0,0 +1,100 @@
|
|||
/*
|
||||
Copyright 2018 Compose, Zalando SE
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
*/
|
||||
|
||||
// Code generated by lister-gen. DO NOT EDIT.
|
||||
|
||||
package v1
|
||||
|
||||
import (
|
||||
v1 "github.com/zalando-incubator/postgres-operator/pkg/apis/acid.zalan.do/v1"
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
)
|
||||
|
||||
// PostgresqlLister helps list Postgresqls.
|
||||
type PostgresqlLister interface {
|
||||
// List lists all Postgresqls in the indexer.
|
||||
List(selector labels.Selector) (ret []*v1.Postgresql, err error)
|
||||
// Postgresqls returns an object that can list and get Postgresqls.
|
||||
Postgresqls(namespace string) PostgresqlNamespaceLister
|
||||
PostgresqlListerExpansion
|
||||
}
|
||||
|
||||
// postgresqlLister implements the PostgresqlLister interface.
|
||||
type postgresqlLister struct {
|
||||
indexer cache.Indexer
|
||||
}
|
||||
|
||||
// NewPostgresqlLister returns a new PostgresqlLister.
|
||||
func NewPostgresqlLister(indexer cache.Indexer) PostgresqlLister {
|
||||
return &postgresqlLister{indexer: indexer}
|
||||
}
|
||||
|
||||
// List lists all Postgresqls in the indexer.
|
||||
func (s *postgresqlLister) List(selector labels.Selector) (ret []*v1.Postgresql, err error) {
|
||||
err = cache.ListAll(s.indexer, selector, func(m interface{}) {
|
||||
ret = append(ret, m.(*v1.Postgresql))
|
||||
})
|
||||
return ret, err
|
||||
}
|
||||
|
||||
// Postgresqls returns an object that can list and get Postgresqls.
|
||||
func (s *postgresqlLister) Postgresqls(namespace string) PostgresqlNamespaceLister {
|
||||
return postgresqlNamespaceLister{indexer: s.indexer, namespace: namespace}
|
||||
}
|
||||
|
||||
// PostgresqlNamespaceLister helps list and get Postgresqls.
|
||||
type PostgresqlNamespaceLister interface {
|
||||
// List lists all Postgresqls in the indexer for a given namespace.
|
||||
List(selector labels.Selector) (ret []*v1.Postgresql, err error)
|
||||
// Get retrieves the Postgresql from the indexer for a given namespace and name.
|
||||
Get(name string) (*v1.Postgresql, error)
|
||||
PostgresqlNamespaceListerExpansion
|
||||
}
|
||||
|
||||
// postgresqlNamespaceLister implements the PostgresqlNamespaceLister
|
||||
// interface.
|
||||
type postgresqlNamespaceLister struct {
|
||||
indexer cache.Indexer
|
||||
namespace string
|
||||
}
|
||||
|
||||
// List lists all Postgresqls in the indexer for a given namespace.
|
||||
func (s postgresqlNamespaceLister) List(selector labels.Selector) (ret []*v1.Postgresql, err error) {
|
||||
err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {
|
||||
ret = append(ret, m.(*v1.Postgresql))
|
||||
})
|
||||
return ret, err
|
||||
}
|
||||
|
||||
// Get retrieves the Postgresql from the indexer for a given namespace and name.
|
||||
func (s postgresqlNamespaceLister) Get(name string) (*v1.Postgresql, error) {
|
||||
obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !exists {
|
||||
return nil, errors.NewNotFound(v1.Resource("postgresql"), name)
|
||||
}
|
||||
return obj.(*v1.Postgresql), nil
|
||||
}
|
||||
|
|
@ -1,327 +0,0 @@
|
|||
package spec
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"github.com/mohae/deepcopy"
|
||||
"regexp"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/client-go/pkg/api/v1"
|
||||
)
|
||||
|
||||
// MaintenanceWindow describes the time window when the operator is allowed to do maintenance on a cluster.
|
||||
type MaintenanceWindow struct {
|
||||
Everyday bool
|
||||
Weekday time.Weekday
|
||||
StartTime time.Time // Start time
|
||||
EndTime time.Time // End time
|
||||
}
|
||||
|
||||
// Volume describes a single volume in the manifest.
|
||||
type Volume struct {
|
||||
Size string `json:"size"`
|
||||
StorageClass string `json:"storageClass"`
|
||||
}
|
||||
|
||||
// PostgresqlParam describes PostgreSQL version and pairs of configuration parameter name - values.
|
||||
type PostgresqlParam struct {
|
||||
PgVersion string `json:"version"`
|
||||
Parameters map[string]string `json:"parameters"`
|
||||
}
|
||||
|
||||
// ResourceDescription describes CPU and memory resources defined for a cluster.
|
||||
type ResourceDescription struct {
|
||||
CPU string `json:"cpu"`
|
||||
Memory string `json:"memory"`
|
||||
}
|
||||
|
||||
// Resources describes requests and limits for the cluster resouces.
|
||||
type Resources struct {
|
||||
ResourceRequest ResourceDescription `json:"requests,omitempty"`
|
||||
ResourceLimits ResourceDescription `json:"limits,omitempty"`
|
||||
}
|
||||
|
||||
// Patroni contains Patroni-specific configuration
|
||||
type Patroni struct {
|
||||
InitDB map[string]string `json:"initdb"`
|
||||
PgHba []string `json:"pg_hba"`
|
||||
TTL uint32 `json:"ttl"`
|
||||
LoopWait uint32 `json:"loop_wait"`
|
||||
RetryTimeout uint32 `json:"retry_timeout"`
|
||||
MaximumLagOnFailover float32 `json:"maximum_lag_on_failover"` // float32 because https://github.com/kubernetes/kubernetes/issues/30213
|
||||
}
|
||||
|
||||
// CloneDescription describes which cluster the new should clone and up to which point in time
|
||||
type CloneDescription struct {
|
||||
ClusterName string `json:"cluster,omitempty"`
|
||||
Uid string `json:"uid,omitempty"`
|
||||
EndTimestamp string `json:"timestamp,omitempty"`
|
||||
}
|
||||
|
||||
type UserFlags []string
|
||||
|
||||
// PostgresStatus contains status of the PostgreSQL cluster (running, creation failed etc.)
|
||||
type PostgresStatus string
|
||||
|
||||
// possible values for PostgreSQL cluster statuses
|
||||
const (
|
||||
ClusterStatusUnknown PostgresStatus = ""
|
||||
ClusterStatusCreating PostgresStatus = "Creating"
|
||||
ClusterStatusUpdating PostgresStatus = "Updating"
|
||||
ClusterStatusUpdateFailed PostgresStatus = "UpdateFailed"
|
||||
ClusterStatusSyncFailed PostgresStatus = "SyncFailed"
|
||||
ClusterStatusAddFailed PostgresStatus = "CreateFailed"
|
||||
ClusterStatusRunning PostgresStatus = "Running"
|
||||
ClusterStatusInvalid PostgresStatus = "Invalid"
|
||||
)
|
||||
|
||||
const (
|
||||
serviceNameMaxLength = 63
|
||||
clusterNameMaxLength = serviceNameMaxLength - len("-repl")
|
||||
serviceNameRegexString = `^[a-z]([-a-z0-9]*[a-z0-9])?$`
|
||||
)
|
||||
|
||||
// Postgresql defines PostgreSQL Custom Resource Definition Object.
|
||||
type Postgresql struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.ObjectMeta `json:"metadata"`
|
||||
|
||||
Spec PostgresSpec `json:"spec"`
|
||||
Status PostgresStatus `json:"status,omitempty"`
|
||||
Error error `json:"-"`
|
||||
}
|
||||
|
||||
// PostgresSpec defines the specification for the PostgreSQL TPR.
|
||||
type PostgresSpec struct {
|
||||
PostgresqlParam `json:"postgresql"`
|
||||
Volume `json:"volume,omitempty"`
|
||||
Patroni `json:"patroni,omitempty"`
|
||||
Resources `json:"resources,omitempty"`
|
||||
|
||||
TeamID string `json:"teamId"`
|
||||
DockerImage string `json:"dockerImage,omitempty"`
|
||||
|
||||
// vars that enable load balancers are pointers because it is important to know if any of them is omitted from the Postgres manifest
|
||||
// in that case the var evaluates to nil and the value is taken from the operator config
|
||||
EnableMasterLoadBalancer *bool `json:"enableMasterLoadBalancer,omitempty"`
|
||||
EnableReplicaLoadBalancer *bool `json:"enableReplicaLoadBalancer,omitempty"`
|
||||
|
||||
// deprecated load balancer settings mantained for backward compatibility
|
||||
// see "Load balancers" operator docs
|
||||
UseLoadBalancer *bool `json:"useLoadBalancer,omitempty"`
|
||||
ReplicaLoadBalancer *bool `json:"replicaLoadBalancer,omitempty"`
|
||||
|
||||
// load balancers' source ranges are the same for master and replica services
|
||||
AllowedSourceRanges []string `json:"allowedSourceRanges"`
|
||||
|
||||
NumberOfInstances int32 `json:"numberOfInstances"`
|
||||
Users map[string]UserFlags `json:"users"`
|
||||
MaintenanceWindows []MaintenanceWindow `json:"maintenanceWindows,omitempty"`
|
||||
Clone CloneDescription `json:"clone"`
|
||||
ClusterName string `json:"-"`
|
||||
Databases map[string]string `json:"databases,omitempty"`
|
||||
Tolerations []v1.Toleration `json:"tolerations,omitempty"`
|
||||
}
|
||||
|
||||
// PostgresqlList defines a list of PostgreSQL clusters.
|
||||
type PostgresqlList struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.ListMeta `json:"metadata"`
|
||||
|
||||
Items []Postgresql `json:"items"`
|
||||
}
|
||||
|
||||
var (
|
||||
weekdays = map[string]int{"Sun": 0, "Mon": 1, "Tue": 2, "Wed": 3, "Thu": 4, "Fri": 5, "Sat": 6}
|
||||
serviceNameRegex = regexp.MustCompile(serviceNameRegexString)
|
||||
)
|
||||
|
||||
// Clone makes a deepcopy of the Postgresql structure. The Error field is nulled-out,
|
||||
// as there is no guaratee that the actual implementation of the error interface
|
||||
// will not contain any private fields not-reachable to deepcopy. This should be ok,
|
||||
// since Error is never read from a Kubernetes object.
|
||||
func (p *Postgresql) Clone() *Postgresql {
|
||||
if p == nil {
|
||||
return nil
|
||||
}
|
||||
c := deepcopy.Copy(p).(*Postgresql)
|
||||
c.Error = nil
|
||||
return c
|
||||
}
|
||||
|
||||
func parseTime(s string) (time.Time, error) {
|
||||
parts := strings.Split(s, ":")
|
||||
if len(parts) != 2 {
|
||||
return time.Time{}, fmt.Errorf("incorrect time format")
|
||||
}
|
||||
timeLayout := "15:04"
|
||||
|
||||
tp, err := time.Parse(timeLayout, s)
|
||||
if err != nil {
|
||||
return time.Time{}, err
|
||||
}
|
||||
|
||||
return tp.UTC(), nil
|
||||
}
|
||||
|
||||
func parseWeekday(s string) (time.Weekday, error) {
|
||||
weekday, ok := weekdays[s]
|
||||
if !ok {
|
||||
return time.Weekday(0), fmt.Errorf("incorrect weekday")
|
||||
}
|
||||
|
||||
return time.Weekday(weekday), nil
|
||||
}
|
||||
|
||||
// MarshalJSON converts a maintenance window definition to JSON.
|
||||
func (m *MaintenanceWindow) MarshalJSON() ([]byte, error) {
|
||||
if m.Everyday {
|
||||
return []byte(fmt.Sprintf("\"%s-%s\"",
|
||||
m.StartTime.Format("15:04"),
|
||||
m.EndTime.Format("15:04"))), nil
|
||||
}
|
||||
|
||||
return []byte(fmt.Sprintf("\"%s:%s-%s\"",
|
||||
m.Weekday.String()[:3],
|
||||
m.StartTime.Format("15:04"),
|
||||
m.EndTime.Format("15:04"))), nil
|
||||
}
|
||||
|
||||
// UnmarshalJSON convets a JSON to the maintenance window definition.
|
||||
func (m *MaintenanceWindow) UnmarshalJSON(data []byte) error {
|
||||
var (
|
||||
got MaintenanceWindow
|
||||
err error
|
||||
)
|
||||
|
||||
parts := strings.Split(string(data[1:len(data)-1]), "-")
|
||||
if len(parts) != 2 {
|
||||
return fmt.Errorf("incorrect maintenance window format")
|
||||
}
|
||||
|
||||
fromParts := strings.Split(parts[0], ":")
|
||||
switch len(fromParts) {
|
||||
case 3:
|
||||
got.Everyday = false
|
||||
got.Weekday, err = parseWeekday(fromParts[0])
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not parse weekday: %v", err)
|
||||
}
|
||||
|
||||
got.StartTime, err = parseTime(fromParts[1] + ":" + fromParts[2])
|
||||
case 2:
|
||||
got.Everyday = true
|
||||
got.StartTime, err = parseTime(fromParts[0] + ":" + fromParts[1])
|
||||
default:
|
||||
return fmt.Errorf("incorrect maintenance window format")
|
||||
}
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not parse start time: %v", err)
|
||||
}
|
||||
|
||||
got.EndTime, err = parseTime(parts[1])
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not parse end time: %v", err)
|
||||
}
|
||||
|
||||
if got.EndTime.Before(got.StartTime) {
|
||||
return fmt.Errorf("'From' time must be prior to the 'To' time")
|
||||
}
|
||||
|
||||
*m = got
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func extractClusterName(clusterName string, teamName string) (string, error) {
|
||||
teamNameLen := len(teamName)
|
||||
if len(clusterName) < teamNameLen+2 {
|
||||
return "", fmt.Errorf("name is too short")
|
||||
}
|
||||
|
||||
if teamNameLen == 0 {
|
||||
return "", fmt.Errorf("team name is empty")
|
||||
}
|
||||
|
||||
if strings.ToLower(clusterName[:teamNameLen+1]) != strings.ToLower(teamName)+"-" {
|
||||
return "", fmt.Errorf("name must match {TEAM}-{NAME} format")
|
||||
}
|
||||
if len(clusterName) > clusterNameMaxLength {
|
||||
return "", fmt.Errorf("name cannot be longer than %d characters", clusterNameMaxLength)
|
||||
}
|
||||
if !serviceNameRegex.MatchString(clusterName) {
|
||||
return "", fmt.Errorf("name must confirm to DNS-1035, regex used for validation is %q",
|
||||
serviceNameRegexString)
|
||||
}
|
||||
|
||||
return clusterName[teamNameLen+1:], nil
|
||||
}
|
||||
|
||||
func validateCloneClusterDescription(clone *CloneDescription) error {
|
||||
// when cloning from the basebackup (no end timestamp) check that the cluster name is a valid service name
|
||||
if clone.ClusterName != "" && clone.EndTimestamp == "" {
|
||||
if !serviceNameRegex.MatchString(clone.ClusterName) {
|
||||
return fmt.Errorf("clone cluster name must confirm to DNS-1035, regex used for validation is %q",
|
||||
serviceNameRegexString)
|
||||
}
|
||||
if len(clone.ClusterName) > serviceNameMaxLength {
|
||||
return fmt.Errorf("clone cluster name must be no longer than %d characters", serviceNameMaxLength)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type postgresqlListCopy PostgresqlList
|
||||
type postgresqlCopy Postgresql
|
||||
|
||||
// UnmarshalJSON converts a JSON into the PostgreSQL object.
|
||||
func (p *Postgresql) UnmarshalJSON(data []byte) error {
|
||||
var tmp postgresqlCopy
|
||||
|
||||
err := json.Unmarshal(data, &tmp)
|
||||
if err != nil {
|
||||
metaErr := json.Unmarshal(data, &tmp.ObjectMeta)
|
||||
if metaErr != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
tmp.Error = err
|
||||
tmp.Status = ClusterStatusInvalid
|
||||
|
||||
*p = Postgresql(tmp)
|
||||
|
||||
return nil
|
||||
}
|
||||
tmp2 := Postgresql(tmp)
|
||||
|
||||
if clusterName, err := extractClusterName(tmp2.ObjectMeta.Name, tmp2.Spec.TeamID); err != nil {
|
||||
tmp2.Error = err
|
||||
tmp2.Status = ClusterStatusInvalid
|
||||
} else if err := validateCloneClusterDescription(&tmp2.Spec.Clone); err != nil {
|
||||
tmp2.Error = err
|
||||
tmp2.Status = ClusterStatusInvalid
|
||||
} else {
|
||||
tmp2.Spec.ClusterName = clusterName
|
||||
}
|
||||
|
||||
*p = tmp2
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// UnmarshalJSON converts a JSON into the PostgreSQL List object.
|
||||
func (pl *PostgresqlList) UnmarshalJSON(data []byte) error {
|
||||
var tmp postgresqlListCopy
|
||||
|
||||
err := json.Unmarshal(data, &tmp)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
tmp2 := PostgresqlList(tmp)
|
||||
*pl = tmp2
|
||||
|
||||
return nil
|
||||
}
|
||||
|
|
@ -2,6 +2,7 @@ package spec
|
|||
|
||||
import (
|
||||
"database/sql"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
|
|
@ -11,32 +12,18 @@ import (
|
|||
|
||||
"github.com/Sirupsen/logrus"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/client-go/pkg/api/v1"
|
||||
"k8s.io/client-go/pkg/apis/apps/v1beta1"
|
||||
policyv1beta1 "k8s.io/client-go/pkg/apis/policy/v1beta1"
|
||||
"k8s.io/client-go/rest"
|
||||
)
|
||||
|
||||
// EventType contains type of the events for the TPRs and Pods received from Kubernetes
|
||||
type EventType string
|
||||
|
||||
// NamespacedName describes the namespace/name pairs used in Kubernetes names.
|
||||
type NamespacedName types.NamespacedName
|
||||
|
||||
// Possible values for the EventType
|
||||
const (
|
||||
EventAdd EventType = "ADD"
|
||||
EventUpdate EventType = "UPDATE"
|
||||
EventDelete EventType = "DELETE"
|
||||
EventSync EventType = "SYNC"
|
||||
|
||||
fileWithNamespace = "/var/run/secrets/kubernetes.io/serviceaccount/namespace"
|
||||
)
|
||||
const fileWithNamespace = "/var/run/secrets/kubernetes.io/serviceaccount/namespace"
|
||||
|
||||
// RoleOrigin contains the code of the origin of a role
|
||||
type RoleOrigin int
|
||||
|
||||
// The rolesOrigin constant values should be sorted by the role priority.
|
||||
// The rolesOrigin constant values must be sorted by the role priority for resolveNameConflict(...) to work.
|
||||
const (
|
||||
RoleOriginUnknown RoleOrigin = iota
|
||||
RoleOriginManifest
|
||||
|
|
@ -45,16 +32,6 @@ const (
|
|||
RoleOriginSystem
|
||||
)
|
||||
|
||||
// ClusterEvent carries the payload of the Cluster TPR events.
|
||||
type ClusterEvent struct {
|
||||
EventTime time.Time
|
||||
UID types.UID
|
||||
EventType EventType
|
||||
OldSpec *Postgresql
|
||||
NewSpec *Postgresql
|
||||
WorkerID uint32
|
||||
}
|
||||
|
||||
type syncUserOperation int
|
||||
|
||||
// Possible values for the sync user operation (removal of users is not supported yet)
|
||||
|
|
@ -64,15 +41,6 @@ const (
|
|||
PGSyncAlterSet // handle ALTER ROLE SET parameter = value
|
||||
)
|
||||
|
||||
// PodEvent describes the event for a single Pod
|
||||
type PodEvent struct {
|
||||
ResourceVersion string
|
||||
PodName NamespacedName
|
||||
PrevPod *v1.Pod
|
||||
CurPod *v1.Pod
|
||||
EventType EventType
|
||||
}
|
||||
|
||||
// PgUser contains information about a single user.
|
||||
type PgUser struct {
|
||||
Origin RoleOrigin `yaml:"-"`
|
||||
|
|
@ -107,36 +75,6 @@ type LogEntry struct {
|
|||
Message string
|
||||
}
|
||||
|
||||
// Process describes process of the cluster
|
||||
type Process struct {
|
||||
Name string
|
||||
StartTime time.Time
|
||||
}
|
||||
|
||||
// ClusterStatus describes status of the cluster
|
||||
type ClusterStatus struct {
|
||||
Team string
|
||||
Cluster string
|
||||
MasterService *v1.Service
|
||||
ReplicaService *v1.Service
|
||||
MasterEndpoint *v1.Endpoints
|
||||
ReplicaEndpoint *v1.Endpoints
|
||||
StatefulSet *v1beta1.StatefulSet
|
||||
PodDisruptionBudget *policyv1beta1.PodDisruptionBudget
|
||||
|
||||
CurrentProcess Process
|
||||
Worker uint32
|
||||
Status PostgresStatus
|
||||
Spec PostgresSpec
|
||||
Error error
|
||||
}
|
||||
|
||||
// WorkerStatus describes status of the worker
|
||||
type WorkerStatus struct {
|
||||
CurrentCluster NamespacedName
|
||||
CurrentProcess Process
|
||||
}
|
||||
|
||||
// Diff describes diff
|
||||
type Diff struct {
|
||||
EventTime time.Time
|
||||
|
|
@ -162,10 +100,12 @@ type ControllerConfig struct {
|
|||
RestConfig *rest.Config `json:"-"`
|
||||
InfrastructureRoles map[string]PgUser
|
||||
|
||||
NoDatabaseAccess bool
|
||||
NoTeamsAPI bool
|
||||
ConfigMapName NamespacedName
|
||||
Namespace string
|
||||
NoDatabaseAccess bool
|
||||
NoTeamsAPI bool
|
||||
CRDReadyWaitInterval time.Duration
|
||||
CRDReadyWaitTimeout time.Duration
|
||||
ConfigMapName NamespacedName
|
||||
Namespace string
|
||||
}
|
||||
|
||||
// cached value for the GetOperatorNamespace
|
||||
|
|
@ -185,21 +125,39 @@ func (n *NamespacedName) Decode(value string) error {
|
|||
return n.DecodeWorker(value, GetOperatorNamespace())
|
||||
}
|
||||
|
||||
func (n *NamespacedName) UnmarshalJSON(data []byte) error {
|
||||
result := NamespacedName{}
|
||||
var tmp string
|
||||
if err := json.Unmarshal(data, &tmp); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := result.Decode(tmp); err != nil {
|
||||
return err
|
||||
}
|
||||
*n = result
|
||||
return nil
|
||||
}
|
||||
|
||||
// DecodeWorker separates the decode logic to (unit) test
|
||||
// from obtaining the operator namespace that depends on k8s mounting files at runtime
|
||||
func (n *NamespacedName) DecodeWorker(value, operatorNamespace string) error {
|
||||
name := types.NewNamespacedNameFromString(value)
|
||||
var (
|
||||
name types.NamespacedName
|
||||
)
|
||||
|
||||
if strings.Trim(value, string(types.Separator)) != "" && name == (types.NamespacedName{}) {
|
||||
name.Name = value
|
||||
name.Namespace = operatorNamespace
|
||||
} else if name.Namespace == "" {
|
||||
name.Namespace = operatorNamespace
|
||||
result := strings.SplitN(value, string(types.Separator), 2)
|
||||
if len(result) < 2 {
|
||||
name.Name = result[0]
|
||||
} else {
|
||||
name.Name = strings.TrimLeft(result[1], string(types.Separator))
|
||||
name.Namespace = result[0]
|
||||
}
|
||||
|
||||
if name.Name == "" {
|
||||
return fmt.Errorf("incorrect namespaced name: %v", value)
|
||||
}
|
||||
if name.Namespace == "" {
|
||||
name.Namespace = operatorNamespace
|
||||
}
|
||||
|
||||
*n = NamespacedName(name)
|
||||
|
||||
|
|
@ -208,6 +166,8 @@ func (n *NamespacedName) DecodeWorker(value, operatorNamespace string) error {
|
|||
|
||||
func (r RoleOrigin) String() string {
|
||||
switch r {
|
||||
case RoleOriginUnknown:
|
||||
return "unknown"
|
||||
case RoleOriginManifest:
|
||||
return "manifest role"
|
||||
case RoleOriginInfrastructure:
|
||||
|
|
@ -216,8 +176,9 @@ func (r RoleOrigin) String() string {
|
|||
return "teams API role"
|
||||
case RoleOriginSystem:
|
||||
return "system role"
|
||||
default:
|
||||
panic(fmt.Sprintf("bogus role origin value %d", r))
|
||||
}
|
||||
return "unknown"
|
||||
}
|
||||
|
||||
// GetOperatorNamespace assumes serviceaccount secret is mounted by kubernetes
|
||||
|
|
|
|||
|
|
@ -14,7 +14,8 @@ import (
|
|||
type CRD struct {
|
||||
ReadyWaitInterval time.Duration `name:"ready_wait_interval" default:"4s"`
|
||||
ReadyWaitTimeout time.Duration `name:"ready_wait_timeout" default:"30s"`
|
||||
ResyncPeriod time.Duration `name:"resync_period" default:"5m"`
|
||||
ResyncPeriod time.Duration `name:"resync_period" default:"30m"`
|
||||
RepairPeriod time.Duration `name:"repair_period" default:"5m"`
|
||||
}
|
||||
|
||||
// Resources describes kubernetes resource specific configuration parameters
|
||||
|
|
@ -24,6 +25,7 @@ type Resources struct {
|
|||
PodLabelWaitTimeout time.Duration `name:"pod_label_wait_timeout" default:"10m"`
|
||||
PodDeletionWaitTimeout time.Duration `name:"pod_deletion_wait_timeout" default:"10m"`
|
||||
PodTerminateGracePeriod time.Duration `name:"pod_terminate_grace_period" default:"5m"`
|
||||
PodPriorityClassName string `name:"pod_priority_class_name"`
|
||||
ClusterLabels map[string]string `name:"cluster_labels" default:"application:spilo"`
|
||||
ClusterNameLabel string `name:"cluster_name_label" default:"cluster-name"`
|
||||
PodRoleLabel string `name:"pod_role_label" default:"spilo-role"`
|
||||
|
|
@ -40,7 +42,7 @@ type Resources struct {
|
|||
|
||||
// Auth describes authentication specific configuration parameters
|
||||
type Auth struct {
|
||||
SecretNameTemplate stringTemplate `name:"secret_name_template" default:"{username}.{cluster}.credentials.{tprkind}.{tprgroup}"`
|
||||
SecretNameTemplate StringTemplate `name:"secret_name_template" default:"{username}.{cluster}.credentials.{tprkind}.{tprgroup}"`
|
||||
PamRoleName string `name:"pam_role_name" default:"zalandos"`
|
||||
PamConfiguration string `name:"pam_configuration" default:"https://info.example.com/oauth2/tokeninfo?access_token= uid realm=/employees"`
|
||||
TeamsAPIUrl string `name:"teams_api_url" default:"https://teams.example.com/api/"`
|
||||
|
|
@ -68,29 +70,32 @@ type Config struct {
|
|||
Auth
|
||||
Scalyr
|
||||
|
||||
WatchedNamespace string `name:"watched_namespace"` // special values: "*" means 'watch all namespaces', the empty string "" means 'watch a namespace where operator is deployed to'
|
||||
EtcdHost string `name:"etcd_host" default:""` // special values: the empty string "" means Patroni will use k8s as a DCS
|
||||
DockerImage string `name:"docker_image" default:"registry.opensource.zalan.do/acid/spilo-cdp-10:1.4-p8"`
|
||||
WatchedNamespace string `name:"watched_namespace"` // special values: "*" means 'watch all namespaces', the empty string "" means 'watch a namespace where operator is deployed to'
|
||||
EtcdHost string `name:"etcd_host" default:""` // special values: the empty string "" means Patroni will use k8s as a DCS
|
||||
DockerImage string `name:"docker_image" default:"registry.opensource.zalan.do/acid/spilo-cdp-10:1.4-p8"`
|
||||
Sidecars map[string]string `name:"sidecar_docker_images"`
|
||||
// default name `operator` enables backward compatibility with the older ServiceAccountName field
|
||||
PodServiceAccountName string `name:"pod_service_account_name" default:"operator"`
|
||||
// value of this string must be valid JSON or YAML; see initPodServiceAccount
|
||||
PodServiceAccountDefinition string `name:"pod_service_account_definition" default:""`
|
||||
DbHostedZone string `name:"db_hosted_zone" default:"db.example.com"`
|
||||
WALES3Bucket string `name:"wal_s3_bucket"`
|
||||
LogS3Bucket string `name:"log_s3_bucket"`
|
||||
KubeIAMRole string `name:"kube_iam_role"`
|
||||
DebugLogging bool `name:"debug_logging" default:"true"`
|
||||
EnableDBAccess bool `name:"enable_database_access" default:"true"`
|
||||
EnableTeamsAPI bool `name:"enable_teams_api" default:"true"`
|
||||
EnableTeamSuperuser bool `name:"enable_team_superuser" default:"false"`
|
||||
TeamAdminRole string `name:"team_admin_role" default:"admin"`
|
||||
EnableMasterLoadBalancer bool `name:"enable_master_load_balancer" default:"true"`
|
||||
EnableReplicaLoadBalancer bool `name:"enable_replica_load_balancer" default:"false"`
|
||||
PodServiceAccountDefinition string `name:"pod_service_account_definition" default:""`
|
||||
PodServiceAccountRoleBindingDefinition string `name:"pod_service_account_role_binding_definition" default:""`
|
||||
DbHostedZone string `name:"db_hosted_zone" default:"db.example.com"`
|
||||
AWSRegion string `name:"aws_region" default:"eu-central-1"`
|
||||
WALES3Bucket string `name:"wal_s3_bucket"`
|
||||
LogS3Bucket string `name:"log_s3_bucket"`
|
||||
KubeIAMRole string `name:"kube_iam_role"`
|
||||
DebugLogging bool `name:"debug_logging" default:"true"`
|
||||
EnableDBAccess bool `name:"enable_database_access" default:"true"`
|
||||
EnableTeamsAPI bool `name:"enable_teams_api" default:"true"`
|
||||
EnableTeamSuperuser bool `name:"enable_team_superuser" default:"false"`
|
||||
TeamAdminRole string `name:"team_admin_role" default:"admin"`
|
||||
EnableMasterLoadBalancer bool `name:"enable_master_load_balancer" default:"true"`
|
||||
EnableReplicaLoadBalancer bool `name:"enable_replica_load_balancer" default:"false"`
|
||||
// deprecated and kept for backward compatibility
|
||||
EnableLoadBalancer *bool `name:"enable_load_balancer"`
|
||||
MasterDNSNameFormat stringTemplate `name:"master_dns_name_format" default:"{cluster}.{team}.{hostedzone}"`
|
||||
ReplicaDNSNameFormat stringTemplate `name:"replica_dns_name_format" default:"{cluster}-repl.{team}.{hostedzone}"`
|
||||
PDBNameFormat stringTemplate `name:"pdb_name_format" default:"postgres-{cluster}-pdb"`
|
||||
MasterDNSNameFormat StringTemplate `name:"master_dns_name_format" default:"{cluster}.{team}.{hostedzone}"`
|
||||
ReplicaDNSNameFormat StringTemplate `name:"replica_dns_name_format" default:"{cluster}-repl.{team}.{hostedzone}"`
|
||||
PDBNameFormat StringTemplate `name:"pdb_name_format" default:"postgres-{cluster}-pdb"`
|
||||
Workers uint32 `name:"workers" default:"4"`
|
||||
APIPort int `name:"api_port" default:"8080"`
|
||||
RingLogLines int `name:"ring_log_lines" default:"100"`
|
||||
|
|
@ -98,6 +103,7 @@ type Config struct {
|
|||
TeamAPIRoleConfiguration map[string]string `name:"team_api_role_configuration" default:"log_statement:all"`
|
||||
PodTerminateGracePeriod time.Duration `name:"pod_terminate_grace_period" default:"5m"`
|
||||
ProtectedRoles []string `name:"protected_role_names" default:"admin"`
|
||||
PostgresSuperuserTeams []string `name:"postgres_superuser_teams" default:""`
|
||||
}
|
||||
|
||||
// MustMarshal marshals the config or panics
|
||||
|
|
|
|||
|
|
@ -19,7 +19,7 @@ type fieldInfo struct {
|
|||
Field reflect.Value
|
||||
}
|
||||
|
||||
type stringTemplate string
|
||||
type StringTemplate string
|
||||
|
||||
func decoderFrom(field reflect.Value) (d decoder) {
|
||||
// it may be impossible for a struct field to fail this check
|
||||
|
|
@ -172,10 +172,9 @@ func processField(value string, field reflect.Value) error {
|
|||
type parserState int
|
||||
|
||||
const (
|
||||
Plain parserState = iota
|
||||
DoubleQuoted
|
||||
SingleQuoted
|
||||
Escape
|
||||
plain parserState = iota
|
||||
doubleQuoted
|
||||
singleQuoted
|
||||
)
|
||||
|
||||
// Split the pair candidates by commas not located inside open quotes
|
||||
|
|
@ -183,7 +182,7 @@ const (
|
|||
// expect to find them inside the map values for our use cases
|
||||
func getMapPairsFromString(value string) (pairs []string, err error) {
|
||||
pairs = make([]string, 0)
|
||||
state := Plain
|
||||
state := plain
|
||||
var start, quote int
|
||||
|
||||
for i, ch := range strings.Split(value, "") {
|
||||
|
|
@ -191,29 +190,29 @@ func getMapPairsFromString(value string) (pairs []string, err error) {
|
|||
fmt.Printf("Parser warning: ecape character '\\' have no effect on quotes inside the configuration value %s\n", value)
|
||||
}
|
||||
if ch == `"` {
|
||||
if state == Plain {
|
||||
state = DoubleQuoted
|
||||
if state == plain {
|
||||
state = doubleQuoted
|
||||
quote = i
|
||||
} else if state == DoubleQuoted {
|
||||
state = Plain
|
||||
} else if state == doubleQuoted {
|
||||
state = plain
|
||||
quote = 0
|
||||
}
|
||||
}
|
||||
if ch == "'" {
|
||||
if state == Plain {
|
||||
state = SingleQuoted
|
||||
if state == plain {
|
||||
state = singleQuoted
|
||||
quote = i
|
||||
} else if state == SingleQuoted {
|
||||
state = Plain
|
||||
} else if state == singleQuoted {
|
||||
state = plain
|
||||
quote = 0
|
||||
}
|
||||
}
|
||||
if ch == "," && state == Plain {
|
||||
if ch == "," && state == plain {
|
||||
pairs = append(pairs, strings.Trim(value[start:i], " \t"))
|
||||
start = i + 1
|
||||
}
|
||||
}
|
||||
if state != Plain {
|
||||
if state != plain {
|
||||
err = fmt.Errorf("unmatched quote starting at position %d", quote+1)
|
||||
pairs = nil
|
||||
} else {
|
||||
|
|
@ -222,13 +221,13 @@ func getMapPairsFromString(value string) (pairs []string, err error) {
|
|||
return
|
||||
}
|
||||
|
||||
func (f *stringTemplate) Decode(value string) error {
|
||||
*f = stringTemplate(value)
|
||||
func (f *StringTemplate) Decode(value string) error {
|
||||
*f = StringTemplate(value)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *stringTemplate) Format(a ...string) string {
|
||||
func (f *StringTemplate) Format(a ...string) string {
|
||||
res := string(*f)
|
||||
|
||||
for i := 0; i < len(a); i += 2 {
|
||||
|
|
@ -238,6 +237,6 @@ func (f *stringTemplate) Format(a ...string) string {
|
|||
return res
|
||||
}
|
||||
|
||||
func (f stringTemplate) MarshalJSON() ([]byte, error) {
|
||||
func (f StringTemplate) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal(string(f))
|
||||
}
|
||||
|
|
|
|||
|
|
@ -4,8 +4,6 @@ import "time"
|
|||
|
||||
// AWS specific constants used by other modules
|
||||
const (
|
||||
// default region for AWS. TODO: move it to the operator configuration
|
||||
AWSRegion = "eu-central-1"
|
||||
// EBS related constants
|
||||
EBSVolumeIDStart = "/vol-"
|
||||
EBSProvisioner = "kubernetes.io/aws-ebs"
|
||||
|
|
|
|||
|
|
@ -1,10 +0,0 @@
|
|||
package constants
|
||||
|
||||
// Different properties of the PostgreSQL Custom Resource Definition
|
||||
const (
|
||||
CRDKind = "postgresql"
|
||||
CRDResource = "postgresqls"
|
||||
CRDShort = "pg"
|
||||
CRDGroup = "acid.zalan.do"
|
||||
CRDApiVersion = "v1"
|
||||
)
|
||||
|
|
@ -2,24 +2,22 @@ package k8sutil
|
|||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
|
||||
"github.com/zalando-incubator/postgres-operator/pkg/util/constants"
|
||||
"k8s.io/api/core/v1"
|
||||
policybeta1 "k8s.io/api/policy/v1beta1"
|
||||
apiextclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
|
||||
apiextbeta1 "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/apimachinery/pkg/runtime/serializer"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/kubernetes/typed/apps/v1beta1"
|
||||
v1core "k8s.io/client-go/kubernetes/typed/core/v1"
|
||||
policyv1beta1 "k8s.io/client-go/kubernetes/typed/policy/v1beta1"
|
||||
"k8s.io/client-go/pkg/api"
|
||||
"k8s.io/client-go/pkg/api/v1"
|
||||
policybeta1 "k8s.io/client-go/pkg/apis/policy/v1beta1"
|
||||
rbacv1beta1 "k8s.io/client-go/kubernetes/typed/rbac/v1beta1"
|
||||
"k8s.io/client-go/rest"
|
||||
"k8s.io/client-go/tools/clientcmd"
|
||||
"reflect"
|
||||
|
||||
"github.com/zalando-incubator/postgres-operator/pkg/util/constants"
|
||||
acidv1client "github.com/zalando-incubator/postgres-operator/pkg/generated/clientset/versioned"
|
||||
)
|
||||
|
||||
// KubernetesClient describes getters for Kubernetes objects
|
||||
|
|
@ -35,11 +33,12 @@ type KubernetesClient struct {
|
|||
v1core.NamespacesGetter
|
||||
v1core.ServiceAccountsGetter
|
||||
v1beta1.StatefulSetsGetter
|
||||
rbacv1beta1.RoleBindingsGetter
|
||||
policyv1beta1.PodDisruptionBudgetsGetter
|
||||
apiextbeta1.CustomResourceDefinitionsGetter
|
||||
|
||||
RESTClient rest.Interface
|
||||
CRDREST rest.Interface
|
||||
RESTClient rest.Interface
|
||||
AcidV1ClientSet *acidv1client.Clientset
|
||||
}
|
||||
|
||||
// RestConfig creates REST config
|
||||
|
|
@ -83,20 +82,7 @@ func NewFromConfig(cfg *rest.Config) (KubernetesClient, error) {
|
|||
kubeClient.StatefulSetsGetter = client.AppsV1beta1()
|
||||
kubeClient.PodDisruptionBudgetsGetter = client.PolicyV1beta1()
|
||||
kubeClient.RESTClient = client.CoreV1().RESTClient()
|
||||
|
||||
cfg2 := *cfg
|
||||
cfg2.GroupVersion = &schema.GroupVersion{
|
||||
Group: constants.CRDGroup,
|
||||
Version: constants.CRDApiVersion,
|
||||
}
|
||||
cfg2.APIPath = constants.K8sAPIPath
|
||||
cfg2.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: api.Codecs}
|
||||
|
||||
crd, err := rest.RESTClientFor(&cfg2)
|
||||
if err != nil {
|
||||
return kubeClient, fmt.Errorf("could not get rest client: %v", err)
|
||||
}
|
||||
kubeClient.CRDREST = crd
|
||||
kubeClient.RoleBindingsGetter = client.RbacV1beta1()
|
||||
|
||||
apiextClient, err := apiextclient.NewForConfig(cfg)
|
||||
if err != nil {
|
||||
|
|
@ -104,6 +90,7 @@ func NewFromConfig(cfg *rest.Config) (KubernetesClient, error) {
|
|||
}
|
||||
|
||||
kubeClient.CustomResourceDefinitionsGetter = apiextClient.ApiextensionsV1beta1()
|
||||
kubeClient.AcidV1ClientSet = acidv1client.NewForConfigOrDie(cfg)
|
||||
|
||||
return kubeClient, nil
|
||||
}
|
||||
|
|
|
|||
|
|
@ -9,7 +9,7 @@ import (
|
|||
"time"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
"k8s.io/client-go/pkg/api/v1"
|
||||
"k8s.io/api/core/v1"
|
||||
)
|
||||
|
||||
const (
|
||||
|
|
@ -47,7 +47,7 @@ func apiURL(masterPod *v1.Pod) string {
|
|||
return fmt.Sprintf("http://%s:%d", masterPod.Status.PodIP, apiPort)
|
||||
}
|
||||
|
||||
func (p *Patroni) httpPostOrPatch(method string, url string, body *bytes.Buffer) error {
|
||||
func (p *Patroni) httpPostOrPatch(method string, url string, body *bytes.Buffer) (err error) {
|
||||
request, err := http.NewRequest(method, url, body)
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not create request: %v", err)
|
||||
|
|
@ -59,7 +59,16 @@ func (p *Patroni) httpPostOrPatch(method string, url string, body *bytes.Buffer)
|
|||
if err != nil {
|
||||
return fmt.Errorf("could not make request: %v", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
defer func() {
|
||||
if err2 := resp.Body.Close(); err2 != nil {
|
||||
if err != nil {
|
||||
err = fmt.Errorf("could not close request: %v, prior error: %v", err2, err)
|
||||
} else {
|
||||
err = fmt.Errorf("could not close request: %v", err2)
|
||||
}
|
||||
return
|
||||
}
|
||||
}()
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
bodyBytes, err := ioutil.ReadAll(resp.Body)
|
||||
|
|
@ -80,12 +89,11 @@ func (p *Patroni) Switchover(master *v1.Pod, candidate string) error {
|
|||
return fmt.Errorf("could not encode json: %v", err)
|
||||
}
|
||||
return p.httpPostOrPatch(http.MethodPost, apiURL(master)+failoverPath, buf)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
//TODO: add an option call /patroni to check if it is necessary to restart the server
|
||||
// SetPostgresParameters sets Postgres options via Patroni patch API call.
|
||||
|
||||
//SetPostgresParameters sets Postgres options via Patroni patch API call.
|
||||
func (p *Patroni) SetPostgresParameters(server *v1.Pod, parameters map[string]string) error {
|
||||
buf := &bytes.Buffer{}
|
||||
err := json.NewEncoder(buf).Encode(map[string]map[string]interface{}{"postgresql": {"parameters": parameters}})
|
||||
|
|
|
|||
|
|
@ -76,17 +76,15 @@ func (t *API) TeamInfo(teamID, token string) (tm *Team, err error) {
|
|||
t.logger.Debugf("request url: %s", url)
|
||||
req, err = http.NewRequest("GET", url, nil)
|
||||
if err != nil {
|
||||
return
|
||||
return nil, err
|
||||
}
|
||||
|
||||
req.Header.Add("Authorization", "Bearer "+token)
|
||||
resp, err = t.httpClient.Do(req)
|
||||
if err != nil {
|
||||
return
|
||||
if resp, err = t.httpClient.Do(req); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer func() {
|
||||
closeErr := resp.Body.Close()
|
||||
if closeErr != nil {
|
||||
if closeErr := resp.Body.Close(); closeErr != nil {
|
||||
err = fmt.Errorf("error when closing response: %v", closeErr)
|
||||
}
|
||||
}()
|
||||
|
|
@ -95,27 +93,20 @@ func (t *API) TeamInfo(teamID, token string) (tm *Team, err error) {
|
|||
d := json.NewDecoder(resp.Body)
|
||||
err = d.Decode(&raw)
|
||||
if err != nil {
|
||||
err = fmt.Errorf("team API query failed with status code %d and malformed response: %v", resp.StatusCode, err)
|
||||
return
|
||||
return nil, fmt.Errorf("team API query failed with status code %d and malformed response: %v", resp.StatusCode, err)
|
||||
}
|
||||
|
||||
if errMessage, ok := raw["error"]; ok {
|
||||
err = fmt.Errorf("team API query failed with status code %d and message: '%v'", resp.StatusCode, string(errMessage))
|
||||
return
|
||||
return nil, fmt.Errorf("team API query failed with status code %d and message: '%v'", resp.StatusCode, string(errMessage))
|
||||
}
|
||||
err = fmt.Errorf("team API query failed with status code %d", resp.StatusCode)
|
||||
|
||||
return
|
||||
return nil, fmt.Errorf("team API query failed with status code %d", resp.StatusCode)
|
||||
}
|
||||
|
||||
tm = &Team{}
|
||||
d := json.NewDecoder(resp.Body)
|
||||
err = d.Decode(tm)
|
||||
if err != nil {
|
||||
err = fmt.Errorf("could not parse team API response: %v", err)
|
||||
tm = nil
|
||||
return
|
||||
if err = d.Decode(tm); err != nil {
|
||||
return nil, fmt.Errorf("could not parse team API response: %v", err)
|
||||
}
|
||||
|
||||
return
|
||||
return tm, nil
|
||||
}
|
||||
|
|
|
|||
|
|
@ -30,8 +30,9 @@ type DefaultUserSyncStrategy struct {
|
|||
|
||||
// ProduceSyncRequests figures out the types of changes that need to happen with the given users.
|
||||
func (strategy DefaultUserSyncStrategy) ProduceSyncRequests(dbUsers spec.PgUserMap,
|
||||
newUsers spec.PgUserMap) (reqs []spec.PgSyncUserRequest) {
|
||||
newUsers spec.PgUserMap) []spec.PgSyncUserRequest {
|
||||
|
||||
var reqs []spec.PgSyncUserRequest
|
||||
// No existing roles are deleted or stripped of role memebership/flags
|
||||
for name, newUser := range newUsers {
|
||||
dbUser, exists := dbUsers[name]
|
||||
|
|
@ -66,7 +67,7 @@ func (strategy DefaultUserSyncStrategy) ProduceSyncRequests(dbUsers spec.PgUserM
|
|||
}
|
||||
}
|
||||
|
||||
return
|
||||
return reqs
|
||||
}
|
||||
|
||||
// ExecuteSyncRequests makes actual database changes from the requests passed in its arguments.
|
||||
|
|
@ -102,7 +103,7 @@ func (strategy DefaultUserSyncStrategy) alterPgUserSet(user spec.PgUser, db *sql
|
|||
return
|
||||
}
|
||||
|
||||
func (strategy DefaultUserSyncStrategy) createPgUser(user spec.PgUser, db *sql.DB) (err error) {
|
||||
func (strategy DefaultUserSyncStrategy) createPgUser(user spec.PgUser, db *sql.DB) error {
|
||||
var userFlags []string
|
||||
var userPassword string
|
||||
|
||||
|
|
@ -120,16 +121,14 @@ func (strategy DefaultUserSyncStrategy) createPgUser(user spec.PgUser, db *sql.D
|
|||
}
|
||||
query := fmt.Sprintf(createUserSQL, user.Name, strings.Join(userFlags, " "), userPassword)
|
||||
|
||||
_, err = db.Exec(query) // TODO: Try several times
|
||||
if err != nil {
|
||||
err = fmt.Errorf("dB error: %v, query: %s", err, query)
|
||||
return
|
||||
if _, err := db.Exec(query); err != nil { // TODO: Try several times
|
||||
return fmt.Errorf("dB error: %v, query: %s", err, query)
|
||||
}
|
||||
|
||||
return
|
||||
return nil
|
||||
}
|
||||
|
||||
func (strategy DefaultUserSyncStrategy) alterPgUser(user spec.PgUser, db *sql.DB) (err error) {
|
||||
func (strategy DefaultUserSyncStrategy) alterPgUser(user spec.PgUser, db *sql.DB) error {
|
||||
var resultStmt []string
|
||||
|
||||
if user.Password != "" || len(user.Flags) > 0 {
|
||||
|
|
@ -140,19 +139,16 @@ func (strategy DefaultUserSyncStrategy) alterPgUser(user spec.PgUser, db *sql.DB
|
|||
grantStmt := produceGrantStmt(user)
|
||||
resultStmt = append(resultStmt, grantStmt)
|
||||
}
|
||||
if len(resultStmt) == 0 {
|
||||
return nil
|
||||
|
||||
if len(resultStmt) > 0 {
|
||||
query := fmt.Sprintf(doBlockStmt, strings.Join(resultStmt, ";"))
|
||||
|
||||
if _, err := db.Exec(query); err != nil { // TODO: Try several times
|
||||
return fmt.Errorf("dB error: %v query %s", err, query)
|
||||
}
|
||||
}
|
||||
|
||||
query := fmt.Sprintf(doBlockStmt, strings.Join(resultStmt, ";"))
|
||||
|
||||
_, err = db.Exec(query) // TODO: Try several times
|
||||
if err != nil {
|
||||
err = fmt.Errorf("dB error: %v query %s", err, query)
|
||||
return
|
||||
}
|
||||
|
||||
return
|
||||
return nil
|
||||
}
|
||||
|
||||
func produceAlterStmt(user spec.PgUser) string {
|
||||
|
|
@ -205,7 +201,7 @@ func quoteParameterValue(name, val string) string {
|
|||
// containing spaces (but something more complex, like double quotes inside double quotes or spaces
|
||||
// in the schema name would break the parsing code in the operator.)
|
||||
if start == '\'' && end == '\'' {
|
||||
return fmt.Sprintf("%s", val[1:len(val)-1])
|
||||
return val[1 : len(val)-1]
|
||||
}
|
||||
|
||||
return val
|
||||
|
|
|
|||
|
|
@ -1,7 +1,7 @@
|
|||
package util
|
||||
|
||||
import (
|
||||
"crypto/md5"
|
||||
"crypto/md5" // #nosec we need it to for PostgreSQL md5 passwords
|
||||
"encoding/hex"
|
||||
"math/rand"
|
||||
"regexp"
|
||||
|
|
@ -48,7 +48,7 @@ func PGUserPassword(user spec.PgUser) string {
|
|||
// Avoid processing already encrypted or empty passwords
|
||||
return user.Password
|
||||
}
|
||||
s := md5.Sum([]byte(user.Password + user.Name))
|
||||
s := md5.Sum([]byte(user.Password + user.Name)) // #nosec, using md5 since PostgreSQL uses it for hashing passwords.
|
||||
return md5prefix + hex.EncodeToString(s[:])
|
||||
}
|
||||
|
||||
|
|
@ -120,6 +120,7 @@ func MapContains(haystack, needle map[string]string) bool {
|
|||
return true
|
||||
}
|
||||
|
||||
// Coalesce returns the first argument if it is not null, otherwise the second one.
|
||||
func Coalesce(val, defaultVal string) string {
|
||||
if val == "" {
|
||||
return defaultVal
|
||||
|
|
|
|||
|
|
@ -7,7 +7,7 @@ import (
|
|||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/session"
|
||||
"github.com/aws/aws-sdk-go/service/ec2"
|
||||
"k8s.io/client-go/pkg/api/v1"
|
||||
"k8s.io/api/core/v1"
|
||||
|
||||
"github.com/zalando-incubator/postgres-operator/pkg/util/constants"
|
||||
"github.com/zalando-incubator/postgres-operator/pkg/util/retryutil"
|
||||
|
|
@ -16,11 +16,12 @@ import (
|
|||
// EBSVolumeResizer implements volume resizing interface for AWS EBS volumes.
|
||||
type EBSVolumeResizer struct {
|
||||
connection *ec2.EC2
|
||||
AWSRegion string
|
||||
}
|
||||
|
||||
// ConnectToProvider connects to AWS.
|
||||
func (c *EBSVolumeResizer) ConnectToProvider() error {
|
||||
sess, err := session.NewSession(&aws.Config{Region: aws.String(constants.AWSRegion)})
|
||||
sess, err := session.NewSession(&aws.Config{Region: aws.String(c.AWSRegion)})
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not establish AWS session: %v", err)
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,7 +1,7 @@
|
|||
package volumes
|
||||
|
||||
import (
|
||||
"k8s.io/client-go/pkg/api/v1"
|
||||
"k8s.io/api/core/v1"
|
||||
)
|
||||
|
||||
// VolumeResizer defines the set of methods used to implememnt provider-specific resizing of persistent volumes.
|
||||
|
|
|
|||
Loading…
Reference in New Issue