Merge branch 'master' into update-quickstart

This commit is contained in:
Felix Kunde 2019-04-24 16:57:07 +02:00
commit 99bbbce93a
13 changed files with 111 additions and 28 deletions

View File

@ -8,7 +8,7 @@ branches:
language: go language: go
go: go:
- "1.10.x" - "1.12.x"
before_install: before_install:
- go get github.com/Masterminds/glide - go get github.com/Masterminds/glide

View File

@ -21,10 +21,11 @@ config:
debug_logging: "true" debug_logging: "true"
workers: "4" workers: "4"
docker_image: registry.opensource.zalan.do/acid/spilo-cdp-10:1.5-p35 docker_image: registry.opensource.zalan.do/acid/spilo-cdp-11:1.5-p70
secret_name_template: '{username}.{cluster}.credentials' secret_name_template: '{username}.{cluster}.credentials'
super_username: postgres super_username: postgres
enable_teams_api: "false" enable_teams_api: "false"
spilo_privileged: "false"
# set_memory_request_to_limit: "true" # set_memory_request_to_limit: "true"
# postgres_superuser_teams: "postgres_superusers" # postgres_superuser_teams: "postgres_superusers"
# enable_team_superuser: "false" # enable_team_superuser: "false"
@ -36,6 +37,7 @@ config:
# pam_role_name: zalandos # pam_role_name: zalandos
# pam_configuration: | # pam_configuration: |
# https://info.example.com/oauth2/tokeninfo?access_token= uid realm=/employees # https://info.example.com/oauth2/tokeninfo?access_token= uid realm=/employees
# inherited_labels: ""
aws_region: eu-central-1 aws_region: eu-central-1
db_hosted_zone: db.example.com db_hosted_zone: db.example.com
master_dns_name_format: '{cluster}.{team}.staging.{hostedzone}' master_dns_name_format: '{cluster}.{team}.staging.{hostedzone}'
@ -57,7 +59,9 @@ config:
resource_check_interval: 3s resource_check_interval: 3s
resource_check_timeout: 10m resource_check_timeout: 10m
resync_period: 5m resync_period: 5m
pod_management_policy: "ordered_ready"
enable_pod_antiaffinity: "false"
pod_antiaffinity_topology_key: "kubernetes.io/hostname"
rbac: rbac:
# Specifies whether RBAC resources should be created # Specifies whether RBAC resources should be created
create: true create: true

View File

@ -11,11 +11,11 @@ pipeline:
apt-get update apt-get update
- desc: 'Install required build software' - desc: 'Install required build software'
cmd: | cmd: |
apt-get install -y make git apt-transport-https ca-certificates curl apt-get install -y make git apt-transport-https ca-certificates curl build-essential
- desc: 'Install go' - desc: 'Install go'
cmd: | cmd: |
cd /tmp cd /tmp
wget -q https://storage.googleapis.com/golang/go1.9.linux-amd64.tar.gz -O go.tar.gz wget -q https://storage.googleapis.com/golang/go1.12.linux-amd64.tar.gz -O go.tar.gz
tar -xf go.tar.gz tar -xf go.tar.gz
mv go /usr/local mv go /usr/local
ln -s /usr/local/go/bin/go /usr/bin/go ln -s /usr/local/go/bin/go /usr/bin/go

View File

@ -10,7 +10,8 @@ After the installation, issue
$ minikube start $ minikube start
``` ```
Note: if you are running on a Mac, you may also use Docker for Mac Kubernetes instead of a docker-machine. Note: if you are running on a Mac, you may also use Docker for Mac Kubernetes
instead of a docker-machine.
Once you have it started successfully, use [the quickstart Once you have it started successfully, use [the quickstart
guide](https://github.com/kubernetes/minikube#quickstart) in order to test your guide](https://github.com/kubernetes/minikube#quickstart) in order to test your
@ -79,7 +80,8 @@ cluster.
## Connect to PostgreSQL ## Connect to PostgreSQL
We can use the generated secret of the `postgres` robot user to connect to our `acid-minimal-cluster` master running in Minikube: We can use the generated secret of the `postgres` robot user to connect to our
`acid-minimal-cluster` master running in Minikube:
```bash ```bash
$ export HOST_PORT=$(minikube service acid-minimal-cluster --url | sed 's,.*/,,') $ export HOST_PORT=$(minikube service acid-minimal-cluster --url | sed 's,.*/,,')
@ -166,8 +168,15 @@ minikube. The following steps will get you the docker image built and deployed.
# Code generation # Code generation
The operator employs k8s-provided code generation to obtain deep copy methods and Kubernetes-like APIs for its custom resource definitions, namely the Postgres CRD and the operator CRD. The usage of the code generation follows conventions from the k8s community. Relevant scripts live in the `hack` directory: the `update-codegen.sh` triggers code generation for the APIs defined in `pkg/apis/acid.zalan.do/`, The operator employs k8s-provided code generation to obtain deep copy methods
the `verify-codegen.sh` checks if the generated code is up-to-date (to be used within CI). The `/pkg/generated/` contains the resultant code. To make these scripts work, you may need to `export GOPATH=$(go env GOPATH)` and Kubernetes-like APIs for its custom resource definitions
CRD and the operator CRD. The usage of the code generation follows conventions
from the k8s community. Relevant scripts live in the `hack` directory:
* `update-codegen.sh` triggers code generation for the APIs defined in `pkg/apis/acid.zalan.do/`,
* `verify-codegen.sh` checks if the generated code is up-to-date (to be used within CI).
The `/pkg/generated/` contains the resultant code. To make these scripts work,
you may need to `export GOPATH=$(go env GOPATH)`
References for code generation are: References for code generation are:
* [Relevant pull request](https://github.com/zalando/postgres-operator/pull/369) * [Relevant pull request](https://github.com/zalando/postgres-operator/pull/369)
@ -176,7 +185,12 @@ See comments there for minor issues that can sometimes break the generation proc
* [Code Generation for CustomResources](https://blog.openshift.com/kubernetes-deep-dive-code-generation-customresources/) - intro post on the topic * [Code Generation for CustomResources](https://blog.openshift.com/kubernetes-deep-dive-code-generation-customresources/) - intro post on the topic
* Code generation in [Prometheus](https://github.com/coreos/prometheus-operator) and [etcd](https://github.com/coreos/etcd-operator) operators * Code generation in [Prometheus](https://github.com/coreos/prometheus-operator) and [etcd](https://github.com/coreos/etcd-operator) operators
To debug the generated API locally, use the [kubectl proxy](https://kubernetes.io/docs/tasks/access-kubernetes-api/http-proxy-access-api/) and `kubectl --v=8` log level to display contents of HTTP requests (run the operator itself with `--v=8` to log all REST API requests). To attach a debugger to the operator, use the `-outofcluster` option to run the operator locally on the developer's laptop (and not in a docker container). To debug the generated API locally, use the
[kubectl proxy](https://kubernetes.io/docs/tasks/access-kubernetes-api/http-proxy-access-api/)
and `kubectl --v=8` log level to display contents of HTTP requests (run the
operator itself with `--v=8` to log all REST API requests). To attach a debugger
to the operator, use the `-outofcluster` option to run the operator locally on
the developer's laptop (and not in a docker container).
# Debugging the operator # Debugging the operator
@ -201,15 +215,15 @@ defaults to 4)
* /workers/$id/logs - log of the operations performed by a given worker * /workers/$id/logs - log of the operations performed by a given worker
* /clusters/ - list of teams and clusters known to the operator * /clusters/ - list of teams and clusters known to the operator
* /clusters/$team - list of clusters for the given team * /clusters/$team - list of clusters for the given team
* /clusters/$team/$namespace/$clustername - detailed status of the cluster, including the * /clusters/$team/$namespace/$clustername - detailed status of the cluster,
specifications for CRD, master and replica services, endpoints and including the specifications for CRD, master and replica services, endpoints
statefulsets, as well as any errors and the worker that cluster is assigned and statefulsets, as well as any errors and the worker that cluster is
to. assigned to.
* /clusters/$team/$namespace/$clustername/logs/ - logs of all operations performed to the * /clusters/$team/$namespace/$clustername/logs/ - logs of all operations
cluster so far. performed to the cluster so far.
* /clusters/$team/$namespace/$clustername/history/ - history of cluster changes triggered * /clusters/$team/$namespace/$clustername/history/ - history of cluster changes
by the changes of the manifest (shows the somewhat obscure diff and what triggered by the changes of the manifest (shows the somewhat obscure diff and
exactly has triggered the change) what exactly has triggered the change)
The operator also supports pprof endpoints listed at the The operator also supports pprof endpoints listed at the
[pprof package](https://golang.org/pkg/net/http/pprof/), such as: [pprof package](https://golang.org/pkg/net/http/pprof/), such as:
@ -290,10 +304,46 @@ PASS
``` ```
To test the multinamespace setup, you can use To test the multinamespace setup, you can use
``` ```
./run_operator_locally.sh --rebuild-operator ./run_operator_locally.sh --rebuild-operator
``` ```
It will automatically create an `acid-minimal-cluster` in the namespace `test`. Then you can for example check the Patroni logs: It will automatically create an `acid-minimal-cluster` in the namespace `test`.
Then you can for example check the Patroni logs:
``` ```
kubectl logs acid-minimal-cluster-0 kubectl logs acid-minimal-cluster-0
``` ```
## Introduce additional configuration parameters
In the case you want to add functionality to the operator that shall be
controlled via the operator configuration there are a few places that need to
be updated. As explained [here](reference/operator_parameters.md), it's possible
to configure the operator either with a ConfigMap or CRD, but currently we aim
to synchronize parameters everywhere.
Note: If one option is defined in the operator configuration and in the cluster
[manifest](../manifests/complete-postgres-manifest.yaml), the latter takes
precedence.
So, first define the parameters in:
* the [ConfigMap](../manifests/configmap.yaml) manifest
* the CR's [default configuration](../manifests/postgresql-operator-default-configuration.yaml)
* the Helm chart [values](../charts/postgres-operator/values.yaml)
Update the following Go files that obtain the configuration parameter from the
manifest files:
* [operator_configuration_type.go](../pkg/apis/acid.zalan.do/v1/operator_configuration_type.go)
* [operator_config.go](../pkg/controller/operator_config.go)
* [config.go](../pkg/util/config/config.go)
The operator behavior has to be implemented at least in [k8sres.go](../pkg/cluster/k8sres.go).
Please, reflect your changes in tests, for example in:
* [config_test.go](../pkg/util/config/config_test.go)
* [k8sres_test.go](../pkg/cluster/k8sres_test.go)
* [util_test.go](../pkg/apis/acid.zalan.do/v1/util_test.go)
Finally, document the new configuration option(s) for the operator in its
[reference](reference/operator_parameters.md) document and explain the feature
in the [administrator docs](administrator.md).

View File

@ -212,6 +212,9 @@ configuration they are grouped under the `kubernetes` key.
class](https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass) class](https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass)
that should be assigned to the Postgres pods. The priority class itself must be defined in advance. that should be assigned to the Postgres pods. The priority class itself must be defined in advance.
Default is empty (use the default priority class). Default is empty (use the default priority class).
* **spilo_privileged**
whether the Spilo container should run in privileged mode. Privileged mode is used for AWS volume resizing and not required if you don't need that capability. The default is `false`.
* **master_pod_move_timeout** * **master_pod_move_timeout**
The period of time to wait for the success of migration of master pods from an unschedulable node. The period of time to wait for the success of migration of master pods from an unschedulable node.

View File

@ -43,13 +43,25 @@ $ kubectl get pods -w --show-labels
## Connect to PostgreSQL ## Connect to PostgreSQL
We can use the generated secret of the `postgres` robot user to connect to our `acid-minimal-cluster` master running in Minikube: With a `port-forward` on one of the database pods (e.g. the master) you can
connect to the PostgreSQL database. Use labels to filter for the master pod of
our test cluster.
```bash
# get name of master pod of acid-minimal-cluster
export PGMASTER=$(kubectl get pods -o jsonpath={.items..metadata.name} -l application=spilo,version=acid-minimal-cluster,spilo-role=master)
# set up port forward
kubectl port-forward $PGMASTER 6432:5432
```
Open another CLI and connect to the database. Use the generated secret of the
`postgres` robot user to connect to our `acid-minimal-cluster` master running
in Minikube:
```bash ```bash
$ export PGHOST=db_host
$ export PGPORT=db_port
$ export PGPASSWORD=$(kubectl get secret postgres.acid-minimal-cluster.credentials -o 'jsonpath={.data.password}' | base64 -d) $ export PGPASSWORD=$(kubectl get secret postgres.acid-minimal-cluster.credentials -o 'jsonpath={.data.password}' | base64 -d)
$ psql -U postgres $ psql -U postgres -p 6432
``` ```
# Defining database roles in the operator # Defining database roles in the operator

View File

@ -10,11 +10,12 @@ data:
debug_logging: "true" debug_logging: "true"
workers: "4" workers: "4"
docker_image: registry.opensource.zalan.do/acid/spilo-11:1.5-p4 docker_image: registry.opensource.zalan.do/acid/spilo-cdp-11:1.5-p70
pod_service_account_name: "zalando-postgres-operator" pod_service_account_name: "zalando-postgres-operator"
secret_name_template: '{username}.{cluster}.credentials' secret_name_template: '{username}.{cluster}.credentials'
super_username: postgres super_username: postgres
enable_teams_api: "false" enable_teams_api: "false"
spilo_privileged: "false"
# custom_service_annotations: # custom_service_annotations:
# "keyx:valuez,keya:valuea" # "keyx:valuez,keya:valuea"
# set_memory_request_to_limit: "true" # set_memory_request_to_limit: "true"
@ -29,6 +30,7 @@ data:
# pam_role_name: zalandos # pam_role_name: zalandos
# pam_configuration: | # pam_configuration: |
# https://info.example.com/oauth2/tokeninfo?access_token= uid realm=/employees # https://info.example.com/oauth2/tokeninfo?access_token= uid realm=/employees
# inherited_labels: ""
aws_region: eu-central-1 aws_region: eu-central-1
db_hosted_zone: db.example.com db_hosted_zone: db.example.com
master_dns_name_format: '{cluster}.{team}.staging.{hostedzone}' master_dns_name_format: '{cluster}.{team}.staging.{hostedzone}'

View File

@ -23,6 +23,7 @@ configuration:
secret_name_template: "{username}.{cluster}.credentials.{tprkind}.{tprgroup}" secret_name_template: "{username}.{cluster}.credentials.{tprkind}.{tprgroup}"
oauth_token_secret_name: postgresql-operator oauth_token_secret_name: postgresql-operator
pod_role_label: spilo-role pod_role_label: spilo-role
spilo_privileged: false
cluster_labels: cluster_labels:
application: spilo application: spilo
# inherited_labels: # inherited_labels:
@ -34,6 +35,9 @@ configuration:
# toleration: {} # toleration: {}
# infrastructure_roles_secret_name: "" # infrastructure_roles_secret_name: ""
# pod_environment_configmap: "" # pod_environment_configmap: ""
pod_management_policy: "ordered_ready"
enable_pod_antiaffinity: "false"
pod_antiaffinity_topology_key: "kubernetes.io/hostname"
postgres_pod_resources: postgres_pod_resources:
default_cpu_request: 100m default_cpu_request: 100m
default_memory_request: 100Mi default_memory_request: 100Mi

View File

@ -45,6 +45,7 @@ type KubernetesMetaConfiguration struct {
PodServiceAccountDefinition string `json:"pod_service_account_definition,omitempty"` PodServiceAccountDefinition string `json:"pod_service_account_definition,omitempty"`
PodServiceAccountRoleBindingDefinition string `json:"pod_service_account_role_binding_definition,omitempty"` PodServiceAccountRoleBindingDefinition string `json:"pod_service_account_role_binding_definition,omitempty"`
PodTerminateGracePeriod Duration `json:"pod_terminate_grace_period,omitempty"` PodTerminateGracePeriod Duration `json:"pod_terminate_grace_period,omitempty"`
SpiloPrivileged bool `json:"spilo_privileged,omitempty"`
WatchedNamespace string `json:"watched_namespace,omitempty"` WatchedNamespace string `json:"watched_namespace,omitempty"`
PDBNameFormat config.StringTemplate `json:"pdb_name_format,omitempty"` PDBNameFormat config.StringTemplate `json:"pdb_name_format,omitempty"`
SecretNameTemplate config.StringTemplate `json:"secret_name_template,omitempty"` SecretNameTemplate config.StringTemplate `json:"secret_name_template,omitempty"`
@ -61,8 +62,9 @@ type KubernetesMetaConfiguration struct {
PodEnvironmentConfigMap string `json:"pod_environment_configmap,omitempty"` PodEnvironmentConfigMap string `json:"pod_environment_configmap,omitempty"`
PodPriorityClassName string `json:"pod_priority_class_name,omitempty"` PodPriorityClassName string `json:"pod_priority_class_name,omitempty"`
MasterPodMoveTimeout time.Duration `json:"master_pod_move_timeout,omitempty"` MasterPodMoveTimeout time.Duration `json:"master_pod_move_timeout,omitempty"`
EnablePodAntiAffinity bool `json:"enable_pod_antiaffinity" default:"false"` EnablePodAntiAffinity bool `json:"enable_pod_antiaffinity,omitempty"`
PodAntiAffinityTopologyKey string `name:"pod_antiaffinity_topology_key" default:"kubernetes.io/hostname"` PodAntiAffinityTopologyKey string `json:"pod_antiaffinity_topology_key,omitempty"`
PodManagementPolicy string `json:"pod_management_policy,omitempty"`
} }
// PostgresPodResourcesDefaults defines the spec of default resources // PostgresPodResourcesDefaults defines the spec of default resources

View File

@ -358,8 +358,8 @@ func generateSpiloContainer(
resourceRequirements *v1.ResourceRequirements, resourceRequirements *v1.ResourceRequirements,
envVars []v1.EnvVar, envVars []v1.EnvVar,
volumeMounts []v1.VolumeMount, volumeMounts []v1.VolumeMount,
privilegedMode bool,
) *v1.Container { ) *v1.Container {
privilegedMode := true
return &v1.Container{ return &v1.Container{
Name: name, Name: name,
Image: *dockerImage, Image: *dockerImage,
@ -797,6 +797,7 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*v1beta1.State
resourceRequirements, resourceRequirements,
spiloEnvVars, spiloEnvVars,
volumeMounts, volumeMounts,
c.OpConfig.Resources.SpiloPrivileged,
) )
// resolve conflicts between operator-global and per-cluster sidecars // resolve conflicts between operator-global and per-cluster sidecars

View File

@ -230,6 +230,8 @@ func (c *Controller) initRoleBinding() {
default: default:
c.PodServiceAccountRoleBinding = obj.(*rbacv1beta1.RoleBinding) c.PodServiceAccountRoleBinding = obj.(*rbacv1beta1.RoleBinding)
c.PodServiceAccountRoleBinding.Namespace = "" c.PodServiceAccountRoleBinding.Namespace = ""
c.PodServiceAccountRoleBinding.ObjectMeta.Name = c.PodServiceAccount.Name
c.PodServiceAccountRoleBinding.RoleRef.Name = c.PodServiceAccount.Name
c.PodServiceAccountRoleBinding.Subjects[0].Name = c.PodServiceAccount.Name c.PodServiceAccountRoleBinding.Subjects[0].Name = c.PodServiceAccount.Name
c.logger.Info("successfully parsed") c.logger.Info("successfully parsed")

View File

@ -41,6 +41,7 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur
result.PodServiceAccountRoleBindingDefinition = fromCRD.Kubernetes.PodServiceAccountRoleBindingDefinition result.PodServiceAccountRoleBindingDefinition = fromCRD.Kubernetes.PodServiceAccountRoleBindingDefinition
result.PodEnvironmentConfigMap = fromCRD.Kubernetes.PodEnvironmentConfigMap result.PodEnvironmentConfigMap = fromCRD.Kubernetes.PodEnvironmentConfigMap
result.PodTerminateGracePeriod = time.Duration(fromCRD.Kubernetes.PodTerminateGracePeriod) result.PodTerminateGracePeriod = time.Duration(fromCRD.Kubernetes.PodTerminateGracePeriod)
result.SpiloPrivileged = fromCRD.Kubernetes.SpiloPrivileged
result.WatchedNamespace = fromCRD.Kubernetes.WatchedNamespace result.WatchedNamespace = fromCRD.Kubernetes.WatchedNamespace
result.PDBNameFormat = fromCRD.Kubernetes.PDBNameFormat result.PDBNameFormat = fromCRD.Kubernetes.PDBNameFormat
result.SecretNameTemplate = fromCRD.Kubernetes.SecretNameTemplate result.SecretNameTemplate = fromCRD.Kubernetes.SecretNameTemplate
@ -52,6 +53,7 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur
result.ClusterNameLabel = fromCRD.Kubernetes.ClusterNameLabel result.ClusterNameLabel = fromCRD.Kubernetes.ClusterNameLabel
result.NodeReadinessLabel = fromCRD.Kubernetes.NodeReadinessLabel result.NodeReadinessLabel = fromCRD.Kubernetes.NodeReadinessLabel
result.PodPriorityClassName = fromCRD.Kubernetes.PodPriorityClassName result.PodPriorityClassName = fromCRD.Kubernetes.PodPriorityClassName
result.PodManagementPolicy = fromCRD.Kubernetes.PodManagementPolicy
result.MasterPodMoveTimeout = fromCRD.Kubernetes.MasterPodMoveTimeout result.MasterPodMoveTimeout = fromCRD.Kubernetes.MasterPodMoveTimeout
result.EnablePodAntiAffinity = fromCRD.Kubernetes.EnablePodAntiAffinity result.EnablePodAntiAffinity = fromCRD.Kubernetes.EnablePodAntiAffinity

View File

@ -26,6 +26,7 @@ type Resources struct {
PodDeletionWaitTimeout time.Duration `name:"pod_deletion_wait_timeout" default:"10m"` PodDeletionWaitTimeout time.Duration `name:"pod_deletion_wait_timeout" default:"10m"`
PodTerminateGracePeriod time.Duration `name:"pod_terminate_grace_period" default:"5m"` PodTerminateGracePeriod time.Duration `name:"pod_terminate_grace_period" default:"5m"`
PodPriorityClassName string `name:"pod_priority_class_name"` PodPriorityClassName string `name:"pod_priority_class_name"`
SpiloPrivileged bool `name:"spilo_privileged" default:"false"`
ClusterLabels map[string]string `name:"cluster_labels" default:"application:spilo"` ClusterLabels map[string]string `name:"cluster_labels" default:"application:spilo"`
InheritedLabels []string `name:"inherited_labels" default:""` InheritedLabels []string `name:"inherited_labels" default:""`
ClusterNameLabel string `name:"cluster_name_label" default:"cluster-name"` ClusterNameLabel string `name:"cluster_name_label" default:"cluster-name"`