Custom annotations 329 (#657)

* Add ability for custom annotations to database pods
Thomas Runyon 2019-11-11 04:45:35 -05:00 committed by Felix Kunde
parent 33e1d60703
commit 535517cd1b
15 changed files with 160 additions and 13 deletions

View File

@@ -53,9 +53,11 @@ configKubernetes:
   cluster_domain: cluster.local
   # additional labels assigned to the cluster objects
   cluster_labels:
     application: spilo
   # label assigned to Kubernetes objects created by the operator
   cluster_name_label: cluster-name
+  # additional annotations to add to every database pod
+  custom_pod_annotations:
   # toggles pod anti affinity on the Postgres pods
   enable_pod_antiaffinity: false
   # toggles PDB to set to MinAvailabe 0 or 1

View File

@@ -54,6 +54,8 @@ configKubernetes:
   cluster_labels: application:spilo
   # label assigned to Kubernetes objects created by the operator
   cluster_name_label: version
+  # annotations attached to each database pod
+  # custom_pod_annotations: keya:valuea
   # toggles pod anti affinity on the Postgres pods
   enable_pod_antiaffinity: "false"
   # toggles PDB to set to MinAvailabe 0 or 1
@@ -127,8 +129,7 @@ configLoadBalancer:
   # DNS zone for cluster DNS name when load balancer is configured for cluster
   db_hosted_zone: db.example.com
   # annotations to apply to service when load balancing is enabled
-  # custom_service_annotations:
-  #   "keyx:valuez,keya:valuea"
+  # custom_service_annotations: "keyx:valuez,keya:valuea"
   # toggles service type load balancer pointing to the master pod of the cluster
   enable_master_load_balancer: "true"

View File

@@ -118,6 +118,11 @@ These parameters are grouped directly under the `spec` key in the manifest.
   then the default priority class is taken. The priority class itself must be
   defined in advance. Optional.
+* **podAnnotations**
+  A map of key value pairs that gets attached as [annotations](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/)
+  to each pod created for the database.
 * **enableShmVolume**
   Start a database pod without limitations on shm memory. By default docker
   limit `/dev/shm` to `64M` (see e.g. the [docker
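
As a point of reference for the podAnnotations parameter documented above, here is a minimal sketch of a cluster manifest fragment; the cluster name, teamId and the annotation key/value are placeholders modelled on the commented example in the manifest hunk further down in this diff, and other spec fields are omitted for brevity:

  apiVersion: "acid.zalan.do/v1"
  kind: postgresql
  metadata:
    name: acid-minimal-cluster
  spec:
    teamId: "acid"
    podAnnotations:
      annotation.key: value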

View File

@@ -168,6 +168,11 @@ configuration they are grouped under the `kubernetes` key.
   Postgres pods are [terminated forcefully](https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods)
   after this timeout. The default is `5m`.
+* **custom_pod_annotations**
+  This key/value map provides a set of annotations that get attached to each pod
+  of a database created by the operator. If the annotation key is also provided
+  by the database definition, the database definition value is used.
 * **watched_namespace**
   The operator watches for Postgres objects in the given namespace. If not
   specified, the value is taken from the operator namespace. A special `*`
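
To illustrate the operator-side setting described above, here is a sketch of custom_pod_annotations in the two configuration styles touched by this commit. The keys keya/keyb and their values are placeholders taken from the example files in this diff; the comma-separated string form is an assumption based on how custom_service_annotations is written in the example ConfigMap:

  # ConfigMap-based operator configuration (string form)
  data:
    custom_pod_annotations: "keya:valuea,keyb:valueb"

  # CRD-based OperatorConfiguration (map form, under the kubernetes key)
  configuration:
    kubernetes:
      custom_pod_annotations:
        keya: valuea
        keyb: valueb

If a key such as keya is also set in a cluster manifest under podAnnotations, the per-cluster value takes precedence, as stated above and exercised by the "Database Config overrides Operator Config Annotations" case in the TestPodAnnotations test added by this commit.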

View File

@@ -25,7 +25,8 @@ spec:
   - 127.0.0.1/32
   databases:
     foo: zalando
+# podAnnotations:
+#   annotation.key: value
 # Expert section
   enableShmVolume: true

View File

@@ -11,8 +11,8 @@ data:
   cluster_history_entries: "1000"
   cluster_labels: application:spilo
   cluster_name_label: version
-  # custom_service_annotations:
-  #   "keyx:valuez,keya:valuea"
+  # custom_service_annotations: "keyx:valuez,keya:valuea"
+  # custom_pod_annotations: "keya:valuea"
   db_hosted_zone: db.example.com
   debug_logging: "true"
   # default_cpu_limit: "3"
@@ -37,7 +37,7 @@ data:
   # logical_backup_docker_image: "registry.opensource.zalan.do/acid/logical-backup"
   # logical_backup_s3_bucket: "my-bucket-url"
   # logical_backup_schedule: "30 00 * * *"
-  master_dns_name_format: '{cluster}.{team}.staging.{hostedzone}'
+  master_dns_name_format: "{cluster}.{team}.staging.{hostedzone}"
   # master_pod_move_timeout: 10m
   # max_instances: "-1"
   # min_instances: "-1"
@@ -60,13 +60,13 @@ data:
   ready_wait_interval: 3s
   ready_wait_timeout: 30s
   repair_period: 5m
-  replica_dns_name_format: '{cluster}-repl.{team}.staging.{hostedzone}'
+  replica_dns_name_format: "{cluster}-repl.{team}.staging.{hostedzone}"
   replication_username: standby
   resource_check_interval: 3s
   resource_check_timeout: 10m
   resync_period: 5m
   ring_log_lines: "100"
-  secret_name_template: '{username}.{cluster}.credentials'
+  secret_name_template: "{username}.{cluster}.credentials"
   # sidecar_docker_images: ""
   # set_memory_request_to_limit: "false"
   spilo_privileged: "false"

View File

@@ -22,6 +22,9 @@ configuration:
     cluster_labels:
       application: spilo
     cluster_name_label: cluster-name
+    # custom_pod_annotations:
+    #   keya: valuea
+    #   keyb: valueb
     enable_pod_antiaffinity: false
     enable_pod_disruption_budget: true
     # infrastructure_roles_secret_name: ""

View File

@@ -59,6 +59,7 @@ type KubernetesMetaConfiguration struct {
     InheritedLabels      []string          `json:"inherited_labels,omitempty"`
     ClusterNameLabel     string            `json:"cluster_name_label,omitempty"`
     NodeReadinessLabel   map[string]string `json:"node_readiness_label,omitempty"`
+    CustomPodAnnotations map[string]string `json:"custom_pod_annotations,omitempty"`
     // TODO: use a proper toleration structure?
     PodToleration map[string]string `json:"toleration,omitempty"`
     // TODO: use namespacedname

View File

@@ -59,6 +59,7 @@ type PostgresSpec struct {
     EnableLogicalBackup   bool                `json:"enableLogicalBackup,omitempty"`
     LogicalBackupSchedule string              `json:"logicalBackupSchedule,omitempty"`
     StandbyCluster        *StandbyDescription `json:"standby"`
+    PodAnnotations        map[string]string   `json:"podAnnotations"`
     // deprecated json tags
     InitContainersOld []v1.Container `json:"init_containers,omitempty"`

View File

@@ -437,6 +437,16 @@ var postgresqlList = []struct {
     PostgresqlList{},
     errors.New("unexpected end of JSON input")}}
 
+var annotations = []struct {
+    in          []byte
+    annotations map[string]string
+    err         error
+}{{
+    in:          []byte(`{"kind": "Postgresql","apiVersion": "acid.zalan.do/v1","metadata": {"name": "acid-testcluster1"}, "spec": {"podAnnotations": {"foo": "bar"},"teamId": "acid", "clone": {"cluster": "team-batman"}}}`),
+    annotations: map[string]string{"foo": "bar"},
+    err:         nil},
+}
+
 func mustParseTime(s string) metav1.Time {
     v, err := time.Parse("15:04", s)
     if err != nil {
@@ -482,6 +492,25 @@ func TestWeekdayTime(t *testing.T) {
     }
 }
 
+func TestClusterAnnotations(t *testing.T) {
+    for _, tt := range annotations {
+        var cluster Postgresql
+        err := cluster.UnmarshalJSON(tt.in)
+        if err != nil {
+            if tt.err == nil || err.Error() != tt.err.Error() {
+                t.Errorf("Unable to unmarshal cluster with annotations: expected %v got %v", tt.err, err)
+            }
+            continue
+        }
+        for k, v := range cluster.Spec.PodAnnotations {
+            found, expected := v, tt.annotations[k]
+            if found != expected {
+                t.Errorf("Didn't find the correct value for key %v in podAnnotations: expected %v found %v", k, expected, found)
+            }
+        }
+    }
+}
+
 func TestClusterName(t *testing.T) {
     for _, tt := range clusterNames {
         name, err := extractClusterName(tt.in, tt.inTeam)

View File

@@ -102,6 +102,13 @@ func (in *KubernetesMetaConfiguration) DeepCopyInto(out *KubernetesMetaConfigura
             (*out)[key] = val
         }
     }
+    if in.CustomPodAnnotations != nil {
+        in, out := &in.CustomPodAnnotations, &out.CustomPodAnnotations
+        *out = make(map[string]string, len(*in))
+        for key, val := range *in {
+            (*out)[key] = val
+        }
+    }
     if in.PodToleration != nil {
         in, out := &in.PodToleration, &out.PodToleration
         *out = make(map[string]string, len(*in))
@@ -513,6 +520,13 @@ func (in *PostgresSpec) DeepCopyInto(out *PostgresSpec) {
         *out = new(StandbyDescription)
         **out = **in
     }
+    if in.PodAnnotations != nil {
+        in, out := &in.PodAnnotations, &out.PodAnnotations
+        *out = make(map[string]string, len(*in))
+        for key, val := range *in {
+            (*out)[key] = val
+        }
+    }
     if in.InitContainersOld != nil {
         in, out := &in.InitContainersOld, &out.InitContainersOld
         *out = make([]corev1.Container, len(*in))

View File

@@ -11,7 +11,7 @@ import (
     "github.com/zalando/postgres-operator/pkg/util/config"
     "github.com/zalando/postgres-operator/pkg/util/k8sutil"
     "github.com/zalando/postgres-operator/pkg/util/teams"
-    "k8s.io/api/core/v1"
+    v1 "k8s.io/api/core/v1"
 )
 
 const (
@@ -328,3 +328,57 @@ func TestShouldDeleteSecret(t *testing.T) {
         }
     }
 }
+
+func TestPodAnnotations(t *testing.T) {
+    testName := "TestPodAnnotations"
+    tests := []struct {
+        subTest  string
+        operator map[string]string
+        database map[string]string
+        merged   map[string]string
+    }{
+        {
+            subTest:  "No Annotations",
+            operator: make(map[string]string),
+            database: make(map[string]string),
+            merged:   make(map[string]string),
+        },
+        {
+            subTest:  "Operator Config Annotations",
+            operator: map[string]string{"foo": "bar"},
+            database: make(map[string]string),
+            merged:   map[string]string{"foo": "bar"},
+        },
+        {
+            subTest:  "Database Config Annotations",
+            operator: make(map[string]string),
+            database: map[string]string{"foo": "bar"},
+            merged:   map[string]string{"foo": "bar"},
+        },
+        {
+            subTest:  "Database Config overrides Operator Config Annotations",
+            operator: map[string]string{"foo": "bar", "global": "foo"},
+            database: map[string]string{"foo": "baz", "local": "foo"},
+            merged:   map[string]string{"foo": "baz", "global": "foo", "local": "foo"},
+        },
+    }
+
+    for _, tt := range tests {
+        cl.OpConfig.CustomPodAnnotations = tt.operator
+        cl.Postgresql.Spec.PodAnnotations = tt.database
+        annotations := cl.generatePodAnnotations(&cl.Postgresql.Spec)
+        for k, v := range annotations {
+            if observed, expected := v, tt.merged[k]; observed != expected {
+                t.Errorf("%v expects annotation value %v for key %v, but found %v",
+                    testName+"/"+tt.subTest, expected, k, observed)
+            }
+        }
+        for k, v := range tt.merged {
+            if observed, expected := annotations[k], v; observed != expected {
+                t.Errorf("%v expects annotation value %v for key %v, but found %v",
+                    testName+"/"+tt.subTest, expected, k, observed)
+            }
+        }
+    }
+}

View File

@@ -430,6 +430,7 @@ func mountShmVolumeNeeded(opConfig config.Config, pgSpec *acidv1.PostgresSpec) *
 func generatePodTemplate(
     namespace string,
     labels labels.Set,
+    annotations map[string]string,
     spiloContainer *v1.Container,
     initContainers []v1.Container,
     sidecarContainers []v1.Container,
@@ -485,13 +486,17 @@ generatePodTemplate
     template := v1.PodTemplateSpec{
         ObjectMeta: metav1.ObjectMeta{
             Labels:      labels,
            Namespace:   namespace,
+            Annotations: annotations,
         },
         Spec: podSpec,
     }
 
     if kubeIAMRole != "" {
-        template.Annotations = map[string]string{constants.KubeIAmAnnotation: kubeIAMRole}
+        if template.Annotations == nil {
+            template.Annotations = make(map[string]string)
+        }
+        template.Annotations[constants.KubeIAmAnnotation] = kubeIAMRole
     }
 
     return &template, nil
@@ -881,10 +886,13 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
         effectiveFSGroup = spec.SpiloFSGroup
     }
 
+    annotations := c.generatePodAnnotations(spec)
+
     // generate pod template for the statefulset, based on the spilo container and sidecars
     if podTemplate, err = generatePodTemplate(
         c.Namespace,
         c.labelsSet(true),
+        annotations,
         spiloContainer,
         spec.InitContainers,
         sidecarContainers,
@@ -949,6 +957,24 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
     return statefulSet, nil
 }
 
+func (c *Cluster) generatePodAnnotations(spec *acidv1.PostgresSpec) map[string]string {
+    annotations := make(map[string]string)
+    for k, v := range c.OpConfig.CustomPodAnnotations {
+        annotations[k] = v
+    }
+
+    if spec != nil && spec.PodAnnotations != nil {
+        for k, v := range spec.PodAnnotations {
+            annotations[k] = v
+        }
+    }
+
+    if len(annotations) == 0 {
+        return nil
+    }
+
+    return annotations
+}
+
 func generateScalyrSidecarSpec(clusterName, APIKey, serverURL, dockerImage string,
     containerResources *acidv1.Resources, logger *logrus.Entry) *acidv1.Sidecar {
     if APIKey == "" || dockerImage == "" {
@@ -1462,10 +1488,13 @@ func (c *Cluster) generateLogicalBackupJob() (*batchv1beta1.CronJob, error) {
         },
     }}
 
+    annotations := c.generatePodAnnotations(&c.Spec)
+
     // re-use the method that generates DB pod templates
     if podTemplate, err = generatePodTemplate(
         c.Namespace,
         labels,
+        annotations,
         logicalBackupContainer,
         []v1.Container{},
         []v1.Container{},

View File

@@ -41,6 +41,7 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur
     result.ReplicationUsername = fromCRD.PostgresUsersConfiguration.ReplicationUsername
 
     // kubernetes config
+    result.CustomPodAnnotations = fromCRD.Kubernetes.CustomPodAnnotations
     result.PodServiceAccountName = fromCRD.Kubernetes.PodServiceAccountName
     result.PodServiceAccountDefinition = fromCRD.Kubernetes.PodServiceAccountDefinition
     result.PodServiceAccountRoleBindingDefinition = fromCRD.Kubernetes.PodServiceAccountRoleBindingDefinition

View File

@@ -109,6 +109,7 @@ type Config struct {
     EnableMasterLoadBalancer   bool              `name:"enable_master_load_balancer" default:"true"`
     EnableReplicaLoadBalancer  bool              `name:"enable_replica_load_balancer" default:"false"`
     CustomServiceAnnotations   map[string]string `name:"custom_service_annotations"`
+    CustomPodAnnotations       map[string]string `name:"custom_pod_annotations"`
     EnablePodAntiAffinity      bool              `name:"enable_pod_antiaffinity" default:"false"`
     PodAntiAffinityTopologyKey string            `name:"pod_antiaffinity_topology_key" default:"kubernetes.io/hostname"`
     // deprecated and kept for backward compatibility