allow PodEnvironmentConfigMap in other namespaces

Felix Kunde 2020-03-18 14:11:11 +01:00
parent 9ddee8f302
commit a0e88ae354
9 changed files with 125 additions and 118 deletions

View File

@@ -71,7 +71,7 @@ configKubernetes:
   enable_pod_disruption_budget: true
   # enables sidecar containers to run alongside Spilo in the same pod
   enable_sidecars: true
-  # name of the secret containing infrastructure roles names and passwords
+  # namespaced name of the secret containing infrastructure roles names and passwords
   # infrastructure_roles_secret_name: postgresql-infrastructure-roles
   # list of labels that can be inherited from the cluster manifest
@@ -86,15 +86,15 @@ configKubernetes:
   # node_readiness_label:
   #   status: ready
-  # name of the secret containing the OAuth2 token to pass to the teams API
+  # namespaced name of the secret containing the OAuth2 token to pass to the teams API
   # oauth_token_secret_name: postgresql-operator
   # defines the template for PDB (Pod Disruption Budget) names
   pdb_name_format: "postgres-{cluster}-pdb"
   # override topology key for pod anti affinity
   pod_antiaffinity_topology_key: "kubernetes.io/hostname"
-  # name of the ConfigMap with environment variables to populate on every pod
-  # pod_environment_configmap: ""
+  # namespaced name of the ConfigMap with environment variables to populate on every pod
+  # pod_environment_configmap: "default/my-custom-config"
   # specify the pod management policy of stateful sets of Postgres clusters
   pod_management_policy: "ordered_ready"

View File

@@ -67,7 +67,7 @@ configKubernetes:
   enable_pod_disruption_budget: "true"
   # enables sidecar containers to run alongside Spilo in the same pod
   enable_sidecars: "true"
-  # name of the secret containing infrastructure roles names and passwords
+  # namespaced name of the secret containing infrastructure roles names and passwords
   # infrastructure_roles_secret_name: postgresql-infrastructure-roles
   # list of labels that can be inherited from the cluster manifest
@@ -79,15 +79,15 @@ configKubernetes:
   # set of labels that a running and active node should possess to be considered ready
   # node_readiness_label: ""
-  # name of the secret containing the OAuth2 token to pass to the teams API
+  # namespaced name of the secret containing the OAuth2 token to pass to the teams API
   # oauth_token_secret_name: postgresql-operator
   # defines the template for PDB (Pod Disruption Budget) names
   pdb_name_format: "postgres-{cluster}-pdb"
   # override topology key for pod anti affinity
   pod_antiaffinity_topology_key: "kubernetes.io/hostname"
-  # name of the ConfigMap with environment variables to populate on every pod
-  # pod_environment_configmap: ""
+  # namespaced name of the ConfigMap with environment variables to populate on every pod
+  # pod_environment_configmap: "default/my-custom-config"
   # specify the pod management policy of stateful sets of Postgres clusters
   pod_management_policy: "ordered_ready"

View File

@@ -336,7 +336,7 @@ metadata:
   name: postgres-operator
 data:
   # referencing config map with custom settings
-  pod_environment_configmap: postgres-pod-config
+  pod_environment_configmap: default/postgres-pod-config
 ```

 **OperatorConfiguration**
@@ -349,7 +349,7 @@ metadata:
 configuration:
   kubernetes:
     # referencing config map with custom settings
-    pod_environment_configmap: postgres-pod-config
+    pod_environment_configmap: default/postgres-pod-config
 ```

 **referenced ConfigMap `postgres-pod-config`**
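The hunk ends at the heading for the referenced ConfigMap, so the example manifest itself lies outside the diff context. A minimal sketch of such a ConfigMap, with illustrative variable names (not taken from the commit):

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: postgres-pod-config
  namespace: default
data:
  # each key-value pair is injected into every Postgres pod as an environment variable
  LOG_STATEMENT: "all"
  CUSTOM_VARIABLE: "value"
```

With `pod_environment_configmap: default/postgres-pod-config`, these variables end up in every pod of the operator's clusters; on name conflicts, the variables generated by the operator take precedence.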

View File

@@ -221,11 +221,12 @@ configuration they are grouped under the `kubernetes` key.
   to the Postgres clusters after creation.

 * **oauth_token_secret_name**
-  a name of the secret containing the `OAuth2` token to pass to the teams API.
-  The default is `postgresql-operator`.
+  namespaced name of the secret containing the `OAuth2` token to pass to the
+  teams API. The default is `postgresql-operator`.

 * **infrastructure_roles_secret_name**
-  name of the secret containing infrastructure roles names and passwords.
+  namespaced name of the secret containing infrastructure roles names and
+  passwords.

 * **pod_role_label**
   name of the label assigned to the Postgres pods (and services/endpoints) by
@@ -262,11 +263,11 @@ configuration they are grouped under the `kubernetes` key.
   for details on taints and tolerations. The default is empty.

 * **pod_environment_configmap**
-  a name of the ConfigMap with environment variables to populate on every pod.
-  Right now this ConfigMap is searched in the namespace of the Postgres cluster.
-  All variables from that ConfigMap are injected to the pod's environment, on
-  conflicts they are overridden by the environment variables generated by the
-  operator. The default is empty.
+  namespaced name of the ConfigMap with environment variables to populate on
+  every pod. Right now this ConfigMap is searched in the namespace of the
+  Postgres cluster. All variables from that ConfigMap are injected to the pod's
+  environment, on conflicts they are overridden by the environment variables
+  generated by the operator. The default is empty.

 * **pod_priority_class_name**
   a name of the [priority class](https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass)
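All three parameters above now follow the same `namespace/name` convention. A hedged sketch of the corresponding `kubernetes` section of an OperatorConfiguration (namespaces and names here are illustrative):

```yaml
configuration:
  kubernetes:
    oauth_token_secret_name: default/postgresql-operator
    infrastructure_roles_secret_name: default/postgresql-infrastructure-roles
    pod_environment_configmap: default/my-custom-config
```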

View File

@@ -59,7 +59,7 @@ data:
   pdb_name_format: "postgres-{cluster}-pdb"
   # pod_antiaffinity_topology_key: "kubernetes.io/hostname"
   pod_deletion_wait_timeout: 10m
-  # pod_environment_configmap: ""
+  # pod_environment_configmap: "default/my-custom-config"
   pod_label_wait_timeout: 10m
   pod_management_policy: "ordered_ready"
   pod_role_label: spilo-role

View File

@@ -40,7 +40,7 @@ configuration:
     oauth_token_secret_name: postgresql-operator
     pdb_name_format: "postgres-{cluster}-pdb"
     pod_antiaffinity_topology_key: "kubernetes.io/hostname"
-    # pod_environment_configmap: ""
+    # pod_environment_configmap: "default/my-custom-config"
     pod_management_policy: "ordered_ready"
     # pod_priority_class_name: ""
     pod_role_label: spilo-role

View File

@@ -63,14 +63,13 @@ type KubernetesMetaConfiguration struct {
     NodeReadinessLabel         map[string]string   `json:"node_readiness_label,omitempty"`
     CustomPodAnnotations       map[string]string   `json:"custom_pod_annotations,omitempty"`
     // TODO: use a proper toleration structure?
     PodToleration              map[string]string   `json:"toleration,omitempty"`
-    // TODO: use namespacedname
-    PodEnvironmentConfigMap    string              `json:"pod_environment_configmap,omitempty"`
+    PodEnvironmentConfigMap    spec.NamespacedName `json:"pod_environment_configmap,omitempty"`
     PodPriorityClassName       string              `json:"pod_priority_class_name,omitempty"`
     MasterPodMoveTimeout       Duration            `json:"master_pod_move_timeout,omitempty"`
     EnablePodAntiAffinity      bool                `json:"enable_pod_antiaffinity,omitempty"`
     PodAntiAffinityTopologyKey string              `json:"pod_antiaffinity_topology_key,omitempty"`
     PodManagementPolicy        string              `json:"pod_management_policy,omitempty"`
 }

 // PostgresPodResourcesDefaults defines the spec of default resources
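With `PodEnvironmentConfigMap` typed as `spec.NamespacedName`, the manifest value remains a single string that the operator splits into namespace and name; presumably a bare name without a `/` is resolved against a default namespace, as with the other `NamespacedName` options. A sketch of the two spellings (values illustrative):

```yaml
# fully qualified: namespace "default", name "my-custom-config"
pod_environment_configmap: default/my-custom-config
# bare name, resolved against a default namespace
pod_environment_configmap: my-custom-config
```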

View File

@@ -21,6 +21,7 @@ import (
     "github.com/zalando/postgres-operator/pkg/util"
     "github.com/zalando/postgres-operator/pkg/util/config"
     "github.com/zalando/postgres-operator/pkg/util/constants"
+    "github.com/zalando/postgres-operator/pkg/util/k8sutil"
     batchv1 "k8s.io/api/batch/v1"
     batchv1beta1 "k8s.io/api/batch/v1beta1"
     "k8s.io/apimachinery/pkg/labels"
@@ -123,12 +124,12 @@ func generateResourceRequirements(resources acidv1.Resources, defaultResources a
     return &result, nil
 }

-func fillResourceList(spec acidv1.ResourceDescription, defaults acidv1.ResourceDescription) (v1.ResourceList, error) {
+func fillResourceList(pgSpec acidv1.ResourceDescription, defaults acidv1.ResourceDescription) (v1.ResourceList, error) {
     var err error
     requests := v1.ResourceList{}

-    if spec.CPU != "" {
-        requests[v1.ResourceCPU], err = resource.ParseQuantity(spec.CPU)
+    if pgSpec.CPU != "" {
+        requests[v1.ResourceCPU], err = resource.ParseQuantity(pgSpec.CPU)
         if err != nil {
             return nil, fmt.Errorf("could not parse CPU quantity: %v", err)
         }
@@ -138,8 +139,8 @@ func fillResourceList(spec acidv1.ResourceDescription, defaults acidv1.ResourceD
             return nil, fmt.Errorf("could not parse default CPU quantity: %v", err)
         }
     }
-    if spec.Memory != "" {
-        requests[v1.ResourceMemory], err = resource.ParseQuantity(spec.Memory)
+    if pgSpec.Memory != "" {
+        requests[v1.ResourceMemory], err = resource.ParseQuantity(pgSpec.Memory)
         if err != nil {
             return nil, fmt.Errorf("could not parse memory quantity: %v", err)
         }
@@ -766,7 +767,7 @@ func (c *Cluster) getNewPgVersion(container v1.Container, newPgVersion string) (
     return newPgVersion, nil
 }

-func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.StatefulSet, error) {
+func (c *Cluster) generateStatefulSet(pgSpec *acidv1.PostgresSpec) (*appsv1.StatefulSet, error) {
     var (
         err error
@@ -782,12 +783,12 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
     // controller adjusts the default memory request at operator startup
-    request := spec.Resources.ResourceRequests.Memory
+    request := pgSpec.Resources.ResourceRequests.Memory
     if request == "" {
         request = c.OpConfig.DefaultMemoryRequest
     }
-    limit := spec.Resources.ResourceLimits.Memory
+    limit := pgSpec.Resources.ResourceLimits.Memory
     if limit == "" {
         limit = c.OpConfig.DefaultMemoryLimit
     }
@@ -798,7 +799,7 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
     }
     if isSmaller {
         c.logger.Warningf("The memory request of %v for the Postgres container is increased to match the memory limit of %v.", request, limit)
-        spec.Resources.ResourceRequests.Memory = limit
+        pgSpec.Resources.ResourceRequests.Memory = limit
     }
@@ -806,7 +807,7 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
     // as this sidecar is managed separately
     // adjust sidecar containers defined for that particular cluster
-    for _, sidecar := range spec.Sidecars {
+    for _, sidecar := range pgSpec.Sidecars {
         // TODO #413
         sidecarRequest := sidecar.Resources.ResourceRequests.Memory
@@ -833,25 +834,31 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
     defaultResources := c.makeDefaultResources()

-    resourceRequirements, err := generateResourceRequirements(spec.Resources, defaultResources)
+    resourceRequirements, err := generateResourceRequirements(pgSpec.Resources, defaultResources)
     if err != nil {
         return nil, fmt.Errorf("could not generate resource requirements: %v", err)
     }

-    if spec.InitContainers != nil && len(spec.InitContainers) > 0 {
+    if pgSpec.InitContainers != nil && len(pgSpec.InitContainers) > 0 {
         if c.OpConfig.EnableInitContainers != nil && !(*c.OpConfig.EnableInitContainers) {
             c.logger.Warningf("initContainers specified but disabled in configuration - next statefulset creation would fail")
         }
-        initContainers = spec.InitContainers
+        initContainers = pgSpec.InitContainers
     }

     customPodEnvVarsList := make([]v1.EnvVar, 0)

-    if c.OpConfig.PodEnvironmentConfigMap != "" {
+    if c.OpConfig.PodEnvironmentConfigMap != (spec.NamespacedName{}) {
         var cm *v1.ConfigMap
-        cm, err = c.KubeClient.ConfigMaps(c.Namespace).Get(c.OpConfig.PodEnvironmentConfigMap, metav1.GetOptions{})
+        cm, err = c.KubeClient.ConfigMaps(c.OpConfig.PodEnvironmentConfigMap.Namespace).Get(c.OpConfig.PodEnvironmentConfigMap.Name, metav1.GetOptions{})
         if err != nil {
-            return nil, fmt.Errorf("could not read PodEnvironmentConfigMap: %v", err)
+            // if not found, try again using the operator namespace (old behavior)
+            if k8sutil.ResourceNotFound(err) && c.Namespace != c.OpConfig.PodEnvironmentConfigMap.Namespace {
+                cm, err = c.KubeClient.ConfigMaps(c.Namespace).Get(c.OpConfig.PodEnvironmentConfigMap.Name, metav1.GetOptions{})
+            }
+            if err != nil {
+                return nil, fmt.Errorf("could not read PodEnvironmentConfigMap: %v", err)
+            }
         }
         for k, v := range cm.Data {
             customPodEnvVarsList = append(customPodEnvVarsList, v1.EnvVar{Name: k, Value: v})
@@ -859,33 +866,33 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
         sort.Slice(customPodEnvVarsList,
             func(i, j int) bool { return customPodEnvVarsList[i].Name < customPodEnvVarsList[j].Name })
     }

-    if spec.StandbyCluster != nil && spec.StandbyCluster.S3WalPath == "" {
+    if pgSpec.StandbyCluster != nil && pgSpec.StandbyCluster.S3WalPath == "" {
         return nil, fmt.Errorf("s3_wal_path is empty for standby cluster")
     }

     // backward compatible check for InitContainers
-    if spec.InitContainersOld != nil {
+    if pgSpec.InitContainersOld != nil {
         msg := "Manifest parameter init_containers is deprecated."
-        if spec.InitContainers == nil {
+        if pgSpec.InitContainers == nil {
             c.logger.Warningf("%s Consider using initContainers instead.", msg)
-            spec.InitContainers = spec.InitContainersOld
+            pgSpec.InitContainers = pgSpec.InitContainersOld
         } else {
             c.logger.Warningf("%s Only value from initContainers is used", msg)
         }
     }

     // backward compatible check for PodPriorityClassName
-    if spec.PodPriorityClassNameOld != "" {
+    if pgSpec.PodPriorityClassNameOld != "" {
         msg := "Manifest parameter pod_priority_class_name is deprecated."
-        if spec.PodPriorityClassName == "" {
+        if pgSpec.PodPriorityClassName == "" {
             c.logger.Warningf("%s Consider using podPriorityClassName instead.", msg)
-            spec.PodPriorityClassName = spec.PodPriorityClassNameOld
+            pgSpec.PodPriorityClassName = pgSpec.PodPriorityClassNameOld
         } else {
             c.logger.Warningf("%s Only value from podPriorityClassName is used", msg)
         }
     }

-    spiloConfiguration, err := generateSpiloJSONConfiguration(&spec.PostgresqlParam, &spec.Patroni, c.OpConfig.PamRoleName, c.logger)
+    spiloConfiguration, err := generateSpiloJSONConfiguration(&pgSpec.PostgresqlParam, &pgSpec.Patroni, c.OpConfig.PamRoleName, c.logger)
     if err != nil {
         return nil, fmt.Errorf("could not generate Spilo JSON configuration: %v", err)
     }
@@ -894,24 +901,24 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
     spiloEnvVars := c.generateSpiloPodEnvVars(
         c.Postgresql.GetUID(),
         spiloConfiguration,
-        &spec.Clone,
-        spec.StandbyCluster,
+        &pgSpec.Clone,
+        pgSpec.StandbyCluster,
         customPodEnvVarsList,
     )

     // pickup the docker image for the spilo container
-    effectiveDockerImage := util.Coalesce(spec.DockerImage, c.OpConfig.DockerImage)
+    effectiveDockerImage := util.Coalesce(pgSpec.DockerImage, c.OpConfig.DockerImage)

     // determine the FSGroup for the spilo pod
     effectiveFSGroup := c.OpConfig.Resources.SpiloFSGroup
-    if spec.SpiloFSGroup != nil {
-        effectiveFSGroup = spec.SpiloFSGroup
+    if pgSpec.SpiloFSGroup != nil {
+        effectiveFSGroup = pgSpec.SpiloFSGroup
     }

-    volumeMounts := generateVolumeMounts(spec.Volume)
+    volumeMounts := generateVolumeMounts(pgSpec.Volume)

     // configure TLS with a custom secret volume
-    if spec.TLS != nil && spec.TLS.SecretName != "" {
+    if pgSpec.TLS != nil && pgSpec.TLS.SecretName != "" {
         if effectiveFSGroup == nil {
             c.logger.Warnf("Setting the default FSGroup to satisfy the TLS configuration")
             fsGroup := int64(spiloPostgresGID)
@@ -924,7 +931,7 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
             Name: "tls-secret",
             VolumeSource: v1.VolumeSource{
                 Secret: &v1.SecretVolumeSource{
-                    SecretName:  spec.TLS.SecretName,
+                    SecretName:  pgSpec.TLS.SecretName,
                     DefaultMode: &defaultMode,
                 },
             },
@@ -938,16 +945,16 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
         })

         // use the same filenames as Secret resources by default
-        certFile := ensurePath(spec.TLS.CertificateFile, mountPath, "tls.crt")
-        privateKeyFile := ensurePath(spec.TLS.PrivateKeyFile, mountPath, "tls.key")
+        certFile := ensurePath(pgSpec.TLS.CertificateFile, mountPath, "tls.crt")
+        privateKeyFile := ensurePath(pgSpec.TLS.PrivateKeyFile, mountPath, "tls.key")
         spiloEnvVars = append(
             spiloEnvVars,
             v1.EnvVar{Name: "SSL_CERTIFICATE_FILE", Value: certFile},
             v1.EnvVar{Name: "SSL_PRIVATE_KEY_FILE", Value: privateKeyFile},
         )

-        if spec.TLS.CAFile != "" {
-            caFile := ensurePath(spec.TLS.CAFile, mountPath, "")
+        if pgSpec.TLS.CAFile != "" {
+            caFile := ensurePath(pgSpec.TLS.CAFile, mountPath, "")
             spiloEnvVars = append(
                 spiloEnvVars,
                 v1.EnvVar{Name: "SSL_CA_FILE", Value: caFile},
@@ -966,7 +973,7 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
     )

     // resolve conflicts between operator-global and per-cluster sidecars
-    sideCars := c.mergeSidecars(spec.Sidecars)
+    sideCars := c.mergeSidecars(pgSpec.Sidecars)

     resourceRequirementsScalyrSidecar := makeResources(
         c.OpConfig.ScalyrCPURequest,
@@ -996,10 +1003,10 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
         }
     }

-    tolerationSpec := tolerations(&spec.Tolerations, c.OpConfig.PodToleration)
-    effectivePodPriorityClassName := util.Coalesce(spec.PodPriorityClassName, c.OpConfig.PodPriorityClassName)
+    tolerationSpec := tolerations(&pgSpec.Tolerations, c.OpConfig.PodToleration)
+    effectivePodPriorityClassName := util.Coalesce(pgSpec.PodPriorityClassName, c.OpConfig.PodPriorityClassName)

-    annotations := c.generatePodAnnotations(spec)
+    annotations := c.generatePodAnnotations(pgSpec)

     // generate pod template for the statefulset, based on the spilo container and sidecars
     podTemplate, err = generatePodTemplate(
@@ -1016,7 +1023,7 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
         c.OpConfig.PodServiceAccountName,
         c.OpConfig.KubeIAMRole,
         effectivePodPriorityClassName,
-        mountShmVolumeNeeded(c.OpConfig, spec),
+        mountShmVolumeNeeded(c.OpConfig, pgSpec),
         c.OpConfig.EnablePodAntiAffinity,
         c.OpConfig.PodAntiAffinityTopologyKey,
         c.OpConfig.AdditionalSecretMount,
@@ -1027,12 +1034,12 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
         return nil, fmt.Errorf("could not generate pod template: %v", err)
     }

-    if volumeClaimTemplate, err = generatePersistentVolumeClaimTemplate(spec.Volume.Size,
-        spec.Volume.StorageClass); err != nil {
+    if volumeClaimTemplate, err = generatePersistentVolumeClaimTemplate(pgSpec.Volume.Size,
+        pgSpec.Volume.StorageClass); err != nil {
         return nil, fmt.Errorf("could not generate volume claim template: %v", err)
     }

-    numberOfInstances := c.getNumberOfInstances(spec)
+    numberOfInstances := c.getNumberOfInstances(pgSpec)

     // the operator has domain-specific logic on how to do rolling updates of PG clusters
     // so we do not use default rolling updates implemented by stateful sets
@@ -1069,13 +1076,13 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
     return statefulSet, nil
 }

-func (c *Cluster) generatePodAnnotations(spec *acidv1.PostgresSpec) map[string]string {
+func (c *Cluster) generatePodAnnotations(pgSpec *acidv1.PostgresSpec) map[string]string {
     annotations := make(map[string]string)
     for k, v := range c.OpConfig.CustomPodAnnotations {
         annotations[k] = v
     }
-    if spec != nil || spec.PodAnnotations != nil {
-        for k, v := range spec.PodAnnotations {
+    if pgSpec != nil || pgSpec.PodAnnotations != nil {
+        for k, v := range pgSpec.PodAnnotations {
             annotations[k] = v
         }
     }
@@ -1141,13 +1148,13 @@ func (c *Cluster) mergeSidecars(sidecars []acidv1.Sidecar) []acidv1.Sidecar {
     return result
 }

-func (c *Cluster) getNumberOfInstances(spec *acidv1.PostgresSpec) int32 {
+func (c *Cluster) getNumberOfInstances(pgSpec *acidv1.PostgresSpec) int32 {
     min := c.OpConfig.MinInstances
     max := c.OpConfig.MaxInstances
-    cur := spec.NumberOfInstances
+    cur := pgSpec.NumberOfInstances
     newcur := cur

-    if spec.StandbyCluster != nil {
+    if pgSpec.StandbyCluster != nil {
         if newcur == 1 {
             min = newcur
             max = newcur
@@ -1302,15 +1309,15 @@ func (c *Cluster) generateSingleUserSecret(namespace string, pgUser spec.PgUser)
     return &secret
 }

-func (c *Cluster) shouldCreateLoadBalancerForService(role PostgresRole, spec *acidv1.PostgresSpec) bool {
+func (c *Cluster) shouldCreateLoadBalancerForService(role PostgresRole, pgSpec *acidv1.PostgresSpec) bool {
     switch role {

     case Replica:
         // if the value is explicitly set in a Postgresql manifest, follow this setting
-        if spec.EnableReplicaLoadBalancer != nil {
-            return *spec.EnableReplicaLoadBalancer
+        if pgSpec.EnableReplicaLoadBalancer != nil {
+            return *pgSpec.EnableReplicaLoadBalancer
         }

         // otherwise, follow the operator configuration
@@ -1318,8 +1325,8 @@ func (c *Cluster) shouldCreateLoadBalancerForService(role PostgresRole, spec *ac
     case Master:
-        if spec.EnableMasterLoadBalancer != nil {
-            return *spec.EnableMasterLoadBalancer
+        if pgSpec.EnableMasterLoadBalancer != nil {
+            return *pgSpec.EnableMasterLoadBalancer
         }
         return c.OpConfig.EnableMasterLoadBalancer
@@ -1330,7 +1337,7 @@ func (c *Cluster) shouldCreateLoadBalancerForService(role PostgresRole, spec *ac
 }

-func (c *Cluster) generateService(role PostgresRole, spec *acidv1.PostgresSpec) *v1.Service {
+func (c *Cluster) generateService(role PostgresRole, pgSpec *acidv1.PostgresSpec) *v1.Service {
     serviceSpec := v1.ServiceSpec{
         Ports: []v1.ServicePort{{Name: "postgresql", Port: 5432, TargetPort: intstr.IntOrString{IntVal: 5432}}},
         Type:  v1.ServiceTypeClusterIP,
@@ -1340,12 +1347,12 @@ func (c *Cluster) generateService(role PostgresRole, spec *acidv1.PostgresSpec)
         serviceSpec.Selector = c.roleLabelsSet(false, role)
     }

-    if c.shouldCreateLoadBalancerForService(role, spec) {
-        // spec.AllowedSourceRanges evaluates to the empty slice of zero length
+    if c.shouldCreateLoadBalancerForService(role, pgSpec) {
+        // pgSpec.AllowedSourceRanges evaluates to the empty slice of zero length
         // when omitted or set to 'null'/empty sequence in the PG manifest
-        if len(spec.AllowedSourceRanges) > 0 {
-            serviceSpec.LoadBalancerSourceRanges = spec.AllowedSourceRanges
+        if len(pgSpec.AllowedSourceRanges) > 0 {
+            serviceSpec.LoadBalancerSourceRanges = pgSpec.AllowedSourceRanges
         } else {
             // safe default value: lock a load balancer only to the local address unless overridden explicitly
             serviceSpec.LoadBalancerSourceRanges = []string{localHost}
@@ -1364,7 +1371,7 @@ func (c *Cluster) generateService(role PostgresRole, spec *acidv1.PostgresSpec)
             Name:        c.serviceName(role),
             Namespace:   c.Namespace,
             Labels:      c.roleLabelsSet(true, role),
-            Annotations: c.generateServiceAnnotations(role, spec),
+            Annotations: c.generateServiceAnnotations(role, pgSpec),
         },
         Spec: serviceSpec,
     }
@@ -1372,19 +1379,19 @@ func (c *Cluster) generateService(role PostgresRole, spec *acidv1.PostgresSpec)
     return service
 }

-func (c *Cluster) generateServiceAnnotations(role PostgresRole, spec *acidv1.PostgresSpec) map[string]string {
+func (c *Cluster) generateServiceAnnotations(role PostgresRole, pgSpec *acidv1.PostgresSpec) map[string]string {
     annotations := make(map[string]string)
     for k, v := range c.OpConfig.CustomServiceAnnotations {
         annotations[k] = v
     }

-    if spec != nil || spec.ServiceAnnotations != nil {
-        for k, v := range spec.ServiceAnnotations {
+    if pgSpec != nil || pgSpec.ServiceAnnotations != nil {
+        for k, v := range pgSpec.ServiceAnnotations {
             annotations[k] = v
         }
     }

-    if c.shouldCreateLoadBalancerForService(role, spec) {
+    if c.shouldCreateLoadBalancerForService(role, pgSpec) {
         var dnsName string
         if role == Master {
             dnsName = c.masterDNSName()
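
The lookup order in `generateStatefulSet` can be read straight off the new code above: the ConfigMap is first fetched from the namespace given in the configuration, and only if it is not found there (and that namespace differs from the cluster's) does the operator retry in the cluster's own namespace, preserving the old behavior. An illustrative setup (namespace names assumed, not from the commit):

```yaml
# operator configuration: ConfigMap pinned to the "default" namespace
pod_environment_configmap: default/my-custom-config
```

For a cluster running in, say, namespace `team-a`, the operator looks for `my-custom-config` in `default` first and falls back to `team-a` only on a not-found error.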

View File

@@ -21,31 +21,31 @@ type CRD struct {
 // Resources describes kubernetes resource specific configuration parameters
 type Resources struct {
     ResourceCheckInterval   time.Duration     `name:"resource_check_interval" default:"3s"`
     ResourceCheckTimeout    time.Duration     `name:"resource_check_timeout" default:"10m"`
     PodLabelWaitTimeout     time.Duration     `name:"pod_label_wait_timeout" default:"10m"`
     PodDeletionWaitTimeout  time.Duration     `name:"pod_deletion_wait_timeout" default:"10m"`
     PodTerminateGracePeriod time.Duration     `name:"pod_terminate_grace_period" default:"5m"`
     SpiloFSGroup            *int64            `name:"spilo_fsgroup"`
     PodPriorityClassName    string            `name:"pod_priority_class_name"`
     ClusterDomain           string            `name:"cluster_domain" default:"cluster.local"`
     SpiloPrivileged         bool              `name:"spilo_privileged" default:"false"`
     ClusterLabels           map[string]string `name:"cluster_labels" default:"application:spilo"`
     InheritedLabels         []string          `name:"inherited_labels" default:""`
     ClusterNameLabel        string            `name:"cluster_name_label" default:"cluster-name"`
     PodRoleLabel            string            `name:"pod_role_label" default:"spilo-role"`
     PodToleration           map[string]string `name:"toleration" default:""`
     DefaultCPURequest       string            `name:"default_cpu_request" default:"100m"`
     DefaultMemoryRequest    string            `name:"default_memory_request" default:"100Mi"`
     DefaultCPULimit         string            `name:"default_cpu_limit" default:"1"`
     DefaultMemoryLimit      string            `name:"default_memory_limit" default:"500Mi"`
     MinCPULimit             string            `name:"min_cpu_limit" default:"250m"`
     MinMemoryLimit          string            `name:"min_memory_limit" default:"250Mi"`
-    PodEnvironmentConfigMap string              `name:"pod_environment_configmap" default:""`
+    PodEnvironmentConfigMap spec.NamespacedName `name:"pod_environment_configmap"`
     NodeReadinessLabel      map[string]string `name:"node_readiness_label" default:""`
     MaxInstances            int32             `name:"max_instances" default:"-1"`
     MinInstances            int32             `name:"min_instances" default:"-1"`
     ShmVolume               *bool             `name:"enable_shm_volume" default:"true"`
 }

 // Auth describes authentication specific configuration parameters