annotation to bypass globally configured instance limits (#1943)

commit 5e4badd99c
parent ad320488b0
@@ -91,6 +91,8 @@ spec:
         etcd_host:
           type: string
           default: ""
+        ignore_instance_limits_annotation_key:
+          type: string
         kubernetes_use_configmaps:
           type: boolean
           default: false

@@ -35,10 +35,15 @@ configGeneral:
   enable_spilo_wal_path_compat: false
   # etcd connection string for Patroni. Empty uses K8s-native DCS.
   etcd_host: ""
-  # Select if setup uses endpoints (default), or configmaps to manage leader (DCS=k8s)
-  # kubernetes_use_configmaps: false
   # Spilo docker image
   docker_image: registry.opensource.zalan.do/acid/spilo-14:2.1-p6
+  # key name for annotation to ignore globally configured instance limits
+  # ignore_instance_limits_annotation_key: ""
+
+  # Select if setup uses endpoints (default), or configmaps to manage leader (DCS=k8s)
+  # kubernetes_use_configmaps: false
+
   # min number of instances in Postgres cluster. -1 = no limit
   min_instances: -1
   # max number of instances in Postgres cluster. -1 = no limit

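As a usage sketch for the chart values above: enabling the new option via a Helm values override might look like the following. The annotation key name is an illustrative placeholder, not something this commit prescribes.

```yaml
# values-override.yaml -- a minimal sketch; the key name is a placeholder
configGeneral:
  # any non-empty value activates the per-cluster annotation toggle
  ignore_instance_limits_annotation_key: "zalando.org/ignore-instance-limits"
```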
@@ -147,6 +147,12 @@ Those are top-level keys, containing both leaf keys and groups.
   When `-1` is specified for `min_instances`, no limits are applied. The default
   is `-1`.
 
+* **ignore_instance_limits_annotation_key**
+  for some clusters it might be required to scale beyond the limits that can be
+  configured with the `min_instances` and `max_instances` options. You can define
+  an annotation key that can be used as a toggle in cluster manifests to ignore
+  globally configured instance limits. The default is empty.
+
 * **resync_period**
   period between consecutive sync requests. The default is `30m`.
 
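To illustrate the per-cluster toggle documented above: a hedged example of a cluster manifest, assuming the operator was configured with the illustrative key `zalando.org/ignore-instance-limits`. Cluster and team names are placeholders.

```yaml
apiVersion: "acid.zalan.do/v1"
kind: postgresql
metadata:
  name: acid-minimal-cluster        # placeholder name
  annotations:
    # must match the configured key exactly, and the value must be "true"
    zalando.org/ignore-instance-limits: "true"
spec:
  teamId: "acid"                    # placeholder team
  numberOfInstances: 1              # kept as-is, bypassing min_instances/max_instances
```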
@@ -66,6 +66,7 @@ data:
   # ignored_annotations: ""
   # infrastructure_roles_secret_name: "postgresql-infrastructure-roles"
   # infrastructure_roles_secrets: "secretname:monitoring-roles,userkey:user,passwordkey:password,rolekey:inrole"
+  # ignore_instance_limits_annotation_key: ""
   # inherited_annotations: owned-by
   # inherited_labels: application,environment
   # kube_iam_role: ""

@@ -89,6 +89,8 @@ spec:
         etcd_host:
           type: string
           default: ""
+        ignore_instance_limits_annotation_key:
+          type: string
         kubernetes_use_configmaps:
           type: boolean
           default: false

@@ -12,6 +12,7 @@ configuration:
   # enable_shm_volume: true
   enable_spilo_wal_path_compat: false
   etcd_host: ""
+  # ignore_instance_limits_annotation_key: ""
   # kubernetes_use_configmaps: false
   max_instances: -1
   min_instances: -1

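For operators driven by the OperatorConfiguration CRD rather than the ConfigMap, the same knob sits directly under `configuration:`, as in this sketch (the key value is again an illustrative placeholder):

```yaml
apiVersion: "acid.zalan.do/v1"
kind: OperatorConfiguration
metadata:
  name: postgresql-operator-default-configuration
configuration:
  min_instances: -1
  max_instances: -1
  ignore_instance_limits_annotation_key: "zalando.org/ignore-instance-limits"
```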
@@ -1115,6 +1115,9 @@ var OperatorConfigCRDResourceValidation = apiextv1.CustomResourceValidation{
 				"etcd_host": {
 					Type: "string",
 				},
+				"ignore_instance_limits_annotation_key": {
+					Type: "string",
+				},
 				"kubernetes_use_configmaps": {
 					Type: "boolean",
 				},

@@ -236,8 +236,6 @@ type OperatorConfigurationData struct {
 	KubernetesUseConfigMaps bool     `json:"kubernetes_use_configmaps,omitempty"`
 	DockerImage             string   `json:"docker_image,omitempty"`
 	Workers                 uint32   `json:"workers,omitempty"`
-	MinInstances            int32    `json:"min_instances,omitempty"`
-	MaxInstances            int32    `json:"max_instances,omitempty"`
 	ResyncPeriod            Duration `json:"resync_period,omitempty"`
 	RepairPeriod            Duration `json:"repair_period,omitempty"`
 	SetMemoryRequestToLimit bool     `json:"set_memory_request_to_limit,omitempty"`

@@ -257,6 +255,10 @@ type OperatorConfigurationData struct {
 	Scalyr           ScalyrConfiguration                `json:"scalyr"`
 	LogicalBackup    OperatorLogicalBackupConfiguration `json:"logical_backup"`
 	ConnectionPooler ConnectionPoolerConfiguration      `json:"connection_pooler"`
+
+	MinInstances                      int32  `json:"min_instances,omitempty"`
+	MaxInstances                      int32  `json:"max_instances,omitempty"`
+	IgnoreInstanceLimitsAnnotationKey string `json:"ignore_instance_limits_annotation_key,omitempty"`
 }
 
 //Duration shortens this frequently used name

@@ -1343,6 +1343,7 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
 		return nil, fmt.Errorf("could not generate volume claim template: %v", err)
 	}
 
+	// global minInstances and maxInstances settings can overwrite manifest
 	numberOfInstances := c.getNumberOfInstances(spec)
 
 	// the operator has domain-specific logic on how to do rolling updates of PG clusters

@@ -1443,9 +1444,16 @@ func (c *Cluster) generateScalyrSidecarSpec(clusterName, APIKey, serverURL, dock
 func (c *Cluster) getNumberOfInstances(spec *acidv1.PostgresSpec) int32 {
 	min := c.OpConfig.MinInstances
 	max := c.OpConfig.MaxInstances
+	instanceLimitAnnotationKey := c.OpConfig.IgnoreInstanceLimitsAnnotationKey
 	cur := spec.NumberOfInstances
 	newcur := cur
 
+	if instanceLimitAnnotationKey != "" {
+		if value, exists := c.ObjectMeta.Annotations[instanceLimitAnnotationKey]; exists && value == "true" {
+			return cur
+		}
+	}
+
 	if spec.StandbyCluster != nil {
 		if newcur == 1 {
 			min = newcur

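A worked example of the early return added above, with hypothetical numbers: assume the operator clamps `numberOfInstances` into the configured limits as documented, and runs with `min_instances: 3`. Without the annotation, a manifest asking for 1 instance would be raised to 3; with the annotation present and equal to `"true"`, the function returns the manifest value untouched.

```yaml
# Operator configuration (hypothetical values)
min_instances: 3
max_instances: -1
ignore_instance_limits_annotation_key: "zalando.org/ignore-instance-limits"

# Cluster manifest excerpt
metadata:
  annotations:
    zalando.org/ignore-instance-limits: "true"   # triggers the early return
spec:
  numberOfInstances: 1   # result: 1; without the annotation it would become 3
```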
@@ -864,6 +864,136 @@ func TestGenerateSpiloPodEnvVars(t *testing.T) {
 	}
 }
 
+func TestGetNumberOfInstances(t *testing.T) {
+	testName := "TestGetNumberOfInstances"
+	tests := []struct {
+		subTest         string
+		config          config.Config
+		annotationKey   string
+		annotationValue string
+		desired         int32
+		provided        int32
+	}{
+		{
+			subTest: "no constraints",
+			config: config.Config{
+				Resources: config.Resources{
+					MinInstances:                      -1,
+					MaxInstances:                      -1,
+					IgnoreInstanceLimitsAnnotationKey: "",
+				},
+			},
+			annotationKey:   "",
+			annotationValue: "",
+			desired:         2,
+			provided:        2,
+		},
+		{
+			subTest: "minInstances defined",
+			config: config.Config{
+				Resources: config.Resources{
+					MinInstances:                      2,
+					MaxInstances:                      -1,
+					IgnoreInstanceLimitsAnnotationKey: "",
+				},
+			},
+			annotationKey:   "",
+			annotationValue: "",
+			desired:         1,
+			provided:        2,
+		},
+		{
+			subTest: "maxInstances defined",
+			config: config.Config{
+				Resources: config.Resources{
+					MinInstances:                      -1,
+					MaxInstances:                      5,
+					IgnoreInstanceLimitsAnnotationKey: "",
+				},
+			},
+			annotationKey:   "",
+			annotationValue: "",
+			desired:         10,
+			provided:        5,
+		},
+		{
+			subTest: "ignore minInstances",
+			config: config.Config{
+				Resources: config.Resources{
+					MinInstances:                      2,
+					MaxInstances:                      -1,
+					IgnoreInstanceLimitsAnnotationKey: "ignore-instance-limits",
+				},
+			},
+			annotationKey:   "ignore-instance-limits",
+			annotationValue: "true",
+			desired:         1,
+			provided:        1,
+		},
+		{
+			subTest: "want to ignore minInstances but wrong key",
+			config: config.Config{
+				Resources: config.Resources{
+					MinInstances:                      2,
+					MaxInstances:                      -1,
+					IgnoreInstanceLimitsAnnotationKey: "ignore-instance-limits",
+				},
+			},
+			annotationKey:   "ignoring-instance-limits",
+			annotationValue: "true",
+			desired:         1,
+			provided:        2,
+		},
+		{
+			subTest: "want to ignore minInstances but wrong value",
+			config: config.Config{
+				Resources: config.Resources{
+					MinInstances:                      2,
+					MaxInstances:                      -1,
+					IgnoreInstanceLimitsAnnotationKey: "ignore-instance-limits",
+				},
+			},
+			annotationKey:   "ignore-instance-limits",
+			annotationValue: "active",
+			desired:         1,
+			provided:        2,
+		},
+		{
+			subTest: "annotation set but no constraints to ignore",
+			config: config.Config{
+				Resources: config.Resources{
+					MinInstances:                      -1,
+					MaxInstances:                      -1,
+					IgnoreInstanceLimitsAnnotationKey: "ignore-instance-limits",
+				},
+			},
+			annotationKey:   "ignore-instance-limits",
+			annotationValue: "true",
+			desired:         1,
+			provided:        1,
+		},
+	}
+
+	for _, tt := range tests {
+		var cluster = New(
+			Config{
+				OpConfig: tt.config,
+			}, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger, eventRecorder)
+
+		cluster.Spec.NumberOfInstances = tt.desired
+		if tt.annotationKey != "" {
+			cluster.ObjectMeta.Annotations = make(map[string]string)
+			cluster.ObjectMeta.Annotations[tt.annotationKey] = tt.annotationValue
+		}
+		numInstances := cluster.getNumberOfInstances(&cluster.Spec)
+
+		if numInstances != tt.provided {
+			t.Errorf("%s %s: Expected to get %d instances, have %d instead",
+				testName, tt.subTest, tt.provided, numInstances)
+		}
+	}
+}
+
 func TestCloneEnv(t *testing.T) {
 	testName := "TestCloneEnv"
 	tests := []struct {

@@ -42,6 +42,7 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur
 	result.Workers = util.CoalesceUInt32(fromCRD.Workers, 8)
 	result.MinInstances = fromCRD.MinInstances
 	result.MaxInstances = fromCRD.MaxInstances
+	result.IgnoreInstanceLimitsAnnotationKey = fromCRD.IgnoreInstanceLimitsAnnotationKey
 	result.ResyncPeriod = util.CoalesceDuration(time.Duration(fromCRD.ResyncPeriod), "30m")
 	result.RepairPeriod = util.CoalesceDuration(time.Duration(fromCRD.RepairPeriod), "5m")
 	result.SetMemoryRequestToLimit = fromCRD.SetMemoryRequestToLimit

@@ -58,9 +58,11 @@ type Resources struct {
 	PodEnvironmentSecret    string            `name:"pod_environment_secret"`
 	NodeReadinessLabel      map[string]string `name:"node_readiness_label" default:""`
 	NodeReadinessLabelMerge string            `name:"node_readiness_label_merge" default:"OR"`
-	MaxInstances            int32             `name:"max_instances" default:"-1"`
-	MinInstances            int32             `name:"min_instances" default:"-1"`
 	ShmVolume               *bool             `name:"enable_shm_volume" default:"true"`
+
+	MaxInstances                      int32  `name:"max_instances" default:"-1"`
+	MinInstances                      int32  `name:"min_instances" default:"-1"`
+	IgnoreInstanceLimitsAnnotationKey string `name:"ignore_instance_limits_annotation_key"`
 }
 
 type InfrastructureRole struct {