put SetMemoryRequestToLimit back to general config and fix docs

This commit is contained in:
Felix Kunde 2019-06-29 02:46:05 +02:00
parent 605e68186a
commit cd132c400b
6 changed files with 24 additions and 21 deletions

View File

@@ -28,6 +28,9 @@ configGeneral:
# period between consecutive sync requests
resync_period: 30m
# map of sidecar names to docker images
# can prevent certain cases of memory overcommitment
# set_memory_request_to_limit: false
# sidecar_docker_images
# example: "exampleimage:exampletag"
@@ -103,8 +106,6 @@ configPostgresPodResources:
default_memory_limit: 1Gi
# memory request value for the postgres containers
default_memory_request: 100Mi
# can prevent certain cases of memory overcommitment
# set_memory_request_to_limit: false
# timeouts related to some operator actions
configTimeouts:

View File

@@ -27,6 +27,9 @@ configGeneral:
repair_period: 5m
# period between consecutive sync requests
resync_period: 30m
# can prevent certain cases of memory overcommitment
# set_memory_request_to_limit: "false"
# map of sidecar names to docker images
# sidecar_docker_images: ""
@@ -99,8 +102,6 @@ configPostgresPodResources:
default_memory_limit: 1Gi
# memory request value for the postgres containers
default_memory_request: 100Mi
# can prevent certain cases of memory overcommitment
# set_memory_request_to_limit: "false"
# timeouts related to some operator actions
configTimeouts:

View File

@@ -105,6 +105,17 @@ Those are top-level keys, containing both leaf keys and groups.
* **repair_period**
period between consecutive repair requests. The default is `5m`.
* **set_memory_request_to_limit**
Set `memory_request` to `memory_limit` for all Postgres clusters (the default
value is also increased). This prevents certain cases of memory overcommitment
at the cost of overprovisioning memory and potential scheduling problems for
containers with high memory limits due to the lack of memory on Kubernetes
cluster nodes. This affects all containers created by the operator (Postgres,
Scalyr sidecar, and other sidecars); to set resources for the operator's own
container, change the [operator deployment manually](https://github.com/zalando/postgres-operator/blob/master/manifests/postgres-operator.yaml#L13).
The default is `false`.
## Postgres users
Parameters describing Postgres users. In a CRD-configuration, they are grouped
@@ -288,16 +299,6 @@ CRD-based configuration.
memory limits for the postgres containers, unless overridden by cluster-specific
settings. The default is `1Gi`.
* **set_memory_request_to_limit**
Set `memory_request` to `memory_limit` for all Postgres clusters (the default
value is also increased). This prevents certain cases of memory overcommitment
at the cost of overprovisioning memory and potential scheduling problems for
containers with high memory limits due to the lack of memory on Kubernetes
cluster nodes. This affects all containers created by the operator (Postgres,
Scalyr sidecar, and other sidecars); to set resources for the operator's own
container, change the [operator deployment manually](https://github.com/zalando/postgres-operator/blob/master/manifests/postgres-operator.yaml#L13).
The default is `false`.
* **enable_shm_volume**
Instruct operator to start any new database pod without limitations on shm
memory. If this option is enabled, to the target database pod will be mounted

View File

@@ -9,6 +9,7 @@ configuration:
min_instances: -1
resync_period: 30m
repair_period: 5m
# set_memory_request_to_limit: false
# sidecar_docker_images:
# example: "exampleimage:exampletag"
workers: 4
@@ -45,7 +46,6 @@ configuration:
default_cpu_request: 100m
default_memory_limit: 1Gi
default_memory_request: 100Mi
# set_memory_request_to_limit: false
timeouts:
pod_label_wait_timeout: 10m
pod_deletion_wait_timeout: 10m

View File

@@ -72,11 +72,10 @@ type KubernetesMetaConfiguration struct {
// PostgresPodResourcesDefaults defines the spec of default resources
type PostgresPodResourcesDefaults struct {
DefaultCPURequest string `json:"default_cpu_request,omitempty"`
DefaultMemoryRequest string `json:"default_memory_request,omitempty"`
DefaultCPULimit string `json:"default_cpu_limit,omitempty"`
DefaultMemoryLimit string `json:"default_memory_limit,omitempty"`
SetMemoryRequestToLimit bool `json:"set_memory_request_to_limit,omitempty"`
DefaultCPURequest string `json:"default_cpu_request,omitempty"`
DefaultMemoryRequest string `json:"default_memory_request,omitempty"`
DefaultCPULimit string `json:"default_cpu_limit,omitempty"`
DefaultMemoryLimit string `json:"default_memory_limit,omitempty"`
}
// OperatorTimeouts defines the timeout of ResourceCheck, PodWait, ReadyWait
@@ -157,6 +156,7 @@ type OperatorConfigurationData struct {
MaxInstances int32 `json:"max_instances,omitempty"`
ResyncPeriod Duration `json:"resync_period,omitempty"`
RepairPeriod Duration `json:"repair_period,omitempty"`
SetMemoryRequestToLimit bool `json:"set_memory_request_to_limit,omitempty"`
Sidecars map[string]string `json:"sidecar_docker_images,omitempty"`
PostgresUsersConfiguration PostgresUsersConfiguration `json:"users"`
Kubernetes KubernetesMetaConfiguration `json:"kubernetes"`

View File

@@ -32,6 +32,7 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur
result.MaxInstances = fromCRD.MaxInstances
result.ResyncPeriod = time.Duration(fromCRD.ResyncPeriod)
result.RepairPeriod = time.Duration(fromCRD.RepairPeriod)
result.SetMemoryRequestToLimit = fromCRD.SetMemoryRequestToLimit
result.Sidecars = fromCRD.Sidecars
// user config
@@ -69,7 +70,6 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur
result.DefaultMemoryRequest = fromCRD.PostgresPodResources.DefaultMemoryRequest
result.DefaultCPULimit = fromCRD.PostgresPodResources.DefaultCPULimit
result.DefaultMemoryLimit = fromCRD.PostgresPodResources.DefaultMemoryLimit
result.SetMemoryRequestToLimit = fromCRD.PostgresPodResources.SetMemoryRequestToLimit
// timeout config
result.ResourceCheckInterval = time.Duration(fromCRD.Timeouts.ResourceCheckInterval)