enable shmVolume setting in OperatorConfiguration (#605)

* enable shmVolume setting in OperatorConfiguration
This commit is contained in:
Felix Kunde 2019-07-05 16:48:37 +02:00 committed by GitHub
parent ff80fc4d0f
commit 36003b8264
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
14 changed files with 48 additions and 33 deletions

View File

@ -18,6 +18,7 @@ data:
docker_image: {{ .Values.docker_image }}
debug_logging: "{{ .Values.configDebug.debug_logging }}"
enable_database_access: "{{ .Values.configDebug.enable_database_access }}"
enable_shm_volume: "{{ .Values.enable_shm_volume }}"
repair_period: {{ .Values.repair_period }}
resync_period: {{ .Values.resync_period }}
ring_log_lines: "{{ .Values.configLoggingRestApi.ring_log_lines }}"

View File

@ -9,6 +9,7 @@ metadata:
app.kubernetes.io/instance: {{ .Release.Name }}
configuration:
docker_image: {{ .Values.docker_image }}
enable_shm_volume: {{ .Values.enable_shm_volume }}
repair_period: {{ .Values.repair_period }}
resync_period: {{ .Values.resync_period }}
workers: {{ .Values.workers }}

View File

@ -15,6 +15,7 @@ podLabels: {}
# config shared from ConfigMap and CRD
docker_image: registry.opensource.zalan.do/acid/spilo-11:1.5-p7
enable_shm_volume: true
repair_period: 5m
resync_period: 5m
spilo_privileged: false

View File

@ -85,6 +85,14 @@ Those are top-level keys, containing both leaf keys and groups.
Spilo. In case of the name conflict with the definition in the cluster
manifest the cluster-specific one is preferred.
* **enable_shm_volume**
Instruct the operator to start any new database pod without limitations on shm
memory. If this option is enabled, a new tmpfs volume will be mounted to the
target database pod to remove the shm memory limitation (see e.g. the
[docker issue](https://github.com/docker-library/postgres/issues/416)).
This option is global for an operator object, and can be overwritten by the
`enableShmVolume` parameter from the Postgres manifest. The default is `true`.
* **workers**
number of working routines the operator spawns to process requests to
create/update/delete/sync clusters concurrently. The default is `4`.
@ -298,14 +306,6 @@ CRD-based configuration.
container, change the [operator deployment manually](https://github.com/zalando/postgres-operator/blob/master/manifests/postgres-operator.yaml#L13).
The default is `false`.
* **enable_shm_volume**
Instruct the operator to start any new database pod without limitations on shm
memory. If this option is enabled, a new tmpfs volume will be mounted to the
target database pod to remove the shm memory limitation (see e.g. the [docker
issue](https://github.com/docker-library/postgres/issues/416)). This option
is global for an operator object, and can be overwritten by the `enableShmVolume`
parameter from the Postgres manifest. The default is `true`.
## Operator timeouts
This set of parameters define various timeouts related to some operator

View File

@ -17,6 +17,7 @@ data:
super_username: postgres
enable_teams_api: "false"
spilo_privileged: "false"
# enable_shm_volume: "true"
# custom_service_annotations:
# "keyx:valuez,keya:valuea"
# set_memory_request_to_limit: "true"

View File

@ -10,6 +10,7 @@ configuration:
max_instances: -1
resync_period: 30m
repair_period: 5m
# enable_shm_volume: true
#sidecar_docker_images:
# example: "exampleimage:exampletag"

View File

@ -155,6 +155,7 @@ type OperatorConfigurationData struct {
MaxInstances int32 `json:"max_instances,omitempty"`
ResyncPeriod Duration `json:"resync_period,omitempty"`
RepairPeriod Duration `json:"repair_period,omitempty"`
ShmVolume *bool `json:"enable_shm_volume,omitempty"`
Sidecars map[string]string `json:"sidecar_docker_images,omitempty"`
PostgresUsersConfiguration PostgresUsersConfiguration `json:"users"`
Kubernetes KubernetesMetaConfiguration `json:"kubernetes"`

View File

@ -8,6 +8,7 @@ import (
"testing"
"time"
"github.com/zalando/postgres-operator/pkg/util"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
@ -215,6 +216,7 @@ var unmarshalCluster = []struct {
"clone" : {
"cluster": "acid-batman"
},
"enableShmVolume": false,
"patroni": {
"initdb": {
"encoding": "UTF8",
@ -266,6 +268,7 @@ var unmarshalCluster = []struct {
StorageClass: "SSD",
SubPath: "subdir",
},
ShmVolume: util.False(),
Patroni: Patroni{
InitDB: map[string]string{
"encoding": "UTF8",
@ -313,7 +316,7 @@ var unmarshalCluster = []struct {
},
Error: "",
},
marshal: []byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"acid-testcluster1","creationTimestamp":null},"spec":{"postgresql":{"version":"9.6","parameters":{"log_statement":"all","max_connections":"10","shared_buffers":"32MB"}},"volume":{"size":"5Gi","storageClass":"SSD", "subPath": "subdir"},"patroni":{"initdb":{"data-checksums":"true","encoding":"UTF8","locale":"en_US.UTF-8"},"pg_hba":["hostssl all all 0.0.0.0/0 md5","host all all 0.0.0.0/0 md5"],"ttl":30,"loop_wait":10,"retry_timeout":10,"maximum_lag_on_failover":33554432,"slots":{"permanent_logical_1":{"database":"foo","plugin":"pgoutput","type":"logical"}}},"resources":{"requests":{"cpu":"10m","memory":"50Mi"},"limits":{"cpu":"300m","memory":"3000Mi"}},"teamId":"ACID","allowedSourceRanges":["127.0.0.1/32"],"numberOfInstances":2,"users":{"zalando":["superuser","createdb"]},"maintenanceWindows":["Mon:01:00-06:00","Sat:00:00-04:00","05:00-05:15"],"clone":{"cluster":"acid-batman"}},"status":{"PostgresClusterStatus":""}}`),
marshal: []byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"acid-testcluster1","creationTimestamp":null},"spec":{"postgresql":{"version":"9.6","parameters":{"log_statement":"all","max_connections":"10","shared_buffers":"32MB"}},"volume":{"size":"5Gi","storageClass":"SSD", "subPath": "subdir"},"enableShmVolume":false,"patroni":{"initdb":{"data-checksums":"true","encoding":"UTF8","locale":"en_US.UTF-8"},"pg_hba":["hostssl all all 0.0.0.0/0 md5","host all all 0.0.0.0/0 md5"],"ttl":30,"loop_wait":10,"retry_timeout":10,"maximum_lag_on_failover":33554432,"slots":{"permanent_logical_1":{"database":"foo","plugin":"pgoutput","type":"logical"}}},"resources":{"requests":{"cpu":"10m","memory":"50Mi"},"limits":{"cpu":"300m","memory":"3000Mi"}},"teamId":"ACID","allowedSourceRanges":["127.0.0.1/32"],"numberOfInstances":2,"users":{"zalando":["superuser","createdb"]},"maintenanceWindows":["Mon:01:00-06:00","Sat:00:00-04:00","05:00-05:15"],"clone":{"cluster":"acid-batman"}},"status":{"PostgresClusterStatus":""}}`),
err: nil},
// example with teamId set in input
{

View File

@ -209,6 +209,11 @@ func (in *OperatorConfiguration) DeepCopyObject() runtime.Object {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *OperatorConfigurationData) DeepCopyInto(out *OperatorConfigurationData) {
*out = *in
if in.ShmVolume != nil {
in, out := &in.ShmVolume, &out.ShmVolume
*out = new(bool)
**out = **in
}
if in.Sidecars != nil {
in, out := &in.Sidecars, &out.Sidecars
*out = make(map[string]string, len(*in))

View File

@ -360,8 +360,6 @@ func generateContainer(
volumeMounts []v1.VolumeMount,
privilegedMode bool,
) *v1.Container {
falseBool := false
return &v1.Container{
Name: name,
Image: *dockerImage,
@ -385,7 +383,7 @@ func generateContainer(
Env: envVars,
SecurityContext: &v1.SecurityContext{
Privileged: &privilegedMode,
ReadOnlyRootFilesystem: &falseBool,
ReadOnlyRootFilesystem: util.False(),
},
}
}
@ -421,9 +419,9 @@ func generateSidecarContainers(sidecars []acidv1.Sidecar,
// Check whether or not we're requested to mount an shm volume,
// taking into account that PostgreSQL manifest has precedence.
func mountShmVolumeNeeded(opConfig config.Config, pgSpec *acidv1.PostgresSpec) bool {
if pgSpec.ShmVolume != nil {
return *pgSpec.ShmVolume
func mountShmVolumeNeeded(opConfig config.Config, pgSpec *acidv1.PostgresSpec) *bool {
if pgSpec.ShmVolume != nil && *pgSpec.ShmVolume {
return pgSpec.ShmVolume
}
return opConfig.ShmVolume
@ -442,7 +440,7 @@ func generatePodTemplate(
podServiceAccountName string,
kubeIAMRole string,
priorityClassName string,
shmVolume bool,
shmVolume *bool,
podAntiAffinity bool,
podAntiAffinityTopologyKey string,
additionalSecretMount string,
@ -467,7 +465,7 @@ func generatePodTemplate(
SecurityContext: &securityContext,
}
if shmVolume {
if shmVolume != nil && *shmVolume {
addShmVolume(&podSpec)
}
@ -1456,7 +1454,7 @@ func (c *Cluster) generateLogicalBackupJob() (*batchv1beta1.CronJob, error) {
c.OpConfig.PodServiceAccountName,
c.OpConfig.KubeIAMRole,
"",
false,
util.False(),
false,
"",
"",

View File

@ -8,6 +8,7 @@ import (
"testing"
acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1"
"github.com/zalando/postgres-operator/pkg/util"
"github.com/zalando/postgres-operator/pkg/util/config"
"github.com/zalando/postgres-operator/pkg/util/constants"
"github.com/zalando/postgres-operator/pkg/util/k8sutil"
@ -17,16 +18,6 @@ import (
"k8s.io/apimachinery/pkg/util/intstr"
)
// True returns a pointer to a boolean holding the value true.
func True() *bool {
	v := new(bool)
	*v = true
	return v
}
// False returns a pointer to a boolean holding the value false.
func False() *bool {
	v := new(bool)
	return v
}
func toIntStr(val int) *intstr.IntOrString {
b := intstr.FromInt(val)
return &b
@ -118,14 +109,14 @@ func TestCreateLoadBalancerLogic(t *testing.T) {
{
subtest: "new format, load balancer is enabled for replica",
role: Replica,
spec: &acidv1.PostgresSpec{EnableReplicaLoadBalancer: True()},
spec: &acidv1.PostgresSpec{EnableReplicaLoadBalancer: util.True()},
opConfig: config.Config{},
result: true,
},
{
subtest: "new format, load balancer is disabled for replica",
role: Replica,
spec: &acidv1.PostgresSpec{EnableReplicaLoadBalancer: False()},
spec: &acidv1.PostgresSpec{EnableReplicaLoadBalancer: util.False()},
opConfig: config.Config{},
result: false,
},
@ -208,7 +199,7 @@ func TestGeneratePodDisruptionBudget(t *testing.T) {
// With PodDisruptionBudget disabled.
{
New(
Config{OpConfig: config.Config{Resources: config.Resources{ClusterNameLabel: "cluster-name", PodRoleLabel: "spilo-role"}, PDBNameFormat: "postgres-{cluster}-pdb", EnablePodDisruptionBudget: False()}},
Config{OpConfig: config.Config{Resources: config.Resources{ClusterNameLabel: "cluster-name", PodRoleLabel: "spilo-role"}, PDBNameFormat: "postgres-{cluster}-pdb", EnablePodDisruptionBudget: util.False()}},
k8sutil.KubernetesClient{},
acidv1.Postgresql{
ObjectMeta: metav1.ObjectMeta{Name: "myapp-database", Namespace: "myapp"},
@ -231,7 +222,7 @@ func TestGeneratePodDisruptionBudget(t *testing.T) {
// With non-default PDBNameFormat and PodDisruptionBudget explicitly enabled.
{
New(
Config{OpConfig: config.Config{Resources: config.Resources{ClusterNameLabel: "cluster-name", PodRoleLabel: "spilo-role"}, PDBNameFormat: "postgres-{cluster}-databass-budget", EnablePodDisruptionBudget: True()}},
Config{OpConfig: config.Config{Resources: config.Resources{ClusterNameLabel: "cluster-name", PodRoleLabel: "spilo-role"}, PDBNameFormat: "postgres-{cluster}-databass-budget", EnablePodDisruptionBudget: util.True()}},
k8sutil.KubernetesClient{},
acidv1.Postgresql{
ObjectMeta: metav1.ObjectMeta{Name: "myapp-database", Namespace: "myapp"},

View File

@ -31,6 +31,7 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur
result.MaxInstances = fromCRD.MaxInstances
result.ResyncPeriod = time.Duration(fromCRD.ResyncPeriod)
result.RepairPeriod = time.Duration(fromCRD.RepairPeriod)
result.ShmVolume = fromCRD.ShmVolume
result.Sidecars = fromCRD.Sidecars
result.SuperUsername = fromCRD.PostgresUsersConfiguration.SuperUsername

View File

@ -42,7 +42,7 @@ type Resources struct {
NodeReadinessLabel map[string]string `name:"node_readiness_label" default:""`
MaxInstances int32 `name:"max_instances" default:"-1"`
MinInstances int32 `name:"min_instances" default:"-1"`
ShmVolume bool `name:"enable_shm_volume" default:"true"`
ShmVolume *bool `name:"enable_shm_volume" default:"true"`
}
// Auth describes authentication specific configuration parameters

View File

@ -26,6 +26,17 @@ func init() {
rand.Seed(time.Now().Unix())
}
// helper function to get bool pointers
// True returns a pointer to the boolean value true.
//
// Useful for populating optional *bool fields (e.g. in Kubernetes-style
// API objects) where nil means "unset" and must be distinguishable from
// an explicit true/false.
func True() *bool {
	b := true
	return &b
}
// False returns a pointer to the boolean value false.
//
// Useful for populating optional *bool fields (e.g. in Kubernetes-style
// API objects) where nil means "unset" and must be distinguishable from
// an explicit true/false.
func False() *bool {
	b := false
	return &b
}
// RandomPassword generates random alphanumeric password of a given length.
func RandomPassword(n int) string {
b := make([]byte, n)