Merge branch 'master' into update-docs-v1.2

This commit is contained in:
Felix Kunde 2019-07-05 19:34:31 +02:00
commit 39f0ec3ddc
17 changed files with 90 additions and 39 deletions

View File

@ -18,6 +18,7 @@ data:
docker_image: {{ .Values.docker_image }} docker_image: {{ .Values.docker_image }}
debug_logging: "{{ .Values.configDebug.debug_logging }}" debug_logging: "{{ .Values.configDebug.debug_logging }}"
enable_database_access: "{{ .Values.configDebug.enable_database_access }}" enable_database_access: "{{ .Values.configDebug.enable_database_access }}"
enable_shm_volume: "{{ .Values.enable_shm_volume }}"
repair_period: {{ .Values.repair_period }} repair_period: {{ .Values.repair_period }}
resync_period: {{ .Values.resync_period }} resync_period: {{ .Values.resync_period }}
ring_log_lines: "{{ .Values.configLoggingRestApi.ring_log_lines }}" ring_log_lines: "{{ .Values.configLoggingRestApi.ring_log_lines }}"

View File

@ -9,6 +9,7 @@ metadata:
app.kubernetes.io/instance: {{ .Release.Name }} app.kubernetes.io/instance: {{ .Release.Name }}
configuration: configuration:
docker_image: {{ .Values.docker_image }} docker_image: {{ .Values.docker_image }}
enable_shm_volume: {{ .Values.enable_shm_volume }}
repair_period: {{ .Values.repair_period }} repair_period: {{ .Values.repair_period }}
resync_period: {{ .Values.resync_period }} resync_period: {{ .Values.resync_period }}
workers: {{ .Values.workers }} workers: {{ .Values.workers }}

View File

@ -15,6 +15,7 @@ podLabels: {}
# config shared from ConfigMap and CRD # config shared from ConfigMap and CRD
docker_image: registry.opensource.zalan.do/acid/spilo-11:1.5-p7 docker_image: registry.opensource.zalan.do/acid/spilo-11:1.5-p7
enable_shm_volume: true
repair_period: 5m repair_period: 5m
resync_period: 5m resync_period: 5m
spilo_privileged: false spilo_privileged: false

View File

@ -87,6 +87,14 @@ Those are top-level keys, containing both leaf keys and groups.
Spilo. In case of the name conflict with the definition in the cluster Spilo. In case of the name conflict with the definition in the cluster
manifest the cluster-specific one is preferred. manifest the cluster-specific one is preferred.
* **enable_shm_volume**
Instructs the operator to start any new database pod without limitations on
shm memory. If this option is enabled, a new tmpfs volume is mounted into the
target database pod to remove the shm memory limitation (see e.g. the
[docker issue](https://github.com/docker-library/postgres/issues/416)).
This option is global for an operator object and can be overwritten by the
`enableShmVolume` parameter in the Postgres manifest. The default is `true`.
* **workers** * **workers**
number of working routines the operator spawns to process requests to number of working routines the operator spawns to process requests to
create/update/delete/sync clusters concurrently. The default is `4`. create/update/delete/sync clusters concurrently. The default is `4`.
@ -300,14 +308,6 @@ CRD-based configuration.
container, change the [operator deployment manually](https://github.com/zalando/postgres-operator/blob/master/manifests/postgres-operator.yaml#L13). container, change the [operator deployment manually](https://github.com/zalando/postgres-operator/blob/master/manifests/postgres-operator.yaml#L13).
The default is `false`. The default is `false`.
* **enable_shm_volume**
Instruct operator to start any new database pod without limitations on shm
memory. If this option is enabled, to the target database pod will be mounted
a new tmpfs volume to remove shm memory limitation (see e.g. the [docker
issue](https://github.com/docker-library/postgres/issues/416)). This option
is global for an operator object, and can be overwritten by `enableShmVolume`
parameter from Postgres manifest. The default is `true`
## Operator timeouts ## Operator timeouts
This set of parameters define various timeouts related to some operator This set of parameters define various timeouts related to some operator

View File

@ -366,7 +366,7 @@ metadata:
name: acid-minimal-cluster name: acid-minimal-cluster
spec: spec:
... ...
init_containers: initContainers:
- name: "container-name" - name: "container-name"
image: "company/image:tag" image: "company/image:tag"
env: env:
@ -374,7 +374,7 @@ spec:
value: "any-k8s-env-things" value: "any-k8s-env-things"
``` ```
`init_containers` accepts full `v1.Container` definition. `initContainers` accepts full `v1.Container` definition.
## Increase volume size ## Increase volume size

View File

@ -4,7 +4,7 @@ kind: postgresql
metadata: metadata:
name: acid-test-cluster name: acid-test-cluster
spec: spec:
init_containers: initContainers:
- name: date - name: date
image: busybox image: busybox
command: [ "/bin/date" ] command: [ "/bin/date" ]
@ -58,7 +58,7 @@ spec:
loop_wait: &loop_wait 10 loop_wait: &loop_wait 10
retry_timeout: 10 retry_timeout: 10
maximum_lag_on_failover: 33554432 maximum_lag_on_failover: 33554432
# restore a Postgres DB with point-in-time-recovery # restore a Postgres DB with point-in-time-recovery
# with a non-empty timestamp, clone from an S3 bucket using the latest backup before the timestamp # with a non-empty timestamp, clone from an S3 bucket using the latest backup before the timestamp
# with an empty/absent timestamp, clone from an existing alive cluster using pg_basebackup # with an empty/absent timestamp, clone from an existing alive cluster using pg_basebackup
# clone: # clone:

View File

@ -17,6 +17,7 @@ data:
super_username: postgres super_username: postgres
enable_teams_api: "false" enable_teams_api: "false"
spilo_privileged: "false" spilo_privileged: "false"
# enable_shm_volume: "true"
# custom_service_annotations: # custom_service_annotations:
# "keyx:valuez,keya:valuea" # "keyx:valuez,keya:valuea"
# set_memory_request_to_limit: "true" # set_memory_request_to_limit: "true"

View File

@ -10,6 +10,7 @@ configuration:
max_instances: -1 max_instances: -1
resync_period: 30m resync_period: 30m
repair_period: 5m repair_period: 5m
# enable_shm_volume: true
#sidecar_docker_images: #sidecar_docker_images:
# example: "exampleimage:exampletag" # example: "exampleimage:exampletag"

View File

@ -155,6 +155,7 @@ type OperatorConfigurationData struct {
MaxInstances int32 `json:"max_instances,omitempty"` MaxInstances int32 `json:"max_instances,omitempty"`
ResyncPeriod Duration `json:"resync_period,omitempty"` ResyncPeriod Duration `json:"resync_period,omitempty"`
RepairPeriod Duration `json:"repair_period,omitempty"` RepairPeriod Duration `json:"repair_period,omitempty"`
ShmVolume *bool `json:"enable_shm_volume,omitempty"`
Sidecars map[string]string `json:"sidecar_docker_images,omitempty"` Sidecars map[string]string `json:"sidecar_docker_images,omitempty"`
PostgresUsersConfiguration PostgresUsersConfiguration `json:"users"` PostgresUsersConfiguration PostgresUsersConfiguration `json:"users"`
Kubernetes KubernetesMetaConfiguration `json:"kubernetes"` Kubernetes KubernetesMetaConfiguration `json:"kubernetes"`

View File

@ -53,12 +53,16 @@ type PostgresSpec struct {
Databases map[string]string `json:"databases,omitempty"` Databases map[string]string `json:"databases,omitempty"`
Tolerations []v1.Toleration `json:"tolerations,omitempty"` Tolerations []v1.Toleration `json:"tolerations,omitempty"`
Sidecars []Sidecar `json:"sidecars,omitempty"` Sidecars []Sidecar `json:"sidecars,omitempty"`
InitContainers []v1.Container `json:"init_containers,omitempty"` InitContainers []v1.Container `json:"initContainers,omitempty"`
PodPriorityClassName string `json:"pod_priority_class_name,omitempty"` PodPriorityClassName string `json:"podPriorityClassName,omitempty"`
ShmVolume *bool `json:"enableShmVolume,omitempty"` ShmVolume *bool `json:"enableShmVolume,omitempty"`
EnableLogicalBackup bool `json:"enableLogicalBackup,omitempty"` EnableLogicalBackup bool `json:"enableLogicalBackup,omitempty"`
LogicalBackupSchedule string `json:"logicalBackupSchedule,omitempty"` LogicalBackupSchedule string `json:"logicalBackupSchedule,omitempty"`
StandbyCluster *StandbyDescription `json:"standby"` StandbyCluster *StandbyDescription `json:"standby"`
// deprecated json tags
InitContainersOld []v1.Container `json:"init_containers,omitempty"`
PodPriorityClassNameOld string `json:"pod_priority_class_name,omitempty"`
} }
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

View File

@ -8,6 +8,7 @@ import (
"testing" "testing"
"time" "time"
"github.com/zalando/postgres-operator/pkg/util"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
) )
@ -170,6 +171,7 @@ var unmarshalCluster = []struct {
marshal: []byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"acid-testcluster1","creationTimestamp":null},"spec":{"postgresql":{"version":"","parameters":null},"volume":{"size":"","storageClass":""},"patroni":{"initdb":null,"pg_hba":null,"ttl":0,"loop_wait":0,"retry_timeout":0,"maximum_lag_on_failover":0,"slots":null},"resources":{"requests":{"cpu":"","memory":""},"limits":{"cpu":"","memory":""}},"teamId":"","allowedSourceRanges":null,"numberOfInstances":0,"users":null,"clone":{}},"status":{"PostgresClusterStatus":"Invalid"}}`), marshal: []byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"acid-testcluster1","creationTimestamp":null},"spec":{"postgresql":{"version":"","parameters":null},"volume":{"size":"","storageClass":""},"patroni":{"initdb":null,"pg_hba":null,"ttl":0,"loop_wait":0,"retry_timeout":0,"maximum_lag_on_failover":0,"slots":null},"resources":{"requests":{"cpu":"","memory":""},"limits":{"cpu":"","memory":""}},"teamId":"","allowedSourceRanges":null,"numberOfInstances":0,"users":null,"clone":{}},"status":{"PostgresClusterStatus":"Invalid"}}`),
err: nil}, err: nil},
// example with detailed input manifest // example with detailed input manifest
// and deprecated pod_priority_class_name -> podPriorityClassName
{ {
in: []byte(`{ in: []byte(`{
"kind": "Postgresql", "kind": "Postgresql",
@ -179,6 +181,7 @@ var unmarshalCluster = []struct {
}, },
"spec": { "spec": {
"teamId": "ACID", "teamId": "ACID",
"pod_priority_class_name": "spilo-pod-priority",
"volume": { "volume": {
"size": "5Gi", "size": "5Gi",
"storageClass": "SSD", "storageClass": "SSD",
@ -215,6 +218,7 @@ var unmarshalCluster = []struct {
"clone" : { "clone" : {
"cluster": "acid-batman" "cluster": "acid-batman"
}, },
"enableShmVolume": false,
"patroni": { "patroni": {
"initdb": { "initdb": {
"encoding": "UTF8", "encoding": "UTF8",
@ -261,11 +265,13 @@ var unmarshalCluster = []struct {
"log_statement": "all", "log_statement": "all",
}, },
}, },
PodPriorityClassNameOld: "spilo-pod-priority",
Volume: Volume{ Volume: Volume{
Size: "5Gi", Size: "5Gi",
StorageClass: "SSD", StorageClass: "SSD",
SubPath: "subdir", SubPath: "subdir",
}, },
ShmVolume: util.False(),
Patroni: Patroni{ Patroni: Patroni{
InitDB: map[string]string{ InitDB: map[string]string{
"encoding": "UTF8", "encoding": "UTF8",
@ -313,7 +319,7 @@ var unmarshalCluster = []struct {
}, },
Error: "", Error: "",
}, },
marshal: []byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"acid-testcluster1","creationTimestamp":null},"spec":{"postgresql":{"version":"9.6","parameters":{"log_statement":"all","max_connections":"10","shared_buffers":"32MB"}},"volume":{"size":"5Gi","storageClass":"SSD", "subPath": "subdir"},"patroni":{"initdb":{"data-checksums":"true","encoding":"UTF8","locale":"en_US.UTF-8"},"pg_hba":["hostssl all all 0.0.0.0/0 md5","host all all 0.0.0.0/0 md5"],"ttl":30,"loop_wait":10,"retry_timeout":10,"maximum_lag_on_failover":33554432,"slots":{"permanent_logical_1":{"database":"foo","plugin":"pgoutput","type":"logical"}}},"resources":{"requests":{"cpu":"10m","memory":"50Mi"},"limits":{"cpu":"300m","memory":"3000Mi"}},"teamId":"ACID","allowedSourceRanges":["127.0.0.1/32"],"numberOfInstances":2,"users":{"zalando":["superuser","createdb"]},"maintenanceWindows":["Mon:01:00-06:00","Sat:00:00-04:00","05:00-05:15"],"clone":{"cluster":"acid-batman"}},"status":{"PostgresClusterStatus":""}}`), marshal: []byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"acid-testcluster1","creationTimestamp":null},"spec":{"postgresql":{"version":"9.6","parameters":{"log_statement":"all","max_connections":"10","shared_buffers":"32MB"}},"pod_priority_class_name":"spilo-pod-priority","volume":{"size":"5Gi","storageClass":"SSD", "subPath": "subdir"},"enableShmVolume":false,"patroni":{"initdb":{"data-checksums":"true","encoding":"UTF8","locale":"en_US.UTF-8"},"pg_hba":["hostssl all all 0.0.0.0/0 md5","host all all 0.0.0.0/0 
md5"],"ttl":30,"loop_wait":10,"retry_timeout":10,"maximum_lag_on_failover":33554432,"slots":{"permanent_logical_1":{"database":"foo","plugin":"pgoutput","type":"logical"}}},"resources":{"requests":{"cpu":"10m","memory":"50Mi"},"limits":{"cpu":"300m","memory":"3000Mi"}},"teamId":"ACID","allowedSourceRanges":["127.0.0.1/32"],"numberOfInstances":2,"users":{"zalando":["superuser","createdb"]},"maintenanceWindows":["Mon:01:00-06:00","Sat:00:00-04:00","05:00-05:15"],"clone":{"cluster":"acid-batman"}},"status":{"PostgresClusterStatus":""}}`),
err: nil}, err: nil},
// example with teamId set in input // example with teamId set in input
{ {

View File

@ -209,6 +209,11 @@ func (in *OperatorConfiguration) DeepCopyObject() runtime.Object {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *OperatorConfigurationData) DeepCopyInto(out *OperatorConfigurationData) { func (in *OperatorConfigurationData) DeepCopyInto(out *OperatorConfigurationData) {
*out = *in *out = *in
if in.ShmVolume != nil {
in, out := &in.ShmVolume, &out.ShmVolume
*out = new(bool)
**out = **in
}
if in.Sidecars != nil { if in.Sidecars != nil {
in, out := &in.Sidecars, &out.Sidecars in, out := &in.Sidecars, &out.Sidecars
*out = make(map[string]string, len(*in)) *out = make(map[string]string, len(*in))
@ -508,6 +513,13 @@ func (in *PostgresSpec) DeepCopyInto(out *PostgresSpec) {
*out = new(StandbyDescription) *out = new(StandbyDescription)
**out = **in **out = **in
} }
if in.InitContainersOld != nil {
in, out := &in.InitContainersOld, &out.InitContainersOld
*out = make([]corev1.Container, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return return
} }

View File

@ -360,8 +360,6 @@ func generateContainer(
volumeMounts []v1.VolumeMount, volumeMounts []v1.VolumeMount,
privilegedMode bool, privilegedMode bool,
) *v1.Container { ) *v1.Container {
falseBool := false
return &v1.Container{ return &v1.Container{
Name: name, Name: name,
Image: *dockerImage, Image: *dockerImage,
@ -385,7 +383,7 @@ func generateContainer(
Env: envVars, Env: envVars,
SecurityContext: &v1.SecurityContext{ SecurityContext: &v1.SecurityContext{
Privileged: &privilegedMode, Privileged: &privilegedMode,
ReadOnlyRootFilesystem: &falseBool, ReadOnlyRootFilesystem: util.False(),
}, },
} }
} }
@ -421,9 +419,9 @@ func generateSidecarContainers(sidecars []acidv1.Sidecar,
// Check whether or not we're requested to mount an shm volume, // Check whether or not we're requested to mount an shm volume,
// taking into account that PostgreSQL manifest has precedence. // taking into account that PostgreSQL manifest has precedence.
func mountShmVolumeNeeded(opConfig config.Config, pgSpec *acidv1.PostgresSpec) bool { func mountShmVolumeNeeded(opConfig config.Config, pgSpec *acidv1.PostgresSpec) *bool {
if pgSpec.ShmVolume != nil { if pgSpec.ShmVolume != nil && *pgSpec.ShmVolume {
return *pgSpec.ShmVolume return pgSpec.ShmVolume
} }
return opConfig.ShmVolume return opConfig.ShmVolume
@ -442,7 +440,7 @@ func generatePodTemplate(
podServiceAccountName string, podServiceAccountName string,
kubeIAMRole string, kubeIAMRole string,
priorityClassName string, priorityClassName string,
shmVolume bool, shmVolume *bool,
podAntiAffinity bool, podAntiAffinity bool,
podAntiAffinityTopologyKey string, podAntiAffinityTopologyKey string,
additionalSecretMount string, additionalSecretMount string,
@ -467,7 +465,7 @@ func generatePodTemplate(
SecurityContext: &securityContext, SecurityContext: &securityContext,
} }
if shmVolume { if shmVolume != nil && *shmVolume {
addShmVolume(&podSpec) addShmVolume(&podSpec)
} }
@ -801,6 +799,28 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*v1beta1.State
return nil, fmt.Errorf("s3_wal_path is empty for standby cluster") return nil, fmt.Errorf("s3_wal_path is empty for standby cluster")
} }
// backward compatible check for InitContainers
if spec.InitContainersOld != nil {
msg := "Manifest parameter init_containers is deprecated."
if spec.InitContainers == nil {
c.logger.Warningf("%s Consider using initContainers instead.", msg)
spec.InitContainers = spec.InitContainersOld
} else {
c.logger.Warningf("%s Only value from initContainers is used", msg)
}
}
// backward compatible check for PodPriorityClassName
if spec.PodPriorityClassNameOld != "" {
msg := "Manifest parameter pod_priority_class_name is deprecated."
if spec.PodPriorityClassName == "" {
c.logger.Warningf("%s Consider using podPriorityClassName instead.", msg)
spec.PodPriorityClassName = spec.PodPriorityClassNameOld
} else {
c.logger.Warningf("%s Only value from podPriorityClassName is used", msg)
}
}
spiloConfiguration, err := generateSpiloJSONConfiguration(&spec.PostgresqlParam, &spec.Patroni, c.OpConfig.PamRoleName, c.logger) spiloConfiguration, err := generateSpiloJSONConfiguration(&spec.PostgresqlParam, &spec.Patroni, c.OpConfig.PamRoleName, c.logger)
if err != nil { if err != nil {
return nil, fmt.Errorf("could not generate Spilo JSON configuration: %v", err) return nil, fmt.Errorf("could not generate Spilo JSON configuration: %v", err)
@ -1456,7 +1476,7 @@ func (c *Cluster) generateLogicalBackupJob() (*batchv1beta1.CronJob, error) {
c.OpConfig.PodServiceAccountName, c.OpConfig.PodServiceAccountName,
c.OpConfig.KubeIAMRole, c.OpConfig.KubeIAMRole,
"", "",
false, util.False(),
false, false,
"", "",
"", "",

View File

@ -8,6 +8,7 @@ import (
"testing" "testing"
acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1" acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1"
"github.com/zalando/postgres-operator/pkg/util"
"github.com/zalando/postgres-operator/pkg/util/config" "github.com/zalando/postgres-operator/pkg/util/config"
"github.com/zalando/postgres-operator/pkg/util/constants" "github.com/zalando/postgres-operator/pkg/util/constants"
"github.com/zalando/postgres-operator/pkg/util/k8sutil" "github.com/zalando/postgres-operator/pkg/util/k8sutil"
@ -17,16 +18,6 @@ import (
"k8s.io/apimachinery/pkg/util/intstr" "k8s.io/apimachinery/pkg/util/intstr"
) )
func True() *bool {
b := true
return &b
}
func False() *bool {
b := false
return &b
}
func toIntStr(val int) *intstr.IntOrString { func toIntStr(val int) *intstr.IntOrString {
b := intstr.FromInt(val) b := intstr.FromInt(val)
return &b return &b
@ -118,14 +109,14 @@ func TestCreateLoadBalancerLogic(t *testing.T) {
{ {
subtest: "new format, load balancer is enabled for replica", subtest: "new format, load balancer is enabled for replica",
role: Replica, role: Replica,
spec: &acidv1.PostgresSpec{EnableReplicaLoadBalancer: True()}, spec: &acidv1.PostgresSpec{EnableReplicaLoadBalancer: util.True()},
opConfig: config.Config{}, opConfig: config.Config{},
result: true, result: true,
}, },
{ {
subtest: "new format, load balancer is disabled for replica", subtest: "new format, load balancer is disabled for replica",
role: Replica, role: Replica,
spec: &acidv1.PostgresSpec{EnableReplicaLoadBalancer: False()}, spec: &acidv1.PostgresSpec{EnableReplicaLoadBalancer: util.False()},
opConfig: config.Config{}, opConfig: config.Config{},
result: false, result: false,
}, },
@ -208,7 +199,7 @@ func TestGeneratePodDisruptionBudget(t *testing.T) {
// With PodDisruptionBudget disabled. // With PodDisruptionBudget disabled.
{ {
New( New(
Config{OpConfig: config.Config{Resources: config.Resources{ClusterNameLabel: "cluster-name", PodRoleLabel: "spilo-role"}, PDBNameFormat: "postgres-{cluster}-pdb", EnablePodDisruptionBudget: False()}}, Config{OpConfig: config.Config{Resources: config.Resources{ClusterNameLabel: "cluster-name", PodRoleLabel: "spilo-role"}, PDBNameFormat: "postgres-{cluster}-pdb", EnablePodDisruptionBudget: util.False()}},
k8sutil.KubernetesClient{}, k8sutil.KubernetesClient{},
acidv1.Postgresql{ acidv1.Postgresql{
ObjectMeta: metav1.ObjectMeta{Name: "myapp-database", Namespace: "myapp"}, ObjectMeta: metav1.ObjectMeta{Name: "myapp-database", Namespace: "myapp"},
@ -231,7 +222,7 @@ func TestGeneratePodDisruptionBudget(t *testing.T) {
// With non-default PDBNameFormat and PodDisruptionBudget explicitly enabled. // With non-default PDBNameFormat and PodDisruptionBudget explicitly enabled.
{ {
New( New(
Config{OpConfig: config.Config{Resources: config.Resources{ClusterNameLabel: "cluster-name", PodRoleLabel: "spilo-role"}, PDBNameFormat: "postgres-{cluster}-databass-budget", EnablePodDisruptionBudget: True()}}, Config{OpConfig: config.Config{Resources: config.Resources{ClusterNameLabel: "cluster-name", PodRoleLabel: "spilo-role"}, PDBNameFormat: "postgres-{cluster}-databass-budget", EnablePodDisruptionBudget: util.True()}},
k8sutil.KubernetesClient{}, k8sutil.KubernetesClient{},
acidv1.Postgresql{ acidv1.Postgresql{
ObjectMeta: metav1.ObjectMeta{Name: "myapp-database", Namespace: "myapp"}, ObjectMeta: metav1.ObjectMeta{Name: "myapp-database", Namespace: "myapp"},

View File

@ -31,6 +31,7 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur
result.MaxInstances = fromCRD.MaxInstances result.MaxInstances = fromCRD.MaxInstances
result.ResyncPeriod = time.Duration(fromCRD.ResyncPeriod) result.ResyncPeriod = time.Duration(fromCRD.ResyncPeriod)
result.RepairPeriod = time.Duration(fromCRD.RepairPeriod) result.RepairPeriod = time.Duration(fromCRD.RepairPeriod)
result.ShmVolume = fromCRD.ShmVolume
result.Sidecars = fromCRD.Sidecars result.Sidecars = fromCRD.Sidecars
result.SuperUsername = fromCRD.PostgresUsersConfiguration.SuperUsername result.SuperUsername = fromCRD.PostgresUsersConfiguration.SuperUsername

View File

@ -42,7 +42,7 @@ type Resources struct {
NodeReadinessLabel map[string]string `name:"node_readiness_label" default:""` NodeReadinessLabel map[string]string `name:"node_readiness_label" default:""`
MaxInstances int32 `name:"max_instances" default:"-1"` MaxInstances int32 `name:"max_instances" default:"-1"`
MinInstances int32 `name:"min_instances" default:"-1"` MinInstances int32 `name:"min_instances" default:"-1"`
ShmVolume bool `name:"enable_shm_volume" default:"true"` ShmVolume *bool `name:"enable_shm_volume" default:"true"`
} }
// Auth describes authentication specific configuration parameters // Auth describes authentication specific configuration parameters

View File

@ -26,6 +26,17 @@ func init() {
rand.Seed(time.Now().Unix()) rand.Seed(time.Now().Unix())
} }
// helper function to get bool pointers
func True() *bool {
b := true
return &b
}
func False() *bool {
b := false
return &b
}
// RandomPassword generates random alphanumeric password of a given length. // RandomPassword generates random alphanumeric password of a given length.
func RandomPassword(n int) string { func RandomPassword(n int) string {
b := make([]byte, n) b := make([]byte, n)