merged with master

commit b9bc1786a8
@ -17,6 +17,8 @@ configTarget: "OperatorConfigurationCRD"
|
||||||
|
|
||||||
# general top-level configuration parameters
|
# general top-level configuration parameters
|
||||||
configGeneral:
|
configGeneral:
|
||||||
|
# start any new database pod without limitations on shm memory
|
||||||
|
enable_shm_volume: true
|
||||||
# etcd connection string for Patroni. Empty uses K8s-native DCS.
|
# etcd connection string for Patroni. Empty uses K8s-native DCS.
|
||||||
etcd_host: ""
|
etcd_host: ""
|
||||||
# Spilo docker image
|
# Spilo docker image
|
||||||
|
|
|
||||||
|
|
@ -17,6 +17,8 @@ configTarget: "ConfigMap"
|
||||||
|
|
||||||
# general configuration parameters
|
# general configuration parameters
|
||||||
configGeneral:
|
configGeneral:
|
||||||
|
# start any new database pod without limitations on shm memory
|
||||||
|
enable_shm_volume: "true"
|
||||||
# etcd connection string for Patroni. Empty uses K8s-native DCS.
|
# etcd connection string for Patroni. Empty uses K8s-native DCS.
|
||||||
etcd_host: ""
|
etcd_host: ""
|
||||||
# Spilo docker image
|
# Spilo docker image
|
||||||
|
|
|
||||||
|
|
@@ -51,10 +51,12 @@ Please, report any issues discovered to https://github.com/zalando/postgres-oper
 
 ## Talks
 
-1. "PostgreSQL and Kubernetes: DBaaS without a vendor-lock" talk by Oleksii Kliukin, PostgreSQL Sessions 2018: [video](https://www.youtube.com/watch?v=q26U2rQcqMw) | [slides](https://speakerdeck.com/alexeyklyukin/postgresql-and-kubernetes-dbaas-without-a-vendor-lock)
+1. "Building your own PostgreSQL-as-a-Service on Kubernetes" talk by Alexander Kukushkin, KubeCon NA 2018: [video](https://www.youtube.com/watch?v=G8MnpkbhClc) | [slides](https://static.sched.com/hosted_files/kccna18/1d/Building%20your%20own%20PostgreSQL-as-a-Service%20on%20Kubernetes.pdf)
 
-2. "PostgreSQL High Availability on Kubernetes with Patroni" talk by Oleksii Kliukin, Atmosphere 2018: [video](https://www.youtube.com/watch?v=cFlwQOPPkeg) | [slides](https://speakerdeck.com/alexeyklyukin/postgresql-high-availability-on-kubernetes-with-patroni)
+2. "PostgreSQL and Kubernetes: DBaaS without a vendor-lock" talk by Oleksii Kliukin, PostgreSQL Sessions 2018: [video](https://www.youtube.com/watch?v=q26U2rQcqMw) | [slides](https://speakerdeck.com/alexeyklyukin/postgresql-and-kubernetes-dbaas-without-a-vendor-lock)
 
-2. "Blue elephant on-demand: Postgres + Kubernetes" talk by Oleksii Kliukin and Jan Mussler, FOSDEM 2018: [video](https://fosdem.org/2018/schedule/event/blue_elephant_on_demand_postgres_kubernetes/) | [slides (pdf)](https://www.postgresql.eu/events/fosdem2018/sessions/session/1735/slides/59/FOSDEM%202018_%20Blue_Elephant_On_Demand.pdf)
+3. "PostgreSQL High Availability on Kubernetes with Patroni" talk by Oleksii Kliukin, Atmosphere 2018: [video](https://www.youtube.com/watch?v=cFlwQOPPkeg) | [slides](https://speakerdeck.com/alexeyklyukin/postgresql-high-availability-on-kubernetes-with-patroni)
 
+4. "Blue elephant on-demand: Postgres + Kubernetes" talk by Oleksii Kliukin and Jan Mussler, FOSDEM 2018: [video](https://fosdem.org/2018/schedule/event/blue_elephant_on_demand_postgres_kubernetes/) | [slides (pdf)](https://www.postgresql.eu/events/fosdem2018/sessions/session/1735/slides/59/FOSDEM%202018_%20Blue_Elephant_On_Demand.pdf)
+
 3. "Kube-Native Postgres" talk by Josh Berkus, KubeCon 2017: [video](https://www.youtube.com/watch?v=Zn1vd7sQ_bc)
@@ -85,6 +85,14 @@ Those are top-level keys, containing both leaf keys and groups.
   Spilo. In case of the name conflict with the definition in the cluster
   manifest the cluster-specific one is preferred.
 
+* **enable_shm_volume**
+  Instruct the operator to start any new database pod without limitations on
+  shm memory. If this option is enabled, a new tmpfs volume is mounted into
+  the target database pod to remove the shm memory limitation (see e.g. the
+  [docker issue](https://github.com/docker-library/postgres/issues/416)).
+  This option is global for an operator object and can be overwritten by the
+  `enableShmVolume` parameter from the Postgres manifest. The default is `true`.
+
 * **workers**
   number of working routines the operator spawns to process requests to
   create/update/delete/sync clusters concurrently. The default is `4`.
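The override described in the hunk above is why this commit switches the setting to a `*bool` on both sides (see the `ShmVolume *bool` fields and `mountShmVolumeNeeded` later in this diff): only a pointer can distinguish "not set" from an explicit `false`. A minimal sketch of that precedence rule, using a hypothetical `effectiveShmVolume` helper that is not part of the operator code:

package main

import "fmt"

// effectiveShmVolume is illustrative only: nil means the cluster manifest does
// not set enableShmVolume, so the operator-level enable_shm_volume default
// applies; a non-nil value wins.
func effectiveShmVolume(operatorDefault bool, manifestValue *bool) bool {
    if manifestValue != nil {
        return *manifestValue
    }
    return operatorDefault
}

func main() {
    off := false
    fmt.Println(effectiveShmVolume(true, nil))  // true: nothing set in the manifest
    fmt.Println(effectiveShmVolume(true, &off)) // false: explicit manifest override
}

Note that the committed `mountShmVolumeNeeded` below is slightly stricter: it only short-circuits on an explicit `true` in the cluster manifest and otherwise falls back to the operator-level setting.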
@@ -115,7 +123,6 @@ Those are top-level keys, containing both leaf keys and groups.
   container, change the [operator deployment manually](https://github.com/zalando/postgres-operator/blob/master/manifests/postgres-operator.yaml#L13).
   The default is `false`.
 
-
 ## Postgres users
 
 Parameters describing Postgres users. In a CRD-configuration, they are grouped
@@ -299,14 +306,6 @@ CRD-based configuration.
   memory limits for the postgres containers, unless overridden by cluster-specific
   settings. The default is `1Gi`.
 
-* **enable_shm_volume**
-  Instruct operator to start any new database pod without limitations on shm
-  memory. If this option is enabled, to the target database pod will be mounted
-  a new tmpfs volume to remove shm memory limitation (see e.g. the [docker
-  issue](https://github.com/docker-library/postgres/issues/416)). This option
-  is global for an operator object, and can be overwritten by `enableShmVolume`
-  parameter from Postgres manifest. The default is `true`
-
 ## Operator timeouts
 
 This set of parameters define various timeouts related to some operator
@@ -374,7 +373,7 @@ In the CRD-based configuration they are grouped under the `load_balancer` key.
   with the hosted zone (the value of the `db_hosted_zone` parameter). No other
   placeholders are allowed.
 
-** **replica_dns_name_format** defines the DNS name string template for the
+* **replica_dns_name_format** defines the DNS name string template for the
   replica load balancer cluster. The default is
   `{cluster}-repl.{team}.{hostedzone}`, where `{cluster}` is replaced by the
   cluster name, `{team}` is replaced with the team name and `{hostedzone}` is
@@ -406,7 +405,7 @@ yet officially supported.
   empty.
 
 * **aws_region**
-  AWS region used to store ESB volumes. The default is `eu-central-1`.
+  AWS region used to store EBS volumes. The default is `eu-central-1`.
 
 * **additional_secret_mount**
   Additional Secret (aws or gcp credentials) to mount in the pod. The default is empty.
@@ -529,7 +528,6 @@ scalyr sidecar. In the CRD-based configuration they are grouped under the
 * **scalyr_memory_limit**
   Memory limit value for the Scalyr sidecar. The default is `1Gi`.
 
-
 ## Logical backup
 
 These parameters configure a k8s cron job managed by the operator to produce
@@ -351,7 +351,7 @@ metadata:
   name: acid-minimal-cluster
 spec:
   ...
-  init_containers:
+  initContainers:
   - name: "container-name"
     image: "company/image:tag"
     env:
@@ -359,7 +359,7 @@ spec:
       value: "any-k8s-env-things"
 ```
 
-`init_containers` accepts full `v1.Container` definition.
+`initContainers` accepts full `v1.Container` definition.
 
 ## Increase volume size
@@ -4,7 +4,7 @@ metadata:
   name: acid-test-cluster
 spec:
   dockerImage: registry.opensource.zalan.do/acid/spilo-11:1.5-p9
-  init_containers:
+  initContainers:
   - name: date
     image: busybox
     command: [ "/bin/date" ]
@@ -26,6 +26,7 @@ data:
   # enable_pod_antiaffinity: "false"
   # enable_pod_disruption_budget: "true"
   enable_replica_load_balancer: "false"
+  # enable_shm_volume: "true"
   # enable_team_superuser: "false"
   enable_teams_api: "false"
   # etcd_host: ""
@@ -9,10 +9,17 @@ configuration:
   min_instances: -1
   resync_period: 30m
   repair_period: 5m
+<<<<<<< HEAD
   # set_memory_request_to_limit: false
   # sidecar_docker_images:
   #   example: "exampleimage:exampletag"
   workers: 4
+=======
+  # enable_shm_volume: true
+
+  #sidecar_docker_images:
+  #  example: "exampleimage:exampletag"
+>>>>>>> master
   users:
     replication_username: standby
     super_username: postgres
@@ -157,6 +157,7 @@ type OperatorConfigurationData struct {
     ResyncPeriod               Duration                    `json:"resync_period,omitempty"`
     RepairPeriod               Duration                    `json:"repair_period,omitempty"`
     SetMemoryRequestToLimit    bool                        `json:"set_memory_request_to_limit,omitempty"`
+    ShmVolume                  *bool                       `json:"enable_shm_volume,omitempty"`
     Sidecars                   map[string]string           `json:"sidecar_docker_images,omitempty"`
     PostgresUsersConfiguration PostgresUsersConfiguration  `json:"users"`
     Kubernetes                 KubernetesMetaConfiguration `json:"kubernetes"`
@@ -53,12 +53,16 @@ type PostgresSpec struct {
     Databases             map[string]string   `json:"databases,omitempty"`
     Tolerations           []v1.Toleration     `json:"tolerations,omitempty"`
     Sidecars              []Sidecar           `json:"sidecars,omitempty"`
-    InitContainers        []v1.Container      `json:"init_containers,omitempty"`
-    PodPriorityClassName  string              `json:"pod_priority_class_name,omitempty"`
+    InitContainers        []v1.Container      `json:"initContainers,omitempty"`
+    PodPriorityClassName  string              `json:"podPriorityClassName,omitempty"`
     ShmVolume             *bool               `json:"enableShmVolume,omitempty"`
     EnableLogicalBackup   bool                `json:"enableLogicalBackup,omitempty"`
     LogicalBackupSchedule string              `json:"logicalBackupSchedule,omitempty"`
     StandbyCluster        *StandbyDescription `json:"standby"`
+
+    // deprecated json tags
+    InitContainersOld       []v1.Container `json:"init_containers,omitempty"`
+    PodPriorityClassNameOld string         `json:"pod_priority_class_name,omitempty"`
 }
 
 // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
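The dual JSON tags above keep old manifests working: a manifest that still says `init_containers` or `pod_priority_class_name` unmarshals into the `...Old` fields, and `generateStatefulSet` (further down in this diff) copies the value over while logging a deprecation warning. A minimal, self-contained sketch of that mechanism, trimmed to one field pair (the `spec` type here is illustrative, not the operator's type):

package main

import (
    "encoding/json"
    "fmt"
)

// spec is a trimmed stand-in for PostgresSpec, keeping only the renamed
// field and its deprecated counterpart from the hunk above.
type spec struct {
    PodPriorityClassName    string `json:"podPriorityClassName,omitempty"`
    PodPriorityClassNameOld string `json:"pod_priority_class_name,omitempty"`
}

func main() {
    var s spec
    // An old-style manifest key still decodes, but into the *Old field.
    _ = json.Unmarshal([]byte(`{"pod_priority_class_name": "spilo-pod-priority"}`), &s)

    // Mirror of the operator's fallback in generateStatefulSet: prefer the
    // new key, fall back to the deprecated one.
    if s.PodPriorityClassName == "" && s.PodPriorityClassNameOld != "" {
        s.PodPriorityClassName = s.PodPriorityClassNameOld
    }
    fmt.Println(s.PodPriorityClassName) // spilo-pod-priority
}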
@@ -8,6 +8,7 @@ import (
     "testing"
     "time"
 
+    "github.com/zalando/postgres-operator/pkg/util"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
 
@@ -170,6 +171,7 @@ var unmarshalCluster = []struct {
         marshal: []byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"acid-testcluster1","creationTimestamp":null},"spec":{"postgresql":{"version":"","parameters":null},"volume":{"size":"","storageClass":""},"patroni":{"initdb":null,"pg_hba":null,"ttl":0,"loop_wait":0,"retry_timeout":0,"maximum_lag_on_failover":0,"slots":null},"resources":{"requests":{"cpu":"","memory":""},"limits":{"cpu":"","memory":""}},"teamId":"","allowedSourceRanges":null,"numberOfInstances":0,"users":null,"clone":{}},"status":{"PostgresClusterStatus":"Invalid"}}`),
         err: nil},
     // example with detailed input manifest
+    // and deprecated pod_priority_class_name -> podPriorityClassName
     {
         in: []byte(`{
             "kind": "Postgresql",
@@ -179,6 +181,7 @@ var unmarshalCluster = []struct {
             },
             "spec": {
                 "teamId": "ACID",
+                "pod_priority_class_name": "spilo-pod-priority",
                 "volume": {
                     "size": "5Gi",
                     "storageClass": "SSD",
@@ -215,6 +218,7 @@ var unmarshalCluster = []struct {
                 "clone" : {
                     "cluster": "acid-batman"
                 },
+                "enableShmVolume": false,
                 "patroni": {
                     "initdb": {
                         "encoding": "UTF8",
@@ -261,11 +265,13 @@ var unmarshalCluster = []struct {
                     "log_statement": "all",
                 },
             },
+            PodPriorityClassNameOld: "spilo-pod-priority",
             Volume: Volume{
                 Size:         "5Gi",
                 StorageClass: "SSD",
                 SubPath:      "subdir",
             },
+            ShmVolume: util.False(),
             Patroni: Patroni{
                 InitDB: map[string]string{
                     "encoding": "UTF8",
@@ -313,7 +319,7 @@ var unmarshalCluster = []struct {
             },
             Error: "",
         },
-        marshal: []byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"acid-testcluster1","creationTimestamp":null},"spec":{"postgresql":{"version":"9.6","parameters":{"log_statement":"all","max_connections":"10","shared_buffers":"32MB"}},"volume":{"size":"5Gi","storageClass":"SSD", "subPath": "subdir"},"patroni":{"initdb":{"data-checksums":"true","encoding":"UTF8","locale":"en_US.UTF-8"},"pg_hba":["hostssl all all 0.0.0.0/0 md5","host all all 0.0.0.0/0 md5"],"ttl":30,"loop_wait":10,"retry_timeout":10,"maximum_lag_on_failover":33554432,"slots":{"permanent_logical_1":{"database":"foo","plugin":"pgoutput","type":"logical"}}},"resources":{"requests":{"cpu":"10m","memory":"50Mi"},"limits":{"cpu":"300m","memory":"3000Mi"}},"teamId":"ACID","allowedSourceRanges":["127.0.0.1/32"],"numberOfInstances":2,"users":{"zalando":["superuser","createdb"]},"maintenanceWindows":["Mon:01:00-06:00","Sat:00:00-04:00","05:00-05:15"],"clone":{"cluster":"acid-batman"}},"status":{"PostgresClusterStatus":""}}`),
+        marshal: []byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"acid-testcluster1","creationTimestamp":null},"spec":{"postgresql":{"version":"9.6","parameters":{"log_statement":"all","max_connections":"10","shared_buffers":"32MB"}},"pod_priority_class_name":"spilo-pod-priority","volume":{"size":"5Gi","storageClass":"SSD", "subPath": "subdir"},"enableShmVolume":false,"patroni":{"initdb":{"data-checksums":"true","encoding":"UTF8","locale":"en_US.UTF-8"},"pg_hba":["hostssl all all 0.0.0.0/0 md5","host all all 0.0.0.0/0 md5"],"ttl":30,"loop_wait":10,"retry_timeout":10,"maximum_lag_on_failover":33554432,"slots":{"permanent_logical_1":{"database":"foo","plugin":"pgoutput","type":"logical"}}},"resources":{"requests":{"cpu":"10m","memory":"50Mi"},"limits":{"cpu":"300m","memory":"3000Mi"}},"teamId":"ACID","allowedSourceRanges":["127.0.0.1/32"],"numberOfInstances":2,"users":{"zalando":["superuser","createdb"]},"maintenanceWindows":["Mon:01:00-06:00","Sat:00:00-04:00","05:00-05:15"],"clone":{"cluster":"acid-batman"}},"status":{"PostgresClusterStatus":""}}`),
         err: nil},
     // example with teamId set in input
     {
@@ -209,6 +209,11 @@ func (in *OperatorConfiguration) DeepCopyObject() runtime.Object {
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *OperatorConfigurationData) DeepCopyInto(out *OperatorConfigurationData) {
     *out = *in
+    if in.ShmVolume != nil {
+        in, out := &in.ShmVolume, &out.ShmVolume
+        *out = new(bool)
+        **out = **in
+    }
     if in.Sidecars != nil {
         in, out := &in.Sidecars, &out.Sidecars
         *out = make(map[string]string, len(*in))
@@ -508,6 +513,13 @@ func (in *PostgresSpec) DeepCopyInto(out *PostgresSpec) {
         *out = new(StandbyDescription)
         **out = **in
     }
+    if in.InitContainersOld != nil {
+        in, out := &in.InitContainersOld, &out.InitContainersOld
+        *out = make([]corev1.Container, len(*in))
+        for i := range *in {
+            (*in)[i].DeepCopyInto(&(*out)[i])
+        }
+    }
     return
 }
 
@@ -360,8 +360,6 @@ func generateContainer(
     volumeMounts []v1.VolumeMount,
     privilegedMode bool,
 ) *v1.Container {
-    falseBool := false
-
     return &v1.Container{
         Name:  name,
         Image: *dockerImage,
@@ -385,7 +383,7 @@ func generateContainer(
         Env: envVars,
         SecurityContext: &v1.SecurityContext{
             Privileged:             &privilegedMode,
-            ReadOnlyRootFilesystem: &falseBool,
+            ReadOnlyRootFilesystem: util.False(),
         },
     }
 }
@@ -421,9 +419,9 @@ func generateSidecarContainers(sidecars []acidv1.Sidecar,
 
 // Check whether or not we're requested to mount an shm volume,
 // taking into account that PostgreSQL manifest has precedence.
-func mountShmVolumeNeeded(opConfig config.Config, pgSpec *acidv1.PostgresSpec) bool {
-    if pgSpec.ShmVolume != nil {
-        return *pgSpec.ShmVolume
+func mountShmVolumeNeeded(opConfig config.Config, pgSpec *acidv1.PostgresSpec) *bool {
+    if pgSpec.ShmVolume != nil && *pgSpec.ShmVolume {
+        return pgSpec.ShmVolume
     }
 
     return opConfig.ShmVolume
@@ -442,7 +440,7 @@ func generatePodTemplate(
     podServiceAccountName string,
     kubeIAMRole string,
     priorityClassName string,
-    shmVolume bool,
+    shmVolume *bool,
     podAntiAffinity bool,
     podAntiAffinityTopologyKey string,
     additionalSecretMount string,
@@ -467,7 +465,7 @@ func generatePodTemplate(
         SecurityContext: &securityContext,
     }
 
-    if shmVolume {
+    if shmVolume != nil && *shmVolume {
         addShmVolume(&podSpec)
     }
 
@@ -801,6 +799,28 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*v1beta1.State
         return nil, fmt.Errorf("s3_wal_path is empty for standby cluster")
     }
 
+    // backward compatible check for InitContainers
+    if spec.InitContainersOld != nil {
+        msg := "Manifest parameter init_containers is deprecated."
+        if spec.InitContainers == nil {
+            c.logger.Warningf("%s Consider using initContainers instead.", msg)
+            spec.InitContainers = spec.InitContainersOld
+        } else {
+            c.logger.Warningf("%s Only value from initContainers is used", msg)
+        }
+    }
+
+    // backward compatible check for PodPriorityClassName
+    if spec.PodPriorityClassNameOld != "" {
+        msg := "Manifest parameter pod_priority_class_name is deprecated."
+        if spec.PodPriorityClassName == "" {
+            c.logger.Warningf("%s Consider using podPriorityClassName instead.", msg)
+            spec.PodPriorityClassName = spec.PodPriorityClassNameOld
+        } else {
+            c.logger.Warningf("%s Only value from podPriorityClassName is used", msg)
+        }
+    }
+
     spiloConfiguration, err := generateSpiloJSONConfiguration(&spec.PostgresqlParam, &spec.Patroni, c.OpConfig.PamRoleName, c.logger)
     if err != nil {
         return nil, fmt.Errorf("could not generate Spilo JSON configuration: %v", err)
@@ -1456,7 +1476,7 @@ func (c *Cluster) generateLogicalBackupJob() (*batchv1beta1.CronJob, error) {
         c.OpConfig.PodServiceAccountName,
         c.OpConfig.KubeIAMRole,
         "",
-        false,
+        util.False(),
         false,
         "",
         "",
@@ -8,6 +8,7 @@ import (
     "testing"
 
     acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1"
+    "github.com/zalando/postgres-operator/pkg/util"
     "github.com/zalando/postgres-operator/pkg/util/config"
     "github.com/zalando/postgres-operator/pkg/util/constants"
     "github.com/zalando/postgres-operator/pkg/util/k8sutil"
@@ -17,16 +18,6 @@ import (
     "k8s.io/apimachinery/pkg/util/intstr"
 )
 
-func True() *bool {
-    b := true
-    return &b
-}
-
-func False() *bool {
-    b := false
-    return &b
-}
-
 func toIntStr(val int) *intstr.IntOrString {
     b := intstr.FromInt(val)
     return &b
@@ -118,14 +109,14 @@ func TestCreateLoadBalancerLogic(t *testing.T) {
         {
             subtest:  "new format, load balancer is enabled for replica",
             role:     Replica,
-            spec:     &acidv1.PostgresSpec{EnableReplicaLoadBalancer: True()},
+            spec:     &acidv1.PostgresSpec{EnableReplicaLoadBalancer: util.True()},
             opConfig: config.Config{},
             result:   true,
         },
         {
             subtest:  "new format, load balancer is disabled for replica",
             role:     Replica,
-            spec:     &acidv1.PostgresSpec{EnableReplicaLoadBalancer: False()},
+            spec:     &acidv1.PostgresSpec{EnableReplicaLoadBalancer: util.False()},
             opConfig: config.Config{},
             result:   false,
         },
@@ -208,7 +199,7 @@ func TestGeneratePodDisruptionBudget(t *testing.T) {
         // With PodDisruptionBudget disabled.
         {
             New(
-                Config{OpConfig: config.Config{Resources: config.Resources{ClusterNameLabel: "cluster-name", PodRoleLabel: "spilo-role"}, PDBNameFormat: "postgres-{cluster}-pdb", EnablePodDisruptionBudget: False()}},
+                Config{OpConfig: config.Config{Resources: config.Resources{ClusterNameLabel: "cluster-name", PodRoleLabel: "spilo-role"}, PDBNameFormat: "postgres-{cluster}-pdb", EnablePodDisruptionBudget: util.False()}},
                 k8sutil.KubernetesClient{},
                 acidv1.Postgresql{
                     ObjectMeta: metav1.ObjectMeta{Name: "myapp-database", Namespace: "myapp"},
@@ -231,7 +222,7 @@ func TestGeneratePodDisruptionBudget(t *testing.T) {
         // With non-default PDBNameFormat and PodDisruptionBudget explicitly enabled.
         {
             New(
-                Config{OpConfig: config.Config{Resources: config.Resources{ClusterNameLabel: "cluster-name", PodRoleLabel: "spilo-role"}, PDBNameFormat: "postgres-{cluster}-databass-budget", EnablePodDisruptionBudget: True()}},
+                Config{OpConfig: config.Config{Resources: config.Resources{ClusterNameLabel: "cluster-name", PodRoleLabel: "spilo-role"}, PDBNameFormat: "postgres-{cluster}-databass-budget", EnablePodDisruptionBudget: util.True()}},
                 k8sutil.KubernetesClient{},
                 acidv1.Postgresql{
                     ObjectMeta: metav1.ObjectMeta{Name: "myapp-database", Namespace: "myapp"},
@@ -33,6 +33,7 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur
     result.ResyncPeriod = time.Duration(fromCRD.ResyncPeriod)
     result.RepairPeriod = time.Duration(fromCRD.RepairPeriod)
     result.SetMemoryRequestToLimit = fromCRD.SetMemoryRequestToLimit
+    result.ShmVolume = fromCRD.ShmVolume
     result.Sidecars = fromCRD.Sidecars
 
     // user config
@@ -42,7 +42,7 @@ type Resources struct {
     NodeReadinessLabel map[string]string `name:"node_readiness_label" default:""`
     MaxInstances       int32             `name:"max_instances" default:"-1"`
     MinInstances       int32             `name:"min_instances" default:"-1"`
-    ShmVolume          bool              `name:"enable_shm_volume" default:"true"`
+    ShmVolume          *bool             `name:"enable_shm_volume" default:"true"`
 }
 
 // Auth describes authentication specific configuration parameters
@@ -26,6 +26,17 @@ func init() {
     rand.Seed(time.Now().Unix())
 }
 
+// helper functions to get bool pointers
+func True() *bool {
+    b := true
+    return &b
+}
+
+func False() *bool {
+    b := false
+    return &b
+}
+
 // RandomPassword generates random alphanumeric password of a given length.
 func RandomPassword(n int) string {
     b := make([]byte, n)
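With the `True()`/`False()` helpers now exported from `pkg/util`, callers express an explicit boolean choice for `*bool` fields inline, as the updated tests above do. A rough sketch of that usage in a hypothetical test (assuming the postgres-operator module is on the import path; `TestShmVolumeOverride` is not an existing test in the repository):

package cluster_test

import (
    "testing"

    acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1"
    "github.com/zalando/postgres-operator/pkg/util"
)

// Optional booleans in the spec are *bool: util.True()/util.False() state an
// explicit choice, while leaving the field nil means "not set".
func TestShmVolumeOverride(t *testing.T) {
    explicit := acidv1.PostgresSpec{ShmVolume: util.False()}
    unset := acidv1.PostgresSpec{}

    if explicit.ShmVolume == nil || *explicit.ShmVolume {
        t.Error("expected an explicit false")
    }
    if unset.ShmVolume != nil {
        t.Error("expected nil, i.e. fall back to the operator default")
    }
}

Leaving the field nil preserves the "not set" meaning, so the operator-level `enable_shm_volume` default (true) still applies.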