polish global config about sharing postgresql-run socket (#2155)

* polish global config about sharing postgresql-run socket
Felix Kunde 2023-01-02 18:28:48 +01:00 committed by GitHub
parent be7b52db92
commit d7e1fb57f1
16 changed files with 159 additions and 89 deletions

View File

@ -314,6 +314,9 @@ spec:
secret_name_template:
type: string
default: "{username}.{cluster}.credentials.{tprkind}.{tprgroup}"
share_pgsocket_with_sidecars:
type: boolean
default: false
spilo_allow_privilege_escalation:
type: boolean
default: true

View File

@ -191,9 +191,12 @@ configKubernetes:
# if the user is in different namespace than cluster and cross namespace secrets
# are enabled via `enable_cross_namespace_secret` flag in the configuration.
secret_name_template: "{username}.{cluster}.credentials.{tprkind}.{tprgroup}"
# share the unix socket of PostgreSQL (`pg_socket`) with the sidecars
share_pgsocket_with_sidecars: false
# set user and group for the spilo container (required to run Spilo as non-root process)
# spilo_runasuser: 101
# spilo_runasgroup: 103
# group ID with write-access to volumes (required to run Spilo as non-root process)
# spilo_fsgroup: 103

View File

@ -344,7 +344,7 @@ configuration they are grouped under the `kubernetes` key.
to run alongside Spilo on the same pod. Globally defined sidecars are always
enabled. Default is true.
* **share_pg_socket_with_sidecars**
* **share_pgsocket_with_sidecars**
global option to create an `emptyDir` volume named `postgresql-run`. It is
mounted by all containers at `/var/run/postgresql`, sharing the unix socket
of PostgreSQL (`pg_socket`) with the sidecars. The default is `false`.
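For illustration, a minimal sketch of enabling this option via the `OperatorConfiguration` CRD (the `metadata.name` below is only a placeholder; with the ConfigMap-based setup the equivalent entry is `share_pgsocket_with_sidecars: "true"` under `data`, as shown further down in this commit):
```yaml
apiVersion: "acid.zalan.do/v1"
kind: OperatorConfiguration
metadata:
  name: postgresql-operator-configuration  # placeholder name
configuration:
  kubernetes:
    # creates the postgresql-run emptyDir volume and mounts it into all containers
    share_pgsocket_with_sidecars: true
```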

View File

@ -1008,11 +1008,39 @@ If you want to add a sidecar to every cluster managed by the operator, you can s
### Accessing the PostgreSQL socket from sidecars
If enabled by the `share_pg_socket_with_sidecars` option in the operator
configuration the PostgreSQL socket is placed in a volume of type
`emptyDir` named `postgresql-run`.
To allow access to the socket from any sidecar container simply add a
VolumeMount to this volume to your sidecar spec.
If enabled by the `share_pgsocket_with_sidecars` option in the operator
configuration, the PostgreSQL socket is placed in a volume of type `emptyDir`
named `postgresql-run`. To allow access to the socket from any sidecar
container, simply add a VolumeMount for this volume to your sidecar spec.
```yaml
- name: "container-name"
  image: "company/image:tag"
  volumeMounts:
  - mountPath: /var/run
    name: postgresql-run
```
If you do not want to enable this feature globally and only need it for
individual Postgres clusters, specify an `emptyDir` volume under
`additionalVolumes` in the manifest:
```yaml
spec:
  additionalVolumes:
  - name: postgresql-run
    mountPath: /var/run/postgresql
    targetContainers:
    - all
    volumeSource:
      emptyDir: {}
  sidecars:
  - name: "container-name"
    image: "company/image:tag"
    volumeMounts:
    - mountPath: /var/run
      name: postgresql-run
```
## InitContainers Support

View File

@ -134,6 +134,7 @@ data:
ring_log_lines: "100"
role_deletion_suffix: "_deleted"
secret_name_template: "{username}.{cluster}.credentials.{tprkind}.{tprgroup}"
share_pgsocket_with_sidecars: "false"
# sidecar_docker_images: ""
# set_memory_request_to_limit: "false"
spilo_allow_privilege_escalation: "true"

View File

@ -222,9 +222,6 @@ spec:
type: array
items:
type: string
share_pg_socket_with_sidecars:
type: boolean
default: false
infrastructure_roles_secret_name:
type: string
infrastructure_roles_secrets:
@ -312,6 +309,9 @@ spec:
secret_name_template:
type: string
default: "{username}.{cluster}.credentials.{tprkind}.{tprgroup}"
share_pgsocket_with_sidecars:
type: boolean
default: false
spilo_allow_privilege_escalation:
type: boolean
default: true

View File

@ -1289,9 +1289,6 @@ var OperatorConfigCRDResourceValidation = apiextv1.CustomResourceValidation{
},
},
},
"share_pg_socket_with_sidecars": {
Type: "boolean",
},
"infrastructure_roles_secret_name": {
Type: "string",
},
@ -1419,6 +1416,9 @@ var OperatorConfigCRDResourceValidation = apiextv1.CustomResourceValidation{
"secret_name_template": {
Type: "string",
},
"share_pgsocket_with_sidecars": {
Type: "boolean",
},
"spilo_runasuser": {
Type: "integer",
},

View File

@ -72,7 +72,7 @@ type KubernetesMetaConfiguration struct {
StorageResizeMode string `json:"storage_resize_mode,omitempty"`
EnableInitContainers *bool `json:"enable_init_containers,omitempty"`
EnableSidecars *bool `json:"enable_sidecars,omitempty"`
SharePGSocketWithSidecars *bool `json:"share_pgsocket_with_sidecars,omitempty"`
SharePgSocketWithSidecars *bool `json:"share_pgsocket_with_sidecars,omitempty"`
SecretNameTemplate config.StringTemplate `json:"secret_name_template,omitempty"`
ClusterDomain string `json:"cluster_domain,omitempty"`
OAuthTokenSecretName spec.NamespacedName `json:"oauth_token_secret_name,omitempty"`

View File

@ -193,8 +193,8 @@ func (in *KubernetesMetaConfiguration) DeepCopyInto(out *KubernetesMetaConfigura
*out = new(bool)
**out = **in
}
if in.SharePGSocketWithSidecars != nil {
in, out := &in.SharePGSocketWithSidecars, &out.SharePGSocketWithSidecars
if in.SharePgSocketWithSidecars != nil {
in, out := &in.SharePgSocketWithSidecars, &out.SharePgSocketWithSidecars
*out = new(bool)
**out = **in
}

View File

@ -723,7 +723,7 @@ func (c *Cluster) generatePodTemplate(
spiloContainer *v1.Container,
initContainers []v1.Container,
sidecarContainers []v1.Container,
sharePGSocketWithSidecars *bool,
sharePgSocketWithSidecars *bool,
tolerationsSpec *[]v1.Toleration,
spiloRunAsUser *int64,
spiloRunAsGroup *int64,
@ -792,7 +792,7 @@ func (c *Cluster) generatePodTemplate(
podSpec.PriorityClassName = priorityClassName
}
if sharePGSocketWithSidecars != nil && *sharePGSocketWithSidecars {
if sharePgSocketWithSidecars != nil && *sharePgSocketWithSidecars {
addVarRunVolume(&podSpec)
}
@ -1378,7 +1378,7 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
spiloContainer,
initContainers,
sidecarContainers,
c.OpConfig.SharePGSocketWithSidecars,
c.OpConfig.SharePgSocketWithSidecars,
&tolerationSpec,
effectiveRunAsUser,
effectiveRunAsGroup,
@ -1586,8 +1586,8 @@ func addVarRunVolume(podSpec *v1.PodSpec) {
for i := range podSpec.Containers {
mounts := append(podSpec.Containers[i].VolumeMounts,
v1.VolumeMount{
Name: "postgresql-run",
MountPath: "/var/run/postgresql",
Name: constants.RunVolumeName,
MountPath: constants.RunVolumePath,
})
podSpec.Containers[i].VolumeMounts = mounts
}

View File

@ -64,7 +64,6 @@ func TestGenerateSpiloJSONConfiguration(t *testing.T) {
},
}, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger, eventRecorder)
testName := "TestGenerateSpiloConfig"
tests := []struct {
subtest string
pgParam *acidv1.PostgresqlParam
@ -159,13 +158,12 @@ func TestGenerateSpiloJSONConfiguration(t *testing.T) {
}
if tt.result != result {
t.Errorf("%s %s: Spilo Config is %v, expected %v for role %#v and param %#v",
testName, tt.subtest, result, tt.result, tt.opConfig.Auth.PamRoleName, tt.pgParam)
t.Name(), tt.subtest, result, tt.result, tt.opConfig.Auth.PamRoleName, tt.pgParam)
}
}
}
func TestExtractPgVersionFromBinPath(t *testing.T) {
testName := "TestExtractPgVersionFromBinPath"
tests := []struct {
subTest string
binPath string
@ -199,7 +197,7 @@ func TestExtractPgVersionFromBinPath(t *testing.T) {
}
if pgVersion != tt.expected {
t.Errorf("%s %s: Expected version %s, have %s instead",
testName, tt.subTest, tt.expected, pgVersion)
t.Name(), tt.subTest, tt.expected, pgVersion)
}
}
}
@ -287,7 +285,6 @@ func newMockCluster(opConfig config.Config) *Cluster {
}
func TestPodEnvironmentConfigMapVariables(t *testing.T) {
testName := "TestPodEnvironmentConfigMapVariables"
tests := []struct {
subTest string
opConfig config.Config
@ -343,17 +340,17 @@ func TestPodEnvironmentConfigMapVariables(t *testing.T) {
vars, err := c.getPodEnvironmentConfigMapVariables()
if !reflect.DeepEqual(vars, tt.envVars) {
t.Errorf("%s %s: expected `%v` but got `%v`",
testName, tt.subTest, tt.envVars, vars)
t.Name(), tt.subTest, tt.envVars, vars)
}
if tt.err != nil {
if err.Error() != tt.err.Error() {
t.Errorf("%s %s: expected error `%v` but got `%v`",
testName, tt.subTest, tt.err, err)
t.Name(), tt.subTest, tt.err, err)
}
} else {
if err != nil {
t.Errorf("%s %s: expected no error but got error: `%v`",
testName, tt.subTest, err)
t.Name(), tt.subTest, err)
}
}
}
@ -362,7 +359,6 @@ func TestPodEnvironmentConfigMapVariables(t *testing.T) {
// Test if the keys of an existing secret are properly referenced
func TestPodEnvironmentSecretVariables(t *testing.T) {
maxRetries := int(testResourceCheckTimeout / testResourceCheckInterval)
testName := "TestPodEnvironmentSecretVariables"
tests := []struct {
subTest string
opConfig config.Config
@ -448,17 +444,17 @@ func TestPodEnvironmentSecretVariables(t *testing.T) {
sort.Slice(vars, func(i, j int) bool { return vars[i].Name < vars[j].Name })
if !reflect.DeepEqual(vars, tt.envVars) {
t.Errorf("%s %s: expected `%v` but got `%v`",
testName, tt.subTest, tt.envVars, vars)
t.Name(), tt.subTest, tt.envVars, vars)
}
if tt.err != nil {
if err.Error() != tt.err.Error() {
t.Errorf("%s %s: expected error `%v` but got `%v`",
testName, tt.subTest, tt.err, err)
t.Name(), tt.subTest, tt.err, err)
}
} else {
if err != nil {
t.Errorf("%s %s: expected no error but got error: `%v`",
testName, tt.subTest, err)
t.Name(), tt.subTest, err)
}
}
}
@ -657,7 +653,6 @@ func TestGenerateSpiloPodEnvVars(t *testing.T) {
},
}
testName := "TestGenerateSpiloPodEnvVars"
tests := []struct {
subTest string
opConfig config.Config
@ -895,27 +890,26 @@ func TestGenerateSpiloPodEnvVars(t *testing.T) {
if env.Name != ev.envVarConstant {
t.Errorf("%s %s: expected env name %s, have %s instead",
testName, tt.subTest, ev.envVarConstant, env.Name)
t.Name(), tt.subTest, ev.envVarConstant, env.Name)
}
if ev.envVarValueRef != nil {
if !reflect.DeepEqual(env.ValueFrom, ev.envVarValueRef) {
t.Errorf("%s %s: expected env value reference %#v, have %#v instead",
testName, tt.subTest, ev.envVarValueRef, env.ValueFrom)
t.Name(), tt.subTest, ev.envVarValueRef, env.ValueFrom)
}
continue
}
if env.Value != ev.envVarValue {
t.Errorf("%s %s: expected env value %s, have %s instead",
testName, tt.subTest, ev.envVarValue, env.Value)
t.Name(), tt.subTest, ev.envVarValue, env.Value)
}
}
}
}
func TestGetNumberOfInstances(t *testing.T) {
testName := "TestGetNumberOfInstances"
tests := []struct {
subTest string
config config.Config
@ -1039,13 +1033,12 @@ func TestGetNumberOfInstances(t *testing.T) {
if numInstances != tt.provided {
t.Errorf("%s %s: Expected to get %d instances, have %d instead",
testName, tt.subTest, tt.provided, numInstances)
t.Name(), tt.subTest, tt.provided, numInstances)
}
}
}
func TestCloneEnv(t *testing.T) {
testName := "TestCloneEnv"
tests := []struct {
subTest string
cloneOpts *acidv1.CloneDescription
@ -1112,18 +1105,17 @@ func TestCloneEnv(t *testing.T) {
if env.Name != tt.env.Name {
t.Errorf("%s %s: Expected env name %s, have %s instead",
testName, tt.subTest, tt.env.Name, env.Name)
t.Name(), tt.subTest, tt.env.Name, env.Name)
}
if env.Value != tt.env.Value {
t.Errorf("%s %s: Expected env value %s, have %s instead",
testName, tt.subTest, tt.env.Value, env.Value)
t.Name(), tt.subTest, tt.env.Value, env.Value)
}
}
}
func TestAppendEnvVar(t *testing.T) {
testName := "TestAppendEnvVar"
tests := []struct {
subTest string
envs []v1.EnvVar
@ -1179,7 +1171,7 @@ func TestAppendEnvVar(t *testing.T) {
if len(finalEnvs) != tt.expectedSize {
t.Errorf("%s %s: expected %d env variables, got %d",
testName, tt.subTest, tt.expectedSize, len(finalEnvs))
t.Name(), tt.subTest, tt.expectedSize, len(finalEnvs))
}
for _, env := range tt.envs {
@ -1187,7 +1179,7 @@ func TestAppendEnvVar(t *testing.T) {
if env.Name == finalEnv.Name {
if env.Value != finalEnv.Value {
t.Errorf("%s %s: expected env value %s of variable %s, got %s instead",
testName, tt.subTest, env.Value, env.Name, finalEnv.Value)
t.Name(), tt.subTest, env.Value, env.Name, finalEnv.Value)
}
}
}
@ -1196,7 +1188,6 @@ func TestAppendEnvVar(t *testing.T) {
}
func TestStandbyEnv(t *testing.T) {
testName := "TestStandbyEnv"
tests := []struct {
subTest string
standbyOpts *acidv1.StandbyDescription
@ -1279,17 +1270,17 @@ func TestStandbyEnv(t *testing.T) {
if env.Name != tt.env.Name {
t.Errorf("%s %s: Expected env name %s, have %s instead",
testName, tt.subTest, tt.env.Name, env.Name)
t.Name(), tt.subTest, tt.env.Name, env.Name)
}
if env.Value != tt.env.Value {
t.Errorf("%s %s: Expected env value %s, have %s instead",
testName, tt.subTest, tt.env.Value, env.Value)
t.Name(), tt.subTest, tt.env.Value, env.Value)
}
if len(envs) != tt.envLen {
t.Errorf("%s %s: Expected number of env variables %d, have %d instead",
testName, tt.subTest, tt.envLen, len(envs))
t.Name(), tt.subTest, tt.envLen, len(envs))
}
}
}
@ -1471,8 +1462,59 @@ func testServiceOwnerReference(cluster *Cluster, service *v1.Service, role Postg
return nil
}
func TestTLS(t *testing.T) {
func TestSharePgSocketWithSidecars(t *testing.T) {
tests := []struct {
subTest string
podSpec *v1.PodSpec
runVolPos int
}{
{
subTest: "empty PodSpec",
podSpec: &v1.PodSpec{
Volumes: []v1.Volume{},
Containers: []v1.Container{
{
VolumeMounts: []v1.VolumeMount{},
},
},
},
runVolPos: 0,
},
{
subTest: "non empty PodSpec",
podSpec: &v1.PodSpec{
Volumes: []v1.Volume{{}},
Containers: []v1.Container{
{
Name: "postgres",
VolumeMounts: []v1.VolumeMount{
{},
},
},
},
},
runVolPos: 1,
},
}
for _, tt := range tests {
addVarRunVolume(tt.podSpec)
postgresContainer := getPostgresContainer(tt.podSpec)
volumeName := tt.podSpec.Volumes[tt.runVolPos].Name
volumeMountName := postgresContainer.VolumeMounts[tt.runVolPos].Name
if volumeName != constants.RunVolumeName {
t.Errorf("%s %s: Expected volume %s was not created, have %s instead",
t.Name(), tt.subTest, constants.RunVolumeName, volumeName)
}
if volumeMountName != constants.RunVolumeName {
t.Errorf("%s %s: Expected mount %s was not created, have %s instead",
t.Name(), tt.subTest, constants.RunVolumeName, volumeMountName)
}
}
}
func TestTLS(t *testing.T) {
client, _ := newFakeK8sTestClient()
clusterName := "acid-test-cluster"
namespace := "default"
@ -1561,7 +1603,6 @@ func TestTLS(t *testing.T) {
}
func TestShmVolume(t *testing.T) {
testName := "TestShmVolume"
tests := []struct {
subTest string
podSpec *v1.PodSpec
@ -1604,17 +1645,16 @@ func TestShmVolume(t *testing.T) {
if volumeName != constants.ShmVolumeName {
t.Errorf("%s %s: Expected volume %s was not created, have %s instead",
testName, tt.subTest, constants.ShmVolumeName, volumeName)
t.Name(), tt.subTest, constants.ShmVolumeName, volumeName)
}
if volumeMountName != constants.ShmVolumeName {
t.Errorf("%s %s: Expected mount %s was not created, have %s instead",
testName, tt.subTest, constants.ShmVolumeName, volumeMountName)
t.Name(), tt.subTest, constants.ShmVolumeName, volumeMountName)
}
}
}
func TestSecretVolume(t *testing.T) {
testName := "TestSecretVolume"
tests := []struct {
subTest string
podSpec *v1.PodSpec
@ -1664,7 +1704,7 @@ func TestSecretVolume(t *testing.T) {
if volumeName != additionalSecretMount {
t.Errorf("%s %s: Expected volume %s was not created, have %s instead",
testName, tt.subTest, additionalSecretMount, volumeName)
t.Name(), tt.subTest, additionalSecretMount, volumeName)
}
for i := range tt.podSpec.Containers {
@ -1672,7 +1712,7 @@ func TestSecretVolume(t *testing.T) {
if volumeMountName != additionalSecretMount {
t.Errorf("%s %s: Expected mount %s was not created, have %s instead",
testName, tt.subTest, additionalSecretMount, volumeMountName)
t.Name(), tt.subTest, additionalSecretMount, volumeMountName)
}
}
@ -1687,8 +1727,6 @@ func TestSecretVolume(t *testing.T) {
}
func TestAdditionalVolume(t *testing.T) {
testName := "TestAdditionalVolume"
client, _ := newFakeK8sTestClient()
clusterName := "acid-test-cluster"
namespace := "default"
@ -1800,14 +1838,13 @@ func TestAdditionalVolume(t *testing.T) {
if !util.IsEqualIgnoreOrder(mounts, tt.expectedMounts) {
t.Errorf("%s %s: different volume mounts: got %v, epxected %v",
testName, tt.subTest, mounts, tt.expectedMounts)
t.Name(), tt.subTest, mounts, tt.expectedMounts)
}
}
}
}
func TestVolumeSelector(t *testing.T) {
testName := "TestVolumeSelector"
makeSpec := func(volume acidv1.Volume) acidv1.PostgresSpec {
return acidv1.PostgresSpec{
TeamID: "myapp",
@ -1888,7 +1925,7 @@ func TestVolumeSelector(t *testing.T) {
pgSpec := makeSpec(tt.volume)
sts, err := cluster.generateStatefulSet(&pgSpec)
if err != nil {
t.Fatalf("%s %s: no statefulset created %v", testName, tt.subTest, err)
t.Fatalf("%s %s: no statefulset created %v", t.Name(), tt.subTest, err)
}
volIdx := len(sts.Spec.VolumeClaimTemplates)
@ -1899,12 +1936,12 @@ func TestVolumeSelector(t *testing.T) {
}
}
if volIdx == len(sts.Spec.VolumeClaimTemplates) {
t.Errorf("%s %s: no datavolume found in sts", testName, tt.subTest)
t.Errorf("%s %s: no datavolume found in sts", t.Name(), tt.subTest)
}
selector := sts.Spec.VolumeClaimTemplates[volIdx].Spec.Selector
if !reflect.DeepEqual(selector, tt.wantSelector) {
t.Errorf("%s %s: expected: %#v but got: %#v", testName, tt.subTest, tt.wantSelector, selector)
t.Errorf("%s %s: expected: %#v but got: %#v", t.Name(), tt.subTest, tt.wantSelector, selector)
}
}
}
@ -2320,7 +2357,6 @@ func TestCreateLoadBalancerLogic(t *testing.T) {
},
}, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger, eventRecorder)
testName := "TestCreateLoadBalancerLogic"
tests := []struct {
subtest string
role PostgresRole
@ -2362,7 +2398,7 @@ func TestCreateLoadBalancerLogic(t *testing.T) {
result := cluster.shouldCreateLoadBalancerForService(tt.role, tt.spec)
if tt.result != result {
t.Errorf("%s %s: Load balancer is %t, expect %t for role %#v and spec %#v",
testName, tt.subtest, result, tt.result, tt.role, tt.spec)
t.Name(), tt.subtest, result, tt.result, tt.role, tt.spec)
}
}
}
@ -2410,7 +2446,6 @@ func getServices(serviceType v1.ServiceType, sourceRanges []string, extTrafficPo
}
func TestEnableLoadBalancers(t *testing.T) {
testName := "Test enabling LoadBalancers"
client, _ := newLBFakeClient()
clusterName := "acid-test-cluster"
namespace := "default"
@ -2545,13 +2580,12 @@ func TestEnableLoadBalancers(t *testing.T) {
generatedServices = append(generatedServices, cluster.ConnectionPooler[role].Service.Spec)
}
if !reflect.DeepEqual(tt.expectedServices, generatedServices) {
t.Errorf("%s %s: expected %#v but got %#v", testName, tt.subTest, tt.expectedServices, generatedServices)
t.Errorf("%s %s: expected %#v but got %#v", t.Name(), tt.subTest, tt.expectedServices, generatedServices)
}
}
}
func TestGenerateResourceRequirements(t *testing.T) {
testName := "TestGenerateResourceRequirements"
client, _ := newFakeK8sTestClient()
clusterName := "acid-test-cluster"
namespace := "default"
@ -2921,14 +2955,12 @@ func TestGenerateResourceRequirements(t *testing.T) {
}
assert.NoError(t, err)
if !reflect.DeepEqual(tt.expectedResources, clusterResources) {
t.Errorf("%s - %s: expected %#v but got %#v", testName, tt.subTest, tt.expectedResources, clusterResources)
t.Errorf("%s - %s: expected %#v but got %#v", t.Name(), tt.subTest, tt.expectedResources, clusterResources)
}
}
}
func TestGenerateCapabilities(t *testing.T) {
testName := "TestGenerateCapabilities"
tests := []struct {
subTest string
configured []string
@ -2968,7 +3000,7 @@ func TestGenerateCapabilities(t *testing.T) {
caps := generateCapabilities(tt.configured)
if !reflect.DeepEqual(caps, tt.capabilities) {
t.Errorf("%s %s: expected `%v` but got `%v`",
testName, tt.subTest, tt.capabilities, caps)
t.Name(), tt.subTest, tt.capabilities, caps)
}
}
}

View File

@ -86,7 +86,7 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur
result.StorageResizeMode = util.Coalesce(fromCRD.Kubernetes.StorageResizeMode, "pvc")
result.EnableInitContainers = util.CoalesceBool(fromCRD.Kubernetes.EnableInitContainers, util.True())
result.EnableSidecars = util.CoalesceBool(fromCRD.Kubernetes.EnableSidecars, util.True())
result.SharePGSocketWithSidecars = util.CoalesceBool(fromCRD.Kubernetes.SharePGSocketWithSidecars, util.False())
result.SharePgSocketWithSidecars = util.CoalesceBool(fromCRD.Kubernetes.SharePgSocketWithSidecars, util.False())
result.SecretNameTemplate = fromCRD.Kubernetes.SecretNameTemplate
result.OAuthTokenSecretName = fromCRD.Kubernetes.OAuthTokenSecretName
result.EnableCrossNamespaceSecret = fromCRD.Kubernetes.EnableCrossNamespaceSecret

View File

@ -45,14 +45,14 @@ var localSchemeBuilder = runtime.SchemeBuilder{
// AddToScheme adds all types of this clientset into the given scheme. This allows composition
// of clientsets, like in:
//
// import (
// "k8s.io/client-go/kubernetes"
// clientsetscheme "k8s.io/client-go/kubernetes/scheme"
// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme"
// )
// import (
// "k8s.io/client-go/kubernetes"
// clientsetscheme "k8s.io/client-go/kubernetes/scheme"
// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme"
// )
//
// kclientset, _ := kubernetes.NewForConfig(c)
// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme)
// kclientset, _ := kubernetes.NewForConfig(c)
// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme)
//
// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types
// correctly.

View File

@ -45,14 +45,14 @@ var localSchemeBuilder = runtime.SchemeBuilder{
// AddToScheme adds all types of this clientset into the given scheme. This allows composition
// of clientsets, like in:
//
// import (
// "k8s.io/client-go/kubernetes"
// clientsetscheme "k8s.io/client-go/kubernetes/scheme"
// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme"
// )
// import (
// "k8s.io/client-go/kubernetes"
// clientsetscheme "k8s.io/client-go/kubernetes/scheme"
// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme"
// )
//
// kclientset, _ := kubernetes.NewForConfig(c)
// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme)
// kclientset, _ := kubernetes.NewForConfig(c)
// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme)
//
// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types
// correctly.

View File

@ -213,7 +213,7 @@ type Config struct {
EnablePodDisruptionBudget *bool `name:"enable_pod_disruption_budget" default:"true"`
EnableInitContainers *bool `name:"enable_init_containers" default:"true"`
EnableSidecars *bool `name:"enable_sidecars" default:"true"`
SharePGSocketWithSidecars *bool `name:"share_pg_socket_with_sidecars" default:"false"`
SharePgSocketWithSidecars *bool `name:"share_pgsocket_with_sidecars" default:"false"`
Workers uint32 `name:"workers" default:"8"`
APIPort int `name:"api_port" default:"8080"`
RingLogLines int `name:"ring_log_lines" default:"100"`
@ -232,11 +232,11 @@ type Config struct {
EnableTeamIdClusternamePrefix bool `name:"enable_team_id_clustername_prefix" default:"false"`
MajorVersionUpgradeMode string `name:"major_version_upgrade_mode" default:"off"`
MajorVersionUpgradeTeamAllowList []string `name:"major_version_upgrade_team_allow_list" default:""`
MinimalMajorVersion string `name:"minimal_major_version" default:"9.6"`
MinimalMajorVersion string `name:"minimal_major_version" default:"11"`
TargetMajorVersion string `name:"target_major_version" default:"14"`
PatroniAPICheckInterval time.Duration `name:"patroni_api_check_interval" default:"1s"`
PatroniAPICheckTimeout time.Duration `name:"patroni_api_check_timeout" default:"5s"`
EnablePatroniFailsafeMode *bool `name:"enable_patroni_failsafe_mode" default:"false"`
EnablePatroniFailsafeMode *bool `name:"enable_patroni_failsafe_mode" default:"false"`
}
// MustMarshal marshals the config or panics

View File

@ -15,4 +15,7 @@ const (
ShmVolumeName = "dshm"
ShmVolumePath = "/dev/shm"
RunVolumeName = "postgresql-run"
RunVolumePath = "/var/run/postgresql"
)