convert constant variables to functions

inovindasari 2024-11-27 15:34:03 +01:00
parent a2328b9ec8
commit a48b5b6fb6
13 changed files with 135 additions and 125 deletions

View File

@@ -175,6 +175,14 @@ func (c *Cluster) clusterNamespace() string {
return c.ObjectMeta.Namespace
}
func (c *Cluster) masterRole() PostgresRole {
return PostgresRole(c.OpConfig.PodLeaderLabelValue)
}
func (c *Cluster) replicaRole() PostgresRole {
return PostgresRole("replica")
}
func (c *Cluster) teamName() string {
// TODO: check Teams API for the actual name (in case the user passes an integer Id).
return c.Spec.TeamID
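The heart of the change is visible in this first hunk: the role values are no longer package-level constants but methods on *Cluster, so the leader's label value can be taken from the operator configuration (the new PodLeaderLabelValue field) instead of being fixed at compile time. A minimal sketch of the before/after pattern, using only names that appear in this diff:

    // Before: fixed at compile time, identical for every deployment.
    const Master PostgresRole = "master"

    // After: resolved at run time from the operator configuration,
    // so a deployment can label its leader e.g. "primary" instead.
    func (c *Cluster) masterRole() PostgresRole {
        return PostgresRole(c.OpConfig.PodLeaderLabelValue)
    }

replicaRole() still returns the literal "replica"; wrapping it in a method keeps every call site symmetrical with masterRole().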
@@ -294,7 +302,7 @@ func (c *Cluster) Create() (err error) {
}
c.eventRecorder.Event(c.GetReference(), v1.EventTypeNormal, "Create", "Started creation of new cluster resources")
for _, role := range []PostgresRole{Master, Replica} {
for _, role := range []PostgresRole{c.masterRole(), c.replicaRole()} {
// if kubernetes_use_configmaps is set Patroni will create configmaps
// otherwise it will use endpoints
@@ -302,7 +310,7 @@ func (c *Cluster) Create() (err error) {
if c.Endpoints[role] != nil {
return fmt.Errorf("%s endpoint already exists in the cluster", role)
}
if role == Master {
if role == c.masterRole() {
// replica endpoint will be created by the replica service. Master endpoint needs to be created by us,
// since the corresponding master service does not define any selectors.
ep, err = c.createEndpoint(role)
@@ -1213,7 +1221,7 @@ func (c *Cluster) Delete() error {
c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeWarning, "Delete", "could not delete pod disruption budget: %v", err)
}
for _, role := range []PostgresRole{Master, Replica} {
for _, role := range []PostgresRole{c.masterRole(), c.replicaRole()} {
if !c.patroniKubernetesUseConfigMaps() {
if err := c.deleteEndpoint(role); err != nil {
anyErrors = true
@@ -1238,7 +1246,7 @@ func (c *Cluster) Delete() error {
// Delete connection pooler objects anyway, even if it's not mentioned in the
// manifest, just to not keep orphaned components in case something went
// wrong
for _, role := range [2]PostgresRole{Master, Replica} {
for _, role := range [2]PostgresRole{c.masterRole(), c.replicaRole()} {
if err := c.deleteConnectionPooler(role); err != nil {
anyErrors = true
c.logger.Warningf("could not remove connection pooler: %v", err)

View File

@@ -55,6 +55,7 @@ var cl = New(
},
Resources: config.Resources{
DownscalerAnnotations: []string{"downscaler/*"},
PodLeaderLabelValue: "master",
},
ConnectionPooler: config.ConnectionPooler{
User: poolerUserName,
@@ -147,6 +148,7 @@ func TestCreate(t *testing.T) {
DefaultMemoryRequest: "300Mi",
DefaultMemoryLimit: "300Mi",
PodRoleLabel: "spilo-role",
PodLeaderLabelValue: "master",
ResourceCheckInterval: time.Duration(3),
ResourceCheckTimeout: time.Duration(10),
},
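Because masterRole() now reads PodLeaderLabelValue, test fixtures must set the field explicitly; otherwise the method returns PostgresRole("") and comparisons against the "master" pod label fail silently. A sketch of the fixture pattern repeated throughout these tests:

    OpConfig: config.Config{
        Resources: config.Resources{
            // must match the role label value the assertions expect
            PodLeaderLabelValue: "master",
        },
    },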

View File

@@ -49,7 +49,7 @@ type ConnectionPoolerObjects struct {
func (c *Cluster) connectionPoolerName(role PostgresRole) string {
name := fmt.Sprintf("%s-%s", c.Name, constants.ConnectionPoolerResourceSuffix)
if role == Replica {
if role == c.replicaRole() {
name = fmt.Sprintf("%s-%s", name, "repl")
}
return name
@@ -537,8 +537,8 @@ func (c *Cluster) generatePoolerServiceAnnotations(role PostgresRole, spec *acid
annotations[constants.ElbTimeoutAnnotationName] = constants.ElbTimeoutAnnotationValue
}
// -repl suffix will be added by replicaDNSName
clusterNameWithPoolerSuffix := c.connectionPoolerName(Master)
if role == Master {
clusterNameWithPoolerSuffix := c.connectionPoolerName(c.masterRole())
if role == c.masterRole() {
dnsString = c.masterDNSName(clusterNameWithPoolerSuffix)
} else {
dnsString = c.replicaDNSName(clusterNameWithPoolerSuffix)
@@ -557,7 +557,7 @@ func (c *Cluster) shouldCreateLoadBalancerForPoolerService(role PostgresRole, sp
switch role {
case Replica:
case c.replicaRole():
// if the value is explicitly set in a Postgresql manifest, follow this setting
if spec.EnableReplicaPoolerLoadBalancer != nil {
return *spec.EnableReplicaPoolerLoadBalancer
@@ -565,7 +565,7 @@ func (c *Cluster) shouldCreateLoadBalancerForPoolerService(role PostgresRole, sp
// otherwise, follow the operator configuration
return c.OpConfig.EnableReplicaPoolerLoadBalancer
case Master:
case c.masterRole():
if spec.EnableMasterPoolerLoadBalancer != nil {
return *spec.EnableMasterPoolerLoadBalancer
}
@@ -877,9 +877,9 @@ func (c *Cluster) syncConnectionPooler(oldSpec, newSpec *acidv1.Postgresql, Look
logPoolerEssentials(c.logger, oldSpec, newSpec)
// Check and perform the sync requirements for each of the roles.
for _, role := range [2]PostgresRole{Master, Replica} {
for _, role := range [2]PostgresRole{c.masterRole(), c.replicaRole()} {
if role == Master {
if role == c.masterRole() {
connectionPoolerNeeded = needMasterConnectionPoolerWorker(&newSpec.Spec)
} else {
connectionPoolerNeeded = needReplicaConnectionPoolerWorker(&newSpec.Spec)
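Replacing constants with method calls is legal in these switch statements: a Go expression switch evaluates its case expressions at run time and compares them with ==, so the cases do not have to be constants. In outline:

    switch role {
    case c.masterRole():
        // leader-specific handling
    case c.replicaRole():
        // replica-specific handling
    }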

View File

@@ -42,7 +42,7 @@ func boolToPointer(value bool) *bool {
}
func deploymentUpdated(cluster *Cluster, err error, reason SyncReason) error {
for _, role := range [2]PostgresRole{Master, Replica} {
for _, role := range [2]PostgresRole{cluster.masterRole(), cluster.replicaRole()} {
poolerLabels := cluster.labelsSet(false)
poolerLabels["application"] = "db-connection-pooler"
@@ -63,7 +63,7 @@ func objectsAreSaved(cluster *Cluster, err error, reason SyncReason) error {
return fmt.Errorf("Connection pooler resources are empty")
}
for _, role := range []PostgresRole{Master, Replica} {
for _, role := range []PostgresRole{cluster.masterRole(), cluster.replicaRole()} {
poolerLabels := cluster.labelsSet(false)
poolerLabels["application"] = "db-connection-pooler"
poolerLabels["connection-pooler"] = cluster.connectionPoolerName(role)
@@ -87,14 +87,14 @@ func MasterObjectsAreSaved(cluster *Cluster, err error, reason SyncReason) error
poolerLabels := cluster.labelsSet(false)
poolerLabels["application"] = "db-connection-pooler"
poolerLabels["connection-pooler"] = cluster.connectionPoolerName(Master)
poolerLabels["connection-pooler"] = cluster.connectionPoolerName(cluster.masterRole())
if cluster.ConnectionPooler[Master].Deployment == nil || !util.MapContains(cluster.ConnectionPooler[Master].Deployment.Labels, poolerLabels) {
return fmt.Errorf("Deployment was not saved or labels not attached %s", cluster.ConnectionPooler[Master].Deployment.Labels)
if cluster.ConnectionPooler[cluster.masterRole()].Deployment == nil || !util.MapContains(cluster.ConnectionPooler[cluster.masterRole()].Deployment.Labels, poolerLabels) {
return fmt.Errorf("Deployment was not saved or labels not attached %s", cluster.ConnectionPooler[cluster.masterRole()].Deployment.Labels)
}
if cluster.ConnectionPooler[Master].Service == nil || !util.MapContains(cluster.ConnectionPooler[Master].Service.Labels, poolerLabels) {
return fmt.Errorf("Service was not saved or labels not attached %s", cluster.ConnectionPooler[Master].Service.Labels)
if cluster.ConnectionPooler[cluster.masterRole()].Service == nil || !util.MapContains(cluster.ConnectionPooler[cluster.masterRole()].Service.Labels, poolerLabels) {
return fmt.Errorf("Service was not saved or labels not attached %s", cluster.ConnectionPooler[cluster.masterRole()].Service.Labels)
}
return nil
@@ -107,21 +107,21 @@ func ReplicaObjectsAreSaved(cluster *Cluster, err error, reason SyncReason) erro
poolerLabels := cluster.labelsSet(false)
poolerLabels["application"] = "db-connection-pooler"
poolerLabels["connection-pooler"] = cluster.connectionPoolerName(Replica)
poolerLabels["connection-pooler"] = cluster.connectionPoolerName(cluster.replicaRole())
if cluster.ConnectionPooler[Replica].Deployment == nil || !util.MapContains(cluster.ConnectionPooler[Replica].Deployment.Labels, poolerLabels) {
return fmt.Errorf("Deployment was not saved or labels not attached %s", cluster.ConnectionPooler[Replica].Deployment.Labels)
if cluster.ConnectionPooler[cluster.replicaRole()].Deployment == nil || !util.MapContains(cluster.ConnectionPooler[cluster.replicaRole()].Deployment.Labels, poolerLabels) {
return fmt.Errorf("Deployment was not saved or labels not attached %s", cluster.ConnectionPooler[cluster.replicaRole()].Deployment.Labels)
}
if cluster.ConnectionPooler[Replica].Service == nil || !util.MapContains(cluster.ConnectionPooler[Replica].Service.Labels, poolerLabels) {
return fmt.Errorf("Service was not saved or labels not attached %s", cluster.ConnectionPooler[Replica].Service.Labels)
if cluster.ConnectionPooler[cluster.replicaRole()].Service == nil || !util.MapContains(cluster.ConnectionPooler[cluster.replicaRole()].Service.Labels, poolerLabels) {
return fmt.Errorf("Service was not saved or labels not attached %s", cluster.ConnectionPooler[cluster.replicaRole()].Service.Labels)
}
return nil
}
func objectsAreDeleted(cluster *Cluster, err error, reason SyncReason) error {
for _, role := range [2]PostgresRole{Master, Replica} {
for _, role := range [2]PostgresRole{cluster.masterRole(), cluster.replicaRole()} {
if cluster.ConnectionPooler[role] != nil &&
(cluster.ConnectionPooler[role].Deployment != nil || cluster.ConnectionPooler[role].Service != nil) {
return fmt.Errorf("Connection pooler was not deleted for role %v", role)
@@ -133,8 +133,8 @@ func objectsAreDeleted(cluster *Cluster, err error, reason SyncReason) error {
func OnlyMasterDeleted(cluster *Cluster, err error, reason SyncReason) error {
if cluster.ConnectionPooler[Master] != nil &&
(cluster.ConnectionPooler[Master].Deployment != nil || cluster.ConnectionPooler[Master].Service != nil) {
if cluster.ConnectionPooler[cluster.masterRole()] != nil &&
(cluster.ConnectionPooler[cluster.masterRole()].Deployment != nil || cluster.ConnectionPooler[cluster.masterRole()].Service != nil) {
return fmt.Errorf("Connection pooler master was not deleted")
}
return nil
@@ -142,8 +142,8 @@ func OnlyMasterDeleted(cluster *Cluster, err error, reason SyncReason) error {
func OnlyReplicaDeleted(cluster *Cluster, err error, reason SyncReason) error {
if cluster.ConnectionPooler[Replica] != nil &&
(cluster.ConnectionPooler[Replica].Deployment != nil || cluster.ConnectionPooler[Replica].Service != nil) {
if cluster.ConnectionPooler[cluster.replicaRole()] != nil &&
(cluster.ConnectionPooler[cluster.replicaRole()].Deployment != nil || cluster.ConnectionPooler[cluster.replicaRole()].Service != nil) {
return fmt.Errorf("Connection pooler replica was not deleted")
}
return nil
@@ -323,7 +323,7 @@ func TestConnectionPoolerCreateDeletion(t *testing.T) {
cluster.Name = "acid-fake-cluster"
cluster.Namespace = "default"
_, err := cluster.createService(Master)
_, err := cluster.createService(cluster.masterRole())
assert.NoError(t, err)
_, err = cluster.createStatefulSet()
assert.NoError(t, err)
@@ -334,7 +334,7 @@ func TestConnectionPoolerCreateDeletion(t *testing.T) {
t.Errorf("%s: Cannot create connection pooler, %s, %+v",
testName, err, reason)
}
for _, role := range [2]PostgresRole{Master, Replica} {
for _, role := range [2]PostgresRole{cluster.masterRole(), cluster.replicaRole()} {
poolerLabels := cluster.labelsSet(false)
poolerLabels["application"] = "db-connection-pooler"
poolerLabels["connection-pooler"] = cluster.connectionPoolerName(role)
@@ -369,7 +369,7 @@ func TestConnectionPoolerCreateDeletion(t *testing.T) {
t.Errorf("%s: Cannot sync connection pooler, %s", testName, err)
}
for _, role := range [2]PostgresRole{Master, Replica} {
for _, role := range [2]PostgresRole{cluster.masterRole(), cluster.replicaRole()} {
err = cluster.deleteConnectionPooler(role)
if err != nil {
t.Errorf("%s: Cannot delete connection pooler, %s", testName, err)
@@ -424,6 +424,7 @@ func TestConnectionPoolerSync(t *testing.T) {
DefaultMemoryRequest: "300Mi",
DefaultMemoryLimit: "300Mi",
PodRoleLabel: "spilo-role",
PodLeaderLabelValue: "master",
},
},
}, client, pg, logger, eventRecorder)
@@ -431,7 +432,7 @@ func TestConnectionPoolerSync(t *testing.T) {
cluster.Name = "acid-fake-cluster"
cluster.Namespace = "default"
_, err := cluster.createService(Master)
_, err := cluster.createService(cluster.masterRole())
assert.NoError(t, err)
_, err = cluster.createStatefulSet()
assert.NoError(t, err)
@@ -765,7 +766,7 @@ func TestConnectionPoolerPodSpec(t *testing.T) {
check: testEnvs,
},
}
for _, role := range [2]PostgresRole{Master, Replica} {
for _, role := range [2]PostgresRole{cluster.masterRole(), cluster.replicaRole()} {
for _, tt := range tests {
podSpec, _ := tt.cluster.generateConnectionPoolerPodTemplate(role)
@@ -802,12 +803,12 @@ func TestConnectionPoolerDeploymentSpec(t *testing.T) {
},
}
cluster.ConnectionPooler = map[PostgresRole]*ConnectionPoolerObjects{
Master: {
cluster.masterRole(): {
Deployment: nil,
Service: nil,
LookupFunction: true,
Name: "",
Role: Master,
Role: cluster.masterRole(),
},
}
@@ -854,7 +855,7 @@ func TestConnectionPoolerDeploymentSpec(t *testing.T) {
},
}
for _, tt := range tests {
deployment, err := tt.cluster.generateConnectionPoolerDeployment(cluster.ConnectionPooler[Master])
deployment, err := tt.cluster.generateConnectionPoolerDeployment(cluster.ConnectionPooler[cluster.masterRole()])
if err != tt.expected && err.Error() != tt.expected.Error() {
t.Errorf("%s [%s]: Could not generate deployment spec,\n %+v, expected\n %+v",
@@ -921,7 +922,7 @@ func testLabels(cluster *Cluster, podSpec *v1.PodTemplateSpec, role PostgresRole
func testSelector(cluster *Cluster, deployment *appsv1.Deployment) error {
labels := deployment.Spec.Selector.MatchLabels
expected := cluster.connectionPoolerLabels(Master, true).MatchLabels
expected := cluster.connectionPoolerLabels(cluster.masterRole(), true).MatchLabels
if labels["connection-pooler"] != expected["connection-pooler"] {
return fmt.Errorf("Labels are incorrect, got %+v, expected %+v",
@@ -1018,20 +1019,20 @@ func TestPoolerTLS(t *testing.T) {
// create pooler resources
cluster.ConnectionPooler = map[PostgresRole]*ConnectionPoolerObjects{}
cluster.ConnectionPooler[Master] = &ConnectionPoolerObjects{
cluster.ConnectionPooler[cluster.masterRole()] = &ConnectionPoolerObjects{
Deployment: nil,
Service: nil,
Name: cluster.connectionPoolerName(Master),
Name: cluster.connectionPoolerName(cluster.masterRole()),
ClusterName: clusterName,
Namespace: namespace,
LookupFunction: false,
Role: Master,
Role: cluster.masterRole(),
}
_, err = cluster.syncConnectionPoolerWorker(nil, &pg, Master)
_, err = cluster.syncConnectionPoolerWorker(nil, &pg, cluster.masterRole())
assert.NoError(t, err)
deploy, err := client.Deployments(namespace).Get(context.TODO(), cluster.connectionPoolerName(Master), metav1.GetOptions{})
deploy, err := client.Deployments(namespace).Get(context.TODO(), cluster.connectionPoolerName(cluster.masterRole()), metav1.GetOptions{})
assert.NoError(t, err)
fsGroup := int64(103)
@@ -1088,17 +1089,17 @@ func TestConnectionPoolerServiceSpec(t *testing.T) {
},
}
cluster.ConnectionPooler = map[PostgresRole]*ConnectionPoolerObjects{
Master: {
cluster.masterRole(): {
Deployment: nil,
Service: nil,
LookupFunction: false,
Role: Master,
Role: cluster.masterRole(),
},
Replica: {
cluster.replicaRole(): {
Deployment: nil,
Service: nil,
LookupFunction: false,
Role: Replica,
Role: cluster.replicaRole(),
},
}
@@ -1138,7 +1139,7 @@ func TestConnectionPoolerServiceSpec(t *testing.T) {
check: testServiceSelector,
},
}
for _, role := range [2]PostgresRole{Master, Replica} {
for _, role := range [2]PostgresRole{cluster.masterRole(), cluster.replicaRole()} {
for _, tt := range tests {
service := tt.cluster.generateConnectionPoolerService(tt.cluster.ConnectionPooler[role])
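The same applies to composite literals: map keys in Go need not be constants, so the pooler map can be keyed by the role methods directly. A condensed sketch of the pattern used in the tests above:

    cluster.ConnectionPooler = map[PostgresRole]*ConnectionPoolerObjects{
        cluster.masterRole():  {Role: cluster.masterRole()},
        cluster.replicaRole(): {Role: cluster.replicaRole()},
    }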

View File

@@ -77,7 +77,7 @@ func (c *Cluster) statefulSetName() string {
func (c *Cluster) serviceName(role PostgresRole) string {
name := c.Name
switch role {
case Replica:
case c.replicaRole():
name = fmt.Sprintf("%s-%s", name, "repl")
case Patroni:
name = fmt.Sprintf("%s-%s", name, "config")
@@ -1536,7 +1536,7 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
Spec: appsv1.StatefulSetSpec{
Replicas: &numberOfInstances,
Selector: c.labelsSelector(),
ServiceName: c.serviceName(Master),
ServiceName: c.serviceName(c.masterRole()),
Template: *podTemplate,
VolumeClaimTemplates: []v1.PersistentVolumeClaim{*volumeClaimTemplate},
UpdateStrategy: updateStrategy,
@@ -1955,7 +1955,7 @@ func (c *Cluster) shouldCreateLoadBalancerForService(role PostgresRole, spec *ac
switch role {
case Replica:
case c.replicaRole():
// if the value is explicitly set in a Postgresql manifest, follow this setting
if spec.EnableReplicaLoadBalancer != nil {
@@ -1965,7 +1965,7 @@ func (c *Cluster) shouldCreateLoadBalancerForService(role PostgresRole, spec *ac
// otherwise, follow the operator configuration
return c.OpConfig.EnableReplicaLoadBalancer
case Master:
case c.masterRole():
if spec.EnableMasterLoadBalancer != nil {
return *spec.EnableMasterLoadBalancer
@@ -1987,7 +1987,7 @@ func (c *Cluster) generateService(role PostgresRole, spec *acidv1.PostgresSpec)
// no selector for master, see https://github.com/zalando/postgres-operator/issues/340
// if kubernetes_use_configmaps is set master service needs a selector
if role == Replica || c.patroniKubernetesUseConfigMaps() {
if role == c.replicaRole() || c.patroniKubernetesUseConfigMaps() {
serviceSpec.Selector = c.roleLabelsSet(false, role)
}
@@ -2054,9 +2054,9 @@ func (c *Cluster) getCustomServiceAnnotations(role PostgresRole, spec *acidv1.Po
maps.Copy(annotations, spec.ServiceAnnotations)
switch role {
case Master:
case c.masterRole():
maps.Copy(annotations, spec.MasterServiceAnnotations)
case Replica:
case c.replicaRole():
maps.Copy(annotations, spec.ReplicaServiceAnnotations)
}
}
@@ -2227,7 +2227,7 @@ func (c *Cluster) generatePodDisruptionBudget() *policyv1.PodDisruptionBudget {
// define label selector and add the master role selector if enabled
labels := c.labelsSet(false)
if pdbMasterLabelSelector == nil || *c.OpConfig.PDBMasterLabelSelector {
labels[c.OpConfig.PodRoleLabel] = string(Master)
labels[c.OpConfig.PodRoleLabel] = string(c.masterRole())
}
return &policyv1.PodDisruptionBudget{
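PostgresRole is a defined string type, so wherever a plain string is required the result of masterRole() needs an explicit conversion, as in the pod disruption budget hunk above:

    labels := c.labelsSet(false)
    // the budget now pins whatever value the operator is
    // configured to use for its leader label
    labels[c.OpConfig.PodRoleLabel] = string(c.masterRole())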

View File

@@ -576,67 +576,67 @@ func TestGenerateSpiloPodEnvVars(t *testing.T) {
}
expectedSpiloWalPathCompat := []ExpectedValue{
{
envIndex: 12,
envIndex: 14,
envVarConstant: "ENABLE_WAL_PATH_COMPAT",
envVarValue: "true",
},
}
expectedValuesS3Bucket := []ExpectedValue{
{
envIndex: 15,
envIndex: 17,
envVarConstant: "WAL_S3_BUCKET",
envVarValue: "global-s3-bucket",
},
{
envIndex: 16,
envIndex: 18,
envVarConstant: "WAL_BUCKET_SCOPE_SUFFIX",
envVarValue: fmt.Sprintf("/%s", dummyUUID),
},
{
envIndex: 17,
envIndex: 19,
envVarConstant: "WAL_BUCKET_SCOPE_PREFIX",
envVarValue: "",
},
}
expectedValuesGCPCreds := []ExpectedValue{
{
envIndex: 15,
envIndex: 17,
envVarConstant: "WAL_GS_BUCKET",
envVarValue: "global-gs-bucket",
},
{
envIndex: 16,
envIndex: 18,
envVarConstant: "WAL_BUCKET_SCOPE_SUFFIX",
envVarValue: fmt.Sprintf("/%s", dummyUUID),
},
{
envIndex: 17,
envIndex: 19,
envVarConstant: "WAL_BUCKET_SCOPE_PREFIX",
envVarValue: "",
},
{
envIndex: 18,
envIndex: 20,
envVarConstant: "GOOGLE_APPLICATION_CREDENTIALS",
envVarValue: "some-path-to-credentials",
},
}
expectedS3BucketConfigMap := []ExpectedValue{
{
envIndex: 17,
envIndex: 19,
envVarConstant: "wal_s3_bucket",
envVarValue: "global-s3-bucket-configmap",
},
}
expectedCustomS3BucketSpec := []ExpectedValue{
{
envIndex: 15,
envIndex: 17,
envVarConstant: "WAL_S3_BUCKET",
envVarValue: "custom-s3-bucket",
},
}
expectedCustomVariableSecret := []ExpectedValue{
{
envIndex: 16,
envIndex: 18,
envVarConstant: "custom_variable",
envVarValueRef: &v1.EnvVarSource{
SecretKeyRef: &v1.SecretKeySelector{
@@ -650,72 +650,72 @@ func TestGenerateSpiloPodEnvVars(t *testing.T) {
}
expectedCustomVariableConfigMap := []ExpectedValue{
{
envIndex: 16,
envIndex: 18,
envVarConstant: "custom_variable",
envVarValue: "configmap-test",
},
}
expectedCustomVariableSpec := []ExpectedValue{
{
envIndex: 15,
envIndex: 17,
envVarConstant: "CUSTOM_VARIABLE",
envVarValue: "spec-env-test",
},
}
expectedCloneEnvSpec := []ExpectedValue{
{
envIndex: 16,
envIndex: 18,
envVarConstant: "CLONE_WALE_S3_PREFIX",
envVarValue: "s3://another-bucket",
},
{
envIndex: 19,
envIndex: 21,
envVarConstant: "CLONE_WAL_BUCKET_SCOPE_PREFIX",
envVarValue: "",
},
{
envIndex: 20,
envIndex: 22,
envVarConstant: "CLONE_AWS_ENDPOINT",
envVarValue: "s3.eu-central-1.amazonaws.com",
},
}
expectedCloneEnvSpecEnv := []ExpectedValue{
{
envIndex: 15,
envIndex: 17,
envVarConstant: "CLONE_WAL_BUCKET_SCOPE_PREFIX",
envVarValue: "test-cluster",
},
{
envIndex: 17,
envIndex: 19,
envVarConstant: "CLONE_WALE_S3_PREFIX",
envVarValue: "s3://another-bucket",
},
{
envIndex: 21,
envIndex: 23,
envVarConstant: "CLONE_AWS_ENDPOINT",
envVarValue: "s3.eu-central-1.amazonaws.com",
},
}
expectedCloneEnvConfigMap := []ExpectedValue{
{
envIndex: 16,
envIndex: 18,
envVarConstant: "CLONE_WAL_S3_BUCKET",
envVarValue: "global-s3-bucket",
},
{
envIndex: 17,
envIndex: 19,
envVarConstant: "CLONE_WAL_BUCKET_SCOPE_SUFFIX",
envVarValue: fmt.Sprintf("/%s", dummyUUID),
},
{
envIndex: 21,
envIndex: 23,
envVarConstant: "clone_aws_endpoint",
envVarValue: "s3.eu-west-1.amazonaws.com",
},
}
expectedCloneEnvSecret := []ExpectedValue{
{
envIndex: 21,
envIndex: 23,
envVarConstant: "clone_aws_access_key_id",
envVarValueRef: &v1.EnvVarSource{
SecretKeyRef: &v1.SecretKeySelector{
@@ -729,12 +729,12 @@ func TestGenerateSpiloPodEnvVars(t *testing.T) {
}
expectedStandbyEnvSecret := []ExpectedValue{
{
envIndex: 15,
envIndex: 17,
envVarConstant: "STANDBY_WALE_GS_PREFIX",
envVarValue: "gs://some/path/",
},
{
envIndex: 20,
envIndex: 22,
envVarConstant: "standby_google_application_credentials",
envVarValueRef: &v1.EnvVarSource{
SecretKeyRef: &v1.SecretKeySelector{
@@ -2389,7 +2389,7 @@ func TestGeneratePodDisruptionBudget(t *testing.T) {
{
scenario: "With multiple instances",
spec: New(
Config{OpConfig: config.Config{Resources: config.Resources{ClusterNameLabel: "cluster-name", PodRoleLabel: "spilo-role"}, PDBNameFormat: "postgres-{cluster}-pdb"}},
Config{OpConfig: config.Config{Resources: config.Resources{ClusterNameLabel: "cluster-name", PodRoleLabel: "spilo-role", PodLeaderLabelValue: "master"}, PDBNameFormat: "postgres-{cluster}-pdb"}},
k8sutil.KubernetesClient{},
acidv1.Postgresql{
ObjectMeta: metav1.ObjectMeta{Name: "myapp-database", Namespace: "myapp"},
@@ -2406,7 +2406,7 @@ func TestGeneratePodDisruptionBudget(t *testing.T) {
{
scenario: "With zero instances",
spec: New(
Config{OpConfig: config.Config{Resources: config.Resources{ClusterNameLabel: "cluster-name", PodRoleLabel: "spilo-role"}, PDBNameFormat: "postgres-{cluster}-pdb"}},
Config{OpConfig: config.Config{Resources: config.Resources{ClusterNameLabel: "cluster-name", PodRoleLabel: "spilo-role", PodLeaderLabelValue: "master"}, PDBNameFormat: "postgres-{cluster}-pdb"}},
k8sutil.KubernetesClient{},
acidv1.Postgresql{
ObjectMeta: metav1.ObjectMeta{Name: "myapp-database", Namespace: "myapp"},
@@ -2423,7 +2423,7 @@ func TestGeneratePodDisruptionBudget(t *testing.T) {
{
scenario: "With PodDisruptionBudget disabled",
spec: New(
Config{OpConfig: config.Config{Resources: config.Resources{ClusterNameLabel: "cluster-name", PodRoleLabel: "spilo-role"}, PDBNameFormat: "postgres-{cluster}-pdb", EnablePodDisruptionBudget: util.False()}},
Config{OpConfig: config.Config{Resources: config.Resources{ClusterNameLabel: "cluster-name", PodRoleLabel: "spilo-role", PodLeaderLabelValue: "master"}, PDBNameFormat: "postgres-{cluster}-pdb", EnablePodDisruptionBudget: util.False()}},
k8sutil.KubernetesClient{},
acidv1.Postgresql{
ObjectMeta: metav1.ObjectMeta{Name: "myapp-database", Namespace: "myapp"},
@@ -2440,7 +2440,7 @@ func TestGeneratePodDisruptionBudget(t *testing.T) {
{
scenario: "With non-default PDBNameFormat and PodDisruptionBudget explicitly enabled",
spec: New(
Config{OpConfig: config.Config{Resources: config.Resources{ClusterNameLabel: "cluster-name", PodRoleLabel: "spilo-role"}, PDBNameFormat: "postgres-{cluster}-databass-budget", EnablePodDisruptionBudget: util.True()}},
Config{OpConfig: config.Config{Resources: config.Resources{ClusterNameLabel: "cluster-name", PodRoleLabel: "spilo-role", PodLeaderLabelValue: "master"}, PDBNameFormat: "postgres-{cluster}-databass-budget", EnablePodDisruptionBudget: util.True()}},
k8sutil.KubernetesClient{},
acidv1.Postgresql{
ObjectMeta: metav1.ObjectMeta{Name: "myapp-database", Namespace: "myapp"},
@@ -2457,7 +2457,7 @@ func TestGeneratePodDisruptionBudget(t *testing.T) {
{
scenario: "With PDBMasterLabelSelector disabled",
spec: New(
Config{OpConfig: config.Config{Resources: config.Resources{ClusterNameLabel: "cluster-name", PodRoleLabel: "spilo-role"}, PDBNameFormat: "postgres-{cluster}-pdb", EnablePodDisruptionBudget: util.True(), PDBMasterLabelSelector: util.False()}},
Config{OpConfig: config.Config{Resources: config.Resources{ClusterNameLabel: "cluster-name", PodRoleLabel: "spilo-role", PodLeaderLabelValue: "master"}, PDBNameFormat: "postgres-{cluster}-pdb", EnablePodDisruptionBudget: util.True(), PDBMasterLabelSelector: util.False()}},
k8sutil.KubernetesClient{},
acidv1.Postgresql{
ObjectMeta: metav1.ObjectMeta{Name: "myapp-database", Namespace: "myapp"},
@@ -2474,7 +2474,7 @@ func TestGeneratePodDisruptionBudget(t *testing.T) {
{
scenario: "With OwnerReference enabled",
spec: New(
Config{OpConfig: config.Config{Resources: config.Resources{ClusterNameLabel: "cluster-name", PodRoleLabel: "spilo-role", EnableOwnerReferences: util.True()}, PDBNameFormat: "postgres-{cluster}-pdb", EnablePodDisruptionBudget: util.True()}},
Config{OpConfig: config.Config{Resources: config.Resources{ClusterNameLabel: "cluster-name", PodRoleLabel: "spilo-role", PodLeaderLabelValue: "master", EnableOwnerReferences: util.True()}, PDBNameFormat: "postgres-{cluster}-pdb", EnablePodDisruptionBudget: util.True()}},
k8sutil.KubernetesClient{},
acidv1.Postgresql{
ObjectMeta: metav1.ObjectMeta{Name: "myapp-database", Namespace: "myapp"},
@@ -2550,6 +2550,7 @@ func TestGenerateService(t *testing.T) {
DefaultMemoryRequest: "0.7Gi",
MaxMemoryRequest: "1.0Gi",
DefaultMemoryLimit: "1.3Gi",
PodLeaderLabelValue: "master",
},
SidecarImages: map[string]string{
"deprecated-global-sidecar": "image:123",
@@ -2576,10 +2577,10 @@ func TestGenerateService(t *testing.T) {
},
}, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger, eventRecorder)
service := cluster.generateService(Master, &spec)
service := cluster.generateService(cluster.masterRole(), &spec)
assert.Equal(t, v1.ServiceExternalTrafficPolicyTypeCluster, service.Spec.ExternalTrafficPolicy)
cluster.OpConfig.ExternalTrafficPolicy = "Local"
service = cluster.generateService(Master, &spec)
service = cluster.generateService(cluster.masterRole(), &spec)
assert.Equal(t, v1.ServiceExternalTrafficPolicyTypeLocal, service.Spec.ExternalTrafficPolicy)
}
@@ -2605,28 +2606,28 @@ func TestCreateLoadBalancerLogic(t *testing.T) {
}{
{
subtest: "new format, load balancer is enabled for replica",
role: Replica,
role: cluster.replicaRole(),
spec: &acidv1.PostgresSpec{EnableReplicaLoadBalancer: util.True()},
opConfig: config.Config{},
result: true,
},
{
subtest: "new format, load balancer is disabled for replica",
role: Replica,
role: cluster.replicaRole(),
spec: &acidv1.PostgresSpec{EnableReplicaLoadBalancer: util.False()},
opConfig: config.Config{},
result: false,
},
{
subtest: "new format, load balancer isn't specified for replica",
role: Replica,
role: cluster.replicaRole(),
spec: &acidv1.PostgresSpec{EnableReplicaLoadBalancer: nil},
opConfig: config.Config{EnableReplicaLoadBalancer: true},
result: true,
},
{
subtest: "new format, load balancer isn't specified for replica",
role: Replica,
role: cluster.replicaRole(),
spec: &acidv1.PostgresSpec{EnableReplicaLoadBalancer: nil},
opConfig: config.Config{EnableReplicaLoadBalancer: false},
result: false,
@@ -2690,7 +2691,7 @@ func TestEnableLoadBalancers(t *testing.T) {
namespace := "default"
clusterNameLabel := "cluster-name"
roleLabel := "spilo-role"
roles := []PostgresRole{Master, Replica}
roles := []PostgresRole{cluster.masterRole(), cluster.replicaRole()}
sourceRanges := []string{"192.186.1.2/22"}
extTrafficPolicy := "Cluster"

View File

@@ -44,7 +44,7 @@ func (c *Cluster) getRolePods(role PostgresRole) ([]v1.Pod, error) {
return nil, fmt.Errorf("could not get list of pods: %v", err)
}
if role == Master && len(pods.Items) > 1 {
if role == c.masterRole() && len(pods.Items) > 1 {
return nil, fmt.Errorf("too many masters")
}
@@ -234,7 +234,7 @@ func (c *Cluster) MigrateMasterPod(podName spec.NamespacedName) error {
return nil
}
if role := PostgresRole(oldMaster.Labels[c.OpConfig.PodRoleLabel]); role != Master {
if role := PostgresRole(oldMaster.Labels[c.OpConfig.PodRoleLabel]); role != c.masterRole() {
c.logger.Warningf("no action needed: pod %q is not the master (anymore)", podName)
return nil
}
@@ -312,7 +312,7 @@ func (c *Cluster) MigrateReplicaPod(podName spec.NamespacedName, fromNodeName st
return nil
}
if role := PostgresRole(replicaPod.Labels[c.OpConfig.PodRoleLabel]); role != Replica {
if role := PostgresRole(replicaPod.Labels[c.OpConfig.PodRoleLabel]); role != c.replicaRole() {
return fmt.Errorf("check failed: pod %q is not a replica", podName)
}
@@ -416,7 +416,7 @@ func (c *Cluster) recreatePods(pods []v1.Pod, switchoverCandidates []spec.Namesp
for i, pod := range pods {
role := PostgresRole(pod.Labels[c.OpConfig.PodRoleLabel])
if role == Master {
if role == c.masterRole() {
masterPod = &pods[i]
continue
}
@@ -428,9 +428,9 @@ func (c *Cluster) recreatePods(pods []v1.Pod, switchoverCandidates []spec.Namesp
}
newRole := PostgresRole(newPod.Labels[c.OpConfig.PodRoleLabel])
if newRole == Replica {
if newRole == c.replicaRole() {
replicas = append(replicas, util.NameFromMeta(pod.ObjectMeta))
} else if newRole == Master {
} else if newRole == c.masterRole() {
newMasterPod = newPod
}
}
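Throughout the pod-handling code, a pod's current role is read back from its role label and compared against the config-derived values, so the checks hold for whatever leader value the operator is configured with:

    role := PostgresRole(pod.Labels[c.OpConfig.PodRoleLabel])
    if role == c.masterRole() {
        // leave the leader for last and prepare a switchover
    }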

View File

@@ -132,7 +132,7 @@ func getPodIndex(podName string) (int32, error) {
}
func (c *Cluster) preScaleDown(newStatefulSet *appsv1.StatefulSet) error {
masterPod, err := c.getRolePods(Master)
masterPod, err := c.getRolePods(c.masterRole())
if err != nil {
return fmt.Errorf("could not get master pod: %v", err)
}
@@ -393,7 +393,7 @@ func (c *Cluster) generateEndpointSubsets(role PostgresRole) []v1.EndpointSubset
result := make([]v1.EndpointSubset, 0)
pods, err := c.getRolePods(role)
if err != nil {
if role == Master {
if role == c.masterRole() {
c.logger.Warningf("could not obtain the address for %s pod: %v", role, err)
} else {
c.logger.Warningf("could not obtain the addresses for %s pods: %v", role, err)
@@ -410,7 +410,7 @@ func (c *Cluster) generateEndpointSubsets(role PostgresRole) []v1.EndpointSubset
Addresses: endPointAddresses,
Ports: []v1.EndpointPort{{Name: "postgresql", Port: 5432, Protocol: "TCP"}},
})
} else if role == Master {
} else if role == c.masterRole() {
c.logger.Warningf("master is not running, generated master endpoint does not contain any addresses")
}
@@ -682,22 +682,22 @@ func (c *Cluster) deleteLogicalBackupJob() error {
// GetServiceMaster returns cluster's kubernetes master Service
func (c *Cluster) GetServiceMaster() *v1.Service {
return c.Services[Master]
return c.Services[c.masterRole()]
}
// GetServiceReplica returns cluster's kubernetes replica Service
func (c *Cluster) GetServiceReplica() *v1.Service {
return c.Services[Replica]
return c.Services[c.replicaRole()]
}
// GetEndpointMaster returns cluster's kubernetes master Endpoint
func (c *Cluster) GetEndpointMaster() *v1.Endpoints {
return c.Endpoints[Master]
return c.Endpoints[c.masterRole()]
}
// GetEndpointReplica returns cluster's kubernetes replica Endpoint
func (c *Cluster) GetEndpointReplica() *v1.Endpoints {
return c.Endpoints[Replica]
return c.Endpoints[c.replicaRole()]
}
// GetStatefulSet returns cluster's kubernetes StatefulSet

View File

@@ -340,7 +340,7 @@ func (c *Cluster) syncPatroniService() error {
}
func (c *Cluster) syncServices() error {
for _, role := range []PostgresRole{Master, Replica} {
for _, role := range []PostgresRole{c.masterRole(), c.replicaRole()} {
c.logger.Debugf("syncing %s service", role)
if !c.patroniKubernetesUseConfigMaps() {
@@ -545,7 +545,7 @@ func (c *Cluster) syncStatefulSet() error {
podsToRecreate = append(podsToRecreate, pod)
} else {
role := PostgresRole(pod.Labels[c.OpConfig.PodRoleLabel])
if role == Master {
if role == c.masterRole() {
continue
}
switchoverCandidates = append(switchoverCandidates, util.NameFromMeta(pod.ObjectMeta))
@@ -616,7 +616,7 @@ func (c *Cluster) syncStatefulSet() error {
podsToRecreate = append(podsToRecreate, pod)
} else {
role := PostgresRole(pod.Labels[c.OpConfig.PodRoleLabel])
if role == Master {
if role == c.masterRole() {
continue
}
switchoverCandidates = append(switchoverCandidates, util.NameFromMeta(pod.ObjectMeta))
@@ -726,9 +726,9 @@ func (c *Cluster) restartInstances(pods []v1.Pod, restartWait uint32, restartPri
errors := make([]string, 0)
remainingPods := make([]*v1.Pod, 0)
skipRole := Master
skipRole := c.masterRole()
if restartPrimaryFirst {
skipRole = Replica
skipRole = c.replicaRole()
}
for i, pod := range pods {
@@ -1422,7 +1422,7 @@ func (c *Cluster) syncDatabases() error {
if len(createDatabases) > 0 {
// trigger creation of pooler objects in new database in syncConnectionPooler
if c.ConnectionPooler != nil {
for _, role := range [2]PostgresRole{Master, Replica} {
for _, role := range [2]PostgresRole{c.masterRole(), c.replicaRole()} {
c.ConnectionPooler[role].LookupFunction = false
}
}
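The restart ordering in restartInstances follows the same idea: by default the leader is skipped in the first pass so replicas restart first, and the roles swap when restartPrimaryFirst is set:

    skipRole := c.masterRole()
    if restartPrimaryFirst {
        skipRole = c.replicaRole()
    }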

View File

@@ -15,8 +15,6 @@ type PostgresRole string
const (
// spilo roles
Master PostgresRole = "master"
Replica PostgresRole = "replica"
Patroni PostgresRole = "config"
// roles returned by Patroni cluster endpoint
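With Master and Replica deleted, Patroni is the only spilo-role constant left; it names the config service (see serviceName above) and does not depend on pod labels, so it can stay compile-time fixed. In sketch form, eliding the Patroni-endpoint roles that follow in this file:

    const (
        // spilo roles: only the config-service role remains constant
        Patroni PostgresRole = "config"
    )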

View File

@@ -340,7 +340,7 @@ func (c *Cluster) waitForPodLabel(podEvents chan PodEvent, stopCh chan struct{},
podRole := PostgresRole(podEvent.CurPod.Labels[c.OpConfig.PodRoleLabel])
if role == nil {
if podRole == Master || podRole == Replica {
if podRole == c.masterRole() || podRole == c.replicaRole() {
return podEvent.CurPod, nil
}
} else if *role == podRole {
@@ -399,12 +399,12 @@ func (c *Cluster) _waitPodLabelsReady(anyReplica bool) error {
}
masterListOption := metav1.ListOptions{
LabelSelector: labels.Merge(ls, labels.Set{
c.OpConfig.PodRoleLabel: string(Master),
c.OpConfig.PodRoleLabel: string(c.masterRole()),
}).String(),
}
replicaListOption := metav1.ListOptions{
LabelSelector: labels.Merge(ls, labels.Set{
c.OpConfig.PodRoleLabel: string(Replica),
c.OpConfig.PodRoleLabel: string(c.replicaRole()),
}).String(),
}
podsNumber = 1
@@ -515,7 +515,7 @@ func (c *Cluster) roleLabelsSet(shouldAddExtraLabels bool, role PostgresRole) la
func (c *Cluster) dnsName(role PostgresRole) string {
var dnsString, oldDnsString string
if role == Master {
if role == c.masterRole() {
dnsString = c.masterDNSName(c.Name)
} else {
dnsString = c.replicaDNSName(c.Name)
@@ -524,7 +524,7 @@ func (c *Cluster) dnsName(role PostgresRole) string {
// if cluster name starts with teamID we might need to provide backwards compatibility
clusterNameWithoutTeamPrefix, _ := acidv1.ExtractClusterName(c.Name, c.Spec.TeamID)
if clusterNameWithoutTeamPrefix != "" {
if role == Master {
if role == c.masterRole() {
oldDnsString = c.oldMasterDNSName(clusterNameWithoutTeamPrefix)
} else {
oldDnsString = c.oldReplicaDNSName(clusterNameWithoutTeamPrefix)

View File

@@ -161,7 +161,7 @@ func checkResourcesInheritedAnnotations(cluster *Cluster, resultAnnotations map[
}
checkPooler := func(annotations map[string]string) error {
for _, role := range []PostgresRole{Master, Replica} {
for _, role := range []PostgresRole{cluster.masterRole(), cluster.replicaRole()} {
deploy, err := cluster.KubeClient.Deployments(namespace).Get(context.TODO(), cluster.connectionPoolerName(role), metav1.GetOptions{})
if err != nil {
return err
@@ -244,7 +244,7 @@ func checkResourcesInheritedAnnotations(cluster *Cluster, resultAnnotations map[
func createPods(cluster *Cluster) []v1.Pod {
podsList := make([]v1.Pod, 0)
for i, role := range []PostgresRole{Master, Replica} {
for i, role := range []PostgresRole{cluster.masterRole(), cluster.replicaRole()} {
podsList = append(podsList, v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("%s-%d", clusterName, i),
@@ -325,7 +325,7 @@ func newInheritedAnnotationsCluster(client k8sutil.KubernetesClient) (*Cluster,
if err != nil {
return nil, err
}
_, err = cluster.createService(Master)
_, err = cluster.createService(cluster.masterRole())
if err != nil {
return nil, err
}
@@ -365,7 +365,7 @@ func newInheritedAnnotationsCluster(client k8sutil.KubernetesClient) (*Cluster,
}
func createPatroniResources(cluster *Cluster) error {
patroniService := cluster.generateService(Replica, &pg.Spec)
patroniService := cluster.generateService(cluster.replicaRole(), &pg.Spec)
patroniService.ObjectMeta.Name = cluster.serviceName(Patroni)
_, err := cluster.KubeClient.Services(namespace).Create(context.TODO(), patroniService, metav1.CreateOptions{})
if err != nil {
@@ -479,7 +479,7 @@ func annotateResources(cluster *Cluster) error {
}
}
for _, role := range []PostgresRole{Master, Replica} {
for _, role := range []PostgresRole{cluster.masterRole(), cluster.replicaRole()} {
deploy, err := cluster.KubeClient.Deployments(namespace).Get(context.TODO(), cluster.connectionPoolerName(role), metav1.GetOptions{})
if err != nil {
return err

View File

@@ -108,7 +108,7 @@ func (c *Controller) attemptToMoveMasterPodsOffNode(node *v1.Node) error {
podName := util.NameFromMeta(pod.ObjectMeta)
role, ok := pod.Labels[c.opConfig.PodRoleLabel]
if !ok || cluster.PostgresRole(role) != cluster.Master {
if !ok || cluster.PostgresRole(role) != cluster.PostgresRole(c.opConfig.PodLeaderLabelValue) {
if !ok {
c.logger.Warningf("could not move pod %q: pod has no role", podName)
}
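The node controller is the one call site that does not go through the new methods: this code runs without a *Cluster receiver, so it derives the leader role directly from its own copy of the operator config (the leader local below is a hypothetical name for illustration):

    // cluster is the package qualifier here, not an instance
    leader := cluster.PostgresRole(c.opConfig.PodLeaderLabelValue)
    if !ok || cluster.PostgresRole(role) != leader {
        // pod has no role label, or is not the leader
    }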