This commit is contained in:
Lucas Nikola Pape 2026-03-02 12:21:36 +02:00 committed by GitHub
commit 15cc3915c4
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
16 changed files with 725 additions and 34 deletions

View File

@ -454,6 +454,18 @@ spec:
enable_replica_pooler_load_balancer:
type: boolean
default: false
enable_master_node_port:
type: boolean
default: false
enable_master_pooler_node_port:
type: boolean
default: false
enable_replica_node_port:
type: boolean
default: false
enable_replica_pooler_node_port:
type: boolean
default: false
external_traffic_policy:
type: string
enum:

View File

@ -196,6 +196,26 @@ spec:
type: boolean
enableReplicaPoolerLoadBalancer:
type: boolean
enableMasterNodePort:
type: boolean
masterNodePort:
type: integer
minimum: 0
enableMasterPoolerNodePort:
type: boolean
masterPoolerNodePort:
type: integer
minimum: 0
enableReplicaNodePort:
type: boolean
replicaNodePort:
type: integer
minimum: 0
enableReplicaPoolerNodePort:
type: boolean
replicaPoolerNodePort:
type: integer
minimum: 0
enableShmVolume:
type: boolean
env:

View File

@ -926,6 +926,41 @@ For the `external-dns.alpha.kubernetes.io/hostname` annotation the `-pooler`
suffix will be appended to the cluster name used in the template which is
defined in `master|replica_dns_name_format`.
## Node Ports
As an alternative to load balancers, node ports can be used. Kubernetes services of type
`NodePort` redirect traffic from a specified port on your Kubernetes nodes to your service.
To expose your services to an external network with node ports you can set `enableMasterNodePort` and/or `enableReplicaNodePort` to `true`
in your cluster manifest. If any of these variables are omitted from the manifest, the operator configuration settings `enable_master_node_port` and `enable_replica_node_port` apply.
Note that the operator settings affect all Postgresql services running in all namespaces watched
by the operator.
**Enabling a NodePort configuration will override the corresponding LoadBalancer configuration.**
There are multiple options to specify service annotations that will be merged
with each other and override in the following order (where latter take
precedence):
1. Globally configured `custom_service_annotations`
2. `serviceAnnotations` specified in the cluster manifest
3. `masterServiceAnnotations` and `replicaServiceAnnotations` specified in the cluster manifest
Load-Balancer specific annotations are not applied.
Node port services can also be configured for the [connection pooler](user.md#connection-pooler) pods
with the manifest flags `enableMasterPoolerNodePort` and/or `enableReplicaPoolerNodePort` or in the operator configuration with `enable_master_pooler_node_port`
and/or `enable_replica_pooler_node_port`.
To configure which ports Kubernetes should use for your NodePort service you can configure ports in your cluster manifest
for each type:
- masterNodePort
- masterPoolerNodePort
- replicaNodePort
- replicaPoolerNodePort
When not defined or set to 0, Kubernetes will choose a port for you from [your Kubernetes cluster's configured range](https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport).
## Running periodic 'autorepair' scans of K8s objects
The Postgres Operator periodically scans all K8s objects belonging to each

View File

@ -114,6 +114,48 @@ These parameters are grouped directly under the `spec` key in the manifest.
this parameter. Optional, when empty the load balancer service becomes
inaccessible from outside of the Kubernetes cluster.
* **enableMasterNodePort**
boolean flag to override the operator defaults (set by the
`enable_master_node_port` parameter) to define whether to enable the node
port pointing to the Postgres primary. Optional. Overrides `enableMasterLoadBalancer`.
* **enableMasterPoolerNodePort**
boolean flag to override the operator defaults (set by the
`enable_master_pooler_node_port` parameter) to define whether to enable
the node port for master pooler pods pointing to the Postgres primary.
Optional. Overrides `enableMasterPoolerLoadBalancer`.
* **enableReplicaNodePort**
boolean flag to override the operator defaults (set by the
`enable_replica_node_port` parameter) to define whether to enable the node
port pointing to the Postgres standby instances. Optional. Overrides `enableReplicaLoadBalancer`.
* **enableReplicaPoolerNodePort**
boolean flag to override the operator defaults (set by the
`enable_replica_pooler_node_port` parameter) to define whether to enable
the node port for replica pooler pods pointing to the Postgres standby
instances. Optional. Overrides `enableReplicaPoolerLoadBalancer`.
* **masterNodePort**
integer flag to specify a port number for the node port to the Postgres primary.
Only used when `enableMasterNodePort` or `enable_master_node_port` are enabled.
Optional. Kubernetes will provide a port number for you if not specified.
* **masterPoolerNodePort**
integer flag to specify a port number for the node port for the master pooler pods pointing to the Postgres primary.
Only used when `enableMasterPoolerNodePort` or `enable_master_pooler_node_port` are enabled.
Optional. Kubernetes will provide a port number for you if not specified.
* **replicaNodePort**
integer flag to specify a port number for the node port pointing to the Postgres standby instances.
Only used when `enableReplicaNodePort` or `enable_replica_node_port` are enabled.
Optional. Kubernetes will provide a port number for you if not specified.
* **replicaPoolerNodePort**
integer flag to specify a port number for the node port for the replica pooler pods pointing to the Postgres standby instances.
Only used when `enableReplicaPoolerNodePort` or `enable_replica_pooler_node_port` are enabled.
Optional. Kubernetes will provide a port number for you if not specified.
* **maintenanceWindows**
a list which defines specific time frames when certain maintenance operations
such as automatic major upgrades or master pod migration are allowed to happen.

View File

@ -618,6 +618,18 @@ var OperatorConfigCRDResourceValidation = apiextv1.CustomResourceValidation{
"enable_replica_pooler_load_balancer": {
Type: "boolean",
},
"enable_master_node_port": {
Type: "boolean",
},
"enable_master_pooler_node_port": {
Type: "boolean",
},
"enable_replica_node_port": {
Type: "boolean",
},
"enable_replica_pooler_node_port": {
Type: "boolean",
},
"external_traffic_policy": {
Type: "string",
Enum: []apiextv1.JSON{

View File

@ -136,17 +136,24 @@ type OperatorTimeouts struct {
// LoadBalancerConfiguration defines the LB configuration.
// The diff rendering left the pre-change field set in place next to the
// post-change one, which duplicates every field tag; only the post-change
// struct is kept here.
type LoadBalancerConfiguration struct {
	DbHostedZone                    string `json:"db_hosted_zone,omitempty"`
	EnableMasterLoadBalancer        bool   `json:"enable_master_load_balancer,omitempty"`
	EnableMasterPoolerLoadBalancer  bool   `json:"enable_master_pooler_load_balancer,omitempty"`
	EnableReplicaLoadBalancer       bool   `json:"enable_replica_load_balancer,omitempty"`
	EnableReplicaPoolerLoadBalancer bool   `json:"enable_replica_pooler_load_balancer,omitempty"`
	// NodePort toggles are kept in LoadBalancerConfiguration because all the
	// other parameters (annotations, DNS name formats, traffic policy) apply
	// to NodePort services too.
	EnableMasterNodePort        bool                  `json:"enable_master_node_port,omitempty"`
	EnableMasterPoolerNodePort  bool                  `json:"enable_master_pooler_node_port,omitempty"`
	EnableReplicaNodePort       bool                  `json:"enable_replica_node_port,omitempty"`
	EnableReplicaPoolerNodePort bool                  `json:"enable_replica_pooler_node_port,omitempty"`
	CustomServiceAnnotations    map[string]string     `json:"custom_service_annotations,omitempty"`
	MasterDNSNameFormat         config.StringTemplate `json:"master_dns_name_format,omitempty"`
	MasterLegacyDNSNameFormat   config.StringTemplate `json:"master_legacy_dns_name_format,omitempty"`
	ReplicaDNSNameFormat        config.StringTemplate `json:"replica_dns_name_format,omitempty"`
	ReplicaLegacyDNSNameFormat  config.StringTemplate `json:"replica_legacy_dns_name_format,omitempty"`
	ExternalTrafficPolicy       string                `json:"external_traffic_policy" default:"Cluster"`
}
// AWSGCPConfiguration defines the configuration for AWS

View File

@ -63,6 +63,18 @@ type PostgresSpec struct {
EnableReplicaLoadBalancer *bool `json:"enableReplicaLoadBalancer,omitempty"`
EnableReplicaPoolerLoadBalancer *bool `json:"enableReplicaPoolerLoadBalancer,omitempty"`
// vars to enable and configure nodeport services
// set ports to 0 or nil to let kubernetes decide which port to use
// overrides loadbalancer configuration
EnableMasterNodePort *bool `json:"enableMasterNodePort,omitempty"`
MasterNodePort *int32 `json:"masterNodePort,omitempty"`
EnableMasterPoolerNodePort *bool `json:"enableMasterPoolerNodePort,omitempty"`
MasterPoolerNodePort *int32 `json:"masterPoolerNodePort,omitempty"`
EnableReplicaNodePort *bool `json:"enableReplicaNodePort,omitempty"`
ReplicaNodePort *int32 `json:"replicaNodePort,omitempty"`
EnableReplicaPoolerNodePort *bool `json:"enableReplicaPoolerNodePort,omitempty"`
ReplicaPoolerNodePort *int32 `json:"replicaPoolerNodePort,omitempty"`
// deprecated load balancer settings maintained for backward compatibility
// see "Load balancers" operator docs
UseLoadBalancer *bool `json:"useLoadBalancer,omitempty"`

View File

@ -715,6 +715,46 @@ func (in *PostgresSpec) DeepCopyInto(out *PostgresSpec) {
*out = new(bool)
**out = **in
}
if in.EnableMasterNodePort != nil {
in, out := &in.EnableMasterNodePort, &out.EnableMasterNodePort
*out = new(bool)
**out = **in
}
if in.MasterNodePort != nil {
in, out := &in.MasterNodePort, &out.MasterNodePort
*out = new(int32)
**out = **in
}
if in.EnableMasterPoolerNodePort != nil {
in, out := &in.EnableMasterPoolerNodePort, &out.EnableMasterPoolerNodePort
*out = new(bool)
**out = **in
}
if in.MasterPoolerNodePort != nil {
in, out := &in.MasterPoolerNodePort, &out.MasterPoolerNodePort
*out = new(int32)
**out = **in
}
if in.EnableReplicaNodePort != nil {
in, out := &in.EnableReplicaNodePort, &out.EnableReplicaNodePort
*out = new(bool)
**out = **in
}
if in.ReplicaNodePort != nil {
in, out := &in.ReplicaNodePort, &out.ReplicaNodePort
*out = new(int32)
**out = **in
}
if in.EnableReplicaPoolerNodePort != nil {
in, out := &in.EnableReplicaPoolerNodePort, &out.EnableReplicaPoolerNodePort
*out = new(bool)
**out = **in
}
if in.ReplicaPoolerNodePort != nil {
in, out := &in.ReplicaPoolerNodePort, &out.ReplicaPoolerNodePort
*out = new(int32)
**out = **in
}
if in.UseLoadBalancer != nil {
in, out := &in.UseLoadBalancer, &out.UseLoadBalancer
*out = new(bool)

View File

@ -854,6 +854,14 @@ func (c *Cluster) compareServices(old, new *v1.Service) (bool, string) {
return false, "new service's ExternalTrafficPolicy does not match the current one"
}
if len(old.Spec.Ports) > 0 && len(new.Spec.Ports) > 0 {
// we need to check whether the new port is not zero (=user-defined)
// and only overwrite if it is
if new.Spec.Ports[0].NodePort != 0 && old.Spec.Ports[0].NodePort != new.Spec.Ports[0].NodePort {
return false, "new service's NodePort does not match the current one"
}
}
return true, ""
}

View File

@ -1346,7 +1346,8 @@ func newService(
svcType v1.ServiceType,
sourceRanges []string,
selector map[string]string,
policy v1.ServiceExternalTrafficPolicyType) *v1.Service {
policy v1.ServiceExternalTrafficPolicyType,
nodePort *int32) *v1.Service {
svc := &v1.Service{
Spec: v1.ServiceSpec{
Selector: selector,
@ -1356,6 +1357,16 @@ func newService(
},
}
svc.Annotations = annotations
if nodePort != nil {
svc.Spec.Ports = []v1.ServicePort{
{
Name: "port",
NodePort: *nodePort,
},
}
}
return svc
}
@ -1383,6 +1394,7 @@ func TestCompareServices(t *testing.T) {
[]string{"128.141.0.0/16", "137.138.0.0/16"},
nil,
defaultPolicy,
nil,
)
ownerRef := metav1.OwnerReference{
@ -1394,6 +1406,9 @@ func TestCompareServices(t *testing.T) {
serviceWithOwnerReference.ObjectMeta.OwnerReferences = append(serviceWithOwnerReference.ObjectMeta.OwnerReferences, ownerRef)
portZero := int32(0)
portNotZero := int32(1337)
tests := []struct {
about string
current *v1.Service
@ -1410,7 +1425,7 @@ func TestCompareServices(t *testing.T) {
},
v1.ServiceTypeClusterIP,
[]string{"128.141.0.0/16", "137.138.0.0/16"},
nil, defaultPolicy),
nil, defaultPolicy, nil),
new: newService(
map[string]string{
constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do",
@ -1418,7 +1433,7 @@ func TestCompareServices(t *testing.T) {
},
v1.ServiceTypeClusterIP,
[]string{"128.141.0.0/16", "137.138.0.0/16"},
nil, defaultPolicy),
nil, defaultPolicy, nil),
match: true,
},
{
@ -1430,7 +1445,7 @@ func TestCompareServices(t *testing.T) {
},
v1.ServiceTypeClusterIP,
[]string{"128.141.0.0/16", "137.138.0.0/16"},
nil, defaultPolicy),
nil, defaultPolicy, nil),
new: newService(
map[string]string{
constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do",
@ -1438,7 +1453,7 @@ func TestCompareServices(t *testing.T) {
},
v1.ServiceTypeLoadBalancer,
[]string{"128.141.0.0/16", "137.138.0.0/16"},
nil, defaultPolicy),
nil, defaultPolicy, nil),
match: false,
reason: `new service's type "LoadBalancer" does not match the current one "ClusterIP"`,
},
@ -1451,7 +1466,7 @@ func TestCompareServices(t *testing.T) {
},
v1.ServiceTypeLoadBalancer,
[]string{"128.141.0.0/16", "137.138.0.0/16"},
nil, defaultPolicy),
nil, defaultPolicy, nil),
new: newService(
map[string]string{
constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do",
@ -1459,7 +1474,7 @@ func TestCompareServices(t *testing.T) {
},
v1.ServiceTypeLoadBalancer,
[]string{"185.249.56.0/22"},
nil, defaultPolicy),
nil, defaultPolicy, nil),
match: false,
reason: `new service's LoadBalancerSourceRange does not match the current one`,
},
@ -1472,7 +1487,7 @@ func TestCompareServices(t *testing.T) {
},
v1.ServiceTypeLoadBalancer,
[]string{"128.141.0.0/16", "137.138.0.0/16"},
nil, defaultPolicy),
nil, defaultPolicy, nil),
new: newService(
map[string]string{
constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do",
@ -1480,7 +1495,7 @@ func TestCompareServices(t *testing.T) {
},
v1.ServiceTypeLoadBalancer,
[]string{},
nil, defaultPolicy),
nil, defaultPolicy, nil),
match: false,
reason: `new service's LoadBalancerSourceRange does not match the current one`,
},
@ -1493,7 +1508,7 @@ func TestCompareServices(t *testing.T) {
},
v1.ServiceTypeClusterIP,
[]string{"128.141.0.0/16", "137.138.0.0/16"},
nil, defaultPolicy),
nil, defaultPolicy, nil),
new: serviceWithOwnerReference,
match: false,
},
@ -1503,12 +1518,12 @@ func TestCompareServices(t *testing.T) {
map[string]string{},
v1.ServiceTypeClusterIP,
[]string{},
nil, defaultPolicy),
nil, defaultPolicy, nil),
new: newService(
map[string]string{},
v1.ServiceTypeClusterIP,
[]string{},
map[string]string{"cluster-name": "clstr", "spilo-role": "master"}, defaultPolicy),
map[string]string{"cluster-name": "clstr", "spilo-role": "master"}, defaultPolicy, nil),
match: false,
},
{
@ -1517,14 +1532,42 @@ func TestCompareServices(t *testing.T) {
map[string]string{},
v1.ServiceTypeClusterIP,
[]string{},
nil, defaultPolicy),
nil, defaultPolicy, nil),
new: newService(
map[string]string{},
v1.ServiceTypeClusterIP,
[]string{},
nil, v1.ServiceExternalTrafficPolicyTypeLocal),
nil, v1.ServiceExternalTrafficPolicyTypeLocal, nil),
match: false,
},
{
about: "services differ on node port",
current: newService(
map[string]string{},
v1.ServiceTypeNodePort,
[]string{},
nil, defaultPolicy, &portZero),
new: newService(
map[string]string{},
v1.ServiceTypeNodePort,
[]string{},
nil, defaultPolicy, &portNotZero),
match: false,
},
{
about: "services do not differ on node port when requesting 0",
current: newService(
map[string]string{},
v1.ServiceTypeNodePort,
[]string{},
nil, defaultPolicy, &portNotZero),
new: newService(
map[string]string{},
v1.ServiceTypeNodePort,
[]string{},
nil, defaultPolicy, &portZero),
match: true,
},
}
for _, tt := range tests {

View File

@ -508,6 +508,10 @@ func (c *Cluster) generateConnectionPoolerService(connectionPooler *ConnectionPo
c.configureLoadBalanceService(&serviceSpec, spec.AllowedSourceRanges)
}
if ok, port := c.shouldCreateNodePortForPoolerService(poolerRole, spec); ok {
c.configureNodePortService(&serviceSpec, port)
}
service := &v1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: connectionPooler.Name,
@ -532,7 +536,10 @@ func (c *Cluster) generatePoolerServiceAnnotations(role PostgresRole, spec *acid
var dnsString string
annotations := c.getCustomServiceAnnotations(role, spec)
if c.shouldCreateLoadBalancerForPoolerService(role, spec) {
// we do not want the load balancer specific annotations on a NodePort service
nodePort, _ := c.shouldCreateNodePortForPoolerService(role, spec)
if c.shouldCreateLoadBalancerForPoolerService(role, spec) && !nodePort {
// set ELB Timeout annotation with default value
if _, ok := annotations[constants.ElbTimeoutAnnotationName]; !ok {
annotations[constants.ElbTimeoutAnnotationName] = constants.ElbTimeoutAnnotationValue
@ -577,6 +584,37 @@ func (c *Cluster) shouldCreateLoadBalancerForPoolerService(role PostgresRole, sp
}
}
// shouldCreateNodePortForPoolerService decides whether the connection pooler
// service for the given role must be of type NodePort, and which node port
// number to request for it. A flag explicitly set in the Postgresql manifest
// (Enable{Master,Replica}PoolerNodePort) always takes precedence over the
// operator configuration. The requested port comes only from the manifest and
// defaults to 0, which lets Kubernetes allocate one.
func (c *Cluster) shouldCreateNodePortForPoolerService(role PostgresRole, spec *acidv1.PostgresSpec) (bool, int32) {
	var manifestFlag *bool
	var manifestPort *int32
	var opConfigDefault bool

	switch role {
	case Master:
		manifestFlag, manifestPort, opConfigDefault = spec.EnableMasterPoolerNodePort, spec.MasterPoolerNodePort, c.OpConfig.EnableMasterPoolerNodePort
	case Replica:
		manifestFlag, manifestPort, opConfigDefault = spec.EnableReplicaPoolerNodePort, spec.ReplicaPoolerNodePort, c.OpConfig.EnableReplicaPoolerNodePort
	default:
		panic(fmt.Sprintf("Unknown role %v", role))
	}

	// nothing in the manifest -> follow the operator configuration
	if manifestFlag == nil {
		return opConfigDefault, 0
	}
	port := int32(0)
	if manifestPort != nil {
		port = *manifestPort
	}
	return *manifestFlag, port
}
func (c *Cluster) listPoolerPods(listOptions metav1.ListOptions) ([]v1.Pod, error) {
pods, err := c.KubeClient.Pods(c.Namespace).List(context.TODO(), listOptions)
if err != nil {

View File

@ -1149,3 +1149,181 @@ func TestConnectionPoolerServiceSpec(t *testing.T) {
}
}
}
// TestConnectionPoolerServiceType verifies that generateConnectionPoolerService
// produces the expected Kubernetes service type (ClusterIP, LoadBalancer or
// NodePort) for master and replica pooler objects, driven by the
// Enable*PoolerLoadBalancer / Enable*PoolerNodePort manifest flags. It also
// covers the precedence rule that NodePort overrides LoadBalancer.
func TestConnectionPoolerServiceType(t *testing.T) {
	testName := "Test connection pooler service type selection"
	// Minimal cluster with pooler resource defaults only; no load balancer or
	// node port flags are set in the operator config, so the per-test manifest
	// spec fully determines the resulting service type.
	cluster := New(
		Config{
			OpConfig: config.Config{
				ProtectedRoles: []string{"admin"},
				Auth: config.Auth{
					SuperUsername:       superUserName,
					ReplicationUsername: replicationUserName,
				},
				ConnectionPooler: config.ConnectionPooler{
					ConnectionPoolerDefaultCPURequest:    "100m",
					ConnectionPoolerDefaultCPULimit:      "100m",
					ConnectionPoolerDefaultMemoryRequest: "100Mi",
					ConnectionPoolerDefaultMemoryLimit:   "100Mi",
				},
				Resources: config.Resources{
					EnableOwnerReferences: util.True(),
				},
			},
		},
		k8sutil.KubernetesClient{},
		acidv1.Postgresql{},
		logger,
		eventRecorder,
	)

	// Statefulset is needed as the owner reference source for the services.
	cluster.Statefulset = &appsv1.StatefulSet{
		ObjectMeta: metav1.ObjectMeta{
			Name: "test-sts",
		},
	}
	// Pre-populate pooler objects for both roles; only Role matters here,
	// deployments/services are generated by the call under test.
	cluster.ConnectionPooler = map[PostgresRole]*ConnectionPoolerObjects{
		Master: {
			Deployment:     nil,
			Service:        nil,
			LookupFunction: false,
			Role:           Master,
		},
		Replica: {
			Deployment:     nil,
			Service:        nil,
			LookupFunction: false,
			Role:           Replica,
		},
	}

	tests := []struct {
		subTest      string
		spec         *acidv1.PostgresSpec
		cluster      *Cluster
		expectedType map[PostgresRole]v1.ServiceType
	}{
		{
			// no flags at all -> plain ClusterIP services
			subTest: "default configuration -> ClusterIP for both",
			spec: &acidv1.PostgresSpec{
				ConnectionPooler: &acidv1.ConnectionPooler{},
			},
			cluster: cluster,
			expectedType: map[PostgresRole]v1.ServiceType{
				Master:  v1.ServiceTypeClusterIP,
				Replica: v1.ServiceTypeClusterIP,
			},
		},
		{
			subTest: "LoadBalancer for both roles",
			spec: &acidv1.PostgresSpec{
				ConnectionPooler:                &acidv1.ConnectionPooler{},
				EnableMasterPoolerLoadBalancer:  boolToPointer(true),
				EnableReplicaPoolerLoadBalancer: boolToPointer(true),
			},
			cluster: cluster,
			expectedType: map[PostgresRole]v1.ServiceType{
				Master:  v1.ServiceTypeLoadBalancer,
				Replica: v1.ServiceTypeLoadBalancer,
			},
		},
		{
			subTest: "LoadBalancer for master",
			spec: &acidv1.PostgresSpec{
				ConnectionPooler:               &acidv1.ConnectionPooler{},
				EnableMasterPoolerLoadBalancer: boolToPointer(true),
			},
			cluster: cluster,
			expectedType: map[PostgresRole]v1.ServiceType{
				Master:  v1.ServiceTypeLoadBalancer,
				Replica: v1.ServiceTypeClusterIP,
			},
		},
		{
			subTest: "LoadBalancer for replica",
			spec: &acidv1.PostgresSpec{
				ConnectionPooler:                &acidv1.ConnectionPooler{},
				EnableReplicaPoolerLoadBalancer: boolToPointer(true),
			},
			cluster: cluster,
			expectedType: map[PostgresRole]v1.ServiceType{
				Master:  v1.ServiceTypeClusterIP,
				Replica: v1.ServiceTypeLoadBalancer,
			},
		},
		{
			subTest: "NodePort for both roles",
			spec: &acidv1.PostgresSpec{
				ConnectionPooler:            &acidv1.ConnectionPooler{},
				EnableMasterPoolerNodePort:  boolToPointer(true),
				EnableReplicaPoolerNodePort: boolToPointer(true),
			},
			cluster: cluster,
			expectedType: map[PostgresRole]v1.ServiceType{
				Master:  v1.ServiceTypeNodePort,
				Replica: v1.ServiceTypeNodePort,
			},
		},
		{
			subTest: "NodePort for master",
			spec: &acidv1.PostgresSpec{
				ConnectionPooler:           &acidv1.ConnectionPooler{},
				EnableMasterPoolerNodePort: boolToPointer(true),
			},
			cluster: cluster,
			expectedType: map[PostgresRole]v1.ServiceType{
				Master:  v1.ServiceTypeNodePort,
				Replica: v1.ServiceTypeClusterIP,
			},
		},
		{
			subTest: "NodePort for replica",
			spec: &acidv1.PostgresSpec{
				ConnectionPooler:            &acidv1.ConnectionPooler{},
				EnableReplicaPoolerNodePort: boolToPointer(true),
			},
			cluster: cluster,
			expectedType: map[PostgresRole]v1.ServiceType{
				Master:  v1.ServiceTypeClusterIP,
				Replica: v1.ServiceTypeNodePort,
			},
		},
		{
			// both flag families enabled -> NodePort must win
			subTest: "NodePort overrides LoadBalancer for both roles",
			spec: &acidv1.PostgresSpec{
				ConnectionPooler:                &acidv1.ConnectionPooler{},
				EnableMasterPoolerLoadBalancer:  boolToPointer(true),
				EnableReplicaPoolerLoadBalancer: boolToPointer(true),
				EnableMasterPoolerNodePort:      boolToPointer(true),
				EnableReplicaPoolerNodePort:     boolToPointer(true),
			},
			cluster: cluster,
			expectedType: map[PostgresRole]v1.ServiceType{
				Master:  v1.ServiceTypeNodePort,
				Replica: v1.ServiceTypeNodePort,
			},
		},
	}

	roles := []PostgresRole{Master, Replica}
	for _, tt := range tests {
		// each case installs its own manifest spec on the shared cluster
		tt.cluster.Spec = *tt.spec
		for _, role := range roles {
			svc := tt.cluster.generateConnectionPoolerService(tt.cluster.ConnectionPooler[role])
			expected, ok := tt.expectedType[role]
			if !ok {
				t.Fatalf("%s [%s]: missing expectedType for role %v", testName, tt.subTest, role)
			}
			if svc.Spec.Type != expected {
				t.Errorf("%s [%s] role=%s: service Type is incorrect, got %s, expected %s",
					testName, tt.subTest, role, svc.Spec.Type, expected)
			}
		}
	}
}

View File

@ -1996,6 +1996,37 @@ func (c *Cluster) shouldCreateLoadBalancerForService(role PostgresRole, spec *ac
}
// shouldCreateNodePortForService decides whether the database service for the
// given role must be of type NodePort, and which node port number to request.
// A flag explicitly set in the Postgresql manifest
// (Enable{Master,Replica}NodePort) always takes precedence over the operator
// configuration. The requested port comes only from the manifest and defaults
// to 0, which lets Kubernetes allocate one.
func (c *Cluster) shouldCreateNodePortForService(role PostgresRole, spec *acidv1.PostgresSpec) (bool, int32) {
	var enabled *bool
	var requested *int32
	var fallback bool

	switch role {
	case Master:
		enabled, requested, fallback = spec.EnableMasterNodePort, spec.MasterNodePort, c.OpConfig.EnableMasterNodePort
	case Replica:
		enabled, requested, fallback = spec.EnableReplicaNodePort, spec.ReplicaNodePort, c.OpConfig.EnableReplicaNodePort
	default:
		panic(fmt.Sprintf("Unknown role %v", role))
	}

	// no explicit manifest value -> fall back to the operator configuration
	if enabled == nil {
		return fallback, 0
	}
	port := int32(0)
	if requested != nil {
		port = *requested
	}
	return *enabled, port
}
func (c *Cluster) generateService(role PostgresRole, spec *acidv1.PostgresSpec) *v1.Service {
serviceSpec := v1.ServiceSpec{
Ports: []v1.ServicePort{{Name: "postgresql", Port: pgPort, TargetPort: intstr.IntOrString{IntVal: pgPort}}},
@ -2012,6 +2043,10 @@ func (c *Cluster) generateService(role PostgresRole, spec *acidv1.PostgresSpec)
c.configureLoadBalanceService(&serviceSpec, spec.AllowedSourceRanges)
}
if ok, port := c.shouldCreateNodePortForService(role, spec); ok {
c.configureNodePortService(&serviceSpec, port)
}
service := &v1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: c.serviceName(role),
@ -2041,10 +2076,22 @@ func (c *Cluster) configureLoadBalanceService(serviceSpec *v1.ServiceSpec, sourc
serviceSpec.Type = v1.ServiceTypeLoadBalancer
}
// configureNodePortService turns serviceSpec into a NodePort service and
// applies the operator-wide external traffic policy. A non-zero port pins the
// node port of the first service port; zero leaves the choice to Kubernetes.
func (c *Cluster) configureNodePortService(serviceSpec *v1.ServiceSpec, port int32) {
	serviceSpec.Type = v1.ServiceTypeNodePort
	serviceSpec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyType(c.OpConfig.ExternalTrafficPolicy)
	if port == 0 || len(serviceSpec.Ports) == 0 {
		return
	}
	serviceSpec.Ports[0].NodePort = port
}
func (c *Cluster) generateServiceAnnotations(role PostgresRole, spec *acidv1.PostgresSpec) map[string]string {
annotations := c.getCustomServiceAnnotations(role, spec)
if c.shouldCreateLoadBalancerForService(role, spec) {
// we do not want the load balancer specific annotations on a NodePort service
nodePort, _ := c.shouldCreateNodePortForService(role, spec)
if c.shouldCreateLoadBalancerForService(role, spec) && !nodePort {
dnsName := c.dnsName(role)
// Just set ELB Timeout annotation with default value, if it does not

View File

@ -2970,32 +2970,32 @@ func newLBFakeClient() (k8sutil.KubernetesClient, *fake.Clientset) {
}, clientSet
}
func getServices(serviceType v1.ServiceType, sourceRanges []string, extTrafficPolicy, clusterName string) []v1.ServiceSpec {
func getServices(serviceType v1.ServiceType, sourceRanges []string, extTrafficPolicy, clusterName string, nodePort int32) []v1.ServiceSpec {
return []v1.ServiceSpec{
{
ExternalTrafficPolicy: v1.ServiceExternalTrafficPolicyType(extTrafficPolicy),
LoadBalancerSourceRanges: sourceRanges,
Ports: []v1.ServicePort{{Name: "postgresql", Port: 5432, TargetPort: intstr.IntOrString{IntVal: 5432}}},
Ports: []v1.ServicePort{{Name: "postgresql", Port: 5432, TargetPort: intstr.IntOrString{IntVal: 5432}, NodePort: nodePort}},
Type: serviceType,
},
{
ExternalTrafficPolicy: v1.ServiceExternalTrafficPolicyType(extTrafficPolicy),
LoadBalancerSourceRanges: sourceRanges,
Ports: []v1.ServicePort{{Name: clusterName + "-pooler", Port: 5432, TargetPort: intstr.IntOrString{IntVal: 5432}}},
Ports: []v1.ServicePort{{Name: clusterName + "-pooler", Port: 5432, TargetPort: intstr.IntOrString{IntVal: 5432}, NodePort: nodePort}},
Selector: map[string]string{"connection-pooler": clusterName + "-pooler"},
Type: serviceType,
},
{
ExternalTrafficPolicy: v1.ServiceExternalTrafficPolicyType(extTrafficPolicy),
LoadBalancerSourceRanges: sourceRanges,
Ports: []v1.ServicePort{{Name: "postgresql", Port: 5432, TargetPort: intstr.IntOrString{IntVal: 5432}}},
Ports: []v1.ServicePort{{Name: "postgresql", Port: 5432, TargetPort: intstr.IntOrString{IntVal: 5432}, NodePort: nodePort}},
Selector: map[string]string{"spilo-role": "replica", "application": "spilo", "cluster-name": clusterName},
Type: serviceType,
},
{
ExternalTrafficPolicy: v1.ServiceExternalTrafficPolicyType(extTrafficPolicy),
LoadBalancerSourceRanges: sourceRanges,
Ports: []v1.ServicePort{{Name: clusterName + "-pooler-repl", Port: 5432, TargetPort: intstr.IntOrString{IntVal: 5432}}},
Ports: []v1.ServicePort{{Name: clusterName + "-pooler-repl", Port: 5432, TargetPort: intstr.IntOrString{IntVal: 5432}, NodePort: nodePort}},
Selector: map[string]string{"connection-pooler": clusterName + "-pooler-repl"},
Type: serviceType,
},
@ -3063,7 +3063,7 @@ func TestEnableLoadBalancers(t *testing.T) {
},
},
},
expectedServices: getServices(v1.ServiceTypeClusterIP, nil, "", clusterName),
expectedServices: getServices(v1.ServiceTypeClusterIP, nil, "", clusterName, 0),
},
{
subTest: "LBs enabled in manifest, disabled in config",
@ -3110,7 +3110,7 @@ func TestEnableLoadBalancers(t *testing.T) {
},
},
},
expectedServices: getServices(v1.ServiceTypeLoadBalancer, sourceRanges, extTrafficPolicy, clusterName),
expectedServices: getServices(v1.ServiceTypeLoadBalancer, sourceRanges, extTrafficPolicy, clusterName, 0),
},
}
@ -3142,6 +3142,195 @@ func TestEnableLoadBalancers(t *testing.T) {
}
}
// TestEnableNodePorts exercises the node port feature end to end through
// syncService / syncConnectionPoolerWorker: it checks that manifest flags
// override the operator configuration in both directions and that a custom
// port from the manifest lands in the generated ServiceSpec.
func TestEnableNodePorts(t *testing.T) {
	clusterName := "acid-test-cluster"
	namespace := "default"
	clusterNameLabel := "cluster-name"
	roleLabel := "spilo-role"
	roles := []PostgresRole{Master, Replica}
	extTrafficPolicy := "Cluster"
	// custom node port used by the third test case
	port := int32(1337)

	tests := []struct {
		subTest          string
		config           config.Config
		pgSpec           acidv1.Postgresql
		expectedServices []v1.ServiceSpec
	}{
		{
			// operator config turns node ports on, but the manifest explicitly
			// disables them -> plain ClusterIP services win
			subTest: "NodePorts enabled in config, disabled in manifest",
			config: config.Config{
				ConnectionPooler: config.ConnectionPooler{
					ConnectionPoolerDefaultCPURequest:    "100m",
					ConnectionPoolerDefaultCPULimit:      "100m",
					ConnectionPoolerDefaultMemoryRequest: "100Mi",
					ConnectionPoolerDefaultMemoryLimit:   "100Mi",
					NumberOfInstances:                    k8sutil.Int32ToPointer(1),
				},
				EnableMasterNodePort:        true,
				EnableMasterPoolerNodePort:  true,
				EnableReplicaNodePort:       true,
				EnableReplicaPoolerNodePort: true,
				ExternalTrafficPolicy:       extTrafficPolicy,
				Resources: config.Resources{
					ClusterLabels:    map[string]string{"application": "spilo"},
					ClusterNameLabel: clusterNameLabel,
					PodRoleLabel:     roleLabel,
				},
			},
			pgSpec: acidv1.Postgresql{
				ObjectMeta: metav1.ObjectMeta{
					Name:      clusterName,
					Namespace: namespace,
				},
				Spec: acidv1.PostgresSpec{
					EnableConnectionPooler:        util.True(),
					EnableReplicaConnectionPooler: util.True(),
					EnableMasterNodePort:          util.False(),
					EnableMasterPoolerNodePort:    util.False(),
					EnableReplicaNodePort:         util.False(),
					EnableReplicaPoolerNodePort:   util.False(),
					NumberOfInstances:             1,
					Resources: &acidv1.Resources{
						ResourceRequests: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("1"), Memory: k8sutil.StringToPointer("10")},
						ResourceLimits:   acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("1"), Memory: k8sutil.StringToPointer("10")},
					},
					TeamID: "acid",
					Volume: acidv1.Volume{
						Size: "1G",
					},
				},
			},
			expectedServices: getServices(v1.ServiceTypeClusterIP, nil, "", clusterName, 0),
		},
		{
			// operator config disables node ports, but the manifest enables
			// them -> NodePort services (port 0, i.e. Kubernetes-chosen)
			subTest: "NodePorts configured in manifest, disabled in config",
			config: config.Config{
				ConnectionPooler: config.ConnectionPooler{
					ConnectionPoolerDefaultCPURequest:    "100m",
					ConnectionPoolerDefaultCPULimit:      "100m",
					ConnectionPoolerDefaultMemoryRequest: "100Mi",
					ConnectionPoolerDefaultMemoryLimit:   "100Mi",
					NumberOfInstances:                    k8sutil.Int32ToPointer(1),
				},
				EnableMasterNodePort:        false,
				EnableMasterPoolerNodePort:  false,
				EnableReplicaNodePort:       false,
				EnableReplicaPoolerNodePort: false,
				ExternalTrafficPolicy:       extTrafficPolicy,
				Resources: config.Resources{
					ClusterLabels:    map[string]string{"application": "spilo"},
					ClusterNameLabel: clusterNameLabel,
					PodRoleLabel:     roleLabel,
				},
			},
			pgSpec: acidv1.Postgresql{
				ObjectMeta: metav1.ObjectMeta{
					Name:      clusterName,
					Namespace: namespace,
				},
				Spec: acidv1.PostgresSpec{
					EnableConnectionPooler:        util.True(),
					EnableReplicaConnectionPooler: util.True(),
					EnableMasterNodePort:          util.True(),
					EnableMasterPoolerNodePort:    util.True(),
					EnableReplicaNodePort:         util.True(),
					EnableReplicaPoolerNodePort:   util.True(),
					NumberOfInstances:             1,
					Resources: &acidv1.Resources{
						ResourceRequests: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("1"), Memory: k8sutil.StringToPointer("10")},
						ResourceLimits:   acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("1"), Memory: k8sutil.StringToPointer("10")},
					},
					TeamID: "acid",
					Volume: acidv1.Volume{
						Size: "1G",
					},
				},
			},
			expectedServices: getServices(v1.ServiceTypeNodePort, nil, extTrafficPolicy, clusterName, 0),
		},
		{
			// like the previous case, but the manifest also pins a concrete
			// port number, which must appear in every generated service
			subTest: "NodePorts configured in manifest, disabled in config, custom port specified",
			config: config.Config{
				ConnectionPooler: config.ConnectionPooler{
					ConnectionPoolerDefaultCPURequest:    "100m",
					ConnectionPoolerDefaultCPULimit:      "100m",
					ConnectionPoolerDefaultMemoryRequest: "100Mi",
					ConnectionPoolerDefaultMemoryLimit:   "100Mi",
					NumberOfInstances:                    k8sutil.Int32ToPointer(1),
				},
				EnableMasterNodePort:        false,
				EnableMasterPoolerNodePort:  false,
				EnableReplicaNodePort:       false,
				EnableReplicaPoolerNodePort: false,
				ExternalTrafficPolicy:       extTrafficPolicy,
				Resources: config.Resources{
					ClusterLabels:    map[string]string{"application": "spilo"},
					ClusterNameLabel: clusterNameLabel,
					PodRoleLabel:     roleLabel,
				},
			},
			pgSpec: acidv1.Postgresql{
				ObjectMeta: metav1.ObjectMeta{
					Name:      clusterName,
					Namespace: namespace,
				},
				Spec: acidv1.PostgresSpec{
					EnableConnectionPooler:        util.True(),
					EnableReplicaConnectionPooler: util.True(),
					EnableMasterNodePort:          util.True(),
					MasterNodePort:                &port,
					EnableMasterPoolerNodePort:    util.True(),
					MasterPoolerNodePort:          &port,
					EnableReplicaNodePort:         util.True(),
					ReplicaNodePort:               &port,
					EnableReplicaPoolerNodePort:   util.True(),
					ReplicaPoolerNodePort:         &port,
					NumberOfInstances:             1,
					Resources: &acidv1.Resources{
						ResourceRequests: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("1"), Memory: k8sutil.StringToPointer("10")},
						ResourceLimits:   acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("1"), Memory: k8sutil.StringToPointer("10")},
					},
					TeamID: "acid",
					Volume: acidv1.Volume{
						Size: "1G",
					},
				},
			},
			expectedServices: getServices(v1.ServiceTypeNodePort, nil, extTrafficPolicy, clusterName, port),
		},
	}

	for _, tt := range tests {
		client, _ := newLBFakeClient()
		var cluster = New(
			Config{
				OpConfig: tt.config,
			}, client, tt.pgSpec, logger, eventRecorder)

		cluster.Name = clusterName
		cluster.Namespace = namespace
		cluster.ConnectionPooler = map[PostgresRole]*ConnectionPoolerObjects{}
		// collect database + pooler ServiceSpecs for both roles in the same
		// order as getServices builds the expectation
		generatedServices := make([]v1.ServiceSpec, 0)
		for _, role := range roles {
			cluster.syncService(role)
			cluster.ConnectionPooler[role] = &ConnectionPoolerObjects{
				Name:        cluster.connectionPoolerName(role),
				ClusterName: cluster.Name,
				Namespace:   cluster.Namespace,
				Role:        role,
			}
			cluster.syncConnectionPoolerWorker(&tt.pgSpec, &tt.pgSpec, role)
			generatedServices = append(generatedServices, cluster.Services[role].Spec)
			generatedServices = append(generatedServices, cluster.ConnectionPooler[role].Service.Spec)
		}
		if !reflect.DeepEqual(tt.expectedServices, generatedServices) {
			t.Errorf("%s %s: expected %#v but got %#v", t.Name(), tt.subTest, tt.expectedServices, generatedServices)
		}
	}
}
func TestGenerateResourceRequirements(t *testing.T) {
client, _ := newFakeK8sTestClient()
clusterName := "acid-test-cluster"

View File

@ -169,6 +169,10 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur
	result.EnableMasterPoolerLoadBalancer = fromCRD.LoadBalancer.EnableMasterPoolerLoadBalancer
	result.EnableReplicaLoadBalancer = fromCRD.LoadBalancer.EnableReplicaLoadBalancer
	result.EnableReplicaPoolerLoadBalancer = fromCRD.LoadBalancer.EnableReplicaPoolerLoadBalancer
	// The NodePort toggles live in the CRD's load_balancer section alongside
	// the LoadBalancer flags; per the docs, an enabled NodePort configuration
	// overrides the corresponding LoadBalancer configuration.
	result.EnableMasterNodePort = fromCRD.LoadBalancer.EnableMasterNodePort
	result.EnableMasterPoolerNodePort = fromCRD.LoadBalancer.EnableMasterPoolerNodePort
	result.EnableReplicaNodePort = fromCRD.LoadBalancer.EnableReplicaNodePort
	result.EnableReplicaPoolerNodePort = fromCRD.LoadBalancer.EnableReplicaPoolerNodePort
	result.CustomServiceAnnotations = fromCRD.LoadBalancer.CustomServiceAnnotations
	result.MasterDNSNameFormat = fromCRD.LoadBalancer.MasterDNSNameFormat
	result.MasterLegacyDNSNameFormat = fromCRD.LoadBalancer.MasterLegacyDNSNameFormat

View File

@ -211,6 +211,10 @@ type Config struct {
	EnableMasterPoolerLoadBalancer  bool `name:"enable_master_pooler_load_balancer" default:"false"`
	EnableReplicaLoadBalancer       bool `name:"enable_replica_load_balancer" default:"false"`
	EnableReplicaPoolerLoadBalancer bool `name:"enable_replica_pooler_load_balancer" default:"false"`
	// Operator-wide NodePort defaults (all off), applied to every watched
	// cluster whose manifest omits the corresponding enable*NodePort flag.
	// Per the docs, an enabled NodePort setting overrides the matching
	// LoadBalancer configuration.
	EnableMasterNodePort        bool `name:"enable_master_node_port" default:"false"`
	EnableMasterPoolerNodePort  bool `name:"enable_master_pooler_node_port" default:"false"`
	EnableReplicaNodePort       bool `name:"enable_replica_node_port" default:"false"`
	EnableReplicaPoolerNodePort bool `name:"enable_replica_pooler_node_port" default:"false"`
	CustomServiceAnnotations map[string]string `name:"custom_service_annotations"`
	CustomPodAnnotations     map[string]string `name:"custom_pod_annotations"`
	EnablePodAntiAffinity    bool              `name:"enable_pod_antiaffinity" default:"false"`