fix unit test and improve stability in e2e test (#1819)
* fix unit test and improve stability in e2e test
* fix resource handling
This commit is contained in:
parent f3b83c0b05
commit a020708ef1
@@ -208,13 +208,12 @@ class EndToEndTestCase(unittest.TestCase):
         try:
             k8s.update_config(patch_capabilities)
-            self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"},
-                                 "Operator does not get in sync")

             # changed security context of postgres container should trigger a rolling update
             k8s.wait_for_pod_failover(replica_nodes, 'spilo-role=master,' + cluster_label)
             k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label)

+            self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
             self.eventuallyEqual(lambda: k8s.count_pods_with_container_capabilities(capabilities, cluster_label),
                                  2, "Container capabilities not updated")
@@ -240,8 +239,6 @@ class EndToEndTestCase(unittest.TestCase):
             },
         }
         k8s.update_config(enable_postgres_team_crd)
-        self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"},
-                             "Operator does not get in sync")

         k8s.api.custom_objects_api.patch_namespaced_custom_object(
             'acid.zalan.do', 'v1', 'default',
@@ -1002,12 +999,12 @@ class EndToEndTestCase(unittest.TestCase):
         }
         k8s.api.custom_objects_api.patch_namespaced_custom_object(
             "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_patch_resources)
-        self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
+        self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"},
+                             "Operator does not get in sync")

         # wait for switched over
         k8s.wait_for_pod_failover(replica_nodes, 'spilo-role=master,' + cluster_label)
         k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label)
-        self.eventuallyEqual(lambda: len(k8s.get_patroni_running_members()), 2, "Postgres status did not enter running")

         def verify_pod_limits():
             pods = k8s.api.core_v1.list_namespaced_pod('default', label_selector="cluster-name=acid-minimal-cluster,application=spilo").items
@@ -1109,7 +1106,8 @@ class EndToEndTestCase(unittest.TestCase):
                 plural="postgresqls",
                 name="acid-minimal-cluster",
                 body=patch_node_affinity_config)
-            self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
+            self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"},
+                                 "Operator does not get in sync")

             # node affinity change should cause replica to relocate from replica node to master node due to node affinity requirement
             k8s.wait_for_pod_failover(master_nodes, 'spilo-role=replica,' + cluster_label)
@@ -683,6 +683,10 @@ func (c *Cluster) enforceMinResourceLimits(spec *acidv1.PostgresSpec) error {
 		err error
 	)

+	if spec.Resources == nil {
+		return nil
+	}
+
 	// setting limits too low can cause unnecessary evictions / OOM kills
 	minCPULimit := c.OpConfig.MinCPULimit
 	minMemoryLimit := c.OpConfig.MinMemoryLimit
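Note: the early return above exists because the spec's resources section is now optional (a pointer), so there may be nothing to raise to the configured minimums. A minimal sketch of that guard, using simplified stand-in types rather than the operator's actual acidv1 structs:

package main

import "fmt"

// Stand-in types (assumption: the real spec keeps limits as string quantities).
type Resources struct {
	CPULimit    string
	MemoryLimit string
}

type PostgresSpec struct {
	Resources *Resources // nil when the manifest defines no resources section
}

// enforceMinLimits bails out early when no resources are set, mirroring the
// new nil guard in enforceMinResourceLimits.
func enforceMinLimits(spec *PostgresSpec, minCPU, minMemory string) error {
	if spec.Resources == nil {
		return nil
	}
	// ... the real code compares the spec limits against minCPU / minMemory
	// here and raises them if they are smaller ...
	return nil
}

func main() {
	fmt.Println(enforceMinLimits(&PostgresSpec{}, "250m", "250Mi")) // <nil>
}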
@@ -11,6 +11,7 @@ import (
 	acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1"
 	fakeacidv1 "github.com/zalando/postgres-operator/pkg/generated/clientset/versioned/fake"
 	"github.com/zalando/postgres-operator/pkg/spec"
+	"github.com/zalando/postgres-operator/pkg/util"
 	"github.com/zalando/postgres-operator/pkg/util/config"
 	"github.com/zalando/postgres-operator/pkg/util/constants"
 	"github.com/zalando/postgres-operator/pkg/util/k8sutil"
@@ -167,8 +168,17 @@ func TestInitAdditionalOwnerRoles(t *testing.T) {
 	}

 	cl.initAdditionalOwnerRoles()
-	if !reflect.DeepEqual(cl.pgUsers, expectedUsers) {
-		t.Errorf("%s expected: %#v, got %#v", testName, expectedUsers, cl.pgUsers)
+	for _, additionalOwnerRole := range cl.Config.OpConfig.AdditionalOwnerRoles {
+		expectedPgUser := expectedUsers[additionalOwnerRole]
+		existingPgUser, exists := cl.pgUsers[additionalOwnerRole]
+		if !exists {
+			t.Errorf("%s additional owner role %q not initialized", testName, additionalOwnerRole)
+		}
+		if !util.IsEqualIgnoreOrder(expectedPgUser.MemberOf, existingPgUser.MemberOf) {
+			t.Errorf("%s unexpected membership of additional owner role %q: expected member of %#v, got member of %#v",
+				testName, additionalOwnerRole, expectedPgUser.MemberOf, existingPgUser.MemberOf)
+		}
 	}
 }
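The rewritten assertion checks each additional owner role on its own and compares MemberOf with an order-insensitive helper, which is more robust than reflect.DeepEqual when membership slices are assembled in non-deterministic order. A rough, self-contained sketch of such a comparison (illustrative only, not the repo's util.IsEqualIgnoreOrder):

package main

import "fmt"

// equalIgnoreOrder reports whether two string slices hold the same elements,
// ignoring order (duplicates must match in count as well).
func equalIgnoreOrder(a, b []string) bool {
	if len(a) != len(b) {
		return false
	}
	counts := make(map[string]int, len(a))
	for _, s := range a {
		counts[s]++
	}
	for _, s := range b {
		counts[s]--
		if counts[s] < 0 {
			return false
		}
	}
	return true
}

func main() {
	fmt.Println(equalIgnoreOrder([]string{"cron_admin", "part_man"}, []string{"part_man", "cron_admin"})) // true
}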
@@ -139,8 +139,8 @@ func (c *Cluster) makeDefaultResources() acidv1.Resources {
 func generateResourceRequirements(resources *acidv1.Resources, defaultResources acidv1.Resources) (*v1.ResourceRequirements, error) {
 	var err error

-	var specRequests acidv1.ResourceDescription
-	var specLimits acidv1.ResourceDescription
+	var specRequests, specLimits acidv1.ResourceDescription

 	if resources == nil {
 		specRequests = acidv1.ResourceDescription{}
 		specLimits = acidv1.ResourceDescription{}
@@ -1007,14 +1007,14 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.StatefulSet, error) {

 	// controller adjusts the default memory request at operator startup

-	request := spec.Resources.ResourceRequests.Memory
-	if request == "" {
-		request = c.OpConfig.Resources.DefaultMemoryRequest
-	}
+	var request, limit string

-	limit := spec.Resources.ResourceLimits.Memory
-	if limit == "" {
+	if spec.Resources == nil {
+		request = c.OpConfig.Resources.DefaultMemoryRequest
 		limit = c.OpConfig.Resources.DefaultMemoryLimit
+	} else {
+		request = spec.Resources.ResourceRequests.Memory
+		limit = spec.Resources.ResourceLimits.Memory
 	}

 	isSmaller, err := util.IsSmallerQuantity(request, limit)
@@ -1024,7 +1024,6 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.StatefulSet, error) {
 	if isSmaller {
 		c.logger.Warningf("The memory request of %v for the Postgres container is increased to match the memory limit of %v.", request, limit)
 		spec.Resources.ResourceRequests.Memory = limit
-
 	}

 	// controller adjusts the Scalyr sidecar request at operator startup
@@ -1034,14 +1033,14 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.StatefulSet, error) {
 	for _, sidecar := range spec.Sidecars {

 		// TODO #413
-		sidecarRequest := sidecar.Resources.ResourceRequests.Memory
-		if request == "" {
-			request = c.OpConfig.Resources.DefaultMemoryRequest
-		}
+		var sidecarRequest, sidecarLimit string

-		sidecarLimit := sidecar.Resources.ResourceLimits.Memory
-		if limit == "" {
-			limit = c.OpConfig.Resources.DefaultMemoryLimit
+		if sidecar.Resources == nil {
+			sidecarRequest = c.OpConfig.Resources.DefaultMemoryRequest
+			sidecarLimit = c.OpConfig.Resources.DefaultMemoryLimit
+		} else {
+			sidecarRequest = sidecar.Resources.ResourceRequests.Memory
+			sidecarLimit = sidecar.Resources.ResourceLimits.Memory
 		}

 		isSmaller, err := util.IsSmallerQuantity(sidecarRequest, sidecarLimit)
@@ -1057,7 +1056,6 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.StatefulSet, error) {
 	}

 	defaultResources := c.makeDefaultResources()
-
 	resourceRequirements, err := generateResourceRequirements(spec.Resources, defaultResources)
 	if err != nil {
 		return nil, fmt.Errorf("could not generate resource requirements: %v", err)
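Taken together, the changes above replace field-by-field defaulting with a single branch on whether the manifest defines resources at all, then keep the existing rule that a memory request smaller than the limit is raised to the limit. A condensed sketch of that flow with plain strings and k8s.io/apimachinery quantities (assumed available; the operator's real code works on its own spec and OpConfig types):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

// resolveMemory picks request/limit from the manifest when a resources section
// exists, otherwise falls back to the operator-wide defaults, and bumps the
// request up to the limit if it is smaller.
func resolveMemory(specRequest, specLimit string, hasSpec bool, defaultRequest, defaultLimit string) (string, string, error) {
	var request, limit string
	if !hasSpec {
		request = defaultRequest
		limit = defaultLimit
	} else {
		request = specRequest
		limit = specLimit
	}

	reqQty, err := resource.ParseQuantity(request)
	if err != nil {
		return "", "", fmt.Errorf("could not parse memory request %q: %v", request, err)
	}
	limQty, err := resource.ParseQuantity(limit)
	if err != nil {
		return "", "", fmt.Errorf("could not parse memory limit %q: %v", limit, err)
	}
	if reqQty.Cmp(limQty) < 0 {
		request = limit // same adjustment the Warningf above reports
	}
	return request, limit, nil
}

func main() {
	req, lim, _ := resolveMemory("100Mi", "500Mi", true, "100Mi", "500Mi")
	fmt.Println(req, lim) // 500Mi 500Mi
}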
@@ -1578,7 +1578,7 @@ func TestEnableLoadBalancers(t *testing.T) {
 				EnableReplicaLoadBalancer:       util.False(),
 				EnableReplicaPoolerLoadBalancer: util.False(),
 				NumberOfInstances:               1,
-				Resources: acidv1.Resources{
+				Resources: &acidv1.Resources{
 					ResourceRequests: acidv1.ResourceDescription{CPU: "1", Memory: "10"},
 					ResourceLimits:   acidv1.ResourceDescription{CPU: "1", Memory: "10"},
 				},
@@ -1625,7 +1625,7 @@ func TestEnableLoadBalancers(t *testing.T) {
 				EnableReplicaLoadBalancer:       util.True(),
 				EnableReplicaPoolerLoadBalancer: util.True(),
 				NumberOfInstances:               1,
-				Resources: acidv1.Resources{
+				Resources: &acidv1.Resources{
 					ResourceRequests: acidv1.ResourceDescription{CPU: "1", Memory: "10"},
 					ResourceLimits:   acidv1.ResourceDescription{CPU: "1", Memory: "10"},
 				},
@@ -1720,7 +1720,7 @@ func TestVolumeSelector(t *testing.T) {
 	return acidv1.PostgresSpec{
 		TeamID:            "myapp",
 		NumberOfInstances: 0,
-		Resources: acidv1.Resources{
+		Resources: &acidv1.Resources{
 			ResourceRequests: acidv1.ResourceDescription{CPU: "1", Memory: "10"},
 			ResourceLimits:   acidv1.ResourceDescription{CPU: "1", Memory: "10"},
 		},
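The fixture updates in these test hunks follow from Resources becoming a pointer field on the Postgres spec: with a pointer, a manifest that omits the resources section decodes to nil instead of an empty struct, which is what the nil checks in enforceMinResourceLimits and generateResourceRequirements rely on. A small illustration with encoding/json and throwaway types (not the operator's actual CRD structs):

package main

import (
	"encoding/json"
	"fmt"
)

type Resources struct {
	Memory string `json:"memory,omitempty"`
}

type Spec struct {
	Resources *Resources `json:"resources,omitempty"`
}

func main() {
	var present, omitted Spec
	_ = json.Unmarshal([]byte(`{"resources": {}}`), &present)
	_ = json.Unmarshal([]byte(`{}`), &omitted)
	fmt.Println(present.Resources == nil, omitted.Resources == nil) // false true
}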