resolve conflict

Felix Kunde 2022-03-24 17:50:07 +01:00
commit 763f0c5025
12 changed files with 453 additions and 225 deletions


@@ -150,15 +150,9 @@ spec:
minimum: 1
resources:
type: object
required:
- requests
- limits
properties:
limits:
type: object
required:
- cpu
- memory
properties:
cpu:
type: string
@@ -168,9 +162,6 @@ spec:
pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
requests:
type: object
required:
- cpu
- memory
properties:
cpu:
type: string
@@ -406,15 +397,9 @@ spec:
description: deprecated
resources:
type: object
required:
- requests
- limits
properties:
limits:
type: object
required:
- cpu
- memory
properties:
cpu:
type: string
@@ -443,9 +428,6 @@ spec:
# than the corresponding limit.
requests:
type: object
required:
- cpu
- memory
properties:
cpu:
type: string
@@ -532,10 +514,6 @@ spec:
type: array
items:
type: object
required:
- key
- operator
- effect
properties:
key:
type: string


@@ -322,9 +322,7 @@ explanation of `ttl` and `loop_wait` parameters.
Those parameters define [CPU and memory requests and limits](https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/)
for the Postgres container. They are grouped under the `resources` top-level
key with subgroups `requests` and `limits`. The whole section is optional,
however if you specify a request or limit you have to define everything
(unless you have modified the default CRD schema validation).
key with subgroups `requests` and `limits`.
### Requests
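
With the `required` constraints gone from the schema, a manifest may now set only part of the `resources` section and the operator fills the rest from its configured defaults (see `fillResourceList` later in this commit). A minimal sketch using the operator's API types as they appear in the tests below; the import path is assumed from the repository layout:

package main

import (
	"fmt"

	acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1"
)

func main() {
	// Only requests are given; limits are deliberately omitted and will be
	// filled from the operator defaults (e.g. 1 CPU / 500Mi in the tests).
	res := &acidv1.Resources{
		ResourceRequests: acidv1.ResourceDescription{CPU: "50m", Memory: "50Mi"},
	}
	fmt.Printf("%+v\n", *res)
}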


@@ -159,9 +159,9 @@ Those are top-level keys, containing both leaf keys and groups.
at the cost of overprovisioning memory and potential scheduling problems for
containers with high memory limits due to the lack of memory on Kubernetes
cluster nodes. This affects all containers created by the operator (Postgres,
Scalyr sidecar, and other sidecars except **sidecars** defined in the operator
configuration); to set resources for the operator's own container, change the
[operator deployment manually](https://github.com/zalando/postgres-operator/blob/master/manifests/postgres-operator.yaml#L20).
connection pooler, logical backup, Scalyr sidecar, and other sidecars except
**sidecars** defined in the operator configuration); to set resources for the
operator's own container, change the [operator deployment manually](https://github.com/zalando/postgres-operator/blob/master/manifests/postgres-operator.yaml#L20).
The default is `false`.
## Postgres users
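
When `SetMemoryRequestToLimit` is enabled in the operator configuration, the memory request of each affected container is raised to match its memory limit. A self-contained sketch of that adjustment, mirroring the `setMemoryRequestToLimit` helper introduced in this commit (the snippet itself is illustrative, not the operator's code):

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	r := v1.ResourceRequirements{
		Requests: v1.ResourceList{v1.ResourceMemory: resource.MustParse("100Mi")},
		Limits:   v1.ResourceList{v1.ResourceMemory: resource.MustParse("500Mi")},
	}
	// The request is smaller than the limit, so it is bumped up to the limit.
	if req := r.Requests[v1.ResourceMemory]; req.Cmp(r.Limits[v1.ResourceMemory]) == -1 {
		r.Requests[v1.ResourceMemory] = r.Limits[v1.ResourceMemory]
	}
	m := r.Requests[v1.ResourceMemory]
	fmt.Println(m.String()) // prints "500Mi"
}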


@@ -148,15 +148,9 @@ spec:
minimum: 1
resources:
type: object
required:
- requests
- limits
properties:
limits:
type: object
required:
- cpu
- memory
properties:
cpu:
type: string
@@ -166,9 +160,6 @@ spec:
pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
requests:
type: object
required:
- cpu
- memory
properties:
cpu:
type: string
@@ -404,15 +395,9 @@ spec:
description: deprecated
resources:
type: object
required:
- requests
- limits
properties:
limits:
type: object
required:
- cpu
- memory
properties:
cpu:
type: string
@@ -441,9 +426,6 @@ spec:
# than the corresponding limit.
requests:
type: object
required:
- cpu
- memory
properties:
cpu:
type: string
@@ -530,10 +512,6 @@ spec:
type: array
items:
type: object
required:
- key
- operator
- effect
properties:
key:
type: string


@@ -238,12 +238,10 @@ var PostgresCRDResourceValidation = apiextv1.CustomResourceValidation{
Minimum: &min1,
},
"resources": {
Type: "object",
Required: []string{"requests", "limits"},
Type: "object",
Properties: map[string]apiextv1.JSONSchemaProps{
"limits": {
Type: "object",
Required: []string{"cpu", "memory"},
Type: "object",
Properties: map[string]apiextv1.JSONSchemaProps{
"cpu": {
Type: "string",
@@ -256,8 +254,7 @@
},
},
"requests": {
Type: "object",
Required: []string{"cpu", "memory"},
Type: "object",
Properties: map[string]apiextv1.JSONSchemaProps{
"cpu": {
Type: "string",
@@ -648,12 +645,10 @@ var PostgresCRDResourceValidation = apiextv1.CustomResourceValidation{
Description: "deprecated",
},
"resources": {
Type: "object",
Required: []string{"requests", "limits"},
Type: "object",
Properties: map[string]apiextv1.JSONSchemaProps{
"limits": {
Type: "object",
Required: []string{"cpu", "memory"},
Type: "object",
Properties: map[string]apiextv1.JSONSchemaProps{
"cpu": {
Type: "string",
@@ -666,8 +661,7 @@
},
},
"requests": {
Type: "object",
Required: []string{"cpu", "memory"},
Type: "object",
Properties: map[string]apiextv1.JSONSchemaProps{
"cpu": {
Type: "string",
@@ -798,8 +792,7 @@
Type: "array",
Items: &apiextv1.JSONSchemaPropsOrArray{
Schema: &apiextv1.JSONSchemaProps{
Type: "object",
Required: []string{"key", "operator", "effect"},
Type: "object",
Properties: map[string]apiextv1.JSONSchemaProps{
"key": {
Type: "string",


@@ -167,7 +167,7 @@ type Patroni struct {
Slots map[string]map[string]string `json:"slots,omitempty"`
SynchronousMode bool `json:"synchronous_mode,omitempty"`
SynchronousModeStrict bool `json:"synchronous_mode_strict,omitempty"`
SynchronousNodeCount uint32 `json:"synchronous_node_count,omitempty" defaults:1`
SynchronousNodeCount uint32 `json:"synchronous_node_count,omitempty" defaults:"1"`
}
// StandbyDescription contains s3 wal path
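
The one-character fix above matters because Go struct tags are only machine-readable in the conventional `key:"value"` form; `reflect.StructTag.Get` silently returns an empty string for a malformed tag such as `defaults:1`. A quick illustration (not part of the commit):

package main

import (
	"fmt"
	"reflect"
)

type demo struct {
	Bad  uint32 `defaults:1`   // malformed: the value must be quoted
	Good uint32 `defaults:"1"` // conventional key:"value" form
}

func main() {
	t := reflect.TypeOf(demo{})
	bad, _ := t.FieldByName("Bad")
	good, _ := t.FieldByName("Good")
	fmt.Printf("bad=%q good=%q\n", bad.Tag.Get("defaults"), good.Tag.Get("defaults"))
	// Output: bad="" good="1"
}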


@@ -256,10 +256,6 @@ func (c *Cluster) Create() error {
c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusCreating)
c.eventRecorder.Event(c.GetReference(), v1.EventTypeNormal, "Create", "Started creation of new cluster resources")
if err = c.enforceMinResourceLimits(&c.Spec); err != nil {
return fmt.Errorf("could not enforce minimum resource limits: %v", err)
}
for _, role := range []PostgresRole{Master, Replica} {
if c.Endpoints[role] != nil {
@@ -678,50 +674,6 @@ func comparePorts(a, b []v1.ContainerPort) bool {
return true
}
func (c *Cluster) enforceMinResourceLimits(spec *acidv1.PostgresSpec) error {
var (
isSmaller bool
err error
)
if spec.Resources == nil {
return nil
}
// setting limits too low can cause unnecessary evictions / OOM kills
minCPULimit := c.OpConfig.MinCPULimit
minMemoryLimit := c.OpConfig.MinMemoryLimit
cpuLimit := spec.Resources.ResourceLimits.CPU
if cpuLimit != "" {
isSmaller, err = util.IsSmallerQuantity(cpuLimit, minCPULimit)
if err != nil {
return fmt.Errorf("could not compare defined CPU limit %s with configured minimum value %s: %v", cpuLimit, minCPULimit, err)
}
if isSmaller {
c.logger.Warningf("defined CPU limit %s is below required minimum %s and will be increased", cpuLimit, minCPULimit)
c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeWarning, "ResourceLimits", "defined CPU limit %s is below required minimum %s and will be set to it", cpuLimit, minCPULimit)
spec.Resources.ResourceLimits.CPU = minCPULimit
}
}
memoryLimit := spec.Resources.ResourceLimits.Memory
if memoryLimit != "" {
isSmaller, err = util.IsSmallerQuantity(memoryLimit, minMemoryLimit)
if err != nil {
return fmt.Errorf("could not compare defined memory limit %s with configured minimum value %s: %v", memoryLimit, minMemoryLimit, err)
}
if isSmaller {
c.logger.Warningf("defined memory limit %s is below required minimum %s and will be increased", memoryLimit, minMemoryLimit)
c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeWarning, "ResourceLimits", "defined memory limit %s is below required minimum %s and will be set to it", memoryLimit, minMemoryLimit)
spec.Resources.ResourceLimits.Memory = minMemoryLimit
}
}
return nil
}
func (c *Cluster) compareAnnotations(old, new map[string]string) (bool, string) {
reason := ""
ignoredAnnotations := make(map[string]bool)
@@ -856,12 +808,6 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error {
// Statefulset
func() {
if err := c.enforceMinResourceLimits(&c.Spec); err != nil {
c.logger.Errorf("could not sync resources: %v", err)
updateFailed = true
return
}
oldSs, err := c.generateStatefulSet(&oldSpec.Spec)
if err != nil {
c.logger.Errorf("could not generate old statefulset spec: %v", err)
@@ -869,9 +815,6 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error {
return
}
// update newSpec for later comparison with oldSpec
c.enforceMinResourceLimits(&newSpec.Spec)
newSs, err := c.generateStatefulSet(&newSpec.Spec)
if err != nil {
c.logger.Errorf("could not generate new statefulset spec: %v", err)


@@ -210,9 +210,10 @@ func (c *Cluster) generateConnectionPoolerPodTemplate(role PostgresRole) (
connectionPoolerSpec = &acidv1.ConnectionPooler{}
}
gracePeriod := int64(c.OpConfig.PodTerminateGracePeriod.Seconds())
resources, err := generateResourceRequirements(
resources, err := c.generateResourceRequirements(
connectionPoolerSpec.Resources,
makeDefaultConnectionPoolerResources(&c.OpConfig))
makeDefaultConnectionPoolerResources(&c.OpConfig),
connectionPoolerContainer)
effectiveDockerImage := util.Coalesce(
connectionPoolerSpec.DockerImage,
@@ -627,8 +628,9 @@ func (c *Cluster) needSyncConnectionPoolerDefaults(Config *Config, spec *acidv1.
reasons = append(reasons, msg)
}
expectedResources, err := generateResourceRequirements(spec.Resources,
makeDefaultConnectionPoolerResources(&Config.OpConfig))
expectedResources, err := c.generateResourceRequirements(spec.Resources,
makeDefaultConnectionPoolerResources(&Config.OpConfig),
connectionPoolerContainer)
// An error to generate expected resources means something is not quite
// right, but for the purpose of robustness do not panic here, just report


@@ -37,6 +37,8 @@ const (
patroniPGBinariesParameterName = "bin_dir"
patroniPGHBAConfParameterName = "pg_hba"
localHost = "127.0.0.1/32"
scalyrSidecarName = "scalyr-sidecar"
logicalBackupContainerName = "logical-backup"
connectionPoolerContainer = "connection-pooler"
pgPort = 5432
)
@@ -117,9 +119,7 @@ func (c *Cluster) podDisruptionBudgetName() string {
return c.OpConfig.PDBNameFormat.Format("cluster", c.Name)
}
func (c *Cluster) makeDefaultResources() acidv1.Resources {
config := c.OpConfig
func makeDefaultResources(config *config.Config) acidv1.Resources {
defaultRequests := acidv1.ResourceDescription{
CPU: config.Resources.DefaultCPURequest,
@@ -136,32 +136,61 @@ func (c *Cluster) makeDefaultResources() acidv1.Resources {
}
}
func generateResourceRequirements(resources *acidv1.Resources, defaultResources acidv1.Resources) (*v1.ResourceRequirements, error) {
var err error
func (c *Cluster) enforceMinResourceLimits(resources *v1.ResourceRequirements) error {
var (
isSmaller bool
err error
msg string
)
var specRequests, specLimits acidv1.ResourceDescription
if resources == nil {
specRequests = acidv1.ResourceDescription{}
specLimits = acidv1.ResourceDescription{}
} else {
specRequests = resources.ResourceRequests
specLimits = resources.ResourceLimits
// setting limits too low can cause unnecessary evictions / OOM kills
cpuLimit := resources.Limits[v1.ResourceCPU]
minCPULimit := c.OpConfig.MinCPULimit
if minCPULimit != "" {
isSmaller, err = util.IsSmallerQuantity(cpuLimit.String(), minCPULimit)
if err != nil {
return fmt.Errorf("could not compare defined CPU limit %s for %q container with configured minimum value %s: %v",
cpuLimit.String(), constants.PostgresContainerName, minCPULimit, err)
}
if isSmaller {
msg = fmt.Sprintf("defined CPU limit %s for %q container is below required minimum %s and will be increased",
cpuLimit.String(), constants.PostgresContainerName, minCPULimit)
c.logger.Warningf(msg)
c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeWarning, "ResourceLimits", msg)
resources.Limits[v1.ResourceCPU], _ = resource.ParseQuantity(minCPULimit)
}
}
result := v1.ResourceRequirements{}
result.Requests, err = fillResourceList(specRequests, defaultResources.ResourceRequests)
if err != nil {
return nil, fmt.Errorf("could not fill resource requests: %v", err)
memoryLimit := resources.Limits[v1.ResourceMemory]
minMemoryLimit := c.OpConfig.MinMemoryLimit
if minMemoryLimit != "" {
isSmaller, err = util.IsSmallerQuantity(memoryLimit.String(), minMemoryLimit)
if err != nil {
return fmt.Errorf("could not compare defined memory limit %s for %q container with configured minimum value %s: %v",
memoryLimit.String(), constants.PostgresContainerName, minMemoryLimit, err)
}
if isSmaller {
msg = fmt.Sprintf("defined memory limit %s for %q container is below required minimum %s and will be increased",
memoryLimit.String(), constants.PostgresContainerName, minMemoryLimit)
c.logger.Warningf(msg)
c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeWarning, "ResourceLimits", msg)
resources.Limits[v1.ResourceMemory], _ = resource.ParseQuantity(minMemoryLimit)
}
}
result.Limits, err = fillResourceList(specLimits, defaultResources.ResourceLimits)
if err != nil {
return nil, fmt.Errorf("could not fill resource limits: %v", err)
}
return nil
}
return &result, nil
func setMemoryRequestToLimit(resources *v1.ResourceRequirements, containerName string, logger *logrus.Entry) {
requests := resources.Requests[v1.ResourceMemory]
limits := resources.Limits[v1.ResourceMemory]
isSmaller := requests.Cmp(limits) == -1
if isSmaller {
logger.Warningf("memory request of %s for %q container is increased to match memory limit of %s",
requests.String(), containerName, limits.String())
resources.Requests[v1.ResourceMemory] = limits
}
}
func fillResourceList(spec acidv1.ResourceDescription, defaults acidv1.ResourceDescription) (v1.ResourceList, error) {
@@ -194,6 +223,44 @@ func fillResourceList(spec acidv1.ResourceDescription, defaults acidv1.ResourceD
return requests, nil
}
func (c *Cluster) generateResourceRequirements(
resources *acidv1.Resources,
defaultResources acidv1.Resources,
containerName string) (*v1.ResourceRequirements, error) {
var err error
specRequests := acidv1.ResourceDescription{}
specLimits := acidv1.ResourceDescription{}
result := v1.ResourceRequirements{}
if resources != nil {
specRequests = resources.ResourceRequests
specLimits = resources.ResourceLimits
}
result.Requests, err = fillResourceList(specRequests, defaultResources.ResourceRequests)
if err != nil {
return nil, fmt.Errorf("could not fill resource requests: %v", err)
}
result.Limits, err = fillResourceList(specLimits, defaultResources.ResourceLimits)
if err != nil {
return nil, fmt.Errorf("could not fill resource limits: %v", err)
}
// enforce minimum cpu and memory limits for Postgres containers only
if containerName == constants.PostgresContainerName {
if err = c.enforceMinResourceLimits(&result); err != nil {
return nil, fmt.Errorf("could not enforce minimum resource limits: %v", err)
}
}
if c.OpConfig.SetMemoryRequestToLimit {
setMemoryRequestToLimit(&result, containerName, c.logger)
}
return &result, nil
}
func generateSpiloJSONConfiguration(pg *acidv1.PostgresqlParam, patroni *acidv1.Patroni, pamRoleName string, EnablePgVersionEnvVar bool, logger *logrus.Entry) (string, error) {
config := spiloConfiguration{}
@@ -514,8 +581,8 @@ func generateContainer(
}
}
func generateSidecarContainers(sidecars []acidv1.Sidecar,
defaultResources acidv1.Resources, startIndex int, logger *logrus.Entry) ([]v1.Container, error) {
func (c *Cluster) generateSidecarContainers(sidecars []acidv1.Sidecar,
defaultResources acidv1.Resources, startIndex int) ([]v1.Container, error) {
if len(sidecars) > 0 {
result := make([]v1.Container, 0)
@@ -527,7 +594,7 @@ func generateSidecarContainers(sidecars []acidv1.Sidecar,
sidecar.Resources.DeepCopyInto(&resourcesSpec)
}
resources, err := generateResourceRequirements(&resourcesSpec, defaultResources)
resources, err := c.generateResourceRequirements(&resourcesSpec, defaultResources, sidecar.Name)
if err != nil {
return nil, err
}
@@ -1002,61 +1069,9 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
additionalVolumes = spec.AdditionalVolumes
)
// Improve me. Please.
if c.OpConfig.SetMemoryRequestToLimit {
// controller adjusts the default memory request at operator startup
var request, limit string
if spec.Resources == nil {
request = c.OpConfig.Resources.DefaultMemoryRequest
limit = c.OpConfig.Resources.DefaultMemoryLimit
} else {
request = spec.Resources.ResourceRequests.Memory
limit = spec.Resources.ResourceRequests.Memory
}
isSmaller, err := util.IsSmallerQuantity(request, limit)
if err != nil {
return nil, err
}
if isSmaller {
c.logger.Warningf("The memory request of %v for the Postgres container is increased to match the memory limit of %v.", request, limit)
spec.Resources.ResourceRequests.Memory = limit
}
// controller adjusts the Scalyr sidecar request at operator startup
// as this sidecar is managed separately
// adjust sidecar containers defined for that particular cluster
for _, sidecar := range spec.Sidecars {
// TODO #413
var sidecarRequest, sidecarLimit string
if sidecar.Resources == nil {
sidecarRequest = c.OpConfig.Resources.DefaultMemoryRequest
sidecarLimit = c.OpConfig.Resources.DefaultMemoryLimit
} else {
sidecarRequest = sidecar.Resources.ResourceRequests.Memory
sidecarLimit = sidecar.Resources.ResourceRequests.Memory
}
isSmaller, err := util.IsSmallerQuantity(sidecarRequest, sidecarLimit)
if err != nil {
return nil, err
}
if isSmaller {
c.logger.Warningf("The memory request of %v for the %v sidecar container is increased to match the memory limit of %v.", sidecar.Resources.ResourceRequests.Memory, sidecar.Name, sidecar.Resources.ResourceLimits.Memory)
sidecar.Resources.ResourceRequests.Memory = sidecar.Resources.ResourceLimits.Memory
}
}
}
defaultResources := c.makeDefaultResources()
resourceRequirements, err := generateResourceRequirements(spec.Resources, defaultResources)
defaultResources := makeDefaultResources(&c.OpConfig)
resourceRequirements, err := c.generateResourceRequirements(
spec.Resources, defaultResources, constants.PostgresContainerName)
if err != nil {
return nil, fmt.Errorf("could not generate resource requirements: %v", err)
}
@@ -1232,7 +1247,7 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
c.logger.Warningf("sidecars specified but disabled in configuration - next statefulset creation would fail")
}
if clusterSpecificSidecars, err = generateSidecarContainers(spec.Sidecars, defaultResources, 0, c.logger); err != nil {
if clusterSpecificSidecars, err = c.generateSidecarContainers(spec.Sidecars, defaultResources, 0); err != nil {
return nil, fmt.Errorf("could not generate sidecar containers: %v", err)
}
}
@@ -1243,7 +1258,7 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
for name, dockerImage := range c.OpConfig.SidecarImages {
globalSidecarsByDockerImage = append(globalSidecarsByDockerImage, acidv1.Sidecar{Name: name, DockerImage: dockerImage})
}
if globalSidecarContainersByDockerImage, err = generateSidecarContainers(globalSidecarsByDockerImage, defaultResources, len(clusterSpecificSidecars), c.logger); err != nil {
if globalSidecarContainersByDockerImage, err = c.generateSidecarContainers(globalSidecarsByDockerImage, defaultResources, len(clusterSpecificSidecars)); err != nil {
return nil, fmt.Errorf("could not generate sidecar containers: %v", err)
}
// make the resulting list reproducible
@@ -1256,7 +1271,7 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
// generate scalyr sidecar container
var scalyrSidecars []v1.Container
if scalyrSidecar, err :=
generateScalyrSidecarSpec(c.Name,
c.generateScalyrSidecarSpec(c.Name,
c.OpConfig.ScalyrAPIKey,
c.OpConfig.ScalyrServerURL,
c.OpConfig.ScalyrImage,
@@ -1264,8 +1279,7 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
c.OpConfig.ScalyrMemoryRequest,
c.OpConfig.ScalyrCPULimit,
c.OpConfig.ScalyrMemoryLimit,
defaultResources,
c.logger); err != nil {
defaultResources); err != nil {
return nil, fmt.Errorf("could not generate Scalyr sidecar: %v", err)
} else {
if scalyrSidecar != nil {
@@ -1375,12 +1389,12 @@ func (c *Cluster) generatePodAnnotations(spec *acidv1.PostgresSpec) map[string]s
return annotations
}
func generateScalyrSidecarSpec(clusterName, APIKey, serverURL, dockerImage string,
func (c *Cluster) generateScalyrSidecarSpec(clusterName, APIKey, serverURL, dockerImage string,
scalyrCPURequest string, scalyrMemoryRequest string, scalyrCPULimit string, scalyrMemoryLimit string,
defaultResources acidv1.Resources, logger *logrus.Entry) (*v1.Container, error) {
defaultResources acidv1.Resources) (*v1.Container, error) {
if APIKey == "" || dockerImage == "" {
if APIKey == "" && dockerImage != "" {
logger.Warning("Not running Scalyr sidecar: SCALYR_API_KEY must be defined")
c.logger.Warning("Not running Scalyr sidecar: SCALYR_API_KEY must be defined")
}
return nil, nil
}
@@ -1390,7 +1404,8 @@ func generateScalyrSidecarSpec(clusterName, APIKey, serverURL, dockerImage strin
scalyrCPULimit,
scalyrMemoryLimit,
)
resourceRequirementsScalyrSidecar, err := generateResourceRequirements(&resourcesScalyrSidecar, defaultResources)
resourceRequirementsScalyrSidecar, err := c.generateResourceRequirements(
&resourcesScalyrSidecar, defaultResources, scalyrSidecarName)
if err != nil {
return nil, fmt.Errorf("invalid resources for Scalyr sidecar: %v", err)
}
@@ -1408,7 +1423,7 @@ func generateScalyrSidecarSpec(clusterName, APIKey, serverURL, dockerImage strin
env = append(env, v1.EnvVar{Name: "SCALYR_SERVER_URL", Value: serverURL})
}
return &v1.Container{
Name: "scalyr-sidecar",
Name: scalyrSidecarName,
Image: dockerImage,
Env: env,
ImagePullPolicy: v1.PullIfNotPresent,
@@ -1991,15 +2006,15 @@ func (c *Cluster) generateLogicalBackupJob() (*batchv1beta1.CronJob, error) {
c.logger.Debug("Generating logical backup pod template")
// allocate for the backup pod the same amount of resources as for normal DB pods
defaultResources := c.makeDefaultResources()
resourceRequirements, err = generateResourceRequirements(c.Spec.Resources, defaultResources)
resourceRequirements, err = c.generateResourceRequirements(
c.Spec.Resources, makeDefaultResources(&c.OpConfig), logicalBackupContainerName)
if err != nil {
return nil, fmt.Errorf("could not generate resource requirements for logical backup pods: %v", err)
}
envVars := c.generateLogicalBackupPodEnvVars()
logicalBackupContainer := generateContainer(
"logical-backup",
logicalBackupContainerName,
&c.OpConfig.LogicalBackup.LogicalBackupDockerImage,
resourceRequirements,
envVars,


@@ -30,6 +30,7 @@ import (
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/client-go/kubernetes/fake"
v1core "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/client-go/tools/record"
)
func newFakeK8sTestClient() (k8sutil.KubernetesClient, *fake.Clientset) {
@@ -1667,6 +1668,319 @@ func TestEnableLoadBalancers(t *testing.T) {
}
}
func TestGenerateResourceRequirements(t *testing.T) {
testName := "TestGenerateResourceRequirements"
client, _ := newFakeK8sTestClient()
clusterName := "acid-test-cluster"
namespace := "default"
clusterNameLabel := "cluster-name"
roleLabel := "spilo-role"
sidecarName := "postgres-exporter"
// two test cases will call enforceMinResourceLimits which emits 2 events per call
// hence bufferSize of 4 is required
newEventRecorder := record.NewFakeRecorder(4)
configResources := config.Resources{
ClusterLabels: map[string]string{"application": "spilo"},
ClusterNameLabel: clusterNameLabel,
DefaultCPURequest: "100m",
DefaultCPULimit: "1",
DefaultMemoryRequest: "100Mi",
DefaultMemoryLimit: "500Mi",
MinCPULimit: "250m",
MinMemoryLimit: "250Mi",
PodRoleLabel: roleLabel,
}
tests := []struct {
subTest string
config config.Config
pgSpec acidv1.Postgresql
expectedResources acidv1.Resources
}{
{
subTest: "test generation of default resources when empty in manifest",
config: config.Config{
Resources: configResources,
PodManagementPolicy: "ordered_ready",
SetMemoryRequestToLimit: false,
},
pgSpec: acidv1.Postgresql{
ObjectMeta: metav1.ObjectMeta{
Name: clusterName,
Namespace: namespace,
},
Spec: acidv1.PostgresSpec{
TeamID: "acid",
Volume: acidv1.Volume{
Size: "1G",
},
},
},
expectedResources: acidv1.Resources{
ResourceRequests: acidv1.ResourceDescription{CPU: "100m", Memory: "100Mi"},
ResourceLimits: acidv1.ResourceDescription{CPU: "1", Memory: "500Mi"},
},
},
{
subTest: "test generation of default resources for sidecar",
config: config.Config{
Resources: configResources,
PodManagementPolicy: "ordered_ready",
SetMemoryRequestToLimit: false,
},
pgSpec: acidv1.Postgresql{
ObjectMeta: metav1.ObjectMeta{
Name: clusterName,
Namespace: namespace,
},
Spec: acidv1.PostgresSpec{
Sidecars: []acidv1.Sidecar{
acidv1.Sidecar{
Name: sidecarName,
},
},
TeamID: "acid",
Volume: acidv1.Volume{
Size: "1G",
},
},
},
expectedResources: acidv1.Resources{
ResourceRequests: acidv1.ResourceDescription{CPU: "100m", Memory: "100Mi"},
ResourceLimits: acidv1.ResourceDescription{CPU: "1", Memory: "500Mi"},
},
},
{
subTest: "test generation of resources when only requests are defined in manifest",
config: config.Config{
Resources: configResources,
PodManagementPolicy: "ordered_ready",
SetMemoryRequestToLimit: false,
},
pgSpec: acidv1.Postgresql{
ObjectMeta: metav1.ObjectMeta{
Name: clusterName,
Namespace: namespace,
},
Spec: acidv1.PostgresSpec{
Resources: &acidv1.Resources{
ResourceRequests: acidv1.ResourceDescription{CPU: "50m", Memory: "50Mi"},
},
TeamID: "acid",
Volume: acidv1.Volume{
Size: "1G",
},
},
},
expectedResources: acidv1.Resources{
ResourceRequests: acidv1.ResourceDescription{CPU: "50m", Memory: "50Mi"},
ResourceLimits: acidv1.ResourceDescription{CPU: "1", Memory: "500Mi"},
},
},
{
subTest: "test generation of resources when only memory is defined in manifest",
config: config.Config{
Resources: configResources,
PodManagementPolicy: "ordered_ready",
SetMemoryRequestToLimit: false,
},
pgSpec: acidv1.Postgresql{
ObjectMeta: metav1.ObjectMeta{
Name: clusterName,
Namespace: namespace,
},
Spec: acidv1.PostgresSpec{
Resources: &acidv1.Resources{
ResourceRequests: acidv1.ResourceDescription{Memory: "100Mi"},
ResourceLimits: acidv1.ResourceDescription{Memory: "1Gi"},
},
TeamID: "acid",
Volume: acidv1.Volume{
Size: "1G",
},
},
},
expectedResources: acidv1.Resources{
ResourceRequests: acidv1.ResourceDescription{CPU: "100m", Memory: "100Mi"},
ResourceLimits: acidv1.ResourceDescription{CPU: "1", Memory: "1Gi"},
},
},
{
subTest: "test SetMemoryRequestToLimit flag",
config: config.Config{
Resources: configResources,
PodManagementPolicy: "ordered_ready",
SetMemoryRequestToLimit: true,
},
pgSpec: acidv1.Postgresql{
ObjectMeta: metav1.ObjectMeta{
Name: clusterName,
Namespace: namespace,
},
Spec: acidv1.PostgresSpec{
TeamID: "acid",
Volume: acidv1.Volume{
Size: "1G",
},
},
},
expectedResources: acidv1.Resources{
ResourceRequests: acidv1.ResourceDescription{CPU: "100m", Memory: "500Mi"},
ResourceLimits: acidv1.ResourceDescription{CPU: "1", Memory: "500Mi"},
},
},
{
subTest: "test SetMemoryRequestToLimit flag for sidecar container, too",
config: config.Config{
Resources: configResources,
PodManagementPolicy: "ordered_ready",
SetMemoryRequestToLimit: true,
},
pgSpec: acidv1.Postgresql{
ObjectMeta: metav1.ObjectMeta{
Name: clusterName,
Namespace: namespace,
},
Spec: acidv1.PostgresSpec{
Sidecars: []acidv1.Sidecar{
acidv1.Sidecar{
Name: sidecarName,
Resources: &acidv1.Resources{
ResourceRequests: acidv1.ResourceDescription{CPU: "10m", Memory: "10Mi"},
ResourceLimits: acidv1.ResourceDescription{CPU: "100m", Memory: "100Mi"},
},
},
},
TeamID: "acid",
Volume: acidv1.Volume{
Size: "1G",
},
},
},
expectedResources: acidv1.Resources{
ResourceRequests: acidv1.ResourceDescription{CPU: "10m", Memory: "100Mi"},
ResourceLimits: acidv1.ResourceDescription{CPU: "100m", Memory: "100Mi"},
},
},
{
subTest: "test generating resources from manifest",
config: config.Config{
Resources: configResources,
PodManagementPolicy: "ordered_ready",
SetMemoryRequestToLimit: false,
},
pgSpec: acidv1.Postgresql{
ObjectMeta: metav1.ObjectMeta{
Name: clusterName,
Namespace: namespace,
},
Spec: acidv1.PostgresSpec{
Resources: &acidv1.Resources{
ResourceRequests: acidv1.ResourceDescription{CPU: "10m", Memory: "250Mi"},
ResourceLimits: acidv1.ResourceDescription{CPU: "400m", Memory: "800Mi"},
},
TeamID: "acid",
Volume: acidv1.Volume{
Size: "1G",
},
},
},
expectedResources: acidv1.Resources{
ResourceRequests: acidv1.ResourceDescription{CPU: "10m", Memory: "250Mi"},
ResourceLimits: acidv1.ResourceDescription{CPU: "400m", Memory: "800Mi"},
},
},
{
subTest: "test enforcing min cpu and memory limit",
config: config.Config{
Resources: configResources,
PodManagementPolicy: "ordered_ready",
SetMemoryRequestToLimit: false,
},
pgSpec: acidv1.Postgresql{
ObjectMeta: metav1.ObjectMeta{
Name: clusterName,
Namespace: namespace,
},
Spec: acidv1.PostgresSpec{
Resources: &acidv1.Resources{
ResourceRequests: acidv1.ResourceDescription{CPU: "100m", Memory: "100Mi"},
ResourceLimits: acidv1.ResourceDescription{CPU: "200m", Memory: "200Mi"},
},
TeamID: "acid",
Volume: acidv1.Volume{
Size: "1G",
},
},
},
expectedResources: acidv1.Resources{
ResourceRequests: acidv1.ResourceDescription{CPU: "100m", Memory: "100Mi"},
ResourceLimits: acidv1.ResourceDescription{CPU: "250m", Memory: "250Mi"},
},
},
{
subTest: "test min cpu and memory limit are not enforced on sidecar",
config: config.Config{
Resources: configResources,
PodManagementPolicy: "ordered_ready",
SetMemoryRequestToLimit: false,
},
pgSpec: acidv1.Postgresql{
ObjectMeta: metav1.ObjectMeta{
Name: clusterName,
Namespace: namespace,
},
Spec: acidv1.PostgresSpec{
Sidecars: []acidv1.Sidecar{
acidv1.Sidecar{
Name: sidecarName,
Resources: &acidv1.Resources{
ResourceRequests: acidv1.ResourceDescription{CPU: "10m", Memory: "10Mi"},
ResourceLimits: acidv1.ResourceDescription{CPU: "100m", Memory: "100Mi"},
},
},
},
TeamID: "acid",
Volume: acidv1.Volume{
Size: "1G",
},
},
},
expectedResources: acidv1.Resources{
ResourceRequests: acidv1.ResourceDescription{CPU: "10m", Memory: "10Mi"},
ResourceLimits: acidv1.ResourceDescription{CPU: "100m", Memory: "100Mi"},
},
},
}
for _, tt := range tests {
var cluster = New(
Config{
OpConfig: tt.config,
}, client, tt.pgSpec, logger, newEventRecorder)
cluster.Name = clusterName
cluster.Namespace = namespace
_, err := cluster.createStatefulSet()
if k8sutil.ResourceAlreadyExists(err) {
err = cluster.syncStatefulSet()
}
assert.NoError(t, err)
containers := cluster.Statefulset.Spec.Template.Spec.Containers
clusterResources, err := parseResourceRequirements(containers[0].Resources)
if len(containers) > 1 {
clusterResources, err = parseResourceRequirements(containers[1].Resources)
}
assert.NoError(t, err)
if !reflect.DeepEqual(tt.expectedResources, clusterResources) {
t.Errorf("%s - %s: expected %#v but got %#v", testName, tt.subTest, tt.expectedResources, clusterResources)
}
}
}
func TestGenerateCapabilities(t *testing.T) {
testName := "TestGenerateCapabilities"


@@ -76,11 +76,6 @@ func (c *Cluster) Sync(newSpec *acidv1.Postgresql) error {
}
}
if err = c.enforceMinResourceLimits(&c.Spec); err != nil {
err = fmt.Errorf("could not enforce minimum resource limits: %v", err)
return err
}
c.logger.Debug("syncing statefulsets")
if err = c.syncStatefulSet(); err != nil {
if !k8sutil.ResourceAlreadyExists(err) {


@@ -594,3 +594,15 @@ func trimCronjobName(name string) string {
}
return name
}
func parseResourceRequirements(resourcesRequirement v1.ResourceRequirements) (acidv1.Resources, error) {
var resources acidv1.Resources
resourcesJSON, err := json.Marshal(resourcesRequirement)
if err != nil {
return acidv1.Resources{}, fmt.Errorf("could not marshal K8s resources requirements")
}
if err = json.Unmarshal(resourcesJSON, &resources); err != nil {
return acidv1.Resources{}, fmt.Errorf("could not unmarshal K8s resources requirements into acidv1.Resources struct")
}
return resources, nil
}