minReplicas <= desiredReplicas <= maxReplicas (#267)

* ensure that minReplicas <= desiredReplicas <= maxReplicas no matter which branch computed the value
* before this change, if the number of runners was much larger than maxReplicas, applying the scale-down factor could still yield a desired value > maxReplicas
* if resource constraints in the cluster cause runners to be permanently restarted, the runner count can grow between reconciliation rounds by more than the scale-down factor removes, so the number of runners keeps climbing even though it should be going down
* by always capping desiredReplicas at maxReplicas, such infinite scale-up loops are prevented (see the sketch below)
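
A minimal, self-contained Go sketch of the failure mode and the fix. The numbers are hypothetical, and clamp simply mirrors the unconditional bounds check added in the diff below:

package main

import "fmt"

// clamp mirrors the bounds check added in this commit: whatever branch
// produced desiredReplicas, the result is forced into
// [minReplicas, maxReplicas].
func clamp(desired, minReplicas, maxReplicas int) int {
	if desired < minReplicas {
		return minReplicas
	}
	if desired > maxReplicas {
		return maxReplicas
	}
	return desired
}

func main() {
	// Hypothetical scenario: restart loops have pushed the cluster to
	// 100 runners while maxReplicas is 10 and scaleDownFactor is 0.7.
	numRunners, scaleDownFactor := 100, 0.7
	minReplicas, maxReplicas := 1, 10

	// Old behavior: the scale-down branch only compared against
	// minReplicas, so the result was accepted even above the max.
	desired := int(float64(numRunners) * scaleDownFactor)
	fmt.Println(desired) // 70: still far above maxReplicas

	// New behavior: the final clamp caps the value at maxReplicas.
	fmt.Println(clamp(desired, minReplicas, maxReplicas)) // 10
}

With the final clamp in place, even a runaway runner count converges back to maxReplicas on the next reconciliation round.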
Johannes Nicolai 2021-01-22 02:11:21 +01:00 committed by GitHub
parent 563c79c1b9
commit 94e8c6ffbf
1 changed file with 9 additions and 12 deletions


@@ -219,22 +219,19 @@ func (r *HorizontalRunnerAutoscalerReconciler) calculateReplicasByPercentageRunn
 	var desiredReplicas int
 	fractionBusy := float64(numRunnersBusy) / float64(numRunners)
 	if fractionBusy >= scaleUpThreshold {
-		scaleUpReplicas := int(math.Ceil(float64(numRunners) * scaleUpFactor))
-		if scaleUpReplicas > maxReplicas {
-			desiredReplicas = maxReplicas
-		} else {
-			desiredReplicas = scaleUpReplicas
-		}
+		desiredReplicas = int(math.Ceil(float64(numRunners) * scaleUpFactor))
 	} else if fractionBusy < scaleDownThreshold {
-		scaleDownReplicas := int(float64(numRunners) * scaleDownFactor)
-		if scaleDownReplicas < minReplicas {
-			desiredReplicas = minReplicas
-		} else {
-			desiredReplicas = scaleDownReplicas
-		}
+		desiredReplicas = int(float64(numRunners) * scaleDownFactor)
 	} else {
 		desiredReplicas = *rd.Spec.Replicas
 	}
+	if desiredReplicas < minReplicas {
+		desiredReplicas = minReplicas
+	} else if desiredReplicas > maxReplicas {
+		desiredReplicas = maxReplicas
+	}
 	r.Log.V(1).Info(
 		"Calculated desired replicas",