Merge branch 'master' into add-with-admin-option

Sergey Dudoladov 2018-12-21 16:53:18 +01:00
commit 9eaab8d130
12 changed files with 179 additions and 35 deletions

View File

@ -96,7 +96,19 @@ Those are parameters grouped directly under the `spec` key in the manifest.
that should be assigned to the cluster pods. When not specified, the value
is taken from the `pod_priority_class_name` operator parameter, if not set
then the default priority class is taken. The priority class itself must be defined in advance.
* **enableShmVolume**
Start a database pod without limitations on shm memory. By default, Docker
limits `/dev/shm` to `64M` (see e.g. the [docker
issue](https://github.com/docker-library/postgres/issues/416)), which may not
be enough if PostgreSQL uses parallel workers heavily. If this option is
present and set to `true`, a new tmpfs volume is mounted into the target
database pod to remove this limitation. If it is not present, the decision
about mounting the volume is based on the operator configuration
(`enable_shm_volume`, which is `true` by default). If it is present and set
to `false`, no volume is mounted no matter how the operator was configured
(so you can override the operator configuration). The sketch below
illustrates this resolution rule.
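To make the three-way switch concrete, here is a minimal Go sketch of the resolution rule; it mirrors the `mountShmVolumeNeeded` helper added in this commit, with the surrounding `main` purely illustrative:

```go
package main

import "fmt"

// resolveShmVolume mirrors the precedence rule: an explicit
// enableShmVolume value from the Postgres manifest wins; when the
// manifest omits the option (a nil pointer), the operator-level
// enable_shm_volume setting decides.
func resolveShmVolume(manifest *bool, operatorSetting bool) bool {
	if manifest != nil {
		return *manifest
	}
	return operatorSetting
}

func main() {
	off := false
	fmt.Println(resolveShmVolume(nil, true))  // true: operator setting applies
	fmt.Println(resolveShmVolume(&off, true)) // false: manifest overrides the operator
}
```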
## Postgres parameters
Those parameters are grouped under the `postgresql` top-level key.
@ -112,6 +124,7 @@ Those parameters are grouped under the `postgresql` top-level key.
cluster. Optional (Spilo automatically sets reasonable defaults for
parameters like work_mem or max_connections).
## Patroni parameters
Those parameters are grouped under the `patroni` top-level key. See the [patroni

View File

@ -224,6 +224,14 @@ CRD-based configuration.
* **set_memory_request_to_limit**
Set `memory_request` to `memory_limit` for all Postgres clusters (the default value is also increased). This prevents certain cases of memory overcommitment at the cost of overprovisioning memory and potential scheduling problems for containers with high memory limits due to the lack of memory on Kubernetes cluster nodes. This affects all containers (Postgres, Scalyr sidecar, and other sidecars). The default is `false`. A sketch of the implied clamp follows this parameter list.
* **enable_shm_volume**
Instructs the operator to start any new database pod without limitations on
shm memory. If this option is enabled, a new tmpfs volume is mounted into
the target database pod to remove the shm memory limitation (see e.g. the
[docker issue](https://github.com/docker-library/postgres/issues/416)). This
option is global for an operator object and can be overridden by the
`enableShmVolume` parameter in the Postgres manifest; the manifest value,
when present, takes precedence. The default is `true`.
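Returning to `set_memory_request_to_limit` above: a hedged sketch of the clamp it implies, using Kubernetes `resource.Quantity` values (the helper is illustrative only, not the operator's actual code):

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

// raiseRequestToLimit illustrates set_memory_request_to_limit:
// if the memory request is below the limit, lift it to the limit.
func raiseRequestToLimit(request, limit resource.Quantity) resource.Quantity {
	if request.Cmp(limit) < 0 {
		return limit
	}
	return request
}

func main() {
	req := resource.MustParse("1Gi")
	lim := resource.MustParse("2Gi")
	got := raiseRequestToLimit(req, lim)
	fmt.Println(got.String()) // 2Gi
}
```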
## Operator timeouts
This set of parameters defines various timeouts related to some operator

View File

@ -13,12 +13,13 @@ spec:
- superuser
- createdb
enableMasterLoadBalancer: true
enableReplicaLoadBalancer: true
allowedSourceRanges: # load balancers' source ranges for both master and replica services
- 127.0.0.1/32
databases:
foo: zalando
# Expert section
enableShmVolume: true
postgresql:
version: "10"
parameters:

View File

@ -51,6 +51,7 @@ type PostgresSpec struct {
Tolerations []v1.Toleration `json:"tolerations,omitempty"`
Sidecars []Sidecar `json:"sidecars,omitempty"`
PodPriorityClassName string `json:"pod_priority_class_name,omitempty"`
ShmVolume *bool `json:"enableShmVolume,omitempty"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

View File

@ -499,7 +499,7 @@ func TestMarshal(t *testing.T) {
t.Errorf("Marshal error: %v", err)
}
if !bytes.Equal(m, tt.marshal) {
t.Errorf("Marshal Postgresql expected: %q, got: %q", string(tt.marshal), string(m))
t.Errorf("Marshal Postgresql \nexpected: %q, \ngot: %q", string(tt.marshal), string(m))
}
}
}
@ -507,11 +507,11 @@ func TestMarshal(t *testing.T) {
func TestPostgresMeta(t *testing.T) {
for _, tt := range unmarshalCluster {
if a := tt.out.GetObjectKind(); a != &tt.out.TypeMeta {
t.Errorf("GetObjectKindMeta expected: %v, got: %v", tt.out.TypeMeta, a)
t.Errorf("GetObjectKindMeta \nexpected: %v, \ngot: %v", tt.out.TypeMeta, a)
}
if a := tt.out.GetObjectMeta(); reflect.DeepEqual(a, tt.out.ObjectMeta) {
t.Errorf("GetObjectMeta expected: %v, got: %v", tt.out.ObjectMeta, a)
t.Errorf("GetObjectMeta \nexpected: %v, \ngot: %v", tt.out.ObjectMeta, a)
}
}
}

View File

@ -18,6 +18,7 @@ import (
acidv1 "github.com/zalando-incubator/postgres-operator/pkg/apis/acid.zalan.do/v1"
"github.com/zalando-incubator/postgres-operator/pkg/spec"
"github.com/zalando-incubator/postgres-operator/pkg/util"
"github.com/zalando-incubator/postgres-operator/pkg/util/config"
"github.com/zalando-incubator/postgres-operator/pkg/util/constants"
"k8s.io/apimachinery/pkg/labels"
)
@ -396,6 +397,16 @@ func generateSidecarContainers(sidecars []acidv1.Sidecar,
return nil, nil
}
// Check whether we are requested to mount a shm volume,
// taking into account that the PostgreSQL manifest takes precedence.
func mountShmVolumeNeeded(opConfig config.Config, pgSpec *acidv1.PostgresSpec) bool {
if pgSpec.ShmVolume != nil {
return *pgSpec.ShmVolume
}
return opConfig.ShmVolume
}
func generatePodTemplate(
namespace string,
labels labels.Set,
@ -407,6 +418,7 @@ func generatePodTemplate(
podServiceAccountName string,
kubeIAMRole string,
priorityClassName string,
shmVolume bool,
) (*v1.PodTemplateSpec, error) {
terminateGracePeriodSeconds := terminateGracePeriod
@ -420,6 +432,10 @@ func generatePodTemplate(
Tolerations: *tolerationsSpec,
}
if shmVolume {
addShmVolume(&podSpec)
}
if nodeAffinity != nil {
podSpec.Affinity = nodeAffinity
}
@ -733,7 +749,12 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*v1beta1.State
volumeMounts := generateVolumeMounts()
// generate the spilo container
spiloContainer := generateSpiloContainer(c.containerName(), &effectiveDockerImage, resourceRequirements, spiloEnvVars, volumeMounts)
spiloContainer := generateSpiloContainer(c.containerName(),
&effectiveDockerImage,
resourceRequirements,
spiloEnvVars,
volumeMounts,
)
// resolve conflicts between operator-global and per-cluster sidecars
sideCars := c.mergeSidecars(spec.Sidecars)
@ -775,7 +796,8 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*v1beta1.State
int64(c.OpConfig.PodTerminateGracePeriod.Seconds()),
c.OpConfig.PodServiceAccountName,
c.OpConfig.KubeIAMRole,
effectivePodPriorityClassName); err != nil {
effectivePodPriorityClassName,
mountShmVolumeNeeded(c.OpConfig, spec)); err != nil {
return nil, fmt.Errorf("could not generate pod template: %v", err)
}
@ -882,6 +904,32 @@ func (c *Cluster) getNumberOfInstances(spec *acidv1.PostgresSpec) int32 {
return newcur
}
// To avoid issues with the limited /dev/shm inside a Docker environment,
// where PostgreSQL may fail to allocate enough DSA segments, we can
// mount an extra in-memory volume.
//
// see https://docs.okd.io/latest/dev_guide/shared_memory.html
func addShmVolume(podSpec *v1.PodSpec) {
volumes := append(podSpec.Volumes, v1.Volume{
Name: constants.ShmVolumeName,
VolumeSource: v1.VolumeSource{
EmptyDir: &v1.EmptyDirVolumeSource{
Medium: "Memory",
},
},
})
pgIdx := constants.PostgresContainerIdx
mounts := append(podSpec.Containers[pgIdx].VolumeMounts,
v1.VolumeMount{
Name: constants.ShmVolumeName,
MountPath: constants.ShmVolumePath,
})
podSpec.Containers[pgIdx].VolumeMounts = mounts
podSpec.Volumes = volumes
}
func generatePersistentVolumeClaimTemplate(volumeSize, volumeStorageClass string) (*v1.PersistentVolumeClaim, error) {
var storageClassName *string

View File

@ -1,8 +1,11 @@
package cluster
import (
"k8s.io/api/core/v1"
acidv1 "github.com/zalando-incubator/postgres-operator/pkg/apis/acid.zalan.do/v1"
"github.com/zalando-incubator/postgres-operator/pkg/util/config"
"github.com/zalando-incubator/postgres-operator/pkg/util/constants"
"github.com/zalando-incubator/postgres-operator/pkg/util/k8sutil"
"testing"
)
@ -75,3 +78,54 @@ func TestCreateLoadBalancerLogic(t *testing.T) {
}
}
}
func TestShmVolume(t *testing.T) {
testName := "TestShmVolume"
tests := []struct {
subTest string
podSpec *v1.PodSpec
shmPos int
}{
{
subTest: "empty PodSpec",
podSpec: &v1.PodSpec{
Volumes: []v1.Volume{},
Containers: []v1.Container{
v1.Container{
VolumeMounts: []v1.VolumeMount{},
},
},
},
shmPos: 0,
},
{
subTest: "non empty PodSpec",
podSpec: &v1.PodSpec{
Volumes: []v1.Volume{v1.Volume{}},
Containers: []v1.Container{
v1.Container{
VolumeMounts: []v1.VolumeMount{
v1.VolumeMount{},
},
},
},
},
shmPos: 1,
},
}
for _, tt := range tests {
addShmVolume(tt.podSpec)
volumeName := tt.podSpec.Volumes[tt.shmPos].Name
volumeMountName := tt.podSpec.Containers[0].VolumeMounts[tt.shmPos].Name
if volumeName != constants.ShmVolumeName {
t.Errorf("%s %s: Expected volume %s was not created, have %s instead",
testName, tt.subTest, constants.ShmVolumeName, volumeName)
}
if volumeMountName != constants.ShmVolumeName {
t.Errorf("%s %s: Expected mount %s was not created, have %s instead",
testName, tt.subTest, constants.ShmVolumeName, volumeMountName)
}
}
}
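// Not part of this commit: a companion sketch testing the
// mountShmVolumeNeeded precedence rule, reusing the config and acidv1
// imports added above. It assumes config.Config embeds Resources, as
// the opConfig.ShmVolume access in k8sres.go suggests.
func TestShmVolumePrecedence(t *testing.T) {
	manifestOff := false
	opConfig := config.Config{Resources: config.Resources{ShmVolume: true}}
	spec := &acidv1.PostgresSpec{ShmVolume: &manifestOff}
	if mountShmVolumeNeeded(opConfig, spec) {
		t.Errorf("manifest enableShmVolume=false must override operator enable_shm_volume=true")
	}
	spec.ShmVolume = nil
	if !mountShmVolumeNeeded(opConfig, spec) {
		t.Errorf("with no manifest setting, operator enable_shm_volume=true must apply")
	}
}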

View File

@ -7,7 +7,10 @@ import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/watch"
"fmt"
"github.com/zalando-incubator/postgres-operator/pkg/cluster"
"github.com/zalando-incubator/postgres-operator/pkg/spec"
"github.com/zalando-incubator/postgres-operator/pkg/util"
)
@ -55,15 +58,16 @@ func (c *Controller) nodeUpdate(prev, cur interface{}) {
return
}
if util.MapContains(nodeCur.Labels, map[string]string{"master": "true"}) {
if !c.nodeIsReady(nodePrev) {
c.logger.Debugf("The decommissioned node %v should have already triggered master pod migration. Previous k8s-reported state of the node: %v", util.NameFromMeta(nodePrev.ObjectMeta), nodePrev)
return
}
// do nothing if the node should have already triggered an update or
// if only one of the label and the unschedulability criteria are met.
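// (that is, proceed only when the node was ready before this update
// and is no longer ready now)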
if !c.nodeIsReady(nodePrev) || c.nodeIsReady(nodeCur) {
if c.nodeIsReady(nodeCur) {
c.logger.Debugf("The decommissioned node %v become schedulable again. Current k8s-reported state of the node: %v", util.NameFromMeta(nodeCur.ObjectMeta), nodeCur)
return
}
c.moveMasterPodsOffNode(nodeCur)
}
@ -73,8 +77,9 @@ func (c *Controller) nodeIsReady(node *v1.Node) bool {
}
func (c *Controller) moveMasterPodsOffNode(node *v1.Node) {
nodeName := util.NameFromMeta(node.ObjectMeta)
c.logger.Infof("moving pods: node %q became unschedulable and does not have a ready label: %q",
c.logger.Infof("moving pods: node %q became unschedulable and does not have a ready label %q",
nodeName, c.opConfig.NodeReadinessLabel)
opts := metav1.ListOptions{
@ -82,7 +87,7 @@ func (c *Controller) moveMasterPodsOffNode(node *v1.Node) {
}
podList, err := c.KubeClient.Pods(c.opConfig.WatchedNamespace).List(opts)
if err != nil {
c.logger.Errorf("could not fetch list of the pods: %v", err)
c.logger.Errorf("could not fetch the list of Spilo pods: %v", err)
return
}
@ -93,17 +98,25 @@ func (c *Controller) moveMasterPodsOffNode(node *v1.Node) {
}
}
movedMasterPods := 0
movableMasterPods := make(map[*v1.Pod]*cluster.Cluster)
unmovablePods := make(map[spec.NamespacedName]string)
clusters := make(map[*cluster.Cluster]bool)
masterPods := make(map[*v1.Pod]*cluster.Cluster)
movedPods := 0
for _, pod := range nodePods {
podName := util.NameFromMeta(pod.ObjectMeta)
role, ok := pod.Labels[c.opConfig.PodRoleLabel]
if !ok || cluster.PostgresRole(role) != cluster.Master {
if !ok {
c.logger.Warningf("could not move pod %q: pod has no role", podName)
}
if !ok {
// pods with an unknown role cannot be safely moved to another node
unmovablePods[podName] = fmt.Sprintf("could not move pod %q from node %q: pod has no role label %q", podName, nodeName, c.opConfig.PodRoleLabel)
continue
}
// deployments can transparently re-create replica pods, so we do not move them
if cluster.PostgresRole(role) == cluster.Replica {
continue
}
@ -113,7 +126,7 @@ func (c *Controller) moveMasterPodsOffNode(node *v1.Node) {
cl, ok := c.clusters[clusterName]
c.clustersMu.RUnlock()
if !ok {
c.logger.Warningf("could not move pod %q: pod does not belong to a known cluster", podName)
unmovablePods[podName] = fmt.Sprintf("could not move master pod %q from node %q: pod belongs to an unknown Postgres cluster %q", podName, nodeName, clusterName)
continue
}
@ -121,20 +134,20 @@ func (c *Controller) moveMasterPodsOffNode(node *v1.Node) {
clusters[cl] = true
}
masterPods[pod] = cl
movableMasterPods[pod] = cl
}
for cl := range clusters {
cl.Lock()
}
for pod, cl := range masterPods {
podName := util.NameFromMeta(pod.ObjectMeta)
for pod, cl := range movableMasterPods {
if err := cl.MigrateMasterPod(podName); err != nil {
c.logger.Errorf("could not move master pod %q: %v", podName, err)
podName := util.NameFromMeta(pod.ObjectMeta)
if err := cl.MigrateMasterPod(podName); err == nil {
movedMasterPods++
} else {
movedPods++
unmovablePods[podName] = fmt.Sprintf("could not move master pod %q from node %q: %v", podName, nodeName, err)
}
}
@ -142,15 +155,16 @@ func (c *Controller) moveMasterPodsOffNode(node *v1.Node) {
cl.Unlock()
}
totalPods := len(masterPods)
c.logger.Infof("%d/%d master pods have been moved out from the %q node",
movedPods, totalPods, nodeName)
if leftPods := totalPods - movedPods; leftPods > 0 {
c.logger.Warnf("could not move master %d/%d pods from the %q node",
leftPods, totalPods, nodeName)
if leftPods := len(unmovablePods); leftPods > 0 {
c.logger.Warnf("could not move %d master or unknown role pods from the node %q, you may have to delete them manually",
leftPods, nodeName)
for _, reason := range unmovablePods {
c.logger.Warning(reason)
}
}
c.logger.Infof("%d master pods have been moved out from the node %q", movedMasterPods, nodeName)
}
func (c *Controller) nodeDelete(obj interface{}) {

View File

@ -38,6 +38,7 @@ type Resources struct {
NodeReadinessLabel map[string]string `name:"node_readiness_label" default:""`
MaxInstances int32 `name:"max_instances" default:"-1"`
MinInstances int32 `name:"min_instances" default:"-1"`
ShmVolume bool `name:"enable_shm_volume" default:"true"`
}
// Auth describes authentication specific configuration parameters

View File

@ -5,6 +5,7 @@ import "time"
// General kubernetes-related constants
const (
PostgresContainerName = "postgres"
PostgresContainerIdx = 0
K8sAPIPath = "/apis"
StatefulsetDeletionInterval = 1 * time.Second
StatefulsetDeletionTimeout = 30 * time.Second

View File

@ -10,4 +10,7 @@ const (
PostgresConnectRetryTimeout = 2 * time.Minute
PostgresConnectTimeout = 15 * time.Second
ShmVolumeName = "dshm"
ShmVolumePath = "/dev/shm"
)

View File

@ -121,7 +121,7 @@ function deploy_self_built_image() {
# update the tag in the postgres operator conf
# since the image with this tag already exists on the machine,
# docker should not attempt to fetch it from the registry due to imagePullPolicy
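# the extra s/smoke-tested-// expression below drops the smoke-tested- prefix
# from the image name, presumably so the manifest points at the locally built image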
sed --expression "s/\(image\:.*\:\).*$/\1$TAG/" manifests/postgres-operator.yaml > "$PATH_TO_LOCAL_OPERATOR_MANIFEST"
sed --expression "s/\(image\:.*\:\).*$/\1$TAG/; s/smoke-tested-//" manifests/postgres-operator.yaml > "$PATH_TO_LOCAL_OPERATOR_MANIFEST"
retry "kubectl create -f \"$PATH_TO_LOCAL_OPERATOR_MANIFEST\"" "attempt to create $PATH_TO_LOCAL_OPERATOR_MANIFEST resource"
}