pick first container if postgres is not found (#1505)

* pick first container if postgres is not found

* minor change

Felix Kunde, 2021-05-28 11:44:10 +02:00 (committed by GitHub)
parent 7884af2d59
commit dd9c3907b7
6 changed files with 45 additions and 36 deletions

@@ -462,7 +462,7 @@ func (c *Cluster) compareStatefulSetWith(statefulSet *appsv1.StatefulSet) *compa
 	if len(c.Statefulset.Spec.Template.Spec.Volumes) != len(statefulSet.Spec.Template.Spec.Volumes) {
 		needsReplace = true
-		reasons = append(reasons, fmt.Sprintf("new statefulset's Volumes contains different number of volumes to the old one"))
+		reasons = append(reasons, "new statefulset's volumes contains different number of volumes to the old one")
 	}
 	// we assume any change in priority happens by rolling out a new priority class
@@ -476,7 +476,9 @@ func (c *Cluster) compareStatefulSetWith(statefulSet *appsv1.StatefulSet) *compa
 	// lazy Spilo update: modify the image in the statefulset itself but let its pods run with the old image
 	// until they are re-created for other reasons, for example node rotation
-	if c.OpConfig.EnableLazySpiloUpgrade && !reflect.DeepEqual(c.Statefulset.Spec.Template.Spec.Containers[0].Image, statefulSet.Spec.Template.Spec.Containers[0].Image) {
+	effectivePodImage := getPostgresContainer(&c.Statefulset.Spec.Template.Spec).Image
+	desiredImage := getPostgresContainer(&statefulSet.Spec.Template.Spec).Image
+	if c.OpConfig.EnableLazySpiloUpgrade && !reflect.DeepEqual(effectivePodImage, desiredImage) {
 		needsReplace = true
 		reasons = append(reasons, "lazy Spilo update: new statefulset's pod image does not match the current one")
 	}
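
For context: the old comparison hard-coded `Containers[0]` as the Spilo container, which silently compares the wrong image once a sidecar is listed first in the pod template. A minimal sketch of that failure mode (container names and images are made up; the constant stands in for `constants.PostgresContainerName`):

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// Stand-in for constants.PostgresContainerName ("postgres" in the operator).
const postgresContainerName = "postgres"

func main() {
	// A pod template where a sidecar happens to come first in the list,
	// e.g. one injected by a mutating webhook or a sidecar config option.
	spec := v1.PodSpec{
		Containers: []v1.Container{
			{Name: "log-shipper", Image: "fluentd:v1.12"},
			{Name: postgresContainerName, Image: "spilo-13:2.0-p7"},
		},
	}

	fmt.Println("by index:", spec.Containers[0].Image) // fluentd:v1.12 -- the wrong image
	for _, c := range spec.Containers {
		if c.Name == postgresContainerName {
			fmt.Println("by name:", c.Image) // spilo-13:2.0-p7 -- what should be compared
		}
	}
}
```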

@@ -12,6 +12,7 @@ import (
 	"k8s.io/client-go/tools/remotecommand"
 	"github.com/zalando/postgres-operator/pkg/spec"
+	"github.com/zalando/postgres-operator/pkg/util/constants"
 )

 //ExecCommand executes arbitrary command inside the pod
@@ -31,14 +32,14 @@ func (c *Cluster) ExecCommand(podName *spec.NamespacedName, command ...string) (
 	// iterate through all containers looking for the one running PostgreSQL.
 	targetContainer := -1
 	for i, cr := range pod.Spec.Containers {
-		if cr.Name == c.containerName() {
+		if cr.Name == constants.PostgresContainerName {
 			targetContainer = i
 			break
 		}
 	}

 	if targetContainer < 0 {
-		return "", fmt.Errorf("could not find %s container to exec to", c.containerName())
+		return "", fmt.Errorf("could not find %s container to exec to", constants.PostgresContainerName)
 	}

 	req := c.KubeClient.RESTClient.Post().
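
The lookup in `ExecCommand` is the same name-based scan. If it were ever needed elsewhere, it could be factored out roughly like this (`containerIndexByName` is illustrative, not part of the operator; `v1` is `k8s.io/api/core/v1`):

```go
// containerIndexByName returns the index of the named container in the pod
// spec, or -1 when no container carries that name.
func containerIndexByName(spec *v1.PodSpec, name string) int {
	for i, c := range spec.Containers {
		if c.Name == name {
			return i
		}
	}
	return -1
}
```

Usage would mirror the error path above: `if idx := containerIndexByName(&pod.Spec, constants.PostgresContainerName); idx < 0 { return "", fmt.Errorf(...) }`.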

@@ -66,10 +66,6 @@ type spiloConfiguration struct {
 	Bootstrap pgBootstrap `json:"bootstrap"`
 }

-func (c *Cluster) containerName() string {
-	return constants.PostgresContainerName
-}
-
 func (c *Cluster) statefulSetName() string {
 	return c.Name
 }
@@ -1157,10 +1153,10 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
 	c.logger.Debugf("Generating Spilo container, environment variables")
 	c.logger.Debugf("%v", spiloEnvVars)

-	spiloContainer := generateContainer(c.containerName(),
+	spiloContainer := generateContainer(constants.PostgresContainerName,
 		&effectiveDockerImage,
 		resourceRequirements,
-		deduplicateEnvVars(spiloEnvVars, c.containerName(), c.logger),
+		deduplicateEnvVars(spiloEnvVars, constants.PostgresContainerName, c.logger),
 		volumeMounts,
 		c.OpConfig.Resources.SpiloPrivileged,
 		c.OpConfig.Resources.SpiloAllowPrivilegeEscalation,
@@ -1392,6 +1388,9 @@ func (c *Cluster) getNumberOfInstances(spec *acidv1.PostgresSpec) int32 {
 //
 // see https://docs.okd.io/latest/dev_guide/shared_memory.html
 func addShmVolume(podSpec *v1.PodSpec) {
+	postgresContainerIdx := 0
+
 	volumes := append(podSpec.Volumes, v1.Volume{
 		Name: constants.ShmVolumeName,
 		VolumeSource: v1.VolumeSource{
@@ -1403,16 +1402,18 @@ func addShmVolume(podSpec *v1.PodSpec) {
 	for i, container := range podSpec.Containers {
 		if container.Name == constants.PostgresContainerName {
-			mounts := append(container.VolumeMounts,
-				v1.VolumeMount{
-					Name:      constants.ShmVolumeName,
-					MountPath: constants.ShmVolumePath,
-				})
-			podSpec.Containers[i].VolumeMounts = mounts
+			postgresContainerIdx = i
 		}
 	}

+	mounts := append(podSpec.Containers[postgresContainerIdx].VolumeMounts,
+		v1.VolumeMount{
+			Name:      constants.ShmVolumeName,
+			MountPath: constants.ShmVolumePath,
+		})
+	podSpec.Containers[postgresContainerIdx].VolumeMounts = mounts
+
 	podSpec.Volumes = volumes
 }
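
Note the fallback this introduces: `postgresContainerIdx` stays 0 when no container is named postgres, so the shared-memory mount lands on the first container rather than being dropped. A hypothetical test sketch, assuming it sits in the same package as the patched `addShmVolume`:

```go
func TestShmVolumeFallsBackToFirstContainer(t *testing.T) {
	spec := &v1.PodSpec{
		Containers: []v1.Container{{Name: "sidecar-only"}}, // no "postgres" container
	}
	addShmVolume(spec)

	// postgresContainerIdx stayed 0, so the first container received the mount.
	mounts := spec.Containers[0].VolumeMounts
	if len(mounts) != 1 || mounts[0].Name != constants.ShmVolumeName {
		t.Errorf("expected shm mount on the first container, got %v", mounts)
	}
}
```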
@@ -1458,11 +1459,8 @@ func (c *Cluster) addAdditionalVolumes(podSpec *v1.PodSpec,
 		// if no target container is defined assign it to postgres container
 		if len(additionalVolume.TargetContainers) == 0 {
-			for j := range podSpec.Containers {
-				if podSpec.Containers[j].Name == c.containerName() {
-					additionalVolumes[i].TargetContainers = []string{c.containerName()}
-				}
-			}
+			postgresContainer := getPostgresContainer(podSpec)
+			additionalVolumes[i].TargetContainers = []string{postgresContainer.Name}
 		}

 		for _, target := range additionalVolume.TargetContainers {
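
This also changes behavior slightly: the old loop left `TargetContainers` empty when no container was named postgres, whereas the helper now always yields a target, falling back to the first container's name. A small sketch of the defaulting step (same package assumed; container names are illustrative):

```go
podSpec := &v1.PodSpec{Containers: []v1.Container{
	{Name: "metrics-exporter"}, // no "postgres" container in this spec
}}

targets := []string{} // nothing specified under additionalVolumes.targetContainers
if len(targets) == 0 {
	targets = []string{getPostgresContainer(podSpec).Name} // ["metrics-exporter"]
}
```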

@@ -413,7 +413,6 @@ func TestShmVolume(t *testing.T) {
 				Volumes: []v1.Volume{},
 				Containers: []v1.Container{
 					{
-						Name:         "postgres",
 						VolumeMounts: []v1.VolumeMount{},
 					},
 				},
@@ -438,9 +437,10 @@ func TestShmVolume(t *testing.T) {
 	}
 	for _, tt := range tests {
 		addShmVolume(tt.podSpec)
+		postgresContainer := getPostgresContainer(tt.podSpec)

 		volumeName := tt.podSpec.Volumes[tt.shmPos].Name
-		volumeMountName := tt.podSpec.Containers[0].VolumeMounts[tt.shmPos].Name
+		volumeMountName := postgresContainer.VolumeMounts[tt.shmPos].Name

 		if volumeName != constants.ShmVolumeName {
 			t.Errorf("%s %s: Expected volume %s was not created, have %s instead",
@@ -612,8 +612,9 @@ func TestSecretVolume(t *testing.T) {
 	for _, tt := range tests {
 		additionalSecretMount := "aws-iam-s3-role"
 		additionalSecretMountPath := "/meta/credentials"
-		numMounts := len(tt.podSpec.Containers[0].VolumeMounts)
+		postgresContainer := getPostgresContainer(tt.podSpec)
+		numMounts := len(postgresContainer.VolumeMounts)

 		addSecretVolume(tt.podSpec, additionalSecretMount, additionalSecretMountPath)
@@ -633,7 +634,8 @@ func TestSecretVolume(t *testing.T) {
 			}
 		}
-		numMountsCheck := len(tt.podSpec.Containers[0].VolumeMounts)
+		postgresContainer = getPostgresContainer(tt.podSpec)
+		numMountsCheck := len(postgresContainer.VolumeMounts)

 		if numMountsCheck != numMounts+1 {
 			t.Errorf("Unexpected number of VolumeMounts: got %v instead of %v",
@@ -865,7 +867,8 @@ func testEnvs(cluster *Cluster, podSpec *v1.PodTemplateSpec, role PostgresRole)
 		"CONNECTION_POOLER_PORT": false,
 	}

-	envs := podSpec.Spec.Containers[0].Env
+	container := getPostgresContainer(&podSpec.Spec)
+	envs := container.Env
 	for _, env := range envs {
 		required[env.Name] = true
 	}
@@ -1045,14 +1048,15 @@ func TestTLS(t *testing.T) {
 	}
 	assert.Contains(t, sts.Spec.Template.Spec.Volumes, volume, "the pod gets a secret volume")

-	assert.Contains(t, sts.Spec.Template.Spec.Containers[0].VolumeMounts, v1.VolumeMount{
+	postgresContainer := getPostgresContainer(&sts.Spec.Template.Spec)
+	assert.Contains(t, postgresContainer.VolumeMounts, v1.VolumeMount{
 		MountPath: "/tls",
 		Name:      "my-secret",
 	}, "the volume gets mounted in /tls")
-	assert.Contains(t, sts.Spec.Template.Spec.Containers[0].Env, v1.EnvVar{Name: "SSL_CERTIFICATE_FILE", Value: "/tls/tls.crt"})
-	assert.Contains(t, sts.Spec.Template.Spec.Containers[0].Env, v1.EnvVar{Name: "SSL_PRIVATE_KEY_FILE", Value: "/tls/tls.key"})
-	assert.Contains(t, sts.Spec.Template.Spec.Containers[0].Env, v1.EnvVar{Name: "SSL_CA_FILE", Value: "/tls/ca.crt"})
+	assert.Contains(t, postgresContainer.Env, v1.EnvVar{Name: "SSL_CERTIFICATE_FILE", Value: "/tls/tls.crt"})
+	assert.Contains(t, postgresContainer.Env, v1.EnvVar{Name: "SSL_PRIVATE_KEY_FILE", Value: "/tls/tls.key"})
+	assert.Contains(t, postgresContainer.Env, v1.EnvVar{Name: "SSL_CA_FILE", Value: "/tls/ca.crt"})
 }

 func TestAdditionalVolume(t *testing.T) {
@@ -1147,7 +1151,7 @@ func TestAdditionalVolume(t *testing.T) {
 	}{
 		{
 			subTest:        "checking volume mounts of postgres container",
-			container:      cluster.containerName(),
+			container:      constants.PostgresContainerName,
 			expectedMounts: []string{"pgdata", "test1", "test3", "test4"},
 		},
 		{

@@ -357,8 +357,8 @@ func (c *Cluster) syncStatefulSet() error {
 		// and
 		// (b) some of the pods were not restarted when the lazy update was still in place
 		for _, pod := range pods {
-			effectivePodImage := c.getPostgresContainer(&pod.Spec).Image
-			stsImage := c.getPostgresContainer(&desiredSts.Spec.Template.Spec).Image
+			effectivePodImage := getPostgresContainer(&pod.Spec).Image
+			stsImage := getPostgresContainer(&desiredSts.Spec.Template.Spec).Image
 			if stsImage != effectivePodImage {
 				if err = c.markRollingUpdateFlagForPod(&pod, "pod not yet restarted due to lazy update"); err != nil {

@@ -227,13 +227,17 @@ func (c *Cluster) logServiceChanges(role PostgresRole, old, new *v1.Service, isU
 	}
 }

-func (c *Cluster) getPostgresContainer(podSpec *v1.PodSpec) v1.Container {
-	var pgContainer v1.Container
+func getPostgresContainer(podSpec *v1.PodSpec) (pgContainer v1.Container) {
 	for _, container := range podSpec.Containers {
 		if container.Name == constants.PostgresContainerName {
 			pgContainer = container
 		}
 	}

+	// if no postgres container was found, take the first one in the podSpec
+	if reflect.DeepEqual(pgContainer, v1.Container{}) && len(podSpec.Containers) > 0 {
+		pgContainer = podSpec.Containers[0]
+	}
 	return pgContainer
 }
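
A quick sketch exercising both paths of the new helper (same package assumed; names and images are illustrative):

```go
func TestGetPostgresContainerFallback(t *testing.T) {
	named := &v1.PodSpec{Containers: []v1.Container{
		{Name: "metrics-exporter"},
		{Name: constants.PostgresContainerName, Image: "spilo-13:2.0-p7"},
	}}
	if got := getPostgresContainer(named).Name; got != constants.PostgresContainerName {
		t.Errorf("expected the postgres container, got %q", got)
	}

	unnamed := &v1.PodSpec{Containers: []v1.Container{
		{Name: "metrics-exporter"},
	}}
	if got := getPostgresContainer(unnamed).Name; got != "metrics-exporter" {
		t.Errorf("expected the first-container fallback, got %q", got)
	}
}
```

Comparing against the zero value with `reflect.DeepEqual` works because the Kubernetes API requires every container to have a non-empty name, so an empty `pgContainer` can only mean the loop found nothing; testing `pgContainer.Name == ""` would be an equivalent, cheaper check.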