get postgres container by name, not index
parent af5378eea5
commit 25af9718f3
@@ -12,7 +12,6 @@ import (
 	"k8s.io/client-go/tools/remotecommand"
 
 	"github.com/zalando/postgres-operator/pkg/spec"
-	"github.com/zalando/postgres-operator/pkg/util/constants"
 )
 
 //ExecCommand executes arbitrary command inside the pod
@@ -32,14 +31,14 @@ func (c *Cluster) ExecCommand(podName *spec.NamespacedName, command ...string) (
 	// iterate through all containers looking for the one running PostgreSQL.
 	targetContainer := -1
 	for i, cr := range pod.Spec.Containers {
-		if cr.Name == constants.PostgresContainerName {
+		if cr.Name == c.containerName() {
 			targetContainer = i
 			break
 		}
 	}
 
 	if targetContainer < 0 {
-		return "", fmt.Errorf("could not find %s container to exec to", constants.PostgresContainerName)
+		return "", fmt.Errorf("could not find %s container to exec to", c.containerName())
 	}
 
 	req := c.KubeClient.RESTClient.Post().
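A minimal standalone sketch of the name-based lookup this hunk performs; findContainerIndex and the sample container names are illustrative, not part of the operator. It shows why the index returned by the scan is trusted over a hardcoded position: a sidecar may come first in the pod spec.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// findContainerIndex mirrors the loop above: scan the pod spec for the
// container with the given name, returning its index or -1 if absent.
func findContainerIndex(spec *v1.PodSpec, name string) int {
	for i, cr := range spec.Containers {
		if cr.Name == name {
			return i
		}
	}
	return -1
}

func main() {
	spec := v1.PodSpec{Containers: []v1.Container{
		{Name: "metrics-exporter"}, // hypothetical sidecar at index 0
		{Name: "postgres"},
	}}
	fmt.Println(findContainerIndex(&spec, "postgres")) // 1
	fmt.Println(findContainerIndex(&spec, "missing"))  // -1
}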
@@ -67,7 +67,7 @@ type spiloConfiguration struct {
 }
 
 func (c *Cluster) containerName() string {
-	return "postgres"
+	return constants.PostgresContainerName
 }
 
 func (c *Cluster) statefulSetName() string {
@@ -1401,14 +1401,18 @@ func addShmVolume(podSpec *v1.PodSpec) {
 		},
 	})
 
-	pgIdx := constants.PostgresContainerIdx
-	mounts := append(podSpec.Containers[pgIdx].VolumeMounts,
-		v1.VolumeMount{
-			Name:      constants.ShmVolumeName,
-			MountPath: constants.ShmVolumePath,
-		})
+	for i, container := range podSpec.Containers {
+		if container.Name == constants.PostgresContainerName {
+			mounts := append(container.VolumeMounts,
+				v1.VolumeMount{
+					Name:      constants.ShmVolumeName,
+					MountPath: constants.ShmVolumePath,
+				})
+
+			podSpec.Containers[i].VolumeMounts = mounts
+		}
+	}
 
-	podSpec.Containers[0].VolumeMounts = mounts
 	podSpec.Volumes = volumes
 }
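One Go detail worth noting in the new loop: the range variable container is a copy of the slice element, which is why the mounts are written back through podSpec.Containers[i]. A runnable sketch of the pitfall this avoids; the volume name and mount path are illustrative values, not necessarily the operator's constants.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

func main() {
	spec := v1.PodSpec{Containers: []v1.Container{{Name: "postgres"}}}
	mount := v1.VolumeMount{Name: "dshm", MountPath: "/dev/shm"}

	// Appending via the range variable mutates a copy; the spec is unchanged.
	for _, container := range spec.Containers {
		container.VolumeMounts = append(container.VolumeMounts, mount)
	}
	fmt.Println(len(spec.Containers[0].VolumeMounts)) // 0

	// Writing back through the index, as addShmVolume does, persists it.
	for i, container := range spec.Containers {
		if container.Name == "postgres" {
			spec.Containers[i].VolumeMounts = append(container.VolumeMounts, mount)
		}
	}
	fmt.Println(len(spec.Containers[0].VolumeMounts)) // 1
}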
@@ -1439,54 +1443,58 @@ func (c *Cluster) addAdditionalVolumes(podSpec *v1.PodSpec,
 	volumes := podSpec.Volumes
 	mountPaths := map[string]acidv1.AdditionalVolume{}
-	for i, v := range additionalVolumes {
-		if previousVolume, exist := mountPaths[v.MountPath]; exist {
+	for i, additionalVolume := range additionalVolumes {
+		if previousVolume, exist := mountPaths[additionalVolume.MountPath]; exist {
 			msg := "Volume %+v cannot be mounted to the same path as %+v"
-			c.logger.Warningf(msg, v, previousVolume)
+			c.logger.Warningf(msg, additionalVolume, previousVolume)
 			continue
 		}
 
-		if v.MountPath == constants.PostgresDataMount {
+		if additionalVolume.MountPath == constants.PostgresDataMount {
 			msg := "Cannot mount volume on postgresql data directory, %+v"
-			c.logger.Warningf(msg, v)
+			c.logger.Warningf(msg, additionalVolume)
 			continue
 		}
 
-		if len(v.TargetContainers) == 0 {
-			spiloContainer := podSpec.Containers[0]
-			additionalVolumes[i].TargetContainers = []string{spiloContainer.Name}
+		// if no target container is defined assign it to postgres container
+		if len(additionalVolume.TargetContainers) == 0 {
+			for j := range podSpec.Containers {
+				if podSpec.Containers[j].Name == c.containerName() {
+					additionalVolumes[i].TargetContainers = []string{c.containerName()}
+				}
+			}
 		}
 
-		for _, target := range v.TargetContainers {
-			if target == "all" && len(v.TargetContainers) != 1 {
+		for _, target := range additionalVolume.TargetContainers {
+			if target == "all" && len(additionalVolume.TargetContainers) != 1 {
 				msg := `Target containers could be either "all" or a list
 					of containers, mixing those is not allowed, %+v`
-				c.logger.Warningf(msg, v)
+				c.logger.Warningf(msg, additionalVolume)
 				continue
 			}
 		}
 
 		volumes = append(volumes,
 			v1.Volume{
-				Name:         v.Name,
-				VolumeSource: v.VolumeSource,
+				Name:         additionalVolume.Name,
+				VolumeSource: additionalVolume.VolumeSource,
 			},
 		)
 
-		mountPaths[v.MountPath] = v
+		mountPaths[additionalVolume.MountPath] = additionalVolume
 	}
 
 	c.logger.Infof("Mount additional volumes: %+v", additionalVolumes)
 
 	for i := range podSpec.Containers {
 		mounts := podSpec.Containers[i].VolumeMounts
-		for _, v := range additionalVolumes {
-			for _, target := range v.TargetContainers {
+		for _, additionalVolume := range additionalVolumes {
+			for _, target := range additionalVolume.TargetContainers {
 				if podSpec.Containers[i].Name == target || target == "all" {
 					mounts = append(mounts, v1.VolumeMount{
-						Name:      v.Name,
-						MountPath: v.MountPath,
-						SubPath:   v.SubPath,
+						Name:      additionalVolume.Name,
+						MountPath: additionalVolume.MountPath,
+						SubPath:   additionalVolume.SubPath,
 					})
 				}
 			}
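The defaulting change above is the behavioural core of this hunk: an additional volume with no targetContainers used to attach to Containers[0]; now it attaches to the container whose name matches c.containerName(). A standalone sketch of that rule under the assumption that the container name is "postgres"; defaultTargets is a hypothetical helper, not part of the commit.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// defaultTargets applies the new rule: explicit targets win; otherwise the
// volume goes to the container named "postgres", wherever it sits.
func defaultTargets(spec *v1.PodSpec, targets []string) []string {
	if len(targets) > 0 {
		return targets
	}
	for _, c := range spec.Containers {
		if c.Name == "postgres" {
			return []string{c.Name}
		}
	}
	return nil // no postgres container present
}

func main() {
	spec := v1.PodSpec{Containers: []v1.Container{
		{Name: "metrics-exporter"}, // hypothetical sidecar first
		{Name: "postgres"},
	}}
	fmt.Println(defaultTargets(&spec, nil))             // [postgres]
	fmt.Println(defaultTargets(&spec, []string{"all"})) // [all]
}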
@@ -413,6 +413,7 @@ func TestShmVolume(t *testing.T) {
 				Volumes: []v1.Volume{},
 				Containers: []v1.Container{
 					{
+						Name:         "postgres",
 						VolumeMounts: []v1.VolumeMount{},
 					},
 				},
@@ -425,6 +426,7 @@ func TestShmVolume(t *testing.T) {
 				Volumes: []v1.Volume{{}},
 				Containers: []v1.Container{
 					{
+						Name: "postgres",
 						VolumeMounts: []v1.VolumeMount{
 							{},
 						},
@@ -357,8 +357,8 @@ func (c *Cluster) syncStatefulSet() error {
 	// and
 	// (b) some of the pods were not restarted when the lazy update was still in place
 	for _, pod := range pods {
-		effectivePodImage := pod.Spec.Containers[0].Image
-		stsImage := desiredSts.Spec.Template.Spec.Containers[0].Image
+		effectivePodImage := c.getPostgresContainer(&pod.Spec).Image
+		stsImage := c.getPostgresContainer(&desiredSts.Spec.Template.Spec).Image
 
 		if stsImage != effectivePodImage {
 			if err = c.markRollingUpdateFlagForPod(&pod, "pod not yet restarted due to lazy update"); err != nil {
@@ -227,6 +227,16 @@ func (c *Cluster) logServiceChanges(role PostgresRole, old, new *v1.Service, isU
 	}
 }
 
+func (c *Cluster) getPostgresContainer(podSpec *v1.PodSpec) v1.Container {
+	var pgContainer v1.Container
+	for _, container := range podSpec.Containers {
+		if container.Name == constants.PostgresContainerName {
+			pgContainer = container
+		}
+	}
+	return pgContainer
+}
+
 func (c *Cluster) getTeamMembers(teamID string) ([]string, error) {
 
 	if teamID == "" {
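The new accessor returns the container by value, so it suits read-only lookups like the image comparison in syncStatefulSet; code that mutates the container (addShmVolume) still writes back through the slice index. A hedged usage sketch of the intended call pattern; imagesMatch is a hypothetical helper, not part of the commit.

package cluster

import (
	v1 "k8s.io/api/core/v1"
)

// imagesMatch compares the Postgres container image between a running pod
// and the statefulset template via getPostgresContainer, without assuming
// the container sits at index 0 of either spec.
func (c *Cluster) imagesMatch(pod *v1.Pod, template *v1.PodTemplateSpec) bool {
	return c.getPostgresContainer(&pod.Spec).Image ==
		c.getPostgresContainer(&template.Spec).Image
}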
@@ -5,7 +5,6 @@ import "time"
 // General kubernetes-related constants
 const (
 	PostgresContainerName = "postgres"
-	PostgresContainerIdx  = 0
 	K8sAPIPath            = "/apis"
 
 	QueueResyncPeriodPod = 5 * time.Minute