Fix go lint errors (#1468)
* fix linter errors
* fix linter errors in kubectl plugin
* update PyYAML dependency in e2e tests
* declare a testVolume in volume_test
parent 32e6c135b9
commit f0f7f25d30
@@ -1,3 +1,3 @@
 kubernetes==11.0.0
 timeout_decorator==0.4.1
-pyyaml==5.3.1
+pyyaml==5.4.1
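Note: the only e2e requirements change is the pyyaml pin moving from 5.3.1 to 5.4.1, which most likely picks up the PyYAML 5.4 security fix (CVE-2020-14343, arbitrary code execution through the full_load constructors); it should be read as a security update rather than a feature bump.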
@@ -71,7 +71,7 @@ func addDb(dbName string, dbOwner string, clusterName string) {

 	var dbOwnerExists bool
 	dbUsers := postgresql.Spec.Users
-	for key, _ := range dbUsers {
+	for key := range dbUsers {
 		if key == dbOwner {
 			dbOwnerExists = true
 		}
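The `for key, _ := range` form is the classic finding reported by golint/gosimple (the S1005 family): the blank identifier is redundant because ranging over a map with a single loop variable already yields only the keys. A minimal standalone sketch of the rule (the map contents are illustrative):

package main

import "fmt"

func main() {
	dbUsers := map[string]string{"owner01": "db01", "owner02": "db02"}
	// Flagged: for key, _ := range dbUsers { ... }
	// Preferred: one loop variable already iterates over the keys.
	for key := range dbUsers {
		fmt.Println(key)
	}
}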
@@ -23,13 +23,14 @@ THE SOFTWARE.
 package cmd

 import (
+	"log"
+	"os"
+	user "os/user"
+
 	"github.com/spf13/cobra"
 	"k8s.io/client-go/kubernetes"
 	"k8s.io/client-go/rest"
 	"k8s.io/client-go/tools/remotecommand"
-	"log"
-	"os"
-	user "os/user"
 )

 // connectCmd represents the kubectl pg connect command
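The import shuffle follows the goimports convention: standard-library packages form the first group, third-party modules the second, separated by a blank line. A minimal file in that layout (the "demo" command and its behavior are purely illustrative):

package main

import (
	"fmt"
	"os"

	"github.com/spf13/cobra"
)

func main() {
	cmd := &cobra.Command{
		Use: "demo",
		Run: func(cmd *cobra.Command, args []string) { fmt.Println("hello") },
	}
	if err := cmd.Execute(); err != nil {
		os.Exit(1)
	}
}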
@@ -80,13 +81,13 @@ kubectl pg connect -c cluster -p -u user01 -d db01
 func connect(clusterName string, master bool, replica string, psql bool, user string, dbName string) {
 	config := getConfig()
-	client, er := kubernetes.NewForConfig(config)
-	if er != nil {
-		log.Fatal(er)
+	client, err := kubernetes.NewForConfig(config)
+	if err != nil {
+		log.Fatal(err)
 	}

 	podName := getPodName(clusterName, master, replica)
-	execRequest := &rest.Request{}
+	var execRequest *rest.Request

 	if psql {
 		execRequest = client.CoreV1().RESTClient().Post().Resource("pods").
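Two separate findings in one hunk: the error variable is renamed from `er` to the conventional `err`, and `execRequest := &rest.Request{}` becomes `var execRequest *rest.Request`. The second change avoids allocating a throwaway `rest.Request` that every branch below immediately overwrites; a plain nil-pointer declaration states that intent and satisfies checkers that flag never-read initial values (ineffassign/SA4006-style findings, to name the likely culprits).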
@@ -53,6 +53,9 @@ kubectl pg create -f cluster-manifest.yaml
 func create(fileName string) {
 	config := getConfig()
 	postgresConfig, err := PostgresqlLister.NewForConfig(config)
+	if err != nil {
+		log.Fatal(err)
+	}
 	ymlFile, err := ioutil.ReadFile(fileName)
 	if err != nil {
 		log.Fatal(err)
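The added `if err != nil` block is presumably an errcheck finding: the error from `PostgresqlLister.NewForConfig` was previously dropped, so a bad kubeconfig would only surface later as a nil-pointer panic. The following `ymlFile, err := ioutil.ReadFile(...)` can still use `:=` because `ymlFile` is new on the left-hand side, so `err` is reused rather than redeclared.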
@@ -67,7 +67,7 @@ func extVolume(increasedVolumeSize string, clusterName string) {
 	namespace := getCurrentNamespace()
 	postgresql, err := postgresConfig.Postgresqls(namespace).Get(context.TODO(), clusterName, metav1.GetOptions{})
 	if err != nil {
-		log.Fatalf("hii %v", err)
+		log.Fatal(err)
 	}

 	oldSize, err := resource.ParseQuantity(postgresql.Spec.Volume.Size)
@@ -31,7 +31,6 @@ import (

 	"github.com/spf13/cobra"
 	PostgresqlLister "github.com/zalando/postgres-operator/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1"
-	v1 "k8s.io/api/apps/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/client-go/kubernetes"
@@ -46,6 +45,9 @@ var scaleCmd = &cobra.Command{
 Scaling to 0 leads to down time.`,
 	Run: func(cmd *cobra.Command, args []string) {
 		clusterName, err := cmd.Flags().GetString("cluster")
+		if err != nil {
+			log.Fatal(err)
+		}
 		namespace, err := cmd.Flags().GetString("namespace")
 		if err != nil {
 			log.Fatal(err)
@@ -129,8 +131,7 @@ func allowedMinMaxInstances(config *rest.Config) (int32, int32) {
 		log.Fatal(err)
 	}

-	var operator *v1.Deployment
-	operator = getPostgresOperator(k8sClient)
+	operator := getPostgresOperator(k8sClient)

 	operatorContainer := operator.Spec.Template.Spec.Containers
 	var configMapName, operatorConfigName string
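Declaring a variable and assigning it on the next line is gosimple's S1021 (merge variable declaration and assignment); collapsing the pair into `:=` is also what let the now-unused `v1 "k8s.io/api/apps/v1"` import above be dropped. The shape of the rule, as a runnable sketch (getAnswer is a stand-in for any call):

package main

import "fmt"

func getAnswer() int { return 42 }

func main() {
	// Flagged (S1021):
	//   var answer int
	//   answer = getAnswer()
	// Preferred:
	answer := getAnswer()
	fmt.Println(answer)
}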
@@ -57,6 +57,9 @@ kubectl pg update -f cluster-manifest.yaml
 func updatePgResources(fileName string) {
 	config := getConfig()
 	postgresConfig, err := PostgresqlLister.NewForConfig(config)
+	if err != nil {
+		log.Fatal(err)
+	}
 	ymlFile, err := ioutil.ReadFile(fileName)
 	if err != nil {
 		log.Fatal(err)
@@ -99,9 +99,9 @@ func confirmAction(clusterName string, namespace string) {

 func getPodName(clusterName string, master bool, replicaNumber string) string {
 	config := getConfig()
-	client, er := kubernetes.NewForConfig(config)
-	if er != nil {
-		log.Fatal(er)
+	client, err := kubernetes.NewForConfig(config)
+	if err != nil {
+		log.Fatal(err)
 	}

 	postgresConfig, err := PostgresqlLister.NewForConfig(config)
@@ -1164,7 +1164,7 @@ func (c *Cluster) initHumanUsers() error {
 	for _, superuserTeam := range superuserTeams {
 		err := c.initTeamMembers(superuserTeam, true)
 		if err != nil {
-			return fmt.Errorf("Cannot initialize members for team %q of Postgres superusers: %v", superuserTeam, err)
+			return fmt.Errorf("cannot initialize members for team %q of Postgres superusers: %v", superuserTeam, err)
 		}
 		if superuserTeam == c.Spec.TeamID {
 			clusterIsOwnedBySuperuserTeam = true
@@ -1177,7 +1177,7 @@ func (c *Cluster) initHumanUsers() error {
 		if !(util.SliceContains(superuserTeams, additionalTeam)) {
 			err := c.initTeamMembers(additionalTeam, false)
 			if err != nil {
-				return fmt.Errorf("Cannot initialize members for additional team %q for cluster owned by %q: %v", additionalTeam, c.Spec.TeamID, err)
+				return fmt.Errorf("cannot initialize members for additional team %q for cluster owned by %q: %v", additionalTeam, c.Spec.TeamID, err)
 			}
 		}
 	}
@@ -1190,7 +1190,7 @@ func (c *Cluster) initHumanUsers() error {

 	err := c.initTeamMembers(c.Spec.TeamID, false)
 	if err != nil {
-		return fmt.Errorf("Cannot initialize members for team %q who owns the Postgres cluster: %v", c.Spec.TeamID, err)
+		return fmt.Errorf("cannot initialize members for team %q who owns the Postgres cluster: %v", c.Spec.TeamID, err)
 	}

 	return nil
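All three rewordings apply the standard Go convention (golint, or stylecheck's ST1005) that error strings start lower-case and carry no trailing punctuation, because they usually end up embedded in other messages. A small demonstration of why capitalization reads badly once errors are wrapped (team name and messages are illustrative):

package main

import "fmt"

func initTeam(team string) error {
	return fmt.Errorf("cannot initialize members for team %q", team)
}

func main() {
	if err := initTeam("acid"); err != nil {
		// Wrapping keeps the message readable mid-sentence:
		// sync failed: cannot initialize members for team "acid"
		fmt.Println(fmt.Errorf("sync failed: %v", err))
	}
}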
@@ -420,9 +420,7 @@ func (c *Cluster) deleteConnectionPooler(role PostgresRole) (err error) {

 	// Clean up the deployment object. If deployment resource we've remembered
 	// is somehow empty, try to delete based on what would we generate
-	var deployment *appsv1.Deployment
-	deployment = c.ConnectionPooler[role].Deployment
+	deployment := c.ConnectionPooler[role].Deployment

 	policy := metav1.DeletePropagationForeground
 	options := metav1.DeleteOptions{PropagationPolicy: &policy}
@@ -445,8 +443,7 @@ func (c *Cluster) deleteConnectionPooler(role PostgresRole) (err error) {
 	}

 	// Repeat the same for the service object
-	var service *v1.Service
-	service = c.ConnectionPooler[role].Service
+	service := c.ConnectionPooler[role].Service

 	if service == nil {
 		c.logger.Debugf("no connection pooler service object to delete")
 	} else {
@@ -213,10 +213,10 @@ PatroniInitDBParams:
 	for _, k := range initdbOptionNames {
 		v := patroni.InitDB[k]
 		for i, defaultParam := range config.Bootstrap.Initdb {
-			switch defaultParam.(type) {
+			switch t := defaultParam.(type) {
 			case map[string]string:
 				{
-					for k1 := range defaultParam.(map[string]string) {
+					for k1 := range t {
 						if k1 == k {
 							(config.Bootstrap.Initdb[i]).(map[string]string)[k] = v
 							continue PatroniInitDBParams
@@ -226,7 +226,7 @@ PatroniInitDBParams:
 			case string:
 				{
 					/* if the option already occurs in the list */
-					if defaultParam.(string) == v {
+					if t == v {
 						continue PatroniInitDBParams
 					}
 				}
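Both hunks above are the gosimple S1034 pattern: when a `switch x.(type)` re-asserts `x` inside its cases, binding the result once with `switch t := x.(type)` removes the repeated assertions, and `t` already has the concrete type in each case. A self-contained sketch (the inputs mimic the initdb parameters but are made up):

package main

import "fmt"

func describe(v interface{}) string {
	// S1034: bind the assertion result instead of re-asserting per case.
	switch t := v.(type) {
	case map[string]string:
		return fmt.Sprintf("map with %d keys", len(t)) // t is map[string]string here
	case string:
		return "string: " + t // t is string here
	default:
		return "unknown"
	}
}

func main() {
	fmt.Println(describe(map[string]string{"data-checksums": "true"}))
	fmt.Println(describe("--encoding=UTF8"))
}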
@@ -264,7 +264,7 @@ PatroniInitDBParams:
 	if patroni.SynchronousMode {
 		config.Bootstrap.DCS.SynchronousMode = patroni.SynchronousMode
 	}
-	if patroni.SynchronousModeStrict != false {
+	if patroni.SynchronousModeStrict {
 		config.Bootstrap.DCS.SynchronousModeStrict = patroni.SynchronousModeStrict
 	}
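`x != false` is gosimple's S1002 (omit comparison with boolean constant); for a bool the whole expression is just `x`. The `if patroni.SynchronousMode` branch two lines above already used the idiomatic form, so this also makes the two checks consistent.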
@@ -336,7 +336,7 @@ func nodeAffinity(nodeReadinessLabel map[string]string, nodeAffinity *v1.NodeAffinity)
 	if len(nodeReadinessLabel) == 0 && nodeAffinity == nil {
 		return nil
 	}
-	nodeAffinityCopy := *&v1.NodeAffinity{}
+	nodeAffinityCopy := v1.NodeAffinity{}
 	if nodeAffinity != nil {
 		nodeAffinityCopy = *nodeAffinity.DeepCopy()
 	}
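`*&v1.NodeAffinity{}` takes the address of a composite literal and immediately dereferences it again; staticcheck flags this as a no-op simplification (`*&x` is just `x`), so the plain literal `v1.NodeAffinity{}` is identical in effect.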
@@ -1279,15 +1279,12 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.StatefulSet, error) {
 		return nil, fmt.Errorf("could not set the pod management policy to the unknown value: %v", c.OpConfig.PodManagementPolicy)
 	}

-	stsAnnotations := make(map[string]string)
-	stsAnnotations = c.AnnotationsToPropagate(c.annotationsSet(nil))
-
 	statefulSet := &appsv1.StatefulSet{
 		ObjectMeta: metav1.ObjectMeta{
 			Name:        c.statefulSetName(),
 			Namespace:   c.Namespace,
 			Labels:      c.labelsSet(true),
-			Annotations: stsAnnotations,
+			Annotations: c.AnnotationsToPropagate(c.annotationsSet(nil)),
 		},
 		Spec: appsv1.StatefulSetSpec{
 			Replicas: &numberOfInstances,
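The dropped pair `stsAnnotations := make(map[string]string)` followed by `stsAnnotations = ...` was another value-never-read finding: the freshly allocated map is discarded by the very next assignment. Inlining the `c.AnnotationsToPropagate(...)` call removes both the dead allocation and the variable.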
@@ -11,6 +11,7 @@ import (
 	"github.com/stretchr/testify/assert"

 	acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1"
+	fakeacidv1 "github.com/zalando/postgres-operator/pkg/generated/clientset/versioned/fake"
 	"github.com/zalando/postgres-operator/pkg/spec"
 	"github.com/zalando/postgres-operator/pkg/util"
 	"github.com/zalando/postgres-operator/pkg/util/config"
@@ -24,9 +25,21 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/intstr"
+	"k8s.io/client-go/kubernetes/fake"
 	v1core "k8s.io/client-go/kubernetes/typed/core/v1"
 )

+func newFakeK8sTestClient() (k8sutil.KubernetesClient, *fake.Clientset) {
+	acidClientSet := fakeacidv1.NewSimpleClientset()
+	clientSet := fake.NewSimpleClientset()
+
+	return k8sutil.KubernetesClient{
+		PodsGetter:         clientSet.CoreV1(),
+		PostgresqlsGetter:  acidClientSet.AcidV1(),
+		StatefulSetsGetter: clientSet.AppsV1(),
+	}, clientSet
+}
+
 // For testing purposes
 type ExpectedValue struct {
 	envIndex int
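The new helper wires fake clientsets (client-go's `fake.NewSimpleClientset` plus the generated fake for the acid.zalan.do CRDs) into a `k8sutil.KubernetesClient`, so tests can create and read real-looking objects without a cluster. A minimal standalone use of the core fake client, independent of the operator's types:

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

func main() {
	clientSet := fake.NewSimpleClientset() // in-memory stand-in for the API server
	pods, err := clientSet.CoreV1().Pods("default").List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Printf("pods in fake cluster: %d\n", len(pods.Items))
}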
@@ -930,15 +943,6 @@ func TestNodeAffinity(t *testing.T) {
 	assert.Equal(t, s.Spec.Template.Spec.Affinity.NodeAffinity, nodeAff, "cluster template has correct node affinity")
 }

-func testCustomPodTemplate(cluster *Cluster, podSpec *v1.PodTemplateSpec) error {
-	if podSpec.ObjectMeta.Name != "test-pod-template" {
-		return fmt.Errorf("Custom pod template is not used, current spec %+v",
-			podSpec)
-	}
-
-	return nil
-}
-
 func testDeploymentOwnerReference(cluster *Cluster, deployment *appsv1.Deployment) error {
 	owner := deployment.ObjectMeta.OwnerReferences[0]
@@ -962,16 +966,23 @@ func testServiceOwnerReference(cluster *Cluster, service *v1.Service, role PostgresRole) error {
 }

 func TestTLS(t *testing.T) {
-	var err error
-	var spec acidv1.PostgresSpec
-	var cluster *Cluster
-	var spiloRunAsUser = int64(101)
-	var spiloRunAsGroup = int64(103)
-	var spiloFSGroup = int64(103)
-	var additionalVolumes = spec.AdditionalVolumes
-
-	makeSpec := func(tls acidv1.TLSDescription) acidv1.PostgresSpec {
-		return acidv1.PostgresSpec{
+	client, _ := newFakeK8sTestClient()
+	clusterName := "acid-test-cluster"
+	namespace := "default"
+	tlsSecretName := "my-secret"
+	spiloRunAsUser := int64(101)
+	spiloRunAsGroup := int64(103)
+	spiloFSGroup := int64(103)
+	defaultMode := int32(0640)
+	mountPath := "/tls"
+
+	pg := acidv1.Postgresql{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      clusterName,
+			Namespace: namespace,
+		},
+		Spec: acidv1.PostgresSpec{
 			TeamID: "myapp", NumberOfInstances: 1,
 			Resources: acidv1.Resources{
 				ResourceRequests: acidv1.ResourceDescription{CPU: "1", Memory: "10"},
@@ -980,11 +991,24 @@ func TestTLS(t *testing.T) {
 			Volume: acidv1.Volume{
 				Size: "1G",
 			},
-			TLS: &tls,
-		}
+			TLS: &acidv1.TLSDescription{
+				SecretName: tlsSecretName, CAFile: "ca.crt"},
+			AdditionalVolumes: []acidv1.AdditionalVolume{
+				acidv1.AdditionalVolume{
+					Name:      tlsSecretName,
+					MountPath: mountPath,
+					VolumeSource: v1.VolumeSource{
+						Secret: &v1.SecretVolumeSource{
+							SecretName:  tlsSecretName,
+							DefaultMode: &defaultMode,
+						},
+					},
+				},
+			},
+		},
 	}

-	cluster = New(
+	var cluster = New(
 		Config{
 			OpConfig: config.Config{
 				PodManagementPolicy: "ordered_ready",
@@ -999,28 +1023,14 @@ func TestTLS(t *testing.T) {
 				SpiloFSGroup:    &spiloFSGroup,
 			},
 		},
-	}, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger, eventRecorder)
-	spec = makeSpec(acidv1.TLSDescription{SecretName: "my-secret", CAFile: "ca.crt"})
-	s, err := cluster.generateStatefulSet(&spec)
-	if err != nil {
-		assert.NoError(t, err)
-	}
+	}, client, pg, logger, eventRecorder)
+
+	// create a statefulset
+	sts, err := cluster.createStatefulSet()
+	assert.NoError(t, err)

 	fsGroup := int64(103)
-	assert.Equal(t, &fsGroup, s.Spec.Template.Spec.SecurityContext.FSGroup, "has a default FSGroup assigned")
-
-	defaultMode := int32(0640)
-	mountPath := "/tls"
-	additionalVolumes = append(additionalVolumes, acidv1.AdditionalVolume{
-		Name:      spec.TLS.SecretName,
-		MountPath: mountPath,
-		VolumeSource: v1.VolumeSource{
-			Secret: &v1.SecretVolumeSource{
-				SecretName:  spec.TLS.SecretName,
-				DefaultMode: &defaultMode,
-			},
-		},
-	})
+	assert.Equal(t, &fsGroup, sts.Spec.Template.Spec.SecurityContext.FSGroup, "has a default FSGroup assigned")

 	volume := v1.Volume{
 		Name: "my-secret",
@@ -1031,16 +1041,16 @@ func TestTLS(t *testing.T) {
 			},
 		},
 	}
-	assert.Contains(t, s.Spec.Template.Spec.Volumes, volume, "the pod gets a secret volume")
+	assert.Contains(t, sts.Spec.Template.Spec.Volumes, volume, "the pod gets a secret volume")

-	assert.Contains(t, s.Spec.Template.Spec.Containers[0].VolumeMounts, v1.VolumeMount{
+	assert.Contains(t, sts.Spec.Template.Spec.Containers[0].VolumeMounts, v1.VolumeMount{
 		MountPath: "/tls",
 		Name:      "my-secret",
 	}, "the volume gets mounted in /tls")

-	assert.Contains(t, s.Spec.Template.Spec.Containers[0].Env, v1.EnvVar{Name: "SSL_CERTIFICATE_FILE", Value: "/tls/tls.crt"})
-	assert.Contains(t, s.Spec.Template.Spec.Containers[0].Env, v1.EnvVar{Name: "SSL_PRIVATE_KEY_FILE", Value: "/tls/tls.key"})
-	assert.Contains(t, s.Spec.Template.Spec.Containers[0].Env, v1.EnvVar{Name: "SSL_CA_FILE", Value: "/tls/ca.crt"})
+	assert.Contains(t, sts.Spec.Template.Spec.Containers[0].Env, v1.EnvVar{Name: "SSL_CERTIFICATE_FILE", Value: "/tls/tls.crt"})
+	assert.Contains(t, sts.Spec.Template.Spec.Containers[0].Env, v1.EnvVar{Name: "SSL_PRIVATE_KEY_FILE", Value: "/tls/tls.key"})
+	assert.Contains(t, sts.Spec.Template.Spec.Containers[0].Env, v1.EnvVar{Name: "SSL_CA_FILE", Value: "/tls/ca.crt"})
 }

 func TestAdditionalVolume(t *testing.T) {
@@ -19,8 +19,8 @@ var VersionMap = map[string]int{

 // IsBiggerPostgresVersion Compare two Postgres version numbers
 func IsBiggerPostgresVersion(old string, new string) bool {
-	oldN, _ := VersionMap[old]
-	newN, _ := VersionMap[new]
+	oldN := VersionMap[old]
+	newN := VersionMap[new]
 	return newN > oldN
 }
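Reading a map with `oldN, _ := VersionMap[old]` only to discard the ok flag is the same redundant-blank-identifier family as the range fix earlier: indexing a map already returns the element type's zero value for missing keys. A runnable illustration of that zero-value behavior (the map contents are made up):

package main

import "fmt"

func main() {
	versionMap := map[string]int{"9.6": 90600, "13": 130000}
	// No comma-ok needed when the zero value is an acceptable default:
	n := versionMap["12"] // key absent, so n == 0
	fmt.Println(n, versionMap["13"])
}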
@@ -11,7 +11,6 @@ import (
 	"github.com/zalando/postgres-operator/pkg/util"
 	"github.com/zalando/postgres-operator/pkg/util/constants"
 	"github.com/zalando/postgres-operator/pkg/util/k8sutil"
-	appsv1 "k8s.io/api/apps/v1"
 	batchv1beta1 "k8s.io/api/batch/v1beta1"
 	v1 "k8s.io/api/core/v1"
 	policybeta1 "k8s.io/api/policy/v1beta1"
@@ -260,28 +259,6 @@ func (c *Cluster) syncPodDisruptionBudget(isUpdate bool) error {
 	return nil
 }

-func (c *Cluster) mustUpdatePodsAfterLazyUpdate(desiredSset *appsv1.StatefulSet) (bool, error) {
-
-	pods, err := c.listPods()
-	if err != nil {
-		return false, fmt.Errorf("could not list pods of the statefulset: %v", err)
-	}
-
-	for _, pod := range pods {
-
-		effectivePodImage := pod.Spec.Containers[0].Image
-		ssImage := desiredSset.Spec.Template.Spec.Containers[0].Image
-
-		if ssImage != effectivePodImage {
-			c.logger.Infof("not all pods were re-started when the lazy upgrade was enabled; forcing the rolling upgrade now")
-			return true, nil
-		}
-
-	}
-
-	return false, nil
-}
-
 func (c *Cluster) syncStatefulSet() error {

 	podsToRecreate := make([]v1.Pod, 0)
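The function removed here was evidently dead code, the kind of finding the `unused` checker (U1000) reports, and deleting it is what orphaned the `appsv1` import dropped in the preceding import hunk. The `logVolumeChanges` removal in the next hunk looks like the same finding.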
@@ -227,11 +227,6 @@ func (c *Cluster) logServiceChanges(role PostgresRole, old, new *v1.Service, isUpdate bool) {
 	}
 }

-func (c *Cluster) logVolumeChanges(old, new acidv1.Volume) {
-	c.logger.Infof("volume specification has been changed")
-	logNiceDiff(c.logger, old, new)
-}
-
 func (c *Cluster) getTeamMembers(teamID string) ([]string, error) {

 	if teamID == "" {
@@ -251,9 +246,7 @@ func (c *Cluster) getTeamMembers(teamID string) ([]string, error) {
 			}
 		}

-		for _, member := range additionalMembers {
-			members = append(members, member)
-		}
+		members = append(members, additionalMembers...)
 	}

 	if !c.OpConfig.EnableTeamsAPI {
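Appending the elements of one slice to another in a loop is gosimple's S1011 (use a single append with `...`); beyond brevity, the single call lets the runtime size the destination once instead of growing it element by element. Sketch with illustrative data:

package main

import "fmt"

func main() {
	members := []string{"alice"}
	additionalMembers := []string{"bob", "carol"}
	// Instead of: for _, m := range additionalMembers { members = append(members, m) }
	members = append(members, additionalMembers...)
	fmt.Println(members)
}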
@@ -292,7 +285,6 @@ func (c *Cluster) annotationsSet(annotations map[string]string) map[string]string {
 	pgCRDAnnotations := c.ObjectMeta.Annotations

 	// allow to inherit certain labels from the 'postgres' object
-	if pgCRDAnnotations != nil {
 	for k, v := range pgCRDAnnotations {
 		for _, match := range c.OpConfig.InheritedAnnotations {
 			if k == match {
@@ -300,7 +292,6 @@ func (c *Cluster) annotationsSet(annotations map[string]string) map[string]string {
 			}
 		}
 	}
-	}

 	if len(annotations) > 0 {
 		return annotations
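The removed `if pgCRDAnnotations != nil` guard (whose matching brace goes in the second hunk) is gosimple's S1031: ranging over a nil map is safe and simply performs zero iterations, so the check adds nothing. For instance:

package main

import "fmt"

func main() {
	var annotations map[string]string // nil map
	for k, v := range annotations {   // body never executes on a nil map
		fmt.Println(k, v)
	}
	fmt.Println("no panic")
}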
@@ -74,10 +74,15 @@ func (c *Cluster) syncVolumes() error {
 func (c *Cluster) syncUnderlyingEBSVolume() error {
 	c.logger.Infof("starting to sync EBS volumes: type, iops, throughput, and size")

-	var err error
+	var (
+		err     error
+		newSize resource.Quantity
+	)

 	targetValue := c.Spec.Volume
-	newSize, err := resource.ParseQuantity(targetValue.Size)
+	if newSize, err = resource.ParseQuantity(targetValue.Size); err != nil {
+		return fmt.Errorf("could not parse volume size: %v", err)
+	}
 	targetSize := quantityToGigabyte(newSize)

 	awsGp3 := aws.String("gp3")
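Here the finding was presumably the unchecked error from `resource.ParseQuantity`; the rewrite both checks it and hoists `newSize` into the `var` block so the `if` can use plain `=`. That detail matters: with `:=` inside the `if`, both variables would be scoped to the statement and `newSize` would be invisible to `quantityToGigabyte` below. A minimal sketch of the idiom (the "100Gi" input is illustrative):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	var (
		err     error
		newSize resource.Quantity
	)
	// `=` (not `:=`) keeps newSize usable after the if statement.
	if newSize, err = resource.ParseQuantity("100Gi"); err != nil {
		panic(err)
	}
	fmt.Println(newSize.Value()) // 107374182400 bytes
}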
@@ -24,6 +24,20 @@ import (
 	"k8s.io/client-go/kubernetes/fake"
 )

+type testVolume struct {
+	size        int64
+	iops        int64
+	throughtput int64
+	volType     string
+}
+
+var testVol = testVolume{
+	size:        100,
+	iops:        300,
+	throughtput: 125,
+	volType:     "gp2",
+}
+
 func newFakeK8sPVCclient() (k8sutil.KubernetesClient, *fake.Clientset) {
 	clientSet := fake.NewSimpleClientset()
@@ -189,14 +203,7 @@ func TestMigrateEBS(t *testing.T) {
 	cluster.Namespace = namespace
 	filterLabels := cluster.labelsSet(false)

-	testVolumes := []testVolume{
-		{
-			size: 100,
-		},
-		{
-			size: 100,
-		},
-	}
+	testVolumes := []testVolume{testVol, testVol}

 	initTestVolumesAndPods(cluster.KubeClient, namespace, clusterName, filterLabels, testVolumes)
@@ -220,13 +227,6 @@ func TestMigrateEBS(t *testing.T) {
 	cluster.executeEBSMigration()
 }

-type testVolume struct {
-	iops        int64
-	throughtput int64
-	size        int64
-	volType     string
-}
-
 func initTestVolumesAndPods(client k8sutil.KubernetesClient, namespace, clustername string, labels labels.Set, volumes []testVolume) {
 	i := 0
 	for _, v := range volumes {
@@ -305,17 +305,7 @@ func TestMigrateGp3Support(t *testing.T) {
 	cluster.Namespace = namespace
 	filterLabels := cluster.labelsSet(false)

-	testVolumes := []testVolume{
-		{
-			size: 100,
-		},
-		{
-			size: 100,
-		},
-		{
-			size: 100,
-		},
-	}
+	testVolumes := []testVolume{testVol, testVol, testVol}

 	initTestVolumesAndPods(cluster.KubeClient, namespace, clusterName, filterLabels, testVolumes)
@@ -371,14 +361,7 @@ func TestManualGp2Gp3Support(t *testing.T) {
 	cluster.Namespace = namespace
 	filterLabels := cluster.labelsSet(false)

-	testVolumes := []testVolume{
-		{
-			size: 100,
-		},
-		{
-			size: 100,
-		},
-	}
+	testVolumes := []testVolume{testVol, testVol}

 	initTestVolumesAndPods(cluster.KubeClient, namespace, clusterName, filterLabels, testVolumes)