fix linter errors in kubectl plugin

This commit is contained in:
Felix Kunde 2021-04-23 16:44:06 +02:00
parent cc3fc15e63
commit fa79aa9f49
8 changed files with 76 additions and 50 deletions

View File

@@ -71,7 +71,7 @@ func addDb(dbName string, dbOwner string, clusterName string) {
var dbOwnerExists bool var dbOwnerExists bool
dbUsers := postgresql.Spec.Users dbUsers := postgresql.Spec.Users
for key, _ := range dbUsers { for key := range dbUsers {
if key == dbOwner { if key == dbOwner {
dbOwnerExists = true dbOwnerExists = true
} }

View File

@@ -23,13 +23,14 @@ THE SOFTWARE.
package cmd package cmd
import ( import (
"log"
"os"
user "os/user"
"github.com/spf13/cobra" "github.com/spf13/cobra"
"k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest" "k8s.io/client-go/rest"
"k8s.io/client-go/tools/remotecommand" "k8s.io/client-go/tools/remotecommand"
"log"
"os"
user "os/user"
) )
// connectCmd represents the kubectl pg connect command // connectCmd represents the kubectl pg connect command
@@ -80,13 +81,13 @@ kubectl pg connect -c cluster -p -u user01 -d db01
func connect(clusterName string, master bool, replica string, psql bool, user string, dbName string) { func connect(clusterName string, master bool, replica string, psql bool, user string, dbName string) {
config := getConfig() config := getConfig()
client, er := kubernetes.NewForConfig(config) client, err := kubernetes.NewForConfig(config)
if er != nil { if err != nil {
log.Fatal(er) log.Fatal(err)
} }
podName := getPodName(clusterName, master, replica) podName := getPodName(clusterName, master, replica)
execRequest := &rest.Request{} var execRequest *rest.Request
if psql { if psql {
execRequest = client.CoreV1().RESTClient().Post().Resource("pods"). execRequest = client.CoreV1().RESTClient().Post().Resource("pods").

View File

@@ -53,6 +53,9 @@ kubectl pg create -f cluster-manifest.yaml
func create(fileName string) { func create(fileName string) {
config := getConfig() config := getConfig()
postgresConfig, err := PostgresqlLister.NewForConfig(config) postgresConfig, err := PostgresqlLister.NewForConfig(config)
if err != nil {
log.Fatal(err)
}
ymlFile, err := ioutil.ReadFile(fileName) ymlFile, err := ioutil.ReadFile(fileName)
if err != nil { if err != nil {
log.Fatal(err) log.Fatal(err)

View File

@@ -67,7 +67,7 @@ func extVolume(increasedVolumeSize string, clusterName string) {
namespace := getCurrentNamespace() namespace := getCurrentNamespace()
postgresql, err := postgresConfig.Postgresqls(namespace).Get(context.TODO(), clusterName, metav1.GetOptions{}) postgresql, err := postgresConfig.Postgresqls(namespace).Get(context.TODO(), clusterName, metav1.GetOptions{})
if err != nil { if err != nil {
log.Fatalf("hii %v", err) log.Fatal(err)
} }
oldSize, err := resource.ParseQuantity(postgresql.Spec.Volume.Size) oldSize, err := resource.ParseQuantity(postgresql.Spec.Volume.Size)

View File

@@ -31,7 +31,6 @@ import (
"github.com/spf13/cobra" "github.com/spf13/cobra"
PostgresqlLister "github.com/zalando/postgres-operator/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1" PostgresqlLister "github.com/zalando/postgres-operator/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1"
v1 "k8s.io/api/apps/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes"
@@ -46,6 +45,9 @@ var scaleCmd = &cobra.Command{
Scaling to 0 leads to down time.`, Scaling to 0 leads to down time.`,
Run: func(cmd *cobra.Command, args []string) { Run: func(cmd *cobra.Command, args []string) {
clusterName, err := cmd.Flags().GetString("cluster") clusterName, err := cmd.Flags().GetString("cluster")
if err != nil {
log.Fatal(err)
}
namespace, err := cmd.Flags().GetString("namespace") namespace, err := cmd.Flags().GetString("namespace")
if err != nil { if err != nil {
log.Fatal(err) log.Fatal(err)
@@ -129,8 +131,7 @@ func allowedMinMaxInstances(config *rest.Config) (int32, int32) {
log.Fatal(err) log.Fatal(err)
} }
var operator *v1.Deployment operator := getPostgresOperator(k8sClient)
operator = getPostgresOperator(k8sClient)
operatorContainer := operator.Spec.Template.Spec.Containers operatorContainer := operator.Spec.Template.Spec.Containers
var configMapName, operatorConfigName string var configMapName, operatorConfigName string

View File

@@ -57,6 +57,9 @@ kubectl pg update -f cluster-manifest.yaml
func updatePgResources(fileName string) { func updatePgResources(fileName string) {
config := getConfig() config := getConfig()
postgresConfig, err := PostgresqlLister.NewForConfig(config) postgresConfig, err := PostgresqlLister.NewForConfig(config)
if err != nil {
log.Fatal(err)
}
ymlFile, err := ioutil.ReadFile(fileName) ymlFile, err := ioutil.ReadFile(fileName)
if err != nil { if err != nil {
log.Fatal(err) log.Fatal(err)

View File

@@ -99,9 +99,9 @@ func confirmAction(clusterName string, namespace string) {
func getPodName(clusterName string, master bool, replicaNumber string) string { func getPodName(clusterName string, master bool, replicaNumber string) string {
config := getConfig() config := getConfig()
client, er := kubernetes.NewForConfig(config) client, err := kubernetes.NewForConfig(config)
if er != nil { if err != nil {
log.Fatal(er) log.Fatal(err)
} }
postgresConfig, err := PostgresqlLister.NewForConfig(config) postgresConfig, err := PostgresqlLister.NewForConfig(config)

View File

@@ -11,6 +11,7 @@ import (
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1" acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1"
fakeacidv1 "github.com/zalando/postgres-operator/pkg/generated/clientset/versioned/fake"
"github.com/zalando/postgres-operator/pkg/spec" "github.com/zalando/postgres-operator/pkg/spec"
"github.com/zalando/postgres-operator/pkg/util" "github.com/zalando/postgres-operator/pkg/util"
"github.com/zalando/postgres-operator/pkg/util/config" "github.com/zalando/postgres-operator/pkg/util/config"
@@ -24,9 +25,21 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/intstr" "k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/client-go/kubernetes/fake"
v1core "k8s.io/client-go/kubernetes/typed/core/v1" v1core "k8s.io/client-go/kubernetes/typed/core/v1"
) )
func newFakeK8sTestClient() (k8sutil.KubernetesClient, *fake.Clientset) {
acidClientSet := fakeacidv1.NewSimpleClientset()
clientSet := fake.NewSimpleClientset()
return k8sutil.KubernetesClient{
PodsGetter: clientSet.CoreV1(),
PostgresqlsGetter: acidClientSet.AcidV1(),
StatefulSetsGetter: clientSet.AppsV1(),
}, clientSet
}
// For testing purposes // For testing purposes
type ExpectedValue struct { type ExpectedValue struct {
envIndex int envIndex int
@@ -953,29 +966,23 @@ func testServiceOwnerReference(cluster *Cluster, service *v1.Service, role Postg
} }
func TestTLS(t *testing.T) { func TestTLS(t *testing.T) {
var err error
var spec acidv1.PostgresSpec
var cluster *Cluster
var spiloRunAsUser = int64(101)
var spiloRunAsGroup = int64(103)
var spiloFSGroup = int64(103)
var additionalVolumes = spec.AdditionalVolumes
client, _ := newFakeK8sTestClient()
clusterName := "acid-test-cluster"
namespace := "default"
tlsSecretName := "my-secret"
spiloRunAsUser := int64(101)
spiloRunAsGroup := int64(103)
spiloFSGroup := int64(103)
defaultMode := int32(0640) defaultMode := int32(0640)
mountPath := "/tls" mountPath := "/tls"
additionalVolumes = append(additionalVolumes, acidv1.AdditionalVolume{
Name: spec.TLS.SecretName,
MountPath: mountPath,
VolumeSource: v1.VolumeSource{
Secret: &v1.SecretVolumeSource{
SecretName: spec.TLS.SecretName,
DefaultMode: &defaultMode,
},
},
})
makeSpec := func(tls acidv1.TLSDescription) acidv1.PostgresSpec { pg := acidv1.Postgresql{
return acidv1.PostgresSpec{ ObjectMeta: metav1.ObjectMeta{
Name: clusterName,
Namespace: namespace,
},
Spec: acidv1.PostgresSpec{
TeamID: "myapp", NumberOfInstances: 1, TeamID: "myapp", NumberOfInstances: 1,
Resources: acidv1.Resources{ Resources: acidv1.Resources{
ResourceRequests: acidv1.ResourceDescription{CPU: "1", Memory: "10"}, ResourceRequests: acidv1.ResourceDescription{CPU: "1", Memory: "10"},
@@ -984,12 +991,24 @@ func TestTLS(t *testing.T) {
Volume: acidv1.Volume{ Volume: acidv1.Volume{
Size: "1G", Size: "1G",
}, },
TLS: &tls, TLS: &acidv1.TLSDescription{
AdditionalVolumes: additionalVolumes, SecretName: tlsSecretName, CAFile: "ca.crt"},
} AdditionalVolumes: []acidv1.AdditionalVolume{
acidv1.AdditionalVolume{
Name: tlsSecretName,
MountPath: mountPath,
VolumeSource: v1.VolumeSource{
Secret: &v1.SecretVolumeSource{
SecretName: tlsSecretName,
DefaultMode: &defaultMode,
},
},
},
},
},
} }
cluster = New( var cluster = New(
Config{ Config{
OpConfig: config.Config{ OpConfig: config.Config{
PodManagementPolicy: "ordered_ready", PodManagementPolicy: "ordered_ready",
@@ -1004,15 +1023,14 @@ func TestTLS(t *testing.T) {
SpiloFSGroup: &spiloFSGroup, SpiloFSGroup: &spiloFSGroup,
}, },
}, },
}, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger, eventRecorder) }, client, pg, logger, eventRecorder)
spec = makeSpec(acidv1.TLSDescription{SecretName: "my-secret", CAFile: "ca.crt"})
s, err := cluster.generateStatefulSet(&spec) // create a statefulset
if err != nil { sts, err := cluster.createStatefulSet()
assert.NoError(t, err) assert.NoError(t, err)
}
fsGroup := int64(103) fsGroup := int64(103)
assert.Equal(t, &fsGroup, s.Spec.Template.Spec.SecurityContext.FSGroup, "has a default FSGroup assigned") assert.Equal(t, &fsGroup, sts.Spec.Template.Spec.SecurityContext.FSGroup, "has a default FSGroup assigned")
volume := v1.Volume{ volume := v1.Volume{
Name: "my-secret", Name: "my-secret",
@@ -1023,16 +1041,16 @@ func TestTLS(t *testing.T) {
}, },
}, },
} }
assert.Contains(t, s.Spec.Template.Spec.Volumes, volume, "the pod gets a secret volume") assert.Contains(t, sts.Spec.Template.Spec.Volumes, volume, "the pod gets a secret volume")
assert.Contains(t, s.Spec.Template.Spec.Containers[0].VolumeMounts, v1.VolumeMount{ assert.Contains(t, sts.Spec.Template.Spec.Containers[0].VolumeMounts, v1.VolumeMount{
MountPath: "/tls", MountPath: "/tls",
Name: "my-secret", Name: "my-secret",
}, "the volume gets mounted in /tls") }, "the volume gets mounted in /tls")
assert.Contains(t, s.Spec.Template.Spec.Containers[0].Env, v1.EnvVar{Name: "SSL_CERTIFICATE_FILE", Value: "/tls/tls.crt"}) assert.Contains(t, sts.Spec.Template.Spec.Containers[0].Env, v1.EnvVar{Name: "SSL_CERTIFICATE_FILE", Value: "/tls/tls.crt"})
assert.Contains(t, s.Spec.Template.Spec.Containers[0].Env, v1.EnvVar{Name: "SSL_PRIVATE_KEY_FILE", Value: "/tls/tls.key"}) assert.Contains(t, sts.Spec.Template.Spec.Containers[0].Env, v1.EnvVar{Name: "SSL_PRIVATE_KEY_FILE", Value: "/tls/tls.key"})
assert.Contains(t, s.Spec.Template.Spec.Containers[0].Env, v1.EnvVar{Name: "SSL_CA_FILE", Value: "/tls/ca.crt"}) assert.Contains(t, sts.Spec.Template.Spec.Containers[0].Env, v1.EnvVar{Name: "SSL_CA_FILE", Value: "/tls/ca.crt"})
} }
func TestAdditionalVolume(t *testing.T) { func TestAdditionalVolume(t *testing.T) {