fix linter errors
parent 6b73ac4282
commit af6055b700
@@ -1163,7 +1163,7 @@ func (c *Cluster) initHumanUsers() error {
 	for _, superuserTeam := range superuserTeams {
 		err := c.initTeamMembers(superuserTeam, true)
 		if err != nil {
-			return fmt.Errorf("Cannot initialize members for team %q of Postgres superusers: %v", superuserTeam, err)
+			return fmt.Errorf("cannot initialize members for team %q of Postgres superusers: %v", superuserTeam, err)
 		}
 		if superuserTeam == c.Spec.TeamID {
 			clusterIsOwnedBySuperuserTeam = true
@@ -1176,7 +1176,7 @@ func (c *Cluster) initHumanUsers() error {
 			if !(util.SliceContains(superuserTeams, additionalTeam)) {
 				err := c.initTeamMembers(additionalTeam, false)
 				if err != nil {
-					return fmt.Errorf("Cannot initialize members for additional team %q for cluster owned by %q: %v", additionalTeam, c.Spec.TeamID, err)
+					return fmt.Errorf("cannot initialize members for additional team %q for cluster owned by %q: %v", additionalTeam, c.Spec.TeamID, err)
 				}
 			}
 		}
@@ -1189,7 +1189,7 @@ func (c *Cluster) initHumanUsers() error {
 
 	err := c.initTeamMembers(c.Spec.TeamID, false)
 	if err != nil {
-		return fmt.Errorf("Cannot initialize members for team %q who owns the Postgres cluster: %v", c.Spec.TeamID, err)
+		return fmt.Errorf("cannot initialize members for team %q who owns the Postgres cluster: %v", c.Spec.TeamID, err)
 	}
 
 	return nil
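The three hunks above fix the same finding: Go error strings should start with a lowercase letter, because errors are routinely wrapped into longer messages where a mid-sentence capital reads badly (the rule golint and staticcheck report as ST1005, "error strings should not be capitalized"). A minimal sketch, with cause as a hypothetical inner error:

	// return fmt.Errorf("Cannot open config: %v", cause) // flagged: capitalized
	return fmt.Errorf("cannot open config: %v", cause) // wraps cleanly:
	// "sync failed: cannot open config: permission denied"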
@@ -420,9 +420,7 @@ func (c *Cluster) deleteConnectionPooler(role PostgresRole) (err error) {
 
 	// Clean up the deployment object. If deployment resource we've remembered
 	// is somehow empty, try to delete based on what would we generate
-	var deployment *appsv1.Deployment
-	deployment = c.ConnectionPooler[role].Deployment
-
+	deployment := c.ConnectionPooler[role].Deployment
 	policy := metav1.DeletePropagationForeground
 	options := metav1.DeleteOptions{PropagationPolicy: &policy}
 
@@ -445,8 +443,7 @@ func (c *Cluster) deleteConnectionPooler(role PostgresRole) (err error) {
 	}
 
 	// Repeat the same for the service object
-	var service *v1.Service
-	service = c.ConnectionPooler[role].Service
+	service := c.ConnectionPooler[role].Service
 	if service == nil {
 		c.logger.Debugf("no connection pooler service object to delete")
 	} else {
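Both connection-pooler hunks collapse a declare-then-assign pair into one short variable declaration, the pattern gosimple reports as S1021 ("should merge variable declaration with assignment on next line"). Schematically:

	// var deployment *appsv1.Deployment                // S1021: declaration ...
	// deployment = c.ConnectionPooler[role].Deployment // ... immediately assigned
	deployment := c.ConnectionPooler[role].Deployment // merged, same meaning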
@@ -213,10 +213,10 @@ PatroniInitDBParams:
 	for _, k := range initdbOptionNames {
 		v := patroni.InitDB[k]
 		for i, defaultParam := range config.Bootstrap.Initdb {
-			switch defaultParam.(type) {
+			switch t := defaultParam.(type) {
 			case map[string]string:
 				{
-					for k1 := range defaultParam.(map[string]string) {
+					for k1 := range t {
 						if k1 == k {
 							(config.Bootstrap.Initdb[i]).(map[string]string)[k] = v
 							continue PatroniInitDBParams
@@ -226,7 +226,7 @@ PatroniInitDBParams:
 			case string:
 				{
 					/* if the option already occurs in the list */
-					if defaultParam.(string) == v {
+					if t == v {
 						continue PatroniInitDBParams
 					}
 				}
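Naming the value in the type switch (switch t := defaultParam.(type)) gives each case a t already narrowed to that case's type, so the repeated assertions defaultParam.(map[string]string) and defaultParam.(string) become unnecessary (gosimple S1034). A small sketch:

	var v interface{} = "x" // any value held in an interface
	switch t := v.(type) {
	case map[string]string:
		_ = t["key"] // t already has type map[string]string here
	case string:
		_ = t == "x" // t already has type string here
	}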
@@ -264,7 +264,7 @@ PatroniInitDBParams:
 	if patroni.SynchronousMode {
 		config.Bootstrap.DCS.SynchronousMode = patroni.SynchronousMode
 	}
-	if patroni.SynchronousModeStrict != false {
+	if patroni.SynchronousModeStrict {
 		config.Bootstrap.DCS.SynchronousModeStrict = patroni.SynchronousModeStrict
 	}
 
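Comparing a boolean against false is redundant; gosimple S1002 ("should omit comparison to bool constant") suggests using the value directly:

	strict := true // any boolean
	// if strict != false { ... } // flagged: comparison to bool constant
	if strict { // equivalent and idiomatic
	}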
@@ -336,7 +336,7 @@ func nodeAffinity(nodeReadinessLabel map[string]string, nodeAffinity *v1.NodeAff
 	if len(nodeReadinessLabel) == 0 && nodeAffinity == nil {
 		return nil
 	}
-	nodeAffinityCopy := *&v1.NodeAffinity{}
+	nodeAffinityCopy := v1.NodeAffinity{}
 	if nodeAffinity != nil {
 		nodeAffinityCopy = *nodeAffinity.DeepCopy()
 	}
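*&v1.NodeAffinity{} takes the address of a composite literal and immediately dereferences it, a no-op round trip that linters flag (staticcheck's SA4001 covers the mirrored &* form); the bare literal is identical:

	// nodeAffinityCopy := *&v1.NodeAffinity{} // flagged: address taken, then dereferenced
	nodeAffinityCopy := v1.NodeAffinity{} // identical value, no pointer round trip
	_ = nodeAffinityCopy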
@@ -1279,15 +1279,12 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
 		return nil, fmt.Errorf("could not set the pod management policy to the unknown value: %v", c.OpConfig.PodManagementPolicy)
 	}
 
-	stsAnnotations := make(map[string]string)
-	stsAnnotations = c.AnnotationsToPropagate(c.annotationsSet(nil))
-
 	statefulSet := &appsv1.StatefulSet{
 		ObjectMeta: metav1.ObjectMeta{
 			Name:        c.statefulSetName(),
 			Namespace:   c.Namespace,
 			Labels:      c.labelsSet(true),
-			Annotations: stsAnnotations,
+			Annotations: c.AnnotationsToPropagate(c.annotationsSet(nil)),
 		},
 		Spec: appsv1.StatefulSetSpec{
 			Replicas:             &numberOfInstances,
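The removed stsAnnotations pair was an instance of staticcheck SA4006: the make(map[string]string) result is never read before being overwritten on the next line, so the allocation is wasted and the temporary adds nothing. Sketch:

	// anns := make(map[string]string)                        // SA4006: value never read ...
	// anns = c.AnnotationsToPropagate(c.annotationsSet(nil)) // ... it is replaced here
	anns := c.AnnotationsToPropagate(c.annotationsSet(nil)) // or inline the expression
	_ = anns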
@@ -930,15 +930,6 @@ func TestNodeAffinity(t *testing.T) {
 	assert.Equal(t, s.Spec.Template.Spec.Affinity.NodeAffinity, nodeAff, "cluster template has correct node affinity")
 }
 
-func testCustomPodTemplate(cluster *Cluster, podSpec *v1.PodTemplateSpec) error {
-	if podSpec.ObjectMeta.Name != "test-pod-template" {
-		return fmt.Errorf("Custom pod template is not used, current spec %+v",
-			podSpec)
-	}
-
-	return nil
-}
-
 func testDeploymentOwnerReference(cluster *Cluster, deployment *appsv1.Deployment) error {
 	owner := deployment.ObjectMeta.OwnerReferences[0]
 
@@ -970,6 +961,19 @@ func TestTLS(t *testing.T) {
 	var spiloFSGroup = int64(103)
 	var additionalVolumes = spec.AdditionalVolumes
 
+	defaultMode := int32(0640)
+	mountPath := "/tls"
+	additionalVolumes = append(additionalVolumes, acidv1.AdditionalVolume{
+		Name:      spec.TLS.SecretName,
+		MountPath: mountPath,
+		VolumeSource: v1.VolumeSource{
+			Secret: &v1.SecretVolumeSource{
+				SecretName:  spec.TLS.SecretName,
+				DefaultMode: &defaultMode,
+			},
+		},
+	})
+
 	makeSpec := func(tls acidv1.TLSDescription) acidv1.PostgresSpec {
 		return acidv1.PostgresSpec{
 			TeamID: "myapp", NumberOfInstances: 1,
@@ -981,6 +985,7 @@ func TestTLS(t *testing.T) {
 				Size: "1G",
 			},
 			TLS:               &tls,
+			AdditionalVolumes: additionalVolumes,
 		}
 	}
 
@@ -1009,19 +1014,6 @@ func TestTLS(t *testing.T) {
 	fsGroup := int64(103)
 	assert.Equal(t, &fsGroup, s.Spec.Template.Spec.SecurityContext.FSGroup, "has a default FSGroup assigned")
 
-	defaultMode := int32(0640)
-	mountPath := "/tls"
-	additionalVolumes = append(additionalVolumes, acidv1.AdditionalVolume{
-		Name:      spec.TLS.SecretName,
-		MountPath: mountPath,
-		VolumeSource: v1.VolumeSource{
-			Secret: &v1.SecretVolumeSource{
-				SecretName:  spec.TLS.SecretName,
-				DefaultMode: &defaultMode,
-			},
-		},
-	})
-
 	volume := v1.Volume{
 		Name: "my-secret",
 		VolumeSource: v1.VolumeSource{
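The TestTLS hunks are a behavioral test repair rather than pure linting: the secret volume used to be appended to additionalVolumes only after makeSpec had already built the PostgresSpec (which did not carry AdditionalVolumes at all), so the generated StatefulSet never saw it. Building the slice first and wiring it into the spec makes the assertion meaningful. The underlying pitfall, sketched with a hypothetical struct:

	vols := []string{}
	spec := struct{ Volumes []string }{Volumes: vols} // copies the slice header (len 0)
	vols = append(vols, "tls")                        // too late: spec.Volumes is unaffected
	fmt.Println(len(spec.Volumes))                    // prints 0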
@@ -19,8 +19,8 @@ var VersionMap = map[string]int{
 
 // IsBiggerPostgresVersion Compare two Postgres version numbers
 func IsBiggerPostgresVersion(old string, new string) bool {
-	oldN, _ := VersionMap[old]
-	newN, _ := VersionMap[new]
+	oldN := VersionMap[old]
+	newN := VersionMap[new]
 	return newN > oldN
 }
 
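Indexing a map already yields the element type's zero value when the key is missing, so discarding the second result with _ is noise (gosimple S1005, "unnecessary assignment to the blank identifier"). The two-result form is only useful when presence matters; hypothetical map contents:

	vm := map[string]int{"9.6": 90600}
	n := vm["9.6"]    // would be 0 if the key were absent; `n, _ :=` adds nothing
	v, ok := vm["14"] // comma-ok form only when absence must be detected
	_, _, _ = n, v, ok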
@@ -11,7 +11,6 @@ import (
 	"github.com/zalando/postgres-operator/pkg/util"
 	"github.com/zalando/postgres-operator/pkg/util/constants"
 	"github.com/zalando/postgres-operator/pkg/util/k8sutil"
-	appsv1 "k8s.io/api/apps/v1"
 	batchv1beta1 "k8s.io/api/batch/v1beta1"
 	v1 "k8s.io/api/core/v1"
 	policybeta1 "k8s.io/api/policy/v1beta1"
@@ -260,28 +259,6 @@ func (c *Cluster) syncPodDisruptionBudget(isUpdate bool) error {
 	return nil
 }
 
-func (c *Cluster) mustUpdatePodsAfterLazyUpdate(desiredSset *appsv1.StatefulSet) (bool, error) {
-
-	pods, err := c.listPods()
-	if err != nil {
-		return false, fmt.Errorf("could not list pods of the statefulset: %v", err)
-	}
-
-	for _, pod := range pods {
-
-		effectivePodImage := pod.Spec.Containers[0].Image
-		ssImage := desiredSset.Spec.Template.Spec.Containers[0].Image
-
-		if ssImage != effectivePodImage {
-			c.logger.Infof("not all pods were re-started when the lazy upgrade was enabled; forcing the rolling upgrade now")
-			return true, nil
-		}
-
-	}
-
-	return false, nil
-}
-
 func (c *Cluster) syncStatefulSet() error {
 
 	podsToRecreate := make([]v1.Pod, 0)
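The removed mustUpdatePodsAfterLazyUpdate helper was presumably flagged as unused code; dropping it also removes this file's last reference to appsv1, which explains the deleted import in the hunk above, since Go treats an unused import as a compile error rather than a warning. A file-level sketch of that coupling:

	package cluster

	import appsv1 "k8s.io/api/apps/v1" // compile error as soon as nothing below uses it

	var _ appsv1.StatefulSet // a reference like this is what keeps the import legal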
@@ -227,11 +227,6 @@ func (c *Cluster) logServiceChanges(role PostgresRole, old, new *v1.Service, isU
 	}
 }
 
-func (c *Cluster) logVolumeChanges(old, new acidv1.Volume) {
-	c.logger.Infof("volume specification has been changed")
-	logNiceDiff(c.logger, old, new)
-}
-
 func (c *Cluster) getTeamMembers(teamID string) ([]string, error) {
 
 	if teamID == "" {
@@ -251,9 +246,7 @@ func (c *Cluster) getTeamMembers(teamID string) ([]string, error) {
 			}
 		}
 
-		for _, member := range additionalMembers {
-			members = append(members, member)
-		}
+		members = append(members, additionalMembers...)
 	}
 
 	if !c.OpConfig.EnableTeamsAPI {
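A loop whose body only appends each element can be a single variadic append (gosimple S1011, "should replace loop with append"):

	members := []string{"alice"} // hypothetical data
	additional := []string{"bob", "carol"}
	// for _, m := range additional { // flagged: S1011
	// 	members = append(members, m)
	// }
	members = append(members, additional...) // one call, same result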
@@ -292,7 +285,6 @@ func (c *Cluster) annotationsSet(annotations map[string]string) map[string]strin
 	pgCRDAnnotations := c.ObjectMeta.Annotations
 
 	// allow to inherit certain labels from the 'postgres' object
-	if pgCRDAnnotations != nil {
 	for k, v := range pgCRDAnnotations {
 		for _, match := range c.OpConfig.InheritedAnnotations {
 			if k == match {
@@ -300,7 +292,6 @@ func (c *Cluster) annotationsSet(annotations map[string]string) map[string]strin
 			}
 		}
 	}
-	}
 
 	if len(annotations) > 0 {
 		return annotations
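The deleted nil check was unnecessary: ranging over a nil map executes zero iterations rather than panicking, which is exactly what gosimple S1031 ("unnecessary nil check around range") points out:

	var anns map[string]string // nil map, never initialized
	for k, v := range anns {   // safe: a nil map ranges zero times
		_, _ = k, v
	}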
@@ -74,10 +74,15 @@ func (c *Cluster) syncVolumes() error {
 func (c *Cluster) syncUnderlyingEBSVolume() error {
 	c.logger.Infof("starting to sync EBS volumes: type, iops, throughput, and size")
 
-	var err error
+	var (
+		err     error
+		newSize resource.Quantity
+	)
 
 	targetValue := c.Spec.Volume
-	newSize, err := resource.ParseQuantity(targetValue.Size)
+	if newSize, err = resource.ParseQuantity(targetValue.Size); err != nil {
+		return fmt.Errorf("could not parse volume size: %v", err)
+	}
 	targetSize := quantityToGigabyte(newSize)
 
 	awsGp3 := aws.String("gp3")
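Beyond grouping the declarations, this rewrite fixes the substantive finding: the error from resource.ParseQuantity was assigned but, judging by the fix, never checked at this point (errcheck/ineffassign territory), so a malformed size would have flowed on as a zero quantity. A self-contained sketch of the checked form, with parseSize as a hypothetical helper:

	import (
		"fmt"

		"k8s.io/apimachinery/pkg/api/resource"
	)

	// parseSize returns the parsed quantity or a descriptive error.
	func parseSize(s string) (resource.Quantity, error) {
		q, err := resource.ParseQuantity(s)
		if err != nil {
			return resource.Quantity{}, fmt.Errorf("could not parse volume size: %v", err)
		}
		return q, nil
	}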
@@ -221,10 +221,10 @@ func TestMigrateEBS(t *testing.T) {
 }
 
 type testVolume struct {
-	iops        int64
-	throughtput int64
 	size int64
-	volType     string
+	//iops        int64
+	//throughtput int64
+	//volType     string
 }
 
 func initTestVolumesAndPods(client k8sutil.KubernetesClient, namespace, clustername string, labels labels.Set, volumes []testVolume) {
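Struct fields that no test reads trigger the unused-field check (structcheck or the unused linter); here they are commented out rather than deleted, preserving the intended shape (including the pre-existing "throughtput" spelling) for when the tests need them again:

	type testVolume struct {
		size int64 // the only field the tests currently consume
		// iops, throughtput, volType: reported as unused, kept as comments
	}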