fetch cluster resources by name, not by label selectors
parent 57c3111d1a
commit 39c123e96a
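The gist of the change: rather than listing objects with a label selector and then picking the matching item, each per-cluster object is fetched directly under its deterministic name, and a not-found response is tolerated instead of treated as a failure. A minimal standalone sketch of that pattern (a hypothetical helper using plain client-go rather than the operator's KubeClient wrapper; recent client-go versions also require a context):

package example

import (
	"context"
	"fmt"

	v1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// getServiceByName fetches a Service by its well-known name.
// It returns (nil, nil) when the object simply does not exist yet,
// so callers can distinguish "missing" from a real API error.
func getServiceByName(ctx context.Context, client kubernetes.Interface, namespace, name string) (*v1.Service, error) {
	svc, err := client.CoreV1().Services(namespace).Get(ctx, name, metav1.GetOptions{})
	if apierrors.IsNotFound(err) {
		return nil, nil
	}
	if err != nil {
		return nil, fmt.Errorf("could not get service %s/%s: %v", namespace, name, err)
	}
	return svc, nil
}

The diff below applies the same idea through the operator's own client wrapper and its k8sutil.ResourceNotFound check.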
					
@@ -46,6 +46,27 @@ type spiloConfiguration struct {
 	Bootstrap            pgBootstrap            `json:"bootstrap"`
 }
 
+func (c *Cluster) containerName() string {
+	return c.Name
+}
+
+func (c *Cluster) statefulSetName() string {
+	return c.Name
+}
+
+func (c *Cluster) endpointName() string {
+	return c.Name
+}
+
+func (c *Cluster) serviceName(role postgresRole) string {
+	name := c.Name
+	if role == replica {
+		name = name + "-repl"
+	}
+
+	return name
+}
+
 func (c *Cluster) resourceRequirements(resources spec.Resources) (*v1.ResourceRequirements, error) {
 	var err error
 
@@ -274,7 +295,7 @@ func (c *Cluster) generatePodTemplate(resourceRequirements *v1.ResourceRequireme
 	}
 	privilegedMode := bool(true)
 	container := v1.Container{
-		Name:            c.Name,
+		Name:            c.containerName(),
 		Image:           c.OpConfig.DockerImage,
 		ImagePullPolicy: v1.PullAlways,
 		Resources:       *resourceRequirements,
@@ -314,7 +335,7 @@ func (c *Cluster) generatePodTemplate(resourceRequirements *v1.ResourceRequireme
 	template := v1.PodTemplateSpec{
 		ObjectMeta: metav1.ObjectMeta{
 			Labels:    c.labelsSet(),
-			Namespace: c.Name,
+			Namespace: c.Namespace,
 		},
 		Spec: podSpec,
 	}
@@ -339,13 +360,13 @@ func (c *Cluster) generateStatefulSet(spec spec.PostgresSpec) (*v1beta1.Stateful
 
 	statefulSet := &v1beta1.StatefulSet{
 		ObjectMeta: metav1.ObjectMeta{
-			Name:      c.Name,
+			Name:      c.statefulSetName(),
 			Namespace: c.Namespace,
 			Labels:    c.labelsSet(),
 		},
 		Spec: v1beta1.StatefulSetSpec{
 			Replicas:             &spec.NumberOfInstances,
-			ServiceName:          c.Name,
+			ServiceName:          c.serviceName(master),
 			Template:             *podTemplate,
 			VolumeClaimTemplates: []v1.PersistentVolumeClaim{*volumeClaimTemplate},
 		},
@@ -428,12 +449,12 @@ func (c *Cluster) generateSingleUserSecret(namespace string, pgUser spec.PgUser)
 }
 
 func (c *Cluster) generateService(role postgresRole, newSpec *spec.PostgresSpec) *v1.Service {
+	var dnsName string
 
-	dnsNameFunction := c.masterDNSName
-	name := c.Name
-	if role == replica {
-		dnsNameFunction = c.replicaDNSName
-		name = name + "-repl"
+	if role == master {
+		dnsName = c.masterDNSName()
+	} else {
+		dnsName = c.replicaDNSName()
 	}
 
 	serviceSpec := v1.ServiceSpec{
@@ -462,15 +483,14 @@ func (c *Cluster) generateService(role postgresRole, newSpec *spec.PostgresSpec)
 		serviceSpec.LoadBalancerSourceRanges = sourceRanges
 
 		annotations = map[string]string{
-			constants.ZalandoDNSNameAnnotation: dnsNameFunction(),
+			constants.ZalandoDNSNameAnnotation: dnsName,
 			constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue,
 		}
-
 	}
 
 	service := &v1.Service{
 		ObjectMeta: metav1.ObjectMeta{
-			Name:        name,
+			Name:        c.serviceName(role),
 			Namespace:   c.Namespace,
 			Labels:      c.roleLabelsSet(role),
 			Annotations: annotations,
@@ -484,7 +504,7 @@ func (c *Cluster) generateService(role postgresRole, newSpec *spec.PostgresSpec)
 func (c *Cluster) generateMasterEndpoints(subsets []v1.EndpointSubset) *v1.Endpoints {
 	endpoints := &v1.Endpoints{
 		ObjectMeta: metav1.ObjectMeta{
-			Name:      c.Name,
+			Name:      c.endpointName(),
 			Namespace: c.Namespace,
 			Labels:    c.roleLabelsSet(master),
 		},

@@ -16,45 +16,33 @@ import (
 )
 
 func (c *Cluster) loadResources() error {
+	var err error
 	ns := c.Namespace
-	listOptions := metav1.ListOptions{
-		LabelSelector: c.labelsSet().String(),
-	}
 
-	services, err := c.KubeClient.Services(ns).List(listOptions)
-	if err != nil {
-		return fmt.Errorf("could not get list of services: %v", err)
-	}
-	if len(services.Items) > 2 {
-		return fmt.Errorf("too many(%d) services for a cluster", len(services.Items))
-	}
-	for i, svc := range services.Items {
-		switch postgresRole(svc.Labels[c.OpConfig.PodRoleLabel]) {
-		case replica:
-			c.Services[replica] = &services.Items[i]
-		default:
-			c.Services[master] = &services.Items[i]
-		}
-	}
+	masterService, err := c.KubeClient.Services(ns).Get(c.serviceName(master), metav1.GetOptions{})
+	if err == nil {
+		c.Services[master] = masterService
+	} else if !k8sutil.ResourceNotFound(err) {
+		c.logger.Errorf("could not get master service: %v", err)
+	}
 
-	endpoints, err := c.KubeClient.Endpoints(ns).List(listOptions)
-	if err != nil {
-		return fmt.Errorf("could not get list of endpoints: %v", err)
-	}
-	if len(endpoints.Items) > 2 {
-		return fmt.Errorf("too many(%d) endpoints for a cluster", len(endpoints.Items))
-	}
+	replicaService, err := c.KubeClient.Services(ns).Get(c.serviceName(replica), metav1.GetOptions{})
+	if err == nil {
+		c.Services[replica] = replicaService
+	} else if !k8sutil.ResourceNotFound(err) {
+		c.logger.Errorf("could not get replica service: %v", err)
+	}
 
-	for i, ep := range endpoints.Items {
-		if ep.Labels[c.OpConfig.PodRoleLabel] != string(replica) {
-			c.Endpoint = &endpoints.Items[i]
-			break
-		}
-	}
+	ep, err := c.KubeClient.Endpoints(ns).Get(c.endpointName(), metav1.GetOptions{})
+	if err == nil {
+		c.Endpoint = ep
+	} else if !k8sutil.ResourceNotFound(err) {
+		c.logger.Errorf("could not get endpoint: %v", err)
+	}
 
-	secrets, err := c.KubeClient.Secrets(ns).List(listOptions)
+	secrets, err := c.KubeClient.Secrets(ns).List(metav1.ListOptions{LabelSelector: c.labelsSet().String()})
 	if err != nil {
-		return fmt.Errorf("could not get list of secrets: %v", err)
+		c.logger.Errorf("could not get list of secrets: %v", err)
 	}
 	for i, secret := range secrets.Items {
 		if _, ok := c.Secrets[secret.UID]; ok {
@@ -64,15 +52,11 @@ func (c *Cluster) loadResources() error {
 		c.logger.Debugf("secret loaded, uid: %q", secret.UID)
 	}
 
-	statefulSets, err := c.KubeClient.StatefulSets(ns).List(listOptions)
-	if err != nil {
-		return fmt.Errorf("could not get list of statefulsets: %v", err)
-	}
-	if len(statefulSets.Items) > 1 {
-		return fmt.Errorf("too many(%d) statefulsets for a cluster", len(statefulSets.Items))
-	}
-	if len(statefulSets.Items) == 1 {
-		c.Statefulset = &statefulSets.Items[0]
+	ss, err := c.KubeClient.StatefulSets(ns).Get(c.statefulSetName(), metav1.GetOptions{})
+	if err == nil {
+		c.Statefulset = ss
+	} else if !k8sutil.ResourceNotFound(err) {
+		c.logger.Errorf("could not get statefulset: %v", err)
 	}
 
 	return nil
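With the name helpers introduced above, every per-cluster object name is derived from the cluster name itself: for a hypothetical cluster named acid-minimal, the container, StatefulSet, endpoint, and master Service would all be acid-minimal, while the replica Service would be acid-minimal-repl. That deterministic naming is what makes the Get-by-name calls in loadResources possible.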