proper names for constants; some clean up for log messages
parent 2161a0816c
commit d7e9142fc7
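Most of the log-message changes below replace hand-quoted '%s' placeholders with %q and print errors with %v. A minimal standalone sketch of what those verbs do (the cluster name and error value are made up for illustration):

package main

import (
	"errors"
	"fmt"
)

func main() {
	// Hypothetical values, only to show the formatting verbs this commit switches to.
	clusterName := "acid-testcluster"
	err := errors.New("connection refused")

	// %v is the default format; for error values it prints err.Error().
	fmt.Printf("could not create client: %v\n", err)
	// -> could not create client: connection refused

	// %q wraps the operand in double quotes, replacing patterns like '%s'.
	fmt.Printf("cluster %q has been created\n", clusterName)
	// -> cluster "acid-testcluster" has been created
}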
@@ -47,17 +47,17 @@ func init() {
 func ControllerConfig() *controller.Config {
 restConfig, err := k8sutil.RestConfig(KubeConfigFile, OutOfCluster)
 if err != nil {
-log.Fatalf("Can't get REST config: %s", err)
+log.Fatalf("Can't get REST config: %v", err)
 }

 client, err := k8sutil.KubernetesClient(restConfig)
 if err != nil {
-log.Fatalf("Can't create client: %s", err)
+log.Fatalf("Can't create client: %v", err)
 }

 restClient, err := k8sutil.KubernetesRestClient(restConfig)
 if err != nil {
-log.Fatalf("Can't create rest client: %s", err)
+log.Fatalf("Can't create rest client: %v", err)
 }

 return &controller.Config{
@@ -137,7 +137,7 @@ func (c *Cluster) setStatus(status spec.PostgresStatus) {
 }

 if err != nil {
-c.logger.Warningf("could not set status for cluster '%s': %s", c.clusterName(), err)
+c.logger.Warningf("could not set status for cluster %q: %v", c.clusterName(), err)
 }
 }

@@ -180,7 +180,7 @@ func (c *Cluster) Create() error {
 if err != nil {
 return fmt.Errorf("could not create endpoint: %v", err)
 }
-c.logger.Infof("endpoint '%s' has been successfully created", util.NameFromMeta(ep.ObjectMeta))
+c.logger.Infof("endpoint %q has been successfully created", util.NameFromMeta(ep.ObjectMeta))

 for _, role := range []PostgresRole{Master, Replica} {
 if role == Replica && !c.Spec.ReplicaLoadBalancer {

@@ -190,7 +190,7 @@ func (c *Cluster) Create() error {
 if err != nil {
 return fmt.Errorf("could not create %s service: %v", role, err)
 }
-c.logger.Infof("%s service '%s' has been successfully created", role, util.NameFromMeta(service.ObjectMeta))
+c.logger.Infof("%s service %q has been successfully created", role, util.NameFromMeta(service.ObjectMeta))
 }

 if err = c.initUsers(); err != nil {
@@ -207,12 +207,12 @@ func (c *Cluster) Create() error {
 if err != nil {
 return fmt.Errorf("could not create statefulset: %v", err)
 }
-c.logger.Infof("statefulset '%s' has been successfully created", util.NameFromMeta(ss.ObjectMeta))
+c.logger.Infof("statefulset %q has been successfully created", util.NameFromMeta(ss.ObjectMeta))

 c.logger.Info("Waiting for cluster being ready")

 if err = c.waitStatefulsetPodsReady(); err != nil {
-c.logger.Errorf("Failed to create cluster: %s", err)
+c.logger.Errorf("Failed to create cluster: %v", err)
 return err
 }
 c.logger.Infof("pods are ready")

@@ -233,7 +233,7 @@ func (c *Cluster) Create() error {

 err = c.listResources()
 if err != nil {
-c.logger.Errorf("could not list resources: %s", err)
+c.logger.Errorf("could not list resources: %v", err)
 }

 return nil

@@ -243,7 +243,7 @@ func (c *Cluster) sameServiceWith(role PostgresRole, service *v1.Service) (match
 //TODO: improve comparison
 match = true
 if c.Service[role].Spec.Type != service.Spec.Type {
-return false, fmt.Sprintf("new %s service's type %s doesn't match the current one %s",
+return false, fmt.Sprintf("new %s service's type %q doesn't match the current one %q",
 role, service.Spec.Type, c.Service[role].Spec.Type)
 }
 oldSourceRanges := c.Service[role].Spec.LoadBalancerSourceRanges
@@ -259,7 +259,7 @@ func (c *Cluster) sameServiceWith(role PostgresRole, service *v1.Service) (match
 oldDNSAnnotation := c.Service[role].Annotations[constants.ZalandoDNSNameAnnotation]
 newDNSAnnotation := service.Annotations[constants.ZalandoDNSNameAnnotation]
 if oldDNSAnnotation != newDNSAnnotation {
-return false, fmt.Sprintf("new %s service's '%s' annotation doesn't match the current one", role, constants.ZalandoDNSNameAnnotation)
+return false, fmt.Sprintf("new %s service's %q annotation doesn't match the current one", role, constants.ZalandoDNSNameAnnotation)
 }

 return true, ""

@@ -290,7 +290,7 @@ func (c *Cluster) compareStatefulSetWith(statefulSet *v1beta1.StatefulSet) *comp
 }
 if len(c.Statefulset.Spec.Template.Spec.Containers) == 0 {

-c.logger.Warnf("statefulset '%s' has no container", util.NameFromMeta(c.Statefulset.ObjectMeta))
+c.logger.Warnf("statefulset %q has no container", util.NameFromMeta(c.Statefulset.ObjectMeta))
 return &compareStatefulsetResult{}
 }
 // In the comparisons below, the needsReplace and needsRollUpdate flags are never reset, since checks fall through

@@ -333,12 +333,12 @@ func (c *Cluster) compareStatefulSetWith(statefulSet *v1beta1.StatefulSet) *comp
 }
 if !reflect.DeepEqual(c.Statefulset.Spec.VolumeClaimTemplates[i].Annotations, statefulSet.Spec.VolumeClaimTemplates[i].Annotations) {
 needsReplace = true
-reasons = append(reasons, fmt.Sprintf("new statefulset's annotations for volume %s doesn't match the current one", name))
+reasons = append(reasons, fmt.Sprintf("new statefulset's annotations for volume %q doesn't match the current one", name))
 }
 if !reflect.DeepEqual(c.Statefulset.Spec.VolumeClaimTemplates[i].Spec, statefulSet.Spec.VolumeClaimTemplates[i].Spec) {
 name := c.Statefulset.Spec.VolumeClaimTemplates[i].Name
 needsReplace = true
-reasons = append(reasons, fmt.Sprintf("new statefulset's volumeClaimTemplates specification for volume %s doesn't match the current one", name))
+reasons = append(reasons, fmt.Sprintf("new statefulset's volumeClaimTemplates specification for volume %q doesn't match the current one", name))
 }
 }
@@ -405,7 +405,7 @@ func (c *Cluster) Update(newSpec *spec.Postgresql) error {
 defer c.mu.Unlock()

 c.setStatus(spec.ClusterStatusUpdating)
-c.logger.Debugf("Cluster update from version %s to %s",
+c.logger.Debugf("Cluster update from version %q to %q",
 c.Metadata.ResourceVersion, newSpec.Metadata.ResourceVersion)

 /* Make sure we update when this function exists */

@@ -431,7 +431,7 @@ func (c *Cluster) Update(newSpec *spec.Postgresql) error {
 if err != nil {
 return fmt.Errorf("could not create new %s service: %v", role, err)
 }
-c.logger.Infof("%s service '%s' has been created", role, util.NameFromMeta(service.ObjectMeta))
+c.logger.Infof("%s service %q has been created", role, util.NameFromMeta(service.ObjectMeta))
 }
 }
 // only proceed further if both old and new load balancer were present

@@ -446,7 +446,7 @@ func (c *Cluster) Update(newSpec *spec.Postgresql) error {
 c.setStatus(spec.ClusterStatusUpdateFailed)
 return fmt.Errorf("could not update %s service: %v", role, err)
 }
-c.logger.Infof("%s service '%s' has been updated", role, util.NameFromMeta(c.Service[role].ObjectMeta))
+c.logger.Infof("%s service %q has been updated", role, util.NameFromMeta(c.Service[role].ObjectMeta))
 }
 }

@@ -471,11 +471,11 @@ func (c *Cluster) Update(newSpec *spec.Postgresql) error {
 }
 }
 //TODO: if there is a change in numberOfInstances, make sure Pods have been created/deleted
-c.logger.Infof("statefulset '%s' has been updated", util.NameFromMeta(c.Statefulset.ObjectMeta))
+c.logger.Infof("statefulset %q has been updated", util.NameFromMeta(c.Statefulset.ObjectMeta))
 }

 if c.Spec.PgVersion != newSpec.Spec.PgVersion { // PG versions comparison
-c.logger.Warnf("Postgresql version change(%s -> %s) is not allowed",
+c.logger.Warnf("Postgresql version change(%q -> %q) is not allowed",
 c.Spec.PgVersion, newSpec.Spec.PgVersion)
 //TODO: rewrite pg version in tpr spec
 }
@@ -39,5 +39,5 @@ func (c *Cluster) resizePostgresFilesystem(podName *spec.NamespacedName, resizer

 return err
 }
-return fmt.Errorf("could not resize filesystem: no compatible resizers for the filesystem of type %s", fsType)
+return fmt.Errorf("could not resize filesystem: no compatible resizers for the filesystem of type %q", fsType)
 }

@@ -199,7 +199,7 @@ PATRONI_INITDB_PARAMS:
 }
 result, err := json.Marshal(config)
 if err != nil {
-c.logger.Errorf("Cannot convert spilo configuration into JSON: %s", err)
+c.logger.Errorf("Cannot convert spilo configuration into JSON: %v", err)
 return ""
 }
 return string(result)
@@ -35,11 +35,11 @@ func (c *Cluster) deletePods() error {
 for _, obj := range pods {
 podName := util.NameFromMeta(obj.ObjectMeta)

-c.logger.Debugf("Deleting pod '%s'", podName)
+c.logger.Debugf("Deleting pod %q", podName)
 if err := c.deletePod(podName); err != nil {
-c.logger.Errorf("could not delete pod '%s': %s", podName, err)
+c.logger.Errorf("could not delete pod %q: %v", podName, err)
 } else {
-c.logger.Infof("pod '%s' has been deleted", podName)
+c.logger.Infof("pod %q has been deleted", podName)
 }
 }
 if len(pods) > 0 {

@@ -107,7 +107,7 @@ func (c *Cluster) recreatePod(pod v1.Pod) error {
 if err := c.waitForPodLabel(ch); err != nil {
 return err
 }
-c.logger.Infof("pod '%s' is ready", podName)
+c.logger.Infof("pod %q is ready", podName)

 return nil
 }

@@ -136,7 +136,7 @@ func (c *Cluster) recreatePods() error {
 }

 if err := c.recreatePod(pod); err != nil {
-return fmt.Errorf("could not recreate replica pod '%s': %v", util.NameFromMeta(pod.ObjectMeta), err)
+return fmt.Errorf("could not recreate replica pod %q: %v", util.NameFromMeta(pod.ObjectMeta), err)
 }
 }
 if masterPod.Name == "" {

@@ -144,10 +144,10 @@ func (c *Cluster) recreatePods() error {
 } else {
 //TODO: do manual failover
 //TODO: specify master, leave new master empty
-c.logger.Infof("Recreating master pod '%s'", util.NameFromMeta(masterPod.ObjectMeta))
+c.logger.Infof("Recreating master pod %q", util.NameFromMeta(masterPod.ObjectMeta))

 if err := c.recreatePod(masterPod); err != nil {
-return fmt.Errorf("could not recreate master pod '%s': %v", util.NameFromMeta(masterPod.ObjectMeta), err)
+return fmt.Errorf("could not recreate master pod %q: %v", util.NameFromMeta(masterPod.ObjectMeta), err)
 }
 }
@@ -61,7 +61,7 @@ func (c *Cluster) loadResources() error {
 continue
 }
 c.Secrets[secret.UID] = &secrets.Items[i]
-c.logger.Debugf("secret loaded, uid: %s", secret.UID)
+c.logger.Debugf("secret loaded, uid: %q", secret.UID)
 }

 statefulSets, err := c.KubeClient.StatefulSets(ns).List(listOptions)

@@ -80,19 +80,19 @@ func (c *Cluster) loadResources() error {

 func (c *Cluster) listResources() error {
 if c.Statefulset != nil {
-c.logger.Infof("Found statefulset: %s (uid: %s)", util.NameFromMeta(c.Statefulset.ObjectMeta), c.Statefulset.UID)
+c.logger.Infof("Found statefulset: %q (uid: %q)", util.NameFromMeta(c.Statefulset.ObjectMeta), c.Statefulset.UID)
 }

 for _, obj := range c.Secrets {
-c.logger.Infof("Found secret: %s (uid: %s)", util.NameFromMeta(obj.ObjectMeta), obj.UID)
+c.logger.Infof("Found secret: %q (uid: %q)", util.NameFromMeta(obj.ObjectMeta), obj.UID)
 }

 if c.Endpoint != nil {
-c.logger.Infof("Found endpoint: %s (uid: %s)", util.NameFromMeta(c.Endpoint.ObjectMeta), c.Endpoint.UID)
+c.logger.Infof("Found endpoint: %q (uid: %q)", util.NameFromMeta(c.Endpoint.ObjectMeta), c.Endpoint.UID)
 }

 for role, service := range c.Service {
-c.logger.Infof("Found %s service: %s (uid: %s)", role, util.NameFromMeta(service.ObjectMeta), service.UID)
+c.logger.Infof("Found %s service: %q (uid: %q)", role, util.NameFromMeta(service.ObjectMeta), service.UID)
 }

 pods, err := c.listPods()

@@ -101,7 +101,7 @@ func (c *Cluster) listResources() error {
 }

 for _, obj := range pods {
-c.logger.Infof("Found pod: %s (uid: %s)", util.NameFromMeta(obj.ObjectMeta), obj.UID)
+c.logger.Infof("Found pod: %q (uid: %q)", util.NameFromMeta(obj.ObjectMeta), obj.UID)
 }

 pvcs, err := c.listPersistentVolumeClaims()

@@ -110,7 +110,7 @@ func (c *Cluster) listResources() error {
 }

 for _, obj := range pvcs {
-c.logger.Infof("Found PVC: %s (uid: %s)", util.NameFromMeta(obj.ObjectMeta), obj.UID)
+c.logger.Infof("Found PVC: %q (uid: %q)", util.NameFromMeta(obj.ObjectMeta), obj.UID)
 }

 return nil
@@ -129,7 +129,7 @@ func (c *Cluster) createStatefulSet() (*v1beta1.StatefulSet, error) {
 return nil, err
 }
 c.Statefulset = statefulSet
-c.logger.Debugf("Created new statefulset '%s', uid: %s", util.NameFromMeta(statefulSet.ObjectMeta), statefulSet.UID)
+c.logger.Debugf("Created new statefulset %q, uid: %q", util.NameFromMeta(statefulSet.ObjectMeta), statefulSet.UID)

 return statefulSet, nil
 }

@@ -144,7 +144,7 @@ func (c *Cluster) updateStatefulSet(newStatefulSet *v1beta1.StatefulSet) error {

 patchData, err := specPatch(newStatefulSet.Spec)
 if err != nil {
-return fmt.Errorf("could not form patch for the statefulset '%s': %v", statefulSetName, err)
+return fmt.Errorf("could not form patch for the statefulset %q: %v", statefulSetName, err)
 }

 statefulSet, err := c.KubeClient.StatefulSets(c.Statefulset.Namespace).Patch(

@@ -152,7 +152,7 @@ func (c *Cluster) updateStatefulSet(newStatefulSet *v1beta1.StatefulSet) error {
 types.MergePatchType,
 patchData, "")
 if err != nil {
-return fmt.Errorf("could not patch statefulset '%s': %v", statefulSetName, err)
+return fmt.Errorf("could not patch statefulset %q: %v", statefulSetName, err)
 }
 c.Statefulset = statefulSet

@@ -174,7 +174,7 @@ func (c *Cluster) replaceStatefulSet(newStatefulSet *v1beta1.StatefulSet) error

 options := meta_v1.DeleteOptions{OrphanDependents: &orphanDepencies}
 if err := c.KubeClient.StatefulSets(oldStatefulset.Namespace).Delete(oldStatefulset.Name, &options); err != nil {
-return fmt.Errorf("could not delete statefulset '%s': %v", statefulSetName, err)
+return fmt.Errorf("could not delete statefulset %q: %v", statefulSetName, err)
 }
 // make sure we clear the stored statefulset status if the subsequent create fails.
 c.Statefulset = nil

@@ -194,7 +194,7 @@ func (c *Cluster) replaceStatefulSet(newStatefulSet *v1beta1.StatefulSet) error
 // create the new statefulset with the desired spec. It would take over the remaining pods.
 createdStatefulset, err := c.KubeClient.StatefulSets(newStatefulSet.Namespace).Create(newStatefulSet)
 if err != nil {
-return fmt.Errorf("could not create statefulset '%s': %v", statefulSetName, err)
+return fmt.Errorf("could not create statefulset %q: %v", statefulSetName, err)
 }
 // check that all the previous replicas were picked up.
 if newStatefulSet.Spec.Replicas == oldStatefulset.Spec.Replicas &&
@@ -216,7 +216,7 @@ func (c *Cluster) deleteStatefulSet() error {
 if err != nil {
 return err
 }
-c.logger.Infof("statefulset '%s' has been deleted", util.NameFromMeta(c.Statefulset.ObjectMeta))
+c.logger.Infof("statefulset %q has been deleted", util.NameFromMeta(c.Statefulset.ObjectMeta))
 c.Statefulset = nil

 if err := c.deletePods(); err != nil {

@@ -270,12 +270,12 @@ func (c *Cluster) updateService(role PostgresRole, newService *v1.Service) error
 }
 err = c.KubeClient.Services(c.Service[role].Namespace).Delete(c.Service[role].Name, c.deleteOptions)
 if err != nil {
-return fmt.Errorf("could not delete service '%s': '%v'", serviceName, err)
+return fmt.Errorf("could not delete service %q: %v", serviceName, err)
 }
 c.Endpoint = nil
 svc, err := c.KubeClient.Services(newService.Namespace).Create(newService)
 if err != nil {
-return fmt.Errorf("could not create service '%s': '%v'", serviceName, err)
+return fmt.Errorf("could not create service %q: %v", serviceName, err)
 }
 c.Service[role] = svc
 if role == Master {

@@ -283,7 +283,7 @@ func (c *Cluster) updateService(role PostgresRole, newService *v1.Service) error
 endpointSpec := c.generateMasterEndpoints(currentEndpoint.Subsets)
 ep, err := c.KubeClient.Endpoints(c.Service[role].Namespace).Create(endpointSpec)
 if err != nil {
-return fmt.Errorf("could not create endpoint '%s': '%v'", endpointName, err)
+return fmt.Errorf("could not create endpoint %q: %v", endpointName, err)
 }
 c.Endpoint = ep
 }

@@ -299,13 +299,13 @@ func (c *Cluster) updateService(role PostgresRole, newService *v1.Service) error
 []byte(annotationsPatchData), "")

 if err != nil {
-return fmt.Errorf("could not replace annotations for the service '%s': %v", serviceName, err)
+return fmt.Errorf("could not replace annotations for the service %q: %v", serviceName, err)
 }
 }

 patchData, err := specPatch(newService.Spec)
 if err != nil {
-return fmt.Errorf("could not form patch for the service '%s': %v", serviceName, err)
+return fmt.Errorf("could not form patch for the service %q: %v", serviceName, err)
 }

 svc, err := c.KubeClient.Services(c.Service[role].Namespace).Patch(

@@ -313,7 +313,7 @@ func (c *Cluster) updateService(role PostgresRole, newService *v1.Service) error
 types.MergePatchType,
 patchData, "")
 if err != nil {
-return fmt.Errorf("could not patch service '%s': %v", serviceName, err)
+return fmt.Errorf("could not patch service %q: %v", serviceName, err)
 }
 c.Service[role] = svc
@@ -330,7 +330,7 @@ func (c *Cluster) deleteService(role PostgresRole) error {
 if err != nil {
 return err
 }
-c.logger.Infof("%s service '%s' has been deleted", role, util.NameFromMeta(service.ObjectMeta))
+c.logger.Infof("%s service %q has been deleted", role, util.NameFromMeta(service.ObjectMeta))
 c.Service[role] = nil
 return nil
 }

@@ -359,7 +359,7 @@ func (c *Cluster) deleteEndpoint() error {
 if err != nil {
 return err
 }
-c.logger.Infof("endpoint '%s' has been deleted", util.NameFromMeta(c.Endpoint.ObjectMeta))
+c.logger.Infof("endpoint %q has been deleted", util.NameFromMeta(c.Endpoint.ObjectMeta))
 c.Endpoint = nil

 return nil

@@ -376,7 +376,7 @@ func (c *Cluster) applySecrets() error {
 if err != nil {
 return fmt.Errorf("could not get current secret: %v", err)
 }
-c.logger.Debugf("secret '%s' already exists, fetching it's password", util.NameFromMeta(curSecret.ObjectMeta))
+c.logger.Debugf("secret %q already exists, fetching it's password", util.NameFromMeta(curSecret.ObjectMeta))
 if secretUsername == c.systemUsers[constants.SuperuserKeyName].Name {
 secretUsername = constants.SuperuserKeyName
 userMap = c.systemUsers

@@ -393,10 +393,10 @@ func (c *Cluster) applySecrets() error {
 continue
 } else {
 if err != nil {
-return fmt.Errorf("could not create secret for user '%s': %v", secretUsername, err)
+return fmt.Errorf("could not create secret for user %q: %v", secretUsername, err)
 }
 c.Secrets[secret.UID] = secret
-c.logger.Debugf("Created new secret '%s', uid: %s", util.NameFromMeta(secret.ObjectMeta), secret.UID)
+c.logger.Debugf("Created new secret %q, uid: %q", util.NameFromMeta(secret.ObjectMeta), secret.UID)
 }
 }

@@ -404,12 +404,12 @@ func (c *Cluster) applySecrets() error {
 }

 func (c *Cluster) deleteSecret(secret *v1.Secret) error {
-c.logger.Debugf("Deleting secret '%s'", util.NameFromMeta(secret.ObjectMeta))
+c.logger.Debugf("Deleting secret %q", util.NameFromMeta(secret.ObjectMeta))
 err := c.KubeClient.Secrets(secret.Namespace).Delete(secret.Name, c.deleteOptions)
 if err != nil {
 return err
 }
-c.logger.Infof("secret '%s' has been deleted", util.NameFromMeta(secret.ObjectMeta))
+c.logger.Infof("secret %q has been deleted", util.NameFromMeta(secret.ObjectMeta))
 delete(c.Secrets, secret.UID)

 return err
@@ -95,7 +95,7 @@ func (c *Cluster) syncService(role PostgresRole) error {
 if err != nil {
 return fmt.Errorf("could not create missing %s service: %v", role, err)
 }
-c.logger.Infof("Created missing %s service '%s'", role, util.NameFromMeta(svc.ObjectMeta))
+c.logger.Infof("Created missing %s service %q", role, util.NameFromMeta(svc.ObjectMeta))

 return nil
 }

@@ -110,7 +110,7 @@ func (c *Cluster) syncService(role PostgresRole) error {
 if err := c.updateService(role, desiredSvc); err != nil {
 return fmt.Errorf("could not update %s service to match desired state: %v", role, err)
 }
-c.logger.Infof("%s service '%s' is in the desired state now", role, util.NameFromMeta(desiredSvc.ObjectMeta))
+c.logger.Infof("%s service %q is in the desired state now", role, util.NameFromMeta(desiredSvc.ObjectMeta))

 return nil
 }

@@ -122,7 +122,7 @@ func (c *Cluster) syncEndpoint() error {
 if err != nil {
 return fmt.Errorf("could not create missing endpoint: %v", err)
 }
-c.logger.Infof("Created missing endpoint '%s'", util.NameFromMeta(ep.ObjectMeta))
+c.logger.Infof("Created missing endpoint %q", util.NameFromMeta(ep.ObjectMeta))
 return nil
 }

@@ -151,7 +151,7 @@ func (c *Cluster) syncStatefulSet() error {
 if err != nil {
 return fmt.Errorf("cluster is not ready: %v", err)
 }
-c.logger.Infof("Created missing statefulset '%s'", util.NameFromMeta(ss.ObjectMeta))
+c.logger.Infof("Created missing statefulset %q", util.NameFromMeta(ss.ObjectMeta))
 if !rollUpdate {
 return nil
 }
@@ -77,11 +77,11 @@ func metadataAnnotationsPatch(annotations map[string]string) string {

 func (c *Cluster) logStatefulSetChanges(old, new *v1beta1.StatefulSet, isUpdate bool, reasons []string) {
 if isUpdate {
-c.logger.Infof("statefulset '%s' has been changed",
+c.logger.Infof("statefulset %q has been changed",
 util.NameFromMeta(old.ObjectMeta),
 )
 } else {
-c.logger.Infof("statefulset '%s' is not in the desired state and needs to be updated",
+c.logger.Infof("statefulset %q is not in the desired state and needs to be updated",
 util.NameFromMeta(old.ObjectMeta),
 )
 }

@@ -89,18 +89,18 @@ func (c *Cluster) logStatefulSetChanges(old, new *v1beta1.StatefulSet, isUpdate

 if len(reasons) > 0 {
 for _, reason := range reasons {
-c.logger.Infof("Reason: %s", reason)
+c.logger.Infof("Reason: %q", reason)
 }
 }
 }

 func (c *Cluster) logServiceChanges(role PostgresRole, old, new *v1.Service, isUpdate bool, reason string) {
 if isUpdate {
-c.logger.Infof("%s service '%s' has been changed",
+c.logger.Infof("%s service %q has been changed",
 role, util.NameFromMeta(old.ObjectMeta),
 )
 } else {
-c.logger.Infof("%s service '%s is not in the desired state and needs to be updated",
+c.logger.Infof("%s service %q is not in the desired state and needs to be updated",
 role, util.NameFromMeta(old.ObjectMeta),
 )
 }

@@ -127,7 +127,7 @@ func (c *Cluster) getOAuthToken() (string, error) {
 Get(c.OpConfig.OAuthTokenSecretName.Name, meta_v1.GetOptions{})

 if err != nil {
-c.logger.Debugf("Oauth token secret name: %s", c.OpConfig.OAuthTokenSecretName)
+c.logger.Debugf("Oauth token secret name: %q", c.OpConfig.OAuthTokenSecretName)
 return "", fmt.Errorf("could not get credentials secret: %v", err)
 }
 data := credentialsSecret.Data
@@ -36,7 +36,7 @@ func (c *Cluster) deletePersistenVolumeClaims() error {
 return err
 }
 for _, pvc := range pvcs {
-c.logger.Debugf("Deleting PVC '%s'", util.NameFromMeta(pvc.ObjectMeta))
+c.logger.Debugf("Deleting PVC %q", util.NameFromMeta(pvc.ObjectMeta))
 if err := c.KubeClient.PersistentVolumeClaims(pvc.Namespace).Delete(pvc.Name, c.deleteOptions); err != nil {
 c.logger.Warningf("could not delete PersistentVolumeClaim: %v", err)
 }

@@ -63,10 +63,10 @@ func (c *Cluster) listPersistentVolumes() ([]*v1.PersistentVolume, error) {
 if lastDash > 0 && lastDash < len(pvc.Name)-1 {
 pvcNumber, err := strconv.Atoi(pvc.Name[lastDash+1:])
 if err != nil {
-return nil, fmt.Errorf("could not convert last part of the persistent volume claim name %s to a number", pvc.Name)
+return nil, fmt.Errorf("could not convert last part of the persistent volume claim name %q to a number", pvc.Name)
 }
 if int32(pvcNumber) > lastPodIndex {
-c.logger.Debugf("Skipping persistent volume %s corresponding to a non-running pods", pvc.Name)
+c.logger.Debugf("Skipping persistent volume %q corresponding to a non-running pods", pvc.Name)
 continue
 }
 }

@@ -119,22 +119,22 @@ func (c *Cluster) resizeVolumes(newVolume spec.Volume, resizers []volumes.Volume
 if err != nil {
 return err
 }
-c.logger.Debugf("updating persistent volume %s to %d", pv.Name, newSize)
+c.logger.Debugf("updating persistent volume %q to %d", pv.Name, newSize)
 if err := resizer.ResizeVolume(awsVolumeId, newSize); err != nil {
-return fmt.Errorf("could not resize EBS volume %s: %v", awsVolumeId, err)
+return fmt.Errorf("could not resize EBS volume %q: %v", awsVolumeId, err)
 }
-c.logger.Debugf("resizing the filesystem on the volume %s", pv.Name)
+c.logger.Debugf("resizing the filesystem on the volume %q", pv.Name)
 podName := getPodNameFromPersistentVolume(pv)
 if err := c.resizePostgresFilesystem(podName, []filesystems.FilesystemResizer{&filesystems.Ext234Resize{}}); err != nil {
-return fmt.Errorf("could not resize the filesystem on pod '%s': %v", podName, err)
+return fmt.Errorf("could not resize the filesystem on pod %q: %v", podName, err)
 }
-c.logger.Debugf("filesystem resize successful on volume %s", pv.Name)
+c.logger.Debugf("filesystem resize successful on volume %q", pv.Name)
 pv.Spec.Capacity[v1.ResourceStorage] = newQuantity
-c.logger.Debugf("updating persistent volume definition for volume %s", pv.Name)
+c.logger.Debugf("updating persistent volume definition for volume %q", pv.Name)
 if _, err := c.KubeClient.PersistentVolumes().Update(pv); err != nil {
-return fmt.Errorf("could not update persistent volume: %s", err)
+return fmt.Errorf("could not update persistent volume: %q", err)
 }
-c.logger.Debugf("successfully updated persistent volume %s", pv.Name)
+c.logger.Debugf("successfully updated persistent volume %q", pv.Name)
 }
 }
 if len(pvs) > 0 && totalCompatible == 0 {
@@ -107,7 +107,7 @@ func (c *Controller) podEventsDispatcher(stopCh <-chan struct{}) {
 c.clustersMu.RUnlock()

 if ok {
-c.logger.Debugf("Sending %s event of pod '%s' to the '%s' cluster channel", event.EventType, event.PodName, event.ClusterName)
+c.logger.Debugf("Sending %q event of pod %q to the %q cluster channel", event.EventType, event.PodName, event.ClusterName)
 cluster.ReceivePodEvent(event)
 }
 case <-stopCh:

@@ -120,11 +120,11 @@ func (c *Controller) processEvent(obj interface{}) error {
 switch event.EventType {
 case spec.EventAdd:
 if clusterFound {
-logger.Debugf("Cluster '%s' already exists", clusterName)
+logger.Debugf("Cluster %q already exists", clusterName)
 return nil
 }

-logger.Infof("Creation of the '%s' cluster started", clusterName)
+logger.Infof("Creation of the %q cluster started", clusterName)

 stopCh := make(chan struct{})
 cl = cluster.New(c.makeClusterConfig(), *event.NewSpec, logger)
@@ -142,31 +142,31 @@ func (c *Controller) processEvent(obj interface{}) error {
 return nil
 }

-logger.Infof("Cluster '%s' has been created", clusterName)
+logger.Infof("Cluster %q has been created", clusterName)
 case spec.EventUpdate:
-logger.Infof("Update of the '%s' cluster started", clusterName)
+logger.Infof("Update of the %q cluster started", clusterName)

 if !clusterFound {
-logger.Warnf("Cluster '%s' does not exist", clusterName)
+logger.Warnf("Cluster %q does not exist", clusterName)
 return nil
 }
 if err := cl.Update(event.NewSpec); err != nil {
-cl.Error = fmt.Errorf("could not update cluster: %s", err)
+cl.Error = fmt.Errorf("could not update cluster: %v", err)
 logger.Errorf("%v", cl.Error)

 return nil
 }
 cl.Error = nil
-logger.Infof("Cluster '%s' has been updated", clusterName)
+logger.Infof("Cluster %q has been updated", clusterName)
 case spec.EventDelete:
-logger.Infof("Deletion of the '%s' cluster started", clusterName)
+logger.Infof("Deletion of the %q cluster started", clusterName)
 if !clusterFound {
-logger.Errorf("Unknown cluster: %s", clusterName)
+logger.Errorf("Unknown cluster: %q", clusterName)
 return nil
 }

 if err := cl.Delete(); err != nil {
-logger.Errorf("could not delete cluster '%s': %s", clusterName, err)
+logger.Errorf("could not delete cluster %q: %v", clusterName, err)
 return nil
 }
 close(c.stopChs[clusterName])

@@ -176,9 +176,9 @@ func (c *Controller) processEvent(obj interface{}) error {
 delete(c.stopChs, clusterName)
 c.clustersMu.Unlock()

-logger.Infof("Cluster '%s' has been deleted", clusterName)
+logger.Infof("Cluster %q has been deleted", clusterName)
 case spec.EventSync:
-logger.Infof("Syncing of the '%s' cluster started", clusterName)
+logger.Infof("Syncing of the %q cluster started", clusterName)

 // no race condition because a cluster is always processed by single worker
 if !clusterFound {

@@ -193,13 +193,13 @@ func (c *Controller) processEvent(obj interface{}) error {
 }

 if err := cl.Sync(); err != nil {
-cl.Error = fmt.Errorf("could not sync cluster '%s': %v", clusterName, err)
+cl.Error = fmt.Errorf("could not sync cluster %q: %v", clusterName, err)
 logger.Errorf("%v", cl.Error)
 return nil
 }
 cl.Error = nil

-logger.Infof("Cluster '%s' has been synced", clusterName)
+logger.Infof("Cluster %q has been synced", clusterName)
 }

 return nil
@@ -236,7 +236,7 @@ func (c *Controller) queueClusterEvent(old, new *spec.Postgresql, eventType spec
 }

 if clusterError != nil && eventType != spec.EventDelete {
-c.logger.Debugf("Skipping %s event for invalid cluster %s (reason: %v)", eventType, clusterName, clusterError)
+c.logger.Debugf("Skipping %q event for invalid cluster %q (reason: %v)", eventType, clusterName, clusterError)
 return
 }

@@ -253,7 +253,7 @@ func (c *Controller) queueClusterEvent(old, new *spec.Postgresql, eventType spec
 if err := c.clusterEventQueues[workerID].Add(clusterEvent); err != nil {
 c.logger.WithField("worker", workerID).Errorf("error when queueing cluster event: %v", clusterEvent)
 }
-c.logger.WithField("worker", workerID).Infof("%s of the '%s' cluster has been queued", eventType, clusterName)
+c.logger.WithField("worker", workerID).Infof("%q of the %q cluster has been queued", eventType, clusterName)
 }

 func (c *Controller) postgresqlAdd(obj interface{}) {
@@ -49,17 +49,16 @@ func (c *Controller) clusterWorkerID(clusterName spec.NamespacedName) uint32 {
 }

 func (c *Controller) createTPR() error {
-TPRName := fmt.Sprintf("%s.%s", constants.TPRName, constants.TPRVendor)
-tpr := thirdPartyResource(TPRName)
+tpr := thirdPartyResource(constants.TPRName)

 _, err := c.KubeClient.ExtensionsV1beta1().ThirdPartyResources().Create(tpr)
 if err != nil {
 if !k8sutil.ResourceAlreadyExists(err) {
 return err
 }
-c.logger.Infof("ThirdPartyResource '%s' is already registered", TPRName)
+c.logger.Infof("ThirdPartyResource %q is already registered", constants.TPRName)
 } else {
-c.logger.Infof("ThirdPartyResource '%s' has been registered", TPRName)
+c.logger.Infof("ThirdPartyResource %q has been registered", constants.TPRName)
 }

 return k8sutil.WaitTPRReady(c.RestClient, c.opConfig.TPR.ReadyWaitInterval, c.opConfig.TPR.ReadyWaitTimeout, c.opConfig.Namespace)
@@ -75,7 +74,7 @@ func (c *Controller) getInfrastructureRoles() (result map[string]spec.PgUser, er
 Secrets(c.opConfig.InfrastructureRolesSecretName.Namespace).
 Get(c.opConfig.InfrastructureRolesSecretName.Name, meta_v1.GetOptions{})
 if err != nil {
-c.logger.Debugf("Infrastructure roles secret name: %s", c.opConfig.InfrastructureRolesSecretName)
+c.logger.Debugf("Infrastructure roles secret name: %q", c.opConfig.InfrastructureRolesSecretName)
 return nil, fmt.Errorf("could not get infrastructure roles secret: %v", err)
 }

@@ -103,7 +102,7 @@ Users:
 case "inrole":
 t.MemberOf = append(t.MemberOf, s)
 default:
-c.logger.Warnf("Unknown key %s", p)
+c.logger.Warnf("Unknown key %q", p)
 }
 }
 }
@@ -362,7 +362,7 @@ func TestClusterName(t *testing.T) {
 continue
 }
 if name != tt.clusterName {
-t.Errorf("Expected cluserName: %s, got: %s", tt.clusterName, name)
+t.Errorf("Expected cluserName: %q, got: %q", tt.clusterName, name)
 }
 }
 }

@@ -399,7 +399,7 @@ func TestMarshalMaintenanceWindow(t *testing.T) {
 }

 if !bytes.Equal(s, tt.in) {
-t.Errorf("Expected Marshal: %s, got: %s", string(tt.in), string(s))
+t.Errorf("Expected Marshal: %q, got: %q", string(tt.in), string(s))
 }
 }
 }

@@ -434,7 +434,7 @@ func TestMarshal(t *testing.T) {
 continue
 }
 if !bytes.Equal(m, tt.marshal) {
-t.Errorf("Marshal Postgresql expected: %s, got: %s", string(tt.marshal), string(m))
+t.Errorf("Marshal Postgresql expected: %q, got: %q", string(tt.marshal), string(m))
 }
 }
 }

@@ -49,7 +49,7 @@ func TestNamespacedNameError(t *testing.T) {
 var actual NamespacedName
 err := actual.Decode(tt)
 if err == nil {
-t.Errorf("Error expected for '%s', got: %#v", tt, actual)
+t.Errorf("Error expected for %q, got: %#v", tt, actual)
 }
 }
 }
@@ -4,8 +4,8 @@ import "time"

 // General kubernetes-related constants
 const (
-ListClustersURITemplate = "/apis/" + TPRVendor + "/" + TPRApiVersion + "/namespaces/%s/" + ResourceName // Namespace
-WatchClustersURITemplate = "/apis/" + TPRVendor + "/" + TPRApiVersion + "/watch/namespaces/%s/" + ResourceName // Namespace
+ListClustersURITemplate = "/apis/" + TPRGroup + "/" + TPRApiVersion + "/namespaces/%s/" + ResourceName // Namespace
+WatchClustersURITemplate = "/apis/" + TPRGroup + "/" + TPRApiVersion + "/watch/namespaces/%s/" + ResourceName // Namespace
 K8sVersion = "v1"
 K8sAPIPath = "/api"
 StatefulsetDeletionInterval = 1 * time.Second

@@ -2,7 +2,7 @@ package constants

 const (
 PasswordLength = 64
-UserSecretTemplate = "%s.%s.credentials." + TPRName + "." + TPRVendor // Username, ClusterName
+UserSecretTemplate = "%s.%s.credentials." + TPRKind + "." + TPRGroup // Username, ClusterName
 SuperuserKeyName = "superuser"
 ReplicationUserKeyName = "replication"
 RoleFlagSuperuser = "SUPERUSER"
@@ -2,9 +2,10 @@ package constants

 // Different properties of the PostgreSQL Third Party Resources
 const (
-TPRName = "postgresql"
-TPRVendor = "acid.zalan.do"
+TPRKind = "postgresql"
+TPRGroup = "acid.zalan.do"
 TPRDescription = "Managed PostgreSQL clusters"
 TPRApiVersion = "v1"
-ResourceName = TPRName + "s"
+TPRName = TPRKind + "." + TPRGroup
+ResourceName = TPRKind + "s"
 )
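The constant renames above (TPRKind/TPRGroup instead of the old TPRName/TPRVendor) let the derived names be assembled once in the constants package instead of being glued together at call sites such as createTPR. A small sketch of how those values compose, using made-up namespace, user and cluster names:

package main

import "fmt"

// Values copied from the diffs above; the derived constants compose from TPRKind and TPRGroup.
const (
	TPRKind       = "postgresql"
	TPRGroup      = "acid.zalan.do"
	TPRApiVersion = "v1"
	TPRName       = TPRKind + "." + TPRGroup // postgresql.acid.zalan.do
	ResourceName  = TPRKind + "s"            // postgresqls

	UserSecretTemplate      = "%s.%s.credentials." + TPRKind + "." + TPRGroup                              // Username, ClusterName
	ListClustersURITemplate = "/apis/" + TPRGroup + "/" + TPRApiVersion + "/namespaces/%s/" + ResourceName // Namespace
)

func main() {
	// "zalando", "acid-testcluster" and "default" are hypothetical, for illustration only.
	fmt.Println(TPRName)
	// -> postgresql.acid.zalan.do
	fmt.Printf(UserSecretTemplate+"\n", "zalando", "acid-testcluster")
	// -> zalando.acid-testcluster.credentials.postgresql.acid.zalan.do
	fmt.Printf(ListClustersURITemplate+"\n", "default")
	// -> /apis/acid.zalan.do/v1/namespaces/default/postgresqls
}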
@@ -37,5 +37,5 @@ func (c *Ext234Resize) ResizeFilesystem(deviceName string, commandExecutor func(
 (strings.Contains(out, "on-line resizing required") && ext2fsSuccessRegexp.MatchString(out)) {
 return nil
 }
-return fmt.Errorf("unrecognized output: %s, assuming error", out)
+return fmt.Errorf("unrecognized output: %q, assuming error", out)
 }

@@ -47,7 +47,7 @@ func KubernetesRestClient(c *rest.Config) (rest.Interface, error) {
 func(scheme *runtime.Scheme) error {
 scheme.AddKnownTypes(
 schema.GroupVersion{
-Group: constants.TPRVendor,
+Group: constants.TPRGroup,
 Version: constants.TPRApiVersion,
 },
 &spec.Postgresql{},
@@ -66,11 +66,11 @@ func (s DefaultUserSyncStrategy) ExecuteSyncRequests(reqs []spec.PgSyncUserReque
 switch r.Kind {
 case spec.PGSyncUserAdd:
 if err := s.createPgUser(r.User, db); err != nil {
-return fmt.Errorf("could not create user '%s': %v", r.User.Name, err)
+return fmt.Errorf("could not create user %q: %v", r.User.Name, err)
 }
 case spec.PGsyncUserAlter:
 if err := s.alterPgUser(r.User, db); err != nil {
-return fmt.Errorf("could not alter user '%s': %v", r.User.Name, err)
+return fmt.Errorf("could not alter user %q: %v", r.User.Name, err)
 }
 default:
 return fmt.Errorf("unrecognized operation: %v", r.Kind)

@@ -100,7 +100,7 @@ func (s DefaultUserSyncStrategy) createPgUser(user spec.PgUser, db *sql.DB) (err

 _, err = db.Query(query) // TODO: Try several times
 if err != nil {
-err = fmt.Errorf("dB error: %s, query: %v", err, query)
+err = fmt.Errorf("dB error: %v, query: %q", err, query)
 return
 }

@@ -122,7 +122,7 @@ func (s DefaultUserSyncStrategy) alterPgUser(user spec.PgUser, db *sql.DB) (err

 _, err = db.Query(query) // TODO: Try several times
 if err != nil {
-err = fmt.Errorf("dB error: %s query %v", err, query)
+err = fmt.Errorf("dB error: %v query %q", err, query)
 return
 }
@@ -73,7 +73,7 @@ func TestPGUserPassword(t *testing.T) {
 for _, tt := range pgUsers {
 pwd := PGUserPassword(tt.in)
 if pwd != tt.out {
-t.Errorf("PgUserPassword expected: %s, got: %s", tt.out, pwd)
+t.Errorf("PgUserPassword expected: %q, got: %q", tt.out, pwd)
 }
 }
 }

@@ -81,7 +81,7 @@ func TestPGUserPassword(t *testing.T) {
 func TestPrettyDiff(t *testing.T) {
 for _, tt := range prettyDiffTest {
 if actual := PrettyDiff(tt.inA, tt.inB); actual != tt.out {
-t.Errorf("PrettyDiff expected: %s, got: %s", tt.out, actual)
+t.Errorf("PrettyDiff expected: %q, got: %q", tt.out, actual)
 }
 }
 }
@@ -42,11 +42,11 @@ func (c *EBSVolumeResizer) VolumeBelongsToProvider(pv *v1.PersistentVolume) bool
 func (c *EBSVolumeResizer) GetProviderVolumeID(pv *v1.PersistentVolume) (string, error) {
 volumeID := pv.Spec.AWSElasticBlockStore.VolumeID
 if volumeID == "" {
-return "", fmt.Errorf("volume id is empty for volume %s", pv.Name)
+return "", fmt.Errorf("volume id is empty for volume %q", pv.Name)
 }
 idx := strings.LastIndex(volumeID, constants.EBSVolumeIDStart) + 1
 if idx == 0 {
-return "", fmt.Errorf("malfored EBS volume id %s", volumeID)
+return "", fmt.Errorf("malfored EBS volume id %q", volumeID)
 }
 return volumeID[idx:], nil
 }

@@ -60,7 +60,7 @@ func (c *EBSVolumeResizer) ResizeVolume(volumeId string, newSize int64) error {
 }
 vol := volumeOutput.Volumes[0]
 if *vol.VolumeId != volumeId {
-return fmt.Errorf("describe volume %s returned information about a non-matching volume %s", volumeId, *vol.VolumeId)
+return fmt.Errorf("describe volume %q returned information about a non-matching volume %q", volumeId, *vol.VolumeId)
 }
 if *vol.Size == newSize {
 // nothing to do

@@ -74,7 +74,7 @@ func (c *EBSVolumeResizer) ResizeVolume(volumeId string, newSize int64) error {

 state := *output.VolumeModification.ModificationState
 if state == constants.EBSVolumeStateFailed {
-return fmt.Errorf("could not modify persistent volume %s: modification state failed", volumeId)
+return fmt.Errorf("could not modify persistent volume %q: modification state failed", volumeId)
 }
 if state == "" {
 return fmt.Errorf("received empty modification status")

@@ -91,10 +91,10 @@ func (c *EBSVolumeResizer) ResizeVolume(volumeId string, newSize int64) error {
 return false, fmt.Errorf("could not describe volume modification: %v", err)
 }
 if len(out.VolumesModifications) != 1 {
-return false, fmt.Errorf("describe volume modification didn't return one record for volume \"%s\"", volumeId)
+return false, fmt.Errorf("describe volume modification didn't return one record for volume %q", volumeId)
 }
 if *out.VolumesModifications[0].VolumeId != volumeId {
-return false, fmt.Errorf("non-matching volume id when describing modifications: \"%s\" is different from \"%s\"",
+return false, fmt.Errorf("non-matching volume id when describing modifications: %q is different from %q",
 *out.VolumesModifications[0].VolumeId, volumeId)
 }
 return *out.VolumesModifications[0].ModificationState != constants.EBSVolumeStateModifying, nil