add another switch for CRD config checksum to deployment
commit ea9ec6454f
@@ -25,6 +25,7 @@ rules:
  - create
  - get
  - patch
  - update
- apiGroups:
  - ""
  resources:
@@ -17,6 +17,8 @@ spec:
    metadata:
      annotations:
        checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }}
        # To use the checksum of the OperatorConfiguration CRD instead, replace the line above with:
        # checksum/config: {{ include (print $.Template.BasePath "/operatorconfiguration.yaml") . | sha256sum }}
    {{- if .Values.podAnnotations }}
{{ toYaml .Values.podAnnotations | indent 8 }}
    {{- end }}
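For reference, the flipped state of that switch is a minimal sketch like the one below: the ConfigMap checksum is commented out and the OperatorConfiguration checksum is active (this assumes the chart ships an operatorconfiguration.yaml template next to configmap.yaml, as the comment above implies):

        # sketch: operator configured via the OperatorConfiguration CRD instead of a ConfigMap
        # checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }}
        checksum/config: {{ include (print $.Template.BasePath "/operatorconfiguration.yaml") . | sha256sum }}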
@@ -63,6 +63,7 @@ spec:
  #  uid: "efd12e58-5786-11e8-b5a7-06148230260c"
  #  cluster: "acid-batman"
  #  timestamp: "2017-12-19T12:40:33+01:00" # timezone required (offset relative to UTC, see RFC 3339 section 5.6)
  #  s3_wal_path: "s3://custom/path/to/bucket"
  maintenanceWindows:
  - 01:00-06:00 #UTC
  - Sat:00:00-04:00
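Together with the new S3WalPath field further down, an uncommented clone section that takes the custom-path branch might look like this sketch (field names are taken from the commented example above; the clone wrapper key and all values are illustrative):

  clone:
    cluster: "acid-batman"
    uid: "efd12e58-5786-11e8-b5a7-06148230260c"
    timestamp: "2017-12-19T12:40:33+01:00"     # timezone offset required, RFC 3339 section 5.6
    s3_wal_path: "s3://custom/path/to/bucket"  # when set, passed through as CLONE_WALE_S3_PREFIX

When s3_wal_path is empty, the operator instead derives CLONE_WAL_S3_BUCKET and CLONE_WAL_BUCKET_SCOPE_SUFFIX from its own configuration, as implemented in the generateCloneEnvironment change below.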
@@ -25,6 +25,8 @@ rules:
  verbs:
  - create
  - get
  - patch
  - update
- apiGroups:
  - ""
  resources:
@@ -36,7 +36,7 @@ configuration:
    # infrastructure_roles_secret_name: ""
    # pod_environment_configmap: ""
    pod_management_policy: "ordered_ready"
    enable_pod_antiaffinity: "false"
    enable_pod_antiaffinity: false
    pod_antiaffinity_topology_key: "kubernetes.io/hostname"
  postgres_pod_resources:
    default_cpu_request: 100m
@@ -91,4 +91,3 @@ configuration:
    # scalyr_api_key: ""
    # scalyr_image: ""
    # scalyr_server_url: ""
@@ -114,6 +114,7 @@ type CloneDescription struct {
	ClusterName  string `json:"cluster,omitempty"`
	UID          string `json:"uid,omitempty"`
	EndTimestamp string `json:"timestamp,omitempty"`
	S3WalPath    string `json:"s3_wal_path,omitempty"`
}

// Sidecar defines a container to be run in the same pod as the Postgres container.
@@ -61,12 +61,12 @@ var cloneClusterDescriptions = []struct {
	in  *CloneDescription
	err error
}{
	{&CloneDescription{"foo+bar", "", "NotEmpty"}, nil},
	{&CloneDescription{"foo+bar", "", ""},
	{&CloneDescription{"foo+bar", "", "NotEmpty", ""}, nil},
	{&CloneDescription{"foo+bar", "", "", ""},
		errors.New(`clone cluster name must confirm to DNS-1035, regex used for validation is "^[a-z]([-a-z0-9]*[a-z0-9])?$"`)},
	{&CloneDescription{"foobar123456789012345678901234567890123456789012345678901234567890", "", ""},
	{&CloneDescription{"foobar123456789012345678901234567890123456789012345678901234567890", "", "", ""},
		errors.New("clone cluster name must be no longer than 63 characters")},
	{&CloneDescription{"foobar", "", ""}, nil},
	{&CloneDescription{"foobar", "", "", ""}, nil},
}

var maintenanceWindows = []struct {
@@ -8,7 +8,7 @@ import (
	"github.com/sirupsen/logrus"

	"k8s.io/api/apps/v1beta1"
	"k8s.io/api/core/v1"
	v1 "k8s.io/api/core/v1"
	policybeta1 "k8s.io/api/policy/v1beta1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -1014,6 +1014,7 @@ func generatePersistentVolumeClaimTemplate(volumeSize, volumeStorageClass string
		return nil, fmt.Errorf("could not parse volume size: %v", err)
	}

	volumeMode := v1.PersistentVolumeFilesystem
	volumeClaim := &v1.PersistentVolumeClaim{
		ObjectMeta: metadata,
		Spec: v1.PersistentVolumeClaimSpec{
@@ -1024,6 +1025,7 @@ func generatePersistentVolumeClaimTemplate(volumeSize, volumeStorageClass string
				},
			},
			StorageClassName: storageClassName,
			VolumeMode:       &volumeMode,
		},
	}
@@ -1216,10 +1218,37 @@ func (c *Cluster) generateCloneEnvironment(description *acidv1.CloneDescription)
			})
	} else {
		// cloning with S3, find out the bucket to clone
		msg := "Clone from S3 bucket"
		c.logger.Info(msg, description.S3WalPath)

		if description.S3WalPath == "" {
			msg := "Figure out which S3 bucket to use from env"
			c.logger.Info(msg, description.S3WalPath)

			envs := []v1.EnvVar{
				v1.EnvVar{
					Name:  "CLONE_WAL_S3_BUCKET",
					Value: c.OpConfig.WALES3Bucket,
				},
				v1.EnvVar{
					Name:  "CLONE_WAL_BUCKET_SCOPE_SUFFIX",
					Value: getBucketScopeSuffix(description.UID),
				},
			}

			result = append(result, envs...)
		} else {
			msg := "Use custom parsed S3WalPath %s from the manifest"
			c.logger.Warningf(msg, description.S3WalPath)

			result = append(result, v1.EnvVar{
				Name:  "CLONE_WALE_S3_PREFIX",
				Value: description.S3WalPath,
			})
		}

		result = append(result, v1.EnvVar{Name: "CLONE_METHOD", Value: "CLONE_WITH_WALE"})
		result = append(result, v1.EnvVar{Name: "CLONE_WAL_S3_BUCKET", Value: c.OpConfig.WALES3Bucket})
		result = append(result, v1.EnvVar{Name: "CLONE_TARGET_TIME", Value: description.EndTimestamp})
		result = append(result, v1.EnvVar{Name: "CLONE_WAL_BUCKET_SCOPE_SUFFIX", Value: getBucketScopeSuffix(description.UID)})
		result = append(result, v1.EnvVar{Name: "CLONE_WAL_BUCKET_SCOPE_PREFIX", Value: ""})
	}
@@ -129,3 +129,82 @@ func TestShmVolume(t *testing.T) {
		}
	}
}

func TestCloneEnv(t *testing.T) {
	testName := "TestCloneEnv"
	tests := []struct {
		subTest   string
		cloneOpts *acidv1.CloneDescription
		env       v1.EnvVar
		envPos    int
	}{
		{
			subTest: "custom s3 path",
			cloneOpts: &acidv1.CloneDescription{
				ClusterName:  "test-cluster",
				S3WalPath:    "s3://some/path/",
				EndTimestamp: "somewhen",
			},
			env: v1.EnvVar{
				Name:  "CLONE_WALE_S3_PREFIX",
				Value: "s3://some/path/",
			},
			envPos: 1,
		},
		{
			subTest: "generated s3 path, bucket",
			cloneOpts: &acidv1.CloneDescription{
				ClusterName:  "test-cluster",
				EndTimestamp: "somewhen",
				UID:          "0000",
			},
			env: v1.EnvVar{
				Name:  "CLONE_WAL_S3_BUCKET",
				Value: "wale-bucket",
			},
			envPos: 1,
		},
		{
			subTest: "generated s3 path, target time",
			cloneOpts: &acidv1.CloneDescription{
				ClusterName:  "test-cluster",
				EndTimestamp: "somewhen",
				UID:          "0000",
			},
			env: v1.EnvVar{
				Name:  "CLONE_TARGET_TIME",
				Value: "somewhen",
			},
			envPos: 4,
		},
	}

	var cluster = New(
		Config{
			OpConfig: config.Config{
				WALES3Bucket:   "wale-bucket",
				ProtectedRoles: []string{"admin"},
				Auth: config.Auth{
					SuperUsername:       superUserName,
					ReplicationUsername: replicationUserName,
				},
			},
		}, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger)

	for _, tt := range tests {
		envs := cluster.generateCloneEnvironment(tt.cloneOpts)

		env := envs[tt.envPos]

		if env.Name != tt.env.Name {
			t.Errorf("%s %s: Expected env name %s, have %s instead",
				testName, tt.subTest, tt.env.Name, env.Name)
		}

		if env.Value != tt.env.Value {
			t.Errorf("%s %s: Expected env value %s, have %s instead",
				testName, tt.subTest, tt.env.Value, env.Value)
		}
	}
}
@@ -1,11 +1,13 @@
package controller

import (
	"encoding/json"
	"fmt"

	"k8s.io/api/core/v1"
	apiextv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/wait"

	acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1"
@@ -52,7 +54,15 @@ func (c *Controller) createOperatorCRD(crd *apiextv1beta1.CustomResourceDefiniti
		if !k8sutil.ResourceAlreadyExists(err) {
			return fmt.Errorf("could not create customResourceDefinition: %v", err)
		}
		c.logger.Infof("customResourceDefinition %q is already registered", crd.Name)
		c.logger.Infof("customResourceDefinition %q is already registered and will only be updated", crd.Name)

		patch, err := json.Marshal(crd)
		if err != nil {
			return fmt.Errorf("could not marshal new customResourceDefintion: %v", err)
		}
		if _, err := c.KubeClient.CustomResourceDefinitions().Patch(crd.Name, types.MergePatchType, patch); err != nil {
			return fmt.Errorf("could not update customResourceDefinition: %v", err)
		}
	} else {
		c.logger.Infof("customResourceDefinition %q has been registered", crd.Name)
	}