Support inherited annotations for all major objects (#1236)
* add comments where inherited annotations could be added
* add inheritedAnnotations feature
* return nil if no annotations are set
* minor changes
* first downscaler then inherited annotations
* add unit test for inherited annotations
* add pvc to test + minor changes
* missing comma
* fix nil map assignment
* set annotations in the same order it is done in other places
* replace acidClientSet with acid getters in K8s client
* more fixes on clientSet vs getters
* minor changes
* remove endpoints from annotation test
* refine unit test - but deployment and sts are still empty
* fix checking of sts and deployment
* make annotations setter one liners
* no need for len check anymore

Co-authored-by: Rafia Sabih <rafia.sabih@zalando.de>
parent 549f71bb49
commit 6a97316a69
@@ -166,6 +166,10 @@ spec:
                     type: string
                   template:
                     type: boolean
+              inherited_annotations:
+                type: array
+                items:
+                  type: string
               inherited_labels:
                 type: array
                 items:
@@ -91,7 +91,11 @@ configKubernetes:
   # namespaced name of the secret containing infrastructure roles names and passwords
   # infrastructure_roles_secret_name: postgresql-infrastructure-roles

-  # list of labels that can be inherited from the cluster manifest
+  # list of annotation keys that can be inherited from the cluster manifest
+  # inherited_annotations:
+  # - owned-by
+
+  # list of label keys that can be inherited from the cluster manifest
   # inherited_labels:
   # - application
   # - environment
@@ -88,7 +88,10 @@ configKubernetes:
   # namespaced name of the secret containing infrastructure roles names and passwords
   # infrastructure_roles_secret_name: postgresql-infrastructure-roles

-  # list of labels that can be inherited from the cluster manifest
+  # list of annotation keys that can be inherited from the cluster manifest
+  # inherited_annotations: owned-by
+
+  # list of label keys that can be inherited from the cluster manifest
   # inherited_labels: application,environment

   # timeout for successful migration of master pods from unschedulable node
@@ -274,6 +274,12 @@ configuration they are grouped under the `kubernetes` key.
   are extracted. For the ConfigMap this has to be a string which allows
   referencing only one infrastructure roles secret. The default is empty.

+* **inherited_annotations**
+  list of annotation keys that can be inherited from the cluster manifest, and
+  added to each child object (`Deployment`, `StatefulSet`, `Pod`, `PDB` and
+  `Services`) created by the operator, including the connection pooler
+  deployment. The default is empty.
+
 * **pod_role_label**
   name of the label assigned to the Postgres pods (and services/endpoints) by
   the operator. The default is `spilo-role`.

@@ -283,15 +289,16 @@ configuration they are grouped under the `kubernetes` key.
   objects. The default is `application:spilo`.

 * **inherited_labels**
-  list of labels that can be inherited from the cluster manifest, and added to
-  each child objects (`StatefulSet`, `Pod`, `Service` and `Endpoints`) created
-  by the operator. Typical use case is to dynamically pass labels that are
-  specific to a given Postgres cluster, in order to implement `NetworkPolicy`.
-  The default is empty.
+  list of label keys that can be inherited from the cluster manifest, and
+  added to each child object (`Deployment`, `StatefulSet`, `Pod`, `PVCs`,
+  `PDB`, `Service`, `Endpoints` and `Secrets`) created by the operator.
+  A typical use case is to dynamically pass labels that are specific to a
+  given Postgres cluster, in order to implement `NetworkPolicy`. The default
+  is empty.

 * **cluster_name_label**
-  name of the label assigned to Kubernetes objects created by the operator that
-  indicates which cluster a given object belongs to. The default is
+  name of the label assigned to Kubernetes objects created by the operator
+  that indicates which cluster a given object belongs to. The default is
   `cluster-name`.

 * **node_readiness_label**
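The inheritance rule described above is a plain, exact-key copy: every key listed under `inherited_annotations` that also appears in the cluster manifest's `metadata.annotations` is written onto the metadata of the child objects. Below is a minimal, self-contained sketch of that behaviour; the function and variable names (`inheritAnnotations`, `manifest`, `child`) are illustrative only and are not part of the operator code.

package main

import "fmt"

// inheritAnnotations copies only the configured keys from the cluster
// manifest's annotations onto a child object's annotations.
// Hypothetical helper for illustration; the operator's own logic lives
// in pkg/cluster (annotationsSet) further down in this diff.
func inheritAnnotations(inherited []string, manifest, child map[string]string) map[string]string {
	if child == nil {
		child = make(map[string]string)
	}
	for k, v := range manifest {
		for _, key := range inherited {
			if k == key { // exact key match, no patterns
				child[k] = v
			}
		}
	}
	return child
}

func main() {
	manifest := map[string]string{"owned-by": "acid", "team": "db"}
	inherited := []string{"owned-by"} // operator option inherited_annotations
	fmt.Println(inheritAnnotations(inherited, manifest, nil)) // map[owned-by:acid]
}

With `inherited_annotations: owned-by` in the operator configuration and a cluster manifest annotated with `owned-by: acid`, each StatefulSet, pod template, Service, Secret, PDB, logical backup CronJob and connection pooler Deployment created by the operator would carry `owned-by: acid`.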
@@ -117,7 +117,7 @@ class K8s:
         for svc in svcs:
             for key, value in annotations.items():
                 if not svc.metadata.annotations or key not in svc.metadata.annotations or svc.metadata.annotations[key] != value:
-                    print("Expected key {} not found in annotations {}".format(key, svc.metadata.annotations))
+                    print("Expected key {} not found in service annotations {}".format(key, svc.metadata.annotations))
                     return False
         return True

@@ -126,7 +126,7 @@ class K8s:
         for sset in ssets:
             for key, value in annotations.items():
                 if key not in sset.metadata.annotations or sset.metadata.annotations[key] != value:
-                    print("Expected key {} not found in annotations {}".format(key, sset.metadata.annotations))
+                    print("Expected key {} not found in statefulset annotations {}".format(key, sset.metadata.annotations))
                     return False
         return True

@@ -852,6 +852,7 @@ class EndToEndTestCase(unittest.TestCase):
         patch_sset_propagate_annotations = {
             "data": {
                 "downscaler_annotations": "deployment-time,downscaler/*",
+                "inherited_annotations": "owned-by",
             }
         }
         k8s.update_config(patch_sset_propagate_annotations)

@@ -861,6 +862,7 @@ class EndToEndTestCase(unittest.TestCase):
                 "annotations": {
                     "deployment-time": "2020-04-30 12:00:00",
                     "downscaler/downtime_replicas": "0",
+                    "owned-by": "acid",
                 },
             }
         }
@@ -870,10 +872,9 @@ class EndToEndTestCase(unittest.TestCase):
         annotations = {
             "deployment-time": "2020-04-30 12:00:00",
             "downscaler/downtime_replicas": "0",
+            "owned-by": "acid",
         }
-        self.eventuallyTrue(lambda: k8s.check_statefulset_annotations(cluster_label, annotations), "Annotations missing")
+        self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")

         self.eventuallyTrue(lambda: k8s.check_statefulset_annotations(cluster_label, annotations), "Annotations missing")

     @timeout_decorator.timeout(TEST_TIMEOUT_SEC)
@@ -57,6 +57,7 @@ data:
   # kubernetes_use_configmaps: "false"
   # infrastructure_roles_secret_name: "postgresql-infrastructure-roles"
   # infrastructure_roles_secrets: "secretname:monitoring-roles,userkey:user,passwordkey:password,rolekey:inrole"
+  # inherited_annotations: owned-by
   # inherited_labels: application,environment
   # kube_iam_role: ""
   # log_s3_bucket: ""

@@ -164,6 +164,10 @@ spec:
                     type: string
                   template:
                     type: boolean
+              inherited_annotations:
+                type: array
+                items:
+                  type: string
               inherited_labels:
                 type: array
                 items:
@@ -49,6 +49,8 @@ configuration:
     # - secretname: "other-infrastructure-role"
     #   userkey: "other-user-key"
     #   passwordkey: "other-password-key"
+    # inherited_annotations:
+    # - owned-by
     # inherited_labels:
     # - application
     # - environment
@@ -961,6 +961,14 @@ var OperatorConfigCRDResourceValidation = apiextv1.CustomResourceValidation{
							},
						},
					},
+					"inherited_annotations": {
+						Type: "array",
+						Items: &apiextv1.JSONSchemaPropsOrArray{
+							Schema: &apiextv1.JSONSchemaProps{
+								Type: "string",
+							},
+						},
+					},
					"inherited_labels": {
						Type: "array",
						Items: &apiextv1.JSONSchemaPropsOrArray{

@@ -1407,7 +1415,7 @@ func buildCRD(name, kind, plural, short string, columns []apiextv1.CustomResourc
		},
		Scope: apiextv1.NamespaceScoped,
		Versions: []apiextv1.CustomResourceDefinitionVersion{
-			apiextv1.CustomResourceDefinitionVersion{
+			{
				Name:    SchemeGroupVersion.Version,
				Served:  true,
				Storage: true,
@@ -66,6 +66,7 @@ type KubernetesMetaConfiguration struct {
	PodRoleLabel            string            `json:"pod_role_label,omitempty"`
	ClusterLabels           map[string]string `json:"cluster_labels,omitempty"`
	InheritedLabels         []string          `json:"inherited_labels,omitempty"`
+	InheritedAnnotations    []string          `json:"inherited_annotations,omitempty"`
	DownscalerAnnotations   []string          `json:"downscaler_annotations,omitempty"`
	ClusterNameLabel        string            `json:"cluster_name_label,omitempty"`
	DeleteAnnotationDateKey string            `json:"delete_annotation_date_key,omitempty"`

@@ -202,6 +202,11 @@ func (in *KubernetesMetaConfiguration) DeepCopyInto(out *KubernetesMetaConfigura
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
+	if in.InheritedAnnotations != nil {
+		in, out := &in.InheritedAnnotations, &out.InheritedAnnotations
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
	if in.DownscalerAnnotations != nil {
		in, out := &in.DownscalerAnnotations, &out.DownscalerAnnotations
		*out = make([]string, len(*in))
@@ -286,7 +286,7 @@ func (c *Cluster) generateConnectionPoolerPodTemplate(role PostgresRole) (
		ObjectMeta: metav1.ObjectMeta{
			Labels:      c.connectionPoolerLabels(role, true).MatchLabels,
			Namespace:   c.Namespace,
-			Annotations: c.generatePodAnnotations(spec),
+			Annotations: c.annotationsSet(c.generatePodAnnotations(spec)),
		},
		Spec: v1.PodSpec{
			ServiceAccountName: c.OpConfig.PodServiceAccountName,

@@ -325,7 +325,7 @@ func (c *Cluster) generateConnectionPoolerDeployment(connectionPooler *Connectio

	if *numberOfInstances < constants.ConnectionPoolerMinInstances {
		msg := "Adjusted number of connection pooler instances from %d to %d"
-		c.logger.Warningf(msg, numberOfInstances, constants.ConnectionPoolerMinInstances)
+		c.logger.Warningf(msg, *numberOfInstances, constants.ConnectionPoolerMinInstances)

		*numberOfInstances = constants.ConnectionPoolerMinInstances
	}

@@ -339,7 +339,7 @@ func (c *Cluster) generateConnectionPoolerDeployment(connectionPooler *Connectio
			Name:        connectionPooler.Name,
			Namespace:   connectionPooler.Namespace,
			Labels:      c.connectionPoolerLabels(connectionPooler.Role, true).MatchLabels,
-			Annotations: map[string]string{},
+			Annotations: c.AnnotationsToPropagate(c.annotationsSet(nil)),
			// make StatefulSet object its owner to represent the dependency.
			// By itself StatefulSet is being deleted with "Orphaned"
			// propagation policy, which means that it's deletion will not

@@ -390,7 +390,7 @@ func (c *Cluster) generateConnectionPoolerService(connectionPooler *ConnectionPo
			Name:        connectionPooler.Name,
			Namespace:   connectionPooler.Namespace,
			Labels:      c.connectionPoolerLabels(connectionPooler.Role, false).MatchLabels,
-			Annotations: map[string]string{},
+			Annotations: c.annotationsSet(c.generateServiceAnnotations(connectionPooler.Role, spec)),
			// make StatefulSet object its owner to represent the dependency.
			// By itself StatefulSet is being deleted with "Orphaned"
			// propagation policy, which means that it's deletion will not

@@ -866,7 +866,7 @@ func (c *Cluster) syncConnectionPoolerWorker(oldSpec, newSpec *acidv1.Postgresql
		}
	}

-	newAnnotations := c.AnnotationsToPropagate(c.ConnectionPooler[role].Deployment.Annotations)
+	newAnnotations := c.AnnotationsToPropagate(c.annotationsSet(c.ConnectionPooler[role].Deployment.Annotations))
	if newAnnotations != nil {
		deployment, err = updateConnectionPoolerAnnotations(c.KubeClient, c.ConnectionPooler[role].Deployment, newAnnotations)
		if err != nil {
@@ -1184,13 +1184,13 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
	tolerationSpec := tolerations(&spec.Tolerations, c.OpConfig.PodToleration)
	effectivePodPriorityClassName := util.Coalesce(spec.PodPriorityClassName, c.OpConfig.PodPriorityClassName)

-	annotations := c.generatePodAnnotations(spec)
+	podAnnotations := c.generatePodAnnotations(spec)

	// generate pod template for the statefulset, based on the spilo container and sidecars
	podTemplate, err = c.generatePodTemplate(
		c.Namespace,
		c.labelsSet(true),
-		annotations,
+		c.annotationsSet(podAnnotations),
		spiloContainer,
		initContainers,
		sidecarContainers,

@@ -1236,15 +1236,16 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
		return nil, fmt.Errorf("could not set the pod management policy to the unknown value: %v", c.OpConfig.PodManagementPolicy)
	}

-	annotations = make(map[string]string)
-	annotations[rollingUpdateStatefulsetAnnotationKey] = strconv.FormatBool(false)
+	stsAnnotations := make(map[string]string)
+	stsAnnotations[rollingUpdateStatefulsetAnnotationKey] = strconv.FormatBool(false)
+	stsAnnotations = c.AnnotationsToPropagate(c.annotationsSet(nil))

	statefulSet := &appsv1.StatefulSet{
		ObjectMeta: metav1.ObjectMeta{
			Name:        c.statefulSetName(),
			Namespace:   c.Namespace,
			Labels:      c.labelsSet(true),
-			Annotations: c.AnnotationsToPropagate(annotations),
+			Annotations: stsAnnotations,
		},
		Spec: appsv1.StatefulSetSpec{
			Replicas: &numberOfInstances,
@@ -1537,9 +1538,10 @@ func (c *Cluster) generateSingleUserSecret(namespace string, pgUser spec.PgUser)
	username := pgUser.Name
	secret := v1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Name:        c.credentialSecretName(username),
			Namespace:   namespace,
			Labels:      c.labelsSet(true),
+			Annotations: c.annotationsSet(nil),
		},
		Type: v1.SecretTypeOpaque,
		Data: map[string][]byte{

@@ -1613,7 +1615,7 @@ func (c *Cluster) generateService(role PostgresRole, spec *acidv1.PostgresSpec)
			Name:        c.serviceName(role),
			Namespace:   c.Namespace,
			Labels:      c.roleLabelsSet(true, role),
-			Annotations: c.generateServiceAnnotations(role, spec),
+			Annotations: c.annotationsSet(c.generateServiceAnnotations(role, spec)),
		},
		Spec: serviceSpec,
	}

@@ -1816,9 +1818,10 @@ func (c *Cluster) generatePodDisruptionBudget() *policybeta1.PodDisruptionBudget

	return &policybeta1.PodDisruptionBudget{
		ObjectMeta: metav1.ObjectMeta{
			Name:        c.podDisruptionBudgetName(),
			Namespace:   c.Namespace,
			Labels:      c.labelsSet(true),
+			Annotations: c.annotationsSet(nil),
		},
		Spec: policybeta1.PodDisruptionBudgetSpec{
			MinAvailable: &minAvailable,

@@ -1938,9 +1941,10 @@ func (c *Cluster) generateLogicalBackupJob() (*batchv1beta1.CronJob, error) {

	cronJob := &batchv1beta1.CronJob{
		ObjectMeta: metav1.ObjectMeta{
			Name:        c.getLogicalBackupJobName(),
			Namespace:   c.Namespace,
			Labels:      c.labelsSet(true),
+			Annotations: c.annotationsSet(nil),
		},
		Spec: batchv1beta1.CronJobSpec{
			Schedule: schedule,
@@ -368,8 +368,8 @@ func (c *Cluster) syncStatefulSet() error {
			}
		}
	}
-	annotations := c.AnnotationsToPropagate(c.Statefulset.Annotations)
-	c.updateStatefulSetAnnotations(annotations)
+
+	c.updateStatefulSetAnnotations(c.AnnotationsToPropagate(c.annotationsSet(c.Statefulset.Annotations)))

	if !podsRollingUpdateRequired && !c.OpConfig.EnableLazySpiloUpgrade {
		// even if desired and actual statefulsets match

@@ -412,11 +412,15 @@ func (c *Cluster) syncStatefulSet() error {
 // AnnotationsToPropagate get the annotations to update if required
 // based on the annotations in postgres CRD
 func (c *Cluster) AnnotationsToPropagate(annotations map[string]string) map[string]string {
-	toPropagateAnnotations := c.OpConfig.DownscalerAnnotations
-	pgCRDAnnotations := c.Postgresql.ObjectMeta.GetAnnotations()

-	if toPropagateAnnotations != nil && pgCRDAnnotations != nil {
-		for _, anno := range toPropagateAnnotations {
+	if annotations == nil {
+		annotations = make(map[string]string)
+	}
+
+	pgCRDAnnotations := c.ObjectMeta.Annotations
+
+	if pgCRDAnnotations != nil {
+		for _, anno := range c.OpConfig.DownscalerAnnotations {
			for k, v := range pgCRDAnnotations {
				matched, err := regexp.MatchString(anno, k)
				if err != nil {

@@ -430,7 +434,11 @@ func (c *Cluster) AnnotationsToPropagate(annotations map[string]string) map[stri
		}
	}

-	return annotations
+	if len(annotations) > 0 {
+		return annotations
+	}
+
+	return nil
 }

 // checkAndSetGlobalPostgreSQLConfiguration checks whether cluster-wide API parameters
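Unlike the exact-key matching used for inherited annotations, `AnnotationsToPropagate` treats each entry of `downscaler_annotations` as an unanchored regular expression tested against every annotation key of the Postgresql manifest, and returns nil when nothing is collected. The standalone snippet below, with illustrative names only, mimics that matching step under those assumptions.

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Patterns as configured in downscaler_annotations.
	patterns := []string{"deployment-time", "downscaler/*"}
	// Annotations taken from a Postgresql manifest.
	annotations := map[string]string{
		"deployment-time":              "2020-04-30 12:00:00",
		"downscaler/downtime_replicas": "0",
		"owned-by":                     "acid",
	}

	propagated := map[string]string{}
	for _, p := range patterns {
		for k, v := range annotations {
			// Unanchored regular-expression match, as AnnotationsToPropagate does.
			if matched, err := regexp.MatchString(p, k); err == nil && matched {
				propagated[k] = v
			}
		}
	}
	fmt.Println(propagated) // owned-by is handled by inherited_annotations instead
}

This is why the e2e test's pattern `downscaler/*` also catches `downscaler/downtime_replicas`: the pattern is matched anywhere inside the key rather than compared literally.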
@@ -271,6 +271,33 @@ func (c *Cluster) getTeamMembers(teamID string) ([]string, error) {
	return members, nil
 }

+// Returns annotations to be passed to child objects
+func (c *Cluster) annotationsSet(annotations map[string]string) map[string]string {
+
+	if annotations == nil {
+		annotations = make(map[string]string)
+	}
+
+	pgCRDAnnotations := c.ObjectMeta.Annotations
+
+	// allow to inherit certain annotations from the 'postgres' object
+	if pgCRDAnnotations != nil {
+		for k, v := range pgCRDAnnotations {
+			for _, match := range c.OpConfig.InheritedAnnotations {
+				if k == match {
+					annotations[k] = v
+				}
+			}
+		}
+	}
+
+	if len(annotations) > 0 {
+		return annotations
+	}
+
+	return nil
+}
+
 func (c *Cluster) waitForPodLabel(podEvents chan PodEvent, stopChan chan struct{}, role *PostgresRole) (*v1.Pod, error) {
	timeout := time.After(c.OpConfig.PodLabelWaitTimeout)
	for {
@@ -0,0 +1,141 @@
+package cluster
+
+import (
+	"context"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1"
+	fakeacidv1 "github.com/zalando/postgres-operator/pkg/generated/clientset/versioned/fake"
+	"github.com/zalando/postgres-operator/pkg/util"
+	"github.com/zalando/postgres-operator/pkg/util/config"
+	"github.com/zalando/postgres-operator/pkg/util/k8sutil"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	k8sFake "k8s.io/client-go/kubernetes/fake"
+)
+
+func newFakeK8sAnnotationsClient() (k8sutil.KubernetesClient, *k8sFake.Clientset) {
+	clientSet := k8sFake.NewSimpleClientset()
+	acidClientSet := fakeacidv1.NewSimpleClientset()
+
+	return k8sutil.KubernetesClient{
+		PodDisruptionBudgetsGetter: clientSet.PolicyV1beta1(),
+		ServicesGetter:             clientSet.CoreV1(),
+		StatefulSetsGetter:         clientSet.AppsV1(),
+		PostgresqlsGetter:          acidClientSet.AcidV1(),
+	}, clientSet
+}
+
+func TestInheritedAnnotations(t *testing.T) {
+	testName := "test inheriting annotations from manifest"
+	client, _ := newFakeK8sAnnotationsClient()
+	clusterName := "acid-test-cluster"
+	namespace := "default"
+	annotationValue := "acid"
+	role := Master
+
+	pg := acidv1.Postgresql{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: clusterName,
+			Annotations: map[string]string{
+				"owned-by": annotationValue,
+			},
+		},
+		Spec: acidv1.PostgresSpec{
+			EnableReplicaConnectionPooler: boolToPointer(true),
+			Volume: acidv1.Volume{
+				Size: "1Gi",
+			},
+		},
+	}
+
+	var cluster = New(
+		Config{
+			OpConfig: config.Config{
+				ConnectionPooler: config.ConnectionPooler{
+					ConnectionPoolerDefaultCPURequest:    "100m",
+					ConnectionPoolerDefaultCPULimit:      "100m",
+					ConnectionPoolerDefaultMemoryRequest: "100Mi",
+					ConnectionPoolerDefaultMemoryLimit:   "100Mi",
+					NumberOfInstances:                    int32ToPointer(1),
+				},
+				PodManagementPolicy: "ordered_ready",
+				Resources: config.Resources{
+					ClusterLabels:        map[string]string{"application": "spilo"},
+					ClusterNameLabel:     "cluster-name",
+					DefaultCPURequest:    "300m",
+					DefaultCPULimit:      "300m",
+					DefaultMemoryRequest: "300Mi",
+					DefaultMemoryLimit:   "300Mi",
+					InheritedAnnotations: []string{"owned-by"},
+					PodRoleLabel:         "spilo-role",
+				},
+			},
+		}, client, pg, logger, eventRecorder)
+
+	cluster.Name = clusterName
+	cluster.Namespace = namespace
+
+	// test annotationsSet function
+	inheritedAnnotations := cluster.annotationsSet(nil)
+
+	listOptions := metav1.ListOptions{
+		LabelSelector: cluster.labelsSet(false).String(),
+	}
+
+	// check statefulset annotations
+	_, err := cluster.createStatefulSet()
+	assert.NoError(t, err)
+
+	stsList, err := client.StatefulSets(namespace).List(context.TODO(), listOptions)
+	assert.NoError(t, err)
+	for _, sts := range stsList.Items {
+		if !(util.MapContains(sts.ObjectMeta.Annotations, inheritedAnnotations)) {
+			t.Errorf("%s: StatefulSet %v not inherited annotations %#v, got %#v", testName, sts.ObjectMeta.Name, inheritedAnnotations, sts.ObjectMeta.Annotations)
+		}
+		// pod template
+		if !(util.MapContains(sts.Spec.Template.ObjectMeta.Annotations, inheritedAnnotations)) {
+			t.Errorf("%s: pod template %v not inherited annotations %#v, got %#v", testName, sts.ObjectMeta.Name, inheritedAnnotations, sts.ObjectMeta.Annotations)
+		}
+		// pvc template
+		if util.MapContains(sts.Spec.VolumeClaimTemplates[0].Annotations, inheritedAnnotations) {
+			t.Errorf("%s: PVC template %v not expected to have inherited annotations %#v, got %#v", testName, sts.ObjectMeta.Name, inheritedAnnotations, sts.ObjectMeta.Annotations)
+		}
+	}
+
+	// check service annotations
+	cluster.createService(Master)
+	svcList, err := client.Services(namespace).List(context.TODO(), listOptions)
+	assert.NoError(t, err)
+	for _, svc := range svcList.Items {
+		if !(util.MapContains(svc.ObjectMeta.Annotations, inheritedAnnotations)) {
+			t.Errorf("%s: Service %v not inherited annotations %#v, got %#v", testName, svc.ObjectMeta.Name, inheritedAnnotations, svc.ObjectMeta.Annotations)
+		}
+	}
+
+	// check pod disruption budget annotations
+	cluster.createPodDisruptionBudget()
+	pdbList, err := client.PodDisruptionBudgets(namespace).List(context.TODO(), listOptions)
+	assert.NoError(t, err)
+	for _, pdb := range pdbList.Items {
+		if !(util.MapContains(pdb.ObjectMeta.Annotations, inheritedAnnotations)) {
+			t.Errorf("%s: Pod Disruption Budget %v not inherited annotations %#v, got %#v", testName, pdb.ObjectMeta.Name, inheritedAnnotations, pdb.ObjectMeta.Annotations)
+		}
+	}
+
+	// check pooler deployment annotations
+	cluster.ConnectionPooler = map[PostgresRole]*ConnectionPoolerObjects{}
+	cluster.ConnectionPooler[role] = &ConnectionPoolerObjects{
+		Name:        cluster.connectionPoolerName(role),
+		ClusterName: cluster.ClusterName,
+		Namespace:   cluster.Namespace,
+		Role:        role,
+	}
+	deploy, err := cluster.generateConnectionPoolerDeployment(cluster.ConnectionPooler[role])
+	assert.NoError(t, err)
+
+	if !(util.MapContains(deploy.ObjectMeta.Annotations, inheritedAnnotations)) {
+		t.Errorf("%s: Deployment %v not inherited annotations %#v, got %#v", testName, deploy.ObjectMeta.Name, inheritedAnnotations, deploy.ObjectMeta.Annotations)
+	}
+
+}
@@ -22,7 +22,7 @@ import (
	"k8s.io/client-go/kubernetes/fake"
 )

-func NewFakeKubernetesClient() (k8sutil.KubernetesClient, *fake.Clientset) {
+func newFakeK8sPVCclient() (k8sutil.KubernetesClient, *fake.Clientset) {
	clientSet := fake.NewSimpleClientset()

	return k8sutil.KubernetesClient{

@@ -34,7 +34,7 @@ func NewFakeKubernetesClient() (k8sutil.KubernetesClient, *fake.Clientset) {

 func TestResizeVolumeClaim(t *testing.T) {
	testName := "test resizing of persistent volume claims"
-	client, _ := NewFakeKubernetesClient()
+	client, _ := newFakeK8sPVCclient()
	clusterName := "acid-test-cluster"
	namespace := "default"
	newVolumeSize := "2Gi"
@@ -15,7 +15,7 @@ import (

 func (c *Controller) readOperatorConfigurationFromCRD(configObjectNamespace, configObjectName string) (*acidv1.OperatorConfiguration, error) {

-	config, err := c.KubeClient.AcidV1ClientSet.AcidV1().OperatorConfigurations(configObjectNamespace).Get(
+	config, err := c.KubeClient.OperatorConfigurationsGetter.OperatorConfigurations(configObjectNamespace).Get(
		context.TODO(), configObjectName, metav1.GetOptions{})
	if err != nil {
		return nil, fmt.Errorf("could not get operator configuration object %q: %v", configObjectName, err)

@@ -93,6 +93,7 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur
	result.PodRoleLabel = util.Coalesce(fromCRD.Kubernetes.PodRoleLabel, "spilo-role")
	result.ClusterLabels = util.CoalesceStrMap(fromCRD.Kubernetes.ClusterLabels, map[string]string{"application": "spilo"})
	result.InheritedLabels = fromCRD.Kubernetes.InheritedLabels
+	result.InheritedAnnotations = fromCRD.Kubernetes.InheritedAnnotations
	result.DownscalerAnnotations = fromCRD.Kubernetes.DownscalerAnnotations
	result.ClusterNameLabel = util.Coalesce(fromCRD.Kubernetes.ClusterNameLabel, "cluster-name")
	result.DeleteAnnotationDateKey = fromCRD.Kubernetes.DeleteAnnotationDateKey
@@ -46,7 +46,7 @@ func (c *Controller) listClusters(options metav1.ListOptions) (*acidv1.Postgresq
	var pgList acidv1.PostgresqlList

	// TODO: use the SharedInformer cache instead of quering Kubernetes API directly.
-	list, err := c.KubeClient.AcidV1ClientSet.AcidV1().Postgresqls(c.opConfig.WatchedNamespace).List(context.TODO(), options)
+	list, err := c.KubeClient.PostgresqlsGetter.Postgresqls(c.opConfig.WatchedNamespace).List(context.TODO(), options)
	if err != nil {
		c.logger.Errorf("could not list postgresql objects: %v", err)
	}

@@ -398,7 +398,7 @@ func (c *Controller) loadPostgresTeams() {
	// reset team map
	c.pgTeamMap = teams.PostgresTeamMap{}

-	pgTeams, err := c.KubeClient.AcidV1ClientSet.AcidV1().PostgresTeams(c.opConfig.WatchedNamespace).List(context.TODO(), metav1.ListOptions{})
+	pgTeams, err := c.KubeClient.PostgresTeamsGetter.PostgresTeams(c.opConfig.WatchedNamespace).List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		c.logger.Errorf("could not list postgres team objects: %v", err)
	}
@@ -36,6 +36,7 @@ type Resources struct {
	SpiloPrivileged         bool              `name:"spilo_privileged" default:"false"`
	ClusterLabels           map[string]string `name:"cluster_labels" default:"application:spilo"`
	InheritedLabels         []string          `name:"inherited_labels" default:""`
+	InheritedAnnotations    []string          `name:"inherited_annotations" default:""`
	DownscalerAnnotations   []string          `name:"downscaler_annotations"`
	ClusterNameLabel        string            `name:"cluster_name_label" default:"cluster-name"`
	DeleteAnnotationDateKey string            `name:"delete_annotation_date_key"`
@@ -11,7 +11,9 @@ import (
	batchv1beta1 "k8s.io/api/batch/v1beta1"
	clientbatchv1beta1 "k8s.io/client-go/kubernetes/typed/batch/v1beta1"

-	acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1"
+	apiacidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1"
+	acidv1client "github.com/zalando/postgres-operator/pkg/generated/clientset/versioned"
+	acidv1 "github.com/zalando/postgres-operator/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1"
	"github.com/zalando/postgres-operator/pkg/spec"
	apiappsv1 "k8s.io/api/apps/v1"
	v1 "k8s.io/api/core/v1"

@@ -19,6 +21,7 @@ import (
	apiextclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
	apiextv1 "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/kubernetes"
	appsv1 "k8s.io/client-go/kubernetes/typed/apps/v1"

@@ -27,9 +30,6 @@ import (
	rbacv1 "k8s.io/client-go/kubernetes/typed/rbac/v1"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/clientcmd"
-
-	acidv1client "github.com/zalando/postgres-operator/pkg/generated/clientset/versioned"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )

 func Int32ToPointer(value int32) *int32 {

@@ -55,6 +55,9 @@ type KubernetesClient struct {
	policyv1beta1.PodDisruptionBudgetsGetter
	apiextv1.CustomResourceDefinitionsGetter
	clientbatchv1beta1.CronJobsGetter
+	acidv1.OperatorConfigurationsGetter
+	acidv1.PostgresTeamsGetter
+	acidv1.PostgresqlsGetter

	RESTClient      rest.Interface
	AcidV1ClientSet *acidv1client.Clientset

@@ -154,15 +157,23 @@ func NewFromConfig(cfg *rest.Config) (KubernetesClient, error) {
	}

	kubeClient.CustomResourceDefinitionsGetter = apiextClient.ApiextensionsV1()

	kubeClient.AcidV1ClientSet = acidv1client.NewForConfigOrDie(cfg)
+	if err != nil {
+		return kubeClient, fmt.Errorf("could not create acid.zalan.do clientset: %v", err)
+	}
+
+	kubeClient.OperatorConfigurationsGetter = kubeClient.AcidV1ClientSet.AcidV1()
+	kubeClient.PostgresTeamsGetter = kubeClient.AcidV1ClientSet.AcidV1()
+	kubeClient.PostgresqlsGetter = kubeClient.AcidV1ClientSet.AcidV1()

	return kubeClient, nil
 }

 // SetPostgresCRDStatus of Postgres cluster
-func (client *KubernetesClient) SetPostgresCRDStatus(clusterName spec.NamespacedName, status string) (*acidv1.Postgresql, error) {
-	var pg *acidv1.Postgresql
-	var pgStatus acidv1.PostgresStatus
+func (client *KubernetesClient) SetPostgresCRDStatus(clusterName spec.NamespacedName, status string) (*apiacidv1.Postgresql, error) {
+	var pg *apiacidv1.Postgresql
+	var pgStatus apiacidv1.PostgresStatus
	pgStatus.PostgresClusterStatus = status

	patch, err := json.Marshal(struct {
@@ -176,7 +187,7 @@ func (client *KubernetesClient) SetPostgresCRDStatus(clusterName spec.Namespaced
	// we cannot do a full scale update here without fetching the previous manifest (as the resourceVersion may differ),
	// however, we could do patch without it. In the future, once /status subresource is there (starting Kubernetes 1.11)
	// we should take advantage of it.
-	pg, err = client.AcidV1ClientSet.AcidV1().Postgresqls(clusterName.Namespace).Patch(
+	pg, err = client.PostgresqlsGetter.Postgresqls(clusterName.Namespace).Patch(
		context.TODO(), clusterName.Name, types.MergePatchType, patch, metav1.PatchOptions{}, "status")
	if err != nil {
		return pg, fmt.Errorf("could not update status: %v", err)
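The clientset-to-getter change above follows a common Go testing pattern: by embedding the generated getter interfaces (`acidv1.PostgresqlsGetter`, `acidv1.PostgresTeamsGetter`, `acidv1.OperatorConfigurationsGetter`) in `KubernetesClient`, production code keeps working against the real clientset while unit tests can back the same fields with the generated fake clientset, as the new annotations test does. The sketch below illustrates the idea with made-up types (`WidgetsGetter`, `realGetter`, `fakeGetter`); it is not operator code.

package main

import "fmt"

// WidgetsGetter plays the role of a generated getter interface
// such as acidv1.PostgresqlsGetter (illustrative only).
type WidgetsGetter interface {
	Widgets(namespace string) []string
}

// Client embeds the getter, just as KubernetesClient now embeds
// the acid.zalan.do getters.
type Client struct {
	WidgetsGetter
}

// realGetter stands in for the real clientset's typed client.
type realGetter struct{}

func (realGetter) Widgets(ns string) []string { return []string{"api:" + ns} }

// fakeGetter stands in for the generated fake clientset used in tests.
type fakeGetter struct{}

func (fakeGetter) Widgets(ns string) []string { return []string{"fake:" + ns} }

func main() {
	prod := Client{WidgetsGetter: realGetter{}}
	test := Client{WidgetsGetter: fakeGetter{}}
	fmt.Println(prod.Widgets("default"), test.Widgets("default"))
}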