sync all resources to cluster fields (#2713)
* sync all resources to cluster fields (CronJob, Streams, Patroni resources)
* separate sync and delete logic for Patroni resources
* align delete streams and secrets logic with other resources
* rename gatherApplicationIds to getDistinctApplicationIds
* improve slot check before syncing streams CRD
* add ownerReferences and annotations diff to Patroni objects
* add extra sync code for the config service so it does not get too ugly
* some bugfixes when comparing annotations and return err when found
* sync Patroni resources on update event and extend unit tests
* add config service/endpoint owner references check to e2e test
parent 31f92a1aa0
commit 25ccc87317
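The bullet above about adding an ownerReferences and annotations diff to Patroni objects can be pictured with a minimal sketch. Everything below is illustrative only — the helper name `metadataDiffers` and the example values are assumptions, not code from this commit — but it shows the general shape of such a comparison using the Kubernetes meta/v1 types.

```go
package main

import (
	"fmt"
	"reflect"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
)

// metadataDiffers is a hypothetical helper: it reports whether the owner
// references or annotations of an existing child object differ from the
// desired state and therefore would require a metadata patch.
func metadataDiffers(cur, desired metav1.ObjectMeta) (bool, string) {
	if !reflect.DeepEqual(cur.OwnerReferences, desired.OwnerReferences) {
		return true, "new owner references do not match the current ones"
	}
	if !reflect.DeepEqual(cur.Annotations, desired.Annotations) {
		return true, "new annotations do not match the current ones"
	}
	return false, ""
}

func main() {
	controller := true
	// Example owner reference pointing at a postgresql custom resource;
	// the name and UID are placeholders.
	owner := metav1.OwnerReference{
		APIVersion: "acid.zalan.do/v1",
		Kind:       "postgresql",
		Name:       "acid-minimal-cluster",
		UID:        types.UID("00000000-0000-0000-0000-000000000000"),
		Controller: &controller,
	}

	cur := metav1.ObjectMeta{Annotations: map[string]string{"owned-by": "postgres-operator"}}
	desired := metav1.ObjectMeta{
		OwnerReferences: []metav1.OwnerReference{owner},
		Annotations:     map[string]string{"owned-by": "postgres-operator"},
	}

	changed, reason := metadataDiffers(cur, desired)
	// prints: true new owner references do not match the current ones
	fmt.Println(changed, reason)
}
```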
@@ -252,17 +252,16 @@ will differ and trigger a rolling update of the pods.
 ## Owner References and Finalizers
 
 The Postgres Operator can set [owner references](https://kubernetes.io/docs/concepts/overview/working-with-objects/owners-dependents/) to most of a cluster's child resources to improve
-monitoring with GitOps tools and enable cascading deletes. There are three
+monitoring with GitOps tools and enable cascading deletes. There are two
 exceptions:
 
 * Persistent Volume Claims, because they are handled by the [PV Reclaim Policy](https://kubernetes.io/docs/tasks/administer-cluster/change-pv-reclaim-policy/) of the Stateful Set
-* The config endpoint + headless service resource because it is managed by Patroni
 * Cross-namespace secrets, because owner references are not allowed across namespaces by design
 
 The operator would clean these resources up with its regular delete loop
 unless they got synced correctly. If for some reason the initial cluster sync
 fails, e.g. after a cluster creation or operator restart, a deletion of the
-cluster manifest would leave orphaned resources behind which the user has to
+cluster manifest might leave orphaned resources behind which the user has to
 clean up manually.
 
 Another option is to enable finalizers which first ensures the deletion of all
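To make the documentation change above more concrete, here is a minimal, illustrative sketch of what an owner reference on one of a cluster's child resources looks like. The object names and UID are placeholders, not values from this commit; the point is that a child carrying such a reference is garbage-collected by Kubernetes when the owning postgresql resource is deleted, which is what enables the cascading deletes mentioned in the text.

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
)

func main() {
	controller := true
	blockOwnerDeletion := true

	// A child Secret whose metadata carries an owner reference to the
	// postgresql custom resource (names and UID are placeholders).
	secret := v1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "postgres.acid-minimal-cluster.credentials",
			Namespace: "default",
			OwnerReferences: []metav1.OwnerReference{
				{
					APIVersion:         "acid.zalan.do/v1",
					Kind:               "postgresql",
					Name:               "acid-minimal-cluster",
					UID:                types.UID("00000000-0000-0000-0000-000000000000"),
					Controller:         &controller,
					BlockOwnerDeletion: &blockOwnerDeletion,
				},
			},
		},
	}

	// When the owner (the postgresql resource) is deleted, the Kubernetes
	// garbage collector removes dependents like this Secret automatically.
	fmt.Printf("secret %s is owned by %s/%s\n",
		secret.Name,
		secret.OwnerReferences[0].Kind,
		secret.OwnerReferences[0].Name)
}
```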
@@ -402,8 +402,8 @@ class EndToEndTestCase(unittest.TestCase):
 "max_connections": new_max_connections_value,
 "wal_level": "logical"
 }
 },
 "patroni": {
 "slots": {
 "first_slot": {
 "type": "physical"
@@ -414,7 +414,7 @@ class EndToEndTestCase(unittest.TestCase):
 "retry_timeout": 9,
 "synchronous_mode": True,
 "failsafe_mode": True,
 }
 }
 }
 
@@ -517,7 +517,7 @@ class EndToEndTestCase(unittest.TestCase):
 pg_add_new_slots_patch = {
 "spec": {
 "patroni": {
 "slots": {
 "test_slot": {
 "type": "logical",
 "database": "foo",
@@ -1667,19 +1667,18 @@ class EndToEndTestCase(unittest.TestCase):
 k8s.api.custom_objects_api.delete_namespaced_custom_object(
 "acid.zalan.do", "v1", self.test_namespace, "postgresqls", cluster_name)
 
-# statefulset, pod disruption budget and secrets should be deleted via owner reference
+# child resources with owner references should be deleted via owner references
 self.eventuallyEqual(lambda: k8s.count_pods_with_label(cluster_label), 0, "Pods not deleted")
 self.eventuallyEqual(lambda: k8s.count_statefulsets_with_label(cluster_label), 0, "Statefulset not deleted")
+self.eventuallyEqual(lambda: k8s.count_services_with_label(cluster_label), 0, "Services not deleted")
+self.eventuallyEqual(lambda: k8s.count_endpoints_with_label(cluster_label), 0, "Endpoints not deleted")
 self.eventuallyEqual(lambda: k8s.count_pdbs_with_label(cluster_label), 0, "Pod disruption budget not deleted")
 self.eventuallyEqual(lambda: k8s.count_secrets_with_label(cluster_label), 0, "Secrets were not deleted")
 
-time.sleep(5) # wait for the operator to also delete the leftovers
+time.sleep(5) # wait for the operator to also delete the PVCs
 
-# pvcs and Patroni config service/endpoint should not be affected by owner reference
-# but deleted by the operator almost immediately
+# pvcs do not have an owner reference but will deleted by the operator almost immediately
 self.eventuallyEqual(lambda: k8s.count_pvcs_with_label(cluster_label), 0, "PVCs not deleted")
-self.eventuallyEqual(lambda: k8s.count_services_with_label(cluster_label), 0, "Patroni config service not deleted")
-self.eventuallyEqual(lambda: k8s.count_endpoints_with_label(cluster_label), 0, "Patroni config endpoint not deleted")
 
 # disable owner references in config
 disable_owner_refs = {
@@ -2143,13 +2142,13 @@ class EndToEndTestCase(unittest.TestCase):
 # update the manifest with the streams section
 patch_streaming_config = {
 "spec": {
 "patroni": {
 "slots": {
 "manual_slot": {
 "type": "physical"
 }
 }
 },
 "streams": [
 {
 "applicationId": "test-app",
@@ -2481,11 +2480,15 @@ class EndToEndTestCase(unittest.TestCase):
 self.assertTrue(self.has_postgresql_owner_reference(svc.metadata.owner_references, inverse), "primary service owner reference check failed")
 replica_svc = k8s.api.core_v1.read_namespaced_service(cluster_name + "-repl", cluster_namespace)
 self.assertTrue(self.has_postgresql_owner_reference(replica_svc.metadata.owner_references, inverse), "replica service owner reference check failed")
+config_svc = k8s.api.core_v1.read_namespaced_service(cluster_name + "-config", cluster_namespace)
+self.assertTrue(self.has_postgresql_owner_reference(config_svc.metadata.owner_references, inverse), "config service owner reference check failed")
 
 ep = k8s.api.core_v1.read_namespaced_endpoints(cluster_name, cluster_namespace)
 self.assertTrue(self.has_postgresql_owner_reference(ep.metadata.owner_references, inverse), "primary endpoint owner reference check failed")
 replica_ep = k8s.api.core_v1.read_namespaced_endpoints(cluster_name + "-repl", cluster_namespace)
-self.assertTrue(self.has_postgresql_owner_reference(replica_ep.metadata.owner_references, inverse), "replica owner reference check failed")
+self.assertTrue(self.has_postgresql_owner_reference(replica_ep.metadata.owner_references, inverse), "replica endpoint owner reference check failed")
+config_ep = k8s.api.core_v1.read_namespaced_endpoints(cluster_name + "-config", cluster_namespace)
+self.assertTrue(self.has_postgresql_owner_reference(config_ep.metadata.owner_references, inverse), "config endpoint owner reference check failed")
 
 pdb = k8s.api.policy_v1.read_namespaced_pod_disruption_budget("postgres-{}-pdb".format(cluster_name), cluster_namespace)
 self.assertTrue(self.has_postgresql_owner_reference(pdb.metadata.owner_references, inverse), "pod disruption owner reference check failed")
@@ -3,7 +3,6 @@ package cluster
 // Postgres CustomResourceDefinition object i.e. Spilo
 
 import (
-"context"
 "database/sql"
 "encoding/json"
 "fmt"
@@ -15,6 +14,7 @@ import (
 
 "github.com/sirupsen/logrus"
 acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1"
+zalandov1 "github.com/zalando/postgres-operator/pkg/apis/zalando.org/v1"
 
 "github.com/zalando/postgres-operator/pkg/generated/clientset/versioned/scheme"
 "github.com/zalando/postgres-operator/pkg/spec"
@@ -30,7 +30,6 @@ import (
 appsv1 "k8s.io/api/apps/v1"
 batchv1 "k8s.io/api/batch/v1"
 v1 "k8s.io/api/core/v1"
-apipolicyv1 "k8s.io/api/policy/v1"
 policyv1 "k8s.io/api/policy/v1"
 rbacv1 "k8s.io/api/rbac/v1"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -62,9 +61,13 @@ type Config struct {
 type kubeResources struct {
 Services map[PostgresRole]*v1.Service
 Endpoints map[PostgresRole]*v1.Endpoints
+PatroniEndpoints map[string]*v1.Endpoints
+PatroniConfigMaps map[string]*v1.ConfigMap
 Secrets map[types.UID]*v1.Secret
 Statefulset *appsv1.StatefulSet
 PodDisruptionBudget *policyv1.PodDisruptionBudget
+LogicalBackupJob *batchv1.CronJob
+Streams map[string]*zalandov1.FabricEventStream
 //Pods are treated separately
 //PVCs are treated separately
 }
@@ -132,9 +135,12 @@ func New(cfg Config, kubeClient k8sutil.KubernetesClient, pgSpec acidv1.Postgres
 systemUsers: make(map[string]spec.PgUser),
 podSubscribers: make(map[spec.NamespacedName]chan PodEvent),
 kubeResources: kubeResources{
 Secrets: make(map[types.UID]*v1.Secret),
 Services: make(map[PostgresRole]*v1.Service),
-Endpoints: make(map[PostgresRole]*v1.Endpoints)},
+Endpoints: make(map[PostgresRole]*v1.Endpoints),
+PatroniEndpoints: make(map[string]*v1.Endpoints),
+PatroniConfigMaps: make(map[string]*v1.ConfigMap),
+Streams: make(map[string]*zalandov1.FabricEventStream)},
 userSyncStrategy: users.DefaultUserSyncStrategy{
 PasswordEncryption: passwordEncryption,
 RoleDeletionSuffix: cfg.OpConfig.RoleDeletionSuffix,
@@ -357,6 +363,11 @@ func (c *Cluster) Create() (err error) {
 c.logger.Infof("pods are ready")
 c.eventRecorder.Event(c.GetReference(), v1.EventTypeNormal, "StatefulSet", "Pods are ready")
 
+// sync resources created by Patroni
+if err = c.syncPatroniResources(); err != nil {
+c.logger.Warnf("Patroni resources not yet synced: %v", err)
+}
+
 // create database objects unless we are running without pods or disabled
 // that feature explicitly
 if !(c.databaseAccessDisabled() || c.getNumberOfInstances(&c.Spec) <= 0 || c.Spec.StandbyCluster != nil) {
@@ -382,10 +393,6 @@ func (c *Cluster) Create() (err error) {
 c.logger.Info("a k8s cron job for logical backup has been successfully created")
 }
 
-if err := c.listResources(); err != nil {
-c.logger.Errorf("could not list resources: %v", err)
-}
-
 // Create connection pooler deployment and services if necessary. Since we
 // need to perform some operations with the database itself (e.g. install
 // lookup function), do it as the last step, when everything is available.
@@ -410,6 +417,10 @@ func (c *Cluster) Create() (err error) {
 }
 }
 
+if err := c.listResources(); err != nil {
+c.logger.Errorf("could not list resources: %v", err)
+}
+
 return nil
 }
 
@@ -856,7 +867,7 @@ func (c *Cluster) compareLogicalBackupJob(cur, new *batchv1.CronJob) (match bool
 return true, ""
 }
 
-func (c *Cluster) comparePodDisruptionBudget(cur, new *apipolicyv1.PodDisruptionBudget) (bool, string) {
+func (c *Cluster) comparePodDisruptionBudget(cur, new *policyv1.PodDisruptionBudget) (bool, string) {
 //TODO: improve comparison
 if !reflect.DeepEqual(new.Spec, cur.Spec) {
 return false, "new PDB's spec does not match the current one"
@@ -977,6 +988,12 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error {
 updateFailed = true
 }
 
+// Patroni service and endpoints / config maps
+if err := c.syncPatroniResources(); err != nil {
+c.logger.Errorf("could not sync services: %v", err)
+updateFailed = true
+}
+
 // Users
 func() {
 // check if users need to be synced during update
@@ -1191,7 +1208,6 @@ func (c *Cluster) Delete() error {
 }
 
 for _, role := range []PostgresRole{Master, Replica} {
-
 if !c.patroniKubernetesUseConfigMaps() {
 if err := c.deleteEndpoint(role); err != nil {
 anyErrors = true
@@ -1207,10 +1223,10 @@ func (c *Cluster) Delete() error {
 }
 }
 
-if err := c.deletePatroniClusterObjects(); err != nil {
+if err := c.deletePatroniResources(); err != nil {
 anyErrors = true
-c.logger.Warningf("could not remove leftover patroni objects; %v", err)
-c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeWarning, "Delete", "could not remove leftover patroni objects; %v", err)
+c.logger.Warningf("could not delete all Patroni resources: %v", err)
+c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeWarning, "Delete", "could not delete all Patroni resources: %v", err)
 }
 
 // Delete connection pooler objects anyway, even if it's not mentioned in the
@@ -1742,96 +1758,3 @@ func (c *Cluster) Lock() {
 func (c *Cluster) Unlock() {
 c.mu.Unlock()
 }
-
-type simpleActionWithResult func()
-
-type clusterObjectGet func(name string) (spec.NamespacedName, error)
-
-type clusterObjectDelete func(name string) error
-
-func (c *Cluster) deletePatroniClusterObjects() error {
-// TODO: figure out how to remove leftover patroni objects in other cases
-var actionsList []simpleActionWithResult
-
-if !c.patroniUsesKubernetes() {
-c.logger.Infof("not cleaning up Etcd Patroni objects on cluster delete")
-}
-
-actionsList = append(actionsList, c.deletePatroniClusterServices)
-if c.patroniKubernetesUseConfigMaps() {
-actionsList = append(actionsList, c.deletePatroniClusterConfigMaps)
-} else {
-actionsList = append(actionsList, c.deletePatroniClusterEndpoints)
-}
-
-c.logger.Debugf("removing leftover Patroni objects (endpoints / services and configmaps)")
-for _, deleter := range actionsList {
-deleter()
-}
-return nil
-}
-
-func deleteClusterObject(
-get clusterObjectGet,
-del clusterObjectDelete,
-objType string,
-clusterName string,
-logger *logrus.Entry) {
-for _, suffix := range patroniObjectSuffixes {
-name := fmt.Sprintf("%s-%s", clusterName, suffix)
-
-namespacedName, err := get(name)
-if err == nil {
-logger.Debugf("deleting %s %q",
-objType, namespacedName)
-
-if err = del(name); err != nil {
-logger.Warningf("could not delete %s %q: %v",
-objType, namespacedName, err)
-}
-
-} else if !k8sutil.ResourceNotFound(err) {
-logger.Warningf("could not fetch %s %q: %v",
-objType, namespacedName, err)
-}
-}
-}
-
-func (c *Cluster) deletePatroniClusterServices() {
-get := func(name string) (spec.NamespacedName, error) {
-svc, err := c.KubeClient.Services(c.Namespace).Get(context.TODO(), name, metav1.GetOptions{})
-return util.NameFromMeta(svc.ObjectMeta), err
-}
-
-deleteServiceFn := func(name string) error {
-return c.KubeClient.Services(c.Namespace).Delete(context.TODO(), name, c.deleteOptions)
-}
-
-deleteClusterObject(get, deleteServiceFn, "service", c.Name, c.logger)
-}
-
-func (c *Cluster) deletePatroniClusterEndpoints() {
-get := func(name string) (spec.NamespacedName, error) {
-ep, err := c.KubeClient.Endpoints(c.Namespace).Get(context.TODO(), name, metav1.GetOptions{})
-return util.NameFromMeta(ep.ObjectMeta), err
-}
-
-deleteEndpointFn := func(name string) error {
-return c.KubeClient.Endpoints(c.Namespace).Delete(context.TODO(), name, c.deleteOptions)
-}
-
-deleteClusterObject(get, deleteEndpointFn, "endpoint", c.Name, c.logger)
-}
-
-func (c *Cluster) deletePatroniClusterConfigMaps() {
-get := func(name string) (spec.NamespacedName, error) {
-cm, err := c.KubeClient.ConfigMaps(c.Namespace).Get(context.TODO(), name, metav1.GetOptions{})
-return util.NameFromMeta(cm.ObjectMeta), err
-}
-
-deleteConfigMapFn := func(name string) error {
-return c.KubeClient.ConfigMaps(c.Namespace).Delete(context.TODO(), name, c.deleteOptions)
-}
-
-deleteClusterObject(get, deleteConfigMapFn, "configmap", c.Name, c.logger)
-}
@@ -655,7 +655,7 @@ func (c *Cluster) deleteConnectionPoolerSecret() (err error) {
 if err != nil {
 c.logger.Debugf("could not get connection pooler secret %s: %v", secretName, err)
 } else {
-if err = c.deleteSecret(secret.UID, *secret); err != nil {
+if err = c.deleteSecret(secret.UID); err != nil {
 return fmt.Errorf("could not delete pooler secret: %v", err)
 }
 }
@@ -79,19 +79,13 @@ func (c *Cluster) statefulSetName() string {
 return c.Name
 }
 
-func (c *Cluster) endpointName(role PostgresRole) string {
-name := c.Name
-if role == Replica {
-name = fmt.Sprintf("%s-%s", name, "repl")
-}
-
-return name
-}
-
 func (c *Cluster) serviceName(role PostgresRole) string {
 name := c.Name
-if role == Replica {
+switch role {
+case Replica:
 name = fmt.Sprintf("%s-%s", name, "repl")
+case Patroni:
+name = fmt.Sprintf("%s-%s", name, "config")
 }
 
 return name
@@ -2072,7 +2066,7 @@ func (c *Cluster) getCustomServiceAnnotations(role PostgresRole, spec *acidv1.Po
 func (c *Cluster) generateEndpoint(role PostgresRole, subsets []v1.EndpointSubset) *v1.Endpoints {
 endpoints := &v1.Endpoints{
 ObjectMeta: metav1.ObjectMeta{
-Name: c.endpointName(role),
+Name: c.serviceName(role),
 Namespace: c.Namespace,
 Annotations: c.annotationsSet(nil),
 Labels: c.roleLabelsSet(true, role),
@@ -31,20 +31,36 @@ func (c *Cluster) listResources() error {
 c.logger.Infof("found statefulset: %q (uid: %q)", util.NameFromMeta(c.Statefulset.ObjectMeta), c.Statefulset.UID)
 }
 
-for _, obj := range c.Secrets {
-c.logger.Infof("found secret: %q (uid: %q) namesapce: %s", util.NameFromMeta(obj.ObjectMeta), obj.UID, obj.ObjectMeta.Namespace)
+for appId, stream := range c.Streams {
+c.logger.Infof("found stream: %q with application id %q (uid: %q)", util.NameFromMeta(stream.ObjectMeta), appId, stream.UID)
 }
 
-if !c.patroniKubernetesUseConfigMaps() {
-for role, endpoint := range c.Endpoints {
-c.logger.Infof("found %s endpoint: %q (uid: %q)", role, util.NameFromMeta(endpoint.ObjectMeta), endpoint.UID)
-}
-}
+if c.LogicalBackupJob != nil {
+c.logger.Infof("found logical backup job: %q (uid: %q)", util.NameFromMeta(c.LogicalBackupJob.ObjectMeta), c.LogicalBackupJob.UID)
+}
+
+for _, secret := range c.Secrets {
+c.logger.Infof("found secret: %q (uid: %q) namespace: %s", util.NameFromMeta(secret.ObjectMeta), secret.UID, secret.ObjectMeta.Namespace)
+}
 
 for role, service := range c.Services {
 c.logger.Infof("found %s service: %q (uid: %q)", role, util.NameFromMeta(service.ObjectMeta), service.UID)
 }
 
+for role, endpoint := range c.Endpoints {
+c.logger.Infof("found %s endpoint: %q (uid: %q)", role, util.NameFromMeta(endpoint.ObjectMeta), endpoint.UID)
+}
+
+if c.patroniKubernetesUseConfigMaps() {
+for suffix, configmap := range c.PatroniConfigMaps {
+c.logger.Infof("found %s Patroni config map: %q (uid: %q)", suffix, util.NameFromMeta(configmap.ObjectMeta), configmap.UID)
+}
+} else {
+for suffix, endpoint := range c.PatroniEndpoints {
+c.logger.Infof("found %s Patroni endpoint: %q (uid: %q)", suffix, util.NameFromMeta(endpoint.ObjectMeta), endpoint.UID)
+}
+}
+
 pods, err := c.listPods()
 if err != nil {
 return fmt.Errorf("could not get the list of pods: %v", err)
@@ -63,6 +79,15 @@ func (c *Cluster) listResources() error {
 c.logger.Infof("found PVC: %q (uid: %q)", util.NameFromMeta(obj.ObjectMeta), obj.UID)
 }
 
+for role, poolerObjs := range c.ConnectionPooler {
+if poolerObjs.Deployment != nil {
+c.logger.Infof("found %s pooler deployment: %q (uid: %q) ", role, util.NameFromMeta(poolerObjs.Deployment.ObjectMeta), poolerObjs.Deployment.UID)
+}
+if poolerObjs.Service != nil {
+c.logger.Infof("found %s pooler service: %q (uid: %q) ", role, util.NameFromMeta(poolerObjs.Service.ObjectMeta), poolerObjs.Service.UID)
+}
+}
+
 return nil
 }
 
@@ -332,11 +357,10 @@ func (c *Cluster) deleteService(role PostgresRole) error {
 }
 
 if err := c.KubeClient.Services(c.Services[role].Namespace).Delete(context.TODO(), c.Services[role].Name, c.deleteOptions); err != nil {
-if k8sutil.ResourceNotFound(err) {
-c.logger.Debugf("%s service has already been deleted", role)
-} else if err != nil {
-return err
+if !k8sutil.ResourceNotFound(err) {
+return fmt.Errorf("could not delete %s service: %v", role, err)
 }
+c.logger.Debugf("%s service has already been deleted", role)
 }
 
 c.logger.Infof("%s service %q has been deleted", role, util.NameFromMeta(c.Services[role].ObjectMeta))
@@ -478,11 +502,10 @@ func (c *Cluster) deleteEndpoint(role PostgresRole) error {
 }
 
 if err := c.KubeClient.Endpoints(c.Endpoints[role].Namespace).Delete(context.TODO(), c.Endpoints[role].Name, c.deleteOptions); err != nil {
-if k8sutil.ResourceNotFound(err) {
-c.logger.Debugf("%s endpoint has already been deleted", role)
-} else if err != nil {
-return fmt.Errorf("could not delete endpoint: %v", err)
+if !k8sutil.ResourceNotFound(err) {
+return fmt.Errorf("could not delete %s endpoint: %v", role, err)
 }
+c.logger.Debugf("%s endpoint has already been deleted", role)
 }
 
 c.logger.Infof("%s endpoint %q has been deleted", role, util.NameFromMeta(c.Endpoints[role].ObjectMeta))
@@ -491,12 +514,83 @@ func (c *Cluster) deleteEndpoint(role PostgresRole) error {
 return nil
 }
 
+func (c *Cluster) deletePatroniResources() error {
+c.setProcessName("deleting Patroni resources")
+errors := make([]string, 0)
+
+if err := c.deleteService(Patroni); err != nil {
+errors = append(errors, fmt.Sprintf("%v", err))
+}
+
+for _, suffix := range patroniObjectSuffixes {
+if c.patroniKubernetesUseConfigMaps() {
+if err := c.deletePatroniConfigMap(suffix); err != nil {
+errors = append(errors, fmt.Sprintf("%v", err))
+}
+} else {
+if err := c.deletePatroniEndpoint(suffix); err != nil {
+errors = append(errors, fmt.Sprintf("%v", err))
+}
+}
+}
+
+if len(errors) > 0 {
+return fmt.Errorf("%v", strings.Join(errors, `', '`))
+}
+
+return nil
+}
+
+func (c *Cluster) deletePatroniConfigMap(suffix string) error {
+c.setProcessName("deleting Patroni config map")
+c.logger.Debugln("deleting Patroni config map")
+cm := c.PatroniConfigMaps[suffix]
+if cm == nil {
+c.logger.Debugf("there is no %s Patroni config map in the cluster", suffix)
+return nil
+}
+
+if err := c.KubeClient.ConfigMaps(cm.Namespace).Delete(context.TODO(), cm.Name, c.deleteOptions); err != nil {
+if !k8sutil.ResourceNotFound(err) {
+return fmt.Errorf("could not delete %s Patroni config map %q: %v", suffix, cm.Name, err)
+}
+c.logger.Debugf("%s Patroni config map has already been deleted", suffix)
+}
+
+c.logger.Infof("%s Patroni config map %q has been deleted", suffix, util.NameFromMeta(cm.ObjectMeta))
+delete(c.PatroniConfigMaps, suffix)
+
+return nil
+}
+
+func (c *Cluster) deletePatroniEndpoint(suffix string) error {
+c.setProcessName("deleting Patroni endpoint")
+c.logger.Debugln("deleting Patroni endpoint")
+ep := c.PatroniEndpoints[suffix]
+if ep == nil {
+c.logger.Debugf("there is no %s Patroni endpoint in the cluster", suffix)
+return nil
+}
+
+if err := c.KubeClient.Endpoints(ep.Namespace).Delete(context.TODO(), ep.Name, c.deleteOptions); err != nil {
+if !k8sutil.ResourceNotFound(err) {
+return fmt.Errorf("could not delete %s Patroni endpoint %q: %v", suffix, ep.Name, err)
+}
+c.logger.Debugf("%s Patroni endpoint has already been deleted", suffix)
+}
+
+c.logger.Infof("%s Patroni endpoint %q has been deleted", suffix, util.NameFromMeta(ep.ObjectMeta))
+delete(c.PatroniEndpoints, suffix)
+
+return nil
+}
+
 func (c *Cluster) deleteSecrets() error {
 c.setProcessName("deleting secrets")
 errors := make([]string, 0)
 
-for uid, secret := range c.Secrets {
-err := c.deleteSecret(uid, *secret)
+for uid := range c.Secrets {
+err := c.deleteSecret(uid)
 if err != nil {
 errors = append(errors, fmt.Sprintf("%v", err))
 }
@@ -509,8 +603,9 @@ func (c *Cluster) deleteSecrets() error {
 return nil
 }
 
-func (c *Cluster) deleteSecret(uid types.UID, secret v1.Secret) error {
+func (c *Cluster) deleteSecret(uid types.UID) error {
 c.setProcessName("deleting secret")
+secret := c.Secrets[uid]
 secretName := util.NameFromMeta(secret.ObjectMeta)
 c.logger.Debugf("deleting secret %q", secretName)
 err := c.KubeClient.Secrets(secret.Namespace).Delete(context.TODO(), secret.Name, c.deleteOptions)
@@ -539,10 +634,11 @@ func (c *Cluster) createLogicalBackupJob() (err error) {
 return fmt.Errorf("could not generate k8s cron job spec: %v", err)
 }
 
-_, err = c.KubeClient.CronJobsGetter.CronJobs(c.Namespace).Create(context.TODO(), logicalBackupJobSpec, metav1.CreateOptions{})
+cronJob, err := c.KubeClient.CronJobsGetter.CronJobs(c.Namespace).Create(context.TODO(), logicalBackupJobSpec, metav1.CreateOptions{})
 if err != nil {
 return fmt.Errorf("could not create k8s cron job: %v", err)
 }
+c.LogicalBackupJob = cronJob
 
 return nil
 }
@@ -556,7 +652,7 @@ func (c *Cluster) patchLogicalBackupJob(newJob *batchv1.CronJob) error {
 }
 
 // update the backup job spec
-_, err = c.KubeClient.CronJobsGetter.CronJobs(c.Namespace).Patch(
+cronJob, err := c.KubeClient.CronJobsGetter.CronJobs(c.Namespace).Patch(
 context.TODO(),
 c.getLogicalBackupJobName(),
 types.MergePatchType,
@@ -566,20 +662,24 @@ func (c *Cluster) patchLogicalBackupJob(newJob *batchv1.CronJob) error {
 if err != nil {
 return fmt.Errorf("could not patch logical backup job: %v", err)
 }
+c.LogicalBackupJob = cronJob
 
 return nil
 }
 
 func (c *Cluster) deleteLogicalBackupJob() error {
+if c.LogicalBackupJob == nil {
+return nil
+}
 c.logger.Info("removing the logical backup job")
 
-err := c.KubeClient.CronJobsGetter.CronJobs(c.Namespace).Delete(context.TODO(), c.getLogicalBackupJobName(), c.deleteOptions)
+err := c.KubeClient.CronJobsGetter.CronJobs(c.LogicalBackupJob.Namespace).Delete(context.TODO(), c.getLogicalBackupJobName(), c.deleteOptions)
 if k8sutil.ResourceNotFound(err) {
 c.logger.Debugf("logical backup cron job %q has already been deleted", c.getLogicalBackupJobName())
 } else if err != nil {
 return err
 }
+c.LogicalBackupJob = nil
 
 return nil
 }
@@ -29,51 +29,46 @@ func (c *Cluster) createStreams(appId string) (*zalandov1.FabricEventStream, err
 return streamCRD, nil
 }
 
-func (c *Cluster) updateStreams(newEventStreams *zalandov1.FabricEventStream) error {
+func (c *Cluster) updateStreams(newEventStreams *zalandov1.FabricEventStream) (patchedStream *zalandov1.FabricEventStream, err error) {
 c.setProcessName("updating event streams")
 
 patch, err := json.Marshal(newEventStreams)
 if err != nil {
-return fmt.Errorf("could not marshal new event stream CRD %q: %v", newEventStreams.Name, err)
+return nil, fmt.Errorf("could not marshal new event stream CRD %q: %v", newEventStreams.Name, err)
 }
-if _, err := c.KubeClient.FabricEventStreams(newEventStreams.Namespace).Patch(
+if patchedStream, err = c.KubeClient.FabricEventStreams(newEventStreams.Namespace).Patch(
 context.TODO(), newEventStreams.Name, types.MergePatchType, patch, metav1.PatchOptions{}); err != nil {
-return err
+return nil, err
 }
 
-return nil
+return patchedStream, nil
 }
 
-func (c *Cluster) deleteStream(stream *zalandov1.FabricEventStream) error {
+func (c *Cluster) deleteStream(appId string) error {
 c.setProcessName("deleting event stream")
 
-err := c.KubeClient.FabricEventStreams(stream.Namespace).Delete(context.TODO(), stream.Name, metav1.DeleteOptions{})
+err := c.KubeClient.FabricEventStreams(c.Streams[appId].Namespace).Delete(context.TODO(), c.Streams[appId].Name, metav1.DeleteOptions{})
 if err != nil {
-return fmt.Errorf("could not delete event stream %q: %v", stream.Name, err)
+return fmt.Errorf("could not delete event stream %q with applicationId %s: %v", c.Streams[appId].Name, appId, err)
 }
+delete(c.Streams, appId)
 
 return nil
 }
 
 func (c *Cluster) deleteStreams() error {
-c.setProcessName("deleting event streams")
-
 // check if stream CRD is installed before trying a delete
 _, err := c.KubeClient.CustomResourceDefinitions().Get(context.TODO(), constants.EventStreamCRDName, metav1.GetOptions{})
 if k8sutil.ResourceNotFound(err) {
 return nil
 }
+c.setProcessName("deleting event streams")
 errors := make([]string, 0)
-listOptions := metav1.ListOptions{
-LabelSelector: c.labelsSet(true).String(),
-}
-streams, err := c.KubeClient.FabricEventStreams(c.Namespace).List(context.TODO(), listOptions)
-if err != nil {
-return fmt.Errorf("could not list of FabricEventStreams: %v", err)
-}
-for _, stream := range streams.Items {
-err := c.deleteStream(&stream)
+for appId := range c.Streams {
+err := c.deleteStream(appId)
 if err != nil {
-errors = append(errors, fmt.Sprintf("could not delete event stream %q: %v", stream.Name, err))
+errors = append(errors, fmt.Sprintf("%v", err))
 }
 }
@@ -84,7 +79,7 @@ func (c *Cluster) deleteStreams() error {
 return nil
 }
 
-func gatherApplicationIds(streams []acidv1.Stream) []string {
+func getDistinctApplicationIds(streams []acidv1.Stream) []string {
 appIds := make([]string, 0)
 for _, stream := range streams {
 if !util.SliceContains(appIds, stream.ApplicationId) {
@@ -137,7 +132,7 @@ func (c *Cluster) syncPublication(dbName string, databaseSlotsList map[string]za
 }
 
 // check if there is any deletion
-for slotName, _ := range currentPublications {
+for slotName := range currentPublications {
 if _, exists := databaseSlotsList[slotName]; !exists {
 deletePublications = append(deletePublications, slotName)
 }
@@ -334,13 +329,13 @@ func (c *Cluster) syncStreams() error {
 return fmt.Errorf("could not get list of databases: %v", err)
 }
 // get database name with empty list of slot, except template0 and template1
-for dbName, _ := range listDatabases {
+for dbName := range listDatabases {
 if dbName != "template0" && dbName != "template1" {
 databaseSlots[dbName] = map[string]zalandov1.Slot{}
 }
 }
 
-// gather list of required slots and publications, group by database
+// get list of required slots and publications, group by database
 for _, stream := range c.Spec.Streams {
 if _, exists := databaseSlots[stream.Database]; !exists {
 c.logger.Warningf("database %q does not exist in the cluster", stream.Database)
@@ -394,78 +389,73 @@ func (c *Cluster) syncStreams() error {
 }
 
 // finally sync stream CRDs
-err = c.createOrUpdateStreams(slotsToSync)
-if err != nil {
-return err
+// get distinct application IDs from streams section
+// there will be a separate event stream resource for each ID
+appIds := getDistinctApplicationIds(c.Spec.Streams)
+for _, appId := range appIds {
+if hasSlotsInSync(appId, databaseSlots, slotsToSync) {
+if err = c.syncStream(appId); err != nil {
+c.logger.Warningf("could not sync event streams with applicationId %s: %v", appId, err)
+}
+} else {
+c.logger.Warningf("database replication slots for streams with applicationId %s not in sync, skipping event stream sync", appId)
+}
+}
+
+// check if there is any deletion
+if err = c.cleanupRemovedStreams(appIds); err != nil {
+return fmt.Errorf("%v", err)
 }
 
 return nil
 }
 
-func (c *Cluster) createOrUpdateStreams(createdSlots map[string]map[string]string) error {
-
-// fetch different application IDs from streams section
-// there will be a separate event stream resource for each ID
-appIds := gatherApplicationIds(c.Spec.Streams)
-
-// list all existing stream CRDs
-listOptions := metav1.ListOptions{
-LabelSelector: c.labelsSet(true).String(),
-}
-streams, err := c.KubeClient.FabricEventStreams(c.Namespace).List(context.TODO(), listOptions)
-if err != nil {
-return fmt.Errorf("could not list of FabricEventStreams: %v", err)
-}
-
-for idx, appId := range appIds {
-streamExists := false
-
-// update stream when it exists and EventStreams array differs
-for _, stream := range streams.Items {
-if appId == stream.Spec.ApplicationId {
-streamExists = true
-desiredStreams := c.generateFabricEventStream(appId)
-if match, reason := sameStreams(stream.Spec.EventStreams, desiredStreams.Spec.EventStreams); !match {
-c.logger.Debugf("updating event streams: %s", reason)
-desiredStreams.ObjectMeta = stream.ObjectMeta
-err = c.updateStreams(desiredStreams)
-if err != nil {
-return fmt.Errorf("failed updating event stream %s: %v", stream.Name, err)
-}
-c.logger.Infof("event stream %q has been successfully updated", stream.Name)
-}
-continue
-}
-}
-
-if !streamExists {
-// check if there is any slot with the applicationId
-slotName := getSlotName(c.Spec.Streams[idx].Database, appId)
-if _, exists := createdSlots[slotName]; !exists {
-c.logger.Warningf("no slot %s with applicationId %s exists, skipping event stream creation", slotName, appId)
-continue
-}
-c.logger.Infof("event streams with applicationId %s do not exist, create it", appId)
-streamCRD, err := c.createStreams(appId)
-if err != nil {
-return fmt.Errorf("failed creating event streams with applicationId %s: %v", appId, err)
-}
-c.logger.Infof("event streams %q have been successfully created", streamCRD.Name)
-}
-}
-
-// check if there is any deletion
-for _, stream := range streams.Items {
-if !util.SliceContains(appIds, stream.Spec.ApplicationId) {
-c.logger.Infof("event streams with applicationId %s do not exist in the manifest, delete it", stream.Spec.ApplicationId)
-err := c.deleteStream(&stream)
-if err != nil {
-return fmt.Errorf("failed deleting event streams with applicationId %s: %v", stream.Spec.ApplicationId, err)
-}
-c.logger.Infof("event streams %q have been successfully deleted", stream.Name)
-}
-}
+func hasSlotsInSync(appId string, databaseSlots map[string]map[string]zalandov1.Slot, slotsToSync map[string]map[string]string) bool {
+allSlotsInSync := true
+for dbName, slots := range databaseSlots {
+for slotName := range slots {
+if slotName == getSlotName(dbName, appId) {
+if _, exists := slotsToSync[slotName]; !exists {
+allSlotsInSync = false
+}
+}
+}
+}
+
+return allSlotsInSync
+}
+
+func (c *Cluster) syncStream(appId string) error {
+streamExists := false
+// update stream when it exists and EventStreams array differs
+for _, stream := range c.Streams {
+if appId == stream.Spec.ApplicationId {
+streamExists = true
+desiredStreams := c.generateFabricEventStream(appId)
+if match, reason := sameStreams(stream.Spec.EventStreams, desiredStreams.Spec.EventStreams); !match {
+c.logger.Debugf("updating event streams with applicationId %s: %s", appId, reason)
+desiredStreams.ObjectMeta = stream.ObjectMeta
+updatedStream, err := c.updateStreams(desiredStreams)
+if err != nil {
+return fmt.Errorf("failed updating event streams %s with applicationId %s: %v", stream.Name, appId, err)
+}
+c.Streams[appId] = updatedStream
+c.logger.Infof("event streams %q with applicationId %s have been successfully updated", updatedStream.Name, appId)
+}
+continue
+}
+}
+
+if !streamExists {
+c.logger.Infof("event streams with applicationId %s do not exist, create it", appId)
+createdStream, err := c.createStreams(appId)
+if err != nil {
+return fmt.Errorf("failed creating event streams with applicationId %s: %v", appId, err)
+}
+c.logger.Infof("event streams %q have been successfully created", createdStream.Name)
+c.Streams[appId] = createdStream
+}
 
 return nil
 }
@@ -493,3 +483,23 @@ func sameStreams(curEventStreams, newEventStreams []zalandov1.EventStream) (matc
 
 return true, ""
 }
+
+func (c *Cluster) cleanupRemovedStreams(appIds []string) error {
+errors := make([]string, 0)
+for appId := range c.Streams {
+if !util.SliceContains(appIds, appId) {
+c.logger.Infof("event streams with applicationId %s do not exist in the manifest, delete it", appId)
+err := c.deleteStream(appId)
+if err != nil {
+errors = append(errors, fmt.Sprintf("failed deleting event streams with applicationId %s: %v", appId, err))
+}
+c.logger.Infof("event streams with applicationId %s have been successfully deleted", appId)
+}
+}
+
+if len(errors) > 0 {
+return fmt.Errorf("could not delete all removed event streams: %v", strings.Join(errors, `', '`))
+}
+
+return nil
+}
@@ -41,10 +41,6 @@ var (
 fesUser string = fmt.Sprintf("%s%s", constants.EventStreamSourceSlotPrefix, constants.UserRoleNameSuffix)
 slotName string = fmt.Sprintf("%s_%s_%s", constants.EventStreamSourceSlotPrefix, dbName, strings.Replace(appId, "-", "_", -1))
 
-fakeCreatedSlots map[string]map[string]string = map[string]map[string]string{
-slotName: {},
-}
-
 pg = acidv1.Postgresql{
 TypeMeta: metav1.TypeMeta{
 Kind: "Postgresql",
@@ -189,10 +185,95 @@ var (
 
 func TestGatherApplicationIds(t *testing.T) {
 testAppIds := []string{appId}
-appIds := gatherApplicationIds(pg.Spec.Streams)
+appIds := getDistinctApplicationIds(pg.Spec.Streams)
 
 if !util.IsEqualIgnoreOrder(testAppIds, appIds) {
-t.Errorf("gathered applicationIds do not match, expected %#v, got %#v", testAppIds, appIds)
+t.Errorf("list of applicationIds does not match, expected %#v, got %#v", testAppIds, appIds)
 }
 }
 
+func TestHasSlotsInSync(t *testing.T) {
+
+tests := []struct {
+subTest string
+expectedSlots map[string]map[string]zalandov1.Slot
+actualSlots map[string]map[string]string
+slotsInSync bool
+}{
+{
+subTest: "slots are in sync",
+expectedSlots: map[string]map[string]zalandov1.Slot{
+dbName: {
+slotName: zalandov1.Slot{
+Slot: map[string]string{
+"databases": dbName,
+"plugin": constants.EventStreamSourcePluginType,
+"type": "logical",
+},
+Publication: map[string]acidv1.StreamTable{
+"test1": acidv1.StreamTable{
+EventType: "stream-type-a",
+},
+},
+},
+},
+},
+actualSlots: map[string]map[string]string{
+slotName: map[string]string{
+"databases": dbName,
+"plugin": constants.EventStreamSourcePluginType,
+"type": "logical",
+},
+},
+slotsInSync: true,
+}, {
+subTest: "slots are not in sync",
+expectedSlots: map[string]map[string]zalandov1.Slot{
+dbName: {
+slotName: zalandov1.Slot{
+Slot: map[string]string{
+"databases": dbName,
+"plugin": constants.EventStreamSourcePluginType,
+"type": "logical",
+},
+Publication: map[string]acidv1.StreamTable{
+"test1": acidv1.StreamTable{
+EventType: "stream-type-a",
+},
+},
+},
+},
+"dbnotexists": {
+slotName: zalandov1.Slot{
+Slot: map[string]string{
+"databases": "dbnotexists",
+"plugin": constants.EventStreamSourcePluginType,
+"type": "logical",
+},
+Publication: map[string]acidv1.StreamTable{
+"test2": acidv1.StreamTable{
+EventType: "stream-type-b",
+},
+},
+},
+},
+},
+actualSlots: map[string]map[string]string{
+slotName: map[string]string{
+"databases": dbName,
+"plugin": constants.EventStreamSourcePluginType,
+"type": "logical",
+},
+},
+slotsInSync: false,
+},
+}
+
+for _, tt := range tests {
+result := hasSlotsInSync(appId, tt.expectedSlots, tt.actualSlots)
+if !result {
+t.Errorf("slots are not in sync, expected %#v, got %#v", tt.expectedSlots, tt.actualSlots)
+}
+}
+}
@@ -226,7 +307,7 @@ func TestGenerateFabricEventStream(t *testing.T) {
 assert.NoError(t, err)
 
 // create the streams
-err = cluster.createOrUpdateStreams(fakeCreatedSlots)
+err = cluster.syncStream(appId)
 assert.NoError(t, err)
 
 // compare generated stream with expected stream
@@ -252,7 +333,7 @@ func TestGenerateFabricEventStream(t *testing.T) {
 }
 
 // sync streams once again
-err = cluster.createOrUpdateStreams(fakeCreatedSlots)
+err = cluster.syncStream(appId)
 assert.NoError(t, err)
 
 streams, err = cluster.KubeClient.FabricEventStreams(namespace).List(context.TODO(), listOptions)
@@ -401,7 +482,7 @@ func TestUpdateFabricEventStream(t *testing.T) {
 assert.NoError(t, err)
 
 // now create the stream
-err = cluster.createOrUpdateStreams(fakeCreatedSlots)
+err = cluster.syncStream(appId)
 assert.NoError(t, err)
 
 // change specs of streams and patch CRD
@ -415,46 +496,25 @@ func TestUpdateFabricEventStream(t *testing.T) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
patchData, err := specPatch(pg.Spec)
|
|
||||||
assert.NoError(t, err)
|
|
||||||
|
|
||||||
pgPatched, err := cluster.KubeClient.Postgresqls(namespace).Patch(
|
|
||||||
context.TODO(), cluster.Name, types.MergePatchType, patchData, metav1.PatchOptions{}, "spec")
|
|
||||||
assert.NoError(t, err)
|
|
||||||
|
|
||||||
cluster.Postgresql.Spec = pgPatched.Spec
|
|
||||||
err = cluster.createOrUpdateStreams(fakeCreatedSlots)
|
|
||||||
assert.NoError(t, err)
|
|
||||||
|
|
||||||
// compare stream returned from API with expected stream
|
// compare stream returned from API with expected stream
|
||||||
listOptions := metav1.ListOptions{
|
listOptions := metav1.ListOptions{
|
||||||
LabelSelector: cluster.labelsSet(true).String(),
|
LabelSelector: cluster.labelsSet(true).String(),
|
||||||
}
|
}
|
||||||
streams, err := cluster.KubeClient.FabricEventStreams(namespace).List(context.TODO(), listOptions)
|
streams := patchPostgresqlStreams(t, cluster, &pg.Spec, listOptions)
|
||||||
assert.NoError(t, err)
|
|
||||||
|
|
||||||
result := cluster.generateFabricEventStream(appId)
|
result := cluster.generateFabricEventStream(appId)
|
||||||
if match, _ := sameStreams(streams.Items[0].Spec.EventStreams, result.Spec.EventStreams); !match {
|
if match, _ := sameStreams(streams.Items[0].Spec.EventStreams, result.Spec.EventStreams); !match {
|
||||||
t.Errorf("Malformed FabricEventStream after updating manifest, expected %#v, got %#v", streams.Items[0], result)
|
t.Errorf("Malformed FabricEventStream after updating manifest, expected %#v, got %#v", streams.Items[0], result)
|
||||||
}
|
}
|
||||||
|
|
||||||
// disable recovery
|
// disable recovery
|
||||||
for _, stream := range pg.Spec.Streams {
|
for idx, stream := range pg.Spec.Streams {
|
||||||
if stream.ApplicationId == appId {
|
if stream.ApplicationId == appId {
|
||||||
stream.EnableRecovery = util.False()
|
stream.EnableRecovery = util.False()
|
||||||
|
pg.Spec.Streams[idx] = stream
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
patchData, err = specPatch(pg.Spec)
|
|
||||||
assert.NoError(t, err)
|
|
||||||
|
|
||||||
pgPatched, err = cluster.KubeClient.Postgresqls(namespace).Patch(
|
|
||||||
context.TODO(), cluster.Name, types.MergePatchType, patchData, metav1.PatchOptions{}, "spec")
|
|
||||||
assert.NoError(t, err)
|
|
||||||
|
|
||||||
cluster.Postgresql.Spec = pgPatched.Spec
|
|
||||||
err = cluster.createOrUpdateStreams(fakeCreatedSlots)
|
|
||||||
assert.NoError(t, err)
|
|
||||||
|
|
||||||
|
streams = patchPostgresqlStreams(t, cluster, &pg.Spec, listOptions)
|
||||||
result = cluster.generateFabricEventStream(appId)
|
result = cluster.generateFabricEventStream(appId)
|
||||||
if match, _ := sameStreams(streams.Items[0].Spec.EventStreams, result.Spec.EventStreams); !match {
|
if match, _ := sameStreams(streams.Items[0].Spec.EventStreams, result.Spec.EventStreams); !match {
|
||||||
t.Errorf("Malformed FabricEventStream after disabling event recovery, expected %#v, got %#v", streams.Items[0], result)
|
t.Errorf("Malformed FabricEventStream after disabling event recovery, expected %#v, got %#v", streams.Items[0], result)
|
||||||
|
|
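The switch to `for idx, stream := range pg.Spec.Streams` together with the extra `pg.Spec.Streams[idx] = stream` line is needed because `range` yields a copy of each slice element in Go, so mutating only the loop variable would leave the manifest unchanged. A tiny standalone illustration (not taken from the diff):

package main

import "fmt"

type stream struct{ EnableRecovery *bool }

func main() {
	f := false
	streams := []stream{{}, {}}
	for idx, s := range streams {
		s.EnableRecovery = &f // mutates only the copy
		streams[idx] = s      // write-back makes the change visible in the slice
	}
	fmt.Println(streams[0].EnableRecovery != nil) // prints true
}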
@@ -464,16 +524,34 @@ func TestUpdateFabricEventStream(t *testing.T) {
 	cluster.KubeClient.CustomResourceDefinitionsGetter = mockClient.CustomResourceDefinitionsGetter
 
 	// remove streams from manifest
-	pgPatched.Spec.Streams = nil
+	pg.Spec.Streams = nil
 	pgUpdated, err := cluster.KubeClient.Postgresqls(namespace).Update(
-		context.TODO(), pgPatched, metav1.UpdateOptions{})
+		context.TODO(), &pg, metav1.UpdateOptions{})
 	assert.NoError(t, err)
 
-	cluster.Postgresql.Spec = pgUpdated.Spec
-	cluster.createOrUpdateStreams(fakeCreatedSlots)
+	appIds := getDistinctApplicationIds(pgUpdated.Spec.Streams)
+	cluster.cleanupRemovedStreams(appIds)
 
-	streamList, err := cluster.KubeClient.FabricEventStreams(namespace).List(context.TODO(), listOptions)
-	if len(streamList.Items) > 0 || err != nil {
+	streams, err = cluster.KubeClient.FabricEventStreams(namespace).List(context.TODO(), listOptions)
+	if len(streams.Items) > 0 || err != nil {
 		t.Errorf("stream resource has not been removed or unexpected error %v", err)
 	}
 }
+
+func patchPostgresqlStreams(t *testing.T, cluster *Cluster, pgSpec *acidv1.PostgresSpec, listOptions metav1.ListOptions) (streams *zalandov1.FabricEventStreamList) {
+	patchData, err := specPatch(pgSpec)
+	assert.NoError(t, err)
+
+	pgPatched, err := cluster.KubeClient.Postgresqls(namespace).Patch(
+		context.TODO(), cluster.Name, types.MergePatchType, patchData, metav1.PatchOptions{}, "spec")
+	assert.NoError(t, err)
+
+	cluster.Postgresql.Spec = pgPatched.Spec
+	err = cluster.syncStream(appId)
+	assert.NoError(t, err)
+
+	streams, err = cluster.KubeClient.FabricEventStreams(namespace).List(context.TODO(), listOptions)
+	assert.NoError(t, err)
+
+	return streams
+}
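The stream-removal path now goes through `getDistinctApplicationIds` plus `cleanupRemovedStreams` instead of a full create-or-update pass. As a rough sketch of what the renamed helper is expected to return, based only on its use in the test above (the real function lives in pkg/cluster/streams.go and may differ; the sketch assumes the golang.org/x/exp/slices helper already used elsewhere in the package):

// Sketch only: collects each ApplicationId from the manifest's stream
// entries exactly once, preserving first-seen order.
func getDistinctApplicationIds(streams []acidv1.Stream) []string {
	appIds := make([]string, 0, len(streams))
	for _, stream := range streams {
		if !slices.Contains(appIds, stream.ApplicationId) {
			appIds = append(appIds, stream.ApplicationId)
		}
	}
	return appIds
}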
@@ -15,6 +15,7 @@ import (
 	"github.com/zalando/postgres-operator/pkg/util"
 	"github.com/zalando/postgres-operator/pkg/util/constants"
 	"github.com/zalando/postgres-operator/pkg/util/k8sutil"
+	"golang.org/x/exp/maps"
 	"golang.org/x/exp/slices"
 	batchv1 "k8s.io/api/batch/v1"
 	v1 "k8s.io/api/core/v1"
@@ -80,6 +81,10 @@ func (c *Cluster) Sync(newSpec *acidv1.Postgresql) error {
 		return err
 	}
 
+	if err = c.syncPatroniResources(); err != nil {
+		c.logger.Errorf("could not sync Patroni resources: %v", err)
+	}
+
 	// sync volume may already transition volumes to gp3, if iops/throughput or type is specified
 	if err = c.syncVolumes(); err != nil {
 		return err
@@ -173,6 +178,163 @@ func (c *Cluster) syncFinalizer() error {
 	return nil
 }
 
+func (c *Cluster) syncPatroniResources() error {
+	errors := make([]string, 0)
+
+	if err := c.syncPatroniService(); err != nil {
+		errors = append(errors, fmt.Sprintf("could not sync %s service: %v", Patroni, err))
+	}
+
+	for _, suffix := range patroniObjectSuffixes {
+		if c.patroniKubernetesUseConfigMaps() {
+			if err := c.syncPatroniConfigMap(suffix); err != nil {
+				errors = append(errors, fmt.Sprintf("could not sync %s Patroni config map: %v", suffix, err))
+			}
+		} else {
+			if err := c.syncPatroniEndpoint(suffix); err != nil {
+				errors = append(errors, fmt.Sprintf("could not sync %s Patroni endpoint: %v", suffix, err))
+			}
+		}
+	}
+
+	if len(errors) > 0 {
+		return fmt.Errorf("%v", strings.Join(errors, `', '`))
+	}
+
+	return nil
+}
+
+func (c *Cluster) syncPatroniConfigMap(suffix string) error {
+	var (
+		cm  *v1.ConfigMap
+		err error
+	)
+	configMapName := fmt.Sprintf("%s-%s", c.Name, suffix)
+	c.logger.Debugf("syncing %s config map", configMapName)
+	c.setProcessName("syncing %s config map", configMapName)
+
+	if cm, err = c.KubeClient.ConfigMaps(c.Namespace).Get(context.TODO(), configMapName, metav1.GetOptions{}); err == nil {
+		c.PatroniConfigMaps[suffix] = cm
+		desiredOwnerRefs := c.ownerReferences()
+		if !reflect.DeepEqual(cm.ObjectMeta.OwnerReferences, desiredOwnerRefs) {
+			c.logger.Infof("new %s config map's owner references do not match the current ones", configMapName)
+			cm.ObjectMeta.OwnerReferences = desiredOwnerRefs
+			c.setProcessName("updating %s config map", configMapName)
+			cm, err = c.KubeClient.ConfigMaps(c.Namespace).Update(context.TODO(), cm, metav1.UpdateOptions{})
+			if err != nil {
+				return fmt.Errorf("could not update %s config map: %v", configMapName, err)
+			}
+			c.PatroniConfigMaps[suffix] = cm
+		}
+		annotations := make(map[string]string)
+		maps.Copy(annotations, cm.Annotations)
+		desiredAnnotations := c.annotationsSet(cm.Annotations)
+		if changed, _ := c.compareAnnotations(annotations, desiredAnnotations); changed {
+			patchData, err := metaAnnotationsPatch(desiredAnnotations)
+			if err != nil {
+				return fmt.Errorf("could not form patch for %s config map: %v", configMapName, err)
+			}
+			cm, err = c.KubeClient.ConfigMaps(c.Namespace).Patch(context.TODO(), configMapName, types.MergePatchType, []byte(patchData), metav1.PatchOptions{})
+			if err != nil {
+				return fmt.Errorf("could not patch annotations of %s config map: %v", configMapName, err)
+			}
+			c.PatroniConfigMaps[suffix] = cm
+		}
+	} else if !k8sutil.ResourceNotFound(err) {
+		// if config map does not exist yet, Patroni should create it
+		return fmt.Errorf("could not get %s config map: %v", configMapName, err)
+	}
+
+	return nil
+}
+
+func (c *Cluster) syncPatroniEndpoint(suffix string) error {
+	var (
+		ep  *v1.Endpoints
+		err error
+	)
+	endpointName := fmt.Sprintf("%s-%s", c.Name, suffix)
+	c.logger.Debugf("syncing %s endpoint", endpointName)
+	c.setProcessName("syncing %s endpoint", endpointName)
+
+	if ep, err = c.KubeClient.Endpoints(c.Namespace).Get(context.TODO(), endpointName, metav1.GetOptions{}); err == nil {
+		c.PatroniEndpoints[suffix] = ep
+		desiredOwnerRefs := c.ownerReferences()
+		if !reflect.DeepEqual(ep.ObjectMeta.OwnerReferences, desiredOwnerRefs) {
+			c.logger.Infof("new %s endpoint's owner references do not match the current ones", endpointName)
+			ep.ObjectMeta.OwnerReferences = desiredOwnerRefs
+			c.setProcessName("updating %s endpoint", endpointName)
+			ep, err = c.KubeClient.Endpoints(c.Namespace).Update(context.TODO(), ep, metav1.UpdateOptions{})
+			if err != nil {
+				return fmt.Errorf("could not update %s endpoint: %v", endpointName, err)
+			}
+			c.PatroniEndpoints[suffix] = ep
+		}
+		annotations := make(map[string]string)
+		maps.Copy(annotations, ep.Annotations)
+		desiredAnnotations := c.annotationsSet(ep.Annotations)
+		if changed, _ := c.compareAnnotations(annotations, desiredAnnotations); changed {
+			patchData, err := metaAnnotationsPatch(desiredAnnotations)
+			if err != nil {
+				return fmt.Errorf("could not form patch for %s endpoint: %v", endpointName, err)
+			}
+			ep, err = c.KubeClient.Endpoints(c.Namespace).Patch(context.TODO(), endpointName, types.MergePatchType, []byte(patchData), metav1.PatchOptions{})
+			if err != nil {
+				return fmt.Errorf("could not patch annotations of %s endpoint: %v", endpointName, err)
+			}
+			c.PatroniEndpoints[suffix] = ep
+		}
+	} else if !k8sutil.ResourceNotFound(err) {
+		// if endpoint does not exist yet, Patroni should create it
+		return fmt.Errorf("could not get %s endpoint: %v", endpointName, err)
+	}
+
+	return nil
+}
+
+func (c *Cluster) syncPatroniService() error {
+	var (
+		svc *v1.Service
+		err error
+	)
+	serviceName := fmt.Sprintf("%s-%s", c.Name, Patroni)
+	c.setProcessName("syncing %s service", serviceName)
+
+	if svc, err = c.KubeClient.Services(c.Namespace).Get(context.TODO(), serviceName, metav1.GetOptions{}); err == nil {
+		c.Services[Patroni] = svc
+		desiredOwnerRefs := c.ownerReferences()
+		if !reflect.DeepEqual(svc.ObjectMeta.OwnerReferences, desiredOwnerRefs) {
+			c.logger.Infof("new %s service's owner references do not match the current ones", serviceName)
+			svc.ObjectMeta.OwnerReferences = desiredOwnerRefs
+			c.setProcessName("updating %s service", serviceName)
+			svc, err = c.KubeClient.Services(c.Namespace).Update(context.TODO(), svc, metav1.UpdateOptions{})
+			if err != nil {
+				return fmt.Errorf("could not update %s service: %v", serviceName, err)
+			}
+			c.Services[Patroni] = svc
+		}
+		annotations := make(map[string]string)
+		maps.Copy(annotations, svc.Annotations)
+		desiredAnnotations := c.annotationsSet(svc.Annotations)
+		if changed, _ := c.compareAnnotations(annotations, desiredAnnotations); changed {
+			patchData, err := metaAnnotationsPatch(desiredAnnotations)
+			if err != nil {
+				return fmt.Errorf("could not form patch for %s service: %v", serviceName, err)
+			}
+			svc, err = c.KubeClient.Services(c.Namespace).Patch(context.TODO(), serviceName, types.MergePatchType, []byte(patchData), metav1.PatchOptions{})
+			if err != nil {
+				return fmt.Errorf("could not patch annotations of %s service: %v", serviceName, err)
+			}
+			c.Services[Patroni] = svc
+		}
+	} else if !k8sutil.ResourceNotFound(err) {
+		// if config service does not exist yet, Patroni should create it
+		return fmt.Errorf("could not get %s service: %v", serviceName, err)
+	}
+
+	return nil
+}
+
 func (c *Cluster) syncServices() error {
 	for _, role := range []PostgresRole{Master, Replica} {
 		c.logger.Debugf("syncing %s service", role)
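All three new sync functions follow the same pattern: fetch the Patroni-managed object, overwrite its owner references with c.ownerReferences() when they differ, then merge-patch the inherited annotations. For readers unfamiliar with owner references, the snippet below shows roughly the kind of reference the operator is expected to attach; the concrete field values are assumptions for illustration, not copied from the operator.

package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
)

// ownerReferenceFor is an illustrative stand-in for c.ownerReferences():
// a single controller reference pointing back at the postgresql custom
// resource, which is what enables cascading deletes and GitOps ownership
// tracking for the Patroni-created objects.
func ownerReferenceFor(clusterName string, uid types.UID) []metav1.OwnerReference {
	controller := true
	return []metav1.OwnerReference{{
		APIVersion: "acid.zalan.do/v1", // assumed group/version of the postgresql CRD
		Kind:       "postgresql",
		Name:       clusterName,
		UID:        uid, // UID of the postgresql custom resource
		Controller: &controller,
	}}
}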
@@ -211,7 +373,6 @@ func (c *Cluster) syncService(role PostgresRole) error {
 		return fmt.Errorf("could not get %s service: %v", role, err)
 	}
 	// no existing service, create new one
-	c.Services[role] = nil
 	c.logger.Infof("could not find the cluster's %s service", role)
 
 	if svc, err = c.createService(role); err == nil {
@@ -236,7 +397,7 @@ func (c *Cluster) syncEndpoint(role PostgresRole) error {
 	)
 	c.setProcessName("syncing %s endpoint", role)
 
-	if ep, err = c.KubeClient.Endpoints(c.Namespace).Get(context.TODO(), c.endpointName(role), metav1.GetOptions{}); err == nil {
+	if ep, err = c.KubeClient.Endpoints(c.Namespace).Get(context.TODO(), c.serviceName(role), metav1.GetOptions{}); err == nil {
 		desiredEp := c.generateEndpoint(role, ep.Subsets)
 		// if owner references differ we update which would also change annotations
 		if !reflect.DeepEqual(ep.ObjectMeta.OwnerReferences, desiredEp.ObjectMeta.OwnerReferences) {
@@ -252,7 +413,7 @@ func (c *Cluster) syncEndpoint(role PostgresRole) error {
 			if err != nil {
 				return fmt.Errorf("could not form patch for %s endpoint: %v", role, err)
 			}
-			ep, err = c.KubeClient.Endpoints(c.Namespace).Patch(context.TODO(), c.endpointName(role), types.MergePatchType, []byte(patchData), metav1.PatchOptions{})
+			ep, err = c.KubeClient.Endpoints(c.Namespace).Patch(context.TODO(), c.serviceName(role), types.MergePatchType, []byte(patchData), metav1.PatchOptions{})
 			if err != nil {
 				return fmt.Errorf("could not patch annotations of %s endpoint: %v", role, err)
 			}
@@ -265,7 +426,6 @@ func (c *Cluster) syncEndpoint(role PostgresRole) error {
 		return fmt.Errorf("could not get %s endpoint: %v", role, err)
 	}
 	// no existing endpoint, create new one
-	c.Endpoints[role] = nil
 	c.logger.Infof("could not find the cluster's %s endpoint", role)
 
 	if ep, err = c.createEndpoint(role); err == nil {
@@ -275,7 +435,7 @@ func (c *Cluster) syncEndpoint(role PostgresRole) error {
 			return fmt.Errorf("could not create missing %s endpoint: %v", role, err)
 		}
 		c.logger.Infof("%s endpoint %q already exists", role, util.NameFromMeta(ep.ObjectMeta))
-		if ep, err = c.KubeClient.Endpoints(c.Namespace).Get(context.TODO(), c.endpointName(role), metav1.GetOptions{}); err != nil {
+		if ep, err = c.KubeClient.Endpoints(c.Namespace).Get(context.TODO(), c.serviceName(role), metav1.GetOptions{}); err != nil {
 			return fmt.Errorf("could not fetch existing %s endpoint: %v", role, err)
 		}
 	}
@@ -307,7 +467,6 @@ func (c *Cluster) syncPodDisruptionBudget(isUpdate bool) error {
 		return fmt.Errorf("could not get pod disruption budget: %v", err)
 	}
 	// no existing pod disruption budget, create new one
-	c.PodDisruptionBudget = nil
 	c.logger.Infof("could not find the cluster's pod disruption budget")
 
 	if pdb, err = c.createPodDisruptionBudget(); err != nil {
@@ -349,7 +508,6 @@ func (c *Cluster) syncStatefulSet() error {
 
 	if err != nil {
 		// statefulset does not exist, try to re-create it
-		c.Statefulset = nil
 		c.logger.Infof("cluster's statefulset does not exist")
 
 		sset, err = c.createStatefulSet()
@@ -714,7 +872,7 @@ func (c *Cluster) checkAndSetGlobalPostgreSQLConfiguration(pod *v1.Pod, effectiv
 	// check if specified slots exist in config and if they differ
 	for slotName, desiredSlot := range desiredPatroniConfig.Slots {
 		// only add slots specified in manifest to c.replicationSlots
-		for manifestSlotName, _ := range c.Spec.Patroni.Slots {
+		for manifestSlotName := range c.Spec.Patroni.Slots {
 			if manifestSlotName == slotName {
 				c.replicationSlots[slotName] = desiredSlot
 			}
@@ -1447,6 +1605,7 @@ func (c *Cluster) syncLogicalBackupJob() error {
 				return fmt.Errorf("could not patch annotations of the logical backup job %q: %v", jobName, err)
 			}
 		}
+		c.LogicalBackupJob = desiredJob
 		return nil
 	}
 	if !k8sutil.ResourceNotFound(err) {
@@ -17,6 +17,7 @@ const (
 	// spilo roles
 	Master  PostgresRole = "master"
 	Replica PostgresRole = "replica"
+	Patroni PostgresRole = "config"
 
 	// roles returned by Patroni cluster endpoint
 	Leader PostgresRole = "leader"
@@ -16,12 +16,14 @@ import (
 	"github.com/zalando/postgres-operator/mocks"
 	acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1"
 	fakeacidv1 "github.com/zalando/postgres-operator/pkg/generated/clientset/versioned/fake"
+	"github.com/zalando/postgres-operator/pkg/util"
 	"github.com/zalando/postgres-operator/pkg/util/config"
 	"github.com/zalando/postgres-operator/pkg/util/k8sutil"
 	"github.com/zalando/postgres-operator/pkg/util/patroni"
 	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
+	"k8s.io/apimachinery/pkg/types"
 	k8sFake "k8s.io/client-go/kubernetes/fake"
 )
@@ -49,6 +51,7 @@ func newFakeK8sAnnotationsClient() (k8sutil.KubernetesClient, *k8sFake.Clientset
 		PersistentVolumeClaimsGetter: clientSet.CoreV1(),
 		PersistentVolumesGetter:      clientSet.CoreV1(),
 		EndpointsGetter:              clientSet.CoreV1(),
+		ConfigMapsGetter:             clientSet.CoreV1(),
 		PodsGetter:                   clientSet.CoreV1(),
 		DeploymentsGetter:            clientSet.AppsV1(),
 		CronJobsGetter:               clientSet.BatchV1(),
@@ -66,12 +69,8 @@ func checkResourcesInheritedAnnotations(cluster *Cluster, resultAnnotations map[
 	clusterOptions := clusterLabelsOptions(cluster)
 	// helper functions
 	containsAnnotations := func(expected map[string]string, actual map[string]string, objName string, objType string) error {
-		if expected == nil {
-			if len(actual) != 0 {
-				return fmt.Errorf("%s %v expected not to have any annotations, got: %#v", objType, objName, actual)
-			}
-		} else if !(reflect.DeepEqual(expected, actual)) {
-			return fmt.Errorf("%s %v expected annotations: %#v, got: %#v", objType, objName, expected, actual)
+		if !util.MapContains(actual, expected) {
+			return fmt.Errorf("%s %v expected annotations %#v to be contained in %#v", objType, objName, expected, actual)
 		}
 		return nil
 	}
@@ -183,7 +182,7 @@ func checkResourcesInheritedAnnotations(cluster *Cluster, resultAnnotations map[
 			return err
 		}
 		for _, cronJob := range cronJobList.Items {
-			if err := containsAnnotations(updateAnnotations(annotations), cronJob.Annotations, cronJob.ObjectMeta.Name, "Logical backup cron job"); err != nil {
+			if err := containsAnnotations(annotations, cronJob.Annotations, cronJob.ObjectMeta.Name, "Logical backup cron job"); err != nil {
 				return err
 			}
 			if err := containsAnnotations(updateAnnotations(annotations), cronJob.Spec.JobTemplate.Spec.Template.Annotations, cronJob.Name, "Logical backup cron job pod template"); err != nil {
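The helper now checks containment instead of strict equality, i.e. inherited annotations must be present on each object but extra annotations no longer fail the test. A sketch of the semantics this assumes from util.MapContains(actual, expected) follows; it is an illustration, not the util package's code.

package example

// mapContains mirrors the assumed semantics of util.MapContains: every
// key/value pair in expected must also appear in actual, while additional
// keys in actual are tolerated.
func mapContains(actual, expected map[string]string) bool {
	for key, value := range expected {
		if got, ok := actual[key]; !ok || got != value {
			return false
		}
	}
	return true
}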
@@ -219,8 +218,21 @@ func checkResourcesInheritedAnnotations(cluster *Cluster, resultAnnotations map[
 		return nil
 	}
 
+	checkConfigMaps := func(annotations map[string]string) error {
+		cmList, err := cluster.KubeClient.ConfigMaps(namespace).List(context.TODO(), clusterOptions)
+		if err != nil {
+			return err
+		}
+		for _, cm := range cmList.Items {
+			if err := containsAnnotations(annotations, cm.Annotations, cm.ObjectMeta.Name, "ConfigMap"); err != nil {
+				return err
+			}
+		}
+		return nil
+	}
+
 	checkFuncs := []func(map[string]string) error{
-		checkSts, checkPods, checkSvc, checkPdb, checkPooler, checkCronJob, checkPvc, checkSecrets, checkEndpoints,
+		checkSts, checkPods, checkSvc, checkPdb, checkPooler, checkCronJob, checkPvc, checkSecrets, checkEndpoints, checkConfigMaps,
 	}
 	for _, f := range checkFuncs {
 		if err := f(resultAnnotations); err != nil {
@@ -281,6 +293,7 @@ func newInheritedAnnotationsCluster(client k8sutil.KubernetesClient) (*Cluster,
 		OpConfig: config.Config{
 			PatroniAPICheckInterval: time.Duration(1),
 			PatroniAPICheckTimeout:  time.Duration(5),
+			KubernetesUseConfigMaps: true,
 			ConnectionPooler: config.ConnectionPooler{
 				ConnectionPoolerDefaultCPURequest: "100m",
 				ConnectionPoolerDefaultCPULimit:   "100m",
@@ -343,11 +356,60 @@ func newInheritedAnnotationsCluster(client k8sutil.KubernetesClient) (*Cluster,
 		}
 	}
 
+	// resources which Patroni creates
+	if err = createPatroniResources(cluster); err != nil {
+		return nil, err
+	}
+
 	return cluster, nil
 }
 
+func createPatroniResources(cluster *Cluster) error {
+	patroniService := cluster.generateService(Replica, &pg.Spec)
+	patroniService.ObjectMeta.Name = cluster.serviceName(Patroni)
+	_, err := cluster.KubeClient.Services(namespace).Create(context.TODO(), patroniService, metav1.CreateOptions{})
+	if err != nil {
+		return err
+	}
+
+	for _, suffix := range patroniObjectSuffixes {
+		metadata := metav1.ObjectMeta{
+			Name:      fmt.Sprintf("%s-%s", clusterName, suffix),
+			Namespace: namespace,
+			Annotations: map[string]string{
+				"initialize": "123456789",
+			},
+			Labels: cluster.labelsSet(false),
+		}
+
+		if cluster.OpConfig.KubernetesUseConfigMaps {
+			configMap := v1.ConfigMap{
+				ObjectMeta: metadata,
+			}
+			_, err := cluster.KubeClient.ConfigMaps(namespace).Create(context.TODO(), &configMap, metav1.CreateOptions{})
+			if err != nil {
+				return err
+			}
+		} else {
+			endpoints := v1.Endpoints{
+				ObjectMeta: metadata,
+			}
+			_, err := cluster.KubeClient.Endpoints(namespace).Create(context.TODO(), &endpoints, metav1.CreateOptions{})
+			if err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
+
 func annotateResources(cluster *Cluster) error {
 	clusterOptions := clusterLabelsOptions(cluster)
+	patchData, err := metaAnnotationsPatch(externalAnnotations)
+	if err != nil {
+		return err
+	}
+
 	stsList, err := cluster.KubeClient.StatefulSets(namespace).List(context.TODO(), clusterOptions)
 	if err != nil {
@@ -355,7 +417,7 @@ func annotateResources(cluster *Cluster) error {
 	}
 	for _, sts := range stsList.Items {
 		sts.Annotations = externalAnnotations
-		if _, err = cluster.KubeClient.StatefulSets(namespace).Update(context.TODO(), &sts, metav1.UpdateOptions{}); err != nil {
+		if _, err = cluster.KubeClient.StatefulSets(namespace).Patch(context.TODO(), sts.Name, types.MergePatchType, []byte(patchData), metav1.PatchOptions{}); err != nil {
 			return err
 		}
 	}
@@ -366,7 +428,7 @@ func annotateResources(cluster *Cluster) error {
 	}
 	for _, pod := range podList.Items {
 		pod.Annotations = externalAnnotations
-		if _, err = cluster.KubeClient.Pods(namespace).Update(context.TODO(), &pod, metav1.UpdateOptions{}); err != nil {
+		if _, err = cluster.KubeClient.Pods(namespace).Patch(context.TODO(), pod.Name, types.MergePatchType, []byte(patchData), metav1.PatchOptions{}); err != nil {
 			return err
 		}
 	}
@@ -377,7 +439,7 @@ func annotateResources(cluster *Cluster) error {
 	}
 	for _, svc := range svcList.Items {
 		svc.Annotations = externalAnnotations
-		if _, err = cluster.KubeClient.Services(namespace).Update(context.TODO(), &svc, metav1.UpdateOptions{}); err != nil {
+		if _, err = cluster.KubeClient.Services(namespace).Patch(context.TODO(), svc.Name, types.MergePatchType, []byte(patchData), metav1.PatchOptions{}); err != nil {
 			return err
 		}
 	}
@@ -388,7 +450,19 @@ func annotateResources(cluster *Cluster) error {
 	}
 	for _, pdb := range pdbList.Items {
 		pdb.Annotations = externalAnnotations
-		_, err = cluster.KubeClient.PodDisruptionBudgets(namespace).Update(context.TODO(), &pdb, metav1.UpdateOptions{})
+		_, err = cluster.KubeClient.PodDisruptionBudgets(namespace).Patch(context.TODO(), pdb.Name, types.MergePatchType, []byte(patchData), metav1.PatchOptions{})
+		if err != nil {
+			return err
+		}
+	}
+
+	cronJobList, err := cluster.KubeClient.CronJobs(namespace).List(context.TODO(), clusterOptions)
+	if err != nil {
+		return err
+	}
+	for _, cronJob := range cronJobList.Items {
+		cronJob.Annotations = externalAnnotations
+		_, err = cluster.KubeClient.CronJobs(namespace).Patch(context.TODO(), cronJob.Name, types.MergePatchType, []byte(patchData), metav1.PatchOptions{})
 		if err != nil {
 			return err
 		}
@@ -400,7 +474,7 @@ func annotateResources(cluster *Cluster) error {
 	}
 	for _, pvc := range pvcList.Items {
 		pvc.Annotations = externalAnnotations
-		if _, err = cluster.KubeClient.PersistentVolumeClaims(namespace).Update(context.TODO(), &pvc, metav1.UpdateOptions{}); err != nil {
+		if _, err = cluster.KubeClient.PersistentVolumeClaims(namespace).Patch(context.TODO(), pvc.Name, types.MergePatchType, []byte(patchData), metav1.PatchOptions{}); err != nil {
 			return err
 		}
 	}
@@ -411,7 +485,7 @@ func annotateResources(cluster *Cluster) error {
 			return err
 		}
 		deploy.Annotations = externalAnnotations
-		if _, err = cluster.KubeClient.Deployments(namespace).Update(context.TODO(), deploy, metav1.UpdateOptions{}); err != nil {
+		if _, err = cluster.KubeClient.Deployments(namespace).Patch(context.TODO(), deploy.Name, types.MergePatchType, []byte(patchData), metav1.PatchOptions{}); err != nil {
 			return err
 		}
 	}
@@ -422,7 +496,7 @@ func annotateResources(cluster *Cluster) error {
 	}
 	for _, secret := range secrets.Items {
 		secret.Annotations = externalAnnotations
-		if _, err = cluster.KubeClient.Secrets(namespace).Update(context.TODO(), &secret, metav1.UpdateOptions{}); err != nil {
+		if _, err = cluster.KubeClient.Secrets(namespace).Patch(context.TODO(), secret.Name, types.MergePatchType, []byte(patchData), metav1.PatchOptions{}); err != nil {
 			return err
 		}
 	}
@@ -433,10 +507,22 @@ func annotateResources(cluster *Cluster) error {
 	}
 	for _, ep := range endpoints.Items {
 		ep.Annotations = externalAnnotations
-		if _, err = cluster.KubeClient.Endpoints(namespace).Update(context.TODO(), &ep, metav1.UpdateOptions{}); err != nil {
+		if _, err = cluster.KubeClient.Endpoints(namespace).Patch(context.TODO(), ep.Name, types.MergePatchType, []byte(patchData), metav1.PatchOptions{}); err != nil {
 			return err
 		}
 	}
+
+	configMaps, err := cluster.KubeClient.ConfigMaps(namespace).List(context.TODO(), clusterOptions)
+	if err != nil {
+		return err
+	}
+	for _, cm := range configMaps.Items {
+		cm.Annotations = externalAnnotations
+		if _, err = cluster.KubeClient.ConfigMaps(namespace).Patch(context.TODO(), cm.Name, types.MergePatchType, []byte(patchData), metav1.PatchOptions{}); err != nil {
+			return err
+		}
+	}
+
 	return nil
 }
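annotateResources now applies a JSON merge patch that touches only metadata.annotations instead of sending full object updates, which avoids clobbering fields on the stored objects. Below is a sketch of the kind of payload metaAnnotationsPatch presumably builds for types.MergePatchType; this is an assumption for illustration, not the operator's code.

package example

import "encoding/json"

// annotationsMergePatch builds a merge patch that only sets
// metadata.annotations, leaving every other field of the target object
// untouched when applied with types.MergePatchType.
func annotationsMergePatch(annotations map[string]string) ([]byte, error) {
	return json.Marshal(map[string]interface{}{
		"metadata": map[string]interface{}{
			"annotations": annotations,
		},
	})
}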
@@ -503,7 +589,18 @@ func TestInheritedAnnotations(t *testing.T) {
 	err = checkResourcesInheritedAnnotations(cluster, result)
 	assert.NoError(t, err)
 
-	// 3. Existing annotations (should not be removed)
+	// 3. Change from ConfigMaps to Endpoints
+	err = cluster.deletePatroniResources()
+	assert.NoError(t, err)
+	cluster.OpConfig.KubernetesUseConfigMaps = false
+	err = createPatroniResources(cluster)
+	assert.NoError(t, err)
+	err = cluster.Sync(newSpec.DeepCopy())
+	assert.NoError(t, err)
+	err = checkResourcesInheritedAnnotations(cluster, result)
+	assert.NoError(t, err)
+
+	// 4. Existing annotations (should not be removed)
 	err = annotateResources(cluster)
 	assert.NoError(t, err)
 	maps.Copy(result, externalAnnotations)