add code to sync config maps

This commit is contained in:
Felix Kunde 2022-03-29 15:25:59 +02:00
parent aa8b4bf365
commit d643ad5e21
12 changed files with 409 additions and 35 deletions

View File

@ -9,7 +9,7 @@ metadata:
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
rules:
# Patroni needs to watch and manage endpoints
# Patroni needs to watch and manage config maps or endpoints
{{- if toString .Values.configGeneral.kubernetes_use_configmaps | eq "true" }}
- apiGroups:
- ""
@ -24,12 +24,6 @@ rules:
- patch
- update
- watch
- apiGroups:
- ""
resources:
- endpoints
verbs:
- get
{{- else }}
- apiGroups:
- ""

View File

@ -89,12 +89,6 @@ rules:
- patch
- update
- watch
- apiGroups:
- ""
resources:
- endpoints
verbs:
- get
{{- else }}
# to read configuration from ConfigMaps
- apiGroups:

View File

@ -37,7 +37,7 @@ The Postgres Operator can be deployed in the following ways:
* Kustomization
* Helm chart
### Manual deployment setup
### Manual deployment setup on Kubernetes
The Postgres Operator can be installed simply by applying yaml manifests. Note,
we provide the `/manifests` directory as an example only; you should consider
@ -71,6 +71,18 @@ manifest.
./run_operator_locally.sh
```
### Manual deployment setup on OpenShift
To install the Postgres Operator in OpenShift you have to change the config
parameter `kubernetes_use_configmaps` to `"true"`. Otherwise, the operator
and Patroni will store leader and config keys in `Endpoints`, which are not
supported in OpenShift. This also requires a slightly different set of rules
for the `postgres-operator` and `postgres-pod` cluster roles.
```bash
oc create -f manifests/operator-service-account-rbac-openshift.yaml
```
### Helm chart
Alternatively, the operator can be installed by using the provided [Helm](https://helm.sh/)

View File

@ -1759,6 +1759,8 @@ class EndToEndTestCase(unittest.TestCase):
self.eventuallyEqual(lambda: len(k8s.api.custom_objects_api.list_namespaced_custom_object(
"acid.zalan.do", "v1", "default", "postgresqls", label_selector="cluster-name=acid-minimal-cluster")["items"]), 0, "Manifest not deleted")
self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
# check if everything has been deleted
self.eventuallyEqual(lambda: k8s.count_pods_with_label(cluster_label), 0, "Pods not deleted")
self.eventuallyEqual(lambda: k8s.count_services_with_label(cluster_label), 0, "Service not deleted")

View File

@ -63,13 +63,13 @@ data:
# etcd_host: ""
external_traffic_policy: "Cluster"
# gcp_credentials: ""
# kubernetes_use_configmaps: "false"
# ignored_annotations: ""
# infrastructure_roles_secret_name: "postgresql-infrastructure-roles"
# infrastructure_roles_secrets: "secretname:monitoring-roles,userkey:user,passwordkey:password,rolekey:inrole"
# inherited_annotations: owned-by
# inherited_labels: application,environment
# kube_iam_role: ""
# kubernetes_use_configmaps: "false"
# log_s3_bucket: ""
logical_backup_docker_image: "registry.opensource.zalan.do/acid/logical-backup:v1.7.1"
# logical_backup_google_application_credentials: ""

View File

@ -0,0 +1,283 @@
# RBAC manifest for running the Postgres Operator on OpenShift, where the
# config parameter kubernetes_use_configmaps must be "true": the operator and
# Patroni store their leader/config keys in ConfigMaps instead of Endpoints,
# so (unlike the default manifest) no Endpoints permissions are granted here.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: postgres-operator
  namespace: default
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: postgres-operator
rules:
# all verbs allowed for custom operator resources
- apiGroups:
  - acid.zalan.do
  resources:
  - postgresqls
  - postgresqls/status
  - operatorconfigurations
  verbs:
  - create
  - delete
  - deletecollection
  - get
  - list
  - patch
  - update
  - watch
# operator only reads PostgresTeams
- apiGroups:
  - acid.zalan.do
  resources:
  - postgresteams
  verbs:
  - get
  - list
  - watch
# all verbs allowed for event streams (Zalando-internal feature)
# - apiGroups:
#   - zalando.org
#   resources:
#   - fabriceventstreams
#   verbs:
#   - create
#   - delete
#   - deletecollection
#   - get
#   - list
#   - patch
#   - update
#   - watch
# to create or get/update CRDs when starting up
- apiGroups:
  - apiextensions.k8s.io
  resources:
  - customresourcedefinitions
  verbs:
  - create
  - get
  - patch
  - update
# to read configuration and manage ConfigMaps used by Patroni
- apiGroups:
  - ""
  resources:
  - configmaps
  verbs:
  - create
  - delete
  - deletecollection
  - get
  - list
  - patch
  - update
  - watch
# to send events to the CRs
- apiGroups:
  - ""
  resources:
  - events
  verbs:
  - create
  - get
  - list
  - patch
  - update
  - watch
# to CRUD secrets for database access
- apiGroups:
  - ""
  resources:
  - secrets
  verbs:
  - create
  - delete
  - get
  - update
# to check nodes for node readiness label
- apiGroups:
  - ""
  resources:
  - nodes
  verbs:
  - get
  - list
  - watch
# to read or delete existing PVCs. Creation via StatefulSet
- apiGroups:
  - ""
  resources:
  - persistentvolumeclaims
  verbs:
  - delete
  - get
  - list
  - patch
  - update
# to read existing PVs. Creation should be done via dynamic provisioning
- apiGroups:
  - ""
  resources:
  - persistentvolumes
  verbs:
  - get
  - list
  - update  # only for resizing AWS volumes
# to watch Spilo pods and do rolling updates. Creation via StatefulSet
- apiGroups:
  - ""
  resources:
  - pods
  verbs:
  - delete
  - get
  - list
  - patch
  - update
  - watch
# to resize the filesystem in Spilo pods when increasing volume size
- apiGroups:
  - ""
  resources:
  - pods/exec
  verbs:
  - create
# to CRUD services to point to Postgres cluster instances
- apiGroups:
  - ""
  resources:
  - services
  verbs:
  - create
  - delete
  - get
  - patch
  - update
# to CRUD the StatefulSet which controls the Postgres cluster instances
- apiGroups:
  - apps
  resources:
  - statefulsets
  - deployments
  verbs:
  - create
  - delete
  - get
  - list
  - patch
# to CRUD cron jobs for logical backups
- apiGroups:
  - batch
  resources:
  - cronjobs
  verbs:
  - create
  - delete
  - get
  - list
  - patch
  - update
# to get namespaces operator resources can run in
- apiGroups:
  - ""
  resources:
  - namespaces
  verbs:
  - get
# to define PDBs. Update happens via delete/create
- apiGroups:
  - policy
  resources:
  - poddisruptionbudgets
  verbs:
  - create
  - delete
  - get
# to create ServiceAccounts in each namespace the operator watches
- apiGroups:
  - ""
  resources:
  - serviceaccounts
  verbs:
  - get
  - create
# to create role bindings to the postgres-pod service account
- apiGroups:
  - rbac.authorization.k8s.io
  resources:
  - rolebindings
  verbs:
  - get
  - create
# to grant privilege to run privileged pods (not needed by default)
#- apiGroups:
#  - extensions
#  resources:
#  - podsecuritypolicies
#  resourceNames:
#  - privileged
#  verbs:
#  - use
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: postgres-operator
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: postgres-operator
subjects:
- kind: ServiceAccount
  name: postgres-operator
  namespace: default
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: postgres-pod
rules:
# Patroni needs to watch and manage config maps
- apiGroups:
  - ""
  resources:
  - configmaps
  verbs:
  - create
  - delete
  - deletecollection
  - get
  - list
  - patch
  - update
  - watch
# Patroni needs to watch pods
- apiGroups:
  - ""
  resources:
  - pods
  verbs:
  - get
  - list
  - patch
  - update
  - watch
# to let Patroni create a headless service
- apiGroups:
  - ""
  resources:
  - services
  verbs:
  - create
# to grant privilege to run privileged pods (not needed by default)
#- apiGroups:
#  - extensions
#  resources:
#  - podsecuritypolicies
#  resourceNames:
#  - privileged
#  verbs:
#  - use

View File

@ -59,6 +59,7 @@ type Config struct {
type kubeResources struct {
Services map[PostgresRole]*v1.Service
Endpoints map[PostgresRole]*v1.Endpoints
ConfigMaps map[PostgresRole]*v1.ConfigMap
Secrets map[types.UID]*v1.Secret
Statefulset *appsv1.StatefulSet
PodDisruptionBudget *policybeta1.PodDisruptionBudget
@ -1484,22 +1485,29 @@ func (c *Cluster) GetCurrentProcess() Process {
// GetStatus provides status of the cluster
func (c *Cluster) GetStatus() *ClusterStatus {
return &ClusterStatus{
status := &ClusterStatus{
Cluster: c.Spec.ClusterName,
Team: c.Spec.TeamID,
Status: c.Status,
Spec: c.Spec,
MasterService: c.GetServiceMaster(),
ReplicaService: c.GetServiceReplica(),
MasterEndpoint: c.GetEndpointMaster(),
ReplicaEndpoint: c.GetEndpointReplica(),
StatefulSet: c.GetStatefulSet(),
PodDisruptionBudget: c.GetPodDisruptionBudget(),
CurrentProcess: c.GetCurrentProcess(),
Error: fmt.Errorf("error: %s", c.Error),
}
if c.patroniKubernetesUseConfigMaps() {
status.MasterEndpoint = c.GetEndpointMaster()
status.ReplicaEndpoint = c.GetEndpointReplica()
} else {
status.MasterConfigMap = c.GetConfigMapMaster()
status.ReplicaConfigMap = c.GetConfigMapReplica()
}
return status
}
// Switchover does a switchover (via Patroni) to a candidate pod
@ -1579,10 +1587,11 @@ func (c *Cluster) deletePatroniClusterObjects() error {
}
if c.patroniKubernetesUseConfigMaps() {
actionsList = append(actionsList, c.deletePatroniClusterServices, c.deletePatroniClusterConfigMaps)
actionsList = append(actionsList, c.deletePatroniClusterConfigMaps)
} else {
actionsList = append(actionsList, c.deletePatroniClusterEndpoints)
}
actionsList = append(actionsList, c.deletePatroniClusterServices)
c.logger.Debugf("removing leftover Patroni objects (endpoints / services and configmaps)")
for _, deleter := range actionsList {

View File

@ -76,13 +76,12 @@ func (c *Cluster) statefulSetName() string {
return c.Name
}
func (c *Cluster) endpointName(role PostgresRole) string {
name := c.Name
if role == Replica {
name = name + "-repl"
func (c *Cluster) configMapName(role PostgresRole) string {
return c.serviceName(role)
}
return name
func (c *Cluster) endpointName(role PostgresRole) string {
return c.serviceName(role)
}
func (c *Cluster) serviceName(role PostgresRole) string {
@ -1821,6 +1820,16 @@ func (c *Cluster) generateEndpoint(role PostgresRole, subsets []v1.EndpointSubse
return endpoints
}
// generateConfigMap builds the metadata-only ConfigMap object for the given
// role (no Data is set; the map is used by Patroni when
// kubernetes_use_configmaps is enabled). Name and labels follow the same
// convention as the role's service.
func (c *Cluster) generateConfigMap(role PostgresRole) *v1.ConfigMap {
	return &v1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{
			Name:      c.configMapName(role),
			Namespace: c.Namespace,
			Labels:    c.roleLabelsSet(true, role),
		},
	}
}
func (c *Cluster) generateCloneEnvironment(description *acidv1.CloneDescription) []v1.EnvVar {
result := make([]v1.EnvVar, 0)

View File

@ -35,9 +35,15 @@ func (c *Cluster) listResources() error {
c.logger.Infof("found secret: %q (uid: %q) namespace: %s", util.NameFromMeta(obj.ObjectMeta), obj.UID, obj.ObjectMeta.Namespace)
}
if c.patroniKubernetesUseConfigMaps() {
for role, configMap := range c.ConfigMaps {
c.logger.Infof("found %s config map: %q (uid: %q)", role, util.NameFromMeta(configMap.ObjectMeta), configMap.UID)
}
} else {
for role, endpoint := range c.Endpoints {
c.logger.Infof("found %s endpoint: %q (uid: %q)", role, util.NameFromMeta(endpoint.ObjectMeta), endpoint.UID)
}
}
for role, service := range c.Services {
c.logger.Infof("found %s service: %q (uid: %q)", role, util.NameFromMeta(service.ObjectMeta), service.UID)
@ -402,6 +408,20 @@ func (c *Cluster) generateEndpointSubsets(role PostgresRole) []v1.EndpointSubset
return result
}
// createConfigMap creates the ConfigMap for the given role in the cluster's
// namespace and caches the object returned by the API server in c.ConfigMaps.
// On failure it returns a nil ConfigMap together with the wrapped error.
func (c *Cluster) createConfigMap(role PostgresRole) (*v1.ConfigMap, error) {
	c.setProcessName("creating config map")
	configMapSpec := c.generateConfigMap(role)
	configMap, err := c.KubeClient.ConfigMaps(configMapSpec.Namespace).Create(context.TODO(), configMapSpec, metav1.CreateOptions{})
	if err != nil {
		return nil, fmt.Errorf("could not create %s config map: %v", role, err)
	}
	c.ConfigMaps[role] = configMap
	return configMap, nil
}
func (c *Cluster) createPodDisruptionBudget() (*policybeta1.PodDisruptionBudget, error) {
podDisruptionBudgetSpec := c.generatePodDisruptionBudget()
podDisruptionBudget, err := c.KubeClient.
@ -589,11 +609,21 @@ func (c *Cluster) GetEndpointMaster() *v1.Endpoints {
return c.Endpoints[Master]
}
// GetEndpointReplica returns cluster's kubernetes master Endpoint
// GetEndpointReplica returns cluster's kubernetes replica Endpoint
func (c *Cluster) GetEndpointReplica() *v1.Endpoints {
return c.Endpoints[Replica]
}
// GetConfigMapMaster returns cluster's kubernetes master ConfigMap
func (c *Cluster) GetConfigMapMaster() *v1.ConfigMap {
return c.ConfigMaps[Master]
}
// GetConfigMapReplica returns cluster's kubernetes replica ConfigMap
func (c *Cluster) GetConfigMapReplica() *v1.ConfigMap {
return c.ConfigMaps[Replica]
}
// GetStatefulSet returns cluster's kubernetes StatefulSet
func (c *Cluster) GetStatefulSet() *appsv1.StatefulSet {
return c.Statefulset

View File

@ -144,7 +144,11 @@ func (c *Cluster) syncServices() error {
for _, role := range []PostgresRole{Master, Replica} {
c.logger.Debugf("syncing %s service", role)
if !c.patroniKubernetesUseConfigMaps() {
if c.patroniKubernetesUseConfigMaps() {
if err := c.syncConfigMap(role); err != nil {
return fmt.Errorf("could not sync %s config map: %v", role, err)
}
} else {
if err := c.syncEndpoint(role); err != nil {
return fmt.Errorf("could not sync %s endpoint: %v", role, err)
}
@ -234,6 +238,40 @@ func (c *Cluster) syncEndpoint(role PostgresRole) error {
return nil
}
// syncConfigMap ensures the ConfigMap for the given role exists and caches it
// in c.ConfigMaps. It is only invoked when kubernetes_use_configmaps is
// enabled, i.e. Patroni keeps its leader/config keys in ConfigMaps instead of
// Endpoints. Missing maps are created; an AlreadyExists race is resolved by
// re-fetching the existing object.
func (c *Cluster) syncConfigMap(role PostgresRole) error {
	var (
		cm  *v1.ConfigMap
		err error
	)
	c.setProcessName("syncing %s config map", role)

	if cm, err = c.KubeClient.ConfigMaps(c.Namespace).Get(context.TODO(), c.configMapName(role), metav1.GetOptions{}); err == nil {
		// TODO: No syncing of config map here, is this covered completely by updateService?
		c.ConfigMaps[role] = cm
		return nil
	}
	if !k8sutil.ResourceNotFound(err) {
		return fmt.Errorf("could not get %s config map: %v", role, err)
	}

	// no existing config map, create new one
	c.ConfigMaps[role] = nil
	c.logger.Infof("could not find the cluster's %s config map", role)

	if cm, err = c.createConfigMap(role); err == nil {
		c.logger.Infof("created missing %s config map %q", role, util.NameFromMeta(cm.ObjectMeta))
	} else {
		if !k8sutil.ResourceAlreadyExists(err) {
			return fmt.Errorf("could not create missing %s config map: %v", role, err)
		}
		// BUGFIX: createConfigMap returns a nil ConfigMap on error, so the
		// previous util.NameFromMeta(cm.ObjectMeta) here dereferenced nil and
		// panicked; log the deterministic name instead.
		c.logger.Infof("%s config map %q already exists", role, c.configMapName(role))
		if cm, err = c.KubeClient.ConfigMaps(c.Namespace).Get(context.TODO(), c.configMapName(role), metav1.GetOptions{}); err != nil {
			return fmt.Errorf("could not fetch existing %s config map: %v", role, err)
		}
	}
	c.ConfigMaps[role] = cm

	return nil
}
func (c *Cluster) syncPodDisruptionBudget(isUpdate bool) error {
var (
pdb *policybeta1.PodDisruptionBudget

View File

@ -63,6 +63,8 @@ type ClusterStatus struct {
ReplicaService *v1.Service
MasterEndpoint *v1.Endpoints
ReplicaEndpoint *v1.Endpoints
MasterConfigMap *v1.ConfigMap
ReplicaConfigMap *v1.ConfigMap
StatefulSet *appsv1.StatefulSet
PodDisruptionBudget *policybeta1.PodDisruptionBudget

View File

@ -544,7 +544,8 @@ func (c *Controller) postgresqlCheck(obj interface{}) *acidv1.Postgresql {
Ensures the pod service account and role bindings exists in a namespace
before a PG cluster is created there so that a user does not have to deploy
these credentials manually. StatefulSets require the service account to
create pods; Patroni requires relevant RBAC bindings to access endpoints.
create pods; Patroni requires relevant RBAC bindings to access endpoints
or config maps.
The operator does not sync accounts/role bindings after creation.
*/