Fix role sync if default pool user/schema changed
It requires a more accurate lookup function synchronization and a couple of fixes along the way (e.g. getting rid of using the schema from a user secret). For the lookup function, since it's idempotent, sync it whenever we're not sure it was installed (e.g. when the operator was shut down and syncs everything on startup), and then remember that it was installed.
This commit is contained in:
		
							parent
							
								
									c0a840c3ed
								
							
						
					
					
						commit
						6d1a1ea22c
					
				|  | @ -49,9 +49,18 @@ type Config struct { | |||
| 	PodServiceAccountRoleBinding *rbacv1.RoleBinding | ||||
| } | ||||
| 
 | ||||
| // K8S objects that belong to a connection pool
 | ||||
| type ConnectionPoolObjects struct { | ||||
| 	Deployment *appsv1.Deployment | ||||
| 	Service    *v1.Service | ||||
| 
 | ||||
| 	// It could happen that a connection pool was enabled, but the operator was
 | ||||
| 	// not able to properly process a corresponding event or was restarted. In
 | ||||
| 	// this case we will miss the missing/required situation and a lookup function
 | ||||
| 	// will not be installed. To avoid synchronizing it all the time to prevent
 | ||||
| 	// this, we can remember the result in memory at least until the next
 | ||||
| 	// restart.
 | ||||
| 	LookupFunction bool | ||||
| } | ||||
| 
 | ||||
| type kubeResources struct { | ||||
|  | @ -1305,5 +1314,25 @@ func (c *Cluster) needSyncConnPoolDefaults( | |||
| 		c.logger.Warningf("Cannot generate expected resources, %v", err) | ||||
| 	} | ||||
| 
 | ||||
| 	for _, env := range poolContainer.Env { | ||||
| 		if env.Name == "PGUSER" { | ||||
| 			ref := env.ValueFrom.SecretKeyRef.LocalObjectReference | ||||
| 
 | ||||
| 			if ref.Name != c.credentialSecretName(config.User) { | ||||
| 				sync = true | ||||
| 				msg := fmt.Sprintf("Pool user is different (%s vs %s)", | ||||
| 					ref.Name, config.User) | ||||
| 				reasons = append(reasons, msg) | ||||
| 			} | ||||
| 		} | ||||
| 
 | ||||
| 		if env.Name == "PGSCHEMA" && env.Value != config.Schema { | ||||
| 			sync = true | ||||
| 			msg := fmt.Sprintf("Pool schema is different (%s vs %s)", | ||||
| 				env.Value, config.Schema) | ||||
| 			reasons = append(reasons, msg) | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	return sync, reasons | ||||
| } | ||||
|  |  | |||
|  | @ -269,6 +269,7 @@ func makeUserFlags(rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin | |||
| // perform remote authentication.
 | ||||
| func (c *Cluster) installLookupFunction(poolSchema, poolUser string) error { | ||||
| 	var stmtBytes bytes.Buffer | ||||
| 	c.logger.Info("Installing lookup function") | ||||
| 
 | ||||
| 	if err := c.initDbConn(); err != nil { | ||||
| 		return fmt.Errorf("could not init database connection") | ||||
|  | @ -326,5 +327,6 @@ func (c *Cluster) installLookupFunction(poolSchema, poolUser string) error { | |||
| 		c.logger.Infof("Pool lookup function installed into %s", dbname) | ||||
| 	} | ||||
| 
 | ||||
| 	c.ConnectionPool.LookupFunction = true | ||||
| 	return nil | ||||
| } | ||||
|  |  | |||
|  | @ -1822,14 +1822,22 @@ func (c *Cluster) generateConnPoolPodTemplate(spec *acidv1.PostgresSpec) ( | |||
| 		spec.ConnectionPool.DockerImage, | ||||
| 		c.OpConfig.ConnectionPool.Image) | ||||
| 
 | ||||
| 	effectiveSchema := util.Coalesce( | ||||
| 		spec.ConnectionPool.Schema, | ||||
| 		c.OpConfig.ConnectionPool.Schema) | ||||
| 
 | ||||
| 	if err != nil { | ||||
| 		return nil, fmt.Errorf("could not generate resource requirements: %v", err) | ||||
| 	} | ||||
| 
 | ||||
| 	secretSelector := func(key string) *v1.SecretKeySelector { | ||||
| 		effectiveUser := util.Coalesce( | ||||
| 			spec.ConnectionPool.User, | ||||
| 			c.OpConfig.ConnectionPool.User) | ||||
| 
 | ||||
| 		return &v1.SecretKeySelector{ | ||||
| 			LocalObjectReference: v1.LocalObjectReference{ | ||||
| 				Name: c.credentialSecretName(c.OpConfig.ConnectionPool.User), | ||||
| 				Name: c.credentialSecretName(effectiveUser), | ||||
| 			}, | ||||
| 			Key: key, | ||||
| 		} | ||||
|  | @ -1853,10 +1861,8 @@ func (c *Cluster) generateConnPoolPodTemplate(spec *acidv1.PostgresSpec) ( | |||
| 		// the convention is to use the same schema name as
 | ||||
| 		// connection pool username
 | ||||
| 		{ | ||||
| 			Name: "PGSCHEMA", | ||||
| 			ValueFrom: &v1.EnvVarSource{ | ||||
| 				SecretKeyRef: secretSelector("username"), | ||||
| 			}, | ||||
| 			Name:  "PGSCHEMA", | ||||
| 			Value: effectiveSchema, | ||||
| 		}, | ||||
| 		{ | ||||
| 			Name: "PGPASSWORD", | ||||
|  |  | |||
|  | @ -188,8 +188,7 @@ func (c *Cluster) deleteConnectionPool() (err error) { | |||
| 		return fmt.Errorf("could not delete deployment: %v", err) | ||||
| 	} | ||||
| 
 | ||||
| 	c.logger.Infof("Connection pool deployment %q has been deleted", | ||||
| 		util.NameFromMeta(deployment.ObjectMeta)) | ||||
| 	c.logger.Infof("Connection pool deployment %q has been deleted", deploymentName) | ||||
| 
 | ||||
| 	// Repeat the same for the service object
 | ||||
| 	service := c.ConnectionPool.Service | ||||
|  | @ -211,8 +210,7 @@ func (c *Cluster) deleteConnectionPool() (err error) { | |||
| 		return fmt.Errorf("could not delete service: %v", err) | ||||
| 	} | ||||
| 
 | ||||
| 	c.logger.Infof("Connection pool service %q has been deleted", | ||||
| 		util.NameFromMeta(deployment.ObjectMeta)) | ||||
| 	c.logger.Infof("Connection pool service %q has been deleted", serviceName) | ||||
| 
 | ||||
| 	c.ConnectionPool = nil | ||||
| 	return nil | ||||
|  |  | |||
|  | @ -469,7 +469,7 @@ func (c *Cluster) syncRoles() (err error) { | |||
| 		connPoolUser := c.systemUsers[constants.ConnectionPoolUserKeyName] | ||||
| 		userNames = append(userNames, connPoolUser.Name) | ||||
| 
 | ||||
| 		if _, exists := c.pgUsers[constants.ConnectionPoolUserKeyName]; !exists { | ||||
| 		if _, exists := c.pgUsers[connPoolUser.Name]; !exists { | ||||
| 			c.pgUsers[connPoolUser.Name] = connPoolUser | ||||
| 		} | ||||
| 	} | ||||
|  | @ -608,6 +608,10 @@ func (c *Cluster) syncLogicalBackupJob() error { | |||
| } | ||||
| 
 | ||||
| func (c *Cluster) syncConnectionPool(oldSpec, newSpec *acidv1.Postgresql, lookup InstallFunction) error { | ||||
| 	if c.ConnectionPool == nil { | ||||
| 		c.ConnectionPool = &ConnectionPoolObjects{} | ||||
| 	} | ||||
| 
 | ||||
| 	newNeedConnPool := c.needConnectionPoolWorker(&newSpec.Spec) | ||||
| 	oldNeedConnPool := c.needConnectionPoolWorker(&oldSpec.Spec) | ||||
| 
 | ||||
|  | @ -621,7 +625,7 @@ func (c *Cluster) syncConnectionPool(oldSpec, newSpec *acidv1.Postgresql, lookup | |||
| 
 | ||||
| 		// in this case also do not forget to install lookup function as for
 | ||||
| 		// creating cluster
 | ||||
| 		if !oldNeedConnPool { | ||||
| 		if !oldNeedConnPool || !c.ConnectionPool.LookupFunction { | ||||
| 			newConnPool := newSpec.Spec.ConnectionPool | ||||
| 
 | ||||
| 			specSchema := "" | ||||
|  | @ -676,11 +680,6 @@ func (c *Cluster) syncConnectionPool(oldSpec, newSpec *acidv1.Postgresql, lookup | |||
| // service is missing, create it. After checking, also remember an object for
 | ||||
| // the future references.
 | ||||
| func (c *Cluster) syncConnectionPoolWorker(oldSpec, newSpec *acidv1.Postgresql) error { | ||||
| 	if c.ConnectionPool == nil { | ||||
| 		c.logger.Warning("Connection pool resources are empty") | ||||
| 		c.ConnectionPool = &ConnectionPoolObjects{} | ||||
| 	} | ||||
| 
 | ||||
| 	deployment, err := c.KubeClient. | ||||
| 		Deployments(c.Namespace). | ||||
| 		Get(c.connPoolName(), metav1.GetOptions{}) | ||||
|  |  | |||
		Loading…
	
		Reference in New Issue