define ownership between operator and clusters via annotation (#802)

* define ownership between operator and postgres clusters
* add documentation
* add unit test
This commit is contained in:
Felix Kunde 2020-03-17 16:34:31 +01:00 committed by GitHub
parent d666c52172
commit cf829df1a4
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
10 changed files with 160 additions and 43 deletions

View File

@ -77,7 +77,7 @@ func main() {
log.Fatalf("couldn't get REST config: %v", err)
}
c := controller.NewController(&config)
c := controller.NewController(&config, "")
c.Run(stop, wg)

View File

@ -95,6 +95,34 @@ lacks access rights to any of them (except K8s system namespaces like
'list pods' execute at the cluster scope and fail at the first violation of
access rights.
## Operators with defined ownership of certain Postgres clusters
By default, multiple operators can only run together in one K8s cluster when
isolated into their [own namespaces](administrator.md#specify-the-namespace-to-watch).
However, it is also possible to define ownership between operator instances and
Postgres clusters so that multiple operators can run in the same namespace or
K8s cluster without interfering with each other.
First, define the [`CONTROLLER_ID`](../../manifests/postgres-operator.yaml#L38)
environment variable in the operator deployment manifest. Then specify the ID
in every Postgres cluster manifest you want this operator to watch using the
`"acid.zalan.do/controller"` annotation:
```yaml
apiVersion: "acid.zalan.do/v1"
kind: postgresql
metadata:
name: demo-cluster
annotations:
"acid.zalan.do/controller": "second-operator"
spec:
...
```
Every Postgres cluster that lacks the annotation will be ignored by this
operator. Conversely, operators without a defined `CONTROLLER_ID` will only
watch clusters that carry no ownership annotation at all.
## Role-based access control for the operator
The manifest [`operator-service-account-rbac.yaml`](../manifests/operator-service-account-rbac.yaml)

View File

@ -4,6 +4,8 @@ metadata:
name: acid-test-cluster
# labels:
# environment: demo
# annotations:
# "acid.zalan.do/controller": "second-operator"
spec:
dockerImage: registry.opensource.zalan.do/acid/spilo-12:1.6-p2
teamId: "acid"

View File

@ -35,3 +35,6 @@ spec:
# In order to use the CRD OperatorConfiguration instead, uncomment these lines and comment out the two lines above
# - name: POSTGRES_OPERATOR_CONFIGURATION_OBJECT
# value: postgresql-operator-default-configuration
# Define an ID to isolate controllers from each other
# - name: CONTROLLER_ID
# value: "second-operator"

View File

@ -13,6 +13,7 @@ import (
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/tools/cache"
acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1"
"github.com/zalando/postgres-operator/pkg/apiserver"
"github.com/zalando/postgres-operator/pkg/cluster"
"github.com/zalando/postgres-operator/pkg/spec"
@ -36,6 +37,7 @@ type Controller struct {
stopCh chan struct{}
controllerID string
curWorkerID uint32 //initialized with 0
curWorkerCluster sync.Map
clusterWorkers map[spec.NamespacedName]uint32
@ -61,13 +63,14 @@ type Controller struct {
}
// NewController creates a new controller
func NewController(controllerConfig *spec.ControllerConfig) *Controller {
func NewController(controllerConfig *spec.ControllerConfig, controllerId string) *Controller {
logger := logrus.New()
c := &Controller{
config: *controllerConfig,
opConfig: &config.Config{},
logger: logger.WithField("pkg", "controller"),
controllerID: controllerId,
curWorkerCluster: sync.Map{},
clusterWorkers: make(map[spec.NamespacedName]uint32),
clusters: make(map[spec.NamespacedName]*cluster.Cluster),
@ -239,6 +242,7 @@ func (c *Controller) initRoleBinding() {
func (c *Controller) initController() {
c.initClients()
c.controllerID = os.Getenv("CONTROLLER_ID")
if configObjectName := os.Getenv("POSTGRES_OPERATOR_CONFIGURATION_OBJECT"); configObjectName != "" {
if err := c.createConfigurationCRD(c.opConfig.EnableCRDValidation); err != nil {
@ -412,3 +416,16 @@ func (c *Controller) getEffectiveNamespace(namespaceFromEnvironment, namespaceFr
return namespace
}
// hasOwnership returns true if the controller is the "owner" of the postgresql.
// Ownership is decided by the 'acid.zalan.do/controller' annotation: when the
// annotation is present its value must equal the controllerID; when it is
// absent, only a controller with an empty ID claims the cluster.
func (c *Controller) hasOwnership(postgresql *acidv1.Postgresql) bool {
	annotations := postgresql.Annotations
	if annotations == nil {
		return c.controllerID == ""
	}
	owner, annotated := annotations[constants.PostgresqlControllerAnnotationKey]
	if !annotated {
		return c.controllerID == ""
	}
	return owner == c.controllerID
}

View File

@ -4,7 +4,7 @@ import (
"testing"
"github.com/zalando/postgres-operator/pkg/spec"
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
@ -13,10 +13,10 @@ const (
readyValue = "ready"
)
func initializeController() *Controller {
var c = NewController(&spec.ControllerConfig{})
c.opConfig.NodeReadinessLabel = map[string]string{readyLabel: readyValue}
return c
func newNodeTestController() *Controller {
var controller = NewController(&spec.ControllerConfig{}, "node-test")
controller.opConfig.NodeReadinessLabel = map[string]string{readyLabel: readyValue}
return controller
}
func makeNode(labels map[string]string, isSchedulable bool) *v1.Node {
@ -31,7 +31,7 @@ func makeNode(labels map[string]string, isSchedulable bool) *v1.Node {
}
}
var c = initializeController()
var nodeTestController = newNodeTestController()
func TestNodeIsReady(t *testing.T) {
testName := "TestNodeIsReady"
@ -57,7 +57,7 @@ func TestNodeIsReady(t *testing.T) {
},
}
for _, tt := range testTable {
if isReady := c.nodeIsReady(tt.in); isReady != tt.out {
if isReady := nodeTestController.nodeIsReady(tt.in); isReady != tt.out {
t.Errorf("%s: expected response %t doesn't match the actual %t for the node %#v",
testName, tt.out, isReady, tt.in)
}

View File

@ -40,12 +40,23 @@ func (c *Controller) clusterResync(stopCh <-chan struct{}, wg *sync.WaitGroup) {
// listClusters obtains the list of PostgreSQL clusters in the watched
// namespace, filtered down to the clusters owned by this controller
// (see hasOwnership). Clusters with a non-empty manifest Error are skipped.
// Defects fixed: the diff left the old `return list, err` mid-function,
// the comment referenced the old name clusterListFunc, and a failed List
// (nil list) would have been dereferenced in the range below.
func (c *Controller) listClusters(options metav1.ListOptions) (*acidv1.PostgresqlList, error) {
	var pgList acidv1.PostgresqlList

	// TODO: use the SharedInformer cache instead of querying Kubernetes API directly.
	list, err := c.KubeClient.AcidV1ClientSet.AcidV1().Postgresqls(c.opConfig.WatchedNamespace).List(options)
	if err != nil {
		c.logger.Errorf("could not list postgresql objects: %v", err)
		// guard: list may be nil on error; return the empty filtered list
		return &pgList, err
	}

	if c.controllerID != "" {
		c.logger.Debugf("watch only clusters with controllerID %q", c.controllerID)
	}

	for _, pg := range list.Items {
		if pg.Error == "" && c.hasOwnership(&pg) {
			pgList.Items = append(pgList.Items, pg)
		}
	}

	return &pgList, err
}
// clusterListAndSync lists all manifests and decides whether to run the sync or repair.
@ -455,41 +466,48 @@ func (c *Controller) queueClusterEvent(informerOldSpec, informerNewSpec *acidv1.
}
// postgresqlAdd reacts to a new postgresql object and queues an Add event
// when the object casts correctly and is owned by this controller.
// Defect fixed: the diff left the old inline cast/error-check lines
// interleaved with the new postgresqlCheck-based body; only the new
// version is kept.
func (c *Controller) postgresqlAdd(obj interface{}) {
	pg := c.postgresqlCheck(obj)
	if pg != nil {
		// We will not get multiple Add events for the same cluster
		c.queueClusterEvent(nil, pg, EventAdd)
	}
}
// postgresqlUpdate reacts to a changed postgresql object and queues an Update
// event when both old and new objects cast correctly, are owned by this
// controller, and the Spec actually changed.
// Defects fixed: the diff left the old inline cast lines interleaved with the
// new postgresqlCheck-based body; comment typo "inifinite" corrected.
func (c *Controller) postgresqlUpdate(prev, cur interface{}) {
	pgOld := c.postgresqlCheck(prev)
	pgNew := c.postgresqlCheck(cur)
	if pgOld != nil && pgNew != nil {
		// Avoid the infinite recursion for status updates
		if reflect.DeepEqual(pgOld.Spec, pgNew.Spec) {
			return
		}
		c.queueClusterEvent(pgOld, pgNew, EventUpdate)
	}
}
// postgresqlDelete reacts to a deleted postgresql object and queues a Delete
// event when the object casts correctly and is owned by this controller.
func (c *Controller) postgresqlDelete(obj interface{}) {
	if deleted := c.postgresqlCheck(obj); deleted != nil {
		c.queueClusterEvent(deleted, nil, EventDelete)
	}
}
// postgresqlCheck casts obj to *acidv1.Postgresql and verifies that this
// controller owns it. It returns nil when the cast fails or when the
// ownership annotation points to a different controller.
// Defects fixed: the diff left a bare old `return` next to the new
// `return nil`, and the removed `c.queueClusterEvent(pg, nil, EventDelete)`
// line (now living in postgresqlDelete) was stranded inside this function.
func (c *Controller) postgresqlCheck(obj interface{}) *acidv1.Postgresql {
	pg, ok := obj.(*acidv1.Postgresql)
	if !ok {
		c.logger.Errorf("could not cast to postgresql spec")
		return nil
	}
	if !c.hasOwnership(pg) {
		return nil
	}
	return pg
}
/*

View File

@ -1,10 +1,12 @@
package controller
import (
acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1"
"github.com/zalando/postgres-operator/pkg/spec"
"reflect"
"testing"
acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1"
"github.com/zalando/postgres-operator/pkg/spec"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
var (
@ -12,9 +14,55 @@ var (
False = false
)
func TestMergeDeprecatedPostgreSQLSpecParameters(t *testing.T) {
c := NewController(&spec.ControllerConfig{})
func newPostgresqlTestController() *Controller {
controller := NewController(&spec.ControllerConfig{}, "postgresql-test")
return controller
}
var postgresqlTestController = newPostgresqlTestController()
// TestControllerOwnershipOnPostgresql verifies hasOwnership against clusters
// annotated for this controller, annotated for a different controller, and
// carrying no ownership annotation at all.
func TestControllerOwnershipOnPostgresql(t *testing.T) {
	testCases := []struct {
		name  string
		pg    *acidv1.Postgresql
		owned bool
		error string
	}{
		{
			name: "Postgres cluster with defined ownership of mocked controller",
			pg: &acidv1.Postgresql{
				ObjectMeta: metav1.ObjectMeta{
					Annotations: map[string]string{"acid.zalan.do/controller": "postgresql-test"},
				},
			},
			owned: True,
			error: "Postgres cluster should be owned by operator, but controller says no",
		},
		{
			name: "Postgres cluster with defined ownership of another controller",
			pg: &acidv1.Postgresql{
				ObjectMeta: metav1.ObjectMeta{
					Annotations: map[string]string{"acid.zalan.do/controller": "stups-test"},
				},
			},
			owned: False,
			error: "Postgres cluster should be owned by another operator, but controller say yes",
		},
		{
			name:  "Test Postgres cluster without defined ownership",
			pg:    &acidv1.Postgresql{},
			owned: False,
			error: "Postgres cluster should be owned by operator with empty controller ID, but controller says yes",
		},
	}

	for _, tc := range testCases {
		if got := postgresqlTestController.hasOwnership(tc.pg); got != tc.owned {
			t.Errorf("%s: %v", tc.name, tc.error)
		}
	}
}
func TestMergeDeprecatedPostgreSQLSpecParameters(t *testing.T) {
tests := []struct {
name string
in *acidv1.PostgresSpec
@ -36,7 +84,7 @@ func TestMergeDeprecatedPostgreSQLSpecParameters(t *testing.T) {
},
}
for _, tt := range tests {
result := c.mergeDeprecatedPostgreSQLSpecParameters(tt.in)
result := postgresqlTestController.mergeDeprecatedPostgreSQLSpecParameters(tt.in)
if !reflect.DeepEqual(result, tt.out) {
t.Errorf("%s: %v", tt.name, tt.error)
}

View File

@ -17,8 +17,8 @@ const (
testInfrastructureRolesSecretName = "infrastructureroles-test"
)
func newMockController() *Controller {
controller := NewController(&spec.ControllerConfig{})
func newUtilTestController() *Controller {
controller := NewController(&spec.ControllerConfig{}, "util-test")
controller.opConfig.ClusterNameLabel = "cluster-name"
controller.opConfig.InfrastructureRolesSecretName =
spec.NamespacedName{Namespace: v1.NamespaceDefault, Name: testInfrastructureRolesSecretName}
@ -27,7 +27,7 @@ func newMockController() *Controller {
return controller
}
var mockController = newMockController()
var utilTestController = newUtilTestController()
func TestPodClusterName(t *testing.T) {
var testTable = []struct {
@ -43,7 +43,7 @@ func TestPodClusterName(t *testing.T) {
ObjectMeta: metav1.ObjectMeta{
Namespace: v1.NamespaceDefault,
Labels: map[string]string{
mockController.opConfig.ClusterNameLabel: "testcluster",
utilTestController.opConfig.ClusterNameLabel: "testcluster",
},
},
},
@ -51,7 +51,7 @@ func TestPodClusterName(t *testing.T) {
},
}
for _, test := range testTable {
resp := mockController.podClusterName(test.in)
resp := utilTestController.podClusterName(test.in)
if resp != test.expected {
t.Errorf("expected response %v does not match the actual %v", test.expected, resp)
}
@ -73,7 +73,7 @@ func TestClusterWorkerID(t *testing.T) {
},
}
for _, test := range testTable {
resp := mockController.clusterWorkerID(test.in)
resp := utilTestController.clusterWorkerID(test.in)
if resp != test.expected {
t.Errorf("expected response %v does not match the actual %v", test.expected, resp)
}
@ -116,7 +116,7 @@ func TestGetInfrastructureRoles(t *testing.T) {
},
}
for _, test := range testTable {
roles, err := mockController.getInfrastructureRoles(&test.secretName)
roles, err := utilTestController.getInfrastructureRoles(&test.secretName)
if err != test.expectedError {
if err != nil && test.expectedError != nil && err.Error() == test.expectedError.Error() {
continue

View File

@ -7,4 +7,5 @@ const (
ElbTimeoutAnnotationValue = "3600"
KubeIAmAnnotation = "iam.amazonaws.com/role"
VolumeStorateProvisionerAnnotation = "pv.kubernetes.io/provisioned-by"
PostgresqlControllerAnnotationKey = "acid.zalan.do/controller"
)