Rename the configmap 'namespace' entry to avoid confusion with the map's own namespace
This commit is contained in:
parent
dcf637d4ea
commit
ea84f9d577
|
|
@ -110,7 +110,7 @@ func (c *Cluster) preScaleDown(newStatefulSet *v1beta1.StatefulSet) error {
|
||||||
}
|
}
|
||||||
|
|
||||||
podName := fmt.Sprintf("%s-0", c.Statefulset.Name)
|
podName := fmt.Sprintf("%s-0", c.Statefulset.Name)
|
||||||
masterCandidatePod, err := c.KubeClient.Pods(c.OpConfig.Namespace).Get(podName, metav1.GetOptions{})
|
masterCandidatePod, err := c.KubeClient.Pods(c.OpConfig.WatchedNamespace).Get(podName, metav1.GetOptions{})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("could not get master candidate pod: %v", err)
|
return fmt.Errorf("could not get master candidate pod: %v", err)
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -97,8 +97,8 @@ func (c *Controller) initOperatorConfig() {
|
||||||
c.logger.Infoln("no ConfigMap specified. Loading default values")
|
c.logger.Infoln("no ConfigMap specified. Loading default values")
|
||||||
}
|
}
|
||||||
|
|
||||||
if configMapData["namespace"] == "" { // Namespace in ConfigMap has priority over env var
|
if configMapData["watched_namespace"] == "" { // Namespace in ConfigMap has priority over env var
|
||||||
configMapData["namespace"] = c.config.Namespace
|
configMapData["watched_namespace"] = c.config.Namespace
|
||||||
}
|
}
|
||||||
if c.config.NoDatabaseAccess {
|
if c.config.NoDatabaseAccess {
|
||||||
configMapData["enable_database_access"] = "false"
|
configMapData["enable_database_access"] = "false"
|
||||||
|
|
|
||||||
|
|
@ -80,7 +80,7 @@ func (c *Controller) moveMasterPodsOffNode(node *v1.Node) {
|
||||||
opts := metav1.ListOptions{
|
opts := metav1.ListOptions{
|
||||||
LabelSelector: labels.Set(c.opConfig.ClusterLabels).String(),
|
LabelSelector: labels.Set(c.opConfig.ClusterLabels).String(),
|
||||||
}
|
}
|
||||||
podList, err := c.KubeClient.Pods(c.opConfig.Namespace).List(opts)
|
podList, err := c.KubeClient.Pods(c.opConfig.WatchedNamespace).List(opts)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
c.logger.Errorf("could not fetch list of the pods: %v", err)
|
c.logger.Errorf("could not fetch list of the pods: %v", err)
|
||||||
return
|
return
|
||||||
|
|
|
||||||
|
|
@ -17,7 +17,7 @@ func (c *Controller) podListFunc(options metav1.ListOptions) (runtime.Object, er
|
||||||
TimeoutSeconds: options.TimeoutSeconds,
|
TimeoutSeconds: options.TimeoutSeconds,
|
||||||
}
|
}
|
||||||
|
|
||||||
return c.KubeClient.Pods(c.opConfig.Namespace).List(opts)
|
return c.KubeClient.Pods(c.opConfig.WatchedNamespace).List(opts)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *Controller) podWatchFunc(options metav1.ListOptions) (watch.Interface, error) {
|
func (c *Controller) podWatchFunc(options metav1.ListOptions) (watch.Interface, error) {
|
||||||
|
|
@ -27,7 +27,7 @@ func (c *Controller) podWatchFunc(options metav1.ListOptions) (watch.Interface,
|
||||||
TimeoutSeconds: options.TimeoutSeconds,
|
TimeoutSeconds: options.TimeoutSeconds,
|
||||||
}
|
}
|
||||||
|
|
||||||
return c.KubeClient.Pods(c.opConfig.Namespace).Watch(opts)
|
return c.KubeClient.Pods(c.opConfig.WatchedNamespace).Watch(opts)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *Controller) dispatchPodEvent(clusterName spec.NamespacedName, event spec.PodEvent) {
|
func (c *Controller) dispatchPodEvent(clusterName spec.NamespacedName, event spec.PodEvent) {
|
||||||
|
|
|
||||||
|
|
@ -46,7 +46,7 @@ func (c *Controller) clusterListFunc(options metav1.ListOptions) (runtime.Object
|
||||||
|
|
||||||
req := c.KubeClient.CRDREST.
|
req := c.KubeClient.CRDREST.
|
||||||
Get().
|
Get().
|
||||||
Namespace(c.opConfig.Namespace).
|
Namespace(c.opConfig.WatchedNamespace).
|
||||||
Resource(constants.CRDResource).
|
Resource(constants.CRDResource).
|
||||||
VersionedParams(&options, metav1.ParameterCodec)
|
VersionedParams(&options, metav1.ParameterCodec)
|
||||||
|
|
||||||
|
|
@ -110,7 +110,7 @@ func (c *Controller) clusterWatchFunc(options metav1.ListOptions) (watch.Interfa
|
||||||
options.Watch = true
|
options.Watch = true
|
||||||
r, err := c.KubeClient.CRDREST.
|
r, err := c.KubeClient.CRDREST.
|
||||||
Get().
|
Get().
|
||||||
Namespace(c.opConfig.Namespace).
|
Namespace(c.opConfig.WatchedNamespace).
|
||||||
Resource(constants.CRDResource).
|
Resource(constants.CRDResource).
|
||||||
VersionedParams(&options, metav1.ParameterCodec).
|
VersionedParams(&options, metav1.ParameterCodec).
|
||||||
FieldsSelectorParam(nil).
|
FieldsSelectorParam(nil).
|
||||||
|
|
|
||||||
|
|
@ -16,7 +16,7 @@ import (
|
||||||
// ClusterStatus provides status of the cluster
|
// ClusterStatus provides status of the cluster
|
||||||
func (c *Controller) ClusterStatus(team, cluster string) (*spec.ClusterStatus, error) {
|
func (c *Controller) ClusterStatus(team, cluster string) (*spec.ClusterStatus, error) {
|
||||||
clusterName := spec.NamespacedName{
|
clusterName := spec.NamespacedName{
|
||||||
Namespace: c.opConfig.Namespace,
|
Namespace: c.opConfig.WatchedNamespace,
|
||||||
Name: team + "-" + cluster,
|
Name: team + "-" + cluster,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -92,7 +92,7 @@ func (c *Controller) GetStatus() *spec.ControllerStatus {
|
||||||
// ClusterLogs dumps cluster ring logs
|
// ClusterLogs dumps cluster ring logs
|
||||||
func (c *Controller) ClusterLogs(team, name string) ([]*spec.LogEntry, error) {
|
func (c *Controller) ClusterLogs(team, name string) ([]*spec.LogEntry, error) {
|
||||||
clusterName := spec.NamespacedName{
|
clusterName := spec.NamespacedName{
|
||||||
Namespace: c.opConfig.Namespace,
|
Namespace: c.opConfig.WatchedNamespace,
|
||||||
Name: team + "-" + name,
|
Name: team + "-" + name,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -214,7 +214,7 @@ func (c *Controller) WorkerStatus(workerID uint32) (*spec.WorkerStatus, error) {
|
||||||
// ClusterHistory dumps history of cluster changes
|
// ClusterHistory dumps history of cluster changes
|
||||||
func (c *Controller) ClusterHistory(team, name string) ([]*spec.Diff, error) {
|
func (c *Controller) ClusterHistory(team, name string) ([]*spec.Diff, error) {
|
||||||
clusterName := spec.NamespacedName{
|
clusterName := spec.NamespacedName{
|
||||||
Namespace: c.opConfig.Namespace,
|
Namespace: c.opConfig.WatchedNamespace,
|
||||||
Name: team + "-" + name,
|
Name: team + "-" + name,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -6,6 +6,7 @@ import (
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"fmt"
|
"fmt"
|
||||||
|
|
||||||
"github.com/zalando-incubator/postgres-operator/pkg/spec"
|
"github.com/zalando-incubator/postgres-operator/pkg/spec"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
@ -66,7 +67,7 @@ type Config struct {
|
||||||
Resources
|
Resources
|
||||||
Auth
|
Auth
|
||||||
Scalyr
|
Scalyr
|
||||||
Namespace string `name:"namespace"`
|
WatchedNamespace string `name:"watched_namespace"`
|
||||||
EtcdHost string `name:"etcd_host" default:"etcd-client.default.svc.cluster.local:2379"`
|
EtcdHost string `name:"etcd_host" default:"etcd-client.default.svc.cluster.local:2379"`
|
||||||
DockerImage string `name:"docker_image" default:"registry.opensource.zalan.do/acid/spiloprivate-9.6:1.2-p4"`
|
DockerImage string `name:"docker_image" default:"registry.opensource.zalan.do/acid/spiloprivate-9.6:1.2-p4"`
|
||||||
ServiceAccountName string `name:"service_account_name" default:"operator"`
|
ServiceAccountName string `name:"service_account_name" default:"operator"`
|
||||||
|
|
|
||||||
Loading…
Reference in New Issue