Merge branch 'master' into bump-v1.8.1

commit 6aa3ce7693
Felix Kunde 2022-05-19 11:34:56 +02:00
16 changed files with 238 additions and 97 deletions

View File

@@ -1,2 +1,2 @@
# global owners
* @sdudoladov @Jan-M @CyberDem0n @FxKu @jopadi
* @sdudoladov @Jan-M @CyberDem0n @FxKu @jopadi @idanovinda

View File

@@ -1,4 +1,5 @@
Sergey Dudoladov <sergey.dudoladov@zalando.de>
Felix Kunde <felix.kunde@zalando.de>
Jan Mussler <jan.mussler@zalando.de>
Jociele Padilha <jociele.padilha@zalando.de>
Jociele Padilha <jociele.padilha@zalando.de>
Ida Novindasari <ida.novindasari@zalando.de>

View File

@@ -1032,12 +1032,20 @@ func (c *Cluster) processPodEvent(obj interface{}) error {
return fmt.Errorf("could not cast to PodEvent")
}
// can only take lock when (un)registerPodSubscriber is finished
c.podSubscribersMu.RLock()
subscriber, ok := c.podSubscribers[spec.NamespacedName(event.PodName)]
c.podSubscribersMu.RUnlock()
if ok {
subscriber <- event
select {
case subscriber <- event:
default:
// ending up here when there is no receiver on the channel (i.e. waitForPodLabel finished)
// avoids blocking channel: https://gobyexample.com/non-blocking-channel-operations
}
}
// hold lock for the time of processing the event to avoid race condition
// with unregisterPodSubscriber closing the channel (see #1876)
c.podSubscribersMu.RUnlock()
return nil
}
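Note: the select with an empty default above is Go's standard non-blocking send. A minimal, self-contained sketch of the pattern (hypothetical names, not operator code):

package main

import "fmt"

func main() {
	events := make(chan string) // unbuffered, like a pod subscriber channel

	// Non-blocking send: if no goroutine is currently receiving (the
	// "waitForPodLabel finished" case above), fall through to default
	// and drop the event instead of blocking the informer goroutine.
	select {
	case events <- "pod-update":
		fmt.Println("event delivered")
	default:
		fmt.Println("no receiver, event dropped") // this branch runs here
	}
}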
@@ -1501,34 +1509,16 @@ func (c *Cluster) Switchover(curMaster *v1.Pod, candidate spec.NamespacedName) e
var err error
c.logger.Debugf("switching over from %q to %q", curMaster.Name, candidate)
c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeNormal, "Switchover", "Switching over from %q to %q", curMaster.Name, candidate)
var wg sync.WaitGroup
podLabelErr := make(chan error)
stopCh := make(chan struct{})
wg.Add(1)
go func() {
defer wg.Done()
ch := c.registerPodSubscriber(candidate)
defer c.unregisterPodSubscriber(candidate)
role := Master
select {
case <-stopCh:
case podLabelErr <- func() (err2 error) {
_, err2 = c.waitForPodLabel(ch, stopCh, &role)
return
}():
}
}()
ch := c.registerPodSubscriber(candidate)
defer c.unregisterPodSubscriber(candidate)
defer close(stopCh)
if err = c.patroni.Switchover(curMaster, candidate.Name); err == nil {
c.logger.Debugf("successfully switched over from %q to %q", curMaster.Name, candidate)
c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeNormal, "Switchover", "Successfully switched over from %q to %q", curMaster.Name, candidate)
if err = <-podLabelErr; err != nil {
_, err = c.waitForPodLabel(ch, stopCh, nil)
if err != nil {
err = fmt.Errorf("could not get master pod label: %v", err)
}
} else {
@@ -1536,14 +1526,6 @@ func (c *Cluster) Switchover(curMaster *v1.Pod, candidate spec.NamespacedName) e
c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeNormal, "Switchover", "Switchover from %q to %q FAILED: %v", curMaster.Name, candidate, err)
}
// signal the role label waiting goroutine to close the shop and go home
close(stopCh)
// wait until the goroutine terminates, since unregisterPodSubscriber
// must be called before the outer return; otherwise we risk subscribing to the same pod twice.
wg.Wait()
// close the label waiting channel no sooner than the waiting goroutine terminates.
close(podLabelErr)
return err
}
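Note: the deleted goroutine/podLabelErr plumbing is replaced by a direct, synchronous wait. A toy sketch of the new ordering (illustrative names only): subscribe and schedule cleanup via defer before triggering the action, then block on the result.

package main

import "fmt"

// switchover registers the subscriber first, so the event cannot be missed,
// and defers the cleanup, so it always runs before the function returns.
func switchover(trigger func(chan<- string)) string {
	ch := make(chan string, 1) // registerPodSubscriber
	defer close(ch)            // unregisterPodSubscriber

	trigger(ch) // c.patroni.Switchover(...)
	return <-ch // waitForPodLabel, now called synchronously
}

func main() {
	fmt.Println(switchover(func(ch chan<- string) { ch <- "master" }))
}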

View File

@@ -940,7 +940,6 @@ func (c *Cluster) generateSpiloPodEnvVars(
func appendEnvVars(envs []v1.EnvVar, appEnv ...v1.EnvVar) []v1.EnvVar {
collectedEnvs := envs
for _, env := range appEnv {
env.Name = strings.ToUpper(env.Name)
if !isEnvVarPresent(collectedEnvs, env.Name) {
collectedEnvs = append(collectedEnvs, env)
}
@@ -950,7 +949,7 @@ func appendEnvVars(envs []v1.EnvVar, appEnv ...v1.EnvVar) []v1.EnvVar {
func isEnvVarPresent(envs []v1.EnvVar, key string) bool {
for _, env := range envs {
if env.Name == key {
if strings.EqualFold(env.Name, key) {
return true
}
}
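Note: with strings.EqualFold the duplicate check is case-insensitive, which is what allows dropping the env.Name = strings.ToUpper(env.Name) normalization above without re-introducing duplicates. Quick illustration:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// a user-supplied "wal_s3_bucket" now counts as already present when
	// WAL_S3_BUCKET is set, without rewriting the user's variable name
	fmt.Println(strings.EqualFold("WAL_S3_BUCKET", "wal_s3_bucket")) // true
	fmt.Println("WAL_S3_BUCKET" == "wal_s3_bucket")                  // false
}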

View File

@@ -504,7 +504,7 @@ func TestGenerateSpiloPodEnvVars(t *testing.T) {
expectedS3BucketConfigMap := []ExpectedValue{
{
envIndex: 17,
envVarConstant: "WAL_S3_BUCKET",
envVarConstant: "wal_s3_bucket",
envVarValue: "global-s3-bucket-configmap",
},
}
@@ -518,7 +518,7 @@ func TestGenerateSpiloPodEnvVars(t *testing.T) {
expectedCustomVariableSecret := []ExpectedValue{
{
envIndex: 16,
envVarConstant: "CUSTOM_VARIABLE",
envVarConstant: "custom_variable",
envVarValueRef: &v1.EnvVarSource{
SecretKeyRef: &v1.SecretKeySelector{
LocalObjectReference: v1.LocalObjectReference{
@@ -532,7 +532,7 @@ func TestGenerateSpiloPodEnvVars(t *testing.T) {
expectedCustomVariableConfigMap := []ExpectedValue{
{
envIndex: 16,
envVarConstant: "CUSTOM_VARIABLE",
envVarConstant: "custom_variable",
envVarValue: "configmap-test",
},
}
@@ -573,14 +573,14 @@ func TestGenerateSpiloPodEnvVars(t *testing.T) {
},
{
envIndex: 20,
envVarConstant: "CLONE_AWS_ENDPOINT",
envVarConstant: "clone_aws_endpoint",
envVarValue: "s3.eu-west-1.amazonaws.com",
},
}
expectedCloneEnvSecret := []ExpectedValue{
{
envIndex: 20,
envVarConstant: "CLONE_AWS_ACCESS_KEY_ID",
envVarConstant: "clone_aws_access_key_id",
envVarValueRef: &v1.EnvVarSource{
SecretKeyRef: &v1.SecretKeySelector{
LocalObjectReference: v1.LocalObjectReference{
@@ -599,7 +599,7 @@ func TestGenerateSpiloPodEnvVars(t *testing.T) {
},
{
envIndex: 20,
envVarConstant: "STANDBY_GOOGLE_APPLICATION_CREDENTIALS",
envVarConstant: "standby_google_application_credentials",
envVarValueRef: &v1.EnvVarSource{
SecretKeyRef: &v1.SecretKeySelector{
LocalObjectReference: v1.LocalObjectReference{

View File

@@ -67,7 +67,7 @@ func (c *Cluster) markRollingUpdateFlagForPod(pod *v1.Pod, msg string) error {
return fmt.Errorf("could not form patch for pod's rolling update flag: %v", err)
}
err = retryutil.Retry(c.OpConfig.PatroniAPICheckInterval, c.OpConfig.PatroniAPICheckTimeout,
err = retryutil.Retry(1*time.Second, 5*time.Second,
func() (bool, error) {
_, err2 := c.KubeClient.Pods(pod.Namespace).Patch(
context.TODO(),
@@ -151,12 +151,13 @@ func (c *Cluster) unregisterPodSubscriber(podName spec.NamespacedName) {
c.podSubscribersMu.Lock()
defer c.podSubscribersMu.Unlock()
if _, ok := c.podSubscribers[podName]; !ok {
ch, ok := c.podSubscribers[podName]
if !ok {
panic("subscriber for pod '" + podName.String() + "' is not found")
}
close(c.podSubscribers[podName])
delete(c.podSubscribers, podName)
close(ch)
}
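Note: reading the channel out of the map, deleting the entry, and closing it now all happen under the write lock; combined with the read lock held across the send in processPodEvent, this rules out the send-on-closed-channel panic from #1876. A stripped-down sketch of the pattern (hypothetical types):

package main

import "sync"

type registry struct {
	mu   sync.RWMutex
	subs map[string]chan int
}

func (r *registry) unregister(name string) {
	r.mu.Lock()
	defer r.mu.Unlock() // lookup, delete, and close share one critical section
	ch, ok := r.subs[name]
	if !ok {
		panic("subscriber for '" + name + "' is not found")
	}
	delete(r.subs, name)
	close(ch) // no sender can hold the read lock while we get here
}

func main() {
	r := &registry{subs: map[string]chan int{"pod-0": make(chan int)}}
	r.unregister("pod-0")
}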
func (c *Cluster) registerPodSubscriber(podName spec.NamespacedName) chan PodEvent {
@@ -399,11 +400,12 @@ func (c *Cluster) getPatroniMemberData(pod *v1.Pod) (patroni.MemberData, error)
}
func (c *Cluster) recreatePod(podName spec.NamespacedName) (*v1.Pod, error) {
stopCh := make(chan struct{})
ch := c.registerPodSubscriber(podName)
defer c.unregisterPodSubscriber(podName)
stopChan := make(chan struct{})
defer close(stopCh)
err := retryutil.Retry(c.OpConfig.PatroniAPICheckInterval, c.OpConfig.PatroniAPICheckTimeout,
err := retryutil.Retry(1*time.Second, 5*time.Second,
func() (bool, error) {
err2 := c.KubeClient.Pods(podName.Namespace).Delete(
context.TODO(),
@@ -421,7 +423,7 @@ func (c *Cluster) recreatePod(podName spec.NamespacedName) (*v1.Pod, error) {
if err := c.waitForPodDeletion(ch); err != nil {
return nil, err
}
pod, err := c.waitForPodLabel(ch, stopChan, nil)
pod, err := c.waitForPodLabel(ch, stopCh, nil)
if err != nil {
return nil, err
}
@@ -446,7 +448,7 @@ func (c *Cluster) recreatePods(pods []v1.Pod, switchoverCandidates []spec.Namesp
continue
}
podName := util.NameFromMeta(pod.ObjectMeta)
podName := util.NameFromMeta(pods[i].ObjectMeta)
newPod, err := c.recreatePod(podName)
if err != nil {
return fmt.Errorf("could not recreate replica pod %q: %v", util.NameFromMeta(pod.ObjectMeta), err)
@@ -520,13 +522,13 @@ func (c *Cluster) getSwitchoverCandidate(master *v1.Pod) (spec.NamespacedName, e
// if sync_standby replicas were found assume synchronous_mode is enabled and ignore other candidates list
if len(syncCandidates) > 0 {
sort.Slice(syncCandidates, func(i, j int) bool {
return util.IntFromIntStr(syncCandidates[i].Lag) < util.IntFromIntStr(syncCandidates[j].Lag)
return syncCandidates[i].Lag < syncCandidates[j].Lag
})
return spec.NamespacedName{Namespace: master.Namespace, Name: syncCandidates[0].Name}, nil
}
if len(candidates) > 0 {
sort.Slice(candidates, func(i, j int) bool {
return util.IntFromIntStr(candidates[i].Lag) < util.IntFromIntStr(candidates[j].Lag)
return candidates[i].Lag < candidates[j].Lag
})
return spec.NamespacedName{Namespace: master.Namespace, Name: candidates[0].Name}, nil
}
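Note: Lag is now a plain uint64 (see the patroni package below), so members with unknown lag, decoded as math.MaxUint64, naturally sort last among candidates. Toy demonstration:

package main

import (
	"fmt"
	"math"
	"sort"
)

func main() {
	lags := []uint64{3000000000, 0, math.MaxUint64} // MaxUint64 = "unknown"
	sort.Slice(lags, func(i, j int) bool { return lags[i] < lags[j] })
	fmt.Println(lags[0]) // least-lagging candidate first: 0
}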

View File

@@ -316,7 +316,7 @@ func (c *Cluster) annotationsSet(annotations map[string]string) map[string]strin
return nil
}
func (c *Cluster) waitForPodLabel(podEvents chan PodEvent, stopChan chan struct{}, role *PostgresRole) (*v1.Pod, error) {
func (c *Cluster) waitForPodLabel(podEvents chan PodEvent, stopCh chan struct{}, role *PostgresRole) (*v1.Pod, error) {
timeout := time.After(c.OpConfig.PodLabelWaitTimeout)
for {
select {
@@ -332,7 +332,7 @@ func (c *Cluster) waitForPodLabel(podEvents chan PodEvent, stopChan chan struct{
}
case <-timeout:
return nil, fmt.Errorf("pod label wait timeout")
case <-stopChan:
case <-stopCh:
return nil, fmt.Errorf("pod label wait cancelled")
}
}
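Note: the loop shares a single time.After deadline across iterations, so a stream of irrelevant pod events cannot extend the total wait. The shape of the loop as a runnable sketch (generic event type, assumed for illustration):

package main

import (
	"fmt"
	"time"
)

func waitForReady(events <-chan string, stopCh <-chan struct{}, d time.Duration) error {
	timeout := time.After(d) // one deadline for the whole wait, not per event
	for {
		select {
		case ev := <-events:
			if ev == "ready" {
				return nil
			}
		case <-timeout:
			return fmt.Errorf("pod label wait timeout")
		case <-stopCh:
			return fmt.Errorf("pod label wait cancelled")
		}
	}
}

func main() {
	events := make(chan string, 2)
	events <- "pending"
	events <- "ready"
	fmt.Println(waitForReady(events, make(chan struct{}), time.Second)) // <nil>
}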

View File

@@ -451,7 +451,7 @@ func (c *Controller) Run(stopCh <-chan struct{}, wg *sync.WaitGroup) {
panic("could not acquire initial list of clusters")
}
wg.Add(5)
wg.Add(5 + util.Bool2Int(c.opConfig.EnablePostgresTeamCRD))
go c.runPodInformer(stopCh, wg)
go c.runPostgresqlInformer(stopCh, wg)
go c.clusterResync(stopCh, wg)

View File

@@ -225,7 +225,7 @@ func (c *Controller) processEvent(event ClusterEvent) {
switch event.EventType {
case EventAdd:
if clusterFound {
lg.Infof("recieved add event for already existing Postgres cluster")
lg.Infof("received add event for already existing Postgres cluster")
return
}

View File

@@ -5,6 +5,7 @@ import (
"encoding/json"
"fmt"
"io/ioutil"
"math"
"net"
"net/http"
"strconv"
@@ -16,7 +17,6 @@ import (
"github.com/sirupsen/logrus"
acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/intstr"
)
const (
@@ -185,11 +185,27 @@ type ClusterMembers struct {
// ClusterMember cluster member data from Patroni API
type ClusterMember struct {
Name string `json:"name"`
Role string `json:"role"`
State string `json:"state"`
Timeline int `json:"timeline"`
Lag intstr.IntOrString `json:"lag,omitempty"`
Name string `json:"name"`
Role string `json:"role"`
State string `json:"state"`
Timeline int `json:"timeline"`
Lag ReplicationLag `json:"lag,omitempty"`
}
type ReplicationLag uint64
// UnmarshalJSON converts member lag (can be int or string) into uint64
func (rl *ReplicationLag) UnmarshalJSON(data []byte) error {
var lagUInt64 uint64
if data[0] == '"' {
*rl = math.MaxUint64
return nil
}
if err := json.Unmarshal(data, &lagUInt64); err != nil {
return err
}
*rl = ReplicationLag(lagUInt64)
return nil
}
// MemberDataPatroni child element
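Note on the decoder: Patroni reports lag as an integer for streaming members and as the string "unknown" otherwise; mapping the string form to math.MaxUint64 pushes such members to the end of any lag-sorted candidate list. Usage sketch:

package main

import (
	"encoding/json"
	"fmt"
	"math"
)

type ReplicationLag uint64

// UnmarshalJSON mirrors the decoder above: any string form becomes
// MaxUint64, numeric forms are parsed as uint64.
func (rl *ReplicationLag) UnmarshalJSON(data []byte) error {
	var lagUInt64 uint64
	if data[0] == '"' {
		*rl = math.MaxUint64
		return nil
	}
	if err := json.Unmarshal(data, &lagUInt64); err != nil {
		return err
	}
	*rl = ReplicationLag(lagUInt64)
	return nil
}

func main() {
	var m struct {
		Lag ReplicationLag `json:"lag"`
	}
	json.Unmarshal([]byte(`{"lag": "unknown"}`), &m)
	fmt.Println(m.Lag == math.MaxUint64) // true
}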

View File

@@ -5,6 +5,7 @@ import (
"errors"
"fmt"
"io/ioutil"
"math"
"net/http"
"reflect"
"testing"
@@ -15,7 +16,6 @@ import (
acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/intstr"
)
var logger = logrus.New().WithField("test", "patroni")
@@ -101,16 +101,27 @@ func TestGetClusterMembers(t *testing.T) {
Role: "sync_standby",
State: "running",
Timeline: 1,
Lag: intstr.IntOrString{IntVal: 0},
Lag: 0,
}, {
Name: "acid-test-cluster-2",
Role: "replica",
State: "running",
Timeline: 1,
Lag: intstr.IntOrString{Type: 1, StrVal: "unknown"},
Lag: math.MaxUint64,
}, {
Name: "acid-test-cluster-3",
Role: "replica",
State: "running",
Timeline: 1,
Lag: 3000000000,
}}
json := `{"members": [{"name": "acid-test-cluster-0", "role": "leader", "state": "running", "api_url": "http://192.168.100.1:8008/patroni", "host": "192.168.100.1", "port": 5432, "timeline": 1}, {"name": "acid-test-cluster-1", "role": "sync_standby", "state": "running", "api_url": "http://192.168.100.2:8008/patroni", "host": "192.168.100.2", "port": 5432, "timeline": 1, "lag": 0}, {"name": "acid-test-cluster-2", "role": "replica", "state": "running", "api_url": "http://192.168.100.3:8008/patroni", "host": "192.168.100.3", "port": 5432, "timeline": 1, "lag": "unknown"}]}`
json := `{"members": [
{"name": "acid-test-cluster-0", "role": "leader", "state": "running", "api_url": "http://192.168.100.1:8008/patroni", "host": "192.168.100.1", "port": 5432, "timeline": 1},
{"name": "acid-test-cluster-1", "role": "sync_standby", "state": "running", "api_url": "http://192.168.100.2:8008/patroni", "host": "192.168.100.2", "port": 5432, "timeline": 1, "lag": 0},
{"name": "acid-test-cluster-2", "role": "replica", "state": "running", "api_url": "http://192.168.100.3:8008/patroni", "host": "192.168.100.3", "port": 5432, "timeline": 1, "lag": "unknown"},
{"name": "acid-test-cluster-3", "role": "replica", "state": "running", "api_url": "http://192.168.100.3:8008/patroni", "host": "192.168.100.3", "port": 5432, "timeline": 1, "lag": 3000000000}
]}`
r := ioutil.NopCloser(bytes.NewReader([]byte(json)))
response := http.Response{

View File

@@ -8,7 +8,6 @@ import (
"encoding/base64"
"encoding/hex"
"fmt"
"math"
"math/big"
"math/rand"
"reflect"
@@ -324,18 +323,18 @@ func testNil(values ...*int32) bool {
return false
}
// Convert int to IntOrString type
// ToIntStr converts int to IntOrString type
func ToIntStr(val int) *intstr.IntOrString {
b := intstr.FromInt(val)
return &b
}
// Get int from IntOrString and return max int if string
func IntFromIntStr(intOrStr intstr.IntOrString) int {
if intOrStr.Type == 1 {
return math.MaxInt
// Bool2Int converts bool to int
func Bool2Int(flag bool) int {
if flag {
return 1
}
return intOrStr.IntValue()
return 0
}
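Note: Bool2Int lets the WaitGroup count include the optional PostgresTeam informer in a single expression, as used in controller.Run above. Trivial usage:

package main

import "fmt"

func Bool2Int(flag bool) int {
	if flag {
		return 1
	}
	return 0
}

func main() {
	enablePostgresTeamCRD := true
	fmt.Println(5 + Bool2Int(enablePostgresTeamCRD)) // 6 goroutines to wait for
}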
// MaxInt32 : Return maximum of two integers provided via pointers. If one value

View File

@@ -51,7 +51,23 @@ postgresqls
th(style='width: 140px') CPU
th(style='width: 130px') Memory
th(style='width: 100px') Size
th(style='width: 120px') Cost/Month
th(style='width: 100px') IOPS
th(style='width: 100px') Throughput
th(style='width: 120px')
.tooltip(style='width: 120px')
| Cost/Month
.tooltiptext
strong Cost = MAX(CPU, Memory) + rest
br
| 1 CPU core : 42.09$
br
| 1GB memory: 10.5225$
br
| 1GB volume: 0.0952$
br
| IOPS (-3000 baseline): 0.006$
br
| Throughput (-125 baseline): 0.0476$
th(style='width: 120px')
tbody
@@ -69,6 +85,8 @@ postgresqls
td { cpu } / { cpu_limit }
td { memory } / { memory_limit }
td { volume_size }
td { iops }
td { throughput }
td { calcCosts(nodes, cpu, memory, volume_size, iops, throughput) }$
td
@@ -132,7 +150,23 @@ postgresqls
th(style='width: 140px') CPU
th(style='width: 130px') Memory
th(style='width: 100px') Size
th(style='width: 120px') Cost/Month
th(style='width: 100px') IOPS
th(style='width: 100px') Throughput
th(style='width: 120px')
.tooltip(style='width: 120px')
| Cost/Month
.tooltiptext
strong Cost = MAX(CPU, Memory) + rest
br
| 1 CPU core : 42.09$
br
| 1GB memory: 10.5225$
br
| 1GB volume: 0.0952$
br
| IOPS (-3000 baseline): 0.006$
br
| Throughput (-125 baseline): 0.0476$
th(style='width: 120px')
tbody
@@ -152,6 +186,8 @@ postgresqls
td { cpu } / { cpu_limit }
td { memory } / { memory_limit }
td { volume_size }
td { iops }
td { throughput }
td { calcCosts(nodes, cpu, memory, volume_size, iops, throughput) }$
td
@@ -229,28 +265,44 @@ postgresqls
const calcCosts = this.calcCosts = (nodes, cpu, memory, disk, iops, throughput) => {
podcount = Math.max(nodes, opts.config.min_pods)
corecost = toCores(cpu) * opts.config.cost_core
memorycost = toMemory(memory) * opts.config.cost_memory
corecost = toCores(cpu) * opts.config.cost_core * 30.5 * 24
memorycost = toMemory(memory) * opts.config.cost_memory * 30.5 * 24
diskcost = toDisk(disk) * opts.config.cost_ebs
iopscost = 0
if (iops !== undefined && iops > 3000) {
iopscost = (iops - 3000) * opts.config.cost_iops
if (iops !== undefined && iops > opts.config.free_iops) {
if (iops > opts.config.limit_iops) {
iops = opts.config.limit_iops
}
iopscost = (iops - opts.config.free_iops) * opts.config.cost_iops
}
throughputcost = 0
if (throughput !== undefined && throughput > 125) {
throughputcost = (throughput - 125) * opts.config.cost_throughput
if (throughput !== undefined && throughput > opts.config.free_throughput) {
if (throughput > opts.config.limit_throughput) {
throughput = opts.config.limit_throughput
}
throughputcost = (throughput - opts.config.free_throughput) * opts.config.cost_throughput
}
costs = podcount * (corecost + memorycost + diskcost + iopscost + throughputcost)
costs = podcount * (Math.max(corecost, memorycost) + diskcost + iopscost + throughputcost)
return costs.toFixed(2)
}
const toDisk = this.toDisk = value => {
if(value.endsWith("Gi")) {
if(value.endsWith("Mi")) {
value = value.substring(0, value.length-2)
value = Number(value) / 1000.
return value
}
else if(value.endsWith("Gi")) {
value = value.substring(0, value.length-2)
value = Number(value)
return value
}
else if(value.endsWith("Ti")) {
value = value.substring(0, value.length-2)
value = Number(value) * 1000
return value
}
return value
}
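Note: the per-pod cost is now MAX(compute, memory) plus volume, IOPS, and throughput surcharges, with the hourly core/memory rates scaled to a 30.5-day month in the UI instead of in the Python config. A hedged Go transcription of the logic, using the defaults from the deployment manifest below (names are illustrative, not operator code):

package main

import (
	"fmt"
	"math"
)

func calcCosts(nodes, minPods int, cores, memoryGB, diskGB, iops, throughput float64) float64 {
	const (
		hoursPerMonth  = 30.5 * 24
		costCore       = 0.0575   // $ per core-hour  -> 42.09$ per core-month
		costMemory     = 0.014375 // $ per GB-hour    -> 10.5225$ per GB-month
		costEBS        = 0.0952   // $ per GB-month
		costIOPS       = 0.006    // $ per IOPS-month above the free baseline
		costThroughput = 0.0476   // $ per MB/s-month above the free baseline
	)
	const (
		freeIOPS, limitIOPS             = 3000.0, 16000.0
		freeThroughput, limitThroughput = 125.0, 1000.0
	)

	podCount := math.Max(float64(nodes), float64(minPods))
	coreCost := cores * costCore * hoursPerMonth
	memoryCost := memoryGB * costMemory * hoursPerMonth

	iopsCost := 0.0
	if iops > freeIOPS {
		iopsCost = (math.Min(iops, limitIOPS) - freeIOPS) * costIOPS // clamp at the limit
	}
	throughputCost := 0.0
	if throughput > freeThroughput {
		throughputCost = (math.Min(throughput, limitThroughput) - freeThroughput) * costThroughput
	}

	// only the larger of compute and memory is billed: MAX(CPU, Memory) + rest
	return podCount * (math.Max(coreCost, memoryCost) + diskGB*costEBS + iopsCost + throughputCost)
}

func main() {
	// 2 pods, 1 core, 4GB memory, 100GB volume at the free IOPS/throughput tier:
	// MAX(42.09, 42.09) + 9.52 = 51.61 per pod
	fmt.Printf("%.2f$\n", calcCosts(2, 2, 1, 4, 100, 3000, 125)) // 103.22$
}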

View File

@@ -67,6 +67,10 @@ spec:
"cost_throughput": 0.0476,
"cost_core": 0.0575,
"cost_memory": 0.014375,
"free_iops": 3000,
"free_throughput": 125,
"limit_iops": 16000,
"limit_throughput": 1000,
"postgresql_versions": [
"14",
"13",

View File

@@ -82,12 +82,16 @@ OPERATOR_CLUSTER_NAME_LABEL = getenv('OPERATOR_CLUSTER_NAME_LABEL', 'cluster-nam
OPERATOR_UI_CONFIG = getenv('OPERATOR_UI_CONFIG', '{}')
OPERATOR_UI_MAINTENANCE_CHECK = getenv('OPERATOR_UI_MAINTENANCE_CHECK', '{}')
READ_ONLY_MODE = getenv('READ_ONLY_MODE', False) in [True, 'true']
RESOURCES_VISIBLE = getenv('RESOURCES_VISIBLE', True)
SPILO_S3_BACKUP_PREFIX = getenv('SPILO_S3_BACKUP_PREFIX', 'spilo/')
SUPERUSER_TEAM = getenv('SUPERUSER_TEAM', 'acid')
TARGET_NAMESPACE = getenv('TARGET_NAMESPACE')
GOOGLE_ANALYTICS = getenv('GOOGLE_ANALYTICS', False)
MIN_PODS= getenv('MIN_PODS', 2)
RESOURCES_VISIBLE = getenv('RESOURCES_VISIBLE', True)
CUSTOM_MESSAGE_RED = getenv('CUSTOM_MESSAGE_RED', '')
APPLICATION_DEPLOYMENT_DOCS = getenv('APPLICATION_DEPLOYMENT_DOCS', '')
CONNECTION_DOCS = getenv('CONNECTION_DOCS', '')
# storage pricing, i.e. https://aws.amazon.com/ebs/pricing/ (e.g. Europe - Frankfurt)
COST_EBS = float(getenv('COST_EBS', 0.0952)) # GB per month
@@ -95,8 +99,19 @@ COST_IOPS = float(getenv('COST_IOPS', 0.006)) # IOPS per month above 3000 basel
COST_THROUGHPUT = float(getenv('COST_THROUGHPUT', 0.0476)) # MB/s per month above 125 MB/s baseline
# compute costs, i.e. https://www.ec2instances.info/?region=eu-central-1&selected=m5.2xlarge
COST_CORE = 30.5 * 24 * float(getenv('COST_CORE', 0.0575)) # Core per hour m5.2xlarge / 8.
COST_MEMORY = 30.5 * 24 * float(getenv('COST_MEMORY', 0.014375)) # Memory GB m5.2xlarge / 32.
COST_CORE = float(getenv('COST_CORE', 0.0575)) # Core per hour m5.2xlarge / 8.
COST_MEMORY = float(getenv('COST_MEMORY', 0.014375)) # Memory GB m5.2xlarge / 32.
# maximum and limitation of IOPS and throughput
FREE_IOPS = float(getenv('FREE_IOPS', 3000))
LIMIT_IOPS = float(getenv('LIMIT_IOPS', 16000))
FREE_THROUGHPUT = float(getenv('FREE_THROUGHPUT', 125))
LIMIT_THROUGHPUT = float(getenv('LIMIT_THROUGHPUT', 1000))
# get the default value of core and memory
DEFAULT_MEMORY = getenv('DEFAULT_MEMORY', '300Mi')
DEFAULT_MEMORY_LIMIT = getenv('DEFAULT_MEMORY_LIMIT', '300Mi')
DEFAULT_CPU = getenv('DEFAULT_CPU', '10m')
DEFAULT_CPU_LIMIT = getenv('DEFAULT_CPU_LIMIT', '300m')
WALE_S3_ENDPOINT = getenv(
'WALE_S3_ENDPOINT',
@@ -304,29 +319,34 @@ DEFAULT_UI_CONFIG = {
'nat_gateways_visible': True,
'users_visible': True,
'databases_visible': True,
'resources_visible': True,
'postgresql_versions': ['11','12','13'],
'resources_visible': RESOURCES_VISIBLE,
'postgresql_versions': ['11','12','13','14'],
'dns_format_string': '{0}.{1}.{2}',
'pgui_link': '',
'static_network_whitelist': {},
'read_only_mode': READ_ONLY_MODE,
'superuser_team': SUPERUSER_TEAM,
'target_namespace': TARGET_NAMESPACE,
'connection_docs': CONNECTION_DOCS,
'application_deployment_docs': APPLICATION_DEPLOYMENT_DOCS,
'cost_ebs': COST_EBS,
'cost_iops': COST_IOPS,
'cost_throughput': COST_THROUGHPUT,
'cost_core': COST_CORE,
'cost_memory': COST_MEMORY,
'min_pods': MIN_PODS
'min_pods': MIN_PODS,
'free_iops': FREE_IOPS,
'free_throughput': FREE_THROUGHPUT,
'limit_iops': LIMIT_IOPS,
'limit_throughput': LIMIT_THROUGHPUT
}
@app.route('/config')
@authorize
def get_config():
config = loads(OPERATOR_UI_CONFIG) or DEFAULT_UI_CONFIG
config['read_only_mode'] = READ_ONLY_MODE
config['resources_visible'] = RESOURCES_VISIBLE
config['superuser_team'] = SUPERUSER_TEAM
config['target_namespace'] = TARGET_NAMESPACE
config['min_pods'] = MIN_PODS
config = DEFAULT_UI_CONFIG.copy()
config.update(loads(OPERATOR_UI_CONFIG))
config['namespaces'] = (
[TARGET_NAMESPACE]
@@ -961,11 +981,13 @@ def get_operator_get_logs(worker: int):
@app.route('/operator/clusters/<namespace>/<cluster>/logs')
@authorize
def get_operator_get_logs_per_cluster(namespace: str, cluster: str):
team, cluster_name = cluster.split('-', 1)
# team id might contain hyphens, try to find correct team name
user_teams = get_teams_for_user(session.get('user_name', ''))
for user_team in user_teams:
if cluster.find(user_team) == 0:
if cluster.find(user_team + '-') == 0:
team = cluster[:len(user_team)]
cluster_name = cluster[len(user_team)+1:]
cluster_name = cluster[len(user_team + '-'):]
break
return proxy_operator(f'/clusters/{team}/{namespace}/{cluster_name}/logs/')
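Note: appending '-' to the team prefix prevents a team like "acid" from claiming clusters of a team like "acid2". The equivalent check, sketched in Go:

package main

import (
	"fmt"
	"strings"
)

func main() {
	cluster := "acid2-test-cluster"
	for _, team := range []string{"acid", "acid2"} {
		if strings.HasPrefix(cluster, team+"-") { // cluster.find(user_team + '-') == 0
			fmt.Println(team, "->", strings.TrimPrefix(cluster, team+"-"))
		}
	}
	// prints only: acid2 -> test-cluster
}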

View File

@@ -64,3 +64,56 @@ label {
td {
vertical-align: middle !important;
}
.tooltip {
position: relative;
display: inline-block;
opacity: 1;
font-size: 14px;
font-weight: bold;
}
.tooltip:after {
content: '?';
display: inline-block;
font-family: sans-serif;
font-weight: bold;
text-align: center;
width: 16px;
height: 16px;
font-size: 12px;
line-height: 16px;
border-radius: 12px;
padding: 0px;
color: white;
background: black;
border: 1px solid black;
}
.tooltip .tooltiptext {
visibility: hidden;
width: 250px;
background-color: white;
color: #000;
text-align: justify;
border-radius: 6px;
padding: 10px 10px;
position: absolute;
z-index: 1;
bottom: 150%;
left: 50%;
margin-left: -120px;
border: 1px solid black;
font-weight: normal;
}
.tooltip .tooltiptext::after {
content: "";
position: absolute;
top: 100%;
left: 50%;
margin-left: -5px;
border-width: 5px;
border-style: solid;
border-color: black transparent transparent transparent;
}
.tooltip:hover .tooltiptext {
visibility: visible;
}