[UI] reflect new backup paths and cluster status (#1260)

* [UI] reflect new backup paths and cluster status
Felix Kunde 2020-12-16 15:23:06 +01:00 committed by GitHub
parent 77252e316c
commit 636ba9b846
4 changed files with 34 additions and 19 deletions


@@ -74,11 +74,13 @@ postgresql
.alert.alert-info(if='{ !progress.requestStatus }') PostgreSQL cluster requested
.alert.alert-danger(if='{ progress.requestStatus !== "OK" }') Create request failed
.alert.alert-success(if='{ progress.requestStatus === "OK" }') Create request successful ({ new Date(progress.createdTimestamp).toLocaleString() })
.alert.alert-success(if='{ progress.requestStatus === "OK" }') Manifest creation successful ({ new Date(progress.createdTimestamp).toLocaleString() })
.alert.alert-info(if='{ !progress.postgresql }') PostgreSQL cluster manifest pending
.alert.alert-success(if='{ progress.postgresql }') PostgreSQL cluster manifest created
+.alert.alert-danger(if='{progress.status && progress.status.PostgresClusterStatus == "CreateFailed"}') Cluster creation failed: Check events and cluster name!
.alert.alert-info(if='{ !progress.statefulSet }') StatefulSet pending
.alert.alert-success(if='{ progress.statefulSet }') StatefulSet created
@@ -127,6 +129,8 @@ postgresql
this.progress.pooler = false
this.progress.postgresql = true
this.progress.postgresqlManifest = data
+// copy status as we delete later for edit
+this.progress.status = data.status
this.progress.createdTimestamp = data.metadata.creationTimestamp
this.progress.poolerEnabled = data.spec.enableConnectionPooler
this.uid = this.progress.postgresqlManifest.metadata.uid
@@ -203,6 +207,7 @@ postgresql
delete manifest.metadata.annotations[last_applied]
}
delete manifest.metadata.managedFields
delete manifest.metadata.creationTimestamp
delete manifest.metadata.deletionGracePeriodSeconds
delete manifest.metadata.deletionTimestamp

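The edit flow above copies the status and creation timestamp out of the fetched manifest before server-managed metadata is stripped for re-submission. A minimal Python sketch of that clean-up idea, assuming the usual last-applied annotation key (the real logic lives in the tag's JavaScript, so this is only illustrative):

# Illustrative sketch only: strip fields the API server owns before a manifest is edited.
# LAST_APPLIED is an assumed annotation key, mirroring `last_applied` in the tag code above.
LAST_APPLIED = 'kubectl.kubernetes.io/last-applied-configuration'

def strip_server_managed_fields(manifest: dict) -> dict:
    metadata = manifest.get('metadata', {})
    metadata.get('annotations', {}).pop(LAST_APPLIED, None)
    for field in ('managedFields', 'creationTimestamp',
                  'deletionGracePeriodSeconds', 'deletionTimestamp'):
        metadata.pop(field, None)
    return manifest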

@@ -63,10 +63,8 @@ postgresqls
td(style='white-space: pre')
| { namespace }
td
-a(
-href='/#/status/{ cluster_path(this) }'
-)
-| { name }
+a(href='/#/status/{ cluster_path(this) }') { name }
+btn.btn-danger(if='{status.PostgresClusterStatus == "CreateFailed"}') Create Failed
td { nodes }
td { cpu } / { cpu_limit }
td { memory } / { memory_limit }
@@ -230,7 +228,7 @@ postgresqls
)
const calcCosts = this.calcCosts = (nodes, cpu, memory, disk) => {
-costs = nodes * (toCores(cpu) * opts.config.cost_core + toMemory(memory) * opts.config.cost_memory + toDisk(disk) * opts.config.cost_ebs)
+costs = Math.max(nodes, opts.config.min_pods) * (toCores(cpu) * opts.config.cost_core + toMemory(memory) * opts.config.cost_memory + toDisk(disk) * opts.config.cost_ebs)
return costs.toFixed(2)
}

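The calcCosts change above means the cost estimate never assumes fewer than min_pods pods, even for a single-node cluster. A hedged Python restatement with invented prices (the real values come from the UI config):

# Illustrative only: the min_pods floor applied to the per-cluster cost estimate.
# The prices below are invented for the example, not the UI defaults.
cost_core, cost_memory, cost_ebs = 5.0, 1.0, 0.1   # $ per core / GB RAM / GB disk per month
min_pods = 2

def calc_costs(nodes, cores, memory_gb, disk_gb):
    pods = max(nodes, min_pods)                     # bill at least min_pods pods
    return round(pods * (cores * cost_core + memory_gb * cost_memory + disk_gb * cost_ebs), 2)

# A single-node cluster with 1 core, 2 GB RAM and 10 GB disk is now priced as 2 pods:
# 2 * (1*5.0 + 2*1.0 + 10*0.1) = 16.0
print(calc_costs(1, 1, 2, 10))   # 16.0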

@@ -87,6 +87,7 @@ SPILO_S3_BACKUP_PREFIX = getenv('SPILO_S3_BACKUP_PREFIX', 'spilo/')
SUPERUSER_TEAM = getenv('SUPERUSER_TEAM', 'acid')
TARGET_NAMESPACE = getenv('TARGET_NAMESPACE')
GOOGLE_ANALYTICS = getenv('GOOGLE_ANALYTICS', False)
+MIN_PODS = getenv('MIN_PODS', 2)
# storage pricing, i.e. https://aws.amazon.com/ebs/pricing/
COST_EBS = float(getenv('COST_EBS', 0.119)) # GB per month
@@ -308,7 +309,8 @@ DEFAULT_UI_CONFIG = {
'static_network_whitelist': {},
'cost_ebs': COST_EBS,
'cost_core': COST_CORE,
-'cost_memory': COST_MEMORY
+'cost_memory': COST_MEMORY,
+'min_pods': MIN_PODS
}
@@ -320,6 +322,7 @@ def get_config():
config['resources_visible'] = RESOURCES_VISIBLE
config['superuser_team'] = SUPERUSER_TEAM
config['target_namespace'] = TARGET_NAMESPACE
+config['min_pods'] = MIN_PODS
config['namespaces'] = (
[TARGET_NAMESPACE]
@@ -493,6 +496,7 @@ def get_postgresqls():
'uid': uid,
'namespaced_name': namespace + '/' + name,
'full_name': namespace + '/' + name + ('/' + uid if uid else ''),
+'status': status,
}
for cluster in these(
read_postgresqls(
@@ -506,6 +510,7 @@ def get_postgresqls():
'items',
)
for spec in [cluster.get('spec', {}) if cluster.get('spec', {}) is not None else {"error": "Invalid spec in manifest"}]
+for status in [cluster.get('status', {})]
for metadata in [cluster['metadata']]
for namespace in [metadata['namespace']]
for name in [metadata['name']]

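On the backend, MIN_PODS is read from the environment, handed to the frontend through the config endpoint, and each cluster entry now carries its status object so the list view can render the failure badge. A small sketch of the resulting payload shape (field names follow the diff; the concrete values are invented):

from os import getenv

# Mirrors the new setting: defaults to 2 when the env var is unset.
MIN_PODS = getenv('MIN_PODS', 2)

ui_config = {
    'min_pods': MIN_PODS,   # read by the calcCosts helper in the cluster list view
}

# One element of the cluster list, now including the status sub-object
# that drives the "Create Failed" badge (values invented for illustration).
cluster_entry = {
    'namespaced_name': 'default/acid-minimal-cluster',
    'status': {'PostgresClusterStatus': 'CreateFailed'},
}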

@@ -302,6 +302,7 @@ def read_versions(
if uid == 'wal' or defaulting(lambda: UUID(uid))
]
+BACKUP_VERSION_PREFIXES = ['','9.5/', '9.6/', '10/','11/', '12/', '13/']
def read_basebackups(
pg_cluster,
@@ -314,18 +315,24 @@ def read_basebackups(
):
environ['WALE_S3_ENDPOINT'] = s3_endpoint
suffix = '' if uid == 'base' else '/' + uid
-return [
-{
-key: value
-for key, value in basebackup.__dict__.items()
-if isinstance(value, str) or isinstance(value, int)
-}
-for basebackup in Attrs.call(
-f=configure_backup_cxt,
-aws_instance_profile=use_aws_instance_profile,
-s3_prefix=f's3://{bucket}/{prefix}{pg_cluster}{suffix}/wal/',
-)._backup_list(detail=True)._backup_list(prefix=f"{prefix}{pg_cluster}{suffix}/wal/")
-]
+backups = []
+for vp in BACKUP_VERSION_PREFIXES:
+backups = backups + [
+{
+key: value
+for key, value in basebackup.__dict__.items()
+if isinstance(value, str) or isinstance(value, int)
+}
+for basebackup in Attrs.call(
+f=configure_backup_cxt,
+aws_instance_profile=use_aws_instance_profile,
+s3_prefix=f's3://{bucket}/{prefix}{pg_cluster}{suffix}/wal/{vp}',
+)._backup_list(detail=True)
+]
+return backups
def parse_time(s: str):
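The new BACKUP_VERSION_PREFIXES list accounts for basebackups being laid out under a per-major-version sub-path (e.g. .../wal/13/), so the UI now probes every known prefix and concatenates the listings instead of querying a single path. A minimal sketch of the prefixes scanned for one cluster (bucket, prefix, cluster name and uid are invented; the version list matches the diff):

# Sketch: the S3 prefixes scanned for one cluster after this change.
BACKUP_VERSION_PREFIXES = ['', '9.5/', '9.6/', '10/', '11/', '12/', '13/']

bucket, prefix = 'example-wal-bucket', 'spilo/'
pg_cluster, uid = 'acid-minimal-cluster', 'base'
suffix = '' if uid == 'base' else '/' + uid

for vp in BACKUP_VERSION_PREFIXES:
    print(f's3://{bucket}/{prefix}{pg_cluster}{suffix}/wal/{vp}')
# s3://example-wal-bucket/spilo/acid-minimal-cluster/wal/
# s3://example-wal-bucket/spilo/acid-minimal-cluster/wal/9.5/
# ... one listing per known major version; read_basebackups concatenates the results.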