configurable container capabilities (#1336)
* configurable container capabilities
* revert change on TestTLS
* fix e2e test
* minor fix
parent d488ae10a0
commit 12ad8c91fa
@@ -130,6 +130,10 @@ spec:
             kubernetes:
               type: object
               properties:
+                additional_pod_capabilities:
+                  type: array
+                  items:
+                    type: string
                 cluster_domain:
                   type: string
                   default: "cluster.local"
@@ -59,6 +59,10 @@ configUsers:
   super_username: postgres

 configKubernetes:
+  # list of additional capabilities for postgres container
+  # additional_pod_capabilities:
+  # - "SYS_NICE"
+
   # default DNS domain of K8s cluster where operator is running
   cluster_domain: cluster.local
   # additional labels assigned to the cluster objects
@@ -61,6 +61,9 @@ configUsers:
   super_username: postgres

 configKubernetes:
+  # list of additional capabilities for postgres container
+  # additional_pod_capabilities: "SYS_NICE"
+
   # default DNS domain of K8s cluster where operator is running
   cluster_domain: cluster.local
   # additional labels assigned to the cluster objects
@@ -351,6 +351,12 @@ configuration they are grouped under the `kubernetes` key.
   used for AWS volume resizing and not required if you don't need that
   capability. The default is `false`.

+* **additional_pod_capabilities**
+  list of additional capabilities to be added to the postgres container's
+  SecurityContext (e.g. SYS_NICE). Make sure that the PodSecurityPolicy
+  allows the capabilities listed here; otherwise, the container will not
+  start. The default is empty.
+
 * **master_pod_move_timeout**
   The period of time to wait for the success of migration of master pods from
   an unschedulable node. The migration includes Patroni switchovers to
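For reference, a minimal sketch of how the new option can be set in both configuration formats (the capability values are examples, not defaults): in the CRD-based OperatorConfiguration it is a list under the `kubernetes` section, while in the ConfigMap-based configuration it is a single comma-separated string, as also exercised by the e2e test further down in this diff.

```yaml
# CRD-based configuration (OperatorConfiguration, kubernetes section)
configuration:
  kubernetes:
    additional_pod_capabilities:
      - "SYS_NICE"
      - "CHOWN"
---
# ConfigMap-based configuration: one comma-separated string under data
data:
  additional_pod_capabilities: "SYS_NICE,CHOWN"
```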
@@ -182,6 +182,10 @@ class K8s:
         pods = self.api.core_v1.list_namespaced_pod(namespace, label_selector=labels).items
         return len(list(filter(lambda x: x.status.phase == 'Running', pods)))

+    def count_pods_with_container_capabilities(self, capabilities, labels, namespace='default'):
+        pods = self.api.core_v1.list_namespaced_pod(namespace, label_selector=labels).items
+        return len(list(filter(lambda x: x.spec.containers[0].security_context.capabilities.add == capabilities, pods)))
+
     def wait_for_pod_failover(self, failover_targets, labels, namespace='default'):
         pod_phase = 'Failing over'
         new_pod_node = ''
@@ -433,6 +437,10 @@ class K8sBase:
         pods = self.api.core_v1.list_namespaced_pod(namespace, label_selector=labels).items
         return len(list(filter(lambda x: x.status.phase == 'Running', pods)))

+    def count_pods_with_container_capabilities(self, capabilities, labels, namespace='default'):
+        pods = self.api.core_v1.list_namespaced_pod(namespace, label_selector=labels).items
+        return len(list(filter(lambda x: x.spec.containers[0].security_context.capabilities.add == capabilities, pods)))
+
     def wait_for_pod_failover(self, failover_targets, labels, namespace='default'):
         pod_phase = 'Failing over'
         new_pod_node = ''
@@ -155,6 +155,25 @@ class EndToEndTestCase(unittest.TestCase):
             print('Operator log: {}'.format(k8s.get_operator_log()))
             raise

+    @timeout_decorator.timeout(TEST_TIMEOUT_SEC)
+    def test_additional_pod_capabilities(self):
+        '''
+           Extend postgres container capabilities
+        '''
+        cluster_label = 'application=spilo,cluster-name=acid-minimal-cluster'
+        capabilities = ["SYS_NICE", "CHOWN"]
+        patch_capabilities = {
+            "data": {
+                "additional_pod_capabilities": ','.join(capabilities),
+            },
+        }
+        self.k8s.update_config(patch_capabilities)
+        self.eventuallyEqual(lambda: self.k8s.get_operator_state(), {"0": "idle"},
+                             "Operator does not get in sync")
+
+        self.eventuallyEqual(lambda: self.k8s.count_pods_with_container_capabilities(capabilities, cluster_label),
+                             2, "Container capabilities not updated")
+
     @timeout_decorator.timeout(TEST_TIMEOUT_SEC)
     def test_overwrite_pooler_deployment(self):
         self.k8s.create_with_kubectl("manifests/minimal-fake-pooler-deployment.yaml")
@@ -3,6 +3,7 @@ kind: ConfigMap
 metadata:
   name: postgres-operator
 data:
+  # additional_pod_capabilities: "SYS_NICE"
   # additional_secret_mount: "some-secret-name"
   # additional_secret_mount_path: "/some/dir"
   api_port: "8080"
@@ -126,6 +126,10 @@ spec:
             kubernetes:
               type: object
               properties:
+                additional_pod_capabilities:
+                  type: array
+                  items:
+                    type: string
                 cluster_domain:
                   type: string
                   default: "cluster.local"
@@ -26,6 +26,8 @@ configuration:
     replication_username: standby
     super_username: postgres
   kubernetes:
+    # additional_pod_capabilities:
+    # - "SYS_NICE"
     cluster_domain: cluster.local
     cluster_labels:
       application: spilo
@@ -968,6 +968,14 @@ var OperatorConfigCRDResourceValidation = apiextv1.CustomResourceValidation{
 			"kubernetes": {
 				Type: "object",
 				Properties: map[string]apiextv1.JSONSchemaProps{
+					"additional_pod_capabilities": {
+						Type: "array",
+						Items: &apiextv1.JSONSchemaPropsOrArray{
+							Schema: &apiextv1.JSONSchemaProps{
+								Type: "string",
+							},
+						},
+					},
 					"cluster_domain": {
 						Type: "string",
 					},
@@ -52,6 +52,7 @@ type KubernetesMetaConfiguration struct {
 	SpiloRunAsUser *int64 `json:"spilo_runasuser,omitempty"`
 	SpiloRunAsGroup *int64 `json:"spilo_runasgroup,omitempty"`
 	SpiloFSGroup *int64 `json:"spilo_fsgroup,omitempty"`
+	AdditionalPodCapabilities []string `json:"additional_pod_capabilities,omitempty"`
 	WatchedNamespace string `json:"watched_namespace,omitempty"`
 	PDBNameFormat config.StringTemplate `json:"pdb_name_format,omitempty"`
 	EnablePodDisruptionBudget *bool `json:"enable_pod_disruption_budget,omitempty"`
@@ -162,6 +162,11 @@ func (in *KubernetesMetaConfiguration) DeepCopyInto(out *KubernetesMetaConfigura
 		*out = new(int64)
 		**out = **in
 	}
+	if in.AdditionalPodCapabilities != nil {
+		in, out := &in.AdditionalPodCapabilities, &out.AdditionalPodCapabilities
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
 	if in.EnablePodDisruptionBudget != nil {
 		in, out := &in.EnablePodDisruptionBudget, &out.EnablePodDisruptionBudget
 		*out = new(bool)
@@ -320,6 +320,19 @@ func getLocalAndBoostrapPostgreSQLParameters(parameters map[string]string) (loca
 	return
 }
+
+func generateCapabilities(capabilities []string) v1.Capabilities {
+	if len(capabilities) > 0 {
+		additionalCapabilities := []v1.Capability{}
+		for _, capability := range capabilities {
+			additionalCapabilities = append(additionalCapabilities, v1.Capability(strings.ToUpper(capability)))
+		}
+		return v1.Capabilities{
+			Add: additionalCapabilities,
+		}
+	}
+	return v1.Capabilities{}
+}

 func nodeAffinity(nodeReadinessLabel map[string]string, nodeAffinity *v1.NodeAffinity) *v1.Affinity {
 	if len(nodeReadinessLabel) == 0 && nodeAffinity == nil {
 		return nil
@@ -430,6 +443,7 @@ func generateContainer(
 	envVars []v1.EnvVar,
 	volumeMounts []v1.VolumeMount,
 	privilegedMode bool,
+	additionalPodCapabilities v1.Capabilities,
 ) *v1.Container {
 	return &v1.Container{
 		Name: name,
@@ -456,6 +470,7 @@ func generateContainer(
 			AllowPrivilegeEscalation: &privilegedMode,
 			Privileged: &privilegedMode,
 			ReadOnlyRootFilesystem: util.False(),
+			Capabilities: &additionalPodCapabilities,
 		},
 	}
 }
@@ -1148,6 +1163,7 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
 		deduplicateEnvVars(spiloEnvVars, c.containerName(), c.logger),
 		volumeMounts,
 		c.OpConfig.Resources.SpiloPrivileged,
+		generateCapabilities(c.OpConfig.AdditionalPodCapabilities),
 	)

 	// generate container specs for sidecars specified in the cluster manifest
@@ -1901,6 +1917,7 @@ func (c *Cluster) generateLogicalBackupJob() (*batchv1beta1.CronJob, error) {
 		envVars,
 		[]v1.VolumeMount{},
 		c.OpConfig.SpiloPrivileged, // use same value as for normal DB pods
+		v1.Capabilities{},
 	)

 	labels := map[string]string{
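With these changes, the configured capabilities end up only in the Spilo/postgres container's SecurityContext, while the logical backup job explicitly passes an empty capability set. Below is a rough sketch of the resulting container spec for `additional_pod_capabilities: "SYS_NICE"`, assuming the default `spilo_privileged: false`; the container name and the other securityContext values are illustrative of the defaults above, not taken verbatim from a generated manifest.

```yaml
# Sketch of the postgres container in the generated StatefulSet pod template.
containers:
  - name: postgres                      # illustrative name
    securityContext:
      allowPrivilegeEscalation: false   # follows spilo_privileged
      privileged: false
      readOnlyRootFilesystem: false
      capabilities:
        add:
          - SYS_NICE                    # from additional_pod_capabilities
```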
@@ -1489,3 +1489,42 @@ func TestGenerateService(t *testing.T) {
 	assert.Equal(t, v1.ServiceExternalTrafficPolicyTypeLocal, service.Spec.ExternalTrafficPolicy)

 }
+
+func TestGenerateCapabilities(t *testing.T) {
+
+	testName := "TestGenerateCapabilities"
+	tests := []struct {
+		subTest string
+		configured []string
+		capabilities v1.Capabilities
+		err error
+	}{
+		{
+			subTest: "no capabilities",
+			configured: nil,
+			capabilities: v1.Capabilities{},
+			err: fmt.Errorf("could not parse capabilities configuration of nil"),
+		},
+		{
+			subTest: "empty capabilities",
+			configured: []string{},
+			capabilities: v1.Capabilities{},
+			err: fmt.Errorf("could not parse empty capabilities configuration"),
+		},
+		{
+			subTest: "configured capabilities",
+			configured: []string{"SYS_NICE", "CHOWN"},
+			capabilities: v1.Capabilities{
+				Add: []v1.Capability{"SYS_NICE", "CHOWN"},
+			},
+			err: nil,
+		},
+	}
+	for _, tt := range tests {
+		caps := generateCapabilities(tt.configured)
+		if !reflect.DeepEqual(caps, tt.capabilities) {
+			t.Errorf("%s %s: expected `%v` but got `%v`",
+				testName, tt.subTest, tt.capabilities, caps)
+		}
+	}
+}
@@ -66,6 +66,7 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur
 	result.SpiloRunAsUser = fromCRD.Kubernetes.SpiloRunAsUser
 	result.SpiloRunAsGroup = fromCRD.Kubernetes.SpiloRunAsGroup
 	result.SpiloFSGroup = fromCRD.Kubernetes.SpiloFSGroup
+	result.AdditionalPodCapabilities = fromCRD.Kubernetes.AdditionalPodCapabilities
 	result.ClusterDomain = util.Coalesce(fromCRD.Kubernetes.ClusterDomain, "cluster.local")
 	result.WatchedNamespace = fromCRD.Kubernetes.WatchedNamespace
 	result.PDBNameFormat = fromCRD.Kubernetes.PDBNameFormat
@@ -23,38 +23,39 @@ type CRD struct {

 // Resources describes kubernetes resource specific configuration parameters
 type Resources struct {
 	ResourceCheckInterval time.Duration `name:"resource_check_interval" default:"3s"`
 	ResourceCheckTimeout time.Duration `name:"resource_check_timeout" default:"10m"`
 	PodLabelWaitTimeout time.Duration `name:"pod_label_wait_timeout" default:"10m"`
 	PodDeletionWaitTimeout time.Duration `name:"pod_deletion_wait_timeout" default:"10m"`
 	PodTerminateGracePeriod time.Duration `name:"pod_terminate_grace_period" default:"5m"`
 	SpiloRunAsUser *int64 `json:"spilo_runasuser,omitempty"`
 	SpiloRunAsGroup *int64 `json:"spilo_runasgroup,omitempty"`
 	SpiloFSGroup *int64 `name:"spilo_fsgroup"`
 	PodPriorityClassName string `name:"pod_priority_class_name"`
 	ClusterDomain string `name:"cluster_domain" default:"cluster.local"`
 	SpiloPrivileged bool `name:"spilo_privileged" default:"false"`
+	AdditionalPodCapabilities []string `name:"additional_pod_capabilities" default:""`
 	ClusterLabels map[string]string `name:"cluster_labels" default:"application:spilo"`
 	InheritedLabels []string `name:"inherited_labels" default:""`
 	InheritedAnnotations []string `name:"inherited_annotations" default:""`
 	DownscalerAnnotations []string `name:"downscaler_annotations"`
 	ClusterNameLabel string `name:"cluster_name_label" default:"cluster-name"`
 	DeleteAnnotationDateKey string `name:"delete_annotation_date_key"`
 	DeleteAnnotationNameKey string `name:"delete_annotation_name_key"`
 	PodRoleLabel string `name:"pod_role_label" default:"spilo-role"`
 	PodToleration map[string]string `name:"toleration" default:""`
 	DefaultCPURequest string `name:"default_cpu_request" default:"100m"`
 	DefaultMemoryRequest string `name:"default_memory_request" default:"100Mi"`
 	DefaultCPULimit string `name:"default_cpu_limit" default:"1"`
 	DefaultMemoryLimit string `name:"default_memory_limit" default:"500Mi"`
 	MinCPULimit string `name:"min_cpu_limit" default:"250m"`
 	MinMemoryLimit string `name:"min_memory_limit" default:"250Mi"`
 	PodEnvironmentConfigMap spec.NamespacedName `name:"pod_environment_configmap"`
 	PodEnvironmentSecret string `name:"pod_environment_secret"`
 	NodeReadinessLabel map[string]string `name:"node_readiness_label" default:""`
 	MaxInstances int32 `name:"max_instances" default:"-1"`
 	MinInstances int32 `name:"min_instances" default:"-1"`
 	ShmVolume *bool `name:"enable_shm_volume" default:"true"`
 }

 type InfrastructureRole struct {