diff --git a/charts/postgres-operator/values-crd.yaml b/charts/postgres-operator/values-configmap.yaml similarity index 80% rename from charts/postgres-operator/values-crd.yaml rename to charts/postgres-operator/values-configmap.yaml index c6f11e493..ae9e06fe9 100644 --- a/charts/postgres-operator/values-crd.yaml +++ b/charts/postgres-operator/values-configmap.yaml @@ -13,35 +13,34 @@ image: podAnnotations: {} podLabels: {} -configTarget: "OperatorConfigurationCRD" +configTarget: "ConfigMap" -# general top-level configuration parameters +# general configuration parameters configGeneral: # choose if deployment creates/updates CRDs with OpenAPIV3Validation - enable_crd_validation: true + enable_crd_validation: "true" # start any new database pod without limitations on shm memory - enable_shm_volume: true + enable_shm_volume: "true" # etcd connection string for Patroni. Empty uses K8s-native DCS. etcd_host: "" # Spilo docker image docker_image: registry.opensource.zalan.do/acid/spilo-11:1.6-p1 # max number of instances in Postgres cluster. -1 = no limit - min_instances: -1 + min_instances: "-1" # min number of instances in Postgres cluster. 
-1 = no limit - max_instances: -1 + max_instances: "-1" # period between consecutive repair requests repair_period: 5m # period between consecutive sync requests resync_period: 30m # can prevent certain cases of memory overcommitment - # set_memory_request_to_limit: false + # set_memory_request_to_limit: "false" # map of sidecar names to docker images - # sidecar_docker_images - # example: "exampleimage:exampletag" + # sidecar_docker_images: "" # number of routines the operator spawns to process requests concurrently - workers: 4 + workers: "4" # parameters describing Postgres users configUsers: @@ -54,33 +53,27 @@ configKubernetes: # default DNS domain of K8s cluster where operator is running cluster_domain: cluster.local # additional labels assigned to the cluster objects - cluster_labels: - application: spilo + cluster_labels: application:spilo # label assigned to Kubernetes objects created by the operator - cluster_name_label: cluster-name - # additional annotations to add to every database pod - # custom_pod_annotations: - # keya: valuea - # keyb: valueb + cluster_name_label: version + # annotations attached to each database pod + # custom_pod_annotations: keya:valuea,keyb:valueb # toggles pod anti affinity on the Postgres pods - enable_pod_antiaffinity: false + enable_pod_antiaffinity: "false" # toggles PDB to set to MinAvailabe 0 or 1 - enable_pod_disruption_budget: true + enable_pod_disruption_budget: "true" # name of the secret containing infrastructure roles names and passwords # infrastructure_roles_secret_name: postgresql-infrastructure-roles # list of labels that can be inherited from the cluster manifest - # inherited_labels: - # - application - # - environment + # inherited_labels: application,environment # timeout for successful migration of master pods from unschedulable node # master_pod_move_timeout: 20m # set of labels that a running and active node should possess to be considered ready - # node_readiness_label: - # status: ready + # 
node_readiness_label: "" # name of the secret containing the OAuth2 token to pass to the teams API # oauth_token_secret_name: postgresql-operator @@ -99,12 +92,12 @@ configKubernetes: # Postgres pods are terminated forcefully after this timeout pod_terminate_grace_period: 5m # template for database user secrets generated by the operator - secret_name_template: "{username}.{cluster}.credentials.{tprkind}.{tprgroup}" + secret_name_template: '{username}.{cluster}.credentials' # group ID with write-access to volumes (required to run Spilo as non-root process) - # spilo_fsgroup: 103 + # spilo_fsgroup: "103" # whether the Spilo container should run in privileged mode - spilo_privileged: false + spilo_privileged: "false" # operator watches for postgres objects in the given namespace watched_namespace: "*" # listen to all namespaces @@ -139,34 +132,32 @@ configLoadBalancer: # DNS zone for cluster DNS name when load balancer is configured for cluster db_hosted_zone: db.example.com # annotations to apply to service when load balancing is enabled - # custom_service_annotations: - # keyx: valuez - # keya: valuea + # custom_service_annotations: "keyx:valuez,keya:valuea" # toggles service type load balancer pointing to the master pod of the cluster - enable_master_load_balancer: false + enable_master_load_balancer: "false" # toggles service type load balancer pointing to the replica pod of the cluster - enable_replica_load_balancer: false + enable_replica_load_balancer: "false" # defines the DNS name string template for the master load balancer cluster - master_dns_name_format: "{cluster}.{team}.{hostedzone}" + master_dns_name_format: '{cluster}.{team}.{hostedzone}' # defines the DNS name string template for the replica load balancer cluster - replica_dns_name_format: "{cluster}-repl.{team}.{hostedzone}" + replica_dns_name_format: '{cluster}-repl.{team}.{hostedzone}' # options to aid debugging of the operator itself configDebug: # toggles verbose debug logs from the operator - 
debug_logging: true + debug_logging: "true" # toggles operator functionality that require access to the postgres database - enable_database_access: true + enable_database_access: "true" # parameters affecting logging and REST API listener configLoggingRestApi: # REST API listener listens to this port - api_port: 8080 + api_port: "8080" # number of entries in the cluster history ring buffer - cluster_history_entries: 1000 + cluster_history_entries: "1000" # number of lines in the ring buffer used to store cluster logs - ring_log_lines: 100 + ring_log_lines: "100" # configure interaction with non-Kubernetes objects from AWS or GCP configAwsOrGcp: @@ -208,49 +199,34 @@ configLogicalBackup: # automate creation of human users with teams API service configTeamsApi: # team_admin_role will have the rights to grant roles coming from PG manifests - # enable_admin_role_for_users: true + # enable_admin_role_for_users: "true" # toggle to grant superuser to team members created from the Teams API - enable_team_superuser: false + # enable_team_superuser: "false" + # toggles usage of the Teams API by the operator - enable_teams_api: false + enable_teams_api: "false" # should contain a URL to use for authentication (username and token) - # pam_configuration: "" + # pam_configuration: https://info.example.com/oauth2/tokeninfo?access_token= uid realm=/employees # operator will add all team member roles to this group and add a pg_hba line - pam_role_name: zalandos + # pam_role_name: zalandos + # List of teams which members need the superuser role in each Postgres cluster - # postgres_superuser_teams: - # - postgres_superusers + # postgres_superuser_teams: "postgres_superusers" # List of roles that cannot be overwritten by an application, team or infrastructure role - protected_role_names: - - admin + # protected_role_names: "admin" + # role name to grant to team members created from the Teams API - team_admin_role: admin + # team_admin_role: "admin" + # postgres config parameters to 
apply to each team member role - team_api_role_configuration: - log_statement: all + # team_api_role_configuration: "log_statement:all" + # URL of the Teams API service # teams_api_url: http://fake-teams-api.default.svc.cluster.local -# Scalyr is a log management tool that Zalando uses as a sidecar -configScalyr: - # API key for the Scalyr sidecar - # scalyr_api_key: "" - - # Docker image for the Scalyr sidecar - # scalyr_image: "" - - # CPU limit value for the Scalyr sidecar - scalyr_cpu_limit: "1" - # CPU rquest value for the Scalyr sidecar - scalyr_cpu_request: 100m - # Memory limit value for the Scalyr sidecar - scalyr_memory_limit: 1Gi - # Memory request value for the Scalyr sidecar - scalyr_memory_request: 50Mi - rbac: # Specifies whether RBAC resources should be created create: true diff --git a/charts/postgres-operator/values.yaml b/charts/postgres-operator/values.yaml index ae9e06fe9..c6f11e493 100644 --- a/charts/postgres-operator/values.yaml +++ b/charts/postgres-operator/values.yaml @@ -13,34 +13,35 @@ image: podAnnotations: {} podLabels: {} -configTarget: "ConfigMap" +configTarget: "OperatorConfigurationCRD" -# general configuration parameters +# general top-level configuration parameters configGeneral: # choose if deployment creates/updates CRDs with OpenAPIV3Validation - enable_crd_validation: "true" + enable_crd_validation: true # start any new database pod without limitations on shm memory - enable_shm_volume: "true" + enable_shm_volume: true # etcd connection string for Patroni. Empty uses K8s-native DCS. etcd_host: "" # Spilo docker image docker_image: registry.opensource.zalan.do/acid/spilo-11:1.6-p1 # max number of instances in Postgres cluster. -1 = no limit - min_instances: "-1" + min_instances: -1 # min number of instances in Postgres cluster. 
-1 = no limit - max_instances: "-1" + max_instances: -1 # period between consecutive repair requests repair_period: 5m # period between consecutive sync requests resync_period: 30m # can prevent certain cases of memory overcommitment - # set_memory_request_to_limit: "false" + # set_memory_request_to_limit: false # map of sidecar names to docker images - # sidecar_docker_images: "" + # sidecar_docker_images + # example: "exampleimage:exampletag" # number of routines the operator spawns to process requests concurrently - workers: "4" + workers: 4 # parameters describing Postgres users configUsers: @@ -53,27 +54,33 @@ configKubernetes: # default DNS domain of K8s cluster where operator is running cluster_domain: cluster.local # additional labels assigned to the cluster objects - cluster_labels: application:spilo + cluster_labels: + application: spilo # label assigned to Kubernetes objects created by the operator - cluster_name_label: version - # annotations attached to each database pod - # custom_pod_annotations: keya:valuea,keyb:valueb + cluster_name_label: cluster-name + # additional annotations to add to every database pod + # custom_pod_annotations: + # keya: valuea + # keyb: valueb # toggles pod anti affinity on the Postgres pods - enable_pod_antiaffinity: "false" + enable_pod_antiaffinity: false # toggles PDB to set to MinAvailabe 0 or 1 - enable_pod_disruption_budget: "true" + enable_pod_disruption_budget: true # name of the secret containing infrastructure roles names and passwords # infrastructure_roles_secret_name: postgresql-infrastructure-roles # list of labels that can be inherited from the cluster manifest - # inherited_labels: application,environment + # inherited_labels: + # - application + # - environment # timeout for successful migration of master pods from unschedulable node # master_pod_move_timeout: 20m # set of labels that a running and active node should possess to be considered ready - # node_readiness_label: "" + # node_readiness_label: + # 
status: ready # name of the secret containing the OAuth2 token to pass to the teams API # oauth_token_secret_name: postgresql-operator @@ -92,12 +99,12 @@ configKubernetes: # Postgres pods are terminated forcefully after this timeout pod_terminate_grace_period: 5m # template for database user secrets generated by the operator - secret_name_template: '{username}.{cluster}.credentials' + secret_name_template: "{username}.{cluster}.credentials.{tprkind}.{tprgroup}" # group ID with write-access to volumes (required to run Spilo as non-root process) - # spilo_fsgroup: "103" + # spilo_fsgroup: 103 # whether the Spilo container should run in privileged mode - spilo_privileged: "false" + spilo_privileged: false # operator watches for postgres objects in the given namespace watched_namespace: "*" # listen to all namespaces @@ -132,32 +139,34 @@ configLoadBalancer: # DNS zone for cluster DNS name when load balancer is configured for cluster db_hosted_zone: db.example.com # annotations to apply to service when load balancing is enabled - # custom_service_annotations: "keyx:valuez,keya:valuea" + # custom_service_annotations: + # keyx: valuez + # keya: valuea # toggles service type load balancer pointing to the master pod of the cluster - enable_master_load_balancer: "false" + enable_master_load_balancer: false # toggles service type load balancer pointing to the replica pod of the cluster - enable_replica_load_balancer: "false" + enable_replica_load_balancer: false # defines the DNS name string template for the master load balancer cluster - master_dns_name_format: '{cluster}.{team}.{hostedzone}' + master_dns_name_format: "{cluster}.{team}.{hostedzone}" # defines the DNS name string template for the replica load balancer cluster - replica_dns_name_format: '{cluster}-repl.{team}.{hostedzone}' + replica_dns_name_format: "{cluster}-repl.{team}.{hostedzone}" # options to aid debugging of the operator itself configDebug: # toggles verbose debug logs from the operator - 
debug_logging: "true" + debug_logging: true # toggles operator functionality that require access to the postgres database - enable_database_access: "true" + enable_database_access: true # parameters affecting logging and REST API listener configLoggingRestApi: # REST API listener listens to this port - api_port: "8080" + api_port: 8080 # number of entries in the cluster history ring buffer - cluster_history_entries: "1000" + cluster_history_entries: 1000 # number of lines in the ring buffer used to store cluster logs - ring_log_lines: "100" + ring_log_lines: 100 # configure interaction with non-Kubernetes objects from AWS or GCP configAwsOrGcp: @@ -199,34 +208,49 @@ configLogicalBackup: # automate creation of human users with teams API service configTeamsApi: # team_admin_role will have the rights to grant roles coming from PG manifests - # enable_admin_role_for_users: "true" + # enable_admin_role_for_users: true # toggle to grant superuser to team members created from the Teams API - # enable_team_superuser: "false" - + enable_team_superuser: false # toggles usage of the Teams API by the operator - enable_teams_api: "false" + enable_teams_api: false # should contain a URL to use for authentication (username and token) - # pam_configuration: https://info.example.com/oauth2/tokeninfo?access_token= uid realm=/employees + # pam_configuration: "" # operator will add all team member roles to this group and add a pg_hba line - # pam_role_name: zalandos - + pam_role_name: zalandos # List of teams which members need the superuser role in each Postgres cluster - # postgres_superuser_teams: "postgres_superusers" + # postgres_superuser_teams: + # - postgres_superusers # List of roles that cannot be overwritten by an application, team or infrastructure role - # protected_role_names: "admin" - + protected_role_names: + - admin # role name to grant to team members created from the Teams API - # team_admin_role: "admin" - + team_admin_role: admin # postgres config parameters to 
apply to each team member role - # team_api_role_configuration: "log_statement:all" - + team_api_role_configuration: + log_statement: all # URL of the Teams API service # teams_api_url: http://fake-teams-api.default.svc.cluster.local +# Scalyr is a log management tool that Zalando uses as a sidecar +configScalyr: + # API key for the Scalyr sidecar + # scalyr_api_key: "" + + # Docker image for the Scalyr sidecar + # scalyr_image: "" + + # CPU limit value for the Scalyr sidecar + scalyr_cpu_limit: "1" + # CPU request value for the Scalyr sidecar + scalyr_cpu_request: 100m + # Memory limit value for the Scalyr sidecar + scalyr_memory_limit: 1Gi + # Memory request value for the Scalyr sidecar + scalyr_memory_request: 50Mi + rbac: # Specifies whether RBAC resources should be created create: true diff --git a/docs/developer.md b/docs/developer.md index 4d0ea2e12..10421e5c2 100644 --- a/docs/developer.md +++ b/docs/developer.md @@ -295,13 +295,13 @@ Please, reflect your changes in tests, for example in: For the CRD-based configuration, please update the following files: * the default [OperatorConfiguration](../manifests/postgresql-operator-default-configuration.yaml) -* the Helm chart's [values-crd file](../charts/postgres-operator/values.yaml) +* the Helm chart's [values file](../charts/postgres-operator/values.yaml) * the CRD's [validation](../manifests/operatorconfiguration.crd.yaml) Reflect the changes in the ConfigMap configuration as well (note that numeric and boolean parameters have to use double quotes here): * [ConfigMap](../manifests/configmap.yaml) manifest -* the Helm chart's default [values file](../charts/postgres-operator/values.yaml) +* the Helm chart's default [values-configmap file](../charts/postgres-operator/values-configmap.yaml) ### Updating documentation diff --git a/docs/quickstart.md b/docs/quickstart.md index c1291633a..84f9017a9 100644 --- a/docs/quickstart.md +++ b/docs/quickstart.md @@ -81,10 +81,11 @@ the repo root. 
With Helm v3 installed you should be able to run: helm install postgres-operator ./charts/postgres-operator ``` -To use CRD-based configuration you need to specify the [values-crd yaml file](../charts/postgres-operator/values-crd.yaml). +To use ConfigMap-based configuration you need to specify the +[values-configmap yaml file](../charts/postgres-operator/values-configmap.yaml). ```bash -helm install postgres-operator ./charts/postgres-operator -f ./charts/postgres-operator/values-crd.yaml +helm install postgres-operator ./charts/postgres-operator -f ./charts/postgres-operator/values-configmap.yaml ``` The chart works with both Helm 2 and Helm 3. The `crd-install` hook from v2 will diff --git a/manifests/postgresql-operator-default-configuration.yaml b/manifests/postgresql-operator-default-configuration.yaml index eeb3a3cd5..5b1a15b1d 100644 --- a/manifests/postgresql-operator-default-configuration.yaml +++ b/manifests/postgresql-operator-default-configuration.yaml @@ -42,7 +42,7 @@ configuration: pod_role_label: spilo-role pod_service_account_name: zalando-postgres-operator pod_terminate_grace_period: 5m - secret_name_template: "{username}.{cluster}.credentials" + secret_name_template: "{username}.{cluster}.credentials.{tprkind}.{tprgroup}" # spilo_fsgroup: 103 spilo_privileged: false # toleration: @@ -61,14 +61,14 @@ configuration: resource_check_interval: 3s resource_check_timeout: 10m load_balancer: - db_hosted_zone: db.example.com - enable_master_load_balancer: true + # db_hosted_zone: db.example.com + enable_master_load_balancer: false enable_replica_load_balancer: false # custom_service_annotations: # keyx: valuex # keyy: valuey - master_dns_name_format: "{cluster}.{team}.staging.{hostedzone}" - replica_dns_name_format: "{cluster}-repl.{team}.staging.{hostedzone}" + master_dns_name_format: "{cluster}.{team}.{hostedzone}" + replica_dns_name_format: "{cluster}-repl.{team}.{hostedzone}" aws_or_gcp: # additional_secret_mount: "some-secret-name" # 
additional_secret_mount_path: "/some/dir"