build multi-arch pooler image (#3077)

* build multi-arch pooler image
* add pooler build step in delivery.yaml and bump pooler version
* pull from docker hub not zalando registry
* add pooler step to ghcr workflow
* pass infra roles to auth file via pooler entrypoint
* introduce extra pooler secret for mounting auth_file
* use pgbouncer as image name and push to ghcr on next merge
* build with latest pgbouncer
* integrate new image in e2e process and update pooler image default
* update pooler build dependencies
* build pooler image for e2e test
* more Makefile and e2e run script tweaking

---------

Co-authored-by: Ida Novindasari <idanovinda@gmail.com>
This commit is contained in:
Felix Kunde 2026-04-28 13:34:36 +02:00 committed by GitHub
parent 97f4de7cc0
commit e1713705f4
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
22 changed files with 363 additions and 40 deletions

View File

@ -34,6 +34,12 @@ jobs:
OPERATOR_IMAGE="${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${GITHUB_REF/refs\/tags\//}"
echo "OPERATOR_IMAGE=$OPERATOR_IMAGE" >> $GITHUB_OUTPUT
- name: Define pooler image name
id: image_pooler
run: |
POOLER_IMAGE="${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}/pgbouncer:${GITHUB_REF/refs\/tags\//}"
echo "POOLER_IMAGE=$POOLER_IMAGE" >> $GITHUB_OUTPUT
- name: Define UI image name
id: image_ui
run: |
@ -69,6 +75,15 @@ jobs:
tags: "${{ steps.image.outputs.OPERATOR_IMAGE }}"
platforms: linux/amd64,linux/arm64
- name: Build and push multiarch pooler image to ghcr
uses: docker/build-push-action@v3
with:
context: pooler
push: true
build-args: BASE_IMAGE=alpine:3.22
tags: "${{ steps.image_pooler.outputs.POOLER_IMAGE }}"
platforms: linux/amd64,linux/arm64
- name: Build and push multiarch ui image to ghcr
uses: docker/build-push-action@v3
with:

View File

@ -1,4 +1,4 @@
.PHONY: clean local test linux macos mocks docker push e2e
.PHONY: clean local test linux macos mocks docker pooler push e2e
BINARY ?= postgres-operator
BUILD_FLAGS ?= -v
@ -49,6 +49,7 @@ endif
PATH := $(GOPATH)/bin:$(PATH)
SHELL := env PATH="$(PATH)" $(SHELL)
IMAGE_TAG := $(IMAGE):$(TAG)$(CDP_TAG)$(DEBUG_FRESH)$(DEBUG_POSTFIX)
POOLER_TAG := $(IMAGE)/pgbouncer:$(TAG)$(CDP_TAG)$(DEBUG_FRESH)$(DEBUG_POSTFIX)
default: local
@ -95,6 +96,9 @@ docker: $(GENERATED_CRDS) ${DOCKERDIR}/${DOCKERFILE}
echo "git describe $(shell git describe --tags --always --dirty)"
docker build --rm -t "$(IMAGE_TAG)" -f "${DOCKERDIR}/${DOCKERFILE}" --build-arg VERSION="${VERSION}" --build-arg BASE_IMAGE="${BASE_IMAGE}" .
pooler:
cd pooler; docker build --rm -t "$(POOLER_TAG)" --build-arg VERSION="${VERSION}" --build-arg BASE_IMAGE="${BASE_IMAGE}" .
indocker-race:
docker run --rm -v "${GOPATH}":"${GOPATH}" -e GOPATH="${GOPATH}" -e RACE=1 -w ${PWD} golang:1.25.3 bash -c "make linux"
@ -113,5 +117,5 @@ test: mocks $(GENERATED) $(GENERATED_CRDS)
codegen: $(GENERATED)
e2e: docker # build operator image to be tested
e2e: docker pooler # build operator and pooler images to be tested
cd e2e; make e2etest

View File

@ -672,7 +672,7 @@ spec:
default: "pooler"
connection_pooler_image:
type: string
default: "registry.opensource.zalan.do/acid/pgbouncer:master-32"
default: "ghcr.io/zalando/postgres-operator/pgbouncer:latest"
connection_pooler_max_db_connections:
type: integer
default: 60

View File

@ -443,7 +443,7 @@ configConnectionPooler:
# db user for pooler to use
connection_pooler_user: "pooler"
# docker image
connection_pooler_image: "registry.opensource.zalan.do/acid/pgbouncer:master-32"
connection_pooler_image: "ghcr.io/zalando/postgres-operator/pgbouncer:latest"
# max db connections the pooler should hold
connection_pooler_max_db_connections: 60
# default pooling mode

View File

@ -42,6 +42,33 @@ pipeline:
-f docker/Dockerfile \
--push .
- id: build-pooler
env:
<<: *BUILD_ENV
type: script
vm_config:
type: linux
commands:
- desc: Build image
cmd: |
cd pooler
if [ -z ${CDP_SOURCE_BRANCH} ]; then
IMAGE=${MULTI_ARCH_REGISTRY}/pgbouncer
else
IMAGE=${MULTI_ARCH_REGISTRY}/pgbouncer-test
fi
docker buildx create --config /etc/cdp-buildkitd.toml --driver-opt network=host --bootstrap --use
docker buildx build --platform "linux/amd64,linux/arm64" \
--build-arg BASE_IMAGE="${ALPINE_BASE_IMAGE}" \
-t "${IMAGE}:${CDP_BUILD_VERSION}" \
--push .
if [ -z ${CDP_SOURCE_BRANCH} ]; then
cdp-promote-image ${IMAGE}:${CDP_BUILD_VERSION}
fi
- id: build-operator-ui
env:
<<: *BUILD_ENV

View File

@ -1075,7 +1075,7 @@ operator being able to provide some reasonable defaults.
* **connection_pooler_image**
Docker image to use for connection pooler deployment.
Default: "registry.opensource.zalan.do/acid/pgbouncer"
Default: "ghcr.io/zalando/postgres-operator/pgbouncer:latest"
* **connection_pooler_max_db_connections**
How many connections the pooler can max hold. This value is divided among the

View File

@ -3,6 +3,7 @@
export cluster_name="postgres-operator-e2e-tests"
export kubeconfig_path="/tmp/kind-config-${cluster_name}"
export operator_image="ghcr.io/zalando/postgres-operator:latest"
export pooler_image="ghcr.io/zalando/postgres-operator/pgbouncer:latest"
export e2e_test_runner_image="ghcr.io/zalando/postgres-operator-e2e-tests-runner:latest"
docker run -it --entrypoint /bin/bash --network=host -e "TERM=xterm-256color" \
@ -11,4 +12,5 @@ docker run -it --entrypoint /bin/bash --network=host -e "TERM=xterm-256color" \
--mount type=bind,source="$(readlink -f tests)",target=/tests \
--mount type=bind,source="$(readlink -f exec.sh)",target=/exec.sh \
--mount type=bind,source="$(readlink -f scripts)",target=/scripts \
-e OPERATOR_IMAGE="${operator_image}" "${e2e_test_runner_image}"
-e OPERATOR_IMAGE="${operator_image}" -e POOLER_IMAGE="${pooler_image}" \
"${e2e_test_runner_image}"

View File

@ -26,17 +26,33 @@ echo "Kubeconfig path: ${kubeconfig_path}"
function pull_images(){
operator_tag=$(git describe --tags --always --dirty)
image_name="ghcr.io/zalando/postgres-operator:${operator_tag}"
if [[ -z $(docker images -q "${image_name}") ]]
then
if ! docker pull "${image_name}"
then
echo "Failed to pull operator image: ${image_name}"
exit 1
components=("postgres-operator" "pooler")
image_urls=("ghcr.io/zalando/postgres-operator:${operator_tag}" "ghcr.io/zalando/postgres-operator/pgbouncer:${operator_tag}")
for i in "${!components[@]}"; do
component="${components[$i]}"
image="${image_urls[$i]}"
if [[ -z $(docker images -q "$image") ]]; then
echo "Pulling $component image: $image"
if ! docker pull "$image"; then
echo "Failed to pull $component image: $image"
exit 1
fi
else
echo "$component image already exists: $image"
fi
fi
operator_image="${image_name}"
echo "Using operator image: ${operator_image}"
# Set variables for later use
if [[ "$component" == "postgres-operator" ]]; then
operator_image="$image"
elif [[ "$component" == "pooler" ]]; then
pooler_image="$image"
fi
done
echo "Using operator image: $operator_image"
echo "Using pooler image: $pooler_image"
}
function start_kind(){
@ -55,10 +71,11 @@ function start_kind(){
kind load docker-image "${spilo_image}" --name ${cluster_name}
}
function load_operator_image() {
echo "Loading operator image"
function load_operator_images() {
echo "Loading operator images"
export KUBECONFIG="${kubeconfig_path}"
kind load docker-image "${operator_image}" --name ${cluster_name}
kind load docker-image "${pooler_image}" --name ${cluster_name}
}
function set_kind_api_server_ip(){
@ -85,7 +102,8 @@ function run_tests(){
--mount type=bind,source="$(readlink -f tests)",target=/tests \
--mount type=bind,source="$(readlink -f exec.sh)",target=/exec.sh \
--mount type=bind,source="$(readlink -f scripts)",target=/scripts \
-e OPERATOR_IMAGE="${operator_image}" "${e2e_test_runner_image}" ${E2E_TEST_CASE-} $@
-e OPERATOR_IMAGE="${operator_image}" -e POOLER_IMAGE="${pooler_image}" \
"${e2e_test_runner_image}" ${E2E_TEST_CASE-} $@
}
function cleanup(){
@ -100,7 +118,7 @@ function main(){
[[ -z ${NOCLEANUP-} ]] && trap "cleanup" QUIT TERM EXIT
pull_images
[[ ! -f ${kubeconfig_path} ]] && start_kind
load_operator_image
load_operator_images
set_kind_api_server_ip
generate_certificate

View File

@ -116,6 +116,7 @@ class EndToEndTestCase(unittest.TestCase):
configmap["data"]["workers"] = "1"
configmap["data"]["docker_image"] = SPILO_CURRENT
configmap["data"]["major_version_upgrade_mode"] = "full"
configmap["data"]["connection_pooler_image"] = os.environ['POOLER_IMAGE']
with open("manifests/configmap.yaml", 'w') as f:
yaml.dump(configmap, f, Dumper=yaml.Dumper)
@ -698,7 +699,7 @@ class EndToEndTestCase(unittest.TestCase):
self.eventuallyEqual(lambda: k8s.count_running_pods(master_pooler_label), 2, "No pooler pods found")
self.eventuallyEqual(lambda: k8s.count_running_pods(replica_pooler_label), 2, "No pooler replica pods found")
self.eventuallyEqual(lambda: k8s.count_services_with_label(pooler_label), 2, "No pooler service found")
self.eventuallyEqual(lambda: k8s.count_secrets_with_label(pooler_label), 1, "Pooler secret not created")
self.eventuallyEqual(lambda: k8s.count_secrets_with_label(pooler_label), 3, "Not all pooler secrets found")
# TLS still enabled so check existing env variables and volume mounts
self.eventuallyEqual(lambda: k8s.count_pods_with_env_variable("CONNECTION_POOLER_CLIENT_TLS_CRT", pooler_label), 4, "TLS env variable CONNECTION_POOLER_CLIENT_TLS_CRT missing in pooler pods")
@ -756,7 +757,7 @@ class EndToEndTestCase(unittest.TestCase):
self.eventuallyEqual(lambda: k8s.count_services_with_label(pooler_label),
1, "No pooler service found")
self.eventuallyEqual(lambda: k8s.count_secrets_with_label(pooler_label),
1, "Secret not created")
2, "Not all pooler secrets created")
# Turn off only replica connection pooler
k8s.api.custom_objects_api.patch_namespaced_custom_object(
@ -784,7 +785,7 @@ class EndToEndTestCase(unittest.TestCase):
'ClusterIP',
"Expected LoadBalancer service type for master, found {}")
self.eventuallyEqual(lambda: k8s.count_secrets_with_label(pooler_label),
1, "Secret not created")
2, "Not all pooler secrets created")
# scale up connection pooler deployment
k8s.api.custom_objects_api.patch_namespaced_custom_object(
@ -819,8 +820,8 @@ class EndToEndTestCase(unittest.TestCase):
0, "Pooler pods not scaled down")
self.eventuallyEqual(lambda: k8s.count_services_with_label(pooler_label),
0, "Pooler service not removed")
self.eventuallyEqual(lambda: k8s.count_secrets_with_label('application=spilo,cluster-name=acid-minimal-cluster'),
4, "Secrets not deleted")
self.eventuallyEqual(lambda: k8s.count_secrets_with_label(pooler_label),
0, "Not all pooler secrets deleted")
# Verify that all the databases have pooler schema installed.
# Do this via psql, since otherwise we need to deal with

6
go.sum
View File

@ -71,8 +71,6 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw=
github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/lib/pq v1.11.2 h1:x6gxUeu39V0BHZiugWe8LXZYZ+Utk7hSJGThs8sdzfs=
github.com/lib/pq v1.11.2/go.mod h1:/p+8NSbOcwzAEI7wiMXFlgydTwcgTr3OSKMsD2BitpA=
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
@ -113,8 +111,6 @@ github.com/r3labs/diff v1.1.0/go.mod h1:7WjXasNzi0vJetRcB/RqNl5dlIsmXcTTLmF5IoH6
github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/sirupsen/logrus v1.9.4 h1:TsZE7l11zFCLZnZ+teH4Umoq5BhEIfIzfRDZ1Uzql2w=
github.com/sirupsen/logrus v1.9.4/go.mod h1:ftWc9WdOfJ0a92nsE2jF5u5ZwH8Bv2zdeOC42RjbV2g=
github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo=
@ -126,7 +122,6 @@ github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSS
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
@ -170,7 +165,6 @@ golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc=

View File

@ -17,7 +17,7 @@ data:
connection_pooler_default_cpu_request: "500m"
connection_pooler_default_memory_limit: 100Mi
connection_pooler_default_memory_request: 100Mi
connection_pooler_image: "registry.opensource.zalan.do/acid/pgbouncer:master-32"
connection_pooler_image: "ghcr.io/zalando/postgres-operator/pgbouncer:latest"
connection_pooler_max_db_connections: "60"
connection_pooler_mode: "transaction"
connection_pooler_number_of_instances: "2"

View File

@ -23,7 +23,7 @@ spec:
serviceAccountName: postgres-operator
containers:
- name: postgres-operator
image: registry.opensource.zalan.do/acid/pgbouncer:master-32
image: ghcr.io/zalando/postgres-operator/pgbouncer:latest
imagePullPolicy: IfNotPresent
resources:
requests:

View File

@ -670,7 +670,7 @@ spec:
default: "pooler"
connection_pooler_image:
type: string
default: "registry.opensource.zalan.do/acid/pgbouncer:master-32"
default: "ghcr.io/zalando/postgres-operator/pgbouncer:latest"
connection_pooler_max_db_connections:
type: integer
default: 60

View File

@ -218,7 +218,7 @@ configuration:
connection_pooler_default_cpu_request: "500m"
connection_pooler_default_memory_limit: 100Mi
connection_pooler_default_memory_request: 100Mi
connection_pooler_image: "registry.opensource.zalan.do/acid/pgbouncer:master-32"
connection_pooler_image: "ghcr.io/zalando/postgres-operator/pgbouncer:latest"
# connection_pooler_max_db_connections: 60
connection_pooler_mode: "transaction"
connection_pooler_number_of_instances: 2

View File

@ -31,6 +31,7 @@ var poolerRunAsGroup = int64(101)
// ConnectionPoolerObjects K8s objects that are belong to connection pooler
type ConnectionPoolerObjects struct {
AuthSecret *v1.Secret
Deployment *appsv1.Deployment
Service *v1.Service
Name string
@ -167,6 +168,38 @@ func (c *Cluster) createConnectionPooler(LookupFunction InstallFunction) (SyncRe
return reason, nil
}
// generateUserlist renders the contents of pgBouncer's auth_file
// (userlist.txt): one `"user" "password"` line per entry. It always writes
// the pooler admin user first, then every infrastructure role that has a
// non-empty password.
// NOTE(review): iteration over c.InfrastructureRoles is a Go map range, so
// the order of the infrastructure-role lines is nondeterministic — presumably
// acceptable for pgBouncer, but worth confirming if the secret is diffed.
func (c *Cluster) generateUserlist() string {
var sb strings.Builder
// The pooler admin user comes from the cluster's system users.
poolerAdminUser := c.systemUsers[constants.ConnectionPoolerUserKeyName]
fmt.Fprintf(&sb, "\"%s\" \"%s\"\n", poolerAdminUser.Name, poolerAdminUser.Password)
for roleName, infraRole := range c.InfrastructureRoles {
// Roles without a password cannot authenticate via auth_file; skip them.
if infraRole.Password != "" {
fmt.Fprintf(&sb, "\"%s\" \"%s\"\n", roleName, infraRole.Password)
}
}
return sb.String()
}
// generateConnectionPoolerAuthSecret builds the Opaque Secret that holds
// userlist.txt for the given pooler; the pod template mounts it at
// /etc/pgbouncer/userlist.txt so pgBouncer can use it as auth_file.
// The Secret is named "<pooler-name>-userlist", labeled with the pooler
// labels and owned by the cluster via owner references.
func (c *Cluster) generateConnectionPoolerAuthSecret(connectionPooler *ConnectionPoolerObjects) *v1.Secret {
return &v1.Secret{
ObjectMeta: metav1.ObjectMeta{
Labels: c.connectionPoolerLabels(connectionPooler.Role, true).MatchLabels,
Name: fmt.Sprintf("%s-userlist", connectionPooler.Name),
Namespace: connectionPooler.Namespace,
Annotations: c.annotationsSet(nil),
OwnerReferences: c.ownerReferences(),
},
Type: v1.SecretTypeOpaque,
// Secret data must be bytes. Kubernetes handles the encoding.
StringData: map[string]string{
"userlist.txt": c.generateUserlist(),
},
}
}
// Generate pool size related environment variables.
//
// MAX_DB_CONN would specify the global maximum for connections to a target
@ -320,6 +353,18 @@ func (c *Cluster) generateConnectionPoolerPodTemplate(role PostgresRole) (
}
envVars = append(envVars, c.getConnectionPoolerEnvVars()...)
infraRolesList := make([]string, 0)
for infraRoleName := range c.InfrastructureRoles {
infraRolesList = append(infraRolesList, infraRoleName)
}
if len(infraRolesList) > 0 {
envVars = append(envVars, v1.EnvVar{
Name: "INFRASTRUCTURE_ROLES",
Value: strings.Join(infraRolesList, ","),
})
}
poolerContainer := v1.Container{
Name: connectionPoolerContainer,
Image: effectiveDockerImage,
@ -343,12 +388,29 @@ func (c *Cluster) generateConnectionPoolerPodTemplate(role PostgresRole) (
},
}
var poolerVolumes []v1.Volume
var volumeMounts []v1.VolumeMount
// mount secret volume with userlist.txt for pgBouncer to authenticate users
poolerVolumes = append(poolerVolumes, v1.Volume{
Name: fmt.Sprintf("%s-userlist-volume", c.connectionPoolerName(role)),
VolumeSource: v1.VolumeSource{
Secret: &v1.SecretVolumeSource{
SecretName: fmt.Sprintf("%s-userlist", c.connectionPoolerName(role)),
},
},
})
volumeMounts = append(volumeMounts, v1.VolumeMount{
Name: fmt.Sprintf("%s-userlist-volume", c.connectionPoolerName(role)),
MountPath: "/etc/pgbouncer/userlist.txt",
SubPath: "userlist.txt",
ReadOnly: true,
})
// If the cluster has custom TLS certificates configured, we do the following:
// 1. Add environment variables to tell pgBouncer where to find the TLS certificates
// 2. Reference the secret in a volume
// 3. Mount the volume to the container at /tls
var poolerVolumes []v1.Volume
var volumeMounts []v1.VolumeMount
if spec.TLS != nil && spec.TLS.SecretName != "" {
getPoolerTLSEnv := func(k string) string {
keyName := ""
@ -635,12 +697,31 @@ func (c *Cluster) deleteConnectionPooler(role PostgresRole) (err error) {
c.logger.Infof("connection pooler service %s has been deleted for role %s", service.Name, role)
}
// Repeat the same for the auth secret
authSecret := c.ConnectionPooler[role].AuthSecret
if authSecret == nil {
c.logger.Debug("no connection pooler auth secret to delete")
} else {
err := c.KubeClient.
Secrets(c.Namespace).
Delete(context.TODO(), authSecret.Name, metav1.DeleteOptions{})
if k8sutil.ResourceNotFound(err) {
c.logger.Debugf("connection pooler auth secret %s for role %s has already been deleted", authSecret.Name, role)
} else if err != nil {
return fmt.Errorf("could not delete connection pooler auth secret: %v", err)
}
c.logger.Infof("connection pooler auth secret %s has been deleted for role %s", authSecret.Name, role)
}
c.ConnectionPooler[role].AuthSecret = nil
c.ConnectionPooler[role].Deployment = nil
c.ConnectionPooler[role].Service = nil
return nil
}
// delete connection pooler
// delete connection pooler secret
func (c *Cluster) deleteConnectionPoolerSecret() (err error) {
// Repeat the same for the secret object
secretName := c.credentialSecretName(c.OpConfig.ConnectionPooler.User)
@ -656,6 +737,7 @@ func (c *Cluster) deleteConnectionPoolerSecret() (err error) {
return fmt.Errorf("could not delete pooler secret: %v", err)
}
}
return nil
}
@ -971,11 +1053,42 @@ func (c *Cluster) syncConnectionPoolerWorker(oldSpec, newSpec *acidv1.Postgresql
pods []v1.Pod
service *v1.Service
newService *v1.Service
authSecret *v1.Secret
newAuthSecret *v1.Secret
err error
)
updatedPodAnnotations := map[string]*string{}
syncReason := make([]string, 0)
// create extra secret for connection pooler authentication
newAuthSecret = c.generateConnectionPoolerAuthSecret(c.ConnectionPooler[role])
if authSecret, err = c.KubeClient.Secrets(c.Namespace).Get(context.TODO(), fmt.Sprintf("%s-userlist", c.connectionPoolerName(role)), metav1.GetOptions{}); err == nil {
c.ConnectionPooler[role].AuthSecret = authSecret
// make sure existing annotations are preserved
newAuthSecret.Annotations = c.annotationsSet(authSecret.Annotations)
authSecret, err = c.KubeClient.Secrets(authSecret.Namespace).Update(context.TODO(), newAuthSecret, metav1.UpdateOptions{})
if err != nil {
return NoSync, fmt.Errorf("could not update connection pooler auth secret: %v", err)
}
c.ConnectionPooler[role].AuthSecret = authSecret
} else if !k8sutil.ResourceNotFound(err) {
return NoSync, fmt.Errorf("could not get auth secret for connection pooler to sync: %v", err)
}
if k8sutil.ResourceNotFound(err) {
c.logger.Warningf("auth secret %s for connection pooler is not found, create it", fmt.Sprintf("%s-userlist", c.connectionPoolerName(role)))
authSecret, err = c.KubeClient.
Secrets(newAuthSecret.Namespace).
Create(context.TODO(), newAuthSecret, metav1.CreateOptions{})
if err != nil {
return NoSync, err
}
c.ConnectionPooler[role].AuthSecret = authSecret
}
// next the pooler deployment
deployment, err = c.KubeClient.
Deployments(c.Namespace).
Get(context.TODO(), c.connectionPoolerName(role), metav1.GetOptions{})

View File

@ -30,6 +30,7 @@ func newFakeK8sPoolerTestClient() (k8sutil.KubernetesClient, *fake.Clientset) {
StatefulSetsGetter: clientSet.AppsV1(),
DeploymentsGetter: clientSet.AppsV1(),
ServicesGetter: clientSet.CoreV1(),
SecretsGetter: clientSet.CoreV1(),
}, clientSet
}
@ -803,6 +804,7 @@ func TestConnectionPoolerDeploymentSpec(t *testing.T) {
}
cluster.ConnectionPooler = map[PostgresRole]*ConnectionPoolerObjects{
Master: {
AuthSecret: nil,
Deployment: nil,
Service: nil,
LookupFunction: true,
@ -1019,6 +1021,7 @@ func TestPoolerTLS(t *testing.T) {
// create pooler resources
cluster.ConnectionPooler = map[PostgresRole]*ConnectionPoolerObjects{}
cluster.ConnectionPooler[Master] = &ConnectionPoolerObjects{
AuthSecret: nil,
Deployment: nil,
Service: nil,
Name: cluster.connectionPoolerName(Master),
@ -1089,12 +1092,14 @@ func TestConnectionPoolerServiceSpec(t *testing.T) {
}
cluster.ConnectionPooler = map[PostgresRole]*ConnectionPoolerObjects{
Master: {
AuthSecret: nil,
Deployment: nil,
Service: nil,
LookupFunction: false,
Role: Master,
},
Replica: {
AuthSecret: nil,
Deployment: nil,
Service: nil,
LookupFunction: false,

View File

@ -2967,6 +2967,7 @@ func newLBFakeClient() (k8sutil.KubernetesClient, *fake.Clientset) {
DeploymentsGetter: clientSet.AppsV1(),
PodsGetter: clientSet.CoreV1(),
ServicesGetter: clientSet.CoreV1(),
SecretsGetter: clientSet.CoreV1(),
}, clientSet
}

View File

@ -275,7 +275,7 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur
result.ConnectionPooler.Image = util.Coalesce(
fromCRD.ConnectionPooler.Image,
"registry.opensource.zalan.do/acid/pgbouncer")
"ghcr.io/zalando/postgres-operator/pgbouncer:latest")
result.ConnectionPooler.Mode = util.Coalesce(
fromCRD.ConnectionPooler.Mode,

View File

@ -155,7 +155,7 @@ type ConnectionPooler struct {
NumberOfInstances *int32 `name:"connection_pooler_number_of_instances" default:"2"`
Schema string `name:"connection_pooler_schema" default:"pooler"`
User string `name:"connection_pooler_user" default:"pooler"`
Image string `name:"connection_pooler_image" default:"registry.opensource.zalan.do/acid/pgbouncer"`
Image string `name:"connection_pooler_image" default:"ghcr.io/zalando/postgres-operator/pgbouncer:latest"`
Mode string `name:"connection_pooler_mode" default:"transaction"`
MaxDBConnections *int32 `name:"connection_pooler_max_db_connections" default:"60"`
ConnectionPoolerDefaultCPURequest string `name:"connection_pooler_default_cpu_request"`

54
pooler/Dockerfile Normal file
View File

@ -0,0 +1,54 @@
# Multi-stage build: compile the latest tagged pgBouncer release from source
# in a throwaway Alpine stage, then copy only the binary into a minimal
# runtime image that runs as a non-root user.
ARG BASE_IMAGE=alpine:3.22
FROM ${BASE_IMAGE} AS build_stage
# Toolchain and libraries needed to build pgBouncer (autotools, libevent,
# OpenSSL headers).
RUN apk add -U --no-cache \
autoconf \
automake \
curl \
gcc \
libc-dev \
libevent \
libevent-dev \
libtool \
make \
openssl-dev \
pkgconfig \
git
WORKDIR /src
# Clone pgBouncer and check out the most recent tag (latest release),
# resolved via rev-list so a shallow branch clone still finds it.
RUN git clone --single-branch --depth 1 https://github.com/pgbouncer/pgbouncer.git . && \
    git checkout $(git describe --tags $(git rev-list --tags --max-count=1))
RUN git submodule init && git submodule update
# Build and install into /pgbouncer; the sed drops the man-page install
# target, which would otherwise fail without docs tooling.
RUN ./autogen.sh && \
    ./configure --prefix=/pgbouncer --with-libevent=/usr/lib && \
    sed -i '/dist_man_MANS/d' Makefile && \
    make && \
    make install
# Runtime stage: only runtime libraries plus gettext (for envsubst in the
# entrypoint) and postgresql-client (debugging/health checks).
FROM ${BASE_IMAGE}
RUN apk -U upgrade --no-cache \
    && apk --no-cache add bash c-ares ca-certificates gettext libevent openssl postgresql-client
# Dedicated non-root user/group (uid 100 / gid 101) and the directories
# pgBouncer writes to at runtime.
RUN addgroup -g 101 -S pgbouncer && \
    adduser -u 100 -S pgbouncer -G pgbouncer && \
    mkdir -p /etc/pgbouncer /var/log/pgbouncer /var/run/pgbouncer /etc/ssl/certs
COPY --from=build_stage /pgbouncer/bin/pgbouncer /bin/pgbouncer
COPY pgbouncer.ini.tmpl /etc/pgbouncer/
COPY entrypoint.sh /entrypoint.sh
# Writable paths must belong to the runtime user; /etc/ssl/certs receives
# generated or linked TLS material from the entrypoint.
RUN chown -R pgbouncer:pgbouncer \
    /var/log/pgbouncer \
    /var/run/pgbouncer \
    /etc/pgbouncer \
    /etc/ssl/certs \
    && chmod +x /entrypoint.sh
USER pgbouncer:pgbouncer
WORKDIR /etc/pgbouncer
ENTRYPOINT ["/bin/sh", "/entrypoint.sh"]

19
pooler/entrypoint.sh Executable file
View File

@ -0,0 +1,19 @@
#!/bin/sh
# Container entrypoint: prepare TLS material, render pgbouncer.ini from its
# template with the current environment, then exec pgBouncer as PID 1.
set -ex
# No client TLS cert supplied via env -> generate a self-signed dummy
# certificate so pgBouncer can still serve TLS connections.
if [ -z "${CONNECTION_POOLER_CLIENT_TLS_CRT}" ]; then
openssl req -nodes -new -x509 -subj /CN=spilo.dummy.org \
  -keyout /etc/ssl/certs/pgbouncer.key \
  -out /etc/ssl/certs/pgbouncer.crt
else
# Link the mounted cert/key (and optional CA bundle) into the fixed paths
# referenced by pgbouncer.ini.tmpl.
ln -s ${CONNECTION_POOLER_CLIENT_TLS_CRT} /etc/ssl/certs/pgbouncer.crt
ln -s ${CONNECTION_POOLER_CLIENT_TLS_KEY} /etc/ssl/certs/pgbouncer.key
if [ ! -z "${CONNECTION_POOLER_CLIENT_CA_FILE}" ]; then
ln -s ${CONNECTION_POOLER_CLIENT_CA_FILE} /etc/ssl/certs/ca.crt
fi
fi
# Substitute $VAR placeholders in the template with environment values.
envsubst < /etc/pgbouncer/pgbouncer.ini.tmpl > /etc/pgbouncer/pgbouncer.ini
exec /bin/pgbouncer /etc/pgbouncer/pgbouncer.ini

70
pooler/pgbouncer.ini.tmpl Normal file
View File

@ -0,0 +1,70 @@
# vim: set ft=dosini:
# Template for /etc/pgbouncer/pgbouncer.ini. All $VAR placeholders are
# substituted with environment variables by envsubst in entrypoint.sh
# before pgBouncer starts.
[databases]
# Route every database to the target host/port; auth_user performs the
# auth_query lookups on behalf of connecting clients.
* = host=$PGHOST port=$PGPORT auth_user=$PGUSER
postgres = host=$PGHOST port=$PGPORT auth_user=$PGUSER
[pgbouncer]
pool_mode = $CONNECTION_POOLER_MODE
listen_port = $CONNECTION_POOLER_PORT
listen_addr = *
admin_users = $PGUSER
# Comma-separated list of infrastructure roles passed in by the operator;
# these may query pgBouncer's stats console.
stats_users = $INFRASTRUCTURE_ROLES
auth_dbname = postgres
# Static credentials (pooler admin user + infrastructure roles) mounted from
# the "<pooler>-userlist" secret; other users fall through to auth_query.
auth_file = /etc/pgbouncer/userlist.txt
auth_query = SELECT * FROM $PGSCHEMA.user_lookup($1)
auth_type = md5
logfile = /var/log/pgbouncer/pgbouncer.log
pidfile = /var/run/pgbouncer/pgbouncer.pid
# TLS paths match what entrypoint.sh generates or links under /etc/ssl/certs.
server_tls_sslmode = require
server_tls_ca_file = /etc/ssl/certs/pgbouncer.crt
server_tls_protocols = secure
client_tls_sslmode = require
client_tls_key_file = /etc/ssl/certs/pgbouncer.key
client_tls_cert_file = /etc/ssl/certs/pgbouncer.crt
log_connections = 0
log_disconnections = 0
# Number of prepared statements to cache on a server connection (zero value
# disables support of prepared statements).
max_prepared_statements = 200
# How many server connections to allow per user/database pair.
default_pool_size = $CONNECTION_POOLER_DEFAULT_SIZE
# Add more server connections to pool if below this number. Improves behavior
# when usual load comes suddenly back after period of total inactivity.
#
# NOTE: This value is per pool, i.e. a pair of (db, user), not a global one.
# Which means on the higher level it has to be calculated from the max allowed
# database connections and number of databases and users. If not taken into
# account, then for too many users or databases PgBouncer will go crazy
# opening/evicting connections. For now disable it.
#
# min_pool_size = $CONNECTION_POOLER_MIN_SIZE
# How many additional connections to allow to a pool
reserve_pool_size = $CONNECTION_POOLER_RESERVE_SIZE
# Maximum number of client connections allowed.
max_client_conn = $CONNECTION_POOLER_MAX_CLIENT_CONN
# Do not allow more than this many connections per database (regardless of
# pool, i.e. user)
max_db_connections = $CONNECTION_POOLER_MAX_DB_CONN
# If a client has been in "idle in transaction" state longer, it will be
# disconnected. [seconds]
idle_transaction_timeout = 600
# If login failed, because of failure from connect() or authentication that
# pooler waits this much before retrying to connect. Default is 15. [seconds]
server_login_retry = 5
# To ignore extra parameter in startup packet. By default only 'database' and
# 'user' are allowed, all others raise error. This is needed to tolerate
# overenthusiastic JDBC wanting to unconditionally set 'extra_float_digits=2'
# in startup packet.
ignore_startup_parameters = extra_float_digits,options