[bitnami/kafka] Release 3.4.0-debian-11-r22 (#31113)

Signed-off-by: Bitnami Containers <bitnami-bot@vmware.com>
This commit is contained in:
Bitnami Bot 2023-04-20 18:45:49 +02:00 committed by GitHub
parent e760ecfbc6
commit b621287054
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
10 changed files with 190 additions and 232 deletions

View File

@ -3,11 +3,12 @@ FROM docker.io/bitnami/minideb:bullseye
ARG JAVA_EXTRA_SECURITY_DIR="/bitnami/java/extra-security"
ARG TARGETARCH
LABEL org.opencontainers.image.base.name="docker.io/bitnami/minideb:bullseye" \
org.opencontainers.image.created="2023-04-19T21:13:12Z" \
LABEL com.vmware.cp.artifact.flavor="sha256:109c7d51bd69bb6b3df71017440c1ea0699454f81fe188056c083f0b57c96ea6" \
org.opencontainers.image.base.name="docker.io/bitnami/minideb:bullseye" \
org.opencontainers.image.created="2023-04-20T15:48:12Z" \
org.opencontainers.image.description="Application packaged by VMware, Inc" \
org.opencontainers.image.licenses="Apache-2.0" \
org.opencontainers.image.ref.name="3.4.0-debian-11-r21" \
org.opencontainers.image.ref.name="3.4.0-debian-11-r22" \
org.opencontainers.image.title="kafka" \
org.opencontainers.image.vendor="VMware, Inc." \
org.opencontainers.image.version="3.4.0"
@ -23,8 +24,8 @@ SHELL ["/bin/bash", "-o", "pipefail", "-c"]
RUN install_packages ca-certificates curl procps zlib1g
RUN mkdir -p /tmp/bitnami/pkg/cache/ && cd /tmp/bitnami/pkg/cache/ && \
COMPONENTS=( \
"java-17.0.7-7-0-linux-${OS_ARCH}-debian-11" \
"wait-for-port-1.0.6-6-linux-${OS_ARCH}-debian-11" \
"java-17.0.7-7-0-linux-${OS_ARCH}-debian-11" \
"render-template-1.0.5-5-linux-${OS_ARCH}-debian-11" \
"kafka-3.4.0-6-linux-${OS_ARCH}-debian-11" \
) && \
@ -49,7 +50,7 @@ RUN /opt/bitnami/scripts/kafka/postunpack.sh
ENV APP_VERSION="3.4.0" \
BITNAMI_APP_NAME="kafka" \
JAVA_HOME="/opt/bitnami/java" \
PATH="/opt/bitnami/java/bin:/opt/bitnami/common/bin:/opt/bitnami/kafka/bin:$PATH"
PATH="/opt/bitnami/common/bin:/opt/bitnami/java/bin:/opt/bitnami/kafka/bin:$PATH"
EXPOSE 9092

View File

@ -1,14 +1,6 @@
version: "2"
services:
zookeeper:
image: docker.io/bitnami/zookeeper:3.8
ports:
- "2181:2181"
volumes:
- "zookeeper_data:/bitnami"
environment:
- ALLOW_ANONYMOUS_LOGIN=yes
kafka:
image: docker.io/bitnami/kafka:3.4
ports:
@ -16,13 +8,8 @@ services:
volumes:
- "kafka_data:/bitnami"
environment:
- KAFKA_CFG_ZOOKEEPER_CONNECT=zookeeper:2181
- ALLOW_PLAINTEXT_LISTENER=yes
depends_on:
- zookeeper
volumes:
zookeeper_data:
driver: local
kafka_data:
driver: local

View File

@ -30,12 +30,14 @@ kafka_env_vars=(
KAFKA_TLS_TYPE
KAFKA_TLS_CLIENT_AUTH
KAFKA_OPTS
KAFKA_CFG_ADVERTISED_LISTENERS
KAFKA_CFG_LISTENERS
KAFKA_CFG_ADVERTISED_LISTENERS
KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP
KAFKA_CFG_ZOOKEEPER_CONNECT
KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE
KAFKA_CFG_SASL_ENABLED_MECHANISMS
KAFKA_CFG_SASL_MECHANISM_INTER_BROKER_PROTOCOL
KAFKA_CFG_SASL_MECHANISM_CONTROLLER_PROTOCOL
KAFKA_CFG_INTER_BROKER_LISTENER_NAME
KAFKA_CFG_MAX_REQUEST_SIZE
KAFKA_CFG_MAX_PARTITION_FETCH_BYTES
KAFKA_ENABLE_KRAFT
@ -70,7 +72,8 @@ export KAFKA_BASE_DIR="${BITNAMI_ROOT_DIR}/kafka"
export KAFKA_VOLUME_DIR="/bitnami/kafka"
export KAFKA_DATA_DIR="${KAFKA_VOLUME_DIR}/data"
export KAFKA_CONF_DIR="${KAFKA_BASE_DIR}/config"
export KAFKA_CONF_FILE="${KAFKA_CONF_DIR}/server.properties"
export KAFKA_CONF_FILE="${KAFKA_CONF_DIR}/kraft/server.properties"
export KAFKA_ZK_CONF_FILE="${KAFKA_CONF_DIR}/server.properties"
export KAFKA_MOUNTED_CONF_DIR="${KAFKA_VOLUME_DIR}/config"
export KAFKA_CERTS_DIR="${KAFKA_CONF_DIR}/certs"
export KAFKA_INITSCRIPTS_DIR="/docker-entrypoint-initdb.d"
@ -93,15 +96,17 @@ export KAFKA_TLS_CLIENT_AUTH="${KAFKA_TLS_CLIENT_AUTH:-required}"
export KAFKA_OPTS="${KAFKA_OPTS:-}"
# Kafka configuration overrides
export KAFKA_CFG_LISTENERS="${KAFKA_CFG_LISTENERS:-PLAINTEXT://:9092,CONTROLLER://:9093}"
export KAFKA_CFG_ADVERTISED_LISTENERS="${KAFKA_CFG_ADVERTISED_LISTENERS:-PLAINTEXT://:9092}"
export KAFKA_CFG_LISTENERS="${KAFKA_CFG_LISTENERS:-PLAINTEXT://:9092}"
export KAFKA_CFG_ZOOKEEPER_CONNECT="${KAFKA_CFG_ZOOKEEPER_CONNECT:-localhost:2181}"
export KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE="${KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE:-true}"
export KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP="${KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP:-}"
export KAFKA_CFG_ZOOKEEPER_CONNECT="${KAFKA_CFG_ZOOKEEPER_CONNECT:-}"
export KAFKA_CFG_SASL_ENABLED_MECHANISMS="${KAFKA_CFG_SASL_ENABLED_MECHANISMS:-PLAIN,SCRAM-SHA-256,SCRAM-SHA-512}"
export KAFKA_CFG_SASL_MECHANISM_INTER_BROKER_PROTOCOL="${KAFKA_CFG_SASL_MECHANISM_INTER_BROKER_PROTOCOL:-}"
export KAFKA_CFG_MAX_REQUEST_SIZE="${KAFKA_CFG_MAX_REQUEST_SIZE:-1048576}"
export KAFKA_CFG_MAX_PARTITION_FETCH_BYTES="${KAFKA_CFG_MAX_PARTITION_FETCH_BYTES:-1048576}"
export KAFKA_ENABLE_KRAFT="${KAFKA_ENABLE_KRAFT:-no}"
export KAFKA_CFG_SASL_MECHANISM_CONTROLLER_PROTOCOL="${KAFKA_CFG_SASL_MECHANISM_CONTROLLER_PROTOCOL:-}"
export KAFKA_CFG_INTER_BROKER_LISTENER_NAME="${KAFKA_CFG_INTER_BROKER_LISTENER_NAME:-}"
export KAFKA_CFG_MAX_REQUEST_SIZE="${KAFKA_CFG_MAX_REQUEST_SIZE:-}"
export KAFKA_CFG_MAX_PARTITION_FETCH_BYTES="${KAFKA_CFG_MAX_PARTITION_FETCH_BYTES:-}"
export KAFKA_ENABLE_KRAFT="${KAFKA_ENABLE_KRAFT:-yes}"
export KAFKA_KRAFT_CLUSTER_ID="${KAFKA_KRAFT_CLUSTER_ID:-}"
# ZooKeeper connection settings

View File

@ -34,7 +34,7 @@ replace_in_file "${KAFKA_BASE_DIR}/bin/kafka-server-start.sh" " [-]loggc" " "
replace_in_file "${KAFKA_CONF_DIR}/log4j.properties" "DailyRollingFileAppender" "ConsoleAppender"
# Disable the default console logger in favour of KafkaAppender (which provides the exact output)
echo "log4j.appender.stdout.Threshold=OFF" >>/opt/bitnami/kafka/config/log4j.properties
echo "log4j.appender.stdout.Threshold=OFF" >>"${KAFKA_CONF_DIR}/log4j.properties"
# Remove invalid parameters for ConsoleAppender
remove_in_file "${KAFKA_CONF_DIR}/log4j.properties" "DatePattern"

View File

@ -23,7 +23,7 @@ if [[ "${KAFKA_ZOOKEEPER_PROTOCOL:-}" =~ SSL ]]; then
export KAFKA_OPTS="$KAFKA_OPTS $ZOOKEEPER_SSL_CONFIG"
fi
flags=("$KAFKA_CONF_FILE")
flags=("$(kafka_get_conf_file)")
[[ -z "${KAFKA_EXTRA_FLAGS:-}" ]] || flags=("${flags[@]}" "${KAFKA_EXTRA_FLAGS[@]}")
START_COMMAND=("$KAFKA_HOME/bin/kafka-server-start.sh" "${flags[@]}" "$@")

View File

@ -26,6 +26,7 @@ if [[ -z "${KAFKA_CFG_BROKER_ID:-}" ]]; then
export KAFKA_CFG_BROKER_ID=-1
fi
fi
# Set the default truststore locations
kafka_configure_default_truststore_locations
# Ensure Kafka environment variables are valid
@ -41,6 +42,9 @@ fi
for dir in "$KAFKA_LOG_DIR" "$KAFKA_CONF_DIR" "$KAFKA_MOUNTED_CONF_DIR" "$KAFKA_VOLUME_DIR" "$KAFKA_DATA_DIR"; do
ensure_dir_exists "$dir" "$KAFKA_OWNERSHIP_USER"
done
# shellcheck disable=SC2148
# Ensure Kafka is initialized
kafka_initialize
# If KRaft is enabled initialize

View File

@ -56,6 +56,7 @@ kafka_common_conf_set() {
# Backwards compatibility measure to configure the TLS truststore locations
# Globals:
# KAFKA_CONF_FILE
# KAFKA_ZK_CONF_FILE
# Arguments:
# None
# Returns:
@ -99,6 +100,7 @@ kafka_configure_default_truststore_locations() {
# Set a configuration setting value to server.properties
# Globals:
# KAFKA_CONF_FILE
# KAFKA_ZK_CONF_FILE
# Arguments:
# $1 - key
# $2 - values (array)
@ -106,7 +108,7 @@ kafka_configure_default_truststore_locations() {
# None
#########################
kafka_server_conf_set() {
kafka_common_conf_set "$KAFKA_CONF_FILE" "$@"
kafka_common_conf_set "$(kafka_get_conf_file)" "$@"
}
########################
@ -239,35 +241,32 @@ kafka_validate() {
}
if is_boolean_yes "$KAFKA_ENABLE_KRAFT"; then
if [[ -z "$KAFKA_CFG_BROKER_ID" ]]; then
print_validation_error "KRaft requires KAFKA_CFG_BROKER_ID to be set for the quorum controller"
fi
if [[ -z "$KAFKA_CFG_CONTROLLER_QUORUM_VOTERS" ]]; then
print_validation_error "KRaft requires KAFKA_CFG_CONTROLLER_QUORUM_VOTERS to be set"
fi
if [[ -n "${KAFKA_CFG_NODE_ID:-}" ]] || [[ -n "${KAFKA_CFG_CONTROLLER_QUORUM_VOTERS:-}" ]]; then
if [[ -z "${KAFKA_CFG_NODE_ID:-}" ]]; then
print_validation_error "KRaft requires KAFKA_CFG_NODE_ID to be set for the quorum controller"
fi
if [[ -z "$KAFKA_CFG_CONTROLLER_QUORUM_VOTERS" ]]; then
print_validation_error "KRaft requires KAFKA_CFG_CONTROLLER_QUORUM_VOTERS to be set"
fi
if [[ -n "$KAFKA_CFG_BROKER_ID" ]] && [[ -n "$KAFKA_CFG_CONTROLLER_QUORUM_VOTERS" ]]; then
old_IFS=$IFS
IFS=','
read -r -a voters <<< "$KAFKA_CFG_CONTROLLER_QUORUM_VOTERS"
IFS=${old_IFS}
broker_id_matched=false
node_id_matched=false
for voter in "${voters[@]}"; do
if [[ "$voter" == *"$KAFKA_CFG_BROKER_ID"* ]]; then
broker_id_matched=true
if [[ "$voter" == *"$KAFKA_CFG_NODE_ID"* ]]; then
node_id_matched=true
break
fi
done
if [[ "$broker_id_matched" == false ]]; then
warn "KAFKA_CFG_BROKER_ID must match what is set in KAFKA_CFG_CONTROLLER_QUORUM_VOTERS"
if [[ "$node_id_matched" == false ]]; then
warn "KAFKA_CFG_NODE_ID must match what is set in KAFKA_CFG_CONTROLLER_QUORUM_VOTERS"
fi
fi
if [[ -z "$KAFKA_CFG_CONTROLLER_LISTENER_NAMES" ]]; then
print_validation_error "KRaft requires KAFKA_CFG_CONTROLLER_LISTENER_NAMES to be set"
fi
if [[ -n "$KAFKA_CFG_PROCESS_ROLES" ]]; then
if [[ -n "${KAFKA_CFG_PROCESS_ROLES:-}" ]]; then
old_IFS=$IFS
IFS=','
read -r -a roles <<< "$KAFKA_CFG_PROCESS_ROLES"
@ -283,10 +282,8 @@ kafka_validate() {
if [[ "$controller_exists" == false ]]; then
warn "KAFKA_CFG_PROCESS_ROLES must include 'controller' for KRaft"
fi
else
print_validation_error "KAFKA_CFG_PROCESS_ROLES must be set to enable KRaft model"
fi
if [[ -n "$KAFKA_CFG_LISTENERS" ]]; then
if [[ -n "${KAFKA_CFG_LISTENERS:-}" ]]; then
old_IFS=$IFS
IFS=','
read -r -a listener <<< "$KAFKA_CFG_LISTENERS"
@ -302,8 +299,6 @@ kafka_validate() {
if [[ "$controller_exists" == false ]]; then
warn "KAFKA_CFG_LISTENERS must include a listener for CONTROLLER"
fi
else
print_validation_error "KRaft requires KAFKA_CFG_LISTENERS to be set"
fi
fi
@ -562,7 +557,7 @@ kafka_configure_ssl() {
remove_previous_cert_value() {
local key="${1:?missing key}"
files=(
"${KAFKA_CONF_FILE}"
"$(kafka_get_conf_file)"
"${KAFKA_CONF_DIR}/producer.properties"
"${KAFKA_CONF_DIR}/consumer.properties"
)
@ -760,7 +755,9 @@ kafka_configure_from_environment_variables() {
done
value="${!var}"
kafka_server_conf_set "$key" "$value"
if [[ -n "$value" ]]; then
kafka_server_conf_set "$key" "$value"
fi
done
}
@ -801,7 +798,7 @@ kraft_initialize() {
fi
info "Formatting storage directories to add metadata..."
debug_execute "$KAFKA_HOME/bin/kafka-storage.sh" format --config "$KAFKA_CONF_FILE" --cluster-id "$KAFKA_KRAFT_CLUSTER_ID" --ignore-formatted
debug_execute "$KAFKA_HOME/bin/kafka-storage.sh" format --config "$(kafka_get_conf_file)" --cluster-id "$KAFKA_KRAFT_CLUSTER_ID" --ignore-formatted
}
########################
@ -869,7 +866,7 @@ kafka_initialize() {
fi
# Remove security.inter.broker.protocol if KAFKA_CFG_INTER_BROKER_LISTENER_NAME is configured
if [[ -n "${KAFKA_CFG_INTER_BROKER_LISTENER_NAME:-}" ]]; then
remove_in_file "$KAFKA_CONF_FILE" "security.inter.broker.protocol" false
remove_in_file "$(kafka_get_conf_file)" "security.inter.broker.protocol" false
fi
kafka_configure_producer_consumer_message_sizes
fi
@ -956,3 +953,20 @@ kafka_stop() {
! is_kafka_running && return
stop_service_using_pid "$KAFKA_PID_FILE" TERM
}
########################
# Get configuration file to use
# Globals:
# KAFKA_ENABLE_KRAFT
# Arguments:
# None
# Returns:
# Path to the conf file to use
#########################
kafka_get_conf_file() {
if is_boolean_yes "$KAFKA_ENABLE_KRAFT"; then
echo "$KAFKA_CONF_FILE"
else
echo "$KAFKA_ZK_CONF_FILE"
fi
}

View File

@ -107,17 +107,6 @@ In this example, we will create an Apache Kafka client instance that will connec
docker network create app-tier --driver bridge
```
#### Step 2: Launch the Zookeeper server instance
Use the `--network app-tier` argument to the `docker run` command to attach the Zookeeper container to the `app-tier` network.
```console
docker run -d --name zookeeper-server \
--network app-tier \
-e ALLOW_ANONYMOUS_LOGIN=yes \
bitnami/zookeeper:latest
```
#### Step 2: Launch the Apache Kafka server instance
Use the `--network app-tier` argument to the `docker run` command to attach the Apache Kafka container to the `app-tier` network.
@ -126,7 +115,6 @@ Use the `--network app-tier` argument to the `docker run` command to attach the
docker run -d --name kafka-server \
--network app-tier \
-e ALLOW_PLAINTEXT_LISTENER=yes \
-e KAFKA_CFG_ZOOKEEPER_CONNECT=zookeeper-server:2181 \
bitnami/kafka:latest
```
@ -137,7 +125,6 @@ Finally we create a new container instance to launch the Apache Kafka client and
```console
docker run -it --rm \
--network app-tier \
-e KAFKA_CFG_ZOOKEEPER_CONNECT=zookeeper-server:2181 \
bitnami/kafka:latest kafka-topics.sh --list --bootstrap-server kafka-server:9092
```
@ -153,14 +140,12 @@ networks:
driver: bridge
services:
zookeeper:
image: 'bitnami/zookeeper:latest'
networks:
- app-tier
kafka:
image: 'bitnami/kafka:latest'
networks:
- app-tier
environment:
- ALLOW_PLAINTEXT_LISTENER=yes
myapp:
image: 'YOUR_APPLICATION_IMAGE'
networks:
@ -170,8 +155,7 @@ services:
> **IMPORTANT**:
>
> 1. Please update the `YOUR_APPLICATION_IMAGE` placeholder in the above snippet with your application image
> 2. Configure Apache Kafka and ZooKeeper persistence, and configure them either via environment variables or by [mounting configuration files](#full-configuration).
> 3. In your application container, use the hostname `kafka` to connect to the Apache Kafka server
> 2. In your application container, use the hostname `kafka` to connect to the Apache Kafka server
Launch the containers using:
@ -195,21 +179,23 @@ The configuration can easily be setup with the Bitnami Apache Kafka Docker image
* `KAFKA_ZOOKEEPER_TLS_TRUSTSTORE_PASSWORD`: Apache Kafka Zookeeper truststore file password. No defaults.
* `KAFKA_ZOOKEEPER_TLS_VERIFY_HOSTNAME`: Verify Zookeeper hostname on TLS certificates. Defaults: **true**.
* `KAFKA_ZOOKEEPER_TLS_TYPE`: Choose the TLS certificate format to use. Allowed values: `JKS`, `PEM`. Defaults: **JKS**.
* `KAFKA_CFG_LISTENERS`: Kafka `listeners` configuration override. Default: **PLAINTEXT://:9092,CONTROLLER://:9093**
* `KAFKA_CFG_ADVERTISED_LISTENERS`: Kafka `advertised.listeners` configuration override. Default: **PLAINTEXT://:9092**
* `KAFKA_CFG_SASL_ENABLED_MECHANISMS`: Allowed mechanism when using SASL either for clients, inter broker, or zookeeper communications. Allowed values: `PLAIN`, `SCRAM-SHA-256`, `SCRAM-SHA-512` or a comma separated combination of those values. Default: **PLAIN,SCRAM-SHA-256,SCRAM-SHA-512**
* `KAFKA_CFG_SASL_MECHANISM_INTER_BROKER_PROTOCOL`: SASL mechanism to use for inter broker communications. No defaults.
* `KAFKA_TLS_CLIENT_AUTH`: Configures kafka brokers to request client authentication. Allowed values: `required`, `requested`, `none`. Defaults: **required**.
* `KAFKA_TLS_TYPE`: Choose the TLS certificate format to use. Allowed values: `JKS`, `PEM`. Defaults: **JKS**.
* `KAFKA_CLIENT_USERS`: Users that will be created into Zookeeper when using SASL for client communications. Separated by commas. Default: **user**
* `KAFKA_CLIENT_PASSWORDS`: Passwords for the users specified at `KAFKA_CLIENT_USERS`. Separated by commas. Default: **bitnami**
* `KAFKA_CFG_MAX_PARTITION_FETCH_BYTES`: The maximum amount of data per-partition the server will return. Default: **1048576**
* `KAFKA_CFG_MAX_REQUEST_SIZE`: The maximum size of a request in bytes. Default: **1048576**
* `KAFKA_ENABLE_KRAFT`: Whether to enable Kafka Raft (KRaft) mode. Default: **no**
* `KAFKA_CFG_MAX_PARTITION_FETCH_BYTES`: The maximum amount of data per-partition the server will return. No defaults.
* `KAFKA_CFG_MAX_REQUEST_SIZE`: The maximum size of a request in bytes. No defaults.
* `KAFKA_ENABLE_KRAFT`: Whether to enable Kafka Raft (KRaft) mode. Default: **yes**
* `KAFKA_KRAFT_CLUSTER_ID`: Kafka cluster ID when using Kafka Raft (KRaft). No defaults.
Additionally, any environment variable beginning with `KAFKA_CFG_` will be mapped to its corresponding Apache Kafka key. For example, use `KAFKA_CFG_BACKGROUND_THREADS` in order to set `background.threads` or `KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE` in order to configure `auto.create.topics.enable`.
```console
docker run --name kafka -e KAFKA_CFG_ZOOKEEPER_CONNECT=zookeeper:2181 -e ALLOW_PLAINTEXT_LISTENER=yes -e KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE=true bitnami/kafka:latest
docker run --name kafka -e ALLOW_PLAINTEXT_LISTENER=yes -e KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE=true bitnami/kafka:latest
```
or by modifying the [`docker-compose.yml`](https://github.com/bitnami/containers/blob/main/bitnami/kafka/docker-compose.yml) file present in this repository:
@ -218,7 +204,7 @@ or by modifying the [`docker-compose.yml`](https://github.com/bitnami/containers
kafka:
...
environment:
- KAFKA_CFG_ZOOKEEPER_CONNECT=zookeeper:2181
- KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE=true
...
```
@ -229,24 +215,12 @@ To use Apache Kafka in a development setup, create the following `docker-compose
```yaml
version: "3"
services:
zookeeper:
image: 'bitnami/zookeeper:latest'
ports:
- '2181:2181'
environment:
- ALLOW_ANONYMOUS_LOGIN=yes
kafka:
image: 'bitnami/kafka:latest'
ports:
- '9092:9092'
environment:
- KAFKA_BROKER_ID=1
- KAFKA_CFG_LISTENERS=PLAINTEXT://:9092
- KAFKA_CFG_ADVERTISED_LISTENERS=PLAINTEXT://127.0.0.1:9092
- KAFKA_CFG_ZOOKEEPER_CONNECT=zookeeper:2181
- ALLOW_PLAINTEXT_LISTENER=yes
depends_on:
- zookeeper
```
To deploy it, run the following command in the directory where the `docker-compose.yml` file is located:
@ -255,44 +229,48 @@ To deploy it, run the following command in the directory where the `docker-compo
docker-compose up -d
```
### Kafka without Zookeeper (KRaft)
### Kafka with Zookeeper
Apache Kafka Raft (KRaft) makes use of a new quorum controller service in Kafka which replaces the previous controller and makes use of an event-based variant of the Raft consensus protocol.
This greatly simplifies Kafka's architecture by consolidating responsibility for metadata into Kafka itself, rather than splitting it between two different systems: ZooKeeper and Kafka.
This greatly simplifies Kafka's architecture by consolidating responsibility for metadata into Kafka itself, rather than splitting it between two different systems: ZooKeeper and Kafka.
More Info can be found here: <https://developer.confluent.io/learn/kraft/>
> **NOTE:** According to [KIP-833](https://cwiki.apache.org/confluence/display/KAFKA/KIP-833%3A+Mark+KRaft+as+Production+Ready), KRaft is now in a production-ready state.
Configuration here has been crafted from the [Kraft Repo](https://github.com/apache/kafka/tree/trunk/config/kraft).
However, if you want to keep using ZooKeeper, you can use the following configuration:
```yaml
version: "2"
```diff
version: "3"
services:
- zookeeper:
- image: 'bitnami/zookeeper:latest'
- ports:
- - '2181:2181'
- environment:
- - ALLOW_ANONYMOUS_LOGIN=yes
kafka:
image: 'bitnami/kafka:latest'
ports:
- '9092:9092'
environment:
+ - KAFKA_ENABLE_KRAFT=yes
+ - KAFKA_CFG_PROCESS_ROLES=broker,controller
+ - KAFKA_CFG_CONTROLLER_LISTENER_NAMES=CONTROLLER
- - KAFKA_CFG_LISTENERS=PLAINTEXT://:9092
+ - KAFKA_CFG_LISTENERS=PLAINTEXT://:9092,CONTROLLER://:9093
+ - KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP=CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT
- KAFKA_CFG_ADVERTISED_LISTENERS=PLAINTEXT://127.0.0.1:9092
- KAFKA_BROKER_ID=1
+ - KAFKA_CFG_CONTROLLER_QUORUM_VOTERS=1@127.0.0.1:9093
- - KAFKA_CFG_ZOOKEEPER_CONNECT=zookeeper:2181
- ALLOW_PLAINTEXT_LISTENER=yes
- depends_on:
- - zookeeper
zookeeper:
image: docker.io/bitnami/zookeeper:3.8
ports:
- "2181:2181"
volumes:
- "zookeeper_data:/bitnami"
environment:
- ALLOW_ANONYMOUS_LOGIN=yes
kafka:
image: docker.io/bitnami/kafka:3.4
ports:
- "9092:9092"
volumes:
- "kafka_data:/bitnami"
environment:
- ALLOW_PLAINTEXT_LISTENER=yes
- KAFKA_ENABLE_KRAFT=no
- KAFKA_CFG_ZOOKEEPER_CONNECT=zookeeper:2181
- KAFKA_CFG_LISTENERS=PLAINTEXT://:9092
depends_on:
- zookeeper
volumes:
zookeeper_data:
driver: local
kafka_data:
driver: local
```
### Accessing Apache Kafka with internal and external clients
@ -303,12 +281,9 @@ To do so, add the following environment variables to your docker-compose:
```diff
environment:
- KAFKA_CFG_ZOOKEEPER_CONNECT=zookeeper:2181
- ALLOW_PLAINTEXT_LISTENER=yes
+ - KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP=CLIENT:PLAINTEXT,EXTERNAL:PLAINTEXT
+ - KAFKA_CFG_LISTENERS=CLIENT://:9092,EXTERNAL://:9093
+ - KAFKA_CFG_ADVERTISED_LISTENERS=CLIENT://kafka:9092,EXTERNAL://localhost:9093
+ - KAFKA_CFG_INTER_BROKER_LISTENER_NAME=CLIENT
+ - KAFKA_CFG_LISTENERS=PLAINTEXT://:9092,CONTROLLER://:9093,EXTERNAL://:9094
+ - KAFKA_CFG_ADVERTISED_LISTENERS=PLAINTEXT://kafka:9092,EXTERNAL://localhost:9094
```
And expose the external port:
@ -318,18 +293,19 @@ And expose the external port:
```diff
ports:
- - '9092:9092'
+ - '9093:9093'
+ - '9094:9094'
```
**Note**: To connect from an external machine, change `localhost` above to your host's external IP/hostname and include `EXTERNAL://0.0.0.0:9094` in `KAFKA_CFG_LISTENERS` to allow for remote connections.
#### Producer and consumer using external client
These clients, from the same host, will use `localhost` to connect to Apache Kafka.
```console
kafka-console-producer.sh --bootstrap-server 127.0.0.1:9093 --topic test
kafka-console-consumer.sh --bootstrap-server 127.0.0.1:9093 --topic test --from-beginning
kafka-console-producer.sh --bootstrap-server 127.0.0.1:9094 --topic test
kafka-console-consumer.sh --bootstrap-server 127.0.0.1:9094 --topic test --from-beginning
```
If running these commands from another machine, change the address accordingly.
@ -355,20 +331,13 @@ The Bitnami Apache Kafka docker image disables the PLAINTEXT listener for securi
ALLOW_PLAINTEXT_LISTENER=yes
```
In order to configure authentication, you must configure the Apache Kafka listeners properly. This container assumes the names below will be used for the listeners:
* INTERNAL: used for inter-broker communications.
* CLIENT: used for communications with clients that are within the same network as Apache Kafka brokers.
Let's see an example to configure Apache Kafka with `SASL_SSL` authentication for communications with clients, and `SSL` authentication for inter-broker communication.
In order to configure authentication, you must configure the Apache Kafka listeners properly. Let's see an example to configure Apache Kafka with `SASL_SSL` authentication for communications with clients, and `PLAINTEXT` authentication for controller-related communications.
The environment variables below should be defined to configure the listeners, and the SASL credentials for client communications:
```console
KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP=INTERNAL:SSL,CLIENT:SASL_SSL
KAFKA_CFG_LISTENERS=INTERNAL://:9093,CLIENT://:9092
KAFKA_CFG_ADVERTISED_LISTENERS=INTERNAL://kafka:9093,CLIENT://kafka:9092
KAFKA_CFG_INTER_BROKER_LISTENER_NAME=INTERNAL
KAFKA_CFG_LISTENERS=SASL_SSL://:9092,CONTROLLER://:9093
KAFKA_CFG_ADVERTISED_LISTENERS=SASL_SSL://localhost:9092
KAFKA_CLIENT_USERS=user
KAFKA_CLIENT_PASSWORDS=password
```
@ -396,25 +365,14 @@ The following docker-compose file is an example showing how to mount your JKS ce
version: '2'
services:
zookeeper:
image: 'bitnami/zookeeper:latest'
ports:
- '2181:2181'
environment:
- ZOO_ENABLE_AUTH=yes
- ZOO_SERVER_USERS=kafka
- ZOO_SERVER_PASSWORDS=kafka_password
kafka:
image: 'bitnami/kafka:latest'
hostname: kafka.example.com
ports:
- '9092'
environment:
- KAFKA_CFG_ZOOKEEPER_CONNECT=zookeeper:2181
- KAFKA_CFG_LISTENERS=SASL_SSL://:9092
- KAFKA_CFG_LISTENERS=SASL_SSL://:9092,CONTROLLER://:9093
- KAFKA_CFG_ADVERTISED_LISTENERS=SASL_SSL://:9092
- KAFKA_ZOOKEEPER_USER=kafka
- KAFKA_ZOOKEEPER_PASSWORD=kafka_password
- KAFKA_CLIENT_USERS=user
- KAFKA_CLIENT_PASSWORDS=password
- KAFKA_CERTIFICATE_PASSWORD=certificatePassword123
@ -502,36 +460,21 @@ In order to authenticate Apache Kafka against a Zookeeper server with `SASL_SSL`
An Apache Kafka cluster can easily be setup with the Bitnami Apache Kafka Docker image using the following environment variables:
* `KAFKA_CFG_ZOOKEEPER_CONNECT`: Comma separated host:port pairs, each corresponding to a Zookeeper Server.
* `KAFKA_CFG_CONTROLLER_QUORUM_VOTERS`: Comma separated host:port pairs, each corresponding to a Kafka controller connection.
Create a Docker network to enable visibility to each other via the docker container name
```console
docker network create app-tier --driver bridge
```
#### Step 1: Create the first node for Zookeeper
The first step is to create one Zookeeper instance.
```console
docker run --name zookeeper \
--network app-tier \
-e ALLOW_ANONYMOUS_LOGIN=yes \
-p 2181:2181 \
bitnami/zookeeper:latest
```
#### Step 2: Create the first node for Apache Kafka
#### Step 1: Create the first node for Apache Kafka
The first step is to create one Apache Kafka instance.
```console
docker run --name kafka1 \
docker run --name kafka-0 \
--network app-tier \
-e KAFKA_CFG_ZOOKEEPER_CONNECT=zookeeper:2181 \
-e KAFKA_CFG_CONTROLLER_QUORUM_VOTERS=0@kafka-0:9093,1@kafka-1:9093,2@kafka-2:9093 \
-e KAFKA_CFG_NODE_ID=0 \
-e KAFKA_KRAFT_CLUSTER_ID=abcdefghijklmnopqrstuv \
-e ALLOW_PLAINTEXT_LISTENER=yes \
-p :9092 \
-p :9093 \
bitnami/kafka:latest
```
@ -540,11 +483,14 @@ docker run --name kafka1 \
Next we start a new Apache Kafka container.
```console
docker run --name kafka2 \
docker run --name kafka-1 \
--network app-tier \
-e KAFKA_CFG_ZOOKEEPER_CONNECT=zookeeper:2181 \
-e KAFKA_CFG_CONTROLLER_QUORUM_VOTERS=0@kafka-0:9093,1@kafka-1:9093,2@kafka-2:9093 \
-e KAFKA_CFG_NODE_ID=1 \
-e KAFKA_KRAFT_CLUSTER_ID=abcdefghijklmnopqrstuv \
-e ALLOW_PLAINTEXT_LISTENER=yes \
-p :9092 \
-p :9093 \
bitnami/kafka:latest
```
@ -553,11 +499,14 @@ docker run --name kafka2 \
Next we start another new Apache Kafka container.
```console
docker run --name kafka3 \
docker run --name kafka-2 \
--network app-tier \
-e KAFKA_CFG_ZOOKEEPER_CONNECT=zookeeper:2181 \
-e KAFKA_CFG_CONTROLLER_QUORUM_VOTERS=0@kafka-0:9093,1@kafka-1:9093,2@kafka-2:9093 \
-e KAFKA_CFG_NODE_ID=2 \
-e KAFKA_KRAFT_CLUSTER_ID=abcdefghijklmnopqrstuv \
-e ALLOW_PLAINTEXT_LISTENER=yes \
-p :9092 \
-p :9093 \
bitnami/kafka:latest
```
@ -569,42 +518,62 @@ With Docker Compose, topic replication can be setup using:
version: '2'
services:
zookeeper:
image: 'bitnami/zookeeper:latest'
kafka-0:
image: docker.io/bitnami/kafka:testing
ports:
- '2181:2181'
- "9092"
- "9093"
environment:
- ALLOW_ANONYMOUS_LOGIN=yes
kafka1:
image: 'bitnami/kafka:latest'
ports:
- '9092'
environment:
- KAFKA_CFG_ZOOKEEPER_CONNECT=zookeeper:2181
- BRDEBUG=1
- KAFKA_CFG_CONTROLLER_QUORUM_VOTERS=0@kafka-0:9093,1@kafka-1:9093,2@kafka-2:9093
- KAFKA_CFG_BROKER_ID=0
- KAFKA_KRAFT_CLUSTER_ID=abcdefghijklmnopqrstuv
- ALLOW_PLAINTEXT_LISTENER=yes
kafka2:
image: 'bitnami/kafka:latest'
volumes:
- kafka_0_data:/bitnami/kafka
kafka-1:
image: docker.io/bitnami/kafka:testing
ports:
- '9092'
- "9092"
- "9093"
environment:
- KAFKA_CFG_ZOOKEEPER_CONNECT=zookeeper:2181
- BRDEBUG=1
- KAFKA_CFG_CONTROLLER_QUORUM_VOTERS=0@kafka-0:9093,1@kafka-1:9093,2@kafka-2:9093
- KAFKA_CFG_BROKER_ID=1
- KAFKA_KRAFT_CLUSTER_ID=abcdefghijklmnopqrstuv
- ALLOW_PLAINTEXT_LISTENER=yes
kafka3:
image: 'bitnami/kafka:latest'
volumes:
- kafka_1_data:/bitnami/kafka
kafka-2:
image: docker.io/bitnami/kafka:testing
ports:
- '9092'
- "9092"
- "9093"
environment:
- KAFKA_CFG_ZOOKEEPER_CONNECT=zookeeper:2181
- BRDEBUG=1
- KAFKA_CFG_CONTROLLER_QUORUM_VOTERS=0@kafka-0:9093,1@kafka-1:9093,2@kafka-2:9093
- KAFKA_KRAFT_CLUSTER_ID=abcdefghijklmnopqrstuv
- KAFKA_CFG_BROKER_ID=2
- ALLOW_PLAINTEXT_LISTENER=yes
volumes:
- kafka_2_data:/bitnami/kafka
volumes:
kafka_0_data:
driver: local
kafka_1_data:
driver: local
kafka_2_data:
driver: local
```
Then, you can create a replicated topic with:
```console
root@kafka1:/# /opt/bitnami/kafka/bin/kafka-topics.sh --create --bootstrap-server localhost:9092 --topic mytopic --partitions 3 --replication-factor 3
root@kafka-0:/# /opt/bitnami/kafka/bin/kafka-topics.sh --create --bootstrap-server localhost:9092 --topic mytopic --partitions 3 --replication-factor 3
Created topic "mytopic".
root@kafka1:/# /opt/bitnami/kafka/bin/kafka-topics.sh --describe --bootstrap-server localhost:9092 --topic mytopic
root@kafka-0:/# /opt/bitnami/kafka/bin/kafka-topics.sh --describe --bootstrap-server localhost:9092 --topic mytopic
Topic:mytopic PartitionCount:3 ReplicationFactor:3 Configs:
Topic: mytopic Partition: 0 Leader: 2 Replicas: 2,3,1 Isr: 2,3,1
Topic: mytopic Partition: 1 Leader: 3 Replicas: 3,1,2 Isr: 3,1,2
@ -613,7 +582,7 @@ Topic:mytopic PartitionCount:3 ReplicationFactor:3 Configs:
### Full configuration
The image looks for configuration files (server.properties, log4j.properties, etc.) in the `/bitnami/kafka/config/` directory, this directory can be changed by setting the KAFKA_MOUNTED_CONF_DIR environment variable.
The image looks for configuration files (server.properties, log4j.properties, etc.) in the `/bitnami/kafka/config/` and `/bitnami/kafka/config/kraft` directories, this can be changed by setting the KAFKA_MOUNTED_CONF_DIR environment variable.
```console
docker run --name kafka -v /path/to/server.properties:/bitnami/kafka/config/server.properties bitnami/kafka:latest
@ -776,6 +745,10 @@ docker-compose up kafka
Branch 2 has been renamed to 2.8 and branch 3 has been split into branches 3.0 and 3.1 mirroring the upstream [Apache Kafka's naming policy](https://kafka.apache.org/downloads)
### 3.4.0-debian-11-r23, 3.3.2-debian-11-r29 and 3.2.3-debian-11-r73
* Apache Kafka is now configured using KRaft. You can disable this configuration with the `KAFKA_ENABLE_KRAFT=no` env var and by following the instructions in this guide.
### 3.0.0-debian-10-r0
* Apache Kafka 3.0 deprecates the `--zookeper` flag in shell commands. Related operations such as topic creation require the use of updated flags. Please, refer to [Apache Kafka's official release notes](https://archive.apache.org/dist/kafka/3.0.0/RELEASE_NOTES.html) for further information on the changes introduced by this version.

View File

@ -1,54 +1,41 @@
version: "2"
services:
zookeeper:
image: docker.io/bitnami/zookeeper:3.8
ports:
- "2181"
environment:
- ALLOW_ANONYMOUS_LOGIN=yes
volumes:
- zookeeper_data:/bitnami/zookeeper
kafka-0:
image: docker.io/bitnami/kafka:3.4
ports:
- "9092"
environment:
- KAFKA_CFG_ZOOKEEPER_CONNECT=zookeeper:2181
- KAFKA_CFG_BROKER_ID=0
- ALLOW_PLAINTEXT_LISTENER=yes
- KAFKA_CFG_NODE_ID=0
- KAFKA_CFG_CONTROLLER_QUORUM_VOTERS=0@kafka-0:9093,1@kafka-1:9093,2@kafka-2:9093
- KAFKA_KRAFT_CLUSTER_ID=abcdefghijklmnopqrstuv
volumes:
- kafka_0_data:/bitnami/kafka
depends_on:
- zookeeper
kafka-1:
image: docker.io/bitnami/kafka:3.4
ports:
- "9092"
environment:
- KAFKA_CFG_ZOOKEEPER_CONNECT=zookeeper:2181
- KAFKA_CFG_BROKER_ID=1
- ALLOW_PLAINTEXT_LISTENER=yes
- KAFKA_CFG_NODE_ID=1
- KAFKA_CFG_CONTROLLER_QUORUM_VOTERS=0@kafka-0:9093,1@kafka-1:9093,2@kafka-2:9093
- KAFKA_KRAFT_CLUSTER_ID=abcdefghijklmnopqrstuv
volumes:
- kafka_1_data:/bitnami/kafka
depends_on:
- zookeeper
kafka-2:
image: docker.io/bitnami/kafka:3.4
ports:
- "9092"
environment:
- KAFKA_CFG_ZOOKEEPER_CONNECT=zookeeper:2181
- KAFKA_CFG_BROKER_ID=2
- ALLOW_PLAINTEXT_LISTENER=yes
- KAFKA_CFG_NODE_ID=2
- KAFKA_CFG_CONTROLLER_QUORUM_VOTERS=0@kafka-0:9093,1@kafka-1:9093,2@kafka-2:9093
- KAFKA_KRAFT_CLUSTER_ID=abcdefghijklmnopqrstuv
volumes:
- kafka_2_data:/bitnami/kafka
depends_on:
- zookeeper
volumes:
zookeeper_data:
driver: local
kafka_0_data:
driver: local
kafka_1_data:

View File

@ -1,14 +1,6 @@
version: "2"
services:
zookeeper:
image: docker.io/bitnami/zookeeper:3.8
ports:
- "2181:2181"
volumes:
- "zookeeper_data:/bitnami"
environment:
- ALLOW_ANONYMOUS_LOGIN=yes
kafka:
image: docker.io/bitnami/kafka:3.4
ports:
@ -16,13 +8,8 @@ services:
volumes:
- "kafka_data:/bitnami"
environment:
- KAFKA_CFG_ZOOKEEPER_CONNECT=zookeeper:2181
- ALLOW_PLAINTEXT_LISTENER=yes
depends_on:
- zookeeper
volumes:
zookeeper_data:
driver: local
kafka_data:
driver: local