2.0.2-debian-10-r0 release

This commit is contained in:
parent dcc5c0b9e4
commit e7efdb33d1
@@ -0,0 +1,31 @@
FROM docker.io/bitnami/minideb:buster
LABEL maintainer "Bitnami <containers@bitnami.com>"

ENV HOME="/" \
    OS_ARCH="amd64" \
    OS_FLAVOUR="debian-10" \
    OS_NAME="linux"

COPY prebuildfs /
# Install required system packages and dependencies
RUN install_packages acl ca-certificates curl gzip libc6 procps tar
RUN . /opt/bitnami/scripts/libcomponent.sh && component_unpack "wait-for-port" "1.0.0-3" --checksum 7521d9a4f9e4e182bf32977e234026caa7b03759799868335bccb1edd8f8fd12
RUN . /opt/bitnami/scripts/libcomponent.sh && component_unpack "influxdb" "2.0.2-0" --checksum 0e66afd7d4eb49b6b8a8797f4a669165343c3d8ca50b5a76233d267936872873
RUN . /opt/bitnami/scripts/libcomponent.sh && component_unpack "gosu" "1.12.0-2" --checksum 4d858ac600c38af8de454c27b7f65c0074ec3069880cb16d259a6e40a46bbc50
RUN apt-get update && apt-get upgrade -y && \
    rm -r /var/lib/apt/lists /var/cache/apt/archives
RUN chmod g+rwX /opt/bitnami

COPY rootfs /
RUN /opt/bitnami/scripts/influxdb/postunpack.sh
ENV BITNAMI_APP_NAME="influxdb" \
    BITNAMI_IMAGE_VERSION="2.0.2-debian-10-r0" \
    PATH="/opt/bitnami/common/bin:/opt/bitnami/influxdb/bin:$PATH"

VOLUME [ "/bitnami/influxdb" ]

EXPOSE 8086 8088

USER 1001
ENTRYPOINT [ "/opt/bitnami/scripts/influxdb/entrypoint.sh" ]
CMD [ "/opt/bitnami/scripts/influxdb/run.sh" ]

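A quick way to exercise the image defined above (a sketch only; it assumes the build context contains the prebuildfs/ and rootfs/ directories referenced by the COPY instructions, and the tag name is illustrative):

    docker build -t bitnami/influxdb:2.0.2-debian-10-r0 .
    docker run --rm -p 8086:8086 \
        -e INFLUXDB_ADMIN_USER_PASSWORD=bitnami123 \
        -e INFLUXDB_ADMIN_USER_TOKEN=admintoken123 \
        bitnami/influxdb:2.0.2-debian-10-r0
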
@@ -0,0 +1,15 @@
version: '2'
services:
  influxdb:
    image: docker.io/bitnami/influxdb:2-debian-10
    ports:
      - 8086:8086
      - 8088:8088
    environment:
      - INFLUXDB_ADMIN_USER_PASSWORD=bitnami123
      - INFLUXDB_ADMIN_USER_TOKEN=admintoken123
    volumes:
      - influxdb_data:/bitnami/influxdb
volumes:
  influxdb_data:
    driver: local

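To try the Compose definition above locally (a minimal sketch, assuming it is saved as docker-compose.yml and Docker Compose is installed):

    docker-compose up -d
    docker-compose logs -f influxdb
    # InfluxDB 2.x answers on the published HTTP port once the server is ready
    curl -s http://localhost:8086/health
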
@@ -0,0 +1,23 @@
{
  "gosu": {
    "arch": "amd64",
    "digest": "4d858ac600c38af8de454c27b7f65c0074ec3069880cb16d259a6e40a46bbc50",
    "distro": "debian-10",
    "type": "NAMI",
    "version": "1.12.0-2"
  },
  "influxdb": {
    "arch": "amd64",
    "digest": "0e66afd7d4eb49b6b8a8797f4a669165343c3d8ca50b5a76233d267936872873",
    "distro": "debian-10",
    "type": "NAMI",
    "version": "2.0.2-0"
  },
  "wait-for-port": {
    "arch": "amd64",
    "digest": "7521d9a4f9e4e182bf32977e234026caa7b03759799868335bccb1edd8f8fd12",
    "distro": "debian-10",
    "type": "NAMI",
    "version": "1.0.0-3"
  }
}

@@ -0,0 +1,3 @@
Bitnami containers ship with software bundles. You can find the licenses under:
/opt/bitnami/nami/COPYING
/opt/bitnami/[name-of-bundle]/licenses/[bundle-version].txt

@@ -0,0 +1,51 @@
#!/bin/bash
#
# Bitnami custom library

# shellcheck disable=SC1091

# Load Generic Libraries
. /opt/bitnami/scripts/liblog.sh

# Constants
BOLD='\033[1m'

# Functions

########################
# Print the welcome page
# Globals:
#   DISABLE_WELCOME_MESSAGE
#   BITNAMI_APP_NAME
# Arguments:
#   None
# Returns:
#   None
#########################
print_welcome_page() {
    if [[ -z "${DISABLE_WELCOME_MESSAGE:-}" ]]; then
        if [[ -n "$BITNAMI_APP_NAME" ]]; then
            print_image_welcome_page
        fi
    fi
}

########################
# Print the welcome page for a Bitnami Docker image
# Globals:
#   BITNAMI_APP_NAME
# Arguments:
#   None
# Returns:
#   None
#########################
print_image_welcome_page() {
    local github_url="https://github.com/bitnami/bitnami-docker-${BITNAMI_APP_NAME}"

    log ""
    log "${BOLD}Welcome to the Bitnami ${BITNAMI_APP_NAME} container${RESET}"
    log "Subscribe to project updates by watching ${BOLD}${github_url}${RESET}"
    log "Submit issues and feature requests at ${BOLD}${github_url}/issues${RESET}"
    log ""
}

@@ -0,0 +1,65 @@
#!/bin/bash
#
# Library for managing Bitnami components

# Constants
CACHE_ROOT="/tmp/bitnami/pkg/cache"
DOWNLOAD_URL="https://downloads.bitnami.com/files/stacksmith"

# Functions

########################
# Download and unpack a Bitnami package
# Globals:
#   OS_NAME
#   OS_ARCH
#   OS_FLAVOUR
# Arguments:
#   $1 - component's name
#   $2 - component's version
# Returns:
#   None
#########################
component_unpack() {
    local name="${1:?name is required}"
    local version="${2:?version is required}"
    local base_name="${name}-${version}-${OS_NAME}-${OS_ARCH}-${OS_FLAVOUR}"
    local package_sha256=""
    local directory="/opt/bitnami"

    # Validate arguments
    shift 2
    while [ "$#" -gt 0 ]; do
        case "$1" in
            -c|--checksum)
                shift
                package_sha256="${1:?missing package checksum}"
                ;;
            *)
                echo "Invalid command line flag $1" >&2
                return 1
                ;;
        esac
        shift
    done

    echo "Downloading $base_name package"
    if [ -f "${CACHE_ROOT}/${base_name}.tar.gz" ]; then
        echo "${CACHE_ROOT}/${base_name}.tar.gz already exists, skipping download."
        cp "${CACHE_ROOT}/${base_name}.tar.gz" .
        rm "${CACHE_ROOT}/${base_name}.tar.gz"
        if [ -f "${CACHE_ROOT}/${base_name}.tar.gz.sha256" ]; then
            echo "Using the local sha256 from ${CACHE_ROOT}/${base_name}.tar.gz.sha256"
            package_sha256="$(< "${CACHE_ROOT}/${base_name}.tar.gz.sha256")"
            rm "${CACHE_ROOT}/${base_name}.tar.gz.sha256"
        fi
    else
        curl --remote-name --silent "${DOWNLOAD_URL}/${base_name}.tar.gz"
    fi
    if [ -n "$package_sha256" ]; then
        echo "Verifying package integrity"
        echo "$package_sha256  ${base_name}.tar.gz" | sha256sum --check -
    fi
    tar --directory "${directory}" --extract --gunzip --file "${base_name}.tar.gz" --no-same-owner --strip-components=2 "${base_name}/files/"
    rm "${base_name}.tar.gz"
}

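For context, with the ENV values set in the Dockerfile above (OS_NAME=linux, OS_ARCH=amd64, OS_FLAVOUR=debian-10), the call used for the InfluxDB component resolves as follows (a sketch of what component_unpack does, not extra tooling):

    . /opt/bitnami/scripts/libcomponent.sh
    component_unpack "influxdb" "2.0.2-0" --checksum 0e66afd7d4eb49b6b8a8797f4a669165343c3d8ca50b5a76233d267936872873
    # 1. downloads ${DOWNLOAD_URL}/influxdb-2.0.2-0-linux-amd64-debian-10.tar.gz (unless a cached copy exists under /tmp/bitnami/pkg/cache)
    # 2. verifies the tarball with 'sha256sum --check' because --checksum was given
    # 3. extracts the "<base_name>/files/" subtree into /opt/bitnami with --strip-components=2
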
@@ -0,0 +1,80 @@
#!/bin/bash
#
# Library for managing files

# Functions

########################
# Replace a regex in a file
# Arguments:
#   $1 - filename
#   $2 - match regex
#   $3 - substitute regex
#   $4 - use POSIX regex. Default: true
# Returns:
#   None
#########################
replace_in_file() {
    local filename="${1:?filename is required}"
    local match_regex="${2:?match regex is required}"
    local substitute_regex="${3:?substitute regex is required}"
    local posix_regex=${4:-true}

    local result

    # We should avoid using 'sed in-place' substitutions
    # 1) They are not compatible with files mounted from ConfigMap(s)
    # 2) We found incompatibility issues with Debian10 and "in-place" substitutions
    del=$'\001' # Use a non-printable character as a 'sed' delimiter to avoid issues
    if [[ $posix_regex = true ]]; then
        result="$(sed -E "s${del}${match_regex}${del}${substitute_regex}${del}g" "$filename")"
    else
        result="$(sed "s${del}${match_regex}${del}${substitute_regex}${del}g" "$filename")"
    fi
    echo "$result" > "$filename"
}

########################
# Remove a line in a file based on a regex
# Arguments:
#   $1 - filename
#   $2 - match regex
#   $3 - use POSIX regex. Default: true
# Returns:
#   None
#########################
remove_in_file() {
    local filename="${1:?filename is required}"
    local match_regex="${2:?match regex is required}"
    local posix_regex=${3:-true}
    local result

    # We should avoid using 'sed in-place' substitutions
    # 1) They are not compatible with files mounted from ConfigMap(s)
    # 2) We found incompatibility issues with Debian10 and "in-place" substitutions
    if [[ $posix_regex = true ]]; then
        result="$(sed -E "/$match_regex/d" "$filename")"
    else
        result="$(sed "/$match_regex/d" "$filename")"
    fi
    echo "$result" > "$filename"
}

########################
# Appends text after the last line matching a pattern
# Arguments:
#   $1 - file
#   $2 - match regex
#   $3 - contents to add
# Returns:
#   None
#########################
append_file_after_last_match() {
    local file="${1:?missing file}"
    local match_regex="${2:?missing pattern}"
    local value="${3:?missing value}"

    # We read the file in reverse, replace the first match (0,/pattern/s) and then reverse the results again
    result="$(tac "$file" | sed -E "0,/($match_regex)/s||${value}\n\1|" | tac)"
    echo "$result" > "$file"
}

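A usage sketch for the helpers above (the file path and patterns are hypothetical, chosen only to illustrate the argument order):

    # Rewrite a setting without 'sed -i', which does not play well with ConfigMap-mounted files
    replace_in_file "/opt/bitnami/influxdb/etc/example.conf" "^bind-address.*" "bind-address = 0.0.0.0:8086"
    # Drop commented-out lines from the same (hypothetical) file
    remove_in_file "/opt/bitnami/influxdb/etc/example.conf" "^#"
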
@ -0,0 +1,183 @@
|
|||
#!/bin/bash
|
||||
#
|
||||
# Library for file system actions
|
||||
|
||||
# shellcheck disable=SC1091
|
||||
|
||||
# Load Generic Libraries
|
||||
. /opt/bitnami/scripts/liblog.sh
|
||||
|
||||
# Functions
|
||||
|
||||
########################
|
||||
# Ensure a file/directory is owned (user and group) by the given user
|
||||
# Arguments:
|
||||
# $1 - filepath
|
||||
# $2 - owner
|
||||
# Returns:
|
||||
# None
|
||||
#########################
|
||||
owned_by() {
|
||||
local path="${1:?path is missing}"
|
||||
local owner="${2:?owner is missing}"
|
||||
|
||||
chown "$owner":"$owner" "$path"
|
||||
}
|
||||
|
||||
########################
|
||||
# Ensure a directory exists and, optionally, is owned by the given user
|
||||
# Arguments:
|
||||
# $1 - directory
|
||||
# $2 - owner
|
||||
# Returns:
|
||||
# None
|
||||
#########################
|
||||
ensure_dir_exists() {
|
||||
local dir="${1:?directory is missing}"
|
||||
local owner="${2:-}"
|
||||
|
||||
mkdir -p "${dir}"
|
||||
if [[ -n $owner ]]; then
|
||||
owned_by "$dir" "$owner"
|
||||
fi
|
||||
}
|
||||
|
||||
########################
|
||||
# Checks whether a directory is empty or not
|
||||
# arguments:
|
||||
# $1 - directory
|
||||
# returns:
|
||||
# boolean
|
||||
#########################
|
||||
is_dir_empty() {
|
||||
local dir="${1:?missing directory}"
|
||||
|
||||
if [[ ! -e "$dir" ]] || [[ -z "$(ls -A "$dir")" ]]; then
|
||||
true
|
||||
else
|
||||
false
|
||||
fi
|
||||
}
|
||||
|
||||
########################
|
||||
# Checks whether a mounted directory is empty or not
|
||||
# arguments:
|
||||
# $1 - directory
|
||||
# returns:
|
||||
# boolean
|
||||
#########################
|
||||
is_mounted_dir_empty() {
|
||||
local dir="${1:?missing directory}"
|
||||
|
||||
if is_dir_empty "$dir" || find "$dir" -mindepth 1 -maxdepth 1 -not -name ".snapshot" -not -name "lost+found" -exec false {} +; then
|
||||
true
|
||||
else
|
||||
false
|
||||
fi
|
||||
}
|
||||
|
||||
########################
|
||||
# Checks whether a file can be written to or not
|
||||
# arguments:
|
||||
# $1 - file
|
||||
# returns:
|
||||
# boolean
|
||||
#########################
|
||||
is_file_writable() {
|
||||
local file="${1:?missing file}"
|
||||
local dir
|
||||
dir="$(dirname "$file")"
|
||||
|
||||
if [[ ( -f "$file" && -w "$file" ) || ( ! -f "$file" && -d "$dir" && -w "$dir" ) ]]; then
|
||||
true
|
||||
else
|
||||
false
|
||||
fi
|
||||
}
|
||||
|
||||
########################
|
||||
# Relativize a path
|
||||
# arguments:
|
||||
# $1 - path
|
||||
# $2 - base
|
||||
# returns:
|
||||
# None
|
||||
#########################
|
||||
relativize() {
|
||||
local -r path="${1:?missing path}"
|
||||
local -r base="${2:?missing base}"
|
||||
pushd "$base" >/dev/null
|
||||
realpath -q --no-symlinks --relative-base="$base" "$path" | sed -e 's|^/$|.|' -e 's|^/||'
|
||||
popd >/dev/null
|
||||
}
|
||||
|
||||
########################
|
||||
# Configure permissions and ownership recursively
|
||||
# Globals:
|
||||
# None
|
||||
# Arguments:
|
||||
# $1 - paths (as a string).
|
||||
# Flags:
|
||||
# -f|--file-mode - mode for files.
|
||||
# -d|--dir-mode - mode for directories.
|
||||
# -u|--user - user
|
||||
# -g|--group - group
|
||||
# Returns:
|
||||
# None
|
||||
#########################
|
||||
configure_permissions_ownership() {
|
||||
local -r paths="${1:?paths is missing}"
|
||||
local dir_mode=""
|
||||
local file_mode=""
|
||||
local user=""
|
||||
local group=""
|
||||
|
||||
# Validate arguments
|
||||
shift 1
|
||||
while [ "$#" -gt 0 ]; do
|
||||
case "$1" in
|
||||
-f|--file-mode)
|
||||
shift
|
||||
file_mode="${1:?missing mode for files}"
|
||||
;;
|
||||
-d|--dir-mode)
|
||||
shift
|
||||
dir_mode="${1:?missing mode for directories}"
|
||||
;;
|
||||
-u|--user)
|
||||
shift
|
||||
user="${1:?missing user}"
|
||||
;;
|
||||
-g|--group)
|
||||
shift
|
||||
group="${1:?missing group}"
|
||||
;;
|
||||
*)
|
||||
echo "Invalid command line flag $1" >&2
|
||||
return 1
|
||||
;;
|
||||
esac
|
||||
shift
|
||||
done
|
||||
|
||||
read -r -a filepaths <<< "$paths"
|
||||
for p in "${filepaths[@]}"; do
|
||||
if [[ -e "$p" ]]; then
|
||||
if [[ -n $dir_mode ]]; then
|
||||
find -L "$p" -type d -exec chmod "$dir_mode" {} \;
|
||||
fi
|
||||
if [[ -n $file_mode ]]; then
|
||||
find -L "$p" -type f -exec chmod "$file_mode" {} \;
|
||||
fi
|
||||
if [[ -n $user ]] && [[ -n $group ]]; then
|
||||
chown -LR "$user":"$group" "$p"
|
||||
elif [[ -n $user ]] && [[ -z $group ]]; then
|
||||
chown -LR "$user" "$p"
|
||||
elif [[ -z $user ]] && [[ -n $group ]]; then
|
||||
chgrp -LR "$group" "$p"
|
||||
fi
|
||||
else
|
||||
stderr_print "$p does not exist"
|
||||
fi
|
||||
done
|
||||
}
|
||||
|
|
@@ -0,0 +1,16 @@
#!/bin/bash
#
# Library to use for scripts expected to be used as Kubernetes lifecycle hooks

# shellcheck disable=SC1091

# Load generic libraries
. /opt/bitnami/scripts/liblog.sh
. /opt/bitnami/scripts/libos.sh

# Override functions that log to stdout/stderr of the current process, so they print to process 1
for function_to_override in stderr_print debug_execute; do
    # Output is sent to the output of process 1 and thus ends up in the container log
    # The hook output in general isn't saved
    eval "$(declare -f "$function_to_override") >/proc/1/fd/1 2>/proc/1/fd/2"
done

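The eval line above redefines each listed function so that everything it writes goes to the stdout/stderr of PID 1, which is what the container runtime captures as the container log. The same technique applied to a trivial function (a sketch; it only works inside a container where /proc/1/fd/1 is writable by the current user):

    say_hi() { echo "hi"; }
    eval "$(declare -f say_hi) >/proc/1/fd/1 2>/proc/1/fd/2"
    say_hi   # now prints to the main process's stdout instead of the caller's
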
@ -0,0 +1,110 @@
|
|||
#!/bin/bash
|
||||
#
|
||||
# Library for logging functions
|
||||
|
||||
# Constants
|
||||
RESET='\033[0m'
|
||||
RED='\033[38;5;1m'
|
||||
GREEN='\033[38;5;2m'
|
||||
YELLOW='\033[38;5;3m'
|
||||
MAGENTA='\033[38;5;5m'
|
||||
CYAN='\033[38;5;6m'
|
||||
|
||||
# Functions
|
||||
|
||||
########################
|
||||
# Print to STDERR
|
||||
# Arguments:
|
||||
# Message to print
|
||||
# Returns:
|
||||
# None
|
||||
#########################
|
||||
stderr_print() {
|
||||
# 'is_boolean_yes' is defined in libvalidations.sh, but depends on this file so we cannot source it
|
||||
local bool="${BITNAMI_QUIET:-false}"
|
||||
# comparison is performed without regard to the case of alphabetic characters
|
||||
shopt -s nocasematch
|
||||
if ! [[ "$bool" = 1 || "$bool" =~ ^(yes|true)$ ]]; then
|
||||
printf "%b\\n" "${*}" >&2
|
||||
fi
|
||||
}
|
||||
|
||||
########################
|
||||
# Log message
|
||||
# Arguments:
|
||||
# Message to log
|
||||
# Returns:
|
||||
# None
|
||||
#########################
|
||||
log() {
|
||||
stderr_print "${CYAN}${MODULE:-} ${MAGENTA}$(date "+%T.%2N ")${RESET}${*}"
|
||||
}
|
||||
########################
|
||||
# Log an 'info' message
|
||||
# Arguments:
|
||||
# Message to log
|
||||
# Returns:
|
||||
# None
|
||||
#########################
|
||||
info() {
|
||||
log "${GREEN}INFO ${RESET} ==> ${*}"
|
||||
}
|
||||
########################
|
||||
# Log a 'warn' message
|
||||
# Arguments:
|
||||
# Message to log
|
||||
# Returns:
|
||||
# None
|
||||
#########################
|
||||
warn() {
|
||||
log "${YELLOW}WARN ${RESET} ==> ${*}"
|
||||
}
|
||||
########################
|
||||
# Log an 'error' message
|
||||
# Arguments:
|
||||
# Message to log
|
||||
# Returns:
|
||||
# None
|
||||
#########################
|
||||
error() {
|
||||
log "${RED}ERROR${RESET} ==> ${*}"
|
||||
}
|
||||
########################
|
||||
# Log a 'debug' message
|
||||
# Globals:
|
||||
# BITNAMI_DEBUG
|
||||
# Arguments:
|
||||
# None
|
||||
# Returns:
|
||||
# None
|
||||
#########################
|
||||
debug() {
|
||||
# 'is_boolean_yes' is defined in libvalidations.sh, but depends on this file so we cannot source it
|
||||
local bool="${BITNAMI_DEBUG:-false}"
|
||||
# comparison is performed without regard to the case of alphabetic characters
|
||||
shopt -s nocasematch
|
||||
if [[ "$bool" = 1 || "$bool" =~ ^(yes|true)$ ]]; then
|
||||
log "${MAGENTA}DEBUG${RESET} ==> ${*}"
|
||||
fi
|
||||
}
|
||||
|
||||
########################
|
||||
# Indent a string
|
||||
# Arguments:
|
||||
# $1 - string
|
||||
# $2 - number of indentation characters (default: 4)
|
||||
# $3 - indentation character (default: " ")
|
||||
# Returns:
|
||||
# None
|
||||
#########################
|
||||
indent() {
|
||||
local string="${1:-}"
|
||||
local num="${2:?missing num}"
|
||||
local char="${3:-" "}"
|
||||
# Build the indentation unit string
|
||||
local indent_unit=""
|
||||
for ((i = 0; i < num; i++)); do
|
||||
indent_unit="${indent_unit}${char}"
|
||||
done
|
||||
echo "$string" | sed "s/^/${indent_unit}/"
|
||||
}
|
||||
|
|
@ -0,0 +1,142 @@
|
|||
#!/bin/bash
|
||||
#
|
||||
# Library for network functions
|
||||
|
||||
# shellcheck disable=SC1091
|
||||
|
||||
# Load Generic Libraries
|
||||
. /opt/bitnami/scripts/liblog.sh
|
||||
|
||||
# Functions
|
||||
|
||||
########################
|
||||
# Resolve IP address for a host/domain (i.e. DNS lookup)
|
||||
# Arguments:
|
||||
# $1 - Hostname to resolve
|
||||
# $2 - IP address version (v4, v6), leave empty for resolving to any version
|
||||
# Returns:
|
||||
# IP
|
||||
#########################
|
||||
dns_lookup() {
|
||||
local host="${1:?host is missing}"
|
||||
local ip_version="${2:-}"
|
||||
getent "ahosts${ip_version}" "$host" | awk '/STREAM/ {print $1 }' | head -n 1
|
||||
}
|
||||
|
||||
#########################
|
||||
# Wait for a hostname and return the IP
|
||||
# Arguments:
|
||||
# $1 - hostname
|
||||
# $2 - number of retries
|
||||
# $3 - seconds to wait between retries
|
||||
# Returns:
|
||||
# - IP address that corresponds to the hostname
|
||||
#########################
|
||||
wait_for_dns_lookup() {
|
||||
local hostname="${1:?hostname is missing}"
|
||||
local retries="${2:-5}"
|
||||
local seconds="${3:-1}"
|
||||
check_host() {
|
||||
if [[ $(dns_lookup "$hostname") == "" ]]; then
|
||||
false
|
||||
else
|
||||
true
|
||||
fi
|
||||
}
|
||||
# Wait for the host to be ready
|
||||
retry_while "check_host ${hostname}" "$retries" "$seconds"
|
||||
dns_lookup "$hostname"
|
||||
}
|
||||
|
||||
########################
|
||||
# Get machine's IP
|
||||
# Arguments:
|
||||
# None
|
||||
# Returns:
|
||||
# Machine IP
|
||||
#########################
|
||||
get_machine_ip() {
|
||||
local -a ip_addresses
|
||||
local hostname
|
||||
hostname="$(hostname)"
|
||||
read -r -a ip_addresses <<< "$(dns_lookup "$hostname" | xargs echo)"
|
||||
if [[ "${#ip_addresses[@]}" -gt 1 ]]; then
|
||||
warn "Found more than one IP address associated to hostname ${hostname}: ${ip_addresses[*]}, will use ${ip_addresses[0]}"
|
||||
elif [[ "${#ip_addresses[@]}" -lt 1 ]]; then
|
||||
error "Could not find any IP address associated to hostname ${hostname}"
|
||||
exit 1
|
||||
fi
|
||||
echo "${ip_addresses[0]}"
|
||||
}
|
||||
|
||||
########################
|
||||
# Check if the provided argument is a resolved hostname
|
||||
# Arguments:
|
||||
# $1 - Value to check
|
||||
# Returns:
|
||||
# Boolean
|
||||
#########################
|
||||
is_hostname_resolved() {
|
||||
local -r host="${1:?missing value}"
|
||||
if [[ -n "$(dns_lookup "$host")" ]]; then
|
||||
true
|
||||
else
|
||||
false
|
||||
fi
|
||||
}
|
||||
|
||||
########################
|
||||
# Parse URL
|
||||
# Globals:
|
||||
# None
|
||||
# Arguments:
|
||||
# $1 - uri - String
|
||||
# $2 - component to obtain. Valid options (scheme, authority, userinfo, host, port, path, query or fragment) - String
|
||||
# Returns:
|
||||
# String
#########################
|
||||
parse_uri() {
|
||||
local uri="${1:?uri is missing}"
|
||||
local component="${2:?component is missing}"
|
||||
|
||||
# Solution based on https://tools.ietf.org/html/rfc3986#appendix-B with
|
||||
# additional sub-expressions to split authority into userinfo, host and port
|
||||
# Credits to Patryk Obara (see https://stackoverflow.com/a/45977232/6694969)
|
||||
local -r URI_REGEX='^(([^:/?#]+):)?(//((([^@/?#]+)@)?([^:/?#]+)(:([0-9]+))?))?(/([^?#]*))?(\?([^#]*))?(#(.*))?'
|
||||
# || | ||| | | | | | | | | |
|
||||
# |2 scheme | ||6 userinfo 7 host | 9 port | 11 rpath | 13 query | 15 fragment
|
||||
# 1 scheme: | |5 userinfo@ 8 :... 10 path 12 ?... 14 #...
|
||||
# | 4 authority
|
||||
# 3 //...
|
||||
local index=0
|
||||
case "$component" in
|
||||
scheme)
|
||||
index=2
|
||||
;;
|
||||
authority)
|
||||
index=4
|
||||
;;
|
||||
userinfo)
|
||||
index=6
|
||||
;;
|
||||
host)
|
||||
index=7
|
||||
;;
|
||||
port)
|
||||
index=9
|
||||
;;
|
||||
path)
|
||||
index=10
|
||||
;;
|
||||
query)
|
||||
index=13
|
||||
;;
|
||||
fragment)
|
||||
index=14
|
||||
;;
|
||||
*)
|
||||
stderr_print "unrecognized component $component"
|
||||
return 1
|
||||
;;
|
||||
esac
|
||||
[[ "$uri" =~ $URI_REGEX ]] && echo "${BASH_REMATCH[${index}]}"
|
||||
}
|
||||
|
|
@ -0,0 +1,350 @@
|
|||
#!/bin/bash
|
||||
#
|
||||
# Library for operating system actions
|
||||
|
||||
# shellcheck disable=SC1091
|
||||
|
||||
# Load Generic Libraries
|
||||
. /opt/bitnami/scripts/liblog.sh
|
||||
. /opt/bitnami/scripts/libfs.sh
|
||||
|
||||
# Functions
|
||||
|
||||
########################
|
||||
# Check if a user exists in the system
|
||||
# Arguments:
|
||||
# $1 - user
|
||||
# Returns:
|
||||
# Boolean
|
||||
#########################
|
||||
user_exists() {
|
||||
local user="${1:?user is missing}"
|
||||
id "$user" >/dev/null 2>&1
|
||||
}
|
||||
|
||||
########################
|
||||
# Check if a group exists in the system
|
||||
# Arguments:
|
||||
# $1 - group
|
||||
# Returns:
|
||||
# Boolean
|
||||
#########################
|
||||
group_exists() {
|
||||
local group="${1:?group is missing}"
|
||||
getent group "$group" >/dev/null 2>&1
|
||||
}
|
||||
|
||||
########################
|
||||
# Create a group in the system if it does not exist already
|
||||
# Arguments:
|
||||
# $1 - group
|
||||
# Flags:
|
||||
# -s|--system - Whether to create new user as system user (uid <= 999)
|
||||
# Returns:
|
||||
# None
|
||||
#########################
|
||||
ensure_group_exists() {
|
||||
local group="${1:?group is missing}"
|
||||
local is_system_user=false
|
||||
|
||||
# Validate arguments
|
||||
shift 1
|
||||
while [ "$#" -gt 0 ]; do
|
||||
case "$1" in
|
||||
-s|--system)
|
||||
is_system_user=true
|
||||
;;
|
||||
*)
|
||||
echo "Invalid command line flag $1" >&2
|
||||
return 1
|
||||
;;
|
||||
esac
|
||||
shift
|
||||
done
|
||||
|
||||
if ! group_exists "$group"; then
|
||||
local -a args=("$group")
|
||||
$is_system_user && args+=("--system")
|
||||
groupadd "${args[@]}" >/dev/null 2>&1
|
||||
fi
|
||||
}
|
||||
|
||||
########################
|
||||
# Create a user in the system if it does not exist already
|
||||
# Arguments:
|
||||
# $1 - user
|
||||
# Flags:
|
||||
# -g|--group - the group the new user should belong to
|
||||
# -h|--home - the home directory for the new user
|
||||
# -s|--system - whether to create new user as system user (uid <= 999)
|
||||
# Returns:
|
||||
# None
|
||||
#########################
|
||||
ensure_user_exists() {
|
||||
local user="${1:?user is missing}"
|
||||
local group=""
|
||||
local home=""
|
||||
local is_system_user=false
|
||||
|
||||
# Validate arguments
|
||||
shift 1
|
||||
while [ "$#" -gt 0 ]; do
|
||||
case "$1" in
|
||||
-g|--group)
|
||||
shift
|
||||
group="${1:?missing group}"
|
||||
;;
|
||||
-h|--home)
|
||||
shift
|
||||
home="${1:?missing home directory}"
|
||||
;;
|
||||
-s|--system)
|
||||
is_system_user=true
|
||||
;;
|
||||
*)
|
||||
echo "Invalid command line flag $1" >&2
|
||||
return 1
|
||||
;;
|
||||
esac
|
||||
shift
|
||||
done
|
||||
|
||||
if ! user_exists "$user"; then
|
||||
local -a user_args=("-N" "$user")
|
||||
$is_system_user && user_args+=("--system")
|
||||
useradd "${user_args[@]}" >/dev/null 2>&1
|
||||
fi
|
||||
|
||||
if [[ -n "$group" ]]; then
|
||||
local -a group_args=("$group")
|
||||
$is_system_user && group_args+=("--system")
|
||||
ensure_group_exists "${group_args[@]}"
|
||||
usermod -g "$group" "$user" >/dev/null 2>&1
|
||||
fi
|
||||
|
||||
if [[ -n "$home" ]]; then
|
||||
mkdir -p "$home"
|
||||
usermod -d "$home" "$user" >/dev/null 2>&1
|
||||
configure_permissions_ownership "$home" -d "775" -f "664" -u "$user" -g "$group"
|
||||
fi
|
||||
}
|
||||
|
||||
########################
|
||||
# Check if the script is currently running as root
|
||||
# Arguments:
|
||||
# $1 - user
|
||||
# $2 - group
|
||||
# Returns:
|
||||
# Boolean
|
||||
#########################
|
||||
am_i_root() {
|
||||
if [[ "$(id -u)" = "0" ]]; then
|
||||
true
|
||||
else
|
||||
false
|
||||
fi
|
||||
}
|
||||
|
||||
########################
|
||||
# Get total memory available
|
||||
# Arguments:
|
||||
# None
|
||||
# Returns:
|
||||
# Memory in bytes
|
||||
#########################
|
||||
get_total_memory() {
|
||||
echo $(($(grep MemTotal /proc/meminfo | awk '{print $2}') / 1024))
|
||||
}
|
||||
|
||||
########################
|
||||
# Get machine size depending on specified memory
|
||||
# Globals:
|
||||
# None
|
||||
# Arguments:
|
||||
# None
|
||||
# Flags:
|
||||
# --memory - memory size (optional)
|
||||
# Returns:
|
||||
# Detected instance size
|
||||
#########################
|
||||
get_machine_size() {
|
||||
local memory=""
|
||||
# Validate arguments
|
||||
while [[ "$#" -gt 0 ]]; do
|
||||
case "$1" in
|
||||
--memory)
|
||||
shift
|
||||
memory="${1:?missing memory}"
|
||||
;;
|
||||
*)
|
||||
echo "Invalid command line flag $1" >&2
|
||||
return 1
|
||||
;;
|
||||
esac
|
||||
shift
|
||||
done
|
||||
if [[ -z "$memory" ]]; then
|
||||
debug "Memory was not specified, detecting available memory automatically"
|
||||
memory="$(get_total_memory)"
|
||||
fi
|
||||
sanitized_memory=$(convert_to_mb "$memory")
|
||||
if [[ "$sanitized_memory" -gt 26000 ]]; then
|
||||
echo 2xlarge
|
||||
elif [[ "$sanitized_memory" -gt 13000 ]]; then
|
||||
echo xlarge
|
||||
elif [[ "$sanitized_memory" -gt 6000 ]]; then
|
||||
echo large
|
||||
elif [[ "$sanitized_memory" -gt 3000 ]]; then
|
||||
echo medium
|
||||
elif [[ "$sanitized_memory" -gt 1500 ]]; then
|
||||
echo small
|
||||
else
|
||||
echo micro
|
||||
fi
|
||||
}
|
||||
|
||||
########################
|
||||
# Get the list of supported machine sizes
|
||||
# Globals:
|
||||
# None
|
||||
# Arguments:
|
||||
# None
|
||||
# Returns:
|
||||
# List of supported machine sizes
|
||||
#########################
|
||||
get_supported_machine_sizes() {
|
||||
echo micro small medium large xlarge 2xlarge
|
||||
}
|
||||
|
||||
########################
|
||||
# Convert memory size from string to amount of megabytes (i.e. 2G -> 2048)
|
||||
# Globals:
|
||||
# None
|
||||
# Arguments:
|
||||
# $1 - memory size
|
||||
# Returns:
|
||||
# Result of the conversion
|
||||
#########################
|
||||
convert_to_mb() {
|
||||
local amount="${1:-}"
|
||||
if [[ $amount =~ ^([0-9]+)(m|M|g|G) ]]; then
|
||||
size="${BASH_REMATCH[1]}"
|
||||
unit="${BASH_REMATCH[2]}"
|
||||
if [[ "$unit" = "g" || "$unit" = "G" ]]; then
|
||||
amount="$((size * 1024))"
|
||||
else
|
||||
amount="$size"
|
||||
fi
|
||||
fi
|
||||
echo "$amount"
|
||||
}
|
||||
|
||||
|
||||
#########################
|
||||
# Redirects output to /dev/null if debug mode is disabled
|
||||
# Globals:
|
||||
# BITNAMI_DEBUG
|
||||
# Arguments:
|
||||
# $@ - Command to execute
|
||||
# Returns:
|
||||
# None
|
||||
#########################
|
||||
debug_execute() {
|
||||
if ${BITNAMI_DEBUG:-false}; then
|
||||
"$@"
|
||||
else
|
||||
"$@" >/dev/null 2>&1
|
||||
fi
|
||||
}
|
||||
|
||||
########################
|
||||
# Retries a command a given number of times
|
||||
# Arguments:
|
||||
# $1 - cmd (as a string)
|
||||
# $2 - max retries. Default: 12
|
||||
# $3 - sleep between retries (in seconds). Default: 5
|
||||
# Returns:
|
||||
# Boolean
|
||||
#########################
|
||||
retry_while() {
|
||||
local cmd="${1:?cmd is missing}"
|
||||
local retries="${2:-12}"
|
||||
local sleep_time="${3:-5}"
|
||||
local return_value=1
|
||||
|
||||
read -r -a command <<< "$cmd"
|
||||
for ((i = 1 ; i <= retries ; i+=1 )); do
|
||||
"${command[@]}" && return_value=0 && break
|
||||
sleep "$sleep_time"
|
||||
done
|
||||
return $return_value
|
||||
}
|
||||
|
||||
########################
|
||||
# Generate a random string
|
||||
# Arguments:
|
||||
# -t|--type - String type (ascii, alphanumeric, numeric), defaults to ascii
|
||||
# -c|--count - Number of characters, defaults to 32
|
||||
# Arguments:
|
||||
# None
|
||||
# Returns:
|
||||
# None
|
||||
# Returns:
|
||||
# String
|
||||
#########################
|
||||
generate_random_string() {
|
||||
local type="ascii"
|
||||
local count="32"
|
||||
local filter
|
||||
local result
|
||||
# Validate arguments
|
||||
while [[ "$#" -gt 0 ]]; do
|
||||
case "$1" in
|
||||
-t|--type)
|
||||
shift
|
||||
type="$1"
|
||||
;;
|
||||
-c|--count)
|
||||
shift
|
||||
count="$1"
|
||||
;;
|
||||
*)
|
||||
echo "Invalid command line flag $1" >&2
|
||||
return 1
|
||||
;;
|
||||
esac
|
||||
shift
|
||||
done
|
||||
# Validate type
|
||||
case "$type" in
|
||||
ascii)
|
||||
filter="[:print:]"
|
||||
;;
|
||||
alphanumeric)
|
||||
filter="a-zA-Z0-9"
|
||||
;;
|
||||
numeric)
|
||||
filter="0-9"
|
||||
;;
|
||||
*)
|
||||
echo "Invalid type ${type}" >&2
|
||||
return 1
|
||||
esac
|
||||
# Obtain count + 10 lines from /dev/urandom to ensure that the resulting string has the expected size
|
||||
# Note there is a very small chance of strings starting with EOL character
|
||||
# Therefore, the higher amount of lines read, this will happen less frequently
|
||||
result="$(head -n "$((count + 10))" /dev/urandom | tr -dc "$filter" | head -c "$count")"
|
||||
echo "$result"
|
||||
}
|
||||
|
||||
########################
|
||||
# Create md5 hash from a string
|
||||
# Arguments:
|
||||
# $1 - string
|
||||
# Returns:
|
||||
# md5 hash - string
|
||||
#########################
|
||||
generate_md5_hash() {
|
||||
local -r str="${1:?missing input string}"
|
||||
echo -n "$str" | md5sum | awk '{print $1}'
|
||||
}
|
||||
|
|
@ -0,0 +1,122 @@
|
|||
#!/bin/bash
|
||||
#
|
||||
# Bitnami persistence library
|
||||
# Used for bringing persistence capabilities to applications that don't have clear separation of data and logic
|
||||
|
||||
# shellcheck disable=SC1091
|
||||
|
||||
# Load Generic Libraries
|
||||
. /opt/bitnami/scripts/libfs.sh
|
||||
. /opt/bitnami/scripts/libos.sh
|
||||
. /opt/bitnami/scripts/liblog.sh
|
||||
. /opt/bitnami/scripts/libversion.sh
|
||||
|
||||
# Functions
|
||||
|
||||
########################
|
||||
# Persist an application directory
|
||||
# Globals:
|
||||
# BITNAMI_ROOT_DIR
|
||||
# BITNAMI_VOLUME_DIR
|
||||
# Arguments:
|
||||
# $1 - App folder name
|
||||
# $2 - List of app files to persist
|
||||
# Returns:
|
||||
# true if all steps succeeded, false otherwise
|
||||
#########################
|
||||
persist_app() {
|
||||
local -r app="${1:?missing app}"
|
||||
local -a files_to_persist
|
||||
read -r -a files_to_persist <<< "$2"
|
||||
local -r install_dir="${BITNAMI_ROOT_DIR}/${app}"
|
||||
local -r persist_dir="${BITNAMI_VOLUME_DIR}/${app}"
|
||||
# Persist the individual files
|
||||
if [[ "${#files_to_persist[@]}" -lt 0 ]]; then
|
||||
warn "No files are configured to be persisted"
|
||||
return
|
||||
fi
|
||||
pushd "$install_dir" >/dev/null
|
||||
local file_to_persist_relative file_to_persist_destination file_to_persist_destination_folder
|
||||
local -r tmp_file="/tmp/perms.acl"
|
||||
for file_to_persist in "${files_to_persist[@]}"; do
|
||||
if [[ ! -f "$file_to_persist" && ! -d "$file_to_persist" ]]; then
|
||||
error "Cannot persist '${file_to_persist}' because it does not exist"
|
||||
return 1
|
||||
fi
|
||||
file_to_persist_relative="$(relativize "$file_to_persist" "$install_dir")"
|
||||
file_to_persist_destination="${persist_dir}/${file_to_persist_relative}"
|
||||
file_to_persist_destination_folder="$(dirname "$file_to_persist_destination")"
|
||||
# Get original permissions for existing files, which will be applied later
|
||||
# Exclude the root directory with 'sed', to avoid issues when copying the entirety of it to a volume
|
||||
getfacl -R "$file_to_persist_relative" | sed -E '/# file: (\..+|[^.])/,$!d' > "$tmp_file"
|
||||
# Copy directories to the volume
|
||||
ensure_dir_exists "$file_to_persist_destination_folder"
|
||||
cp -Lr --preserve=links "$file_to_persist_relative" "$file_to_persist_destination_folder"
|
||||
# Restore permissions
|
||||
pushd "$persist_dir" >/dev/null
|
||||
if am_i_root; then
|
||||
setfacl --restore="$tmp_file"
|
||||
else
|
||||
# When running as non-root, don't change ownership
|
||||
setfacl --restore=<(grep -E -v '^# (owner|group):' "$tmp_file")
|
||||
fi
|
||||
popd >/dev/null
|
||||
done
|
||||
popd >/dev/null
|
||||
rm -f "$tmp_file"
|
||||
# Install the persisted files into the installation directory, via symlinks
|
||||
restore_persisted_app "$@"
|
||||
}
|
||||
|
||||
########################
|
||||
# Restore a persisted application directory
|
||||
# Globals:
|
||||
# BITNAMI_ROOT_DIR
|
||||
# BITNAMI_VOLUME_DIR
|
||||
# FORCE_MAJOR_UPGRADE
|
||||
# Arguments:
|
||||
# $1 - App folder name
|
||||
# $2 - List of app files to restore
|
||||
# Returns:
|
||||
# true if all steps succeeded, false otherwise
|
||||
#########################
|
||||
restore_persisted_app() {
|
||||
local -r app="${1:?missing app}"
|
||||
local -a files_to_restore
|
||||
read -r -a files_to_restore <<< "$2"
|
||||
local -r install_dir="${BITNAMI_ROOT_DIR}/${app}"
|
||||
local -r persist_dir="${BITNAMI_VOLUME_DIR}/${app}"
|
||||
# Restore the individual persisted files
|
||||
if [[ "${#files_to_restore[@]}" -lt 0 ]]; then
|
||||
warn "No persisted files are configured to be restored"
|
||||
return
|
||||
fi
|
||||
local file_to_restore_relative file_to_restore_origin file_to_restore_destination
|
||||
for file_to_restore in "${files_to_restore[@]}"; do
|
||||
file_to_restore_relative="$(relativize "$file_to_restore" "$install_dir")"
|
||||
# We use 'realpath --no-symlinks' to ensure that the case of '.' is covered and the directory is removed
|
||||
file_to_restore_origin="$(realpath --no-symlinks "${install_dir}/${file_to_restore_relative}")"
|
||||
file_to_restore_destination="$(realpath --no-symlinks "${persist_dir}/${file_to_restore_relative}")"
|
||||
rm -rf "$file_to_restore_origin"
|
||||
ln -sfn "$file_to_restore_destination" "$file_to_restore_origin"
|
||||
done
|
||||
}
|
||||
|
||||
########################
|
||||
# Check if an application directory was already persisted
|
||||
# Globals:
|
||||
# BITNAMI_VOLUME_DIR
|
||||
# Arguments:
|
||||
# $1 - App folder name
|
||||
# Returns:
|
||||
# true if all steps succeeded, false otherwise
|
||||
#########################
|
||||
is_app_initialized() {
|
||||
local -r app="${1:?missing app}"
|
||||
local -r persist_dir="${BITNAMI_VOLUME_DIR}/${app}"
|
||||
if ! is_mounted_dir_empty "$persist_dir"; then
|
||||
true
|
||||
else
|
||||
false
|
||||
fi
|
||||
}
|
||||
|
|
@ -0,0 +1,225 @@
|
|||
#!/bin/bash
|
||||
#
|
||||
# Library for managing services
|
||||
|
||||
# shellcheck disable=SC1091
|
||||
|
||||
# Load Generic Libraries
|
||||
. /opt/bitnami/scripts/libvalidations.sh
|
||||
. /opt/bitnami/scripts/liblog.sh
|
||||
|
||||
# Functions
|
||||
|
||||
########################
|
||||
# Read the provided pid file and returns a PID
|
||||
# Arguments:
|
||||
# $1 - Pid file
|
||||
# Returns:
|
||||
# PID
|
||||
#########################
|
||||
get_pid_from_file() {
|
||||
local pid_file="${1:?pid file is missing}"
|
||||
|
||||
if [[ -f "$pid_file" ]]; then
|
||||
if [[ -n "$(< "$pid_file")" ]] && [[ "$(< "$pid_file")" -gt 0 ]]; then
|
||||
echo "$(< "$pid_file")"
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
########################
|
||||
# Check if a provided PID corresponds to a running service
|
||||
# Arguments:
|
||||
# $1 - PID
|
||||
# Returns:
|
||||
# Boolean
|
||||
#########################
|
||||
is_service_running() {
|
||||
local pid="${1:?pid is missing}"
|
||||
|
||||
kill -0 "$pid" 2>/dev/null
|
||||
}
|
||||
|
||||
########################
|
||||
# Stop a service by sending a termination signal to its pid
|
||||
# Arguments:
|
||||
# $1 - Pid file
|
||||
# $2 - Signal number (optional)
|
||||
# Returns:
|
||||
# None
|
||||
#########################
|
||||
stop_service_using_pid() {
|
||||
local pid_file="${1:?pid file is missing}"
|
||||
local signal="${2:-}"
|
||||
local pid
|
||||
|
||||
pid="$(get_pid_from_file "$pid_file")"
|
||||
[[ -z "$pid" ]] || ! is_service_running "$pid" && return
|
||||
|
||||
if [[ -n "$signal" ]]; then
|
||||
kill "-${signal}" "$pid"
|
||||
else
|
||||
kill "$pid"
|
||||
fi
|
||||
|
||||
local counter=10
|
||||
while [[ "$counter" -ne 0 ]] && is_service_running "$pid"; do
|
||||
sleep 1
|
||||
counter=$((counter - 1))
|
||||
done
|
||||
}
|
||||
|
||||
########################
|
||||
# Start cron daemon
|
||||
# Arguments:
|
||||
# None
|
||||
# Returns:
|
||||
# true if started correctly, false otherwise
|
||||
#########################
|
||||
cron_start() {
|
||||
if [[ -x "/usr/sbin/cron" ]]; then
|
||||
/usr/sbin/cron
|
||||
elif [[ -x "/usr/sbin/crond" ]]; then
|
||||
/usr/sbin/crond
|
||||
else
|
||||
false
|
||||
fi
|
||||
}
|
||||
|
||||
########################
|
||||
# Generate a cron configuration file for a given service
|
||||
# Arguments:
|
||||
# $1 - Service name
|
||||
# $2 - Command
|
||||
# Flags:
|
||||
# --run-as - User to run as (default: root)
|
||||
# --schedule - Cron schedule configuration (default: * * * * *)
|
||||
# Returns:
|
||||
# None
|
||||
#########################
|
||||
generate_cron_conf() {
|
||||
local service_name="${1:?service name is missing}"
|
||||
local cmd="${2:?command is missing}"
|
||||
local run_as="root"
|
||||
local schedule="* * * * *"
|
||||
|
||||
# Parse optional CLI flags
|
||||
shift 2
|
||||
while [[ "$#" -gt 0 ]]; do
|
||||
case "$1" in
|
||||
--run-as)
|
||||
shift
|
||||
run_as="$1"
|
||||
;;
|
||||
--schedule)
|
||||
shift
|
||||
schedule="$1"
|
||||
;;
|
||||
*)
|
||||
echo "Invalid command line flag ${1}" >&2
|
||||
return 1
|
||||
;;
|
||||
esac
|
||||
shift
|
||||
done
|
||||
|
||||
mkdir -p /etc/cron.d
|
||||
echo "${schedule} ${run_as} ${cmd}" > /etc/cron.d/"$service_name"
|
||||
}
|
||||
|
||||
########################
|
||||
# Generate a monit configuration file for a given service
|
||||
# Arguments:
|
||||
# $1 - Service name
|
||||
# $2 - Pid file
|
||||
# $3 - Start command
|
||||
# $4 - Stop command
|
||||
# Flags:
|
||||
# --disabled - Whether to disable the monit configuration
|
||||
# Returns:
|
||||
# None
|
||||
#########################
|
||||
generate_monit_conf() {
|
||||
local service_name="${1:?service name is missing}"
|
||||
local pid_file="${2:?pid file is missing}"
|
||||
local start_command="${3:?start command is missing}"
|
||||
local stop_command="${4:?stop command is missing}"
|
||||
local monit_conf_dir="/etc/monit/conf.d"
|
||||
local disabled="no"
|
||||
|
||||
# Parse optional CLI flags
|
||||
shift 4
|
||||
while [[ "$#" -gt 0 ]]; do
|
||||
case "$1" in
|
||||
--disabled)
|
||||
shift
|
||||
disabled="$1"
|
||||
;;
|
||||
*)
|
||||
echo "Invalid command line flag ${1}" >&2
|
||||
return 1
|
||||
;;
|
||||
esac
|
||||
shift
|
||||
done
|
||||
|
||||
is_boolean_yes "$disabled" && conf_suffix=".disabled"
|
||||
mkdir -p "$monit_conf_dir"
|
||||
cat >"${monit_conf_dir}/${service_name}.conf${conf_suffix:-}" <<EOF
|
||||
check process ${service_name}
|
||||
with pidfile "${pid_file}"
|
||||
start program = "${start_command}" with timeout 90 seconds
|
||||
stop program = "${stop_command}" with timeout 90 seconds
|
||||
EOF
|
||||
}
|
||||
|
||||
########################
|
||||
# Generate a logrotate configuration file
|
||||
# Arguments:
|
||||
# $1 - Service name
|
||||
# $2 - Log files pattern
|
||||
# Flags:
|
||||
# --period - Period
|
||||
# --rotations - Number of rotations to store
|
||||
# --extra - Extra options (Optional)
|
||||
# Returns:
|
||||
# None
|
||||
#########################
|
||||
generate_logrotate_conf() {
|
||||
local service_name="${1:?service name is missing}"
|
||||
local log_path="${2:?log path is missing}"
|
||||
local period="weekly"
|
||||
local rotations="150"
|
||||
local extra=""
|
||||
local logrotate_conf_dir="/etc/logrotate.d"
|
||||
local var_name
|
||||
# Parse optional CLI flags
|
||||
shift 2
|
||||
while [[ "$#" -gt 0 ]]; do
|
||||
case "$1" in
|
||||
--period|--rotations|--extra)
|
||||
var_name="$(echo "$1" | sed -e "s/^--//" -e "s/-/_/g")"
|
||||
shift
|
||||
declare "$var_name"="${1:?"$var_name" is missing}"
|
||||
;;
|
||||
*)
|
||||
echo "Invalid command line flag ${1}" >&2
|
||||
return 1
|
||||
;;
|
||||
esac
|
||||
shift
|
||||
done
|
||||
|
||||
mkdir -p "$logrotate_conf_dir"
|
||||
cat <<EOF | sed '/^\s*$/d' >"${logrotate_conf_dir}/${service_name}"
|
||||
${log_path} {
|
||||
${period}
|
||||
rotate ${rotations}
|
||||
dateext
|
||||
compress
|
||||
copytruncate
|
||||
missingok
|
||||
$(indent "$extra" 2)
|
||||
}
|
||||
EOF
|
||||
}
|
||||
|
|
@ -0,0 +1,248 @@
|
|||
#!/bin/bash
|
||||
#
|
||||
# Validation functions library
|
||||
|
||||
# shellcheck disable=SC1091
|
||||
|
||||
# Load Generic Libraries
|
||||
. /opt/bitnami/scripts/liblog.sh
|
||||
|
||||
# Functions
|
||||
|
||||
########################
|
||||
# Check if the provided argument is an integer
|
||||
# Arguments:
|
||||
# $1 - Value to check
|
||||
# Returns:
|
||||
# Boolean
|
||||
#########################
|
||||
is_int() {
|
||||
local -r int="${1:?missing value}"
|
||||
if [[ "$int" =~ ^-?[0-9]+ ]]; then
|
||||
true
|
||||
else
|
||||
false
|
||||
fi
|
||||
}
|
||||
|
||||
########################
|
||||
# Check if the provided argument is a positive integer
|
||||
# Arguments:
|
||||
# $1 - Value to check
|
||||
# Returns:
|
||||
# Boolean
|
||||
#########################
|
||||
is_positive_int() {
|
||||
local -r int="${1:?missing value}"
|
||||
if is_int "$int" && (( "${int}" >= 0 )); then
|
||||
true
|
||||
else
|
||||
false
|
||||
fi
|
||||
}
|
||||
|
||||
########################
|
||||
# Check if the provided argument is a boolean or is the string 'yes/true'
|
||||
# Arguments:
|
||||
# $1 - Value to check
|
||||
# Returns:
|
||||
# Boolean
|
||||
#########################
|
||||
is_boolean_yes() {
|
||||
local -r bool="${1:-}"
|
||||
# comparison is performed without regard to the case of alphabetic characters
|
||||
shopt -s nocasematch
|
||||
if [[ "$bool" = 1 || "$bool" =~ ^(yes|true)$ ]]; then
|
||||
true
|
||||
else
|
||||
false
|
||||
fi
|
||||
}
|
||||
|
||||
########################
|
||||
# Check if the provided argument is a boolean yes/no value
|
||||
# Arguments:
|
||||
# $1 - Value to check
|
||||
# Returns:
|
||||
# Boolean
|
||||
#########################
|
||||
is_yes_no_value() {
|
||||
local -r bool="${1:-}"
|
||||
if [[ "$bool" =~ ^(yes|no)$ ]]; then
|
||||
true
|
||||
else
|
||||
false
|
||||
fi
|
||||
}
|
||||
|
||||
########################
|
||||
# Check if the provided argument is a boolean true/false value
|
||||
# Arguments:
|
||||
# $1 - Value to check
|
||||
# Returns:
|
||||
# Boolean
|
||||
#########################
|
||||
is_true_false_value() {
|
||||
local -r bool="${1:-}"
|
||||
if [[ "$bool" =~ ^(true|false)$ ]]; then
|
||||
true
|
||||
else
|
||||
false
|
||||
fi
|
||||
}
|
||||
|
||||
########################
|
||||
# Check if the provided argument is an empty string or not defined
|
||||
# Arguments:
|
||||
# $1 - Value to check
|
||||
# Returns:
|
||||
# Boolean
|
||||
#########################
|
||||
is_empty_value() {
|
||||
local -r val="${1:-}"
|
||||
if [[ -z "$val" ]]; then
|
||||
true
|
||||
else
|
||||
false
|
||||
fi
|
||||
}
|
||||
|
||||
########################
|
||||
# Validate if the provided argument is a valid port
|
||||
# Arguments:
|
||||
# $1 - Port to validate
|
||||
# Returns:
|
||||
# Boolean and error message
|
||||
#########################
|
||||
validate_port() {
|
||||
local value
|
||||
local unprivileged=0
|
||||
|
||||
# Parse flags
|
||||
while [[ "$#" -gt 0 ]]; do
|
||||
case "$1" in
|
||||
-unprivileged)
|
||||
unprivileged=1
|
||||
;;
|
||||
--)
|
||||
shift
|
||||
break
|
||||
;;
|
||||
-*)
|
||||
stderr_print "unrecognized flag $1"
|
||||
return 1
|
||||
;;
|
||||
*)
|
||||
break
|
||||
;;
|
||||
esac
|
||||
shift
|
||||
done
|
||||
|
||||
if [[ "$#" -gt 1 ]]; then
|
||||
echo "too many arguments provided"
|
||||
return 2
|
||||
elif [[ "$#" -eq 0 ]]; then
|
||||
stderr_print "missing port argument"
|
||||
return 1
|
||||
else
|
||||
value=$1
|
||||
fi
|
||||
|
||||
if [[ -z "$value" ]]; then
|
||||
echo "the value is empty"
|
||||
return 1
|
||||
else
|
||||
if ! is_int "$value"; then
|
||||
echo "value is not an integer"
|
||||
return 2
|
||||
elif [[ "$value" -lt 0 ]]; then
|
||||
echo "negative value provided"
|
||||
return 2
|
||||
elif [[ "$value" -gt 65535 ]]; then
|
||||
echo "requested port is greater than 65535"
|
||||
return 2
|
||||
elif [[ "$unprivileged" = 1 && "$value" -lt 1024 ]]; then
|
||||
echo "privileged port requested"
|
||||
return 3
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
########################
|
||||
# Validate if the provided argument is a valid IPv4 address
|
||||
# Arguments:
|
||||
# $1 - IP to validate
|
||||
# Returns:
|
||||
# Boolean
|
||||
#########################
|
||||
validate_ipv4() {
|
||||
local ip="${1:?ip is missing}"
|
||||
local stat=1
|
||||
|
||||
if [[ $ip =~ ^[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}$ ]]; then
|
||||
read -r -a ip_array <<< "$(tr '.' ' ' <<< "$ip")"
|
||||
[[ ${ip_array[0]} -le 255 && ${ip_array[1]} -le 255 \
|
||||
&& ${ip_array[2]} -le 255 && ${ip_array[3]} -le 255 ]]
|
||||
stat=$?
|
||||
fi
|
||||
return $stat
|
||||
}
|
||||
|
||||
########################
|
||||
# Validate a string format
|
||||
# Arguments:
|
||||
# $1 - String to validate
|
||||
# Returns:
|
||||
# Boolean
|
||||
#########################
|
||||
validate_string() {
|
||||
local string
|
||||
local min_length=-1
|
||||
local max_length=-1
|
||||
|
||||
# Parse flags
|
||||
while [ "$#" -gt 0 ]; do
|
||||
case "$1" in
|
||||
-min-length)
|
||||
shift
|
||||
min_length=${1:-}
|
||||
;;
|
||||
-max-length)
|
||||
shift
|
||||
max_length=${1:-}
|
||||
;;
|
||||
--)
|
||||
shift
|
||||
break
|
||||
;;
|
||||
-*)
|
||||
stderr_print "unrecognized flag $1"
|
||||
return 1
|
||||
;;
|
||||
*)
|
||||
break
|
||||
;;
|
||||
esac
|
||||
shift
|
||||
done
|
||||
|
||||
if [ "$#" -gt 1 ]; then
|
||||
stderr_print "too many arguments provided"
|
||||
return 2
|
||||
elif [ "$#" -eq 0 ]; then
|
||||
stderr_print "missing string"
|
||||
return 1
|
||||
else
|
||||
string=$1
|
||||
fi
|
||||
|
||||
if [[ "$min_length" -ge 0 ]] && [[ "${#string}" -lt "$min_length" ]]; then
|
||||
echo "string length is less than $min_length"
|
||||
return 1
|
||||
fi
|
||||
if [[ "$max_length" -ge 0 ]] && [[ "${#string}" -gt "$max_length" ]]; then
|
||||
echo "string length is great than $max_length"
|
||||
return 1
|
||||
fi
|
||||
}
|
||||
|
|
@@ -0,0 +1,49 @@
#!/bin/bash
#
# Library for managing version strings

# shellcheck disable=SC1091

# Load Generic Libraries
. /opt/bitnami/scripts/liblog.sh

# Functions
########################
# Gets semantic version
# Arguments:
#   $1 - version: string to extract major.minor.patch
#   $2 - section: 1 to extract major, 2 to extract minor, 3 to extract patch
# Returns:
#   array with the major, minor and release
#########################
get_sematic_version () {
    local version="${1:?version is required}"
    local section="${2:?section is required}"
    local -a version_sections

    # Regex to parse versions: x.y.z
    local -r regex='([0-9]+)(\.([0-9]+)(\.([0-9]+))?)?'

    if [[ "$version" =~ $regex ]]; then
        local i=1
        local j=1
        local n=${#BASH_REMATCH[*]}

        while [[ $i -lt $n ]]; do
            if [[ -n "${BASH_REMATCH[$i]}" ]] && [[ "${BASH_REMATCH[$i]:0:1}" != '.' ]]; then
                version_sections[$j]=${BASH_REMATCH[$i]}
                ((j++))
            fi
            ((i++))
        done

        local number_regex='^[0-9]+$'
        if [[ "$section" =~ $number_regex ]] && (( $section > 0 )) && (( $section <= 3 )); then
            echo "${version_sections[$section]}"
            return
        else
            stderr_print "Section allowed values are: 1, 2, and 3"
            return 1
        fi
    fi
}

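A usage sketch for get_sematic_version, using the InfluxDB version bundled by this image:

    get_sematic_version "2.0.2" 1   # prints "2" (major)
    get_sematic_version "2.0.2" 2   # prints "0" (minor)
    get_sematic_version "2.0.2" 3   # prints "2" (patch)
    get_sematic_version "2.0.2" 4   # prints the "Section allowed values" error and returns 1
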
@ -0,0 +1,421 @@
|
|||
#!/bin/bash
|
||||
#
|
||||
# Bitnami web server handler library
|
||||
|
||||
# shellcheck disable=SC1090,SC1091
|
||||
|
||||
# Load generic libraries
|
||||
. /opt/bitnami/scripts/liblog.sh
|
||||
|
||||
########################
|
||||
# Execute a command (or list of commands) with the web server environment and library loaded
|
||||
# Globals:
|
||||
# *
|
||||
# Arguments:
|
||||
# None
|
||||
# Returns:
|
||||
# None
|
||||
#########################
|
||||
web_server_execute() {
|
||||
local -r web_server="${1:?missing web server}"
|
||||
shift
|
||||
# Run program in sub-shell to avoid web server environment getting loaded when not necessary
|
||||
(
|
||||
. "/opt/bitnami/scripts/lib${web_server}.sh"
|
||||
. "/opt/bitnami/scripts/${web_server}-env.sh"
|
||||
"$@"
|
||||
)
|
||||
}
|
||||
|
||||
########################
|
||||
# Prints the list of enabled web servers
|
||||
# Globals:
|
||||
# None
|
||||
# Arguments:
|
||||
# None
|
||||
# Returns:
|
||||
# None
|
||||
#########################
|
||||
web_server_list() {
|
||||
local -r -a supported_web_servers=(apache nginx)
|
||||
local -a existing_web_servers=()
|
||||
for web_server in "${supported_web_servers[@]}"; do
|
||||
[[ -f "/opt/bitnami/scripts/${web_server}-env.sh" ]] && existing_web_servers+=("$web_server")
|
||||
done
|
||||
echo "${existing_web_servers[@]:-}"
|
||||
}
|
||||
|
||||
########################
|
||||
# Prints the currently-enabled web server type (only one, in order of preference)
|
||||
# Globals:
|
||||
# None
|
||||
# Arguments:
|
||||
# None
|
||||
# Returns:
|
||||
# None
|
||||
#########################
|
||||
web_server_type() {
|
||||
local -a web_servers
|
||||
read -r -a web_servers <<< "$(web_server_list)"
|
||||
echo "${web_servers[0]:-}"
|
||||
}
|
||||
|
||||
########################
|
||||
# Validate that a supported web server is configured
|
||||
# Globals:
|
||||
# None
|
||||
# Arguments:
|
||||
# None
|
||||
# Returns:
|
||||
# None
|
||||
#########################
|
||||
web_server_validate() {
|
||||
local error_code=0
|
||||
local supported_web_servers=("apache" "nginx")
|
||||
|
||||
# Auxiliary functions
|
||||
print_validation_error() {
|
||||
error "$1"
|
||||
error_code=1
|
||||
}
|
||||
|
||||
if [[ -z "$(web_server_type)" || ! " ${supported_web_servers[*]} " == *" $(web_server_type) "* ]]; then
|
||||
print_validation_error "Could not detect any supported web servers. It must be one of: ${supported_web_servers[*]}"
|
||||
elif ! web_server_execute "$(web_server_type)" type -t "is_$(web_server_type)_running" >/dev/null; then
|
||||
print_validation_error "Could not load the $(web_server_type) web server library from /opt/bitnami/scripts. Check that it exists and is readable."
|
||||
fi
|
||||
|
||||
return "$error_code"
|
||||
}
|
||||
|
||||
########################
|
||||
# Check whether the web server is running
|
||||
# Globals:
|
||||
# *
|
||||
# Arguments:
|
||||
# None
|
||||
# Returns:
|
||||
# true if the web server is running, false otherwise
|
||||
#########################
|
||||
is_web_server_running() {
|
||||
"is_$(web_server_type)_running"
|
||||
}
|
||||
|
||||
########################
|
||||
# Start web server
|
||||
# Globals:
|
||||
# *
|
||||
# Arguments:
|
||||
# None
|
||||
# Returns:
|
||||
# None
|
||||
#########################
|
||||
web_server_start() {
|
||||
info "Starting $(web_server_type) in background"
|
||||
"${BITNAMI_ROOT_DIR}/scripts/$(web_server_type)/start.sh"
|
||||
}
|
||||
|
||||
########################
|
||||
# Stop web server
|
||||
# Globals:
|
||||
# *
|
||||
# Arguments:
|
||||
# None
|
||||
# Returns:
|
||||
# None
|
||||
#########################
|
||||
web_server_stop() {
|
||||
info "Stopping $(web_server_type)"
|
||||
"${BITNAMI_ROOT_DIR}/scripts/$(web_server_type)/stop.sh"
|
||||
}
|
||||
|
||||
########################
|
||||
# Restart web server
|
||||
# Globals:
|
||||
# *
|
||||
# Arguments:
|
||||
# None
|
||||
# Returns:
|
||||
# None
|
||||
#########################
|
||||
web_server_restart() {
|
||||
info "Restarting $(web_server_type)"
|
||||
"${BITNAMI_ROOT_DIR}/scripts/$(web_server_type)/restart.sh"
|
||||
}
|
||||
|
||||
########################
|
||||
# Reload web server
|
||||
# Globals:
|
||||
# *
|
||||
# Arguments:
|
||||
# None
|
||||
# Returns:
|
||||
# None
|
||||
#########################
|
||||
web_server_reload() {
|
||||
"${BITNAMI_ROOT_DIR}/scripts/$(web_server_type)/reload.sh"
|
||||
}
|
||||
|
||||
########################
|
||||
# Ensure a web server application configuration exists (i.e. Apache virtual host format or NGINX server block)
|
||||
# It serves as a wrapper for the specific web server function
|
||||
# Globals:
|
||||
# *
|
||||
# Arguments:
|
||||
# $1 - App name
|
||||
# Flags:
|
||||
# --hosts - Hosts to enable
|
||||
# --type - Application type, which has an effect on which configuration template to use
|
||||
# --allow-remote-connections - Whether to allow remote connections or to require local connections
|
||||
# --disabled - Whether to render the file with a .disabled prefix
|
||||
# --enable-https - Enable app configuration on HTTPS port
|
||||
# --http-port - HTTP port number
|
||||
# --https-port - HTTPS port number
|
||||
# --document-root - Path to document root directory
|
||||
# Apache-specific flags:
|
||||
# --apache-additional-configuration - Additional vhost configuration (no default)
|
||||
# --apache-before-vhost-configuration - Configuration to add before the <VirtualHost> directive (no default)
|
||||
# --apache-allow-override - Whether to allow .htaccess files (only allowed when --move-htaccess is set to 'no')
|
||||
# --apache-extra-directory-configuration - Extra configuration for the document root directory
|
||||
# --apache-move-htaccess - Move .htaccess files to a common place so they can be loaded during Apache startup
|
||||
# NGINX-specific flags:
|
||||
# --nginx-additional-configuration - Additional server block configuration (no default)
|
||||
# Returns:
|
||||
# true if the configuration was enabled, false otherwise
|
||||
########################
|
||||
ensure_web_server_app_configuration_exists() {
|
||||
local app="${1:?missing app}"
|
||||
shift
|
||||
local -a apache_args nginx_args web_servers args_var
|
||||
apache_args=("$app")
|
||||
nginx_args=("$app")
|
||||
# Validate arguments
|
||||
while [[ "$#" -gt 0 ]]; do
|
||||
case "$1" in
|
||||
# Common flags
|
||||
--hosts \
|
||||
| --type \
|
||||
| --allow-remote-connections \
|
||||
| --disabled \
|
||||
| --enable-https \
|
||||
| --http-port \
|
||||
| --https-port \
|
||||
| --document-root \
|
||||
)
|
||||
apache_args+=("$1" "${2:?missing value}")
|
||||
nginx_args+=("$1" "${2:?missing value}")
|
||||
shift
|
||||
;;
|
||||
|
||||
# Specific Apache flags
|
||||
--apache-additional-configuration \
|
||||
| --apache-before-vhost-configuration \
|
||||
| --apache-allow-override \
|
||||
| --apache-extra-directory-configuration \
|
||||
| --apache-move-htaccess \
|
||||
)
|
||||
apache_args+=("${1//apache-/}" "${2:?missing value}")
|
||||
shift
|
||||
;;
|
||||
|
||||
# Specific NGINX flags
|
||||
--nginx-additional-configuration)
|
||||
nginx_args+=("${1//nginx-/}" "${2:?missing value}")
|
||||
shift
|
||||
;;
|
||||
|
||||
*)
|
||||
echo "Invalid command line flag $1" >&2
|
||||
return 1
|
||||
;;
|
||||
esac
|
||||
shift
|
||||
done
|
||||
read -r -a web_servers <<< "$(web_server_list)"
|
||||
for web_server in "${web_servers[@]}"; do
|
||||
args_var="${web_server}_args[@]"
|
||||
web_server_execute "$web_server" "ensure_${web_server}_app_configuration_exists" "${!args_var}"
|
||||
done
|
||||
}
|
||||
|
||||
########################
|
||||
# Ensure a web server application configuration does not exist anymore (i.e. Apache virtual host format or NGINX server block)
|
||||
# It serves as a wrapper for the specific web server function
|
||||
# Globals:
|
||||
# *
|
||||
# Arguments:
|
||||
# $1 - App name
|
||||
# Returns:
|
||||
# true if the configuration was disabled, false otherwise
|
||||
########################
|
||||
ensure_web_server_app_configuration_not_exists() {
|
||||
local app="${1:?missing app}"
|
||||
local -a web_servers
|
||||
read -r -a web_servers <<< "$(web_server_list)"
|
||||
for web_server in "${web_servers[@]}"; do
|
||||
web_server_execute "$web_server" "ensure_${web_server}_app_configuration_not_exists" "$app"
|
||||
done
|
||||
}
|
||||
|
||||
########################
|
||||
# Ensure the web server loads the configuration for an application in a URL prefix
|
||||
# It serves as a wrapper for the specific web server function
|
||||
# Globals:
|
||||
# *
|
||||
# Arguments:
|
||||
# $1 - App name
|
||||
# Flags:
|
||||
# --allow-remote-connections - Whether to allow remote connections or to require local connections
|
||||
# --document-root - Path to document root directory
|
||||
# --prefix - URL prefix from where it will be accessible (e.g. /myapp)
|
||||
# --type - Application type, which has an effect on what configuration template will be used
|
||||
# Apache-specific flags:
|
||||
# --apache-additional-configuration - Additional vhost configuration (no default)
|
||||
# --apache-allow-override - Whether to allow .htaccess files (only allowed when --move-htaccess is set to 'no')
|
||||
# --apache-extra-directory-configuration - Extra configuration for the document root directory
|
||||
# --apache-move-htaccess - Move .htaccess files to a common place so they can be loaded during Apache startup
|
||||
# NGINX-specific flags:
|
||||
# --nginx-additional-configuration - Additional server block configuration (no default)
|
||||
# Returns:
|
||||
# true if the configuration was enabled, false otherwise
|
||||
########################
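# Illustrative usage (values are placeholders, not defaults):
#   ensure_web_server_prefix_configuration_exists "myapp" --prefix "/myapp" --document-root "/opt/bitnami/myapp"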
|
||||
ensure_web_server_prefix_configuration_exists() {
|
||||
local app="${1:?missing app}"
|
||||
shift
|
||||
local -a apache_args nginx_args web_servers args_var
|
||||
apache_args=("$app")
|
||||
nginx_args=("$app")
|
||||
# Validate arguments
|
||||
while [[ "$#" -gt 0 ]]; do
|
||||
case "$1" in
|
||||
# Common flags
|
||||
--allow-remote-connections \
|
||||
| --document-root \
|
||||
| --prefix \
|
||||
| --type \
|
||||
)
|
||||
apache_args+=("$1" "${2:?missing value}")
|
||||
nginx_args+=("$1" "${2:?missing value}")
|
||||
shift
|
||||
;;
|
||||
|
||||
# Specific Apache flags
|
||||
--apache-additional-configuration \
|
||||
| --apache-allow-override \
|
||||
| --apache-extra-directory-configuration \
|
||||
| --apache-move-htaccess \
|
||||
)
|
||||
apache_args+=("${1//apache-/}" "$2")
|
||||
shift
|
||||
;;
|
||||
|
||||
# Specific NGINX flags
|
||||
--nginx-additional-configuration)
|
||||
nginx_args+=("${1//nginx-/}" "$2")
|
||||
shift
|
||||
;;
|
||||
|
||||
*)
|
||||
echo "Invalid command line flag $1" >&2
|
||||
return 1
|
||||
;;
|
||||
esac
|
||||
shift
|
||||
done
|
||||
read -r -a web_servers <<< "$(web_server_list)"
|
||||
for web_server in "${web_servers[@]}"; do
|
||||
args_var="${web_server}_args[@]"
|
||||
web_server_execute "$web_server" "ensure_${web_server}_prefix_configuration_exists" "${!args_var}"
|
||||
done
|
||||
}
|
||||
|
||||
########################
|
||||
# Ensure a web server application configuration is updated with the runtime configuration (i.e. ports)
|
||||
# It serves as a wrapper for the specific web server function
|
||||
# Globals:
|
||||
# *
|
||||
# Arguments:
|
||||
# $1 - App name
|
||||
# Flags:
|
||||
# --hosts - Hosts to enable
|
||||
# --enable-https - Update HTTPS app configuration
|
||||
# --http-port - HTTP port number
|
||||
# --https-port - HTTPS port number
|
||||
# Returns:
|
||||
# true if the configuration was updated, false otherwise
|
||||
########################
|
||||
web_server_update_app_configuration() {
|
||||
local app="${1:?missing app}"
|
||||
shift
|
||||
local -a args web_servers
|
||||
args=("$app")
|
||||
# Validate arguments
|
||||
while [[ "$#" -gt 0 ]]; do
|
||||
case "$1" in
|
||||
# Common flags
|
||||
--hosts \
|
||||
| --enable-https \
|
||||
| --http-port \
|
||||
| --https-port \
|
||||
)
|
||||
args+=("$1" "${2:?missing value}")
|
||||
shift
|
||||
;;
|
||||
|
||||
*)
|
||||
echo "Invalid command line flag $1" >&2
|
||||
return 1
|
||||
;;
|
||||
esac
|
||||
shift
|
||||
done
|
||||
read -r -a web_servers <<< "$(web_server_list)"
|
||||
for web_server in "${web_servers[@]}"; do
|
||||
web_server_execute "$web_server" "${web_server}_update_app_configuration" "${args[@]}"
|
||||
done
|
||||
}
|
||||
|
||||
########################
|
||||
# Enable loading page, which shows users that the initialization process is not yet completed
|
||||
# Globals:
|
||||
# *
|
||||
# Arguments:
|
||||
# None
|
||||
# Returns:
|
||||
# None
|
||||
#########################
|
||||
web_server_enable_loading_page() {
|
||||
ensure_web_server_app_configuration_exists "__loading" --hosts "_default_" \
|
||||
--apache-additional-configuration "
|
||||
# Show an HTTP 503 Service Unavailable page by default
|
||||
RedirectMatch 503 ^/$
|
||||
# Show index.html if server is answering with 404 Not Found or 503 Service Unavailable status codes
|
||||
ErrorDocument 404 /index.html
|
||||
ErrorDocument 503 /index.html" \
|
||||
--nginx-additional-configuration "
|
||||
# Show an HTTP 503 Service Unavailable page by default
|
||||
location / {
|
||||
return 503;
|
||||
}
|
||||
# Show index.html if server is answering with 404 Not Found or 503 Service Unavailable status codes
|
||||
error_page 404 @installing;
|
||||
error_page 503 @installing;
|
||||
location @installing {
|
||||
rewrite ^(.*)$ /index.html break;
|
||||
}"
|
||||
web_server_reload
|
||||
}
|
||||
|
||||
########################
|
||||
# Disable loading page, removing the temporary configuration used during initialization
|
||||
# Globals:
|
||||
# *
|
||||
# Arguments:
|
||||
# None
|
||||
# Returns:
|
||||
# None
|
||||
#########################
|
||||
web_server_disable_install_page() {
|
||||
ensure_web_server_app_configuration_not_exists "__loading"
|
||||
web_server_reload
|
||||
}
|
||||
|
|
@ -0,0 +1,24 @@
|
|||
#!/bin/sh
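#
# Helper that installs the given system packages without recommends, retrying the
# apt-get transaction on transient failures. Usage (assuming the script is installed
# as 'install_packages'; package names are examples): install_packages curl ca-certificates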
|
||||
set -e
|
||||
set -u
|
||||
export DEBIAN_FRONTEND=noninteractive
|
||||
n=0
|
||||
max=2
|
||||
until [ $n -gt $max ]; do
|
||||
set +e
|
||||
(
|
||||
apt-get update -qq &&
|
||||
apt-get install -y --no-install-recommends "$@"
|
||||
)
|
||||
CODE=$?
|
||||
set -e
|
||||
if [ $CODE -eq 0 ]; then
|
||||
break
|
||||
fi
|
||||
if [ $n -eq $max ]; then
|
||||
exit $CODE
|
||||
fi
|
||||
echo "apt failed, retrying"
|
||||
n=$(($n + 1))
|
||||
done
|
||||
rm -r /var/lib/apt/lists /var/cache/apt/archives
|
||||
|
|
@ -0,0 +1,27 @@
|
|||
#!/bin/bash
|
||||
|
||||
# shellcheck disable=SC1091
|
||||
|
||||
set -o errexit
|
||||
set -o nounset
|
||||
set -o pipefail
|
||||
# set -o xtrace # Uncomment this line for debugging purposes
|
||||
|
||||
# Load libraries
|
||||
. /opt/bitnami/scripts/liblog.sh
|
||||
. /opt/bitnami/scripts/libbitnami.sh
|
||||
. /opt/bitnami/scripts/libinfluxdb.sh
|
||||
|
||||
# Load InfluxDB environment variables
|
||||
eval "$(influxdb_env)"
|
||||
|
||||
print_welcome_page
|
||||
|
||||
if [[ "$*" = *"/opt/bitnami/scripts/influxdb/run.sh"* ]]; then
|
||||
info "** Starting InfluxDB setup **"
|
||||
/opt/bitnami/scripts/influxdb/setup.sh
|
||||
info "** InfluxDB setup finished! **"
|
||||
fi
|
||||
|
||||
echo ""
|
||||
exec "$@"
|
||||
|
|
@ -0,0 +1,18 @@
|
|||
#!/bin/bash
|
||||
|
||||
# shellcheck disable=SC1091
|
||||
|
||||
# Load libraries
|
||||
. /opt/bitnami/scripts/libfs.sh
|
||||
. /opt/bitnami/scripts/libinfluxdb.sh
|
||||
|
||||
# Load InfluxDB environment variables
|
||||
eval "$(influxdb_env)"
|
||||
|
||||
# Ensure directories used by InfluxDB exist and have proper ownership and permissions
|
||||
for dir in "$INFLUXDB_VOLUME_DIR" "$INFLUXDB_DATA_DIR" "$INFLUXDB_DATA_WAL_DIR" "$INFLUXDB_META_DIR" "$INFLUXDB_CONF_DIR" "$INFLUXDB_INITSCRIPTS_DIR"; do
|
||||
ensure_dir_exists "$dir"
|
||||
chmod -R g+rwX "$dir"
|
||||
done
|
||||
|
||||
touch "$HOME/.influx_history" && chmod g+rwX "$HOME/.influx_history"
|
||||
|
|
@ -0,0 +1,29 @@
|
|||
#!/bin/bash
|
||||
|
||||
# shellcheck disable=SC1091
|
||||
|
||||
set -o errexit
|
||||
set -o nounset
|
||||
set -o pipefail
|
||||
# set -o xtrace # Uncomment this line for debugging purposes
|
||||
|
||||
# Load libraries
|
||||
. /opt/bitnami/scripts/libos.sh
|
||||
. /opt/bitnami/scripts/liblog.sh
|
||||
. /opt/bitnami/scripts/libinfluxdb.sh
|
||||
|
||||
# Load InfluxDB environment variables
|
||||
eval "$(influxdb_env)"
|
||||
|
||||
info "** Starting InfluxDB **"
|
||||
start_command=("${INFLUXDB_BIN_DIR}/influxd" "$@")
|
||||
[[ "$(influxdb_branch)" = "1" ]] && start_command=("${start_command[@]}" "-config" "$INFLUXDB_CONF_FILE")
|
||||
am_i_root && start_command=("gosu" "$INFLUXDB_DAEMON_USER" "${start_command[@]}")
|
||||
|
||||
if [[ -f "$INFLUXDB_CONF_FILE" ]]; then
|
||||
export INFLUXD_CONFIG_PATH="${INFLUXDB_CONF_FILE:-}"
|
||||
fi
|
||||
|
||||
export HOME=/bitnami/influxdb/
|
||||
|
||||
exec "${start_command[@]}"
|
||||
|
|
@ -0,0 +1,30 @@
|
|||
#!/bin/bash
|
||||
|
||||
# shellcheck disable=SC1091
|
||||
|
||||
set -o errexit
|
||||
set -o nounset
|
||||
set -o pipefail
|
||||
# set -o xtrace # Uncomment this line for debugging purposes
|
||||
|
||||
# Load libraries
|
||||
. /opt/bitnami/scripts/libfs.sh
|
||||
. /opt/bitnami/scripts/libos.sh
|
||||
. /opt/bitnami/scripts/libinfluxdb.sh
|
||||
|
||||
# Load InfluxDB environment variables
|
||||
eval "$(influxdb_env)"
|
||||
|
||||
# Ensure InfluxDB environment variables are valid
|
||||
influxdb_validate
|
||||
# Ensure InfluxDB user and group exist when running as 'root'
|
||||
if am_i_root; then
|
||||
ensure_user_exists "$INFLUXDB_DAEMON_USER" --group "$INFLUXDB_DAEMON_GROUP"
|
||||
chown -R "$INFLUXDB_DAEMON_USER" "$INFLUXDB_DATA_DIR" "$INFLUXDB_CONF_DIR"
|
||||
fi
|
||||
# Ensure InfluxDB is stopped when this script ends.
|
||||
trap "influxdb_stop" EXIT
|
||||
# Ensure InfluxDB is initialized
|
||||
influxdb_initialize
|
||||
# Allow running custom initialization scripts
|
||||
influxdb_custom_init_scripts
|
||||
|
|
@ -0,0 +1,680 @@
|
|||
#!/bin/bash
|
||||
#
|
||||
# Bitnami InfluxDB library
|
||||
|
||||
# shellcheck disable=SC1091
|
||||
# shellcheck disable=SC1090
|
||||
|
||||
# Load Generic Libraries
|
||||
. /opt/bitnami/scripts/liblog.sh
|
||||
. /opt/bitnami/scripts/libos.sh
|
||||
. /opt/bitnami/scripts/libvalidations.sh
|
||||
|
||||
# Functions
|
||||
|
||||
########################
|
||||
# Load global variables used on InfluxDB configuration
|
||||
# Globals:
|
||||
# INFLUXDB_*
|
||||
# Arguments:
|
||||
# None
|
||||
# Returns:
|
||||
# Series of exports to be used as 'eval' arguments
|
||||
#########################
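# Usage, as done by the scripts in this image: eval "$(influxdb_env)"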
|
||||
influxdb_env() {
|
||||
cat <<"EOF"
|
||||
# Format log messages
|
||||
export MODULE="influxdb"
|
||||
export BITNAMI_DEBUG="${BITNAMI_DEBUG:-false}"
|
||||
# Paths
|
||||
export INFLUXDB_BASE_DIR="/opt/bitnami/influxdb"
|
||||
export INFLUXDB_VOLUME_DIR="/bitnami/influxdb"
|
||||
export INFLUXDB_BIN_DIR="${INFLUXDB_BASE_DIR}/bin"
|
||||
export INFLUXDB_DATA_DIR="${INFLUXDB_DATA_DIR:-${INFLUXDB_VOLUME_DIR}/data}"
|
||||
export INFLUXDB_DATA_WAL_DIR="${INFLUXDB_DATA_WAL_DIR:-${INFLUXDB_VOLUME_DIR}/wal}"
|
||||
export INFLUXDB_META_DIR="${INFLUXDB_META_DIR:-${INFLUXDB_VOLUME_DIR}/meta}"
|
||||
export INFLUXDB_CONF_DIR="${INFLUXDB_BASE_DIR}/etc"
|
||||
export INFLUXDB_CONF_FILE="${INFLUXDB_CONF_DIR}/influxdb.conf"
|
||||
export INFLUXDB_INITSCRIPTS_DIR="/docker-entrypoint-initdb.d"
|
||||
# Users
|
||||
export INFLUXDB_DAEMON_USER="influxdb"
|
||||
export INFLUXDB_DAEMON_GROUP="influxdb"
|
||||
# InfluxDB settings
|
||||
export INFLUXDB_REPORTING_DISABLED="${INFLUXDB_REPORTING_DISABLED:-true}"
|
||||
export INFLUXDB_HTTP_PORT_NUMBER="${INFLUXDB_HTTP_PORT_NUMBER:-8086}"
|
||||
export INFLUXDB_HTTP_BIND_ADDRESS="${INFLUXDB_HTTP_BIND_ADDRESS:-0.0.0.0:${INFLUXDB_HTTP_PORT_NUMBER}}"
|
||||
export INFLUXDB_HTTP_READINESS_TIMEOUT="${INFLUXDB_HTTP_READINESS_TIMEOUT:-60}"
|
||||
export INFLUXDB_PORT_NUMBER="${INFLUXDB_PORT_NUMBER:-8088}"
|
||||
export INFLUXDB_BIND_ADDRESS="${INFLUXDB_BIND_ADDRESS:-0.0.0.0:${INFLUXDB_PORT_NUMBER}}"
|
||||
# Authentication
|
||||
export INFLUXDB_ADMIN_USER="${INFLUXDB_ADMIN_USER:-admin}"
|
||||
export INFLUXDB_ADMIN_CONFIG_NAME="${INFLUXDB_ADMIN_CONFIG_NAME:-default}"
|
||||
export INFLUXDB_ADMIN_ORG="${INFLUXDB_ADMIN_ORG:-primary}"
|
||||
export INFLUXDB_ADMIN_BUCKET="${INFLUXDB_ADMIN_BUCKET:-primary}"
|
||||
export INFLUXDB_ADMIN_RETENTION="${INFLUXDB_ADMIN_RETENTION:-0}"
|
||||
export INFLUXDB_USER="${INFLUXDB_USER:-}"
|
||||
export INFLUXDB_USER_ORG="${INFLUXDB_USER_ORG:-${INFLUXDB_ADMIN_ORG}}"
|
||||
export INFLUXDB_USER_BUCKET="${INFLUXDB_USER_BUCKET:-}"
|
||||
export INFLUXDB_CREATE_USER_TOKEN="${INFLUXDB_CREATE_USER_TOKEN:-no}"
|
||||
export INFLUXDB_READ_USER="${INFLUXDB_READ_USER:-}"
|
||||
export INFLUXDB_WRITE_USER="${INFLUXDB_WRITE_USER:-}"
|
||||
export INFLUXDB_DB="${INFLUXDB_DB:-}"
|
||||
|
||||
# V2 required env vars aliases
|
||||
export INFLUXD_ENGINE_PATH="${INFLUXDB_VOLUME_DIR}"
|
||||
export INFLUXD_BOLT_PATH="${INFLUXDB_VOLUME_DIR}/influxd.bolt"
|
||||
export INFLUXD_CONFIG_PATH="${INFLUXDB_CONF_DIR}/influxdb.conf"
|
||||
export INFLUX_CONFIGS_PATH="${INFLUXDB_VOLUME_DIR}/configs"
|
||||
|
||||
export INFLUXD_HTTP_BIND_ADDRESS="${INFLUXDB_HTTP_BIND_ADDRESS}"
|
||||
|
||||
EOF
|
||||
# The configuration can be provided in a configuration file or environment variables
|
||||
# This setting is necessary to determine certain validations/actions during the
|
||||
# initialization, so we need to check the configuration file if it exists.
|
||||
if [[ -f "/opt/bitnami/influxdb/etc/influxdb.conf" ]]; then
|
||||
cat <<"EOF"
|
||||
INFLUXDB_HTTP_AUTH_ENABLED="${INFLUXDB_HTTP_AUTH_ENABLED:-$(influxdb_conf_get "auth-enabled")}"
|
||||
export INFLUXDB_HTTP_AUTH_ENABLED="${INFLUXDB_HTTP_AUTH_ENABLED:-true}"
|
||||
EOF
|
||||
else
|
||||
cat <<"EOF"
|
||||
export INFLUXDB_HTTP_AUTH_ENABLED="${INFLUXDB_HTTP_AUTH_ENABLED:-true}"
|
||||
EOF
|
||||
fi
|
||||
# Credentials should be allowed to be mounted as files to avoid sensitive data
|
||||
# in the environment variables
|
||||
if [[ -f "${INFLUXDB_ADMIN_USER_PASSWORD_FILE:-}" ]]; then
|
||||
cat <<"EOF"
|
||||
export INFLUXDB_ADMIN_USER_PASSWORD="$(< "${INFLUXDB_ADMIN_USER_PASSWORD_FILE}")"
|
||||
EOF
|
||||
else
|
||||
cat <<"EOF"
|
||||
export INFLUXDB_ADMIN_USER_PASSWORD="${INFLUXDB_ADMIN_USER_PASSWORD:-}"
|
||||
EOF
|
||||
fi
|
||||
if [[ -f "${INFLUXDB_ADMIN_USER_TOKEN_FILE:-}" ]]; then
|
||||
cat <<"EOF"
|
||||
export INFLUXDB_ADMIN_USER_TOKEN="$(< "${INFLUXDB_ADMIN_USER_TOKEN_FILE}")"
|
||||
EOF
|
||||
else
|
||||
cat <<"EOF"
|
||||
export INFLUXDB_ADMIN_USER_TOKEN="${INFLUXDB_ADMIN_USER_TOKEN:-}"
|
||||
EOF
|
||||
fi
|
||||
if [[ -f "${INFLUXDB_USER_PASSWORD_FILE:-}" ]]; then
|
||||
cat <<"EOF"
|
||||
export INFLUXDB_USER_PASSWORD="$(< "${INFLUXDB_USER_PASSWORD_FILE}")"
|
||||
EOF
|
||||
else
|
||||
cat <<"EOF"
|
||||
export INFLUXDB_USER_PASSWORD="${INFLUXDB_USER_PASSWORD:-}"
|
||||
EOF
|
||||
fi
|
||||
if [[ -f "${INFLUXDB_READ_USER_PASSWORD_FILE:-}" ]]; then
|
||||
cat <<"EOF"
|
||||
export INFLUXDB_READ_USER_PASSWORD="$(< "${INFLUXDB_READ_USER_PASSWORD_FILE}")"
|
||||
EOF
|
||||
else
|
||||
cat <<"EOF"
|
||||
export INFLUXDB_READ_USER_PASSWORD="${INFLUXDB_READ_USER_PASSWORD:-}"
|
||||
EOF
|
||||
fi
|
||||
if [[ -f "${INFLUXDB_WRITE_USER_PASSWORD_FILE:-}" ]]; then
|
||||
cat <<"EOF"
|
||||
export INFLUXDB_WRITE_USER_PASSWORD="$(< "${INFLUXDB_WRITE_USER_PASSWORD_FILE}")"
|
||||
EOF
|
||||
else
|
||||
cat <<"EOF"
|
||||
export INFLUXDB_WRITE_USER_PASSWORD="${INFLUXDB_WRITE_USER_PASSWORD:-}"
|
||||
EOF
|
||||
fi
|
||||
}
|
||||
|
||||
########################
|
||||
# Get InfluxDB branch (for compatibility purposes)
|
||||
# Globals:
|
||||
# INFLUXDB_*
|
||||
# Arguments:
|
||||
# None
|
||||
# Returns:
|
||||
# None
|
||||
#########################
|
||||
influxdb_branch() {
|
||||
local -r version=$(influx version 2>/dev/null || influx -version)
|
||||
debug "Calculate branch with: ${version}"
|
||||
|
||||
local branch
|
||||
if [[ "${version}" =~ 2\.[0-9]+\.[0-9]+ ]]; then
|
||||
branch="2"
|
||||
elif [[ "${version}" =~ 1\.[0-9]+\.[0-9]+ ]]; then
|
||||
branch="1"
|
||||
else
|
||||
error "not supported branch: ${version}"
|
||||
fi
|
||||
|
||||
echo "${branch}"
|
||||
}
|
||||
|
||||
########################
|
||||
# Validate settings in INFLUXDB_* env vars
|
||||
# Globals:
|
||||
# INFLUXDB_*
|
||||
# Arguments:
|
||||
# None
|
||||
# Returns:
|
||||
# None
|
||||
#########################
|
||||
influxdb_validate() {
|
||||
local -r branch="$(influxdb_branch)"
|
||||
local error_code=0
|
||||
debug "Validating settings in INFLUXDB_* env vars..."
|
||||
|
||||
# Auxiliary functions
|
||||
print_validation_error() {
|
||||
error "$1"
|
||||
error_code=1
|
||||
}
|
||||
check_password_file() {
|
||||
if ! is_empty_value "${!1:-}" && ! [[ -f "${!1:-}" ]]; then
|
||||
print_validation_error "The variable $1 is defined but the file ${!1} is not accessible or does not exist."
|
||||
fi
|
||||
}
|
||||
check_true_false_value() {
|
||||
if ! is_true_false_value "${!1}"; then
|
||||
print_validation_error "The allowed values for $1 are [true, false]"
|
||||
fi
|
||||
}
|
||||
check_conflicting_ports() {
|
||||
local -r total="$#"
|
||||
for i in $(seq 1 "$((total - 1))"); do
|
||||
for j in $(seq "$((i + 1))" "$total"); do
|
||||
if [[ "${!i}" -eq "${!j}" ]]; then
|
||||
print_validation_error "${!i} and ${!j} are bound to the same port"
|
||||
fi
|
||||
done
|
||||
done
|
||||
}
|
||||
|
||||
# InfluxDB secret files validations
|
||||
local -a user_envs=("INFLUXDB_ADMIN_USER" "INFLUXDB_USER" "INFLUXDB_READ_USER" "INFLUXDB_WRITE_USER")
|
||||
local -a pwd_file_envs=("${user_envs[@]/%/_PASSWORD_FILE}")
|
||||
if [[ "${branch}" = "2" ]]; then
|
||||
pwd_file_envs=("INFLUXDB_ADMIN_USER_PASSWORD_FILE" "INFLUXDB_ADMIN_USER_TOKEN_FILE" "INFLUXDB_USER_PASSWORD_FILE")
|
||||
fi
|
||||
for pwd_file in "${pwd_file_envs[@]}"; do
|
||||
check_password_file "$pwd_file"
|
||||
done
|
||||
|
||||
# InfluxDB booleans validations
|
||||
if [[ "${branch}" = "1" ]]; then
|
||||
read -r -a boolean_envs <<<"$(compgen -A variable | grep -E "INFLUXDB_.*_(ENABLED|DISABLED)" | tr '\r\n' ' ')"
|
||||
for boolean_env in "${boolean_envs[@]}"; do
|
||||
check_true_false_value "$boolean_env"
|
||||
done
|
||||
fi
|
||||
|
||||
# InfluxDB authentication validations
|
||||
if [[ "$branch" = "2" ]]; then
|
||||
if [[ -z "${INFLUXDB_ADMIN_USER_PASSWORD:-}" ]]; then
|
||||
print_validation_error "Primary config authentication is required. Please, specify a password for the ${INFLUXDB_ADMIN_USER} user by setting the 'INFLUXDB_ADMIN_USER_PASSWORD' or 'INFLUXDB_ADMIN_USER_PASSWORD_FILE' environment variables."
|
||||
fi
|
||||
if [[ -z "${INFLUXDB_ADMIN_USER_TOKEN:-}" ]]; then
|
||||
print_validation_error "Primary config authentication is required. Please, specify a token for the ${INFLUXDB_ADMIN_USER} user by setting the 'INFLUXDB_ADMIN_USER_TOKEN' or 'INFLUXDB_ADMIN_USER_TOKEN_FILE' environment variables."
|
||||
fi
|
||||
|
||||
if [[ -n "${INFLUXDB_USER:-}" ]] && [[ -z "${INFLUXDB_USER_PASSWORD:-}" ]]; then
|
||||
print_validation_error "User authentication is required. Please, specify a password for the ${INFLUXDB_USER} user by setting the 'INFLUXDB_USER_PASSWORD' or 'INFLUXDB_USER_PASSWORD_FILE' environment variables."
|
||||
fi
|
||||
else
|
||||
if ! is_boolean_yes "$INFLUXDB_HTTP_AUTH_ENABLED"; then
|
||||
warn "Authentication is disabled over HTTP and HTTPS. For safety reasons, enable it in a production environment."
|
||||
for user in "${user_envs[@]}"; do
|
||||
if [[ -n "${!user:-}" ]]; then
|
||||
warn "The ${user} environment variable will be ignored since authentication is disabled."
|
||||
fi
|
||||
done
|
||||
else
|
||||
for user in "${user_envs[@]}"; do
|
||||
pwd="${user/%/_PASSWORD}"
|
||||
if [[ -n "${!user:-}" ]] && [[ -z "${!pwd:-}" ]]; then
|
||||
print_validation_error "Authentication is enabled over HTTP and HTTPS and you did not provide a password for the ${!user} user. Please, specify a password for the ${!user} user by setting the '${user/%/_PASSWORD}' or '${user/%/_PASSWORD_FILE}' environment variables."
|
||||
fi
|
||||
done
|
||||
fi
|
||||
fi
|
||||
|
||||
# InfluxDB port validations
|
||||
local -a ports_envs=("INFLUXDB_PORT_NUMBER" "INFLUXDB_HTTP_PORT_NUMBER")
|
||||
for p in "${ports_envs[@]}"; do
|
||||
if ! is_empty_value "${!p}" && ! err=$(validate_port -unprivileged "${!p}"); then
|
||||
print_validation_error "An invalid port was specified in the environment variable ${p}: ${err}"
|
||||
fi
|
||||
done
|
||||
check_conflicting_ports "${ports_envs[@]}"
|
||||
|
||||
[[ "$error_code" -eq 0 ]] || exit "$error_code"
|
||||
}
|
||||
|
||||
########################
|
||||
# Get a property's value from the influxdb.conf file
|
||||
# Globals:
|
||||
# INFLUXDB_*
|
||||
# Arguments:
|
||||
# $1 - key
|
||||
# $2 - section
|
||||
# Returns:
|
||||
# None
|
||||
#########################
|
||||
# TODO: use a golang binary (toml-parser)
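# Example, as used when building the environment: influxdb_conf_get "auth-enabled"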
|
||||
influxdb_conf_get() {
|
||||
local -r key="${1:?missing key}"
|
||||
# local -r section="${2:?missing section}"
|
||||
|
||||
sed -n -e "s/^ *$key *= *//p" "$INFLUXDB_CONF_FILE"
|
||||
# toml-parser -r "$section" "$key" "$INFLUXDB_CONF_FILE"
|
||||
}
|
||||
|
||||
########################
|
||||
# Create basic influxdb.conf file using the example provided in the etc/ folder
|
||||
# Globals:
|
||||
# INFLUXDB_*
|
||||
# Arguments:
|
||||
# None
|
||||
# Returns:
|
||||
# None
|
||||
#########################
|
||||
influxdb_create_config() {
|
||||
local -r branch="${1:?branch is missing}"
|
||||
local config_file="${INFLUXDB_CONF_FILE}"
|
||||
|
||||
if [[ "${branch}" = "2" ]]; then
|
||||
config_file="${INFLUXD_CONFIG_PATH}"
|
||||
fi
|
||||
|
||||
if [[ -f "${config_file}" ]]; then
|
||||
info "Custom configuration ${INFLUXDB_CONF_FILE} detected!"
|
||||
warn "The 'INFLUXDB_' environment variables override the equivalent options in the configuration file."
|
||||
warn "If a configuration option is not specified in either the configuration file or in an environment variable, InfluxDB uses its internal default configuration"
|
||||
else
|
||||
info "No injected configuration files found. Creating default config files..."
|
||||
if [[ "${branch}" = "2" ]]; then
|
||||
touch "${config_file}"
|
||||
else
|
||||
cp "${INFLUXDB_CONF_DIR}/influxdb.conf.default" "$INFLUXDB_CONF_FILE"
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
########################
|
||||
# Create primary setup (only for InfluxDB 2.0)
|
||||
# Globals:
|
||||
# INFLUXDB_*
|
||||
# Arguments:
|
||||
# None
|
||||
# Returns:
|
||||
# None
|
||||
#########################
|
||||
influxdb_v2_create_primary_setup() {
|
||||
"${INFLUXDB_BIN_DIR}/influx" setup -f --name "${INFLUXDB_ADMIN_CONFIG_NAME}" \
|
||||
--org "${INFLUXDB_ADMIN_ORG}" \
|
||||
--bucket "${INFLUXDB_ADMIN_BUCKET}" \
|
||||
--username "${INFLUXDB_ADMIN_USER}" \
|
||||
--password "${INFLUXDB_ADMIN_USER_PASSWORD}" \
|
||||
--token "${INFLUXDB_ADMIN_USER_TOKEN}" \
|
||||
--retention "${INFLUXDB_ADMIN_RETENTION}"
|
||||
}
|
||||
|
||||
########################
|
||||
# Create organization (only for InfluxDB 2.0)
|
||||
# Globals:
|
||||
# INFLUXDB_*
|
||||
# Arguments:
|
||||
# None
|
||||
# Returns:
|
||||
# None
|
||||
#########################
|
||||
influxdb_v2_create_org() {
|
||||
INFLUX_ACTIVE_CONFIG="${INFLUXDB_ADMIN_CONFIG_NAME}" "${INFLUXDB_BIN_DIR}/influx" org create --name "${INFLUXDB_USER_ORG}"
|
||||
}
|
||||
|
||||
########################
|
||||
# Create bucket (only for InfluxDB 2.0)
|
||||
# Globals:
|
||||
# INFLUXDB_*
|
||||
# Arguments:
|
||||
# None
|
||||
# Returns:
|
||||
# None
|
||||
#########################
|
||||
influxdb_v2_create_bucket() {
|
||||
INFLUX_ACTIVE_CONFIG="${INFLUXDB_ADMIN_CONFIG_NAME}" "${INFLUXDB_BIN_DIR}/influx" bucket create \
|
||||
"--org" "${INFLUXDB_USER_ORG:-${INFLUXDB_ADMIN_ORG}}" \
|
||||
"--name" "${INFLUXDB_USER_BUCKET}"
|
||||
}
|
||||
|
||||
########################
|
||||
# Create user (only for InfluxDB 2.0)
|
||||
# Globals:
|
||||
# INFLUXDB_*
|
||||
# Arguments:
|
||||
# None
|
||||
# Returns:
|
||||
# None
|
||||
#########################
|
||||
influxdb_v2_create_user() {
|
||||
local username=${1:?missing username}
|
||||
local password=${2:?missing password}
|
||||
local kind=${3:-"admin"}
|
||||
|
||||
local params=("--org" "${INFLUXDB_USER_ORG:-${INFLUXDB_ADMIN_ORG}}" "--name" "${username}" "--password" "${password}")
|
||||
INFLUX_ACTIVE_CONFIG="${INFLUXDB_ADMIN_CONFIG_NAME}" "${INFLUXDB_BIN_DIR}/influx" user create "${params[@]}"
|
||||
|
||||
if is_boolean_yes "${INFLUXDB_CREATE_USER_TOKEN}"; then
|
||||
local read_grants=("--read-buckets" "--read-checks" "--read-dashboards" "--read-dbrps" "--read-notificationEndpoints" "--read-notificationRules" "--read-orgs" "--read-tasks")
|
||||
local write_grants=("--write-buckets" "--write-checks" "--write-dashboards" "--write-dbrps" "--write-notificationEndpoints" "--write-notificationRules" "--write-orgs" "--write-tasks")
|
||||
|
||||
local grants
|
||||
if [[ ${kind} = "admin" ]] || [[ ${kind} = "write" ]]; then
|
||||
grants=("${read_grants[@]}" "${write_grants[@]}")
|
||||
elif [[ ${kind} = "read" ]]; then
|
||||
grants=("${read_grants[@]}")
|
||||
else
|
||||
echo "not supported user kind: ${kind}" && exit 1
|
||||
fi
|
||||
|
||||
INFLUX_ACTIVE_CONFIG="${INFLUXDB_ADMIN_CONFIG_NAME}" "${INFLUXDB_BIN_DIR}/influx" auth create \
|
||||
--user "${username}" \
|
||||
--org "${INFLUXDB_USER_ORG:-${INFLUXDB_ADMIN_ORG}}" "${grants[@]}"
|
||||
fi
|
||||
}
|
||||
|
||||
########################
|
||||
# Start InfluxDB in the background with authentication disabled and wait until it is ready
|
||||
# Globals:
|
||||
# INFLUXDB_*
|
||||
# Arguments:
|
||||
# None
|
||||
# Returns:
|
||||
# None
|
||||
#########################
|
||||
influxdb_start_bg_noauth() {
|
||||
local -r branch="${1:?branch is missing}"
|
||||
|
||||
info "Starting InfluxDB in background..."
|
||||
|
||||
local start_command=("${INFLUXDB_BIN_DIR}/influxd")
|
||||
# For branch 1, add the config file flag, which is required
|
||||
[[ "${branch}" = "1" ]] && start_command=("${start_command[@]}" "-config" "$INFLUXDB_CONF_FILE")
|
||||
# When running as root, drop privileges to the daemon user with gosu
|
||||
am_i_root && start_command=("gosu" "$INFLUXDB_DAEMON_USER" "${start_command[@]}")
|
||||
|
||||
INFLUXDB_HTTP_HTTPS_ENABLED=false INFLUXDB_HTTP_BIND_ADDRESS="127.0.0.1:${INFLUXDB_HTTP_PORT_NUMBER}" debug_execute "${start_command[@]}" &
|
||||
|
||||
# Branch 2 does not listen on this port
|
||||
[[ "${branch}" = "1" ]] && wait-for-port "$INFLUXDB_PORT_NUMBER"
|
||||
wait-for-port "$INFLUXDB_HTTP_PORT_NUMBER"
|
||||
|
||||
wait-for-influxdb
|
||||
}
|
||||
|
||||
########################
|
||||
# Waits for InfluxDB to be ready
|
||||
# Times out after 60 seconds
|
||||
# Globals:
|
||||
# INFLUXDB_*
|
||||
# Arguments:
|
||||
# None
|
||||
# Returns:
|
||||
# None
|
||||
########################
|
||||
wait-for-influxdb() {
|
||||
curl -sSL -I "127.0.0.1:${INFLUXDB_HTTP_PORT_NUMBER}/ping?wait_for_leader=${INFLUXDB_HTTP_READINESS_TIMEOUT}s" >/dev/null 2>&1
|
||||
}
|
||||
|
||||
########################
|
||||
# Check if InfluxDB is running
|
||||
# Globals:
|
||||
# INFLUXDB_*
|
||||
# Arguments:
|
||||
# None
|
||||
# Returns:
|
||||
# Boolean
|
||||
#########################
|
||||
is_influxdb_running() {
|
||||
if pgrep "influxd" >/dev/null 2>&1; then
|
||||
true
|
||||
else
|
||||
false
|
||||
fi
|
||||
}
|
||||
|
||||
########################
|
||||
# Stop InfluxDB
|
||||
# Globals:
|
||||
# INFLUXDB_*
|
||||
# Arguments:
|
||||
# None
|
||||
# Returns:
|
||||
# None
|
||||
#########################
|
||||
influxdb_stop() {
|
||||
info "Stopping InfluxDB..."
|
||||
! is_influxdb_running && return
|
||||
pkill --full --signal TERM "$INFLUXDB_BASE_DIR"
|
||||
wait-for-port --state free "$INFLUXDB_PORT_NUMBER"
|
||||
}
|
||||
|
||||
########################
|
||||
# Execute an arbitrary query using InfluxDB CLI
|
||||
# Globals:
|
||||
# INFLUXDB_*
|
||||
# Arguments:
|
||||
# $1 - Query to execute
|
||||
# $2 - Whether to use admin credentials to run the command or not
|
||||
# Returns:
|
||||
# None
|
||||
#########################
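# Illustrative example (InfluxQL, using admin credentials): influxdb_execute_query "SHOW DATABASES" "true"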
|
||||
influxdb_execute_query() {
|
||||
local -r query="${1:-query is required}"
|
||||
local authenticate="${2:-false}"
|
||||
local flags=("-host" "127.0.0.1" "-port" "$INFLUXDB_HTTP_PORT_NUMBER")
|
||||
|
||||
is_boolean_yes "$authenticate" && flags+=("-username" "${INFLUXDB_ADMIN_USER}" "-password" "${INFLUXDB_ADMIN_USER_PASSWORD}")
|
||||
debug_execute "${INFLUXDB_BIN_DIR}/influx" "${flags[@]}" "-execute" "$query"
|
||||
}
|
||||
|
||||
########################
|
||||
# Creates the admin user
|
||||
# Globals:
|
||||
# INFLUXDB_*
|
||||
# Arguments:
|
||||
# None
|
||||
# Returns:
|
||||
# None
|
||||
#########################
|
||||
influxdb_create_admin_user() {
|
||||
debug "Creating admin user..."
|
||||
influxdb_execute_query "CREATE USER \"${INFLUXDB_ADMIN_USER}\" WITH PASSWORD '${INFLUXDB_ADMIN_USER_PASSWORD}' WITH ALL PRIVILEGES"
|
||||
}
|
||||
|
||||
########################
|
||||
# Creates a database
|
||||
# Globals:
|
||||
# INFLUXDB_*
|
||||
# Arguments:
|
||||
# $1 - Database name
|
||||
# Returns:
|
||||
# None
|
||||
#########################
|
||||
influxdb_create_db() {
|
||||
local -r db="${1:?db is required}"
|
||||
debug "Creating database \"${db}\"..."
|
||||
influxdb_execute_query "CREATE DATABASE ${db}" "true"
|
||||
}
|
||||
|
||||
########################
|
||||
# Creates a user
|
||||
# Globals:
|
||||
# INFLUXDB_*
|
||||
# Arguments:
|
||||
# $1 - User name
|
||||
# $2 - User password
|
||||
# Returns:
|
||||
# None
|
||||
#########################
|
||||
influxdb_create_user() {
|
||||
local -r user="${1:?user is required}"
|
||||
local -r pwd="${2:?pwd is required}"
|
||||
debug "Creating user \"${user}\"..."
|
||||
influxdb_execute_query "CREATE USER \"${user}\" WITH PASSWORD '${pwd}'" "true"
|
||||
influxdb_execute_query "REVOKE ALL PRIVILEGES FROM \"${user}\"" "true"
|
||||
}
|
||||
|
||||
########################
|
||||
# Grants a role to a user on a database
|
||||
# Globals:
|
||||
# INFLUXDB_*
|
||||
# Arguments:
|
||||
# $1 - User name
|
||||
# $2 - Database name
|
||||
# $3 - Role
|
||||
# Returns:
|
||||
# None
|
||||
#########################
|
||||
influxdb_grant() {
|
||||
local -r user="${1:?user is required}"
|
||||
local -r db="${2:?db is required}"
|
||||
local -r role="${3:?role is required}"
|
||||
debug "Granting \"${role}\" permissions to user ${user} on database \"${db}\"..."
|
||||
influxdb_execute_query "GRANT ${role} ON \"${db}\" TO \"${user}\"" "true"
|
||||
}
|
||||
|
||||
########################
|
||||
# Gets the role for a user
|
||||
# Arguments:
|
||||
# $1 - user
|
||||
# Returns:
|
||||
# String
|
||||
#########################
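# e.g. "INFLUXDB_READ_USER" -> "READ", "INFLUXDB_WRITE_USER" -> "WRITE", "INFLUXDB_USER" -> "ALL"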
|
||||
influxdb_user_role() {
|
||||
local role
|
||||
local -r user="${1:?user is required}"
|
||||
role="${user//_/}"
|
||||
role="${role%USER}"
|
||||
role="${role#INFLUXDB}"
|
||||
echo "${role:-ALL}"
|
||||
}
|
||||
|
||||
########################
|
||||
# Ensure InfluxDB is initialized
|
||||
# Globals:
|
||||
# INFLUXDB_*
|
||||
# Arguments:
|
||||
# None
|
||||
# Returns:
|
||||
# None
|
||||
#########################
|
||||
influxdb_initialize() {
|
||||
local -r branch="$(influxdb_branch)"
|
||||
info "Initializing InfluxDB in branch: ${branch}..."
|
||||
|
||||
influxdb_create_config "${branch}"
|
||||
|
||||
if [[ "${branch}" = "2" ]]; then
|
||||
if [[ ! -f "${INFLUX_CONFIGS_PATH}" ]]; then
|
||||
influxdb_start_bg_noauth "${branch}"
|
||||
info "Deploying InfluxDB from scratch"
|
||||
info "Creating primary setup..."
|
||||
influxdb_v2_create_primary_setup
|
||||
|
||||
if [[ -n "${INFLUXDB_USER_ORG}" ]] && [[ "${INFLUXDB_USER_ORG}" != "${INFLUXDB_ADMIN_ORG}" ]]; then
|
||||
info "Creating custom org with id: ${INFLUXDB_USER_ORG}..."
|
||||
influxdb_v2_create_org
|
||||
fi
|
||||
|
||||
if [[ -n "${INFLUXDB_USER_BUCKET}" ]]; then
|
||||
info "Creating custom bucket with id: ${INFLUXDB_USER_BUCKET} in org with id: ${INFLUXDB_USER_ORG:-${INFLUXDB_ADMIN_ORG}}..."
|
||||
influxdb_v2_create_bucket
|
||||
fi
|
||||
|
||||
if [[ -n "${INFLUXDB_USER}" ]]; then
|
||||
info "Creating custom user with username: ${INFLUXDB_USER} in org with id: ${INFLUXDB_USER_ORG:-${INFLUXDB_ADMIN_ORG}}..."
|
||||
influxdb_v2_create_user "${INFLUXDB_USER}" "${INFLUXDB_USER_PASSWORD}"
|
||||
fi
|
||||
if [[ -n "${INFLUXDB_READ_USER}" ]]; then
|
||||
info "Creating custom user with username: ${INFLUXDB_READ_USER} in org with id: ${INFLUXDB_USER_ORG:-${INFLUXDB_ADMIN_ORG}}..."
|
||||
influxdb_v2_create_user "${INFLUXDB_READ_USER}" "${INFLUXDB_READ_USER_PASSWORD}" "read"
|
||||
fi
|
||||
if [[ -n "${INFLUXDB_WRITE_USER}" ]]; then
|
||||
info "Creating custom user with username: ${INFLUXDB_WRITE_USER} in org with id: ${INFLUXDB_USER_ORG:-${INFLUXDB_ADMIN_ORG}}..."
|
||||
influxdb_v2_create_user "${INFLUXDB_WRITE_USER}" "${INFLUXDB_WRITE_USER_PASSWORD}" "write"
|
||||
fi
|
||||
else
|
||||
info "influx CLI configuration ${INFLUXDB_CONF_FILE} detected!"
|
||||
info "Deploying InfluxDB with persisted data"
|
||||
fi
|
||||
else
|
||||
if is_dir_empty "$INFLUXDB_DATA_DIR"; then
|
||||
info "Deploying InfluxDB from scratch"
|
||||
if is_boolean_yes "$INFLUXDB_HTTP_AUTH_ENABLED"; then
|
||||
influxdb_start_bg_noauth "${branch}"
|
||||
info "Creating users and databases..."
|
||||
influxdb_create_admin_user
|
||||
[[ -n "$INFLUXDB_DB" ]] && influxdb_create_db "$INFLUXDB_DB"
|
||||
local -a user_envs=("INFLUXDB_USER" "INFLUXDB_READ_USER" "INFLUXDB_WRITE_USER")
|
||||
for user in "${user_envs[@]}"; do
|
||||
pwd="${user/%/_PASSWORD}"
|
||||
if [[ -n "${!user}" ]]; then
|
||||
influxdb_create_user "${!user}" "${!pwd}"
|
||||
[[ -n "$INFLUXDB_DB" ]] && influxdb_grant "${!user}" "$INFLUXDB_DB" "$(influxdb_user_role "$user")"
|
||||
fi
|
||||
done
|
||||
fi
|
||||
else
|
||||
info "Deploying InfluxDB with persisted data"
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
########################
|
||||
# Run custom initialization scripts
|
||||
# Globals:
|
||||
# INFLUXDB_*
|
||||
# Arguments:
|
||||
# None
|
||||
# Returns:
|
||||
# None
|
||||
#########################
|
||||
influxdb_custom_init_scripts() {
|
||||
local -r branch="$(influxdb_branch)"
|
||||
|
||||
if [[ -n $(find "${INFLUXDB_INITSCRIPTS_DIR}/" -type f -regex ".*\.\(sh\|txt\)") ]] && [[ ! -f "${INFLUXDB_INITSCRIPTS_DIR}/.user_scripts_initialized" ]]; then
|
||||
info "Loading user's custom files from ${INFLUXDB_INITSCRIPTS_DIR} ..."
|
||||
local -r tmp_file="/tmp/filelist"
|
||||
if ! is_influxdb_running; then
|
||||
influxdb_start_bg_noauth "${branch}"
|
||||
fi
|
||||
find "${INFLUXDB_INITSCRIPTS_DIR}/" -type f -regex ".*\.\(sh\|txt\)" | sort >"$tmp_file"
|
||||
while read -r f; do
|
||||
case "$f" in
|
||||
*.sh)
|
||||
if [[ -x "$f" ]]; then
|
||||
debug "Executing $f"
|
||||
"$f"
|
||||
else
|
||||
debug "Sourcing $f"
|
||||
. "$f"
|
||||
fi
|
||||
;;
|
||||
*.txt)
|
||||
debug "Executing $f"
|
||||
influxdb_execute_query "$(<"$f")"
|
||||
;;
|
||||
*) debug "Ignoring $f" ;;
|
||||
esac
|
||||
done < "$tmp_file"
|
||||
rm -f "$tmp_file"
|
||||
touch "$INFLUXDB_VOLUME_DIR"/.user_scripts_initialized
|
||||
fi
|
||||
}
|
||||
|
|
@ -37,7 +37,8 @@ Bitnami containers can be used with [Kubeapps](https://kubeapps.com/) for deploy
|
|||
Learn more about the Bitnami tagging policy and the difference between rolling tags and immutable tags [in our documentation page](https://docs.bitnami.com/tutorials/understand-rolling-tags-containers/).
|
||||
|
||||
|
||||
* [`1`, `1-debian-10`, `1.8.4`, `1.8.4-debian-10-r0`, `latest` (1/debian-10/Dockerfile)](https://github.com/bitnami/bitnami-docker-influxdb/blob/1.8.4-debian-10-r0/1/debian-10/Dockerfile)
|
||||
* [`2`, `2-debian-10`, `2.0.2`, `2.0.2-debian-10-r0`, `latest` (2/debian-10/Dockerfile)](https://github.com/bitnami/bitnami-docker-influxdb/blob/2.0.2-debian-10-r0/2/debian-10/Dockerfile)
|
||||
* [`1`, `1-debian-10`, `1.8.4`, `1.8.4-debian-10-r0` (1/debian-10/Dockerfile)](https://github.com/bitnami/bitnami-docker-influxdb/blob/1.8.4-debian-10-r0/1/debian-10/Dockerfile)
|
||||
|
||||
Subscribe to project updates by watching the [bitnami/influxdb GitHub repo](https://github.com/bitnami/bitnami-docker-influxdb).
|
||||
|
||||
|
|
@ -58,7 +59,7 @@ $ docker pull bitnami/influxdb:[TAG]
|
|||
If you wish, you can also build the image yourself.
|
||||
|
||||
```console
|
||||
$ docker build -t bitnami/influxdb:latest 'https://github.com/bitnami/bitnami-docker-influxdb.git#master:1/debian-10'
|
||||
$ docker build -t bitnami/influxdb:latest 'https://github.com/bitnami/bitnami-docker-influxdb.git#master:2/debian-10'
|
||||
```
|
||||
|
||||
# Persisting your application
|
||||
|
|
@ -158,12 +159,13 @@ $ docker-compose up -d
|
|||
|
||||
# Configuration
|
||||
|
||||
InfluxDB (TM) can be configured via environment variables (prefixed with `INFLUXDB_`) or using a configuration file (`influxdb.conf`). If a configuration option is not specified in either the configuration file or in an environment variable, InfluxDB (TM) uses its internal default configuration.
|
||||
InfluxDB (TM) can be configured via environment variables or using a configuration file (`influxdb.conf`). If a configuration option is not specified in either the configuration file or in an environment variable, InfluxDB (TM) uses its internal default configuration.
|
||||
|
||||
- If you are using v1, variables must be prefixed with `INFLUXDB_`; find more information [here](https://docs.influxdata.com/influxdb/v1.8/administration/config/).
|
||||
- If you are using v2, variables must be prefixed with `INFLUXD_`; find more information [here](https://docs.influxdata.com/influxdb/v2.0/reference/config-options) (see the example below).
|
||||
|
||||
> Note: Settings provided via environment variables override the equivalent options in the configuration file.
|
||||
|
||||
Find more information about all the available configuration options in the [official documentation](https://docs.influxdata.com/influxdb/v1.7/administration/config/).
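For instance, a sketch of disabling usage reporting in each major version (assuming the variable names map onto InfluxDB's `reporting-disabled` option, and reusing the sample credentials shown elsewhere in this README):

```console
$ docker run --name influxdb \
  -e INFLUXDB_ADMIN_USER_PASSWORD=bitnami123 \
  -e INFLUXDB_REPORTING_DISABLED=true \
  bitnami/influxdb:1-debian-10

$ docker run --name influxdb \
  -e INFLUXDB_ADMIN_USER_PASSWORD=bitnami123 \
  -e INFLUXDB_ADMIN_USER_TOKEN=admintoken123 \
  -e INFLUXD_REPORTING_DISABLED=true \
  bitnami/influxdb:2-debian-10
```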
|
||||
|
||||
## Configuration file
|
||||
|
||||
The configuration can easily be set up by mounting your own configuration file (`influxdb.conf`) in the `/opt/bitnami/influxdb/etc/` directory:
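For example (a minimal sketch; the host path is illustrative):

```console
$ docker run --name influxdb \
  -e INFLUXDB_ADMIN_USER_PASSWORD=bitnami123 \
  -e INFLUXDB_ADMIN_USER_TOKEN=admintoken123 \
  -v /path/to/influxdb.conf:/opt/bitnami/influxdb/etc/influxdb.conf:ro \
  bitnami/influxdb:latest
```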
|
||||
|
|
@ -217,11 +219,11 @@ services:
|
|||
...
|
||||
```
|
||||
|
||||
**Warning** In case you want to allow users to access the database without credentials, set the environment variable `INFLUXDB_HTTP_AUTH_ENABLED=false`. **This is recommended only for development**.
|
||||
**Warning** If you want to allow users to access the database without credentials, set the environment variable `INFLUXDB_HTTP_AUTH_ENABLED=false`. **This is recommended only for development**. If you are using InfluxDB (TM) v2, authentication is required and `INFLUXDB_HTTP_AUTH_ENABLED` will be ignored.
|
||||
|
||||
## Allowing empty passwords
|
||||
|
||||
By default the InfluxDB (TM) image expects all the available passwords to be set. In order to allow empty passwords, it is necessary to set the `INFLUXDB_HTTP_AUTH_ENABLED=false` env variable. This env variable is only recommended for testing or development purposes. We strongly recommend specifying the `INFLUXDB_ADMIN_USER_PASSWORD` for any other scenario.
|
||||
By default, the InfluxDB (TM) image expects all the available passwords to be set. In order to allow empty passwords, it is necessary to set the `INFLUXDB_HTTP_AUTH_ENABLED=false` environment variable. This variable is only recommended for testing or development purposes. We strongly recommend specifying the `INFLUXDB_ADMIN_USER_PASSWORD` for any other scenario. If you are using InfluxDB (TM) v2, authentication is required and `INFLUXDB_HTTP_AUTH_ENABLED` will be ignored.
|
||||
|
||||
```console
|
||||
$ docker run --name influxdb --env INFLUXDB_HTTP_AUTH_ENABLED=false bitnami/influxdb:latest
|
||||
|
|
@ -240,13 +242,13 @@ services:
|
|||
|
||||
## Creating a database on first run
|
||||
|
||||
By passing the `INFLUXDB_DB` environment variable when running the image for the first time, a database will be created. This is useful if your application requires that a database already exists, saving you from having to manually create the database using the InfluxDB (TM) client.
|
||||
If you are using InfluxDB (TM) v1, you can pass the `INFLUXDB_DB` environment variable when running the image for the first time to create a database. This is useful if your application requires that a database already exists, saving you from having to manually create the database using the InfluxDB (TM) client.
|
||||
|
||||
```console
|
||||
$ docker run --name influxdb \
|
||||
-e INFLUXDB_ADMIN_USER_PASSWORD=password123 \
|
||||
-e INFLUXDB_DB=my_database \
|
||||
bitnami/influxdb:latest
|
||||
bitnami/influxdb:1-debian-10
|
||||
```
|
||||
|
||||
or by modifying the [`docker-compose.yml`](https://github.com/bitnami/bitnami-docker-influxdb/blob/master/docker-compose.yml) file present in this repository:
|
||||
|
|
@ -261,6 +263,15 @@ services:
|
|||
...
|
||||
```
|
||||
|
||||
If you are using InfluxDB (TM) v2, you can pass the `INFLUXDB_USER_BUCKET` environment variable when running the image for the first time to create a new bucket. This is useful if your application requires that a bucket already exists, saving you from having to manually create the bucket using the InfluxDB (TM) CLI.
|
||||
|
||||
```console
|
||||
$ docker run --name influxdb \
|
||||
-e INFLUXDB_ADMIN_USER_PASSWORD=password123 \
|
||||
-e INFLUXDB_USER_BUCKET=my_bucket \
|
||||
bitnami/influxdb:latest
|
||||
```
|
||||
|
||||
## Creating a database user on first run
|
||||
|
||||
You can create a restricted database user that only has permissions for the database created with the [`INFLUXDB_DB`](#creating-a-database-on-first-run) environment variable. To do this, provide the `INFLUXDB_USER` environment variable and, to set a password for the database user, the `INFLUXDB_USER_PASSWORD` variable.
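For example, a sketch along the lines of the previous snippets (all values are placeholders):

```console
$ docker run --name influxdb \
  -e INFLUXDB_ADMIN_USER_PASSWORD=password123 \
  -e INFLUXDB_DB=my_database \
  -e INFLUXDB_USER=my_user \
  -e INFLUXDB_USER_PASSWORD=my_password \
  bitnami/influxdb:1-debian-10
```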
|
||||
|
|
@ -326,7 +337,6 @@ services:
|
|||
|
||||
- `INFLUXDB_HTTP_READINESS_TIMEOUT`: Specify the time (in seconds) to wait until the HTTP endpoint is ready. Default: 60
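For instance, to wait up to 120 seconds (a sketch reusing the sample credentials from this README):

```console
$ docker run --name influxdb \
  -e INFLUXDB_ADMIN_USER_PASSWORD=bitnami123 \
  -e INFLUXDB_ADMIN_USER_TOKEN=admintoken123 \
  -e INFLUXDB_HTTP_READINESS_TIMEOUT=120 \
  bitnami/influxdb:latest
```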
|
||||
|
||||
|
||||
# Logging
|
||||
|
||||
The Bitnami InfluxDB (TM) Docker image sends the container logs to `stdout`. To view the logs:
|
||||
|
|
|
|||
|
|
@ -1,10 +1,13 @@
|
|||
version: '2'
|
||||
services:
|
||||
influxdb:
|
||||
image: docker.io/bitnami/influxdb:1-debian-10
|
||||
image: docker.io/bitnami/influxdb:2-debian-10
|
||||
ports:
|
||||
- 8086:8086
|
||||
- 8088:8088
|
||||
environment:
|
||||
- INFLUXDB_ADMIN_USER_PASSWORD=bitnami123
|
||||
- INFLUXDB_ADMIN_USER_TOKEN=admintoken123
|
||||
volumes:
|
||||
- influxdb_data:/bitnami/influxdb
|
||||
volumes:
|
||||
|
|
|
|||