Remove debian-10 directory

Signed-off-by: Carlos Rodriguez Hernandez <carlosrh@vmware.com>
Carlos Rodriguez Hernandez 2022-06-22 10:53:50 +00:00 committed by Bitnami Containers
parent 4960c17826
commit c3965af9e6
52 changed files with 0 additions and 6726 deletions

View File

@@ -1,33 +0,0 @@
FROM docker.io/bitnami/minideb:buster
LABEL maintainer "Bitnami <containers@bitnami.com>"
ENV HOME="/" \
OS_ARCH="amd64" \
OS_FLAVOUR="debian-10" \
OS_NAME="linux"
ARG JAVA_EXTRA_SECURITY_DIR="/bitnami/java/extra-security"
COPY prebuildfs /
# Install required system packages and dependencies
RUN install_packages acl ca-certificates curl gzip libc6 lsof netcat-traditional procps tar zlib1g
RUN . /opt/bitnami/scripts/libcomponent.sh && component_unpack "java" "11.0.15-0" --checksum 255bc547614ce8052f416a74c58cfb916f724876315f15b816e0eb98b2bded35
RUN . /opt/bitnami/scripts/libcomponent.sh && component_unpack "solr" "8.11.1-8" --checksum dc6035aa6abf722ac025ff953b57f0e121e781a26cf412e176a57574c86487b1
RUN . /opt/bitnami/scripts/libcomponent.sh && component_unpack "gosu" "1.14.0-7" --checksum d6280b6f647a62bf6edc74dc8e526bfff63ddd8067dcb8540843f47203d9ccf1
RUN apt-get update && apt-get upgrade -y && \
rm -r /var/lib/apt/lists /var/cache/apt/archives
RUN chmod g+rwX /opt/bitnami
COPY rootfs /
RUN /opt/bitnami/scripts/java/postunpack.sh
RUN /opt/bitnami/scripts/solr/postunpack.sh
ENV APP_VERSION="8.11.1" \
BITNAMI_APP_NAME="solr" \
JAVA_HOME="/opt/bitnami/java" \
PATH="/opt/bitnami/java/bin:/opt/bitnami/solr/bin:/opt/bitnami/common/bin:$PATH"
EXPOSE 8983
USER 1001
ENTRYPOINT [ "/opt/bitnami/scripts/solr/entrypoint.sh" ]
CMD [ "/opt/bitnami/scripts/solr/run.sh" ]
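
For context, a minimal sketch of how this debian-10 Dockerfile was typically built and exercised locally; the image tag and host port mapping below are illustrative, not part of the removed sources:
docker build -t bitnami/solr:8.11.1-debian-10 .
docker run -d --name solr -p 8983:8983 bitnami/solr:8.11.1-debian-10
# The container starts as UID 1001 and serves the Solr admin UI on port 8983
curl http://localhost:8983/solr/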

View File

@@ -1,13 +0,0 @@
version: '2'
services:
solr:
image: docker.io/bitnami/solr:8
environment:
- SOLR_ENABLE_AUTHENTICATION=yes
ports:
- '8983:8983'
volumes:
- 'solr_data:/bitnami/solr'
volumes:
solr_data:
driver: local
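
A hedged usage sketch for this Compose file; since SOLR_ENABLE_AUTHENTICATION=yes is set, requests may require admin credentials (the username and password below are placeholders):
docker-compose up -d
docker-compose logs -f solr          # watch the bootstrap output
curl -u admin:password http://localhost:8983/solr/admin/cores   # placeholder credentials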

View File

@@ -1,23 +0,0 @@
{
"gosu": {
"arch": "amd64",
"digest": "d6280b6f647a62bf6edc74dc8e526bfff63ddd8067dcb8540843f47203d9ccf1",
"distro": "debian-10",
"type": "NAMI",
"version": "1.14.0-7"
},
"java": {
"arch": "amd64",
"digest": "255bc547614ce8052f416a74c58cfb916f724876315f15b816e0eb98b2bded35",
"distro": "debian-10",
"type": "NAMI",
"version": "11.0.15-0"
},
"solr": {
"arch": "amd64",
"digest": "dc6035aa6abf722ac025ff953b57f0e121e781a26cf412e176a57574c86487b1",
"distro": "debian-10",
"type": "NAMI",
"version": "8.11.1-8"
}
}

View File

@@ -1,3 +0,0 @@
Bitnami containers ship with software bundles. You can find the licenses under:
/opt/bitnami/nami/COPYING
/opt/bitnami/[name-of-bundle]/licenses/[bundle-version].txt

View File

@@ -1,51 +0,0 @@
#!/bin/bash
#
# Bitnami custom library
# shellcheck disable=SC1091
# Load Generic Libraries
. /opt/bitnami/scripts/liblog.sh
# Constants
BOLD='\033[1m'
# Functions
########################
# Print the welcome page
# Globals:
# DISABLE_WELCOME_MESSAGE
# BITNAMI_APP_NAME
# Arguments:
# None
# Returns:
# None
#########################
print_welcome_page() {
if [[ -z "${DISABLE_WELCOME_MESSAGE:-}" ]]; then
if [[ -n "$BITNAMI_APP_NAME" ]]; then
print_image_welcome_page
fi
fi
}
########################
# Print the welcome page for a Bitnami Docker image
# Globals:
# BITNAMI_APP_NAME
# Arguments:
# None
# Returns:
# None
#########################
print_image_welcome_page() {
local github_url="https://github.com/bitnami/bitnami-docker-${BITNAMI_APP_NAME}"
log ""
log "${BOLD}Welcome to the Bitnami ${BITNAMI_APP_NAME} container${RESET}"
log "Subscribe to project updates by watching ${BOLD}${github_url}${RESET}"
log "Submit issues and feature requests at ${BOLD}${github_url}/issues${RESET}"
log ""
}
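
A minimal usage sketch, assuming the library is installed under /opt/bitnami/scripts as in the Dockerfile above:
. /opt/bitnami/scripts/libbitnami.sh
export BITNAMI_APP_NAME="solr"
print_welcome_page                   # prints the banner with links to the GitHub project
# Setting DISABLE_WELCOME_MESSAGE to any non-empty value suppresses the banner:
export DISABLE_WELCOME_MESSAGE="yes"
print_welcome_page                   # prints nothing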

View File

@@ -1,65 +0,0 @@
#!/bin/bash
#
# Library for managing Bitnami components
# Constants
CACHE_ROOT="/tmp/bitnami/pkg/cache"
DOWNLOAD_URL="https://downloads.bitnami.com/files/stacksmith"
# Functions
########################
# Download and unpack a Bitnami package
# Globals:
# OS_NAME
# OS_ARCH
# OS_FLAVOUR
# Arguments:
# $1 - component's name
# $2 - component's version
# Returns:
# None
#########################
component_unpack() {
local name="${1:?name is required}"
local version="${2:?version is required}"
local base_name="${name}-${version}-${OS_NAME}-${OS_ARCH}-${OS_FLAVOUR}"
local package_sha256=""
local directory="/opt/bitnami"
# Validate arguments
shift 2
while [ "$#" -gt 0 ]; do
case "$1" in
-c|--checksum)
shift
package_sha256="${1:?missing package checksum}"
;;
*)
echo "Invalid command line flag $1" >&2
return 1
;;
esac
shift
done
echo "Downloading $base_name package"
if [ -f "${CACHE_ROOT}/${base_name}.tar.gz" ]; then
echo "${CACHE_ROOT}/${base_name}.tar.gz already exists, skipping download."
cp "${CACHE_ROOT}/${base_name}.tar.gz" .
rm "${CACHE_ROOT}/${base_name}.tar.gz"
if [ -f "${CACHE_ROOT}/${base_name}.tar.gz.sha256" ]; then
echo "Using the local sha256 from ${CACHE_ROOT}/${base_name}.tar.gz.sha256"
package_sha256="$(< "${CACHE_ROOT}/${base_name}.tar.gz.sha256")"
rm "${CACHE_ROOT}/${base_name}.tar.gz.sha256"
fi
else
curl --remote-name --silent --show-error --fail "${DOWNLOAD_URL}/${base_name}.tar.gz"
fi
if [ -n "$package_sha256" ]; then
echo "Verifying package integrity"
echo "$package_sha256 ${base_name}.tar.gz" | sha256sum --check - || exit "$?"
fi
tar --directory "${directory}" --extract --gunzip --file "${base_name}.tar.gz" --no-same-owner --strip-components=2
rm "${base_name}.tar.gz"
}
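
A usage sketch mirroring the Dockerfile above, run as root during the image build; the checksum is the gosu digest from components.json, and OS_* must be exported because component_unpack builds the package name from them:
. /opt/bitnami/scripts/libcomponent.sh
export OS_NAME="linux" OS_ARCH="amd64" OS_FLAVOUR="debian-10"
component_unpack "gosu" "1.14.0-7" \
    --checksum d6280b6f647a62bf6edc74dc8e526bfff63ddd8067dcb8540843f47203d9ccf1
# The tarball is fetched from $DOWNLOAD_URL (or reused from $CACHE_ROOT), verified and
# extracted under /opt/bitnami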

View File

@@ -1,139 +0,0 @@
#!/bin/bash
#
# Library for managing files
# shellcheck disable=SC1091
# Load Generic Libraries
. /opt/bitnami/scripts/libos.sh
# Functions
########################
# Replace a regex-matching string in a file
# Arguments:
# $1 - filename
# $2 - match regex
# $3 - substitute regex
# $4 - use POSIX regex. Default: true
# Returns:
# None
#########################
replace_in_file() {
local filename="${1:?filename is required}"
local match_regex="${2:?match regex is required}"
local substitute_regex="${3:?substitute regex is required}"
local posix_regex=${4:-true}
local result
# We should avoid using 'sed in-place' substitutions
# 1) They are not compatible with files mounted from ConfigMap(s)
# 2) We found incompatibility issues with Debian10 and "in-place" substitutions
local -r del=$'\001' # Use a non-printable character as a 'sed' delimiter to avoid issues
if [[ $posix_regex = true ]]; then
result="$(sed -E "s${del}${match_regex}${del}${substitute_regex}${del}g" "$filename")"
else
result="$(sed "s${del}${match_regex}${del}${substitute_regex}${del}g" "$filename")"
fi
echo "$result" > "$filename"
}
########################
# Replace a regex-matching multiline string in a file
# Arguments:
# $1 - filename
# $2 - match regex
# $3 - substitute regex
# Returns:
# None
#########################
replace_in_file_multiline() {
local filename="${1:?filename is required}"
local match_regex="${2:?match regex is required}"
local substitute_regex="${3:?substitute regex is required}"
local result
local -r del=$'\001' # Use a non-printable character as a 'sed' delimiter to avoid issues
result="$(perl -pe "BEGIN{undef $/;} s${del}${match_regex}${del}${substitute_regex}${del}sg" "$filename")"
echo "$result" > "$filename"
}
########################
# Remove a line in a file based on a regex
# Arguments:
# $1 - filename
# $2 - match regex
# $3 - use POSIX regex. Default: true
# Returns:
# None
#########################
remove_in_file() {
local filename="${1:?filename is required}"
local match_regex="${2:?match regex is required}"
local posix_regex=${3:-true}
local result
# We should avoid using 'sed in-place' substitutions
# 1) They are not compatible with files mounted from ConfigMap(s)
# 2) We found incompatibility issues with Debian10 and "in-place" substitutions
if [[ $posix_regex = true ]]; then
result="$(sed -E "/$match_regex/d" "$filename")"
else
result="$(sed "/$match_regex/d" "$filename")"
fi
echo "$result" > "$filename"
}
########################
# Appends text after the last line matching a pattern
# Arguments:
# $1 - file
# $2 - match regex
# $3 - contents to add
# Returns:
# None
#########################
append_file_after_last_match() {
local file="${1:?missing file}"
local match_regex="${2:?missing pattern}"
local value="${3:?missing value}"
# We read the file in reverse, replace the first match (0,/pattern/s) and then reverse the results again
result="$(tac "$file" | sed -E "0,/($match_regex)/s||${value}\n\1|" | tac)"
echo "$result" > "$file"
}
########################
# Wait until a certain entry is present in a log file
# Arguments:
# $1 - entry to look for
# $2 - log file
# $3 - max retries. Default: 12
# $4 - sleep between retries (in seconds). Default: 5
# Returns:
# Boolean
#########################
wait_for_log_entry() {
local -r entry="${1:-missing entry}"
local -r log_file="${2:-missing log file}"
local -r retries="${3:-12}"
local -r interval_time="${4:-5}"
local attempt=0
check_log_file_for_entry() {
if ! grep -qE "$entry" "$log_file"; then
debug "Entry \"${entry}\" still not present in ${log_file} (attempt $((++attempt))/${retries})"
return 1
fi
}
debug "Checking that ${log_file} log file contains entry \"${entry}\""
if retry_while check_log_file_for_entry "$retries" "$interval_time"; then
debug "Found entry \"${entry}\" in ${log_file}"
true
else
error "Could not find entry \"${entry}\" in ${log_file} after ${retries} retries"
debug_execute cat "$log_file"
return 1
fi
}
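
A small self-contained sketch of the main helpers; it works on a temporary file so it can be run anywhere the library is present:
. /opt/bitnami/scripts/libfile.sh
conf="$(mktemp)"
printf '%s\n' "# sample config" "listen_port=8080" "log_level=info" > "$conf"
replace_in_file "$conf" "^listen_port=.*" "listen_port=9090"     # POSIX extended regex by default
remove_in_file "$conf" "^#"                                      # drop comment lines
append_file_after_last_match "$conf" "^log_level=" "log_format=json"
cat "$conf"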

View File

@@ -1,190 +0,0 @@
#!/bin/bash
#
# Library for file system actions
# shellcheck disable=SC1091
# Load Generic Libraries
. /opt/bitnami/scripts/liblog.sh
# Functions
########################
# Ensure a file/directory is owned (user and group) by the given user
# Arguments:
# $1 - filepath
# $2 - owner user
# $3 - owner group (optional; defaults to the owner user)
# Returns:
# None
#########################
owned_by() {
local path="${1:?path is missing}"
local owner="${2:?owner is missing}"
local group="${3:-}"
if [[ -n $group ]]; then
chown "$owner":"$group" "$path"
else
chown "$owner":"$owner" "$path"
fi
}
########################
# Ensure a directory exists and, optionally, is owned by the given user
# Arguments:
# $1 - directory
# $2 - owner
# Returns:
# None
#########################
ensure_dir_exists() {
local dir="${1:?directory is missing}"
local owner_user="${2:-}"
local owner_group="${3:-}"
mkdir -p "${dir}"
if [[ -n $owner_user ]]; then
owned_by "$dir" "$owner_user" "$owner_group"
fi
}
########################
# Checks whether a directory is empty or not
# arguments:
# $1 - directory
# returns:
# boolean
#########################
is_dir_empty() {
local -r path="${1:?missing directory}"
# Calculate real path in order to avoid issues with symlinks
local -r dir="$(realpath "$path")"
if [[ ! -e "$dir" ]] || [[ -z "$(ls -A "$dir")" ]]; then
true
else
false
fi
}
########################
# Checks whether a mounted directory is empty or not
# arguments:
# $1 - directory
# returns:
# boolean
#########################
is_mounted_dir_empty() {
local dir="${1:?missing directory}"
if is_dir_empty "$dir" || find "$dir" -mindepth 1 -maxdepth 1 -not -name ".snapshot" -not -name "lost+found" -exec false {} +; then
true
else
false
fi
}
########################
# Checks whether a file can be written to or not
# arguments:
# $1 - file
# returns:
# boolean
#########################
is_file_writable() {
local file="${1:?missing file}"
local dir
dir="$(dirname "$file")"
if [[ (-f "$file" && -w "$file") || (! -f "$file" && -d "$dir" && -w "$dir") ]]; then
true
else
false
fi
}
########################
# Relativize a path
# arguments:
# $1 - path
# $2 - base
# returns:
# None
#########################
relativize() {
local -r path="${1:?missing path}"
local -r base="${2:?missing base}"
pushd "$base" >/dev/null || exit
realpath -q --no-symlinks --relative-base="$base" "$path" | sed -e 's|^/$|.|' -e 's|^/||'
popd >/dev/null || exit
}
########################
# Configure permissions and ownership recursively
# Globals:
# None
# Arguments:
# $1 - paths (as a string).
# Flags:
# -f|--file-mode - mode for files.
# -d|--dir-mode - mode for directories.
# -u|--user - user
# -g|--group - group
# Returns:
# None
#########################
configure_permissions_ownership() {
local -r paths="${1:?paths is missing}"
local dir_mode=""
local file_mode=""
local user=""
local group=""
# Validate arguments
shift 1
while [ "$#" -gt 0 ]; do
case "$1" in
-f | --file-mode)
shift
file_mode="${1:?missing mode for files}"
;;
-d | --dir-mode)
shift
dir_mode="${1:?missing mode for directories}"
;;
-u | --user)
shift
user="${1:?missing user}"
;;
-g | --group)
shift
group="${1:?missing group}"
;;
*)
echo "Invalid command line flag $1" >&2
return 1
;;
esac
shift
done
read -r -a filepaths <<<"$paths"
for p in "${filepaths[@]}"; do
if [[ -e "$p" ]]; then
if [[ -n $dir_mode ]]; then
find -L "$p" -type d -exec chmod "$dir_mode" {} \;
fi
if [[ -n $file_mode ]]; then
find -L "$p" -type f -exec chmod "$file_mode" {} \;
fi
if [[ -n $user ]] && [[ -n $group ]]; then
chown -LR "$user":"$group" "$p"
elif [[ -n $user ]] && [[ -z $group ]]; then
chown -LR "$user" "$p"
elif [[ -z $user ]] && [[ -n $group ]]; then
chgrp -LR "$group" "$p"
fi
else
stderr_print "$p does not exist"
fi
done
}
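
A usage sketch, assuming it runs as root inside the container (ownership changes need root); /bitnami/solr matches the volume used by this image:
. /opt/bitnami/scripts/libfs.sh
ensure_dir_exists "/bitnami/solr" "1001"               # create it and hand it to the non-root UID
if is_mounted_dir_empty "/bitnami/solr"; then
    echo "Empty volume: first boot, data will be initialized"
fi
configure_permissions_ownership "/bitnami/solr /opt/bitnami/solr" \
    -d "775" -f "664" -u "1001" -g "root"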

View File

@@ -1,16 +0,0 @@
#!/bin/bash
#
# Library to use for scripts expected to be used as Kubernetes lifecycle hooks
# shellcheck disable=SC1091
# Load generic libraries
. /opt/bitnami/scripts/liblog.sh
. /opt/bitnami/scripts/libos.sh
# Override functions that log to stdout/stderr of the current process, so they print to process 1
for function_to_override in stderr_print debug_execute; do
# Output is sent to the stdout/stderr of process 1 and thus ends up in the container log
# The hook output in general isn't saved
eval "$(declare -f "$function_to_override") >/proc/1/fd/1 2>/proc/1/fd/2"
done

View File

@@ -1,112 +0,0 @@
#!/bin/bash
#
# Library for logging functions
# Constants
RESET='\033[0m'
RED='\033[38;5;1m'
GREEN='\033[38;5;2m'
YELLOW='\033[38;5;3m'
MAGENTA='\033[38;5;5m'
CYAN='\033[38;5;6m'
# Functions
########################
# Print to STDERR
# Arguments:
# Message to print
# Returns:
# None
#########################
stderr_print() {
# 'is_boolean_yes' is defined in libvalidations.sh, but depends on this file so we cannot source it
local bool="${BITNAMI_QUIET:-false}"
# comparison is performed without regard to the case of alphabetic characters
shopt -s nocasematch
if ! [[ "$bool" = 1 || "$bool" =~ ^(yes|true)$ ]]; then
printf "%b\\n" "${*}" >&2
fi
}
########################
# Log message
# Arguments:
# Message to log
# Returns:
# None
#########################
log() {
stderr_print "${CYAN}${MODULE:-} ${MAGENTA}$(date "+%T.%2N ")${RESET}${*}"
}
########################
# Log an 'info' message
# Arguments:
# Message to log
# Returns:
# None
#########################
info() {
log "${GREEN}INFO ${RESET} ==> ${*}"
}
########################
# Log a 'warn' message
# Arguments:
# Message to log
# Returns:
# None
#########################
warn() {
log "${YELLOW}WARN ${RESET} ==> ${*}"
}
########################
# Log an 'error' message
# Arguments:
# Message to log
# Returns:
# None
#########################
error() {
log "${RED}ERROR${RESET} ==> ${*}"
}
########################
# Log a 'debug' message
# Globals:
# BITNAMI_DEBUG
# Arguments:
# None
# Returns:
# None
#########################
debug() {
# 'is_boolean_yes' is defined in libvalidations.sh, but depends on this file so we cannot source it
local bool="${BITNAMI_DEBUG:-false}"
# comparison is performed without regard to the case of alphabetic characters
shopt -s nocasematch
if [[ "$bool" = 1 || "$bool" =~ ^(yes|true)$ ]]; then
log "${MAGENTA}DEBUG${RESET} ==> ${*}"
fi
}
########################
# Indent a string
# Arguments:
# $1 - string
# $2 - number of indentation characters (default: 4)
# $3 - indentation character (default: " ")
# Returns:
# None
#########################
indent() {
local string="${1:-}"
local num="${2:?missing num}"
local char="${3:-" "}"
# Build the indentation unit string
local indent_unit=""
for ((i = 0; i < num; i++)); do
indent_unit="${indent_unit}${char}"
done
# shellcheck disable=SC2001
# Complex regex, see https://github.com/koalaman/shellcheck/wiki/SC2001#exceptions
echo "$string" | sed "s/^/${indent_unit}/"
}
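
A quick usage sketch; MODULE and BITNAMI_DEBUG are the knobs the functions above read, and everything is written to stderr unless BITNAMI_QUIET is truthy:
. /opt/bitnami/scripts/liblog.sh
export MODULE="solr" BITNAMI_DEBUG="true"
info "Starting configuration"
warn "Persisted data found, skipping initialization"
debug "Only visible because BITNAMI_DEBUG is set to a truthy value"
indent "$(printf 'line one\nline two')" 4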

View File

@@ -1,163 +0,0 @@
#!/bin/bash
#
# Library for network functions
# shellcheck disable=SC1091
# Load Generic Libraries
. /opt/bitnami/scripts/liblog.sh
# Functions
########################
# Resolve IP address for a host/domain (i.e. DNS lookup)
# Arguments:
# $1 - Hostname to resolve
# $2 - IP address version (v4, v6), leave empty for resolving to any version
# Returns:
# IP
#########################
dns_lookup() {
local host="${1:?host is missing}"
local ip_version="${2:-}"
getent "ahosts${ip_version}" "$host" | awk '/STREAM/ {print $1 }' | head -n 1
}
#########################
# Wait for a hostname and return the IP
# Arguments:
# $1 - hostname
# $2 - number of retries
# $3 - seconds to wait between retries
# Returns:
# - IP address that corresponds to the hostname
#########################
wait_for_dns_lookup() {
local hostname="${1:?hostname is missing}"
local retries="${2:-5}"
local seconds="${3:-1}"
check_host() {
if [[ $(dns_lookup "$hostname") == "" ]]; then
false
else
true
fi
}
# Wait for the host to be ready
retry_while "check_host ${hostname}" "$retries" "$seconds"
dns_lookup "$hostname"
}
########################
# Get machine's IP
# Arguments:
# None
# Returns:
# Machine IP
#########################
get_machine_ip() {
local -a ip_addresses
local hostname
hostname="$(hostname)"
read -r -a ip_addresses <<< "$(dns_lookup "$hostname" | xargs echo)"
if [[ "${#ip_addresses[@]}" -gt 1 ]]; then
warn "Found more than one IP address associated to hostname ${hostname}: ${ip_addresses[*]}, will use ${ip_addresses[0]}"
elif [[ "${#ip_addresses[@]}" -lt 1 ]]; then
error "Could not find any IP address associated to hostname ${hostname}"
exit 1
fi
echo "${ip_addresses[0]}"
}
########################
# Check if the provided argument is a resolved hostname
# Arguments:
# $1 - Value to check
# Returns:
# Boolean
#########################
is_hostname_resolved() {
local -r host="${1:?missing value}"
if [[ -n "$(dns_lookup "$host")" ]]; then
true
else
false
fi
}
########################
# Parse URL
# Globals:
# None
# Arguments:
# $1 - uri - String
# $2 - component to obtain. Valid options (scheme, authority, userinfo, host, port, path, query or fragment) - String
# Returns:
# String
#########################
parse_uri() {
local uri="${1:?uri is missing}"
local component="${2:?component is missing}"
# Solution based on https://tools.ietf.org/html/rfc3986#appendix-B with
# additional sub-expressions to split authority into userinfo, host and port
# Credits to Patryk Obara (see https://stackoverflow.com/a/45977232/6694969)
local -r URI_REGEX='^(([^:/?#]+):)?(//((([^@/?#]+)@)?([^:/?#]+)(:([0-9]+))?))?(/([^?#]*))?(\?([^#]*))?(#(.*))?'
# || | ||| | | | | | | | | |
# |2 scheme | ||6 userinfo 7 host | 9 port | 11 rpath | 13 query | 15 fragment
# 1 scheme: | |5 userinfo@ 8 :... 10 path 12 ?... 14 #...
# | 4 authority
# 3 //...
local index=0
case "$component" in
scheme)
index=2
;;
authority)
index=4
;;
userinfo)
index=6
;;
host)
index=7
;;
port)
index=9
;;
path)
index=10
;;
query)
index=13
;;
fragment)
index=14
;;
*)
stderr_print "unrecognized component $component"
return 1
;;
esac
[[ "$uri" =~ $URI_REGEX ]] && echo "${BASH_REMATCH[${index}]}"
}
########################
# Wait for a HTTP connection to succeed
# Globals:
# *
# Arguments:
# $1 - URL to wait for
# $2 - Maximum amount of retries (optional)
# $3 - Time between retries (optional)
# Returns:
# true if the HTTP connection succeeded, false otherwise
#########################
wait_for_http_connection() {
local url="${1:?missing url}"
local retries="${2:-}"
local sleep_time="${3:-}"
if ! retry_while "debug_execute curl --silent ${url}" "$retries" "$sleep_time"; then
error "Could not connect to ${url}"
return 1
fi
}
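
A usage sketch; the hostname and URL are illustrative. Note that wait_for_dns_lookup and wait_for_http_connection rely on retry_while/debug_execute from libos.sh, so that library needs to be loaded too:
. /opt/bitnami/scripts/libos.sh
. /opt/bitnami/scripts/libnet.sh
parse_uri "http://solr:8983/solr/admin" "host"   # -> solr
parse_uri "http://solr:8983/solr/admin" "port"   # -> 8983
is_hostname_resolved "localhost" && echo "resolvable"
get_machine_ip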

View File

@@ -1,466 +0,0 @@
#!/bin/bash
#
# Library for operating system actions
# shellcheck disable=SC1091
# Load Generic Libraries
. /opt/bitnami/scripts/liblog.sh
. /opt/bitnami/scripts/libfs.sh
. /opt/bitnami/scripts/libvalidations.sh
# Functions
########################
# Check if a user exists in the system
# Arguments:
# $1 - user
# Returns:
# Boolean
#########################
user_exists() {
local user="${1:?user is missing}"
id "$user" >/dev/null 2>&1
}
########################
# Check if a group exists in the system
# Arguments:
# $1 - group
# Returns:
# Boolean
#########################
group_exists() {
local group="${1:?group is missing}"
getent group "$group" >/dev/null 2>&1
}
########################
# Create a group in the system if it does not exist already
# Arguments:
# $1 - group
# Flags:
# -i|--gid - the ID for the new group
# -s|--system - Whether to create the new group as a system group (gid <= 999)
# Returns:
# None
#########################
ensure_group_exists() {
local group="${1:?group is missing}"
local gid=""
local is_system_user=false
# Validate arguments
shift 1
while [ "$#" -gt 0 ]; do
case "$1" in
-i | --gid)
shift
gid="${1:?missing gid}"
;;
-s | --system)
is_system_user=true
;;
*)
echo "Invalid command line flag $1" >&2
return 1
;;
esac
shift
done
if ! group_exists "$group"; then
local -a args=("$group")
if [[ -n "$gid" ]]; then
if group_exists "$gid"; then
error "The GID $gid is already in use." >&2
return 1
fi
args+=("--gid" "$gid")
fi
$is_system_user && args+=("--system")
groupadd "${args[@]}" >/dev/null 2>&1
fi
}
########################
# Create a user in the system if it does not exist already
# Arguments:
# $1 - user
# Flags:
# -i|--uid - the ID for the new user
# -g|--group - the group the new user should belong to
# -a|--append-groups - comma-separated list of supplemental groups to append to the new user
# -h|--home - the home directory for the new user
# -s|--system - whether to create new user as system user (uid <= 999)
# Returns:
# None
#########################
ensure_user_exists() {
local user="${1:?user is missing}"
local uid=""
local group=""
local append_groups=""
local home=""
local is_system_user=false
# Validate arguments
shift 1
while [ "$#" -gt 0 ]; do
case "$1" in
-i | --uid)
shift
uid="${1:?missing uid}"
;;
-g | --group)
shift
group="${1:?missing group}"
;;
-a | --append-groups)
shift
append_groups="${1:?missing append_groups}"
;;
-h | --home)
shift
home="${1:?missing home directory}"
;;
-s | --system)
is_system_user=true
;;
*)
echo "Invalid command line flag $1" >&2
return 1
;;
esac
shift
done
if ! user_exists "$user"; then
local -a user_args=("-N" "$user")
if [[ -n "$uid" ]]; then
if user_exists "$uid"; then
error "The UID $uid is already in use."
return 1
fi
user_args+=("--uid" "$uid")
else
$is_system_user && user_args+=("--system")
fi
useradd "${user_args[@]}" >/dev/null 2>&1
fi
if [[ -n "$group" ]]; then
local -a group_args=("$group")
$is_system_user && group_args+=("--system")
ensure_group_exists "${group_args[@]}"
usermod -g "$group" "$user" >/dev/null 2>&1
fi
if [[ -n "$append_groups" ]]; then
local -a groups
read -ra groups <<<"$(tr ',;' ' ' <<<"$append_groups")"
for group in "${groups[@]}"; do
ensure_group_exists "$group"
usermod -aG "$group" "$user" >/dev/null 2>&1
done
fi
if [[ -n "$home" ]]; then
mkdir -p "$home"
usermod -d "$home" "$user" >/dev/null 2>&1
configure_permissions_ownership "$home" -d "775" -f "664" -u "$user" -g "$group"
fi
}
########################
# Check if the script is currently running as root
# Arguments:
# None
# Returns:
# Boolean
#########################
am_i_root() {
if [[ "$(id -u)" = "0" ]]; then
true
else
false
fi
}
########################
# Print OS metadata
# Arguments:
# $1 - Flag name
# Flags:
# --id - Distro ID
# --version - Distro version
# --branch - Distro branch
# --codename - Distro codename
# Returns:
# String
#########################
get_os_metadata() {
local -r flag_name="${1:?missing flag}"
# Helper function
get_os_release_metadata() {
local -r env_name="${1:?missing environment variable name}"
(
. /etc/os-release
echo "${!env_name}"
)
}
case "$flag_name" in
--id)
get_os_release_metadata ID
;;
--version)
get_os_release_metadata VERSION_ID
;;
--branch)
get_os_release_metadata VERSION_ID | sed 's/\..*//'
;;
--codename)
get_os_release_metadata VERSION_CODENAME
;;
*)
error "Unknown flag ${flag_name}"
return 1
;;
esac
}
########################
# Get total memory available
# Arguments:
# None
# Returns:
# Memory in megabytes
#########################
get_total_memory() {
echo $(($(grep MemTotal /proc/meminfo | awk '{print $2}') / 1024))
}
########################
# Get machine size depending on specified memory
# Globals:
# None
# Arguments:
# None
# Flags:
# --memory - memory size (optional)
# Returns:
# Detected instance size
#########################
get_machine_size() {
local memory=""
# Validate arguments
while [[ "$#" -gt 0 ]]; do
case "$1" in
--memory)
shift
memory="${1:?missing memory}"
;;
*)
echo "Invalid command line flag $1" >&2
return 1
;;
esac
shift
done
if [[ -z "$memory" ]]; then
debug "Memory was not specified, detecting available memory automatically"
memory="$(get_total_memory)"
fi
sanitized_memory=$(convert_to_mb "$memory")
if [[ "$sanitized_memory" -gt 26000 ]]; then
echo 2xlarge
elif [[ "$sanitized_memory" -gt 13000 ]]; then
echo xlarge
elif [[ "$sanitized_memory" -gt 6000 ]]; then
echo large
elif [[ "$sanitized_memory" -gt 3000 ]]; then
echo medium
elif [[ "$sanitized_memory" -gt 1500 ]]; then
echo small
else
echo micro
fi
}
########################
# Print the list of supported machine sizes
# Globals:
# None
# Arguments:
# None
# Returns:
# Supported machine sizes
#########################
get_supported_machine_sizes() {
echo micro small medium large xlarge 2xlarge
}
########################
# Convert memory size from string to amount of megabytes (i.e. 2G -> 2048)
# Globals:
# None
# Arguments:
# $1 - memory size
# Returns:
# Result of the conversion
#########################
convert_to_mb() {
local amount="${1:-}"
if [[ $amount =~ ^([0-9]+)(m|M|g|G) ]]; then
size="${BASH_REMATCH[1]}"
unit="${BASH_REMATCH[2]}"
if [[ "$unit" = "g" || "$unit" = "G" ]]; then
amount="$((size * 1024))"
else
amount="$size"
fi
fi
echo "$amount"
}
#########################
# Redirects output to /dev/null if debug mode is disabled
# Globals:
# BITNAMI_DEBUG
# Arguments:
# $@ - Command to execute
# Returns:
# None
#########################
debug_execute() {
if is_boolean_yes "${BITNAMI_DEBUG:-false}"; then
"$@"
else
"$@" >/dev/null 2>&1
fi
}
########################
# Retries a command a given number of times
# Arguments:
# $1 - cmd (as a string)
# $2 - max retries. Default: 12
# $3 - sleep between retries (in seconds). Default: 5
# Returns:
# Boolean
#########################
retry_while() {
local cmd="${1:?cmd is missing}"
local retries="${2:-12}"
local sleep_time="${3:-5}"
local return_value=1
read -r -a command <<<"$cmd"
for ((i = 1; i <= retries; i += 1)); do
"${command[@]}" && return_value=0 && break
sleep "$sleep_time"
done
return $return_value
}
########################
# Generate a random string
# Arguments:
# None
# Flags:
# -t|--type - String type (ascii, alphanumeric, numeric), defaults to ascii
# -c|--count - Number of characters, defaults to 32
# Returns:
# String
#########################
generate_random_string() {
local type="ascii"
local count="32"
local filter
local result
# Validate arguments
while [[ "$#" -gt 0 ]]; do
case "$1" in
-t | --type)
shift
type="$1"
;;
-c | --count)
shift
count="$1"
;;
*)
echo "Invalid command line flag $1" >&2
return 1
;;
esac
shift
done
# Validate type
case "$type" in
ascii)
filter="[:print:]"
;;
alphanumeric)
filter="a-zA-Z0-9"
;;
numeric)
filter="0-9"
;;
*)
echo "Invalid type ${type}" >&2
return 1
;;
esac
# Obtain count + 10 lines from /dev/urandom to ensure that the resulting string has the expected size
# Note there is a very small chance of a line starting with an EOL character
# Therefore, the more lines that are read, the less frequently this will happen
result="$(head -n "$((count + 10))" /dev/urandom | tr -dc "$filter" | head -c "$count")"
echo "$result"
}
########################
# Create md5 hash from a string
# Arguments:
# $1 - string
# Returns:
# md5 hash - string
#########################
generate_md5_hash() {
local -r str="${1:?missing input string}"
echo -n "$str" | md5sum | awk '{print $1}'
}
########################
# Create SHA hash from a string
# Arguments:
# $1 - string
# $2 - algorithm - 1 (default), 224, 256, 384, 512
# Returns:
# sha hash - string
#########################
generate_sha_hash() {
local -r str="${1:?missing input string}"
local -r algorithm="${2:-1}"
echo -n "$str" | "sha${algorithm}sum" | awk '{print $1}'
}
########################
# Converts a string to its hexadecimal representation
# Arguments:
# $1 - string
# Returns:
# hexadecimal representation of the string
#########################
convert_to_hex() {
local -r str=${1:?missing input string}
local -i iterator
local char
for ((iterator = 0; iterator < ${#str}; iterator++)); do
char=${str:iterator:1}
printf '%x' "'${char}"
done
}
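
A few representative calls; the Solr URL is illustrative, and retry_while word-splits its first argument, so the command must not need shell quoting:
. /opt/bitnami/scripts/libos.sh
am_i_root && echo "running as root"
echo "Machine size: $(get_machine_size)"              # micro/small/.../2xlarge from detected memory
convert_to_mb "2G"                                    # -> 2048
password="$(generate_random_string -t alphanumeric -c 16)"
retry_while "curl --silent --fail http://localhost:8983/solr/" 6 5 \
    || error "Solr did not answer after 6 attempts"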

View File

@@ -1,122 +0,0 @@
#!/bin/bash
#
# Bitnami persistence library
# Used for bringing persistence capabilities to applications that don't have clear separation of data and logic
# shellcheck disable=SC1091
# Load Generic Libraries
. /opt/bitnami/scripts/libfs.sh
. /opt/bitnami/scripts/libos.sh
. /opt/bitnami/scripts/liblog.sh
. /opt/bitnami/scripts/libversion.sh
# Functions
########################
# Persist an application directory
# Globals:
# BITNAMI_ROOT_DIR
# BITNAMI_VOLUME_DIR
# Arguments:
# $1 - App folder name
# $2 - List of app files to persist
# Returns:
# true if all steps succeeded, false otherwise
#########################
persist_app() {
local -r app="${1:?missing app}"
local -a files_to_persist
read -r -a files_to_persist <<< "$(tr ',;:' ' ' <<< "$2")"
local -r install_dir="${BITNAMI_ROOT_DIR}/${app}"
local -r persist_dir="${BITNAMI_VOLUME_DIR}/${app}"
# Persist the individual files
if [[ "${#files_to_persist[@]}" -le 0 ]]; then
warn "No files are configured to be persisted"
return
fi
pushd "$install_dir" >/dev/null || exit
local file_to_persist_relative file_to_persist_destination file_to_persist_destination_folder
local -r tmp_file="/tmp/perms.acl"
for file_to_persist in "${files_to_persist[@]}"; do
if [[ ! -f "$file_to_persist" && ! -d "$file_to_persist" ]]; then
error "Cannot persist '${file_to_persist}' because it does not exist"
return 1
fi
file_to_persist_relative="$(relativize "$file_to_persist" "$install_dir")"
file_to_persist_destination="${persist_dir}/${file_to_persist_relative}"
file_to_persist_destination_folder="$(dirname "$file_to_persist_destination")"
# Get original permissions for existing files, which will be applied later
# Exclude the root directory with 'sed', to avoid issues when copying the entirety of it to a volume
getfacl -R "$file_to_persist_relative" | sed -E '/# file: (\..+|[^.])/,$!d' > "$tmp_file"
# Copy directories to the volume
ensure_dir_exists "$file_to_persist_destination_folder"
cp -Lr --preserve=links "$file_to_persist_relative" "$file_to_persist_destination_folder"
# Restore permissions
pushd "$persist_dir" >/dev/null || exit
if am_i_root; then
setfacl --restore="$tmp_file"
else
# When running as non-root, don't change ownership
setfacl --restore=<(grep -E -v '^# (owner|group):' "$tmp_file")
fi
popd >/dev/null || exit
done
popd >/dev/null || exit
rm -f "$tmp_file"
# Install the persisted files into the installation directory, via symlinks
restore_persisted_app "$@"
}
########################
# Restore a persisted application directory
# Globals:
# BITNAMI_ROOT_DIR
# BITNAMI_VOLUME_DIR
# FORCE_MAJOR_UPGRADE
# Arguments:
# $1 - App folder name
# $2 - List of app files to restore
# Returns:
# true if all steps succeeded, false otherwise
#########################
restore_persisted_app() {
local -r app="${1:?missing app}"
local -a files_to_restore
read -r -a files_to_restore <<< "$(tr ',;:' ' ' <<< "$2")"
local -r install_dir="${BITNAMI_ROOT_DIR}/${app}"
local -r persist_dir="${BITNAMI_VOLUME_DIR}/${app}"
# Restore the individual persisted files
if [[ "${#files_to_restore[@]}" -le 0 ]]; then
warn "No persisted files are configured to be restored"
return
fi
local file_to_restore_relative file_to_restore_origin file_to_restore_destination
for file_to_restore in "${files_to_restore[@]}"; do
file_to_restore_relative="$(relativize "$file_to_restore" "$install_dir")"
# We use 'realpath --no-symlinks' to ensure that the case of '.' is covered and the directory is removed
file_to_restore_origin="$(realpath --no-symlinks "${install_dir}/${file_to_restore_relative}")"
file_to_restore_destination="$(realpath --no-symlinks "${persist_dir}/${file_to_restore_relative}")"
rm -rf "$file_to_restore_origin"
ln -sfn "$file_to_restore_destination" "$file_to_restore_origin"
done
}
########################
# Check if an application directory was already persisted
# Globals:
# BITNAMI_VOLUME_DIR
# Arguments:
# $1 - App folder name
# Returns:
# true if the application directory was already persisted, false otherwise
#########################
is_app_initialized() {
local -r app="${1:?missing app}"
local -r persist_dir="${BITNAMI_VOLUME_DIR}/${app}"
if ! is_mounted_dir_empty "$persist_dir"; then
true
else
false
fi
}
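
A hedged sketch of the intended flow; BITNAMI_ROOT_DIR and BITNAMI_VOLUME_DIR are normally set by the image's environment scripts, and "server/solr" is an illustrative list of paths to persist:
. /opt/bitnami/scripts/libpersistence.sh
export BITNAMI_ROOT_DIR="/opt/bitnami" BITNAMI_VOLUME_DIR="/bitnami"
if is_app_initialized "solr"; then
    # The volume already holds data: just re-create the symlinks into /opt/bitnami/solr
    restore_persisted_app "solr" "server/solr"
else
    # First boot: copy the listed paths to the volume and symlink them back
    persist_app "solr" "server/solr"
fi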

View File

@@ -1,273 +0,0 @@
#!/bin/bash
#
# Library for managing services
# shellcheck disable=SC1091
# Load Generic Libraries
. /opt/bitnami/scripts/libvalidations.sh
. /opt/bitnami/scripts/liblog.sh
# Functions
########################
# Read the provided pid file and returns a PID
# Arguments:
# $1 - Pid file
# Returns:
# PID
#########################
get_pid_from_file() {
local pid_file="${1:?pid file is missing}"
if [[ -f "$pid_file" ]]; then
if [[ -n "$(< "$pid_file")" ]] && [[ "$(< "$pid_file")" -gt 0 ]]; then
echo "$(< "$pid_file")"
fi
fi
}
########################
# Check if a provided PID corresponds to a running service
# Arguments:
# $1 - PID
# Returns:
# Boolean
#########################
is_service_running() {
local pid="${1:?pid is missing}"
kill -0 "$pid" 2>/dev/null
}
########################
# Stop a service by sending a termination signal to its pid
# Arguments:
# $1 - Pid file
# $2 - Signal number (optional)
# Returns:
# None
#########################
stop_service_using_pid() {
local pid_file="${1:?pid file is missing}"
local signal="${2:-}"
local pid
pid="$(get_pid_from_file "$pid_file")"
[[ -z "$pid" ]] || ! is_service_running "$pid" && return
if [[ -n "$signal" ]]; then
kill "-${signal}" "$pid"
else
kill "$pid"
fi
local counter=10
while [[ "$counter" -ne 0 ]] && is_service_running "$pid"; do
sleep 1
counter=$((counter - 1))
done
}
########################
# Start cron daemon
# Arguments:
# None
# Returns:
# true if started correctly, false otherwise
#########################
cron_start() {
if [[ -x "/usr/sbin/cron" ]]; then
/usr/sbin/cron
elif [[ -x "/usr/sbin/crond" ]]; then
/usr/sbin/crond
else
false
fi
}
########################
# Generate a cron configuration file for a given service
# Arguments:
# $1 - Service name
# $2 - Command
# Flags:
# --run-as - User to run as (default: root)
# --schedule - Cron schedule configuration (default: * * * * *)
# Returns:
# None
#########################
generate_cron_conf() {
local service_name="${1:?service name is missing}"
local cmd="${2:?command is missing}"
local run_as="root"
local schedule="* * * * *"
local clean="true"
# Parse optional CLI flags
shift 2
while [[ "$#" -gt 0 ]]; do
case "$1" in
--run-as)
shift
run_as="$1"
;;
--schedule)
shift
schedule="$1"
;;
--no-clean)
clean="false"
;;
*)
echo "Invalid command line flag ${1}" >&2
return 1
;;
esac
shift
done
mkdir -p /etc/cron.d
if "$clean"; then
echo "${schedule} ${run_as} ${cmd}" > /etc/cron.d/"$service_name"
else
echo "${schedule} ${run_as} ${cmd}" >> /etc/cron.d/"$service_name"
fi
}
########################
# Remove a cron configuration file for a given service
# Arguments:
# $1 - Service name
# Returns:
# None
#########################
remove_cron_conf() {
local service_name="${1:?service name is missing}"
local cron_conf_dir="/etc/cron.d"
rm -f "${cron_conf_dir}/${service_name}"
}
########################
# Generate a monit configuration file for a given service
# Arguments:
# $1 - Service name
# $2 - Pid file
# $3 - Start command
# $4 - Stop command
# Flags:
# --disable - Whether to disable the monit configuration
# Returns:
# None
#########################
generate_monit_conf() {
local service_name="${1:?service name is missing}"
local pid_file="${2:?pid file is missing}"
local start_command="${3:?start command is missing}"
local stop_command="${4:?stop command is missing}"
local monit_conf_dir="/etc/monit/conf.d"
local disabled="no"
# Parse optional CLI flags
shift 4
while [[ "$#" -gt 0 ]]; do
case "$1" in
--disable)
disabled="yes"
;;
*)
echo "Invalid command line flag ${1}" >&2
return 1
;;
esac
shift
done
is_boolean_yes "$disabled" && conf_suffix=".disabled"
mkdir -p "$monit_conf_dir"
cat >"${monit_conf_dir}/${service_name}.conf${conf_suffix:-}" <<EOF
check process ${service_name}
with pidfile "${pid_file}"
start program = "${start_command}" with timeout 90 seconds
stop program = "${stop_command}" with timeout 90 seconds
EOF
}
########################
# Remove a monit configuration file for a given service
# Arguments:
# $1 - Service name
# Returns:
# None
#########################
remove_monit_conf() {
local service_name="${1:?service name is missing}"
local monit_conf_dir="/etc/monit/conf.d"
rm -f "${monit_conf_dir}/${service_name}.conf"
}
########################
# Generate a logrotate configuration file
# Arguments:
# $1 - Service name
# $2 - Log files pattern
# Flags:
# --period - Period
# --rotations - Number of rotations to store
# --extra - Extra options (Optional)
# Returns:
# None
#########################
generate_logrotate_conf() {
local service_name="${1:?service name is missing}"
local log_path="${2:?log path is missing}"
local period="weekly"
local rotations="150"
local extra=""
local logrotate_conf_dir="/etc/logrotate.d"
local var_name
# Parse optional CLI flags
shift 2
while [[ "$#" -gt 0 ]]; do
case "$1" in
--period|--rotations|--extra)
var_name="$(echo "$1" | sed -e "s/^--//" -e "s/-/_/g")"
shift
declare "$var_name"="${1:?"$var_name" is missing}"
;;
*)
echo "Invalid command line flag ${1}" >&2
return 1
;;
esac
shift
done
mkdir -p "$logrotate_conf_dir"
cat <<EOF | sed '/^\s*$/d' >"${logrotate_conf_dir}/${service_name}"
${log_path} {
${period}
rotate ${rotations}
dateext
compress
copytruncate
missingok
$(indent "$extra" 2)
}
EOF
}
########################
# Remove a logrotate configuration file
# Arguments:
# $1 - Service name
# Returns:
# None
#########################
remove_logrotate_conf() {
local service_name="${1:?service name is missing}"
local logrotate_conf_dir="/etc/logrotate.d"
rm -f "${logrotate_conf_dir}/${service_name}"
}
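
A usage sketch (must run as root, since it writes under /etc); the script path, pid file and schedule are illustrative:
. /opt/bitnami/scripts/libservice.sh
generate_logrotate_conf "solr" "/opt/bitnami/solr/logs/*.log" --period daily --rotations 7
generate_cron_conf "solr-backup" "/opt/bitnami/scripts/solr-backup.sh" \
    --run-as solr --schedule "0 3 * * *"
generate_monit_conf "solr" "/opt/bitnami/solr/tmp/solr.pid" \
    "/opt/bitnami/scripts/solr/start.sh" "/opt/bitnami/scripts/solr/stop.sh"
remove_cron_conf "solr-backup"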

View File

@@ -1,264 +0,0 @@
#!/bin/bash
#
# Validation functions library
# shellcheck disable=SC1091
# Load Generic Libraries
. /opt/bitnami/scripts/liblog.sh
# Functions
########################
# Check if the provided argument is an integer
# Arguments:
# $1 - Value to check
# Returns:
# Boolean
#########################
is_int() {
local -r int="${1:?missing value}"
if [[ "$int" =~ ^-?[0-9]+ ]]; then
true
else
false
fi
}
########################
# Check if the provided argument is a positive integer
# Arguments:
# $1 - Value to check
# Returns:
# Boolean
#########################
is_positive_int() {
local -r int="${1:?missing value}"
if is_int "$int" && (( "${int}" >= 0 )); then
true
else
false
fi
}
########################
# Check if the provided argument is a boolean or is the string 'yes/true'
# Arguments:
# $1 - Value to check
# Returns:
# Boolean
#########################
is_boolean_yes() {
local -r bool="${1:-}"
# comparison is performed without regard to the case of alphabetic characters
shopt -s nocasematch
if [[ "$bool" = 1 || "$bool" =~ ^(yes|true)$ ]]; then
true
else
false
fi
}
########################
# Check if the provided argument is a boolean yes/no value
# Arguments:
# $1 - Value to check
# Returns:
# Boolean
#########################
is_yes_no_value() {
local -r bool="${1:-}"
if [[ "$bool" =~ ^(yes|no)$ ]]; then
true
else
false
fi
}
########################
# Check if the provided argument is a boolean true/false value
# Arguments:
# $1 - Value to check
# Returns:
# Boolean
#########################
is_true_false_value() {
local -r bool="${1:-}"
if [[ "$bool" =~ ^(true|false)$ ]]; then
true
else
false
fi
}
########################
# Check if the provided argument is a boolean 1/0 value
# Arguments:
# $1 - Value to check
# Returns:
# Boolean
#########################
is_1_0_value() {
local -r bool="${1:-}"
if [[ "$bool" =~ ^[10]$ ]]; then
true
else
false
fi
}
########################
# Check if the provided argument is an empty string or not defined
# Arguments:
# $1 - Value to check
# Returns:
# Boolean
#########################
is_empty_value() {
local -r val="${1:-}"
if [[ -z "$val" ]]; then
true
else
false
fi
}
########################
# Validate if the provided argument is a valid port
# Arguments:
# $1 - Port to validate
# Returns:
# Boolean and error message
#########################
validate_port() {
local value
local unprivileged=0
# Parse flags
while [[ "$#" -gt 0 ]]; do
case "$1" in
-unprivileged)
unprivileged=1
;;
--)
shift
break
;;
-*)
stderr_print "unrecognized flag $1"
return 1
;;
*)
break
;;
esac
shift
done
if [[ "$#" -gt 1 ]]; then
echo "too many arguments provided"
return 2
elif [[ "$#" -eq 0 ]]; then
stderr_print "missing port argument"
return 1
else
value=$1
fi
if [[ -z "$value" ]]; then
echo "the value is empty"
return 1
else
if ! is_int "$value"; then
echo "value is not an integer"
return 2
elif [[ "$value" -lt 0 ]]; then
echo "negative value provided"
return 2
elif [[ "$value" -gt 65535 ]]; then
echo "requested port is greater than 65535"
return 2
elif [[ "$unprivileged" = 1 && "$value" -lt 1024 ]]; then
echo "privileged port requested"
return 3
fi
fi
}
########################
# Validate if the provided argument is a valid IPv4 address
# Arguments:
# $1 - IP to validate
# Returns:
# Boolean
#########################
validate_ipv4() {
local ip="${1:?ip is missing}"
local stat=1
if [[ $ip =~ ^[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}$ ]]; then
read -r -a ip_array <<< "$(tr '.' ' ' <<< "$ip")"
[[ ${ip_array[0]} -le 255 && ${ip_array[1]} -le 255 \
&& ${ip_array[2]} -le 255 && ${ip_array[3]} -le 255 ]]
stat=$?
fi
return $stat
}
########################
# Validate a string format
# Arguments:
# $1 - String to validate
# Returns:
# Boolean
#########################
validate_string() {
local string
local min_length=-1
local max_length=-1
# Parse flags
while [ "$#" -gt 0 ]; do
case "$1" in
-min-length)
shift
min_length=${1:-}
;;
-max-length)
shift
max_length=${1:-}
;;
--)
shift
break
;;
-*)
stderr_print "unrecognized flag $1"
return 1
;;
*)
break
;;
esac
shift
done
if [ "$#" -gt 1 ]; then
stderr_print "too many arguments provided"
return 2
elif [ "$#" -eq 0 ]; then
stderr_print "missing string"
return 1
else
string=$1
fi
if [[ "$min_length" -ge 0 ]] && [[ "${#string}" -lt "$min_length" ]]; then
echo "string length is less than $min_length"
return 1
fi
if [[ "$max_length" -ge 0 ]] && [[ "${#string}" -gt "$max_length" ]]; then
echo "string length is great than $max_length"
return 1
fi
}
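
A few representative checks; validate_port and validate_string print an error message and return a non-zero code when validation fails:
. /opt/bitnami/scripts/libvalidations.sh
is_boolean_yes "${SOLR_ENABLE_AUTHENTICATION:-no}" && echo "authentication enabled"
validate_ipv4 "192.168.1.10" && echo "valid IPv4 address"
if ! err="$(validate_port -unprivileged "8983")"; then
    error "Invalid port: $err"
fi
if ! err="$(validate_string -min-length 8 -- "${SOLR_ADMIN_PASSWORD:-}")"; then
    error "Weak password: $err"
fi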

View File

@@ -1,49 +0,0 @@
#!/bin/bash
#
# Library for managing versions strings
# shellcheck disable=SC1091
# Load Generic Libraries
. /opt/bitnami/scripts/liblog.sh
# Functions
########################
# Gets semantic version
# Arguments:
# $1 - version: string to extract major.minor.patch
# $2 - section: 1 to extract major, 2 to extract minor, 3 to extract patch
# Returns:
# the requested version section (major, minor or patch)
#########################
get_sematic_version () {
local version="${1:?version is required}"
local section="${2:?section is required}"
local -a version_sections
#Regex to parse versions: x.y.z
local -r regex='([0-9]+)(\.([0-9]+)(\.([0-9]+))?)?'
if [[ "$version" =~ $regex ]]; then
local i=1
local j=1
local n=${#BASH_REMATCH[*]}
while [[ $i -lt $n ]]; do
if [[ -n "${BASH_REMATCH[$i]}" ]] && [[ "${BASH_REMATCH[$i]:0:1}" != '.' ]]; then
version_sections[$j]=${BASH_REMATCH[$i]}
((j++))
fi
((i++))
done
local number_regex='^[0-9]+$'
if [[ "$section" =~ $number_regex ]] && (( section > 0 )) && (( section <= 3 )); then
echo "${version_sections[$section]}"
return
else
stderr_print "Section allowed values are: 1, 2, and 3"
return 1
fi
fi
}
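
A usage sketch with the Solr version shipped by this image (note the function name keeps the upstream 'sematic' spelling):
. /opt/bitnami/scripts/libversion.sh
get_sematic_version "8.11.1" 1   # -> 8  (major)
get_sematic_version "8.11.1" 2   # -> 11 (minor)
get_sematic_version "8.11.1" 3   # -> 1  (patch)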

View File

@@ -1,458 +0,0 @@
#!/bin/bash
#
# Bitnami web server handler library
# shellcheck disable=SC1090,SC1091
# Load generic libraries
. /opt/bitnami/scripts/liblog.sh
########################
# Execute a command (or list of commands) with the web server environment and library loaded
# Globals:
# *
# Arguments:
# None
# Returns:
# None
#########################
web_server_execute() {
local -r web_server="${1:?missing web server}"
shift
# Run program in sub-shell to avoid web server environment getting loaded when not necessary
(
. "/opt/bitnami/scripts/lib${web_server}.sh"
. "/opt/bitnami/scripts/${web_server}-env.sh"
"$@"
)
}
########################
# Prints the list of enabled web servers
# Globals:
# None
# Arguments:
# None
# Returns:
# None
#########################
web_server_list() {
local -r -a supported_web_servers=(apache nginx)
local -a existing_web_servers=()
for web_server in "${supported_web_servers[@]}"; do
[[ -f "/opt/bitnami/scripts/${web_server}-env.sh" ]] && existing_web_servers+=("$web_server")
done
echo "${existing_web_servers[@]:-}"
}
########################
# Prints the currently-enabled web server type (only one, in order of preference)
# Globals:
# None
# Arguments:
# None
# Returns:
# None
#########################
web_server_type() {
local -a web_servers
read -r -a web_servers <<< "$(web_server_list)"
echo "${web_servers[0]:-}"
}
########################
# Validate that a supported web server is configured
# Globals:
# None
# Arguments:
# None
# Returns:
# None
#########################
web_server_validate() {
local error_code=0
local supported_web_servers=("apache" "nginx")
# Auxiliary functions
print_validation_error() {
error "$1"
error_code=1
}
if [[ -z "$(web_server_type)" || ! " ${supported_web_servers[*]} " == *" $(web_server_type) "* ]]; then
print_validation_error "Could not detect any supported web servers. It must be one of: ${supported_web_servers[*]}"
elif ! web_server_execute "$(web_server_type)" type -t "is_$(web_server_type)_running" >/dev/null; then
print_validation_error "Could not load the $(web_server_type) web server library from /opt/bitnami/scripts. Check that it exists and is readable."
fi
return "$error_code"
}
########################
# Check whether the web server is running
# Globals:
# *
# Arguments:
# None
# Returns:
# true if the web server is running, false otherwise
#########################
is_web_server_running() {
"is_$(web_server_type)_running"
}
########################
# Start web server
# Globals:
# *
# Arguments:
# None
# Returns:
# None
#########################
web_server_start() {
info "Starting $(web_server_type) in background"
"${BITNAMI_ROOT_DIR}/scripts/$(web_server_type)/start.sh"
}
########################
# Stop web server
# Globals:
# *
# Arguments:
# None
# Returns:
# None
#########################
web_server_stop() {
info "Stopping $(web_server_type)"
"${BITNAMI_ROOT_DIR}/scripts/$(web_server_type)/stop.sh"
}
########################
# Restart web server
# Globals:
# *
# Arguments:
# None
# Returns:
# None
#########################
web_server_restart() {
info "Restarting $(web_server_type)"
"${BITNAMI_ROOT_DIR}/scripts/$(web_server_type)/restart.sh"
}
########################
# Reload web server
# Globals:
# *
# Arguments:
# None
# Returns:
# None
#########################
web_server_reload() {
"${BITNAMI_ROOT_DIR}/scripts/$(web_server_type)/reload.sh"
}
########################
# Ensure a web server application configuration exists (i.e. Apache virtual host format or NGINX server block)
# It serves as a wrapper for the specific web server function
# Globals:
# *
# Arguments:
# $1 - App name
# Flags:
# --type - Application type, which has an effect on which configuration template to use
# --hosts - Host listen addresses
# --server-name - Server name
# --server-aliases - Server aliases
# --allow-remote-connections - Whether to allow remote connections or to require local connections
# --disable - Whether to render server configurations with a .disabled prefix
# --disable-http - Whether to render the app's HTTP server configuration with a .disabled prefix
# --disable-https - Whether to render the app's HTTPS server configuration with a .disabled prefix
# --http-port - HTTP port number
# --https-port - HTTPS port number
# --document-root - Path to document root directory
# Apache-specific flags:
# --apache-additional-configuration - Additional vhost configuration (no default)
# --apache-additional-http-configuration - Additional HTTP vhost configuration (no default)
# --apache-additional-https-configuration - Additional HTTPS vhost configuration (no default)
# --apache-before-vhost-configuration - Configuration to add before the <VirtualHost> directive (no default)
# --apache-allow-override - Whether to allow .htaccess files (only allowed when --move-htaccess is set to 'no' and type is not defined)
# --apache-extra-directory-configuration - Extra configuration for the document root directory
# --apache-proxy-address - Address where to proxy requests
# --apache-proxy-configuration - Extra configuration for the proxy
# --apache-proxy-http-configuration - Extra configuration for the proxy HTTP vhost
# --apache-proxy-https-configuration - Extra configuration for the proxy HTTPS vhost
# --apache-move-htaccess - Move .htaccess files to a common place so they can be loaded during Apache startup (only allowed when type is not defined)
# NGINX-specific flags:
# --nginx-additional-configuration - Additional server block configuration (no default)
# --nginx-external-configuration - Configuration external to server block (no default)
# Returns:
# true if the configuration was enabled, false otherwise
########################
ensure_web_server_app_configuration_exists() {
local app="${1:?missing app}"
shift
local -a apache_args nginx_args web_servers args_var
apache_args=("$app")
nginx_args=("$app")
# Validate arguments
while [[ "$#" -gt 0 ]]; do
case "$1" in
# Common flags
--disable \
| --disable-http \
| --disable-https \
)
apache_args+=("$1")
nginx_args+=("$1")
;;
--hosts \
| --server-name \
| --server-aliases \
| --type \
| --allow-remote-connections \
| --http-port \
| --https-port \
| --document-root \
)
apache_args+=("$1" "${2:?missing value}")
nginx_args+=("$1" "${2:?missing value}")
shift
;;
# Specific Apache flags
--apache-additional-configuration \
| --apache-additional-http-configuration \
| --apache-additional-https-configuration \
| --apache-before-vhost-configuration \
| --apache-allow-override \
| --apache-extra-directory-configuration \
| --apache-proxy-address \
| --apache-proxy-configuration \
| --apache-proxy-http-configuration \
| --apache-proxy-https-configuration \
| --apache-move-htaccess \
)
apache_args+=("${1//apache-/}" "${2:?missing value}")
shift
;;
# Specific NGINX flags
--nginx-additional-configuration \
| --nginx-external-configuration)
nginx_args+=("${1//nginx-/}" "${2:?missing value}")
shift
;;
*)
echo "Invalid command line flag $1" >&2
return 1
;;
esac
shift
done
read -r -a web_servers <<< "$(web_server_list)"
for web_server in "${web_servers[@]}"; do
args_var="${web_server}_args[@]"
web_server_execute "$web_server" "ensure_${web_server}_app_configuration_exists" "${!args_var}"
done
}
########################
# Ensure a web server application configuration does not exist anymore (i.e. Apache virtual host format or NGINX server block)
# It serves as a wrapper for the specific web server function
# Globals:
# *
# Arguments:
# $1 - App name
# Returns:
# true if the configuration was disabled, false otherwise
########################
ensure_web_server_app_configuration_not_exists() {
local app="${1:?missing app}"
local -a web_servers
read -r -a web_servers <<< "$(web_server_list)"
for web_server in "${web_servers[@]}"; do
web_server_execute "$web_server" "ensure_${web_server}_app_configuration_not_exists" "$app"
done
}
########################
# Ensure the web server loads the configuration for an application in a URL prefix
# It serves as a wrapper for the specific web server function
# Globals:
# *
# Arguments:
# $1 - App name
# Flags:
# --allow-remote-connections - Whether to allow remote connections or to require local connections
# --document-root - Path to document root directory
# --prefix - URL prefix from where it will be accessible (i.e. /myapp)
# --type - Application type, which has an effect on what configuration template will be used
# Apache-specific flags:
# --apache-additional-configuration - Additional vhost configuration (no default)
# --apache-allow-override - Whether to allow .htaccess files (only allowed when --move-htaccess is set to 'no')
# --apache-extra-directory-configuration - Extra configuration for the document root directory
# --apache-move-htaccess - Move .htaccess files to a common place so they can be loaded during Apache startup
# NGINX-specific flags:
# --nginx-additional-configuration - Additional server block configuration (no default)
# Returns:
# true if the configuration was enabled, false otherwise
########################
ensure_web_server_prefix_configuration_exists() {
local app="${1:?missing app}"
shift
local -a apache_args nginx_args web_servers args_var
apache_args=("$app")
nginx_args=("$app")
# Validate arguments
while [[ "$#" -gt 0 ]]; do
case "$1" in
# Common flags
--allow-remote-connections \
| --document-root \
| --prefix \
| --type \
)
apache_args+=("$1" "${2:?missing value}")
nginx_args+=("$1" "${2:?missing value}")
shift
;;
# Specific Apache flags
--apache-additional-configuration \
| --apache-allow-override \
| --apache-extra-directory-configuration \
| --apache-move-htaccess \
)
apache_args+=("${1//apache-/}" "$2")
shift
;;
# Specific NGINX flags
--nginx-additional-configuration)
nginx_args+=("${1//nginx-/}" "$2")
shift
;;
*)
echo "Invalid command line flag $1" >&2
return 1
;;
esac
shift
done
read -r -a web_servers <<< "$(web_server_list)"
for web_server in "${web_servers[@]}"; do
args_var="${web_server}_args[@]"
web_server_execute "$web_server" "ensure_${web_server}_prefix_configuration_exists" "${!args_var}"
done
}
########################
# Ensure a web server application configuration is updated with the runtime configuration (i.e. ports)
# It serves as a wrapper for the specific web server function
# Globals:
# *
# Arguments:
# $1 - App name
# Flags:
# --hosts - Host listen addresses
# --server-name - Server name
# --server-aliases - Server aliases
# --enable-http - Enable HTTP app configuration (if not enabled already)
# --enable-https - Enable HTTPS app configuration (if not enabled already)
# --disable-http - Disable HTTP app configuration (if not disabled already)
# --disable-https - Disable HTTPS app configuration (if not disabled already)
# --http-port - HTTP port number
# --https-port - HTTPS port number
# Returns:
# true if the configuration was updated, false otherwise
########################
web_server_update_app_configuration() {
local app="${1:?missing app}"
shift
local -a args web_servers
args=("$app")
# Validate arguments
while [[ "$#" -gt 0 ]]; do
case "$1" in
# Common flags
--enable-http \
| --enable-https \
| --disable-http \
| --disable-https \
)
args+=("$1")
;;
--hosts \
| --server-name \
| --server-aliases \
| --http-port \
| --https-port \
)
args+=("$1" "${2:?missing value}")
shift
;;
*)
echo "Invalid command line flag $1" >&2
return 1
;;
esac
shift
done
read -r -a web_servers <<< "$(web_server_list)"
for web_server in "${web_servers[@]}"; do
web_server_execute "$web_server" "${web_server}_update_app_configuration" "${args[@]}"
done
}
########################
# Enable loading page, which shows users that the initialization process is not yet completed
# Globals:
# *
# Arguments:
# None
# Returns:
# None
#########################
web_server_enable_loading_page() {
ensure_web_server_app_configuration_exists "__loading" --hosts "_default_" \
--apache-additional-configuration "
# Show a HTTP 503 Service Unavailable page by default
RedirectMatch 503 ^/$
# Show index.html if server is answering with 404 Not Found or 503 Service Unavailable status codes
ErrorDocument 404 /index.html
ErrorDocument 503 /index.html" \
--nginx-additional-configuration "
# Show a HTTP 503 Service Unavailable page by default
location / {
return 503;
}
# Show index.html if server is answering with 404 Not Found or 503 Service Unavailable status codes
error_page 404 @installing;
error_page 503 @installing;
location @installing {
rewrite ^(.*)$ /index.html break;
}"
web_server_reload
}
########################
# Disable loading page, which shows users that the initialization process is not yet completed
# Globals:
# *
# Arguments:
# None
# Returns:
# None
#########################
web_server_disable_install_page() {
ensure_web_server_app_configuration_not_exists "__loading"
web_server_reload
}

View File

@ -1,24 +0,0 @@
#!/bin/sh
set -e
set -u
export DEBIAN_FRONTEND=noninteractive
n=0
max=2
until [ $n -gt $max ]; do
set +e
(
apt-get update -qq &&
apt-get install -y --no-install-recommends "$@"
)
CODE=$?
set -e
if [ $CODE -eq 0 ]; then
break
fi
if [ $n -eq $max ]; then
exit $CODE
fi
echo "apt failed, retrying"
n=$(($n + 1))
done
rm -r /var/lib/apt/lists /var/cache/apt/archives

View File

@ -1,17 +0,0 @@
#!/bin/bash
# shellcheck disable=SC1091
set -o errexit
set -o nounset
set -o pipefail
# set -o xtrace # Uncomment this line for debugging purposes
# Load libraries
. /opt/bitnami/scripts/libbitnami.sh
. /opt/bitnami/scripts/liblog.sh
print_welcome_page
echo ""
exec "$@"

View File

@ -1,24 +0,0 @@
#!/bin/bash
# shellcheck disable=SC1091
set -o errexit
set -o nounset
set -o pipefail
# set -o xtrace # Uncomment this line for debugging purpose
# Load libraries
. /opt/bitnami/scripts/libfile.sh
. /opt/bitnami/scripts/liblog.sh
#
# Java post-unpack operations
#
# Override default files in the Java security directory. This is used for
# custom base images (e.g. when custom CA certificates or block lists are used)
if ! is_dir_empty "$JAVA_EXTRA_SECURITY_DIR"; then
info "Adding custom CAs to the Java security folder"
cp -Lr "$JAVA_EXTRA_SECURITY_DIR" /opt/bitnami/java/lib/security
fi

View File

@ -1,647 +0,0 @@
#!/bin/bash
#
# Bitnami Solr library
# shellcheck disable=SC1091
# Load Generic Libraries
. /opt/bitnami/scripts/libfile.sh
. /opt/bitnami/scripts/liblog.sh
. /opt/bitnami/scripts/libos.sh
. /opt/bitnami/scripts/libpersistence.sh
. /opt/bitnami/scripts/libservice.sh
. /opt/bitnami/scripts/libvalidations.sh
########################
# Create initial security.json
# Globals:
# SOLR_*
# Arguments:
# None
# Returns:
# None
#########################
solr_generate_initial_security() {
info "Generating initial security file"
cat >"${SOLR_BASE_DIR}/server/solr/security.json" <<EOF
{
"authentication":{
"blockUnknown": true,
"class":"solr.BasicAuthPlugin",
"credentials":{"${SOLR_ADMIN_USERNAME}":"IV0EHq1OnNrj6gvRCwvFwTrZ1+z1oBbnQdiVC3otuq0= Ndd7LKvVBAaZIF0QAVi1ekCfAJXr1GGfLtRUXhgrF8c="},
"forwardCredentials": false
},
"authorization":{
"class":"solr.RuleBasedAuthorizationPlugin",
"permissions":[{"name":"security-edit",
"role":"admin"}],
"user-role":{"${SOLR_ADMIN_USERNAME}":"admin"}
}}
EOF
if am_i_root; then
configure_permissions_ownership "${SOLR_BASE_DIR}/server/solr/security.json" -u "$SOLR_DAEMON_USER" -g "$SOLR_DAEMON_GROUP"
fi
}
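# Note: the credentials hash written above appears to be the stock hash for the default
# "SolrRocks" password; solr_update_password is expected to replace it with SOLR_ADMIN_PASSWORD
# during the first initialization.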
########################
# Configure Solr Heap Size
# Globals:
# SOLR_*
# Arguments:
# None
# Returns:
# None
#########################
solr_set_heap_size() {
local heap_ms_size
local heap_mx_size
local machine_mem=""
debug "Calculating appropriate Xmx and Xms values..."
machine_mem="$(get_total_memory)"
if [[ "$machine_mem" -lt 512 ]]; then
heap_ms_size=256
heap_mx_size=256
elif [[ "$machine_mem" -lt 4096 ]]; then
heap_ms_size=256
heap_mx_size=$((machine_mem - 512))
else
heap_ms_size=512
heap_mx_size="$((machine_mem - 1024))"
fi
info "Setting '-Xms${heap_ms_size}m -Xmx${heap_mx_size}m' heap options..."
replace_in_file "$SOLR_BIN_DIR"/solr.in.sh ".*SOLR_JAVA_MEM=.*" "SOLR_JAVA_MEM=\"-Xms${heap_ms_size}m -Xmx${heap_mx_size}m\""
}
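# Worked example (illustrative): a machine reporting 2048 MB of total memory falls in the
# second branch above and ends up with SOLR_JAVA_MEM="-Xms256m -Xmx1536m" in solr.in.sh.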
########################
# Validate settings in SOLR_* env. variables
# Globals:
# SOLR_*
# Arguments:
# None
# Returns:
# None
#########################
solr_validate() {
info "Validating settings in SOLR_* env vars..."
local error_code=0
# Auxiliary functions
print_validation_error() {
error "$1"
error_code=1
}
! is_yes_no_value "$SOLR_ENABLE_AUTHENTICATION" && print_validation_error "SOLR_ENABLE_AUTHENTICATION possible values are yes or no"
if is_boolean_yes "$SOLR_ENABLE_AUTHENTICATION"; then
[[ -z "$SOLR_ADMIN_USERNAME" ]] && print_validation_error "You need to provide an username in SOLR_USERNAME"
[[ -z "$SOLR_ADMIN_PASSWORD" ]] && print_validation_error "You need to provide a password for the user: ${SOLR_ADMIN_USERNAME}"
fi
! is_yes_no_value "$SOLR_SSL_ENABLED" && print_validation_error "SOLR_SSL_ENABLED possible values are yes or no"
if is_boolean_yes "$SOLR_SSL_ENABLED"; then
[[ -z "$SOLR_SSL_KEY_STORE" ]] && print_validation_error "You need to provide a key store file in SOLR_SSL_KEY_STORE"
[[ -z "$SOLR_SSL_TRUST_STORE" ]] && print_validation_error "You need to provide a trust store file in SOLR_SSL_TRUST_STORE"
[[ -z "$SOLR_SSL_KEY_STORE_PASSWORD" ]] && print_validation_error "You need to provide a password in SOLR_SSL_KEY_STORE_PASSWORD"
[[ -z "$SOLR_SSL_TRUST_STORE_PASSWORD" ]] && print_validation_error "You need to provide a password file in SOLR_SSL_TRUST_STORE_PASSWORD"
fi
! is_yes_no_value "$SOLR_ENABLE_CLOUD_MODE" && print_validation_error "SOLR_ENABLE_CLOUD_MODE possible values are yes or no"
is_boolean_yes "$SOLR_ENABLE_CLOUD_MODE" && [[ -z "$SOLR_ZK_HOSTS" ]] && print_validation_error "You need to provide the Zookeper node list in SOLR_ZK_HOSTS"
! is_boolean_yes "$SOLR_CLOUD_BOOTSTRAP" && is_boolean_yes "$SOLR_ENABLE_CLOUD_MODE" && [[ -n "$SOLR_CORES" ]] && info "This node is not a boostrap node and will not create the collection"
! is_true_false_value "$SOLR_SSL_CHECK_PEER_NAME" && print_validation_error "SOLR_SSL_CHECK_PEER_NAME possible values are true or false"
[[ "$SOLR_NUMBER_OF_NODES" -lt $(("$SOLR_COLLECTION_REPLICAS" * "$SOLR_COLLECTION_SHARDS")) ]] && print_validation_error "Not enough nodes for the replicas and shards indicated"
[[ "$error_code" -eq 0 ]] || exit "$error_code"
}
########################
# Wait for the solr root to exist in zookeeper
# Globals:
# SOLR_*
# Arguments:
# None
# Returns:
# None
#########################
solr_wait_for_zk_root() {
info "Waiting for solr root in zookeeper"
if ! retry_while solr_zk_root_exists; then
error "Failed to connect to the zookeeper"
exit 1
fi
}
########################
# Wait for Zookeeper to be up
# Globals:
# SOLR_*
# Arguments:
# None
# Returns:
# None
#########################
solr_wait_for_zookeeper() {
local host
local port
info "Waiting for Zookeeper to be up"
read -r -a zoo_nodes <<<"$(tr ',' ' ' <<<"${SOLR_ZK_HOSTS}")"
for zoo_node in "${zoo_nodes[@]}"; do
if [[ "$zoo_node" =~ (.*):([0-9]*) ]]; then
host="${BASH_REMATCH[1]}"
port="${BASH_REMATCH[2]}"
debug "Trying: $host:$port"
if ! retry_while "debug_execute nc -z ${host} ${port}" "$SOLR_ZK_MAX_RETRIES" "$SOLR_ZK_SLEEP_TIME"; then
error "Failed to connect to the zookeeper node at ${host}:${port}"
return 1
fi
fi
done
}
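# Illustrative usage, assuming a comma-separated host:port list (hostnames are hypothetical):
#   SOLR_ZK_HOSTS="zookeeper-0:2181,zookeeper-1:2181,zookeeper-2:2181" solr_wait_for_zookeeper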
#########################
# Create SOLR cores
# Globals:
# SOLR_*
# Arguments:
# None
# Returns:
# None
#########################
solr_create_cores() {
local -r exec="curl"
local command_args=("--silent" "--fail")
local protocol="http"
is_boolean_yes "$SOLR_SSL_ENABLED" && protocol="https" && command_args+=("-k")
is_boolean_yes "$SOLR_ENABLE_AUTHENTICATION" && command_args+=("--user" "${SOLR_ADMIN_USERNAME}:${SOLR_ADMIN_PASSWORD}")
read -r -a cores <<<"$(tr ',;' ' ' <<<"${SOLR_CORES}")"
info "Creating cores..."
for core in "${cores[@]}"; do
mkdir -p "${SOLR_SERVER_DIR}/solr/${core}/data"
mkdir -p "${SOLR_SERVER_DIR}/solr/${core}/conf"
cp -Lr "${SOLR_CORE_CONF_DIR}"/* "${SOLR_SERVER_DIR}/solr/${core}/conf/"
command_args+=("${protocol}://localhost:${SOLR_PORT_NUMBER}/solr/admin/cores?action=CREATE&name=${core}&instanceDir=${core}&dataDir=data")
info "Creating solr core: ${core}"
if ! debug_execute "$exec" "${command_args[@]}" >/dev/null; then
error "There was an error when creating the core"
exit 1
else
info "Core created"
fi
done
}
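# Illustrative example: with SOLR_CORES="products,users" (hypothetical core names), each core
# gets data/ and conf/ directories under ${SOLR_SERVER_DIR}/solr/<core> and a CREATE request is
# sent to the local CoreAdmin API at /solr/admin/cores.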
#########################
# Update user password
# Globals:
# SOLR_*
# Arguments:
# $1 - username
# $2 - password
# Returns:
# None
#########################
solr_update_password() {
local -r exec="curl"
local -r default_password="SolrRocks"
local -r username="${1:?user is required}"
local -r password="${2:?password is required}"
local protocol="http"
local command_args=()
is_boolean_yes "$SOLR_SSL_ENABLED" && protocol="https" && command_args+=("-k")
command_args+=("--silent" "--user" "${username}:${default_password}" "${protocol}://localhost:${SOLR_PORT_NUMBER}/api/cluster/security/authentication" "-H" "'Content-type:application/json'" "-d" "{\"set-user\":{\"${username}\":\"${password}\"}}")
info "Updating user password"
if ! debug_execute "$exec" "${command_args[@]}" >/dev/null; then
error "There was an error when updating the user password"
exit 1
else
info "Password updated"
fi
}
#########################
# Check if the API is ready
# Globals:
# SOLR_*
# Arguments:
# $1 - username
# $2 - password
# Returns:
# Boolean
#########################
solr_check_api() {
local -r exec="curl"
local -r username="${1:?user is required}"
local -r password="${2:?password is required}"
local protocol="http"
local command_args=()
debug "Checking if the API is ready"
is_boolean_yes "$SOLR_SSL_ENABLED" && protocol="https" && command_args+=("-k")
command_args+=("--silent" "--user" "${username}:${password}" "${protocol}://localhost:${SOLR_PORT_NUMBER}/api/" "-H" "'Content-type:application/json'")
if ! debug_execute "$exec" "${command_args[@]}" >/dev/null; then
return 1
fi
}
#########################
# Wait for api
# Globals:
# SOLR_*
# Arguments:
# $1 - username
# $2 - password
# Returns:
# None
#########################
solr_wait_for_api() {
local -r username="${1:?user is required}"
local -r password="${2:?password is required}"
info "Wait for Solr API"
if ! retry_while "solr_check_api ${username} ${password}"; then
error "Solr API not available"
exit 1
fi
}
#########################
# Create SOLR cloud user
# Globals:
# SOLR_*
# Arguments:
# $1 - username
# $2 - password
# Returns:
# None
#########################
solr_create_cloud_user() {
local -r exec="${SOLR_BIN_DIR}/solr"
local -r username="${1:?user is required}"
local -r password="${2:?password is required}"
local command_args=("auth" "enable" "-type" "basicAuth" "-credentials" "${username}:${password}" "-blockUnknown" "true" "-z" "$SOLR_ZK_HOSTS/solr")
info "Creating user: ${username}"
if ! debug_execute "$exec" "${command_args[@]}" >/dev/null; then
error "There was an error when creating the user"
exit 1
else
info "User created"
fi
}
#########################
# Create SOLR collection
# Globals:
# SOLR_*
# Arguments:
# None
# Returns:
# None
#########################
solr_create_collection() {
local -r exec="curl"
local command_args=("--silent")
local protocol="http"
info "Creating collection:${SOLR_COLLECTION} with ${SOLR_COLLECTION_REPLICAS} replicas and ${SOLR_COLLECTION_SHARDS} shards"
is_boolean_yes "$SOLR_ENABLE_AUTHENTICATION" && command_args+=("--user" "${SOLR_ADMIN_USERNAME}:${SOLR_ADMIN_PASSWORD}")
is_boolean_yes "$SOLR_SSL_ENABLED" && protocol="https" && command_args+=("-k")
command_args+=("${protocol}://localhost:${SOLR_PORT_NUMBER}/solr/admin/collections?action=CREATE&name=${SOLR_COLLECTION}&numShards=${SOLR_COLLECTION_SHARDS}&replicationFactor=${SOLR_COLLECTION_REPLICAS}")
#Check if the collection exists before creating it
if ! solr_collection_exists "$SOLR_COLLECTION"; then
# Will wait for other nodes to join before creating a collection with shards and/or replicas
if [[ "$SOLR_COLLECTION_REPLICAS" -gt 1 ]] || [[ "$SOLR_COLLECTION_SHARDS" -gt 1 ]]; then
info "Waiting for other nodes to be available"
if ! retry_while "solr_check_number_of_nodes ${SOLR_NUMBER_OF_NODES}" "$SOLR_ZK_MAX_RETRIES" "$SOLR_ZK_SLEEP_TIME"; then
error "There are not enough nodes to create the collection"
fi
fi
if ! debug_execute "$exec" "${command_args[@]}" >/dev/null; then
error "There was an error when creating the collection"
exit 1
else
info "Collection created"
fi
else
info "Skipping. Collection already exists."
fi
}
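# Illustrative example: SOLR_COLLECTION="mycollection" (hypothetical name) with
# SOLR_COLLECTION_SHARDS=2 and SOLR_COLLECTION_REPLICAS=2 waits for at least
# SOLR_NUMBER_OF_NODES live nodes before issuing the Collections API CREATE call above.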
#########################
# Check if the root of solr exists in zookeeper
# Globals:
# SOLR_*
# Arguments:
#   None
# Returns:
#   Boolean
#########################
solr_zk_root_exists() {
local -r exec="${SOLR_BIN_DIR}/solr"
local command_args=("zk" "ls" "/" "-z" "$SOLR_ZK_HOSTS")
debug "Checking if root of solr exists in zookeeper"
"$exec" "${command_args[@]}" 2>/dev/null | grep -q "solr"
}
#########################
# Check if a collection already exists
# Globals:
# SOLR_*
# Arguments:
# $1 - Collection name
# Returns:
#   Boolean
#########################
solr_collection_exists() {
local -r collection="${1:?collection is required}"
local -r exec="${SOLR_BIN_DIR}/solr"
local command_args=("zk" "ls" "/solr/collections" "-z" "$SOLR_ZK_HOSTS")
debug "Checking if ${collection} exists"
"$exec" "${command_args[@]}" | grep -q "$collection"
}
########################
# Check the number of nodes in the cluster
# Arguments:
# $1 - expected number of nodes
# Returns:
# Boolean
########################
solr_check_number_of_nodes() {
local -r nodes="${1:-1}"
local -r exec="${SOLR_BIN_DIR}/solr"
local command_args=("zk" "ls" "/solr/live_nodes" "-z" "$SOLR_ZK_HOSTS")
[[ $("$exec" "${command_args[@]}" | wc -l) -ge "$nodes" ]]
}
########################
# Check if zookeeper has been initialized
# Arguments:
# None
# Returns:
# Boolean
########################
solr_is_zk_initialized() {
local -r exec="${SOLR_BIN_DIR}/solr"
local command_args=("zk" "ls" "/solr" "-z" "$SOLR_ZK_HOSTS")
info "Checking if solr has been initialized in zookeeper"
if ! debug_execute "$exec" "${command_args[@]}" 2>/dev/null; then
info "Zookeeper was not initialized."
return 1
else
info "Zookeeper was initialized."
return 0
fi
}
#########################
# Start solr in background
# Globals:
# SOLR_*
# Arguments:
# $1 - Mode: cloud or empty
# Returns:
# None
#########################
solr_start_bg() {
local -r mode="${1:-}"
local -r exec="${SOLR_BIN_DIR}/solr"
local start_args=("start" "-p" "${SOLR_PORT_NUMBER}" "-d" "server")
info "Starting solr in background"
if [[ "$mode" == "cloud" ]]; then
start_args+=("-cloud" "-z" "$SOLR_ZK_HOSTS/solr")
fi
# Do not start as root, to avoid solr error message
if am_i_root; then
debug_execute "gosu" "$SOLR_DAEMON_USER" "$exec" "${start_args[@]}"
else
debug_execute "$exec" "${start_args[@]}"
fi
}
#########################
# Stop SOLR
# Globals:
# SOLR_*
# Arguments:
# None
# Returns:
# None
#########################
solr_stop() {
info "Stopping solr"
stop_service_using_pid "$SOLR_PID_FILE"
}
########################
# Check if Solr is running
# Globals:
# SOLR_PID_FILE
# Arguments:
# None
# Returns:
# Whether Solr is running
########################
is_solr_running() {
local pid
pid="$(get_pid_from_file "$SOLR_PID_FILE")"
if [[ -n "$pid" ]]; then
is_service_running "$pid"
else
false
fi
}
########################
# Check if Solr is running
# Globals:
# SOLR_PID_FILE
# Arguments:
# None
# Returns:
# Whether Solr is not running
########################
is_solr_not_running() {
! is_solr_running
}
#########################
# Create root in zookeeper
# Globals:
# SOLR_*
# Arguments:
# None
# Returns:
# None
#########################
solr_zk_initialize() {
local -r exec="${SOLR_BIN_DIR}/solr"
local command_args=("zk" "mkroot" "/solr" "-z" "$SOLR_ZK_HOSTS")
if solr_is_zk_initialized; then
info "Zookeeper is already initialized"
else
info "Creating root in zookeeper"
debug_execute "$exec" "${command_args[@]}"
fi
}
#########################
# Set cluster properties in zookeeper
# Globals:
# SOLR_*
# Arguments:
# None
# Returns:
# None
#########################
solr_set_ssl_url_scheme() {
info "Initializing configuring Solr HTTPS in Zookeeper"
solr_wait_for_zk_root && "${SOLR_SERVER_DIR}/scripts/cloud-scripts/zkcli.sh" -zkhost "${SOLR_ZK_HOSTS}/solr" -cmd clusterprop -name urlScheme -val https
}
#########################
# Migrate Solr data persisted in the old location
# Globals:
# SOLR_*
# Arguments:
# None
# Returns:
# None
#########################
solr_migrate_old_data() {
local -r exec="mv"
local command_args=("${SOLR_VOLUME_DIR}/data" "${SOLR_VOLUME_DIR}/server/solr")
if am_i_root; then
warn "Persisted data detected in old location. Migrating and changing permissions"
ensure_dir_exists "${SOLR_VOLUME_DIR}/server"
debug_execute "$exec" "${command_args[@]}"
configure_permissions_ownership "${SOLR_VOLUME_DIR}/server/solr" -d 775 -f 664 -g "root"
warn "Data migrated."
else
error "Persisted data detected in old location. You will need to run first the container as root to migrate the data"
exit 1
fi
}
#########################
# Initialize SOLR
# Globals:
# SOLR_*
# Arguments:
# None
# Returns:
# None
#########################
solr_initialize() {
info "Initializing Solr ..."
# Check if there is persisted data from old version and migrate it
! is_dir_empty "${SOLR_VOLUME_DIR}/data" && [[ -f "$SOLR_VOLUME_DIR/.initialized" ]] && solr_migrate_old_data
is_boolean_yes "$SOLR_SSL_ENABLED" && export SOLR_SSL_ENABLED=true
# Check if Solr has already been initialized and persisted in a previous run
local -r app_name="solr"
if ! is_app_initialized "$app_name"; then
# Ensure the solr base directory exists and has proper permissions
info "Configuring file permissions for Solr"
ensure_dir_exists "$SOLR_VOLUME_DIR"
rm -f "$SOLR_PID_FILE"
if is_boolean_yes "$SOLR_ENABLE_CLOUD_MODE"; then
info "Deploying Solr Cloud from scratch"
if ! solr_wait_for_zookeeper; then
error "Zookeeper not detected"
exit 1
fi
if is_boolean_yes "$SOLR_CLOUD_BOOTSTRAP"; then
solr_zk_initialize
solr_start_bg "cloud"
solr_wait_for_api "admin" "SolrRocks"
is_boolean_yes "$SOLR_SSL_ENABLED" && solr_set_ssl_url_scheme
[[ -n "$SOLR_COLLECTION" ]] && solr_create_collection
is_boolean_yes "$SOLR_ENABLE_AUTHENTICATION" && solr_create_cloud_user "$SOLR_ADMIN_USERNAME" "$SOLR_ADMIN_PASSWORD"
solr_stop
else
if is_boolean_yes "$SOLR_SSL_ENABLED"; then
solr_set_ssl_url_scheme
else
solr_wait_for_zk_root
fi
fi
else
info "Deploying Solr from scratch"
is_boolean_yes "$SOLR_ENABLE_AUTHENTICATION" && solr_generate_initial_security
solr_start_bg
solr_wait_for_api "admin" "SolrRocks"
is_boolean_yes "$SOLR_ENABLE_AUTHENTICATION" && solr_update_password "$SOLR_ADMIN_USERNAME" "$SOLR_ADMIN_PASSWORD"
[[ -n "$SOLR_CORES" ]] && solr_create_cores
solr_stop
fi
info "Persisting Solr installation"
persist_app "$app_name" "$SOLR_DATA_TO_PERSIST"
else
info "Restoring persisted Solr installation"
# Compatibility with previous container images
if [[ "$(ls "$SOLR_VOLUME_DIR")" = "data" ]]; then
warn "The persisted data for this Solr installation is located at '${SOLR_VOLUME_DIR}/data' instead of '${SOLR_VOLUME_DIR}'"
warn "This is deprecated and support for this may be removed in a future release"
rm "${SOLR_BASE_DIR}/server/solr"
ln -s "${SOLR_VOLUME_DIR}/data" "${SOLR_BASE_DIR}/server/solr"
fi
restore_persisted_app "$app_name" "$SOLR_DATA_TO_PERSIST"
fi
}

View File

@ -1,104 +0,0 @@
#!/bin/bash
#
# Environment configuration for solr
# The values for all environment variables will be set in the below order of precedence
# 1. Custom environment variables defined below after Bitnami defaults
# 2. Constants defined in this file (environment variables with no default), i.e. BITNAMI_ROOT_DIR
# 3. Environment variables overridden via external files using *_FILE variables (see below)
# 4. Environment variables set externally (i.e. current Bash context/Dockerfile/userdata)
# Load logging library
# shellcheck disable=SC1090,SC1091
. /opt/bitnami/scripts/liblog.sh
export BITNAMI_ROOT_DIR="/opt/bitnami"
export BITNAMI_VOLUME_DIR="/bitnami"
# Logging configuration
export MODULE="${MODULE:-solr}"
export BITNAMI_DEBUG="${BITNAMI_DEBUG:-false}"
# By setting an environment variable matching *_FILE to a file path, the prefixed environment
# variable will be overridden with the value specified in that file
solr_env_vars=(
SOLR_ENABLE_CLOUD_MODE
SOLR_NUMBER_OF_NODES
SOLR_HOST
SOLR_JETTY_HOST
SOLR_HEAP
SOLR_JAVA_MEM
SOLR_PORT_NUMBER
SOLR_CORES
SOLR_COLLECTION
SOLR_COLLECTION_REPLICAS
SOLR_COLLECTION_SHARDS
SOLR_ENABLE_AUTHENTICATION
SOLR_ADMIN_USERNAME
SOLR_ADMIN_PASSWORD
SOLR_CLOUD_BOOTSTRAP
SOLR_CORE_CONF_DIR
SOLR_SSL_ENABLED
SOLR_SSL_CHECK_PEER_NAME
SOLR_ZK_MAX_RETRIES
SOLR_ZK_SLEEP_TIME
)
for env_var in "${solr_env_vars[@]}"; do
file_env_var="${env_var}_FILE"
if [[ -n "${!file_env_var:-}" ]]; then
if [[ -r "${!file_env_var:-}" ]]; then
export "${env_var}=$(< "${!file_env_var}")"
unset "${file_env_var}"
else
warn "Skipping export of '${env_var}'. '${!file_env_var:-}' is not readable."
fi
fi
done
unset solr_env_vars
# Paths
export BITNAMI_VOLUME_DIR="/bitnami"
export SOLR_BASE_DIR="${BITNAMI_ROOT_DIR}/solr"
export SOLR_JAVA_HOME="${BITNAMI_ROOT_DIR}/java"
export SOLR_BIN_DIR="${SOLR_BASE_DIR}/bin"
export SOLR_TMP_DIR="${SOLR_BASE_DIR}/tmp"
export SOLR_PID_DIR="${SOLR_BASE_DIR}/tmp"
export SOLR_LOGS_DIR="${SOLR_BASE_DIR}/logs"
export SOLR_SERVER_DIR="${SOLR_BASE_DIR}/server"
# Persistence
export SOLR_VOLUME_DIR="${BITNAMI_VOLUME_DIR}/solr"
export SOLR_DATA_TO_PERSIST="server/solr"
# Solr parameters
export SOLR_ENABLE_CLOUD_MODE="${SOLR_ENABLE_CLOUD_MODE:-no}"
export SOLR_NUMBER_OF_NODES="${SOLR_NUMBER_OF_NODES:-1}"
export SOLR_HOST="${SOLR_HOST:-}"
export SOLR_JETTY_HOST="${SOLR_JETTY_HOST:-0.0.0.0}"
export SOLR_HEAP="${SOLR_HEAP:-}"
export SOLR_JAVA_MEM="${SOLR_JAVA_MEM:--Xms512m -Xmx512m}"
export SOLR_PORT_NUMBER="${SOLR_PORT_NUMBER:-8983}"
export SOLR_PID_FILE="${SOLR_PID_DIR}/solr-${SOLR_PORT_NUMBER}.pid"
export SOLR_CORES="${SOLR_CORES:-}"
SOLR_COLLECTION="${SOLR_COLLECTION:-"${SOLR_COLLECTION:-}"}"
export SOLR_COLLECTION="${SOLR_COLLECTION:-}"
export SOLR_COLLECTION_REPLICAS="${SOLR_COLLECTION_REPLICAS:-1}"
export SOLR_COLLECTION_SHARDS="${SOLR_COLLECTION_SHARDS:-1}"
export SOLR_ENABLE_AUTHENTICATION="${SOLR_ENABLE_AUTHENTICATION:-no}"
export SOLR_ADMIN_USERNAME="${SOLR_ADMIN_USERNAME:-admin}"
export SOLR_ADMIN_PASSWORD="${SOLR_ADMIN_PASSWORD:-bitnami}"
export SOLR_CLOUD_BOOTSTRAP="${SOLR_CLOUD_BOOTSTRAP:-no}"
export SOLR_CORE_CONF_DIR="${SOLR_CORE_CONF_DIR:-${SOLR_SERVER_DIR}/solr/configsets/_default/conf}"
export SOLR_SSL_ENABLED="${SOLR_SSL_ENABLED:-no}"
export SOLR_SSL_CHECK_PEER_NAME="${SOLR_SSL_CHECK_PEER_NAME:-false}"
# System users (when running with a privileged user)
export SOLR_DAEMON_USER="solr"
export SOLR_DAEMON_GROUP="solr"
# Solr retries configuration
export SOLR_ZK_MAX_RETRIES="${SOLR_ZK_MAX_RETRIES:-5}"
export SOLR_ZK_SLEEP_TIME="${SOLR_ZK_SLEEP_TIME:-5}"
# Custom environment variables may be defined below

View File

@ -1,27 +0,0 @@
#!/bin/bash
# shellcheck disable=SC1091
set -o errexit
set -o nounset
set -o pipefail
#set -o xtrace # Uncomment this line for debugging purposes
# Load libraries
. /opt/bitnami/scripts/libbitnami.sh
. /opt/bitnami/scripts/liblog.sh
. /opt/bitnami/scripts/libsolr.sh
# Load solr environment variables
. /opt/bitnami/scripts/solr-env.sh
print_welcome_page
if [[ "$*" = *"/opt/bitnami/scripts/solr/run.sh"* ]]; then
info "** Starting solr setup **"
/opt/bitnami/scripts/solr/setup.sh
info "** solr setup finished! **"
fi
echo ""
exec "$@"

View File

@ -1,29 +0,0 @@
#!/bin/bash
# shellcheck disable=SC1090,SC1091
set -o errexit
set -o nounset
set -o pipefail
# set -o xtrace # Uncomment this line for debugging purpose
# Load libraries
. /opt/bitnami/scripts/libsolr.sh
. /opt/bitnami/scripts/libfs.sh
. /opt/bitnami/scripts/libos.sh
# Load solr environment variables
. /opt/bitnami/scripts/solr-env.sh
ensure_user_exists "$SOLR_DAEMON_USER" --group "$SOLR_DAEMON_GROUP"
for dir in "$SOLR_TMP_DIR" "$SOLR_VOLUME_DIR" "$SOLR_LOGS_DIR" "$SOLR_BASE_DIR"; do
ensure_dir_exists "$dir"
configure_permissions_ownership "$dir" -d "775" -u "$SOLR_DAEMON_USER" -g "root"
done
# Create basic solr configuration
replace_in_file "$SOLR_BIN_DIR"/solr.in.sh "#SOLR_JAVA_HOME=\"\"" "SOLR_JAVA_HOME=$SOLR_JAVA_HOME"
replace_in_file "$SOLR_BIN_DIR"/solr.in.sh "#SOLR_PID_DIR=" "SOLR_PID_DIR=$SOLR_PID_DIR"
replace_in_file "$SOLR_BIN_DIR"/solr.in.sh "#SOLR_LOGS_DIR=logs" "SOLR_LOGS_DIR=$SOLR_LOGS_DIR"

View File

@ -1,31 +0,0 @@
#!/bin/bash
# shellcheck disable=SC1091
set -o errexit
set -o nounset
set -o pipefail
# set -o xtrace # Uncomment this line for debugging purpose
# Load libraries
. /opt/bitnami/scripts/liblog.sh
. /opt/bitnami/scripts/libsolr.sh
. /opt/bitnami/scripts/libos.sh
# Load solr environment variables
. /opt/bitnami/scripts/solr-env.sh
info "** Starting solr **"
start_command=("${SOLR_BIN_DIR}/solr" "-p" "${SOLR_PORT_NUMBER}" "-d" "/opt/bitnami/solr/server" "-f")
if is_boolean_yes "$SOLR_ENABLE_CLOUD_MODE"; then
start_command+=("-cloud" "-z" "$SOLR_ZK_HOSTS/solr")
fi
is_boolean_yes "$SOLR_SSL_ENABLED" && export SOLR_SSL_ENABLED=true
if am_i_root; then
exec gosu "$SOLR_DAEMON_USER" "${start_command[@]}"
else
exec "${start_command[@]}"
fi

View File

@ -1,20 +0,0 @@
#!/bin/bash
# shellcheck disable=SC1091
set -o errexit
set -o nounset
set -o pipefail
# set -o xtrace # Uncomment this line for debugging purpose
# Load libraries
. /opt/bitnami/scripts/libsolr.sh
# Load solr environment variables
. /opt/bitnami/scripts/solr-env.sh
# Ensure solr environment variables are valid
solr_validate
# Ensure solr is initialized
solr_initialize

View File

@ -1,33 +0,0 @@
FROM docker.io/bitnami/minideb:buster
LABEL maintainer "Bitnami <containers@bitnami.com>"
ENV HOME="/" \
OS_ARCH="amd64" \
OS_FLAVOUR="debian-10" \
OS_NAME="linux"
ARG JAVA_EXTRA_SECURITY_DIR="/bitnami/java/extra-security"
COPY prebuildfs /
# Install required system packages and dependencies
RUN install_packages acl ca-certificates curl gzip libc6 lsof netcat-traditional procps tar zlib1g
RUN . /opt/bitnami/scripts/libcomponent.sh && component_unpack "java" "11.0.15-0" --checksum 255bc547614ce8052f416a74c58cfb916f724876315f15b816e0eb98b2bded35
RUN . /opt/bitnami/scripts/libcomponent.sh && component_unpack "solr" "9.0.0-0" --checksum 18455e7df25a9a2f424ebf00757587595a30422408c3fa16a1cee8fb36cb95c4
RUN . /opt/bitnami/scripts/libcomponent.sh && component_unpack "gosu" "1.14.0-7" --checksum d6280b6f647a62bf6edc74dc8e526bfff63ddd8067dcb8540843f47203d9ccf1
RUN apt-get update && apt-get upgrade -y && \
rm -r /var/lib/apt/lists /var/cache/apt/archives
RUN chmod g+rwX /opt/bitnami
COPY rootfs /
RUN /opt/bitnami/scripts/java/postunpack.sh
RUN /opt/bitnami/scripts/solr/postunpack.sh
ENV APP_VERSION="9.0.0" \
BITNAMI_APP_NAME="solr" \
JAVA_HOME="/opt/bitnami/java" \
PATH="/opt/bitnami/java/bin:/opt/bitnami/solr/bin:/opt/bitnami/common/bin:$PATH"
EXPOSE 8983
USER 1001
ENTRYPOINT [ "/opt/bitnami/scripts/solr/entrypoint.sh" ]
CMD [ "/opt/bitnami/scripts/solr/run.sh" ]

View File

@ -1,13 +0,0 @@
version: '2'
services:
solr:
image: docker.io/bitnami/solr:9
environment:
- SOLR_ENABLE_AUTHENTICATION=yes
ports:
- '8983:8983'
volumes:
- 'solr_data:/bitnami/solr'
volumes:
solr_data:
driver: local

View File

@ -1,23 +0,0 @@
{
"gosu": {
"arch": "amd64",
"digest": "d6280b6f647a62bf6edc74dc8e526bfff63ddd8067dcb8540843f47203d9ccf1",
"distro": "debian-10",
"type": "NAMI",
"version": "1.14.0-7"
},
"java": {
"arch": "amd64",
"digest": "255bc547614ce8052f416a74c58cfb916f724876315f15b816e0eb98b2bded35",
"distro": "debian-10",
"type": "NAMI",
"version": "11.0.15-0"
},
"solr": {
"arch": "amd64",
"digest": "18455e7df25a9a2f424ebf00757587595a30422408c3fa16a1cee8fb36cb95c4",
"distro": "debian-10",
"type": "NAMI",
"version": "9.0.0-0"
}
}

View File

@ -1,3 +0,0 @@
Bitnami containers ship with software bundles. You can find the licenses under:
/opt/bitnami/nami/COPYING
/opt/bitnami/[name-of-bundle]/licenses/[bundle-version].txt

View File

@ -1,51 +0,0 @@
#!/bin/bash
#
# Bitnami custom library
# shellcheck disable=SC1091
# Load Generic Libraries
. /opt/bitnami/scripts/liblog.sh
# Constants
BOLD='\033[1m'
# Functions
########################
# Print the welcome page
# Globals:
# DISABLE_WELCOME_MESSAGE
# BITNAMI_APP_NAME
# Arguments:
# None
# Returns:
# None
#########################
print_welcome_page() {
if [[ -z "${DISABLE_WELCOME_MESSAGE:-}" ]]; then
if [[ -n "$BITNAMI_APP_NAME" ]]; then
print_image_welcome_page
fi
fi
}
########################
# Print the welcome page for a Bitnami Docker image
# Globals:
# BITNAMI_APP_NAME
# Arguments:
# None
# Returns:
# None
#########################
print_image_welcome_page() {
local github_url="https://github.com/bitnami/bitnami-docker-${BITNAMI_APP_NAME}"
log ""
log "${BOLD}Welcome to the Bitnami ${BITNAMI_APP_NAME} container${RESET}"
log "Subscribe to project updates by watching ${BOLD}${github_url}${RESET}"
log "Submit issues and feature requests at ${BOLD}${github_url}/issues${RESET}"
log ""
}

View File

@ -1,65 +0,0 @@
#!/bin/bash
#
# Library for managing Bitnami components
# Constants
CACHE_ROOT="/tmp/bitnami/pkg/cache"
DOWNLOAD_URL="https://downloads.bitnami.com/files/stacksmith"
# Functions
########################
# Download and unpack a Bitnami package
# Globals:
# OS_NAME
# OS_ARCH
# OS_FLAVOUR
# Arguments:
# $1 - component's name
# $2 - component's version
# Returns:
# None
#########################
component_unpack() {
local name="${1:?name is required}"
local version="${2:?version is required}"
local base_name="${name}-${version}-${OS_NAME}-${OS_ARCH}-${OS_FLAVOUR}"
local package_sha256=""
local directory="/opt/bitnami"
# Validate arguments
shift 2
while [ "$#" -gt 0 ]; do
case "$1" in
-c|--checksum)
shift
package_sha256="${1:?missing package checksum}"
;;
*)
echo "Invalid command line flag $1" >&2
return 1
;;
esac
shift
done
echo "Downloading $base_name package"
if [ -f "${CACHE_ROOT}/${base_name}.tar.gz" ]; then
echo "${CACHE_ROOT}/${base_name}.tar.gz already exists, skipping download."
cp "${CACHE_ROOT}/${base_name}.tar.gz" .
rm "${CACHE_ROOT}/${base_name}.tar.gz"
if [ -f "${CACHE_ROOT}/${base_name}.tar.gz.sha256" ]; then
echo "Using the local sha256 from ${CACHE_ROOT}/${base_name}.tar.gz.sha256"
package_sha256="$(< "${CACHE_ROOT}/${base_name}.tar.gz.sha256")"
rm "${CACHE_ROOT}/${base_name}.tar.gz.sha256"
fi
else
curl --remote-name --silent --show-error --fail "${DOWNLOAD_URL}/${base_name}.tar.gz"
fi
if [ -n "$package_sha256" ]; then
echo "Verifying package integrity"
echo "$package_sha256 ${base_name}.tar.gz" | sha256sum --check - || exit "$?"
fi
tar --directory "${directory}" --extract --gunzip --file "${base_name}.tar.gz" --no-same-owner --strip-components=2
rm "${base_name}.tar.gz"
}

View File

@ -1,139 +0,0 @@
#!/bin/bash
#
# Library for managing files
# shellcheck disable=SC1091
# Load Generic Libraries
. /opt/bitnami/scripts/libos.sh
# Functions
########################
# Replace a regex-matching string in a file
# Arguments:
# $1 - filename
# $2 - match regex
# $3 - substitute regex
# $4 - use POSIX regex. Default: true
# Returns:
# None
#########################
replace_in_file() {
local filename="${1:?filename is required}"
local match_regex="${2:?match regex is required}"
local substitute_regex="${3:?substitute regex is required}"
local posix_regex=${4:-true}
local result
# We should avoid using 'sed in-place' substitutions
# 1) They are not compatible with files mounted from ConfigMap(s)
# 2) We found incompatibility issues with Debian10 and "in-place" substitutions
local -r del=$'\001' # Use a non-printable character as a 'sed' delimiter to avoid issues
if [[ $posix_regex = true ]]; then
result="$(sed -E "s${del}${match_regex}${del}${substitute_regex}${del}g" "$filename")"
else
result="$(sed "s${del}${match_regex}${del}${substitute_regex}${del}g" "$filename")"
fi
echo "$result" > "$filename"
}
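# Illustrative usage (hypothetical file and pattern):
#   replace_in_file "/opt/bitnami/solr/bin/solr.in.sh" "^#?SOLR_HEAP=.*" "SOLR_HEAP=\"2g\""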
########################
# Replace a regex-matching multiline string in a file
# Arguments:
# $1 - filename
# $2 - match regex
# $3 - substitute regex
# Returns:
# None
#########################
replace_in_file_multiline() {
local filename="${1:?filename is required}"
local match_regex="${2:?match regex is required}"
local substitute_regex="${3:?substitute regex is required}"
local result
local -r del=$'\001' # Use a non-printable character as a 'sed' delimiter to avoid issues
result="$(perl -pe "BEGIN{undef $/;} s${del}${match_regex}${del}${substitute_regex}${del}sg" "$filename")"
echo "$result" > "$filename"
}
########################
# Remove a line in a file based on a regex
# Arguments:
# $1 - filename
# $2 - match regex
# $3 - use POSIX regex. Default: true
# Returns:
# None
#########################
remove_in_file() {
local filename="${1:?filename is required}"
local match_regex="${2:?match regex is required}"
local posix_regex=${3:-true}
local result
# We should avoid using 'sed in-place' substitutions
# 1) They are not compatible with files mounted from ConfigMap(s)
# 2) We found incompatibility issues with Debian10 and "in-place" substitutions
if [[ $posix_regex = true ]]; then
result="$(sed -E "/$match_regex/d" "$filename")"
else
result="$(sed "/$match_regex/d" "$filename")"
fi
echo "$result" > "$filename"
}
########################
# Appends text after the last line matching a pattern
# Arguments:
# $1 - file
# $2 - match regex
# $3 - contents to add
# Returns:
# None
#########################
append_file_after_last_match() {
local file="${1:?missing file}"
local match_regex="${2:?missing pattern}"
local value="${3:?missing value}"
# We read the file in reverse, replace the first match (0,/pattern/s) and then reverse the results again
result="$(tac "$file" | sed -E "0,/($match_regex)/s||${value}\n\1|" | tac)"
echo "$result" > "$file"
}
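# Illustrative usage (hypothetical file, pattern and value), appending after the last match:
#   append_file_after_last_match "/etc/hosts" "^127\.0\.0\.1" "127.0.1.1 solr.local"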
########################
# Wait until certain entry is present in a log file
# Arguments:
# $1 - entry to look for
# $2 - log file
# $3 - max retries. Default: 12
# $4 - sleep between retries (in seconds). Default: 5
# Returns:
# Boolean
#########################
wait_for_log_entry() {
local -r entry="${1:-missing entry}"
local -r log_file="${2:-missing log file}"
local -r retries="${3:-12}"
local -r interval_time="${4:-5}"
local attempt=0
check_log_file_for_entry() {
if ! grep -qE "$entry" "$log_file"; then
debug "Entry \"${entry}\" still not present in ${log_file} (attempt $((++attempt))/${retries})"
return 1
fi
}
debug "Checking that ${log_file} log file contains entry \"${entry}\""
if retry_while check_log_file_for_entry "$retries" "$interval_time"; then
debug "Found entry \"${entry}\" in ${log_file}"
true
else
error "Could not find entry \"${entry}\" in ${log_file} after ${retries} retries"
debug_execute cat "$log_file"
return 1
fi
}
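# Illustrative usage (hypothetical entry and log path), retrying 24 times with 5-second pauses:
#   wait_for_log_entry "Started Solr server" "/opt/bitnami/solr/logs/solr.log" 24 5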

View File

@ -1,190 +0,0 @@
#!/bin/bash
#
# Library for file system actions
# shellcheck disable=SC1091
# Load Generic Libraries
. /opt/bitnami/scripts/liblog.sh
# Functions
########################
# Ensure a file/directory is owned (user and group) by the given user
# Arguments:
# $1 - filepath
# $2 - owner
# Returns:
# None
#########################
owned_by() {
local path="${1:?path is missing}"
local owner="${2:?owner is missing}"
local group="${3:-}"
if [[ -n $group ]]; then
chown "$owner":"$group" "$path"
else
chown "$owner":"$owner" "$path"
fi
}
########################
# Ensure a directory exists and, optionally, is owned by the given user
# Arguments:
# $1 - directory
# $2 - owner
# Returns:
# None
#########################
ensure_dir_exists() {
local dir="${1:?directory is missing}"
local owner_user="${2:-}"
local owner_group="${3:-}"
mkdir -p "${dir}"
if [[ -n $owner_user ]]; then
owned_by "$dir" "$owner_user" "$owner_group"
fi
}
########################
# Checks whether a directory is empty or not
# arguments:
# $1 - directory
# returns:
# boolean
#########################
is_dir_empty() {
local -r path="${1:?missing directory}"
# Calculate real path in order to avoid issues with symlinks
local -r dir="$(realpath "$path")"
if [[ ! -e "$dir" ]] || [[ -z "$(ls -A "$dir")" ]]; then
true
else
false
fi
}
########################
# Checks whether a mounted directory is empty or not
# arguments:
# $1 - directory
# returns:
# boolean
#########################
is_mounted_dir_empty() {
local dir="${1:?missing directory}"
if is_dir_empty "$dir" || find "$dir" -mindepth 1 -maxdepth 1 -not -name ".snapshot" -not -name "lost+found" -exec false {} +; then
true
else
false
fi
}
########################
# Checks whether a file can be written to or not
# arguments:
# $1 - file
# returns:
# boolean
#########################
is_file_writable() {
local file="${1:?missing file}"
local dir
dir="$(dirname "$file")"
if [[ (-f "$file" && -w "$file") || (! -f "$file" && -d "$dir" && -w "$dir") ]]; then
true
else
false
fi
}
########################
# Relativize a path
# arguments:
# $1 - path
# $2 - base
# returns:
# None
#########################
relativize() {
local -r path="${1:?missing path}"
local -r base="${2:?missing base}"
pushd "$base" >/dev/null || exit
realpath -q --no-symlinks --relative-base="$base" "$path" | sed -e 's|^/$|.|' -e 's|^/||'
popd >/dev/null || exit
}
########################
# Configure permissions and ownership recursively
# Globals:
# None
# Arguments:
# $1 - paths (as a string).
# Flags:
#   -f|--file-mode - mode for files.
#   -d|--dir-mode - mode for directories.
# -u|--user - user
# -g|--group - group
# Returns:
# None
#########################
configure_permissions_ownership() {
local -r paths="${1:?paths is missing}"
local dir_mode=""
local file_mode=""
local user=""
local group=""
# Validate arguments
shift 1
while [ "$#" -gt 0 ]; do
case "$1" in
-f | --file-mode)
shift
file_mode="${1:?missing mode for files}"
;;
-d | --dir-mode)
shift
dir_mode="${1:?missing mode for directories}"
;;
-u | --user)
shift
user="${1:?missing user}"
;;
-g | --group)
shift
group="${1:?missing group}"
;;
*)
echo "Invalid command line flag $1" >&2
return 1
;;
esac
shift
done
read -r -a filepaths <<<"$paths"
for p in "${filepaths[@]}"; do
if [[ -e "$p" ]]; then
if [[ -n $dir_mode ]]; then
find -L "$p" -type d -exec chmod "$dir_mode" {} \;
fi
if [[ -n $file_mode ]]; then
find -L "$p" -type f -exec chmod "$file_mode" {} \;
fi
if [[ -n $user ]] && [[ -n $group ]]; then
chown -LR "$user":"$group" "$p"
elif [[ -n $user ]] && [[ -z $group ]]; then
chown -LR "$user" "$p"
elif [[ -z $user ]] && [[ -n $group ]]; then
chgrp -LR "$group" "$p"
fi
else
stderr_print "$p does not exist"
fi
done
}

View File

@ -1,16 +0,0 @@
#!/bin/bash
#
# Library to use for scripts expected to be used as Kubernetes lifecycle hooks
# shellcheck disable=SC1091
# Load generic libraries
. /opt/bitnami/scripts/liblog.sh
. /opt/bitnami/scripts/libos.sh
# Override functions that log to stdout/stderr of the current process, so they print to process 1
for function_to_override in stderr_print debug_execute; do
# Output is sent to output of process 1 and thus end up in the container log
# The hook output in general isn't saved
eval "$(declare -f "$function_to_override") >/proc/1/fd/1 2>/proc/1/fd/2"
done

View File

@ -1,112 +0,0 @@
#!/bin/bash
#
# Library for logging functions
# Constants
RESET='\033[0m'
RED='\033[38;5;1m'
GREEN='\033[38;5;2m'
YELLOW='\033[38;5;3m'
MAGENTA='\033[38;5;5m'
CYAN='\033[38;5;6m'
# Functions
########################
# Print to STDERR
# Arguments:
# Message to print
# Returns:
# None
#########################
stderr_print() {
# 'is_boolean_yes' is defined in libvalidations.sh, but depends on this file so we cannot source it
local bool="${BITNAMI_QUIET:-false}"
# comparison is performed without regard to the case of alphabetic characters
shopt -s nocasematch
if ! [[ "$bool" = 1 || "$bool" =~ ^(yes|true)$ ]]; then
printf "%b\\n" "${*}" >&2
fi
}
########################
# Log message
# Arguments:
# Message to log
# Returns:
# None
#########################
log() {
stderr_print "${CYAN}${MODULE:-} ${MAGENTA}$(date "+%T.%2N ")${RESET}${*}"
}
########################
# Log an 'info' message
# Arguments:
# Message to log
# Returns:
# None
#########################
info() {
log "${GREEN}INFO ${RESET} ==> ${*}"
}
########################
# Log a 'warn' message
# Arguments:
# Message to log
# Returns:
# None
#########################
warn() {
log "${YELLOW}WARN ${RESET} ==> ${*}"
}
########################
# Log an 'error' message
# Arguments:
# Message to log
# Returns:
# None
#########################
error() {
log "${RED}ERROR${RESET} ==> ${*}"
}
########################
# Log a 'debug' message
# Globals:
# BITNAMI_DEBUG
# Arguments:
# None
# Returns:
# None
#########################
debug() {
# 'is_boolean_yes' is defined in libvalidations.sh, but depends on this file so we cannot source it
local bool="${BITNAMI_DEBUG:-false}"
# comparison is performed without regard to the case of alphabetic characters
shopt -s nocasematch
if [[ "$bool" = 1 || "$bool" =~ ^(yes|true)$ ]]; then
log "${MAGENTA}DEBUG${RESET} ==> ${*}"
fi
}
########################
# Indent a string
# Arguments:
# $1 - string
# $2 - number of indentation characters (default: 4)
# $3 - indentation character (default: " ")
# Returns:
# None
#########################
indent() {
local string="${1:-}"
local num="${2:?missing num}"
local char="${3:-" "}"
# Build the indentation unit string
local indent_unit=""
for ((i = 0; i < num; i++)); do
indent_unit="${indent_unit}${char}"
done
# shellcheck disable=SC2001
# Complex regex, see https://github.com/koalaman/shellcheck/wiki/SC2001#exceptions
echo "$string" | sed "s/^/${indent_unit}/"
}

View File

@ -1,163 +0,0 @@
#!/bin/bash
#
# Library for network functions
# shellcheck disable=SC1091
# Load Generic Libraries
. /opt/bitnami/scripts/liblog.sh
# Functions
########################
# Resolve IP address for a host/domain (i.e. DNS lookup)
# Arguments:
# $1 - Hostname to resolve
# $2 - IP address version (v4, v6), leave empty for resolving to any version
# Returns:
# IP
#########################
dns_lookup() {
local host="${1:?host is missing}"
local ip_version="${2:-}"
getent "ahosts${ip_version}" "$host" | awk '/STREAM/ {print $1 }' | head -n 1
}
#########################
# Wait for a hostname and return the IP
# Arguments:
# $1 - hostname
# $2 - number of retries
# $3 - seconds to wait between retries
# Returns:
# - IP address that corresponds to the hostname
#########################
wait_for_dns_lookup() {
local hostname="${1:?hostname is missing}"
local retries="${2:-5}"
local seconds="${3:-1}"
check_host() {
if [[ $(dns_lookup "$hostname") == "" ]]; then
false
else
true
fi
}
# Wait for the host to be ready
retry_while "check_host ${hostname}" "$retries" "$seconds"
dns_lookup "$hostname"
}
########################
# Get machine's IP
# Arguments:
# None
# Returns:
# Machine IP
#########################
get_machine_ip() {
local -a ip_addresses
local hostname
hostname="$(hostname)"
read -r -a ip_addresses <<< "$(dns_lookup "$hostname" | xargs echo)"
if [[ "${#ip_addresses[@]}" -gt 1 ]]; then
warn "Found more than one IP address associated to hostname ${hostname}: ${ip_addresses[*]}, will use ${ip_addresses[0]}"
elif [[ "${#ip_addresses[@]}" -lt 1 ]]; then
error "Could not find any IP address associated to hostname ${hostname}"
exit 1
fi
echo "${ip_addresses[0]}"
}
########################
# Check if the provided argument is a resolved hostname
# Arguments:
# $1 - Value to check
# Returns:
# Boolean
#########################
is_hostname_resolved() {
local -r host="${1:?missing value}"
if [[ -n "$(dns_lookup "$host")" ]]; then
true
else
false
fi
}
########################
# Parse URL
# Globals:
# None
# Arguments:
# $1 - uri - String
# $2 - component to obtain. Valid options (scheme, authority, userinfo, host, port, path, query or fragment) - String
# Returns:
# String
parse_uri() {
local uri="${1:?uri is missing}"
local component="${2:?component is missing}"
# Solution based on https://tools.ietf.org/html/rfc3986#appendix-B with
# additional sub-expressions to split authority into userinfo, host and port
# Credits to Patryk Obara (see https://stackoverflow.com/a/45977232/6694969)
local -r URI_REGEX='^(([^:/?#]+):)?(//((([^@/?#]+)@)?([^:/?#]+)(:([0-9]+))?))?(/([^?#]*))?(\?([^#]*))?(#(.*))?'
# || | ||| | | | | | | | | |
# |2 scheme | ||6 userinfo 7 host | 9 port | 11 rpath | 13 query | 15 fragment
# 1 scheme: | |5 userinfo@ 8 :... 10 path 12 ?... 14 #...
# | 4 authority
# 3 //...
local index=0
case "$component" in
scheme)
index=2
;;
authority)
index=4
;;
userinfo)
index=6
;;
host)
index=7
;;
port)
index=9
;;
path)
index=10
;;
query)
index=13
;;
fragment)
index=14
;;
*)
stderr_print "unrecognized component $component"
return 1
;;
esac
[[ "$uri" =~ $URI_REGEX ]] && echo "${BASH_REMATCH[${index}]}"
}
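# Illustrative examples (hypothetical URI):
#   parse_uri "http://user@solr.example.com:8983/solr/admin" "host"  # prints "solr.example.com"
#   parse_uri "http://user@solr.example.com:8983/solr/admin" "port"  # prints "8983"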
########################
# Wait for a HTTP connection to succeed
# Globals:
# *
# Arguments:
# $1 - URL to wait for
# $2 - Maximum amount of retries (optional)
# $3 - Time between retries (optional)
# Returns:
# true if the HTTP connection succeeded, false otherwise
#########################
wait_for_http_connection() {
local url="${1:?missing url}"
local retries="${2:-}"
local sleep_time="${3:-}"
if ! retry_while "debug_execute curl --silent ${url}" "$retries" "$sleep_time"; then
error "Could not connect to ${url}"
return 1
fi
}

View File

@ -1,466 +0,0 @@
#!/bin/bash
#
# Library for operating system actions
# shellcheck disable=SC1091
# Load Generic Libraries
. /opt/bitnami/scripts/liblog.sh
. /opt/bitnami/scripts/libfs.sh
. /opt/bitnami/scripts/libvalidations.sh
# Functions
########################
# Check if an user exists in the system
# Arguments:
# $1 - user
# Returns:
# Boolean
#########################
user_exists() {
local user="${1:?user is missing}"
id "$user" >/dev/null 2>&1
}
########################
# Check if a group exists in the system
# Arguments:
# $1 - group
# Returns:
# Boolean
#########################
group_exists() {
local group="${1:?group is missing}"
getent group "$group" >/dev/null 2>&1
}
########################
# Create a group in the system if it does not exist already
# Arguments:
# $1 - group
# Flags:
# -i|--gid - the ID for the new group
# -s|--system - Whether to create new user as system user (uid <= 999)
# Returns:
# None
#########################
ensure_group_exists() {
local group="${1:?group is missing}"
local gid=""
local is_system_user=false
# Validate arguments
shift 1
while [ "$#" -gt 0 ]; do
case "$1" in
-i | --gid)
shift
gid="${1:?missing gid}"
;;
-s | --system)
is_system_user=true
;;
*)
echo "Invalid command line flag $1" >&2
return 1
;;
esac
shift
done
if ! group_exists "$group"; then
local -a args=("$group")
if [[ -n "$gid" ]]; then
if group_exists "$gid"; then
error "The GID $gid is already in use." >&2
return 1
fi
args+=("--gid" "$gid")
fi
$is_system_user && args+=("--system")
groupadd "${args[@]}" >/dev/null 2>&1
fi
}
########################
# Create an user in the system if it does not exist already
# Arguments:
# $1 - user
# Flags:
# -i|--uid - the ID for the new user
# -g|--group - the group the new user should belong to
# -a|--append-groups - comma-separated list of supplemental groups to append to the new user
# -h|--home - the home directory for the new user
# -s|--system - whether to create new user as system user (uid <= 999)
# Returns:
# None
#########################
ensure_user_exists() {
local user="${1:?user is missing}"
local uid=""
local group=""
local append_groups=""
local home=""
local is_system_user=false
# Validate arguments
shift 1
while [ "$#" -gt 0 ]; do
case "$1" in
-i | --uid)
shift
uid="${1:?missing uid}"
;;
-g | --group)
shift
group="${1:?missing group}"
;;
-a | --append-groups)
shift
append_groups="${1:?missing append_groups}"
;;
-h | --home)
shift
home="${1:?missing home directory}"
;;
-s | --system)
is_system_user=true
;;
*)
echo "Invalid command line flag $1" >&2
return 1
;;
esac
shift
done
if ! user_exists "$user"; then
local -a user_args=("-N" "$user")
if [[ -n "$uid" ]]; then
if user_exists "$uid"; then
error "The UID $uid is already in use."
return 1
fi
user_args+=("--uid" "$uid")
else
$is_system_user && user_args+=("--system")
fi
useradd "${user_args[@]}" >/dev/null 2>&1
fi
if [[ -n "$group" ]]; then
local -a group_args=("$group")
$is_system_user && group_args+=("--system")
ensure_group_exists "${group_args[@]}"
usermod -g "$group" "$user" >/dev/null 2>&1
fi
if [[ -n "$append_groups" ]]; then
local -a groups
read -ra groups <<<"$(tr ',;' ' ' <<<"$append_groups")"
for group in "${groups[@]}"; do
ensure_group_exists "$group"
usermod -aG "$group" "$user" >/dev/null 2>&1
done
fi
if [[ -n "$home" ]]; then
mkdir -p "$home"
usermod -d "$home" "$user" >/dev/null 2>&1
configure_permissions_ownership "$home" -d "775" -f "664" -u "$user" -g "$group"
fi
}
########################
# Check if the script is currently running as root
# Arguments:
#   None
# Returns:
# Boolean
#########################
am_i_root() {
if [[ "$(id -u)" = "0" ]]; then
true
else
false
fi
}
########################
# Print OS metadata
# Arguments:
# $1 - Flag name
# Flags:
# --id - Distro ID
# --version - Distro version
# --branch - Distro branch
# --codename - Distro codename
# Returns:
# String
#########################
get_os_metadata() {
local -r flag_name="${1:?missing flag}"
# Helper function
get_os_release_metadata() {
local -r env_name="${1:?missing environment variable name}"
(
. /etc/os-release
echo "${!env_name}"
)
}
case "$flag_name" in
--id)
get_os_release_metadata ID
;;
--version)
get_os_release_metadata VERSION_ID
;;
--branch)
get_os_release_metadata VERSION_ID | sed 's/\..*//'
;;
--codename)
get_os_release_metadata VERSION_CODENAME
;;
*)
error "Unknown flag ${flag_name}"
return 1
;;
esac
}
########################
# Get total memory available
# Arguments:
# None
# Returns:
#   Memory in megabytes
#########################
get_total_memory() {
echo $(($(grep MemTotal /proc/meminfo | awk '{print $2}') / 1024))
}
########################
# Get machine size depending on specified memory
# Globals:
# None
# Arguments:
# None
# Flags:
# --memory - memory size (optional)
# Returns:
# Detected instance size
#########################
get_machine_size() {
local memory=""
# Validate arguments
while [[ "$#" -gt 0 ]]; do
case "$1" in
--memory)
shift
memory="${1:?missing memory}"
;;
*)
echo "Invalid command line flag $1" >&2
return 1
;;
esac
shift
done
if [[ -z "$memory" ]]; then
debug "Memory was not specified, detecting available memory automatically"
memory="$(get_total_memory)"
fi
sanitized_memory=$(convert_to_mb "$memory")
if [[ "$sanitized_memory" -gt 26000 ]]; then
echo 2xlarge
elif [[ "$sanitized_memory" -gt 13000 ]]; then
echo xlarge
elif [[ "$sanitized_memory" -gt 6000 ]]; then
echo large
elif [[ "$sanitized_memory" -gt 3000 ]]; then
echo medium
elif [[ "$sanitized_memory" -gt 1500 ]]; then
echo small
else
echo micro
fi
}
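# Worked example (illustrative): "get_machine_size --memory 4G" converts to 4096 MB, which is
# greater than 3000 but not greater than 6000, so it prints "medium".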
########################
# Get the list of supported machine sizes
# Globals:
#   None
# Arguments:
#   None
# Returns:
#   List of supported machine sizes
#########################
get_supported_machine_sizes() {
echo micro small medium large xlarge 2xlarge
}
########################
# Convert memory size from string to amount of megabytes (i.e. 2G -> 2048)
# Globals:
# None
# Arguments:
# $1 - memory size
# Returns:
# Result of the conversion
#########################
convert_to_mb() {
local amount="${1:-}"
if [[ $amount =~ ^([0-9]+)(m|M|g|G) ]]; then
size="${BASH_REMATCH[1]}"
unit="${BASH_REMATCH[2]}"
if [[ "$unit" = "g" || "$unit" = "G" ]]; then
amount="$((size * 1024))"
else
amount="$size"
fi
fi
echo "$amount"
}
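# Worked examples: convert_to_mb "2G" prints 2048, convert_to_mb "512m" prints 512, and a plain
# number such as "1024" is returned unchanged.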
#########################
# Redirects output to /dev/null if debug mode is disabled
# Globals:
# BITNAMI_DEBUG
# Arguments:
# $@ - Command to execute
# Returns:
# None
#########################
debug_execute() {
if is_boolean_yes "${BITNAMI_DEBUG:-false}"; then
"$@"
else
"$@" >/dev/null 2>&1
fi
}
########################
# Retries a command a given number of times
# Arguments:
# $1 - cmd (as a string)
# $2 - max retries. Default: 12
# $3 - sleep between retries (in seconds). Default: 5
# Returns:
# Boolean
#########################
retry_while() {
local cmd="${1:?cmd is missing}"
local retries="${2:-12}"
local sleep_time="${3:-5}"
local return_value=1
read -r -a command <<<"$cmd"
for ((i = 1; i <= retries; i += 1)); do
"${command[@]}" && return_value=0 && break
sleep "$sleep_time"
done
return $return_value
}
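# Illustrative usage (hypothetical command): retry the check up to 12 times, sleeping 5 seconds:
#   retry_while "nc -z localhost 8983" 12 5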
########################
# Generate a random string
# Flags:
#   -t|--type - String type (ascii, alphanumeric, numeric), defaults to ascii
#   -c|--count - Number of characters, defaults to 32
# Arguments:
#   None
# Returns:
#   String
#########################
generate_random_string() {
local type="ascii"
local count="32"
local filter
local result
# Validate arguments
while [[ "$#" -gt 0 ]]; do
case "$1" in
-t | --type)
shift
type="$1"
;;
-c | --count)
shift
count="$1"
;;
*)
echo "Invalid command line flag $1" >&2
return 1
;;
esac
shift
done
# Validate type
case "$type" in
ascii)
filter="[:print:]"
;;
alphanumeric)
filter="a-zA-Z0-9"
;;
numeric)
filter="0-9"
;;
*)
echo "Invalid type ${type}" >&2
return 1
;;
esac
# Obtain count + 10 lines from /dev/urandom to ensure that the resulting string has the expected size
# Note there is a very small chance of strings starting with EOL character
# Therefore, the higher amount of lines read, this will happen less frequently
result="$(head -n "$((count + 10))" /dev/urandom | tr -dc "$filter" | head -c "$count")"
echo "$result"
}
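# Illustrative usage: generate a 16-character alphanumeric string:
#   generate_random_string -t alphanumeric -c 16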
########################
# Create md5 hash from a string
# Arguments:
# $1 - string
# Returns:
# md5 hash - string
#########################
generate_md5_hash() {
local -r str="${1:?missing input string}"
echo -n "$str" | md5sum | awk '{print $1}'
}
########################
# Create sha hash from a string
# Arguments:
#   $1 - string
#   $2 - algorithm - 1 (default), 224, 256, 384, 512
# Returns:
#   sha hash - string
#########################
generate_sha_hash() {
local -r str="${1:?missing input string}"
local -r algorithm="${2:-1}"
echo -n "$str" | "sha${algorithm}sum" | awk '{print $1}'
}
########################
# Converts a string to its hexadecimal representation
# Arguments:
# $1 - string
# Returns:
# hexadecimal representation of the string
#########################
convert_to_hex() {
local -r str=${1:?missing input string}
local -i iterator
local char
for ((iterator = 0; iterator < ${#str}; iterator++)); do
char=${str:iterator:1}
printf '%x' "'${char}"
done
}

View File

@ -1,122 +0,0 @@
#!/bin/bash
#
# Bitnami persistence library
# Used for bringing persistence capabilities to applications that don't have clear separation of data and logic
# shellcheck disable=SC1091
# Load Generic Libraries
. /opt/bitnami/scripts/libfs.sh
. /opt/bitnami/scripts/libos.sh
. /opt/bitnami/scripts/liblog.sh
. /opt/bitnami/scripts/libversion.sh
# Functions
########################
# Persist an application directory
# Globals:
# BITNAMI_ROOT_DIR
# BITNAMI_VOLUME_DIR
# Arguments:
# $1 - App folder name
# $2 - List of app files to persist
# Returns:
# true if all steps succeeded, false otherwise
#########################
persist_app() {
local -r app="${1:?missing app}"
local -a files_to_persist
read -r -a files_to_persist <<< "$(tr ',;:' ' ' <<< "$2")"
local -r install_dir="${BITNAMI_ROOT_DIR}/${app}"
local -r persist_dir="${BITNAMI_VOLUME_DIR}/${app}"
# Persist the individual files
if [[ "${#files_to_persist[@]}" -le 0 ]]; then
warn "No files are configured to be persisted"
return
fi
pushd "$install_dir" >/dev/null || exit
local file_to_persist_relative file_to_persist_destination file_to_persist_destination_folder
local -r tmp_file="/tmp/perms.acl"
for file_to_persist in "${files_to_persist[@]}"; do
if [[ ! -f "$file_to_persist" && ! -d "$file_to_persist" ]]; then
error "Cannot persist '${file_to_persist}' because it does not exist"
return 1
fi
file_to_persist_relative="$(relativize "$file_to_persist" "$install_dir")"
file_to_persist_destination="${persist_dir}/${file_to_persist_relative}"
file_to_persist_destination_folder="$(dirname "$file_to_persist_destination")"
# Get original permissions for existing files, which will be applied later
# Exclude the root directory with 'sed', to avoid issues when copying the entirety of it to a volume
getfacl -R "$file_to_persist_relative" | sed -E '/# file: (\..+|[^.])/,$!d' > "$tmp_file"
# Copy directories to the volume
ensure_dir_exists "$file_to_persist_destination_folder"
cp -Lr --preserve=links "$file_to_persist_relative" "$file_to_persist_destination_folder"
# Restore permissions
pushd "$persist_dir" >/dev/null || exit
if am_i_root; then
setfacl --restore="$tmp_file"
else
# When running as non-root, don't change ownership
setfacl --restore=<(grep -E -v '^# (owner|group):' "$tmp_file")
fi
popd >/dev/null || exit
done
popd >/dev/null || exit
rm -f "$tmp_file"
# Install the persisted files into the installation directory, via symlinks
restore_persisted_app "$@"
}
########################
# Restore a persisted application directory
# Globals:
# BITNAMI_ROOT_DIR
# BITNAMI_VOLUME_DIR
# FORCE_MAJOR_UPGRADE
# Arguments:
# $1 - App folder name
# $2 - List of app files to restore
# Returns:
# true if all steps succeeded, false otherwise
#########################
restore_persisted_app() {
local -r app="${1:?missing app}"
local -a files_to_restore
read -r -a files_to_restore <<< "$(tr ',;:' ' ' <<< "$2")"
local -r install_dir="${BITNAMI_ROOT_DIR}/${app}"
local -r persist_dir="${BITNAMI_VOLUME_DIR}/${app}"
# Restore the individual persisted files
if [[ "${#files_to_restore[@]}" -le 0 ]]; then
warn "No persisted files are configured to be restored"
return
fi
local file_to_restore_relative file_to_restore_origin file_to_restore_destination
for file_to_restore in "${files_to_restore[@]}"; do
file_to_restore_relative="$(relativize "$file_to_restore" "$install_dir")"
# We use 'realpath --no-symlinks' to ensure that the case of '.' is covered and the directory is removed
file_to_restore_origin="$(realpath --no-symlinks "${install_dir}/${file_to_restore_relative}")"
file_to_restore_destination="$(realpath --no-symlinks "${persist_dir}/${file_to_restore_relative}")"
rm -rf "$file_to_restore_origin"
ln -sfn "$file_to_restore_destination" "$file_to_restore_origin"
done
}
########################
# Check if an application directory was already persisted
# Globals:
# BITNAMI_VOLUME_DIR
# Arguments:
# $1 - App folder name
# Returns:
# true if all steps succeeded, false otherwise
#########################
is_app_initialized() {
local -r app="${1:?missing app}"
local -r persist_dir="${BITNAMI_VOLUME_DIR}/${app}"
if ! is_mounted_dir_empty "$persist_dir"; then
true
else
false
fi
}
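# Example (illustrative): only run first-boot logic when nothing has been persisted yet:
#   if ! is_app_initialized "myapp"; then persist_app "myapp" "conf,data"; fi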

View File

@ -1,273 +0,0 @@
#!/bin/bash
#
# Library for managing services
# shellcheck disable=SC1091
# Load Generic Libraries
. /opt/bitnami/scripts/libvalidations.sh
. /opt/bitnami/scripts/liblog.sh
# Functions
########################
# Read the provided pid file and returns a PID
# Arguments:
# $1 - Pid file
# Returns:
# PID
#########################
get_pid_from_file() {
local pid_file="${1:?pid file is missing}"
if [[ -f "$pid_file" ]]; then
if [[ -n "$(< "$pid_file")" ]] && [[ "$(< "$pid_file")" -gt 0 ]]; then
echo "$(< "$pid_file")"
fi
fi
}
########################
# Check if a provided PID corresponds to a running service
# Arguments:
# $1 - PID
# Returns:
# Boolean
#########################
is_service_running() {
local pid="${1:?pid is missing}"
kill -0 "$pid" 2>/dev/null
}
########################
# Stop a service by sending a termination signal to its pid
# Arguments:
# $1 - Pid file
# $2 - Signal number (optional)
# Returns:
# None
#########################
stop_service_using_pid() {
local pid_file="${1:?pid file is missing}"
local signal="${2:-}"
local pid
pid="$(get_pid_from_file "$pid_file")"
[[ -z "$pid" ]] || ! is_service_running "$pid" && return
if [[ -n "$signal" ]]; then
kill "-${signal}" "$pid"
else
kill "$pid"
fi
local counter=10
while [[ "$counter" -ne 0 ]] && is_service_running "$pid"; do
sleep 1
counter=$((counter - 1))
done
}
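# Example (illustrative; the pid file path is hypothetical):
#   stop_service_using_pid "/opt/bitnami/myapp/tmp/myapp.pid" 15
#   # Sends SIGTERM (15) and waits up to 10 seconds for the process to exit.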
########################
# Start cron daemon
# Arguments:
# None
# Returns:
# true if started correctly, false otherwise
#########################
cron_start() {
if [[ -x "/usr/sbin/cron" ]]; then
/usr/sbin/cron
elif [[ -x "/usr/sbin/crond" ]]; then
/usr/sbin/crond
else
false
fi
}
########################
# Generate a cron configuration file for a given service
# Arguments:
# $1 - Service name
# $2 - Command
# Flags:
# --run-as - User to run as (default: root)
# --schedule - Cron schedule configuration (default: * * * * *)
# Returns:
# None
#########################
generate_cron_conf() {
local service_name="${1:?service name is missing}"
local cmd="${2:?command is missing}"
local run_as="root"
local schedule="* * * * *"
local clean="true"
local clean="true"
# Parse optional CLI flags
shift 2
while [[ "$#" -gt 0 ]]; do
case "$1" in
--run-as)
shift
run_as="$1"
;;
--schedule)
shift
schedule="$1"
;;
--no-clean)
clean="false"
;;
*)
echo "Invalid command line flag ${1}" >&2
return 1
;;
esac
shift
done
mkdir -p /etc/cron.d
if "$clean"; then
echo "${schedule} ${run_as} ${cmd}" > /etc/cron.d/"$service_name"
else
echo "${schedule} ${run_as} ${cmd}" >> /etc/cron.d/"$service_name"
fi
}
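# Example (illustrative; service name, user and command are hypothetical):
#   generate_cron_conf "myapp" "/opt/bitnami/scripts/myapp/cleanup.sh" --run-as myuser --schedule "0 3 * * *"
#   # Writes "0 3 * * * myuser /opt/bitnami/scripts/myapp/cleanup.sh" to /etc/cron.d/myapp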
########################
# Remove a cron configuration file for a given service
# Arguments:
# $1 - Service name
# Returns:
# None
#########################
remove_cron_conf() {
local service_name="${1:?service name is missing}"
local cron_conf_dir="/etc/monit/conf.d"
rm -f "${cron_conf_dir}/${service_name}"
}
########################
# Generate a monit configuration file for a given service
# Arguments:
# $1 - Service name
# $2 - Pid file
# $3 - Start command
# $4 - Stop command
# Flags:
# --disable - Whether to disable the monit configuration
# Returns:
# None
#########################
generate_monit_conf() {
local service_name="${1:?service name is missing}"
local pid_file="${2:?pid file is missing}"
local start_command="${3:?start command is missing}"
local stop_command="${4:?stop command is missing}"
local monit_conf_dir="/etc/monit/conf.d"
local disabled="no"
# Parse optional CLI flags
shift 4
while [[ "$#" -gt 0 ]]; do
case "$1" in
--disable)
disabled="yes"
;;
*)
echo "Invalid command line flag ${1}" >&2
return 1
;;
esac
shift
done
is_boolean_yes "$disabled" && conf_suffix=".disabled"
mkdir -p "$monit_conf_dir"
cat >"${monit_conf_dir}/${service_name}.conf${conf_suffix:-}" <<EOF
check process ${service_name}
with pidfile "${pid_file}"
start program = "${start_command}" with timeout 90 seconds
stop program = "${stop_command}" with timeout 90 seconds
EOF
}
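# Example (illustrative; the service name and paths are hypothetical):
#   generate_monit_conf "myapp" "/tmp/myapp.pid" "/opt/bitnami/scripts/myapp/start.sh" "/opt/bitnami/scripts/myapp/stop.sh"
#   # Creates /etc/monit/conf.d/myapp.conf; adding --disable renders it as myapp.conf.disabled instead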
########################
# Remove a monit configuration file for a given service
# Arguments:
# $1 - Service name
# Returns:
# None
#########################
remove_monit_conf() {
local service_name="${1:?service name is missing}"
local monit_conf_dir="/etc/monit/conf.d"
rm -f "${monit_conf_dir}/${service_name}.conf"
}
########################
# Generate a logrotate configuration file
# Arguments:
# $1 - Service name
# $2 - Log files pattern
# Flags:
# --period - Period
# --rotations - Number of rotations to store
# --extra - Extra options (Optional)
# Returns:
# None
#########################
generate_logrotate_conf() {
local service_name="${1:?service name is missing}"
local log_path="${2:?log path is missing}"
local period="weekly"
local rotations="150"
local extra=""
local logrotate_conf_dir="/etc/logrotate.d"
local var_name
# Parse optional CLI flags
shift 2
while [[ "$#" -gt 0 ]]; do
case "$1" in
--period|--rotations|--extra)
var_name="$(echo "$1" | sed -e "s/^--//" -e "s/-/_/g")"
shift
declare "$var_name"="${1:?"$var_name" is missing}"
;;
*)
echo "Invalid command line flag ${1}" >&2
return 1
;;
esac
shift
done
mkdir -p "$logrotate_conf_dir"
cat <<EOF | sed '/^\s*$/d' >"${logrotate_conf_dir}/${service_name}"
${log_path} {
${period}
rotate ${rotations}
dateext
compress
copytruncate
missingok
$(indent "$extra" 2)
}
EOF
}
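# Example (illustrative; the log path is hypothetical):
#   generate_logrotate_conf "myapp" "/opt/bitnami/myapp/logs/*.log" --period daily --rotations 7
#   # Creates /etc/logrotate.d/myapp, rotating matching logs daily and keeping 7 rotations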
########################
# Remove a logrotate configuration file
# Arguments:
# $1 - Service name
# Returns:
# None
#########################
remove_logrotate_conf() {
local service_name="${1:?service name is missing}"
local logrotate_conf_dir="/etc/logrotate.d"
rm -f "${logrotate_conf_dir}/${service_name}"
}

View File

@ -1,264 +0,0 @@
#!/bin/bash
#
# Validation functions library
# shellcheck disable=SC1091
# Load Generic Libraries
. /opt/bitnami/scripts/liblog.sh
# Functions
########################
# Check if the provided argument is an integer
# Arguments:
# $1 - Value to check
# Returns:
# Boolean
#########################
is_int() {
local -r int="${1:?missing value}"
if [[ "$int" =~ ^-?[0-9]+ ]]; then
true
else
false
fi
}
########################
# Check if the provided argument is a positive integer (zero included)
# Arguments:
# $1 - Value to check
# Returns:
# Boolean
#########################
is_positive_int() {
local -r int="${1:?missing value}"
if is_int "$int" && (( "${int}" >= 0 )); then
true
else
false
fi
}
########################
# Check if the provided argument is a boolean or is the string 'yes/true'
# Arguments:
# $1 - Value to check
# Returns:
# Boolean
#########################
is_boolean_yes() {
local -r bool="${1:-}"
# comparison is performed without regard to the case of alphabetic characters
shopt -s nocasematch
if [[ "$bool" = 1 || "$bool" =~ ^(yes|true)$ ]]; then
true
else
false
fi
}
########################
# Check if the provided argument is a boolean yes/no value
# Arguments:
# $1 - Value to check
# Returns:
# Boolean
#########################
is_yes_no_value() {
local -r bool="${1:-}"
if [[ "$bool" =~ ^(yes|no)$ ]]; then
true
else
false
fi
}
########################
# Check if the provided argument is a boolean true/false value
# Arguments:
# $1 - Value to check
# Returns:
# Boolean
#########################
is_true_false_value() {
local -r bool="${1:-}"
if [[ "$bool" =~ ^(true|false)$ ]]; then
true
else
false
fi
}
########################
# Check if the provided argument is a boolean 1/0 value
# Arguments:
# $1 - Value to check
# Returns:
# Boolean
#########################
is_1_0_value() {
local -r bool="${1:-}"
if [[ "$bool" =~ ^[10]$ ]]; then
true
else
false
fi
}
########################
# Check if the provided argument is an empty string or not defined
# Arguments:
# $1 - Value to check
# Returns:
# Boolean
#########################
is_empty_value() {
local -r val="${1:-}"
if [[ -z "$val" ]]; then
true
else
false
fi
}
########################
# Validate if the provided argument is a valid port
# Arguments:
# $1 - Port to validate
# Returns:
# Boolean and error message
#########################
validate_port() {
local value
local unprivileged=0
# Parse flags
while [[ "$#" -gt 0 ]]; do
case "$1" in
-unprivileged)
unprivileged=1
;;
--)
shift
break
;;
-*)
stderr_print "unrecognized flag $1"
return 1
;;
*)
break
;;
esac
shift
done
if [[ "$#" -gt 1 ]]; then
echo "too many arguments provided"
return 2
elif [[ "$#" -eq 0 ]]; then
stderr_print "missing port argument"
return 1
else
value=$1
fi
if [[ -z "$value" ]]; then
echo "the value is empty"
return 1
else
if ! is_int "$value"; then
echo "value is not an integer"
return 2
elif [[ "$value" -lt 0 ]]; then
echo "negative value provided"
return 2
elif [[ "$value" -gt 65535 ]]; then
echo "requested port is greater than 65535"
return 2
elif [[ "$unprivileged" = 1 && "$value" -lt 1024 ]]; then
echo "privileged port requested"
return 3
fi
fi
}
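# Example (illustrative, using the Solr port variable as input): capture the error message, if any:
#   if err="$(validate_port -unprivileged "$SOLR_PORT_NUMBER")"; then echo "port ok"; else echo "invalid port: $err"; fi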
########################
# Validate if the provided argument is a valid IPv4 address
# Arguments:
# $1 - IP to validate
# Returns:
# Boolean
#########################
validate_ipv4() {
local ip="${1:?ip is missing}"
local stat=1
if [[ $ip =~ ^[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}$ ]]; then
read -r -a ip_array <<< "$(tr '.' ' ' <<< "$ip")"
[[ ${ip_array[0]} -le 255 && ${ip_array[1]} -le 255 \
&& ${ip_array[2]} -le 255 && ${ip_array[3]} -le 255 ]]
stat=$?
fi
return $stat
}
########################
# Validate a string format
# Arguments:
# $1 - String to validate
# Returns:
# Boolean
#########################
validate_string() {
local string
local min_length=-1
local max_length=-1
# Parse flags
while [ "$#" -gt 0 ]; do
case "$1" in
-min-length)
shift
min_length=${1:-}
;;
-max-length)
shift
max_length=${1:-}
;;
--)
shift
break
;;
-*)
stderr_print "unrecognized flag $1"
return 1
;;
*)
break
;;
esac
shift
done
if [ "$#" -gt 1 ]; then
stderr_print "too many arguments provided"
return 2
elif [ "$#" -eq 0 ]; then
stderr_print "missing string"
return 1
else
string=$1
fi
if [[ "$min_length" -ge 0 ]] && [[ "${#string}" -lt "$min_length" ]]; then
echo "string length is less than $min_length"
return 1
fi
if [[ "$max_length" -ge 0 ]] && [[ "${#string}" -gt "$max_length" ]]; then
echo "string length is great than $max_length"
return 1
fi
}
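# Example (illustrative): enforce a minimum password length, with "--" separating flags from the value:
#   validate_string -min-length 8 -- "$SOLR_ADMIN_PASSWORD" || exit 1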

View File

@ -1,49 +0,0 @@
#!/bin/bash
#
# Library for managing versions strings
# shellcheck disable=SC1091
# Load Generic Libraries
. /opt/bitnami/scripts/liblog.sh
# Functions
########################
# Gets semantic version
# Arguments:
# $1 - version: string to extract major.minor.patch
# $2 - section: 1 to extract major, 2 to extract minor, 3 to extract patch
# Returns:
# requested section of the version (major, minor or patch)
#########################
get_sematic_version () {
local version="${1:?version is required}"
local section="${2:?section is required}"
local -a version_sections
# Regex to parse versions: x.y.z
local -r regex='([0-9]+)(\.([0-9]+)(\.([0-9]+))?)?'
if [[ "$version" =~ $regex ]]; then
local i=1
local j=1
local n=${#BASH_REMATCH[*]}
while [[ $i -lt $n ]]; do
if [[ -n "${BASH_REMATCH[$i]}" ]] && [[ "${BASH_REMATCH[$i]:0:1}" != '.' ]]; then
version_sections[$j]=${BASH_REMATCH[$i]}
((j++))
fi
((i++))
done
local number_regex='^[0-9]+$'
if [[ "$section" =~ $number_regex ]] && (( section > 0 )) && (( section <= 3 )); then
echo "${version_sections[$section]}"
return
else
stderr_print "Section allowed values are: 1, 2, and 3"
return 1
fi
fi
}
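# Example (illustrative): get_sematic_version "8.11.1" 2 prints "11" (the minor version)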

View File

@ -1,458 +0,0 @@
#!/bin/bash
#
# Bitnami web server handler library
# shellcheck disable=SC1090,SC1091
# Load generic libraries
. /opt/bitnami/scripts/liblog.sh
########################
# Execute a command (or list of commands) with the web server environment and library loaded
# Globals:
# *
# Arguments:
# None
# Returns:
# None
#########################
web_server_execute() {
local -r web_server="${1:?missing web server}"
shift
# Run program in sub-shell to avoid web server environment getting loaded when not necessary
(
. "/opt/bitnami/scripts/lib${web_server}.sh"
. "/opt/bitnami/scripts/${web_server}-env.sh"
"$@"
)
}
########################
# Prints the list of enabled web servers
# Globals:
# None
# Arguments:
# None
# Returns:
# None
#########################
web_server_list() {
local -r -a supported_web_servers=(apache nginx)
local -a existing_web_servers=()
for web_server in "${supported_web_servers[@]}"; do
[[ -f "/opt/bitnami/scripts/${web_server}-env.sh" ]] && existing_web_servers+=("$web_server")
done
echo "${existing_web_servers[@]:-}"
}
########################
# Prints the currently-enabled web server type (only one, in order of preference)
# Globals:
# None
# Arguments:
# None
# Returns:
# None
#########################
web_server_type() {
local -a web_servers
read -r -a web_servers <<< "$(web_server_list)"
echo "${web_servers[0]:-}"
}
########################
# Validate that a supported web server is configured
# Globals:
# None
# Arguments:
# None
# Returns:
# None
#########################
web_server_validate() {
local error_code=0
local supported_web_servers=("apache" "nginx")
# Auxiliary functions
print_validation_error() {
error "$1"
error_code=1
}
if [[ -z "$(web_server_type)" || ! " ${supported_web_servers[*]} " == *" $(web_server_type) "* ]]; then
print_validation_error "Could not detect any supported web servers. It must be one of: ${supported_web_servers[*]}"
elif ! web_server_execute "$(web_server_type)" type -t "is_$(web_server_type)_running" >/dev/null; then
print_validation_error "Could not load the $(web_server_type) web server library from /opt/bitnami/scripts. Check that it exists and is readable."
fi
return "$error_code"
}
########################
# Check whether the web server is running
# Globals:
# *
# Arguments:
# None
# Returns:
# true if the web server is running, false otherwise
#########################
is_web_server_running() {
"is_$(web_server_type)_running"
}
########################
# Start web server
# Globals:
# *
# Arguments:
# None
# Returns:
# None
#########################
web_server_start() {
info "Starting $(web_server_type) in background"
"${BITNAMI_ROOT_DIR}/scripts/$(web_server_type)/start.sh"
}
########################
# Stop web server
# Globals:
# *
# Arguments:
# None
# Returns:
# None
#########################
web_server_stop() {
info "Stopping $(web_server_type)"
"${BITNAMI_ROOT_DIR}/scripts/$(web_server_type)/stop.sh"
}
########################
# Restart web server
# Globals:
# *
# Arguments:
# None
# Returns:
# None
#########################
web_server_restart() {
info "Restarting $(web_server_type)"
"${BITNAMI_ROOT_DIR}/scripts/$(web_server_type)/restart.sh"
}
########################
# Reload web server
# Globals:
# *
# Arguments:
# None
# Returns:
# None
#########################
web_server_reload() {
"${BITNAMI_ROOT_DIR}/scripts/$(web_server_type)/reload.sh"
}
########################
# Ensure a web server application configuration exists (i.e. Apache virtual host format or NGINX server block)
# It serves as a wrapper for the specific web server function
# Globals:
# *
# Arguments:
# $1 - App name
# Flags:
# --type - Application type, which has an effect on which configuration template to use
# --hosts - Host listen addresses
# --server-name - Server name
# --server-aliases - Server aliases
# --allow-remote-connections - Whether to allow remote connections or to require local connections
# --disable - Whether to render server configurations with a .disabled prefix
# --disable-http - Whether to render the app's HTTP server configuration with a .disabled prefix
# --disable-https - Whether to render the app's HTTPS server configuration with a .disabled prefix
# --http-port - HTTP port number
# --https-port - HTTPS port number
# --document-root - Path to document root directory
# Apache-specific flags:
# --apache-additional-configuration - Additional vhost configuration (no default)
# --apache-additional-http-configuration - Additional HTTP vhost configuration (no default)
# --apache-additional-https-configuration - Additional HTTPS vhost configuration (no default)
# --apache-before-vhost-configuration - Configuration to add before the <VirtualHost> directive (no default)
# --apache-allow-override - Whether to allow .htaccess files (only allowed when --move-htaccess is set to 'no' and type is not defined)
# --apache-extra-directory-configuration - Extra configuration for the document root directory
# --apache-proxy-address - Address where to proxy requests
# --apache-proxy-configuration - Extra configuration for the proxy
# --apache-proxy-http-configuration - Extra configuration for the proxy HTTP vhost
# --apache-proxy-https-configuration - Extra configuration for the proxy HTTPS vhost
# --apache-move-htaccess - Move .htaccess files to a common place so they can be loaded during Apache startup (only allowed when type is not defined)
# NGINX-specific flags:
# --nginx-additional-configuration - Additional server block configuration (no default)
# --nginx-external-configuration - Configuration external to server block (no default)
# Returns:
# true if the configuration was enabled, false otherwise
########################
ensure_web_server_app_configuration_exists() {
local app="${1:?missing app}"
shift
local -a apache_args nginx_args web_servers args_var
apache_args=("$app")
nginx_args=("$app")
# Validate arguments
while [[ "$#" -gt 0 ]]; do
case "$1" in
# Common flags
--disable \
| --disable-http \
| --disable-https \
)
apache_args+=("$1")
nginx_args+=("$1")
;;
--hosts \
| --server-name \
| --server-aliases \
| --type \
| --allow-remote-connections \
| --http-port \
| --https-port \
| --document-root \
)
apache_args+=("$1" "${2:?missing value}")
nginx_args+=("$1" "${2:?missing value}")
shift
;;
# Specific Apache flags
--apache-additional-configuration \
| --apache-additional-http-configuration \
| --apache-additional-https-configuration \
| --apache-before-vhost-configuration \
| --apache-allow-override \
| --apache-extra-directory-configuration \
| --apache-proxy-address \
| --apache-proxy-configuration \
| --apache-proxy-http-configuration \
| --apache-proxy-https-configuration \
| --apache-move-htaccess \
)
apache_args+=("${1//apache-/}" "${2:?missing value}")
shift
;;
# Specific NGINX flags
--nginx-additional-configuration \
| --nginx-external-configuration)
nginx_args+=("${1//nginx-/}" "${2:?missing value}")
shift
;;
*)
echo "Invalid command line flag $1" >&2
return 1
;;
esac
shift
done
read -r -a web_servers <<< "$(web_server_list)"
for web_server in "${web_servers[@]}"; do
args_var="${web_server}_args[@]"
web_server_execute "$web_server" "ensure_${web_server}_app_configuration_exists" "${!args_var}"
done
}
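# Example (illustrative; "myapp" and the chosen values are hypothetical):
#   ensure_web_server_app_configuration_exists "myapp" \
#       --http-port 8080 --https-port 8443 --document-root "/opt/bitnami/myapp"
#   # Renders the matching Apache vhost and/or NGINX server block for every installed web server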
########################
# Ensure a web server application configuration does not exist anymore (i.e. Apache virtual host format or NGINX server block)
# It serves as a wrapper for the specific web server function
# Globals:
# *
# Arguments:
# $1 - App name
# Returns:
# true if the configuration was disabled, false otherwise
########################
ensure_web_server_app_configuration_not_exists() {
local app="${1:?missing app}"
local -a web_servers
read -r -a web_servers <<< "$(web_server_list)"
for web_server in "${web_servers[@]}"; do
web_server_execute "$web_server" "ensure_${web_server}_app_configuration_not_exists" "$app"
done
}
########################
# Ensure the web server loads the configuration for an application in a URL prefix
# It serves as a wrapper for the specific web server function
# Globals:
# *
# Arguments:
# $1 - App name
# Flags:
# --allow-remote-connections - Whether to allow remote connections or to require local connections
# --document-root - Path to document root directory
# --prefix - URL prefix from where it will be accessible (i.e. /myapp)
# --type - Application type, which has an effect on what configuration template will be used
# Apache-specific flags:
# --apache-additional-configuration - Additional vhost configuration (no default)
# --apache-allow-override - Whether to allow .htaccess files (only allowed when --move-htaccess is set to 'no')
# --apache-extra-directory-configuration - Extra configuration for the document root directory
# --apache-move-htaccess - Move .htaccess files to a common place so they can be loaded during Apache startup
# NGINX-specific flags:
# --nginx-additional-configuration - Additional server block configuration (no default)
# Returns:
# true if the configuration was enabled, false otherwise
########################
ensure_web_server_prefix_configuration_exists() {
local app="${1:?missing app}"
shift
local -a apache_args nginx_args web_servers args_var
apache_args=("$app")
nginx_args=("$app")
# Validate arguments
while [[ "$#" -gt 0 ]]; do
case "$1" in
# Common flags
--allow-remote-connections \
| --document-root \
| --prefix \
| --type \
)
apache_args+=("$1" "${2:?missing value}")
nginx_args+=("$1" "${2:?missing value}")
shift
;;
# Specific Apache flags
--apache-additional-configuration \
| --apache-allow-override \
| --apache-extra-directory-configuration \
| --apache-move-htaccess \
)
apache_args+=("${1//apache-/}" "$2")
shift
;;
# Specific NGINX flags
--nginx-additional-configuration)
nginx_args+=("${1//nginx-/}" "$2")
shift
;;
*)
echo "Invalid command line flag $1" >&2
return 1
;;
esac
shift
done
read -r -a web_servers <<< "$(web_server_list)"
for web_server in "${web_servers[@]}"; do
args_var="${web_server}_args[@]"
web_server_execute "$web_server" "ensure_${web_server}_prefix_configuration_exists" "${!args_var}"
done
}
########################
# Ensure a web server application configuration is updated with the runtime configuration (i.e. ports)
# It serves as a wrapper for the specific web server function
# Globals:
# *
# Arguments:
# $1 - App name
# Flags:
# --hosts - Host listen addresses
# --server-name - Server name
# --server-aliases - Server aliases
# --enable-http - Enable HTTP app configuration (if not enabled already)
# --enable-https - Enable HTTPS app configuration (if not enabled already)
# --disable-http - Disable HTTP app configuration (if not disabled already)
# --disable-https - Disable HTTPS app configuration (if not disabled already)
# --http-port - HTTP port number
# --https-port - HTTPS port number
# Returns:
# true if the configuration was updated, false otherwise
########################
web_server_update_app_configuration() {
local app="${1:?missing app}"
shift
local -a args web_servers
args=("$app")
# Validate arguments
while [[ "$#" -gt 0 ]]; do
case "$1" in
# Common flags
--enable-http \
| --enable-https \
| --disable-http \
| --disable-https \
)
args+=("$1")
;;
--hosts \
| --server-name \
| --server-aliases \
| --http-port \
| --https-port \
)
args+=("$1" "${2:?missing value}")
shift
;;
*)
echo "Invalid command line flag $1" >&2
return 1
;;
esac
shift
done
read -r -a web_servers <<< "$(web_server_list)"
for web_server in "${web_servers[@]}"; do
web_server_execute "$web_server" "${web_server}_update_app_configuration" "${args[@]}"
done
}
########################
# Enable loading page, which shows users that the initialization process is not yet completed
# Globals:
# *
# Arguments:
# None
# Returns:
# None
#########################
web_server_enable_loading_page() {
ensure_web_server_app_configuration_exists "__loading" --hosts "_default_" \
--apache-additional-configuration "
# Show an HTTP 503 Service Unavailable page by default
RedirectMatch 503 ^/$
# Show index.html if server is answering with 404 Not Found or 503 Service Unavailable status codes
ErrorDocument 404 /index.html
ErrorDocument 503 /index.html" \
--nginx-additional-configuration "
# Show an HTTP 503 Service Unavailable page by default
location / {
return 503;
}
# Show index.html if server is answering with 404 Not Found or 503 Service Unavailable status codes
error_page 404 @installing;
error_page 503 @installing;
location @installing {
rewrite ^(.*)$ /index.html break;
}"
web_server_reload
}
########################
# Disable the loading page, which shows users that the initialization process is not yet completed
# Globals:
# *
# Arguments:
# None
# Returns:
# None
#########################
web_server_disable_install_page() {
ensure_web_server_app_configuration_not_exists "__loading"
web_server_reload
}

View File

@ -1,24 +0,0 @@
#!/bin/sh
set -e
set -u
export DEBIAN_FRONTEND=noninteractive
n=0
max=2
until [ $n -gt $max ]; do
set +e
(
apt-get update -qq &&
apt-get install -y --no-install-recommends "$@"
)
CODE=$?
set -e
if [ $CODE -eq 0 ]; then
break
fi
if [ $n -eq $max ]; then
exit $CODE
fi
echo "apt failed, retrying"
n=$(($n + 1))
done
rm -r /var/lib/apt/lists /var/cache/apt/archives

View File

@ -1,17 +0,0 @@
#!/bin/bash
# shellcheck disable=SC1091
set -o errexit
set -o nounset
set -o pipefail
# set -o xtrace # Uncomment this line for debugging purposes
# Load libraries
. /opt/bitnami/scripts/libbitnami.sh
. /opt/bitnami/scripts/liblog.sh
print_welcome_page
echo ""
exec "$@"

View File

@ -1,24 +0,0 @@
#!/bin/bash
# shellcheck disable=SC1091
set -o errexit
set -o nounset
set -o pipefail
# set -o xtrace # Uncomment this line for debugging purpose
# Load libraries
. /opt/bitnami/scripts/libfile.sh
. /opt/bitnami/scripts/liblog.sh
#
# Java post-unpack operations
#
# Override default files in the Java security directory. This is used for
# custom base images (e.g. with custom CA certificates or block lists)
if ! is_dir_empty "$JAVA_EXTRA_SECURITY_DIR"; then
info "Adding custom CAs to the Java security folder"
cp -Lr "$JAVA_EXTRA_SECURITY_DIR" /opt/bitnami/java/lib/security
fi

View File

@ -1,647 +0,0 @@
#!/bin/bash
#
# Bitnami Solr library
# shellcheck disable=SC1091
# Load Generic Libraries
. /opt/bitnami/scripts/libfile.sh
. /opt/bitnami/scripts/liblog.sh
. /opt/bitnami/scripts/libos.sh
. /opt/bitnami/scripts/libpersistence.sh
. /opt/bitnami/scripts/libservice.sh
. /opt/bitnami/scripts/libvalidations.sh
########################
# Create initial security.json
# Globals:
# SOLR_*
# Arguments:
# None
# Returns:
# None
#########################
solr_generate_initial_security() {
info "Generating initial security file"
cat >"${SOLR_BASE_DIR}/server/solr/security.json" <<EOF
{
"authentication":{
"blockUnknown": true,
"class":"solr.BasicAuthPlugin",
"credentials":{"${SOLR_ADMIN_USERNAME}":"IV0EHq1OnNrj6gvRCwvFwTrZ1+z1oBbnQdiVC3otuq0= Ndd7LKvVBAaZIF0QAVi1ekCfAJXr1GGfLtRUXhgrF8c="},
"forwardCredentials": false
},
"authorization":{
"class":"solr.RuleBasedAuthorizationPlugin",
"permissions":[{"name":"security-edit",
"role":"admin"}],
"user-role":{"${SOLR_ADMIN_USERNAME}":"admin"}
}}
EOF
if am_i_root; then
configure_permissions_ownership "${SOLR_BASE_DIR}/server/solr/security.json" -u "$SOLR_DAEMON_USER" -g "$SOLR_DAEMON_GROUP"
fi
}
########################
# Configure Solr Heap Size
# Globals:
# SOLR_*
# Arguments:
# None
# Returns:
# None
#########################
solr_set_heap_size() {
local heap_ms_size
local heap_mx_size
local machine_mem=""
debug "Calculating appropriate Xmx and Xms values..."
machine_mem="$(get_total_memory)"
if [[ "$machine_mem" -lt 512 ]]; then
heap_ms_size=256
heap_mx_size=256
elif [[ "$machine_mem" -lt 4096 ]]; then
heap_ms_size=256
heap_mx_size=$((machine_mem - 512))
else
heap_ms_size=512
heap_mx_size="$((machine_mem - 1024))"
fi
info "Setting '-Xms${heap_ms_size}m -Xmx${heap_mx_size}m' heap options..."
replace_in_file "$SOLR_BIN_DIR"/solr.in.sh ".*SOLR_JAVA_MEM=.*" "SOLR_JAVA_MEM=\"-Xms${heap_ms_size}m -Xmx${heap_mx_size}m\""
}
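# Example (illustrative): on a machine reporting 2048 MB of total memory this sets
# SOLR_JAVA_MEM="-Xms256m -Xmx1536m" (256m floor, total memory minus 512 MB as the ceiling)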
########################
# Validate settings in SOLR_* env. variables
# Globals:
# SOLR_*
# Arguments:
# None
# Returns:
# None
#########################
solr_validate() {
info "Validating settings in SOLR_* env vars..."
local error_code=0
# Auxiliary functions
print_validation_error() {
error "$1"
error_code=1
}
! is_yes_no_value "$SOLR_ENABLE_AUTHENTICATION" && print_validation_error "SOLR_ENABLE_AUTHENTICATION possible values are yes or no"
if is_boolean_yes "$SOLR_ENABLE_AUTHENTICATION"; then
[[ -z "$SOLR_ADMIN_USERNAME" ]] && print_validation_error "You need to provide an username in SOLR_USERNAME"
[[ -z "$SOLR_ADMIN_PASSWORD" ]] && print_validation_error "You need to provide a password for the user: ${SOLR_ADMIN_USERNAME}"
fi
! is_yes_no_value "$SOLR_SSL_ENABLED" && print_validation_error "SOLR_SSL_ENABLED possible values are yes or no"
if is_boolean_yes "$SOLR_SSL_ENABLED"; then
[[ -z "$SOLR_SSL_KEY_STORE" ]] && print_validation_error "You need to provide a key store file in SOLR_SSL_KEY_STORE"
[[ -z "$SOLR_SSL_TRUST_STORE" ]] && print_validation_error "You need to provide a trust store file in SOLR_SSL_TRUST_STORE"
[[ -z "$SOLR_SSL_KEY_STORE_PASSWORD" ]] && print_validation_error "You need to provide a password in SOLR_SSL_KEY_STORE_PASSWORD"
[[ -z "$SOLR_SSL_TRUST_STORE_PASSWORD" ]] && print_validation_error "You need to provide a password file in SOLR_SSL_TRUST_STORE_PASSWORD"
fi
! is_yes_no_value "$SOLR_ENABLE_CLOUD_MODE" && print_validation_error "SOLR_ENABLE_CLOUD_MODE possible values are yes or no"
is_boolean_yes "$SOLR_ENABLE_CLOUD_MODE" && [[ -z "$SOLR_ZK_HOSTS" ]] && print_validation_error "You need to provide the Zookeper node list in SOLR_ZK_HOSTS"
! is_boolean_yes "$SOLR_CLOUD_BOOTSTRAP" && is_boolean_yes "$SOLR_ENABLE_CLOUD_MODE" && [[ -n "$SOLR_CORES" ]] && info "This node is not a boostrap node and will not create the collection"
! is_true_false_value "$SOLR_SSL_CHECK_PEER_NAME" && print_validation_error "SOLR_SSL_CHECK_PEER_NAME possible values are true or false"
[[ "$SOLR_NUMBER_OF_NODES" -lt $(("$SOLR_COLLECTION_REPLICAS" * "$SOLR_COLLECTION_SHARDS")) ]] && print_validation_error "Not enough nodes for the replicas and shards indicated"
[[ "$error_code" -eq 0 ]] || exit "$error_code"
}
########################
# Wait for the solr root to exist in zookeeper
# Globals:
# SOLR_*
# Arguments:
# None
# Returns:
# None
#########################
solr_wait_for_zk_root() {
info "Waiting for solr root in zookeeper"
if ! retry_while solr_zk_root_exists; then
error "Failed to connect to the zookeeper"
exit 1
fi
}
########################
# Wait for Zookeeper to be up
# Globals:
# SOLR_*
# Arguments:
# None
# Returns:
# None
#########################
solr_wait_for_zookeeper() {
local host
local port
info "Waiting for Zookeeper to be up"
read -r -a zoo_nodes <<<"$(tr ',' ' ' <<<"${SOLR_ZK_HOSTS}")"
for zoo_node in "${zoo_nodes[@]}"; do
if [[ "$zoo_node" =~ (.*):([0-9]*) ]]; then
host="${BASH_REMATCH[1]}"
port="${BASH_REMATCH[2]}"
debug "Trying: $host:$port"
if ! retry_while "debug_execute nc -z ${host} ${port}" "$SOLR_ZK_MAX_RETRIES" "$SOLR_ZK_SLEEP_TIME"; then
error "Failed to connect to the zookeeper node at ${host}:${port}"
return 1
fi
fi
done
}
#########################
# Create SOLR core
# Globals:
# SOLR_*
# Arguments:
# None
# Returns:
# None
#########################
solr_create_cores() {
local -r exec="curl"
local command_args=("--silent" "--fail")
local protocol="http"
is_boolean_yes "$SOLR_SSL_ENABLED" && protocol="https" && command_args+=("-k")
is_boolean_yes "$SOLR_ENABLE_AUTHENTICATION" && command_args+=("--user" "${SOLR_ADMIN_USERNAME}:${SOLR_ADMIN_PASSWORD}")
read -r -a cores <<<"$(tr ',;' ' ' <<<"${SOLR_CORES}")"
info "Creating cores..."
for core in "${cores[@]}"; do
mkdir -p "${SOLR_SERVER_DIR}/solr/${core}/data"
mkdir -p "${SOLR_SERVER_DIR}/solr/${core}/conf"
cp -Lr "${SOLR_CORE_CONF_DIR}"/* "${SOLR_SERVER_DIR}/solr/${core}/conf/"
command_args+=("${protocol}://localhost:${SOLR_PORT_NUMBER}/solr/admin/cores?action=CREATE&name=${core}&instanceDir=${core}&dataDir=data")
info "Creating solr core: ${core}"
if ! debug_execute "$exec" "${command_args[@]}" >/dev/null; then
error "There was an error when creating the core"
exit 1
else
info "Core created"
fi
done
}
#########################
# Update user password
# Globals:
# SOLR_*
# Arguments:
# $1 - username
# $2 - password
# Returns:
# None
#########################
solr_update_password() {
local -r exec="curl"
local -r default_password="SolrRocks"
local -r username="${1:?user is required}"
local -r password="${2:?password is required}"
local protocol="http"
local command_args=()
is_boolean_yes "$SOLR_SSL_ENABLED" && protocol="https" && command_args+=("-k")
command_args+=("--silent" "--user" "${username}:${default_password}" "${protocol}://localhost:${SOLR_PORT_NUMBER}/api/cluster/security/authentication" "-H" "'Content-type:application/json'" "-d" "{\"set-user\":{\"${username}\":\"${password}\"}}")
info "Updating user password"
if ! debug_execute "$exec" "${command_args[@]}" >/dev/null; then
error "There was an error when updating the user password"
exit 1
else
info "Password updated"
fi
}
#########################
# Check if the API is ready
# Globals:
# SOLR_*
# Arguments:
# $1 - username
# $2 - password
# Returns:
# Boolean
#########################
solr_check_api() {
local -r exec="curl"
local -r username="${1:?user is required}"
local -r password="${2:?password is required}"
local protocol="http"
local command_args=()
debug "Checking if the API is ready"
is_boolean_yes "$SOLR_SSL_ENABLED" && protocol="https" && command_args+=("-k")
command_args+=("--silent" "--user" "${username}:${password}" "${protocol}://localhost:${SOLR_PORT_NUMBER}/api/" "-H" "'Content-type:application/json'")
if ! debug_execute "$exec" "${command_args[@]}" >/dev/null; then
return 1
fi
}
#########################
# Wait for api
# Globals:
# SOLR_*
# Arguments:
# $1 - username
# $2 - password
# Returns:
# None
#########################
solr_wait_for_api() {
local -r username="${1:?user is required}"
local -r password="${2:?password is required}"
info "Wait for Solr API"
if ! retry_while "solr_check_api ${username} ${password}"; then
error "Solr API not available"
exit 1
fi
}
#########################
# Create SOLR cloud user
# Globals:
# SOLR_*
# Arguments:
# $1 - username
# $2 - password
# Returns:
# None
#########################
solr_create_cloud_user() {
local -r exec="${SOLR_BIN_DIR}/solr"
local -r username="${1:?user is required}"
local -r password="${2:?password is required}"
local command_args=("auth" "enable" "-type" "basicAuth" "-credentials" "${username}:${password}" "-blockUnknown" "true" "-z" "$SOLR_ZK_HOSTS/solr")
info "Creating user: ${username}"
if ! debug_execute "$exec" "${command_args[@]}" >/dev/null; then
error "There was an error when creating the user"
exit 1
else
info "User created"
fi
}
#########################
# Create SOLR collection
# Globals:
# SOLR_*
# Arguments:
# None
# Returns:
# None
#########################
solr_create_collection() {
local -r exec="curl"
local command_args=("--silent")
local protocol="http"
info "Creating collection:${SOLR_COLLECTION} with ${SOLR_COLLECTION_REPLICAS} replicas and ${SOLR_COLLECTION_SHARDS} shards"
is_boolean_yes "$SOLR_ENABLE_AUTHENTICATION" && command_args+=("--user" "${SOLR_ADMIN_USERNAME}:${SOLR_ADMIN_PASSWORD}")
is_boolean_yes "$SOLR_SSL_ENABLED" && protocol="https" && command_args+=("-k")
command_args+=("${protocol}://localhost:${SOLR_PORT_NUMBER}/solr/admin/collections?action=CREATE&name=${SOLR_COLLECTION}&numShards=${SOLR_COLLECTION_SHARDS}&replicationFactor=${SOLR_COLLECTION_REPLICAS}")
# Check if the collection exists before creating it
if ! solr_collection_exists "$SOLR_COLLECTION"; then
# Will wait for other nodes to join before creating a collection with shards and/or replicas
if [[ "$SOLR_COLLECTION_REPLICAS" -gt 1 ]] || [[ "$SOLR_COLLECTION_SHARDS" -gt 1 ]]; then
info "Waiting for other nodes to be available"
if ! retry_while "solr_check_number_of_nodes ${SOLR_NUMBER_OF_NODES}" "$SOLR_ZK_MAX_RETRIES" "$SOLR_ZK_SLEEP_TIME"; then
error "There are not enough nodes to create the collection"
fi
fi
if ! debug_execute "$exec" "${command_args[@]}" >/dev/null; then
error "There was an error when creating the collection"
exit 1
else
info "Collection created"
fi
else
info "Skipping. Collection already exists."
fi
}
#########################
# Check if the root of solr exists in zookeeper
# Globals:
# SOLR_*
# Arguments:
# $1 - Collection name
# Returns:
# None
#########################
solr_zk_root_exists() {
local -r exec="${SOLR_BIN_DIR}/solr"
local command_args=("zk" "ls" "/" "-z" "$SOLR_ZK_HOSTS")
debug "Checking if root of solr exists in zookeeper"
"$exec" "${command_args[@]}" 2>/dev/null | grep -q "solr"
}
#########################
# Check if a collection already exists
# Globals:
# SOLR_*
# Arguments:
# $1 - Collection name
# Returns:
# None
#########################
solr_collection_exists() {
local -r collection="${1:?collection is required}"
local -r exec="${SOLR_BIN_DIR}/solr"
local command_args=("zk" "ls" "/solr/collections" "-z" "$SOLR_ZK_HOSTS")
debug "Checking if ${collection} exists"
"$exec" "${command_args[@]}" | grep -q "$collection"
}
########################
# Check the number of nodes in the cluster
# Arguments:
# $1 - expected number of nodes
# Returns:
# Boolean
########################
solr_check_number_of_nodes() {
local -r nodes="${1:-1}"
local -r exec="${SOLR_BIN_DIR}/solr"
local command_args=("zk" "ls" "/solr/live_nodes" "-z" "$SOLR_ZK_HOSTS")
[[ $("$exec" "${command_args[@]}" | wc -l) -ge "$nodes" ]]
}
########################
# Check if zookeeper has been initialized
# Arguments:
# None
# Returns:
# Boolean
########################
solr_is_zk_initialized() {
local -r exec="${SOLR_BIN_DIR}/solr"
local command_args=("zk" "ls" "/solr" "-z" "$SOLR_ZK_HOSTS")
info "Checking if solr has been initialized in zookeeper"
if ! debug_execute "$exec" "${command_args[@]}" 2>/dev/null; then
info "Zookeeper was not initialized."
return 1
else
info "Zookeeper was initialized."
return 0
fi
}
#########################
# Start solr in background
# Globals:
# SOLR_*
# Arguments:
# $1 - Mode: cloud or empty
# Returns:
# None
#########################
solr_start_bg() {
local -r mode="${1:-}"
local -r exec="${SOLR_BIN_DIR}/solr"
local start_args=("start" "-p" "${SOLR_PORT_NUMBER}" "-d" "server")
info "Starting solr in background"
if [[ "$mode" == "cloud" ]]; then
start_args+=("-cloud" "-z" "$SOLR_ZK_HOSTS/solr")
fi
# Do not start as root, to avoid solr error message
if am_i_root; then
debug_execute "gosu" "$SOLR_DAEMON_USER" "$exec" "${start_args[@]}"
else
debug_execute "$exec" "${start_args[@]}"
fi
}
#########################
# Stop SOLR
# Globals:
# SOLR_*
# Arguments:
# None
# Returns:
# None
#########################
solr_stop() {
info "Stopping solr"
stop_service_using_pid "$SOLR_PID_FILE"
}
########################
# Check if Solr is running
# Globals:
# SOLR_PID_FILE
# Arguments:
# None
# Returns:
# Whether Solr is running
########################
is_solr_running() {
local pid
pid="$(get_pid_from_file "$SOLR_PID_FILE")"
if [[ -n "$pid" ]]; then
is_service_running "$pid"
else
false
fi
}
########################
# Check if Solr is running
# Globals:
# SOLR_PID_FILE
# Arguments:
# None
# Returns:
# Whether Solr is not running
########################
is_solr_not_running() {
! is_solr_running
}
#########################
# Create root in zookeeper
# Globals:
# SOLR_*
# Arguments:
# None
# Returns:
# None
#########################
solr_zk_initialize() {
local -r exec="${SOLR_BIN_DIR}/solr"
local command_args=("zk" "mkroot" "/solr" "-z" "$SOLR_ZK_HOSTS")
if solr_is_zk_initialized; then
info "Zookeeper is already initialized"
else
info "Creating root in zookeeper"
debug_execute "$exec" "${command_args[@]}"
fi
}
#########################
# Set cluster properties in zookeeper
# Globals:
# SOLR_*
# Arguments:
# None
# Returns:
# None
#########################
solr_set_ssl_url_scheme() {
info "Initializing configuring Solr HTTPS in Zookeeper"
solr_wait_for_zk_root && "${SOLR_SERVER_DIR}/scripts/cloud-scripts/zkcli.sh" -zkhost "${SOLR_ZK_HOSTS}/solr" -cmd clusterprop -name urlScheme -val https
}
#########################
# Migrate persisted Solr data from the old location
# Globals:
# SOLR_*
# Arguments:
# None
# Returns:
# None
#########################
solr_migrate_old_data() {
local -r exec="mv"
local command_args=("${SOLR_VOLUME_DIR}/data" "${SOLR_VOLUME_DIR}/server/solr")
if am_i_root; then
warn "Persisted data detected in old location. Migrating and changing permissions"
ensure_dir_exists "${SOLR_VOLUME_DIR}/server"
debug_execute "$exec" "${command_args[@]}"
configure_permissions_ownership "${SOLR_VOLUME_DIR}/server/solr" -d 775 -f 664 -g "root"
warn "Data migrated."
else
error "Persisted data detected in old location. You will need to run first the container as root to migrate the data"
exit 1
fi
}
#########################
# Initialize SOLR
# Globals:
# SOLR_*
# Arguments:
# None
# Returns:
# None
#########################
solr_initialize() {
info "Initializing Solr ..."
# Check if there is persisted data from old version and migrate it
! is_dir_empty "${SOLR_VOLUME_DIR}/data" && [[ -f "$SOLR_VOLUME_DIR/.initialized" ]] && solr_migrate_old_data
is_boolean_yes "$SOLR_SSL_ENABLED" && export SOLR_SSL_ENABLED=true
# Check if Solr has already been initialized and persisted in a previous run
local -r app_name="solr"
if ! is_app_initialized "$app_name"; then
# Ensure the solr base directory exists and has proper permissions
info "Configuring file permissions for Solr"
ensure_dir_exists "$SOLR_VOLUME_DIR"
rm -f "$SOLR_PID_FILE"
if is_boolean_yes "$SOLR_ENABLE_CLOUD_MODE"; then
info "Deploying Solr Cloud from scratch"
if ! solr_wait_for_zookeeper; then
error "Zookeeper not detected"
exit 1
fi
if is_boolean_yes "$SOLR_CLOUD_BOOTSTRAP"; then
solr_zk_initialize
solr_start_bg "cloud"
solr_wait_for_api "admin" "SolrRocks"
is_boolean_yes "$SOLR_SSL_ENABLED" && solr_set_ssl_url_scheme
[[ -n "$SOLR_COLLECTION" ]] && solr_create_collection
is_boolean_yes "$SOLR_ENABLE_AUTHENTICATION" && solr_create_cloud_user "$SOLR_ADMIN_USERNAME" "$SOLR_ADMIN_PASSWORD"
solr_stop
else
if is_boolean_yes "$SOLR_SSL_ENABLED"; then
solr_set_ssl_url_scheme
else
solr_wait_for_zk_root
fi
fi
else
info "Deploying Solr from scratch"
is_boolean_yes "$SOLR_ENABLE_AUTHENTICATION" && solr_generate_initial_security
solr_start_bg
solr_wait_for_api "admin" "SolrRocks"
is_boolean_yes "$SOLR_ENABLE_AUTHENTICATION" && solr_update_password "$SOLR_ADMIN_USERNAME" "$SOLR_ADMIN_PASSWORD"
[[ -n "$SOLR_CORES" ]] && solr_create_cores
solr_stop
fi
info "Persisting Solr installation"
persist_app "$app_name" "$SOLR_DATA_TO_PERSIST"
else
info "Restoring persisted Solr installation"
# Compatibility with previous container images
if [[ "$(ls "$SOLR_VOLUME_DIR")" = "data" ]]; then
warn "The persisted data for this Solr installation is located at '${SOLR_VOLUME_DIR}/data' instead of '${SOLR_VOLUME_DIR}'"
warn "This is deprecated and support for this may be removed in a future release"
rm "${SOLR_BASE_DIR}/server/solr"
ln -s "${SOLR_VOLUME_DIR}/data" "${SOLR_BASE_DIR}/server/solr"
fi
restore_persisted_app "$app_name" "$SOLR_DATA_TO_PERSIST"
fi
}

View File

@ -1,104 +0,0 @@
#!/bin/bash
#
# Environment configuration for solr
# The values for all environment variables will be set in the below order of precedence
# 1. Custom environment variables defined below after Bitnami defaults
# 2. Constants defined in this file (environment variables with no default), i.e. BITNAMI_ROOT_DIR
# 3. Environment variables overridden via external files using *_FILE variables (see below)
# 4. Environment variables set externally (i.e. current Bash context/Dockerfile/userdata)
# Load logging library
# shellcheck disable=SC1090,SC1091
. /opt/bitnami/scripts/liblog.sh
export BITNAMI_ROOT_DIR="/opt/bitnami"
export BITNAMI_VOLUME_DIR="/bitnami"
# Logging configuration
export MODULE="${MODULE:-solr}"
export BITNAMI_DEBUG="${BITNAMI_DEBUG:-false}"
# By setting an environment variable matching *_FILE to a file path, the prefixed environment
# variable will be overridden with the value specified in that file
solr_env_vars=(
SOLR_ENABLE_CLOUD_MODE
SOLR_NUMBER_OF_NODES
SOLR_HOST
SOLR_JETTY_HOST
SOLR_HEAP
SOLR_JAVA_MEM
SOLR_PORT_NUMBER
SOLR_CORES
SOLR_COLLECTION
SOLR_COLLECTION_REPLICAS
SOLR_COLLECTION_SHARDS
SOLR_ENABLE_AUTHENTICATION
SOLR_ADMIN_USERNAME
SOLR_ADMIN_PASSWORD
SOLR_CLOUD_BOOTSTRAP
SOLR_CORE_CONF_DIR
SOLR_SSL_ENABLED
SOLR_SSL_CHECK_PEER_NAME
SOLR_ZK_MAX_RETRIES
SOLR_ZK_SLEEP_TIME
)
for env_var in "${solr_env_vars[@]}"; do
file_env_var="${env_var}_FILE"
if [[ -n "${!file_env_var:-}" ]]; then
if [[ -r "${!file_env_var:-}" ]]; then
export "${env_var}=$(< "${!file_env_var}")"
unset "${file_env_var}"
else
warn "Skipping export of '${env_var}'. '${!file_env_var:-}' is not readable."
fi
fi
done
unset solr_env_vars
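# Example (illustrative): any variable listed above can be read from a file instead, e.g. a
# Docker secret mounted into the container:
#   docker run -e SOLR_ADMIN_PASSWORD_FILE=/run/secrets/solr-password ... bitnami/solr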
# Paths
export BITNAMI_VOLUME_DIR="/bitnami"
export SOLR_BASE_DIR="${BITNAMI_ROOT_DIR}/solr"
export SOLR_JAVA_HOME="${BITNAMI_ROOT_DIR}/java"
export SOLR_BIN_DIR="${SOLR_BASE_DIR}/bin"
export SOLR_TMP_DIR="${SOLR_BASE_DIR}/tmp"
export SOLR_PID_DIR="${SOLR_BASE_DIR}/tmp"
export SOLR_LOGS_DIR="${SOLR_BASE_DIR}/logs"
export SOLR_SERVER_DIR="${SOLR_BASE_DIR}/server"
# Persistence
export SOLR_VOLUME_DIR="${BITNAMI_VOLUME_DIR}/solr"
export SOLR_DATA_TO_PERSIST="server/solr"
# Solr parameters
export SOLR_ENABLE_CLOUD_MODE="${SOLR_ENABLE_CLOUD_MODE:-no}"
export SOLR_NUMBER_OF_NODES="${SOLR_NUMBER_OF_NODES:-1}"
export SOLR_HOST="${SOLR_HOST:-}"
export SOLR_JETTY_HOST="${SOLR_JETTY_HOST:-0.0.0.0}"
export SOLR_HEAP="${SOLR_HEAP:-}"
export SOLR_JAVA_MEM="${SOLR_JAVA_MEM:--Xms512m -Xmx512m}"
export SOLR_PORT_NUMBER="${SOLR_PORT_NUMBER:-8983}"
export SOLR_PID_FILE="${SOLR_PID_DIR}/solr-${SOLR_PORT_NUMBER}.pid"
export SOLR_CORES="${SOLR_CORES:-}"
SOLR_COLLECTION="${SOLR_COLLECTION:-"${SOLR_COLLECTION:-}"}"
export SOLR_COLLECTION="${SOLR_COLLECTION:-}"
export SOLR_COLLECTION_REPLICAS="${SOLR_COLLECTION_REPLICAS:-1}"
export SOLR_COLLECTION_SHARDS="${SOLR_COLLECTION_SHARDS:-1}"
export SOLR_ENABLE_AUTHENTICATION="${SOLR_ENABLE_AUTHENTICATION:-no}"
export SOLR_ADMIN_USERNAME="${SOLR_ADMIN_USERNAME:-admin}"
export SOLR_ADMIN_PASSWORD="${SOLR_ADMIN_PASSWORD:-bitnami}"
export SOLR_CLOUD_BOOTSTRAP="${SOLR_CLOUD_BOOTSTRAP:-no}"
export SOLR_CORE_CONF_DIR="${SOLR_CORE_CONF_DIR:-${SOLR_SERVER_DIR}/solr/configsets/_default/conf}"
export SOLR_SSL_ENABLED="${SOLR_SSL_ENABLED:-no}"
export SOLR_SSL_CHECK_PEER_NAME="${SOLR_SSL_CHECK_PEER_NAME:-false}"
# System users (when running with a privileged user)
export SOLR_DAEMON_USER="solr"
export SOLR_DAEMON_GROUP="solr"
# Solr retries configuration
export SOLR_ZK_MAX_RETRIES="${SOLR_ZK_MAX_RETRIES:-5}"
export SOLR_ZK_SLEEP_TIME="${SOLR_ZK_SLEEP_TIME:-5}"
# Custom environment variables may be defined below

View File

@ -1,27 +0,0 @@
#!/bin/bash
# shellcheck disable=SC1091
set -o errexit
set -o nounset
set -o pipefail
# set -o xtrace # Uncomment this line for debugging purposes
# Load libraries
. /opt/bitnami/scripts/libbitnami.sh
. /opt/bitnami/scripts/liblog.sh
. /opt/bitnami/scripts/libsolr.sh
# Load solr environment variables
. /opt/bitnami/scripts/solr-env.sh
print_welcome_page
if [[ "$*" = *"/opt/bitnami/scripts/solr/run.sh"* ]]; then
info "** Starting solr setup **"
/opt/bitnami/scripts/solr/setup.sh
info "** solr setup finished! **"
fi
echo ""
exec "$@"

View File

@ -1,29 +0,0 @@
#!/bin/bash
# shellcheck disable=SC1090,SC1091
set -o errexit
set -o nounset
set -o pipefail
# set -o xtrace # Uncomment this line for debugging purpose
# Load libraries
. /opt/bitnami/scripts/libsolr.sh
. /opt/bitnami/scripts/libfs.sh
. /opt/bitnami/scripts/libos.sh
# Load solr environment variables
. /opt/bitnami/scripts/solr-env.sh
ensure_user_exists "$SOLR_DAEMON_USER" --group "$SOLR_DAEMON_GROUP"
for dir in "$SOLR_TMP_DIR" "$SOLR_VOLUME_DIR" "$SOLR_LOGS_DIR" "$SOLR_BASE_DIR"; do
ensure_dir_exists "$dir"
configure_permissions_ownership "$dir" -d "775" -u "$SOLR_DAEMON_USER" -g "root"
done
# Create basic solr configuration
replace_in_file "$SOLR_BIN_DIR"/solr.in.sh "#SOLR_JAVA_HOME=\"\"" "SOLR_JAVA_HOME=$SOLR_JAVA_HOME"
replace_in_file "$SOLR_BIN_DIR"/solr.in.sh "#SOLR_PID_DIR=" "SOLR_PID_DIR=$SOLR_PID_DIR"
replace_in_file "$SOLR_BIN_DIR"/solr.in.sh "#SOLR_LOGS_DIR=logs" "SOLR_LOGS_DIR=$SOLR_LOGS_DIR"

View File

@ -1,31 +0,0 @@
#!/bin/bash
# shellcheck disable=SC1091
set -o errexit
set -o nounset
set -o pipefail
# set -o xtrace # Uncomment this line for debugging purpose
# Load libraries
. /opt/bitnami/scripts/liblog.sh
. /opt/bitnami/scripts/libsolr.sh
. /opt/bitnami/scripts/libos.sh
# Load solr environment variables
. /opt/bitnami/scripts/solr-env.sh
info "** Starting solr **"
start_command=("${SOLR_BIN_DIR}/solr" "-p" "${SOLR_PORT_NUMBER}" "-d" "/opt/bitnami/solr/server" "-f")
if is_boolean_yes "$SOLR_ENABLE_CLOUD_MODE"; then
start_command+=("-cloud" "-z" "$SOLR_ZK_HOSTS/solr")
fi
is_boolean_yes "$SOLR_SSL_ENABLED" && export SOLR_SSL_ENABLED=true
if am_i_root; then
exec gosu "$SOLR_DAEMON_USER" "${start_command[@]}"
else
exec "${start_command[@]}"
fi

View File

@ -1,20 +0,0 @@
#!/bin/bash
# shellcheck disable=SC1091
set -o errexit
set -o nounset
set -o pipefail
# set -o xtrace # Uncomment this line for debugging purpose
# Load libraries
. /opt/bitnami/scripts/libsolr.sh
# Load solr environment variables
. /opt/bitnami/scripts/solr-env.sh
# Ensure solr environment variables are valid
solr_validate
# Ensure solr is initialized
solr_initialize