Remove */ol-7 if */debian-10 exists
This commit is contained in:
parent 2ca71f0dc3
commit e32b5d5a7b
@@ -1,32 +0,0 @@
FROM oraclelinux:7-slim
LABEL maintainer "Bitnami <containers@bitnami.com>"

ENV BITNAMI_PKG_CHMOD="-R g+rwX" \
    HOME="/" \
    OS_ARCH="x86_64" \
    OS_FLAVOUR="ol-7" \
    OS_NAME="linux"

COPY prebuildfs /
# Install required system packages and dependencies
RUN install_packages ca-certificates curl glibc gzip hostname libaio-devel procps-ng sudo tar which zlib
RUN . ./libcomponent.sh && component_unpack "java" "11.0.6-0" --checksum 44f5ec63dbe8d2ea9aaea237cec2d8821ac3ee6fa2812c295c0fa04068d0e08e
RUN . ./libcomponent.sh && component_unpack "elasticsearch" "6.8.6-2" --checksum 60aead94df15a17d3bf2f08a272ab08b0220b1584550ab9e900aead25fbd0bbf
RUN yum upgrade -y && \
    rm -r /var/cache/yum
RUN /build/install-gosu.sh
RUN curl --silent -L https://github.com/mikefarah/yq/releases/download/2.4.0/yq_linux_amd64 > /usr/local/bin/yq && echo 99a01ae32f0704773c72103adb7050ef5c5cad14b517a8612543821ef32d6cc9 /usr/local/bin/yq | sha256sum --check && chmod +x /usr/local/bin/yq && mkdir -p /opt/bitnami/licenses && curl --silent -L https://raw.githubusercontent.com/mikefarah/yq/master/LICENSE > /opt/bitnami/licenses/yq-2.4.0.txt

COPY rootfs /
RUN /postunpack.sh
ENV BITNAMI_APP_NAME="elasticsearch" \
    BITNAMI_IMAGE_VERSION="6.8.6-ol-7-r63" \
    LD_LIBRARY_PATH="/opt/bitnami/elasticsearch/jdk/lib:/opt/bitnami/elasticsearch/jdk/lib/server:$LD_LIBRARY_PATH" \
    NAMI_PREFIX="/.nami" \
    PATH="/opt/bitnami/java/bin:/opt/bitnami/elasticsearch/bin:$PATH"

EXPOSE 9200 9300

USER 1001
ENTRYPOINT [ "/entrypoint.sh" ]
CMD [ "/run.sh" ]

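For reference, a minimal sketch of how an image like the one above is built and run locally, assuming the Dockerfile sits in the current directory next to its prebuildfs/ and rootfs/ trees (the tag is illustrative):

    docker build -t bitnami/elasticsearch:6.8.6-ol-7-r63 .
    docker run -d --name elasticsearch -p 9200:9200 -p 9300:9300 bitnami/elasticsearch:6.8.6-ol-7-r63
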
@@ -1,13 +0,0 @@
version: '2'

services:
  elasticsearch:
    image: 'bitnami/elasticsearch:6-ol-7'
    ports:
      - '9200:9200'
      - '9300:9300'
    volumes:
      - 'elasticsearch_data:/bitnami/elasticsearch/data'
volumes:
  elasticsearch_data:
    driver: local

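A minimal usage sketch for the compose file above; the service and volume names come straight from the file, and the commands assume Docker Compose v1 syntax:

    docker-compose up -d
    curl http://localhost:9200/_cluster/health
    docker-compose down    # add -v to also remove the elasticsearch_data volume
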
@@ -1,10 +0,0 @@
#!/bin/bash

VERSION="1.11"
SHA256="0b843df6d86e270c5b0f5cbd3c326a04e18f4b7f9b8457fa497b0454c4b138d7"

curl --silent -L "https://github.com/tianon/gosu/releases/download/${VERSION}/gosu-amd64" > "/usr/local/bin/gosu"
echo "$SHA256" "/usr/local/bin/gosu" | sha256sum --check
chmod u+x "/usr/local/bin/gosu"
mkdir -p "/opt/bitnami/licenses"
curl --silent -L "https://raw.githubusercontent.com/tianon/gosu/master/LICENSE" > "/opt/bitnami/licenses/gosu-${VERSION}.txt"

@@ -1,50 +0,0 @@
#!/bin/bash
#
# Bitnami custom library

# Load Generic Libraries
. /liblog.sh

# Constants
BOLD='\033[1m'

# Functions

########################
# Print the welcome page
# Globals:
#   DISABLE_WELCOME_MESSAGE
#   BITNAMI_APP_NAME
# Arguments:
#   None
# Returns:
#   None
#########################
print_welcome_page() {
    if [[ -z "${DISABLE_WELCOME_MESSAGE:-}" ]]; then
        if [[ -n "$BITNAMI_APP_NAME" ]]; then
            print_image_welcome_page
        fi
    fi
}

########################
# Print the welcome page for a Bitnami Docker image
# Globals:
#   BITNAMI_APP_NAME
# Arguments:
#   None
# Returns:
#   None
#########################
print_image_welcome_page() {
    local github_url="https://github.com/bitnami/bitnami-docker-${BITNAMI_APP_NAME}"

    log ""
    log "${BOLD}Welcome to the Bitnami ${BITNAMI_APP_NAME} container${RESET}"
    log "Subscribe to project updates by watching ${BOLD}${github_url}${RESET}"
    log "Submit issues and feature requests at ${BOLD}${github_url}/issues${RESET}"
    log "Send us your feedback at ${BOLD}containers@bitnami.com${RESET}"
    log ""
}

@@ -1,64 +0,0 @@
#!/bin/bash
#
# Library for managing Bitnami components

# Constants
CACHE_ROOT="/tmp/bitnami/pkg/cache"
DOWNLOAD_URL="https://downloads.bitnami.com/files/stacksmith"

# Functions

########################
# Download and unpack a Bitnami package
# Globals:
#   OS_NAME
#   OS_ARCH
#   OS_FLAVOUR
# Arguments:
#   $1 - component's name
#   $2 - component's version
# Returns:
#   None
#########################
component_unpack() {
    local name="${1:?name is required}"
    local version="${2:?version is required}"
    local base_name="${name}-${version}-${OS_NAME}-${OS_ARCH}-${OS_FLAVOUR}"
    local package_sha256=""

    # Validate arguments
    shift 2
    while [ "$#" -gt 0 ]; do
        case "$1" in
            -c|--checksum)
                shift
                package_sha256="${1:?missing package checksum}"
                ;;
            *)
                echo "Invalid command line flag $1" >&2
                return 1
                ;;
        esac
        shift
    done

    echo "Downloading $base_name package"
    if [ -f "${CACHE_ROOT}/${base_name}.tar.gz" ]; then
        echo "${CACHE_ROOT}/${base_name}.tar.gz already exists, skipping download."
        cp "${CACHE_ROOT}/${base_name}.tar.gz" .
        rm "${CACHE_ROOT}/${base_name}.tar.gz"
        if [ -f "${CACHE_ROOT}/${base_name}.tar.gz.sha256" ]; then
            echo "Using the local sha256 from ${CACHE_ROOT}/${base_name}.tar.gz.sha256"
            package_sha256="$(< "${CACHE_ROOT}/${base_name}.tar.gz.sha256")"
            rm "${CACHE_ROOT}/${base_name}.tar.gz.sha256"
        fi
    else
        curl --remote-name --silent "${DOWNLOAD_URL}/${base_name}.tar.gz"
    fi
    if [ -n "$package_sha256" ]; then
        echo "Verifying package integrity"
        echo "$package_sha256 ${base_name}.tar.gz" | sha256sum --check -
    fi
    tar --directory /opt/bitnami --extract --gunzip --file "${base_name}.tar.gz" --no-same-owner --strip-components=2 "${base_name}/files/"
    rm "${base_name}.tar.gz"
}

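A short usage sketch of component_unpack, mirroring how the Dockerfiles in this commit call it; the component name, version and checksum are the ones from the 6.8.6 Dockerfile above:

    . ./libcomponent.sh
    component_unpack "elasticsearch" "6.8.6-2" \
        --checksum 60aead94df15a17d3bf2f08a272ab08b0220b1584550ab9e900aead25fbd0bbf
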
@@ -1,60 +0,0 @@
#!/bin/bash
#
# Library for managing files

# Functions

########################
# Replace a regex in a file
# Arguments:
#   $1 - filename
#   $2 - match regex
#   $3 - substitute regex
#   $4 - use POSIX regex. Default: true
# Returns:
#   None
#########################
replace_in_file() {
    local filename="${1:?filename is required}"
    local match_regex="${2:?match regex is required}"
    local substitute_regex="${3:?substitute regex is required}"
    local posix_regex=${4:-true}

    local result

    # We should avoid using 'sed in-place' substitutions
    # 1) They are not compatible with files mounted from ConfigMap(s)
    # 2) We found incompatibility issues with Debian10 and "in-place" substitutions
    if [[ $posix_regex = true ]]; then
        result="$(sed -E "s@$match_regex@$substitute_regex@g" "$filename")"
    else
        result="$(sed "s@$match_regex@$substitute_regex@g" "$filename")"
    fi
    echo "$result" > "$filename"
}

########################
# Remove a line in a file based on a regex
# Arguments:
#   $1 - filename
#   $2 - match regex
#   $3 - use POSIX regex. Default: true
# Returns:
#   None
#########################
remove_in_file() {
    local filename="${1:?filename is required}"
    local match_regex="${2:?match regex is required}"
    local posix_regex=${3:-true}
    local result

    # We should avoid using 'sed in-place' substitutions
    # 1) They are not compatible with files mounted from ConfigMap(s)
    # 2) We found incompatibility issues with Debian10 and "in-place" substitutions
    if [[ $posix_regex = true ]]; then
        result="$(sed -E "/$match_regex/d" "$filename")"
    else
        result="$(sed "/$match_regex/d" "$filename")"
    fi
    echo "$result" > "$filename"
}

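A minimal sketch of replace_in_file, modelled on how libelasticsearch.sh later uses it to rewrite JVM heap flags; the jvm.options path and heap values are illustrative:

    . /libfile.sh
    # Rewrite -Xmx/-Xms values in a JVM options file using extended (POSIX) regexes
    replace_in_file "/opt/bitnami/elasticsearch/config/jvm.options" "-Xmx[0-9]+[mg]+" "-Xmx1024m"
    replace_in_file "/opt/bitnami/elasticsearch/config/jvm.options" "-Xms[0-9]+[mg]+" "-Xms1024m"
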
@@ -1,129 +0,0 @@
#!/bin/bash
#
# Library for file system actions

# Load Generic Libraries
. /liblog.sh

# Functions

########################
# Ensure a file/directory is owned (user and group) by the given user
# Arguments:
#   $1 - filepath
#   $2 - owner
# Returns:
#   None
#########################
owned_by() {
    local path="${1:?path is missing}"
    local owner="${2:?owner is missing}"

    chown "$owner":"$owner" "$path"
}

########################
# Ensure a directory exists and, optionally, is owned by the given user
# Arguments:
#   $1 - directory
#   $2 - owner
# Returns:
#   None
#########################
ensure_dir_exists() {
    local dir="${1:?directory is missing}"
    local owner="${2:-}"

    mkdir -p "${dir}"
    if [[ -n $owner ]]; then
        owned_by "$dir" "$owner"
    fi
}

########################
# Checks whether a directory is empty or not
# Arguments:
#   $1 - directory
# Returns:
#   Boolean
#########################
is_dir_empty() {
    local dir="${1:?missing directory}"

    if [[ ! -e "$dir" ]] || [[ -z "$(ls -A "$dir")" ]]; then
        true
    else
        false
    fi
}

########################
# Configure permissions and ownership recursively
# Globals:
#   None
# Arguments:
#   $1 - paths (as a string).
# Flags:
#   -f|--file-mode - mode for files
#   -d|--dir-mode - mode for directories
#   -u|--user - user
#   -g|--group - group
# Returns:
#   None
#########################
configure_permissions_ownership() {
    local -r paths="${1:?paths is missing}"
    local dir_mode=""
    local file_mode=""
    local user=""
    local group=""

    # Validate arguments
    shift 1
    while [ "$#" -gt 0 ]; do
        case "$1" in
            -f|--file-mode)
                shift
                file_mode="${1:?missing mode for files}"
                ;;
            -d|--dir-mode)
                shift
                dir_mode="${1:?missing mode for directories}"
                ;;
            -u|--user)
                shift
                user="${1:?missing user}"
                ;;
            -g|--group)
                shift
                group="${1:?missing group}"
                ;;
            *)
                echo "Invalid command line flag $1" >&2
                return 1
                ;;
        esac
        shift
    done

    read -r -a filepaths <<< "$paths"
    for p in "${filepaths[@]}"; do
        if [[ -e "$p" ]]; then
            if [[ -n $dir_mode ]]; then
                find -L "$p" -type d -exec chmod "$dir_mode" {} \;
            fi
            if [[ -n $file_mode ]]; then
                find -L "$p" -type f -exec chmod "$file_mode" {} \;
            fi
            if [[ -n $user ]] && [[ -n $group ]]; then
                chown -LR "$user":"$group" "$p"
            elif [[ -n $user ]] && [[ -z $group ]]; then
                chown -LR "$user" "$p"
            elif [[ -z $user ]] && [[ -n $group ]]; then
                chgrp -LR "$group" "$p"
            fi
        else
            stderr_print "$p does not exist"
        fi
    done
}

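A usage sketch of configure_permissions_ownership; note that the first argument is a single space-separated string of paths, and the paths, modes and accounts below are assumptions for illustration:

    . /libfs.sh
    configure_permissions_ownership "/bitnami/elasticsearch /opt/bitnami/elasticsearch" \
        --dir-mode 775 --file-mode 664 --user 1001 --group root
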
@@ -1,83 +0,0 @@
#!/bin/bash
#
# Library for logging functions

# Constants
RESET='\033[0m'
RED='\033[38;5;1m'
GREEN='\033[38;5;2m'
YELLOW='\033[38;5;3m'
MAGENTA='\033[38;5;5m'
CYAN='\033[38;5;6m'

# Functions

########################
# Print to STDERR
# Arguments:
#   Message to print
# Returns:
#   None
#########################
stderr_print() {
    printf "%b\\n" "${*}" >&2
}

########################
# Log message
# Arguments:
#   Message to log
# Returns:
#   None
#########################
log() {
    stderr_print "${CYAN}${MODULE:-} ${MAGENTA}$(date "+%T.%2N ")${RESET}${*}"
}

########################
# Log an 'info' message
# Arguments:
#   Message to log
# Returns:
#   None
#########################
info() {
    log "${GREEN}INFO ${RESET} ==> ${*}"
}

########################
# Log a 'warn' message
# Arguments:
#   Message to log
# Returns:
#   None
#########################
warn() {
    log "${YELLOW}WARN ${RESET} ==> ${*}"
}

########################
# Log an 'error' message
# Arguments:
#   Message to log
# Returns:
#   None
#########################
error() {
    log "${RED}ERROR${RESET} ==> ${*}"
}

########################
# Log a 'debug' message
# Globals:
#   BITNAMI_DEBUG
# Arguments:
#   Message to log
# Returns:
#   None
#########################
debug() {
    # 'is_boolean_yes' is defined in libvalidations.sh, but depends on this file so we cannot source it
    local -r bool="${BITNAMI_DEBUG:-false}"
    # comparison is performed without regard to the case of alphabetic characters
    shopt -s nocasematch
    if [[ "$bool" = 1 || "$bool" =~ ^(yes|true)$ ]]; then
        log "${MAGENTA}DEBUG${RESET} ==> ${*}"
    fi
}

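A quick sketch of the logging helpers above; all output goes to stderr and BITNAMI_DEBUG gates the debug() messages (the messages themselves are illustrative):

    . /liblog.sh
    info "Starting setup"
    warn "Using default configuration"
    error "Configuration file not found"
    BITNAMI_DEBUG=true debug "Verbose details shown only in debug mode"
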
@@ -1,44 +0,0 @@
#!/bin/bash
#
# Library for network functions

# Functions

########################
# Resolve dns
# Arguments:
#   $1 - Hostname to resolve
# Returns:
#   IP
#########################
dns_lookup() {
    local host="${1:?host is missing}"
    getent ahosts "$host" | awk '/STREAM/ {print $1 }'
}

########################
# Get machine's IP
# Arguments:
#   None
# Returns:
#   Machine IP
#########################
get_machine_ip() {
    dns_lookup "$(hostname)"
}

########################
# Check if the provided argument is a resolved hostname
# Arguments:
#   $1 - Value to check
# Returns:
#   Boolean
#########################
is_hostname_resolved() {
    local -r host="${1:?missing value}"
    if [[ -n "$(dns_lookup "$host")" ]]; then
        true
    else
        false
    fi
}

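A small sketch of the network helpers; the hostname is illustrative and assumes the container can resolve it:

    . /libnet.sh
    dns_lookup "elasticsearch-node-1"        # prints the resolved IP(s)
    get_machine_ip                           # IP of the current container
    is_hostname_resolved "elasticsearch-node-1" && echo "resolvable"
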
@@ -1,132 +0,0 @@
#!/bin/bash
#
# Library for operating system actions

# Functions

########################
# Check if a user exists in the system
# Arguments:
#   $1 - user
# Returns:
#   Boolean
#########################
user_exists() {
    local user="${1:?user is missing}"
    id "$user" >/dev/null 2>&1
}

########################
# Check if a group exists in the system
# Arguments:
#   $1 - group
# Returns:
#   Boolean
#########################
group_exists() {
    local group="${1:?group is missing}"
    getent group "$group" >/dev/null 2>&1
}

########################
# Create a group in the system if it does not exist already
# Arguments:
#   $1 - group
# Returns:
#   None
#########################
ensure_group_exists() {
    local group="${1:?group is missing}"

    if ! group_exists "$group"; then
        groupadd "$group" >/dev/null 2>&1
    fi
}

########################
# Create a user in the system if it does not exist already
# Arguments:
#   $1 - user
#   $2 - group
# Returns:
#   None
#########################
ensure_user_exists() {
    local user="${1:?user is missing}"
    local group="${2:-}"

    if ! user_exists "$user"; then
        useradd "$user" >/dev/null 2>&1
        if [[ -n "$group" ]]; then
            ensure_group_exists "$group"
            usermod -a -G "$group" "$user" >/dev/null 2>&1
        fi
    fi
}

########################
# Check if the script is currently running as root
# Arguments:
#   None
# Returns:
#   Boolean
#########################
am_i_root() {
    if [[ "$(id -u)" = "0" ]]; then
        true
    else
        false
    fi
}

########################
# Get total memory available
# Arguments:
#   None
# Returns:
#   Memory in megabytes
#########################
get_total_memory() {
    echo $(($(grep MemTotal /proc/meminfo | awk '{print $2}') / 1024))
}

########################
# Redirects output to /dev/null if debug mode is disabled
# Globals:
#   BITNAMI_DEBUG
# Arguments:
#   $@ - Command to execute
# Returns:
#   None
#########################
debug_execute() {
    if ${BITNAMI_DEBUG:-false}; then
        "$@"
    else
        "$@" >/dev/null 2>&1
    fi
}

########################
# Retries a command a given number of times
# Arguments:
#   $1 - cmd (as a string)
#   $2 - max retries. Default: 12
#   $3 - sleep between retries (in seconds). Default: 5
# Returns:
#   Boolean
#########################
retry_while() {
    local -r cmd="${1:?cmd is missing}"
    local -r retries="${2:-12}"
    local -r sleep_time="${3:-5}"
    local return_value=1

    read -r -a command <<< "$cmd"
    for ((i = 1 ; i <= retries ; i+=1 )); do
        "${command[@]}" && return_value=0 && break
        sleep "$sleep_time"
    done
    return $return_value
}

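A usage sketch of retry_while; the command string is split on whitespace before being re-executed, and the retry count, sleep interval and URL below are illustrative:

    . /libos.sh
    # Retry up to 6 times, sleeping 5 seconds between attempts
    if retry_while "curl --silent --fail http://localhost:9200" 6 5; then
        echo "Elasticsearch answered"
    fi
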
@@ -1,57 +0,0 @@
#!/bin/bash
#
# Library for managing services

# Functions

########################
# Read the provided pid file and return its PID
# Arguments:
#   $1 - Pid file
# Returns:
#   PID
#########################
get_pid_from_file() {
    local pid_file="${1:?pid file is missing}"

    if [[ -f "$pid_file" ]]; then
        if [[ -n "$(< "$pid_file")" ]] && [[ "$(< "$pid_file")" -gt 0 ]]; then
            echo "$(< "$pid_file")"
        fi
    fi
}

########################
# Check if a provided PID corresponds to a running service
# Arguments:
#   $1 - PID
# Returns:
#   Boolean
#########################
is_service_running() {
    local pid="${1:?pid is missing}"

    kill -0 "$pid" 2>/dev/null
}

########################
# Stop a service by sending a termination signal to its pid
# Arguments:
#   $1 - Pid file
# Returns:
#   None
#########################
stop_service_using_pid() {
    local pid_file="${1:?pid file is missing}"
    local pid

    pid="$(get_pid_from_file "$pid_file")"
    [[ -z "$pid" ]] || ! is_service_running "$pid" && return

    kill "$pid"
    local counter=10
    while [[ "$counter" -ne 0 ]] && is_service_running "$pid"; do
        sleep 1
        counter=$((counter - 1))
    done
}

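A small sketch of the pid-file helpers, using the same pid file path the Elasticsearch scripts later in this commit use:

    . /libservice.sh
    pid="$(get_pid_from_file "/opt/bitnami/elasticsearch/tmp/elasticsearch.pid")"
    if [[ -n "$pid" ]] && is_service_running "$pid"; then
        stop_service_using_pid "/opt/bitnami/elasticsearch/tmp/elasticsearch.pid"
    fi
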
@@ -1,246 +0,0 @@
#!/bin/bash
#
# Validation functions library

# Load Generic Libraries
. /liblog.sh

# Functions

########################
# Check if the provided argument is an integer
# Arguments:
#   $1 - Value to check
# Returns:
#   Boolean
#########################
is_int() {
    local -r int="${1:?missing value}"
    if [[ "$int" =~ ^-?[0-9]+$ ]]; then
        true
    else
        false
    fi
}

########################
# Check if the provided argument is a positive integer
# Arguments:
#   $1 - Value to check
# Returns:
#   Boolean
#########################
is_positive_int() {
    local -r int="${1:?missing value}"
    if is_int "$int" && (( "${int}" >= 0 )); then
        true
    else
        false
    fi
}

########################
# Check if the provided argument is a boolean or is the string 'yes/true'
# Arguments:
#   $1 - Value to check
# Returns:
#   Boolean
#########################
is_boolean_yes() {
    local -r bool="${1:-}"
    # comparison is performed without regard to the case of alphabetic characters
    shopt -s nocasematch
    if [[ "$bool" = 1 || "$bool" =~ ^(yes|true)$ ]]; then
        true
    else
        false
    fi
}

########################
# Check if the provided argument is a boolean yes/no value
# Arguments:
#   $1 - Value to check
# Returns:
#   Boolean
#########################
is_yes_no_value() {
    local -r bool="${1:-}"
    if [[ "$bool" =~ ^(yes|no)$ ]]; then
        true
    else
        false
    fi
}

########################
# Check if the provided argument is a boolean true/false value
# Arguments:
#   $1 - Value to check
# Returns:
#   Boolean
#########################
is_true_false_value() {
    local -r bool="${1:-}"
    if [[ "$bool" =~ ^(true|false)$ ]]; then
        true
    else
        false
    fi
}

########################
# Check if the provided argument is an empty string or not defined
# Arguments:
#   $1 - Value to check
# Returns:
#   Boolean
#########################
is_empty_value() {
    local -r val="${1:-}"
    if [[ -z "$val" ]]; then
        true
    else
        false
    fi
}

########################
# Validate if the provided argument is a valid port
# Arguments:
#   $1 - Port to validate
# Returns:
#   Boolean and error message
#########################
validate_port() {
    local value
    local unprivileged=0

    # Parse flags
    while [[ "$#" -gt 0 ]]; do
        case "$1" in
            -unprivileged)
                unprivileged=1
                ;;
            --)
                shift
                break
                ;;
            -*)
                stderr_print "unrecognized flag $1"
                return 1
                ;;
            *)
                break
                ;;
        esac
        shift
    done

    if [[ "$#" -gt 1 ]]; then
        echo "too many arguments provided"
        return 2
    elif [[ "$#" -eq 0 ]]; then
        stderr_print "missing port argument"
        return 1
    else
        value=$1
    fi

    if [[ -z "$value" ]]; then
        echo "the value is empty"
        return 1
    else
        if ! is_int "$value"; then
            echo "value is not an integer"
            return 2
        elif [[ "$value" -lt 0 ]]; then
            echo "negative value provided"
            return 2
        elif [[ "$value" -gt 65535 ]]; then
            echo "requested port is greater than 65535"
            return 2
        elif [[ "$unprivileged" = 1 && "$value" -lt 1024 ]]; then
            echo "privileged port requested"
            return 3
        fi
    fi
}

########################
# Validate if the provided argument is a valid IPv4 address
# Arguments:
#   $1 - IP to validate
# Returns:
#   Boolean
#########################
validate_ipv4() {
    local ip="${1:?ip is missing}"
    local stat=1

    if [[ $ip =~ ^[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}$ ]]; then
        read -r -a ip_array <<< "$(tr '.' ' ' <<< "$ip")"
        [[ ${ip_array[0]} -le 255 && ${ip_array[1]} -le 255 \
            && ${ip_array[2]} -le 255 && ${ip_array[3]} -le 255 ]]
        stat=$?
    fi
    return $stat
}

########################
# Validate a string format
# Arguments:
#   $1 - String to validate
# Returns:
#   Boolean
#########################
validate_string() {
    local string
    local min_length=-1
    local max_length=-1

    # Parse flags
    while [ "$#" -gt 0 ]; do
        case "$1" in
            -min-length)
                shift
                min_length=${1:-}
                ;;
            -max-length)
                shift
                max_length=${1:-}
                ;;
            --)
                shift
                break
                ;;
            -*)
                stderr_print "unrecognized flag $1"
                return 1
                ;;
            *)
                break
                ;;
        esac
        shift
    done

    if [ "$#" -gt 1 ]; then
        stderr_print "too many arguments provided"
        return 2
    elif [ "$#" -eq 0 ]; then
        stderr_print "missing string"
        return 1
    else
        string=$1
    fi

    if [[ "$min_length" -ge 0 ]] && [[ "${#string}" -lt "$min_length" ]]; then
        echo "string length is less than $min_length"
        return 1
    fi
    if [[ "$max_length" -ge 0 ]] && [[ "${#string}" -gt "$max_length" ]]; then
        echo "string length is greater than $max_length"
        return 1
    fi
}

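A usage sketch of the validation helpers; the port value is illustrative, and -unprivileged matches the single-dash flag spelling the library actually parses:

    . /libvalidations.sh
    if err="$(validate_port -unprivileged 9200)"; then
        echo "port is valid"
    else
        echo "invalid port: $err"
    fi
    is_boolean_yes "yes" && echo "treated as true"
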
@@ -1,47 +0,0 @@
#!/bin/bash
#
# Library for managing version strings

# Load Generic Libraries
. ./liblog.sh

# Functions
########################
# Gets semantic version
# Arguments:
#   $1 - version: string to extract major.minor.patch
#   $2 - section: 1 to extract major, 2 to extract minor, 3 to extract patch
# Returns:
#   The requested version section (major, minor or patch)
#########################
get_sematic_version () {
    local version="${1:?version is required}"
    local section="${2:?section is required}"
    local -a version_sections

    # Regex to parse versions: x.y.z
    local -r regex='([0-9]+)(\.([0-9]+)(\.([0-9]+))?)?'

    if [[ "$version" =~ $regex ]]; then
        local i=1
        local j=1
        local n=${#BASH_REMATCH[*]}

        while [[ $i -lt $n ]]; do
            if [[ -n "${BASH_REMATCH[$i]}" ]] && [[ "${BASH_REMATCH[$i]:0:1}" != '.' ]]; then
                version_sections[$j]=${BASH_REMATCH[$i]}
                ((j++))
            fi
            ((i++))
        done

        local number_regex='^[0-9]+$'
        if [[ "$section" =~ $number_regex ]] && (( $section > 0 )) && (( $section <= 3 )); then
            echo "${version_sections[$section]}"
            return
        else
            stderr_print "Section allowed values are: 1, 2, and 3"
            return 1
        fi
    fi
}

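A short sketch of get_sematic_version (the function name keeps the upstream spelling); section 1, 2 or 3 selects major, minor or patch, and the library expects liblog.sh in the current directory:

    . ./libversion.sh
    get_sematic_version "7.6.0" 1    # -> 7
    get_sematic_version "7.6.0" 2    # -> 6
    get_sematic_version "7.6.0" 3    # -> 0
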
@@ -1,29 +0,0 @@
#!/bin/bash
set -eu

if [[ -n "oracle-epel-release-el7" ]]; then
    if ! yum list installed oracle-epel-release-el7 >/dev/null 2>&1; then
        yum -y install oracle-epel-release-el7 >/dev/null 2>&1 || {
            echo "EPEL repository installation failed"
            exit 1
        }
    fi
fi

max=2
for ((n = 1 ; n <= max ; n+=1 )); do
    set +e
    yum --enablerepo base,updates,ol7_developer_EPEL,ol7_optional_latest install -y "$@"
    CODE=$?
    set -e
    if (( $CODE == 0 )); then
        break
    fi
    if (( $n == $max )); then
        exit $CODE
    fi
    echo "yum failed, retrying"
done
rm -r /var/cache/yum

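The wrapper above is what the Dockerfiles invoke during build; a representative call, with the package list taken from the 6.8.6 Dockerfile earlier in this commit:

    install_packages ca-certificates curl glibc gzip hostname libaio-devel procps-ng sudo tar which zlib
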
@@ -1,25 +0,0 @@
#!/bin/bash

set -o errexit
set -o nounset
set -o pipefail
#set -o xtrace
# shellcheck disable=SC1091

# Load libraries
. /libbitnami.sh
. /libelasticsearch.sh

# Load Elasticsearch environment variables
eval "$(elasticsearch_env)"

print_welcome_page

if [[ "$*" = "/run.sh" ]]; then
    info "** Starting Elasticsearch setup **"
    /setup.sh
    info "** Elasticsearch setup finished! **"
fi

echo ""
exec "$@"

@@ -1,469 +0,0 @@
#!/bin/bash
#
# Bitnami Elasticsearch library

# shellcheck disable=SC1091

# Load Generic Libraries
. /libfile.sh
. /liblog.sh
. /libnet.sh
. /libos.sh
. /libservice.sh
. /libvalidations.sh

# Functions

########################
# Write a configuration setting value
# Globals:
#   ELASTICSEARCH_CONF_FILE
# Arguments:
#   $1 - key
#   $2 - value
# Returns:
#   None
#########################
elasticsearch_conf_write() {
    local key="${1:?missing key}"
    local value="${2:?missing value}"

    if [[ -s "$ELASTICSEARCH_CONF_FILE" ]]; then
        yq w -i "$ELASTICSEARCH_CONF_FILE" "$key" "$value"
    else
        yq n "$key" "$value" > "$ELASTICSEARCH_CONF_FILE"
    fi
}

########################
# Set a configuration setting value
# Globals:
#   ELASTICSEARCH_CONF_FILE
# Arguments:
#   $1 - key
#   $2 - values (array)
# Returns:
#   None
#########################
elasticsearch_conf_set() {
    local key="${1:?missing key}"
    shift
    local values=("${@}")

    if [[ "${#values[@]}" -eq 0 ]]; then
        stderr_print "missing values"
        return 1
    elif [[ "${#values[@]}" -eq 1 ]] && [[ -n "${values[0]}" ]]; then
        elasticsearch_conf_write "$key" "${values[0]}"
    else
        for i in "${!values[@]}"; do
            if [[ -n "${values[$i]}" ]]; then
                elasticsearch_conf_write "$key" "${values[$i]}"
            fi
        done
    fi
}

########################
# Check if Elasticsearch is running
# Globals:
#   ELASTICSEARCH_TMPDIR
# Arguments:
#   None
# Returns:
#   Boolean
#########################
is_elasticsearch_running() {
    local pid
    pid="$(get_pid_from_file "${ELASTICSEARCH_TMPDIR}/elasticsearch.pid")"

    if [[ -z "$pid" ]]; then
        false
    else
        is_service_running "$pid"
    fi
}

########################
# Stop Elasticsearch
# Globals:
#   ELASTICSEARCH_TMPDIR
# Arguments:
#   None
# Returns:
#   None
#########################
elasticsearch_stop() {
    ! is_elasticsearch_running && return
    debug "Stopping Elasticsearch..."
    stop_service_using_pid "$ELASTICSEARCH_TMPDIR/elasticsearch.pid"
}

########################
# Start Elasticsearch and wait until it's ready
# Globals:
#   ELASTICSEARCH_*
# Arguments:
#   None
# Returns:
#   None
#########################
elasticsearch_start() {
    is_elasticsearch_running && return

    debug "Starting Elasticsearch..."
    local command=("${ELASTICSEARCH_BASEDIR}/bin/elasticsearch" "-d" "-p" "${ELASTICSEARCH_TMPDIR}/elasticsearch.pid" "-Epath.data=$ELASTICSEARCH_DATADIR")
    am_i_root && command=("gosu" "$ELASTICSEARCH_DAEMON_USER" "${command[@]}")
    if [[ "${BITNAMI_DEBUG:-false}" = true ]]; then
        "${command[@]}" &
    else
        "${command[@]}" >/dev/null 2>&1 &
    fi

    local counter=50
    while ! is_elasticsearch_running ; do
        if [[ "$counter" -eq 0 ]]; then
            break
        fi
        sleep 2
        counter=$((counter - 1))
    done
    local log_result=""
    local log_counter=30
    while [[ -z "$log_result" ]] && [[ "$log_counter" -ne 0 ]]; do
        log_counter=$(("$log_counter" - 1))
        log_result="$(tail -7 "${ELASTICSEARCH_LOGDIR}/elasticsearch.log" | grep -i "Node" | grep -i "started")"
        sleep 2
    done
}

########################
# Load global variables used on Elasticsearch configuration
# Globals:
#   ELASTICSEARCH_*
# Arguments:
#   None
# Returns:
#   Series of exports to be used as 'eval' arguments
#########################
elasticsearch_env() {
    cat <<"EOF"
export ELASTICSEARCH_BASEDIR="/opt/bitnami/elasticsearch"
export ELASTICSEARCH_DATADIR="/bitnami/elasticsearch/data"
export ELASTICSEARCH_CONFDIR="${ELASTICSEARCH_BASEDIR}/config"
export ELASTICSEARCH_CONF_FILE="${ELASTICSEARCH_CONFDIR}/elasticsearch.yml"
export ELASTICSEARCH_TMPDIR="${ELASTICSEARCH_BASEDIR}/tmp"
export ELASTICSEARCH_LOGDIR="${ELASTICSEARCH_BASEDIR}/logs"
export PATH="${ELASTICSEARCH_BASEDIR}/bin:$PATH"
export ELASTICSEARCH_DAEMON_USER="${ELASTICSEARCH_DAEMON_USER:-elasticsearch}"
export ELASTICSEARCH_DAEMON_GROUP="${ELASTICSEARCH_DAEMON_GROUP:-elasticsearch}"
export ELASTICSEARCH_BIND_ADDRESS="${ELASTICSEARCH_BIND_ADDRESS:-}"
export ELASTICSEARCH_CLUSTER_HOSTS="${ELASTICSEARCH_CLUSTER_HOSTS:-}"
export ELASTICSEARCH_CLUSTER_MASTER_HOSTS="${ELASTICSEARCH_CLUSTER_MASTER_HOSTS:-}"
export ELASTICSEARCH_CLUSTER_NAME="${ELASTICSEARCH_CLUSTER_NAME:-}"
export ELASTICSEARCH_HEAP_SIZE="${ELASTICSEARCH_HEAP_SIZE:-1024m}"
export ELASTICSEARCH_IS_DEDICATED_NODE="${ELASTICSEARCH_IS_DEDICATED_NODE:-no}"
export ELASTICSEARCH_MINIMUM_MASTER_NODES="${ELASTICSEARCH_MINIMUM_MASTER_NODES:-}"
export ELASTICSEARCH_NODE_NAME="${ELASTICSEARCH_NODE_NAME:-}"
export ELASTICSEARCH_NODE_PORT_NUMBER="${ELASTICSEARCH_NODE_PORT_NUMBER:-9300}"
export ELASTICSEARCH_NODE_TYPE="${ELASTICSEARCH_NODE_TYPE:-master}"
export ELASTICSEARCH_PLUGINS="${ELASTICSEARCH_PLUGINS:-}"
export ELASTICSEARCH_PORT_NUMBER="${ELASTICSEARCH_PORT_NUMBER:-9200}"
## JVM
export JAVA_HOME="${JAVA_HOME:-/opt/bitnami/java}"
EOF
}

########################
# Validate kernel settings
# Arguments:
#   None
# Returns:
#   None
#########################
elasticsearch_validate_kernel() {
    # Auxiliary functions
    validate_sysctl_key() {
        local key="${1:?key is missing}"
        local value="${2:?value is missing}"
        local current_value
        current_value="$(sysctl -n "$key")"
        if [[ "$current_value" -lt "$value" ]]; then
            error "Invalid kernel settings. Elasticsearch requires at least: $key = $value"
            exit 1
        fi
    }

    debug "Validating Kernel settings..."
    validate_sysctl_key "vm.max_map_count" 262144
    validate_sysctl_key "fs.file-max" 65536
}

########################
# Validate settings in ELASTICSEARCH_* env vars
# Globals:
#   ELASTICSEARCH_*
# Arguments:
#   None
# Returns:
#   None
#########################
elasticsearch_validate() {
    local error_code=0

    # Auxiliary functions
    print_validation_error() {
        error "$1"
        error_code=1
    }

    validate_node_type() {
        case "$ELASTICSEARCH_NODE_TYPE" in
            coordinating|data|ingest|master)
                ;;
            *)
                print_validation_error "Invalid node type $ELASTICSEARCH_NODE_TYPE. Supported types are 'coordinating/data/ingest/master'"
        esac
    }

    debug "Validating settings in ELASTICSEARCH_* env vars..."
    local validate_port_args=()
    ! am_i_root && validate_port_args+=("-unprivileged")
    for var in "ELASTICSEARCH_PORT_NUMBER" "ELASTICSEARCH_NODE_PORT_NUMBER"; do
        if ! err=$(validate_port "${validate_port_args[@]}" "${!var}"); then
            print_validation_error "An invalid port was specified in the environment variable $var: $err"
        fi
    done
    is_boolean_yes "$ELASTICSEARCH_IS_DEDICATED_NODE" && validate_node_type
    if [[ -n "$ELASTICSEARCH_BIND_ADDRESS" ]] && ! validate_ipv4 "$ELASTICSEARCH_BIND_ADDRESS"; then
        print_validation_error "The Bind Address specified in the environment variable ELASTICSEARCH_BIND_ADDRESS is not a valid IPv4"
    fi

    [[ "$error_code" -eq 0 ]] || exit "$error_code"
}

# Bash uses floor division by default. You can use it to get the ceiling:
# ceil( a/b ) = floor( (a+b-1)/b )
ceiling45() {
    local num=$(($1*4))
    local div=5
    echo $(( (num + div - 1) / div ))
}

########################
# Configure Elasticsearch cluster settings
# Globals:
#   ELASTICSEARCH_*
# Arguments:
#   None
# Returns:
#   None
#########################
elasticsearch_cluster_configuration() {
    # Auxiliary functions
    bind_address() {
        if [[ -n "$ELASTICSEARCH_BIND_ADDRESS" ]]; then
            echo "[$ELASTICSEARCH_BIND_ADDRESS, _local_]"
        else
            echo "0.0.0.0"
        fi
    }

    info "Configuring Elasticsearch cluster settings..."
    elasticsearch_conf_set network.host "$(get_machine_ip)"
    elasticsearch_conf_set network.publish_host "$(get_machine_ip)"
    elasticsearch_conf_set network.bind_host "$(bind_address)"
    elasticsearch_conf_set cluster.name "$ELASTICSEARCH_CLUSTER_NAME"
    elasticsearch_conf_set node.name "${ELASTICSEARCH_NODE_NAME:-$(hostname)}"
    if [[ -n "$ELASTICSEARCH_CLUSTER_HOSTS" ]]; then
        read -r -a host_list <<< "$(tr ',;' ' ' <<< "$ELASTICSEARCH_CLUSTER_HOSTS")"
        master_list=( "${host_list[@]}" )
        if [[ -n "$ELASTICSEARCH_CLUSTER_MASTER_HOSTS" ]]; then
            read -r -a master_list <<< "$(tr ',;' ' ' <<< "$ELASTICSEARCH_CLUSTER_MASTER_HOSTS")"
        fi
        ELASTICSEARCH_MAJOR_VERSION=$(elasticsearch --version | grep Version: | awk -F "," '{print $1}' | awk -F ":" '{print $2}' | awk -F "." '{print $1}')
        if [[ "$ELASTICSEARCH_MAJOR_VERSION" -le 6 ]]; then
            elasticsearch_conf_set discovery.zen.ping.unicast.hosts "${host_list[@]}"
        else
            elasticsearch_conf_set discovery.seed_hosts "${host_list[@]}"
        fi
        elasticsearch_conf_set discovery.initial_state_timeout "5m"
        elasticsearch_conf_set gateway.recover_after_nodes "$(ceiling45 "${#host_list[@]}")"
        elasticsearch_conf_set gateway.expected_nodes "${#host_list[@]}"
        if [[ "$ELASTICSEARCH_NODE_TYPE" = "master" ]] && [[ "$ELASTICSEARCH_MAJOR_VERSION" -gt 6 ]]; then
            elasticsearch_conf_set cluster.initial_master_nodes "${master_list[@]}"
        fi
        if [[ -n "$ELASTICSEARCH_MINIMUM_MASTER_NODES" ]]; then
            debug "Setting minimum master nodes for quorum to $ELASTICSEARCH_MINIMUM_MASTER_NODES..."
            elasticsearch_conf_set discovery.zen.minimum_master_nodes "$ELASTICSEARCH_MINIMUM_MASTER_NODES"
        elif [[ "${#host_list[@]}" -gt 2 ]]; then
            local min_masters=""
            min_masters=$(((${#host_list[@]} / 2) + 1))
            debug "Calculating minimum master nodes for quorum: $min_masters..."
            elasticsearch_conf_set discovery.zen.minimum_master_nodes "$min_masters"
        fi
    else
        elasticsearch_conf_set "discovery.type" "single-node"
    fi
}

########################
# Configure Elasticsearch node type
# Globals:
#   ELASTICSEARCH_*
# Arguments:
#   None
# Returns:
#   None
#########################
elasticsearch_configure_node_type() {
    local is_master="false"
    local is_data="false"
    local is_ingest="false"
    if is_boolean_yes "$ELASTICSEARCH_IS_DEDICATED_NODE"; then
        case "$ELASTICSEARCH_NODE_TYPE" in
            coordinating)
                ;;
            data)
                is_data="true"
                ;;
            ingest)
                is_ingest="true"
                ;;
            master)
                is_master="true"
                ;;
            *)
                error "Invalid node type '$ELASTICSEARCH_NODE_TYPE'"
                exit 1
                ;;
        esac
    else
        is_master="true"
        is_data="true"
    fi
    debug "Configuring Elasticsearch node type..."
    elasticsearch_conf_set node.master "$is_master"
    elasticsearch_conf_set node.data "$is_data"
    elasticsearch_conf_set node.ingest "$is_ingest"
}

########################
# Configure Elasticsearch Heap Size
# Globals:
#   ELASTICSEARCH_*
# Arguments:
#   None
# Returns:
#   None
#########################
elasticsearch_set_heap_size() {
    local heap_size
    if [[ -n "$ELASTICSEARCH_HEAP_SIZE" ]]; then
        debug "Using specified values for Xmx and Xms heap options..."
        heap_size="$ELASTICSEARCH_HEAP_SIZE"
    else
        debug "Calculating appropriate Xmx and Xms values..."
        local machine_mem=""
        machine_mem="$(get_total_memory)"
        if [[ "$machine_mem" -lt 65536 ]]; then
            heap_size="$(("$machine_mem" / 2))m"
        else
            heap_size=32768m
        fi
    fi
    debug "Setting '-Xmx${heap_size} -Xms${heap_size}' heap options..."
    replace_in_file "${ELASTICSEARCH_CONFDIR}/jvm.options" "-Xmx[0-9]+[mg]+" "-Xmx${heap_size}"
    replace_in_file "${ELASTICSEARCH_CONFDIR}/jvm.options" "-Xms[0-9]+[mg]+" "-Xms${heap_size}"
}

########################
# Migrate old Elasticsearch data
# Globals:
#   ELASTICSEARCH_*
# Arguments:
#   None
# Returns:
#   None
#########################
migrate_old_data() {
    warn "Persisted data follows old structure. Migrating to new one..."
    warn "Custom configuration files won't be persisted any longer!"
    local old_data_dir="${ELASTICSEARCH_DATADIR}/elasticsearch"
    local old_custom_conf_file="${old_data_dir}/conf/elasticsearch_custom.yml"
    local custom_conf_file="${ELASTICSEARCH_CONFDIR}/elasticsearch_custom.yml"
    if [[ -f "$old_custom_conf_file" ]]; then
        debug "Adding old custom configuration to user configuration"
        echo "" >> "$custom_conf_file"
        cat "$old_custom_conf_file" >> "$custom_conf_file"
    fi
    debug "Adapting data to new file structure"
    find "${old_data_dir}/data" -maxdepth 1 -mindepth 1 -exec mv {} "$ELASTICSEARCH_DATADIR" \;
    debug "Removing data that is not persisted anymore from persisted directory"
    rm -rf "$old_data_dir" "${ELASTICSEARCH_DATADIR}/java"
}

########################
# Configure/initialize Elasticsearch
# Globals:
#   ELASTICSEARCH_*
# Arguments:
#   None
# Returns:
#   None
#########################
elasticsearch_initialize() {
    info "Configuring/Initializing Elasticsearch..."

    # This fixes an issue where the trap would kill the entrypoint.sh, if a PID was left over from a previous run
    # Exec replaces the process without creating a new one, and when the container is restarted it may have the same PID
    rm -f "$ELASTICSEARCH_TMPDIR/elasticsearch.pid"

    # Persisted data from old versions
    if ! is_dir_empty "$ELASTICSEARCH_DATADIR"; then
        debug "Detected persisted data from previous deployments"
        [[ -d "$ELASTICSEARCH_DATADIR/elasticsearch" ]] && [[ -f "$ELASTICSEARCH_DATADIR/elasticsearch/.initialized" ]] && migrate_old_data
    fi

    debug "Ensuring expected directories/files exist..."
    for dir in "$ELASTICSEARCH_TMPDIR" "$ELASTICSEARCH_DATADIR" "$ELASTICSEARCH_LOGDIR" "$ELASTICSEARCH_BASEDIR/plugins" "$ELASTICSEARCH_BASEDIR/modules" "$ELASTICSEARCH_CONFDIR"; do
        ensure_dir_exists "$dir"
        am_i_root && chown -R "$ELASTICSEARCH_DAEMON_USER:$ELASTICSEARCH_DAEMON_GROUP" "$dir"
    done

    if [[ -f "$ELASTICSEARCH_CONF_FILE" ]]; then
        info "Custom configuration file detected, using it..."
    else
        info "Setting default configuration"
        touch "$ELASTICSEARCH_CONF_FILE"
        elasticsearch_conf_set http.port "$ELASTICSEARCH_PORT_NUMBER"
        elasticsearch_conf_set path.data "$ELASTICSEARCH_DATADIR"
        elasticsearch_conf_set transport.tcp.port "$ELASTICSEARCH_NODE_PORT_NUMBER"
        elasticsearch_cluster_configuration
        elasticsearch_configure_node_type
    fi
    elasticsearch_set_heap_size
}

########################
# Install Elasticsearch plugins
# Globals:
#   ELASTICSEARCH_PLUGINS
# Arguments:
#   None
# Returns:
#   None
#########################
elasticsearch_install_plugins() {
    read -r -a plugins_list <<< "$(tr ',;' ' ' <<< "$ELASTICSEARCH_PLUGINS")"
    debug "Installing plugins: ${plugins_list[*]}"
    elasticsearch_conf_set plugin.mandatory "$ELASTICSEARCH_PLUGINS"
    for plugin in "${plugins_list[@]}"; do
        debug "Installing plugin: $plugin"
        if [[ "${BITNAMI_DEBUG:-false}" = true ]]; then
            elasticsearch-plugin install -b -v "$plugin"
        else
            elasticsearch-plugin install -b -v "$plugin" >/dev/null 2>&1
        fi
    done
}

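A sketch of how the ELASTICSEARCH_* variables surfaced by elasticsearch_env() above are typically overridden at run time; the cluster name, host names and heap size are illustrative, the image tag comes from the compose file earlier in this commit:

    docker run -d --name elasticsearch \
        -e ELASTICSEARCH_CLUSTER_NAME=my-cluster \
        -e ELASTICSEARCH_CLUSTER_HOSTS=es-node-1,es-node-2,es-node-3 \
        -e ELASTICSEARCH_NODE_TYPE=master \
        -e ELASTICSEARCH_IS_DEDICATED_NODE=yes \
        -e ELASTICSEARCH_HEAP_SIZE=2048m \
        bitnami/elasticsearch:6-ol-7
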
@@ -1,3 +0,0 @@
Bitnami containers ship with software bundles. You can find the licenses under:
/opt/bitnami/nami/COPYING
/opt/bitnami/[name-of-bundle]/licenses/[bundle-version].txt

@@ -1,26 +0,0 @@
#!/bin/bash

# shellcheck disable=SC1091

# Load libraries
. /libelasticsearch.sh
. /libfs.sh

# Load Elasticsearch environment variables
eval "$(elasticsearch_env)"

for dir in "$ELASTICSEARCH_TMPDIR" "$ELASTICSEARCH_DATADIR" "$ELASTICSEARCH_LOGDIR" "${ELASTICSEARCH_BASEDIR}/plugins" "${ELASTICSEARCH_BASEDIR}/modules" "${ELASTICSEARCH_CONFDIR}"; do
    ensure_dir_exists "$dir"
    chmod -R ug+rwX "$dir"
    # The `elasticsearch-plugin install` command complains about being unable to create a plugin's directory
    # even when the proper permissions are in place.
    # The reason: the code tries to check permissions by consulting the parent directory owner,
    # instead of checking whether the ES user actually has write permissions.
    #
    # As a workaround, we will ensure the container works (at least) with the non-root user 1001. However,
    # until we can avoid this hack, we can't guarantee this container to work on K8s distributions
    # where containers are executed as non-privileged users with random user IDs.
    #
    # Issue reported at: https://github.com/bitnami/bitnami-docker-elasticsearch/issues/50
    chown -R 1001:0 "$dir"
done

@@ -1,27 +0,0 @@
#!/bin/bash

set -o errexit
set -o nounset
set -o pipefail
#set -o xtrace
# shellcheck disable=SC1091

# Load libraries
. /libelasticsearch.sh
. /libos.sh

# Load Elasticsearch environment variables
eval "$(elasticsearch_env)"

# Constants
EXEC=$(command -v elasticsearch)
ARGS=("-p" "$ELASTICSEARCH_TMPDIR/elasticsearch.pid" "-Epath.data=$ELASTICSEARCH_DATADIR")
[[ -z "${ELASTICSEARCH_EXTRA_FLAGS:-}" ]] || ARGS=("${ARGS[@]}" "${ELASTICSEARCH_EXTRA_FLAGS[@]}")
export JAVA_HOME=/opt/bitnami/java

info "** Starting Elasticsearch **"
if am_i_root; then
    exec gosu "$ELASTICSEARCH_DAEMON_USER" "$EXEC" "${ARGS[@]}"
else
    exec "$EXEC" "${ARGS[@]}"
fi

@@ -1,30 +0,0 @@
#!/bin/bash

set -o errexit
set -o nounset
set -o pipefail
#set -o xtrace
# shellcheck disable=SC1091

# Load libraries
. /libos.sh
. /libfs.sh
. /libelasticsearch.sh

# Load Elasticsearch environment variables
eval "$(elasticsearch_env)"

# Ensure kernel settings are valid
elasticsearch_validate_kernel
# Ensure Elasticsearch environment variable settings are valid
elasticsearch_validate
# Ensure Elasticsearch is stopped when this script ends
trap "elasticsearch_stop" EXIT
# Ensure 'daemon' user exists when running as 'root'
am_i_root && ensure_user_exists "$ELASTICSEARCH_DAEMON_USER" "$ELASTICSEARCH_DAEMON_GROUP"
# Ensure Elasticsearch is initialized
elasticsearch_initialize
# Install Elasticsearch plugins
if [[ -n "$ELASTICSEARCH_PLUGINS" ]]; then
    elasticsearch_install_plugins
fi

@@ -1,32 +0,0 @@
FROM oraclelinux:7-slim
LABEL maintainer "Bitnami <containers@bitnami.com>"

ENV BITNAMI_PKG_CHMOD="-R g+rwX" \
    HOME="/" \
    OS_ARCH="x86_64" \
    OS_FLAVOUR="ol-7" \
    OS_NAME="linux"

COPY prebuildfs /
# Install required system packages and dependencies
RUN install_packages alsa-lib-devel ca-certificates curl freetype-devel glibc gzip hostname libX11-devel libXext-devel libXi-devel libXrender-devel libXtst-devel libaio-devel libgcc procps-ng sudo tar which zlib
RUN . ./libcomponent.sh && component_unpack "java" "11.0.6-0" --checksum 44f5ec63dbe8d2ea9aaea237cec2d8821ac3ee6fa2812c295c0fa04068d0e08e
RUN . ./libcomponent.sh && component_unpack "elasticsearch" "7.6.0-0" --checksum afe1e20c5e86baea3566c2bace27e4df24b3960cd85eeb1bd0826b30f800b48e
RUN yum upgrade -y && \
    rm -r /var/cache/yum
RUN /build/install-gosu.sh
RUN curl --silent -L https://github.com/mikefarah/yq/releases/download/2.4.0/yq_linux_amd64 > /usr/local/bin/yq && echo 99a01ae32f0704773c72103adb7050ef5c5cad14b517a8612543821ef32d6cc9 /usr/local/bin/yq | sha256sum --check && chmod +x /usr/local/bin/yq && mkdir -p /opt/bitnami/licenses && curl --silent -L https://raw.githubusercontent.com/mikefarah/yq/master/LICENSE > /opt/bitnami/licenses/yq-2.4.0.txt

COPY rootfs /
RUN /postunpack.sh
ENV BITNAMI_APP_NAME="elasticsearch" \
    BITNAMI_IMAGE_VERSION="7.6.0-ol-7-r12" \
    LD_LIBRARY_PATH="/opt/bitnami/elasticsearch/jdk/lib:/opt/bitnami/elasticsearch/jdk/lib/server:$LD_LIBRARY_PATH" \
    NAMI_PREFIX="/.nami" \
    PATH="/opt/bitnami/java/bin:/opt/bitnami/elasticsearch/bin:$PATH"

EXPOSE 9200 9300

USER 1001
ENTRYPOINT [ "/entrypoint.sh" ]
CMD [ "/run.sh" ]

@@ -1,13 +0,0 @@
version: '2'

services:
  elasticsearch:
    image: 'bitnami/elasticsearch:7-ol-7'
    ports:
      - '9200:9200'
      - '9300:9300'
    volumes:
      - 'elasticsearch_data:/bitnami/elasticsearch/data'
volumes:
  elasticsearch_data:
    driver: local

@@ -1,10 +0,0 @@
#!/bin/bash

VERSION="1.11"
SHA256="0b843df6d86e270c5b0f5cbd3c326a04e18f4b7f9b8457fa497b0454c4b138d7"

curl --silent -L "https://github.com/tianon/gosu/releases/download/${VERSION}/gosu-amd64" > "/usr/local/bin/gosu"
echo "$SHA256" "/usr/local/bin/gosu" | sha256sum --check
chmod u+x "/usr/local/bin/gosu"
mkdir -p "/opt/bitnami/licenses"
curl --silent -L "https://raw.githubusercontent.com/tianon/gosu/master/LICENSE" > "/opt/bitnami/licenses/gosu-${VERSION}.txt"

@@ -1,50 +0,0 @@
#!/bin/bash
#
# Bitnami custom library

# Load Generic Libraries
. /liblog.sh

# Constants
BOLD='\033[1m'

# Functions

########################
# Print the welcome page
# Globals:
#   DISABLE_WELCOME_MESSAGE
#   BITNAMI_APP_NAME
# Arguments:
#   None
# Returns:
#   None
#########################
print_welcome_page() {
    if [[ -z "${DISABLE_WELCOME_MESSAGE:-}" ]]; then
        if [[ -n "$BITNAMI_APP_NAME" ]]; then
            print_image_welcome_page
        fi
    fi
}

########################
# Print the welcome page for a Bitnami Docker image
# Globals:
#   BITNAMI_APP_NAME
# Arguments:
#   None
# Returns:
#   None
#########################
print_image_welcome_page() {
    local github_url="https://github.com/bitnami/bitnami-docker-${BITNAMI_APP_NAME}"

    log ""
    log "${BOLD}Welcome to the Bitnami ${BITNAMI_APP_NAME} container${RESET}"
    log "Subscribe to project updates by watching ${BOLD}${github_url}${RESET}"
    log "Submit issues and feature requests at ${BOLD}${github_url}/issues${RESET}"
    log "Send us your feedback at ${BOLD}containers@bitnami.com${RESET}"
    log ""
}

@@ -1,64 +0,0 @@
#!/bin/bash
#
# Library for managing Bitnami components

# Constants
CACHE_ROOT="/tmp/bitnami/pkg/cache"
DOWNLOAD_URL="https://downloads.bitnami.com/files/stacksmith"

# Functions

########################
# Download and unpack a Bitnami package
# Globals:
#   OS_NAME
#   OS_ARCH
#   OS_FLAVOUR
# Arguments:
#   $1 - component's name
#   $2 - component's version
# Returns:
#   None
#########################
component_unpack() {
    local name="${1:?name is required}"
    local version="${2:?version is required}"
    local base_name="${name}-${version}-${OS_NAME}-${OS_ARCH}-${OS_FLAVOUR}"
    local package_sha256=""

    # Validate arguments
    shift 2
    while [ "$#" -gt 0 ]; do
        case "$1" in
            -c|--checksum)
                shift
                package_sha256="${1:?missing package checksum}"
                ;;
            *)
                echo "Invalid command line flag $1" >&2
                return 1
                ;;
        esac
        shift
    done

    echo "Downloading $base_name package"
    if [ -f "${CACHE_ROOT}/${base_name}.tar.gz" ]; then
        echo "${CACHE_ROOT}/${base_name}.tar.gz already exists, skipping download."
        cp "${CACHE_ROOT}/${base_name}.tar.gz" .
        rm "${CACHE_ROOT}/${base_name}.tar.gz"
        if [ -f "${CACHE_ROOT}/${base_name}.tar.gz.sha256" ]; then
            echo "Using the local sha256 from ${CACHE_ROOT}/${base_name}.tar.gz.sha256"
            package_sha256="$(< "${CACHE_ROOT}/${base_name}.tar.gz.sha256")"
            rm "${CACHE_ROOT}/${base_name}.tar.gz.sha256"
        fi
    else
        curl --remote-name --silent "${DOWNLOAD_URL}/${base_name}.tar.gz"
    fi
    if [ -n "$package_sha256" ]; then
        echo "Verifying package integrity"
        echo "$package_sha256 ${base_name}.tar.gz" | sha256sum --check -
    fi
    tar --directory /opt/bitnami --extract --gunzip --file "${base_name}.tar.gz" --no-same-owner --strip-components=2 "${base_name}/files/"
    rm "${base_name}.tar.gz"
}


@ -1,60 +0,0 @@

#!/bin/bash
#
# Library for managing files

# Functions

########################
# Replace a regex in a file
# Arguments:
#   $1 - filename
#   $2 - match regex
#   $3 - substitute regex
#   $4 - use POSIX regex. Default: true
# Returns:
#   None
#########################
replace_in_file() {
    local filename="${1:?filename is required}"
    local match_regex="${2:?match regex is required}"
    local substitute_regex="${3:?substitute regex is required}"
    local posix_regex=${4:-true}

    local result

    # We should avoid using 'sed in-place' substitutions:
    # 1) They are not compatible with files mounted from ConfigMap(s)
    # 2) We found incompatibility issues with Debian 10 and "in-place" substitutions
    if [[ $posix_regex = true ]]; then
        result="$(sed -E "s@$match_regex@$substitute_regex@g" "$filename")"
    else
        result="$(sed "s@$match_regex@$substitute_regex@g" "$filename")"
    fi
    echo "$result" > "$filename"
}

########################
# Remove a line in a file based on a regex
# Arguments:
#   $1 - filename
#   $2 - match regex
#   $3 - use POSIX regex. Default: true
# Returns:
#   None
#########################
remove_in_file() {
    local filename="${1:?filename is required}"
    local match_regex="${2:?match regex is required}"
    local posix_regex=${3:-true}
    local result

    # We should avoid using 'sed in-place' substitutions:
    # 1) They are not compatible with files mounted from ConfigMap(s)
    # 2) We found incompatibility issues with Debian 10 and "in-place" substitutions
    if [[ $posix_regex = true ]]; then
        result="$(sed -E "/$match_regex/d" "$filename")"
    else
        result="$(sed "/$match_regex/d" "$filename")"
    fi
    echo "$result" > "$filename"
}
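
A short, hedged example of calling these helpers; /tmp/example.conf and the settings in it are made up for illustration.

    . /libfile.sh
    # Rewrite a port setting (extended regex), then drop a deprecated key
    replace_in_file "/tmp/example.conf" "^listen_port=.*" "listen_port=9201"
    remove_in_file "/tmp/example.conf" "^deprecated_option=.*"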

@ -1,129 +0,0 @@

#!/bin/bash
#
# Library for file system actions

# Load Generic Libraries
. /liblog.sh

# Functions

########################
# Ensure a file/directory is owned (user and group) by the given user
# Arguments:
#   $1 - filepath
#   $2 - owner
# Returns:
#   None
#########################
owned_by() {
    local path="${1:?path is missing}"
    local owner="${2:?owner is missing}"

    chown "$owner":"$owner" "$path"
}

########################
# Ensure a directory exists and, optionally, is owned by the given user
# Arguments:
#   $1 - directory
#   $2 - owner
# Returns:
#   None
#########################
ensure_dir_exists() {
    local dir="${1:?directory is missing}"
    local owner="${2:-}"

    mkdir -p "${dir}"
    if [[ -n $owner ]]; then
        owned_by "$dir" "$owner"
    fi
}

########################
# Checks whether a directory is empty or not
# Arguments:
#   $1 - directory
# Returns:
#   Boolean
#########################
is_dir_empty() {
    local dir="${1:?missing directory}"

    if [[ ! -e "$dir" ]] || [[ -z "$(ls -A "$dir")" ]]; then
        true
    else
        false
    fi
}

########################
# Configure permissions and ownership recursively
# Globals:
#   None
# Arguments:
#   $1 - paths (as a string).
# Flags:
#   -f|--file-mode - mode for files.
#   -d|--dir-mode - mode for directories.
#   -u|--user - user
#   -g|--group - group
# Returns:
#   None
#########################
configure_permissions_ownership() {
    local -r paths="${1:?paths is missing}"
    local dir_mode=""
    local file_mode=""
    local user=""
    local group=""

    # Validate arguments
    shift 1
    while [ "$#" -gt 0 ]; do
        case "$1" in
            -f|--file-mode)
                shift
                file_mode="${1:?missing mode for files}"
                ;;
            -d|--dir-mode)
                shift
                dir_mode="${1:?missing mode for directories}"
                ;;
            -u|--user)
                shift
                user="${1:?missing user}"
                ;;
            -g|--group)
                shift
                group="${1:?missing group}"
                ;;
            *)
                echo "Invalid command line flag $1" >&2
                return 1
                ;;
        esac
        shift
    done

    read -r -a filepaths <<< "$paths"
    for p in "${filepaths[@]}"; do
        if [[ -e "$p" ]]; then
            if [[ -n $dir_mode ]]; then
                find -L "$p" -type d -exec chmod "$dir_mode" {} \;
            fi
            if [[ -n $file_mode ]]; then
                find -L "$p" -type f -exec chmod "$file_mode" {} \;
            fi
            if [[ -n $user ]] && [[ -n $group ]]; then
                chown -LR "$user":"$group" "$p"
            elif [[ -n $user ]] && [[ -z $group ]]; then
                chown -LR "$user" "$p"
            elif [[ -z $user ]] && [[ -n $group ]]; then
                chgrp -LR "$group" "$p"
            fi
        else
            stderr_print "$p does not exist"
        fi
    done
}
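
A hedged usage sketch; the path, modes and user/group below are illustrative placeholders, and the chown/chmod calls assume the caller has sufficient privileges.

    . /libfs.sh
    # Create a data directory owned by a hypothetical service user
    ensure_dir_exists "/tmp/example-app/data" "1001"
    # Recursively normalize modes and ownership in one pass
    configure_permissions_ownership "/tmp/example-app" \
        --dir-mode "775" --file-mode "664" --user "1001" --group "root"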

@ -1,83 +0,0 @@

#!/bin/bash
#
# Library for logging functions

# Constants
RESET='\033[0m'
RED='\033[38;5;1m'
GREEN='\033[38;5;2m'
YELLOW='\033[38;5;3m'
MAGENTA='\033[38;5;5m'
CYAN='\033[38;5;6m'

# Functions

########################
# Print to STDERR
# Arguments:
#   Message to print
# Returns:
#   None
#########################
stderr_print() {
    printf "%b\\n" "${*}" >&2
}

########################
# Log message
# Arguments:
#   Message to log
# Returns:
#   None
#########################
log() {
    stderr_print "${CYAN}${MODULE:-} ${MAGENTA}$(date "+%T.%2N ")${RESET}${*}"
}
########################
# Log an 'info' message
# Arguments:
#   Message to log
# Returns:
#   None
#########################
info() {
    log "${GREEN}INFO ${RESET} ==> ${*}"
}
########################
# Log a 'warn' message
# Arguments:
#   Message to log
# Returns:
#   None
#########################
warn() {
    log "${YELLOW}WARN ${RESET} ==> ${*}"
}
########################
# Log an 'error' message
# Arguments:
#   Message to log
# Returns:
#   None
#########################
error() {
    log "${RED}ERROR${RESET} ==> ${*}"
}
########################
# Log a 'debug' message
# Globals:
#   BITNAMI_DEBUG
# Arguments:
#   Message to log
# Returns:
#   None
#########################
debug() {
    # 'is_boolean_yes' is defined in libvalidations.sh, but depends on this file so we cannot source it
    local -r bool="${BITNAMI_DEBUG:-false}"
    # comparison is performed without regard to the case of alphabetic characters
    shopt -s nocasematch
    if [[ "$bool" = 1 || "$bool" =~ ^(yes|true)$ ]]; then
        log "${MAGENTA}DEBUG${RESET} ==> ${*}"
    fi
}
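
For illustration only, this is how the logging helpers above are consumed by the other scripts in this diff; the messages themselves are placeholders.

    . /liblog.sh
    info "Starting example task"
    warn "Disk is getting full"
    error "Something went wrong"
    BITNAMI_DEBUG=true debug "Only printed when BITNAMI_DEBUG is a truthy value"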

@ -1,44 +0,0 @@

#!/bin/bash
#
# Library for network functions

# Functions

########################
# Resolve dns
# Arguments:
#   $1 - Hostname to resolve
# Returns:
#   IP
#########################
dns_lookup() {
    local host="${1:?host is missing}"
    getent ahosts "$host" | awk '/STREAM/ {print $1 }'
}

########################
# Get machine's IP
# Arguments:
#   None
# Returns:
#   Machine IP
#########################
get_machine_ip() {
    dns_lookup "$(hostname)"
}

########################
# Check if the provided argument is a resolved hostname
# Arguments:
#   $1 - Value to check
# Returns:
#   Boolean
#########################
is_hostname_resolved() {
    local -r host="${1:?missing value}"
    if [[ -n "$(dns_lookup "$host")" ]]; then
        true
    else
        false
    fi
}
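
A small sketch of the lookup helpers; example.org is only an illustrative hostname.

    . /libnet.sh
    if is_hostname_resolved "example.org"; then
        echo "example.org resolves to $(dns_lookup "example.org")"
    fi
    echo "This machine's IP is $(get_machine_ip)"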

@ -1,132 +0,0 @@

#!/bin/bash
#
# Library for operating system actions

# Functions

########################
# Check if a user exists in the system
# Arguments:
#   $1 - user
# Returns:
#   Boolean
#########################
user_exists() {
    local user="${1:?user is missing}"
    id "$user" >/dev/null 2>&1
}

########################
# Check if a group exists in the system
# Arguments:
#   $1 - group
# Returns:
#   Boolean
#########################
group_exists() {
    local group="${1:?group is missing}"
    getent group "$group" >/dev/null 2>&1
}

########################
# Create a group in the system if it does not exist already
# Arguments:
#   $1 - group
# Returns:
#   None
#########################
ensure_group_exists() {
    local group="${1:?group is missing}"

    if ! group_exists "$group"; then
        groupadd "$group" >/dev/null 2>&1
    fi
}

########################
# Create a user in the system if it does not exist already
# Arguments:
#   $1 - user
#   $2 - group
# Returns:
#   None
#########################
ensure_user_exists() {
    local user="${1:?user is missing}"
    local group="${2:-}"

    if ! user_exists "$user"; then
        useradd "$user" >/dev/null 2>&1
        if [[ -n "$group" ]]; then
            ensure_group_exists "$group"
            usermod -a -G "$group" "$user" >/dev/null 2>&1
        fi
    fi
}

########################
# Check if the script is currently running as root
# Arguments:
#   None
# Returns:
#   Boolean
#########################
am_i_root() {
    if [[ "$(id -u)" = "0" ]]; then
        true
    else
        false
    fi
}

########################
# Get total memory available
# Arguments:
#   None
# Returns:
#   Memory in megabytes
#########################
get_total_memory() {
    echo $(($(grep MemTotal /proc/meminfo | awk '{print $2}') / 1024))
}

########################
# Redirects output to /dev/null if debug mode is disabled
# Globals:
#   BITNAMI_DEBUG
# Arguments:
#   $@ - Command to execute
# Returns:
#   None
#########################
debug_execute() {
    if ${BITNAMI_DEBUG:-false}; then
        "$@"
    else
        "$@" >/dev/null 2>&1
    fi
}

########################
# Retries a command a given number of times
# Arguments:
#   $1 - cmd (as a string)
#   $2 - max retries. Default: 12
#   $3 - sleep between retries (in seconds). Default: 5
# Returns:
#   Boolean
#########################
retry_while() {
    local -r cmd="${1:?cmd is missing}"
    local -r retries="${2:-12}"
    local -r sleep_time="${3:-5}"
    local return_value=1

    read -r -a command <<< "$cmd"
    for ((i = 1 ; i <= retries ; i+=1 )); do
        "${command[@]}" && return_value=0 && break
        sleep "$sleep_time"
    done
    return $return_value
}
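
A hedged example of retry_while; the URL and retry counts are placeholders.

    . /libos.sh
    # Retry up to 10 times, sleeping 3 seconds between attempts
    if retry_while "curl --silent --fail http://127.0.0.1:9200" 10 3; then
        echo "service is reachable"
    else
        echo "service did not come up in time"
    fi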

@ -1,57 +0,0 @@

#!/bin/bash
#
# Library for managing services

# Functions

########################
# Read the provided PID file and return a PID
# Arguments:
#   $1 - Pid file
# Returns:
#   PID
#########################
get_pid_from_file() {
    local pid_file="${1:?pid file is missing}"

    if [[ -f "$pid_file" ]]; then
        if [[ -n "$(< "$pid_file")" ]] && [[ "$(< "$pid_file")" -gt 0 ]]; then
            echo "$(< "$pid_file")"
        fi
    fi
}

########################
# Check if a provided PID corresponds to a running service
# Arguments:
#   $1 - PID
# Returns:
#   Boolean
#########################
is_service_running() {
    local pid="${1:?pid is missing}"

    kill -0 "$pid" 2>/dev/null
}

########################
# Stop a service by sending a termination signal to its pid
# Arguments:
#   $1 - Pid file
# Returns:
#   None
#########################
stop_service_using_pid() {
    local pid_file="${1:?pid file is missing}"
    local pid

    pid="$(get_pid_from_file "$pid_file")"
    [[ -z "$pid" ]] || ! is_service_running "$pid" && return

    kill "$pid"
    local counter=10
    while [[ "$counter" -ne 0 ]] && is_service_running "$pid"; do
        sleep 1
        counter=$((counter - 1))
    done
}
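
An illustrative snippet; the PID file path is hypothetical.

    . /libservice.sh
    pid="$(get_pid_from_file "/tmp/example-service.pid")"
    if [[ -n "$pid" ]] && is_service_running "$pid"; then
        # Sends SIGTERM and waits up to ~10 seconds for the process to exit
        stop_service_using_pid "/tmp/example-service.pid"
    fi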

@ -1,246 +0,0 @@

#!/bin/bash
#
# Validation functions library

# Load Generic Libraries
. /liblog.sh

# Functions

########################
# Check if the provided argument is an integer
# Arguments:
#   $1 - Value to check
# Returns:
#   Boolean
#########################
is_int() {
    local -r int="${1:?missing value}"
    if [[ "$int" =~ ^-?[0-9]+$ ]]; then
        true
    else
        false
    fi
}

########################
# Check if the provided argument is a positive integer
# Arguments:
#   $1 - Value to check
# Returns:
#   Boolean
#########################
is_positive_int() {
    local -r int="${1:?missing value}"
    if is_int "$int" && (( "${int}" >= 0 )); then
        true
    else
        false
    fi
}

########################
# Check if the provided argument is a boolean or is the string 'yes/true'
# Arguments:
#   $1 - Value to check
# Returns:
#   Boolean
#########################
is_boolean_yes() {
    local -r bool="${1:-}"
    # comparison is performed without regard to the case of alphabetic characters
    shopt -s nocasematch
    if [[ "$bool" = 1 || "$bool" =~ ^(yes|true)$ ]]; then
        true
    else
        false
    fi
}

########################
# Check if the provided argument is a boolean yes/no value
# Arguments:
#   $1 - Value to check
# Returns:
#   Boolean
#########################
is_yes_no_value() {
    local -r bool="${1:-}"
    if [[ "$bool" =~ ^(yes|no)$ ]]; then
        true
    else
        false
    fi
}

########################
# Check if the provided argument is a boolean true/false value
# Arguments:
#   $1 - Value to check
# Returns:
#   Boolean
#########################
is_true_false_value() {
    local -r bool="${1:-}"
    if [[ "$bool" =~ ^(true|false)$ ]]; then
        true
    else
        false
    fi
}

########################
# Check if the provided argument is an empty string or not defined
# Arguments:
#   $1 - Value to check
# Returns:
#   Boolean
#########################
is_empty_value() {
    local -r val="${1:-}"
    if [[ -z "$val" ]]; then
        true
    else
        false
    fi
}

########################
# Validate if the provided argument is a valid port
# Arguments:
#   $1 - Port to validate
# Returns:
#   Boolean and error message
#########################
validate_port() {
    local value
    local unprivileged=0

    # Parse flags
    while [[ "$#" -gt 0 ]]; do
        case "$1" in
            -unprivileged)
                unprivileged=1
                ;;
            --)
                shift
                break
                ;;
            -*)
                stderr_print "unrecognized flag $1"
                return 1
                ;;
            *)
                break
                ;;
        esac
        shift
    done

    if [[ "$#" -gt 1 ]]; then
        echo "too many arguments provided"
        return 2
    elif [[ "$#" -eq 0 ]]; then
        stderr_print "missing port argument"
        return 1
    else
        value=$1
    fi

    if [[ -z "$value" ]]; then
        echo "the value is empty"
        return 1
    else
        if ! is_int "$value"; then
            echo "value is not an integer"
            return 2
        elif [[ "$value" -lt 0 ]]; then
            echo "negative value provided"
            return 2
        elif [[ "$value" -gt 65535 ]]; then
            echo "requested port is greater than 65535"
            return 2
        elif [[ "$unprivileged" = 1 && "$value" -lt 1024 ]]; then
            echo "privileged port requested"
            return 3
        fi
    fi
}

########################
# Validate if the provided argument is a valid IPv4 address
# Arguments:
#   $1 - IP to validate
# Returns:
#   Boolean
#########################
validate_ipv4() {
    local ip="${1:?ip is missing}"
    local stat=1

    if [[ $ip =~ ^[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}$ ]]; then
        read -r -a ip_array <<< "$(tr '.' ' ' <<< "$ip")"
        [[ ${ip_array[0]} -le 255 && ${ip_array[1]} -le 255 \
            && ${ip_array[2]} -le 255 && ${ip_array[3]} -le 255 ]]
        stat=$?
    fi
    return $stat
}

########################
# Validate a string format
# Arguments:
#   $1 - String to validate
# Returns:
#   Boolean
#########################
validate_string() {
    local string
    local min_length=-1
    local max_length=-1

    # Parse flags
    while [ "$#" -gt 0 ]; do
        case "$1" in
            -min-length)
                shift
                min_length=${1:-}
                ;;
            -max-length)
                shift
                max_length=${1:-}
                ;;
            --)
                shift
                break
                ;;
            -*)
                stderr_print "unrecognized flag $1"
                return 1
                ;;
            *)
                break
                ;;
        esac
        shift
    done

    if [ "$#" -gt 1 ]; then
        stderr_print "too many arguments provided"
        return 2
    elif [ "$#" -eq 0 ]; then
        stderr_print "missing string"
        return 1
    else
        string=$1
    fi

    if [[ "$min_length" -ge 0 ]] && [[ "${#string}" -lt "$min_length" ]]; then
        echo "string length is less than $min_length"
        return 1
    fi
    if [[ "$max_length" -ge 0 ]] && [[ "${#string}" -gt "$max_length" ]]; then
        echo "string length is greater than $max_length"
        return 1
    fi
}
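
A brief sketch of the validators above; the values are arbitrary examples.

    . /libvalidations.sh
    is_boolean_yes "Yes" && echo "truthy"   # accepts 1/yes/true, case-insensitive
    if ! err="$(validate_port -unprivileged 9200)"; then
        echo "invalid port: $err"
    fi
    validate_string -min-length 8 -- "s3cretpass" || echo "string too short"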

@ -1,47 +0,0 @@

#!/bin/bash
#
# Library for managing version strings

# Load Generic Libraries
. ./liblog.sh

# Functions
########################
# Gets a section of a semantic version
# Arguments:
#   $1 - version: string to extract major.minor.patch
#   $2 - section: 1 to extract major, 2 to extract minor, 3 to extract patch
# Returns:
#   The requested version section (major, minor or patch)
#########################
get_sematic_version () {
    local version="${1:?version is required}"
    local section="${2:?section is required}"
    local -a version_sections

    # Regex to parse versions: x.y.z
    local -r regex='([0-9]+)(\.([0-9]+)(\.([0-9]+))?)?'

    if [[ "$version" =~ $regex ]]; then
        local i=1
        local j=1
        local n=${#BASH_REMATCH[*]}

        while [[ $i -lt $n ]]; do
            if [[ -n "${BASH_REMATCH[$i]}" ]] && [[ "${BASH_REMATCH[$i]:0:1}" != '.' ]]; then
                version_sections[$j]=${BASH_REMATCH[$i]}
                ((j++))
            fi
            ((i++))
        done

        local number_regex='^[0-9]+$'
        if [[ "$section" =~ $number_regex ]] && (( $section > 0 )) && (( $section <= 3 )); then
            echo "${version_sections[$section]}"
            return
        else
            stderr_print "Section allowed values are: 1, 2, and 3"
            return 1
        fi
    fi
}
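
For illustration, with a made-up version string:

    . ./libversions.sh
    get_sematic_version "1.2.3" 1   # prints "1" (major)
    get_sematic_version "1.2.3" 2   # prints "2" (minor)
    get_sematic_version "1.2.3" 3   # prints "3" (patch)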

@ -1,29 +0,0 @@

#!/bin/sh
set -eu

if [[ -n "oracle-epel-release-el7" ]]; then
    if ! yum list installed oracle-epel-release-el7 >/dev/null 2>&1; then
        yum -y install oracle-epel-release-el7 >/dev/null 2>&1
        CODE=$?
        if (( $CODE != 0 )); then
            echo "EPEL repository installation failed"
            exit $CODE
        fi
    fi
fi

max=2
for ((n = 1 ; n <= max ; n+=1 )); do
    set +e
    yum --enablerepo base,updates,ol7_developer_EPEL,ol7_optional_latest install -y "$@"
    CODE=$?
    set -e
    if (( $CODE == 0 )); then
        break
    fi
    if (( $n == $max )); then
        exit $CODE
    fi
    echo "yum failed, retrying"
done
rm -r /var/cache/yum
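
Assuming this helper is available on the image's PATH as install_packages, an invocation would look like the following (the package names are placeholders):

    install_packages curl tar gzip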

@ -1,25 +0,0 @@

#!/bin/bash

set -o errexit
set -o nounset
set -o pipefail
#set -o xtrace
# shellcheck disable=SC1091

# Load libraries
. /libbitnami.sh
. /libelasticsearch.sh

# Load Elasticsearch environment variables
eval "$(elasticsearch_env)"

print_welcome_page

if [[ "$*" = "/run.sh" ]]; then
    info "** Starting Elasticsearch setup **"
    /setup.sh
    info "** Elasticsearch setup finished! **"
fi

echo ""
exec "$@"

@ -1,469 +0,0 @@

#!/bin/bash
#
# Bitnami Elasticsearch library

# shellcheck disable=SC1091

# Load Generic Libraries
. /libfile.sh
. /liblog.sh
. /libnet.sh
. /libos.sh
. /libservice.sh
. /libvalidations.sh

# Functions

########################
# Write a configuration setting value
# Globals:
#   ELASTICSEARCH_CONF_FILE
# Arguments:
#   $1 - key
#   $2 - value
# Returns:
#   None
#########################
elasticsearch_conf_write() {
    local key="${1:?missing key}"
    local value="${2:?missing value}"

    if [[ -s "$ELASTICSEARCH_CONF_FILE" ]]; then
        yq w -i "$ELASTICSEARCH_CONF_FILE" "$key" "$value"
    else
        yq n "$key" "$value" > "$ELASTICSEARCH_CONF_FILE"
    fi
}

########################
# Set a configuration setting value
# Globals:
#   ELASTICSEARCH_CONF_FILE
# Arguments:
#   $1 - key
#   $2 - values (array)
# Returns:
#   None
#########################
elasticsearch_conf_set() {
    local key="${1:?missing key}"
    shift
    local values=("${@}")

    if [[ "${#values[@]}" -eq 0 ]]; then
        stderr_print "missing values"
        return 1
    elif [[ "${#values[@]}" -eq 1 ]] && [[ -n "${values[0]}" ]]; then
        elasticsearch_conf_write "$key" "${values[0]}"
    else
        for i in "${!values[@]}"; do
            if [[ -n "${values[$i]}" ]]; then
                elasticsearch_conf_write "$key" "${values[$i]}"
            fi
        done
    fi
}

########################
# Check if Elasticsearch is running
# Globals:
#   ELASTICSEARCH_TMPDIR
# Arguments:
#   None
# Returns:
#   Boolean
#########################
is_elasticsearch_running() {
    local pid
    pid="$(get_pid_from_file "${ELASTICSEARCH_TMPDIR}/elasticsearch.pid")"

    if [[ -z "$pid" ]]; then
        false
    else
        is_service_running "$pid"
    fi
}

########################
# Stop Elasticsearch
# Globals:
#   ELASTICSEARCH_TMPDIR
# Arguments:
#   None
# Returns:
#   None
#########################
elasticsearch_stop() {
    ! is_elasticsearch_running && return
    debug "Stopping Elasticsearch..."
    stop_service_using_pid "$ELASTICSEARCH_TMPDIR/elasticsearch.pid"
}

########################
# Start Elasticsearch and wait until it's ready
# Globals:
#   ELASTICSEARCH_*
# Arguments:
#   None
# Returns:
#   None
#########################
elasticsearch_start() {
    is_elasticsearch_running && return

    debug "Starting Elasticsearch..."
    local command=("${ELASTICSEARCH_BASEDIR}/bin/elasticsearch" "-d" "-p" "${ELASTICSEARCH_TMPDIR}/elasticsearch.pid" "-Epath.data=$ELASTICSEARCH_DATADIR")
    am_i_root && command=("gosu" "$ELASTICSEARCH_DAEMON_USER" "${command[@]}")
    if [[ "$BITNAMI_DEBUG" = true ]]; then
        "${command[@]}" &
    else
        "${command[@]}" >/dev/null 2>&1 &
    fi

    local counter=50
    while ! is_elasticsearch_running ; do
        if [[ "$counter" -eq 0 ]]; then
            break
        fi
        sleep 2;
        counter=$((counter - 1))
    done
    local log_result=""
    local log_counter=30
    while [[ -z "$log_result" ]] && [[ "$log_counter" -ne 0 ]]; do
        log_counter=$(("$log_counter" - 1))
        log_result="$(tail -7 "${ELASTICSEARCH_LOGDIR}/elasticsearch.log" | grep -i "Node" | grep -i "started")"
        sleep 2
    done
}

########################
# Load global variables used on Elasticsearch configuration
# Globals:
#   ELASTICSEARCH_*
# Arguments:
#   None
# Returns:
#   Series of exports to be used as 'eval' arguments
#########################
elasticsearch_env() {
    cat <<"EOF"
export ELASTICSEARCH_BASEDIR="/opt/bitnami/elasticsearch"
export ELASTICSEARCH_DATADIR="/bitnami/elasticsearch/data"
export ELASTICSEARCH_CONFDIR="${ELASTICSEARCH_BASEDIR}/config"
export ELASTICSEARCH_CONF_FILE="${ELASTICSEARCH_CONFDIR}/elasticsearch.yml"
export ELASTICSEARCH_TMPDIR="${ELASTICSEARCH_BASEDIR}/tmp"
export ELASTICSEARCH_LOGDIR="${ELASTICSEARCH_BASEDIR}/logs"
export PATH="${ELASTICSEARCH_BASEDIR}/bin:$PATH"
export ELASTICSEARCH_DAEMON_USER="${ELASTICSEARCH_DAEMON_USER:-elasticsearch}"
export ELASTICSEARCH_DAEMON_GROUP="${ELASTICSEARCH_DAEMON_GROUP:-elasticsearch}"
export ELASTICSEARCH_BIND_ADDRESS="${ELASTICSEARCH_BIND_ADDRESS:-}"
export ELASTICSEARCH_CLUSTER_HOSTS="${ELASTICSEARCH_CLUSTER_HOSTS:-}"
export ELASTICSEARCH_CLUSTER_MASTER_HOSTS="${ELASTICSEARCH_CLUSTER_MASTER_HOSTS:-}"
export ELASTICSEARCH_CLUSTER_NAME="${ELASTICSEARCH_CLUSTER_NAME:-}"
export ELASTICSEARCH_HEAP_SIZE="${ELASTICSEARCH_HEAP_SIZE:-1024m}"
export ELASTICSEARCH_IS_DEDICATED_NODE="${ELASTICSEARCH_IS_DEDICATED_NODE:-no}"
export ELASTICSEARCH_MINIMUM_MASTER_NODES="${ELASTICSEARCH_MINIMUM_MASTER_NODES:-}"
export ELASTICSEARCH_NODE_NAME="${ELASTICSEARCH_NODE_NAME:-}"
export ELASTICSEARCH_NODE_PORT_NUMBER="${ELASTICSEARCH_NODE_PORT_NUMBER:-9300}"
export ELASTICSEARCH_NODE_TYPE="${ELASTICSEARCH_NODE_TYPE:-master}"
export ELASTICSEARCH_PLUGINS="${ELASTICSEARCH_PLUGINS:-}"
export ELASTICSEARCH_PORT_NUMBER="${ELASTICSEARCH_PORT_NUMBER:-9200}"
## JVM
export JAVA_HOME="${JAVA_HOME:-/opt/bitnami/java}"
EOF
}

########################
# Validate kernel settings
# Arguments:
#   None
# Returns:
#   None
#########################
elasticsearch_validate_kernel() {
    # Auxiliary functions
    validate_sysctl_key() {
        local key="${1:?key is missing}"
        local value="${2:?value is missing}"
        local current_value
        current_value="$(sysctl -n "$key")"
        if [[ "$current_value" -lt "$value" ]]; then
            error "Invalid kernel settings. Elasticsearch requires at least: $key = $value"
            exit 1
        fi
    }

    debug "Validating Kernel settings..."
    validate_sysctl_key "vm.max_map_count" 262144
    validate_sysctl_key "fs.file-max" 65536
}

########################
# Validate settings in ELASTICSEARCH_* env vars
# Globals:
#   ELASTICSEARCH_*
# Arguments:
#   None
# Returns:
#   None
#########################
elasticsearch_validate() {
    local error_code=0

    # Auxiliary functions
    print_validation_error() {
        error "$1"
        error_code=1
    }

    validate_node_type() {
        case "$ELASTICSEARCH_NODE_TYPE" in
            coordinating|data|ingest|master)
                ;;
            *)
                print_validation_error "Invalid node type $ELASTICSEARCH_NODE_TYPE. Supported types are 'coordinating/data/ingest/master'"
        esac
    }

    debug "Validating settings in ELASTICSEARCH_* env vars..."
    local validate_port_args=()
    ! am_i_root && validate_port_args+=("-unprivileged")
    for var in "ELASTICSEARCH_PORT_NUMBER" "ELASTICSEARCH_NODE_PORT_NUMBER"; do
        if ! err=$(validate_port "${validate_port_args[@]}" "${!var}"); then
            print_validation_error "An invalid port was specified in the environment variable $var: $err"
        fi
    done
    is_boolean_yes "$ELASTICSEARCH_IS_DEDICATED_NODE" && validate_node_type
    if [[ -n "$ELASTICSEARCH_BIND_ADDRESS" ]] && ! validate_ipv4 "$ELASTICSEARCH_BIND_ADDRESS"; then
        print_validation_error "The Bind Address specified in the environment variable ELASTICSEARCH_BIND_ADDRESS is not a valid IPv4"
    fi

    [[ "$error_code" -eq 0 ]] || exit "$error_code"
}

# Bash uses floor division by default. You can use it to get ceil:
# ceil( a/b ) = floor( (a+b-1)/b )
ceiling45() {
    local num=$(($1*4))
    local div=5
    echo $(( (num + div - 1) / div ))
}
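
# Worked example: ceiling45 N = ceil(4*N/5), so ceiling45 3 -> 3, ceiling45 5 -> 4,
# and ceiling45 10 -> 8. It is used below to size gateway.recover_after_nodes from
# the number of configured cluster hosts.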

########################
# Configure Elasticsearch cluster settings
# Globals:
#   ELASTICSEARCH_*
# Arguments:
#   None
# Returns:
#   None
#########################
elasticsearch_cluster_configuration() {
    # Auxiliary functions
    bind_address() {
        if [[ -n "$ELASTICSEARCH_BIND_ADDRESS" ]]; then
            echo "[$ELASTICSEARCH_BIND_ADDRESS, _local_]"
        else
            echo "0.0.0.0"
        fi
    }

    info "Configuring Elasticsearch cluster settings..."
    elasticsearch_conf_set network.host "$(get_machine_ip)"
    elasticsearch_conf_set network.publish_host "$(get_machine_ip)"
    elasticsearch_conf_set network.bind_host "$(bind_address)"
    elasticsearch_conf_set cluster.name "$ELASTICSEARCH_CLUSTER_NAME"
    elasticsearch_conf_set node.name "${ELASTICSEARCH_NODE_NAME:-$(hostname)}"
    if [[ -n "$ELASTICSEARCH_CLUSTER_HOSTS" ]]; then
        read -r -a host_list <<< "$(tr ',;' ' ' <<< "$ELASTICSEARCH_CLUSTER_HOSTS")"
        master_list=( "${host_list[@]}" )
        if [[ -n "$ELASTICSEARCH_CLUSTER_MASTER_HOSTS" ]]; then
            read -r -a master_list <<< "$(tr ',;' ' ' <<< "$ELASTICSEARCH_CLUSTER_MASTER_HOSTS")"
        fi
        ELASTICSEARCH_MAJOR_VERSION=$(elasticsearch --version | grep Version: | awk -F "," '{print $1}' | awk -F ":" '{print $2}' | awk -F "." '{print $1}')
        if [[ "$ELASTICSEARCH_MAJOR_VERSION" -le 6 ]]; then
            elasticsearch_conf_set discovery.zen.ping.unicast.hosts "${host_list[@]}"
        else
            elasticsearch_conf_set discovery.seed_hosts "${host_list[@]}"
        fi
        elasticsearch_conf_set discovery.initial_state_timeout "5m"
        elasticsearch_conf_set gateway.recover_after_nodes "$(ceiling45 "${#host_list[@]}")"
        elasticsearch_conf_set gateway.expected_nodes "${#host_list[@]}"
        if [[ "$ELASTICSEARCH_NODE_TYPE" = "master" ]] && [[ "$ELASTICSEARCH_MAJOR_VERSION" -gt 6 ]]; then
            elasticsearch_conf_set cluster.initial_master_nodes "${master_list[@]}"
        fi
        if [[ -n "$ELASTICSEARCH_MINIMUM_MASTER_NODES" ]]; then
            debug "Setting minimum master nodes for quorum to $ELASTICSEARCH_MINIMUM_MASTER_NODES..."
            elasticsearch_conf_set discovery.zen.minimum_master_nodes "$ELASTICSEARCH_MINIMUM_MASTER_NODES"
        elif [[ "${#host_list[@]}" -gt 2 ]]; then
            local min_masters=""
            min_masters=$(((${#host_list[@]} / 2) +1))
            debug "Calculating minimum master nodes for quorum: $min_masters..."
            elasticsearch_conf_set discovery.zen.minimum_master_nodes "$min_masters"
        fi
    else
        elasticsearch_conf_set "discovery.type" "single-node"
    fi
}

########################
# Configure Elasticsearch node type
# Globals:
#   ELASTICSEARCH_*
# Arguments:
#   None
# Returns:
#   None
#########################
elasticsearch_configure_node_type() {
    local is_master="false"
    local is_data="false"
    local is_ingest="false"
    if is_boolean_yes "$ELASTICSEARCH_IS_DEDICATED_NODE"; then
        case "$ELASTICSEARCH_NODE_TYPE" in
            coordinating)
                ;;
            data)
                is_data="true"
                ;;
            ingest)
                is_ingest="true"
                ;;
            master)
                is_master="true"
                ;;
            *)
                error "Invalid node type '$ELASTICSEARCH_NODE_TYPE'"
                exit 1
                ;;
        esac
    else
        is_master="true"
        is_data="true"
    fi
    debug "Configure Elasticsearch Node type..."
    elasticsearch_conf_set node.master "$is_master"
    elasticsearch_conf_set node.data "$is_data"
    elasticsearch_conf_set node.ingest "$is_ingest"
}

########################
# Configure Elasticsearch Heap Size
# Globals:
#   ELASTICSEARCH_*
# Arguments:
#   None
# Returns:
#   None
#########################
elasticsearch_set_heap_size() {
    local heap_size
    if [[ -n "$ELASTICSEARCH_HEAP_SIZE" ]]; then
        debug "Using specified values for Xmx and Xms heap options..."
        heap_size="$ELASTICSEARCH_HEAP_SIZE"
    else
        debug "Calculating appropriate Xmx and Xms values..."
        local machine_mem=""
        machine_mem="$(get_total_memory)"
        if [[ "$machine_mem" -lt 65536 ]]; then
            heap_size="$(("$machine_mem" / 2))m"
        else
            heap_size=32768m
        fi
    fi
    debug "Setting '-Xmx${heap_size} -Xms${heap_size}' heap options..."
    replace_in_file "${ELASTICSEARCH_CONFDIR}/jvm.options" "-Xmx[0-9]+[mg]+" "-Xmx${heap_size}"
    replace_in_file "${ELASTICSEARCH_CONFDIR}/jvm.options" "-Xms[0-9]+[mg]+" "-Xms${heap_size}"
}

########################
# Migrate old Elasticsearch data
# Globals:
#   ELASTICSEARCH_*
# Arguments:
#   None
# Returns:
#   None
#########################
migrate_old_data() {
    warn "Persisted data follows old structure. Migrating to new one..."
    warn "Custom configuration files won't be persisted any longer!"
    local old_data_dir="${ELASTICSEARCH_DATADIR}/elasticsearch"
    local old_custom_conf_file="${old_data_dir}/conf/elasticsearch_custom.yml"
    local custom_conf_file="${ELASTICSEARCH_CONFDIR}/elasticsearch_custom.yml"
    if [[ -f "$old_custom_conf_file" ]]; then
        debug "Adding old custom configuration to user configuration"
        echo "" >> "$custom_conf_file"
        cat "$old_custom_conf_file" >> "$custom_conf_file"
    fi
    debug "Adapting data to new file structure"
    find "${old_data_dir}/data" -maxdepth 1 -mindepth 1 -exec mv {} "$ELASTICSEARCH_DATADIR" \;
    debug "Removing data that is not persisted anymore from persisted directory"
    rm -rf "$old_data_dir" "${ELASTICSEARCH_DATADIR}/java"
}

########################
# Configure/initialize Elasticsearch
# Globals:
#   ELASTICSEARCH_*
# Arguments:
#   None
# Returns:
#   None
#########################
elasticsearch_initialize() {
    info "Configuring/Initializing Elasticsearch..."

    # This fixes an issue where the trap would kill the entrypoint.sh, if a PID was left over from a previous run
    # Exec replaces the process without creating a new one, and when the container is restarted it may have the same PID
    rm -f "$ELASTICSEARCH_TMPDIR/elasticsearch.pid"

    # Persisted data from old versions
    if ! is_dir_empty "$ELASTICSEARCH_DATADIR"; then
        debug "Detected persisted data from previous deployments"
        [[ -d "$ELASTICSEARCH_DATADIR/elasticsearch" ]] && [[ -f "$ELASTICSEARCH_DATADIR/elasticsearch/.initialized" ]] && migrate_old_data
    fi

    debug "Ensuring expected directories/files exist..."
    for dir in "$ELASTICSEARCH_TMPDIR" "$ELASTICSEARCH_DATADIR" "$ELASTICSEARCH_LOGDIR" "$ELASTICSEARCH_BASEDIR/plugins" "$ELASTICSEARCH_BASEDIR/modules" "$ELASTICSEARCH_CONFDIR"; do
        ensure_dir_exists "$dir"
        am_i_root && chown -R "$ELASTICSEARCH_DAEMON_USER:$ELASTICSEARCH_DAEMON_GROUP" "$dir"
    done

    if [[ -f "$ELASTICSEARCH_CONF_FILE" ]]; then
        info "Custom configuration file detected, using it..."
    else
        info "Setting default configuration"
        touch "$ELASTICSEARCH_CONF_FILE"
        elasticsearch_conf_set http.port "$ELASTICSEARCH_PORT_NUMBER"
        elasticsearch_conf_set path.data "$ELASTICSEARCH_DATADIR"
        elasticsearch_conf_set transport.tcp.port "$ELASTICSEARCH_NODE_PORT_NUMBER"
        elasticsearch_cluster_configuration
        elasticsearch_configure_node_type
    fi
    elasticsearch_set_heap_size
}

########################
# Install Elasticsearch plugins
# Globals:
#   ELASTICSEARCH_PLUGINS
# Arguments:
#   None
# Returns:
#   None
#########################
elasticsearch_install_plugins() {
    read -r -a plugins_list <<< "$(tr ',;' ' ' <<< "$ELASTICSEARCH_PLUGINS")"
    debug "Installing plugins: ${plugins_list[*]}"
    elasticsearch_conf_set plugin.mandatory "$ELASTICSEARCH_PLUGINS"
    for plugin in "${plugins_list[@]}"; do
        debug "Installing plugin: $plugin"
        if [[ "${BITNAMI_DEBUG:-false}" = true ]]; then
            elasticsearch-plugin install -b -v "$plugin"
        else
            elasticsearch-plugin install -b -v "$plugin" >/dev/null 2>&1
        fi
    done
}

@ -1,3 +0,0 @@

Bitnami containers ship with software bundles. You can find the licenses under:
/opt/bitnami/nami/COPYING
/opt/bitnami/[name-of-bundle]/licenses/[bundle-version].txt

@ -1,26 +0,0 @@

#!/bin/bash

# shellcheck disable=SC1091

# Load libraries
. /libelasticsearch.sh
. /libfs.sh

# Load Elasticsearch environment variables
eval "$(elasticsearch_env)"

for dir in "$ELASTICSEARCH_TMPDIR" "$ELASTICSEARCH_DATADIR" "$ELASTICSEARCH_LOGDIR" "${ELASTICSEARCH_BASEDIR}/plugins" "${ELASTICSEARCH_BASEDIR}/modules" "${ELASTICSEARCH_CONFDIR}"; do
    ensure_dir_exists "$dir"
    chmod -R ug+rwX "$dir"
    # The `elasticsearch-plugin install` command complains about being unable to create a plugin's directory
    # even when it has the proper permissions.
    # The reason: the code checks permissions by consulting the parent directory owner,
    # instead of checking whether the ES user actually has write permissions.
    #
    # As a workaround, we will ensure the container works (at least) with the non-root user 1001. However,
    # until we can avoid this hack, we can't guarantee this container to work on K8s distributions
    # where containers are executed with non-privileged users with random user IDs.
    #
    # Issue reported at: https://github.com/bitnami/bitnami-docker-elasticsearch/issues/50
    chown -R 1001:0 "$dir"
done

@ -1,27 +0,0 @@

#!/bin/bash

set -o errexit
set -o nounset
set -o pipefail
#set -o xtrace
# shellcheck disable=SC1091

# Load libraries
. /libelasticsearch.sh
. /libos.sh

# Load Elasticsearch environment variables
eval "$(elasticsearch_env)"

# Constants
EXEC=$(command -v elasticsearch)
ARGS=("-p" "$ELASTICSEARCH_TMPDIR/elasticsearch.pid" "-Epath.data=$ELASTICSEARCH_DATADIR")
[[ -z "${ELASTICSEARCH_EXTRA_FLAGS:-}" ]] || ARGS=("${ARGS[@]}" "${ELASTICSEARCH_EXTRA_FLAGS[@]}")
export JAVA_HOME=/opt/bitnami/java

info "** Starting Elasticsearch **"
if am_i_root; then
    exec gosu "$ELASTICSEARCH_DAEMON_USER" "$EXEC" "${ARGS[@]}"
else
    exec "$EXEC" "${ARGS[@]}"
fi

@ -1,30 +0,0 @@

#!/bin/bash

set -o errexit
set -o nounset
set -o pipefail
#set -o xtrace
# shellcheck disable=SC1091

# Load libraries
. /libos.sh
. /libfs.sh
. /libelasticsearch.sh

# Load Elasticsearch environment variables
eval "$(elasticsearch_env)"

# Ensure kernel settings are valid
elasticsearch_validate_kernel
# Ensure Elasticsearch environment variables settings are valid
elasticsearch_validate
# Ensure Elasticsearch is stopped when this script ends
trap "elasticsearch_stop" EXIT
# Ensure 'daemon' user exists when running as 'root'
am_i_root && ensure_user_exists "$ELASTICSEARCH_DAEMON_USER" "$ELASTICSEARCH_DAEMON_GROUP"
# Ensure Elasticsearch is initialized
elasticsearch_initialize
# Install Elasticsearch plugins
if [[ -n "$ELASTICSEARCH_PLUGINS" ]]; then
    elasticsearch_install_plugins
fi