Skip to content

Commit

Permalink
Merge pull request #534 from geoadmin/feat-BGDIINF_SB-2562_searchd_logs
Browse files Browse the repository at this point in the history
BGDIINF_SB-2562: make the logs searchd.log and query.log visible
  • Loading branch information
ltclm authored Sep 22, 2022
2 parents c025403 + 6727182 commit 873d69a
Show file tree
Hide file tree
Showing 4 changed files with 50 additions and 15 deletions.
8 changes: 4 additions & 4 deletions Dockerfile
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,7 @@ RUN apt-get update && \
default-mysql-client \
gettext-base \
gosu \
jq \
procps \
rsync \
sphinxsearch && \
Expand All @@ -14,7 +15,9 @@ RUN apt-get update && \
gosu nobody true && \
# set up cron for non root user
chmod gu+rw /var/run && \
chmod gu+s /usr/sbin/cron
chmod gu+s /usr/sbin/cron &&\
mkfifo /tmp/stdout /tmp/stderr && \
chmod 0666 /tmp/stdout /tmp/stderr

# set up geodata, file permissions, copy files and run container as geodata
FROM sphinxsearch_base as sphinxsearch_geodata
Expand All @@ -25,9 +28,6 @@ RUN groupadd -r geodata -g 2500 && \
# create mountpoint folders with geodata ownership
install -o geodata -g geodata -d /var/lib/sphinxsearch/data/index/ && \
install -o geodata -p geodata -d /var/lib/sphinxsearch/data/index_efs/ && \
# TODO: redirect logs to stdout # only working if container is running as root
# ln -sv /dev/stdout /var/log/sphinxsearch/query.log && \
# ln -sv /dev/stdout /var/log/sphinxsearch/searchd.log && \
# change ownerships to geodata which will run the service or the maintenance scripts
# and mount the efs folder
chown -R geodata:geodata /var/run/sphinxsearch/ && \
Expand Down
14 changes: 13 additions & 1 deletion scripts/docker-cmd.sh
Original file line number Diff line number Diff line change
Expand Up @@ -47,4 +47,16 @@ crontab < docker-crontab
# starting the searchd service
# will load the sphinx indexes from EFS --sync--> Volume --> into memory
echo -e "${green}starting searchd service ...${NC}"
/usr/bin/searchd --nodetach "$@"

# prepare the applogs for output on /proc/1/fd/1
tail --pid $$ -F /var/log/sphinxsearch/searchd.log &
tail --pid $$ -F /var/log/sphinxsearch/query.log &

# prepare the logs for the cronjobs
# Have the main Docker process tail the files to produce stdout and stderr
# for the main process that Docker will actually show in docker logs.
tail -f /tmp/stdout &
tail -f /tmp/stderr >&2 &

# searchd will own pid 1
exec /usr/bin/searchd --nodetach "$@"
6 changes: 5 additions & 1 deletion scripts/docker-crontab
Original file line number Diff line number Diff line change
Expand Up @@ -8,5 +8,9 @@
SHELL="/bin/bash"
USER="geodata"

*/5 * * * * bash /index-sync-rotate.sh
*/5 * * * * bash /index-sync-rotate.sh 1>/tmp/stdout 2>/tmp/stderr

# truncate sphinx logs
0 0 * * * : > /var/log/sphinxsearch/searchd.log
0 0 * * * : > /var/log/sphinxsearch/query.log
# it is important to add a new-line
37 changes: 28 additions & 9 deletions scripts/index-sync-rotate.sh
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,6 @@ SPHINX_EFS="/var/lib/sphinxsearch/data/index_efs/"
SPHINX_VOLUME="/var/lib/sphinxsearch/data/index/"
SPHINXCONFIG="/etc/sphinxsearch/sphinx.conf"
RSYNC_INCLUDE="/tmp/include.txt"
LOG_PREFIX="[ $$ - $(date +"%F %T")] "

# every 15 minutes
# lock only one script instance should be running
Expand Down Expand Up @@ -35,6 +34,26 @@ LOG_PREFIX="[ $$ - $(date +"%F %T")] "
# .spp
# .sps



# Emit each line of stdin as a one-line JSON log record on stdout.
# Arguments: $1 - log level string (e.g. INFO, WARN, ERROR)
# Inputs:    log message lines on stdin
# Outputs:   one compact JSON object per input line, e.g.
#            {"app":{"time":"...","level":"INFO","logger":"script.sh",...,"message":"..."}}
# Requires:  jq, GNU date (%3N), readlink -f
json_logger() {
  local log_level=$1
  local timestamp self
  # ISO-8601 UTC timestamp with millisecond precision (GNU date extension %N).
  timestamp=$(date --utc +%FT%T.%3NZ)
  # Resolve the script's own basename for the "logger" field.
  self=$(readlink -f "${BASH_SOURCE[0]}")
  self=$(basename "$self")
  # Pass all dynamic values via --arg so jq JSON-escapes them; splicing shell
  # variables into the filter text breaks on quotes/backslashes in the values.
  # FUNCNAME[1] is the *caller* of json_logger (FUNCNAME[0] would always be
  # "json_logger" itself); fall back to "main" for top-level calls.
  jq --raw-input --compact-output --monochrome-output \
    --arg time "${timestamp}" \
    --arg level "${log_level}" \
    --arg logger "${self}" \
    --arg pidTid "$$" \
    --arg func "${FUNCNAME[1]:-main}" \
    '{ "app":
      {
        "time": $time,
        "level": $level,
        "logger": $logger,
        "pidTid": $pidTid,
        "function": $func,
        "message": .
      }
    }'
}

SPHINX_FILE_EXTENSIONS=('spa' 'spd' 'spe' 'sph' 'spi' 'spk' 'spm' 'spp' 'sps')
SPHINX_INDEX_READY=('spd' 'spe' 'sph' 'spi' 'spp' 'sps')
SPHINX_INDEXES=$(grep -E "^[^#]+ path" "${SPHINXCONFIG}" | awk -F"=" '{print $2}' | sed -n -e 's|^.*/||p')
Expand All @@ -49,7 +68,7 @@ _no_more_locking() { _lock u; _lock xn && rm -f "$LOCKFILE"; }
_prepare_locking() { eval "exec $LOCKFD>\"$LOCKFILE\""; trap _no_more_locking EXIT; }

# do not continue if searchd is not running for crash or precaching reasons...
searchd --status &> /dev/null || { echo "${LOG_PREFIX}-> $(date +"%F %T") searchd service is not running, skip rsync"; exit 0; }
searchd --status &> /dev/null || { echo "searchd service is not running, skip rsync" | json_logger INFO; exit 0; }

# ON START
_prepare_locking
Expand All @@ -58,8 +77,8 @@ _prepare_locking
exlock_now() { _lock xn; } # obtain an exclusive lock immediately or fail

# avoiding running multiple instances of script.
exlock_now || { echo "${LOG_PREFIX}-> $(date +"%F %T") locked"; exit 1; }
echo "${LOG_PREFIX}-> $(date +"%F %T") start"
exlock_now || { echo "locked" | json_logger INFO; exit 1; }
echo "start" | json_logger INFO

check_if_index_is_ready() {
# input:
Expand Down Expand Up @@ -112,10 +131,10 @@ for sphinx_index in ${SPHINX_INDEXES[@]}; do
(( ${#new_files[@]} )) || continue

# check if index has been fully updated on EFS
check_if_index_is_ready "${sphinx_index}" || { echo "${LOG_PREFIX}-> $(date +"%F %T") skipping partially updated index: ${sphinx_index} ..."; continue; }
check_if_index_is_ready "${sphinx_index}" || { echo "skipping partially updated index: ${sphinx_index} ..." | json_logger INFO; continue; }

# sync EFS to VOLUME
echo "${LOG_PREFIX}-> $(date +"%F %T") start sync and rename files in target folder: ${sphinx_index} ..."
echo "start sync and rename files in target folder: ${sphinx_index} ..." | json_logger INFO
tmp_array=()

while IFS= read -r -d '' new_file; do
Expand All @@ -131,11 +150,11 @@ for sphinx_index in ${SPHINX_INDEXES[@]}; do
IFS=" " read -r -a new_files_merged <<< ${tmp_array[@]}
if ((${#new_files_merged[@]})); then
# start index rotation
echo "${LOG_PREFIX}-> $(date +"%F %T") restart searchd for index rotation..."
echo "restart searchd for index rotation..." | json_logger INFO
pkill -1 searchd

# wait until all files new_files and locally renamed files have been renamed / rotated in SPHINX_VOLUME
echo "${LOG_PREFIX}-> $(date +"%F %T") wait for index rotation..."
echo "wait for index rotation..." | json_logger INFO
all_files_are_gone=false
while ! ${all_files_are_gone}; do
all_files_are_gone=true
Expand All @@ -151,4 +170,4 @@ for sphinx_index in ${SPHINX_INDEXES[@]}; do
done


echo "${LOG_PREFIX}-> $(date +"%F %T") finished"
echo "finished" | json_logger INFO

0 comments on commit 873d69a

Please sign in to comment.