diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index 77bd28b..ada738a 100644
--- a/.github/workflows/build.yml
+++ b/.github/workflows/build.yml
@@ -4,9 +4,6 @@ on:
   push:
     branches:
       - 'main'
-      # - 'staging'
-      # - 'dev'
-      # - 'hotfix/**'
 jobs:
   build:
     runs-on: ubuntu-latest
@@ -43,3 +40,48 @@ jobs:
           file: Dockerfile
           push: true
           tags: quay.io/denbicloud/cron-backup:${{ steps.tag.outputs.TAG }}
+
+  special_builds:
+    runs-on: ubuntu-latest
+    needs: build
+    strategy:
+      fail-fast: false
+      matrix:
+        type: [ postgresql, mysql, mongodb ]
+    steps:
+
+      - name: Workflow run cleanup action
+        uses: rokroskar/workflow-run-cleanup-action@v0.3.3
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+      - uses: actions/checkout@master
+      - name: Extract branch name
+        shell: bash
+        run: echo "##[set-output name=branch;]$(echo ${GITHUB_REF#refs/heads/})"
+        id: extract_branch
+
+      - name: Set tag
+        run: sed 's/\//-/g' <<< "::set-output name=TAG::${{ steps.extract_branch.outputs.branch }}"
+        id: tag
+      - name: Get tag
+        run: echo "The selected tag is ${{ steps.tag.outputs.TAG }}"
+
+      -
+        name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v3
+      - name: Login to Quay.io
+        uses: docker/login-action@v3
+        with:
+          registry: quay.io
+          username: ${{ secrets.QUAY_USERNAME }}
+          password: ${{ secrets.QUAY_TOKEN }}
+      - name: Build and publish image to Quay
+        uses: docker/build-push-action@v5
+        env:
+          BASE_TAG: ${{ steps.tag.outputs.TAG }}
+        with:
+          file: ${{matrix.type}}/Dockerfile
+          context: ${{matrix.type}}
+          push: true
+          build-args: BASE_TAG
+          tags: quay.io/denbicloud/cron-backup:${{matrix.type}}-${{ steps.tag.outputs.TAG }}
\ No newline at end of file
diff --git a/Dockerfile b/Dockerfile
index ecc4b94..03aae15 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -7,21 +7,25 @@ RUN apk add --update --no-cache \
     fdupes \
     python3 \
     py3-pip \
+    curl \
     busybox-extras \
     ssmtp
 
-RUN pip3 install --upgrade pip
-RUN pip3 install s3cmd
+RUN pip3 install --upgrade pip --break-system-packages
+RUN pip3 install s3cmd --break-system-packages
 
 RUN touch /var/log/cron.log
 
 COPY ./prepare-cron.sh /prepare-cron.sh
 COPY ./backup/rotate_backup.sh /rotate_backup.sh
 COPY ./backup/backup-cron /backup-cron
+COPY ./backup/notify_uptime_kuma.sh /notify_uptime_kuma.sh
+
 COPY ./s3/s3_backup.sh /s3_backup.sh
 COPY ./s3/s3_backup-cron /s3_backup-cron
 COPY ./s3/s3_backup_rotation.sh /s3_backup_rotation.sh
 COPY ./s3/s3_backup_rotation-cron /s3_backup_rotation-cron
+COPY ./s3/s3_notify_uptime_kuma.sh /s3_notify_uptime_kuma.sh
 
 RUN chmod +x /prepare-cron.sh
diff --git a/README.md b/README.md
index e2a8f9a..909f6b9 100644
--- a/README.md
+++ b/README.md
@@ -30,9 +30,12 @@ base image with a shell script to prepare and run cron jobs. To use this you nee
    - S3_CONFIGS_PATH - Directory of the different site configs with variables see below (should be mounted)
    - S3_BACKUP_ROTATION_ENABLED - must to set to true to activate backup rotation in S3
    - S3_BACKUP_ROTATION_TIME_LIMIT - expiration time in days for Backups - if rotation is enabled uploads older than this limit will be removed - (global - can be overwirtten per site.cfg)
+   - S3_KUMA_STATUS_ENDPOINT - when provided, will be called after a successful S3 backup
 
 In addition, a cfg must be specified for each site to which the backups are to be pushed - with the following content [example](s3/configs/example.site.cfg):
+6. An endpoint can be provided for status updates to [Uptime Kuma](https://github.com/louislam/uptime-kuma). The following env variables must be set:
+   - KUMA_STATUS_ENDPOINT - when provided, will be called after a successful backup
 
 ~~~Bash
 S3_HASHDIR=DIR #should be mounted - stores local checksum of pushed non-encrypted files (in S3 they are encrypted thus different checksum)
 S3_OBJECT_STORAGE_EP=SITE_SPECIFIC_OBJECT_STORAGE EP (e.g openstack.cebitec.uni-bielefeld.de:8080)
@@ -52,10 +55,12 @@ Next use this image with your docker-compose.yml (here an example for a limesurv
       - ${general_PERSISTENT_PATH}backup/limesurvey:/etc/backup
     environment:
       - LIMESURVEY_DB_PASSWORD
+      - KUMA_STATUS_ENDPOINT
       - BACKUP_ROTATION_ENABLED=true
       - BACKUP_ROTATION_MAX_SIZE=10
       - BACKUP_ROTATION_CUT_SIZE=5
       - BACKUP_ROTATION_SIZE_TYPE=GiB
+      - S3_KUMA_STATUS_ENDPOINT
       - S3_BACKUP_ENABLED=true
       - S3_PATH=limesurvey
      - S3_CONFIGS_DIR=~/configs
diff --git a/backup/notify_uptime_kuma.sh b/backup/notify_uptime_kuma.sh
new file mode 100755
index 0000000..9c4786e
--- /dev/null
+++ b/backup/notify_uptime_kuma.sh
@@ -0,0 +1,18 @@
+#!/bin/bash
+
+# Set KUMA_STATUS_ENDPOINT variable from environment or default to empty string
+KUMA_STATUS_ENDPOINT=${KUMA_STATUS_ENDPOINT:-}
+
+if [ -z "$KUMA_STATUS_ENDPOINT" ]; then
+  echo "INFO: KUMA_STATUS_ENDPOINT is not set. Skipping."
+else
+  # Use curl to make a GET request to the status endpoint
+  response=$(curl -s -X GET "$KUMA_STATUS_ENDPOINT")
+
+  # Check if the request was successful
+  if [ $? -eq 0 ]; then
+    echo "Status endpoint responded successfully: $response"
+  else
+    echo "Error: Failed to push status to $KUMA_STATUS_ENDPOINT. Status code: $?"
+  fi
+fi
\ No newline at end of file
diff --git a/backup/rotate_backup.sh b/backup/rotate_backup.sh
old mode 100644
new mode 100755
diff --git a/mongodb/install-packages.sh b/mongodb/install-packages.sh
old mode 100644
new mode 100755
diff --git a/mongodb/mongodb-backup.sh b/mongodb/mongodb-backup.sh
old mode 100644
new mode 100755
index fcbd310..149e4a9
--- a/mongodb/mongodb-backup.sh
+++ b/mongodb/mongodb-backup.sh
@@ -3,6 +3,7 @@
 log() {
   echo "[$(date +"%Y-%m-%d %H:%M:%S")] - $1"
 }
+trap 'log "Error occurred, exiting script"; exit 1' ERR
 
 NOW=$(date '+%y-%m-%d-%H%M')
 FILE="/etc/backup/${MONGODB_DB}-${NOW}.dump.gz"
@@ -18,3 +19,16 @@
 fi
 MONGODB_HOST=$(echo "$MONGODB_HOST" | cut -d: -f1)
 mongodump --archive="$FILE" --gzip --uri="$URI"
+
+# Check if the backup file is not empty and has a reasonable size
+MIN_SIZE=$((1024 * 10)) # 10KB minimum size
+if [ ! -s "$FILE" ] || [ $(stat -c%s "$FILE") -lt $MIN_SIZE ]; then
+  log "Backup file $FILE is too small (${MIN_SIZE}B required), aborting script"
+  exit 1
+fi
+
+# Send a notification using the notify_uptime_kuma.sh script
+if ! /notify_uptime_kuma.sh; then
+  log "Failed to send notification"
+fi
+log "Backup completed successfully"
\ No newline at end of file
diff --git a/mysql/install-packages.sh b/mysql/install-packages.sh
old mode 100644
new mode 100755
diff --git a/mysql/mysql-backup.sh b/mysql/mysql-backup.sh
old mode 100644
new mode 100755
index bb13517..f48d10c
--- a/mysql/mysql-backup.sh
+++ b/mysql/mysql-backup.sh
@@ -3,9 +3,23 @@
 log() {
   echo "[$(date +"%Y-%m-%d %H:%M:%S")] - $1"
 }
+trap 'log "Error occurred, exiting script"; exit 1' ERR
 
 NOW=$(date '+%y-%m-%d-%H%M')
 FILE=/etc/backup/${MYSQL_HOST}-${NOW}.sql.gz
 
 log "Create Backup $FILE"
 mysqldump -h ${MYSQL_HOST} -u ${MYSQL_USER} --password=${MYSQL_PASSWORD} --all-databases | gzip > $FILE
+
+# Check if the backup file is not empty and has a reasonable size
+MIN_SIZE=$((1024 * 10)) # 10KB minimum size
+if [ ! -s "$FILE" ] || [ $(stat -c%s "$FILE") -lt $MIN_SIZE ]; then
+  log "Backup file $FILE is too small (${MIN_SIZE}B required), aborting script"
+  exit 1
+fi
+
+# Send a notification using the notify_uptime_kuma.sh script
+if ! /notify_uptime_kuma.sh; then
+  log "Failed to send notification"
+fi
+log "Backup completed successfully"
\ No newline at end of file
diff --git a/postgresql/install-packages.sh b/postgresql/install-packages.sh
old mode 100644
new mode 100755
diff --git a/postgresql/postgresql-backup.sh b/postgresql/postgresql-backup.sh
old mode 100644
new mode 100755
index d83a3aa..d5d463f
--- a/postgresql/postgresql-backup.sh
+++ b/postgresql/postgresql-backup.sh
@@ -1,18 +1,62 @@
 #!/bin/sh
 
+# Define a logging function
 log() {
   echo "[$(date +"%Y-%m-%d %H:%M:%S")] - $1"
 }
 
-touch ~/.pgpass
-log "Creating ~/.pgpass file"
-echo ${POSTGRES_HOST}:${POSTGRES_PORT}:${POSTGRES_DB}:${POSTGRES_USER}:${POSTGRES_PASSWORD} > ~/.pgpass
-chmod 600 ~/.pgpass
+# Set up an error trap to exit the script on any error
+trap 'log "Error occurred, exiting script"; exit 1' ERR
+
+# Create the ~/.pgpass file if it doesn't exist
+if [ ! -f ~/.pgpass ]; then
+  touch ~/.pgpass
+  log "Created ~/.pgpass file"
+fi
+
+# Set the PostgreSQL connection details as environment variables
+POSTGRES_HOST=${POSTGRES_HOST:-}
+POSTGRES_PORT=${POSTGRES_PORT:-5432} # default to 5432 if not set
+POSTGRES_DB=${POSTGRES_DB:-}
+POSTGRES_USER=${POSTGRES_USER:-}
+POSTGRES_PASSWORD=${POSTGRES_PASSWORD:-}
+# Check that all required environment variables are set
+if [ -z "$POSTGRES_HOST" ] || [ -z "$POSTGRES_DB" ] || [ -z "$POSTGRES_USER" ] || [ -z "$POSTGRES_PASSWORD" ]; then
+  log "Error: Missing PostgreSQL connection details, exiting script"
+  exit 1
+fi
 
+# Set the PGPASSFILE environment variable to point to the ~/.pgpass file
 export PGPASSFILE='/root/.pgpass'
+# Write the PostgreSQL connection details to the ~/.pgpass file
+echo "${POSTGRES_HOST}:${POSTGRES_PORT}:${POSTGRES_DB}:${POSTGRES_USER}:${POSTGRES_PASSWORD}" > ~/.pgpass
+
+# Set permissions on the ~/.pgpass file
+chmod 600 ~/.pgpass
 
+# Create a timestamp for the backup file name
 NOW=$(date '+%y-%m-%d-%H%M')
 
-FILE=/etc/backup/${POSTGRES_DB}-${NOW}.dump.gz
-log "Create Backup $FILE"
-pg_dump -h ${POSTGRES_HOST} -U ${POSTGRES_USER} ${POSTGRES_DB} -Z 9 > $FILE
+# Define the backup file path and name
+FILE="/etc/backup/${POSTGRES_DB}-${NOW}.dump.gz"
+
+log "Creating Backup $FILE"
+
+# Perform the PostgreSQL database dump
+pg_dump -h "${POSTGRES_HOST}" -U "${POSTGRES_USER}" "${POSTGRES_DB}" -Z 9 > "$FILE"
+
+# Check if the backup file is not empty and has a reasonable size
+MIN_SIZE=$((1024 * 10)) # 10KB minimum size
+if [ ! -s "$FILE" ] || [ $(stat -c%s "$FILE") -lt $MIN_SIZE ]; then
+  log "Backup file $FILE is too small (${MIN_SIZE}B required), aborting script"
+  exit 1
+fi
+
+# Send a notification using the notify_uptime_kuma.sh script
+if ! /notify_uptime_kuma.sh; then
+  log "Failed to send notification"
+fi
+
+log "Backup completed successfully"
\ No newline at end of file
diff --git a/postgresql/postgresql-cron b/postgresql/postgresql-cron
old mode 100644
new mode 100755
diff --git a/prepare-cron.sh b/prepare-cron.sh
old mode 100644
new mode 100755
diff --git a/s3/s3_backup.sh b/s3/s3_backup.sh
old mode 100644
new mode 100755
index 617021d..dc38298
--- a/s3/s3_backup.sh
+++ b/s3/s3_backup.sh
@@ -3,7 +3,7 @@
 log() {
   echo "[$(date +"%Y-%m-%d %H:%M:%S")] - $1"
 }
-
+trap 'log "Error occurred, exiting script"; exit 1' ERR
 log "Starting backup script"
 
 basedir="/etc/backup"
@@ -89,3 +89,9 @@ find "$S3_CONFIGS_PATH" -type f -name "*.cfg" | while read -r env_data; do
   log "Removing temp config and password files"
   rm -f "$tmp_conf"
 done
+
+
+# Send a notification using the s3_notify_uptime_kuma.sh script
+if ! /s3_notify_uptime_kuma.sh; then
+  log "Failed to send notification"
+fi
\ No newline at end of file
diff --git a/s3/s3_backup_rotation.sh b/s3/s3_backup_rotation.sh
old mode 100644
new mode 100755
diff --git a/s3/s3_notify_uptime_kuma.sh b/s3/s3_notify_uptime_kuma.sh
new file mode 100755
index 0000000..e04514a
--- /dev/null
+++ b/s3/s3_notify_uptime_kuma.sh
@@ -0,0 +1,19 @@
+#!/bin/bash
+
+# Set S3_KUMA_STATUS_ENDPOINT variable from environment or default to empty string
+S3_KUMA_STATUS_ENDPOINT=${S3_KUMA_STATUS_ENDPOINT:-}
+
+if [ -z "$S3_KUMA_STATUS_ENDPOINT" ]; then
+  echo "INFO: S3_KUMA_STATUS_ENDPOINT is not set. Skipping."
+else
+  # Use curl to make a POST request to the status endpoint
+  response=$(curl -s -X POST \
+    "$S3_KUMA_STATUS_ENDPOINT")
+
+  # Check if the request was successful
+  if [ $? -eq 0 ]; then
+    echo "Status endpoint responded successfully: $response"
+  else
+    echo "Error: Failed to send status update to $S3_KUMA_STATUS_ENDPOINT. Status code: $?"
+  fi
+fi
\ No newline at end of file
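
For reference, a minimal sketch of how the new notification hook can be exercised by hand inside the container (the Uptime Kuma host and push token below are placeholders, not values from this repository):

~~~Bash
# Placeholder push-monitor URL - substitute your own Uptime Kuma host and token
export KUMA_STATUS_ENDPOINT="https://kuma.example.org/api/push/YOUR_TOKEN?status=up&msg=backup-ok"

# The backup scripts call the helper at this absolute path inside the image
/notify_uptime_kuma.sh

# With the variable unset the helper skips the call instead of failing
unset KUMA_STATUS_ENDPOINT
/notify_uptime_kuma.sh   # prints "INFO: KUMA_STATUS_ENDPOINT is not set. Skipping."
~~~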