diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS new file mode 100644 index 0000000..25f6404 --- /dev/null +++ b/.github/CODEOWNERS @@ -0,0 +1 @@ +* @heroku/languages diff --git a/.github/workflows/check_changelog.yml b/.github/workflows/check_changelog.yml index 282bf46..c0cc43b 100644 --- a/.github/workflows/check_changelog.yml +++ b/.github/workflows/check_changelog.yml @@ -2,16 +2,19 @@ name: Check Changelog on: pull_request: - types: [opened, reopened, edited, synchronize] + types: [opened, reopened, labeled, unlabeled, synchronize] + +permissions: + contents: read jobs: check-changelog: runs-on: ubuntu-latest - if: | - !contains(github.event.pull_request.body, '[skip changelog]') && - !contains(github.event.pull_request.body, '[changelog skip]') && - !contains(github.event.pull_request.body, '[skip ci]') + if: (!contains(github.event.pull_request.labels.*.name, 'skip changelog')) steps: - - uses: actions/checkout@v1 + - name: Checkout + uses: actions/checkout@v4 - name: Check that CHANGELOG is touched - run: git diff remotes/origin/${{ github.base_ref }} --name-only | grep CHANGELOG.md + run: | + git fetch origin ${{ github.base_ref }} --depth 1 && \ + git diff remotes/origin/${{ github.base_ref }} --name-only | grep CHANGELOG.md diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml new file mode 100644 index 0000000..958dad1 --- /dev/null +++ b/.github/workflows/ci.yml @@ -0,0 +1,35 @@ +name: CI + +on: + push: + branches: ["main"] + pull_request: + +permissions: + contents: read + +jobs: + functional-test: + runs-on: ubuntu-22.04 + container: + image: heroku/heroku:${{ matrix.stack_number }}-build + options: --user root + strategy: + matrix: + stack_number: ["20", "22", "24"] + env: + STACK: heroku-${{ matrix.stack_number }} + steps: + - name: Checkout + uses: actions/checkout@v4 + - name: Functional tests on heroku:${{ matrix.stack_number }}-build + run: test/run + + shell-lint: + runs-on: ubuntu-22.04 + container: + image: koalaman/shellcheck-alpine:v0.9.0 + steps: + - uses: actions/checkout@v4 + - name: shellcheck + run: shellcheck -x bin/compile bin/detect bin/release bin/report diff --git a/.gitignore b/.gitignore index 7ee0c0f..94c93ea 100644 --- a/.gitignore +++ b/.gitignore @@ -1 +1,2 @@ .anvil +.idea diff --git a/CHANGELOG.md b/CHANGELOG.md index 295492e..6666e72 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,20 +1,50 @@ -# Changelog +# APT Buildpack Changelog -## To Be Released +## Unreleased -* Add support for comments in `Aptfile` ([#24](https://github.com/heroku/heroku-buildpack-apt/pull/24)). -* Prevent APT using source lists from `/etc/apt/sources.list.d/` ([#46](https://github.com/heroku/heroku-buildpack-apt/pull/46)). -* Stop using `force-yes` with newer version of apt-get ([#51](https://github.com/heroku/heroku-buildpack-apt/pull/51)). -* Flush the cache on stack change ([#58](https://github.com/heroku/heroku-buildpack-apt/pull/58)). -* Fail the build if `apt-get` or `curl` errors ([#79](https://github.com/heroku/heroku-buildpack-apt/pull/79)). -* Only try to add custom repositories when some are defined in `Aptfile` ([#79](https://github.com/heroku/heroku-buildpack-apt/pull/79)). -* Output a helpful error message when no `Aptfile` is found ([#87](https://github.com/heroku/heroku-buildpack-apt/pull/87)). 
+## 2024-03-28 -## Version 1.1 +- Warn when Aptfile contains no packages ([#126](https://github.com/heroku/heroku-buildpack-apt/pull/126)) +- Support sources parts directory for Heroku-24 compatibility ([#119](https://github.com/heroku/heroku-buildpack-apt/pull/119)) -* Add `$HOME/.apt/usr/sbin` into application PATH (`profile.d` script) -* Add `APT_FILE_MANIFEST` environment variable to use another file than `Aptfile` from build directory root +## 2024-03-14 -## Version 1.0 +- Shell hardening ([#115](https://github.com/heroku/heroku-buildpack-apt/pull/115)) +- Handle multi-package lines when capturing buildpack metadata ([#112](https://github.com/heroku/heroku-buildpack-apt/pull/112)) -Initial Version +## 2024-03-01 + +- Add `bin/report` script to capture buildpack metadata ([#110](https://github.com/heroku/heroku-buildpack-apt/pull/110)) + +## 2021-03-10 + +- Output a helpful error message when no `Aptfile` is found ([#87](https://github.com/heroku/heroku-buildpack-apt/pull/87)). + +## 2021-01-15 + +- Fail the build if `apt-get` or `curl` errors ([#79](https://github.com/heroku/heroku-buildpack-apt/pull/79)). +- Only try to add custom repositories when some are defined in `Aptfile` ([#79](https://github.com/heroku/heroku-buildpack-apt/pull/79)). + +## 2019-10-17 + +- Flush the cache on stack change ([#58](https://github.com/heroku/heroku-buildpack-apt/pull/58)). + +## 2019-09-06 + +- Stop using `force-yes` with newer version of apt-get ([#51](https://github.com/heroku/heroku-buildpack-apt/pull/51)). + +## 2019-06-11 + +- Prevent APT using source lists from `/etc/apt/sources.list.d/` ([#46](https://github.com/heroku/heroku-buildpack-apt/pull/46)). + +## 2019-06-10 + +- Add support for comments in `Aptfile` ([#24](https://github.com/heroku/heroku-buildpack-apt/pull/24)). + +## 2017-09-13 + +- Add support for custom repositories ([#18](https://github.com/heroku/heroku-buildpack-apt/pull/18)). + +## 2016 and earlier + +See the [Git log](https://github.com/heroku/heroku-buildpack-apt/commits/40883f0cb8e8ddb2876ca8be5d25ade4ff9617b1). diff --git a/Makefile b/Makefile new file mode 100644 index 0000000..5dd5e97 --- /dev/null +++ b/Makefile @@ -0,0 +1,19 @@ +test: heroku-24-build heroku-22-build heroku-20-build + +shellcheck: + @shellcheck -x bin/compile bin/detect bin/release bin/report + +heroku-24-build: + @echo "Running tests in docker (heroku-24-build)..." + @docker run --user root -v $(shell pwd):/buildpack:ro --rm -it -e "STACK=heroku-24" heroku/heroku:24-build bash -c 'cp -r /buildpack /buildpack_test; cd /buildpack_test/; test/run;' + @echo "" + +heroku-22-build: + @echo "Running tests in docker (heroku-22-build)..." + @docker run -v $(shell pwd):/buildpack:ro --rm -it -e "STACK=heroku-22" heroku/heroku:22-build bash -c 'cp -r /buildpack /buildpack_test; cd /buildpack_test/; test/run;' + @echo "" + +heroku-20-build: + @echo "Running tests in docker (heroku-20-build)..." + @docker run -v $(shell pwd):/buildpack:ro --rm -it -e "STACK=heroku-20" heroku/heroku:20-build bash -c 'cp -r /buildpack /buildpack_test; cd /buildpack_test/; test/run;' + @echo "" diff --git a/README.md b/README.md index c33b919..07f3e2e 100644 --- a/README.md +++ b/README.md @@ -13,6 +13,7 @@ Include a list of apt package names to be installed in a file named `Aptfile`. To find out what packages are available, see: +<<<<<<< HEAD See the [Scalingo stacks](https://doc.scalingo.com/platform/internals/stacks/stacks) documentation for which Ubuntu LTS version is used by each stack. 
#### Setup @@ -37,27 +38,13 @@ Note that the order of the buildpacks in the `.buildpacks` file matters. #### Aptfile # you can list packages - libpq-dev - + libexample-dev + # or include links to specific .deb files - http://downloads.sourceforge.net/project/wkhtmltopdf/0.12.1/wkhtmltox-0.12.1_linux-precise-amd64.deb - + https://downloads.example.com/example.deb + # or add custom apt repos (only required if using packages outside of the standard Ubuntu APT repositories) - :repo:deb http://cz.archive.ubuntu.com/ubuntu artful main universe - -#### Gemfile - - source "https://rubygems.org" - gem "pg" - -### Check out the PG library version - - $ scalingo run bash -a apt-pg-test - ~ $ irb - irb(main):001:0> require "pg" - => true - irb(main):002:0> PG::version_string - => "PG 0.15.1" + :repo:deb https://apt.example.com/ example-distro main ## License diff --git a/bin/compile b/bin/compile index d36967c..895f54a 100755 --- a/bin/compile +++ b/bin/compile @@ -12,7 +12,7 @@ fi # parse and derive params BUILD_DIR=$1 CACHE_DIR=$2 -LP_DIR=$(cd $(dirname $0); cd ..; pwd) +LP_DIR=$(cd "$(dirname "$0")"; cd ..; pwd) function error() { echo " ! $*" >&2 @@ -31,8 +31,18 @@ function indent() { esac } +if ! grep --invert-match -e "^\s*#" -e "^\s*$" -e "^:repo:" -q "${BUILD_DIR}/Aptfile"; then + echo " +! You have no packages listed in your Aptfile. If you don't need custom Apt packages, +! delete your Aptfile and remove the buildpack with: +! +! $ heroku buildpacks:remove heroku-community/apt +" + exit 0 +fi + # Store which STACK we are running on in the cache to bust the cache if it changes -if [ -f $CACHE_DIR/.apt/STACK ]; then +if [[ -f "$CACHE_DIR/.apt/STACK" ]]; then CACHED_STACK=$(cat "$CACHE_DIR/.apt/STACK") else CACHED_STACK=$STACK @@ -45,6 +55,7 @@ echo "$STACK" > "$CACHE_DIR/.apt/STACK" APT_CACHE_DIR="$CACHE_DIR/apt/cache" APT_STATE_DIR="$CACHE_DIR/apt/state" APT_SOURCELIST_DIR="$CACHE_DIR/apt/sources" # place custom sources.list here +APT_SOURCEPARTS_DIR="$APT_SOURCELIST_DIR/sources.list.d" APT_SOURCES="$APT_SOURCELIST_DIR/sources.list" APT_FILE_MANIFEST="${APT_FILE_MANIFEST:-Aptfile}" @@ -52,8 +63,8 @@ APT_FILE_MANIFEST="${APT_FILE_MANIFEST:-Aptfile}" APT_VERSION=$(apt-get -v | awk 'NR == 1{ print $2 }') case "$APT_VERSION" in - 0* | 1.0*) APT_FORCE_YES="--force-yes";; - *) APT_FORCE_YES="--allow-downgrades --allow-remove-essential --allow-change-held-packages";; + 0* | 1.0*) APT_FORCE_YES=("--force-yes");; + *) APT_FORCE_YES=("--allow-downgrades" "--allow-remove-essential" "--allow-change-held-packages");; esac if [ -f $APT_CACHE_DIR/$APT_FILE_MANIFEST ] && cmp -s $BUILD_DIR/$APT_FILE_MANIFEST $APT_CACHE_DIR/$APT_FILE_MANIFEST && [[ $CACHED_STACK == $STACK ]] ; then @@ -68,6 +79,7 @@ else mkdir -p "$APT_SOURCELIST_DIR" # make dir for sources cp -f "$BUILD_DIR/$APT_FILE_MANIFEST" "$APT_CACHE_DIR/$APT_FILE_MANIFEST" cat "/etc/apt/sources.list" > "$APT_SOURCES" # no cp here + cp -R "/etc/apt/sources.list.d" "$APT_SOURCEPARTS_DIR" # add custom repositories from Aptfile to sources.list # like>> :repo:deb http://cz.archive.ubuntu.com/ubuntu artful main universe if grep -q -e "^:repo:" $BUILD_DIR/$APT_FILE_MANIFEST; then @@ -76,39 +88,44 @@ else fi fi -APT_OPTIONS="-o debug::nolocking=true -o dir::cache=$APT_CACHE_DIR -o dir::state=$APT_STATE_DIR" +APT_OPTIONS=("-o" "debug::nolocking=true" "-o" "dir::cache=$APT_CACHE_DIR" "-o" "dir::state=$APT_STATE_DIR") # Override the use of /etc/apt/sources.list (sourcelist) and /etc/apt/sources.list.d/* (sourceparts). 
-APT_OPTIONS="$APT_OPTIONS -o dir::etc::sourcelist=$APT_SOURCES -o dir::etc::sourceparts=/dev/null" +APT_OPTIONS+=("-o" "dir::etc::sourcelist=$APT_SOURCES" "-o" "dir::etc::sourceparts=$APT_SOURCEPARTS_DIR") topic "Updating apt caches" -apt-get $APT_OPTIONS update | indent +apt-get "${APT_OPTIONS[@]}" update 2>&1 | indent for PACKAGE in $(cat $BUILD_DIR/$APT_FILE_MANIFEST | grep -v -s -e '^#' | grep -v -s -e "^:repo:"); do if [[ $PACKAGE == *deb ]]; then - PACKAGE_NAME=$(basename $PACKAGE .deb) + PACKAGE_NAME=$(basename "$PACKAGE" .deb) PACKAGE_FILE=$APT_CACHE_DIR/archives/$PACKAGE_NAME.deb topic "Fetching $PACKAGE" - curl --silent --show-error --fail -L -z $PACKAGE_FILE -o $PACKAGE_FILE $PACKAGE 2>&1 | indent + curl --silent --show-error --fail -L -z "$PACKAGE_FILE" -o "$PACKAGE_FILE" "$PACKAGE" 2>&1 | indent else topic "Fetching .debs for $PACKAGE" - apt-get $APT_OPTIONS -y $APT_FORCE_YES -d install --reinstall $PACKAGE | indent + # while this is not documented behavior, the Aptfile format technically + # did allow for multiple packages separated by spaces to be specified + # on a single line due to how the download command was implemented so we + # should respect that behavior since users are doing this + IFS=$' \t' read -ra PACKAGE_NAMES <<< "$PACKAGE" + apt-get "${APT_OPTIONS[@]}" -y "${APT_FORCE_YES[@]}" -d install --reinstall "${PACKAGE_NAMES[@]}" | indent fi -done +done < <(grep --invert-match -e "^\s*#" -e "^\s*$" -e "^:repo:" "${BUILD_DIR}/Aptfile") -mkdir -p $BUILD_DIR/.apt +mkdir -p "$BUILD_DIR/.apt" -for DEB in $(ls -1 $APT_CACHE_DIR/archives/*.deb); do - topic "Installing $(basename $DEB)" - dpkg -x $DEB $BUILD_DIR/.apt/ +for DEB in "$APT_CACHE_DIR/archives/"*.deb; do + topic "Installing $(basename "$DEB")" + dpkg -x "$DEB" "$BUILD_DIR/.apt/" done topic "Writing profile script" -mkdir -p $BUILD_DIR/.profile.d -cat <$BUILD_DIR/.profile.d/000_apt.sh -export PATH="\$HOME/.apt/usr/bin:\$HOME/.apt/usr/sbin:\$PATH" -export LD_LIBRARY_PATH="\$HOME/.apt/lib/x86_64-linux-gnu:\$HOME/.apt/usr/lib/x86_64-linux-gnu:\$HOME/.apt/usr/lib/i386-linux-gnu:\$HOME/.apt/lib:\$HOME/.apt/usr/lib:\$LD_LIBRARY_PATH" -export LIBRARY_PATH="\$HOME/.apt/lib/x86_64-linux-gnu:\$HOME/.apt/usr/lib/x86_64-linux-gnu:\$HOME/.apt/usr/lib/i386-linux-gnu:\$HOME/.apt/lib:\$HOME/.apt/usr/lib:\$LIBRARY_PATH" +mkdir -p "$BUILD_DIR/.profile.d" +cat <"$BUILD_DIR/.profile.d/000_apt.sh" +export PATH="\$HOME/.apt/usr/bin:\$PATH" +export LD_LIBRARY_PATH="\$HOME/.apt/usr/lib/x86_64-linux-gnu:\$HOME/.apt/usr/lib/i386-linux-gnu:\$HOME/.apt/usr/lib:\$LD_LIBRARY_PATH" +export LIBRARY_PATH="\$HOME/.apt/usr/lib/x86_64-linux-gnu:\$HOME/.apt/usr/lib/i386-linux-gnu:\$HOME/.apt/usr/lib:\$LIBRARY_PATH" export INCLUDE_PATH="\$HOME/.apt/usr/include:\$HOME/.apt/usr/include/x86_64-linux-gnu:\$INCLUDE_PATH" export CPATH="\$INCLUDE_PATH" export CPPPATH="\$INCLUDE_PATH" @@ -127,4 +144,4 @@ export PKG_CONFIG_PATH="$BUILD_DIR/.apt/usr/lib/x86_64-linux-gnu/pkgconfig:$BUIL export | grep -E -e ' (PATH|LD_LIBRARY_PATH|LIBRARY_PATH|INCLUDE_PATH|CPATH|CPPPATH|PKG_CONFIG_PATH)=' > "$LP_DIR/export" topic "Rewrite package-config files" -find $BUILD_DIR/.apt -type f -ipath '*/pkgconfig/*.pc' | xargs --no-run-if-empty -n 1 sed -i -e 's!^prefix=\(.*\)$!prefix='"$BUILD_DIR"'/.apt\1!g' +find "$BUILD_DIR/.apt" -type f -ipath '*/pkgconfig/*.pc' -print0 | xargs -0 --no-run-if-empty -n 1 sed -i -e 's!^prefix=\(.*\)$!prefix='"$BUILD_DIR"'/.apt\1!g' diff --git a/bin/report b/bin/report new file mode 100755 index 0000000..1421bba --- /dev/null +++ b/bin/report @@ 
-0,0 +1,41 @@ +#!/usr/bin/env bash +# bin/report + +### Configure environment + +set -o errexit # always exit on error +set -o pipefail # don't ignore exit codes when piping output + +BUILD_DIR=${1:-} + +packages=() +custom_packages=() +custom_repositories=() + +while IFS= read -r line; do + if grep --silent -e "^:repo:" <<< "${line}"; then + custom_repositories+=("${line//:repo:/}") + elif [[ $line == *deb ]]; then + custom_packages+=("${line}") + else + IFS=$' \t' read -ra package_names <<< "${line}" + for package_name in "${package_names[@]}"; do + packages+=("${package_name}") + done + fi +done < <(grep --invert-match -e "^\s*#" -e "^\s*$" "${BUILD_DIR}/Aptfile") + +output_key_value() { + local key value + key="$1" + shift + # sort & join the array values with a ',' then escape both '\' and '"' characters + value=$(printf '%s\n' "$@" | sort | tr '\n' ',' | sed 's/,$/\n/' | sed 's/\\/\\\\/g' | sed 's/"/\\"/g') + if [[ -n "${value}" ]]; then + echo "$key: \"$value\"" + fi +} + +output_key_value "packages" "${packages[@]}" +output_key_value "custom_packages" "${custom_packages[@]}" +output_key_value "custom_repositories" "${custom_repositories[@]}" diff --git a/test/compile_test.sh b/test/compile_test.sh deleted file mode 100644 index f5c4a40..0000000 --- a/test/compile_test.sh +++ /dev/null @@ -1,59 +0,0 @@ -#!/usr/bin/env bash - -. ${BUILDPACK_TEST_RUNNER_HOME}/lib/test_utils.sh - -testCompile() { - loadFixture "Aptfile" - - compile - - assertCapturedSuccess - - assertCaptured "Fetching .debs for s3cmd" - assertCaptured "Installing s3cmd_" - assertCaptured "Fetching .debs for wget" - assertCaptured "Installing wget_" -} - -testStackChange() { - loadFixture "Aptfile" - - #Set the cached STACK value to a non-existent stack, so it is guaranteed to change. - mkdir -p "$CACHE_DIR/.apt/" - echo "cedar-10" > "$CACHE_DIR/.apt/STACK" - - #Load the Aptfile into the cache, to exclusively test the stack changes - mkdir -p "$CACHE_DIR/apt/cache" - cp $BUILD_DIR/Aptfile "$CACHE_DIR/apt/cache" - - compile - - assertCapturedSuccess - - assertCaptured "Detected Aptfile or Stack changes, flushing cache" -} - -testStackNoChange() { - loadFixture "Aptfile" - - #Load the Aptfile into the cache, to exclusively test the stack changes - mkdir -p "$CACHE_DIR/apt/cache" - cp $BUILD_DIR/Aptfile "$CACHE_DIR/apt/cache" - - compile - - assertCaptured "Reusing cache" -} - -testStackCached() { - loadFixture "Aptfile" - - compile - assertCapturedSuccess - - assertTrue 'STACK not cached' "[ -e $CACHE_DIR/.apt/STACK ]" -} - -loadFixture() { - cp -a $BUILDPACK_HOME/test/fixtures/$1/. 
${BUILD_DIR} -} \ No newline at end of file diff --git a/test/fixtures/Aptfile/Aptfile b/test/fixtures/Aptfile/Aptfile deleted file mode 100644 index b24b956..0000000 --- a/test/fixtures/Aptfile/Aptfile +++ /dev/null @@ -1,3 +0,0 @@ -# Test comment -s3cmd -wget \ No newline at end of file diff --git a/test/fixtures/custom-package-url-heroku-20/Aptfile b/test/fixtures/custom-package-url-heroku-20/Aptfile new file mode 100644 index 0000000..0a2a322 --- /dev/null +++ b/test/fixtures/custom-package-url-heroku-20/Aptfile @@ -0,0 +1 @@ +https://github.com/wkhtmltopdf/packaging/releases/download/0.12.6-1/wkhtmltox_0.12.6-1.focal_amd64.deb diff --git a/test/fixtures/custom-package-url-heroku-22/Aptfile b/test/fixtures/custom-package-url-heroku-22/Aptfile new file mode 100644 index 0000000..3808ef9 --- /dev/null +++ b/test/fixtures/custom-package-url-heroku-22/Aptfile @@ -0,0 +1 @@ +https://github.com/wkhtmltopdf/packaging/releases/download/0.12.6.1-2/wkhtmltox_0.12.6.1-2.jammy_amd64.deb diff --git a/test/fixtures/custom-package-url-heroku-24/Aptfile b/test/fixtures/custom-package-url-heroku-24/Aptfile new file mode 100644 index 0000000..dc351b5 --- /dev/null +++ b/test/fixtures/custom-package-url-heroku-24/Aptfile @@ -0,0 +1,2 @@ +# no noble package for wkhtmltopdf yet, so using jammy package +https://github.com/wkhtmltopdf/packaging/releases/download/0.12.6.1-2/wkhtmltox_0.12.6.1-2.jammy_amd64.deb diff --git a/test/fixtures/custom-repository-heroku-20/Aptfile b/test/fixtures/custom-repository-heroku-20/Aptfile new file mode 100644 index 0000000..8136ade --- /dev/null +++ b/test/fixtures/custom-repository-heroku-20/Aptfile @@ -0,0 +1,2 @@ +:repo:deb http://us.archive.ubuntu.com/ubuntu/ focal multiverse +fasttracker2 diff --git a/test/fixtures/custom-repository-heroku-22/Aptfile b/test/fixtures/custom-repository-heroku-22/Aptfile new file mode 100644 index 0000000..e595b46 --- /dev/null +++ b/test/fixtures/custom-repository-heroku-22/Aptfile @@ -0,0 +1,2 @@ +:repo:deb http://us.archive.ubuntu.com/ubuntu/ jammy multiverse +fasttracker2 diff --git a/test/fixtures/custom-repository-heroku-24/Aptfile b/test/fixtures/custom-repository-heroku-24/Aptfile new file mode 100644 index 0000000..6f33d3e --- /dev/null +++ b/test/fixtures/custom-repository-heroku-24/Aptfile @@ -0,0 +1,2 @@ +:repo:deb http://us.archive.ubuntu.com/ubuntu/ noble multiverse +fasttracker2 diff --git a/test/fixtures/custom-repository-no-packages/Aptfile b/test/fixtures/custom-repository-no-packages/Aptfile new file mode 100644 index 0000000..003c0c6 --- /dev/null +++ b/test/fixtures/custom-repository-no-packages/Aptfile @@ -0,0 +1 @@ +:repo:deb http://us.archive.ubuntu.com/ubuntu/ jammy multiverse diff --git a/test/fixtures/empty/Aptfile b/test/fixtures/empty/Aptfile new file mode 100644 index 0000000..e69de29 diff --git a/test/fixtures/only-comments/Aptfile b/test/fixtures/only-comments/Aptfile new file mode 100644 index 0000000..349c538 --- /dev/null +++ b/test/fixtures/only-comments/Aptfile @@ -0,0 +1,4 @@ +# no packages + # only comments + +# and whitespace diff --git a/test/fixtures/package-names/Aptfile b/test/fixtures/package-names/Aptfile new file mode 100644 index 0000000..15fa750 --- /dev/null +++ b/test/fixtures/package-names/Aptfile @@ -0,0 +1,10 @@ +# single package +xmlsec1 + +# globbed package +mysql-client-* + +# multiple packages on single line +s3cmd wget + + # comment with bad indent diff --git a/test/run b/test/run new file mode 100755 index 0000000..243237e --- /dev/null +++ b/test/run @@ -0,0 +1,171 @@ 
+#!/usr/bin/env bash + +testCompilePackageNames() { + compile "package-names" + assertCaptured "Updating apt caches" + assertCaptured "Fetching .debs for xmlsec1" + assertCaptured "Fetching .debs for s3cmd wget" + assertCaptured "Fetching .debs for mysql-client-*" + assertCaptured "Installing xmlsec1" + assertCaptured "Installing s3cmd" + assertCaptured "Installing wget" + assertCaptured "Installing mysql-client" + assertCaptured "Installing mysql-client-core" + assertCaptured "Writing profile script" + assertCaptured "Rewrite package-config files" + assertCapturedSuccess +} + +testReportPackageNames() { + report "package-names" + assertCaptured "packages: \"mysql-client-*,s3cmd,wget,xmlsec1\"" + assertNotCaptured "custom_packages" + assertNotCaptured "custom_repositories" + assertCapturedSuccess +} + +testCompileCustomPackageUrl() { + declare -A download_urls=( + [heroku-20]="https://github.com/wkhtmltopdf/packaging/releases/download/0.12.6-1/wkhtmltox_0.12.6-1.focal_amd64.deb" + [heroku-22]="https://github.com/wkhtmltopdf/packaging/releases/download/0.12.6.1-2/wkhtmltox_0.12.6.1-2.jammy_amd64.deb" + # no noble package for wkhtmltopdf yet, so using jammy package + [heroku-24]="https://github.com/wkhtmltopdf/packaging/releases/download/0.12.6.1-2/wkhtmltox_0.12.6.1-2.jammy_amd64.deb" + ) + compile "custom-package-url-$STACK" + assertCaptured "Updating apt caches" + assertCaptured "Fetching ${download_urls[$STACK]}" + assertCaptured "Installing wkhtmltox" + assertCaptured "Writing profile script" + assertCaptured "Rewrite package-config files" + assertCapturedSuccess +} + +testReportCustomPackageUrl() { + declare -A download_urls=( + [heroku-20]="https://github.com/wkhtmltopdf/packaging/releases/download/0.12.6-1/wkhtmltox_0.12.6-1.focal_amd64.deb" + [heroku-22]="https://github.com/wkhtmltopdf/packaging/releases/download/0.12.6.1-2/wkhtmltox_0.12.6.1-2.jammy_amd64.deb" + # no noble package for wkhtmltopdf yet, so using jammy package + [heroku-24]="https://github.com/wkhtmltopdf/packaging/releases/download/0.12.6.1-2/wkhtmltox_0.12.6.1-2.jammy_amd64.deb" + ) + report "custom-package-url-$STACK" + assertNotCaptured "^packages" + assertCaptured "custom_packages: \"${download_urls[$STACK]}\"" + assertNotCaptured "custom_repositories" + assertCapturedSuccess +} + +testCompileCustomRepository() { + declare -A ubuntu_release_names=( + [heroku-20]="focal" + [heroku-22]="jammy" + [heroku-24]="noble" + ) + compile "custom-repository-$STACK" + assertCaptured "Adding custom repositories" + assertCaptured "Updating apt caches" + assertCaptured "http://us.archive.ubuntu.com/ubuntu ${ubuntu_release_names[$STACK]}/multiverse amd64 Packages" + assertCaptured "Fetching .debs for fasttracker2" + assertCaptured "Installing fasttracker2" + assertCaptured "Writing profile script" + assertCaptured "Rewrite package-config files" + assertCapturedSuccess +} + +testReportCustomRepository() { + declare -A ubuntu_release_names=( + [heroku-20]="focal" + [heroku-22]="jammy" + [heroku-24]="noble" + ) + report "custom-repository-$STACK" + assertCaptured "packages: \"fasttracker2\"" + assertNotCaptured "custom_packages" + assertCaptured "custom_repositories: \"deb http://us.archive.ubuntu.com/ubuntu/ ${ubuntu_release_names[$STACK]} multiverse\"" + assertCapturedSuccess +} + +testCompileEmpty() { + compile "empty" + assertCaptured "You have no packages listed in your Aptfile" + assertNotCaptured "Updating apt caches" + assertCapturedSuccess +} + +testReportEmpty() { + report "empty" + assertNotCaptured "^packages" + 
assertNotCaptured "custom_packages" + assertNotCaptured "custom_repositories" + assertCapturedSuccess +} + +testCompileOnlyComments() { + compile "only-comments" + assertCaptured "You have no packages listed in your Aptfile" + assertNotCaptured "Updating apt caches" + assertCapturedSuccess +} + +testReportOnlyComments() { + report "only-comments" + assertNotCaptured "^packages" + assertNotCaptured "custom_packages" + assertNotCaptured "custom_repositories" + assertCapturedSuccess +} + +testCompileCustomRepositoryNoPackages() { + compile "custom-repository-no-packages" + assertCaptured "You have no packages listed in your Aptfile" + assertNotCaptured "Updating apt caches" + assertCapturedSuccess +} + +testReportCustomRepositoryNoPackages() { + report "custom-repository-no-packages" + assertNotCaptured "^packages" + assertNotCaptured "custom_packages" + assertCaptured "custom_repositories: \"deb http://us.archive.ubuntu.com/ubuntu/ jammy multiverse\"" + assertCapturedSuccess +} + +pushd "$(dirname 0)" >/dev/null || exit 1 +popd >/dev/null || exit 1 + +source "$(pwd)"/test/utils + +compile() { + default_process_types_cleanup + bp_dir=$(mktmpdir) + compile_dir=$(mktmpdir) + cp -a "$(pwd)"/* "${bp_dir}" + cp -a "${bp_dir}"/test/fixtures/"$1"/. "${compile_dir}" + capture "${bp_dir}"/bin/compile "${compile_dir}" "${2:-$(mktmpdir)}" "$3" +} + +report() { + default_process_types_cleanup + compile_dir=${1:-$(mktmpdir)} + cache_dir=${2:-$(mktmpdir)} + env_dir=${3:-$(mktmpdir)} + bp_dir=$(mktmpdir) + cp -a "$(pwd)"/* "${bp_dir}" + cp -a "${bp_dir}"/test/fixtures/"$1"/. "${compile_dir}" + capture "${bp_dir}"/bin/report "${compile_dir}" "${cache_dir}" "${env_dir}" +} + +mktmpdir() { + dir=$(mktemp -t testXXXXX) + rm -rf "$dir" + mkdir "$dir" + echo "$dir" +} + +default_process_types_cleanup() { + file="/tmp/default_process_types" + if [ -f "$file" ]; then + rm "$file" + fi +} + +source "$(pwd)"/test/shunit2 diff --git a/test/shunit2 b/test/shunit2 new file mode 100644 index 0000000..6239683 --- /dev/null +++ b/test/shunit2 @@ -0,0 +1,1343 @@ +#! /bin/sh +# vim:et:ft=sh:sts=2:sw=2 +# +# Copyright 2008-2020 Kate Ward. All Rights Reserved. +# Released under the Apache 2.0 license. +# http://www.apache.org/licenses/LICENSE-2.0 +# +# shUnit2 -- Unit testing framework for Unix shell scripts. +# https://github.com/kward/shunit2 +# +# Author: kate.ward@forestent.com (Kate Ward) +# +# shUnit2 is a xUnit based unit test framework for Bourne shell scripts. It is +# based on the popular JUnit unit testing framework for Java. +# +# $() are not fully portable (POSIX != portable). +# shellcheck disable=SC2006 +# expr may be antiquated, but it is the only solution in some cases. +# shellcheck disable=SC2003 + +# Return if shunit2 already loaded. +command [ -n "${SHUNIT_VERSION:-}" ] && exit 0 +SHUNIT_VERSION='2.1.8' + +# Return values that scripts can use. +SHUNIT_TRUE=0 +SHUNIT_FALSE=1 +SHUNIT_ERROR=2 + +# Logging functions. +_shunit_warn() { + ${__SHUNIT_CMD_ECHO_ESC} \ + "${__shunit_ansi_yellow}shunit2:WARN${__shunit_ansi_none} $*" >&2 +} +_shunit_error() { + ${__SHUNIT_CMD_ECHO_ESC} \ + "${__shunit_ansi_red}shunit2:ERROR${__shunit_ansi_none} $*" >&2 +} +_shunit_fatal() { + ${__SHUNIT_CMD_ECHO_ESC} \ + "${__shunit_ansi_red}shunit2:FATAL${__shunit_ansi_none} $*" >&2 + exit ${SHUNIT_ERROR} +} + +# Determine some reasonable command defaults. 
+__SHUNIT_CMD_ECHO_ESC='echo -e' +# shellcheck disable=SC2039 +command [ "`echo -e test`" = '-e test' ] && __SHUNIT_CMD_ECHO_ESC='echo' + +__SHUNIT_UNAME_S=`uname -s` +case "${__SHUNIT_UNAME_S}" in + BSD) __SHUNIT_CMD_EXPR='gexpr' ;; + *) __SHUNIT_CMD_EXPR='expr' ;; +esac +__SHUNIT_CMD_TPUT='tput' + +# Commands a user can override if needed. +SHUNIT_CMD_EXPR=${SHUNIT_CMD_EXPR:-${__SHUNIT_CMD_EXPR}} +SHUNIT_CMD_TPUT=${SHUNIT_CMD_TPUT:-${__SHUNIT_CMD_TPUT}} + +# Enable color output. Options are 'never', 'always', or 'auto'. +SHUNIT_COLOR=${SHUNIT_COLOR:-auto} + +# Specific shell checks. +if command [ -n "${ZSH_VERSION:-}" ]; then + setopt |grep "^shwordsplit$" >/dev/null + if command [ $? -ne ${SHUNIT_TRUE} ]; then + _shunit_fatal 'zsh shwordsplit option is required for proper operation' + fi + if command [ -z "${SHUNIT_PARENT:-}" ]; then + _shunit_fatal "zsh does not pass \$0 through properly. please declare \ +\"SHUNIT_PARENT=\$0\" before calling shUnit2" + fi +fi + +# +# Constants +# + +__SHUNIT_MODE_SOURCED='sourced' +__SHUNIT_MODE_STANDALONE='standalone' +__SHUNIT_PARENT=${SHUNIT_PARENT:-$0} + +# User provided test prefix to display in front of the name of the test being +# executed. Define by setting the SHUNIT_TEST_PREFIX variable. +__SHUNIT_TEST_PREFIX=${SHUNIT_TEST_PREFIX:-} + +# ANSI colors. +__SHUNIT_ANSI_NONE='\033[0m' +__SHUNIT_ANSI_RED='\033[1;31m' +__SHUNIT_ANSI_GREEN='\033[1;32m' +__SHUNIT_ANSI_YELLOW='\033[1;33m' +__SHUNIT_ANSI_CYAN='\033[1;36m' + +# Set the constants readonly. +__shunit_constants=`set |grep '^__SHUNIT_' |cut -d= -f1` +echo "${__shunit_constants}" |grep '^Binary file' >/dev/null && \ + __shunit_constants=`set |grep -a '^__SHUNIT_' |cut -d= -f1` +for __shunit_const in ${__shunit_constants}; do + if command [ -z "${ZSH_VERSION:-}" ]; then + readonly "${__shunit_const}" + else + case ${ZSH_VERSION} in + [123].*) readonly "${__shunit_const}" ;; + *) readonly -g "${__shunit_const}" # Declare readonly constants globally. + esac + fi +done +unset __shunit_const __shunit_constants + +# +# Internal variables. +# + +# Variables. +__shunit_lineno='' # Line number of executed test. +__shunit_mode=${__SHUNIT_MODE_SOURCED} # Operating mode. +__shunit_reportGenerated=${SHUNIT_FALSE} # Is report generated. +__shunit_script='' # Filename of unittest script (standalone mode). +__shunit_skip=${SHUNIT_FALSE} # Is skipping enabled. +__shunit_suite='' # Suite of tests to execute. +__shunit_clean=${SHUNIT_FALSE} # _shunit_cleanup() was already called. + +# ANSI colors (populated by _shunit_configureColor()). +__shunit_ansi_none='' +__shunit_ansi_red='' +__shunit_ansi_green='' +__shunit_ansi_yellow='' +__shunit_ansi_cyan='' + +# Counts of tests. +__shunit_testSuccess=${SHUNIT_TRUE} +__shunit_testsTotal=0 +__shunit_testsPassed=0 +__shunit_testsFailed=0 + +# Counts of asserts. +__shunit_assertsTotal=0 +__shunit_assertsPassed=0 +__shunit_assertsFailed=0 +__shunit_assertsSkipped=0 + +# +# Macros. +# + +# shellcheck disable=SC2016,SC2089 +_SHUNIT_LINENO_='eval __shunit_lineno=""; if command [ "${1:-}" = "--lineno" ]; then command [ -n "$2" ] && __shunit_lineno="[$2] "; shift 2; fi' + +#----------------------------------------------------------------------------- +# Assertion functions. +# + +# Assert that two values are equal to one another. 
+# +# Args: +# message: string: failure message [optional] +# expected: string: expected value +# actual: string: actual value +# Returns: +# integer: success (TRUE/FALSE/ERROR constant) +assertEquals() { + # shellcheck disable=SC2090 + ${_SHUNIT_LINENO_} + if command [ $# -lt 2 -o $# -gt 3 ]; then + _shunit_error "assertEquals() requires two or three arguments; $# given" + _shunit_assertFail + return ${SHUNIT_ERROR} + fi + _shunit_shouldSkip && return ${SHUNIT_TRUE} + + shunit_message_=${__shunit_lineno} + if command [ $# -eq 3 ]; then + shunit_message_="${shunit_message_}$1" + shift + fi + shunit_expected_=$1 + shunit_actual_=$2 + + shunit_return=${SHUNIT_TRUE} + if command [ "${shunit_expected_}" = "${shunit_actual_}" ]; then + _shunit_assertPass + else + failNotEquals "${shunit_message_}" "${shunit_expected_}" "${shunit_actual_}" + shunit_return=${SHUNIT_FALSE} + fi + + unset shunit_message_ shunit_expected_ shunit_actual_ + return ${shunit_return} +} +# shellcheck disable=SC2016,SC2034 +_ASSERT_EQUALS_='eval assertEquals --lineno "${LINENO:-}"' + +# Assert that two values are not equal to one another. +# +# Args: +# message: string: failure message [optional] +# expected: string: expected value +# actual: string: actual value +# Returns: +# integer: success (TRUE/FALSE/ERROR constant) +assertNotEquals() { + # shellcheck disable=SC2090 + ${_SHUNIT_LINENO_} + if command [ $# -lt 2 -o $# -gt 3 ]; then + _shunit_error "assertNotEquals() requires two or three arguments; $# given" + _shunit_assertFail + return ${SHUNIT_ERROR} + fi + _shunit_shouldSkip && return ${SHUNIT_TRUE} + + shunit_message_=${__shunit_lineno} + if command [ $# -eq 3 ]; then + shunit_message_="${shunit_message_}$1" + shift + fi + shunit_expected_=$1 + shunit_actual_=$2 + + shunit_return=${SHUNIT_TRUE} + if command [ "${shunit_expected_}" != "${shunit_actual_}" ]; then + _shunit_assertPass + else + failSame "${shunit_message_}" "${shunit_expected_}" "${shunit_actual_}" + shunit_return=${SHUNIT_FALSE} + fi + + unset shunit_message_ shunit_expected_ shunit_actual_ + return ${shunit_return} +} +# shellcheck disable=SC2016,SC2034 +_ASSERT_NOT_EQUALS_='eval assertNotEquals --lineno "${LINENO:-}"' + +# Assert that a container contains a content. +# +# Args: +# message: string: failure message [optional] +# container: string: container to analyze +# content: string: content to find +# Returns: +# integer: success (TRUE/FALSE/ERROR constant) +assertContains() { + # shellcheck disable=SC2090 + ${_SHUNIT_LINENO_} + if command [ $# -lt 2 -o $# -gt 3 ]; then + _shunit_error "assertContains() requires two or three arguments; $# given" + _shunit_assertFail + return ${SHUNIT_ERROR} + fi + _shunit_shouldSkip && return ${SHUNIT_TRUE} + + shunit_message_=${__shunit_lineno} + if command [ $# -eq 3 ]; then + shunit_message_="${shunit_message_}$1" + shift + fi + shunit_container_=$1 + shunit_content_=$2 + + shunit_return=${SHUNIT_TRUE} + if echo "$shunit_container_" | grep -F -- "$shunit_content_" > /dev/null; then + _shunit_assertPass + else + failNotFound "${shunit_message_}" "${shunit_content_}" + shunit_return=${SHUNIT_FALSE} + fi + + unset shunit_message_ shunit_container_ shunit_content_ + return ${shunit_return} +} +# shellcheck disable=SC2016,SC2034 +_ASSERT_CONTAINS_='eval assertContains --lineno "${LINENO:-}"' + +# Assert that a container does not contain a content. 
+# +# Args: +# message: string: failure message [optional] +# container: string: container to analyze +# content: string: content to look for +# Returns: +# integer: success (TRUE/FALSE/ERROR constant) +assertNotContains() { + # shellcheck disable=SC2090 + ${_SHUNIT_LINENO_} + if command [ $# -lt 2 -o $# -gt 3 ]; then + _shunit_error "assertNotContains() requires two or three arguments; $# given" + _shunit_assertFail + return ${SHUNIT_ERROR} + fi + _shunit_shouldSkip && return ${SHUNIT_TRUE} + + shunit_message_=${__shunit_lineno} + if command [ $# -eq 3 ]; then + shunit_message_="${shunit_message_}$1" + shift + fi + shunit_container_=$1 + shunit_content_=$2 + + shunit_return=${SHUNIT_TRUE} + if echo "$shunit_container_" | grep -F -- "$shunit_content_" > /dev/null; then + failFound "${shunit_message_}" "${shunit_content_}" + shunit_return=${SHUNIT_FALSE} + else + _shunit_assertPass + fi + + unset shunit_message_ shunit_container_ shunit_content_ + return ${shunit_return} +} +# shellcheck disable=SC2016,SC2034 +_ASSERT_NOT_CONTAINS_='eval assertNotContains --lineno "${LINENO:-}"' + +# Assert that a value is null (i.e. an empty string) +# +# Args: +# message: string: failure message [optional] +# actual: string: actual value +# Returns: +# integer: success (TRUE/FALSE/ERROR constant) +assertNull() { + # shellcheck disable=SC2090 + ${_SHUNIT_LINENO_} + if command [ $# -lt 1 -o $# -gt 2 ]; then + _shunit_error "assertNull() requires one or two arguments; $# given" + _shunit_assertFail + return ${SHUNIT_ERROR} + fi + _shunit_shouldSkip && return ${SHUNIT_TRUE} + + shunit_message_=${__shunit_lineno} + if command [ $# -eq 2 ]; then + shunit_message_="${shunit_message_}$1" + shift + fi + assertTrue "${shunit_message_}" "[ -z '$1' ]" + shunit_return=$? + + unset shunit_message_ + return ${shunit_return} +} +# shellcheck disable=SC2016,SC2034 +_ASSERT_NULL_='eval assertNull --lineno "${LINENO:-}"' + +# Assert that a value is not null (i.e. a non-empty string) +# +# Args: +# message: string: failure message [optional] +# actual: string: actual value +# Returns: +# integer: success (TRUE/FALSE/ERROR constant) +assertNotNull() { + # shellcheck disable=SC2090 + ${_SHUNIT_LINENO_} + if command [ $# -gt 2 ]; then # allowing 0 arguments as $1 might actually be null + _shunit_error "assertNotNull() requires one or two arguments; $# given" + _shunit_assertFail + return ${SHUNIT_ERROR} + fi + _shunit_shouldSkip && return ${SHUNIT_TRUE} + + shunit_message_=${__shunit_lineno} + if command [ $# -eq 2 ]; then + shunit_message_="${shunit_message_}$1" + shift + fi + shunit_actual_=`_shunit_escapeCharactersInString "${1:-}"` + test -n "${shunit_actual_}" + assertTrue "${shunit_message_}" $? + shunit_return=$? + + unset shunit_actual_ shunit_message_ + return ${shunit_return} +} +# shellcheck disable=SC2016,SC2034 +_ASSERT_NOT_NULL_='eval assertNotNull --lineno "${LINENO:-}"' + +# Assert that two values are the same (i.e. equal to one another). 
+# +# Args: +# message: string: failure message [optional] +# expected: string: expected value +# actual: string: actual value +# Returns: +# integer: success (TRUE/FALSE/ERROR constant) +assertSame() { + # shellcheck disable=SC2090 + ${_SHUNIT_LINENO_} + if command [ $# -lt 2 -o $# -gt 3 ]; then + _shunit_error "assertSame() requires two or three arguments; $# given" + _shunit_assertFail + return ${SHUNIT_ERROR} + fi + _shunit_shouldSkip && return ${SHUNIT_TRUE} + + shunit_message_=${__shunit_lineno} + if command [ $# -eq 3 ]; then + shunit_message_="${shunit_message_}$1" + shift + fi + assertEquals "${shunit_message_}" "$1" "$2" + shunit_return=$? + + unset shunit_message_ + return ${shunit_return} +} +# shellcheck disable=SC2016,SC2034 +_ASSERT_SAME_='eval assertSame --lineno "${LINENO:-}"' + +# Assert that two values are not the same (i.e. not equal to one another). +# +# Args: +# message: string: failure message [optional] +# expected: string: expected value +# actual: string: actual value +# Returns: +# integer: success (TRUE/FALSE/ERROR constant) +assertNotSame() { + # shellcheck disable=SC2090 + ${_SHUNIT_LINENO_} + if command [ $# -lt 2 -o $# -gt 3 ]; then + _shunit_error "assertNotSame() requires two or three arguments; $# given" + _shunit_assertFail + return ${SHUNIT_ERROR} + fi + _shunit_shouldSkip && return ${SHUNIT_TRUE} + + shunit_message_=${__shunit_lineno} + if command [ $# -eq 3 ]; then + shunit_message_="${shunit_message_:-}$1" + shift + fi + assertNotEquals "${shunit_message_}" "$1" "$2" + shunit_return=$? + + unset shunit_message_ + return ${shunit_return} +} +# shellcheck disable=SC2016,SC2034 +_ASSERT_NOT_SAME_='eval assertNotSame --lineno "${LINENO:-}"' + +# Assert that a value or shell test condition is true. +# +# In shell, a value of 0 is true and a non-zero value is false. Any integer +# value passed can thereby be tested. +# +# Shell supports much more complicated tests though, and a means to support +# them was needed. As such, this function tests that conditions are true or +# false through evaluation rather than just looking for a true or false. +# +# The following test will succeed: +# assertTrue 0 +# assertTrue "[ 34 -gt 23 ]" +# The following test will fail with a message: +# assertTrue 123 +# assertTrue "test failed" "[ -r '/non/existent/file' ]" +# +# Args: +# message: string: failure message [optional] +# condition: string: integer value or shell conditional statement +# Returns: +# integer: success (TRUE/FALSE/ERROR constant) +assertTrue() { + # shellcheck disable=SC2090 + ${_SHUNIT_LINENO_} + if command [ $# -lt 1 -o $# -gt 2 ]; then + _shunit_error "assertTrue() takes one or two arguments; $# given" + _shunit_assertFail + return ${SHUNIT_ERROR} + fi + _shunit_shouldSkip && return ${SHUNIT_TRUE} + + shunit_message_=${__shunit_lineno} + if command [ $# -eq 2 ]; then + shunit_message_="${shunit_message_}$1" + shift + fi + shunit_condition_=$1 + + # See if condition is an integer, i.e. a return value. + shunit_match_=`expr "${shunit_condition_}" : '\([0-9]*\)'` + shunit_return=${SHUNIT_TRUE} + if command [ -z "${shunit_condition_}" ]; then + # Null condition. + shunit_return=${SHUNIT_FALSE} + elif command [ -n "${shunit_match_}" -a "${shunit_condition_}" = "${shunit_match_}" ] + then + # Possible return value. Treating 0 as true, and non-zero as false. + command [ "${shunit_condition_}" -ne 0 ] && shunit_return=${SHUNIT_FALSE} + else + # Hopefully... a condition. + ( eval "${shunit_condition_}" ) >/dev/null 2>&1 + command [ $? 
-ne 0 ] && shunit_return=${SHUNIT_FALSE} + fi + + # Record the test. + if command [ ${shunit_return} -eq ${SHUNIT_TRUE} ]; then + _shunit_assertPass + else + _shunit_assertFail "${shunit_message_}" + fi + + unset shunit_message_ shunit_condition_ shunit_match_ + return ${shunit_return} +} +# shellcheck disable=SC2016,SC2034 +_ASSERT_TRUE_='eval assertTrue --lineno "${LINENO:-}"' + +# Assert that a value or shell test condition is false. +# +# In shell, a value of 0 is true and a non-zero value is false. Any integer +# value passed can thereby be tested. +# +# Shell supports much more complicated tests though, and a means to support +# them was needed. As such, this function tests that conditions are true or +# false through evaluation rather than just looking for a true or false. +# +# The following test will succeed: +# assertFalse 1 +# assertFalse "[ 'apples' = 'oranges' ]" +# The following test will fail with a message: +# assertFalse 0 +# assertFalse "test failed" "[ 1 -eq 1 -a 2 -eq 2 ]" +# +# Args: +# message: string: failure message [optional] +# condition: string: integer value or shell conditional statement +# Returns: +# integer: success (TRUE/FALSE/ERROR constant) +assertFalse() { + # shellcheck disable=SC2090 + ${_SHUNIT_LINENO_} + if command [ $# -lt 1 -o $# -gt 2 ]; then + _shunit_error "assertFalse() requires one or two arguments; $# given" + _shunit_assertFail + return ${SHUNIT_ERROR} + fi + _shunit_shouldSkip && return ${SHUNIT_TRUE} + + shunit_message_=${__shunit_lineno} + if command [ $# -eq 2 ]; then + shunit_message_="${shunit_message_}$1" + shift + fi + shunit_condition_=$1 + + # See if condition is an integer, i.e. a return value. + shunit_match_=`expr "${shunit_condition_}" : '\([0-9]*\)'` + shunit_return=${SHUNIT_TRUE} + if command [ -z "${shunit_condition_}" ]; then + # Null condition. + shunit_return=${SHUNIT_FALSE} + elif command [ -n "${shunit_match_}" -a "${shunit_condition_}" = "${shunit_match_}" ] + then + # Possible return value. Treating 0 as true, and non-zero as false. + command [ "${shunit_condition_}" -eq 0 ] && shunit_return=${SHUNIT_FALSE} + else + # Hopefully... a condition. + ( eval "${shunit_condition_}" ) >/dev/null 2>&1 + command [ $? -eq 0 ] && shunit_return=${SHUNIT_FALSE} + fi + + # Record the test. + if command [ "${shunit_return}" -eq "${SHUNIT_TRUE}" ]; then + _shunit_assertPass + else + _shunit_assertFail "${shunit_message_}" + fi + + unset shunit_message_ shunit_condition_ shunit_match_ + return "${shunit_return}" +} +# shellcheck disable=SC2016,SC2034 +_ASSERT_FALSE_='eval assertFalse --lineno "${LINENO:-}"' + +#----------------------------------------------------------------------------- +# Failure functions. +# + +# Records a test failure. +# +# Args: +# message: string: failure message [optional] +# Returns: +# integer: success (TRUE/FALSE/ERROR constant) +fail() { + # shellcheck disable=SC2090 + ${_SHUNIT_LINENO_} + if command [ $# -gt 1 ]; then + _shunit_error "fail() requires zero or one arguments; $# given" + return ${SHUNIT_ERROR} + fi + _shunit_shouldSkip && return ${SHUNIT_TRUE} + + shunit_message_=${__shunit_lineno} + if command [ $# -eq 1 ]; then + shunit_message_="${shunit_message_}$1" + shift + fi + + _shunit_assertFail "${shunit_message_}" + + unset shunit_message_ + return ${SHUNIT_FALSE} +} +# shellcheck disable=SC2016,SC2034 +_FAIL_='eval fail --lineno "${LINENO:-}"' + +# Records a test failure, stating two values were not equal. 
+# +# Args: +# message: string: failure message [optional] +# expected: string: expected value +# actual: string: actual value +# Returns: +# integer: success (TRUE/FALSE/ERROR constant) +failNotEquals() { + # shellcheck disable=SC2090 + ${_SHUNIT_LINENO_} + if command [ $# -lt 2 -o $# -gt 3 ]; then + _shunit_error "failNotEquals() requires one or two arguments; $# given" + return ${SHUNIT_ERROR} + fi + _shunit_shouldSkip && return ${SHUNIT_TRUE} + + shunit_message_=${__shunit_lineno} + if command [ $# -eq 3 ]; then + shunit_message_="${shunit_message_}$1" + shift + fi + shunit_expected_=$1 + shunit_actual_=$2 + + shunit_message_=${shunit_message_%% } + _shunit_assertFail "${shunit_message_:+${shunit_message_} }expected:<${shunit_expected_}> but was:<${shunit_actual_}>" + + unset shunit_message_ shunit_expected_ shunit_actual_ + return ${SHUNIT_FALSE} +} +# shellcheck disable=SC2016,SC2034 +_FAIL_NOT_EQUALS_='eval failNotEquals --lineno "${LINENO:-}"' + +# Records a test failure, stating a value was found. +# +# Args: +# message: string: failure message [optional] +# content: string: found value +# Returns: +# integer: success (TRUE/FALSE/ERROR constant) +failFound() { + # shellcheck disable=SC2090 + ${_SHUNIT_LINENO_} + if command [ $# -lt 1 -o $# -gt 2 ]; then + _shunit_error "failFound() requires one or two arguments; $# given" + return ${SHUNIT_ERROR} + fi + _shunit_shouldSkip && return ${SHUNIT_TRUE} + + shunit_message_=${__shunit_lineno} + if command [ $# -eq 2 ]; then + shunit_message_="${shunit_message_}$1" + shift + fi + + shunit_message_=${shunit_message_%% } + _shunit_assertFail "${shunit_message_:+${shunit_message_} }Found" + + unset shunit_message_ + return ${SHUNIT_FALSE} +} +# shellcheck disable=SC2016,SC2034 +_FAIL_FOUND_='eval failFound --lineno "${LINENO:-}"' + +# Records a test failure, stating a content was not found. +# +# Args: +# message: string: failure message [optional] +# content: string: content not found +# Returns: +# integer: success (TRUE/FALSE/ERROR constant) +failNotFound() { + # shellcheck disable=SC2090 + ${_SHUNIT_LINENO_} + if command [ $# -lt 1 -o $# -gt 2 ]; then + _shunit_error "failNotFound() requires one or two arguments; $# given" + return ${SHUNIT_ERROR} + fi + _shunit_shouldSkip && return ${SHUNIT_TRUE} + + shunit_message_=${__shunit_lineno} + if command [ $# -eq 2 ]; then + shunit_message_="${shunit_message_}$1" + shift + fi + shunit_content_=$1 + + shunit_message_=${shunit_message_%% } + _shunit_assertFail "${shunit_message_:+${shunit_message_} }Not found:<${shunit_content_}>" + + unset shunit_message_ shunit_content_ + return ${SHUNIT_FALSE} +} +# shellcheck disable=SC2016,SC2034 +_FAIL_NOT_FOUND_='eval failNotFound --lineno "${LINENO:-}"' + +# Records a test failure, stating two values should have been the same. 
+# +# Args: +# message: string: failure message [optional] +# expected: string: expected value +# actual: string: actual value +# Returns: +# integer: success (TRUE/FALSE/ERROR constant) +failSame() +{ + # shellcheck disable=SC2090 + ${_SHUNIT_LINENO_} + if command [ $# -lt 2 -o $# -gt 3 ]; then + _shunit_error "failSame() requires two or three arguments; $# given" + return ${SHUNIT_ERROR} + fi + _shunit_shouldSkip && return ${SHUNIT_TRUE} + + shunit_message_=${__shunit_lineno} + if command [ $# -eq 3 ]; then + shunit_message_="${shunit_message_}$1" + shift + fi + + shunit_message_=${shunit_message_%% } + _shunit_assertFail "${shunit_message_:+${shunit_message_} }expected not same" + + unset shunit_message_ + return ${SHUNIT_FALSE} +} +# shellcheck disable=SC2016,SC2034 +_FAIL_SAME_='eval failSame --lineno "${LINENO:-}"' + +# Records a test failure, stating two values were not equal. +# +# This is functionally equivalent to calling failNotEquals(). +# +# Args: +# message: string: failure message [optional] +# expected: string: expected value +# actual: string: actual value +# Returns: +# integer: success (TRUE/FALSE/ERROR constant) +failNotSame() { + # shellcheck disable=SC2090 + ${_SHUNIT_LINENO_} + if command [ $# -lt 2 -o $# -gt 3 ]; then + _shunit_error "failNotSame() requires one or two arguments; $# given" + return ${SHUNIT_ERROR} + fi + _shunit_shouldSkip && return ${SHUNIT_TRUE} + + shunit_message_=${__shunit_lineno} + if command [ $# -eq 3 ]; then + shunit_message_="${shunit_message_}$1" + shift + fi + failNotEquals "${shunit_message_}" "$1" "$2" + shunit_return=$? + + unset shunit_message_ + return ${shunit_return} +} +# shellcheck disable=SC2016,SC2034 +_FAIL_NOT_SAME_='eval failNotSame --lineno "${LINENO:-}"' + +#----------------------------------------------------------------------------- +# Skipping functions. +# + +# Force remaining assert and fail functions to be "skipped". +# +# This function forces the remaining assert and fail functions to be "skipped", +# i.e. they will have no effect. Each function skipped will be recorded so that +# the total of asserts and fails will not be altered. +# +# Args: +# None +startSkipping() { __shunit_skip=${SHUNIT_TRUE}; } + +# Resume the normal recording behavior of assert and fail calls. +# +# Args: +# None +endSkipping() { __shunit_skip=${SHUNIT_FALSE}; } + +# Returns the state of assert and fail call skipping. +# +# Args: +# None +# Returns: +# boolean: (TRUE/FALSE constant) +isSkipping() { return ${__shunit_skip}; } + +#----------------------------------------------------------------------------- +# Suite functions. +# + +# Stub. This function should contains all unit test calls to be made. +# +# DEPRECATED (as of 2.1.0) +# +# This function can be optionally overridden by the user in their test suite. +# +# If this function exists, it will be called when shunit2 is sourced. If it +# does not exist, shunit2 will search the parent script for all functions +# beginning with the word 'test', and they will be added dynamically to the +# test suite. +# +# This function should be overridden by the user in their unit test suite. +# Note: see _shunit_mktempFunc() for actual implementation +# +# Args: +# None +#suite() { :; } # DO NOT UNCOMMENT THIS FUNCTION + +# Adds a function name to the list of tests schedule for execution. +# +# This function should only be called from within the suite() function. 
+# +# Args: +# function: string: name of a function to add to current unit test suite +suite_addTest() { + shunit_func_=${1:-} + + __shunit_suite="${__shunit_suite:+${__shunit_suite} }${shunit_func_}" + __shunit_testsTotal=`expr ${__shunit_testsTotal} + 1` + + unset shunit_func_ +} + +# Stub. This function will be called once before any tests are run. +# +# Common one-time environment preparation tasks shared by all tests can be +# defined here. +# +# This function should be overridden by the user in their unit test suite. +# Note: see _shunit_mktempFunc() for actual implementation +# +# Args: +# None +#oneTimeSetUp() { :; } # DO NOT UNCOMMENT THIS FUNCTION + +# Stub. This function will be called once after all tests are finished. +# +# Common one-time environment cleanup tasks shared by all tests can be defined +# here. +# +# This function should be overridden by the user in their unit test suite. +# Note: see _shunit_mktempFunc() for actual implementation +# +# Args: +# None +#oneTimeTearDown() { :; } # DO NOT UNCOMMENT THIS FUNCTION + +# Stub. This function will be called before each test is run. +# +# Common environment preparation tasks shared by all tests can be defined here. +# +# This function should be overridden by the user in their unit test suite. +# Note: see _shunit_mktempFunc() for actual implementation +# +# Args: +# None +#setUp() { :; } # DO NOT UNCOMMENT THIS FUNCTION + +# Note: see _shunit_mktempFunc() for actual implementation +# Stub. This function will be called after each test is run. +# +# Common environment cleanup tasks shared by all tests can be defined here. +# +# This function should be overridden by the user in their unit test suite. +# Note: see _shunit_mktempFunc() for actual implementation +# +# Args: +# None +#tearDown() { :; } # DO NOT UNCOMMENT THIS FUNCTION + +#------------------------------------------------------------------------------ +# Internal shUnit2 functions. +# + +# Create a temporary directory to store various run-time files in. +# +# This function is a cross-platform temporary directory creation tool. Not all +# OSes have the `mktemp` function, so one is included here. +# +# Args: +# None +# Outputs: +# string: the temporary directory that was created +_shunit_mktempDir() { + # Try the standard `mktemp` function. + ( exec mktemp -dqt shunit.XXXXXX 2>/dev/null ) && return + + # The standard `mktemp` didn't work. Use our own. + # shellcheck disable=SC2039 + if command [ -r '/dev/urandom' -a -x '/usr/bin/od' ]; then + _shunit_random_=`/usr/bin/od -vAn -N4 -tx4 "${_shunit_file_}" +#! /bin/sh +exit ${SHUNIT_TRUE} +EOF + command chmod +x "${_shunit_file_}" + done + + unset _shunit_file_ +} + +# Final cleanup function to leave things as we found them. +# +# Besides removing the temporary directory, this function is in charge of the +# final exit code of the unit test. The exit code is based on how the script +# was ended (e.g. normal exit, or via Ctrl-C). +# +# Args: +# name: string: name of the trap called (specified when trap defined) +_shunit_cleanup() { + _shunit_name_=$1 + + case "${_shunit_name_}" in + EXIT) ;; + INT) _shunit_signal_=130 ;; # 2+128 + TERM) _shunit_signal_=143 ;; # 15+128 + *) + _shunit_error "unrecognized trap value (${_shunit_name_})" + _shunit_signal_=0 + ;; + esac + if command [ "${_shunit_name_}" != 'EXIT' ]; then + _shunit_warn "trapped and now handling the (${_shunit_name_}) signal" + fi + + # Do our work. + if command [ ${__shunit_clean} -eq ${SHUNIT_FALSE} ]; then + # Ensure tear downs are only called once. 
+ __shunit_clean=${SHUNIT_TRUE} + + tearDown + command [ $? -eq ${SHUNIT_TRUE} ] \ + || _shunit_warn "tearDown() returned non-zero return code." + oneTimeTearDown + command [ $? -eq ${SHUNIT_TRUE} ] \ + || _shunit_warn "oneTimeTearDown() returned non-zero return code." + + command rm -fr "${__shunit_tmpDir}" + fi + + if command [ "${_shunit_name_}" != 'EXIT' ]; then + # Handle all non-EXIT signals. + trap - 0 # Disable EXIT trap. + exit ${_shunit_signal_} + elif command [ ${__shunit_reportGenerated} -eq ${SHUNIT_FALSE} ]; then + _shunit_assertFail 'unknown failure encountered running a test' + _shunit_generateReport + exit ${SHUNIT_ERROR} + fi + + unset _shunit_name_ _shunit_signal_ +} + +# configureColor based on user color preference. +# +# Args: +# color: string: color mode (one of `always`, `auto`, or `none`). +_shunit_configureColor() { + _shunit_color_=${SHUNIT_FALSE} # By default, no color. + case $1 in + 'always') _shunit_color_=${SHUNIT_TRUE} ;; + 'auto') + command [ "`_shunit_colors`" -ge 8 ] && _shunit_color_=${SHUNIT_TRUE} + ;; + 'none') ;; + *) _shunit_fatal "unrecognized color option '$1'" ;; + esac + + case ${_shunit_color_} in + ${SHUNIT_TRUE}) + __shunit_ansi_none=${__SHUNIT_ANSI_NONE} + __shunit_ansi_red=${__SHUNIT_ANSI_RED} + __shunit_ansi_green=${__SHUNIT_ANSI_GREEN} + __shunit_ansi_yellow=${__SHUNIT_ANSI_YELLOW} + __shunit_ansi_cyan=${__SHUNIT_ANSI_CYAN} + ;; + ${SHUNIT_FALSE}) + __shunit_ansi_none='' + __shunit_ansi_red='' + __shunit_ansi_green='' + __shunit_ansi_yellow='' + __shunit_ansi_cyan='' + ;; + esac + + unset _shunit_color_ _shunit_tput_ +} + +# colors returns the number of supported colors for the TERM. +_shunit_colors() { + _shunit_tput_=`${SHUNIT_CMD_TPUT} colors 2>/dev/null` + if command [ $? -eq 0 ]; then + echo "${_shunit_tput_}" + else + echo 16 + fi + unset _shunit_tput_ +} + +# The actual running of the tests happens here. +# +# Args: +# None +_shunit_execSuite() { + for _shunit_test_ in ${__shunit_suite}; do + __shunit_testSuccess=${SHUNIT_TRUE} + + # Disable skipping. + endSkipping + + # Execute the per-test setup function. + setUp + command [ $? -eq ${SHUNIT_TRUE} ] \ + || _shunit_fatal "setup() returned non-zero return code." + + # Execute the test. + echo "${__SHUNIT_TEST_PREFIX}${_shunit_test_}" + eval "${_shunit_test_}" + if command [ $? -ne ${SHUNIT_TRUE} ]; then + _shunit_error "${_shunit_test_}() returned non-zero return code." + __shunit_testSuccess=${SHUNIT_ERROR} + _shunit_incFailedCount + fi + + # Execute the per-test tear-down function. + tearDown + command [ $? -eq ${SHUNIT_TRUE} ] \ + || _shunit_fatal "tearDown() returned non-zero return code." + + # Update stats. + if command [ ${__shunit_testSuccess} -eq ${SHUNIT_TRUE} ]; then + __shunit_testsPassed=`expr ${__shunit_testsPassed} + 1` + else + __shunit_testsFailed=`expr ${__shunit_testsFailed} + 1` + fi + done + + unset _shunit_test_ +} + +# Generates the user friendly report with appropriate OK/FAILED message. +# +# Args: +# None +# Output: +# string: the report of successful and failed tests, as well as totals. +_shunit_generateReport() { + command [ "${__shunit_reportGenerated}" -eq ${SHUNIT_TRUE} ] && return + + _shunit_ok_=${SHUNIT_TRUE} + + # If no exit code was provided, determine an appropriate one. 
+ command [ "${__shunit_testsFailed}" -gt 0 \ + -o ${__shunit_testSuccess} -eq ${SHUNIT_FALSE} ] \ + && _shunit_ok_=${SHUNIT_FALSE} + + echo + _shunit_msg_="Ran ${__shunit_ansi_cyan}${__shunit_testsTotal}${__shunit_ansi_none}" + if command [ "${__shunit_testsTotal}" -eq 1 ]; then + ${__SHUNIT_CMD_ECHO_ESC} "${_shunit_msg_} test." + else + ${__SHUNIT_CMD_ECHO_ESC} "${_shunit_msg_} tests." + fi + + if command [ ${_shunit_ok_} -eq ${SHUNIT_TRUE} ]; then + _shunit_msg_="${__shunit_ansi_green}OK${__shunit_ansi_none}" + command [ "${__shunit_assertsSkipped}" -gt 0 ] \ + && _shunit_msg_="${_shunit_msg_} (${__shunit_ansi_yellow}skipped=${__shunit_assertsSkipped}${__shunit_ansi_none})" + else + _shunit_msg_="${__shunit_ansi_red}FAILED${__shunit_ansi_none}" + _shunit_msg_="${_shunit_msg_} (${__shunit_ansi_red}failures=${__shunit_assertsFailed}${__shunit_ansi_none}" + command [ "${__shunit_assertsSkipped}" -gt 0 ] \ + && _shunit_msg_="${_shunit_msg_},${__shunit_ansi_yellow}skipped=${__shunit_assertsSkipped}${__shunit_ansi_none}" + _shunit_msg_="${_shunit_msg_})" + fi + + echo + ${__SHUNIT_CMD_ECHO_ESC} "${_shunit_msg_}" + __shunit_reportGenerated=${SHUNIT_TRUE} + + unset _shunit_msg_ _shunit_ok_ +} + +# Test for whether a function should be skipped. +# +# Args: +# None +# Returns: +# boolean: whether the test should be skipped (TRUE/FALSE constant) +_shunit_shouldSkip() { + command [ ${__shunit_skip} -eq ${SHUNIT_FALSE} ] && return ${SHUNIT_FALSE} + _shunit_assertSkip +} + +# Records a successful test. +# +# Args: +# None +_shunit_assertPass() { + __shunit_assertsPassed=`expr ${__shunit_assertsPassed} + 1` + __shunit_assertsTotal=`expr ${__shunit_assertsTotal} + 1` +} + +# Records a test failure. +# +# Args: +# message: string: failure message to provide user +_shunit_assertFail() { + __shunit_testSuccess=${SHUNIT_FALSE} + _shunit_incFailedCount + + \[ $# -gt 0 ] && ${__SHUNIT_CMD_ECHO_ESC} \ + "${__shunit_ansi_red}ASSERT:${__shunit_ansi_none}$*" +} + +# Increment the count of failed asserts. +# +# Args: +# none +_shunit_incFailedCount() { + __shunit_assertsFailed=`expr "${__shunit_assertsFailed}" + 1` + __shunit_assertsTotal=`expr "${__shunit_assertsTotal}" + 1` +} + + +# Records a skipped test. +# +# Args: +# None +_shunit_assertSkip() { + __shunit_assertsSkipped=`expr "${__shunit_assertsSkipped}" + 1` + __shunit_assertsTotal=`expr "${__shunit_assertsTotal}" + 1` +} + +# Prepare a script filename for sourcing. +# +# Args: +# script: string: path to a script to source +# Returns: +# string: filename prefixed with ./ (if necessary) +_shunit_prepForSourcing() { + _shunit_script_=$1 + case "${_shunit_script_}" in + /*|./*) echo "${_shunit_script_}" ;; + *) echo "./${_shunit_script_}" ;; + esac + unset _shunit_script_ +} + +# Escape a character in a string. +# +# Args: +# c: string: unescaped character +# s: string: to escape character in +# Returns: +# string: with escaped character(s) +_shunit_escapeCharInStr() { + command [ -n "$2" ] || return # No point in doing work on an empty string. + + # Note: using shorter variable names to prevent conflicts with + # _shunit_escapeCharactersInString(). + _shunit_c_=$1 + _shunit_s_=$2 + + # Escape the character. + # shellcheck disable=SC1003,SC2086 + echo ''${_shunit_s_}'' |command sed 's/\'${_shunit_c_}'/\\\'${_shunit_c_}'/g' + + unset _shunit_c_ _shunit_s_ +} + +# Escape a character in a string. 
+#
+# Args:
+#   str: string: to escape characters in
+# Returns:
+#   string: with escaped character(s)
+_shunit_escapeCharactersInString() {
+  command [ -n "$1" ] || return # No point in doing work on an empty string.
+
+  _shunit_str_=$1
+
+  # Note: using longer variable names to prevent conflicts with
+  # _shunit_escapeCharInStr().
+  for _shunit_char_ in '"' '$' "'" '`'; do
+    _shunit_str_=`_shunit_escapeCharInStr "${_shunit_char_}" "${_shunit_str_}"`
+  done
+
+  echo "${_shunit_str_}"
+  unset _shunit_char_ _shunit_str_
+}
+
+# Extract list of functions to run tests against.
+#
+# Args:
+#   script: string: name of script to extract functions from
+# Returns:
+#   string: of function names
+_shunit_extractTestFunctions() {
+  _shunit_script_=$1
+
+  # Extract the lines with test function names, strip off anything besides the
+  # function name, and output everything on a single line.
+  _shunit_regex_='^\s*((function test[A-Za-z0-9_-]*)|(test[A-Za-z0-9_-]* *\(\)))'
+  # shellcheck disable=SC2196
+  egrep "${_shunit_regex_}" "${_shunit_script_}" \
+  |command sed 's/^[^A-Za-z0-9_-]*//;s/^function //;s/\([A-Za-z0-9_-]*\).*/\1/g' \
+  |xargs
+
+  unset _shunit_regex_ _shunit_script_
+}
+
+#------------------------------------------------------------------------------
+# Main.
+#
+
+# Determine the operating mode.
+if command [ $# -eq 0 -o "${1:-}" = '--' ]; then
+  __shunit_script=${__SHUNIT_PARENT}
+  __shunit_mode=${__SHUNIT_MODE_SOURCED}
+else
+  __shunit_script=$1
+  command [ -r "${__shunit_script}" ] || \
+    _shunit_fatal "unable to read from ${__shunit_script}"
+  __shunit_mode=${__SHUNIT_MODE_STANDALONE}
+fi
+
+# Create a temporary storage location.
+__shunit_tmpDir=`_shunit_mktempDir`
+
+# Provide a public temporary directory for unit test scripts.
+# TODO(kward): document this.
+SHUNIT_TMPDIR="${__shunit_tmpDir}/tmp"
+command mkdir "${SHUNIT_TMPDIR}"
+
+# Setup traps to clean up after ourselves.
+trap '_shunit_cleanup EXIT' 0
+trap '_shunit_cleanup INT' 2
+trap '_shunit_cleanup TERM' 15
+
+# Create phantom functions to work around issues with Cygwin.
+_shunit_mktempFunc
+PATH="${__shunit_tmpDir}:${PATH}"
+
+# Make sure phantom functions are executable. This will bite if `/tmp` (or the
+# current `$TMPDIR`) points to a path on a partition that was mounted with the
+# 'noexec' option. The noexec command was created with `_shunit_mktempFunc()`.
+noexec 2>/dev/null || _shunit_fatal \
+  'Please declare TMPDIR with path on partition with exec permission.'
+
+# We must manually source the tests in standalone mode.
+if command [ "${__shunit_mode}" = "${__SHUNIT_MODE_STANDALONE}" ]; then
+  # shellcheck disable=SC1090
+  command . "`_shunit_prepForSourcing \"${__shunit_script}\"`"
+fi
+
+# Configure default output coloring behavior.
+_shunit_configureColor "${SHUNIT_COLOR}"
+
+# Execute the oneTimeSetUp function (if it exists).
+oneTimeSetUp
+command [ $? -eq ${SHUNIT_TRUE} ] \
+  || _shunit_fatal "oneTimeSetUp() returned non-zero return code."
+
+# Command line selected tests or suite selected tests.
+if command [ "$#" -ge 2 ]; then
+  # Argument $1 is either the filename of tests or '--'; either way, skip it.
+  shift
+  # Remaining arguments ($2 .. $#) are assumed to be test function names.
+  # Iterate through all remaining args in "$@" in a POSIX (likely portable) way.
+ # Helpful tip: https://unix.stackexchange.com/questions/314032/how-to-use-arguments-like-1-2-in-a-for-loop + for _shunit_arg_ do + suite_addTest "${_shunit_arg_}" + done + unset _shunit_arg_ +else + # Execute the suite function defined in the parent test script. + # DEPRECATED as of 2.1.0. + suite +fi + +# If no tests or suite specified, dynamically build a list of functions. +if command [ -z "${__shunit_suite}" ]; then + shunit_funcs_=`_shunit_extractTestFunctions "${__shunit_script}"` + for shunit_func_ in ${shunit_funcs_}; do + suite_addTest "${shunit_func_}" + done +fi +unset shunit_func_ shunit_funcs_ + +# Execute the suite of unit tests. +_shunit_execSuite + +# Execute the oneTimeTearDown function (if it exists). +oneTimeTearDown +command [ $? -eq ${SHUNIT_TRUE} ] \ + || _shunit_fatal "oneTimeTearDown() returned non-zero return code." + +# Generate a report summary. +_shunit_generateReport + +# That's it folks. +command [ "${__shunit_testsFailed}" -eq 0 ] +exit $? diff --git a/test/utils b/test/utils new file mode 100644 index 0000000..966bc2b --- /dev/null +++ b/test/utils @@ -0,0 +1,212 @@ +#!/bin/sh + +# taken from +# https://github.com/ryanbrainard/heroku-buildpack-testrunner/blob/master/lib/test_utils.sh + +oneTimeSetUp() +{ + TEST_SUITE_CACHE="$(mktemp -d ${SHUNIT_TMPDIR}/test_suite_cache.XXXX)" +} + +oneTimeTearDown() +{ + rm -rf ${TEST_SUITE_CACHE} +} + +setUp() +{ + OUTPUT_DIR="$(mktemp -d ${SHUNIT_TMPDIR}/output.XXXX)" + STD_OUT="${OUTPUT_DIR}/stdout" + STD_ERR="${OUTPUT_DIR}/stderr" + BUILD_DIR="${OUTPUT_DIR}/build" + CACHE_DIR="${OUTPUT_DIR}/cache" + mkdir -p ${OUTPUT_DIR} + mkdir -p ${BUILD_DIR} + mkdir -p ${CACHE_DIR} +} + +tearDown() +{ + rm -rf ${OUTPUT_DIR} +} + +capture() +{ + resetCapture + + LAST_COMMAND="$@" + + "$@" >${STD_OUT} 2>${STD_ERR} + RETURN=$? + rtrn=${RETURN} # deprecated +} + +resetCapture() +{ + if [ -f ${STD_OUT} ]; then + rm ${STD_OUT} + fi + + if [ -f ${STD_ERR} ]; then + rm ${STD_ERR} + fi + + unset LAST_COMMAND + unset RETURN + unset rtrn # deprecated +} + +detect() +{ + capture ${BUILDPACK_HOME}/bin/detect ${BUILD_DIR} +} + +compile() +{ + capture ${BUILDPACK_HOME}/bin/compile ${BUILD_DIR} ${CACHE_DIR} +} + +release() +{ + capture ${BUILDPACK_HOME}/bin/release ${BUILD_DIR} +} + +assertCapturedEquals() +{ + assertEquals "$@" "$(cat ${STD_OUT})" +} + +assertCapturedNotEquals() +{ + assertNotEquals "$@" "$(cat ${STD_OUT})" +} + +assertCaptured() +{ + assertFileContains "$@" "${STD_OUT}" +} + +assertNotCaptured() +{ + assertFileNotContains "$@" "${STD_OUT}" +} + +assertCapturedSuccess() +{ + assertEquals "Expected captured exit code to be 0; was <${RETURN}>" "0" "${RETURN}" + assertEquals "Expected STD_ERR to be empty; was <$(cat ${STD_ERR})>" "" "$(cat ${STD_ERR})" +} + +# assertCapturedError [[expectedErrorCode] expectedErrorMsg] +assertCapturedError() +{ + if [ $# -gt 1 ]; then + local expectedErrorCode=${1} + shift + fi + + local expectedErrorMsg=${1:-""} + + if [ -z ${expectedErrorCode} ]; then + assertTrue "Expected captured exit code to be greater than 0; was <${RETURN}>" "[ ${RETURN} -gt 0 ]" + else + assertTrue "Expected captured exit code to be <${expectedErrorCode}>; was <${RETURN}>" "[ ${RETURN} -eq ${expectedErrorCode} ]" + fi + + if [ "${expectedErrorMsg}" != "" ]; then + assertFileContains "Expected STD_ERR to contain error <${expectedErrorMsg}>" "${expectedErrorMsg}" "${STD_ERR}" + fi +} + +_assertContains() +{ + if [ 5 -eq $# ]; then + local msg=$1 + shift + elif [ ! 
4 -eq $# ]; then
+    fail "Expected 4 or 5 parameters; Received $# parameters"
+  fi
+
+  local needle=$1
+  local haystack=$2
+  local expectation=$3
+  local haystack_type=$4
+
+  case "${haystack_type}" in
+    "file") grep -q -F -e "${needle}" ${haystack} ;;
+    "text") echo "${haystack}" | grep -q -F -e "${needle}" ;;
+  esac
+
+  if [ "${expectation}" != "$?" ]; then
+    case "${expectation}" in
+      0) default_msg="Expected <${haystack}> to contain <${needle}>" ;;
+      1) default_msg="Did not expect <${haystack}> to contain <${needle}>" ;;
+    esac
+
+    fail "${msg:-${default_msg}}"
+  fi
+}
+
+assertFileContains()
+{
+  _assertContains "$@" 0 "file"
+}
+
+assertFileNotContains()
+{
+  _assertContains "$@" 1 "file"
+}
+
+assertFileContainsMatch()
+{
+  local needle=$1
+  local haystack=$2
+
+  grep -q -E -e "${needle}" ${haystack}
+  if [ "$?" != 0 ]; then
+    fail "Expected <${haystack}> to contain <${needle}>"
+  fi
+}
+
+command_exists () {
+  type "$1" > /dev/null 2>&1 ;
+}
+
+assertFileMD5()
+{
+  expectedHash=$1
+  filename=$2
+
+  if command_exists "md5sum"; then
+    md5_cmd="md5sum ${filename}"
+    expected_md5_cmd_output="${expectedHash}  ${filename}"
+  elif command_exists "md5"; then
+    md5_cmd="md5 ${filename}"
+    expected_md5_cmd_output="MD5 (${filename}) = ${expectedHash}"
+  else
+    fail "no suitable MD5 hashing command found on this system"
+  fi
+
+  assertEquals "${expected_md5_cmd_output}" "$(${md5_cmd})"
+}
+
+assertDirectoryExists() {
+  if [[ ! -e "$1" ]]; then
+    fail "$1 does not exist"
+  fi
+  if [[ ! -d $1 ]]; then
+    fail "$1 is not a directory"
+  fi
+}
+
+assertFileExists()
+{
+  filename=$1
+  assertTrue "$filename doesn't exist" "[[ -e $filename ]]"
+}
+
+assertFileDoesNotExist()
+{
+  filename=$1
+  assertTrue "$filename exists" "[[ ! -e $filename ]]"
+}
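
The helpers in test/utils above are consumed by the shunit2-based functional tests driven by test/run. A minimal sketch of a test that uses them is shown below; the test name, the xmlsec1 package, the "Updating apt caches" log line, and the ${BUILD_DIR}/.apt install prefix are illustrative assumptions rather than anything defined by this patch, and BUILDPACK_HOME is assumed to be exported by the test runner.

    # Hypothetical sketch only -- not part of this patch.
    # Relies on setUp(), capture(), and compile() from test/utils; the exact
    # package name, compile log line, and install prefix are guesses.
    testCompileInstallsAptfilePackages()
    {
      echo "xmlsec1" > "${BUILD_DIR}/Aptfile"

      compile                                   # runs bin/compile via capture()

      # RETURN is set by capture(); STD_OUT backs the assertCaptured helpers.
      assertTrue "expected bin/compile to exit 0; got <${RETURN}>" "[ ${RETURN} -eq 0 ]"
      assertCaptured "Updating apt caches"
      assertDirectoryExists "${BUILD_DIR}/.apt"
    }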