diff --git a/.github/workflows/linux-ci.yml b/.github/workflows/linux-ci.yml
index 102d8a85d..ddc8532cb 100644
--- a/.github/workflows/linux-ci.yml
+++ b/.github/workflows/linux-ci.yml
@@ -11,10 +11,19 @@ jobs:
     - uses: actions/checkout@v1
     - name: Configure and Build
       run: |
-        mkdir build && cd build
-        cmake -DCMAKE_BUILD_TYPE=Debug ..
-        make -j4
+        (
+          mkdir build-debug && cd build-debug
+          cmake -DCMAKE_BUILD_TYPE=Debug ..
+          make -j4
+        )
+
+        (
+          mkdir build-release && cd build-release
+          cmake -DCMAKE_BUILD_TYPE=Release ..
+          make -j4
+        )
+
     - name: Test
       run: |
-        cd build
-        make check JOBS=4
+        cmake --build build-debug --target check
+        cmake --build build-release --target check
diff --git a/.github/workflows/msvc-ci.yml b/.github/workflows/msvc-ci.yml
index 8adb503bd..1f2c48dad 100644
--- a/.github/workflows/msvc-ci.yml
+++ b/.github/workflows/msvc-ci.yml
@@ -17,16 +17,10 @@ jobs:
         cmake --build . --config Debug
         cmake --build . --config Release
 
-    # This should probably be fixed to work from MSVC without needing to
-    # use a bash shell and the GNU userland tools... But for now, the
-    # GH Actions environment provides what we need.
     - name: Test
       run: |
-        cd build\Debug
-        bash.exe ..\..\tests\all_tests.sh
-      env:
-        PYTHON_EXE: python.exe
-        JOBS: 4
+        cmake --build build --config Debug --target check
+        cmake --build build --config Release --target check
 
     - name: Upload artifact
       uses: actions/upload-artifact@v3
diff --git a/CMakeLists.txt b/CMakeLists.txt
index f233ca251..476c96438 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -76,6 +76,10 @@ target_link_libraries(pycdc pycxx)
 
 install(TARGETS pycdc RUNTIME DESTINATION bin)
 
-add_custom_target(check "${CMAKE_CURRENT_SOURCE_DIR}/tests/all_tests.sh"
-                  WORKING_DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}")
-add_dependencies(check pycdc)
+find_package(Python3 3.6 COMPONENTS Interpreter)
+if(Python3_FOUND)
+    add_custom_target(check
+        COMMAND "${Python3_EXECUTABLE}" "${CMAKE_CURRENT_SOURCE_DIR}/tests/run_tests.py"
+        WORKING_DIRECTORY "$<TARGET_FILE_DIR:pycdc>")
+    add_dependencies(check pycdc)
+endif()
diff --git a/tests/all_tests.sh b/tests/all_tests.sh
deleted file mode 100755
index fac9772d7..000000000
--- a/tests/all_tests.sh
+++ /dev/null
@@ -1,10 +0,0 @@
-#!/bin/bash
-set -e
-
-srcdir="$(cd "$(dirname "${BASH_SOURCE[0]}")"/.. && pwd)"
-jobs=${JOBS:-4}
-filter=${FILTER:-""}
-
-find "${srcdir}/tests/tokenized" -type f -name '*.txt' -a -name "*${filter}*" -print0 | \
-    xargs -0 -I '{}' -P $jobs \
-    bash -c 'o=$('"$srcdir"'/tests/decompyle_test.sh "$(basename -s .txt "{}")" tests-out) r=$?; echo "$o"; exit $r'
diff --git a/tests/decompyle_test.sh b/tests/decompyle_test.sh
deleted file mode 100755
index 09feb13df..000000000
--- a/tests/decompyle_test.sh
+++ /dev/null
@@ -1,132 +0,0 @@
-#!/bin/bash
-
-srcdir="$(cd "$(dirname "${BASH_SOURCE[0]}")"/.. && pwd)"
&& pwd)" -testdir="$srcdir/tests" -testname="$1" -outdir="$2" - -if [[ -z "$PYTHON_EXE" ]]; then - PYTHON_EXE="$(which python3)" -fi - -if [[ -z "$testname" ]]; then - echo "Missing required parameter: testname" >&2 - exit 1 -fi -if [[ -z "$outdir" ]]; then - echo "Missing required parameter: outdir" >&2 - exit 1 -fi - -shopt -s nullglob -compfiles=( "$testdir/compiled/$testname".?.?*.pyc ) -xfcfiles=( "$testdir/xfail/$testname".?.?*.pyc ) -shopt -u nullglob - -if (( ${#compfiles[@]} + ${#xfcfiles[@]} == 0 )); then - echo "No compiled/xfail modules found for $testname.*.pyc" - exit 1 -fi - -mkdir -p "$outdir" - -echo -ne "\033[1m*** $testname:\033[0m " - -fails=0 -xfails=0 -upass=0 -efiles=() -errors=() -upfiles=() -for pyc in "${compfiles[@]}" "${xfcfiles[@]}"; do - base="$outdir/$(basename "$pyc")" - - ./pycdc "$pyc" -o "$base.src.py" >"$base.err" 2>&1 - if (( $? )) || [[ -s "$base.err" ]] - then - if [[ "$(dirname "$pyc")" =~ xfail ]] - then - let xfails+=1 - else - let fails+=1 - efiles+=("$(basename "$pyc")") - errors+=("$(cat "$base.err")") - fi - continue - fi - - "$PYTHON_EXE" "$srcdir"/scripts/token_dump "$base.src.py" 2>"$base.tok.err" 1>"$base.tok.txt" - if (( $? )) || [[ -s "$base.tok.err" ]] - then - if [[ "$(dirname "$pyc")" =~ xfail ]] - then - let xfails+=1 - else - let fails+=1 - efiles+=("$(basename "$pyc")") - errors+=("$(cat "$base.tok.err")") - fi - continue - fi - - diff -u "$testdir/tokenized/$testname.txt" "$base.tok.txt" >"$base.tok.diff" - if (( $? )) - then - if [[ "$(dirname "$pyc")" =~ xfail ]] - then - let xfails+=1 - else - let fails+=1 - efiles+=("$(basename "$pyc")") - errors+=("$base.tok.txt does not match $testdir/tokenized/$testname.txt:\n$(cat "$base.tok.diff")") - fi - else - if [[ "$(dirname "$pyc")" =~ xfail ]] - then - let upass+=1 - upfiles+=("$(basename "$pyc")") - fi - fi -done - -if (( $fails == 0 )) -then - if (( $xfails != 0 )) - then - if (( ${#compfiles[@]} == 0 )) - then - echo -e "\033[33mXFAIL ($xfails)\033[0m" - else - echo -e "\033[32mPASS (${#compfiles[@]})\033[33m + XFAIL ($xfails)\033[0m" - fi - else - echo -e "\033[32mPASS (${#compfiles[@]})\033[0m" - fi -else - if (( $xfails != 0 )) - then - echo -e "\033[31mFAIL ($fails of ${#compfiles[@]})\033[33m + XFAIL ($xfails)\033[0m" - else - echo -e "\033[31mFAIL ($fails of ${#compfiles[@]})\033[0m" - fi - - for ((i=0; i<${#efiles[@]}; i++)) - do - echo -e "\t\033[31m${efiles[i]}\033[0m" - echo -e "${errors[i]}\n" - done -fi - -if (( $upass != 0 )) -then - echo -e "\033[1;34mUnexpected passes:\033[0m" - for ((i=0; i<${#upfiles[@]}; i++)) - do - echo -e "\t\033[33m${upfiles[i]}\033[0m" - done -fi - -if (( $fails != 0 )) -then - exit 1 -fi diff --git a/tests/run_tests.py b/tests/run_tests.py new file mode 100755 index 000000000..b619adcf1 --- /dev/null +++ b/tests/run_tests.py @@ -0,0 +1,138 @@ +#!/usr/bin/env python3 + +import os +import sys +import glob +import difflib +import argparse +import subprocess +import multiprocessing + +TEST_DIR = os.path.dirname(os.path.realpath(__file__)) +SCRIPTS_DIR = os.path.realpath(os.path.join(TEST_DIR, '..', 'scripts')) + +def decompyle_one(test_name, pyc_file, outdir, tokenized_expect): + out_base = os.path.join(outdir, os.path.basename(pyc_file)) + proc = subprocess.run( + [os.path.join(os.getcwd(), 'pycdc'), pyc_file, '-o', out_base + '.src.py'], + stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True, + encoding='utf-8', errors='replace') + pycdc_output = proc.stdout + if proc.returncode != 0 or pycdc_output: + with open(out_base 
+            errfile.write(pycdc_output)
+        return False, [pycdc_output]
+    elif os.path.exists(out_base + '.err'):
+        os.unlink(out_base + '.err')
+
+    proc = subprocess.run(
+        [sys.executable, os.path.join(SCRIPTS_DIR, 'token_dump'), out_base + '.src.py'],
+        stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True,
+        encoding='utf-8', errors='replace')
+    tokenized = proc.stdout
+    token_dump_err = proc.stderr
+    with open(out_base + '.tok.txt', 'w') as tokfile:
+        tokfile.write(tokenized)
+    if proc.returncode != 0 or token_dump_err:
+        with open(out_base + '.tok.err', 'w') as errfile:
+            errfile.write(token_dump_err)
+        return False, [token_dump_err]
+    elif os.path.exists(out_base + '.tok.err'):
+        os.unlink(out_base + '.tok.err')
+
+    if tokenized != tokenized_expect:
+        fromfile = 'tokenized/{}.txt'.format(test_name)
+        tofile = 'tests-out/{}.tok.txt'.format(os.path.basename(pyc_file))
+        diff = difflib.unified_diff(tokenized_expect.splitlines(True), tokenized.splitlines(True),
+                                    fromfile=fromfile, tofile=tofile)
+        diff = list(diff)
+        with open(out_base + '.tok.diff', 'w') as diff_file:
+            diff_file.writelines(diff)
+        return False, ['Tokenized output does not match expected output:\n'] + diff
+
+    return True, []
+
+
+def run_test(test_file):
+    """
+    Runs a single test, and returns a tuple containing the number of failed
+    tests and the output of the test. The output is not printed directly
+    in order to avoid interleaving output from multiple parallel tests.
+    """
+    test_name = os.path.splitext(os.path.basename(test_file))[0]
+    compiled_files = glob.glob(os.path.join(TEST_DIR, 'compiled', test_name + '.?.*.pyc'))
+    xfail_files = glob.glob(os.path.join(TEST_DIR, 'xfail', test_name + '.?.*.pyc'))
+    if not compiled_files and not xfail_files:
+        return 1, 'No compiled/xfail modules found for {}\n'.format(test_name)
+
+    outdir = os.path.join(os.getcwd(), 'tests-out')
+    os.makedirs(outdir, exist_ok=True)
+
+    with open(os.path.join(TEST_DIR, 'tokenized', test_name + '.txt'), 'r',
+              encoding='utf-8', errors='replace') as tok_file:
+        tokenized_expect = tok_file.read()
+
+    status_line = '\033[1m*** {}:\033[0m '.format(test_name)
+    errlines = []
+    fails = 0
+    xfails = 0
+    upass = 0
+    for xpass_file in compiled_files:
+        ok, errs = decompyle_one(test_name, xpass_file, outdir, tokenized_expect)
+        if not ok:
+            fails += 1
+            errlines.append('\t\033[31m{}\033[0m\n'.format(os.path.basename(xpass_file)))
+            errlines.extend(errs)
+    for xfail_file in xfail_files:
+        ok, _ = decompyle_one(test_name, xfail_file, outdir, tokenized_expect)
+        if not ok:
+            xfails += 1
+        else:
+            upass += 1
+
+    if fails == 0:
+        if xfails != 0:
+            if not compiled_files:
+                status_line += '\033[33mXFAIL ({})\033[0m\n'.format(xfails)
+            else:
+                status_line += '\033[32mPASS ({})\033[33m + XFAIL ({})\033[0m\n' \
+                               .format(len(compiled_files), xfails)
+        else:
+            status_line += '\033[32mPASS ({})\033[0m\n'.format(len(compiled_files))
+    else:
+        if xfails != 0:
+            status_line += '\033[31mFAIL ({} of {})\033[33m + XFAIL ({})\033[0m\n' \
+                           .format(fails, len(compiled_files), xfails)
+        else:
+            status_line += '\033[31mFAIL ({} of {})\033[0m\n'.format(fails, len(compiled_files))
+
+    return fails, [status_line] + errlines
+
+
+def main():
+    # For simpler invocation from CMake's check target, we also support setting
+    # these parameters via environment variables.
+    default_jobs = int(os.environ['JOBS']) if 'JOBS' in os.environ else multiprocessing.cpu_count()
+    default_filter = os.environ['FILTER'] if 'FILTER' in os.environ else ''
+
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--jobs', '-j', type=int, default=default_jobs,
+                        help='Number of tests to run in parallel (default: {})'.format(default_jobs))
+    parser.add_argument('--filter', type=str, default=default_filter,
+                        help='Run only test(s) matching the supplied filter')
+    args = parser.parse_args()
+
+    glob_pattern = '*{}*.txt'.format(args.filter) if args.filter else '*.txt'
+    test_files = sorted(glob.iglob(os.path.join(TEST_DIR, 'tokenized', glob_pattern)))
+    total_fails = 0
+    with multiprocessing.Pool(args.jobs) as pool:
+        for fails, output in pool.imap(run_test, test_files):
+            total_fails += fails
+            sys.stdout.writelines(output)
+
+    if total_fails:
+        print('{} test(s) failed'.format(total_fails))
+        sys.exit(1)
+
+if __name__ == '__main__':
+    main()