Skip to content

Commit

Permalink
Test runner refactor (#508)
Browse files Browse the repository at this point in the history
* Port test runner script to Python.

This allows us to keep the test output in sorted order while still running the
tests in parallel.  It also now defaults to using the number of available CPU
threads for parallel execution, rather than the previously hard-coded default.

* Also port decompyle_test.sh script to python within run_tests.py

* Fix cmake check target for multi-config generators.

Adds testing of release builds on both MSVC and GCC.

* Fix diff comparisons on Windows

* Ubuntu runners don't have ninja by default
  • Loading branch information
zrax authored Aug 8, 2024
1 parent b939aeb commit f37caa8
Show file tree
Hide file tree
Showing 6 changed files with 161 additions and 158 deletions.
19 changes: 14 additions & 5 deletions .github/workflows/linux-ci.yml
Original file line number Diff line number Diff line change
Expand Up @@ -11,10 +11,19 @@ jobs:
- uses: actions/checkout@v1
- name: Configure and Build
run: |
mkdir build && cd build
cmake -DCMAKE_BUILD_TYPE=Debug ..
make -j4
(
mkdir build-debug && cd build-debug
cmake -DCMAKE_BUILD_TYPE=Debug ..
make -j4
)
(
mkdir build-release && cd build-release
cmake -DCMAKE_BUILD_TYPE=Release ..
make -j4
)
- name: Test
run: |
cd build
make check JOBS=4
cmake --build build-debug --target check
cmake --build build-release --target check
10 changes: 2 additions & 8 deletions .github/workflows/msvc-ci.yml
Original file line number Diff line number Diff line change
Expand Up @@ -17,16 +17,10 @@ jobs:
cmake --build . --config Debug
cmake --build . --config Release
# This should probably be fixed to work from MSVC without needing to
# use a bash shell and the GNU userland tools... But for now, the
# GH Actions environment provides what we need.
- name: Test
run: |
cd build\Debug
bash.exe ..\..\tests\all_tests.sh
env:
PYTHON_EXE: python.exe
JOBS: 4
cmake --build build --config Debug --target check
cmake --build build --config Release --target check
- name: Upload artifact
uses: actions/upload-artifact@v3
Expand Down
10 changes: 7 additions & 3 deletions CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -76,6 +76,10 @@ target_link_libraries(pycdc pycxx)
install(TARGETS pycdc
RUNTIME DESTINATION bin)

add_custom_target(check "${CMAKE_CURRENT_SOURCE_DIR}/tests/all_tests.sh"
WORKING_DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}")
add_dependencies(check pycdc)
find_package(Python3 3.6 COMPONENTS Interpreter)
if(Python3_FOUND)
add_custom_target(check
COMMAND "${Python3_EXECUTABLE}" "${CMAKE_CURRENT_SOURCE_DIR}/tests/run_tests.py"
WORKING_DIRECTORY "$<TARGET_FILE_DIR:pycdc>")
add_dependencies(check pycdc)
endif()
10 changes: 0 additions & 10 deletions tests/all_tests.sh

This file was deleted.

132 changes: 0 additions & 132 deletions tests/decompyle_test.sh

This file was deleted.

138 changes: 138 additions & 0 deletions tests/run_tests.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,138 @@
#!/usr/bin/env python3

import os
import sys
import glob
import difflib
import argparse
import subprocess
import multiprocessing

TEST_DIR = os.path.dirname(os.path.realpath(__file__))
SCRIPTS_DIR = os.path.realpath(os.path.join(TEST_DIR, '..', 'scripts'))

def decompyle_one(test_name, pyc_file, outdir, tokenized_expect):
    """Decompile one .pyc module and compare its tokenized source to the
    expected tokenized output.

    Runs the `pycdc` binary (expected in the current working directory) on
    *pyc_file*, then tokenizes the decompiled source with the `token_dump`
    helper script, writing all artifacts (.src.py, .err, .tok.txt, .tok.err,
    .tok.diff) under *outdir*.

    Returns a tuple (success, messages): *success* is True when decompiling
    and tokenizing both succeeded and the tokenized output matches
    *tokenized_expect*; *messages* is a list of error/diff lines otherwise.
    """
    base = os.path.join(outdir, os.path.basename(pyc_file))
    src_file = base + '.src.py'

    decomp = subprocess.run(
        [os.path.join(os.getcwd(), 'pycdc'), pyc_file, '-o', src_file],
        stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True,
        encoding='utf-8', errors='replace')
    # Any output (warnings included) counts as a failure, not just a
    # non-zero exit status.
    if decomp.returncode != 0 or decomp.stdout:
        with open(base + '.err', 'w') as err_out:
            err_out.write(decomp.stdout)
        return False, [decomp.stdout]
    if os.path.exists(base + '.err'):
        # Clean up a stale error file from a previous failing run.
        os.unlink(base + '.err')

    tok_proc = subprocess.run(
        [sys.executable, os.path.join(SCRIPTS_DIR, 'token_dump'), src_file],
        stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True,
        encoding='utf-8', errors='replace')
    # The tokenized output is always saved, even when tokenizing fails, so
    # it can be inspected afterwards.
    with open(base + '.tok.txt', 'w') as tok_out:
        tok_out.write(tok_proc.stdout)
    if tok_proc.returncode != 0 or tok_proc.stderr:
        with open(base + '.tok.err', 'w') as err_out:
            err_out.write(tok_proc.stderr)
        return False, [tok_proc.stderr]
    if os.path.exists(base + '.tok.err'):
        os.unlink(base + '.tok.err')

    if tok_proc.stdout == tokenized_expect:
        return True, []

    # Mismatch: write a unified diff artifact and return it for reporting.
    diff_lines = list(difflib.unified_diff(
        tokenized_expect.splitlines(True), tok_proc.stdout.splitlines(True),
        fromfile='tokenized/{}.txt'.format(test_name),
        tofile='tests-out/{}.tok.txt'.format(os.path.basename(pyc_file))))
    with open(base + '.tok.diff', 'w') as diff_out:
        diff_out.writelines(diff_lines)
    return False, ['Tokenized output does not match expected output:\n'] + diff_lines


def run_test(test_file):
    """
    Run all compiled (expected-pass) and xfail (expected-fail) variants of a
    single test case, and return a tuple (fails, output_lines).

    *fails* is the number of unexpectedly failing modules; *output_lines* is
    a list of the test's output lines.  The output is returned rather than
    printed directly in order to avoid interleaving output from multiple
    parallel tests.
    """
    test_name = os.path.splitext(os.path.basename(test_file))[0]
    # Each test may have several compiled variants, one per Python version
    # (e.g. name.3.10.pyc), in the compiled/ and xfail/ directories.
    compiled_files = glob.glob(os.path.join(TEST_DIR, 'compiled', test_name + '.?.*.pyc'))
    xfail_files = glob.glob(os.path.join(TEST_DIR, 'xfail', test_name + '.?.*.pyc'))
    if not compiled_files and not xfail_files:
        # Return a list for consistency with the other return paths, so the
        # caller can always use writelines() on the output.
        return 1, ['No compiled/xfail modules found for {}\n'.format(test_name)]

    outdir = os.path.join(os.getcwd(), 'tests-out')
    os.makedirs(outdir, exist_ok=True)

    with open(os.path.join(TEST_DIR, 'tokenized', test_name + '.txt'), 'r',
              encoding='utf-8', errors='replace') as tok_file:
        tokenized_expect = tok_file.read()

    status_line = '\033[1m*** {}:\033[0m '.format(test_name)
    errlines = []
    fails = 0
    xfails = 0
    upass = 0
    for xpass_file in compiled_files:
        ok, errs = decompyle_one(test_name, xpass_file, outdir, tokenized_expect)
        if not ok:
            fails += 1
            errlines.append('\t\033[31m{}\033[0m\n'.format(os.path.basename(xpass_file)))
            errlines.extend(errs)
    for xfail_file in xfail_files:
        ok, _ = decompyle_one(test_name, xfail_file, outdir, tokenized_expect)
        if not ok:
            xfails += 1
        else:
            # An expected failure that now passes -- worth reporting so it
            # can be promoted out of the xfail directory.
            upass += 1

    if fails == 0:
        if xfails != 0:
            if not compiled_files:
                status_line += '\033[33mXFAIL ({})\033[0m\n'.format(xfails)
            else:
                # Fix: the XFAIL placeholder was missing ('XFAIL ()'), so the
                # xfails argument was silently dropped by str.format().
                status_line += '\033[32mPASS ({})\033[33m + XFAIL ({})\033[0m\n' \
                        .format(len(compiled_files), xfails)
        else:
            status_line += '\033[32mPASS ({})\033[0m\n'.format(len(compiled_files))
    else:
        if xfails != 0:
            status_line += '\033[31mFAIL ({} of {})\033[33m + XFAIL ({})\033[0m\n' \
                    .format(fails, len(compiled_files), xfails)
        else:
            status_line += '\033[31mFAIL ({} of {})\033[0m\n'.format(fails, len(compiled_files))
    if upass != 0:
        # Previously counted but never reported.
        status_line += '\t\033[33m{} xfail module(s) unexpectedly passed\033[0m\n'.format(upass)

    return fails, [status_line] + errlines


def main():
    """Entry point: discover test cases, run them in parallel, and report.

    Exits with status 1 when any test fails.
    """
    # For simpler invocation from CMake's check target, the job count and
    # test filter can also be supplied via environment variables.
    env_jobs = os.environ.get('JOBS')
    default_jobs = int(env_jobs) if env_jobs is not None else multiprocessing.cpu_count()
    default_filter = os.environ.get('FILTER', '')

    parser = argparse.ArgumentParser()
    parser.add_argument('--jobs', '-j', type=int, default=default_jobs,
            help='Number of tests to run in parallel (default: {})'.format(default_jobs))
    parser.add_argument('--filter', type=str, default=default_filter,
            help='Run only test(s) matching the supplied filter')
    args = parser.parse_args()

    # Test cases are enumerated from their expected tokenized outputs, sorted
    # so results print in a stable order even with parallel execution.
    pattern = '*{}*.txt'.format(args.filter) if args.filter else '*.txt'
    test_files = sorted(glob.iglob(os.path.join(TEST_DIR, 'tokenized', pattern)))

    total_fails = 0
    with multiprocessing.Pool(args.jobs) as pool:
        # imap (unlike imap_unordered) preserves the sorted submission order
        # while the tests themselves still run in parallel.
        for fails, output in pool.imap(run_test, test_files):
            total_fails += fails
            sys.stdout.writelines(output)

    if total_fails:
        print('{} test(s) failed'.format(total_fails))
        sys.exit(1)

# Guard so the module can be imported (e.g. by multiprocessing workers on
# platforms using the 'spawn' start method) without re-running the tests.
if __name__ == '__main__':
    main()

0 comments on commit f37caa8

Please sign in to comment.