#!/usr/bin/env python3
# ---------------------------------------------------------------------
#
# Copyright (c) 2019 - 2024 by the IBAMR developers
# All rights reserved.
#
# This file is part of IBAMR.
#
# IBAMR is free software and is distributed under the 3-clause BSD
# license. The full text of the license can be found in the file
# COPYRIGHT at the top level directory of IBAMR.
#
# ---------------------------------------------------------------------
"""Test runner for autotools-based tests.
"""
import argparse
import ast
import concurrent.futures as cf
import configparser
import enum
import os
import re
import shutil
import signal
import subprocess
import sys
import tempfile
import threading
# We rely on subprocess.run, which is new in 3.5
assert sys.version_info >= (3, 5)


def string_to_boolean(string):
    lower = string.lower()
    if lower in ['y', 'yes', '1', 'on', 'true', 't']:
        return True
    elif lower in ['n', 'no', '0', 'off', 'false', 'f']:
        return False
    else:
        raise ValueError("Cannot convert '{}' to a boolean".format(string))


def interrupt_handler(sig, frame):
    """Small interrupt handler to catch SIGINT.
    """
    del sig, frame
    print("SIGINT caught. Exiting.")
    sys.exit(1)


signal.signal(signal.SIGINT, interrupt_handler)


class Parameters:
    """Class containing all input parameters to attest.

    Parameters read from input arguments or the input file:

    test_directory: relative path to the directory containing tests. Defaults
    to "/tests/".

    mpiexec: program used to launch MPI applications. Defaults to "mpiexec".

    numdiff: program used for comparing output files. Defaults to "numdiff".

    n_processors: number of processors to use at once. Defaults to 4.

    show_only: only print test names and do not execute them.

    include_regex: regular expression matching tests to run. Defaults to '.+'
    (i.e., run all tests).

    exclude_regex: regular expression matching tests to skip. Defaults to '^$'
    (i.e., skip no tests).

    keep_work_directories: whether or not work directories (usually in /tmp/)
    should be kept after the test is run. Defaults to False.

    test_timeout: maximum length, in seconds, to run a test. Tests that do
    not finish in time are considered failures.

    verbose: if True and a test fails, print the first several lines of its
    stderr or of the failing diff to stdout.
    """
    def __init__(self, input_arguments):
        self.keep_work_directories = input_arguments.keep_work_directories
        self.mpiexec = input_arguments.mpiexec
        self.numdiff = input_arguments.numdiff
        self.n_processors = input_arguments.n_processors
        self.show_only = input_arguments.show_only
        self.test_directory = os.getcwd() + input_arguments.test_directory
        self.include_regex = input_arguments.include_regex
        self.exclude_regex = input_arguments.exclude_regex
        self.test_timeout = input_arguments.test_timeout
        self.verbose = input_arguments.verbose

        # These are the only two required inputs:
        if self.mpiexec == "":
            raise ValueError("attest requires that mpiexec be provided to"
                             " configure, stored in attest.conf, or passed"
                             " explicitly as a command-line argument to"
                             " attest itself.")
        if not os.path.isfile(self.mpiexec):
            raise ValueError("The given path to mpiexec <" + self.mpiexec
                             + "> is not valid.")
        if not os.access(self.mpiexec, os.X_OK):
            raise ValueError("The given path to mpiexec <" + self.mpiexec
                             + "> is not executable.")
        # if we have a relative path, try to expand it
        if self.mpiexec[0] == '.':
            expanded_mpiexec = os.path.abspath(self.mpiexec)
            if os.access(expanded_mpiexec, os.X_OK):
                self.mpiexec = expanded_mpiexec

        if self.numdiff == "":
            raise ValueError("attest requires that NUMDIFF be provided to"
                             " configure, stored in attest.conf, or passed"
                             " explicitly as a command-line argument to"
                             " attest itself.")
        if not os.path.isfile(self.numdiff):
            raise ValueError("The given path to numdiff <" + self.numdiff
                             + "> is not valid.")
        if not os.access(self.numdiff, os.X_OK):
            raise ValueError("The given path to numdiff <" + self.numdiff
                             + "> is not executable.")
        # if we have a relative path, try to expand it
        if self.numdiff[0] == '.':
            expanded_numdiff = os.path.abspath(self.numdiff)
            if os.access(expanded_numdiff, os.X_OK):
                self.numdiff = expanded_numdiff


def n_mpi_processes(input_file):
    """Determine the number of MPI processes to use when running a test by
    inspecting the name of the input file. Defaults to 1.
    """
    mpirun_index = input_file.find("mpirun")
    if mpirun_index != -1:
        next_equals_index = input_file.find('=', mpirun_index)
        next_dot_index = input_file.find('.', mpirun_index)
        assert next_equals_index < next_dot_index
        return int(input_file[next_equals_index + 1:next_dot_index])
    return 1
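
# The process count is read straight out of the file name via the
# "key=value." convention. For example (name illustrative),
# n_mpi_processes("poisson_2d.mpirun=4.input") returns 4, while a name
# without "mpirun" yields the default of 1.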


def expect_error(input_file):
    """Determine, from the name of the input file, whether a test is expected
    to raise an error. Defaults to False.
    """
    error_index = input_file.find("expect_error")
    if error_index != -1:
        next_equals_index = input_file.find('=', error_index)
        next_dot_index = input_file.find('.', error_index)
        assert next_equals_index < next_dot_index
        value = input_file[next_equals_index + 1:next_dot_index].lower()
        return string_to_boolean(value)
    return False
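
# Same file-name convention: e.g. (name illustrative)
# expect_error("bad_input_2d.expect_error=true.input") returns True, in which
# case a nonzero exit code from the test counts as a pass.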


def restart_n(input_file):
    """Determine, from the name of the input file, the restart snapshot
    number to use when restarting the simulation. Defaults to zero.
    """
    restart_index = input_file.find("restart")
    if restart_index != -1:
        next_equals_index = input_file.find('=', restart_index)
        next_dot_index = input_file.find('.', restart_index)
        assert next_equals_index < next_dot_index
        return int(input_file[next_equals_index + 1:next_dot_index])
    return 0
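
# e.g. (name illustrative) restart_n("ib_ex0_2d.mpirun=2.restart=5.input")
# returns 5: after the initial run succeeds, the test is launched a second
# time from restart snapshot 5.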


@enum.unique
class TestResult(enum.Enum):
    """Enumeration describing the status of a test run: the test can pass,
    the run can fail (e.g., with a segmentation fault), the diff can fail, or
    the run can time out.
    """
    passed = 0
    run_failed = 1
    diff_failed = 2
    timeout = 3


class TestOutput:
    """Class storing all output (and input) for a test.
    """
    def __init__(self, input_file, test_result, error_message, parameters):
        assert isinstance(test_result, TestResult)
        self.test_result = test_result
        self.input_file = input_file
        self.error_message = error_message
        self._parameters = parameters
        self._n_mpi_processes = n_mpi_processes(
            os.path.split(self.input_file)[-1])
        path_length = len(self._parameters.test_directory) - 1
        self._name = self.input_file[path_length + 1:]

    def n_mpi_processes(self):
        """Return the number of MPI processes used by the test.
        """
        return self._n_mpi_processes

    def name(self):
        """Return the name of the test.
        """
        return self._name

    def print(self):
        """Print the name and status of the test, padded with hyphens.
        """
        relative_file_name = self.name()
        if self.test_result == TestResult.passed:
            status_string = "PASSED"
        elif self.test_result == TestResult.run_failed:
            status_string = "RUN FAILED"
        elif self.test_result == TestResult.diff_failed:
            status_string = "DIFF FAILED"
        elif self.test_result == TestResult.timeout:
            status_string = "TIMEOUT"
        else:
            status_string = "FAILED"
        hyphen_length = 70 - len(relative_file_name) - len(status_string)
        print(relative_file_name + " " + "-" * hyphen_length + " "
              + status_string)
        if self._parameters.verbose:
            if self.test_result in [TestResult.run_failed,
                                    TestResult.diff_failed]:
                print("test failed with output:")
                for line in self.error_message.split('\n')[:50]:
                    print(" ", line)


class Test:
    """Class encapsulating a single test: it is responsible for running the
    test in a subprocess and reporting results.
    """
    def __init__(self, executable, input_file, output_file, parameters):
        self.executable = executable
        self.input_file = input_file
        stripped_input_file = os.path.split(self.input_file)[-1]
        self._n_mpi_processes = n_mpi_processes(stripped_input_file)
        self._expect_error = expect_error(stripped_input_file)
        self._restart_n = restart_n(stripped_input_file)
        self.output_file = output_file
        self._parameters = parameters
        # check that we were provided with actual files
        assert os.path.isfile(self.executable)
        assert os.path.isfile(self.input_file)
        assert os.path.isfile(self.output_file)
        # check that the input file matches the output file
        assert (os.path.splitext(self.input_file)[0]
                == os.path.splitext(self.output_file)[0])
        if self.do_restart() and self._expect_error:
            raise ValueError("Restarts and expected errors cannot be used "
                             "together.")

    def name(self):
        """Name of the test. Determined by the input file.
        """
        path_length = len(self._parameters.test_directory) - 1
        return self.input_file[path_length + 1:]

    def n_mpi_processes(self):
        """Number of MPI processes required for the test. Returns an integer.
        """
        return self._n_mpi_processes

    def do_restart(self):
        """Whether or not we should, upon completion, run the test again in
        restart mode.
        """
        return self._restart_n != 0

    def restart_n(self):
        """The restart snapshot number to use in case we restart the test.
        """
        return self._restart_n

    def run(self):
        """Actually execute the test and compare the output results. Returns
        a TestOutput object describing what happened.
        """
        output_root = os.path.split(self.output_file)[1]
        temporary_directory = tempfile.mkdtemp(prefix="att-" + output_root)
        run_succeeded = False
        try:
            n_processors = self.n_mpi_processes()
            run_args = [self.executable, self.input_file]
            if 1 < n_processors:
                # Permit running more processes than we have cores. Also
                # disable binding processes to specific cores. We need to
                # avoid binding since on clusters this results in multiple
                # tests being assigned to the same core (since we have
                # concurrent mpiexec calls).
                run_args = [self._parameters.mpiexec, "-np",
                            str(n_processors), "--bind-to", "none"] + run_args
            try:
                run_result = subprocess.run(
                    run_args,
                    stderr=subprocess.PIPE,
                    stdout=subprocess.PIPE,
                    cwd=temporary_directory,
                    timeout=self._parameters.test_timeout)
                if self._expect_error:
                    run_succeeded = run_result.returncode != 0
                else:
                    run_succeeded = run_result.returncode == 0
                if run_succeeded:
                    # The first run succeeded: if we are testing restart code
                    # we need to run again
                    if self.do_restart():
                        run_args += ["./restart/", str(self.restart_n())]
                        # use the same time limit for the restarted run
                        run_result = subprocess.run(
                            run_args,
                            stderr=subprocess.PIPE,
                            stdout=subprocess.PIPE,
                            cwd=temporary_directory,
                            timeout=self._parameters.test_timeout)
                        run_succeeded = run_result.returncode == 0
                test_timed_out = False
                if run_succeeded:
                    # Nearly the same as deal.II: Configure numdiff with
                    # - relative differences of 1e-6
                    # - absolute differences of 1e-10 (i.e., ignore values
                    #   near zero)
                    # - [space][tab][newline]=,:;<>[](){}^ as separators
                    #   between numbers
                    numdiff_flags = ["-r", "1e-6", "-a", "1e-10", "-s",
                                     "' \\t\\n=,:;<>[](){}^'"]
                    diff_result = subprocess.run(
                        [self._parameters.numdiff] + numdiff_flags +
                        [self.output_file, temporary_directory + os.sep
                         + "output"],
                        stderr=subprocess.PIPE,
                        stdout=subprocess.PIPE,
                        # allow the test runner to crash if numdiff somehow
                        # times out
                        timeout=self._parameters.test_timeout)
                    diff_succeeded = diff_result.returncode == 0
                else:
                    diff_succeeded = False
            except subprocess.TimeoutExpired as timeout_exception:
                test_timed_out = True
                run_result = timeout_exception
        finally:
            if not self._parameters.keep_work_directories:
                shutil.rmtree(temporary_directory)

        test_result = TestResult.passed
        error_message = ""

        def maybe_decode_error(result):
            # TimeoutExpired and CompletedProcess both define stderr, but it
            # may be None or not valid UTF-8: fall back to an empty string
            try:
                return result.decode('utf-8')
            except Exception:
                return ""

        if not run_succeeded:
            if test_timed_out:
                test_result = TestResult.timeout
            else:
                test_result = TestResult.run_failed
            error_message = maybe_decode_error(run_result.stderr)
        elif not diff_succeeded:
            test_result = TestResult.diff_failed
            error_message = maybe_decode_error(diff_result.stdout)
        return TestOutput(self.input_file, test_result, error_message,
                          self._parameters)
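
# Taken together, a 4-rank restart test expands to roughly the following
# commands (paths and names illustrative), run inside a fresh /tmp/att-*
# work directory:
#   mpiexec -np 4 --bind-to none ./ex0 ex0_2d.mpirun=4.restart=5.input
#   mpiexec -np 4 --bind-to none ./ex0 ex0_2d.mpirun=4.restart=5.input ./restart/ 5
#   numdiff -r 1e-6 -a 1e-10 -s ' \t\n=,:;<>[](){}^' ex0_2d.mpirun=4.restart=5.output /tmp/att-*/output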


def get_input_files(test_directory):
    """Get the input files from the specified directory.
    """
    def generator():
        for root, _, file_names in os.walk(test_directory):
            for file_name in file_names:
                if file_name.endswith("input"):
                    yield os.path.join(root, file_name)
    return generator()
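
# os.walk makes this recursive: a tree containing, say,
# tests/IB/explicit_ex0_2d.input and tests/navier_stokes/ns_ex1_2d.input
# (names illustrative) is flattened into one iterable of input-file paths.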


def main():
    """Run the tests and print the results.
    """
    # 0. Set up environment and input settings:
    # a. permit running more MPI processes than we have logical cores
    # (openmpi 4 and 5)
    os.environ['OMPI_MCA_rmaps_base_oversubscribe'] = "1"
    ompi_5_key = 'PRTE_MCA_rmaps_default_mapping_policy'
    if ompi_5_key in os.environ:
        os.environ[ompi_5_key] = os.environ[ompi_5_key] + ":oversubscribe"
    else:
        os.environ[ompi_5_key] = ":oversubscribe"
    # b. completely disable OMP threading. HYPRE may try to parallelize
    # itself with threads, which leads to disastrous performance since we
    # expect that all parallelization is done with MPI.
    os.environ['OMP_NUM_THREADS'] = "1"
    os.environ['OMP_THREAD_LIMIT'] = "1"

    config_file = configparser.ConfigParser()
    config_file.read('attest.conf')
    conf = config_file['attest']  # shorthand
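
    # For reference, attest.conf has a single [attest] section whose keys
    # match the command-line options below; a sketch with illustrative values
    # (configure normally writes this file):
    #   [attest]
    #   jobs = 4
    #   mpiexec = /usr/bin/mpiexec
    #   numdiff = /usr/bin/numdiff
    #   test_directory = /tests/
    #   include_regex = .+
    #   exclude_regex = ^$
    #   keep_work_directories = false
    #   show_only = False
    #   test_timeout = 600
    #   verbose = false
    # Note that show_only must be a Python literal (True or False) since it
    # is parsed with ast.literal_eval below.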

    # iterate over a copy since we remove entries from sys.argv as we go:
    for entry in list(sys.argv):
        # argparse doesn't support writing -j8 (i.e., with no space), so
        # manually pull that out:
        if 2 < len(entry) and entry[0:2] == '-j':
            conf['jobs'] = entry[2:]
            sys.argv.remove(entry)
        # also support a bare -j, which uses half of the logical core count
        # (i.e., it assumes hyperthreading and ignores virtual cores):
        elif entry == '-j':
            conf['jobs'] = str(int(os.cpu_count() / 2))
            sys.argv.remove(entry)
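
    # Hence both spellings work (illustrative): "./attest -j8" sets the total
    # processor budget to 8, while a bare "./attest -j" uses half of
    # os.cpu_count().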

    description = (
        "A simple test runner. attest is configured both with an input file "
        "(attest.conf, located in the top level test directory) and command "
        "line arguments. Command line arguments will override settings found "
        "in attest.conf. attest requires both numdiff and an MPI execution "
        "program (i.e., mpiexec) to work correctly. The best way to specify "
        "these values is to provide them to configure, which will then use "
        "them to write a default attest.conf file.\n\n"
        "attest will return 0 if it succeeded and 1 if any error (including "
        "test failures) occurred.")
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument('-j,--jobs', metavar='N', type=int,
                        default=conf['jobs'], dest='n_processors',
                        help=("Total number of processors to use across all "
                              "concurrent MPI jobs: defaults to 1. If \"-j\" "
                              "is given as an argument without a number then "
                              "half of the total cores (including virtual "
                              "cores) will be used."))
    parser.add_argument('--keep-work-directories',
                        default=string_to_boolean(
                            conf['keep_work_directories']),
                        dest='keep_work_directories',
                        action='store_true',
                        help=("Whether or not to keep temporary work "
                              "directories. This is useful for debugging."))
    parser.add_argument('--mpi-executable', type=str,
                        default=conf['mpiexec'], dest='mpiexec',
                        help="Path to mpiexec.")
    parser.add_argument('--numdiff-executable', type=str,
                        default=conf['numdiff'],
                        dest='numdiff', help="Numdiff executable.")
    parser.add_argument('-N,--show-only',
                        default=ast.literal_eval(conf['show_only']),
                        action='store_true', dest='show_only',
                        help="Disable actual execution of tests and instead "
                             "print the names of tests that would have been "
                             "run.")
    parser.add_argument('--test-directory', metavar='D', type=str,
                        default=conf['test_directory'],
                        dest='test_directory',
                        help=("Directory in which all test files "
                              "(executables, input files, and output files) "
                              "are located."))
    parser.add_argument('-R,--include-regex', type=str,
                        default=conf['include_regex'],
                        dest='include_regex',
                        help="Regular expression for including test names. "
                             "If a test is in 'tests/dir/example.input', the "
                             "regex will be applied to 'dir/example.input'.")
    parser.add_argument('-E,--exclude-regex', type=str,
                        default=conf['exclude_regex'],
                        dest='exclude_regex',
                        help="Regular expression for excluding test names. "
                             "If a test is in 'tests/dir/example.input', the "
                             "regex will be applied to 'dir/example.input'.")
    parser.add_argument('--test-timeout', type=int,
                        default=conf['test_timeout'],
                        dest='test_timeout',
                        help="The time limit, in seconds, for each test.")
    parser.add_argument('--verbose',
                        default=string_to_boolean(conf['verbose']),
                        dest='verbose',
                        action='store_true',
                        help=("If true, print the stderr or failing diff for"
                              " each failing test."))

    input_arguments = parser.parse_args()
    parameters = Parameters(input_arguments)
    include_pattern = re.compile(parameters.include_regex)
    exclude_pattern = re.compile(parameters.exclude_regex)
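
    # The patterns are matched against paths relative to the test directory,
    # so (illustrative) an include regex of '^IB/' selects
    # 'IB/explicit_ex0_2d.input' while an exclude regex of 'mpirun=4' drops
    # every four-rank test.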

    # 1. set up input file list:
    unstarted_tests = []
    invalid_test_inputs = []
    input_files = list(get_input_files(parameters.test_directory))
    if len(input_files) == 0:
        print("attest could not find any tests in the provided test "
              "directory.")
        sys.exit(1)
    for input_file in input_files:
        path_length = len(os.path.split(input_file)[0])
        input_file_name = input_file[path_length + 1:]
        # include the directory when matching test input files:
        regex_input = input_file[len(parameters.test_directory):]
        if (re.search(include_pattern, regex_input)
                and not re.search(exclude_pattern, regex_input)):
            output_file = os.path.splitext(input_file)[0] + ".output"
            base_name = input_file_name[:input_file_name.find('.')]
            executable = os.path.split(input_file)[0] + os.sep + base_name
            if all(os.path.isfile(f)
                   for f in [executable, input_file, output_file]):
                unstarted_tests.append(Test(executable, input_file,
                                            output_file,
                                            parameters=parameters))
            # only warn about tests with invalid input or output files: i.e.,
            # skip checking executables here since we copy all input files
            # regardless of what actually compiled and some tests may not be
            # compiled if their dependencies are unavailable.
            elif not all(os.path.isfile(f)
                         for f in [input_file, output_file]):
                invalid_test_inputs.append(input_file_name)

    if invalid_test_inputs:
        print("\nWarning: The test(s) with input files\n")
        for invalid_test_input in invalid_test_inputs:
            print(" ", invalid_test_input)
        print("\ncannot be run since either the input or output file is "
              "missing. This usually\nmeans that one of the files is a "
              "broken link. Broken links to test files occur\nwhen a test "
              "exists in one revision of IBAMR but not another: for example, "
              "if a\ntest has been removed then links to its input and "
              "output files may still be\npresent in the build directory. "
              "This is usually harmless. A good way to get rid\nof this "
              "warning is to delete all test input and output files. To "
              "achieve this,\nexecute the following commands in the build "
              "directory:\n"
              "\n"
              "    cd tests/\n"
              "    find ./ -name '*.input' -delete\n"
              "    find ./ -name '*.output' -delete\n")

    unstarted_tests.sort(key=lambda u: u.n_mpi_processes())
    if parameters.show_only:
        for test in unstarted_tests:
            print(test.name())
        sys.exit(0)

    # 2. Run the tests!
    processors_in_use = 0
    n_tests = len(unstarted_tests)
    n_passed_tests = 0
    n_finished_tests = 0
    failed_test_outputs = list()
    test_width = len(str(n_tests))
    fraction_template = (("{: >" + str(test_width) + "}")
                         + "/"
                         + ("{: <" + str(test_width) + "}").format(n_tests))
    whitespace_pad = ' ' * (1 + len(fraction_template.format(0)))
    worker_available = threading.Event()
    result_update_lock = threading.Lock()

    def deal_with_result(future):
        """Unpack a Future object provided by a finished test execution
        thread: use it to update the relevant counters and print the status
        to the screen.
        """
        with result_update_lock:
            nonlocal n_passed_tests
            nonlocal processors_in_use
            nonlocal failed_test_outputs
            nonlocal n_finished_tests
            try:
                test_output = future.result()
                processors_in_use -= test_output.n_mpi_processes()
                n_finished_tests += 1
                if test_output.test_result == TestResult.passed:
                    n_passed_tests += 1
                else:
                    failed_test_outputs.append(test_output)
                # pad the test result output
                print(whitespace_pad, end='')
                test_output.print()
            except Exception as e:
                print("Unhandled exception in attest when printing a test:")
                print(e)
            finally:
                worker_available.set()

    # allow some wiggle room for finished tests that are waiting for
    # result_update_lock by using lots of threads:
    n_processors = parameters.n_processors
    with cf.ThreadPoolExecutor(2 * n_processors) as executor:
        while len(unstarted_tests) != 0 or n_finished_tests < n_tests:
            # 2a. Start new tests:
            n_new_tests = 0
            index = len(unstarted_tests) - 1
            while (processors_in_use <= n_processors
                   and len(unstarted_tests) != 0 and index != -1):
                test = unstarted_tests[index]
                # It's possible that some tests require more processors than
                # we have available, so special-case that. Since we sort
                # tests we will do all of those tests first:
                if (test.n_mpi_processes() + processors_in_use <= n_processors
                        or (processors_in_use == 0 and
                            n_processors <= test.n_mpi_processes())):
                    test = unstarted_tests.pop(index)
                    test_n = n_tests - len(unstarted_tests)
                    print(fraction_template.format(test_n),
                          "starting test", test.name())
                    future = executor.submit(test.run)
                    future.add_done_callback(deal_with_result)
                    processors_in_use += test.n_mpi_processes()
                    n_new_tests += 1
                index -= 1
            # Set 'no workers available' if either:
            # 1. we have maxed out the number of processors
            # 2. we cannot start any new tests (e.g., we have 6 processors
            #    and only tests that require 4 processors remaining)
            with result_update_lock:
                if n_processors <= processors_in_use or n_new_tests == 0:
                    worker_available.clear()
            worker_available.wait()
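
    # As an example of the scheduling policy above (numbers illustrative):
    # with -j8 and a queue of 3-, 4-, and 8-rank tests, the 8-rank test is
    # popped first (the list is sorted and scanned from the back), and the
    # 4- and 3-rank tests then run concurrently once its processors free up.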

    if 0 < n_tests:
        print("\n{}/{} ({}%) tests passed\n".format(
            n_passed_tests,
            n_tests,
            round(100 * float(n_passed_tests) / n_tests, 2)))
    else:
        print("\n0/0 (100%) tests passed\n")
    if failed_test_outputs:
        print("The following tests FAILED:\n")
        for failed_test_result in failed_test_outputs:
            print(" ", failed_test_result.name())

    if failed_test_outputs:
        sys.exit(1)
    else:
        sys.exit(0)


if __name__ == '__main__':
    main()