diff --git a/integration/sections.npf b/integration/sections.npf
new file mode 100644
index 0000000..f172bcd
--- /dev/null
+++ b/integration/sections.npf
@@ -0,0 +1,20 @@
+%variables
+N=[1-2]
+
+%late_variables
+V=42
+
+%require
+test $N -lt 2
+
+%init
+echo "RESULT-INIT 1"
+
+%script
+echo "RESULT-SCRIPT $V"
+
+%exit
+echo "RESULT-CLEANUP 1"
+
+%pyexit
+RESULTS["PY"] = 1
diff --git a/integration/test_unittest.py b/integration/test_unittest.py
index 1607c79..a47f567 100644
--- a/integration/test_unittest.py
+++ b/integration/test_unittest.py
@@ -1,3 +1,6 @@
+from npf import repository
+from npf.grapher import Grapher
+from npf.loop import Comparator
 import npf.npf
 from npf.node import *
 import types
@@ -10,6 +13,8 @@
 from npf.variable import dtype, numeric_dict
 from npf.types.dataset import Run, ImmutableRun
 
+import numpy as np
+
 def get_args():
     parser = argparse.ArgumentParser(description='NPF Tester')
     npf.add_verbosity_options(parser)
@@ -122,3 +127,43 @@ def test_local_executor():
     assert stdout == ""
     assert stderr == ""
     assert ret == 1
+
+
+def test_core():
+    parser = argparse.ArgumentParser(description='NPF test')
+    v = npf.add_verbosity_options(parser)
+    b = npf.add_building_options(parser)
+    t = npf.add_testing_options(parser, regression=False)
+    a = npf.add_graph_options(parser)
+    parser.add_argument('repo', metavar='repo name', type=str, nargs='?', help='name of the repo/group of builds', default=None)
+
+    full_args = ["--test", "integration/sections.npf",'--force-retest']
+    args = parser.parse_args(full_args)
+    npf.initialize(args)
+    npf.create_local()
+
+    repo_list = [repository.Repository.get_instance("local", options=args)]
+
+    comparator = Comparator(repo_list)
+
+    series, time_series = comparator.run(test_name=args.test_files,
+                                         tags=args.tags,
+                                         options=args)
+    assert len(series) == 1
+    r = series[0][2]
+    assert len(r.items()) == 1
+    run,results = list(r.items())[0]
+    assert run.variables["N"] == 1
+    assert np.all(np.array(results["SCRIPT"]) == 42)
+    assert np.all(np.array(results["CLEANUP"]) == 1)
+    assert np.all(np.array(results["PY"]) == 1)
+
+
+    filename = npf.build_output_filename(args, repo_list)
+    grapher = Grapher()
+
+    print("Generating graphs...")
+    g = grapher.graph(series=series,
+                      filename=filename,
+                      options=args
+                      )
diff --git a/npf/executor/executor.py b/npf/executor/executor.py
index cae74ef..38b5cd8 100644
--- a/npf/executor/executor.py
+++ b/npf/executor/executor.py
@@ -1,6 +1,6 @@
 import re
-from npf import npf
+
 
 from colorama import Fore, Back, Style
 
 foreColors = [Fore.BLACK, Fore.RED, Fore.GREEN, Fore.YELLOW, Fore.BLUE, Fore.MAGENTA, Fore.CYAN, Fore.WHITE]
@@ -10,6 +10,7 @@ class Executor:
     index = 0
 
     def __init__(self):
+        from npf import npf
        if npf.options.color:
            self.color = foreColors[Executor.index % len(foreColors)]
        else:
diff --git a/npf/npf.py b/npf/npf.py
index a0d5a97..72a5a21 100755
--- a/npf/npf.py
+++ b/npf/npf.py
@@ -244,7 +244,7 @@ def set_args(args):
     sys.modules[__name__].options = args
     args.cwd = os.getcwd()
 
-def parse_nodes(args):
+def initialize(args):
     set_args(args)
 
     #other random stuffs to do
@@ -265,17 +265,23 @@
     if not os.path.isabs(options.experiment_folder):
         options.experiment_folder = os.path.abspath(options.experiment_folder)
 
+
+    options.search_path = set(options.search_path)
+    for t in [options.test_files]:
+        options.search_path.add(os.path.dirname(t))
+
+def create_local():
     # Create the test file
     os.close(os.open(experiment_path() + ".access_test" , os.O_CREAT))
     local = Node.makeLocal(options)
     #Delete the test file
     os.unlink(experiment_path() + ".access_test")
-    roles['default'] = [local]
+    return local
 
-    options.search_path = set(options.search_path)
-    for t in [options.test_files]:
-        options.search_path.add(os.path.dirname(t))
+def parse_nodes(args):
+    initialize(args)
+    local = create_local()
 
     for val in options.cluster:
diff --git a/npf/regression.py b/npf/regression.py
index d0fc465..7c52bc1 100644
--- a/npf/regression.py
+++ b/npf/regression.py
@@ -10,7 +10,17 @@ class Regression:
     def __init__(self, repo: Repository):
         self.repo = repo
 
-    def accept_diff(self, test, result, old_result):
+    def accept_diff(self, test, result, old_result)->Tuple[bool,float]:
+        """Compare two sets of results and tell whether their difference is within the acceptable margin
+
+        Args:
+            test (Test): The test object
+            result (_type_): The results of the last run
+            old_result (_type_): The previous results
+
+        Returns:
+            Tuple[bool,float]: Whether the test passes, and the measured difference
+        """
         result = np.asarray(result)
         old_result = np.asarray(old_result)
         n = test.reject_outliers(result).mean()
@@ -23,7 +33,7 @@
 
     def compare(self, test:Test, variable_list, all_results: Dataset, build, old_all_results, last_build, allow_supplementary=True,init_done=False) -> Tuple[int,int]:
         """
-        Compare two sets of results for the given list of variables and returns the amount of failing test
+        Compare two sets of results for the given list of variables and returns the number of failing and passing tests
         :param init_done: True if initialization for current test is already done (init sections for the test and its import)
         :param test: One test to get the config from
         :param variable_list:
@@ -52,6 +62,7 @@ def compare(self, test:Test, variable_list, all_results: Dataset, build, old_all
                     continue
 
             need_supp = False
+            diff = None
             for result_type, result in results_types.items():
                 if run in old_all_results and not old_all_results[run] is None:
                     old_result = old_all_results[run].get(result_type, None)
@@ -108,8 +119,7 @@ def compare(self, test:Test, variable_list, all_results: Dataset, build, old_all
                             ok = True
                     except ScriptInitException:
                         pass
-
-            if r and len(results_types) > 0:
+            if r and len(results_types) > 0 and diff is not None:
                 if not ok:
                     print(
                         "ERROR: Test %s is outside acceptable margin between %s and %s : difference of %.2f%% !" % (
@@ -144,7 +154,10 @@ def regress_all_tests(self, tests: List['Test'], options, history: int = 1, on_f
         nok = 0
 
         for itest,test in enumerate(tests):
-            print("[%s] Running test %s on version %s..." % (repo.name, test.filename, build.version))
+            if build.version != "local":
+                print("[%s] Running test %s on version %s..." % (repo.name, test.filename, build.version))
+            else:
+                print("[%s] Running test %s..." % (repo.name, test.filename))
             regression = self
             if repo.last_build:
                 try:
diff --git a/npf/repository.py b/npf/repository.py
index a77f7d3..8d8f765 100755
--- a/npf/repository.py
+++ b/npf/repository.py
@@ -215,6 +215,7 @@ def __init__(self, repo, options):
         self.bin_name = self.reponame # Wild guess that may work some times...
         self.build_info = None
         self.configure = ''
+        self.last_build = None
         self._last_100 = None
 
         if self.reponame == 'None':
diff --git a/npf/section.py b/npf/section.py
index e2cadef..510f014 100644
--- a/npf/section.py
+++ b/npf/section.py
@@ -35,11 +35,11 @@ class SectionFactory:
     varPattern = "([a-zA-Z0-9_:-]+)[=](" + Variable.VALUE_REGEX + ")?"
     namePattern = re.compile(
         "^(?P" + Variable.TAGS_REGEX + "[:])?(?Pinfo|config|variables|exit|pypost|pyexit|late_variables|"
-        "include\s+(?P[a-zA-Z0-9_./-]+)(?P([ \t]+" + varPattern + ")+)?|"
-        "(init-)?file(:?[@](?P[a-zA-Z0-9]+))?\s+(?P[a-zA-Z0-9_.${}-]+)(:? (?Pnoparse))?(:? (?Pjinja))?|"
-        "require(:?\s+(?Pjinja))?|"
+        "include\\s+(?P[a-zA-Z0-9_./-]+)(?P([ \t]+" + varPattern + ")+)?|"
+        "(init-)?file(:?[@](?P[a-zA-Z0-9]+))?\\s+(?P[a-zA-Z0-9_.${}-]+)(:? (?Pnoparse))?(:? (?Pjinja))?|"
+        "require(:?\\s+(?Pjinja))?|"
         "import(:?[@](?P[a-zA-Z0-9]+)(:?[-](?P[*0-9]+))?)?[ \t]+(?P" + Variable.VALUE_REGEX + ")(?P([ \t]+" + varPattern + ")+)?|"
-        "sendfile(:?[@](?P[a-zA-Z0-9]+))?\s+(?P.*)|" +
+        "sendfile(:?[@](?P[a-zA-Z0-9]+))?\\s+(?P.*)|" +
         "(:?script|init|exit)(:?[@](?P[a-zA-Z0-9]+)(:?[-](?P[*0-9]+))?)?(:? (?Pjinja))?(?P([ \t]+" + varPattern + ")*))$")
 
     @staticmethod
diff --git a/npf/test.py b/npf/test.py
index 8c0e125..42deaa0 100755
--- a/npf/test.py
+++ b/npf/test.py
@@ -828,6 +828,7 @@ def execute(self, build, run, v, n_runs=1, n_retry=0, allowed_types=SectionScrip
                         os.chdir('..')
                         if not self.options.preserve_temp and f_mine:
                             shutil.rmtree(test_folder)
+                        print("Test failed, exiting...")
                         sys.exit(1)
                 if c != 0:
                     n_err = n_err + 1
diff --git a/npf_compare.py b/npf_compare.py
index d562a64..34ffc90 100755
--- a/npf_compare.py
+++ b/npf_compare.py
@@ -10,6 +10,7 @@
 import argparse
 
 from npf import npf
+from npf.loop import Comparator
 from npf.regression import *
 from pathlib import Path
 
@@ -19,32 +20,6 @@
 
 import multiprocessing
 
-class Comparator():
-    def __init__(self, repo_list: List[Repository]):
-        self.repo_list = repo_list
-        self.graphs_series = []
-        self.kind_graphs_series = []
-
-    def build_list(self, on_finish, test, build, data_datasets, kind_datasets):
-        on_finish(self.graphs_series + [(test,build,data_datasets[0])], self.kind_graphs_series + [(test,build,kind_datasets[0])])
-
-    def run(self, test_name, options, tags, on_finish=None):
-        for irepo,repo in enumerate(self.repo_list):
-            regressor = Regression(repo)
-            tests = Test.expand_folder(test_name, options=options, tags=repo.tags + tags)
-            tests = npf.override(options, tests)
-            for itest,test in enumerate(tests):
-                build, data_dataset, kind_dataset = regressor.regress_all_tests(tests=[test], options=options, on_finish=lambda b,dd,td: self.build_list(on_finish,test,b,dd,td) if on_finish else None,iserie=irepo,nseries=len(self.repo_list) )
-                if len(tests) > 0 and not build is None:
-                    build._pretty_name = repo.name
-                    self.graphs_series.append((test, build, data_dataset[0]))
-                    self.kind_graphs_series.append((test, build, kind_dataset[0]))
-        if len(self.graphs_series) == 0:
-            print("No valid tags/test/repo combination.")
-            return None, None
-
-        return self.graphs_series, self.kind_graphs_series
-
 def do_graph(filename,args,series,kind_series,options):
 
     if series is None:
@@ -177,7 +152,6 @@ def main():
     repo_list = []
     for repo_name in args.repos:
         repo = Repository.get_instance(repo_name, args)
-        repo.last_build = None
         repo_list.append(repo)
 
     comparator = Comparator(repo_list)
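For reference (not part of the patch above), the refactored entry points can also be driven outside of pytest. The snippet below is a minimal sketch that mirrors the new test_core(); it assumes npf is importable and that it is run from the repository root so that integration/sections.npf resolves.

# Minimal sketch mirroring the new test_core() -- assumptions: npf installed,
# working directory is the repository root.
import argparse

from npf import npf, repository
from npf.loop import Comparator

parser = argparse.ArgumentParser(description='NPF test')
npf.add_verbosity_options(parser)
npf.add_building_options(parser)
npf.add_testing_options(parser, regression=False)
npf.add_graph_options(parser)
parser.add_argument('repo', metavar='repo name', type=str, nargs='?', default=None)

args = parser.parse_args(["--test", "integration/sections.npf", "--force-retest"])

npf.initialize(args)   # replaces the old parse_nodes(): sets options and the search path
npf.create_local()     # creates the local node, now split out of parse_nodes()

repo_list = [repository.Repository.get_instance("local", options=args)]
series, time_series = Comparator(repo_list).run(test_name=args.test_files,
                                                tags=args.tags,
                                                options=args)
print(series)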