diff --git a/benchexec/tools/metaval_c_2-0.py b/benchexec/tools/metaval_c_2-0.py
new file mode 100644
index 000000000..c57cb71e8
--- /dev/null
+++ b/benchexec/tools/metaval_c_2-0.py
@@ -0,0 +1,97 @@
+# This file is part of BenchExec, a framework for reliable benchmarking:
+# https://github.com/sosy-lab/benchexec
+#
+# SPDX-FileCopyrightText: 2007-2020 Dirk Beyer
+#
+# SPDX-License-Identifier: Apache-2.0
+
+import logging
+
+import benchexec.result
+from benchexec.tools.sv_benchmarks_util import ILP32, LP64, get_data_model_from_task
+from benchexec.tools.template import BaseTool2
+
+
+class Tool(BaseTool2):
+    """
+    Tool info for Deductive Validator.
+    """
+
+    REQUIRED_PATHS = ["lib", "src", "metaval_c_2.0.py"]
+
+    def executable(self, tool_locator: BaseTool2.ToolLocator):
+        return tool_locator.find_executable("metaval_c_2.0.py")
+
+    def program_files(self, executable):
+        return [executable] + self._program_files_from_executable(
+            executable, self.REQUIRED_PATHS, parent_dir=True
+        )
+
+    def version(self, executable):
+        return self._version_from_tool(executable)
+
+    def name(self):
+        return "MetaVal-C 2.0"
+
+    def project_url(self):
+        return "https://gitlab.com/sosy-lab/software/metaval-c-2.0"
+
+    def cmdline(self, executable, options, task, rlimits):
+        if task.property_file:
+            options = options + ["--property", task.property_file]
+
+        data_model_param = get_data_model_from_task(
+            task, {ILP32: "ILP32", LP64: "LP64"}
+        )
+
+        if data_model_param and "--data-model" not in options:
+            options += ["--data-model", data_model_param]
+
+        return [executable] + options + list(task.input_files_or_identifier)
+
+    def determine_result(self, run):
+        if not run.output:
+            return benchexec.result.RESULT_ERROR
+        lastline = run.output[-1]
+        if lastline.startswith("Witness is correct"):
+            status = benchexec.result.RESULT_TRUE_PROP
+        elif lastline.startswith("Witness could not be validated"):
+            if ":" in lastline:
+                status = (
+                    benchexec.result.RESULT_UNKNOWN
+                    + lastline.split(":")[1].strip()
+                )
+            else:
+                status = benchexec.result.RESULT_UNKNOWN
+        elif lastline.startswith(
+            "There was an error validating the witness in the backend verifier"
+        ):
+            if ":" in lastline:
+                status = benchexec.result.RESULT_ERROR + lastline.split(":")[1].strip()
+            else:
+                status = benchexec.result.RESULT_ERROR
+        elif lastline.startswith("Witness file does not exist"):
+            status = benchexec.result.RESULT_ERROR + "(no witness)"
+        else:
+            status = benchexec.result.RESULT_ERROR
+        return status
+
+    def get_value_from_output(self, output, identifier):
+        # search for the text in output and get its value,
+        # search the first line, that starts with the searched text
+        # warn if there are more lines (multiple statistics from sequential analysis?)
+        match = None
+        for line in output:
+            if line.lstrip().startswith(identifier):
+                startPosition = line.find(":") + 1
+                endPosition = line.find("(", startPosition)
+                if endPosition == -1:
+                    endPosition = len(line)
+                if match is None:
+                    match = line[startPosition:endPosition].strip()
+                else:
+                    logging.warning(
+                        "skipping repeated match for identifier '%s': '%s'",
+                        identifier,
+                        line,
+                    )
+        return match
diff --git a/benchexec/tools/shell.py b/benchexec/tools/shell.py
new file mode 100644
index 000000000..5a3afbe41
--- /dev/null
+++ b/benchexec/tools/shell.py
@@ -0,0 +1,46 @@
+# This file can be used as benchexec tool info module to run arbitrary commands
+# and scan the produced output to determine the result.
+# For instance, combine it with ./run-test.sh to run individual unit tests with benchexec.
+#
+# As an example, see the usage in ./benchexec/og-proof-generation.xml .
+
+import benchexec.tools.template
+import benchexec.result as result
+import logging
+import re
+
+class Tool(benchexec.tools.template.BaseTool2):
+    """
+    """
+
+    def executable(self, tool_locator):
+        return tool_locator.find_executable("run-test.sh")
+
+    def name(self):
+        return "shell"
+
+    def cmdline(self, executable, options, task, rlimits):
+        success_index = options.index('success')
+        self.success = options[success_index + 1]
+        failure_index = options.index('failure')
+        self.failure = options[failure_index + 1]
+        command_index = options.index('command')
+        command = options[command_index + 1]
+        return command.split(' ')
+
+    def determine_result(self, run):
+        for line in run.output:
+            if re.search(self.success, line) is not None:
+                return result.RESULT_DONE
+            if re.search(self.failure, line) is not None:
+                return result.RESULT_ERROR
+        return result.RESULT_UNKNOWN
+
+    def get_value_from_output(self, output, identifier):
+        regex = re.compile(identifier)
+        for line in output:
+            match = regex.search(line)
+            if match and len(match.groups()) > 0:
+                return match.group(1)
+        logging.debug("Did not find a match with regex %s", identifier)
+        return None
diff --git a/benchexec/tools/witnessverifast.py b/benchexec/tools/witnessverifast.py
new file mode 100644
index 000000000..a0e368d04
--- /dev/null
+++ b/benchexec/tools/witnessverifast.py
@@ -0,0 +1,70 @@
+#!/usr/bin/python3
+
+import benchexec.tools.template
+import benchexec.result as result
+import logging
+import re
+
+class Tool(benchexec.tools.template.BaseTool2):
+    """
+    """
+
+    def executable(self, tool_locator):
+        script = tool_locator.find_executable("verifast-validate-witness.sh")
+        return script
+
+    def name(self):
+        return "witnessverifast"
+
+    def cmdline(self, executable, options, task, rlimits):
+        witness_index = options.index('witness')
+        if witness_index is not None and len(options) > witness_index + 1:
+            witness = options[witness_index + 1]
+            options.pop(witness_index + 1)
+            options.pop(witness_index)
+        else:
+            raise benchexec.tools.template.UnsupportedFeatureException(f"invalid options: {options}")
+        return [executable, *task.input_files_or_empty, witness]
+
+    def determine_result(self, run):
+        if run.exit_code.value is not None:
+            if run.exit_code.value == 0:
+                return result.RESULT_TRUE_PROP
+        for line in run.output:
+            if "Cannot prove condition" in line:
+                return f"{result.RESULT_FALSE_PROP}(Cannot prove condition)"
+            if "Parse error" in line:
+                return f"{result.RESULT_ERROR}(Parse error)"
+            if "Duplicate function prototype" in line:
+                return f"{result.RESULT_ERROR}(Duplicate function prototype)"
+            if "No such variable, constructor, regular function, predicate, enum element, global variable, or module" in line:
+                return f"{result.RESULT_ERROR}(No such variable, constructor, regular function, predicate, enum element, global variable, or module)"
+            if "Non-void function does not return a value" in line:
+                return f"{result.RESULT_ERROR}(Non-void function does not return a value)"
+            if "Contract required" in line:
+                return f"{result.RESULT_ERROR}(Contract required)"
+            if "Loop invariant required" in line:
+                return f"{result.RESULT_ERROR}(Loop invariant required)"
+            if "Type mismatch. Actual:" in line:
+                return f"{result.RESULT_ERROR}(Type mismatch)"
+            if "Expression of arithmetic or pointer type expected" in line:
+                return f"{result.RESULT_ERROR}(Expression of arithmetic or pointer type expected)"
+            if "hides existing local variable" in line:
+                return f"{result.RESULT_ERROR}(hidden existing local variable)"
+            if "No matching heap chunks" in line:
+                return f"{result.RESULT_ERROR}(No matching heap chunks)"
+            if "No such label" in line:
+                return f"{result.RESULT_ERROR}(No such label)"
+            if "Traceback (most recent call last):" in line:
+                return f"{result.RESULT_ERROR}(Python crash)"
+        return result.RESULT_ERROR
+
+    def get_value_from_output(self, output, identifier):
+        regex = re.compile(identifier)
+        for line in output:
+            match = regex.search(line)
+            if match and len(match.groups()) > 0:
+                return match.group(1)
+        logging.debug("Did not find a match with regex %s", identifier)
+        return None