def assertEqualNumberAndUnit(self, value, number, unit):
    self.assertEqual(util.split_number_and_unit(value), (number, unit))
    self.assertEqual(util.split_string_at_suffix(value, False), (number, unit))
if line.startswith("ERROR:"):
if "timeout" in line.lower():
return "TIMEOUT"
else:
return "ERROR ({0})".format(returncode)
elif line.startswith("Result: FALSE"):
return result.RESULT_FALSE_REACH
elif line.startswith("Result: TRUE"):
return result.RESULT_TRUE_PROP
elif line.startswith("Result: DONE"):
return result.RESULT_DONE
elif line.startswith("Result: ERROR"):
# matches ERROR and ERROR followed by some reason in parantheses
# e.g., "ERROR (TRUE)" or "ERROR(TRUE)"
return re.search(r"ERROR(\s*\(.*\))?", line).group(0)
return result.RESULT_UNKNOWN
# This file is part of BenchExec, a framework for reliable benchmarking:
# https://github.com/sosy-lab/benchexec
#
# SPDX-FileCopyrightText: 2007-2020 Dirk Beyer
#
# SPDX-License-Identifier: Apache-2.0
import re
import benchexec.result as result
import benchexec.util as util
import benchexec.tools.template
class Tool(benchexec.tools.template.BaseTool):
    """
    Tool info for tbf test-suite validator (https://gitlab.com/sosy-lab/software/test-format).
    """

    REQUIRED_PATHS = ["python_modules", "lib", "bin"]

    def program_files(self, executable):
        return self._program_files_from_executable(
            executable, self.REQUIRED_PATHS, parent_dir=True
        )

    def executable(self):
        return util.find_executable(
            "tbf-testsuite-validator", "bin/tbf-testsuite-validator"
        )
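
# A minimal usage sketch (not part of the original module): it assumes the
# tbf-testsuite-validator binary is installed so that util.find_executable()
# can locate it; otherwise find_executable() reports an error.
if __name__ == "__main__":
    tool = Tool()
    exe = tool.executable()
    print("Executable:", exe)
    # program_files() lists the files matching REQUIRED_PATHS relative to the
    # parent directory of the executable, i.e., everything to deploy with the tool.
    for path in tool.program_files(exe):
        print("  ", path)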
# Test helper that builds a Run with a stubbed benchmark and tool.  This snippet
# omits its imports; it presumably relies on something like:
#   import types
#   from benchexec.model import Run
#   from benchexec.result import RESULT_UNKNOWN
#   from benchexec.tools.template import BaseTool
def create_run(self, info_result=RESULT_UNKNOWN):
    runSet = types.SimpleNamespace()
    runSet.log_folder = "."
    runSet.result_files_folder = "."
    runSet.options = []
    runSet.real_name = None
    runSet.propertytag = None
    runSet.benchmark = lambda: None
    runSet.benchmark.base_dir = "."
    runSet.benchmark.benchmark_file = "Test.xml"
    runSet.benchmark.columns = []
    runSet.benchmark.name = "Test"
    runSet.benchmark.instance = "Test"
    runSet.benchmark.rlimits = {}
    runSet.benchmark.tool = BaseTool()

    # Stub out determine_result so the tool always reports the given result.
    def determine_result(self, returncode, returnsignal, output, isTimeout=False):
        return info_result

    runSet.benchmark.tool.determine_result = determine_result

    return Run(
        identifier="test.c", sourcefiles=["test.c"], fileOptions=[], runSet=runSet
    )
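
# A hedged usage sketch (class and test names are invented, not from the original
# test file): it builds a Run whose stubbed tool always reports RESULT_UNKNOWN,
# and it assumes that Run keeps the given run set reachable as run.runSet.
import unittest

from benchexec.result import RESULT_UNKNOWN


class CreateRunExample(unittest.TestCase):
    def test_stubbed_tool_result(self):
        run = create_run(self, info_result=RESULT_UNKNOWN)  # helper defined above
        stubbed_tool = run.runSet.benchmark.tool
        self.assertEqual(RESULT_UNKNOWN, stubbed_tool.determine_result(0, 0, [], False))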
def determine_result(self, returncode, returnsignal, output, isTimeout=False):
    """
    Parse the output of the tool and extract the verification result.
    This method always needs to be overridden.
    If the tool gave a result, this method needs to return one of the
    benchexec.result.RESULT_* strings.
    Otherwise an arbitrary string can be returned that will be shown to the user
    and should give some indication of the failure reason
    (e.g., "CRASH", "OUT_OF_MEMORY", etc.).
    """
    for line in reversed(output):
        if line.startswith("ERROR:"):
            if "timeout" in line.lower():
                return "TIMEOUT"
            else:
                return "ERROR ({0})".format(returncode)
        elif line.startswith("Result: FALSE"):
            return result.RESULT_FALSE_REACH
        elif line.startswith("Result: TRUE"):
            return result.RESULT_TRUE_PROP
        elif line.startswith("Result: DONE"):
            return result.RESULT_DONE
        elif line.startswith("Result: ERROR"):
            # matches ERROR and ERROR followed by some reason in parentheses,
            # e.g., "ERROR (TRUE)" or "ERROR(TRUE)"
            return re.search(r"ERROR(\s*\(.*\))?", line).group(0)
    return result.RESULT_UNKNOWN
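
# Illustration (not from the original file): the log lines below are invented to
# show how the branches above classify output.  Calling the function directly
# with a dummy `self` works because `self` is not used in the body.
if __name__ == "__main__":
    assert determine_result(None, 0, 0, ["Result: TRUE"], False) == result.RESULT_TRUE_PROP
    assert determine_result(None, 0, 0, ["Result: ERROR (TRUE)"], False) == "ERROR (TRUE)"
    assert determine_result(None, 1, 0, ["ERROR: timeout reached"], False) == "TIMEOUT"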
def determine_result(self, returncode, returnsignal, output, isTimeout=False):
    """
    Parse the output of the tool and extract the verification result.
    This method always needs to be overridden.
    If the tool gave a result, this method needs to return one of the
    benchexec.result.RESULT_* strings.
    Otherwise an arbitrary string can be returned that will be shown to the user
    and should give some indication of the failure reason
    (e.g., "CRASH", "OUT_OF_MEMORY", etc.).
    """
    for line in reversed(output):
        if line.startswith("ERROR:"):
            if "timeout" in line.lower():
                return "TIMEOUT"
            else:
                return "ERROR ({0})".format(returncode)
        elif line.startswith("Result:") and "FALSE" in line:
            return result.RESULT_FALSE_REACH
        elif line.startswith("Result:") and "TRUE" in line:
            return result.RESULT_TRUE_PROP
        elif line.startswith("Result:") and "DONE" in line:
            return result.RESULT_DONE
    return result.RESULT_UNKNOWN
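
# Quick illustration (invented log lines): this variant only requires the verdict
# keyword to appear somewhere after "Result:", so a line like "Result: verdict FALSE"
# is still classified, whereas the prefix-based variant above would not match it.
if __name__ == "__main__":
    assert determine_result(None, 0, 0, ["Result: verdict FALSE"], False) == result.RESULT_FALSE_REACH
    assert determine_result(None, 0, 0, ["no verdict printed"], False) == result.RESULT_UNKNOWN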
def assert_file_content_equals(self, content, file):
    if OVERWRITE_MODE:
        benchexec.util.write_file(content, *file)
    else:
        self.assertMultiLineEqual(content, benchexec.util.read_file(*file))
def check_exitcode_extern(self, result, exitcode, msg=None):
    exitcode = util.ProcessExitCode.from_raw(exitcode)
    if exitcode.value is not None:
        self.assertEqual(int(result["returnvalue"]), exitcode.value, msg)
    else:
        self.assertEqual(int(result["exitsignal"]), exitcode.signal, msg)
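
# A brief, hedged illustration of ProcessExitCode.from_raw() as used above: the
# raw value is the status word from os.wait(), which encodes either a return
# value or a terminating signal (expected outputs are noted in comments).
from benchexec.util import ProcessExitCode

pec = ProcessExitCode.from_raw(256)  # normal exit with return value 1
print(pec.value, pec.signal)         # expected: 1 None
pec = ProcessExitCode.from_raw(9)    # process killed by signal 9 (SIGKILL)
print(pec.value, pec.signal)         # expected: None 9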
def test_column_init_no_error_on_default_scale(self):
    Column("memUsed", None, None, None, self.measure_type, "B")

def test_column_init_no_error_on_same_unit_without_scale(self):
    Column("memUsed", None, None, None, self.measure_type, "B", "B", None)