Diffstat (limited to 'verif/runner')
 -rw-r--r--  verif/runner/tosa_refmodel_sut_run.py  |  4
 -rw-r--r--  verif/runner/tosa_test_runner.py       | 50
 -rw-r--r--  verif/runner/tosa_verif_run_tests.py   | 89
 3 files changed, 91 insertions(+), 52 deletions(-)
diff --git a/verif/runner/tosa_refmodel_sut_run.py b/verif/runner/tosa_refmodel_sut_run.py
index 6acaaf4..95f6e7b 100644
--- a/verif/runner/tosa_refmodel_sut_run.py
+++ b/verif/runner/tosa_refmodel_sut_run.py
@@ -21,9 +21,9 @@ class TosaRefReturnCode(IntEnum):
class TosaSUTRunner(TosaTestRunner):
"""TOSA Reference Model runner."""
- def __init__(self, args, runnerArgs, testDir):
+ def __init__(self, args, runnerArgs, testDirPath):
"""Initialize using the given test details."""
- super().__init__(args, runnerArgs, testDir)
+ super().__init__(args, runnerArgs, testDirPath)
def runTestGraph(self):
"""Run the test on the reference model."""
diff --git a/verif/runner/tosa_test_runner.py b/verif/runner/tosa_test_runner.py
index 65931d8..d8c2a87 100644
--- a/verif/runner/tosa_test_runner.py
+++ b/verif/runner/tosa_test_runner.py
@@ -33,47 +33,49 @@ class TosaTestInvalid(Exception):
class TosaTestRunner:
"""TOSA Test Runner template class for systems under test."""
- def __init__(self, args, runnerArgs, testDir):
+ def __init__(self, args, runnerArgs, testDirPath):
"""Initialize and load JSON meta data file."""
self.args = args
self.runnerArgs = runnerArgs
- self.testDir = testDir
- self.testName = Path(self.testDir).name
+ self.testDir = str(testDirPath)
+ self.testDirPath = testDirPath
+ self.testName = self.testDirPath.name
set_print_in_color(not args.no_color)
# Check if we want to run binary and if its already converted
- descFilePath = Path(testDir, "desc.json")
- descBinFilePath = Path(testDir, "desc_binary.json")
+ descFilePath = testDirPath / "desc.json"
+ descBinFilePath = testDirPath / "desc_binary.json"
if args.binary:
if descBinFilePath.is_file():
descFilePath = descBinFilePath
try:
# Load the json test file
- with open(descFilePath, "r") as fd:
+ with descFilePath.open("r") as fd:
self.testDesc = json.load(fd)
except Exception as e:
raise TosaTestInvalid(str(descFilePath), e)
# Convert to binary if needed
- tosaFilePath = Path(testDir, self.testDesc["tosa_file"])
+ tosaFilePath = testDirPath / self.testDesc["tosa_file"]
if args.binary and tosaFilePath.suffix == ".json":
# Convert tosa JSON to binary
json2fbbin.json_to_fbbin(
Path(args.flatc_path),
Path(args.operator_fbs),
tosaFilePath,
- Path(testDir),
+ testDirPath,
)
# Write new desc_binary file
self.testDesc["tosa_file"] = tosaFilePath.stem + ".tosa"
- with open(descBinFilePath, "w") as fd:
+ with descBinFilePath.open("w") as fd:
json.dump(self.testDesc, fd, indent=2)
descFilePath = descBinFilePath
# Set location of desc.json (or desc_binary.json) file in use
self.descFile = str(descFilePath)
+ self.descFilePath = descFilePath
def skipTest(self):
"""Check if the test is skipped due to test type or profile selection."""
@@ -109,9 +111,9 @@ class TosaTestRunner:
for resultNum, resultFileName in enumerate(self.testDesc["ofm_file"]):
if "expected_result_file" in self.testDesc:
try:
- conformanceFile = Path(
- self.testDir,
- self.testDesc["expected_result_file"][resultNum],
+ conformanceFilePath = (
+ self.testDirPath
+ / self.testDesc["expected_result_file"][resultNum]
)
except IndexError:
result = TosaTestRunner.Result.INTERNAL_ERROR
@@ -122,14 +124,14 @@ class TosaTestRunner:
print(msg)
break
else:
- conformanceFile = None
- resultFile = Path(self.testDir, resultFileName)
+ conformanceFilePath = None
+ resultFilePath = self.testDirPath / resultFileName
- if conformanceFile:
+ if conformanceFilePath:
print_result_line = False # Checker will print one for us
chkResult, tolerance, msg = test_check(
- str(conformanceFile),
- str(resultFile),
+ conformanceFilePath,
+ resultFilePath,
test_name=self.testName,
)
# Change EXPECTED_PASS assumption if we have any failures
@@ -140,18 +142,18 @@ class TosaTestRunner:
print(msg)
else:
# No conformance file to verify, just check results file exists
- if not resultFile.is_file():
+ if not resultFilePath.is_file():
result = TosaTestRunner.Result.UNEXPECTED_FAILURE
- msg = "Results file is missing: {}".format(resultFile)
+ msg = "Results file is missing: {}".format(resultFilePath)
messages.append(msg)
print(msg)
- if resultFile.is_file():
- # Move the resultFile to allow subsequent system under
+ if resultFilePath.is_file():
+ # Move the resultFilePath to allow subsequent system under
# tests to create them and to test they have been created
- resultFile = resultFile.rename(
- resultFile.with_suffix(
- ".{}{}".format(self.__module__, resultFile.suffix)
+ resultFilePath = resultFilePath.rename(
+ resultFilePath.with_suffix(
+ ".{}{}".format(self.__module__, resultFilePath.suffix)
)
)
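The rename above tags each results file with the runner's module name so the next system under test can write a fresh file at the original path; a small sketch of how the with_suffix() call composes the new name (file and module names are placeholders):

    # Sketch only: composing the renamed results file.
    from pathlib import Path

    result_path = Path("tests/add_1x1x1x1_i8/ofm.npy")   # hypothetical results file
    module_name = "tosa_refmodel_sut_run"                # stands in for self.__module__
    renamed = result_path.with_suffix(
        ".{}{}".format(module_name, result_path.suffix)
    )
    # renamed == Path("tests/add_1x1x1x1_i8/ofm.tosa_refmodel_sut_run.npy")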
diff --git a/verif/runner/tosa_verif_run_tests.py b/verif/runner/tosa_verif_run_tests.py
index 77394cc..ddb32a4 100644
--- a/verif/runner/tosa_verif_run_tests.py
+++ b/verif/runner/tosa_verif_run_tests.py
@@ -2,7 +2,6 @@
# Copyright (c) 2020-2022, ARM Limited.
# SPDX-License-Identifier: Apache-2.0
import argparse
-import glob
import importlib
import os
import queue
@@ -40,6 +39,13 @@ def parseArgs(argv):
help="File containing list of tests to run (one per line)",
)
parser.add_argument(
+ "-r",
+ "--recursive",
+ dest="recursive_tests",
+ action="store_true",
+ help="Recursively search for tests",
+ )
+ parser.add_argument(
"--operator-fbs",
dest="operator_fbs",
default="conformance_tests/third_party/serialization_lib/schema/tosa.fbs",
@@ -146,27 +152,28 @@ def parseArgs(argv):
EXCLUSION_PREFIX = ["test", "model", "desc"]
-def convert2Numpy(testDir):
+def convert2Numpy(test_path):
"""Convert all the JSON numpy files back into binary numpy."""
- jsons = glob.glob(os.path.join(testDir, "*.json"))
+ jsons = test_path.glob("*.json")
for json in jsons:
for exclude in EXCLUSION_PREFIX:
- if os.path.basename(json).startswith(exclude):
- json = ""
+ if json.name.startswith(exclude):
+ json = None
+ break
if json:
- # debug print("Converting " + json)
- json2numpy.json_to_npy(Path(json))
+ # debug print(f"Converting {json}")
+ json2numpy.json_to_npy(json)
def workerThread(task_queue, runnerList, args, result_queue):
"""Worker thread that runs the next test from the queue."""
while True:
try:
- test = task_queue.get(block=False)
+ test_path = task_queue.get(block=False)
except queue.Empty:
break
- if test is None:
+ if test_path is None:
break
msg = ""
@@ -176,21 +183,25 @@ def workerThread(task_queue, runnerList, args, result_queue):
start_time = datetime.now()
# Set up system under test runner
runnerName = runnerModule.__name__
- runner = runnerModule.TosaSUTRunner(args, runnerArgs, test)
+ runner = runnerModule.TosaSUTRunner(args, runnerArgs, test_path)
skip, reason = runner.skipTest()
if skip:
msg = "Skipping {} test".format(reason)
- print("{} {}".format(msg, test))
+ print("{} {}".format(msg, test_path))
rc = TosaTestRunner.Result.SKIPPED
else:
# Convert JSON data files into numpy format on first pass
if not converted:
- convert2Numpy(test)
+ convert2Numpy(test_path)
converted = True
if args.verbose:
- print("Running runner {} with test {}".format(runnerName, test))
+ print(
+ "Running runner {} with test {}".format(
+ runnerName, test_path
+ )
+ )
try:
grc, gmsg = runner.runTestGraph()
rc, msg = runner.testResult(grc, gmsg)
@@ -220,7 +231,9 @@ def workerThread(task_queue, runnerList, args, result_queue):
rc = TosaTestRunner.Result.INTERNAL_ERROR
finally:
end_time = datetime.now()
- result_queue.put((runnerName, test, rc, msg, end_time - start_time))
+ result_queue.put(
+ (runnerName, test_path, rc, msg, end_time - start_time)
+ )
task_queue.task_done()
@@ -262,10 +275,10 @@ def createXUnitResults(xunitFile, runnerList, resultLists, verbose):
xunit_suite = xunit_result.create_suite(runner)
# Sort by test name
- for test, rc, msg, time_delta in sorted(
+ for test_path, rc, msg, time_delta in sorted(
resultLists[runner], key=lambda tup: tup[0]
):
- test_name = test
+ test_name = str(test_path)
xt = xunit.xunit_test(test_name, runner)
xt.time = str(
@@ -293,12 +306,27 @@ def createXUnitResults(xunitFile, runnerList, resultLists, verbose):
xunit_result.write_results(xunitFile)
+def getTestsInPath(path):
+ # Recursively find any tests in this directory
+ desc_path = path / "desc.json"
+ if desc_path.is_file():
+ return [path]
+ elif path.is_dir():
+ path_list = []
+ for p in path.glob("*"):
+ path_list.extend(getTestsInPath(p))
+ return path_list
+ else:
+ return []
+
+
def main(argv=None):
"""Start worker threads to do the testing and outputs the results."""
args = parseArgs(argv)
- if TOSA_REFMODEL_RUNNER in args.sut_module and not os.path.isfile(
- args.ref_model_path
+ if (
+ TOSA_REFMODEL_RUNNER in args.sut_module
+ and not Path(args.ref_model_path).is_file()
):
print(
"Argument error: Reference Model not found ({})".format(args.ref_model_path)
@@ -307,7 +335,7 @@ def main(argv=None):
if args.test_list_file:
try:
- with open(args.test_list_file) as f:
+ with args.test_list_file.open("r") as f:
args.test = f.read().splitlines()
except Exception as e:
print(
@@ -323,12 +351,21 @@ def main(argv=None):
taskQueue = queue.Queue()
resultQueue = queue.Queue()
- for t in args.test:
- if os.path.isfile(t):
- if not os.path.basename(t) == "README":
- print("Warning: Skipping test {} as not a valid directory".format(t))
+ for tdir in args.test:
+ tpath = Path(tdir)
+ if tpath.is_file():
+ if tpath.name != "README":
+ print(
+ "Warning: Skipping test {} as not a valid directory".format(tpath)
+ )
else:
- taskQueue.put((t))
+ if args.recursive_tests:
+ tpath_list = getTestsInPath(tpath)
+ else:
+ tpath_list = [tpath]
+
+ for t in tpath_list:
+ taskQueue.put((t))
print(
"Running {} tests on {} system{} under test".format(
@@ -356,7 +393,7 @@ def main(argv=None):
while True:
try:
- runner, test, rc, msg, time_delta = resultQueue.get(block=False)
+ runner, test_path, rc, msg, time_delta = resultQueue.get(block=False)
resultQueue.task_done()
except queue.Empty:
break
@@ -368,7 +405,7 @@ def main(argv=None):
msg = "{} ...\nskipped {} bytes\n... {}".format(
msg[:half], trimmed, msg[-half:]
)
- resultLists[runner].append((test, rc, msg, time_delta))
+ resultLists[runner].append((test_path, rc, msg, time_delta))
results[runner][rc] += 1
createXUnitResults(args.xunit_file, runnerList, resultLists, args.verbose)