diff options
author | Jeremy Johnson <jeremy.johnson@arm.com> | 2023-09-14 17:02:09 +0100 |
---|---|---|
committer | Jeremy Johnson <jeremy.johnson@arm.com> | 2023-10-02 12:04:44 +0100 |
commit | e2b5e87804e158cb3e5d06a131c317b3890b87b3 (patch) | |
tree | fd8b5a4d56dfcea4be4e6ced73f2d4d5b2e1d92d /verif/runner | |
parent | bb0935f868a5ab09403cf3628848655b06ac1dec (diff) | |
download | reference_model-e2b5e87804e158cb3e5d06a131c317b3890b87b3.tar.gz |
Support for compliance checking testing
Updated the conformance generator to not generate tests with results for
compliance tests.
Updated test runner to run compliance mode version (precise & abs mode)
of reference model to create test results to use against SUT results.
Updated reference model to enable abs_mode on correct desc.json flags.
Updated test checker to support compliance checking using verifier lib.
Separated color printing from test checker.
Change-Id: I7e2fbfc6883916caa5d94d4ece122c48bf45f530
Signed-off-by: Jeremy Johnson <jeremy.johnson@arm.com>
Diffstat (limited to 'verif/runner')
-rw-r--r-- | verif/runner/tosa_refmodel_compliance_sut_run.py | 17 | ||||
-rw-r--r-- | verif/runner/tosa_refmodel_sut_run.py | 5 | ||||
-rw-r--r-- | verif/runner/tosa_test_presets.py | 7 | ||||
-rw-r--r-- | verif/runner/tosa_test_runner.py | 128 | ||||
-rw-r--r-- | verif/runner/tosa_verif_run_tests.py | 65 |
5 files changed, 179 insertions, 43 deletions
diff --git a/verif/runner/tosa_refmodel_compliance_sut_run.py b/verif/runner/tosa_refmodel_compliance_sut_run.py new file mode 100644 index 0000000..36e53b6 --- /dev/null +++ b/verif/runner/tosa_refmodel_compliance_sut_run.py @@ -0,0 +1,17 @@ +"""TOSA ref model compliance runner module.""" +# Copyright (c) 2023, ARM Limited. +# SPDX-License-Identifier: Apache-2.0 +from runner.tosa_refmodel_sut_run import TosaSUTRunner as TosaRefRunner + + +class TosaSUTRunner(TosaRefRunner): + """Compliance mode enabled ref model runner.""" + + def __init__(self, args, runnerArgs, testDirPath): + """Initialize the TosaTestRunner base class""" + super().__init__(args, runnerArgs, testDirPath) + + # Override - Set compliance mode precise FP64 calculations + self.compliance = True + + # All other functions inherited from refmodel_sut_run diff --git a/verif/runner/tosa_refmodel_sut_run.py b/verif/runner/tosa_refmodel_sut_run.py index 419f87b..d9eb108 100644 --- a/verif/runner/tosa_refmodel_sut_run.py +++ b/verif/runner/tosa_refmodel_sut_run.py @@ -25,6 +25,9 @@ class TosaSUTRunner(TosaTestRunner): """Initialize using the given test details.""" super().__init__(args, runnerArgs, testDirPath) + # Don't do any compliance runs + self.compliance = False + def runTestGraph(self): """Run the test on the reference model.""" # Build up the TOSA reference command line @@ -46,7 +49,7 @@ class TosaSUTRunner(TosaTestRunner): if args.ref_intermediates: cmd.extend(["--dump_intermediates", str(args.ref_intermediates)]) - if args.precise_mode: + if args.precise_mode or self.compliance: cmd.extend(["--precise_mode=1"]) # Run command and interpret tosa graph result via process return codes diff --git a/verif/runner/tosa_test_presets.py b/verif/runner/tosa_test_presets.py new file mode 100644 index 0000000..c45550d --- /dev/null +++ b/verif/runner/tosa_test_presets.py @@ -0,0 +1,7 @@ +"""Presets file for test running.""" +# Copyright (c) 2023, ARM Limited. 
+# SPDX-License-Identifier: Apache-2.0 + +TOSA_REFCOMPLIANCE_RUNNER = "runner.tosa_refmodel_compliance_sut_run" +TOSA_REFMODEL_RUNNER = "runner.tosa_refmodel_sut_run" +MAX_XUNIT_TEST_MESSAGE = 1000 diff --git a/verif/runner/tosa_test_runner.py b/verif/runner/tosa_test_runner.py index 579dd60..30a7168 100644 --- a/verif/runner/tosa_test_runner.py +++ b/verif/runner/tosa_test_runner.py @@ -4,11 +4,43 @@ import json from enum import IntEnum -from checker.tosa_result_checker import LogColors -from checker.tosa_result_checker import print_color -from checker.tosa_result_checker import set_print_in_color +import conformance.model_files as cmf +import schemavalidation.schemavalidation as sch +from checker.color_print import LogColors +from checker.color_print import print_color +from checker.color_print import set_print_in_color +from checker.tosa_result_checker import set_print_result from checker.tosa_result_checker import test_check from json2fbbin import json2fbbin +from runner.tosa_test_presets import TOSA_REFCOMPLIANCE_RUNNER + + +def isComplianceModeDotProduct(testDesc): + """Checks the test descriptor for DOT_PRODUCT compliance mode.""" + if ( + "meta" in testDesc + and "compliance" in testDesc["meta"] + and "tensors" in testDesc["meta"]["compliance"] + ): + for _, t in testDesc["meta"]["compliance"]["tensors"].items(): + if "mode" in t and t["mode"] == "DOT_PRODUCT": + return True + return False + + +def getRunnerResultFilePath(resultFilePath, sutModule): + """Return the result file path with the runner specific naming.""" + return resultFilePath.with_suffix(f".{sutModule}{resultFilePath.suffix}") + + +def getBoundsResultFilePath(resultFilePath, sutModule=None): + """Return the bounds result file with/without runner specific naming.""" + boundsFilePath = resultFilePath.parent / f"bounds_{resultFilePath.name}" + if sutModule is not None: + boundsFilePath = boundsFilePath.with_suffix( + f".{sutModule}{boundsFilePath.suffix}" + ) + return boundsFilePath class 
TosaTestInvalid(Exception): @@ -39,8 +71,13 @@ class TosaTestRunner: self.testDir = str(testDirPath) self.testDirPath = testDirPath self.testName = self.testDirPath.name + self.verify_lib_path = cmf.find_tosa_file( + cmf.TosaFileType.VERIFY_LIBRARY, args.ref_model_path + ) set_print_in_color(not args.no_color) + # Stop the result checker printing anything - we will do it + set_print_result(False) # Check if we want to run binary and if its already converted descFilePath = testDirPath / "desc.json" @@ -53,6 +90,8 @@ class TosaTestRunner: # Load the json test file with descFilePath.open("r") as fd: self.testDesc = json.load(fd) + # Validate the json with the schema + sch.TestDescSchemaValidator().validate_config(self.testDesc) except Exception as e: raise TosaTestInvalid(str(descFilePath), e) @@ -76,6 +115,16 @@ class TosaTestRunner: self.descFile = str(descFilePath) self.descFilePath = descFilePath + # Check for compliance mode - need to run refmodel to get results + if "meta" in self.testDesc and "compliance" in self.testDesc["meta"]: + self.complianceMode = True + if "expected_result" in self.testDesc: + if self.args.verbose: + print("Warning: fixing conflicting compliance mode in test.desc") + self.testDesc.pop("expected_result") + else: + self.complianceMode = False + def skipTest(self): """Check if the test is skipped due to test type or profile selection.""" expectedFailure = self.testDesc["expected_failure"] @@ -96,7 +145,9 @@ class TosaTestRunner: def testResult(self, tosaGraphResult, graphMessage=None): """Work out test result based on graph result and output files.""" expectedFailure = self.testDesc["expected_failure"] - print_result_line = True + print_check_result = False + + sutModule = self.__module__ if tosaGraphResult == TosaTestRunner.TosaGraphResult.TOSA_VALID: if expectedFailure: @@ -107,8 +158,25 @@ class TosaTestRunner: # but overriding this with any failures found result = TosaTestRunner.Result.EXPECTED_PASS messages = [] + + # Go through each 
output result checking it for resultNum, resultFileName in enumerate(self.testDesc["ofm_file"]): - if "expected_result_file" in self.testDesc: + resultFilePath = self.testDirPath / resultFileName + + # Work out the file to check against (if any) + if self.complianceMode and sutModule != TOSA_REFCOMPLIANCE_RUNNER: + conformanceFilePath = getRunnerResultFilePath( + resultFilePath, TOSA_REFCOMPLIANCE_RUNNER + ) + if isComplianceModeDotProduct(self.testDesc): + conformanceBoundsPath = getBoundsResultFilePath( + resultFilePath, TOSA_REFCOMPLIANCE_RUNNER + ) + else: + # Not expecting a bounds file for this test + conformanceBoundsPath = None + elif "expected_result_file" in self.testDesc: + conformanceBoundsPath = None try: conformanceFilePath = ( self.testDirPath @@ -123,15 +191,20 @@ class TosaTestRunner: print(msg) break else: + # Nothing to check against conformanceFilePath = None - resultFilePath = self.testDirPath / resultFileName + conformanceBoundsPath = None if conformanceFilePath: - print_result_line = False # Checker will print one for us + print_check_result = True # Result from checker chkResult, tolerance, msg = test_check( conformanceFilePath, resultFilePath, test_name=self.testName, + test_desc=self.testDesc, + bnd_result_path=conformanceBoundsPath, + ofm_name=self.testDesc["ofm_name"][resultNum], + verify_lib_path=self.verify_lib_path, ) # Change EXPECTED_PASS assumption if we have any failures if chkResult != 0: @@ -143,18 +216,31 @@ class TosaTestRunner: # No conformance file to verify, just check results file exists if not resultFilePath.is_file(): result = TosaTestRunner.Result.UNEXPECTED_FAILURE - msg = "Results file is missing: {}".format(resultFilePath) + msg = f"Results file is missing: {resultFilePath}" messages.append(msg) print(msg) if resultFilePath.is_file(): # Move the resultFilePath to allow subsequent system under # tests to create them and to test they have been created - resultFilePath = resultFilePath.rename( - 
resultFilePath.with_suffix( - ".{}{}".format(self.__module__, resultFilePath.suffix) - ) + # and to enable compliance testing against refmodel results + resultFilePath.rename( + getRunnerResultFilePath(resultFilePath, sutModule) ) + if ( + isComplianceModeDotProduct(self.testDesc) + and sutModule == TOSA_REFCOMPLIANCE_RUNNER + ): + boundsFilePath = getBoundsResultFilePath(resultFilePath) + if boundsFilePath.is_file(): + boundsFilePath = boundsFilePath.rename( + getBoundsResultFilePath(resultFilePath, sutModule) + ) + else: + result = TosaTestRunner.Result.INTERNAL_ERROR + msg = f"Internal error: Missing expected dot product compliance bounds file {boundsFilePath}" + messages.append(msg) + print(msg) resultMessage = "\n".join(messages) if len(messages) > 0 else None else: @@ -168,16 +254,14 @@ class TosaTestRunner: result = TosaTestRunner.Result.UNEXPECTED_FAILURE resultMessage = graphMessage - if print_result_line: - if ( - result == TosaTestRunner.Result.EXPECTED_FAILURE - or result == TosaTestRunner.Result.EXPECTED_PASS - ): - print_color( - LogColors.GREEN, "Result code PASS {}".format(self.testName) - ) - else: - print_color(LogColors.RED, "Result code FAIL {}".format(self.testName)) + status = "Result" if print_check_result else "Result code" + if ( + result == TosaTestRunner.Result.EXPECTED_FAILURE + or result == TosaTestRunner.Result.EXPECTED_PASS + ): + print_color(LogColors.GREEN, f"{sutModule}: {status} PASS {self.testName}") + else: + print_color(LogColors.RED, f"{sutModule}: {status} FAIL {self.testName}") return result, resultMessage diff --git a/verif/runner/tosa_verif_run_tests.py b/verif/runner/tosa_verif_run_tests.py index 722c0e7..d1755e6 100644 --- a/verif/runner/tosa_verif_run_tests.py +++ b/verif/runner/tosa_verif_run_tests.py @@ -3,6 +3,7 @@ # SPDX-License-Identifier: Apache-2.0 import argparse import importlib +import json import os import queue import threading @@ -11,14 +12,12 @@ from datetime import datetime from pathlib import Path import 
conformance.model_files as cmf +import runner.tosa_test_presets as ttp from json2numpy import json2numpy from runner.tosa_test_runner import TosaTestInvalid from runner.tosa_test_runner import TosaTestRunner from xunit import xunit -TOSA_REFMODEL_RUNNER = "runner.tosa_refmodel_sut_run" -MAX_XUNIT_TEST_MESSAGE = 1000 - def parseArgs(argv): """Parse the arguments and return the settings.""" @@ -104,7 +103,7 @@ def parseArgs(argv): dest="sut_module", type=str, nargs="+", - default=[TOSA_REFMODEL_RUNNER], + default=[ttp.TOSA_REFMODEL_RUNNER], help="System under test module to load (derives from TosaTestRunner). May be repeated", ) parser.add_argument( @@ -175,18 +174,20 @@ EXCLUSION_PREFIX = ["test", "model", "desc"] def convert2Numpy(test_path): """Convert all the JSON numpy files back into binary numpy.""" jsons = test_path.glob("*.json") - for json in jsons: + for j in jsons: for exclude in EXCLUSION_PREFIX: - if json.name.startswith(exclude): - json = None + if j.name.startswith(exclude): + j = None break - if json: + if j: # debug print(f"Converting {json}") - json2numpy.json_to_npy(json) + json2numpy.json_to_npy(j) -def workerThread(task_queue, runnerList, args, result_queue): +def workerThread(task_queue, runnerList, complianceRunner, args, result_queue): """Worker thread that runs the next test from the queue.""" + complianceRunnerList = runnerList.copy() + complianceRunnerList.insert(0, (complianceRunner, [])) while True: try: test_path = task_queue.get(block=False) @@ -196,9 +197,24 @@ def workerThread(task_queue, runnerList, args, result_queue): if test_path is None: break + try: + # Check for compliance test + desc = test_path / "desc.json" + with desc.open("r") as fd: + j = json.load(fd) + compliance = "compliance" in j["meta"] + except Exception: + compliance = False + + if compliance: + # Run compliance first to create output files! 
+ currentRunners = complianceRunnerList + else: + currentRunners = runnerList + msg = "" converted = False - for runnerModule, runnerArgs in runnerList: + for runnerModule, runnerArgs in currentRunners: try: start_time = datetime.now() # Set up system under test runner @@ -358,8 +374,11 @@ def main(argv=None): cmf.TosaFileType.SCHEMA, args.ref_model_path ) - if TOSA_REFMODEL_RUNNER in args.sut_module and not args.ref_model_path.is_file(): - print(f"Argument error: Reference Model not found - {str(args.ref_model_path)}") + # Always check as it will be needed for compliance + if not args.ref_model_path.is_file(): + print( + f"Argument error: Reference Model not found - ({str(args.ref_model_path)})" + ) exit(2) if args.test_list_file: @@ -374,7 +393,12 @@ def main(argv=None): ) exit(2) + # Load in the runner modules and the ref model compliance module runnerList = loadSUTRunnerModules(args) + complianceRunner = importlib.import_module(ttp.TOSA_REFCOMPLIANCE_RUNNER) + # Create a separate reporting runner list as the compliance runner may not + # be always run - depends on compliance testing + fullRunnerList = runnerList + [(complianceRunner, [])] threads = [] taskQueue = queue.Queue() @@ -404,7 +428,8 @@ def main(argv=None): for i in range(args.jobs): t = threading.Thread( - target=workerThread, args=(taskQueue, runnerList, args, resultQueue) + target=workerThread, + args=(taskQueue, runnerList, complianceRunner, args, resultQueue), ) t.setDaemon(True) t.start() @@ -415,7 +440,7 @@ def main(argv=None): # Set up results lists for each system under test resultLists = {} results = {} - for runnerModule, _ in runnerList: + for runnerModule, _ in fullRunnerList: runner = runnerModule.__name__ resultLists[runner] = [] results[runner] = [0] * len(TosaTestRunner.Result) @@ -428,19 +453,19 @@ def main(argv=None): break # Limit error messages to make results easier to digest - if msg and len(msg) > MAX_XUNIT_TEST_MESSAGE: - half = int(MAX_XUNIT_TEST_MESSAGE / 2) - trimmed = 
len(msg) - MAX_XUNIT_TEST_MESSAGE + if msg and len(msg) > ttp.MAX_XUNIT_TEST_MESSAGE: + half = int(ttp.MAX_XUNIT_TEST_MESSAGE / 2) + trimmed = len(msg) - ttp.MAX_XUNIT_TEST_MESSAGE msg = "{} ...\nskipped {} bytes\n... {}".format( msg[:half], trimmed, msg[-half:] ) resultLists[runner].append((test_path, rc, msg, time_delta)) results[runner][rc] += 1 - createXUnitResults(args.xunit_file, runnerList, resultLists, args.verbose) + createXUnitResults(args.xunit_file, fullRunnerList, resultLists, args.verbose) # Print out results for each system under test - for runnerModule, _ in runnerList: + for runnerModule, _ in fullRunnerList: runner = runnerModule.__name__ resultSummary = [] for result in TosaTestRunner.Result: |