author    Jeremy Johnson <jeremy.johnson@arm.com>    2023-09-14 17:02:09 +0100
committer Jeremy Johnson <jeremy.johnson@arm.com>    2023-10-02 12:04:44 +0100
commit    e2b5e87804e158cb3e5d06a131c317b3890b87b3 (patch)
tree      fd8b5a4d56dfcea4be4e6ced73f2d4d5b2e1d92d /verif/tests
parent    bb0935f868a5ab09403cf3628848655b06ac1dec (diff)
download  reference_model-e2b5e87804e158cb3e5d06a131c317b3890b87b3.tar.gz
Support for compliance checking testing
Updated the conformance generator to not generate tests with results for
compliance tests. Updated the test runner to run the compliance mode
version (precise & abs mode) of the reference model to create test results
to use against SUT results. Updated the reference model to enable abs_mode
on the correct desc.json flags. Updated the test checker to support
compliance checking using the verifier lib. Separated color printing from
the test checker.

Change-Id: I7e2fbfc6883916caa5d94d4ece122c48bf45f530
Signed-off-by: Jeremy Johnson <jeremy.johnson@arm.com>
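For orientation, the tests in this patch drive the runner in-process via main(); a minimal sketch of that invocation, using only the flags visible in the diff below (the paths are placeholders, and the real runner accepts further options not shown here):

from runner.tosa_verif_run_tests import main

# Placeholder paths -- the tests below substitute a fake ref model file
# and a directory containing a generated desc.json.
argv = [
    "--ref-model-path", "path/to/ref_model",
    "--sut-module", "tests.tosa_mock_sut_run",
    "--test", "path/to/test_dir",
]
main(argv)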
Diffstat (limited to 'verif/tests')
-rw-r--r--  verif/tests/test_tosa_run_tests_mocksut.py | 55
1 file changed, 42 insertions, 13 deletions
diff --git a/verif/tests/test_tosa_run_tests_mocksut.py b/verif/tests/test_tosa_run_tests_mocksut.py
index 234f156..fb4a811 100644
--- a/verif/tests/test_tosa_run_tests_mocksut.py
+++ b/verif/tests/test_tosa_run_tests_mocksut.py
@@ -1,5 +1,5 @@
"""Tests for tosa_verif_run_tests.py."""
-# Copyright (c) 2021-2022, ARM Limited.
+# Copyright (c) 2021-2023, ARM Limited.
# SPDX-License-Identifier: Apache-2.0
import json
from copy import deepcopy
@@ -7,6 +7,7 @@ from pathlib import Path
from xml.dom import minidom
import pytest
+from runner.tosa_test_presets import TOSA_REFCOMPLIANCE_RUNNER
from runner.tosa_verif_run_tests import main
@@ -21,11 +22,24 @@ TEST_DESC = {
GRAPH_RESULT_VALID = "valid"
GRAPH_RESULT_ERROR = "error"
+FAKE_REF_MODEL_PATH = Path(__file__).parent / "__fake_ref_model__"
+
+
+def _create_fake_ref_model():
+ """Create a fake ref model to fool the runner."""
+ with FAKE_REF_MODEL_PATH.open("w") as fd:
+ print("Fake ref model for mock testing", file=fd)
+
+
+def _delete_fake_ref_model():
+ """Clean up fake ref model."""
+ FAKE_REF_MODEL_PATH.unlink()
+
def _create_desc_json(json_object) -> Path:
"""Create test desc.json."""
file = Path(__file__).parent / "desc.json"
- with open(file, "w") as fd:
+ with file.open("w") as fd:
json.dump(json_object, fd, indent=2)
return file
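The TEST_DESC dictionary itself is elided from this hunk; judging from the fields the fixtures below manipulate, a minimal sketch could look like this (the concrete values, and any keys beyond ofm_name, ofm_file and expected_failure, are assumptions, not part of the patch):

# Sketch of the elided TEST_DESC -- only the keys used by the fixtures
# below are grounded in this patch; the values are illustrative.
TEST_DESC = {
    "ofm_name": ["tr0"],                # output tensor names (a list; see testDirMultiOutputs)
    "ofm_file": ["test-result-0.npy"],  # one result file per output
}
# testDirExpectedFail additionally sets TEST_DESC["expected_failure"] = True.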
@@ -45,28 +59,33 @@ def _delete_desc_json(file: Path):
def testDir() -> str:
"""Set up a mock expected pass test."""
print("SET UP - testDir")
+ _create_fake_ref_model()
file = _create_desc_json(TEST_DESC)
yield file.parent
print("TEAR DOWN - testDir")
_delete_desc_json(file)
+ _delete_fake_ref_model()
@pytest.fixture
def testDirExpectedFail() -> str:
"""Set up a mock expected fail test."""
print("SET UP - testDirExpectedFail")
+ _create_fake_ref_model()
fail = deepcopy(TEST_DESC)
fail["expected_failure"] = True
file = _create_desc_json(fail)
yield file.parent
print("TEAR DOWN - testDirExpectedFail")
_delete_desc_json(file)
+ _delete_fake_ref_model()
@pytest.fixture
def testDirMultiOutputs() -> str:
"""Set up a mock multiple results output test."""
print("SET UP - testDirMultiOutputs")
+ _create_fake_ref_model()
out = deepcopy(TEST_DESC)
out["ofm_name"].append("tr1")
out["ofm_file"].append("test-result-1.npy")
@@ -74,11 +93,14 @@ def testDirMultiOutputs() -> str:
yield file.parent
print("TEAR DOWN - testDirMultiOutputs")
_delete_desc_json(file)
+ _delete_fake_ref_model()
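All three fixtures now repeat the same fake-ref-model setup and teardown; one way to factor that out (a sketch, not part of this patch) is a small context manager around the two helpers:

from contextlib import contextmanager

@contextmanager
def _fake_ref_model():
    """Create the fake ref model around a test, deleting it afterwards."""
    _create_fake_ref_model()
    try:
        yield FAKE_REF_MODEL_PATH
    finally:
        _delete_fake_ref_model()

Each fixture body could then wrap its yield in "with _fake_ref_model():", so the cleanup runs in one place even if a test raises.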
def _get_default_argv(testDir: Path, graphResult: str) -> list:
"""Create default args based on test directory and graph result."""
return [
+ "--ref-model-path",
+ f"{str(FAKE_REF_MODEL_PATH)}",
"--sut-module",
"tests.tosa_mock_sut_run",
"--test",
@@ -99,13 +121,22 @@ def _get_xml_results(argv: list):
return results
-def _get_xml_testsuites_from_results(results, expectedTestSuites: int):
- """Get XML testcases from results."""
+def _get_xml_testsuites_from_results(results, numExpectedTestSuites: int):
+ """Get XML testsuites from results."""
testSuites = results.getElementsByTagName("testsuite")
- assert len(testSuites) == expectedTestSuites
+ assert len(testSuites) == numExpectedTestSuites
return testSuites
+def _check_xml_testsuites_in_results(results, expectedTestSuites: list):
+ """Check XML testsuites in results."""
+ # Add compliance to expected list
+ expectedTestSuites.append(TOSA_REFCOMPLIANCE_RUNNER)
+ testSuites = _get_xml_testsuites_from_results(results, len(expectedTestSuites))
+ for suite in testSuites:
+ assert suite.getAttribute("name") in expectedTestSuites
+
+
def _get_xml_testcases_from_results(results, expectedTestCases: int):
"""Get XML testcases from results."""
testCases = results.getElementsByTagName("testcase")
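A sketch of what the new _check_xml_testsuites_in_results helper asserts, fed a hand-built results document (suite names taken from this patch):

from xml.dom import minidom
from runner.tosa_test_presets import TOSA_REFCOMPLIANCE_RUNNER

doc = minidom.parseString(
    "<testsuites>"
    '<testsuite name="tests.tosa_mock_sut_run"/>'
    f'<testsuite name="{TOSA_REFCOMPLIANCE_RUNNER}"/>'
    "</testsuites>"
)
_check_xml_testsuites_in_results(doc, ["tests.tosa_mock_sut_run"])

Note that the helper appends TOSA_REFCOMPLIANCE_RUNNER to the list it is given, so callers should pass a fresh list on each call.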
@@ -188,14 +219,13 @@ def test_mock_sut_binary_conversion(testDir: Path):
def test_mock_and_dummy_sut_results(testDir: Path):
"""Run two SUTs and check they both return results."""
try:
+ suts = ["tests.tosa_dummy_sut_run", "tests.tosa_mock_sut_run"]
argv = _get_default_argv(testDir, GRAPH_RESULT_VALID)
# Override sut-module setting with both SUTs
- argv.extend(
- ["--sut-module", "tests.tosa_dummy_sut_run", "tests.tosa_mock_sut_run"]
- )
+ argv.extend(["--sut-module"] + suts)
main(argv)
results = _get_xml_results(argv)
- _get_xml_testsuites_from_results(results, 2)
+ _check_xml_testsuites_in_results(results, suts)
_get_xml_testcases_from_results(results, 2)
except Exception as e:
assert False, f"Unexpected exception {e}"
@@ -204,14 +234,13 @@ def test_mock_and_dummy_sut_results(testDir: Path):
def test_two_mock_suts(testDir: Path):
"""Test that a duplicated SUT is ignored."""
try:
+ sut = ["tests.tosa_mock_sut_run"]
argv = _get_default_argv(testDir, GRAPH_RESULT_VALID)
# Override sut-module setting with duplicated SUT
- argv.extend(
- ["--sut-module", "tests.tosa_mock_sut_run", "tests.tosa_mock_sut_run"]
- )
+ argv.extend(["--sut-module"] + sut * 2)
main(argv)
results = _get_xml_results(argv)
- _get_xml_testsuites_from_results(results, 1)
+ _check_xml_testsuites_in_results(results, sut)
_get_xml_testcases_from_results(results, 1)
except Exception as e:
assert False, f"Unexpected exception {e}"
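Outside this patch, the try/except with assert False used throughout these tests could also be dropped in favor of letting exceptions propagate, which keeps the original traceback; a sketch of test_two_mock_suts in that style:

def test_two_mock_suts_no_wrapper(testDir: Path):
    """Test that a duplicated SUT is ignored (sketch without the wrapper)."""
    sut = ["tests.tosa_mock_sut_run"]
    argv = _get_default_argv(testDir, GRAPH_RESULT_VALID)
    argv.extend(["--sut-module"] + sut * 2)
    main(argv)  # an unexpected exception fails the test directly here
    results = _get_xml_results(argv)
    _check_xml_testsuites_in_results(results, sut)
    _get_xml_testcases_from_results(results, 1)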