aboutsummaryrefslogtreecommitdiff
path: root/verif/tests/test_tosa_run_tests_mocksut.py
blob: 98044e0fdfdfc4cec576f2fd1e7b95b5ff52f09b (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
"""Tests for tosa_verif_run_tests.py."""
# Copyright (c) 2021-2022, ARM Limited.
# SPDX-License-Identifier: Apache-2.0
import json
from copy import deepcopy
from pathlib import Path
from xml.dom import minidom

import pytest

from runner.tosa_verif_run_tests import main


# Template desc.json contents for a mock test: two input tensors and one
# result tensor.  Fixtures deep-copy and tweak this dict as needed.
TEST_DESC = {
    "tosa_file": "pytest.json",
    "ifm_name": ["test-0", "test-1"],
    "ifm_file": ["test-0.npy", "test-1.npy"],
    "ofm_name": ["test-result-0"],
    "ofm_file": ["test-result-0.npy"],
    "expected_failure": False,
}
# Graph result values passed to the mock SUT via --sut-module-args
# (see _get_default_argv).
GRAPH_RESULT_VALID = "valid"
GRAPH_RESULT_ERROR = "error"


def _create_desc_json(json_object) -> Path:
    """Create test desc.json."""
    file = Path(__file__).parent / "desc.json"
    with open(file, "w") as fd:
        json.dump(json_object, fd, indent=2)
    return file


def _delete_desc_json(file: Path):
    """Clean up desc.json."""
    binary_file = file.parent / "desc_binary.json"
    if binary_file.exists():
        print(binary_file.read_text())
        binary_file.unlink()
    else:
        print(file.read_text())
    file.unlink()


@pytest.fixture
def testDir() -> Path:
    """Set up a mock expected pass test."""
    print("SET UP - testDir")
    file = _create_desc_json(TEST_DESC)
    # Tests receive the directory containing desc.json, not the file itself.
    yield file.parent
    print("TEAR DOWN - testDir")
    _delete_desc_json(file)


@pytest.fixture
def testDirExpectedFail() -> Path:
    """Set up a mock expected fail test."""
    print("SET UP - testDirExpectedFail")
    # Copy the template so the shared TEST_DESC is never mutated.
    fail = deepcopy(TEST_DESC)
    fail["expected_failure"] = True
    file = _create_desc_json(fail)
    # Tests receive the directory containing desc.json, not the file itself.
    yield file.parent
    print("TEAR DOWN - testDirExpectedFail")
    _delete_desc_json(file)

@pytest.fixture
def testDirMultiOutputs() -> Path:
    """Set up a mock multiple results output test."""
    print("SET UP - testDirMultiOutputs")
    # Copy the template so the shared TEST_DESC is never mutated.
    out = deepcopy(TEST_DESC)
    # Add a second expected output tensor/file.
    out["ofm_name"].append("tr1")
    out["ofm_file"].append("test-result-1.npy")
    file = _create_desc_json(out)
    # Tests receive the directory containing desc.json, not the file itself.
    yield file.parent
    print("TEAR DOWN - testDirMultiOutputs")
    _delete_desc_json(file)


def _get_default_argv(testDir: Path, graphResult: str) -> list:
    """Create default args based on test directory and graph result."""
    return [
        "--sut-module",
        "tests.tosa_mock_sut_run",
        "--test",
        str(testDir),
        "--xunit-file",
        str(testDir / "result.xml"),
        # Must be last argument to allow easy extension with extra args
        "--sut-module-args",
        f"tests.tosa_mock_sut_run:graph={graphResult}",
    ]


def _get_xml_results(argv: list):
    """Get XML results and remove file."""
    resultsFile = Path(argv[argv.index("--xunit-file") + 1])
    results = minidom.parse(str(resultsFile))
    resultsFile.unlink()
    return results


def _get_xml_testsuites_from_results(results, expectedTestSuites: int):
    """Get XML testcases from results."""
    testSuites = results.getElementsByTagName("testsuite")
    assert len(testSuites) == expectedTestSuites
    return testSuites


def _get_xml_testcases_from_results(results, expectedTestCases: int):
    """Get XML testcases from results."""
    testCases = results.getElementsByTagName("testcase")
    assert len(testCases) == expectedTestCases
    return testCases


def _get_xml_failure(argv: list):
    """Return the failure text of the single expected testcase, or None.

    Consumes (parses then deletes) the xunit results file named in *argv*.
    """
    parsed = _get_xml_results(argv)
    (testcase,) = _get_xml_testcases_from_results(parsed, 1)
    failures = testcase.getElementsByTagName("failure")
    return failures[0].firstChild.data if failures else None


def test_mock_sut_expected_pass(testDir: Path):
    """Run expected pass SUT test."""
    try:
        argv = _get_default_argv(testDir, GRAPH_RESULT_VALID)
        main(argv)
        fail = _get_xml_failure(argv)
    except Exception as e:
        # pytest.fail is not stripped under "python -O" (a bare
        # "assert False" would be, silently passing the test).
        pytest.fail(f"Unexpected exception {e}")
    assert not fail


# Expected prefixes of the xunit <failure> text checked by the tests below.
UNEXPECTED_PASS_PREFIX_STR = "UNEXPECTED_PASS"
UNEXPECTED_FAIL_PREFIX_STR = "UNEXPECTED_FAIL"


def test_mock_sut_unexpected_pass(testDirExpectedFail: Path):
    """Run unexpected pass SUT test."""
    try:
        argv = _get_default_argv(testDirExpectedFail, GRAPH_RESULT_VALID)
        main(argv)
        fail = _get_xml_failure(argv)
    except Exception as e:
        # pytest.fail is not stripped under "python -O" (a bare
        # "assert False" would be, silently passing the test).
        pytest.fail(f"Unexpected exception {e}")
    # Guard against fail being None so a missing <failure> element reports
    # a clean assertion failure rather than an AttributeError.
    assert fail and fail.startswith(UNEXPECTED_PASS_PREFIX_STR)


def test_mock_sut_expected_failure(testDirExpectedFail: Path):
    """Run expected failure SUT test."""
    try:
        argv = _get_default_argv(testDirExpectedFail, GRAPH_RESULT_ERROR)
        main(argv)
        fail = _get_xml_failure(argv)
    except Exception as e:
        # pytest.fail is not stripped under "python -O" (a bare
        # "assert False" would be, silently passing the test).
        pytest.fail(f"Unexpected exception {e}")
    assert not fail


def test_mock_sut_unexpected_failure(testDir: Path):
    """Run unexpected failure SUT test."""
    try:
        argv = _get_default_argv(testDir, GRAPH_RESULT_ERROR)
        main(argv)
        fail = _get_xml_failure(argv)
    except Exception as e:
        # pytest.fail is not stripped under "python -O" (a bare
        # "assert False" would be, silently passing the test).
        pytest.fail(f"Unexpected exception {e}")
    # Guard against fail being None so a missing <failure> element reports
    # a clean assertion failure rather than an AttributeError.
    assert fail and fail.startswith(UNEXPECTED_FAIL_PREFIX_STR)


def test_mock_sut_binary_conversion(testDir: Path):
    """Run SUT test with binary flatbuffer conversion enabled.

    (Original docstring said "unexpected failure" — copy-paste error.)
    """
    try:
        argv = _get_default_argv(testDir, GRAPH_RESULT_VALID)
        argv.extend(["--binary", "--flatc-path", str(testDir / "mock_flatc.py")])
        main(argv)
        # The runner should have produced a converted desc_binary.json;
        # the testDir fixture's teardown removes it.
        binary_desc = testDir / "desc_binary.json"
        assert binary_desc.exists()
        fail = _get_xml_failure(argv)
    except Exception as e:
        # pytest.fail is not stripped under "python -O" (a bare
        # "assert False" would be, silently passing the test).
        pytest.fail(f"Unexpected exception {e}")
    assert not fail


def test_mock_and_dummy_sut_results(testDir: Path):
    """Run two SUTs and check they both return results."""
    try:
        argv = _get_default_argv(testDir, GRAPH_RESULT_VALID)
        # Override sut-module setting with both SUTs
        argv.extend(
            ["--sut-module", "tests.tosa_dummy_sut_run", "tests.tosa_mock_sut_run"]
        )
        main(argv)
        results = _get_xml_results(argv)
        # One testsuite and one testcase per SUT.
        _get_xml_testsuites_from_results(results, 2)
        _get_xml_testcases_from_results(results, 2)
    except Exception as e:
        # pytest.fail is not stripped under "python -O" (a bare
        # "assert False" would be, silently passing the test).
        pytest.fail(f"Unexpected exception {e}")


def test_two_mock_suts(testDir: Path):
    """Test that a duplicated SUT is ignored."""
    try:
        argv = _get_default_argv(testDir, GRAPH_RESULT_VALID)
        # Override sut-module setting with duplicated SUT
        argv.extend(
            ["--sut-module", "tests.tosa_mock_sut_run", "tests.tosa_mock_sut_run"]
        )
        main(argv)
        results = _get_xml_results(argv)
        # The duplicate should be deduplicated: only one suite/case expected.
        _get_xml_testsuites_from_results(results, 1)
        _get_xml_testcases_from_results(results, 1)
    except Exception as e:
        # pytest.fail is not stripped under "python -O" (a bare
        # "assert False" would be, silently passing the test).
        pytest.fail(f"Unexpected exception {e}")


def test_mock_sut_multi_outputs_expected_pass(testDirMultiOutputs: Path):
    """Run expected pass SUT test with multiple outputs."""
    try:
        argv = _get_default_argv(testDirMultiOutputs, GRAPH_RESULT_VALID)
        main(argv)
        fail = _get_xml_failure(argv)
    except Exception as e:
        # pytest.fail is not stripped under "python -O" (a bare
        # "assert False" would be, silently passing the test).
        pytest.fail(f"Unexpected exception {e}")
    assert not fail


def test_mock_sut_multi_outputs_unexpected_failure(testDirMultiOutputs: Path):
    """Run SUT test which expects multiple outputs, but last one is missing."""
    try:
        argv = _get_default_argv(testDirMultiOutputs, GRAPH_RESULT_VALID)
        # Appended after --sut-module-args (last default arg), so this
        # extends the mock SUT's module args to emit only one result.
        argv.append("tests.tosa_mock_sut_run:num_results=1")
        main(argv)
        fail = _get_xml_failure(argv)
    except Exception as e:
        # pytest.fail is not stripped under "python -O" (a bare
        # "assert False" would be, silently passing the test).
        pytest.fail(f"Unexpected exception {e}")
    # Guard against fail being None so a missing <failure> element reports
    # a clean assertion failure rather than an AttributeError.
    assert fail and fail.startswith(UNEXPECTED_FAIL_PREFIX_STR)