author    Jeremy Johnson <jeremy.johnson@arm.com>    2022-09-15 10:38:17 +0100
committer Eric Kunze <eric.kunze@arm.com>            2022-11-19 00:42:57 +0000
commit    e4b08ffbe457c8932740e3171964cf2e7cd69b4f (patch)
tree      eaa7a48d1de8e1819398c63110e99f993c0aa847
parent    52460a8b21e3691cd56b22e62986fa24012b8e68 (diff)
download  reference_model-e4b08ffbe457c8932740e3171964cf2e7cd69b4f.tar.gz
Initial setup of Main Inference conformance test gen
tosa-verif-build-tests
- option for setting FP values range
- option for recursively finding tests
- change from os.path to Path

tosa_verif_result_check
- option to supply FP tolerance
- output difference and max tolerance on contents mismatch
- change from os.path to Path

MI conformance
- contains examples of AVG_POOL2D and CONV2D tests

Signed-off-by: Jeremy Johnson <jeremy.johnson@arm.com>
Change-Id: I8e1645cd8f10308604400ea53eef723ca163eed7
-rw-r--r--  verif/checker/tosa_result_checker.py                    56
-rw-r--r--  verif/conformance/test_select.py                        31
-rw-r--r--  verif/conformance/tosa_main_profile_ops_info.json       96
-rw-r--r--  verif/conformance/tosa_verif_conformance_generator.py  298
-rw-r--r--  verif/generator/tosa_test_gen.py                        34
-rw-r--r--  verif/generator/tosa_verif_build_tests.py               38
-rw-r--r--  verif/runner/tosa_refmodel_sut_run.py                    4
-rw-r--r--  verif/runner/tosa_test_runner.py                        50
-rw-r--r--  verif/runner/tosa_verif_run_tests.py                    89
-rw-r--r--  verif/tests/test_tosa_refmodel.py                        4
-rw-r--r--  verif/tests/tosa_dummy_sut_run.py                        4
11 files changed, 488 insertions(+), 216 deletions(-)
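
For callers of the checker, the headline change is that test_check() now takes pathlib.Path arguments and an overridable floating point tolerance. A minimal sketch of calling it after this patch (import path inferred from the file layout; the .npy file names are hypothetical):

    from pathlib import Path

    from checker.tosa_result_checker import TestResult, test_check

    result, tolerance, msg = test_check(
        Path("ref_ofm.npy"),          # reference_path - was a plain string path
        Path("sut_ofm.npy"),          # result_path
        test_name="conv2d_example",
        float_tolerance=1e-2,         # previously hard-wired to 1e-3
    )
    if result != TestResult.PASS:
        print(msg)   # now includes the difference and the max tolerance needed

The same tolerance is exposed on the command line as --fp-tolerance (see the main() change below).
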
diff --git a/verif/checker/tosa_result_checker.py b/verif/checker/tosa_result_checker.py
index b7a76b6..1169a95 100644
--- a/verif/checker/tosa_result_checker.py
+++ b/verif/checker/tosa_result_checker.py
@@ -2,7 +2,6 @@
# Copyright (c) 2020-2022, ARM Limited.
# SPDX-License-Identifier: Apache-2.0
import argparse
-import os
from enum import Enum
from enum import IntEnum
from enum import unique
@@ -62,37 +61,41 @@ TestResultErrorStr = [
]
##################################
+DEFAULT_FP_TOLERANCE = 1e-3
+
def test_check(
- reference,
- result,
+ reference_path,
+ result_path,
test_name="test",
quantize_tolerance=0,
- float_tolerance=1e-3,
+ float_tolerance=DEFAULT_FP_TOLERANCE,
misc_checks=[],
):
"""Check if the result is the same as the expected reference."""
- if not os.path.isfile(reference):
+ if not reference_path.is_file():
print_color(LogColors.RED, "Reference MISSING FILE {}".format(test_name))
- msg = "Missing reference file: {}".format(reference)
+ msg = "Missing reference file: {}".format(reference_path)
return (TestResult.MISSING_FILE, 0.0, msg)
- if not os.path.isfile(result):
+ if not result_path.is_file():
print_color(LogColors.RED, "Results MISSING FILE {}".format(test_name))
- msg = "Missing result file: {}".format(result)
+ msg = "Missing result file: {}".format(result_path)
return (TestResult.MISSING_FILE, 0.0, msg)
try:
- test_result = np.load(result)
+ test_result = np.load(result_path)
except Exception as e:
print_color(LogColors.RED, "Results INCORRECT FORMAT {}".format(test_name))
- msg = "Incorrect numpy format of {}\nnumpy.load exception: {}".format(result, e)
+ msg = "Incorrect numpy format of {}\nnumpy.load exception: {}".format(
+ result_path, e
+ )
return (TestResult.INCORRECT_FORMAT, 0.0, msg)
try:
- reference_result = np.load(reference)
+ reference_result = np.load(reference_path)
except Exception as e:
print_color(LogColors.RED, "Reference INCORRECT FORMAT {}".format(test_name))
msg = "Incorrect numpy format of {}\nnumpy.load exception: {}".format(
- reference, e
+ reference_path, e
)
return (TestResult.INCORRECT_FORMAT, 0.0, msg)
@@ -109,6 +112,7 @@ def test_check(
# >= 0, allow that special case
test_result = np.squeeze(test_result)
reference_result = np.squeeze(reference_result)
+ difference = None
if np.shape(test_result) != np.shape(reference_result):
print_color(LogColors.RED, "Results MISCOMPARE {}".format(test_name))
@@ -155,6 +159,7 @@ def test_check(
)
)
# Fall-through to below to add failure values
+ difference = reference_result - test_result
elif reference_result.dtype == bool:
assert test_result.dtype == bool
@@ -165,6 +170,7 @@ def test_check(
return (TestResult.PASS, 0.0, "")
msg = "Boolean result does not match"
tolerance = 0.0
+ difference = None
# Fall-through to below to add failure values
# TODO: update for fp16 tolerance
@@ -174,6 +180,7 @@ def test_check(
print_color(LogColors.GREEN, "Results PASS {}".format(test_name))
return (TestResult.PASS, tolerance, "")
msg = "Float result does not match within tolerance of {}".format(tolerance)
+ difference = reference_result - test_result
# Fall-through to below to add failure values
else:
print_color(LogColors.RED, "Results UNSUPPORTED TYPE {}".format(test_name))
@@ -182,16 +189,24 @@ def test_check(
# Fall-through for mismatch failure to add values to msg
print_color(LogColors.RED, "Results MISCOMPARE {}".format(test_name))
- np.set_printoptions(threshold=128)
- msg = "{}\ntest_result: {}\n{}".format(msg, test_result.shape, test_result)
- msg = "{}\nreference_result: {}\n{}".format(
+ np.set_printoptions(threshold=128, edgeitems=2)
+
+ if difference is not None:
+ tolerance_needed = np.amax(np.absolute(difference))
+ msg = "{}\n-- tolerance_needed: {}".format(msg, tolerance_needed)
+
+ msg = "{}\n>> reference_result: {}\n{}".format(
msg, reference_result.shape, reference_result
)
+ msg = "{}\n<< test_result: {}\n{}".format(msg, test_result.shape, test_result)
+
+ if difference is not None:
+ msg = "{}\n!! difference_result: \n{}".format(msg, difference)
return (TestResult.MISMATCH, tolerance, msg)
def main(argv=None):
- """Check that the supplied reference and result files are the same."""
+ """Check that the supplied reference and result files have the same contents."""
parser = argparse.ArgumentParser()
parser.add_argument(
"reference_path", type=Path, help="the path to the reference file to test"
@@ -199,11 +214,14 @@ def main(argv=None):
parser.add_argument(
"result_path", type=Path, help="the path to the result file to test"
)
+ parser.add_argument(
+ "--fp-tolerance", type=float, default=DEFAULT_FP_TOLERANCE, help="FP tolerance"
+ )
args = parser.parse_args(argv)
- ref_path = args.reference_path
- res_path = args.result_path
- result, tolerance, msg = test_check(ref_path, res_path)
+ result, tolerance, msg = test_check(
+ args.reference_path, args.result_path, float_tolerance=args.fp_tolerance
+ )
if result != TestResult.PASS:
print(msg)
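
The most useful addition above is the richer mismatch report: on failure the checker now prints the element-wise difference and the smallest tolerance that would have let the comparison pass. Condensed into a sketch (the real code threads this through several dtype branches):

    import numpy as np

    def report_mismatch(reference_result, test_result):
        # Element-wise difference, appended to the failure message
        difference = reference_result - test_result
        # The max absolute difference is exactly the tolerance that
        # would have been needed for the comparison to pass
        tolerance_needed = np.amax(np.absolute(difference))
        return difference, tolerance_needed
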
diff --git a/verif/conformance/test_select.py b/verif/conformance/test_select.py
index 9e73b0d..c04b7ef 100644
--- a/verif/conformance/test_select.py
+++ b/verif/conformance/test_select.py
@@ -135,10 +135,39 @@ class Operator:
test_dir: the directory where the tests for all operators can be found
config: a dictionary with:
- "params" - mappings of parameter names to the values to select
+ "params" - a dictionary with mappings of parameter names to the values
+ to select (a sub-set of expected values for instance)
"permutes" - a list of parameter names to be permuted
+ "preselected" - a list of dictionaries containing parameter names and
+ pre-chosen values
+ "sparsity" - a dictionary of parameter names with a sparsity value
"errorifs" - list of ERRORIF case names to be selected (negative test)
negative: bool indicating if negative testing is being selected (ERRORIF tests)
+
+ EXAMPLE CONFIG:
+ "params": {
+ "output_type": [
+ "outi8",
+ "outb"
+ ]
+ },
+ "permutes": [
+ "shape",
+ "type"
+ ],
+ "sparsity": {
+ "pad": 15
+ },
+ "preselected": [
+ {
+ "shape": "6",
+ "type": "i8",
+ "pad": "pad00"
+ }
+ ],
+ "errorifs": [
+ "InputZeroPointNotZero"
+ ]
"""
assert isinstance(
self.name, str
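
The docstring leaves "sparsity" loosely specified; judging by the avg_pool2d entry below ("pad": 17), it thins out the permuted cases for one parameter. A hypothetical reading, purely for illustration (apply_sparsity and the one-in-N rule are assumptions, not the test_select implementation):

    def apply_sparsity(values, n):
        # Keep roughly one value in every n, e.g. "pad": 15 would keep
        # about 1/15th of the generated pad cases (assumed semantics)
        return [v for i, v in enumerate(values) if i % n == 0]
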
diff --git a/verif/conformance/tosa_main_profile_ops_info.json b/verif/conformance/tosa_main_profile_ops_info.json
new file mode 100644
index 0000000..656cdde
--- /dev/null
+++ b/verif/conformance/tosa_main_profile_ops_info.json
@@ -0,0 +1,96 @@
+{
+ "avg_pool2d": {
+ "group": "tensor",
+ "generator_args": [
+ [
+ "--target-dtype",
+ "fp32",
+ "--target-dtype",
+ "fp16",
+ "--target-dtype",
+ "bf16",
+ "--fp-values-range",
+ "-2.0,2.0",
+ "--target-shape",
+ "1,49,42,28",
+ "--target-shape",
+ "3,11,44,3",
+ "--allow-pooling-and-conv-oversizes"
+ ],
+ [
+ "--target-dtype",
+ "fp32",
+ "--fp-values-range",
+ "-2.0,2.0",
+ "--target-shape",
+ "1,65535,5,1",
+ "--target-shape",
+ "1,3,65537,1",
+ "--allow-pooling-and-conv-oversizes"
+ ]
+ ],
+ "params": {
+ },
+ "permutes": [
+ "shape",
+ "type",
+ "accum_type"
+ ],
+ "sparsity": {
+ "pad": 17
+ },
+ "profile": [
+ "tosa-mi"
+ ]
+ },
+ "conv2d": {
+ "group": "tensor",
+ "generator_args": [
+ [
+ "--target-dtype",
+ "fp32",
+ "--target-dtype",
+ "fp16",
+ "--target-dtype",
+ "bf16",
+ "--fp-values-range",
+ "-2.0,2.0",
+ "--target-shape",
+ "1,49,42,28",
+ "--target-shape",
+ "1,11,44,13",
+ "--allow-pooling-and-conv-oversizes"
+ ],
+ [
+ "--target-dtype",
+ "fp32",
+ "--fp-values-range",
+ "-2.0,2.0",
+ "--target-shape",
+ "1,65535,4,1",
+ "--target-shape",
+ "1,5,65536,1",
+ "--max-conv-dilation",
+ "1",
+ "--allow-pooling-and-conv-oversizes"
+ ]
+ ],
+ "params": {
+ "shape": [],
+ "type": [],
+ "kernel": [],
+ "stride": [],
+ "pad": [],
+ "dilation": []
+ },
+ "permutes": [
+ "kernel",
+ "shape",
+ "type",
+ "pad"
+ ],
+ "profile": [
+ "tosa-mi"
+ ]
+ }
+}
\ No newline at end of file
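
Each inner list of "generator_args" is passed straight through to the test builder, so the quoting matters: --fp-values-range takes a single "min,max" string. Traced by hand, the range ends up as the uniform sampling bounds in the generator (see the tosa_test_gen.py hunk below):

    vals = [float(v) for v in "-2.0,2.0".split(",")]  # str_to_list(..., is_float=True)
    low, high = min(vals), max(vals)                  # random_fp_low / random_fp_high
    # TosaTestGen then draws tensors with rng.uniform(low=low, high=high, size=shape)
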
diff --git a/verif/conformance/tosa_verif_conformance_generator.py b/verif/conformance/tosa_verif_conformance_generator.py
index 7032ad4..8f23f57 100644
--- a/verif/conformance/tosa_verif_conformance_generator.py
+++ b/verif/conformance/tosa_verif_conformance_generator.py
@@ -35,8 +35,15 @@ PROFILE_OPS_INFO = {
"operator_test_params": "tosa_base_profile_ops_info.json",
"framework_tests": "tosa_base_profile_framework_ops_info.json",
"exclude_types": [],
- }
+ },
+ "tosa-mi": {
+ # Note: this holds only the extra tests that are not in the base profile!
+ "operator_test_params": "tosa_main_profile_ops_info.json",
+ "framework_tests": "tosa_main_profile_framework_ops_info.json",
+ "exclude_types": [],
+ },
}
+PROFILES_ALL = "all"
LOCATION_REF_MODEL_BINARY = Path("build/reference_model/tosa_reference_model")
@@ -181,7 +188,7 @@ def _get_all_tests_list(
return tests
-def generate_results(args, operator, op_build_dir, tests=None):
+def generate_results(args, profile, operator, op_build_dir, tests=None):
"""Run tests on reference model and save result to the test directory."""
num_cores = args.num_cores
run_tests_cmd = "tosa_verif_run_tests"
@@ -201,7 +208,7 @@ def generate_results(args, operator, op_build_dir, tests=None):
if not tests:
# Do not need to run ERRORIF tests as they don't have result files
tests = _get_all_tests_list(
- args.profile, op_build_dir, operator, exclude_negative_tests=True
+ profile, op_build_dir, operator, exclude_negative_tests=True
)
for test in tests:
@@ -232,10 +239,11 @@ def generate_results(args, operator, op_build_dir, tests=None):
def convert_tests(
args,
+ profile,
operator,
op_build_dir,
output_dir,
- profiles,
+ op_profiles_list,
tests=None,
group=None,
trim_op_subdir=False,
@@ -247,8 +255,10 @@ def convert_tests(
output_dir = output_dir / group
ref_cmd_base = ["--ref-model-directory", str(ref_model_dir)]
- for profile in profiles:
- ref_cmd_base.extend(["--profile", profile])
+ # This op may be in more than one profile - e.g. tosa-bi and tosa-mi -
+ # even if we are only producing tests for tosa-mi
+ for op_profile in op_profiles_list:
+ ref_cmd_base.extend(["--profile", op_profile])
if args.framework_schema:
ref_cmd_base.extend(["--framework-schema", str(args.framework_schema)])
ref_cmd_base.append("--output-directory")
@@ -256,8 +266,8 @@ def convert_tests(
ref_cmds = []
if not tests:
- tests = _get_all_tests_list(args.profile, op_build_dir, operator)
- logger.info(f"Converting all {args.profile} profile tests")
+ tests = _get_all_tests_list(profile, op_build_dir, operator)
+ logger.info(f"Converting all {profile} profile tests")
# Controls if we copy the tests in their operator sub-directory or not
output_dir_relative_pos = -1 if trim_op_subdir else -2
@@ -303,16 +313,19 @@ def convert_tests(
return output_dir
-def get_op_tests_selection(args, operator, op_build_dir, test_params, negative=False):
+def get_op_tests_selection(
+ args, profile, operator, op_build_dir, test_params, negative=False
+):
"""Use test picker to get subsection of tests generated."""
assert operator in test_params
+ logger.info("Choosing {} tests".format(("negative" if negative else "positive")))
try:
op_params = test_params[operator]
op = Operator.registry[operator](
op_build_dir,
op_params,
negative,
- exclude_types=PROFILE_OPS_INFO[args.profile]["exclude_types"],
+ exclude_types=PROFILE_OPS_INFO[profile]["exclude_types"],
)
except KeyError:
logger.error(f"{operator} operator is not supported by test_select")
@@ -321,11 +334,11 @@ def get_op_tests_selection(args, operator, op_build_dir, test_params, negative=F
return op.select_tests()
-def check_op_tests(args, operator, output_dir):
+def check_op_tests(args, profile, operator, output_dir):
"""Move test folders than contain files larger than 30MB to new directory."""
destination_dir = str(args.output_dir) + "_large_files"
- tests = _get_all_tests_list(args.profile, output_dir, operator, include_all=True)
+ tests = _get_all_tests_list(profile, output_dir, operator, include_all=True)
if not tests:
logger.error(
f"Couldn't find any tests to size check for {operator} in {output_dir}"
@@ -408,6 +421,7 @@ def parse_args(argv=None):
"""Parse the arguments."""
parser = argparse.ArgumentParser()
profiles = list(PROFILE_OPS_INFO.keys())
+ profiles.append(PROFILES_ALL)
parser.add_argument(
"--profile",
dest="profile",
@@ -555,7 +569,6 @@ def main():
logging.getLogger("test_select").setLevel(loglevel)
logging.getLogger("convert2conformance").setLevel(loglevel)
- print(f"Creating conformance tests for TOSA {args.profile} profile")
print(f"Output directory: {args.output_dir}")
if args.random_seed != DEFAULT_SEED:
@@ -567,147 +580,172 @@ def main():
logger.debug(f"Creating build directory: {args.build_dir}")
args.build_dir.mkdir(parents=True, exist_ok=True)
+ # TODO: for tosa-mi we should really generate the tosa-bi profile as well
+ # - for now leave it as a subset rather than a superset (for testing)
+ if args.profile == PROFILES_ALL:
+ profiles = list(PROFILE_OPS_INFO.keys())
+ else:
+ profiles = [args.profile]
+
try:
- # Framework unit tests
- if args.unit_tests in ["framework", "both"]:
- logger.debug("Creating FRAMEWORK unit tests")
- test_picks_file = (
- args.param_json_dir / PROFILE_OPS_INFO[args.profile]["framework_tests"]
- )
- try:
- with open(test_picks_file, "r") as fd:
- test_picks = json.load(fd)
- except Exception as e:
- logger.error(
- f"Couldn't load framework tests info - {test_picks_file}: {e}"
+ for profile in profiles:
+ print(f"Creating conformance tests for TOSA {profile} profile")
+ # Framework unit tests
+ if args.unit_tests in ["framework", "both"]:
+ logger.debug("Creating FRAMEWORK unit tests")
+ test_picks_file = (
+ args.param_json_dir / PROFILE_OPS_INFO[profile]["framework_tests"]
)
- return 1
-
- operators = args.operators
- if not operators:
- # Create tests for all the operators
- operators = list(test_picks.keys())
-
- root_output_dir = args.output_dir / "frameworks" / "tflite" / "operators"
- for op in operators:
- if op not in test_picks:
- logger.warning(
- f"Framework op {op} not found in {test_picks_file} - skipping"
+ try:
+ with open(test_picks_file, "r") as fd:
+ test_picks = json.load(fd)
+ except Exception as e:
+ logger.error(
+ f"Couldn't load framework tests info - {test_picks_file}: {e}"
)
- continue
+ return 1
- logger.debug(f"Copying and renaming {op}")
- framework_test_dir = copy_rename_framework_tests(args, op, test_picks)
- profiles = test_picks[op]["profile"]
- if args.convert_all_tests:
- logger.debug("Running and converting all framework tests")
- convert_tests(
- args,
- op,
- framework_test_dir,
- root_output_dir,
- profiles,
- trim_op_subdir=True,
- )
- else:
- framework_tests = get_framework_tests_selection(
- args, op, test_picks, framework_test_dir
+ operators = args.operators
+ if not operators:
+ # Create tests for all the operators
+ operators = list(test_picks.keys())
+
+ root_output_dir = (
+ args.output_dir / "frameworks" / "tflite" / "operators"
+ )
+ for op in operators:
+ logger.info(f"FRAMEWORK OP: {op}")
+ if op not in test_picks:
+ logger.warning(
+ f"Framework op {op} not found in {test_picks_file} - skipping"
+ )
+ continue
+
+ op_profiles_list = test_picks[op]["profile"]
+ if (
+ args.profile != PROFILES_ALL
+ and args.profile not in op_profiles_list
+ ):
+ # Skip this operator as not part of the profile chosen
+ logger.debug(f"Skipping {op} as not part of {args.profile}")
+ continue
+
+ logger.debug(f"Copying and renaming {op}")
+ framework_test_dir = copy_rename_framework_tests(
+ args, op, test_picks
)
+
+ if args.convert_all_tests:
+ logger.debug("Running and converting all framework tests")
+ framework_tests = None # Don't select any
+ else:
+ logger.debug("Running and converting selected framework tests")
+ framework_tests = get_framework_tests_selection(
+ args, op, test_picks, framework_test_dir
+ )
convert_tests(
args,
+ profile,
op,
framework_test_dir,
root_output_dir,
- profiles,
+ op_profiles_list,
tests=framework_tests,
trim_op_subdir=True,
)
- # Operator unit tests
- if args.unit_tests in ["operator", "both"]:
- logger.debug("Creating OPERATOR unit tests")
- test_params_file = (
- args.param_json_dir
- / PROFILE_OPS_INFO[args.profile]["operator_test_params"]
- )
- try:
- with open(test_params_file, "r") as fd:
- test_params = json.load(fd)
- except Exception as e:
- logger.error(
- f"Couldn't load operator test params - {test_params_file}: {e}"
+ # Operator unit tests
+ if args.unit_tests in ["operator", "both"]:
+ logger.debug("Creating OPERATOR unit tests")
+ test_params_file = (
+ args.param_json_dir
+ / PROFILE_OPS_INFO[profile]["operator_test_params"]
)
- return 1
+ try:
+ with open(test_params_file, "r") as fd:
+ test_params = json.load(fd)
+ except Exception as e:
+ logger.error(
+ f"Couldn't load operator test params - {test_params_file}: {e}"
+ )
+ return 1
+
+ operators = args.operators
+ if not operators:
+ # Create tests for all the operators
+ operators = list(test_params.keys())
+
+ for op in operators:
+ logger.info(f"OPERATOR: {op}")
+ if op not in test_params:
+ logger.warning(
+ f"{op} operator parameters not found in {test_params_file} - skipping"
+ )
+ continue
- operators = args.operators
- if not operators:
- # Create tests for all the operators
- operators = list(test_params.keys())
+ if (
+ args.test_type == "negative"
+ and "no_negative_tests" in test_params[op]
+ and test_params[op]["no_negative_tests"]
+ ):
+ logger.warning(f"No negative tests for {op}")
+ continue
- for op in operators:
- if op not in test_params:
- logger.warning(
- f"{op} operator parameters not found in {test_params_file} - skipping"
- )
- continue
-
- if (
- args.test_type == "negative"
- and "no_negative_tests" in test_params[op]
- and test_params[op]["no_negative_tests"]
- ):
- logger.warning(f"No negative tests for {op}")
- continue
-
- op_build_dir = build_op_tests(args, op, test_params)
-
- operator_group = test_params[op]["group"]
- root_output_dir = args.output_dir / "operators"
- profiles = test_params[op]["profile"]
- if args.convert_all_tests:
- logger.debug(f"Running and converting all {op} tests")
- generate_results(args, op, op_build_dir)
+ op_profiles_list = test_params[op]["profile"]
+ if (
+ args.profile != PROFILES_ALL
+ and args.profile not in op_profiles_list
+ ):
+ # Skip this operator as not part of the profile chosen
+ logger.debug(f"Skipping {op} as not part of {args.profile}")
+ continue
+
+ op_build_dir = build_op_tests(args, op, test_params)
+
+ operator_group = test_params[op]["group"]
+ root_output_dir = args.output_dir / "operators"
+ if args.convert_all_tests:
+ logger.debug(f"Running and converting all {op} tests")
+ generate_results(args, profile, op, op_build_dir)
+ operator_test_list = None
+ else:
+ logger.debug(f"Running and converting selection of {op} tests")
+ if args.test_type in ["positive", "both"]:
+ tests_gen, tests_gen2 = tee(
+ get_op_tests_selection(
+ args, profile, op, op_build_dir, test_params
+ )
+ )
+ generate_results(args, profile, op, op_build_dir, tests_gen)
+ operator_test_list = list(tests_gen2)
+ else:
+ operator_test_list = []
+ if args.test_type in ["negative", "both"] and (
+ "no_negative_tests" not in test_params[op]
+ or not test_params[op]["no_negative_tests"]
+ ):
+ operator_test_list.extend(
+ get_op_tests_selection(
+ args,
+ profile,
+ op,
+ op_build_dir,
+ test_params,
+ negative=True,
+ )
+ )
output_dir = convert_tests(
args,
+ profile,
op,
op_build_dir,
root_output_dir,
- profiles,
+ op_profiles_list,
+ tests=operator_test_list,
group=operator_group,
)
- else:
- if args.test_type in ["positive", "both"]:
- tests_gen1, tests_gen2 = tee(
- get_op_tests_selection(args, op, op_build_dir, test_params)
- )
- generate_results(args, op, op_build_dir, tests_gen1)
- output_dir = convert_tests(
- args,
- op,
- op_build_dir,
- root_output_dir,
- profiles,
- tests=tests_gen2,
- group=operator_group,
- )
- if args.test_type in ["negative", "both"] and (
- "no_negative_tests" not in test_params[op]
- or not test_params[op]["no_negative_tests"]
- ):
- negative_tests = get_op_tests_selection(
- args, op, op_build_dir, test_params, negative=True
- )
- output_dir = convert_tests(
- args,
- op,
- op_build_dir,
- root_output_dir,
- profiles,
- tests=negative_tests,
- group=operator_group,
- )
- if not args.keep_large_files:
- check_op_tests(args, op, output_dir)
+ if not args.keep_large_files:
+ check_op_tests(args, profile, op, output_dir)
except GenConformanceError:
return 1
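
The restructuring above is large but mechanical: the old single-profile body is indented under a per-profile loop, and every helper that previously read args.profile now takes the loop variable. Condensed, the new operator flow is (a sketch that elides the framework branch and test selection):

    profiles = list(PROFILE_OPS_INFO) if args.profile == PROFILES_ALL else [args.profile]
    for profile in profiles:
        for op in operators:
            op_profiles_list = test_params[op]["profile"]
            if args.profile != PROFILES_ALL and args.profile not in op_profiles_list:
                continue  # op is not part of the chosen profile
            op_build_dir = build_op_tests(args, op, test_params)
            generate_results(args, profile, op, op_build_dir)
            convert_tests(args, profile, op, op_build_dir,
                          args.output_dir / "operators", op_profiles_list,
                          group=test_params[op]["group"])
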
diff --git a/verif/generator/tosa_test_gen.py b/verif/generator/tosa_test_gen.py
index 95e06ed..f3ca512 100644
--- a/verif/generator/tosa_test_gen.py
+++ b/verif/generator/tosa_test_gen.py
@@ -36,6 +36,9 @@ class TosaTestGen:
self.quantGen = TosaQuantGen()
# Force makeShape to do a specific starting shape
self.targetted_shape = None
+ # Work out floating point range
+ self.random_fp_low = min(args.tensor_fp_value_range)
+ self.random_fp_high = max(args.tensor_fp_value_range)
def createSerializer(self, opName, testPath):
self.testPath = os.path.join(opName, testPath)
@@ -84,13 +87,25 @@ class TosaTestGen:
self.rng.integers(low=-(1 << 47), high=(1 << 47), size=shape)
)
elif dtype == DType.FP16:
- return np.float16(self.rng.random(size=shape))
+ return np.float16(
+ self.rng.uniform(
+ low=self.random_fp_low, high=self.random_fp_high, size=shape
+ )
+ )
elif dtype == DType.BF16:
- f32_tensor = np.float32(self.rng.random(size=shape))
+ f32_tensor = np.float32(
+ self.rng.uniform(
+ low=self.random_fp_low, high=self.random_fp_high, size=shape
+ )
+ )
# Floor the last 16 bits of each f32 value
return np.float32(vect_f32_to_bf16(f32_tensor))
elif dtype == DType.FP32:
- return np.float32(self.rng.random(size=shape))
+ return np.float32(
+ self.rng.uniform(
+ low=self.random_fp_low, high=self.random_fp_high, size=shape
+ )
+ )
else:
raise Exception("Unrecognized Dtype: {}".format(dtype))
@@ -135,12 +150,17 @@ class TosaTestGen:
def getRandNumberDType(self, dtype):
if dtype == DType.FP32:
- return self.rng.random()
+ return np.float32(
+ self.rng.uniform(low=self.random_fp_low, high=self.random_fp_high)
+ )
elif dtype == DType.FP16:
- rand_f32 = self.rng.random()
- return np.float16(rand_f32)
+ return np.float16(
+ self.rng.uniform(low=self.random_fp_low, high=self.random_fp_high)
+ )
elif dtype == DType.BF16:
- rand_f32 = self.rng.random()
+ rand_f32 = np.float32(
+ self.rng.uniform(low=self.random_fp_low, high=self.random_fp_high)
+ )
return vect_f32_to_bf16(rand_f32)
elif dtype == DType.BOOL:
return self.rng.choice([False, True])
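
Tensor generation thus moves from rng.random(), which is fixed to [0, 1), to rng.uniform() over the configured bounds. For BF16 the generator still samples FP32 and truncates the low 16 bits; a self-contained sketch of the three float paths (the bit-masking mirrors what vect_f32_to_bf16 is described as doing, so its exact rounding is an assumption):

    import numpy as np

    rng = np.random.default_rng(seed=42)
    low, high = -2.0, 2.0                # from --fp-values-range

    fp32 = np.float32(rng.uniform(low=low, high=high, size=(2, 3)))
    fp16 = np.float16(rng.uniform(low=low, high=high, size=(2, 3)))
    # BF16: keep the top 16 bits of each FP32 value ("floor the last 16 bits")
    bf16 = (fp32.view(np.uint32) & np.uint32(0xFFFF0000)).view(np.float32)
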
diff --git a/verif/generator/tosa_verif_build_tests.py b/verif/generator/tosa_verif_build_tests.py
index ab78b1a..bc1ec8e 100644
--- a/verif/generator/tosa_verif_build_tests.py
+++ b/verif/generator/tosa_verif_build_tests.py
@@ -2,20 +2,24 @@
# SPDX-License-Identifier: Apache-2.0
import argparse
import re
+import sys
from generator.tosa_test_gen import TosaTestGen
from serializer.tosa_serializer import dtype_str_to_val
from serializer.tosa_serializer import DTypeNames
+OPTION_FP_VALUES_RANGE = "--fp-values-range"
+
# Used for parsing a comma-separated list of integers in a string
# to an actual list of integers
-def str_to_list(in_s):
+def str_to_list(in_s, is_float=False):
"""Converts a comma-separated list of string integers to a python list of ints"""
lst = in_s.split(",")
out_list = []
for i in lst:
- out_list.append(int(i))
+ val = float(i) if is_float else int(i)
+ out_list.append(val)
return out_list
@@ -25,6 +29,26 @@ def auto_int(x):
def parseArgs(argv):
+ """Parse the command line arguments."""
+ if argv is None:
+ argv = sys.argv[1:]
+
+ if OPTION_FP_VALUES_RANGE in argv:
+ # Argparse workaround: option values that start with a hyphen (negative
+ # numbers) are mistaken for option names, so convert "ARG VAL" into "ARG=VAL"
+ # Example: --fp-values-range -2.0,2.0 -> --fp-values-range=-2.0,2.0
+ new_argv = []
+ idx = 0
+ while idx < len(argv):
+ arg = argv[idx]
+ if arg == OPTION_FP_VALUES_RANGE and idx + 1 < len(argv):
+ val = argv[idx + 1]
+ if val.startswith("-"):
+ arg = f"{arg}={val}"
+ idx += 1
+ new_argv.append(arg)
+ idx += 1
+ argv = new_argv
parser = argparse.ArgumentParser()
parser.add_argument(
@@ -61,6 +85,14 @@ def parseArgs(argv):
)
parser.add_argument(
+ OPTION_FP_VALUES_RANGE,
+ dest="tensor_fp_value_range",
+ default="0.0,1.0",
+ type=lambda x: str_to_list(x, is_float=True),
+ help="Min,Max range of floating point tensor values",
+ )
+
+ parser.add_argument(
"--max-batch-size",
dest="max_batch_size",
default=1,
@@ -132,7 +164,7 @@ def parseArgs(argv):
help="Upper limit on width and height output dimensions for `resize` op. Default: 1000",
)
- # Targetting a specific shape/rank/dtype
+ # Targeting a specific shape/rank/dtype
parser.add_argument(
"--target-shape",
dest="target_shapes",
diff --git a/verif/runner/tosa_refmodel_sut_run.py b/verif/runner/tosa_refmodel_sut_run.py
index 6acaaf4..95f6e7b 100644
--- a/verif/runner/tosa_refmodel_sut_run.py
+++ b/verif/runner/tosa_refmodel_sut_run.py
@@ -21,9 +21,9 @@ class TosaRefReturnCode(IntEnum):
class TosaSUTRunner(TosaTestRunner):
"""TOSA Reference Model runner."""
- def __init__(self, args, runnerArgs, testDir):
+ def __init__(self, args, runnerArgs, testDirPath):
"""Initialize using the given test details."""
- super().__init__(args, runnerArgs, testDir)
+ super().__init__(args, runnerArgs, testDirPath)
def runTestGraph(self):
"""Run the test on the reference model."""
diff --git a/verif/runner/tosa_test_runner.py b/verif/runner/tosa_test_runner.py
index 65931d8..d8c2a87 100644
--- a/verif/runner/tosa_test_runner.py
+++ b/verif/runner/tosa_test_runner.py
@@ -33,47 +33,49 @@ class TosaTestInvalid(Exception):
class TosaTestRunner:
"""TOSA Test Runner template class for systems under test."""
- def __init__(self, args, runnerArgs, testDir):
+ def __init__(self, args, runnerArgs, testDirPath):
"""Initialize and load JSON meta data file."""
self.args = args
self.runnerArgs = runnerArgs
- self.testDir = testDir
- self.testName = Path(self.testDir).name
+ self.testDir = str(testDirPath)
+ self.testDirPath = testDirPath
+ self.testName = self.testDirPath.name
set_print_in_color(not args.no_color)
# Check if we want to run binary and if it's already converted
- descFilePath = Path(testDir, "desc.json")
- descBinFilePath = Path(testDir, "desc_binary.json")
+ descFilePath = testDirPath / "desc.json"
+ descBinFilePath = testDirPath / "desc_binary.json"
if args.binary:
if descBinFilePath.is_file():
descFilePath = descBinFilePath
try:
# Load the json test file
- with open(descFilePath, "r") as fd:
+ with descFilePath.open("r") as fd:
self.testDesc = json.load(fd)
except Exception as e:
raise TosaTestInvalid(str(descFilePath), e)
# Convert to binary if needed
- tosaFilePath = Path(testDir, self.testDesc["tosa_file"])
+ tosaFilePath = testDirPath / self.testDesc["tosa_file"]
if args.binary and tosaFilePath.suffix == ".json":
# Convert tosa JSON to binary
json2fbbin.json_to_fbbin(
Path(args.flatc_path),
Path(args.operator_fbs),
tosaFilePath,
- Path(testDir),
+ testDirPath,
)
# Write new desc_binary file
self.testDesc["tosa_file"] = tosaFilePath.stem + ".tosa"
- with open(descBinFilePath, "w") as fd:
+ with descBinFilePath.open("w") as fd:
json.dump(self.testDesc, fd, indent=2)
descFilePath = descBinFilePath
# Set location of desc.json (or desc_binary.json) file in use
self.descFile = str(descFilePath)
+ self.descFilePath = descFilePath
def skipTest(self):
"""Check if the test is skipped due to test type or profile selection."""
@@ -109,9 +111,9 @@ class TosaTestRunner:
for resultNum, resultFileName in enumerate(self.testDesc["ofm_file"]):
if "expected_result_file" in self.testDesc:
try:
- conformanceFile = Path(
- self.testDir,
- self.testDesc["expected_result_file"][resultNum],
+ conformanceFilePath = (
+ self.testDirPath
+ / self.testDesc["expected_result_file"][resultNum]
)
except IndexError:
result = TosaTestRunner.Result.INTERNAL_ERROR
@@ -122,14 +124,14 @@ class TosaTestRunner:
print(msg)
break
else:
- conformanceFile = None
- resultFile = Path(self.testDir, resultFileName)
+ conformanceFilePath = None
+ resultFilePath = self.testDirPath / resultFileName
- if conformanceFile:
+ if conformanceFilePath:
print_result_line = False # Checker will print one for us
chkResult, tolerance, msg = test_check(
- str(conformanceFile),
- str(resultFile),
+ conformanceFilePath,
+ resultFilePath,
test_name=self.testName,
)
# Change EXPECTED_PASS assumption if we have any failures
@@ -140,18 +142,18 @@ class TosaTestRunner:
print(msg)
else:
# No conformance file to verify, just check results file exists
- if not resultFile.is_file():
+ if not resultFilePath.is_file():
result = TosaTestRunner.Result.UNEXPECTED_FAILURE
- msg = "Results file is missing: {}".format(resultFile)
+ msg = "Results file is missing: {}".format(resultFilePath)
messages.append(msg)
print(msg)
- if resultFile.is_file():
- # Move the resultFile to allow subsequent system under
+ if resultFilePath.is_file():
+ # Move the resultFilePath to allow subsequent systems under
# test to create them and to check they have been created
- resultFile = resultFile.rename(
- resultFile.with_suffix(
- ".{}{}".format(self.__module__, resultFile.suffix)
+ resultFilePath = resultFilePath.rename(
+ resultFilePath.with_suffix(
+ ".{}{}".format(self.__module__, resultFilePath.suffix)
)
)
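
The runner changes are the same os.path-to-pathlib migration seen in the checker. The recurring translations, side by side (directory name hypothetical):

    from pathlib import Path

    test_dir = Path("operators/conv2d_test")  # hypothetical test directory
    desc = test_dir / "desc.json"             # was: Path(testDir, "desc.json")
    if desc.is_file():                        # was: os.path.isfile(...)
        with desc.open("r") as fd:            # was: open(descFilePath, "r")
            pass
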
diff --git a/verif/runner/tosa_verif_run_tests.py b/verif/runner/tosa_verif_run_tests.py
index 77394cc..ddb32a4 100644
--- a/verif/runner/tosa_verif_run_tests.py
+++ b/verif/runner/tosa_verif_run_tests.py
@@ -2,7 +2,6 @@
# Copyright (c) 2020-2022, ARM Limited.
# SPDX-License-Identifier: Apache-2.0
import argparse
-import glob
import importlib
import os
import queue
@@ -40,6 +39,13 @@ def parseArgs(argv):
help="File containing list of tests to run (one per line)",
)
parser.add_argument(
+ "-r",
+ "--recursive",
+ dest="recursive_tests",
+ action="store_true",
+ help="Recursively search for tests",
+ )
+ parser.add_argument(
"--operator-fbs",
dest="operator_fbs",
default="conformance_tests/third_party/serialization_lib/schema/tosa.fbs",
@@ -146,27 +152,28 @@ def parseArgs(argv):
EXCLUSION_PREFIX = ["test", "model", "desc"]
-def convert2Numpy(testDir):
+def convert2Numpy(test_path):
"""Convert all the JSON numpy files back into binary numpy."""
- jsons = glob.glob(os.path.join(testDir, "*.json"))
+ jsons = test_path.glob("*.json")
for json in jsons:
for exclude in EXCLUSION_PREFIX:
- if os.path.basename(json).startswith(exclude):
- json = ""
+ if json.name.startswith(exclude):
+ json = None
+ break
if json:
- # debug print("Converting " + json)
- json2numpy.json_to_npy(Path(json))
+ # debug print(f"Converting {json}")
+ json2numpy.json_to_npy(json)
def workerThread(task_queue, runnerList, args, result_queue):
"""Worker thread that runs the next test from the queue."""
while True:
try:
- test = task_queue.get(block=False)
+ test_path = task_queue.get(block=False)
except queue.Empty:
break
- if test is None:
+ if test_path is None:
break
msg = ""
@@ -176,21 +183,25 @@ def workerThread(task_queue, runnerList, args, result_queue):
start_time = datetime.now()
# Set up system under test runner
runnerName = runnerModule.__name__
- runner = runnerModule.TosaSUTRunner(args, runnerArgs, test)
+ runner = runnerModule.TosaSUTRunner(args, runnerArgs, test_path)
skip, reason = runner.skipTest()
if skip:
msg = "Skipping {} test".format(reason)
- print("{} {}".format(msg, test))
+ print("{} {}".format(msg, test_path))
rc = TosaTestRunner.Result.SKIPPED
else:
# Convert JSON data files into numpy format on first pass
if not converted:
- convert2Numpy(test)
+ convert2Numpy(test_path)
converted = True
if args.verbose:
- print("Running runner {} with test {}".format(runnerName, test))
+ print(
+ "Running runner {} with test {}".format(
+ runnerName, test_path
+ )
+ )
try:
grc, gmsg = runner.runTestGraph()
rc, msg = runner.testResult(grc, gmsg)
@@ -220,7 +231,9 @@ def workerThread(task_queue, runnerList, args, result_queue):
rc = TosaTestRunner.Result.INTERNAL_ERROR
finally:
end_time = datetime.now()
- result_queue.put((runnerName, test, rc, msg, end_time - start_time))
+ result_queue.put(
+ (runnerName, test_path, rc, msg, end_time - start_time)
+ )
task_queue.task_done()
@@ -262,10 +275,10 @@ def createXUnitResults(xunitFile, runnerList, resultLists, verbose):
xunit_suite = xunit_result.create_suite(runner)
# Sort by test name
- for test, rc, msg, time_delta in sorted(
+ for test_path, rc, msg, time_delta in sorted(
resultLists[runner], key=lambda tup: tup[0]
):
- test_name = test
+ test_name = str(test_path)
xt = xunit.xunit_test(test_name, runner)
xt.time = str(
@@ -293,12 +306,27 @@ def createXUnitResults(xunitFile, runnerList, resultLists, verbose):
xunit_result.write_results(xunitFile)
+def getTestsInPath(path):
+ # Recursively find any tests in this directory
+ desc_path = path / "desc.json"
+ if desc_path.is_file():
+ return [path]
+ elif path.is_dir():
+ path_list = []
+ for p in path.glob("*"):
+ path_list.extend(getTestsInPath(p))
+ return path_list
+ else:
+ return []
+
+
def main(argv=None):
"""Start worker threads to do the testing and outputs the results."""
args = parseArgs(argv)
- if TOSA_REFMODEL_RUNNER in args.sut_module and not os.path.isfile(
- args.ref_model_path
+ if (
+ TOSA_REFMODEL_RUNNER in args.sut_module
+ and not Path(args.ref_model_path).is_file()
):
print(
"Argument error: Reference Model not found ({})".format(args.ref_model_path)
@@ -307,7 +335,7 @@ def main(argv=None):
if args.test_list_file:
try:
- with open(args.test_list_file) as f:
+ with args.test_list_file.open("r") as f:
args.test = f.read().splitlines()
except Exception as e:
print(
@@ -323,12 +351,21 @@ def main(argv=None):
taskQueue = queue.Queue()
resultQueue = queue.Queue()
- for t in args.test:
- if os.path.isfile(t):
- if not os.path.basename(t) == "README":
- print("Warning: Skipping test {} as not a valid directory".format(t))
+ for tdir in args.test:
+ tpath = Path(tdir)
+ if tpath.is_file():
+ if tpath.name != "README":
+ print(
+ "Warning: Skipping test {} as not a valid directory".format(tpath)
+ )
else:
- taskQueue.put((t))
+ if args.recursive_tests:
+ tpath_list = getTestsInPath(tpath)
+ else:
+ tpath_list = [tpath]
+
+ for t in tpath_list:
+ taskQueue.put((t))
print(
"Running {} tests on {} system{} under test".format(
@@ -356,7 +393,7 @@ def main(argv=None):
while True:
try:
- runner, test, rc, msg, time_delta = resultQueue.get(block=False)
+ runner, test_path, rc, msg, time_delta = resultQueue.get(block=False)
resultQueue.task_done()
except queue.Empty:
break
@@ -368,7 +405,7 @@ def main(argv=None):
msg = "{} ...\nskipped {} bytes\n... {}".format(
msg[:half], trimmed, msg[-half:]
)
- resultLists[runner].append((test, rc, msg, time_delta))
+ resultLists[runner].append((test_path, rc, msg, time_delta))
results[runner][rc] += 1
createXUnitResults(args.xunit_file, runnerList, resultLists, args.verbose)
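
With the new -r/--recursive flag, a whole directory tree can be handed to the runner; getTestsInPath() stops at the first desc.json it finds on each branch, so a test directory itself is never descended into. A usage sketch (import path inferred from the file layout, test tree hypothetical):

    from pathlib import Path

    from runner.tosa_verif_run_tests import getTestsInPath

    tests = getTestsInPath(Path("conformance_tests/operators"))
    # -> every sub-directory that directly contains a desc.json
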
diff --git a/verif/tests/test_tosa_refmodel.py b/verif/tests/test_tosa_refmodel.py
index 50ff1ab..1f9cd3e 100644
--- a/verif/tests/test_tosa_refmodel.py
+++ b/verif/tests/test_tosa_refmodel.py
@@ -240,8 +240,8 @@ def test_refmodel_simple_op(tosaTest):
# Check Numpy result versus refmodel
check_result, tolerance, msg = tosa_check(
- str(result_file),
- str(ofm_file),
+ result_file,
+ ofm_file,
test_name=test_dir.name,
misc_checks=misc_checks,
)
diff --git a/verif/tests/tosa_dummy_sut_run.py b/verif/tests/tosa_dummy_sut_run.py
index fffcfa1..344aa2d 100644
--- a/verif/tests/tosa_dummy_sut_run.py
+++ b/verif/tests/tosa_dummy_sut_run.py
@@ -7,9 +7,9 @@ from runner.tosa_test_runner import TosaTestRunner
class TosaSUTRunner(TosaTestRunner):
"""TOSA dummy SUT runner."""
- def __init__(self, args, runnerArgs, testDir):
+ def __init__(self, args, runnerArgs, testDirPath):
"""Initialize using the given test details."""
- super().__init__(args, runnerArgs, testDir)
+ super().__init__(args, runnerArgs, testDirPath)
def runTestGraph(self):
"""Nothing run as this is a dummy SUT that does nothing."""