author    Jeremy Johnson <jeremy.johnson@arm.com>  2022-06-30 14:27:56 +0100
committer Jeremy Johnson <jeremy.johnson@arm.com>  2022-07-12 11:28:15 +0100
commit    0ecfa37738d56cbb50af584e9bf077052094f460 (patch)
tree      c3b6d68ddd458ec6a1a409245ca860ed07522ce4
parent    6c6467f3f07687f0eebd049f1fa3ce21c516f184 (diff)
download  reference_model-0ecfa37738d56cbb50af584e9bf077052094f460.tar.gz
Add conformance generator scripts
Change-Id: I5cb16ea1f47ee454f03a1d5182827e3fd9f49128
Signed-off-by: Jeremy Johnson <jeremy.johnson@arm.com>
-rw-r--r--  README.md                                                      51
-rwxr-xr-x  scripts/convert2conformance/convert2conformance.py             9
-rw-r--r--  setup.cfg                                                       2
-rw-r--r--  verif/conformance/test_select.py                              783
-rw-r--r--  verif/conformance/tosa_base_profile_framework_ops_info.json   185
-rw-r--r--  verif/conformance/tosa_base_profile_ops_info.json            2220
-rw-r--r--  verif/conformance/tosa_verif_conformance_generator.py         676
7 files changed, 3923 insertions, 3 deletions
diff --git a/README.md b/README.md
index 97fef58..6a05aea 100644
--- a/README.md
+++ b/README.md
@@ -413,6 +413,57 @@ tosa_verif_framework_compiler_runner \
--test tests
```
+### TOSA Conformance Generator
+
+This script enables creation of part or all of the
+[TOSA conformance tests](https://git.mlplatform.org/tosa/conformance_tests.git/)
+to aid development of those tests.
+
+Currently only the Base Profile of TOSA is supported by the generator.
+
+#### Setup
+
+To enable selection of the framework tests for conformance, the TOSA Framework
+Unit Tests (see above) must have been pre-generated, and the framework schema
+from TensorFlow Lite must be available.
+
+#### Usage
+
+These are the main script options for controlling the types of tests produced:
+
+* `--profile` - controls the TOSA profile; only `base` is currently supported.
+* `--unit-tests` - choose either `operator`, `framework` or `both` tests.
+* `--test-type` - selects `positive`, `negative` or `both` types of test (see
+  the final example below).
+
+
+An example to create the TOSA operator unit tests for ADD and SUB:
+
+```bash
+tosa_verif_conformance_generator \
+ --profile base \
+ --unit-tests operator \
+ --ref-model-directory reference_model \
+ --operator add sub
+```
+
+The above command will create some temporary files in a `conformance_build`
+directory, but will output the conformance unit tests into a `conformance`
+directory.
+
+This next example will create all the conformance tests, using different
+temporary build and output directories:
+
+```bash
+tosa_verif_conformance_generator \
+ --profile base \
+ --unit-tests both \
+ --ref-model-directory reference_model \
+ --build-directory tmp_build \
+ --output-directory conf_tests \
+ --framework-tests-directory tests \
+ --framework-schema tensorflow/lite/schema/schema.fbs
+```
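+
+The `--test-type` option can be combined with either of the above invocations.
+For example, the following illustrative command should produce only the
+negative (ERRORIF) operator tests for CONV2D:
+
+```bash
+tosa_verif_conformance_generator \
+  --profile base \
+  --unit-tests operator \
+  --test-type negative \
+  --ref-model-directory reference_model \
+  --operator conv2d
+```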
+
## Other tools
Included in this repository are some support utilities used by the test runner:
diff --git a/scripts/convert2conformance/convert2conformance.py b/scripts/convert2conformance/convert2conformance.py
index 71f263b..aa41ff1 100755
--- a/scripts/convert2conformance/convert2conformance.py
+++ b/scripts/convert2conformance/convert2conformance.py
@@ -200,8 +200,10 @@ def main(argv=None):
# Work out where the desc.json file is
desc_filename = args.test_dir / NAME_DESC_FILENAME
framework_conversion = False
+ test_type_desc = "unknown"
if desc_filename.is_file():
- logger.info("Found reference model unit test")
+ logger.debug("Found TOSA operator unit test")
+ test_type_desc = "TOSA operator"
else:
desc_filename = (
args.test_dir
@@ -209,7 +211,8 @@ def main(argv=None):
/ NAME_DESC_FILENAME
)
if desc_filename.is_file():
- logger.info(f"Found framework unit test for {args.framework}")
+ logger.debug(f"Found framework unit test for {args.framework}")
+ test_type_desc = f"{args.framework}"
framework_conversion = True
else:
logger.error(f"Could not find {NAME_DESC_FILENAME} in {args.test_dir}")
@@ -297,7 +300,7 @@ def main(argv=None):
with open(new_desc_filename, "w") as fd:
json.dump(test_desc, fd, indent=2)
- logger.info(f"Converted to {args.output_dir}")
+ logger.info(f"Converted {test_type_desc} test to {args.output_dir}")
return 0
diff --git a/setup.cfg b/setup.cfg
index 6420a40..56974bd 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -28,6 +28,7 @@ packages =
checker
frameworks
tests
+ conformance
xunit
json2fbbin
json2numpy
@@ -54,6 +55,7 @@ console_scripts =
convert2conformance = convert2conformance.convert2conformance:main
tosa_verif_framework_generator = frameworks.tosa_verif_framework_generator:main
tosa_verif_framework_compiler_runner = frameworks.tosa_verif_framework_compiler_runner:main
+ tosa_verif_conformance_generator = conformance.tosa_verif_conformance_generator:main
[tool:pytest]
testpaths=verif/tests
diff --git a/verif/conformance/test_select.py b/verif/conformance/test_select.py
new file mode 100644
index 0000000..1013b6e
--- /dev/null
+++ b/verif/conformance/test_select.py
@@ -0,0 +1,783 @@
+# Copyright (c) 2021-2022, ARM Limited.
+# SPDX-License-Identifier: Apache-2.0
+"""Select generated tests."""
+import argparse
+import itertools
+import json
+import logging
+from pathlib import Path
+from typing import Any
+from typing import Dict
+from typing import List
+
+logging.basicConfig()
+logger = logging.getLogger("test_select")
+
+
+def expand_params(permutes: Dict[str, List[Any]], others: Dict[str, List[Any]]):
+ """Generate permuted combinations of a dictionary of values and combine with others.
+
+ permutes: a dictionary with sequences of values to be fully permuted
+ others: a dictionary with sequences of values not fully permuted, but all used
+
+ This yields dictionaries with one value from each of the items in permutes,
+ combined with one value from each of the items in others.
+
+ Example 1:
+
+ permutes = {"a": [1, 2], "b": [3, 4]}
+    others = {"c": [5, 6, 7], "d": [True, False]}
+
+ generates:
+
+ [
+ {"a": 1, "b": 3, "c": 5, "d": True},
+ {"a": 1, "b": 4, "c": 6, "d": False`},
+ {"a": 2, "b": 3, "c": 7, "d": True},
+ {"a": 2, "b": 4, "c": 5, "d": False`},
+ ]
+
+ Example 2:
+
+ permutes = {"a": [1, 2], "b": [3, 4]}
+    others = {"c": [5, 6, 7, 8, 9], "d": [True, False]}
+
+ generates:
+
+ [
+ {"a": 1, "b": 3, "c": 5, "d": True},
+ {"a": 1, "b": 4, "c": 6, "d": False},
+ {"a": 2, "b": 3, "c": 7, "d": True},
+ {"a": 2, "b": 4, "c": 8, "d": False},
+ {"a": 1, "b": 3, "c": 9, "d": True},
+ ]
+
+ Raises:
+ ValueError if any item is in both permutes and others
+ """
+ for k in permutes:
+ if k in others:
+ raise ValueError(f"item conflict: {k}")
+
+ p_keys = []
+ p_vals = []
+    # if permutes is empty, p_product_len should be 0, but we leave it as 1
+ # so we return a single, empty dictionary, if others is also empty
+ p_product_len = 1
+ # extract the keys and values from the permutes dictionary
+    # and calculate the product of the number of values in each item as we do so
+ for k, v in permutes.items():
+ p_keys.append(k)
+ p_vals.append(v)
+ p_product_len *= len(v)
+ # create a cyclic generator for the product of all the permuted values
+ p_product = itertools.product(*p_vals)
+ p_generator = itertools.cycle(p_product)
+
+ o_keys = []
+ o_vals = []
+ o_generators = []
+ # extract the keys and values from the others dictionary
+ # and create a cyclic generator for each list of values
+ for k, v in others.items():
+ o_keys.append(k)
+ o_vals.append(v)
+ o_generators.append(itertools.cycle(v))
+
+    # The number of params dictionaries generated will be the maximum of the
+    # number of permuted combinations and the longest list of values in others
+ max_items = max([p_product_len] + [len(x) for x in o_vals])
+
+ # create a dictionary with a single value for each of the permutes and others keys
+ for _ in range(max_items):
+ params = {}
+ # add the values for the permutes parameters
+ # the permuted values generator returns a value for each of the permuted keys
+ # in the same order as they were originally given
+ p_vals = next(p_generator)
+ for i in range(len(p_keys)):
+ params[p_keys[i]] = p_vals[i]
+ # add the values for the others parameters
+ # there is a separate generator for each of the others values
+ for i in range(len(o_keys)):
+ params[o_keys[i]] = next(o_generators[i])
+ yield params
+
+
+class Operator:
+ """Base class for operator specific selection properties."""
+
+ # A registry of all Operator subclasses, indexed by the operator name
+ registry = {}
+
+ def __init_subclass__(cls, **kwargs):
+ """Subclass initialiser to register all Operator classes."""
+ super().__init_subclass__(**kwargs)
+ cls.registry[cls.name] = cls
+
+ # Derived classes must override the operator name
+ name = None
+ # Operators with additional parameters must override the param_names
+ # NB: the order must match the order the values appear in the test names
+ param_names = ["shape", "type"]
+
+ # Working set of param_names - updated for negative tests
+ wks_param_names = None
+
+ def __init__(
+ self,
+ test_dir: Path,
+ config: Dict[str, Dict[str, List[Any]]],
+ negative=False,
+ exclude_types=None,
+ ):
+ """Initialise the selection parameters for an operator.
+
+ test_dir: the directory where the tests for all operators can be found
+ config: a dictionary with:
+ "params" - mappings of parameter names to the values to select
+ "permutes" - a list of parameter names to be permuted
+ "errorifs" - list of ERRORIF case names to be selected (negative test)
+ negative: bool indicating if negative testing is being selected (ERRORIF tests)
+ """
+ assert isinstance(
+ self.name, str
+ ), f"{self.__class__.__name__}: {self.name} is not a valid operator name"
+
+ self.negative = negative
+ self.wks_param_names = self.param_names.copy()
+ if self.negative:
+ # need to override positive set up - use "errorifs" config if set
+ # add in errorif case before shape to support all ops, including
+ # different ops like COND_IF and CONVnD etc
+ index = self.wks_param_names.index("shape")
+ self.wks_param_names[index:index] = ["ERRORIF", "case"]
+ config["params"] = {x: [] for x in self.wks_param_names}
+ config["params"]["case"] = (
+ config["errorifs"] if "errorifs" in config else []
+ )
+ config["permutes"] = []
+ config["preselected"] = {}
+
+ self.params = config["params"] if "params" in config else {}
+ self.permutes = config["permutes"] if "permutes" in config else []
+ self.sparsity = config["sparsity"] if "sparsity" in config else {}
+ self.preselected = config["preselected"] if "preselected" in config else {}
+ self.non_permutes = [x for x in self.wks_param_names if x not in self.permutes]
+ logger.info(f"{self.name}: permutes={self.permutes}")
+ logger.info(f"{self.name}: non_permutes={self.non_permutes}")
+
+ if exclude_types is None:
+ exclude_types = []
+ self.test_paths = [
+ p
+ for p in self.get_test_paths(test_dir, self.negative)
+ # exclusion of types if requested
+ if self.path_params(p)["type"] not in exclude_types
+ ]
+ if not self.test_paths:
+ logger.error(f"no tests found for {self.name} in {test_dir}")
+ logger.debug(f"{self.name}: paths={self.test_paths}")
+
+ # get default parameter values for any not given in the config
+ default_params = self.get_default_params()
+ for param in default_params:
+ if param not in self.params or not self.params[param]:
+ self.params[param] = default_params[param]
+ for param in self.wks_param_names:
+ logger.info(f"{self.name}: params[{param}]={self.params[param]}")
+
+ @staticmethod
+ def _get_test_paths(test_dir: Path, base_dir_glob, path_glob, negative):
+ """Generate test paths for operators using operator specifics."""
+ for base_dir in sorted(test_dir.glob(base_dir_glob)):
+ for path in sorted(base_dir.glob(path_glob)):
+ if (not negative and "ERRORIF" not in str(path)) or (
+ negative and "ERRORIF" in str(path)
+ ):
+ yield path
+
+ @classmethod
+ def get_test_paths(cls, test_dir: Path, negative):
+ """Generate test paths for this operator."""
+ yield from Operator._get_test_paths(test_dir, f"{cls.name}*", "*", negative)
+
+ def path_params(self, path):
+ """Return a dictionary of params from the test path."""
+ params = {}
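+        # test directory names are the op name followed by one value per
+        # parameter (in wks_param_names order), separated by underscores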
+ op_name_parts = self.name.split("_")
+ values = path.name.split("_")[len(op_name_parts) :]
+ assert len(values) == len(
+ self.wks_param_names
+ ), f"len({values}) == len({self.wks_param_names})"
+ for i, param in enumerate(self.wks_param_names):
+ params[param] = values[i]
+ return params
+
+ def get_default_params(self):
+ """Get the default parameter values from the test names."""
+ params = {param: set() for param in self.wks_param_names}
+ for path in self.test_paths:
+ path_params = self.path_params(path)
+ for k in params:
+ params[k].add(path_params[k])
+ for param in params:
+ params[param] = sorted(list(params[param]))
+ return params
+
+ def select_tests(self): # noqa: C901 (function too complex)
+ """Generate the paths to the selected tests for this operator."""
+ if not self.test_paths:
+ # Exit early when nothing to select from
+ return
+
+ # the test paths that have not been selected yet
+ unused_paths = set(self.test_paths)
+
+ # a list of dictionaries of unused preselected parameter combinations
+ unused_preselected = [x for x in self.preselected]
+ logger.debug(f"preselected: {unused_preselected}")
+
+ # a list of dictionaries of unused permuted parameter combinations
+ permutes = {k: self.params[k] for k in self.permutes}
+ others = {k: self.params[k] for k in self.non_permutes}
+ unused_permuted = [x for x in expand_params(permutes, others)]
+ logger.debug(f"permuted: {unused_permuted}")
+
+ # a dictionary of sets of unused parameter values
+ if self.negative:
+ # We only care about selecting a test for each errorif case
+ unused_values = {k: set() for k in self.params}
+ unused_values["case"] = set(self.params["case"])
+ else:
+ unused_values = {k: set(v) for k, v in self.params.items()}
+
+ # select tests matching permuted, or preselected, parameter combinations
+ for path in self.test_paths:
+ path_params = self.path_params(path)
+ if path_params in unused_permuted or path_params in unused_preselected:
+ unused_paths.remove(path)
+ if path_params in unused_preselected:
+ unused_preselected.remove(path_params)
+ if path_params in unused_permuted:
+ unused_permuted.remove(path_params)
+ if self.negative:
+ # remove any other errorif cases, so we only match one
+ for p in list(unused_permuted):
+ if p["case"] == path_params["case"]:
+ unused_permuted.remove(p)
+ # remove the param values used by this path
+ for k in path_params:
+ unused_values[k].discard(path_params[k])
+ logger.debug(f"FOUND: {path.name}")
+ yield path
+
+ # search for tests that match any unused parameter values
+ for n, path in enumerate(sorted(list(unused_paths))):
+ path_params = self.path_params(path)
+ # select paths with unused param values
+ # skipping some, if sparsity is set for the param
+ for k in path_params:
+ if path_params[k] in unused_values[k] and (
+ k not in self.sparsity or n % self.sparsity[k] == 0
+ ):
+ # remove the param values used by this path
+ for p in path_params:
+ unused_values[p].discard(path_params[p])
+ logger.debug(f"FOUND: {path.name}")
+ yield path
+ break
+
+ # report any preselected combinations that were not found
+ for params in unused_preselected:
+ logger.warning(f"MISSING preselected: {params}")
+ # report any permuted combinations that were not found
+ for params in unused_permuted:
+ logger.debug(f"MISSING permutation: {params}")
+ # report any param values that were not found
+ for k, values in unused_values.items():
+ if values:
+ if k not in self.sparsity:
+ logger.warning(f"MISSING {len(values)} values for {k}: {values}")
+ else:
+ logger.info(
+ f"Skipped {len(values)} values for {k} due to sparsity setting"
+ )
+ logger.debug(f"Values skipped: {values}")
+
+
+class AbsOperator(Operator):
+ """Test selector for the ABS operator."""
+
+ name = "abs"
+
+
+class ArithmeticRightShiftOperator(Operator):
+ """Test selector for the Arithmetic Right Shift operator."""
+
+ name = "arithmetic_right_shift"
+ param_names = ["shape", "type", "rounding"]
+
+
+class AddOperator(Operator):
+ """Test selector for the ADD operator."""
+
+ name = "add"
+
+
+class ArgmaxOperator(Operator):
+ """Test selector for the ARGMAX operator."""
+
+ name = "argmax"
+ param_names = ["shape", "type", "axis"]
+
+
+class AvgPool2dOperator(Operator):
+ """Test selector for the AVG_POOL2D operator."""
+
+ name = "avg_pool2d"
+ param_names = ["shape", "type", "stride", "kernel", "pad"]
+
+
+class BitwiseAndOperator(Operator):
+ """Test selector for the BITWISE_AND operator."""
+
+ name = "bitwise_and"
+
+
+class BitwiseNotOperator(Operator):
+ """Test selector for the BITWISE_NOT operator."""
+
+ name = "bitwise_not"
+
+
+class BitwiseOrOperator(Operator):
+ """Test selector for the BITWISE_OR operator."""
+
+ name = "bitwise_or"
+
+
+class BitwiseXorOperator(Operator):
+ """Test selector for the BITWISE_XOR operator."""
+
+ name = "bitwise_xor"
+
+
+class CastOperator(Operator):
+ """Test selector for the CAST operator."""
+
+ name = "cast"
+ param_names = ["shape", "type", "output_type"]
+
+
+class ClampOperator(Operator):
+ """Test selector for the CLAMP operator."""
+
+ name = "clamp"
+
+
+class CLZOperator(Operator):
+ """Test selector for the CLZ operator."""
+
+ name = "clz"
+ param_names = ["shape", "type"]
+
+
+class ConcatOperator(Operator):
+ """Test selector for the CONCAT operator."""
+
+ name = "concat"
+ param_names = ["shape", "type", "axis"]
+
+
+class CondIfOperator(Operator):
+ """Test selector for the COND_IF operator."""
+
+ name = "cond_if"
+ param_names = ["variant", "shape", "type", "cond"]
+
+
+class ConstOperator(Operator):
+ """Test selector for the CONST operator."""
+
+ name = "const"
+
+
+class Conv2dOperator(Operator):
+ """Test selector for the CONV2D operator."""
+
+ name = "conv2d"
+ param_names = ["kernel", "shape", "type", "stride", "pad", "dilation"]
+
+
+class Conv3dOperator(Operator):
+ """Test selector for the CONV3D operator."""
+
+ name = "conv3d"
+ param_names = ["kernel", "shape", "type", "stride", "pad", "dilation"]
+
+
+class DepthwiseConv2dOperator(Operator):
+ """Test selector for the DEPTHWISE_CONV2D operator."""
+
+ name = "depthwise_conv2d"
+ param_names = ["kernel", "shape", "type", "stride", "pad", "dilation"]
+
+
+class EqualOperator(Operator):
+ """Test selector for the EQUAL operator."""
+
+ name = "equal"
+
+
+class FullyConnectedOperator(Operator):
+ """Test selector for the FULLY_CONNECTED operator."""
+
+ name = "fully_connected"
+
+
+class GatherOperator(Operator):
+ """Test selector for the GATHER operator."""
+
+ name = "gather"
+
+
+class GreaterOperator(Operator):
+ """Test selector for the GREATER operator."""
+
+ name = "greater"
+
+ @classmethod
+ def get_test_paths(cls, test_dir: Path, negative):
+ """Generate test paths for this operator."""
+ yield from Operator._get_test_paths(test_dir, f"{cls.name}", "*", negative)
+
+
+class GreaterEqualOperator(Operator):
+ """Test selector for the GREATER_EQUAL operator."""
+
+ name = "greater_equal"
+
+
+class IdentityOperator(Operator):
+ """Test selector for the IDENTITY operator."""
+
+ name = "identity"
+
+
+class IntDivOperator(Operator):
+ """Test selector for the INTDIV."""
+
+ name = "intdiv"
+
+
+class LogicalAndOperator(Operator):
+ """Test selector for the LOGICAL_AND operator."""
+
+ name = "logical_and"
+
+
+class LogicalLeftShiftOperator(Operator):
+ """Test selector for the LOGICAL_LEFT_SHIFT operator."""
+
+ name = "logical_left_shift"
+
+
+class LogicalNotOperator(Operator):
+ """Test selector for the LOGICAL_NOT operator."""
+
+ name = "logical_not"
+
+
+class LogicalOrOperator(Operator):
+ """Test selector for the LOGICAL_OR operator."""
+
+ name = "logical_or"
+
+
+class LogicalRightShiftOperator(Operator):
+ """Test selector for the LOGICAL_RIGHT_SHIFT operator."""
+
+ name = "logical_right_shift"
+
+
+class LogicalXorOperator(Operator):
+ """Test selector for the LOGICAL_XOR operator."""
+
+ name = "logical_xor"
+
+
+class MatmulOperator(Operator):
+ """Test selector for the MATMUL operator."""
+
+ name = "matmul"
+
+
+class MaximumOperator(Operator):
+ """Test selector for the Maximum operator."""
+
+ name = "maximum"
+
+
+class MaxPool2dOperator(Operator):
+ """Test selector for the MAX_POOL2D operator."""
+
+ name = "max_pool2d"
+ param_names = ["shape", "type", "stride", "kernel", "pad"]
+
+
+class MinimumOperator(Operator):
+ """Test selector for the Minimum operator."""
+
+ name = "minimum"
+
+
+class MulOperator(Operator):
+ """Test selector for the MUL operator."""
+
+ name = "mul"
+ param_names = ["shape", "type", "perm", "shift"]
+
+
+class NegateOperator(Operator):
+ """Test selector for the Negate operator."""
+
+ name = "negate"
+
+
+class PadOperator(Operator):
+ """Test selector for the PAD operator."""
+
+ name = "pad"
+ param_names = ["shape", "type", "pad"]
+
+
+class ReduceAllOperator(Operator):
+ """Test selector for the REDUCE_ALL operator."""
+
+ name = "reduce_all"
+ param_names = ["shape", "type", "axis"]
+
+
+class ReduceAnyOperator(Operator):
+ """Test selector for the REDUCE_ANY operator."""
+
+ name = "reduce_any"
+ param_names = ["shape", "type", "axis"]
+
+
+class ReduceMaxOperator(Operator):
+ """Test selector for the REDUCE_MAX operator."""
+
+ name = "reduce_max"
+ param_names = ["shape", "type", "axis"]
+
+
+class ReduceMinOperator(Operator):
+ """Test selector for the REDUCE_MIN operator."""
+
+ name = "reduce_min"
+ param_names = ["shape", "type", "axis"]
+
+
+class ReduceSumOperator(Operator):
+ """Test selector for the REDUCE_SUM operator."""
+
+ name = "reduce_sum"
+ param_names = ["shape", "type", "axis"]
+
+
+class RescaleOperator(Operator):
+ """Test selector for the RESCALE operator."""
+
+ name = "rescale"
+ param_names = [
+ "shape",
+ "type",
+ "output_type",
+ "scale",
+ "double_round",
+ "per_channel",
+ ]
+
+
+class ReshapeOperator(Operator):
+ """Test selector for the RESHAPE operator."""
+
+ name = "reshape"
+ param_names = ["shape", "type", "perm", "rank"]
+
+
+class ResizeOperator(Operator):
+ """Test selector for the RESIZE operator."""
+
+ name = "resize"
+ param_names = [
+ "shape",
+ "type",
+ "mode",
+ "shift",
+ "output_dims",
+ "output_type",
+ "stride",
+ "offset",
+ ]
+
+
+class ReverseOperator(Operator):
+ """Test selector for the REVERSE operator."""
+
+ name = "reverse"
+ param_names = ["shape", "type", "axis"]
+
+
+class ScatterOperator(Operator):
+ """Test selector for the SCATTER operator."""
+
+ name = "scatter"
+
+
+class SelectOperator(Operator):
+ """Test selector for the SELECT operator."""
+
+ name = "select"
+
+
+class SliceOperator(Operator):
+ """Test selector for the SLICE operator."""
+
+ name = "slice"
+ param_names = ["shape", "type", "perm"]
+
+
+class SubOperator(Operator):
+ """Test selector for the SUB operator."""
+
+ name = "sub"
+
+
+class TableOperator(Operator):
+ """Test selector for the TABLE operator."""
+
+ name = "table"
+
+
+class TileOperator(Operator):
+ """Test selector for the TILE operator."""
+
+ name = "tile"
+ param_names = ["shape", "type", "perm"]
+
+
+class TransposeOperator(Operator):
+ """Test selector for the TRANSPOSE operator."""
+
+ name = "transpose"
+ param_names = ["shape", "type", "perm"]
+
+ @classmethod
+ def get_test_paths(cls, test_dir: Path, negative):
+ """Generate test paths for this operator."""
+ yield from Operator._get_test_paths(test_dir, f"{cls.name}", "*", negative)
+
+
+class TransposeConv2dOperator(Operator):
+ """Test selector for the TRANSPOSE_CONV2D operator."""
+
+ name = "transpose_conv2d"
+ param_names = ["kernel", "shape", "type", "stride", "pad", "out_shape"]
+
+ def path_params(self, path):
+ """Return a dictionary of params from the test path."""
+ params = super().path_params(path)
+ # out_shape is different for every test case, so ignore it for selection
+ params["out_shape"] = ""
+ return params
+
+
+class WhileLoopOperator(Operator):
+ """Test selector for the WHILE_LOOP operator."""
+
+ name = "while_loop"
+ param_names = ["shape", "type", "cond"]
+
+
+def parse_args():
+ """Parse the arguments."""
+ parser = argparse.ArgumentParser()
+ parser.add_argument(
+ "--test-dir",
+ default=Path.cwd(),
+ type=Path,
+ help=(
+ "The directory where test subdirectories for all operators can be found"
+ " (default: current working directory)"
+ ),
+ )
+ parser.add_argument(
+ "--config",
+ default=Path(__file__).with_suffix(".json"),
+ type=Path,
+ help="A JSON file defining the parameters to use for each operator",
+ )
+ parser.add_argument(
+ "--full-path", action="store_true", help="output the full path for each test"
+ )
+ parser.add_argument(
+ "-v",
+ dest="verbosity",
+ action="count",
+ default=0,
+ help="Verbosity (can be used multiple times for more details)",
+ )
+ parser.add_argument(
+ "operators",
+ type=str,
+ nargs="*",
+ help=(
+ f"Select tests for the specified operator(s)"
+ f" - all operators are assumed if none are specified)"
+ f" - choose from: {[n for n in Operator.registry]}"
+ ),
+ )
+ parser.add_argument(
+ "--test-type",
+ dest="test_type",
+ choices=["positive", "negative"],
+ default="positive",
+ type=str,
+ help="type of tests selected, positive or negative",
+ )
+ return parser.parse_args()
+
+
+def main():
+ """Example test selection."""
+ args = parse_args()
+
+ loglevels = (logging.ERROR, logging.WARNING, logging.INFO, logging.DEBUG)
+    logger.setLevel(loglevels[min(args.verbosity, len(loglevels) - 1)])
+ logger.info(f"{__file__}: args: {args}")
+
+ try:
+ with open(args.config, "r") as fd:
+ config = json.load(fd)
+ except Exception as e:
+ logger.error(f"Config file error: {e}")
+ return 2
+
+ negative = args.test_type == "negative"
+ for op_name in Operator.registry:
+ if not args.operators or op_name in args.operators:
+ op_params = config[op_name] if op_name in config else {}
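+            # exclude float tests - only the integer-based base profile is
+            # currently supported by this conformance selection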
+ op = Operator.registry[op_name](
+ args.test_dir, op_params, negative, exclude_types=["float"]
+ )
+ for test_path in op.select_tests():
+ print(test_path.resolve() if args.full_path else test_path.name)
+
+ return 0
+
+
+if __name__ == "__main__":
+ exit(main())
diff --git a/verif/conformance/tosa_base_profile_framework_ops_info.json b/verif/conformance/tosa_base_profile_framework_ops_info.json
new file mode 100644
index 0000000..7beb2dd
--- /dev/null
+++ b/verif/conformance/tosa_base_profile_framework_ops_info.json
@@ -0,0 +1,185 @@
+{
+ "add": {
+ "tests": [
+ "add_1_qi16",
+ "add_1x32x32x8_qi8"
+ ]
+ },
+ "average_pool_2d": {
+ "alternate_names": [
+ "avg_pool2d"
+ ],
+ "tests": [
+ "average_pool_2d_1x4x4x4_qi8_st11_padSAME_kern11",
+ "average_pool_2d_1x4x8x19_qi16_st21_padSAME_kern22",
+ "average_pool_2d_1x7x7x9_qi8_st22_padSAME_kern11",
+ "average_pool_2d_1x32x32x8_qu8_st12_padVALID_kern12",
+ "average_pool_2d_1x8x4x17_qu8_st21_padVALID_kern21"
+ ]
+ },
+ "concatenation": {
+ "tests": [
+ "concatenation_concat_13x21x3_i32_axis_0",
+ "concatenation_concat_14x19_i32_axis_1",
+ "concatenation_concat_1x32x32x8_i32_axis_2",
+ "concatenation_concat_64_i32_axis_m1",
+ "concatenation_concatv2_13x21x3_i32_axis_1",
+ "concatenation_concatv2_1x32x32x8_i32_axis_m2",
+ "concatenation_concatv2_1x7x7x9_i32_axis_m3",
+ "concatenation_concatv2_64_i32_axis_0"
+ ],
+ "alternate_names": [
+ "concat",
+ "concatv2"
+ ]
+ },
+ "conv_2d": {
+ "alternate_names": [
+ "conv2d"
+ ],
+ "tests": [
+ "conv_2d_1x1_1x4x8x19_qu8_st11_padVALID_dilat21",
+ "conv_2d_3x3_1x7x7x9_qi8_st12_padVALID_dilat11",
+ "conv_2d_5x5_1x32x32x8_qu8_st11_padSAME_dilat22",
+ "conv_2d_bias_1x1_1x32x32x8_qi8_st11_padVALID_dilat12",
+ "conv_2d_bias_5x5_1x7x7x9_qu8_st22_padSAME_dilat12",
+ "conv_2d_relu_1x1_1x32x32x8_qi8",
+ "conv_2d_relu_3x3_1x4x8x19_qu8",
+ "conv_2d_relu6_3x3_1x8x4x17_qi8",
+ "conv_2d_relu6_5x5_1x4x8x19_qu8",
+ "conv_2d_relu_n1_to_1_3x3_1x32x32x8_qu8",
+ "conv_2d_relu_n1_to_1_5x5_1x7x7x9_qi8",
+ "conv_2d_tanh_3x3_1x7x7x9_qi8",
+ "conv_2d_tanh_5x5_1x32x32x8_qu8"
+ ]
+ },
+ "depthwise_conv_2d": {
+ "alternate_names": [
+ "depthwise_conv2d"
+ ],
+ "tests": [
+ "depthwise_conv_2d_1x1_1x32x32x8_qi8_st11_padSAME_dilat21",
+ "depthwise_conv_2d_3x3_1x7x7x9_qu8_st22_padVALID_dilat11",
+ "depthwise_conv_2d_5x5_1x32x32x8_qi8_st11_padSAME_dilat12",
+ "depthwise_conv_2d_bias_1x1_1x4x4x4_qu8_st11_padSAME_dilat11",
+ "depthwise_conv_2d_bias_3x3_1x32x32x8_qu8_st11_padVALID_dilat22"
+ ]
+ },
+ "fully_connected": {
+ "alternate_names": [
+ "matmul"
+ ],
+ "tests": [
+ "fully_connected_1x4x4x4_qi8",
+ "fully_connected_13x21x3_qu8",
+ "fully_connected_14x19_qi8"
+ ]
+ },
+ "leaky_relu": {
+ "tests": [
+ "leaky_relu_1_qi16_0",
+ "leaky_relu_14x19_qi8_0",
+ "leaky_relu_13x21x3_qu8_0",
+ "leaky_relu_1x4x8x19_qi16_0"
+ ]
+ },
+ "logistic": {
+ "alternate_names": [
+ "sigmoid"
+ ],
+ "tests": [
+ "logistic_13x21x3_qu8",
+ "logistic_1x8x4x17_qi8",
+ "logistic_64_qi16"
+ ]
+ },
+ "max_pool_2d": {
+ "alternate_names": [
+ "max_pool2d"
+ ],
+ "tests": [
+ "max_pool_2d_1x4x4x4_qi8_st11_padSAME_kern11",
+ "max_pool_2d_1x4x4x4_qu8_st22_padVALID_kern22",
+ "max_pool_2d_1x4x8x19_qu8_st21_padVALID_kern22",
+ "max_pool_2d_1x8x4x17_qu8_st21_padVALID_kern21",
+ "max_pool_2d_1x32x32x8_qi8_st12_padVALID_kern22"
+ ]
+ },
+ "minimum": {
+ "alternate_names": [
+ "min"
+ ],
+ "tests": [
+ "minimum_1x4x4x4_i32",
+ "minimum_64_i32"
+ ]
+ },
+ "mul": {
+ "tests": [
+ "mul_13x21x3_qi16",
+ "mul_1x4x8x19_qu8"
+ ]
+ },
+ "pack": {
+ "alternate_names": [
+ "stack"
+ ],
+ "tests": [
+ "pack_13x21x3_i32_axis0",
+ "pack_14x19_i32_axis1",
+ "pack_1x7x7x9_i32_axis2",
+ "pack_1x8x4x17_i32_axis4"
+ ]
+ },
+ "reshape": {
+ "tests": [
+ "reshape_14x19_i32_rank1",
+ "reshape_13x21x3_i32_rank3",
+ "reshape_1x32x32x8_i32_rank2"
+ ]
+ },
+ "resize_bilinear": {
+ "tests": [
+ "resize_bilinear_1x32x32x8_qi16",
+ "resize_bilinear_1x4x4x4_qi8"
+ ]
+ },
+ "resize_nearest_neighbor": {
+ "alternate_names": [
+ "resize_nearest"
+ ],
+ "tests": [
+ "resize_nearest_neighbor_1x4x8x19_qu8",
+ "resize_nearest_neighbor_1x8x4x17_qi8"
+ ]
+ },
+ "softmax": {
+ "tests": [
+ "softmax_1_qi8",
+ "softmax_14x19_qi16",
+ "softmax_1x32x32x8_qu8"
+ ]
+ },
+ "split": {
+ "tests": [
+ "split_13x21x3_i32_splitv_axis0",
+ "split_1x32x32x8_i32_split4_axis2",
+ "split_1x4x8x19_i32_splitv_axis3",
+ "split_64_i32_split2_axis0"
+ ]
+ },
+ "strided_slice": {
+ "tests": [
+ "strided_slice_13x21x3_i32_perm0",
+ "strided_slice_14x19_i32_perm1",
+ "strided_slice_64_i32_perm3"
+ ]
+ },
+ "tanh": {
+ "tests": [
+ "tanh_13x21x3_qu8",
+ "tanh_14x19_qi8",
+ "tanh_1x8x4x17_qi16"
+ ]
+ }
+}
\ No newline at end of file
diff --git a/verif/conformance/tosa_base_profile_ops_info.json b/verif/conformance/tosa_base_profile_ops_info.json
new file mode 100644
index 0000000..79e50ff
--- /dev/null
+++ b/verif/conformance/tosa_base_profile_ops_info.json
@@ -0,0 +1,2220 @@
+{
+ "abs": {
+ "group": "ew_unary",
+ "generator_args": [
+ [
+ "--target-dtype",
+ "int32",
+ "--tensor-dim-range",
+ "16,64",
+ "--target-rank",
+ "1",
+ "--target-rank",
+ "2",
+ "--target-rank",
+ "3"
+ ],
+ [
+ "--target-dtype",
+ "int32",
+ "--tensor-dim-range",
+ "1,16",
+ "--target-rank",
+ "4",
+ "--target-rank",
+ "5"
+ ],
+ [
+ "--target-shape",
+ "1,2,65533,1",
+ "--target-shape",
+ "1,65539,1,1,1",
+ "--target-dtype",
+ "int32"
+ ]
+ ],
+ "params": {},
+ "permutes": [
+ "shape",
+ "type"
+ ]
+ },
+ "add": {
+ "group": "ew_binary",
+ "generator_args": [
+ [
+ "--target-dtype",
+ "int32",
+ "--tensor-dim-range",
+ "16,64",
+ "--target-rank",
+ "1",
+ "--target-rank",
+ "2",
+ "--target-rank",
+ "3"
+ ],
+ [
+ "--target-dtype",
+ "int32",
+ "--tensor-dim-range",
+ "1,16",
+ "--target-rank",
+ "4",
+ "--target-rank",
+ "5"
+ ],
+ [
+ "--target-shape",
+ "1,2,65533,1",
+ "--target-shape",
+ "1,65539,1,1,1",
+ "--target-dtype",
+ "int32"
+ ]
+ ],
+ "params": {},
+ "permutes": [
+ "shape",
+ "type"
+ ]
+ },
+ "argmax": {
+ "group": "tensor",
+ "generator_args": [
+ [
+ "--target-dtype",
+ "int8",
+ "--target-dtype",
+ "int16",
+ "--tensor-dim-range",
+ "32,64",
+ "--target-rank",
+ "1",
+ "--target-rank",
+ "2",
+ "--target-rank",
+ "3",
+ "--num-rand-permutations",
+ "2"
+ ],
+ [
+ "--target-dtype",
+ "int8",
+ "--target-dtype",
+ "int16",
+ "--tensor-dim-range",
+ "1,32",
+ "--target-rank",
+ "4",
+ "--num-rand-permutations",
+ "2"
+ ],
+ [
+ "--target-shape",
+ "1,3,1,65535",
+ "--target-shape",
+ "1,65538,1,2",
+ "--target-dtype",
+ "int8",
+ "--num-rand-permutations",
+ "2"
+ ]
+ ],
+ "params": {},
+ "permutes": [
+ "shape",
+ "type"
+ ]
+ },
+ "arithmetic_right_shift": {
+ "group": "ew_binary",
+ "generator_args": [
+ [
+ "--tensor-dim-range",
+ "16,64",
+ "--target-rank",
+ "1",
+ "--target-rank",
+ "2",
+ "--target-rank",
+ "3"
+ ],
+ [
+ "--tensor-dim-range",
+ "1,16",
+ "--target-rank",
+ "4",
+ "--target-rank",
+ "5"
+ ],
+ [
+ "--target-shape",
+ "1,2,65533,1",
+ "--target-shape",
+ "1,65539,1,1,1",
+ "--target-dtype",
+ "int8"
+ ]
+ ],
+ "params": {},
+ "permutes": [
+ "shape",
+ "type",
+ "rounding"
+ ]
+ },
+ "avg_pool2d": {
+ "group": "tensor",
+ "generator_args": [
+ [
+ "--target-shape",
+ "1,49,42,28",
+ "--target-shape",
+ "3,11,44,3",
+ "--allow-pooling-and-conv-oversizes"
+ ],
+ [
+ "--target-shape",
+ "1,65535,5,1",
+ "--target-shape",
+ "1,3,65537,1",
+ "--target-dtype",
+ "int8",
+ "--allow-pooling-and-conv-oversizes"
+ ]
+ ],
+ "params": {
+ "shape": [],
+ "type": [
+ "i8",
+ "i16"
+ ],
+ "kernel": [],
+ "stride": [],
+ "pad": []
+ },
+ "permutes": [
+ "shape",
+ "type",
+ "pad"
+ ]
+ },
+ "bitwise_and": {
+ "group": "ew_binary",
+ "generator_args": [
+ [
+ "--tensor-dim-range",
+ "1,64",
+ "--target-rank",
+ "1",
+ "--target-rank",
+ "2",
+ "--target-rank",
+ "3"
+ ],
+ [
+ "--target-dtype",
+ "int8",
+ "--tensor-dim-range",
+ "1,16",
+ "--target-rank",
+ "4",
+ "--target-rank",
+ "5"
+ ],
+ [
+ "--target-shape",
+ "1,2,65536,1",
+ "--target-shape",
+ "1,65539,1,1,1",
+ "--target-dtype",
+ "int8"
+ ]
+ ],
+ "params": {},
+ "permutes": [
+ "shape",
+ "type"
+ ]
+ },
+ "bitwise_not": {
+ "group": "ew_unary",
+ "generator_args": [
+ [
+ "--tensor-dim-range",
+ "2,64",
+ "--target-rank",
+ "1",
+ "--target-rank",
+ "2",
+ "--target-rank",
+ "3"
+ ],
+ [
+ "--target-dtype",
+ "int8",
+ "--tensor-dim-range",
+ "1,16",
+ "--target-rank",
+ "4",
+ "--target-rank",
+ "5"
+ ],
+ [
+ "--target-shape",
+ "1,2,65537,1",
+ "--target-shape",
+ "1,65538,1,1,1",
+ "--target-dtype",
+ "int8"
+ ]
+ ],
+ "params": {},
+ "permutes": [
+ "shape",
+ "type"
+ ]
+ },
+ "bitwise_or": {
+ "group": "ew_binary",
+ "generator_args": [
+ [
+ "--tensor-dim-range",
+ "3,64",
+ "--target-rank",
+ "1",
+ "--target-rank",
+ "2",
+ "--target-rank",
+ "3"
+ ],
+ [
+ "--target-dtype",
+ "int8",
+ "--tensor-dim-range",
+ "1,16",
+ "--target-rank",
+ "4",
+ "--target-rank",
+ "5"
+ ],
+ [
+ "--target-shape",
+ "1,2,65538,1",
+ "--target-shape",
+ "1,65537,1,1,1",
+ "--target-dtype",
+ "int8"
+ ]
+ ],
+ "params": {},
+ "permutes": [
+ "shape",
+ "type"
+ ]
+ },
+ "bitwise_xor": {
+ "group": "ew_binary",
+ "generator_args": [
+ [
+ "--tensor-dim-range",
+ "4,64",
+ "--target-rank",
+ "1",
+ "--target-rank",
+ "2",
+ "--target-rank",
+ "3"
+ ],
+ [
+ "--target-dtype",
+ "int8",
+ "--tensor-dim-range",
+ "1,16",
+ "--target-rank",
+ "4",
+ "--target-rank",
+ "5"
+ ],
+ [
+ "--target-shape",
+ "1,2,65536,1",
+ "--target-shape",
+ "1,65536,1,1,1",
+ "--target-dtype",
+ "int8"
+ ]
+ ],
+ "params": {},
+ "permutes": [
+ "shape",
+ "type"
+ ]
+ },
+ "cast": {
+ "group": "type_conversion",
+ "generator_args": [
+ [
+ "--target-dtype",
+ "int8",
+ "--target-dtype",
+ "int16",
+ "--target-dtype",
+ "int32",
+ "--target-dtype",
+ "bool",
+ "--tensor-dim-range",
+ "16,64",
+ "--target-rank",
+ "1",
+ "--target-rank",
+ "2",
+ "--target-rank",
+ "3"
+ ],
+ [
+ "--target-dtype",
+ "int8",
+ "--target-dtype",
+ "int16",
+ "--target-dtype",
+ "int32",
+ "--target-dtype",
+ "bool",
+ "--tensor-dim-range",
+ "1,16",
+ "--target-rank",
+ "4",
+ "--target-rank",
+ "5"
+ ],
+ [
+ "--target-shape",
+ "1,1,65535,1,1",
+ "--target-shape",
+ "1,65538,1,2",
+ "--target-dtype",
+ "int8"
+ ]
+ ],
+ "params": {
+ "shape": [],
+ "type": [],
+ "output_type": [
+ "outINT8",
+ "outINT16",
+ "outINT32",
+ "outBOOL"
+ ]
+ },
+ "permutes": [
+ "shape",
+ "type",
+ "output_type"
+ ]
+ },
+ "clamp": {
+ "group": "activation",
+ "generator_args": [
+ [
+ "--tensor-dim-range",
+ "20,64",
+ "--target-rank",
+ "1",
+ "--target-rank",
+ "2",
+ "--target-rank",
+ "3"
+ ],
+ [
+ "--tensor-dim-range",
+ "1,20",
+ "--target-rank",
+ "4",
+ "--target-rank",
+ "5"
+ ],
+ [
+ "--target-shape",
+ "1,2,1,65536",
+ "--target-shape",
+ "1,3,65537,1,1",
+ "--target-dtype",
+ "int8"
+ ]
+ ],
+ "params": {},
+ "permutes": [
+ "shape",
+ "type"
+ ]
+ },
+ "clz": {
+ "group": "ew_unary",
+ "generator_args": [
+ [
+ "--target-dtype",
+ "int32",
+ "--tensor-dim-range",
+ "16,64",
+ "--target-rank",
+ "1",
+ "--target-rank",
+ "2",
+ "--target-rank",
+ "3"
+ ],
+ [
+ "--target-dtype",
+ "int32",
+ "--tensor-dim-range",
+ "1,16",
+ "--target-rank",
+ "4",
+ "--target-rank",
+ "5"
+ ],
+ [
+ "--target-shape",
+ "1,2,65533,1",
+ "--target-shape",
+ "1,65539,1,1,1",
+ "--target-dtype",
+ "int32"
+ ]
+ ],
+ "params": {},
+ "permutes": [
+ "shape",
+ "type"
+ ]
+ },
+ "concat": {
+ "group": "data_layout",
+ "generator_args": [
+ [
+ "--target-dtype",
+ "int8",
+ "--target-dtype",
+ "int16",
+ "--target-dtype",
+ "int32",
+ "--target-dtype",
+ "bool",
+ "--tensor-dim-range",
+ "16,64",
+ "--target-rank",
+ "1",
+ "--target-rank",
+ "2",
+ "--target-rank",
+ "3",
+ "--num-const-inputs-concat",
+ "3"
+ ],
+ [
+ "--target-dtype",
+ "int8",
+ "--target-dtype",
+ "int16",
+ "--target-dtype",
+ "int32",
+ "--target-dtype",
+ "bool",
+ "--tensor-dim-range",
+ "1,16",
+ "--target-rank",
+ "4",
+ "--target-rank",
+ "5",
+ "--num-const-inputs-concat",
+ "1"
+ ],
+ [
+ "--target-shape",
+ "1,1,65535,1,1",
+ "--target-shape",
+ "1,65538,1,2",
+ "--target-dtype",
+ "int8",
+ "--num-const-inputs-concat",
+ "2"
+ ]
+ ],
+ "params": {},
+ "permutes": [
+ "shape",
+ "type"
+ ]
+ },
+ "cond_if": {
+ "group": "control_flow",
+ "generator_args": [
+ [
+ "--target-dtype",
+ "bool",
+ "--target-dtype",
+ "int8",
+ "--target-dtype",
+ "int16",
+ "--target-dtype",
+ "int32"
+ ],
+ [
+ "--target-shape",
+ "3,22,17,1,2",
+ "--target-dtype",
+ "int32"
+ ]
+ ],
+ "params": {},
+ "permutes": [
+ "variant",
+ "shape",
+ "cond"
+ ]
+ },
+ "const": {
+ "group": "data_nodes",
+ "no_negative_tests": "true",
+ "generator_args": [
+ [
+ "--target-dtype",
+ "int8",
+ "--target-dtype",
+ "int16",
+ "--target-dtype",
+ "int32",
+ "--target-dtype",
+ "bool",
+ "--target-rank",
+ "1",
+ "--target-rank",
+ "2",
+ "--target-rank",
+ "3"
+ ],
+ [
+ "--target-dtype",
+ "int8",
+ "--target-dtype",
+ "int16",
+ "--target-dtype",
+ "int32",
+ "--target-dtype",
+ "bool",
+ "--tensor-dim-range",
+ "1,16",
+ "--target-rank",
+ "4",
+ "--target-rank",
+ "5"
+ ],
+ [
+ "--target-shape",
+ "1,1,65537,1,1",
+ "--target-shape",
+ "1,65530,1,2",
+ "--target-dtype",
+ "int8"
+ ]
+ ],
+ "params": {},
+ "permutes": [
+ "shape",
+ "type"
+ ]
+ },
+ "conv2d": {
+ "group": "tensor",
+ "generator_args": [
+ [
+ "--target-shape",
+ "1,49,42,28",
+ "--target-shape",
+ "1,11,44,13",
+ "--allow-pooling-and-conv-oversizes"
+ ],
+ [
+ "--target-shape",
+ "1,65535,4,1",
+ "--target-shape",
+ "1,5,65536,1",
+ "--target-dtype",
+ "int8",
+ "--max-conv-dilation",
+ "1",
+ "--allow-pooling-and-conv-oversizes"
+ ]
+ ],
+ "params": {
+ "shape": [],
+ "type": [
+ "i8xi8",
+ "i16xi8",
+ "i8xi4"
+ ],
+ "kernel": [],
+ "stride": [],
+ "pad": [],
+ "dilation": []
+ },
+ "permutes": [
+ "kernel",
+ "shape",
+ "type",
+ "pad"
+ ]
+ },
+ "conv3d": {
+ "group": "tensor",
+ "generator_args": [
+ [
+ "--target-shape",
+ "1,9,21,14,1",
+ "--target-shape",
+ "1,9,11,12,3",
+ "--target-dtype",
+ "int8",
+ "--target-dtype",
+ "int16",
+ "--allow-pooling-and-conv-oversizes"
+ ],
+ [
+ "--target-shape",
+ "1,1,65535,3,1",
+ "--target-shape",
+ "1,1,3,65536,1",
+ "--target-dtype",
+ "int8",
+ "--max-conv-dilation",
+ "1",
+ "--allow-pooling-and-conv-oversizes"
+ ]
+ ],
+ "params": {
+ "shape": [],
+ "type": [
+ "i8xi8",
+ "i16xi8",
+ "i8xi4"
+ ],
+ "kernel": [],
+ "stride": [],
+ "pad": [],
+ "dilation": []
+ },
+ "permutes": [
+ "kernel",
+ "shape",
+ "type",
+ "pad",
+ "stride"
+ ]
+ },
+ "depthwise_conv2d": {
+ "group": "tensor",
+ "generator_args": [
+ [
+ "--target-shape",
+ "1,9,44,3",
+ "--target-shape",
+ "1,11,33,4",
+ "--allow-pooling-and-conv-oversizes"
+ ],
+ [
+ "--target-shape",
+ "1,65532,2,1",
+ "--target-shape",
+ "1,4,65537,1",
+ "--target-dtype",
+ "int8",
+ "--max-conv-dilation",
+ "1",
+ "--allow-pooling-and-conv-oversizes"
+ ]
+ ],
+ "params": {
+ "shape": [],
+ "type": [
+ "i8xi8",
+ "i16xi8",
+ "i8xi4"
+ ],
+ "kernel": [],
+ "stride": [],
+ "pad": [],
+ "dilation": []
+ },
+ "permutes": [
+ "kernel",
+ "shape",
+ "type",
+ "pad"
+ ]
+ },
+ "equal": {
+ "group": "comparison",
+ "generator_args": [
+ [
+ "--target-dtype",
+ "int32",
+ "--tensor-dim-range",
+ "16,64",
+ "--target-rank",
+ "1",
+ "--target-rank",
+ "2",
+ "--target-rank",
+ "3"
+ ],
+ [
+ "--target-dtype",
+ "int32",
+ "--tensor-dim-range",
+ "1,16",
+ "--target-rank",
+ "4",
+ "--target-rank",
+ "5"
+ ],
+ [
+ "--target-shape",
+ "1,2,65501,1",
+ "--target-shape",
+ "1,65541,1,1,1",
+ "--target-dtype",
+ "int32"
+ ]
+ ],
+ "params": {},
+ "permutes": [
+ "shape",
+ "type"
+ ]
+ },
+ "fully_connected": {
+ "group": "tensor",
+ "generator_args": [
+ [
+ "--target-dtype",
+ "int8",
+ "--target-dtype",
+ "int16"
+ ],
+ [
+ "--target-shape",
+ "444,1",
+ "--target-shape",
+ "3,65538",
+ "--tensor-dim-range",
+ "1,2",
+ "--target-dtype",
+ "int8"
+ ],
+ [
+ "--target-shape",
+ "3,16",
+ "--target-shape",
+ "1,23",
+ "--tensor-dim-range",
+ "100,200",
+ "--target-dtype",
+ "int8"
+ ]
+ ],
+ "params": {},
+ "permutes": [
+ "shape",
+ "type"
+ ]
+ },
+ "gather": {
+ "group": "scatter_gather",
+ "generator_args": [
+ [
+ "--target-dtype",
+ "int16",
+ "--target-dtype",
+ "int32",
+ "--tensor-dim-range",
+ "4,64"
+ ],
+ [
+ "--target-shape",
+ "2,65533,1",
+ "--target-shape",
+ "1,2,65533",
+ "--target-shape",
+ "5000,1,1",
+ "--target-dtype",
+ "int8"
+ ]
+ ],
+ "params": {},
+ "permutes": [
+ "shape",
+ "type"
+ ]
+ },
+ "greater": {
+ "group": "comparison",
+ "generator_args": [
+ [
+ "--filter",
+ "^greater$",
+ "--target-dtype",
+ "int32",
+ "--tensor-dim-range",
+ "10,64",
+ "--target-rank",
+ "1",
+ "--target-rank",
+ "2",
+ "--target-rank",
+ "3"
+ ],
+ [
+ "--filter",
+ "^greater$",
+ "--target-dtype",
+ "int32",
+ "--tensor-dim-range",
+ "1,15",
+ "--target-rank",
+ "4",
+ "--target-rank",
+ "5"
+ ],
+ [
+ "--filter",
+ "^greater$",
+ "--target-shape",
+ "1,2,65530,1",
+ "--target-shape",
+ "1,65539,1,2,1",
+ "--target-dtype",
+ "int32"
+ ]
+ ],
+ "params": {},
+ "permutes": [
+ "shape",
+ "type"
+ ]
+ },
+ "greater_equal": {
+ "group": "comparison",
+ "generator_args": [
+ [
+ "--target-dtype",
+ "int32",
+ "--tensor-dim-range",
+ "20,64",
+ "--target-rank",
+ "1",
+ "--target-rank",
+ "2",
+ "--target-rank",
+ "3"
+ ],
+ [
+ "--target-dtype",
+ "int32",
+ "--tensor-dim-range",
+ "1,20",
+ "--target-rank",
+ "4",
+ "--target-rank",
+ "5"
+ ],
+ [
+ "--target-shape",
+ "1,2,65533,1",
+ "--target-shape",
+ "1,65539,1,3,1",
+ "--target-dtype",
+ "int32"
+ ]
+ ],
+ "params": {},
+ "permutes": [
+ "shape",
+ "type"
+ ]
+ },
+ "identity": {
+ "group": "data_nodes",
+ "no_negative_tests": "true",
+ "generator_args": [
+ [
+ "--target-dtype",
+ "int8",
+ "--target-dtype",
+ "int16",
+ "--target-dtype",
+ "int32",
+ "--target-dtype",
+ "bool",
+ "--tensor-dim-range",
+ "1,61",
+ "--target-rank",
+ "1",
+ "--target-rank",
+ "2",
+ "--target-rank",
+ "3"
+ ],
+ [
+ "--target-dtype",
+ "int8",
+ "--target-dtype",
+ "int16",
+ "--target-dtype",
+ "int32",
+ "--target-dtype",
+ "bool",
+ "--tensor-dim-range",
+ "1,15",
+ "--target-rank",
+ "4",
+ "--target-rank",
+ "5"
+ ],
+ [
+ "--target-shape",
+ "1,1,65536,1,1",
+ "--target-shape",
+ "1,65531,1,2",
+ "--target-dtype",
+ "int8"
+ ]
+ ],
+ "params": {},
+ "permutes": [
+ "shape",
+ "type"
+ ]
+ },
+ "intdiv": {
+ "group": "ew_binary",
+ "generator_args": [
+ [
+ "--target-dtype",
+ "int32",
+ "--tensor-dim-range",
+ "16,64",
+ "--target-rank",
+ "1",
+ "--target-rank",
+ "2",
+ "--target-rank",
+ "3"
+ ],
+ [
+ "--target-dtype",
+ "int32",
+ "--tensor-dim-range",
+ "1,16",
+ "--target-rank",
+ "4",
+ "--target-rank",
+ "5"
+ ],
+ [
+ "--target-shape",
+ "1,65533,1",
+ "--target-shape",
+ "1,1,65539,1,1",
+ "--target-dtype",
+ "int32"
+ ]
+ ],
+ "params": {},
+ "permutes": [
+ "shape",
+ "type"
+ ]
+ },
+ "logical_and": {
+ "group": "ew_binary",
+ "generator_args": [
+ [
+ "--tensor-dim-range",
+ "1,64",
+ "--target-rank",
+ "1",
+ "--target-rank",
+ "2",
+ "--target-rank",
+ "3"
+ ],
+ [
+ "--tensor-dim-range",
+ "1,16",
+ "--target-rank",
+ "4",
+ "--target-rank",
+ "5"
+ ],
+ [
+ "--target-shape",
+ "1,65538,1",
+ "--target-shape",
+ "1,2,1,65536"
+ ]
+ ],
+ "params": {},
+ "permutes": [
+ "shape",
+ "type"
+ ]
+ },
+ "logical_left_shift": {
+ "group": "ew_binary",
+ "generator_args": [
+ [
+ "--target-dtype",
+ "int8",
+ "--target-dtype",
+ "int16",
+ "--target-dtype",
+ "int32",
+ "--tensor-dim-range",
+ "16,64",
+ "--target-rank",
+ "1",
+ "--target-rank",
+ "2",
+ "--target-rank",
+ "3"
+ ],
+ [
+ "--target-dtype",
+ "int8",
+ "--target-dtype",
+ "int16",
+ "--target-dtype",
+ "int32",
+ "--tensor-dim-range",
+ "1,16",
+ "--target-rank",
+ "4",
+ "--target-rank",
+ "5"
+ ],
+ [
+ "--target-shape",
+ "1,65532,1,1",
+ "--target-shape",
+ "1,2,1,65538",
+ "--target-dtype",
+ "int8"
+ ]
+ ],
+ "params": {},
+ "permutes": [
+ "shape",
+ "type"
+ ]
+ },
+ "logical_not": {
+ "group": "ew_unary",
+ "generator_args": [
+ [
+ "--tensor-dim-range",
+ "2,64",
+ "--target-rank",
+ "1",
+ "--target-rank",
+ "2",
+ "--target-rank",
+ "3"
+ ],
+ [
+ "--tensor-dim-range",
+ "1,15",
+ "--target-rank",
+ "4",
+ "--target-rank",
+ "5"
+ ],
+ [
+ "--target-shape",
+ "1,65537,1",
+ "--target-shape",
+ "1,2,1,65535"
+ ]
+ ],
+ "params": {},
+ "permutes": [
+ "shape",
+ "type"
+ ]
+ },
+ "logical_or": {
+ "group": "ew_binary",
+ "generator_args": [
+ [
+ "--tensor-dim-range",
+ "3,64",
+ "--target-rank",
+ "1",
+ "--target-rank",
+ "2",
+ "--target-rank",
+ "3"
+ ],
+ [
+ "--tensor-dim-range",
+ "1,14",
+ "--target-rank",
+ "4",
+ "--target-rank",
+ "5"
+ ],
+ [
+ "--target-shape",
+ "1,65536,1",
+ "--target-shape",
+ "1,2,1,65537"
+ ]
+ ],
+ "params": {},
+ "permutes": [
+ "shape",
+ "type"
+ ]
+ },
+ "logical_right_shift": {
+ "group": "ew_binary",
+ "generator_args": [
+ [
+ "--target-dtype",
+ "int8",
+ "--target-dtype",
+ "int16",
+ "--target-dtype",
+ "int32",
+ "--tensor-dim-range",
+ "25,64",
+ "--target-rank",
+ "1",
+ "--target-rank",
+ "2",
+ "--target-rank",
+ "3"
+ ],
+ [
+ "--target-dtype",
+ "int8",
+ "--target-dtype",
+ "int16",
+ "--target-dtype",
+ "int32",
+ "--tensor-dim-range",
+ "1,25",
+ "--target-rank",
+ "4",
+ "--target-rank",
+ "5"
+ ],
+ [
+ "--target-shape",
+ "1,1,65540,3",
+ "--target-shape",
+ "2,1,65530,1,1",
+ "--target-dtype",
+ "int8"
+ ]
+ ],
+ "params": {},
+ "permutes": [
+ "shape",
+ "type"
+ ]
+ },
+ "logical_xor": {
+ "group": "ew_binary",
+ "generator_args": [
+ [
+ "--tensor-dim-range",
+ "4,64",
+ "--target-rank",
+ "1",
+ "--target-rank",
+ "2",
+ "--target-rank",
+ "3"
+ ],
+ [
+ "--tensor-dim-range",
+ "3,16",
+ "--target-rank",
+ "4",
+ "--target-rank",
+ "5"
+ ],
+ [
+ "--target-shape",
+ "1,65539,1",
+ "--target-shape",
+ "1,2,1,65534"
+ ]
+ ],
+ "params": {},
+ "permutes": [
+ "shape",
+ "type"
+ ]
+ },
+ "matmul": {
+ "group": "tensor",
+ "generator_args": [
+ [
+ "--target-dtype",
+ "int8",
+ "--target-dtype",
+ "int16",
+ "--tensor-dim-range",
+ "16,64"
+ ],
+ [
+ "--target-dtype",
+ "int8",
+ "--target-dtype",
+ "int16",
+ "--tensor-dim-range",
+ "1,16"
+ ],
+ [
+ "--target-shape",
+ "1,65532,1",
+ "--target-shape",
+ "1,2,65538",
+ "--target-shape",
+ "2000,1,1",
+ "--target-dtype",
+ "int8"
+ ]
+ ],
+ "params": {},
+ "permutes": [
+ "shape",
+ "type"
+ ]
+ },
+ "maximum": {
+ "group": "ew_binary",
+ "generator_args": [
+ [
+ "--target-dtype",
+ "int32",
+ "--tensor-dim-range",
+ "16,64",
+ "--target-rank",
+ "1",
+ "--target-rank",
+ "2",
+ "--target-rank",
+ "3"
+ ],
+ [
+ "--target-dtype",
+ "int32",
+ "--tensor-dim-range",
+ "1,16",
+ "--target-rank",
+ "4",
+ "--target-rank",
+ "5"
+ ],
+ [
+ "--target-shape",
+ "1,2,65533,1",
+ "--target-shape",
+ "1,65539,1,1,1",
+ "--target-dtype",
+ "int32"
+ ]
+ ],
+ "params": {},
+ "permutes": [
+ "shape",
+ "type"
+ ]
+ },
+ "max_pool2d": {
+ "group": "tensor",
+ "generator_args": [
+ [
+ "--target-shape",
+ "1,4,75,3",
+ "--target-shape",
+ "2,11,44,1",
+ "--allow-pooling-and-conv-oversizes"
+ ],
+ [
+ "--target-shape",
+ "1,65535,5,1",
+ "--target-shape",
+ "1,3,65537,1",
+ "--target-shape",
+ "33333,3,2,1",
+ "--target-dtype",
+ "int8",
+ "--allow-pooling-and-conv-oversizes"
+ ]
+ ],
+ "params": {
+ "shape": [],
+ "type": [
+ "i8",
+ "i16"
+ ],
+ "kernel": [],
+ "stride": [],
+ "pad": []
+ },
+ "permutes": [
+ "shape",
+ "type",
+ "pad"
+ ]
+ },
+ "minimum": {
+ "group": "ew_binary",
+ "generator_args": [
+ [
+ "--target-dtype",
+ "int32",
+ "--tensor-dim-range",
+ "16,64",
+ "--target-rank",
+ "1",
+ "--target-rank",
+ "2",
+ "--target-rank",
+ "3"
+ ],
+ [
+ "--target-dtype",
+ "int32",
+ "--tensor-dim-range",
+ "1,16",
+ "--target-rank",
+ "4",
+ "--target-rank",
+ "5"
+ ],
+ [
+ "--target-shape",
+ "1,2,65533,1",
+ "--target-shape",
+ "1,65539,1,1,1",
+ "--target-dtype",
+ "int32"
+ ]
+ ],
+ "params": {},
+ "permutes": [
+ "shape",
+ "type"
+ ]
+ },
+ "mul": {
+ "group": "ew_binary",
+ "generator_args": [
+ [
+ "--target-dtype",
+ "int8",
+ "--target-dtype",
+ "int16",
+ "--target-dtype",
+ "int32",
+ "--tensor-dim-range",
+ "16,64",
+ "--target-rank",
+ "1",
+ "--target-rank",
+ "2",
+ "--target-rank",
+ "3"
+ ],
+ [
+ "--target-dtype",
+ "int8",
+ "--target-dtype",
+ "int16",
+ "--target-dtype",
+ "int32",
+ "--tensor-dim-range",
+ "1,16",
+ "--target-rank",
+ "4",
+ "--target-rank",
+ "5"
+ ],
+ [
+ "--target-shape",
+ "1,2,65500,1",
+ "--target-shape",
+ "1,68539,1,1,1",
+ "--target-dtype",
+ "int8"
+ ]
+ ],
+ "params": {},
+ "permutes": [
+ "shape",
+ "type",
+ "perm",
+ "shift"
+ ]
+ },
+ "negate": {
+ "group": "ew_unary",
+ "generator_args": [
+ [
+ "--target-dtype",
+ "int8",
+ "--target-dtype",
+ "int16",
+ "--target-dtype",
+ "int32",
+ "--tensor-dim-range",
+ "22,64",
+ "--target-rank",
+ "1",
+ "--target-rank",
+ "2",
+ "--target-rank",
+ "3"
+ ],
+ [
+ "--target-dtype",
+ "int8",
+ "--target-dtype",
+ "int16",
+ "--target-dtype",
+ "int32",
+ "--tensor-dim-range",
+ "1,22",
+ "--target-rank",
+ "4",
+ "--target-rank",
+ "5"
+ ],
+ [
+ "--target-shape",
+ "3,1,65540,1",
+ "--target-shape",
+ "1,1,2,1,65531",
+ "--target-dtype",
+ "int8"
+ ]
+ ],
+ "params": {},
+ "permutes": [
+ "shape",
+ "type"
+ ]
+ },
+ "pad": {
+ "group": "data_layout",
+ "generator_args": [
+ [
+ "--tensor-dim-range",
+ "1,64",
+ "--target-dtype",
+ "int8",
+ "--target-dtype",
+ "int16",
+ "--target-dtype",
+ "int32",
+ "--target-dtype",
+ "bool",
+ "--target-rank",
+ "1",
+ "--target-rank",
+ "2",
+ "--target-rank",
+ "3"
+ ],
+ [
+ "--tensor-dim-range",
+ "1,16",
+ "--target-dtype",
+ "int8",
+ "--target-dtype",
+ "int16",
+ "--target-dtype",
+ "int32",
+ "--target-dtype",
+ "bool",
+ "--target-rank",
+ "4"
+ ],
+ [
+ "--tensor-dim-range",
+ "1,16",
+ "--target-rank",
+ "5",
+ "--target-dtype",
+ "int16"
+ ],
+ [
+ "--target-shape",
+ "1,1,65537,1",
+ "--target-dtype",
+ "int8"
+ ]
+ ],
+ "params": {},
+ "permutes": [
+ "shape",
+ "type"
+ ],
+ "sparsity": {
+ "pad": 15
+ },
+ "preselected": [
+ {
+ "shape": "6",
+ "type": "i8",
+ "pad": "pad00"
+ }
+ ]
+ },
+ "reduce_all": {
+ "group": "reduction",
+ "generator_args": [
+ [
+ "--tensor-dim-range",
+ "1,32"
+ ],
+ [
+ "--target-shape",
+ "65530,1,1,1",
+ "--target-shape",
+ "2,65538,1,1"
+ ]
+ ],
+ "params": {},
+ "permutes": [
+ "shape",
+ "type",
+ "axis"
+ ]
+ },
+ "reduce_any": {
+ "group": "reduction",
+ "generator_args": [
+ [
+ "--tensor-dim-range",
+ "1,32"
+ ],
+ [
+ "--target-shape",
+ "65530,1,1,1",
+ "--target-shape",
+ "2,65538,1,1"
+ ]
+ ],
+ "params": {},
+ "permutes": [
+ "shape",
+ "type",
+ "axis"
+ ]
+ },
+ "reduce_max": {
+ "group": "reduction",
+ "generator_args": [
+ [
+ "--target-dtype",
+ "int8",
+ "--target-dtype",
+ "int16",
+ "--target-dtype",
+ "int32",
+ "--tensor-dim-range",
+ "1,32"
+ ],
+ [
+ "--target-shape",
+ "65530,1,1,1",
+ "--target-shape",
+ "2,65538,1,1",
+ "--target-dtype",
+ "int8"
+ ]
+ ],
+ "params": {},
+ "permutes": [
+ "shape",
+ "type",
+ "axis"
+ ]
+ },
+ "reduce_min": {
+ "group": "reduction",
+ "generator_args": [
+ [
+ "--target-dtype",
+ "int8",
+ "--target-dtype",
+ "int16",
+ "--target-dtype",
+ "int32",
+ "--tensor-dim-range",
+ "1,24"
+ ],
+ [
+ "--target-shape",
+ "1,1,65538,1",
+ "--target-shape",
+ "2,65535,1,1",
+ "--target-dtype",
+ "int8"
+ ]
+ ],
+ "params": {},
+ "permutes": [
+ "shape",
+ "type",
+ "axis"
+ ]
+ },
+ "reduce_sum": {
+ "group": "reduction",
+ "generator_args": [
+ [
+ "--target-dtype",
+ "int32",
+ "--tensor-dim-range",
+ "1,32"
+ ],
+ [
+ "--target-shape",
+ "3,65532,1,1",
+ "--target-shape",
+ "65536,1,1,2",
+ "--target-dtype",
+ "int32"
+ ]
+ ],
+ "params": {},
+ "permutes": [
+ "shape",
+ "type",
+ "axis"
+ ]
+ },
+ "rescale": {
+ "group": "type_conversion",
+ "generator_args": [
+ [
+ "--tensor-dim-range",
+ "16,32",
+ "--target-rank",
+ "1",
+ "--target-rank",
+ "2",
+ "--target-rank",
+ "3"
+ ],
+ [
+ "--tensor-dim-range",
+ "1,16",
+ "--target-rank",
+ "4"
+ ],
+ [
+ "--target-shape",
+ "1,2,65533,1",
+ "--target-shape",
+ "1,65539,1,1",
+ "--target-dtype",
+ "int8"
+ ]
+ ],
+ "params": {
+ "scale": [
+ "sc0",
+ "sc0",
+ "sc1",
+ "sc1",
+ "sc1",
+ "sc1"
+ ],
+ "double_round": [
+ "dr0",
+ "dr0",
+ "dr0",
+ "dr0",
+ "dr1",
+ "dr1"
+ ],
+ "per_channel": [
+ "pc0",
+ "pc1",
+ "pc0",
+ "pc1",
+ "pc0",
+ "pc1",
+ "pc1"
+ ]
+ },
+ "permutes": [
+ "shape",
+ "type",
+ "output_type"
+ ]
+ },
+ "reshape": {
+ "group": "data_layout",
+ "generator_args": [
+ [
+ "--target-dtype",
+ "int8",
+ "--target-dtype",
+ "int16",
+ "--target-dtype",
+ "int32",
+ "--target-dtype",
+ "bool",
+ "--num-rand-permutations",
+ "2"
+ ],
+ [
+ "--target-shape",
+ "1,1,65535,1",
+ "--target-shape",
+ "1,65538,1,2",
+ "--target-dtype",
+ "int8",
+ "--num-rand-permutations",
+ "2"
+ ]
+ ],
+ "params": {},
+ "permutes": [
+ "shape",
+ "type",
+ "rank",
+ "perm"
+ ]
+ },
+ "resize": {
+ "group": "image",
+ "generator_args": [
+ [
+ "--target-dtype",
+ "int8",
+ "--target-dtype",
+ "int16"
+ ],
+ [
+ "--target-shape",
+ "1,3,16383,1",
+ "--target-dtype",
+ "int8"
+ ]
+ ],
+ "params": {},
+ "permutes": [
+ "shape",
+ "type"
+ ]
+ },
+ "reverse": {
+ "group": "data_layout",
+ "generator_args": [
+ [
+ "--target-dtype",
+ "int8",
+ "--target-dtype",
+ "int16",
+ "--target-dtype",
+ "int32",
+ "--target-dtype",
+ "bool",
+ "--tensor-dim-range",
+ "1,32"
+ ],
+ [
+ "--target-shape",
+ "1,1,65535",
+ "--target-shape",
+ "1,65540,1,2",
+ "--target-dtype",
+ "int8"
+ ]
+ ],
+ "params": {},
+ "permutes": [
+ "shape",
+ "type"
+ ]
+ },
+ "scatter": {
+ "group": "scatter_gather",
+ "generator_args": [
+ [
+ "--target-dtype",
+ "int16",
+ "--target-dtype",
+ "int32",
+ "--tensor-dim-range",
+ "4,64"
+ ],
+ [
+ "--target-shape",
+ "1,65540,1",
+ "--target-shape",
+ "1,1,65533",
+ "--target-shape",
+ "3000,7,1",
+ "--target-dtype",
+ "int8"
+ ]
+ ],
+ "params": {},
+ "permutes": [
+ "shape",
+ "type"
+ ]
+ },
+ "select": {
+ "group": "ew_ternary",
+ "generator_args": [
+ [
+ "--target-dtype",
+ "int8",
+ "--target-dtype",
+ "int16",
+ "--target-dtype",
+ "int32",
+ "--target-dtype",
+ "bool",
+ "--tensor-dim-range",
+ "16,64",
+ "--target-rank",
+ "1",
+ "--target-rank",
+ "2",
+ "--target-rank",
+ "3"
+ ],
+ [
+ "--target-dtype",
+ "int8",
+ "--target-dtype",
+ "int16",
+ "--target-dtype",
+ "int32",
+ "--target-dtype",
+ "bool",
+ "--tensor-dim-range",
+ "1,16",
+ "--target-rank",
+ "4",
+ "--target-rank",
+ "5"
+ ],
+ [
+ "--target-shape",
+ "1,1,65535,1",
+ "--target-shape",
+ "1,65538,1,2",
+ "--target-dtype",
+ "int8",
+ "--num-rand-permutations",
+ "2"
+ ]
+ ],
+ "params": {},
+ "permutes": [
+ "shape",
+ "type"
+ ]
+ },
+ "slice": {
+ "group": "data_layout",
+ "generator_args": [
+ [
+ "--target-dtype",
+ "int8",
+ "--target-dtype",
+ "int16",
+ "--target-dtype",
+ "int32",
+ "--target-dtype",
+ "bool",
+ "--tensor-dim-range",
+ "16,64",
+ "--target-rank",
+ "1",
+ "--target-rank",
+ "2",
+ "--target-rank",
+ "3",
+ "--num-rand-permutations",
+ "2"
+ ],
+ [
+ "--target-dtype",
+ "int8",
+ "--target-dtype",
+ "int16",
+ "--target-dtype",
+ "int32",
+ "--target-dtype",
+ "bool",
+ "--tensor-dim-range",
+ "1,16",
+ "--target-rank",
+ "4",
+ "--num-rand-permutations",
+ "2"
+ ],
+ [
+ "--target-shape",
+ "1,1,65535,1",
+ "--target-shape",
+ "1,65538,1,2",
+ "--target-dtype",
+ "int8",
+ "--num-rand-permutations",
+ "2"
+ ]
+ ],
+ "params": {},
+ "permutes": [
+ "shape",
+ "type",
+ "perm"
+ ]
+ },
+ "sub": {
+ "group": "ew_binary",
+ "generator_args": [
+ [
+ "--target-dtype",
+ "int32",
+ "--tensor-dim-range",
+ "16,64",
+ "--target-rank",
+ "1",
+ "--target-rank",
+ "2",
+ "--target-rank",
+ "3"
+ ],
+ [
+ "--target-dtype",
+ "int32",
+ "--tensor-dim-range",
+ "1,16",
+ "--target-rank",
+ "4",
+ "--target-rank",
+ "5"
+ ],
+ [
+ "--target-shape",
+ "1,2,65533,1",
+ "--target-shape",
+ "1,65539,1,1,1",
+ "--target-dtype",
+ "int32"
+ ]
+ ],
+ "params": {},
+ "permutes": [
+ "shape",
+ "type"
+ ]
+ },
+ "table": {
+ "group": "ew_binary",
+ "generator_args": [
+ [
+ "--tensor-dim-range",
+ "16,64",
+ "--target-rank",
+ "1",
+ "--target-rank",
+ "2",
+ "--target-rank",
+ "3"
+ ],
+ [
+ "--tensor-dim-range",
+ "1,16",
+ "--target-rank",
+ "4",
+ "--target-rank",
+ "5"
+ ],
+ [
+ "--target-shape",
+ "1,2,65536,1",
+ "--target-shape",
+ "1,65537,1,1,3",
+ "--target-dtype",
+ "int8"
+ ]
+ ],
+ "params": {},
+ "permutes": [
+ "shape",
+ "type"
+ ]
+ },
+ "tile": {
+ "group": "data_layout",
+ "generator_args": [
+ [
+ "--target-dtype",
+ "int16",
+ "--target-dtype",
+ "int32",
+ "--tensor-dim-range",
+ "4,32"
+ ],
+ [
+ "--target-shape",
+ "65533",
+ "--target-shape",
+ "1,2,65540,1",
+ "--target-dtype",
+ "int8"
+ ]
+ ],
+ "params": {},
+ "permutes": [
+ "shape",
+ "type"
+ ]
+ },
+ "transpose": {
+ "group": "data_layout",
+ "generator_args": [
+ [
+ "--filter",
+ "^transpose$",
+ "--target-dtype",
+ "int8",
+ "--target-rank",
+ "1",
+ "--target-rank",
+ "2",
+ "--num-rand-permutations",
+ "2"
+ ],
+ [
+ "--filter",
+ "^transpose$",
+ "--target-dtype",
+ "int8",
+ "--target-dtype",
+ "int16",
+ "--target-dtype",
+ "int32",
+ "--target-dtype",
+ "bool",
+ "--target-rank",
+ "3",
+ "--target-rank",
+ "4",
+ "--num-rand-permutations",
+ "2"
+ ],
+ [
+ "--filter",
+ "^transpose$",
+ "--target-shape",
+ "1,65537,1,2",
+ "--target-shape",
+ "65535,1,1,1",
+ "--target-dtype",
+ "int8",
+ "--num-rand-permutations",
+ "1"
+ ]
+ ],
+ "params": {},
+ "permutes": [
+ "shape",
+ "type",
+ "perm"
+ ]
+ },
+ "transpose_conv2d": {
+ "group": "tensor",
+ "generator_args": [
+ [
+ "--target-shape",
+ "1,49,33,1",
+ "--target-shape",
+ "2,11,33,3",
+ "--target-dtype",
+ "int16",
+ "--target-dtype",
+ "int8",
+ "--allow-pooling-and-conv-oversizes"
+ ],
+ [
+ "--target-shape",
+ "1,65536,1,1",
+ "--target-dtype",
+ "int8",
+ "--max-conv-dilation",
+ "1"
+ ]
+ ],
+ "params": {
+ "shape": [],
+ "type": [
+ "i8xi8",
+ "i16xi8",
+ "i8xi4"
+ ],
+ "kernel": [],
+ "stride": [],
+ "pad": [],
+ "out_shape": []
+ },
+ "permutes": [
+ "shape",
+ "type",
+ "stride",
+ "pad"
+ ]
+ },
+ "while_loop": {
+ "group": "control_flow",
+ "generator_args": [
+ [
+ "--target-dtype",
+ "int32"
+ ],
+ [
+ "--target-shape",
+ "3,22,17,1,2",
+ "--target-shape",
+ "1,65537,2,1",
+ "--target-dtype",
+ "int32"
+ ]
+ ],
+ "params": {},
+ "permutes": [
+ "shape",
+ "type"
+ ]
+ }
+}
\ No newline at end of file
diff --git a/verif/conformance/tosa_verif_conformance_generator.py b/verif/conformance/tosa_verif_conformance_generator.py
new file mode 100644
index 0000000..836c639
--- /dev/null
+++ b/verif/conformance/tosa_verif_conformance_generator.py
@@ -0,0 +1,676 @@
+#!/usr/bin/env python3
+# Copyright (c) 2021-2022, ARM Limited.
+# SPDX-License-Identifier: Apache-2.0
+"""Build conformance tests.
+
+Steps:
+- Tests with specific input shapes are generated using the settings in the
+  .json files.
+- Tests are selected to give good coverage.
+- Tests are run on the reference model to produce the correct output files.
+- Tests are converted into JSON format and saved to the desired output
+  directory.
+"""
+import argparse
+import json
+import logging
+import multiprocessing as mp
+import os
+import shlex
+import shutil
+import subprocess
+from functools import partial
+from itertools import tee
+from pathlib import Path
+
+from conformance.test_select import Operator
+from convert2conformance.convert2conformance import main as c2c_main
+from distutils.dir_util import copy_tree
+
+logging.basicConfig()
+logger = logging.getLogger("tosa_verif_conformance_generator")
+
+# Configuration for each TOSA profile
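+# Each profile entry names the ops info JSON files that drive test generation
+# and selection, plus the data types excluded for that profile:
+# - "operator_test_params": per-operator generator args and selection params
+# - "framework_tests": the pre-chosen framework tests for each operator
+# - "exclude_types": types filtered out of test names (e.g. "float" for base)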
+PROFILE_OPS_INFO = {
+ "base": {
+ "operator_test_params": "tosa_base_profile_ops_info.json",
+ "framework_tests": "tosa_base_profile_framework_ops_info.json",
+ "exclude_types": ["float"],
+ }
+}
+
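+# Relative location of the reference model binary within --ref-model-directory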
+LOCATION_REF_MODEL_BINARY = Path("build/reference_model/tosa_reference_model")
+
+
+class GenConformanceError(Exception):
+ """Generation error reporting exception."""
+
+ pass
+
+
+def _run_sh_command(args, cwd, full_cmd):
+ """Run an external command and capture stdout/stderr."""
+ # Quote the command line for printing
+ full_cmd_esc = [shlex.quote(x) for x in full_cmd]
+ if args.capture_output:
+ logger.debug(f"Command: {full_cmd_esc}")
+
+ rc = subprocess.run(
+ full_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=cwd
+ )
+
+ if args.capture_output:
+ stdout = rc.stdout.decode("utf-8")
+ logger.debug(f"stdout: \n{stdout}")
+ if rc.returncode != 0:
+ raise Exception(
+ "Error running command: {}.\n{}".format(
+ " ".join(full_cmd_esc), rc.stderr.decode("utf-8")
+ )
+ )
+ return (rc.stdout, rc.stderr)
+
+
+def build_op_tests(args, operator, test_params):
+ """Build tests for a given operator.
+
+    Builds a set of tests based on the parameters defined in test_params.
+
+    Returns the operator build directory.
+ """
+ assert operator in test_params
+
+ build_tests_cmd = "tosa_verif_build_tests"
+ op_build_dir = args.build_dir
+
+ ref_cmd_base = [
+ build_tests_cmd,
+ "--filter",
+ operator,
+ "-o",
+ str(op_build_dir),
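+        # Fixed seed so test generation is reproducible between runs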
+ "--seed",
+ "42",
+ ]
+
+ ref_cmds = []
+
+ if args.test_type in ["positive", "both"]:
+ # Append extra parameters and run test generator for each set of parameters.
+ for arglist in test_params[operator]["generator_args"]:
+ ref_cmd_pos_test = ref_cmd_base.copy()
+ ref_cmd_pos_test.extend(arglist)
+ ref_cmds.append(ref_cmd_pos_test)
+
+ if args.test_type in ["negative", "both"]:
+ ref_cmd_neg_test = ref_cmd_base.copy()
+ ref_cmd_neg_test.extend(["--test-type", "negative"])
+ ref_cmds.append(ref_cmd_neg_test)
+
+ logger.debug(f"Creating {operator} tests with {len(ref_cmds)} parameter(s)")
+ error = False
+ for i, cmd in enumerate(ref_cmds):
+ try:
+ _run_sh_command(args, args.ref_model_dir.absolute(), cmd)
+ logger.info(
+ f"{operator} test batch {(i+1)}/{len(ref_cmds)} created successfully"
+ )
+ except Exception as e:
+ logger.error(
+ f"{operator} test batch {(i+1)}/{len(ref_cmds)} unsuccessful, skipping"
+ )
+ logger.error(f" build_op_tests error: {e} ")
+ error = True
+ if error:
+        raise GenConformanceError()
+
+ return op_build_dir
+
+
+def _check_to_include_test(profile, test_name, exclude_negative_tests=False):
+ """Check test name for exclusions, return False to indicate excluded."""
+ excludes = ["ERRORIF"] if exclude_negative_tests else []
+ excludes.extend(PROFILE_OPS_INFO[profile]["exclude_types"])
+
+ for exclusion in excludes:
+ if f"_{exclusion}_" in test_name:
+ return False
+ return True
+
+
+def _get_all_tests_list(
+ profile, test_root_dir, operator, exclude_negative_tests=False, include_all=False
+):
+ """Create test list based on tests in the test_dir."""
+ test_dir = test_root_dir / operator
+ if not test_dir.is_dir():
+ # Tests are split into multiple dirs, for example: conv2d_1x1, conv2d_3x3
+ test_dir = test_root_dir
+ directories = [
+ tdir for tdir in test_dir.glob("*") if tdir.name.startswith(operator)
+ ]
+ else:
+ directories = [test_dir]
+
+ tests = []
+ for tdir in directories:
+ tests.extend(
+ [
+ test
+ for test in tdir.glob("*")
+ if include_all
+ or _check_to_include_test(profile, test.name, exclude_negative_tests)
+ ]
+ )
+ return tests
+
+
+def generate_results(args, operator, op_build_dir, tests=None):
+ """Run tests on reference model and save result to the test directory."""
+ num_cores = args.num_cores
+ run_tests_cmd = "tosa_verif_run_tests"
+
+ ref_model_path = args.ref_model_dir / LOCATION_REF_MODEL_BINARY
+    ref_cmd_base = [
+ run_tests_cmd,
+ "--ref-model-path",
+ str(ref_model_path.absolute()),
+ "-j",
+ str(num_cores),
+ "-v",
+ "-t",
+ ]
+ ref_cmds = []
+
+ if not tests:
+ # Do not need to run ERRORIF tests as they don't have result files
+ tests = _get_all_tests_list(
+ args.profile, op_build_dir, operator, exclude_negative_tests=True
+ )
+
+ for test in tests:
+ ref_cmd = ref_cmd_base.copy()
+ ref_cmd.append(str(test))
+ ref_cmds.append(ref_cmd)
+
+ fail_string = "UNEXPECTED_FAILURE"
+ failed_counter = 0
+
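+    # Run the reference model over the tests in parallel, one subprocess per test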
+ job_pool = mp.Pool(args.num_cores)
+ sh_partial = partial(_run_sh_command, args, args.ref_model_dir.absolute())
+ pool_results = job_pool.map(sh_partial, ref_cmds)
+ job_pool.close()
+ job_pool.join()
+
+    # Use the output captured by _run_sh_command to work out whether each test passed.
+ for i, rc in enumerate(pool_results):
+ if fail_string in str(rc[0]):
+ logger.error(f"Test {i+1}/{len(ref_cmds)}: {ref_cmds[i][-1]} failed.")
+ failed_counter += 1
+ else:
+ logger.info(f"Test {i+1}/{len(ref_cmds)}: {ref_cmds[i][-1]} passed.")
+
+ logger.info(f"{len(ref_cmds)-failed_counter}/{len(ref_cmds)} tests passed")
+ logger.info("Ran tests on model and saved results of passing tests")
+
+
+def convert_tests(
+ args,
+ operator,
+ op_build_dir,
+ output_dir,
+ tests=None,
+ group=None,
+ trim_op_subdir=False,
+):
+ """Convert tests to JSON and save to output directory."""
+ ref_model_dir = args.ref_model_dir
+
+ if group:
+ output_dir = output_dir / group
+
+ ref_cmd_base = ["--ref-model-directory", str(ref_model_dir)]
+ if args.framework_schema:
+ ref_cmd_base.extend(["--framework-schema", str(args.framework_schema)])
+ ref_cmd_base.append("--output-directory")
+
+ ref_cmds = []
+
+ if not tests:
+ tests = _get_all_tests_list(args.profile, op_build_dir, operator)
+ logger.info(f"Converting all {args.profile} profile tests")
+
+    # Controls whether tests are copied into their operator sub-directory or straight into the output directory
+ output_dir_relative_pos = -1 if trim_op_subdir else -2
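+    # e.g. <build_dir>/add/add_test -> <output_dir>/add/add_test, or
+    #      <output_dir>/add_test when the operator sub-directory is trimmed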
+ for test in tests:
+ logger.info(f"Test chosen: {test}")
+ ref_cmd = ref_cmd_base.copy()
+ full_output_directory = output_dir / test.relative_to(
+ *test.parts[:output_dir_relative_pos]
+ )
+ ref_cmd.append(str(full_output_directory))
+ ref_cmd.append(str(test))
+ ref_cmds.append(ref_cmd)
+
+ if len(ref_cmds) == 0:
+ logger.warning("No tests found. Nothing to convert")
+ return
+
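+    # Convert the tests in parallel by calling convert2conformance main() directly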
+ job_pool = mp.Pool(args.num_cores)
+
+ pool_results = job_pool.map(c2c_main, ref_cmds)
+ job_pool.close()
+ job_pool.join()
+
+ failed_counter = 0
+ for i, result in enumerate(pool_results):
+ if result != 0:
+ logger.error(
+ f"test {i+1}/{len(ref_cmds)}: {ref_cmds[i][-1]} failed to convert."
+ )
+ failed_counter += 1
+ else:
+ logger.info(f"test {i+1}/{len(ref_cmds)}: {ref_cmds[i][-1]} converted")
+ logger.info(
+ f"{len(ref_cmds)-failed_counter}/{len(ref_cmds)} tests successfully converted"
+ )
+
+ if failed_counter > 0:
+ logger.error(f"Stopping due to {failed_counter} test conversion errors")
+        raise GenConformanceError()
+
+ logger.info("Converted tests to JSON and saved to output directory")
+
+ return output_dir
+
+
+def get_op_tests_selection(args, operator, op_build_dir, test_params, negative=False):
+ """Use test picker to get subsection of tests generated."""
+ assert operator in test_params
+ try:
+ op_params = test_params[operator]
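+        # Look up the operator's test selection class registered in test_select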
+ op = Operator.registry[operator](
+ op_build_dir,
+ op_params,
+ negative,
+ exclude_types=PROFILE_OPS_INFO[args.profile]["exclude_types"],
+ )
+ except KeyError:
+ logger.error(f"{operator} operator is not supported by test_select")
+        raise GenConformanceError()
+
+ return op.select_tests()
+
+
+def check_op_tests(args, operator, output_dir):
+ """Move test folders than contain files larger than 30MB to new directory."""
+    destination_dir = Path(str(args.output_dir) + "_large_files")
+
+ tests = _get_all_tests_list(args.profile, output_dir, operator, include_all=True)
+ if not tests:
+ logger.error(
+ f"Couldn't find any tests to size check for {operator} in {output_dir}"
+ )
+        raise GenConformanceError()
+
+ for tdir in tests:
+ move_dir = False
+ test_files = [file for file in tdir.glob("*")]
+ for file in test_files:
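+            # File size converted from bytes to MiB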
+ file_size = os.stat(file).st_size / 1024**2
+ if file_size > 30:
+ move_dir = True
+
+ if move_dir:
+ move_destination = destination_dir / tdir.relative_to(output_dir)
+ logger.warning(
+ f"{tdir.relative_to(output_dir)} contains files that are too large (>30MB), test moved to new folder: {destination_dir}"
+ )
+
+ if move_destination.is_dir():
+ logger.warning(
+ f"{move_destination} directory already exists, deleting existing."
+ )
+ shutil.rmtree(str(move_destination))
+ shutil.move(str(tdir), move_destination)
+
+
+def copy_rename_framework_tests(args, operator, test_picks):
+ """Copy framework tests into new folder and rename them if needed.
+
+ The tests are renamed to match the framework operator names if an
+ alternate name has been used instead.
+ """
+ framework_tests_dir = args.framework_tests_dir
+ new_tests_dir = args.build_dir / "frameworks" / operator
+ os.makedirs(new_tests_dir, exist_ok=True)
+
+ # Get the framework tests operator name
+ if "alternate_names" in test_picks[operator]:
+ alternate_names = test_picks[operator]["alternate_names"]
+ else:
+ alternate_names = [operator]
+
+ # Get the alternate named test directories for the operator
+ for alt_name in alternate_names:
+ test_prefix = f"test_{alt_name}"
+ test_dirs = list(framework_tests_dir.glob(f"{test_prefix}_*"))
+
+ # Copy tests to new directory and rename to match framework operator names
+ # - if there is just 1 alternate name, replace the full test prefix
+ # test_add_... -> add_...
+ # - if there are multiple alternate names, just replace the "test"
+ # test_concatv2_... -> concatenation_concatv2_...
+ old_prefix = test_prefix if len(alternate_names) == 1 else "test"
+
+ for tdir in test_dirs:
+ new_test_name = tdir.name.replace(old_prefix, operator)
+ copy_destination = new_tests_dir / new_test_name
+ logger.debug(f"copying test folder {tdir} to {copy_destination}")
+ copy_tree(str(tdir), str(copy_destination))
+
+ logger.info(f"Copied and renamed {len(test_dirs)} framework test folders")
+ return new_tests_dir.parent
+
+
+def get_framework_tests_selection(args, operator, test_picks, op_build_dir):
+ """Get the list of pre-chosen tests with relative paths."""
+ try:
+ tests = test_picks[operator]["tests"]
+ except KeyError:
+ logger.error(f"Framework test selection not defined for {operator} operator")
+        raise GenConformanceError()
+
+ test_paths = [op_build_dir / operator / test for test in tests]
+ return test_paths
+
+
+def parse_args(argv=None):
+ """Parse the arguments."""
+ parser = argparse.ArgumentParser()
+ parser.add_argument(
+ "--operators",
+ type=str,
+ nargs="*",
+ help="The operator(s) to create tests for, if not supplied all tests will be created",
+ )
+ parser.add_argument(
+ "--ref-model-directory",
+ dest="ref_model_dir",
+ type=Path,
+ required=True,
+ help="Reference Model directory (must be pre-built)",
+ )
+ script_dir = Path(__file__).parent.absolute()
+ parser.add_argument(
+ "--test-param-json-directory",
+ dest="param_json_dir",
+ type=Path,
+ default=script_dir,
+ help="Test parameters (ops info) JSON file directory",
+ )
+ parser.add_argument(
+ "--convert-all-tests",
+ action="store_true",
+ help="Converts all tests instead of those picked by test_select",
+ )
+ parser.add_argument(
+ "--keep-large-files",
+ action="store_true",
+ help="Keeps tests that contain files larger than 30MB in output directory",
+ )
+ parser.add_argument(
+ "--capture-output",
+ action="store_true",
+ help="Prints output of running sh commands",
+ )
+ parser.add_argument(
+ "--build-directory",
+ dest="build_dir",
+ type=Path,
+ default=Path.cwd() / "conformance_build",
+ help="Temporary build directory for files created during this process (default is conformance_build)",
+ )
+ parser.add_argument(
+ "--output-directory",
+ dest="output_dir",
+ type=Path,
+ default=Path.cwd() / "conformance",
+ help="Output directory (default is conformance)",
+ )
+ parser.add_argument(
+ "-j",
+ dest="num_cores",
+ type=int,
+ default=6,
+ help="Number of simultaneous jobs to split the tasks into for multiprocessing",
+ )
+ parser.add_argument(
+ "-v",
+ dest="verbosity",
+ action="count",
+ default=0,
+ help="Verbosity (can be used multiple times for more details)",
+ )
+ parser.add_argument(
+ "--unit-tests",
+ dest="unit_tests",
+ choices=["operator", "framework", "both"],
+ default="operator",
+ type=str,
+ help="Which unit tests are produced: operator, framework, or both",
+ )
+ parser.add_argument(
+ "--test-type",
+ dest="test_type",
+ choices=["positive", "negative", "both"],
+ default="both",
+ type=str,
+ help="Type of tests produced: positive, negative, or both",
+ )
+ profiles = list(PROFILE_OPS_INFO.keys())
+ parser.add_argument(
+ "--profile",
+ dest="profile",
+ choices=profiles,
+ default=profiles[0],
+ type=str,
+ help="TOSA profile",
+ )
+ parser.add_argument(
+ "--framework-tests-directory",
+ dest="framework_tests_dir",
+ type=Path,
+ default=Path.cwd() / "tests",
+ help="The pre-built framework tests directory (default is tests)",
+ )
+ parser.add_argument(
+ "--framework-schema",
+ dest="framework_schema",
+ type=Path,
+ help="Framework flatbuffers schema needed to convert framework models",
+ )
+ args = parser.parse_args(argv)
+
+ return args
+
+
+def main():
+ args = parse_args()
+
+ if not args.ref_model_dir.is_dir():
+ logger.error(
+ f"Missing or invalid reference model directory: {args.ref_model_dir}"
+ )
+ return 2
+ else:
+ ref_model = args.ref_model_dir / LOCATION_REF_MODEL_BINARY
+ if not ref_model.is_file():
+ logger.error(
+ f"{LOCATION_REF_MODEL_BINARY} not found in {args.ref_model_dir}\nHave you built the reference model?"
+ )
+ return 2
+ if args.unit_tests in ["framework", "both"]:
+ if not args.framework_schema:
+ logger.error(
+ "Need to supply location of Framework flatbuffers schema via --framework-schema"
+ )
+ return 2
+ if not args.framework_tests_dir.is_dir():
+ logger.error(
+ f"Missing or invalid framework tests directory: {args.framework_tests_dir}"
+ )
+ return 2
+
+ loglevels = (logging.WARNING, logging.INFO, logging.DEBUG)
+ loglevel = loglevels[min(args.verbosity, len(loglevels) - 1)]
+ logger.setLevel(loglevel)
+    # Set the other loggers to the same level
+ logging.getLogger("test_select").setLevel(loglevel)
+ logging.getLogger("convert2conformance").setLevel(loglevel)
+
+ print(f"Creating conformance tests for TOSA {args.profile} profile")
+ print(f"Output directory: {args.output_dir}")
+
+ args.build_dir = args.build_dir.resolve()
+ logger.debug(f"Creating build directory: {args.build_dir}")
+ args.build_dir.mkdir(parents=True, exist_ok=True)
+
+ try:
+ # Framework unit tests
+ if args.unit_tests in ["framework", "both"]:
+ logger.debug("Creating FRAMEWORK unit tests")
+ test_picks_file = (
+ args.param_json_dir / PROFILE_OPS_INFO[args.profile]["framework_tests"]
+ )
+ try:
+ with open(test_picks_file, "r") as fd:
+ test_picks = json.load(fd)
+ except Exception as e:
+ logger.error(
+ f"Couldn't load framework tests info - {test_picks_file}: {e}"
+ )
+ return 1
+
+ operators = args.operators
+ if not operators:
+ # Create tests for all the operators
+ operators = list(test_picks.keys())
+
+ root_output_dir = args.output_dir / "frameworks" / "tflite" / "operators"
+ for op in operators:
+ if op not in test_picks:
+ logger.warning(
+ f"Framework op {op} not found in {test_picks_file} - skipping"
+ )
+ continue
+
+ logger.debug(f"Copying and renaming {op}")
+ framework_test_dir = copy_rename_framework_tests(args, op, test_picks)
+ if args.convert_all_tests:
+ logger.debug("Running and converting all framework tests")
+ convert_tests(
+ args,
+ op,
+ framework_test_dir,
+ root_output_dir,
+ trim_op_subdir=True,
+ )
+ else:
+ framework_tests = get_framework_tests_selection(
+ args, op, test_picks, framework_test_dir
+ )
+ convert_tests(
+ args,
+ op,
+ framework_test_dir,
+ root_output_dir,
+ tests=framework_tests,
+ trim_op_subdir=True,
+ )
+
+ # Operator unit tests
+ if args.unit_tests in ["operator", "both"]:
+ logger.debug("Creating OPERATOR unit tests")
+ test_params_file = (
+ args.param_json_dir
+ / PROFILE_OPS_INFO[args.profile]["operator_test_params"]
+ )
+ try:
+ with open(test_params_file, "r") as fd:
+ test_params = json.load(fd)
+ except Exception as e:
+ logger.error(
+ f"Couldn't load operator test params - {test_params_file}: {e}"
+ )
+ return 1
+
+ operators = args.operators
+ if not operators:
+ # Create tests for all the operators
+ operators = list(test_params.keys())
+
+ for op in operators:
+ if op not in test_params:
+ logger.warning(
+ f"{op} operator parameters not found in {test_params_file} - skipping"
+ )
+ continue
+
+ if (
+ args.test_type == "negative"
+ and "no_negative_tests" in test_params[op]
+ and test_params[op]["no_negative_tests"]
+ ):
+ logger.warning(f"No negative tests for {op}")
+ continue
+
+ op_build_dir = build_op_tests(args, op, test_params)
+
+ operator_group = test_params[op]["group"]
+ root_output_dir = args.output_dir / "operators"
+ if args.convert_all_tests:
+ logger.debug(f"Running and converting all {op} tests")
+ generate_results(args, op, op_build_dir)
+ output_dir = convert_tests(
+ args, op, op_build_dir, root_output_dir, group=operator_group
+ )
+ else:
+ if args.test_type in ["positive", "both"]:
+ tests_gen1, tests_gen2 = tee(
+ get_op_tests_selection(args, op, op_build_dir, test_params)
+ )
+ generate_results(args, op, op_build_dir, tests_gen1)
+ output_dir = convert_tests(
+ args,
+ op,
+ op_build_dir,
+ root_output_dir,
+ tests=tests_gen2,
+ group=operator_group,
+ )
+ if args.test_type in ["negative", "both"] and (
+ "no_negative_tests" not in test_params[op]
+ or not test_params[op]["no_negative_tests"]
+ ):
+ negative_tests = get_op_tests_selection(
+ args, op, op_build_dir, test_params, negative=True
+ )
+ output_dir = convert_tests(
+ args,
+ op,
+ op_build_dir,
+ root_output_dir,
+ tests=negative_tests,
+ group=operator_group,
+ )
+ if not args.keep_large_files:
+ check_op_tests(args, op, output_dir)
+ except GenConformanceError:
+ return 1
+
+ return 0
+
+
+if __name__ == "__main__":
+ exit(main())