author     evacha01 <evan.chandler@arm.com>  2024-01-26 12:25:32 +0000
committer  evacha01 <evan.chandler@arm.com>  2024-02-06 13:48:12 +0000
commit     9847722e2b172b69fe9ae80a05c27ca5c8c36617 (patch)
tree       3a3e2ad19561d3a4cadceb9fea8fa81ba37857fe
parent     870662b8cbd946d2f898f05cad4fde3c9333d11d (diff)
download   reference_model-9847722e2b172b69fe9ae80a05c27ca5c8c36617.tar.gz

Main Compliance testing for TRANSPOSE, REVERSE, and CONST

Signed-off-by: evacha01 <evan.chandler@arm.com>
Change-Id: I95b931c032ce16c56ee05caab4dd26ea89557b3c
-rw-r--r--  reference_model/src/generate/generate_utils.cc     |  3
-rw-r--r--  verif/conformance/tosa_main_profile_ops_info.json  | 19
-rw-r--r--  verif/generator/tosa_arg_gen.py                     | 11
-rw-r--r--  verif/generator/tosa_test_gen.py                    | 59
4 files changed, 69 insertions(+), 23 deletions(-)
diff --git a/reference_model/src/generate/generate_utils.cc b/reference_model/src/generate/generate_utils.cc
index 9807336..a8b472a 100644
--- a/reference_model/src/generate/generate_utils.cc
+++ b/reference_model/src/generate/generate_utils.cc
@@ -47,6 +47,7 @@ NLOHMANN_JSON_SERIALIZE_ENUM(Op,
{ Op::Op_CEIL, "CEIL" },
{ Op::Op_CLAMP, "CLAMP" },
{ Op::Op_CONCAT, "CONCAT" },
+ { Op::Op_CONST, "CONST" },
{ Op::Op_CONV2D, "CONV2D" },
{ Op::Op_DEPTHWISE_CONV2D, "DEPTHWISE_CONV2D" },
{ Op::Op_EQUAL, "EQUAL" },
@@ -74,6 +75,7 @@ NLOHMANN_JSON_SERIALIZE_ENUM(Op,
{ Op::Op_REDUCE_MIN, "REDUCE_MIN" },
{ Op::Op_REDUCE_PRODUCT, "REDUCE_PRODUCT" },
{ Op::Op_REDUCE_SUM, "REDUCE_SUM" },
+ { Op::Op_REVERSE, "REVERSE" },
{ Op::Op_SCATTER, "SCATTER" },
{ Op::Op_SELECT, "SELECT" },
{ Op::Op_SIGMOID, "SIGMOID" },
@@ -81,6 +83,7 @@ NLOHMANN_JSON_SERIALIZE_ENUM(Op,
{ Op::Op_SUB, "SUB" },
{ Op::Op_TANH, "TANH" },
{ Op::Op_TILE, "TILE" },
+ { Op::Op_TRANSPOSE, "TRANSPOSE" },
{ Op::Op_TRANSPOSE_CONV2D, "TRANSPOSE_CONV2D" },
})
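Note: the generate_utils.cc change only extends the Op-to-string table used by NLOHMANN_JSON_SERIALIZE_ENUM, so the C++ generate library can map the strings "CONST", "REVERSE" and "TRANSPOSE" in a test's data-generation metadata back to Op enum values. A rough illustration of the kind of payload involved (the field names below are assumptions, not the exact descriptor schema):

    import json

    # Illustrative only: exact meta/desc JSON layout is an assumption. The point
    # is that the op travels as a string, so the enum table above needs an entry
    # for every op that supports lazy data generation.
    meta = {
        "op": "TRANSPOSE",
        "data_gen": {
            "version": "0.1",
            "tensors": {"input-0": {"generator": "PSEUDO_RANDOM"}},
        },
    }
    print(json.dumps(meta, indent=2))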
diff --git a/verif/conformance/tosa_main_profile_ops_info.json b/verif/conformance/tosa_main_profile_ops_info.json
index 266af4d..5e35e8b 100644
--- a/verif/conformance/tosa_main_profile_ops_info.json
+++ b/verif/conformance/tosa_main_profile_ops_info.json
@@ -544,6 +544,7 @@
"profile": [
"tosa-mi"
],
+ "support_for": [ "lazy_data_gen" ],
"generation": {
"standard": {
"no_negative_tests": "true",
@@ -556,7 +557,7 @@
"--target-dtype",
"bf16",
"--fp-values-range",
- "-2.0,2.0",
+ "-max,max",
"--target-rank",
"1",
"--target-rank",
@@ -572,7 +573,7 @@
"--target-dtype",
"bf16",
"--fp-values-range",
- "-2.0,2.0",
+ "-max,max",
"--tensor-dim-range",
"1,16",
"--target-rank",
@@ -584,7 +585,7 @@
"--target-dtype",
"fp32",
"--fp-values-range",
- "-2.0,2.0",
+ "-max,max",
"--target-shape",
"1,65540,1,1,1",
"--target-shape",
@@ -1905,6 +1906,7 @@
"profile": [
"tosa-mi"
],
+ "support_for": [ "lazy_data_gen" ],
"generation": {
"standard": {
"generator_args": [
@@ -1916,7 +1918,7 @@
"--target-dtype",
"bf16",
"--fp-values-range",
- "-2.0,2.0",
+ "-max,max",
"--tensor-dim-range",
"1,48"
],
@@ -1924,7 +1926,7 @@
"--target-dtype",
"fp32",
"--fp-values-range",
- "-2.0,2.0",
+ "-max,max",
"--target-shape",
"1,1,65537",
"--target-shape",
@@ -2104,6 +2106,7 @@
"profile": [
"tosa-mi"
],
+ "support_for": [ "lazy_data_gen" ],
"generation": {
"standard": {
"generator_args": [
@@ -2115,7 +2118,7 @@
"--target-dtype",
"fp16",
"--fp-values-range",
- "-2.0,2.0",
+ "-max,max",
"--target-rank",
"1",
"--target-rank",
@@ -2133,7 +2136,7 @@
"--target-dtype",
"bf16",
"--fp-values-range",
- "-2.0,2.0",
+ "-max,max",
"--target-rank",
"3",
"--target-rank",
@@ -2147,7 +2150,7 @@
"--target-dtype",
"fp32",
"--fp-values-range",
- "-2.0,2.0",
+ "-max,max",
"--target-shape",
"1,1,65539,2",
"--target-shape",
diff --git a/verif/generator/tosa_arg_gen.py b/verif/generator/tosa_arg_gen.py
index 7825445..b4939da 100644
--- a/verif/generator/tosa_arg_gen.py
+++ b/verif/generator/tosa_arg_gen.py
@@ -2907,9 +2907,18 @@ class TosaArgGen:
# Create list of required amount of permutations
arg_list = [
- ("perm{}".format(p), [random_permutations[p].tolist()])
+ ("perm{}".format(p), {"perms": random_permutations[p].tolist()})
for p in range(limit)
]
+ # Now add data generator types
+ arg_list = TosaArgGen._add_data_generators(
+ testGen,
+ opName,
+ dtype,
+ arg_list,
+ error_name,
+ )
+ # Return list of tuples: (arg_str, args_dict)
return arg_list
@staticmethod
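Note: agTranspose now returns (arg_str, args_dict) tuples rather than bare argument lists, and _add_data_generators tags each entry with the data-generator variants to produce. A rough example of the resulting shape of arg_list (the permutation values are made up for illustration):

    # Hypothetical contents after this change: each test-name suffix is paired
    # with a dict that build_transpose() later reads via args_dict["perms"].
    arg_list = [
        ("perm0", {"perms": [2, 0, 1]}),
        ("perm1", {"perms": [1, 2, 0]}),
    ]
    for arg_str, args_dict in arg_list:
        print(arg_str, "->", args_dict["perms"])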
diff --git a/verif/generator/tosa_test_gen.py b/verif/generator/tosa_test_gen.py
index 9cb0ce2..bfafd23 100644
--- a/verif/generator/tosa_test_gen.py
+++ b/verif/generator/tosa_test_gen.py
@@ -1666,15 +1666,23 @@ class TosaTestGen:
self.ser.addOperator(op["op"], input_list, output_list, attr)
return TosaTestGen.BuildInfo(result_tensor, None)
- def build_transpose(self, op, a, perms, validator_fcns=None, error_name=None):
- result_tens = OutputShaper.transposeOp(self.ser, self.rng, a, perms, error_name)
+ def build_transpose(
+ self, op, inputs, args_dict, validator_fcns=None, error_name=None, qinfo=None
+ ):
+ assert len(inputs) == 1
+ a = inputs[0]
+ perms = args_dict["perms"]
+
+ result_tensor = OutputShaper.transposeOp(
+ self.ser, self.rng, a, perms, error_name
+ )
attr = ts.TosaSerializerAttribute()
attr.TransposeAttribute(perms)
# Invalidate Input/Output list for error if checks.
input_list = [a.name]
- output_list = [result_tens.name]
+ output_list = [result_tensor.name]
pCount, cCount = op["operands"]
num_operands = pCount + cCount
input_list, output_list = TosaErrorIfArgGen.eiInvalidateInputOutputList(
@@ -1687,11 +1695,11 @@ class TosaTestGen:
error_name,
op=op,
input_shape=a.shape,
- output_shape=result_tens.shape,
+ output_shape=result_tensor.shape,
perms=perms,
input_dtype=a.dtype,
- output_dtype=result_tens.dtype,
- result_tensors=[result_tens],
+ output_dtype=result_tensor.dtype,
+ result_tensors=[result_tensor],
input_list=input_list,
output_list=output_list,
num_operands=num_operands,
@@ -1700,7 +1708,12 @@ class TosaTestGen:
return None
self.ser.addOperator(op["op"], input_list, output_list, attr)
- return result_tens
+
+ compliance = self.tensorComplianceMetaData(
+ op, a.dtype, args_dict, result_tensor, error_name
+ )
+
+ return TosaTestGen.BuildInfo(result_tensor, compliance)
def build_slice(
self, op, inputs, args_dict, validator_fcns=None, error_name=None, qinfo=None
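Note: build_transpose (and build_const below) are reworked onto the signature already used by builders such as build_slice: tensors arrive in inputs, op arguments in args_dict, and the method returns a BuildInfo carrying the result tensor plus compliance metadata instead of a bare tensor. A stripped-down sketch of that shared pattern, with simplified stand-in names rather than the real framework:

    # Minimal sketch of the builder convention this patch moves TRANSPOSE and
    # CONST onto; the real methods also handle ERROR_IF list invalidation.
    class BuildInfo:
        def __init__(self, result_tensor, compliance):
            self.result_tensor = result_tensor
            self.compliance = compliance

    def tensor_compliance_metadata(op, dtype, args_dict, result, error_name):
        # Stand-in for tensorComplianceMetaData(): emits the compliance block
        # consumed by the checker, or None for ERROR_IF tests.
        return {"op": op} if error_name is None else None

    def build_example(op, inputs, args_dict, error_name=None):
        assert len(inputs) == 1
        a = inputs[0]
        perms = args_dict["perms"]          # op arguments now travel in args_dict
        result = [a[i] for i in perms]      # toy "transpose" over a flat list
        compliance = tensor_compliance_metadata(op, "fp32", args_dict, result, error_name)
        return BuildInfo(result, compliance)

    info = build_example("TRANSPOSE", [["x", "y", "z"]], {"perms": [2, 0, 1]})
    print(info.result_tensor, info.compliance)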
@@ -1954,9 +1967,18 @@ class TosaTestGen:
)
return result_tens
- def build_const(self, op, val, validator_fcns=None, error_name=None):
+ def build_const(
+ self, op, inputs, args_dict, validator_fcns=None, error_name=None, qinfo=None
+ ):
+ assert len(inputs) == 1
+ val = inputs[0]
self.ser.addOutputTensor(val)
- return val
+
+ compliance = self.tensorComplianceMetaData(
+ op, val.dtype, args_dict, val, error_name
+ )
+
+ return TosaTestGen.BuildInfo(val, compliance)
# Type Conversion
def build_cast(
@@ -4356,6 +4378,9 @@ class TosaTestGen:
TosaErrorValidator.evWrongInputList,
TosaErrorValidator.evWrongOutputList,
),
+ "data_gen": {
+ "fp": (gtu.DataGenType.PSEUDO_RANDOM,),
+ },
},
"slice": {
"op": Op.SLICE,
@@ -4419,7 +4444,7 @@ class TosaTestGen:
"build_fcn": (
build_transpose,
TosaTensorGen.tgBasic,
- TosaTensorValuesGen.tvgDefault,
+ TosaTensorValuesGen.tvgLazyGenDefault,
TosaArgGen.agTranspose,
),
"types": TYPE_FIB,
@@ -4434,6 +4459,9 @@ class TosaTestGen:
TosaErrorValidator.evRankMismatch,
TosaErrorValidator.evTensorSizeInputOutputMismatch,
),
+ "data_gen": {
+ "fp": (gtu.DataGenType.PSEUDO_RANDOM,),
+ },
},
# Data nodes
"const": {
@@ -4442,10 +4470,13 @@ class TosaTestGen:
"build_fcn": (
build_const,
TosaTensorGen.tgBasic,
- TosaTensorValuesGen.tvgDefault,
- None,
+ TosaTensorValuesGen.tvgLazyGenDefault,
+ TosaArgGen.agNone,
),
"types": TYPE_FIB + [DType.INT48],
+ "data_gen": {
+ "fp": (gtu.DataGenType.PSEUDO_RANDOM,),
+ },
},
"identity": {
"op": Op.IDENTITY,
@@ -4788,8 +4819,8 @@ class TosaTestGen:
"build_fcn": (
build_const,
TosaTensorGen.tgBasic,
- TosaTensorValuesGen.tvgDefault,
- None,
+ TosaTensorValuesGen.tvgLazyGenDefault,
+ TosaArgGen.agNone,
),
"types": [DType.SHAPE],
},