author     Jeremy Johnson <jeremy.johnson@arm.com>    2023-12-07 16:35:28 +0000
committer  Eric Kunze <eric.kunze@arm.com>            2023-12-14 17:56:51 +0000
commit     a8420add949564053495ef78f3213f163c30fb9a (patch)
tree       4c5e2783433e9443b2ed02e5e25c51cc5de2affd /verif
parent     81db5d2f275f69cc0d3e8687af57bdba99971042 (diff)
download   reference_model-a8420add949564053495ef78f3213f163c30fb9a.tar.gz
Main Compliance testing for SCATTER and GATHER
Added indices shuffling and random INT32 support to the generate lib, with
testing of these new random generator modes.

Signed-off-by: Jeremy Johnson <jeremy.johnson@arm.com>
Change-Id: I058d8b092470228075e8fe69c2ededa639163003
Diffstat (limited to 'verif')
-rw-r--r--  verif/conformance/tosa_base_profile_ops_info.json |  10
-rw-r--r--  verif/conformance/tosa_main_profile_ops_info.json |  20
-rw-r--r--  verif/generator/datagenerator.py                   |   4
-rw-r--r--  verif/generator/tosa_arg_gen.py                    | 150
-rw-r--r--  verif/generator/tosa_error_if.py                   |   4
-rw-r--r--  verif/generator/tosa_test_gen.py                   | 106
6 files changed, 218 insertions, 76 deletions
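
The patch wires two index-generation schemes into the test generator: GATHER
indices are drawn uniformly from [0, K) as random INT32 values, while SCATTER
indices are a shuffled permutation truncated to W per batch so that no output
index is written more than once. The following is a minimal NumPy sketch of
the idea only; the helper names are illustrative, not the generator's API.

    import numpy as np

    rng = np.random.default_rng(42)

    def gather_indices(N, W, K):
        # GATHER: any index in [0, K) may repeat, shape (N, W)
        return rng.integers(low=0, high=K, size=(N, W), dtype=np.int32)

    def scatter_indices(N, W, K):
        # SCATTER: shuffle 0..K-1 and keep the first W per batch, so each
        # output index occurs at most once (required by the TOSA spec)
        assert K >= W
        return np.stack([rng.permutation(K)[:W] for _ in range(N)]).astype(np.int32)

    print(gather_indices(2, 4, 10))
    print(scatter_indices(2, 4, 10))
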
diff --git a/verif/conformance/tosa_base_profile_ops_info.json b/verif/conformance/tosa_base_profile_ops_info.json
index 3a8622b..b186b06 100644
--- a/verif/conformance/tosa_base_profile_ops_info.json
+++ b/verif/conformance/tosa_base_profile_ops_info.json
@@ -1502,11 +1502,15 @@
"--target-dtype",
"int32",
"--tensor-dim-range",
- "4,64"
+ "4,64",
+ "--max-batch-size",
+ "64"
],
[
"--target-dtype",
"int8",
+ "--tensor-dim-range",
+ "5,20",
"--target-shape",
"2,65533,1",
"--target-shape",
@@ -3229,7 +3233,9 @@
"--target-dtype",
"int32",
"--tensor-dim-range",
- "4,64"
+ "4,64",
+ "--max-batch-size",
+ "64"
],
[
"--target-dtype",
diff --git a/verif/conformance/tosa_main_profile_ops_info.json b/verif/conformance/tosa_main_profile_ops_info.json
index c3bd6ee..fb25622 100644
--- a/verif/conformance/tosa_main_profile_ops_info.json
+++ b/verif/conformance/tosa_main_profile_ops_info.json
@@ -1133,6 +1133,7 @@
"profile": [
"tosa-mi"
],
+ "support_for": [ "lazy_data_gen" ],
"generation": {
"standard": {
"negative_dim_range": "1,10",
@@ -1145,15 +1146,19 @@
"--target-dtype",
"bf16",
"--fp-values-range",
- "-2.0,2.0",
+ "-max,max",
"--tensor-dim-range",
- "5,72"
+ "5,72",
+ "--max-batch-size",
+ "72"
],
[
"--target-dtype",
"fp32",
"--fp-values-range",
- "-2.0,2.0",
+ "-max,max",
+ "--tensor-dim-range",
+ "5,20",
"--target-shape",
"2,65536,1",
"--target-shape",
@@ -1992,6 +1997,7 @@
"profile": [
"tosa-mi"
],
+ "support_for": [ "lazy_data_gen" ],
"generation": {
"standard": {
"negative_dim_range": "1,10",
@@ -2004,15 +2010,17 @@
"--target-dtype",
"bf16",
"--fp-values-range",
- "-2.0,2.0",
+ "-max,max",
"--tensor-dim-range",
- "5,56"
+ "5,56",
+ "--max-batch-size",
+ "56"
],
[
"--target-dtype",
"fp32",
"--fp-values-range",
- "-2.0,2.0",
+ "-max,max",
"--target-shape",
"1,65541,1",
"--target-shape",
diff --git a/verif/generator/datagenerator.py b/verif/generator/datagenerator.py
index 0dd60e5..b5ef35d 100644
--- a/verif/generator/datagenerator.py
+++ b/verif/generator/datagenerator.py
@@ -78,6 +78,10 @@ class GenerateLibrary:
size_bytes = size * 2
# Create buffer of bytes and initialize to zero
buffer = (ct.c_ubyte * size_bytes)(0)
+ elif dtype == "INT32":
+ # Create buffer and initialize to zero
+ buffer = (ct.c_int32 * size)(0)
+ size_bytes = size * 4
else:
raise GenerateError(f"Unsupported data type {dtype}")
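
The new INT32 branch above allocates a zero-initialised ctypes buffer of
`size` 32-bit integers (4 bytes each) for the generate library to fill. A
minimal sketch of the allocation, plus one assumed way of viewing the result
as a NumPy array once the library has written into it:

    import ctypes as ct
    import numpy as np

    size = 6                             # number of INT32 elements
    buffer = (ct.c_int32 * size)(0)      # ctypes array, all elements zero
    size_bytes = size * 4                # 4 bytes per INT32 element

    # ... the generate library would fill `buffer` in place here ...

    arr = np.ctypeslib.as_array(buffer)  # zero-copy int32 NumPy view
    print(arr, size_bytes)
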
diff --git a/verif/generator/tosa_arg_gen.py b/verif/generator/tosa_arg_gen.py
index 35253e0..50811ac 100644
--- a/verif/generator/tosa_arg_gen.py
+++ b/verif/generator/tosa_arg_gen.py
@@ -204,7 +204,7 @@ class TosaTensorGen:
return shape_list
@staticmethod
- def tgScatter(testGen, opName, rank, error_name=None):
+ def tgGather(testGen, opName, rank, error_name=None):
pl, const = opName["operands"]
assert pl == 2
@@ -212,12 +212,31 @@ class TosaTensorGen:
if error_name != ErrorIf.WrongRank:
assert rank == 3
+ values_shape = testGen.makeShape(rank)
+ values_shape = testGen.constrictBatchSize(values_shape)
+
+ N = values_shape[0]
+ W = testGen.makeDimension()
+ indices_shape = [N, W]
+
+ shape_list = [values_shape, indices_shape]
+ return shape_list
+
+ @staticmethod
+ def tgScatter(testGen, opName, rank, error_name=None):
+ pl, const = opName["operands"]
+
+ assert pl == 3
+ assert const == 0
+ if error_name != ErrorIf.WrongRank:
+ assert rank == 3
+
values_in_shape = testGen.makeShape(rank)
- K = values_in_shape[1]
+ values_in_shape = testGen.constrictBatchSize(values_in_shape)
- # ignore max batch size if target shape is set
- if testGen.args.max_batch_size and not testGen.args.target_shapes:
- values_in_shape[0] = min(values_in_shape[0], testGen.args.max_batch_size)
+ N = values_in_shape[0]
+ K = values_in_shape[1]
+ C = values_in_shape[2]
# Make sure W is not greater than K, as we can only write each output index
# once (having a W greater than K means that you have to repeat a K index)
@@ -225,11 +244,12 @@ class TosaTensorGen:
W_max = min(testGen.args.tensor_shape_range[1], K)
W = testGen.randInt(W_min, W_max) if W_min < W_max else W_min
- input_shape = [values_in_shape[0], W, values_in_shape[2]]
+ input_shape = [N, W, C]
shape_list = []
- shape_list.append(values_in_shape.copy())
- shape_list.append(input_shape.copy())
+ shape_list.append(values_in_shape)
+ shape_list.append([N, W]) # indices
+ shape_list.append(input_shape)
return shape_list
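
The --max-batch-size limit added to the conformance JSON above is applied in
tgGather/tgScatter through testGen.constrictBatchSize(), replacing the inline
clamp that tgScatter used to carry. A hedged sketch of that behaviour, based
on the inline logic the patch removes (the real helper may differ):

    def constrict_batch_size(shape, max_batch_size, target_shapes=None):
        # Ignore the limit when explicit target shapes were requested
        if max_batch_size and not target_shapes:
            # Only the batch (N) dimension is clamped
            shape = [min(shape[0], max_batch_size)] + list(shape[1:])
        return shape

    print(constrict_batch_size([128, 17, 3], 64))   # -> [64, 17, 3]
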
@@ -695,6 +715,13 @@ class TosaTensorValuesGen:
"round" in argsDict["data_range_list"][idx]
and argsDict["data_range_list"][idx]["round"] is True
)
+ if data_range is not None and dtype not in (
+ DType.FP16,
+ DType.FP32,
+ DType.BF16,
+ ):
+ # Change from inclusive to exclusive range
+ data_range = (data_range[0], data_range[1] + 1)
# Ignore lazy data gen option and create data array using any range limits
arr = testGen.getRandTensor(shape, dtype, data_range)
if roundMode:
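
The change above widens integer data ranges from inclusive to exclusive
before sampling, while tvgGather/tvgScatter further down supply the inclusive
form (0, K - 1) for the indices tensor. A small sketch of why the two
conventions select the same legal index values (helper names illustrative):

    import numpy as np

    rng = np.random.default_rng(0)

    def rand_int_tensor(shape, inclusive_range):
        lo, hi = inclusive_range
        # Inclusive range -> exclusive upper bound for the integer sampler
        return rng.integers(lo, hi + 1, size=shape, dtype=np.int32)

    K = 8
    indices = rand_int_tensor((2, 5), (0, K - 1))
    assert indices.max() < K    # indices never reach dimension K
    print(indices)
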
@@ -732,13 +759,15 @@ class TosaTensorValuesGen:
# TODO - generate seed for this generator based on test
info["rng_seed"] = 42
+ data_range = None
if "data_range_list" in argsDict:
data_range = argsDict["data_range_list"][idx]["range"]
if "round" in argsDict["data_range_list"][idx]:
info["round"] = argsDict["data_range_list"][idx]["round"]
elif "data_range" in argsDict:
data_range = argsDict["data_range"]
- else:
+
+ if data_range is None:
data_range = testGen.getDTypeRange(
dtypeList[idx], high_inclusive=True
)
@@ -1455,6 +1484,109 @@ class TosaTensorValuesGen:
testGen, opName, dtypeList, shapeList, argsDict, error_name
)
+ @staticmethod
+ def tvgGather(testGen, opName, dtypeList, shapeList, argsDict, error_name=None):
+ K = shapeList[0][1]
+
+ # Fix the type of the indices tensor
+ dtypeList[1] = DType.INT32
+
+ dtype = dtypeList[0]
+ if not gtu.dtypeIsSupportedByCompliance(dtype):
+ # Test unsupported by data generator
+ op = testGen.TOSA_OP_LIST[opName]
+ pCount, cCount = op["operands"]
+ assert (
+ pCount == 2 and cCount == 0
+ ), "Op.GATHER must have 2 placeholders, 0 consts"
+
+ tens_ser_list = []
+ for idx, shape in enumerate(shapeList):
+ dtype = dtypeList[idx]
+ if idx != 1:
+ arr = testGen.getRandTensor(shape, dtype)
+ tens_ser_list.append(testGen.ser.addPlaceholder(shape, dtype, arr))
+ else:
+ # Limit data range of indices tensor upto K (exclusive)
+ arr = testGen.getRandTensor(shape, dtype, (0, K))
+ # To match old functionality - create indices as CONST
+ tens_ser_list.append(testGen.ser.addConst(shape, dtype, arr))
+
+ return TosaTensorValuesGen.TVGInfo(tens_ser_list, None)
+
+ else:
+ # ERROR_IF or floating point test
+ # Use inclusive values upto index K for indices tensor
+ data_range_list = (
+ {"range": None},
+ {"range": (0, K - 1)},
+ )
+ argsDict["data_range_list"] = data_range_list
+
+ return TosaTensorValuesGen.tvgLazyGenDefault(
+ testGen, opName, dtypeList, shapeList, argsDict, error_name
+ )
+
+ @staticmethod
+ def tvgScatter(testGen, opName, dtypeList, shapeList, argsDict, error_name=None):
+ K = shapeList[0][1]
+ W = shapeList[2][1]
+
+ # Work out an indices tensor here with data that doesn't exceed the
+ # dimension K of the values_in tensor and does NOT repeat the same K
+ # location as needed by the spec:
+ # "It is not permitted to repeat the same output index within a single
+ # SCATTER operation and so each output index occurs at most once."
+ assert K >= W, "Op.SCATTER W must be smaller or equal to K"
+
+ # Fix the type of the indices tensor
+ dtypeList[1] = DType.INT32
+
+ dtype = dtypeList[0]
+ if not gtu.dtypeIsSupportedByCompliance(dtype):
+ # Test unsupported by data generator
+ op = testGen.TOSA_OP_LIST[opName]
+ pCount, cCount = op["operands"]
+ assert (
+ pCount == 3 and cCount == 0
+ ), "Op.SCATTER must have 3 placeholders, 0 consts"
+
+ tens_ser_list = []
+ for idx, shape in enumerate(shapeList):
+ dtype = dtypeList[idx]
+ if idx != 1:
+ arr = testGen.getRandTensor(shape, dtype)
+ tens_ser_list.append(testGen.ser.addPlaceholder(shape, dtype, arr))
+ else:
+ # Create the indices array
+ assert dtype == DType.INT32, "Op.SCATTER unexpected indices type"
+ arr = []
+ for n in range(shape[0]):
+ # Get a shuffled list of output indices (0 to K-1) and
+ # limit length to W
+ arr.append(testGen.rng.permutation(K)[:W])
+ indices_arr = np.array(arr, dtype=np.int32) # (N, W)
+ # To match old functionality - create indices as CONST
+ tens_ser_list.append(
+ testGen.ser.addConst(shape, dtype, indices_arr)
+ )
+
+ return TosaTensorValuesGen.TVGInfo(tens_ser_list, None)
+
+ else:
+ # ERROR_IF or floating point test
+ # Use inclusive values upto index K for indices tensor
+ data_range_list = (
+ {"range": None},
+ {"range": (0, K - 1)},
+ {"range": None},
+ )
+ argsDict["data_range_list"] = data_range_list
+
+ return TosaTensorValuesGen.tvgLazyGenDefault(
+ testGen, opName, dtypeList, shapeList, argsDict, error_name
+ )
+
class TosaArgGen:
"""Argument generators create exhaustive or random lists of attributes for
diff --git a/verif/generator/tosa_error_if.py b/verif/generator/tosa_error_if.py
index 86be347..5dd785f 100644
--- a/verif/generator/tosa_error_if.py
+++ b/verif/generator/tosa_error_if.py
@@ -666,12 +666,8 @@ class TosaErrorValidator:
error_reason = "Op input list does not match expected input"
if check:
- op = kwargs["op"]
input_list = kwargs["input_list"]
num_operands = kwargs["num_operands"]
- if op["op"] in [Op.SCATTER, Op.GATHER]:
- # SCATTER/GATHER add an indices input tensor in their build functions
- num_operands += 1
if len(input_list) != num_operands:
error_result = True
diff --git a/verif/generator/tosa_test_gen.py b/verif/generator/tosa_test_gen.py
index 53b0b75..ee935d4 100644
--- a/verif/generator/tosa_test_gen.py
+++ b/verif/generator/tosa_test_gen.py
@@ -66,6 +66,12 @@ class TosaTestGen:
v = maxFP
elif v == "-max":
v = -maxFP
+ elif v < 0:
+ # Trim to minimum data type value
+ v = max(v, -maxFP)
+ elif v > 0:
+ # Trim to maximum data type value
+ v = min(v, maxFP)
vals.append(v)
return tuple(sorted(vals))
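
This hunk resolves the new "-max,max" setting used in the conformance JSON
above: "max"/"-max" map to the data type's largest finite value and any
explicit bound is trimmed into [-maxFP, maxFP]. A self-contained sketch of the
same resolution, with the string parsing and helper name assumed for
illustration:

    import numpy as np

    def resolve_fp_range(range_strs, maxFP):
        vals = []
        for v in range_strs:
            if v == "max":
                v = maxFP
            elif v == "-max":
                v = -maxFP
            else:
                # Trim explicit values to the data type's finite range
                v = float(v)
                v = max(v, -maxFP) if v < 0 else min(v, maxFP)
            vals.append(v)
        return tuple(sorted(vals))

    FP32_MAX = float(np.finfo(np.float32).max)
    print(resolve_fp_range(["-max", "max"], FP32_MAX))  # full finite FP32 range
    print(resolve_fp_range(["-2.0", "2.0"], FP32_MAX))  # left unchanged
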
@@ -1722,27 +1728,19 @@ class TosaTestGen:
self.ser.addOperator(op["op"], input_list, output_list, attr)
return result_tens
- def build_gather(self, op, values, validator_fcns=None, error_name=None):
-
- # Create a new indicies tensor
- # here with data that doesn't exceed the dimensions of the values tensor
-
- K = values.shape[1] # K
- W = self.randInt(
- self.args.tensor_shape_range[0], self.args.tensor_shape_range[1]
- ) # W
- indicies_arr = np.int32(
- self.rng.integers(low=0, high=K, size=[values.shape[0], W])
- ) # (N, W)
- indicies = self.ser.addConst(indicies_arr.shape, DType.INT32, indicies_arr)
+ def build_gather(
+ self, op, inputs, args_dict, validator_fcns=None, error_name=None, qinfo=None
+ ):
+ assert len(inputs) == 2
+ values, indices = inputs
- result_tens = OutputShaper.gatherOp(
- self.ser, self.rng, values, indicies, error_name
+ result_tensor = OutputShaper.gatherOp(
+ self.ser, self.rng, values, indices, error_name
)
# Invalidate Input/Output list for error if checks.
- input_list = [values.name, indicies.name]
- output_list = [result_tens.name]
+ input_list = [values.name, indices.name]
+ output_list = [result_tensor.name]
pCount, cCount = op["operands"]
num_operands = pCount + cCount
input_list, output_list = TosaErrorIfArgGen.eiInvalidateInputOutputList(
@@ -1755,10 +1753,10 @@ class TosaTestGen:
error_name,
op=op,
input_shape=values.shape,
- output_shape=result_tens.shape,
+ output_shape=result_tensor.shape,
input_dtype=values.dtype,
- output_dtype=result_tens.dtype,
- result_tensors=[result_tens],
+ output_dtype=result_tensor.dtype,
+ result_tensors=[result_tensor],
input_list=input_list,
output_list=output_list,
num_operands=num_operands,
@@ -1767,33 +1765,24 @@ class TosaTestGen:
self.ser.addOperator(op["op"], input_list, output_list)
- return result_tens
+ compliance = self.tensorComplianceMetaData(
+ op, values.dtype, args_dict, result_tensor, error_name
+ )
- def build_scatter(self, op, values_in, input, validator_fcns=None, error_name=None):
-
- K = values_in.shape[1] # K
- W = input.shape[1] # W
-
- # Create an indices tensor here with data that doesn't exceed the
- # dimension K of the values_in tensor and does NOT repeat the same K
- # location as needed by the spec:
- # "It is not permitted to repeat the same output index within a single
- # SCATTER operation and so each output index occurs at most once."
- assert K >= W
- arr = []
- for n in range(values_in.shape[0]):
- # Get a shuffled list of output indices and limit it to size W
- arr.append(self.rng.permutation(K)[:W])
- indices_arr = np.array(arr, dtype=np.int32) # (N, W)
- indices = self.ser.addConst(indices_arr.shape, DType.INT32, indices_arr)
-
- result_tens = OutputShaper.scatterOp(
+ return TosaTestGen.BuildInfo(result_tensor, compliance)
+
+ def build_scatter(
+ self, op, inputs, args_dict, validator_fcns=None, error_name=None, qinfo=None
+ ):
+ assert len(inputs) == 3
+ values_in, indices, input = inputs
+ result_tensor = OutputShaper.scatterOp(
self.ser, self.rng, values_in, indices, input, error_name
)
# Invalidate Input/Output list for error if checks.
input_list = [values_in.name, indices.name, input.name]
- output_list = [result_tens.name]
+ output_list = [result_tensor.name]
pCount, cCount = op["operands"]
num_operands = pCount + cCount
input_list, output_list = TosaErrorIfArgGen.eiInvalidateInputOutputList(
@@ -1806,10 +1795,10 @@ class TosaTestGen:
error_name,
op=op,
input_shape=values_in.shape,
- output_shape=result_tens.shape,
+ output_shape=result_tensor.shape,
input_dtype=values_in.dtype,
- output_dtype=result_tens.dtype,
- result_tensors=[result_tens],
+ output_dtype=result_tensor.dtype,
+ result_tensors=[result_tensor],
input_list=input_list,
output_list=output_list,
num_operands=num_operands,
@@ -1818,7 +1807,11 @@ class TosaTestGen:
self.ser.addOperator(op["op"], input_list, output_list)
- return result_tens
+ compliance = self.tensorComplianceMetaData(
+ op, values_in.dtype, args_dict, result_tensor, error_name
+ )
+
+ return TosaTestGen.BuildInfo(result_tensor, compliance)
def build_resize(
self,
@@ -4310,14 +4303,13 @@ class TosaTestGen:
# Scatter/Gather
"gather": {
"op": Op.GATHER,
- # Only specify 'values' tensor here. 'indices' is generated in op building stage
- "operands": (1, 0),
+ "operands": (2, 0),
"rank": (3, 3),
"build_fcn": (
build_gather,
- TosaTensorGen.tgBasic,
- TosaTensorValuesGen.tvgDefault,
- None,
+ TosaTensorGen.tgGather,
+ TosaTensorValuesGen.tvgGather,
+ TosaArgGen.agNone,
),
"types": (
DType.INT8,
@@ -4334,18 +4326,19 @@ class TosaTestGen:
TosaErrorValidator.evWrongOutputList,
TosaErrorValidator.evWrongRank,
),
+ "data_gen": {
+ "fp": (gtu.DataGenType.PSEUDO_RANDOM,),
+ },
},
"scatter": {
"op": Op.SCATTER,
- # Only specify 'values_in' tensor here.
- # 'indices' and 'input' are generated in op building stage
- "operands": (2, 0),
+ "operands": (3, 0),
"rank": (3, 3),
"build_fcn": (
build_scatter,
TosaTensorGen.tgScatter,
- TosaTensorValuesGen.tvgDefault,
- None,
+ TosaTensorValuesGen.tvgScatter,
+ TosaArgGen.agNone,
),
"types": TYPE_INT_FP,
"error_if_validators": (
@@ -4355,6 +4348,9 @@ class TosaTestGen:
TosaErrorValidator.evWrongOutputList,
TosaErrorValidator.evWrongRank,
),
+ "data_gen": {
+ "fp": (gtu.DataGenType.PSEUDO_RANDOM,),
+ },
},
# Image operations
"resize": {