author     Jeremy Johnson <jeremy.johnson@arm.com>   2023-12-07 16:35:28 +0000
committer  Eric Kunze <eric.kunze@arm.com>           2023-12-14 17:56:51 +0000
commit     a8420add949564053495ef78f3213f163c30fb9a (patch)
tree       4c5e2783433e9443b2ed02e5e25c51cc5de2affd
parent     81db5d2f275f69cc0d3e8687af57bdba99971042 (diff)
download   reference_model-a8420add949564053495ef78f3213f163c30fb9a.tar.gz
Main Compliance testing for SCATTER and GATHER
Added indices shuffling and random INT32 support to the generate library,
with testing of these new random generator modes.

Signed-off-by: Jeremy Johnson <jeremy.johnson@arm.com>
Change-Id: I058d8b092470228075e8fe69c2ededa639163003
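The key requirement behind the new shuffle mode is that SCATTER indices within a
row must be unique, since the spec forbids repeating an output index within a
single SCATTER operation. A minimal Python sketch of that idea, mirroring the
permutation-and-truncate approach used by tvgScatter in the patch below
(function name and parameters are illustrative, not the generator's API):

    import numpy as np

    def unique_indices_per_row(rng, N, W, K):
        # Each row of SCATTER indices holds W distinct values in [0, K-1],
        # so no output index is written more than once per operation.
        assert K >= W, "cannot pick W unique indices from a range of K values"
        rows = [rng.permutation(K)[:W] for _ in range(N)]
        return np.array(rows, dtype=np.int32)   # shape (N, W)

    # Example: 2 rows of 10 unique indices drawn from 0..11
    indices = unique_indices_per_row(np.random.default_rng(14), N=2, W=10, K=12)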
-rw-r--r--   reference_model/src/generate/generate_pseudo_random.cc   132
-rw-r--r--   reference_model/src/generate/generate_utils.cc              2
-rw-r--r--   reference_model/test/generate_tests.cpp                    75
-rw-r--r--   verif/conformance/tosa_base_profile_ops_info.json          10
-rw-r--r--   verif/conformance/tosa_main_profile_ops_info.json          20
-rw-r--r--   verif/generator/datagenerator.py                            4
-rw-r--r--   verif/generator/tosa_arg_gen.py                           150
-rw-r--r--   verif/generator/tosa_error_if.py                            4
-rw-r--r--   verif/generator/tosa_test_gen.py                          106
9 files changed, 425 insertions, 78 deletions
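Besides the INT32 work, the conformance configs below switch --fp-values-range
from a fixed "-2.0,2.0" to "-max,max", and tosa_test_gen.py now resolves such
endpoints against the data type's largest finite value, trimming numeric values
that fall outside it. A hedged sketch of that resolution logic (function and
parameter names are illustrative, not the test generator's API):

    def resolve_fp_range(values, max_fp):
        # Map "max"/"-max" to the data type's largest finite value and trim
        # numeric endpoints to [-max_fp, max_fp]; returns a sorted (lo, hi) pair.
        vals = []
        for v in values:
            if v == "max":
                v = max_fp
            elif v == "-max":
                v = -max_fp
            else:
                v = min(max(float(v), -max_fp), max_fp)
            vals.append(v)
        return tuple(sorted(vals))

    # Example: FP16's largest finite value is 65504.0
    assert resolve_fp_range(["-max", "max"], 65504.0) == (-65504.0, 65504.0)
    assert resolve_fp_range([-2.0, 1e6], 65504.0) == (-2.0, 65504.0)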
diff --git a/reference_model/src/generate/generate_pseudo_random.cc b/reference_model/src/generate/generate_pseudo_random.cc
index b62c38f..865483b 100644
--- a/reference_model/src/generate/generate_pseudo_random.cc
+++ b/reference_model/src/generate/generate_pseudo_random.cc
@@ -15,6 +15,7 @@
#include "generate_utils.h"
#include "half.hpp"
+#include <algorithm>
#include <array>
#include <iterator>
#include <limits>
@@ -27,7 +28,7 @@
namespace
{
-// Random generator
+// Random FP generator
template <typename FP>
class PseudoRandomGeneratorFloat
{
@@ -127,6 +128,123 @@ bool generateFP(const TosaReference::GenerateConfig& cfg, DataType* data, size_t
return true;
}
+// Random INT generator
+template <typename INT>
+class PseudoRandomGeneratorInteger
+{
+public:
+ PseudoRandomGeneratorInteger(uint64_t seed)
+ : _gen(seed)
+ {
+ constexpr auto min = std::numeric_limits<INT>::min();
+ constexpr auto max = std::numeric_limits<INT>::max();
+
+ setDistribution(min, max);
+ }
+
+ PseudoRandomGeneratorInteger(uint64_t seed, INT min, INT max)
+ : _gen(seed)
+ {
+ setDistribution(min, max);
+ }
+
+ INT getRandomInteger()
+ {
+ return _unidis(_gen);
+ }
+
+ INT getRandomInteger(INT min, INT max)
+ {
+ typename std::uniform_int_distribution<INT>::param_type range(min, max);
+ return _unidis(_gen, range);
+ }
+
+private:
+ void setDistribution(INT min, INT max)
+ {
+ _unidis = std::uniform_int_distribution<INT>(min, max);
+ }
+
+ std::mt19937 _gen;
+ std::uniform_int_distribution<INT> _unidis;
+};
+
+template <typename DataType>
+bool shuffleINTbyRow(const TosaReference::GenerateConfig& cfg, DataType* data, size_t size)
+{
+ const TosaReference::PseudoRandomInfo& prinfo = cfg.pseudoRandomInfo;
+ PseudoRandomGeneratorInteger<DataType>* generator;
+
+ if (cfg.shape.size() != 2)
+ {
+ WARNING("[Generator][PR][INT] Shuffle only supports 2 dimensional tensors.");
+ return false;
+ }
+ if (prinfo.range.size() != 2)
+ {
+ WARNING("[Generator][PR][INT] Cannot create un-ranged shuffle data.");
+ return false;
+ }
+
+ const int32_t min = std::stoi(prinfo.range[0]);
+ const int32_t max = std::stoi(prinfo.range[1]);
+ generator = new PseudoRandomGeneratorInteger<DataType>(prinfo.rngSeed, min, max);
+
+ // Work out inclusive range
+ const auto range = std::abs(max - min) + 1;
+ const auto N = cfg.shape[0]; // Number of rows
+ const auto W = cfg.shape[1]; // Width of rows
+ if (W > range)
+ {
+ WARNING("[Generator][PR][INT] Cannot fill data size %d with given shuffle range %d.", W, range);
+ return false;
+ }
+
+ std::vector<DataType> numbers(range);
+ for (int n = 0; n < N; ++n)
+ {
+ // Fill in the numbers in range
+ std::iota(numbers.begin(), numbers.end(), min);
+
+ // Perform random shuffling
+ for (auto num = numbers.begin(); num < numbers.end(); ++num)
+ {
+ std::swap(*num, numbers[generator->getRandomInteger()]);
+ }
+ // Copy amount of data required
+ for (auto w = 0; w < W; ++w)
+ {
+ data[(n * W) + w] = numbers[w];
+ }
+ }
+ return true;
+}
+
+template <typename DataType>
+bool generateINT(const TosaReference::GenerateConfig& cfg, DataType* data, size_t size)
+{
+ const TosaReference::PseudoRandomInfo& prinfo = cfg.pseudoRandomInfo;
+ PseudoRandomGeneratorInteger<DataType>* generator;
+
+ const auto T = TosaReference::numElementsFromShape(cfg.shape);
+
+ if (prinfo.range.size() == 2)
+ {
+ const int32_t min = std::stoi(prinfo.range[0]);
+ const int32_t max = std::stoi(prinfo.range[1]);
+ generator = new PseudoRandomGeneratorInteger<DataType>(prinfo.rngSeed, min, max);
+ }
+ else
+ {
+ generator = new PseudoRandomGeneratorInteger<DataType>(prinfo.rngSeed);
+ }
+
+ for (auto t = 0; t < T; ++t)
+ {
+ data[t] = generator->getRandomInteger();
+ }
+ return true;
+}
} // namespace
namespace TosaReference
@@ -155,6 +273,18 @@ bool generatePseudoRandom(const GenerateConfig& cfg, void* data, size_t size)
half_float::half* outData = reinterpret_cast<half_float::half*>(data);
return generateFP(cfg, outData, size);
}
+ case DType::DType_INT32: {
+ int32_t* outData = reinterpret_cast<int32_t*>(data);
+ if (cfg.opType == Op::Op_SCATTER && cfg.inputPos == 1)
+ {
+ // Indices for SCATTER must not repeat - perform data shuffle
+ return shuffleINTbyRow(cfg, outData, size);
+ }
+ else
+ {
+ return generateINT(cfg, outData, size);
+ }
+ }
default:
WARNING("[Generator][PR] Unsupported type.");
return false;
diff --git a/reference_model/src/generate/generate_utils.cc b/reference_model/src/generate/generate_utils.cc
index bdc840d..d31048f 100644
--- a/reference_model/src/generate/generate_utils.cc
+++ b/reference_model/src/generate/generate_utils.cc
@@ -51,6 +51,7 @@ NLOHMANN_JSON_SERIALIZE_ENUM(Op,
{ Op::Op_EXP, "EXP" },
{ Op::Op_FLOOR, "FLOOR" },
{ Op::Op_FULLY_CONNECTED, "FULLY_CONNECTED" },
+ { Op::Op_GATHER, "GATHER" },
{ Op::Op_GREATER, "GREATER" },
{ Op::Op_GREATER_EQUAL, "GREATER_EQUAL" },
{ Op::Op_IDENTITY, "IDENTITY" },
@@ -69,6 +70,7 @@ NLOHMANN_JSON_SERIALIZE_ENUM(Op,
{ Op::Op_REDUCE_MAX, "REDUCE_MAX" },
{ Op::Op_REDUCE_MIN, "REDUCE_MIN" },
{ Op::Op_REDUCE_SUM, "REDUCE_SUM" },
+ { Op::Op_SCATTER, "SCATTER" },
{ Op::Op_SIGMOID, "SIGMOID" },
{ Op::Op_SUB, "SUB" },
{ Op::Op_TANH, "TANH" },
diff --git a/reference_model/test/generate_tests.cpp b/reference_model/test/generate_tests.cpp
index 2c318e0..e4a6d20 100644
--- a/reference_model/test/generate_tests.cpp
+++ b/reference_model/test/generate_tests.cpp
@@ -448,7 +448,7 @@ TEST_CASE("positive - FP32 conv2d dot product (last 3 values)")
conv2d_test_FP32(tosaName, tosaElements, templateJsonCfg, "5", 2, lastExpected);
}
}
-TEST_CASE("positive - pseudo random")
+TEST_CASE("positive - FP32 pseudo random")
{
std::string templateJsonCfg = R"({
"tensors" : {
@@ -823,4 +823,77 @@ TEST_CASE("positive - FP32 avg_pool2d dot product (first 3 values)")
avg_pool2d_test_FP32(tosaName, tosaElements, templateJsonCfg, "5", expected);
}
}
+
+TEST_CASE("positive - INT32 pseudo random")
+{
+ std::string templateJsonCfg = R"({
+ "tensors" : {
+ "input0" : {
+ "generator": "PSEUDO_RANDOM",
+ "data_type": "INT32",
+ "input_type": "VARIABLE",
+ "shape" : [ 2, 12 ],
+ "input_pos": 0,
+ "op" : "SCATTER",
+ "pseudo_random_info": {
+ "rng_seed": 13,
+ "range": [ "-5", "5" ]
+ }
+ },
+ "input1" : {
+ "generator": "PSEUDO_RANDOM",
+ "data_type": "INT32",
+ "input_type": "VARIABLE",
+ "shape" : [ 2, 10 ],
+ "input_pos": 1,
+ "op" : "SCATTER",
+ "pseudo_random_info": {
+ "rng_seed": 14,
+ "range": [ "0", "9" ]
+ }
+ }
+
+ }
+ })";
+
+ const std::string tosaNameP0 = "input0";
+ const size_t tosaElementsP0 = 2 * 12;
+ const std::string tosaNameP1 = "input1";
+ const size_t tosaElementsP1 = 2 * 10;
+
+ SUBCASE("scatter - int32 random")
+ {
+ std::string jsonCfg = templateJsonCfg;
+
+ std::vector<int32_t> bufferP0(tosaElementsP0);
+ REQUIRE(tgd_generate_data(jsonCfg.c_str(), tosaNameP0.c_str(), (void*)bufferP0.data(), tosaElementsP0 * 4));
+ for (auto e = bufferP0.begin(); e < bufferP0.end(); ++e)
+ {
+ // Check the values are within range
+ bool withinRange = (*e >= -5 && *e <= 5);
+ REQUIRE(withinRange);
+ }
+ }
+
+ SUBCASE("scatter - int32 row shuffle")
+ {
+ std::string jsonCfg = templateJsonCfg;
+
+ std::vector<int32_t> bufferP1(tosaElementsP1);
+ REQUIRE(tgd_generate_data(jsonCfg.c_str(), tosaNameP1.c_str(), (void*)bufferP1.data(), tosaElementsP1 * 4));
+
+ std::vector<bool> set;
+ for (int32_t n = 0; n < 2; ++n)
+ {
+ set.assign(10, false);
+ for (int32_t i = 0; i < 10; ++i)
+ {
+ auto idx = bufferP1[i];
+ // Check that the values in the buffer only occur once
+ REQUIRE(!set[idx]);
+ set[idx] = true;
+ }
+ }
+ }
+}
TEST_SUITE_END(); // generate
diff --git a/verif/conformance/tosa_base_profile_ops_info.json b/verif/conformance/tosa_base_profile_ops_info.json
index 3a8622b..b186b06 100644
--- a/verif/conformance/tosa_base_profile_ops_info.json
+++ b/verif/conformance/tosa_base_profile_ops_info.json
@@ -1502,11 +1502,15 @@
"--target-dtype",
"int32",
"--tensor-dim-range",
- "4,64"
+ "4,64",
+ "--max-batch-size",
+ "64"
],
[
"--target-dtype",
"int8",
+ "--tensor-dim-range",
+ "5,20",
"--target-shape",
"2,65533,1",
"--target-shape",
@@ -3229,7 +3233,9 @@
"--target-dtype",
"int32",
"--tensor-dim-range",
- "4,64"
+ "4,64",
+ "--max-batch-size",
+ "64"
],
[
"--target-dtype",
diff --git a/verif/conformance/tosa_main_profile_ops_info.json b/verif/conformance/tosa_main_profile_ops_info.json
index c3bd6ee..fb25622 100644
--- a/verif/conformance/tosa_main_profile_ops_info.json
+++ b/verif/conformance/tosa_main_profile_ops_info.json
@@ -1133,6 +1133,7 @@
"profile": [
"tosa-mi"
],
+ "support_for": [ "lazy_data_gen" ],
"generation": {
"standard": {
"negative_dim_range": "1,10",
@@ -1145,15 +1146,19 @@
"--target-dtype",
"bf16",
"--fp-values-range",
- "-2.0,2.0",
+ "-max,max",
"--tensor-dim-range",
- "5,72"
+ "5,72",
+ "--max-batch-size",
+ "72"
],
[
"--target-dtype",
"fp32",
"--fp-values-range",
- "-2.0,2.0",
+ "-max,max",
+ "--tensor-dim-range",
+ "5,20",
"--target-shape",
"2,65536,1",
"--target-shape",
@@ -1992,6 +1997,7 @@
"profile": [
"tosa-mi"
],
+ "support_for": [ "lazy_data_gen" ],
"generation": {
"standard": {
"negative_dim_range": "1,10",
@@ -2004,15 +2010,17 @@
"--target-dtype",
"bf16",
"--fp-values-range",
- "-2.0,2.0",
+ "-max,max",
"--tensor-dim-range",
- "5,56"
+ "5,56",
+ "--max-batch-size",
+ "56"
],
[
"--target-dtype",
"fp32",
"--fp-values-range",
- "-2.0,2.0",
+ "-max,max",
"--target-shape",
"1,65541,1",
"--target-shape",
diff --git a/verif/generator/datagenerator.py b/verif/generator/datagenerator.py
index 0dd60e5..b5ef35d 100644
--- a/verif/generator/datagenerator.py
+++ b/verif/generator/datagenerator.py
@@ -78,6 +78,10 @@ class GenerateLibrary:
size_bytes = size * 2
# Create buffer of bytes and initialize to zero
buffer = (ct.c_ubyte * size_bytes)(0)
+ elif dtype == "INT32":
+ # Create buffer and initialize to zero
+ buffer = (ct.c_int32 * size)(0)
+ size_bytes = size * 4
else:
raise GenerateError(f"Unsupported data type {dtype}")
diff --git a/verif/generator/tosa_arg_gen.py b/verif/generator/tosa_arg_gen.py
index 35253e0..50811ac 100644
--- a/verif/generator/tosa_arg_gen.py
+++ b/verif/generator/tosa_arg_gen.py
@@ -204,7 +204,7 @@ class TosaTensorGen:
return shape_list
@staticmethod
- def tgScatter(testGen, opName, rank, error_name=None):
+ def tgGather(testGen, opName, rank, error_name=None):
pl, const = opName["operands"]
assert pl == 2
@@ -212,12 +212,31 @@ class TosaTensorGen:
if error_name != ErrorIf.WrongRank:
assert rank == 3
+ values_shape = testGen.makeShape(rank)
+ values_shape = testGen.constrictBatchSize(values_shape)
+
+ N = values_shape[0]
+ W = testGen.makeDimension()
+ indices_shape = [N, W]
+
+ shape_list = [values_shape, indices_shape]
+ return shape_list
+
+ @staticmethod
+ def tgScatter(testGen, opName, rank, error_name=None):
+ pl, const = opName["operands"]
+
+ assert pl == 3
+ assert const == 0
+ if error_name != ErrorIf.WrongRank:
+ assert rank == 3
+
values_in_shape = testGen.makeShape(rank)
- K = values_in_shape[1]
+ values_in_shape = testGen.constrictBatchSize(values_in_shape)
- # ignore max batch size if target shape is set
- if testGen.args.max_batch_size and not testGen.args.target_shapes:
- values_in_shape[0] = min(values_in_shape[0], testGen.args.max_batch_size)
+ N = values_in_shape[0]
+ K = values_in_shape[1]
+ C = values_in_shape[2]
# Make sure W is not greater than K, as we can only write each output index
# once (having a W greater than K means that you have to repeat a K index)
@@ -225,11 +244,12 @@ class TosaTensorGen:
W_max = min(testGen.args.tensor_shape_range[1], K)
W = testGen.randInt(W_min, W_max) if W_min < W_max else W_min
- input_shape = [values_in_shape[0], W, values_in_shape[2]]
+ input_shape = [N, W, C]
shape_list = []
- shape_list.append(values_in_shape.copy())
- shape_list.append(input_shape.copy())
+ shape_list.append(values_in_shape)
+ shape_list.append([N, W]) # indices
+ shape_list.append(input_shape)
return shape_list
@@ -695,6 +715,13 @@ class TosaTensorValuesGen:
"round" in argsDict["data_range_list"][idx]
and argsDict["data_range_list"][idx]["round"] is True
)
+ if data_range is not None and dtype not in (
+ DType.FP16,
+ DType.FP32,
+ DType.BF16,
+ ):
+ # Change from inclusive to exclusive range
+ data_range = (data_range[0], data_range[1] + 1)
# Ignore lazy data gen option and create data array using any range limits
arr = testGen.getRandTensor(shape, dtype, data_range)
if roundMode:
@@ -732,13 +759,15 @@ class TosaTensorValuesGen:
# TODO - generate seed for this generator based on test
info["rng_seed"] = 42
+ data_range = None
if "data_range_list" in argsDict:
data_range = argsDict["data_range_list"][idx]["range"]
if "round" in argsDict["data_range_list"][idx]:
info["round"] = argsDict["data_range_list"][idx]["round"]
elif "data_range" in argsDict:
data_range = argsDict["data_range"]
- else:
+
+ if data_range is None:
data_range = testGen.getDTypeRange(
dtypeList[idx], high_inclusive=True
)
@@ -1455,6 +1484,109 @@ class TosaTensorValuesGen:
testGen, opName, dtypeList, shapeList, argsDict, error_name
)
+ @staticmethod
+ def tvgGather(testGen, opName, dtypeList, shapeList, argsDict, error_name=None):
+ K = shapeList[0][1]
+
+ # Fix the type of the indices tensor
+ dtypeList[1] = DType.INT32
+
+ dtype = dtypeList[0]
+ if not gtu.dtypeIsSupportedByCompliance(dtype):
+ # Test unsupported by data generator
+ op = testGen.TOSA_OP_LIST[opName]
+ pCount, cCount = op["operands"]
+ assert (
+ pCount == 2 and cCount == 0
+ ), "Op.GATHER must have 2 placeholders, 0 consts"
+
+ tens_ser_list = []
+ for idx, shape in enumerate(shapeList):
+ dtype = dtypeList[idx]
+ if idx != 1:
+ arr = testGen.getRandTensor(shape, dtype)
+ tens_ser_list.append(testGen.ser.addPlaceholder(shape, dtype, arr))
+ else:
+ # Limit data range of indices tensor up to K (exclusive)
+ arr = testGen.getRandTensor(shape, dtype, (0, K))
+ # To match old functionality - create indices as CONST
+ tens_ser_list.append(testGen.ser.addConst(shape, dtype, arr))
+
+ return TosaTensorValuesGen.TVGInfo(tens_ser_list, None)
+
+ else:
+ # ERROR_IF or floating point test
+ # Use inclusive values up to index K for indices tensor
+ data_range_list = (
+ {"range": None},
+ {"range": (0, K - 1)},
+ )
+ argsDict["data_range_list"] = data_range_list
+
+ return TosaTensorValuesGen.tvgLazyGenDefault(
+ testGen, opName, dtypeList, shapeList, argsDict, error_name
+ )
+
+ @staticmethod
+ def tvgScatter(testGen, opName, dtypeList, shapeList, argsDict, error_name=None):
+ K = shapeList[0][1]
+ W = shapeList[2][1]
+
+ # Work out an indices tensor here with data that doesn't exceed the
+ # dimension K of the values_in tensor and does NOT repeat the same K
+ # location as needed by the spec:
+ # "It is not permitted to repeat the same output index within a single
+ # SCATTER operation and so each output index occurs at most once."
+ assert K >= W, "Op.SCATTER W must be smaller or equal to K"
+
+ # Fix the type of the indices tensor
+ dtypeList[1] = DType.INT32
+
+ dtype = dtypeList[0]
+ if not gtu.dtypeIsSupportedByCompliance(dtype):
+ # Test unsupported by data generator
+ op = testGen.TOSA_OP_LIST[opName]
+ pCount, cCount = op["operands"]
+ assert (
+ pCount == 3 and cCount == 0
+ ), "Op.SCATTER must have 3 placeholders, 0 consts"
+
+ tens_ser_list = []
+ for idx, shape in enumerate(shapeList):
+ dtype = dtypeList[idx]
+ if idx != 1:
+ arr = testGen.getRandTensor(shape, dtype)
+ tens_ser_list.append(testGen.ser.addPlaceholder(shape, dtype, arr))
+ else:
+ # Create the indices array
+ assert dtype == DType.INT32, "Op.SCATTER unexpected indices type"
+ arr = []
+ for n in range(shape[0]):
+ # Get a shuffled list of output indices (0 to K-1) and
+ # limit length to W
+ arr.append(testGen.rng.permutation(K)[:W])
+ indices_arr = np.array(arr, dtype=np.int32) # (N, W)
+ # To match old functionality - create indices as CONST
+ tens_ser_list.append(
+ testGen.ser.addConst(shape, dtype, indices_arr)
+ )
+
+ return TosaTensorValuesGen.TVGInfo(tens_ser_list, None)
+
+ else:
+ # ERROR_IF or floating point test
+ # Use inclusive values up to index K for indices tensor
+ data_range_list = (
+ {"range": None},
+ {"range": (0, K - 1)},
+ {"range": None},
+ )
+ argsDict["data_range_list"] = data_range_list
+
+ return TosaTensorValuesGen.tvgLazyGenDefault(
+ testGen, opName, dtypeList, shapeList, argsDict, error_name
+ )
+
class TosaArgGen:
"""Argument generators create exhaustive or random lists of attributes for
diff --git a/verif/generator/tosa_error_if.py b/verif/generator/tosa_error_if.py
index 86be347..5dd785f 100644
--- a/verif/generator/tosa_error_if.py
+++ b/verif/generator/tosa_error_if.py
@@ -666,12 +666,8 @@ class TosaErrorValidator:
error_reason = "Op input list does not match expected input"
if check:
- op = kwargs["op"]
input_list = kwargs["input_list"]
num_operands = kwargs["num_operands"]
- if op["op"] in [Op.SCATTER, Op.GATHER]:
- # SCATTER/GATHER add an indices input tensor in their build functions
- num_operands += 1
if len(input_list) != num_operands:
error_result = True
diff --git a/verif/generator/tosa_test_gen.py b/verif/generator/tosa_test_gen.py
index 53b0b75..ee935d4 100644
--- a/verif/generator/tosa_test_gen.py
+++ b/verif/generator/tosa_test_gen.py
@@ -66,6 +66,12 @@ class TosaTestGen:
v = maxFP
elif v == "-max":
v = -maxFP
+ elif v < 0:
+ # Trim to minimum data type value
+ v = max(v, -maxFP)
+ elif v > 0:
+ # Trim to maximum data type value
+ v = min(v, maxFP)
vals.append(v)
return tuple(sorted(vals))
@@ -1722,27 +1728,19 @@ class TosaTestGen:
self.ser.addOperator(op["op"], input_list, output_list, attr)
return result_tens
- def build_gather(self, op, values, validator_fcns=None, error_name=None):
-
- # Create a new indicies tensor
- # here with data that doesn't exceed the dimensions of the values tensor
-
- K = values.shape[1] # K
- W = self.randInt(
- self.args.tensor_shape_range[0], self.args.tensor_shape_range[1]
- ) # W
- indicies_arr = np.int32(
- self.rng.integers(low=0, high=K, size=[values.shape[0], W])
- ) # (N, W)
- indicies = self.ser.addConst(indicies_arr.shape, DType.INT32, indicies_arr)
+ def build_gather(
+ self, op, inputs, args_dict, validator_fcns=None, error_name=None, qinfo=None
+ ):
+ assert len(inputs) == 2
+ values, indices = inputs
- result_tens = OutputShaper.gatherOp(
- self.ser, self.rng, values, indicies, error_name
+ result_tensor = OutputShaper.gatherOp(
+ self.ser, self.rng, values, indices, error_name
)
# Invalidate Input/Output list for error if checks.
- input_list = [values.name, indicies.name]
- output_list = [result_tens.name]
+ input_list = [values.name, indices.name]
+ output_list = [result_tensor.name]
pCount, cCount = op["operands"]
num_operands = pCount + cCount
input_list, output_list = TosaErrorIfArgGen.eiInvalidateInputOutputList(
@@ -1755,10 +1753,10 @@ class TosaTestGen:
error_name,
op=op,
input_shape=values.shape,
- output_shape=result_tens.shape,
+ output_shape=result_tensor.shape,
input_dtype=values.dtype,
- output_dtype=result_tens.dtype,
- result_tensors=[result_tens],
+ output_dtype=result_tensor.dtype,
+ result_tensors=[result_tensor],
input_list=input_list,
output_list=output_list,
num_operands=num_operands,
@@ -1767,33 +1765,24 @@ class TosaTestGen:
self.ser.addOperator(op["op"], input_list, output_list)
- return result_tens
+ compliance = self.tensorComplianceMetaData(
+ op, values.dtype, args_dict, result_tensor, error_name
+ )
- def build_scatter(self, op, values_in, input, validator_fcns=None, error_name=None):
-
- K = values_in.shape[1] # K
- W = input.shape[1] # W
-
- # Create an indices tensor here with data that doesn't exceed the
- # dimension K of the values_in tensor and does NOT repeat the same K
- # location as needed by the spec:
- # "It is not permitted to repeat the same output index within a single
- # SCATTER operation and so each output index occurs at most once."
- assert K >= W
- arr = []
- for n in range(values_in.shape[0]):
- # Get a shuffled list of output indices and limit it to size W
- arr.append(self.rng.permutation(K)[:W])
- indices_arr = np.array(arr, dtype=np.int32) # (N, W)
- indices = self.ser.addConst(indices_arr.shape, DType.INT32, indices_arr)
-
- result_tens = OutputShaper.scatterOp(
+ return TosaTestGen.BuildInfo(result_tensor, compliance)
+
+ def build_scatter(
+ self, op, inputs, args_dict, validator_fcns=None, error_name=None, qinfo=None
+ ):
+ assert len(inputs) == 3
+ values_in, indices, input = inputs
+ result_tensor = OutputShaper.scatterOp(
self.ser, self.rng, values_in, indices, input, error_name
)
# Invalidate Input/Output list for error if checks.
input_list = [values_in.name, indices.name, input.name]
- output_list = [result_tens.name]
+ output_list = [result_tensor.name]
pCount, cCount = op["operands"]
num_operands = pCount + cCount
input_list, output_list = TosaErrorIfArgGen.eiInvalidateInputOutputList(
@@ -1806,10 +1795,10 @@ class TosaTestGen:
error_name,
op=op,
input_shape=values_in.shape,
- output_shape=result_tens.shape,
+ output_shape=result_tensor.shape,
input_dtype=values_in.dtype,
- output_dtype=result_tens.dtype,
- result_tensors=[result_tens],
+ output_dtype=result_tensor.dtype,
+ result_tensors=[result_tensor],
input_list=input_list,
output_list=output_list,
num_operands=num_operands,
@@ -1818,7 +1807,11 @@ class TosaTestGen:
self.ser.addOperator(op["op"], input_list, output_list)
- return result_tens
+ compliance = self.tensorComplianceMetaData(
+ op, values_in.dtype, args_dict, result_tensor, error_name
+ )
+
+ return TosaTestGen.BuildInfo(result_tensor, compliance)
def build_resize(
self,
@@ -4310,14 +4303,13 @@ class TosaTestGen:
# Scatter/Gather
"gather": {
"op": Op.GATHER,
- # Only specify 'values' tensor here. 'indices' is generated in op building stage
- "operands": (1, 0),
+ "operands": (2, 0),
"rank": (3, 3),
"build_fcn": (
build_gather,
- TosaTensorGen.tgBasic,
- TosaTensorValuesGen.tvgDefault,
- None,
+ TosaTensorGen.tgGather,
+ TosaTensorValuesGen.tvgGather,
+ TosaArgGen.agNone,
),
"types": (
DType.INT8,
@@ -4334,18 +4326,19 @@ class TosaTestGen:
TosaErrorValidator.evWrongOutputList,
TosaErrorValidator.evWrongRank,
),
+ "data_gen": {
+ "fp": (gtu.DataGenType.PSEUDO_RANDOM,),
+ },
},
"scatter": {
"op": Op.SCATTER,
- # Only specify 'values_in' tensor here.
- # 'indices' and 'input' are generated in op building stage
- "operands": (2, 0),
+ "operands": (3, 0),
"rank": (3, 3),
"build_fcn": (
build_scatter,
TosaTensorGen.tgScatter,
- TosaTensorValuesGen.tvgDefault,
- None,
+ TosaTensorValuesGen.tvgScatter,
+ TosaArgGen.agNone,
),
"types": TYPE_INT_FP,
"error_if_validators": (
@@ -4355,6 +4348,9 @@ class TosaTestGen:
TosaErrorValidator.evWrongOutputList,
TosaErrorValidator.evWrongRank,
),
+ "data_gen": {
+ "fp": (gtu.DataGenType.PSEUDO_RANDOM,),
+ },
},
# Image operations
"resize": {