aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorJeremy Johnson <jeremy.johnson@arm.com>2024-02-28 13:20:05 +0000
committerTatWai Chong <tatwai.chong@arm.com>2024-03-01 13:16:56 -0800
commit0a042997ac24fee1a338e806caf18bd8dfba28f3 (patch)
tree1cfe325d7d775b778873a3940407e68d39c80a48
parent3195a665e3f96809a67b4cb04a57330d2bfeb0de (diff)
downloadreference_model-0a042997ac24fee1a338e806caf18bd8dfba28f3.tar.gz
Testing support for MUL with shift as input
Always create the shift as a tensor for all types in testing. In the reference model, set the shift operand to be available for all types, but only read in the shift tensor for i32. Signed-off-by: Jeremy Johnson <jeremy.johnson@arm.com> Signed-off-by: TatWai Chong <tatwai.chong@arm.com> Change-Id: Ia267cbf8b63ca0a9c97b38e8fb4db83eeb8c0538
-rw-r--r--examples/test_lstm_stateful_13x21x3_f32/flatbuffer-tflite/test_lstm_stateful_13x21x3_f32.tosabin11752 -> 11932 bytes
-rw-r--r--reference_model/src/generate/generate_fixed_data.cc41
-rw-r--r--reference_model/src/ops/ewise_binary.cc22
-rw-r--r--reference_model/src/ops/ewise_binary.h17
-rw-r--r--verif/generator/datagenerator.py4
-rw-r--r--verif/generator/tosa_arg_gen.py47
-rw-r--r--verif/generator/tosa_test_gen.py17
7 files changed, 97 insertions, 51 deletions
diff --git a/examples/test_lstm_stateful_13x21x3_f32/flatbuffer-tflite/test_lstm_stateful_13x21x3_f32.tosa b/examples/test_lstm_stateful_13x21x3_f32/flatbuffer-tflite/test_lstm_stateful_13x21x3_f32.tosa
index 39087fa..f9606a1 100644
--- a/examples/test_lstm_stateful_13x21x3_f32/flatbuffer-tflite/test_lstm_stateful_13x21x3_f32.tosa
+++ b/examples/test_lstm_stateful_13x21x3_f32/flatbuffer-tflite/test_lstm_stateful_13x21x3_f32.tosa
Binary files differ
diff --git a/reference_model/src/generate/generate_fixed_data.cc b/reference_model/src/generate/generate_fixed_data.cc
index 3d4ee3e..b0b6c81 100644
--- a/reference_model/src/generate/generate_fixed_data.cc
+++ b/reference_model/src/generate/generate_fixed_data.cc
@@ -20,8 +20,22 @@
#include <type_traits>
#include <vector>
+namespace
+{
+template <typename OutType>
+bool copyFixedData(const int64_t elements, const std::vector<int32_t> inData, OutType* outData)
+{
+ for (auto t = 0; t < elements; t++)
+ {
+ outData[t] = inData[t];
+ }
+ return true;
+}
+} // namespace
+
namespace TosaReference
{
+
bool generateFixedData(const GenerateConfig& cfg, void* data, size_t size)
{
// Check we support the operator
@@ -31,22 +45,23 @@ bool generateFixedData(const GenerateConfig& cfg, void* data, size_t size)
return false;
}
+ std::vector<int32_t> inData = cfg.fixedDataInfo.data;
+ const auto T = TosaReference::numElementsFromShape(cfg.shape);
+ if (T != static_cast<int64_t>(inData.size()))
+ {
+ WARNING("[Generator][FD] Given data size %d does not match output size %d.", inData.size(), T);
+ return false;
+ }
+
switch (cfg.dataType)
{
case DType::DType_SHAPE: {
- int32_t* outData = reinterpret_cast<int32_t*>(data);
- std::vector<int32_t> inData = cfg.fixedDataInfo.data;
- const auto T = TosaReference::numElementsFromShape(cfg.shape);
- if (T != static_cast<int64_t>(inData.size()))
- {
- WARNING("[Generator][FD] Size does not match.");
- return false;
- }
- for (auto t = 0; t < T; t++)
- {
- outData[t] = inData[t];
- }
- return true;
+ int32_t* outData = reinterpret_cast<int32_t*>(data);
+ return copyFixedData(T, inData, outData);
+ }
+ case DType::DType_INT8: {
+ int8_t* outData = reinterpret_cast<int8_t*>(data);
+ return copyFixedData(T, inData, outData);
}
default:
WARNING("[Generator][FD] Unsupported type.");
diff --git a/reference_model/src/ops/ewise_binary.cc b/reference_model/src/ops/ewise_binary.cc
index ed176f3..8cc1319 100644
--- a/reference_model/src/ops/ewise_binary.cc
+++ b/reference_model/src/ops/ewise_binary.cc
@@ -463,11 +463,18 @@ int OpMul<Rank, InDtype, OutDtype>::eval()
using TInt64 = Eigen::Tensor<int64_t, Rank>;
TInt64 tmp_result = ia.binaryExpr(ib, this->mul_fcn);
- // Retrieve `shift` value and construct a Eigen tensor instance for it.
- s = dynamic_cast<TosaReference::TensorTemplate<TShift>*>(this->inputs[2]);
- ASSERT_MEM(s);
+ // Retrieve `shift` value and construct an Eigen tensor instance for it. Shift is stored
+ // as rank-0 tensor in Flatbuffer.
+ auto s0 = dynamic_cast<TosaReference::TensorTemplate<TShiftRank0>*>(this->inputs[2]);
- int shift = s->getTensor()(0);
+ // Get zero element from rank-0 tensor (i.e. shape = (0,)) in Numpy since `class Tensor`
+ // currently has no knowledge of the size of rank-0 tensor. Store rank-1 tensor instead
+ // for testing.
+ auto s1 = dynamic_cast<TosaReference::TensorTemplate<TShiftRank1>*>(this->inputs[2]);
+
+ ASSERT_MEM(s0 || s1);
+
+ int shift = s0 ? s0->getTensor()(0) : s1->getTensor()(0);
TIn is(ia);
is.setConstant(shift);
@@ -486,11 +493,12 @@ int OpMul<0, TOSA_REF_TYPE_INT32, TOSA_REF_TYPE_INT32>::eval()
Eigen::Tensor<int64_t, 0> tmp_result = this->a->getTensor().binaryExpr(this->b->getTensor(), this->mul_fcn);
// Retrieve `shift` value.
- s = dynamic_cast<TosaReference::TensorTemplate<TShift>*>(this->inputs[2]);
- ASSERT_MEM(s);
+ auto s0 = dynamic_cast<TosaReference::TensorTemplate<TShiftRank0>*>(this->inputs[2]);
+ auto s1 = dynamic_cast<TosaReference::TensorTemplate<TShiftRank1>*>(this->inputs[2]);
+ ASSERT_MEM(s0 || s1);
Eigen::Tensor<int64_t, 0> shift;
- shift.setConstant(s->getTensor()(0));
+ shift.setConstant(s0 ? s0->getTensor()(0) : s1->getTensor()(0));
this->result->getTensor() = tmp_result.binaryExpr(shift, this->shr_fcn);
diff --git a/reference_model/src/ops/ewise_binary.h b/reference_model/src/ops/ewise_binary.h
index 8d2e486..7ebd852 100644
--- a/reference_model/src/ops/ewise_binary.h
+++ b/reference_model/src/ops/ewise_binary.h
@@ -159,11 +159,8 @@ public:
OpMul(SubgraphTraverser* sgt_, TosaAttributeBase* attribute_, uint64_t id_)
: BinaryNode<Rank, InDtype, OutDtype>(sgt_, Op_MUL, id_)
{
- if constexpr (InDtype == TOSA_REF_TYPE_INT32)
- {
- // Require `shift` operand.
- this->setRequiredOperands(3, 1);
- }
+ // Require `shift` operand.
+ this->setRequiredOperands(3, 1);
register_fcn();
}
static constexpr int64_t QMin = GetQMin<OutDtype>::value;
@@ -173,9 +170,10 @@ public:
using OutEigenType = typename GetEigenType<OutDtype>::type;
using ShiftEigenType = typename GetEigenType<TOSA_REF_TYPE_INT8>::type;
- using TIn = Eigen::Tensor<InEigenType, Rank>;
- using TOut = Eigen::Tensor<OutEigenType, Rank>;
- using TShift = Eigen::Tensor<ShiftEigenType, 0>;
+ using TIn = Eigen::Tensor<InEigenType, Rank>;
+ using TOut = Eigen::Tensor<OutEigenType, Rank>;
+ using TShiftRank0 = Eigen::Tensor<ShiftEigenType, 0>;
+ using TShiftRank1 = Eigen::Tensor<ShiftEigenType, 1>;
int register_fcn();
int eval();
@@ -183,9 +181,6 @@ public:
// Note that INT64 is not natively supported in Dtype system.
std::function<int64_t(InEigenType, InEigenType)> mul_fcn;
std::function<OutEigenType(int64_t, InEigenType)> shr_fcn;
-
-protected:
- TosaReference::TensorTemplate<TShift>* s;
};
template <int Rank, TOSA_REF_TYPE InDtype>
diff --git a/verif/generator/datagenerator.py b/verif/generator/datagenerator.py
index 743475c..c63a2d5 100644
--- a/verif/generator/datagenerator.py
+++ b/verif/generator/datagenerator.py
@@ -82,6 +82,10 @@ class GenerateLibrary:
# Create buffer and initialize to zero
buffer = (ct.c_int32 * size)(0)
size_bytes = size * 4
+ elif dtype == "INT8":
+ size_bytes = size
+ # Create buffer of bytes and initialize to zero
+ buffer = (ct.c_ubyte * size_bytes)(0)
else:
raise GenerateError(f"Unsupported data type {dtype}")
diff --git a/verif/generator/tosa_arg_gen.py b/verif/generator/tosa_arg_gen.py
index 0851aca..592c491 100644
--- a/verif/generator/tosa_arg_gen.py
+++ b/verif/generator/tosa_arg_gen.py
@@ -254,19 +254,16 @@ class TosaTensorGen:
return shape_list
@staticmethod
- def tgBroadcastFuzz(testGen, op, rank, error_name=None):
+ def _get_broadcast_shapes(testGen, num_shapes, rank, error_name=None):
shape = testGen.makeShape(rank)
-
- pl, const = op["operands"]
-
shape_list = []
# Choose one of the inputs to broadcast
# Note: Simplifies OutputShaper code if we don't change first shape for errors
- bcast_idx = testGen.randInt(0 if error_name is None else 1, pl + const)
+ bcast_idx = testGen.randInt(0 if error_name is None else 1, num_shapes)
fuzz_idx = testGen.randInt(0, rank)
- for i in range(pl + const):
+ for i in range(num_shapes):
shape_bcast = shape.copy()
# To test broadcasting, the chosen fuzz index dimension should not be 1
@@ -295,6 +292,22 @@ class TosaTensorGen:
return shape_list
@staticmethod
+ def tgBroadcastFuzz(testGen, op, rank, error_name=None):
+ pl, const = op["operands"]
+ num_shapes = pl + const
+ return TosaTensorGen._get_broadcast_shapes(
+ testGen, num_shapes, rank, error_name
+ )
+
+ @staticmethod
+ def tgMul(testGen, op, rank, error_name=None):
+ # Get broadcast shapes for the first 2 inputs as the 3rd is shift
+ shape_list = TosaTensorGen._get_broadcast_shapes(testGen, 2, rank, error_name)
+ # Add a single dimension tensor for shift
+ shape_list.append([1])
+ return shape_list
+
+ @staticmethod
def tgConv2D(testGen, op, rank, error_name=None):
pl, const = op["operands"]
@@ -727,7 +740,12 @@ class TosaTensorValuesGen:
# Ignore lazy data gen option and create data array using any range limits
if "fixed_data" in argsDict and argsDict["fixed_data"][idx] is not None:
- arr = np.int64(argsDict["fixed_data"][idx])
+ if dtype == DType.SHAPE:
+ arr = np.int64(argsDict["fixed_data"][idx])
+ elif dtype == DType.INT8:
+ arr = np.int8(argsDict["fixed_data"][idx])
+ else:
+ assert False, "Unsupported fixed_data type"
else:
arr = testGen.getRandTensor(shape, dtype, data_range)
if roundMode:
@@ -1147,6 +1165,13 @@ class TosaTensorValuesGen:
if data_range:
argsDict["data_range"] = data_range
+ if dtypeList[0] != DType.SHAPE:
+ # Need to supply shift tensor for MUL (not needed for MUL_SHAPE)
+ dtypeList[2] = DType.INT8
+ shapeList[2] = [1]
+ # Create a new list for the pre-generated data in argsDict["fixed_data"]
+ argsDict["fixed_data"] = [None, None, [argsDict["shift"]]]
+
return TosaTensorValuesGen.tvgLazyGenDefault(
testGen, opName, dtypeList, shapeList, argsDict, error_name
)
@@ -1154,9 +1179,6 @@ class TosaTensorValuesGen:
# Integer test
op = testGen.TOSA_OP_LIST[opName]
pCount, cCount = op["operands"]
- assert (
- pCount == 2 and cCount == 0
- ), "Op.MUL must have 2 placeholders, 0 consts"
tens_ser_list = []
@@ -1213,6 +1235,7 @@ class TosaTensorValuesGen:
b_arr = b_arr // 2
if dtypeList[0] == DType.SHAPE:
+ # MUL_SHAPE with 2 inputs
tens_ser_list.append(
testGen.ser.addPlaceholder(shapeList[0], dtypeList[0], a_arr_64)
)
@@ -1220,12 +1243,16 @@ class TosaTensorValuesGen:
testGen.ser.addPlaceholder(shapeList[1], dtypeList[1], b_arr_64)
)
else:
+ # MUL with 3 inputs (3rd is shift)
tens_ser_list.append(
testGen.ser.addPlaceholder(shapeList[0], dtypeList[0], a_arr)
)
tens_ser_list.append(
testGen.ser.addPlaceholder(shapeList[1], dtypeList[1], b_arr)
)
+ tens_ser_list.append(
+ testGen.ser.addPlaceholder([1], DType.INT8, np.int8([shift]))
+ )
return TosaTensorValuesGen.TVGInfo(tens_ser_list, None)
diff --git a/verif/generator/tosa_test_gen.py b/verif/generator/tosa_test_gen.py
index ee45f0e..b472087 100644
--- a/verif/generator/tosa_test_gen.py
+++ b/verif/generator/tosa_test_gen.py
@@ -587,9 +587,9 @@ class TosaTestGen:
def build_mul(
self, op, inputs, args_dict, validator_fcns=None, error_name=None, qinfo=None
):
- assert len(inputs) == 2
- a, b = inputs
- shift = args_dict["shift"]
+ # Note that mul is binary operator but it has a shift value tensor
+ assert len(inputs) == 3
+ a, b, s = inputs
result_tensor = OutputShaper.binaryBroadcastOp(
self.ser, self.rng, a, b, error_name
@@ -605,7 +605,7 @@ class TosaTestGen:
result_tensor.setDtype(outputDType)
# Invalidate Input/Output list for error if checks.
- input_list = [a.name, b.name]
+ input_list = [a.name, b.name, s.name]
output_list = [result_tensor.name]
pCount, cCount = op["operands"]
num_operands = pCount + cCount
@@ -629,10 +629,7 @@ class TosaTestGen:
):
return None
- attr = ts.TosaSerializerAttribute()
- attr.MulAttribute(shift)
-
- self.ser.addOperator(op["op"], input_list, output_list, attr)
+ self.ser.addOperator(op["op"], input_list, output_list)
compliance = self.tensorComplianceMetaData(
op, a.dtype, args_dict, result_tensor, error_name
@@ -3874,10 +3871,10 @@ class TosaTestGen:
},
"mul": {
"op": Op.MUL,
- "operands": (2, 0),
+ "operands": (3, 0),
"build_fcn": (
build_mul,
- TosaTensorGen.tgBroadcastFuzz,
+ TosaTensorGen.tgMul,
TosaTensorValuesGen.tvgMul,
TosaArgGen.agMul,
),