From 0a042997ac24fee1a338e806caf18bd8dfba28f3 Mon Sep 17 00:00:00 2001
From: Jeremy Johnson
Date: Wed, 28 Feb 2024 13:20:05 +0000
Subject: Testing support for MUL with shift as input

Always create the shift as a tensor for all types in testing.

In the reference model, set the shift operand to be available for all
types, but only read in the shift tensor for i32.

Signed-off-by: Jeremy Johnson
Signed-off-by: TatWai Chong
Change-Id: Ia267cbf8b63ca0a9c97b38e8fb4db83eeb8c0538
---
 .../test_lstm_stateful_13x21x3_f32.tosa   | Bin 11752 -> 11932 bytes
 .../src/generate/generate_fixed_data.cc   |  41 ++++++++++++------
 reference_model/src/ops/ewise_binary.cc   |  22 +++++++---
 reference_model/src/ops/ewise_binary.h    |  17 +++-----
 verif/generator/datagenerator.py          |   4 ++
 verif/generator/tosa_arg_gen.py           |  47 ++++++++++++++++-----
 verif/generator/tosa_test_gen.py          |  17 +++-----
 7 files changed, 97 insertions(+), 51 deletions(-)

diff --git a/examples/test_lstm_stateful_13x21x3_f32/flatbuffer-tflite/test_lstm_stateful_13x21x3_f32.tosa b/examples/test_lstm_stateful_13x21x3_f32/flatbuffer-tflite/test_lstm_stateful_13x21x3_f32.tosa
index 39087fa..f9606a1 100644
Binary files a/examples/test_lstm_stateful_13x21x3_f32/flatbuffer-tflite/test_lstm_stateful_13x21x3_f32.tosa and b/examples/test_lstm_stateful_13x21x3_f32/flatbuffer-tflite/test_lstm_stateful_13x21x3_f32.tosa differ
diff --git a/reference_model/src/generate/generate_fixed_data.cc b/reference_model/src/generate/generate_fixed_data.cc
index 3d4ee3e..b0b6c81 100644
--- a/reference_model/src/generate/generate_fixed_data.cc
+++ b/reference_model/src/generate/generate_fixed_data.cc
@@ -20,8 +20,22 @@
 #include <cstdint>
 #include <vector>
 
+namespace
+{
+template <typename OutType>
+bool copyFixedData(const int64_t elements, const std::vector<int32_t> inData, OutType* outData)
+{
+    for (auto t = 0; t < elements; t++)
+    {
+        outData[t] = inData[t];
+    }
+    return true;
+}
+}    // namespace
+
 namespace TosaReference
 {
+
 bool generateFixedData(const GenerateConfig& cfg, void* data, size_t size)
 {
     // Check we support the operator
@@ -31,22 +45,23 @@ bool generateFixedData(const GenerateConfig& cfg, void* data, size_t size)
         return false;
     }
 
+    std::vector<int32_t> inData = cfg.fixedDataInfo.data;
+    const auto T                = TosaReference::numElementsFromShape(cfg.shape);
+    if (T != static_cast<int64_t>(inData.size()))
+    {
+        WARNING("[Generator][FD] Given data size %d does not match output size %d.", inData.size(), T);
+        return false;
+    }
+
     switch (cfg.dataType)
     {
         case DType::DType_SHAPE: {
-            int32_t* outData            = reinterpret_cast<int32_t*>(data);
-            std::vector<int32_t> inData = cfg.fixedDataInfo.data;
-            const auto T                = TosaReference::numElementsFromShape(cfg.shape);
-            if (T != static_cast<int64_t>(inData.size()))
-            {
-                WARNING("[Generator][FD] Size does not match.");
-                return false;
-            }
-            for (auto t = 0; t < T; t++)
-            {
-                outData[t] = inData[t];
-            }
-            return true;
+            int32_t* outData = reinterpret_cast<int32_t*>(data);
+            return copyFixedData(T, inData, outData);
+        }
+        case DType::DType_INT8: {
+            int8_t* outData = reinterpret_cast<int8_t*>(data);
+            return copyFixedData(T, inData, outData);
         }
         default:
             WARNING("[Generator][FD] Unsupported type.");
diff --git a/reference_model/src/ops/ewise_binary.cc b/reference_model/src/ops/ewise_binary.cc
index ed176f3..8cc1319 100644
--- a/reference_model/src/ops/ewise_binary.cc
+++ b/reference_model/src/ops/ewise_binary.cc
@@ -463,11 +463,18 @@ int OpMul<Rank, InDtype, OutDtype>::eval()
     using TInt64      = Eigen::Tensor<int64_t, Rank>;
     TInt64 tmp_result = ia.binaryExpr(ib, this->mul_fcn);
 
-    // Retrieve `shift` value and construct a Eigen tensor instance for it.
-    s = dynamic_cast<TosaReference::TensorTemplate<TShift>*>(this->inputs[2]);
-    ASSERT_MEM(s);
+    // Retrieve the `shift` value and construct an Eigen tensor instance for it. Shift is
+    // stored as a rank-0 tensor in the flatbuffer.
+    auto s0 = dynamic_cast<TosaReference::TensorTemplate<TShiftRank0>*>(this->inputs[2]);
 
-    int shift = s->getTensor()(0);
+    // Get the zero element from the rank-0 tensor (i.e. shape = () in Numpy) since
+    // `class Tensor` currently has no knowledge of the size of a rank-0 tensor. Testing
+    // stores a rank-1 tensor instead.
+    auto s1 = dynamic_cast<TosaReference::TensorTemplate<TShiftRank1>*>(this->inputs[2]);
+
+    ASSERT_MEM(s0 || s1);
+
+    int shift = s0 ? s0->getTensor()(0) : s1->getTensor()(0);
 
     TIn is(ia);
     is.setConstant(shift);
@@ -486,11 +493,12 @@ int OpMul<0, TOSA_REF_TYPE_INT32, TOSA_REF_TYPE_INT32>::eval()
     Eigen::Tensor<int64_t, 0> tmp_result = this->a->getTensor().binaryExpr(this->b->getTensor(), this->mul_fcn);
 
     // Retrieve `shift` value.
-    s = dynamic_cast<TosaReference::TensorTemplate<TShift>*>(this->inputs[2]);
-    ASSERT_MEM(s);
+    auto s0 = dynamic_cast<TosaReference::TensorTemplate<TShiftRank0>*>(this->inputs[2]);
+    auto s1 = dynamic_cast<TosaReference::TensorTemplate<TShiftRank1>*>(this->inputs[2]);
+    ASSERT_MEM(s0 || s1);
 
     Eigen::Tensor<int32_t, 0> shift;
-    shift.setConstant(s->getTensor()(0));
+    shift.setConstant(s0 ? s0->getTensor()(0) : s1->getTensor()(0));
 
     this->result->getTensor() = tmp_result.binaryExpr(shift, this->shr_fcn);
diff --git a/reference_model/src/ops/ewise_binary.h b/reference_model/src/ops/ewise_binary.h
index 8d2e486..7ebd852 100644
--- a/reference_model/src/ops/ewise_binary.h
+++ b/reference_model/src/ops/ewise_binary.h
@@ -159,11 +159,8 @@ public:
     OpMul(SubgraphTraverser* sgt_, TosaAttributeBase* attribute_, uint64_t id_)
         : BinaryNode<Rank, InDtype, OutDtype>(sgt_, Op_MUL, id_)
     {
-        if constexpr (InDtype == TOSA_REF_TYPE_INT32)
-        {
-            // Require `shift` operand.
-            this->setRequiredOperands(3, 1);
-        }
+        // Require `shift` operand.
+        this->setRequiredOperands(3, 1);
         register_fcn();
     }
     static constexpr int64_t QMin = GetQMin<OutDtype>::value;
@@ -173,9 +170,10 @@ public:
     using OutEigenType   = typename GetEigenType<OutDtype>::type;
     using ShiftEigenType = typename GetEigenType<TOSA_REF_TYPE_INT8>::type;
 
-    using TIn    = Eigen::Tensor<InEigenType, Rank>;
-    using TOut   = Eigen::Tensor<OutEigenType, Rank>;
-    using TShift = Eigen::Tensor<ShiftEigenType, 0>;
+    using TIn         = Eigen::Tensor<InEigenType, Rank>;
+    using TOut        = Eigen::Tensor<OutEigenType, Rank>;
+    using TShiftRank0 = Eigen::Tensor<ShiftEigenType, 0>;
+    using TShiftRank1 = Eigen::Tensor<ShiftEigenType, 1>;
 
     int register_fcn();
     int eval();
 
@@ -183,9 +181,6 @@ public:
     // Note that INT64 is not natively supported in Dtype system.
     std::function<int64_t(InEigenType, InEigenType)> mul_fcn;
     std::function<OutEigenType(int64_t, InEigenType)> shr_fcn;
-
-protected:
-    TosaReference::TensorTemplate<TShift>* s;
 };
 
 template
diff --git a/verif/generator/datagenerator.py b/verif/generator/datagenerator.py
index 743475c..c63a2d5 100644
--- a/verif/generator/datagenerator.py
+++ b/verif/generator/datagenerator.py
@@ -82,6 +82,10 @@ class GenerateLibrary:
             # Create buffer and initialize to zero
             buffer = (ct.c_int32 * size)(0)
             size_bytes = size * 4
+        elif dtype == "INT8":
+            size_bytes = size
+            # Create buffer of bytes and initialize to zero
+            buffer = (ct.c_ubyte * size_bytes)(0)
         else:
             raise GenerateError(f"Unsupported data type {dtype}")
diff --git a/verif/generator/tosa_arg_gen.py b/verif/generator/tosa_arg_gen.py
index 0851aca..592c491 100644
--- a/verif/generator/tosa_arg_gen.py
+++ b/verif/generator/tosa_arg_gen.py
@@ -254,19 +254,16 @@ class TosaTensorGen:
         return shape_list
 
     @staticmethod
-    def tgBroadcastFuzz(testGen, op, rank, error_name=None):
+    def _get_broadcast_shapes(testGen, num_shapes, rank, error_name=None):
         shape = testGen.makeShape(rank)
-
-        pl, const = op["operands"]
-
         shape_list = []
 
         # Choose one of the inputs to broadcast
         # Note: Simplifies OutputShaper code if we don't change first shape for errors
-        bcast_idx = testGen.randInt(0 if error_name is None else 1, pl + const)
+        bcast_idx = testGen.randInt(0 if error_name is None else 1, num_shapes)
         fuzz_idx = testGen.randInt(0, rank)
 
-        for i in range(pl + const):
+        for i in range(num_shapes):
             shape_bcast = shape.copy()
 
             # To test broadcasting, the chosen fuzz index dimension should not be 1
@@ -294,6 +291,22 @@ class TosaTensorGen:
 
         return shape_list
 
+    @staticmethod
+    def tgBroadcastFuzz(testGen, op, rank, error_name=None):
+        pl, const = op["operands"]
+        num_shapes = pl + const
+        return TosaTensorGen._get_broadcast_shapes(
+            testGen, num_shapes, rank, error_name
+        )
+
+    @staticmethod
+    def tgMul(testGen, op, rank, error_name=None):
+        # Get broadcast shapes for the first 2 inputs as the 3rd is shift
+        shape_list = TosaTensorGen._get_broadcast_shapes(testGen, 2, rank, error_name)
+        # Add a single dimension tensor for shift
+        shape_list.append([1])
+        return shape_list
+
     @staticmethod
     def tgConv2D(testGen, op, rank, error_name=None):
         pl, const = op["operands"]
@@ -727,7 +740,12 @@ class TosaTensorValuesGen:
 
             # Ignore lazy data gen option and create data array using any range limits
             if "fixed_data" in argsDict and argsDict["fixed_data"][idx] is not None:
-                arr = np.int64(argsDict["fixed_data"][idx])
+                if dtype == DType.SHAPE:
+                    arr = np.int64(argsDict["fixed_data"][idx])
+                elif dtype == DType.INT8:
+                    arr = np.int8(argsDict["fixed_data"][idx])
+                else:
+                    assert False, "Unsupported fixed_data type"
             else:
                 arr = testGen.getRandTensor(shape, dtype, data_range)
             if roundMode:
@@ -1147,6 +1165,13 @@ class TosaTensorValuesGen:
         if data_range:
             argsDict["data_range"] = data_range
 
+        if dtypeList[0] != DType.SHAPE:
+            # Need to supply a shift tensor for MUL (not needed for MUL_SHAPE)
+            dtypeList[2] = DType.INT8
+            shapeList[2] = [1]
+            # Create a new list for the pre-generated data in argsDict["fixed_data"]
+            argsDict["fixed_data"] = [None, None, [argsDict["shift"]]]
+
         return TosaTensorValuesGen.tvgLazyGenDefault(
             testGen, opName, dtypeList, shapeList, argsDict, error_name
         )
@@ -1154,9 +1179,6 @@ class TosaTensorValuesGen:
         # Integer test
         op = testGen.TOSA_OP_LIST[opName]
         pCount, cCount = op["operands"]
-        assert (
-            pCount == 2 and cCount == 0
-        ), "Op.MUL must have 2 placeholders, 0 consts"
 
         tens_ser_list = []
@@ -1213,6 +1235,7 @@ class TosaTensorValuesGen:
             b_arr = b_arr // 2
 
         if dtypeList[0] == DType.SHAPE:
+            # MUL_SHAPE with 2 inputs
             tens_ser_list.append(
                 testGen.ser.addPlaceholder(shapeList[0], dtypeList[0], a_arr_64)
             )
@@ -1220,12 +1243,16 @@ class TosaTensorValuesGen:
                 testGen.ser.addPlaceholder(shapeList[1], dtypeList[1], b_arr_64)
             )
         else:
+            # MUL with 3 inputs (the 3rd is the shift)
             tens_ser_list.append(
                 testGen.ser.addPlaceholder(shapeList[0], dtypeList[0], a_arr)
             )
             tens_ser_list.append(
                 testGen.ser.addPlaceholder(shapeList[1], dtypeList[1], b_arr)
             )
+            tens_ser_list.append(
+                testGen.ser.addPlaceholder([1], DType.INT8, np.int8([shift]))
+            )
 
         return TosaTensorValuesGen.TVGInfo(tens_ser_list, None)
diff --git a/verif/generator/tosa_test_gen.py b/verif/generator/tosa_test_gen.py
index ee45f0e..b472087 100644
--- a/verif/generator/tosa_test_gen.py
+++ b/verif/generator/tosa_test_gen.py
@@ -587,9 +587,9 @@ class TosaTestGen:
     def build_mul(
         self, op, inputs, args_dict, validator_fcns=None, error_name=None, qinfo=None
    ):
-        assert len(inputs) == 2
-        a, b = inputs
-        shift = args_dict["shift"]
+        # Note that mul is a binary operator, but it also takes a shift value tensor
+        assert len(inputs) == 3
+        a, b, s = inputs
 
         result_tensor = OutputShaper.binaryBroadcastOp(
             self.ser, self.rng, a, b, error_name
@@ -605,7 +605,7 @@ class TosaTestGen:
         result_tensor.setDtype(outputDType)
 
         # Invalidate Input/Output list for error if checks.
-        input_list = [a.name, b.name]
+        input_list = [a.name, b.name, s.name]
         output_list = [result_tensor.name]
         pCount, cCount = op["operands"]
         num_operands = pCount + cCount
@@ -629,10 +629,7 @@ class TosaTestGen:
         ):
             return None
 
-        attr = ts.TosaSerializerAttribute()
-        attr.MulAttribute(shift)
-
-        self.ser.addOperator(op["op"], input_list, output_list, attr)
+        self.ser.addOperator(op["op"], input_list, output_list)
 
         compliance = self.tensorComplianceMetaData(
             op, a.dtype, args_dict, result_tensor, error_name
@@ -3874,10 +3871,10 @@ class TosaTestGen:
         },
         "mul": {
             "op": Op.MUL,
-            "operands": (2, 0),
+            "operands": (3, 0),
             "build_fcn": (
                 build_mul,
-                TosaTensorGen.tgBroadcastFuzz,
+                TosaTensorGen.tgMul,
                 TosaTensorValuesGen.tvgMul,
                 TosaArgGen.agMul,
             ),
-- 
cgit v1.2.1
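
Usage sketch (not part of the patch): after this change a MUL test case
carries its shift as a third rank-1 INT8 input tensor and no MulAttribute.
A minimal Python sketch of the new wiring, assuming only the serializer
calls that appear in the diff (addPlaceholder, addOperator); the helper
name serialize_mul and its argument list are illustrative:

    import numpy as np
    from tosa.DType import DType  # DType enum, as used by the generator modules

    def serialize_mul(ser, op, a, b, out_name, shift):
        # The shift now travels as tensor data: a one-element INT8
        # placeholder, mirroring the tvgMul change above.
        s = ser.addPlaceholder([1], DType.INT8, np.int8([shift]))
        # Three inputs (a, b, shift) and no attribute on the operator itself.
        ser.addOperator(op, [a.name, b.name, s.name], [out_name])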