From 14d7f7a2b5d0d85b83d8c84a5456828feb1a0ea1 Mon Sep 17 00:00:00 2001
From: Kevin Cheng
Date: Wed, 12 May 2021 10:44:49 -0700
Subject: Update to v0.22.0

- remove identityN and placeholder
- add div
- update serialization_lib hash
- update apply_scale_16() assertion
- regenerate examples/ due to serialization_lib change

Change-Id: I7183d92bec33697c65adfc07cb8eb89c6882675a
---
 .../flatbuffer-tf/test_add_1x4x4x4_f32.tosa | Bin 596 -> 492 bytes
 .../flatbuffer-tflite/test_add_1x4x4x4_f32.tosa | Bin 628 -> 516 bytes
 ...v2d_1x1_1x32x32x8_f32_st11_padSAME_dilat11.tosa | Bin 1016 -> 964 bytes
 ...v2d_1x1_1x32x32x8_f32_st11_padSAME_dilat11.tosa | Bin 800 -> 744 bytes
 ...v2d_1x1_1x32x32x8_qi8_st11_padSAME_dilat11.tosa | Bin 1128 -> 1072 bytes
 reference_model/src/ops/control_flow.cc | 5 +-
 reference_model/src/ops/data_nodes.cc | 78 +--------
 reference_model/src/ops/data_nodes.h | 30 ----
 reference_model/src/ops/ewise_binary.cc | 23 +++
 reference_model/src/ops/ewise_binary.h | 1 +
 reference_model/src/ops/op_factory.cc | 12 +-
 reference_model/src/quant_util.h | 4 +-
 reference_model/src/subgraph_traverser.cc | 29 +---
 thirdparty/serialization_lib | 2 +-
 verif/tosa_serializer.py | 9 +-
 verif/tosa_test_gen.py | 179 +++++++++++----------
 16 files changed, 139 insertions(+), 233 deletions(-)

diff --git a/examples/test_add_1x4x4x4_f32/flatbuffer-tf/test_add_1x4x4x4_f32.tosa b/examples/test_add_1x4x4x4_f32/flatbuffer-tf/test_add_1x4x4x4_f32.tosa
index 7a195af..003d1bb 100644
Binary files a/examples/test_add_1x4x4x4_f32/flatbuffer-tf/test_add_1x4x4x4_f32.tosa and b/examples/test_add_1x4x4x4_f32/flatbuffer-tf/test_add_1x4x4x4_f32.tosa differ
diff --git a/examples/test_add_1x4x4x4_f32/flatbuffer-tflite/test_add_1x4x4x4_f32.tosa b/examples/test_add_1x4x4x4_f32/flatbuffer-tflite/test_add_1x4x4x4_f32.tosa
index c686131..864aaac 100644
Binary files a/examples/test_add_1x4x4x4_f32/flatbuffer-tflite/test_add_1x4x4x4_f32.tosa and b/examples/test_add_1x4x4x4_f32/flatbuffer-tflite/test_add_1x4x4x4_f32.tosa differ
diff --git a/examples/test_conv2d_1x1_1x32x32x8_f32_st11_padSAME_dilat11/flatbuffer-tf/test_conv2d_1x1_1x32x32x8_f32_st11_padSAME_dilat11.tosa b/examples/test_conv2d_1x1_1x32x32x8_f32_st11_padSAME_dilat11/flatbuffer-tf/test_conv2d_1x1_1x32x32x8_f32_st11_padSAME_dilat11.tosa
index 1fb5f17..ea794c8 100644
Binary files a/examples/test_conv2d_1x1_1x32x32x8_f32_st11_padSAME_dilat11/flatbuffer-tf/test_conv2d_1x1_1x32x32x8_f32_st11_padSAME_dilat11.tosa and b/examples/test_conv2d_1x1_1x32x32x8_f32_st11_padSAME_dilat11/flatbuffer-tf/test_conv2d_1x1_1x32x32x8_f32_st11_padSAME_dilat11.tosa differ
diff --git a/examples/test_conv2d_1x1_1x32x32x8_f32_st11_padSAME_dilat11/flatbuffer-tflite/test_conv2d_1x1_1x32x32x8_f32_st11_padSAME_dilat11.tosa b/examples/test_conv2d_1x1_1x32x32x8_f32_st11_padSAME_dilat11/flatbuffer-tflite/test_conv2d_1x1_1x32x32x8_f32_st11_padSAME_dilat11.tosa
index d1b740b..dfc7c9c 100644
Binary files a/examples/test_conv2d_1x1_1x32x32x8_f32_st11_padSAME_dilat11/flatbuffer-tflite/test_conv2d_1x1_1x32x32x8_f32_st11_padSAME_dilat11.tosa and b/examples/test_conv2d_1x1_1x32x32x8_f32_st11_padSAME_dilat11/flatbuffer-tflite/test_conv2d_1x1_1x32x32x8_f32_st11_padSAME_dilat11.tosa differ
diff --git a/examples/test_conv2d_1x1_1x32x32x8_qi8_st11_padSAME_dilat11/flatbuffer-tflite/test_conv2d_1x1_1x32x32x8_qi8_st11_padSAME_dilat11.tosa b/examples/test_conv2d_1x1_1x32x32x8_qi8_st11_padSAME_dilat11/flatbuffer-tflite/test_conv2d_1x1_1x32x32x8_qi8_st11_padSAME_dilat11.tosa
index 30bd194..1e21d49 100644
Binary files
a/examples/test_conv2d_1x1_1x32x32x8_qi8_st11_padSAME_dilat11/flatbuffer-tflite/test_conv2d_1x1_1x32x32x8_qi8_st11_padSAME_dilat11.tosa and b/examples/test_conv2d_1x1_1x32x32x8_qi8_st11_padSAME_dilat11/flatbuffer-tflite/test_conv2d_1x1_1x32x32x8_qi8_st11_padSAME_dilat11.tosa differ diff --git a/reference_model/src/ops/control_flow.cc b/reference_model/src/ops/control_flow.cc index 827e01f..1a6a63a 100644 --- a/reference_model/src/ops/control_flow.cc +++ b/reference_model/src/ops/control_flow.cc @@ -93,6 +93,8 @@ int OpControlFlow::evalBlock(TosaSerializationBasicBlock* block, return 1; } + tensor->setIsValid(); + // Push ready consumers to the next node list for (auto gn : tensor->getConsumers()) { @@ -292,8 +294,7 @@ int OpWhileLoop::checkTensorAttributes() int OpWhileLoop::eval() { - TosaReference::Tensor0 cond_output_ctensor(std::string("cond_output"), DType_BOOL, - std::vector({})); + TosaReference::Tensor0 cond_output_ctensor(std::string("cond_output"), DType_BOOL, std::vector({})); cond_output_ctensor.allocate(); std::vector cond_block_outputs; diff --git a/reference_model/src/ops/data_nodes.cc b/reference_model/src/ops/data_nodes.cc index 883cd1b..baae019 100644 --- a/reference_model/src/ops/data_nodes.cc +++ b/reference_model/src/ops/data_nodes.cc @@ -42,29 +42,6 @@ int OpConst::eval() return GraphNode::eval(); } -OpPlaceholder::OpPlaceholder(uint64_t id_) - : GraphNode(Op_PLACEHOLDER, id_) -{ - setRequiredOperands(0, 1); -} - -OpPlaceholder::~OpPlaceholder() -{} - -int OpPlaceholder::checkTensorAttributes() -{ - if (validateRequiredOperands()) - return 1; - - return 0; -} - -int OpPlaceholder::eval() -{ - // Evaluation is trivial for placeholders - return GraphNode::eval(); -} - template OpIdentity::OpIdentity(TosaAttributeBase* attribute_, TosaQuantInfoBase* qinfo_, uint64_t id_) : GraphNode(Op_IDENTITY, id_) @@ -107,64 +84,11 @@ int OpIdentity::eval() return GraphNode::eval(); } -template -OpIdentityN::OpIdentityN(TosaAttributeBase* attribute_, TosaQuantInfoBase* qinfo_, uint64_t id_) - : GraphNode(Op_IDENTITYN, id_) -{ - setRequiredRank(0, 6); -} - -template -OpIdentityN::~OpIdentityN() -{} - -template -int OpIdentityN::checkTensorAttributes() -{ - - if (inputs.size() != outputs.size()) - { - printNodeValidationError("Input and output tensor list lengths are not equal"); - return 1; - } - - for (size_t i = 0; i < inputs.size(); i++) - { - ins.push_back(dynamic_cast*>(inputs[i])); - outs.push_back(dynamic_cast*>(outputs[i])); - - if (ins[i]->matchRankTypeShape(*outs[i])) - { - printNodeValidationError("Input and output tensor rank, type, or shape do not match"); - return 1; - } - } - - return 0; -} - -template -int OpIdentityN::eval() -{ - for (size_t i = 0; i < ins.size(); i++) - { - outs[i]->getTensor() = ins[i]->getTensor(); - } - - return GraphNode::eval(); -} - // template explicit instantiation -// note OpConst and OpPlaceholder are not templated +// note OpConst is not templated DEF_INSTANTIATE_RANK0_6_ONE_RANK_ONE_TYPE(OpIdentity, FLOAT); DEF_INSTANTIATE_RANK0_6_ONE_RANK_ONE_TYPE(OpIdentity, INT8); DEF_INSTANTIATE_RANK0_6_ONE_RANK_ONE_TYPE(OpIdentity, INT16); DEF_INSTANTIATE_RANK0_6_ONE_RANK_ONE_TYPE(OpIdentity, INT32); DEF_INSTANTIATE_RANK0_6_ONE_RANK_ONE_TYPE(OpIdentity, BOOL); - -DEF_INSTANTIATE_RANK0_6_ONE_RANK_ONE_TYPE(OpIdentityN, FLOAT); -DEF_INSTANTIATE_RANK0_6_ONE_RANK_ONE_TYPE(OpIdentityN, INT8); -DEF_INSTANTIATE_RANK0_6_ONE_RANK_ONE_TYPE(OpIdentityN, INT16); -DEF_INSTANTIATE_RANK0_6_ONE_RANK_ONE_TYPE(OpIdentityN, INT32); 
-DEF_INSTANTIATE_RANK0_6_ONE_RANK_ONE_TYPE(OpIdentityN, BOOL); diff --git a/reference_model/src/ops/data_nodes.h b/reference_model/src/ops/data_nodes.h index bec4669..a02d441 100644 --- a/reference_model/src/ops/data_nodes.h +++ b/reference_model/src/ops/data_nodes.h @@ -31,16 +31,6 @@ public: virtual int eval(); }; -class OpPlaceholder : public GraphNode -{ -public: - OpPlaceholder(uint64_t id_); - virtual ~OpPlaceholder(); - - virtual int checkTensorAttributes(); - virtual int eval(); -}; - template class OpIdentity : public GraphNode { @@ -61,26 +51,6 @@ protected: TosaReference::TensorTemplate* out; }; -template -class OpIdentityN : public GraphNode -{ -public: - OpIdentityN(TosaAttributeBase* attribute_, TosaQuantInfoBase* qinfo_, uint64_t id_); - virtual ~OpIdentityN(); - - virtual int checkTensorAttributes(); - virtual int eval(); - - using InEigenType = typename GetEigenType::type; - using OutEigenType = typename GetEigenType::type; - using TIn = Eigen::Tensor; - using TOut = Eigen::Tensor; - -protected: - std::vector*> ins; - std::vector*> outs; -}; - }; // namespace TosaReference #endif diff --git a/reference_model/src/ops/ewise_binary.cc b/reference_model/src/ops/ewise_binary.cc index fc587f1..76cebeb 100644 --- a/reference_model/src/ops/ewise_binary.cc +++ b/reference_model/src/ops/ewise_binary.cc @@ -297,6 +297,27 @@ int OpBitwiseXor::register_fcn() return 0; } +template +int OpDiv::register_fcn() +{ + switch (InDtype) + { + case DType_INT32: + this->fcn = [this](InEigenType a, InEigenType b) -> OutEigenType { + ASSERT_MSG_NODE(b != 0, "OpDiv: divisor must be non-zero value"); + int64_t res_in_64 = static_cast(a) / b; + int64_t i32_max_in_64 = static_cast(std::numeric_limits::max()); + ASSERT_MSG_NODE(a <= i32_max_in_64, "OpDiv: result not in i32 range"); + return static_cast(res_in_64); + }; + break; + default: + FATAL_ERROR_NODE("unsupported DType %s", EnumNamesDType()[InDtype]); + } + + return 0; +} + template int OpLogicalAnd::register_fcn() { @@ -579,6 +600,8 @@ DEF_INSTANTIATE_RANK0_6_ONE_RANK_ONE_TYPE(OpBitwiseXor, INT8); DEF_INSTANTIATE_RANK0_6_ONE_RANK_ONE_TYPE(OpBitwiseXor, INT16); DEF_INSTANTIATE_RANK0_6_ONE_RANK_ONE_TYPE(OpBitwiseXor, INT32); +DEF_INSTANTIATE_RANK0_6_ONE_RANK_ONE_TYPE(OpDiv, INT32); + DEF_INSTANTIATE_RANK0_6_ONE_RANK_ONE_TYPE(OpLogicalAnd, BOOL); DEF_INSTANTIATE_RANK0_6_ONE_RANK_ONE_TYPE(OpLogicalLeftShift, INT8); diff --git a/reference_model/src/ops/ewise_binary.h b/reference_model/src/ops/ewise_binary.h index 5bc5630..6b9c98d 100644 --- a/reference_model/src/ops/ewise_binary.h +++ b/reference_model/src/ops/ewise_binary.h @@ -125,6 +125,7 @@ DEF_TEMPLATE_BINARY_OP_DEFAULT(Add, ADD) DEF_TEMPLATE_BINARY_OP_DEFAULT(BitwiseAnd, BITWISE_AND) DEF_TEMPLATE_BINARY_OP_DEFAULT(BitwiseOr, BITWISE_OR) DEF_TEMPLATE_BINARY_OP_DEFAULT(BitwiseXor, BITWISE_XOR) +DEF_TEMPLATE_BINARY_OP_DEFAULT(Div, DIV) DEF_TEMPLATE_BINARY_OP_DEFAULT(LogicalAnd, LOGICAL_AND) DEF_TEMPLATE_BINARY_OP_DEFAULT(LogicalLeftShift, LOGICAL_LEFT_SHIFT) DEF_TEMPLATE_BINARY_OP_DEFAULT(LogicalRightShift, LOGICAL_RIGHT_SHIFT) diff --git a/reference_model/src/ops/op_factory.cc b/reference_model/src/ops/op_factory.cc index b326c63..440d624 100644 --- a/reference_model/src/ops/op_factory.cc +++ b/reference_model/src/ops/op_factory.cc @@ -134,6 +134,9 @@ GraphNode* OpFactory::newOp(TosaSerializationHandler* tsh, DEF_FACTORY_RANK0_6_ONE_RANK_ONE_TYPE(OpBitwiseXor, INT16); DEF_FACTORY_RANK0_6_ONE_RANK_ONE_TYPE(OpBitwiseXor, INT32); break; + case Op_DIV: + DEF_FACTORY_RANK0_6_ONE_RANK_ONE_TYPE(OpDiv, 
INT32); + break; case Op_LOGICAL_AND: DEF_FACTORY_RANK0_6_ONE_RANK_ONE_TYPE(OpLogicalAnd, BOOL); break; @@ -346,8 +349,6 @@ GraphNode* OpFactory::newOp(TosaSerializationHandler* tsh, // data_nodes case Op_CONST: return new OpConst(id); - case Op_PLACEHOLDER: - return new OpPlaceholder(id); case Op_IDENTITY: DEF_FACTORY_RANK0_6_ONE_RANK_ONE_TYPE(OpIdentity, FLOAT); DEF_FACTORY_RANK0_6_ONE_RANK_ONE_TYPE(OpIdentity, INT32); @@ -355,13 +356,6 @@ GraphNode* OpFactory::newOp(TosaSerializationHandler* tsh, DEF_FACTORY_RANK0_6_ONE_RANK_ONE_TYPE(OpIdentity, INT16); DEF_FACTORY_RANK0_6_ONE_RANK_ONE_TYPE(OpIdentity, BOOL); break; - case Op_IDENTITYN: - DEF_FACTORY_RANK0_6_ONE_RANK_ONE_TYPE(OpIdentityN, FLOAT); - DEF_FACTORY_RANK0_6_ONE_RANK_ONE_TYPE(OpIdentityN, INT32); - DEF_FACTORY_RANK0_6_ONE_RANK_ONE_TYPE(OpIdentityN, INT8); - DEF_FACTORY_RANK0_6_ONE_RANK_ONE_TYPE(OpIdentityN, INT16); - DEF_FACTORY_RANK0_6_ONE_RANK_ONE_TYPE(OpIdentityN, BOOL); - break; // type_conversion case Op_CAST: diff --git a/reference_model/src/quant_util.h b/reference_model/src/quant_util.h index f07dd10..c595869 100644 --- a/reference_model/src/quant_util.h +++ b/reference_model/src/quant_util.h @@ -65,8 +65,8 @@ public: static int32_t apply_scale_16(int64_t value, int16_t multiplier, int32_t shift) { ASSERT_MSG(multiplier >= 0, "apply_scale_16() error: multiplier should >= 0 but is %d", multiplier); - ASSERT_MSG(value >= -(static_cast(1) << 47) && value < (static_cast(1) << 47), - "apply_scale_16() error: value should be within [-(1^47), 1^47]"); + ASSERT_MSG(shift >= 2 && shift <= 62, "apply_scale_16() error: shift should be within [2, 62] but is %d", + shift); int64_t round = 1L << (shift - 1); int64_t result = value * (int64_t)multiplier + round; result = result >> shift; diff --git a/reference_model/src/subgraph_traverser.cc b/reference_model/src/subgraph_traverser.cc index 5096ffa..1995b5c 100644 --- a/reference_model/src/subgraph_traverser.cc +++ b/reference_model/src/subgraph_traverser.cc @@ -499,7 +499,6 @@ int SubgraphTraverser::linkTensorsAndNodes() // For each node, read this list, link up the tensors with their inputs/outputs for (GraphNode* currNode : nodes) { - // Link inputs/consuming nodes for (std::string& name : currNode->getInputNames()) { @@ -566,36 +565,18 @@ int SubgraphTraverser::validateGraph() for (TosaReference::Tensor* currTensor : tensors) { - if (!currTensor->getProducer() && currTensor->getConsumers().empty()) - { - WARNING("Graph inconsistency: TosaReference::Tensor %s has no producers or consumers\n", - currTensor->getName().c_str()); - return 1; - } - - if (currTensor->getIsSubgraphInput()) + // It's okay for block input tensor not being consumed by operators. + // This is common in control flow op execution. 
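
Note: for context on the apply_scale_16() change in quant_util.h above (the old value-range
assertion is replaced by a shift-range assertion of [2, 62]), the Python sketch below restates
the scaling arithmetic. apply_scale_16_sketch is an illustrative name, not the reference
model's API, and the trailing int32 range check is an assumption added only for illustration.

    def apply_scale_16_sketch(value: int, multiplier: int, shift: int) -> int:
        # Same computation as the C++ reference: add a round-half-up term,
        # then arithmetic right shift.  Bounding shift to [2, 62] keeps the
        # rounding term 1 << (shift - 1) representable in a signed 64-bit int.
        assert multiplier >= 0
        assert 2 <= shift <= 62
        rounding = 1 << (shift - 1)
        result = (value * multiplier + rounding) >> shift
        # Assumption for illustration: the scaled result should fit in int32.
        assert -(1 << 31) <= result <= (1 << 31) - 1
        return result
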
+ if (!currTensor->getIsSubgraphInput()) { - if (currTensor->getProducer() && currTensor->getProducer()->getOp() != Op_PLACEHOLDER) + if (!currTensor->getProducer() && currTensor->getConsumers().empty()) { - WARNING("Graph inconsistency: TosaReference::Tensor %s is a subgraph input and has a producer\n", + WARNING("Graph inconsistency: TosaReference::Tensor %s has no producers or consumers\n", currTensor->getName().c_str()); return 1; } } - // comment this check out as this is possible when graph have multiple output - // for example: - // %0 = add(%arg0, %arg1) - // %1 = mul(%arg0, %0) - // yields(%0, %1) - //if (currTensor->getIsSubgraphOutput()) { - // if (!currTensor->getConsumers().empty()) { - // WARNING ("Graph inconsistency: TosaReference::Tensor %s is a subgraph output and has a consumer\n", - // currTensor->getName().c_str()); - // return 1; - // } - //} - if (g_func_config.tosa_profile == 0) { DType dtype = currTensor->getDtype(); diff --git a/thirdparty/serialization_lib b/thirdparty/serialization_lib index 2364dcd..a8b4eaf 160000 --- a/thirdparty/serialization_lib +++ b/thirdparty/serialization_lib @@ -1 +1 @@ -Subproject commit 2364dcd7241d730021bf68e000e5a6411b9f09d1 +Subproject commit a8b4eafda31fe41b99a46c09c131ac7295382570 diff --git a/verif/tosa_serializer.py b/verif/tosa_serializer.py index 726ffc4..5ed9877 100644 --- a/verif/tosa_serializer.py +++ b/verif/tosa_serializer.py @@ -548,8 +548,6 @@ class TosaSerializer: tens = self.currBasicBlock.addTensor(name, shape, dtype, None, filename) # This is always an input to the block self.currBasicBlock.addInput(name) - # Add the operator now - self.currBasicBlock.addOperator(tosa.Op.Op().PLACEHOLDER, [], name) if vals is not None: np.save(os.path.join(self.pathPrefix, filename), vals, False) @@ -586,7 +584,6 @@ class TosaSerializer: return tens def addInputTensor(self, tensor): - self.currBasicBlock.addOperator(tosa.Op.Op().PLACEHOLDER, [], tensor.name) self.currBasicBlock.addTensor(tensor.name, tensor.shape, tensor.dtype) self.currBasicBlock.addInput(tensor.name) @@ -606,10 +603,8 @@ class TosaSerializer: def addOperator(self, op, inputs, outputs, attributes=None, quant_info=None): - if op == tosa.Op.Op().PLACEHOLDER or op == tosa.Op.Op().CONST: - raise Exception( - "Use addPlaceholderTensor() or addConstTensor() to add PLACEHOLDER and CONST ops" - ) + if op == tosa.Op.Op().CONST: + raise Exception("Use addConstTensor() to add CONST ops") return self.currBasicBlock.addOperator( op, inputs, outputs, attributes, quant_info diff --git a/verif/tosa_test_gen.py b/verif/tosa_test_gen.py index 7731a75..bc97f15 100644 --- a/verif/tosa_test_gen.py +++ b/verif/tosa_test_gen.py @@ -1710,10 +1710,101 @@ class TosaTestGen: else: raise Exception("OpArithmeticRightShift: invalid input dtype") else: - arr = self.getRandTensor(shapeList[0], dtypeList[idx]) + arr = self.getRandTensor(shape, dtypeList[idx]) placeholders.append(self.ser.addPlaceholder(shape, dtypeList[idx], arr)) tens.extend(placeholders) + elif op["op"] == Op.DIV: + assert ( + pCount == 2 and cCount == 0 + ), "Op.Div must have 2 placeholders, 0 consts" + + placeholders = [] + + # Two invalid cases for Op.DIV: + # 1. divisor == 0 + # 2. 
dividend == (1<<31) and divisor == -1 + while True: + dividend_arr = self.getRandTensor(shapeList[0], dtypeList[0]) + divisor_arr = self.getRandTensor(shapeList[1], dtypeList[1]) + + if (divisor_arr == 0).any(): + continue + + if (dividend_arr == (2 ** 31)).any() and (divisor_arr == -1).any(): + continue + + break + + placeholders.append( + self.ser.addPlaceholder(shapeList[0], dtypeList[0], dividend_arr) + ) + placeholders.append( + self.ser.addPlaceholder(shapeList[1], dtypeList[1], divisor_arr) + ) + + tens.extend(placeholders) + elif op["op"] == Op.MUL: + assert ( + pCount == 2 and cCount == 0 + ), "Op.MUL must have 2 placeholders, 0 consts" + + if dtypeList[0] == DType.FLOAT: + tens.extend(self.buildPlaceholderTensors(shapeList[:], dtypeList[:])) + else: + placeholders = [] + + # Make sure multiply result in int32 range + shift = testArgs[0] + if dtypeList[0] == DType.INT8: + num_bits = 8 + elif dtypeList[0] == DType.INT16: + num_bits = 16 + elif dtypeList[0] == DType.INT32: + num_bits = 32 + else: + raise Exception("OpMul: invalid input dtype") + + for idx, shape in enumerate(shapeList[:]): + low = -(2 ** (num_bits - 1)) + high = (2 ** (num_bits - 1)) - 1 + + a_arr = np.int32( + self.rng.integers(low=low, high=high, size=shapeList[0]) + ) + b_arr = np.int32( + self.rng.integers(low=low, high=high, size=shapeList[1]) + ) + + i = 0 + while True: + + a_arr_64 = a_arr.astype(np.int64) + b_arr_64 = b_arr.astype(np.int64) + + if shift > 0: + rounding = 1 << (shift - 1) + result_arr = ((a_arr_64 * b_arr_64) + rounding) >> shift + else: + result_arr = a_arr_64 * b_arr_64 + + if (result_arr > -(2 ** 31)).all() and ( + result_arr <= ((2 ** 31) - 1) + ).all(): + break + + i = i + 1 + a_arr = a_arr // 2 + b_arr = b_arr // 2 + + placeholders.append( + self.ser.addPlaceholder(shapeList[0], dtypeList[0], a_arr) + ) + placeholders.append( + self.ser.addPlaceholder(shapeList[1], dtypeList[1], b_arr) + ) + + tens.extend(placeholders) else: tens.extend( self.buildPlaceholderTensors(shapeList[0:pCount], dtypeList[0:pCount]) @@ -1858,7 +1949,6 @@ class TosaTestGen: "build_fcn": (build_argmax, TosaTensorGen.tgBasic, TosaArgGen.agAxis), "types": TYPE_NARROW_INT_FP, }, - "avg_pool2d": { "op": Op.AVG_POOL2D, "operands": (1, 0), @@ -1867,7 +1957,6 @@ class TosaTestGen: "qgen": TosaQuantGen.qgUnary, "types": TYPE_NARROW_INT_FP, }, - # Templated operator. Filled in by createDynamicOpLists "conv2d_TEMPLATE": { "op": Op.CONV2D, @@ -1878,9 +1967,7 @@ class TosaTestGen: "types": TYPE_CONV2D, "template": True, }, - # Conv3d TBD - # Templated operator. Filled in by createDynamicOpLists "depthwise_conv2d_TEMPLATE": { "op": Op.DEPTHWISE_CONV2D, @@ -1896,7 +1983,6 @@ class TosaTestGen: "types": TYPE_CONV2D, "template": True, }, - "fully_connected": { "op": Op.FULLY_CONNECTED, "operands": (1, 2), @@ -1905,7 +1991,6 @@ class TosaTestGen: "qgen": TosaQuantGen.qgConv, "types": TYPE_CONV2D, }, - "matmul": { "op": Op.MATMUL, "operands": (2, 0), @@ -1914,7 +1999,6 @@ class TosaTestGen: "qgen": TosaQuantGen.qgMatmul, "types": TYPE_NARROW_INT_FP, }, - "max_pool2d": { "op": Op.MAX_POOL2D, "operands": (1, 0), @@ -1922,7 +2006,6 @@ class TosaTestGen: "build_fcn": (build_pool2d, TosaTensorGen.tgNHWC, TosaArgGen.agPooling), "types": TYPE_NARROW_INT_FP, }, - # Templated operator. 
Filled in by createDynamicOpLists "transpose_conv2d_TEMPLATE": { "op": Op.TRANSPOSE_CONV2D, @@ -1937,7 +2020,6 @@ class TosaTestGen: "types": TYPE_CONV2D, "template": True, }, - # Activation functions "clamp": { "op": Op.CLAMP, @@ -1945,28 +2027,24 @@ class TosaTestGen: "build_fcn": (build_clamp, TosaTensorGen.tgBasic, None), "types": TYPE_NARROW_INT_FP, }, - "relun": { "op": Op.RELUN, "operands": (1, 0), "build_fcn": (build_relun, TosaTensorGen.tgBasic, None), "types": TYPE_FI32, }, - "sigmoid": { "op": Op.SIGMOID, "operands": (1, 0), "build_fcn": (build_sigmoid, TosaTensorGen.tgBasic, None), "types": TYPE_FP, }, - "tanh": { "op": Op.TANH, "operands": (1, 0), "build_fcn": (build_tanh, TosaTensorGen.tgBasic, None), "types": TYPE_FP, }, - # Elementwise Binary Operators "add": { "op": Op.ADD, @@ -1974,7 +2052,6 @@ class TosaTestGen: "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None), "types": TYPE_FI32, }, - "arithmetic_right_shift": { "op": Op.ARITHMETIC_RIGHT_SHIFT, "operands": (2, 0), @@ -1985,98 +2062,90 @@ class TosaTestGen: ), "types": TYPE_INT, }, - "bitwise_and": { "op": Op.BITWISE_AND, "operands": (2, 0), "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None), "types": TYPE_INT, }, - "bitwise_or": { "op": Op.BITWISE_OR, "operands": (2, 0), "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None), "types": TYPE_INT, }, - "bitwise_xor": { "op": Op.BITWISE_XOR, "operands": (2, 0), "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None), "types": TYPE_INT, }, - + "div": { + "op": Op.DIV, + "operands": (2, 0), + "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None), + "types": [DType.INT32], + }, "logical_and": { "op": Op.LOGICAL_AND, "operands": (2, 0), "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None), "types": TYPE_BOOL, }, - "logical_left_shift": { "op": Op.LOGICAL_LEFT_SHIFT, "operands": (2, 0), "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None), "types": TYPE_INT, }, - "logical_right_shift": { "op": Op.LOGICAL_RIGHT_SHIFT, "operands": (2, 0), "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None), "types": TYPE_INT, }, - "logical_or": { "op": Op.LOGICAL_OR, "operands": (2, 0), "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None), "types": TYPE_BOOL, }, - "logical_xor": { "op": Op.LOGICAL_XOR, "operands": (2, 0), "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None), "types": TYPE_BOOL, }, - "maximum": { "op": Op.MAXIMUM, "operands": (2, 0), "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None), "types": TYPE_FI32, }, - "minimum": { "op": Op.MINIMUM, "operands": (2, 0), "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None), "types": TYPE_FI32, }, - "mul": { "op": Op.MUL, "operands": (2, 0), "build_fcn": (build_mul, TosaTensorGen.tgBroadcastFuzz, TosaArgGen.agMul), "types": TYPE_INT_FP, }, - "pow": { "op": Op.POW, "operands": (2, 0), "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBasic, None), "types": TYPE_FP, }, - "sub": { "op": Op.SUB, "operands": (2, 0), "build_fcn": (build_binary_broadcast, TosaTensorGen.tgBroadcastFuzz, None), "types": TYPE_FI32, }, - "table": { "op": Op.TABLE, # Use the automatic generation functions to create the input array @@ -2086,7 +2155,6 @@ class TosaTestGen: "build_fcn": (build_table, TosaTensorGen.tgBasic, None), "types": [DType.INT16], }, - # Elementwise Unary operators "abs": { "op": Op.ABS, 
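
Note: the "div" entry above is INT32-only, and the reference kernel divides in int64 with
truncation toward zero, asserting that the result stays in int32 range. The test generator
therefore has to reject two input patterns: a zero divisor, and INT32_MIN (-(1 << 31)) divided
by -1. The helper below is a minimal, hypothetical sketch of that rejection sampling with
numpy; make_div_operands and its parameters are illustrative, not the generator's own API.

    import numpy as np

    def make_div_operands(rng, shape):
        # Rejection-sample INT32 dividend/divisor tensors for a DIV test case.
        # Invalid inputs to avoid:
        #   1. divisor == 0
        #   2. dividend == -(1 << 31) with divisor == -1 (quotient overflows int32)
        while True:
            dividend = rng.integers(-(2 ** 31), 2 ** 31, size=shape, dtype=np.int64)
            divisor = rng.integers(-(2 ** 31), 2 ** 31, size=shape, dtype=np.int64)
            if (divisor == 0).any():
                continue
            if ((dividend == -(2 ** 31)) & (divisor == -1)).any():
                continue
            return dividend.astype(np.int32), divisor.astype(np.int32)

    dividend, divisor = make_div_operands(np.random.default_rng(0), (1, 4, 4, 4))

Checking the overflow condition per element, as in this sketch, rejects fewer candidate
tensors than checking the two conditions independently, while still excluding the invalid case.
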
@@ -2094,56 +2162,48 @@ class TosaTestGen: "build_fcn": (build_unary, TosaTensorGen.tgBasic, None), "types": TYPE_FI32, }, - "bitwise_not": { "op": Op.BITWISE_NOT, "operands": (1, 0), "build_fcn": (build_unary, TosaTensorGen.tgBasic, None), "types": TYPE_INT, }, - "ceil": { "op": Op.CEIL, "operands": (1, 0), "build_fcn": (build_unary, TosaTensorGen.tgBasic, None), "types": TYPE_FP, }, - "clz": { "op": Op.CLZ, "operands": (1, 0), "build_fcn": (build_unary, TosaTensorGen.tgBasic, None), "types": [DType.INT32], }, - "exp": { "op": Op.EXP, "operands": (1, 0), "build_fcn": (build_unary, TosaTensorGen.tgBasic, None), "types": TYPE_FP, }, - "floor": { "op": Op.FLOOR, "operands": (1, 0), "build_fcn": (build_unary, TosaTensorGen.tgBasic, None), "types": TYPE_FP, }, - "log": { "op": Op.LOG, "operands": (1, 0), "build_fcn": (build_unary, TosaTensorGen.tgBasic, None), "types": TYPE_FP, }, - "logical_not": { "op": Op.LOGICAL_NOT, "operands": (1, 0), "build_fcn": (build_unary, TosaTensorGen.tgBasic, None), "types": TYPE_BOOL, }, - "negate": { "op": Op.NEGATE, "operands": (1, 0), @@ -2151,21 +2211,18 @@ class TosaTestGen: "qgen": TosaQuantGen.qgUnary, "types": TYPE_INT_FP, }, - "reciprocal": { "op": Op.RECIPROCAL, "operands": (1, 0), "build_fcn": (build_unary, TosaTensorGen.tgBasic, None), "types": TYPE_FP, }, - "rsqrt": { "op": Op.RSQRT, "operands": (1, 0), "build_fcn": (build_unary, TosaTensorGen.tgBasic, None), "types": TYPE_FP, }, - # Elementwise Ternary operators "select": { "op": Op.SELECT, @@ -2173,7 +2230,6 @@ class TosaTestGen: "build_fcn": (build_select, TosaTensorGen.tgBroadcastFuzz, None), "types": TYPE_FIB, }, - # Comparison operators "equal": { "op": Op.EQUAL, @@ -2181,21 +2237,18 @@ class TosaTestGen: "build_fcn": (build_comparison, TosaTensorGen.tgBroadcastFuzz, None), "types": TYPE_FI32, }, - "greater_equal": { "op": Op.GREATER_EQUAL, "operands": (2, 0), "build_fcn": (build_comparison, TosaTensorGen.tgBroadcastFuzz, None), "types": TYPE_FI32, }, - "greater": { "op": Op.GREATER, "operands": (2, 0), "build_fcn": (build_comparison, TosaTensorGen.tgBroadcastFuzz, None), "types": TYPE_FI32, }, - # Reduction operators "reduce_all": { "op": Op.REDUCE_ALL, @@ -2203,42 +2256,36 @@ class TosaTestGen: "build_fcn": (build_reduce, TosaTensorGen.tgBasic, TosaArgGen.agAxis), "types": TYPE_BOOL, }, - "reduce_any": { "op": Op.REDUCE_ANY, "operands": (1, 0), "build_fcn": (build_reduce, TosaTensorGen.tgBasic, TosaArgGen.agAxis), "types": TYPE_BOOL, }, - "reduce_max": { "op": Op.REDUCE_MAX, "operands": (1, 0), "build_fcn": (build_reduce, TosaTensorGen.tgBasic, TosaArgGen.agAxis), "types": TYPE_INT_FP, }, - "reduce_min": { "op": Op.REDUCE_MAX, "operands": (1, 0), "build_fcn": (build_reduce, TosaTensorGen.tgBasic, TosaArgGen.agAxis), "types": TYPE_INT_FP, }, - "reduce_product": { "op": Op.REDUCE_PRODUCT, "operands": (1, 0), "build_fcn": (build_reduce, TosaTensorGen.tgBasic, TosaArgGen.agAxis), "types": TYPE_FP, }, - "reduce_sum": { "op": Op.REDUCE_SUM, "operands": (1, 0), "build_fcn": (build_reduce, TosaTensorGen.tgBasic, TosaArgGen.agAxis), "types": TYPE_FI32, }, - # Data layout operators "concat": { "op": Op.CONCAT, @@ -2246,7 +2293,6 @@ class TosaTestGen: "build_fcn": (build_concat, TosaTensorGen.tgBasic, TosaArgGen.agAxis), "types": TYPE_FIB, }, - "pad": { "op": Op.PAD, "operands": (1, 0), @@ -2254,35 +2300,30 @@ class TosaTestGen: "qgen": TosaQuantGen.qgPad, "types": TYPE_FIB, }, - "reshape": { "op": Op.RESHAPE, "operands": (1, 0), "build_fcn": (build_reshape, TosaTensorGen.tgBasic, 
TosaArgGen.agReshape), "types": TYPE_FIB, }, - "reverse": { "op": Op.REVERSE, "operands": (1, 0), "build_fcn": (build_reverse, TosaTensorGen.tgBasic, TosaArgGen.agAxis), "types": TYPE_FIB, }, - "slice": { "op": Op.SLICE, "operands": (1, 0), "build_fcn": (build_slice, TosaTensorGen.tgBasic, TosaArgGen.agSlice), "types": TYPE_FIB, }, - "tile": { "op": Op.TILE, "operands": (1, 0), "build_fcn": (build_tile, TosaTensorGen.tgBasic, TosaArgGen.agTile), "types": TYPE_FIB, }, - "transpose": { "op": Op.TRANSPOSE, "operands": (1, 0), @@ -2294,7 +2335,6 @@ class TosaTestGen: ), "types": TYPE_FIB, }, - # Data nodes "const": { "op": Op.CONST, @@ -2302,28 +2342,12 @@ class TosaTestGen: "build_fcn": (build_placeholder, TosaTensorGen.tgBasic, None), "types": TYPE_FIB, }, - "identity": { "op": Op.IDENTITY, "operands": (1, 0), "build_fcn": (build_unary, TosaTensorGen.tgBasic, None), "types": TYPE_FIB, }, - - "identityn": { - "op": Op.IDENTITYN, - "operands": (2, 0), - "build_fcn": (build_identityn, TosaTensorGen.tgBasic, None), - "types": TYPE_FIB, - }, - - "placeholder": { - "op": Op.PLACEHOLDER, - "operands": (1, 0), - "build_fcn": (build_placeholder, TosaTensorGen.tgBasic, None), - "types": TYPE_FIB, - }, - # Scatter/Gather "gather": { "op": Op.GATHER, @@ -2333,7 +2357,6 @@ class TosaTestGen: "build_fcn": (build_gather, TosaTensorGen.tgBasic, None), "types": TYPE_INT_FP, }, - "scatter": { "op": Op.SCATTER, # Only specify 'values_in' tensor here. @@ -2343,7 +2366,6 @@ class TosaTestGen: "build_fcn": (build_scatter, TosaTensorGen.tgScatter, None), "types": TYPE_INT_FP, }, - # Image operations "resize": { "op": Op.RESIZE, @@ -2352,7 +2374,6 @@ class TosaTestGen: "build_fcn": (build_resize, TosaTensorGen.tgNHWC, TosaArgGen.agResize), "types": [DType.INT8, DType.INT16, DType.FLOAT], }, - # Type conversion "cast": { "op": Op.CAST, @@ -2360,18 +2381,14 @@ class TosaTestGen: "build_fcn": (build_cast, TosaTensorGen.tgBasic, TosaArgGen.agCast), "types": [DType.FLOAT, DType.INT8, DType.INT16, DType.INT32, DType.BOOL], }, - "rescale": { "op": Op.RESCALE, "operands": (1, 0), "build_fcn": (build_rescale, TosaTensorGen.tgBasic, TosaArgGen.agRescale), "types": [DType.INT8, DType.INT16, DType.INT32, DType.INT48], }, - # Custom # Not implemented. - - # Control flow operators # Two varients of cond_if, one that generates one of two constant tensors (no # inputs to the basic blocks, one output) and another that either adds or subtracts two tensors -- cgit v1.2.1
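
Note: for the integer MUL cases handled in tosa_test_gen.py above, the generator repeatedly
halves both operands until the rounded, shifted product fits in int32. The sketch below
condenses that loop; the function name and standalone form are illustrative, not the
generator's own helper.

    import numpy as np

    def shrink_until_mul_fits(a, b, shift):
        # Halve both operands until ((a * b) + round) >> shift lands in
        # [-(2**31) + 1, 2**31 - 1], mirroring the loop added in this patch.
        a64 = a.astype(np.int64)
        b64 = b.astype(np.int64)
        while True:
            if shift > 0:
                result = ((a64 * b64) + (1 << (shift - 1))) >> shift
            else:
                result = a64 * b64
            if (result > -(2 ** 31)).all() and (result <= 2 ** 31 - 1).all():
                return a64.astype(np.int32), b64.astype(np.int32)
            a64 = a64 // 2
            b64 = b64 // 2

Halving both tensors preserves the broadcast shapes while quickly pulling the product back
into range, so the loop terminates after at most a few dozen iterations.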