21 #include <schema_generated.h> 23 #include <flatbuffers/flexbuffers.h> 25 #include <boost/assert.hpp> 26 #include <boost/format.hpp> 27 #include <boost/numeric/conversion/cast.hpp> 28 #include <boost/filesystem.hpp> 36 #define ARMNN_THROW_PARSE_EXCEPTION(msg) \ 38 throw armnn::ParseException( static_cast<const std::stringstream&>( std::stringstream() << msg \ 40 << CHECK_LOCATION().AsString()).str()); \ 43 using namespace armnn;
// Sentinel operator index for "virtual" operators (e.g. graph inputs/outputs)
// that have no real entry in a subgraph's operator list; exempted from the
// operator-index range check below.
50 const uint32_t VIRTUAL_OPERATOR_ID = std::numeric_limits<uint32_t>::max();
// CheckSubgraph: throws ParseException when the model pointer is null or the
// subgraph index is out of range. (Fragmentary view — the enclosing signature
// and the throw statements are not visible here.)
56 if (model.get() ==
nullptr)
60 boost::format(
"%1% was called with invalid (null) model. " 61 "Possible reason is that the model is not yet loaded and Unpack(ed). " 62 "subgraph:%2% at %3%") %
67 else if (subgraphIndex >= model->subgraphs.size())
71 boost::format(
"%1% was called with an invalid subgraph index. " 72 "subgraph:%2% at %3%") %
// CheckModel: same null-model/subgraph validation as CheckSubgraph plus an
// operator-index range check; VIRTUAL_OPERATOR_ID is deliberately exempt.
79 #define CHECK_SUBGRAPH(MODEL, SUBGRAPH_INDEX) \ 80 CheckSubgraph(MODEL, SUBGRAPH_INDEX, CHECK_LOCATION()) 87 if (model.get() ==
nullptr)
91 boost::format(
"%1% was called with invalid (null) model. " 92 "Possible reason is that the model is not yet loaded and Unpack(ed). " 93 "subgraph:%2% operator:%3% at %4%") %
99 else if (subgraphIndex >= model->subgraphs.size())
103 boost::format(
"%1% was called with an invalid subgraph index. " 104 "subgraph:%2% operator:%3% at %4%") %
110 else if (operatorIndex >= model->subgraphs[subgraphIndex]->operators.size() &&
111 operatorIndex != VIRTUAL_OPERATOR_ID)
115 boost::format(
"%1% was called with an invalid operator index. " 116 "subgraph:%2% operator:%3% at %4%") %
124 #define CHECK_MODEL(MODEL, SUBGRAPH_INDEX, OPERATOR_INDEX) \ 125 CheckModel(MODEL, SUBGRAPH_INDEX, OPERATOR_INDEX, CHECK_LOCATION()) 128 size_t subgraphIndex,
// CheckTensor: model/subgraph validity are programmer-error asserts here
// (callers are expected to have run CHECK_MODEL already); an out-of-range
// tensor index is malformed input data and throws ParseException.
134 BOOST_ASSERT_MSG(model.get() !=
nullptr,
"Expecting a valid model in this function");
138 BOOST_ASSERT_MSG(subgraphIndex < model->subgraphs.size(),
"Expecting a valid subgraph index");
141 if (tensorIndex >= model->subgraphs[subgraphIndex]->tensors.size())
145 boost::format(
"%1% was called with an invalid tensor index. " 146 "subgraph:%2% tensor:%3% at %4%") %
// CheckTensorPtr: rejects a null raw tensor pointer with a ParseException.
154 #define CHECK_TENSOR(MODEL, SUBGRAPH_INDEX, TENSOR_INDEX) \ 155 CheckTensor(MODEL, SUBGRAPH_INDEX, TENSOR_INDEX, CHECK_LOCATION()) 160 if (rawPtr ==
nullptr)
164 boost::format(
"%1% was called with a null tensor pointer. " 172 #define CHECK_TENSOR_PTR(TENSOR_PTR) \ 173 CheckTensorPtr(TENSOR_PTR, CHECK_LOCATION()) 179 if (model.get() ==
// CheckBuffer: validates, in order, the model pointer, the buffer index range,
// and finally that the referenced buffer object itself is non-null.
nullptr)
183 boost::format(
"%1% was called with invalid (null) model. " 184 "Possible reason is that the model is not yet loaded and Unpack(ed). " 185 "buffer:%2% at %3%") %
190 else if (bufferIndex >= model->buffers.size())
194 boost::format(
"%1% was called with an invalid buffer index. " 195 "buffer index:%2% at %3%") %
200 else if (model->buffers[bufferIndex].get() ==
nullptr)
204 boost::format(
// NOTE(review): this format string references %1% and %3% but never %2%.
// boost::format derives the expected argument count from the highest
// placeholder, so feeding only two arguments raises boost::io::too_few_args
// instead of producing this message — the second placeholder was presumably
// meant to be %2%. Verify against the (not visible) argument feed before
// changing the string.
"The buffer #%1% is null. %3%") %
210 #define CHECK_BUFFER(MODEL, BUFFER_INDEX) \ 211 CheckBuffer(MODEL, BUFFER_INDEX, CHECK_LOCATION()) 213 void CheckBufferSize(TfLiteParser::BufferRawPtr bufferPtr,
// CheckBufferSize: throws when bufferPtr is null; otherwise (in code not fully
// visible here) compares the buffer's byte size against the tensor's expected
// size and reports a mismatch using the diagnostic assembled below.
218 if (bufferPtr ==
nullptr)
222 boost::format(
"BufferPtr is null for buffer:%1%. %2%") %
229 std::stringstream ss;
230 ss <<
"Buffer #" << bufferId <<
" has " << bufferPtr->data.size() <<
" bytes. " 231 <<
"For tensor: " << tensorInfo.
GetShape()
232 <<
" expecting: " << tensorInfo.
GetNumBytes() <<
// IsActivationSupported: whitelist of fused activations the parser can attach
// (NONE/RELU/RELU6/TANH); anything else is rejected by the macro below at
// operator-parse time.
// NOTE(review): "suppport" in the macro's exception message is a typo in a
// runtime string; left untouched since this edit only adds comments.
" bytes and " 238 #define CHECK_BUFFER_SIZE(BUFFER_PTR, TENSOR_INFO, BUFFER_ID) \ 239 CheckBufferSize(BUFFER_PTR, TENSOR_INFO, BUFFER_ID, CHECK_LOCATION()) 243 switch(activationType)
245 case tflite::ActivationFunctionType_NONE:
246 case tflite::ActivationFunctionType_RELU:
247 case tflite::ActivationFunctionType_RELU6:
248 case tflite::ActivationFunctionType_TANH:
259 #define CHECK_SUPPORTED_FUSED_ACTIVATION(OPTION, SUBGRAPH_INDEX, OPERATOR_INDEX) \ 261 if (IsActivationSupported(OPTION->fused_activation_function) == false) \ 263 throw ParseException( \ 265 boost::format("TfLite parser doesn't suppport fused activation: " \ 266 "%1%/%2% in %3% subgraph:%4% operator:%5% at %6%") % \ 267 OPTION->fused_activation_function % \ 268 tflite::EnumNameActivationFunctionType(\ 269 OPTION->fused_activation_function) % \ 273 CHECK_LOCATION().FileLine())); \ 278 std::vector<unsigned int> AsUnsignedVector(
// AsUnsignedVector: converts a vector of int32_t (flatbuffer shape/index data)
// to unsigned int. The per-element conversion loop and return statement fall
// outside this view.
const std::vector<int32_t> & in)
280 std::vector<unsigned int> result;
// Reserve up front — one allocation instead of repeated growth.
281 result.reserve(in.size());
// CalcPadding: computes TFLite SAME-scheme padding. For non-SAME padding the
// out-params are presumably left at caller-initialised values (the leading
// parameters of the signature are outside this view — verify).
293 uint32_t& paddingFront,
294 uint32_t& paddingBack,
295 tflite::Padding padding)
299 if (padding == tflite::Padding_SAME)
// ceil(inputSize / stride)
301 uint32_t outputSize = (inputSize + stride - 1) / stride;
// Effective (dilated) kernel extent; algebraically equal to
// dilation * (filterSize - 1) + 1.
302 uint32_t dilatedSize = filterSize + (dilation - 1) * (filterSize - 1);
303 uint32_t temp = (outputSize - 1) * stride + dilatedSize;
304 if (temp > inputSize)
// Split total padding evenly; the back side takes any odd remainder —
// matching the TensorFlow Lite SAME padding convention.
306 paddingFront = (temp - inputSize) / 2;
307 paddingBack = (temp - inputSize) - paddingFront;
// ToTensorInfo: maps a TfLite tensor's element type, shape, and quantization
// metadata onto an armnn::TensorInfo. Only fragments of the type switch and
// the quantization handling are visible in this view.
318 switch (tensorPtr->type)
320 case tflite::TensorType_UINT8:
323 case tflite::TensorType_FLOAT32:
326 case tflite::TensorType_INT8:
// NOTE(review): this INT8 branch dereferences tensorPtr->quantization without
// a visible null check (the guard at original line 366 comes later) — confirm
// a non-null quantization block is guaranteed on this path.
327 if (tensorPtr->quantization->zero_point.size() == 1)
338 case tflite::TensorType_INT16:
341 case tflite::TensorType_INT32:
// Any other element type is unsupported and reported via ParseException.
350 boost::format(
"Unsupported data type %1% = %2% for tensor: %3%. %4%") %
352 tflite::EnumNameTensorType(tensorPtr->type) %
// A rank-0 (scalar) shape is promoted to {1} so the TensorInfo always has at
// least one dimension.
357 std::vector<unsigned int> safeShape = shapes;
358 if (safeShape.size() == 0)
360 safeShape.push_back(1);
363 float quantizationScale = 0.0f;
364 int32_t quantizationOffset = 0;
366 if (tensorPtr->quantization.get())
// Per-tensor quantization: at most one scale / zero-point value.
368 if (tensorPtr->quantization->scale.size() <= 1)
373 if (tensorPtr->quantization->scale.size() == 1)
375 quantizationScale = tensorPtr->quantization->scale[0];
377 if (tensorPtr->quantization->zero_point.size() == 1)
// Per-channel (per-axis) quantization: copy the full scale vector and use
// quantized_dimension to select the axis.
394 std::vector<float> quantizationScales;
395 std::vector<int32_t> quantizationOffsets;
398 std::copy(tensorPtr->quantization->scale.begin(),
399 tensorPtr->quantization->scale.end(),
400 std::back_inserter(quantizationScales));
408 tensorPtr->quantization->quantized_dimension)]);
// Convenience overload: derive the dimensions from the tensor's own shape.
426 auto const & dimensions = AsUnsignedVector(tensorPtr->shape);
427 return ToTensorInfo(tensorPtr, dimensions, dimensionMappings);
// CreateConstTensorImpl<T>: copies a flatbuffer-owned weight/bias buffer into
// freshly allocated memory and wraps it in an armnn::ConstTensor. Returns the
// tensor together with the owning unique_ptr so the caller controls the
// lifetime of the copied data.
431 std::pair<armnn::ConstTensor, std::unique_ptr<T[]>>
432 CreateConstTensorImpl(TfLiteParser::BufferRawPtr bufferPtr,
438 BOOST_ASSERT_MSG(tensorPtr !=
nullptr,
"tensorPtr is null");
439 BOOST_ASSERT_MSG(bufferPtr !=
nullptr,
441 boost::format(
"Buffer for buffer:%1% is null") % tensorPtr->buffer).c_str());
// Branch not fully visible: one path (presumably when a permutation applies)
// copies element-wise through typed pointers ...
449 reinterpret_cast<const T*
>(bufferPtr->data.data()), data.get(),
sizeof(T));
// ... the other does a straight byte copy of the whole tensor payload.
453 ::memcpy(data.get(), bufferPtr->data.data(), tensorInfo.
GetNumBytes());
456 return std::make_pair(
ConstTensor(tensorInfo, data.get()), std::move(data));
// Index-list comparison helper: verifies 'actual' (unsigned) matches
// 'expected' (signed); size mismatch or any negative expected value counts
// as a mismatch.
469 if (actualSize != expected.size())
474 for (
unsigned int i = 0u; i < actualSize; i++)
476 if (expected[i] < 0 ||
477 actual[i] != static_cast<unsigned int>(expected[i]))
// TfLiteParser constructor: every builtin opcode slot defaults to
// ParseUnsupportedOperator; supported builtins are then overridden with their
// dedicated handlers, and known custom operators are registered by name.
490 , m_Network(nullptr, nullptr)
491 , m_ParserFunctions(tflite::BuiltinOperator_MAX+1, &
TfLiteParser::ParseUnsupportedOperator)
494 m_ParserFunctions[tflite::BuiltinOperator_ADD] = &TfLiteParser::ParseAdd;
495 m_ParserFunctions[tflite::BuiltinOperator_AVERAGE_POOL_2D] = &TfLiteParser::ParseAveragePool2D;
496 m_ParserFunctions[tflite::BuiltinOperator_BATCH_TO_SPACE_ND] = &TfLiteParser::ParseBatchToSpaceND;
497 m_ParserFunctions[tflite::BuiltinOperator_CONCATENATION] = &TfLiteParser::ParseConcatenation;
498 m_ParserFunctions[tflite::BuiltinOperator_CONV_2D] = &TfLiteParser::ParseConv2D;
499 m_ParserFunctions[tflite::BuiltinOperator_CUSTOM] = &TfLiteParser::ParseCustomOperator;
500 m_ParserFunctions[tflite::BuiltinOperator_DEPTHWISE_CONV_2D] = &TfLiteParser::ParseDepthwiseConv2D;
501 m_ParserFunctions[tflite::BuiltinOperator_DEQUANTIZE] = &TfLiteParser::ParseDequantize;
502 m_ParserFunctions[tflite::BuiltinOperator_FULLY_CONNECTED] = &TfLiteParser::ParseFullyConnected;
503 m_ParserFunctions[tflite::BuiltinOperator_LOGISTIC] = &TfLiteParser::ParseLogistic;
504 m_ParserFunctions[tflite::BuiltinOperator_L2_NORMALIZATION] = &TfLiteParser::ParseL2Normalization;
505 m_ParserFunctions[tflite::BuiltinOperator_MAX_POOL_2D] = &TfLiteParser::ParseMaxPool2D;
506 m_ParserFunctions[tflite::BuiltinOperator_MAXIMUM] = &TfLiteParser::ParseMaximum;
507 m_ParserFunctions[tflite::BuiltinOperator_MEAN] = &TfLiteParser::ParseMean;
508 m_ParserFunctions[tflite::BuiltinOperator_MINIMUM] = &TfLiteParser::ParseMinimum;
509 m_ParserFunctions[tflite::BuiltinOperator_MUL] = &TfLiteParser::ParseMul;
510 m_ParserFunctions[tflite::BuiltinOperator_PACK] = &TfLiteParser::ParsePack;
511 m_ParserFunctions[tflite::BuiltinOperator_PAD] = &TfLiteParser::ParsePad;
512 m_ParserFunctions[tflite::BuiltinOperator_QUANTIZE] = &TfLiteParser::ParseQuantize;
513 m_ParserFunctions[tflite::BuiltinOperator_RELU] = &TfLiteParser::ParseRelu;
514 m_ParserFunctions[tflite::BuiltinOperator_RELU6] = &TfLiteParser::ParseRelu6;
515 m_ParserFunctions[tflite::BuiltinOperator_RESHAPE] = &TfLiteParser::ParseReshape;
516 m_ParserFunctions[tflite::BuiltinOperator_RESIZE_BILINEAR] = &TfLiteParser::ParseResizeBilinear;
517 m_ParserFunctions[tflite::BuiltinOperator_RESIZE_NEAREST_NEIGHBOR] = &TfLiteParser::ParseResizeNearestNeighbor;
518 m_ParserFunctions[tflite::BuiltinOperator_SLICE] = &TfLiteParser::ParseSlice;
519 m_ParserFunctions[tflite::BuiltinOperator_SOFTMAX] = &TfLiteParser::ParseSoftmax;
520 m_ParserFunctions[tflite::BuiltinOperator_SPACE_TO_BATCH_ND] = &TfLiteParser::ParseSpaceToBatchND;
521 m_ParserFunctions[tflite::BuiltinOperator_SPLIT] = &TfLiteParser::ParseSplit;
522 m_ParserFunctions[tflite::BuiltinOperator_SQUEEZE] = &TfLiteParser::ParseSqueeze;
523 m_ParserFunctions[tflite::BuiltinOperator_STRIDED_SLICE] = &TfLiteParser::ParseStridedSlice;
524 m_ParserFunctions[tflite::BuiltinOperator_SUB] = &TfLiteParser::ParseSub;
525 m_ParserFunctions[tflite::BuiltinOperator_TANH] = &TfLiteParser::ParseTanH;
526 m_ParserFunctions[tflite::BuiltinOperator_TRANSPOSE] = &TfLiteParser::ParseTranspose;
527 m_ParserFunctions[tflite::BuiltinOperator_TRANSPOSE_CONV] = &TfLiteParser::ParseTransposeConv;
528 m_ParserFunctions[tflite::BuiltinOperator_UNPACK] = &TfLiteParser::ParseUnpack;
// Custom (non-builtin) operators, keyed by their custom_code string.
531 m_CustomParserFunctions[
"TFLite_Detection_PostProcess"] = &TfLiteParser::ParseDetectionPostProcess;
// ResetParser: clears per-model state so the parser instance can be reused.
// (Additional resets at original lines 535-537 are outside this view.)
534 void TfLiteParser::ResetParser()
538 m_SubgraphConnections.clear();
// AddBroadcastReshapeLayer: when a binary op's two inputs have different
// ranks, inserts a Reshape on the lower-rank input that left-pads its shape
// with 1s so both inputs end up with equal rank (broadcast-compatible).
541 void TfLiteParser::AddBroadcastReshapeLayer(
size_t subgraphIndex,
542 size_t operatorIndex,
545 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
546 BOOST_ASSERT(layer !=
nullptr);
548 const auto & subgraphPtr = m_Model->subgraphs[subgraphIndex];
549 const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
// A broadcastable binary operator must have at least two inputs.
551 BOOST_ASSERT(operatorPtr->inputs.size() > 1);
554 TensorRawPtr tensorPtr = subgraphPtr->tensors[reshapedInputId].get();
556 TensorRawPtr tensorPtr1 = subgraphPtr->tensors[inputId].get();
// Swap so 'reshapedInputId' always names the smaller-rank input (the
// condition that triggers this swap is outside this view).
563 uint32_t
id = reshapedInputId;
564 reshapedInputId = inputId;
// Build the padded shape: right-align the original dims inside a vector of
// 1s of the target rank (std::copy_backward fills from the end).
573 std::vector<unsigned> reshapedDim;
576 reshapedDim.push_back(reshapedTensorInfo.
GetShape()[i]);
579 std::vector<unsigned int> reshapedDimensions(numDimensions, 1);
580 std::copy_backward (reshapedDim.begin(), reshapedDim.end(), reshapedDimensions.end());
584 std::string layerName = boost::str(boost::format(
"Reshape_for:%1%") % layer->
GetName());
// Rewire: the reshape consumes the original tensor; the target layer now
// consumes the reshape's output.
592 RegisterInputSlots(subgraphIndex, operatorIndex, reshapeLayer, {reshapedInputId});
595 RegisterConsumerOfTensor(subgraphIndex, inputId, input1Slot);
// CreateNetworkFromBinaryFile / CreateNetworkFromBinary (bodies only
// partially visible): load and unpack the model, then delegate here.
602 return CreateNetworkFromModel();
609 return CreateNetworkFromModel();
// CreateNetworkFromModel: walks the single supported subgraph, dispatches
// each operator to its parser, wires up all recorded tensor connections, and
// returns ownership of the built INetwork.
614 m_Network = INetwork::Create();
615 BOOST_ASSERT(m_Model.get() !=
nullptr);
617 bool failedToCreate =
false;
618 std::stringstream errors;
// Multiple subgraphs are not supported — fail fast with a clear message.
620 if (m_Model->subgraphs.size() != 1)
624 boost::format(
"Current TfLite parser only supports 1 subgraph. Current one has: %1% %2%") %
625 m_Model->subgraphs.size() %
629 size_t subgraphIndex = 0;
630 for (
SubgraphPtr const & subgraph : m_Model->subgraphs)
// One TensorSlots entry per tensor in this subgraph.
632 m_SubgraphConnections.emplace_back(subgraph->tensors.size());
634 size_t operatorIndex = 0;
639 auto const & opCodePtr = m_Model->operator_codes[op->opcode_index];
640 auto builtinCode = opCodePtr->builtin_code;
// Guard the table lookup below against out-of-range opcodes.
642 if (builtinCode > tflite::BuiltinOperator_MAX)
646 boost::format(
"Operator code %1% is out of range 0-%2%. " 647 "subgraph:%3% operator idx:%4%. %5%") %
649 tflite::BuiltinOperator_MAX %
// Dispatch through the opcode-indexed member-function-pointer table.
656 auto & parserFunction = m_ParserFunctions[builtinCode];
657 (this->*parserFunction)(subgraphIndex, operatorIndex);
// Individual operator failures are collected rather than rethrown
// immediately, so one pass can report every failing operator.
661 failedToCreate =
true;
662 std::stringstream errorString;
664 errorString <<
"Failed to parse operator #" << operatorIndex
665 <<
" within subgraph #" << subgraphIndex
666 <<
" error: " << e.
what();
669 errors << errorString.str() <<
"\n";
674 SetupInputLayers(subgraphIndex);
675 SetupOutputLayers(subgraphIndex);
676 SetupConstantLayers(subgraphIndex);
// Second pass: connect every producer output slot to all of the consumer
// input slots recorded during operator parsing.
688 for (
size_t subgraphIndex = 0; subgraphIndex < m_SubgraphConnections.size(); ++subgraphIndex)
690 for (
size_t tensorIndex = 0; tensorIndex < m_SubgraphConnections[subgraphIndex].size(); ++tensorIndex)
692 if (m_SubgraphConnections[subgraphIndex][tensorIndex].outputSlot !=
nullptr)
694 for (
size_t inputSlotIdx = 0;
695 inputSlotIdx < m_SubgraphConnections[subgraphIndex][tensorIndex].inputSlots.size();
698 m_SubgraphConnections[subgraphIndex][tensorIndex].outputSlot->Connect(
699 *(m_SubgraphConnections[subgraphIndex][tensorIndex].inputSlots[inputSlotIdx]));
// Ownership of the built network transfers to the caller.
705 return std::move(m_Network);
// RegisterProducerOfTensor: records 'slot' as the unique producer of a tensor.
// A second registration for the same tensor indicates a malformed model and
// raises a ParseException.
708 void TfLiteParser::RegisterProducerOfTensor(
size_t subgraphIndex,
713 BOOST_ASSERT(m_SubgraphConnections.size() > subgraphIndex);
714 BOOST_ASSERT(m_SubgraphConnections[subgraphIndex].size() > tensorIndex);
716 TensorSlots & tensorSlots = m_SubgraphConnections[subgraphIndex][tensorIndex];
719 if (tensorSlots.outputSlot !=
nullptr)
722 boost::format(
"Another layer has already registered itself as the producer of " 723 "subgraph:%1% tensor:%2% %3%") %
729 tensorSlots.outputSlot = slot;
// RegisterConsumerOfTensor: a tensor may have any number of consumers, so the
// input slot is simply appended to the tensor's consumer list.
732 void TfLiteParser::RegisterConsumerOfTensor(
size_t subgraphIndex,
737 BOOST_ASSERT(m_SubgraphConnections.size() > subgraphIndex);
738 BOOST_ASSERT(m_SubgraphConnections[subgraphIndex].size() > tensorIndex);
740 TensorSlots & tensorSlots = m_SubgraphConnections[subgraphIndex][tensorIndex];
741 tensorSlots.inputSlots.push_back(slot);
744 void TfLiteParser::ParseCustomOperator(
size_t subgraphIndex,
size_t operatorIndex)
746 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
749 auto customParserFunction = &TfLiteParser::ParseUnsupportedOperator;
752 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
753 const auto& customCode = m_Model->operator_codes[operatorPtr->opcode_index]->custom_code;
756 auto iterator = m_CustomParserFunctions.find(customCode);
757 if (iterator != m_CustomParserFunctions.end())
759 customParserFunction = iterator->second;
763 (this->*customParserFunction)(subgraphIndex, operatorIndex);
// ParseUnsupportedOperator: reached for any opcode without a dedicated parser.
// Without the StandInLayerForUnsupported option it throws ParseException;
// with it, a StandIn layer with matching input/output counts keeps the graph
// loadable for inspection.
766 void TfLiteParser::ParseUnsupportedOperator(
size_t subgraphIndex,
size_t operatorIndex)
768 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
770 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
772 auto opcodeIndex = operatorPtr->opcode_index;
773 auto opcode = m_Model->operator_codes[opcodeIndex]->builtin_code;
775 if (!m_Options || !m_Options.value().m_StandInLayerForUnsupported)
780 boost::format(
"Operator not supported. " 781 "subgraph:%1% operator:%2% " 782 "opcode_index:%3% opcode:%4% / %5% %6%") %
787 tflite::EnumNameBuiltinOperator(opcode) %
791 auto inputs =
GetInputs(m_Model, subgraphIndex, operatorIndex);
792 auto outputs =
GetOutputs(m_Model, subgraphIndex, operatorIndex);
798 auto layerName = boost::str(boost::format(
"StandIn:%1%:%2%:%3%") % subgraphIndex % operatorIndex % opcode);
801 IConnectableLayer* layer = m_Network->AddStandInLayer(descriptor, layerName.c_str());
// Propagate each output's TensorInfo (loop body is outside this view).
802 for (
unsigned int i = 0u; i < numOutputs; ++i)
807 auto inputTensorIds = AsUnsignedVector(
GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
808 auto outputTensorIds = AsUnsignedVector(
GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
// Unlike most parsers, ALL input and output tensors are wired through.
810 RegisterInputSlots(subgraphIndex, operatorIndex, layer, inputTensorIds);
811 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIds);
// ParseConv2D: builds an ArmNN Convolution2d layer from a TfLite CONV_2D
// operator — constant weights (plus optional bias when three inputs are
// present), padding computed from input/filter extents, and an optional
// fused activation appended before the outputs are registered.
814 void TfLiteParser::ParseConv2D(
size_t subgraphIndex,
size_t operatorIndex)
816 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
818 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
819 const auto *
options = operatorPtr->builtin_options.AsConv2DOptions();
831 auto inputs =
GetInputs(m_Model, subgraphIndex, operatorIndex);
834 auto outputs =
GetOutputs(m_Model, subgraphIndex, operatorIndex);
// Dims [1] and [2] are spatial height/width (NHWC layout assumed — confirm
// against the descriptor setup, which is outside this view).
841 unsigned int inputHeight = inputTensorInfo.GetShape()[1];
842 unsigned int inputWidth = inputTensorInfo.GetShape()[2];
846 unsigned int filterHeight = filterTensorInfo.
GetShape()[1];
847 unsigned int filterWidth = filterTensorInfo.
GetShape()[2];
// Weights are input[1]; an optional bias is input[2].
854 auto filterTensorAndData = CreateConstTensor(inputs[1],
859 auto layerName = boost::str(boost::format(
"Conv2D:%1%:%2%") % subgraphIndex % operatorIndex);
861 if (inputs.size() == 3)
865 auto biasTensorAndData = CreateConstTensor(inputs[2],
868 layer = m_Network->AddConvolution2dLayer(desc,
869 filterTensorAndData.first,
875 layer = m_Network->AddConvolution2dLayer(desc,
876 filterTensorAndData.first,
881 BOOST_ASSERT(layer !=
nullptr);
// Only the data input (index 0) becomes a graph connection; weights and bias
// are embedded in the layer as constants.
888 auto inputTensorIndexes = AsUnsignedVector(
GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
889 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
// Any fused activation is appended here, and the activation layer (if added)
// becomes the one whose output slot is registered below.
891 layer = AddFusedActivationLayer(layer, 0,
options->fused_activation_function);
893 auto outputTensorIndexes = AsUnsignedVector(
GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
894 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
// ParseDepthwiseConv2D: like ParseConv2D but for DEPTHWISE_CONV_2D. The
// filter tensor's shape is rewritten (SetShape) before being wrapped as a
// constant, and a permutation vector is applied during the copy.
897 void TfLiteParser::ParseDepthwiseConv2D(
size_t subgraphIndex,
size_t operatorIndex)
899 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
901 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
902 const auto *
options = operatorPtr->builtin_options.AsDepthwiseConv2DOptions();
913 auto inputs =
GetInputs(m_Model, subgraphIndex, operatorIndex);
915 auto outputs =
GetOutputs(m_Model, subgraphIndex, operatorIndex);
927 unsigned int inputHeight = inputTensorInfo.
GetShape()[1];
928 unsigned int inputWidth = inputTensorInfo.
GetShape()[2];
931 unsigned int filterHeight = filterTensorInfo.
GetShape()[1];
932 unsigned int filterWidth = filterTensorInfo.
GetShape()[2];
// Reshape the filter metadata before creating the constant tensor (the
// remaining shape components are outside this view).
935 filterTensorInfo.
SetShape({ filterHeight,
945 auto filterTensorAndData = CreateConstTensor(inputs[1], filterTensorInfo, permutationVector);
947 auto layerName = boost::str(boost::format(
"DepthwiseConv2D:%1%:%2%") % subgraphIndex % operatorIndex);
// Three inputs means a bias tensor is present.
949 if (inputs.size() == 3)
953 auto biasTensorAndData = CreateConstTensor(inputs[2],
956 layer = m_Network->AddDepthwiseConvolution2dLayer(desc,
957 filterTensorAndData.first,
963 layer = m_Network->AddDepthwiseConvolution2dLayer(desc,
964 filterTensorAndData.first,
968 BOOST_ASSERT(layer !=
nullptr);
// Only the data input is a graph connection; weights/bias are constants.
975 auto inputTensorIndexes = AsUnsignedVector(
GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
976 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
978 layer = AddFusedActivationLayer(layer, 0,
options->fused_activation_function);
980 auto outputTensorIndexes = AsUnsignedVector(
GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
981 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
// ParseDequantize: maps a DEQUANTIZE operator to the corresponding ArmNN
// layer (the layer-creation line itself is outside this view).
984 void TfLiteParser::ParseDequantize(
size_t subgraphIndex,
size_t operatorIndex)
986 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
988 auto inputs =
GetInputs(m_Model, subgraphIndex, operatorIndex);
991 auto outputs =
GetOutputs(m_Model, subgraphIndex, operatorIndex);
994 auto layerName = boost::str(boost::format(
"Dequantize:%1%:%2%") % subgraphIndex % operatorIndex);
997 BOOST_ASSERT(layer !=
nullptr);
1002 auto inputTensorIndexes = AsUnsignedVector(
GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1003 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
// Note: ALL output tensor ids are registered here, not just index 0.
1005 auto outputTensorIndexes = AsUnsignedVector(
GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1006 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
// ParseTranspose: maps a TRANSPOSE operator to an ArmNN Transpose layer. The
// optional second input holds the permutation vector, read out of its
// constant buffer via memcpy.
1009 void TfLiteParser::ParseTranspose(
size_t subgraphIndex,
size_t operatorIndex)
1011 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1013 auto inputs =
GetInputs(m_Model, subgraphIndex, operatorIndex);
1016 auto outputs =
GetOutputs(m_Model, subgraphIndex, operatorIndex);
1020 auto layerName = boost::str(boost::format(
"Transpose:%1%:%2%") % subgraphIndex % operatorIndex);
// With two inputs, input[1] supplies the permutation as constant data.
1024 if (inputs.size() == 2)
1029 std::vector<unsigned int> permuteShape(numPermVecElements);
// Raw byte copy of the (int32) permutation data into unsigned int storage —
// same element width, so the values carry over directly.
1030 ::memcpy(permuteShape.data(), permuteBufferPtr->data.data(), permuteTensorInfo.
GetNumBytes());
1036 layer = m_Network->AddTransposeLayer(desc, layerName.c_str());
1038 BOOST_ASSERT(layer !=
nullptr);
1041 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1043 auto inputTensorIndexes = AsUnsignedVector(
GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1044 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1046 auto outputTensorIndexes = AsUnsignedVector(
GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1047 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
// ParseTransposeConv: maps a TRANSPOSE_CONV operator to an ArmNN
// TransposeConvolution2d layer. Per the TfLite schema the operator's inputs
// are {output_shape, weights, input}, hence the data connection below uses
// input index 2 while weights come from index 1.
1050 void TfLiteParser::ParseTransposeConv(
size_t subgraphIndex,
size_t operatorIndex)
1052 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1054 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1055 const auto *
options = operatorPtr->builtin_options.AsTransposeConvOptions();
1063 auto inputs =
GetInputs(m_Model, subgraphIndex, operatorIndex);
1066 auto outputs =
GetOutputs(m_Model, subgraphIndex, operatorIndex);
1073 const unsigned int inputHeight = inputTensorInfo.GetShape()[1];
1074 const unsigned int inputWidth = inputTensorInfo.GetShape()[2];
1076 const unsigned int filterHeight = filterTensorInfo.
GetShape()[1];
1077 const unsigned int filterWidth = filterTensorInfo.
GetShape()[2];
1095 auto filterTensorAndData = CreateConstTensor(inputs[1],
1100 auto layerName = boost::str(boost::format(
"TransposeConv:%1%:%2%") % subgraphIndex % operatorIndex);
1102 layer = m_Network->AddTransposeConvolution2dLayer(desc,
1103 filterTensorAndData.first,
1107 BOOST_ASSERT(layer !=
nullptr);
// Data tensor is operator input index 2 (see schema note above).
1113 auto inputTensorIndexes = AsUnsignedVector(
GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1114 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[2]});
1116 auto outputTensorIndexes = AsUnsignedVector(
GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1117 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1120 void TfLiteParser::ParseAveragePool2D(
size_t subgraphIndex,
size_t operatorIndex)
1122 ParsePool(subgraphIndex, operatorIndex, PoolingAlgorithm::Average);
// ParseBatchToSpaceND: reads the block-shape and crops tensors from their
// constant buffers, converts crops to (begin, end) pairs, and adds an ArmNN
// BatchToSpaceNd layer.
1125 void TfLiteParser::ParseBatchToSpaceND(
size_t subgraphIndex,
size_t operatorIndex)
1127 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1129 auto inputs =
GetInputs(m_Model, subgraphIndex, operatorIndex);
1132 auto outputs =
GetOutputs(m_Model, subgraphIndex, operatorIndex);
// Block shape and crops are constant inputs, copied out by raw bytes.
1141 std::vector<unsigned int> blockShape(blockShapeTensorInfo.GetNumElements());
1142 ::memcpy(blockShape.data(), blockShapeBufferPtr->data.data(), blockShapeTensorInfo.GetNumBytes());
1144 std::vector<unsigned int> cropsVector(cropsTensorInfo.
GetNumElements());
1145 ::memcpy(cropsVector.data(), cropsBufferPtr->data.data(), cropsTensorInfo.
GetNumBytes());
// Re-pack the flat crops data into (front, back) pairs per dimension;
// 'step' (defined outside this view) is presumably 2.
1148 std::vector<std::pair<unsigned int, unsigned int>> crops;
1149 for (
unsigned int i = 0; i < cropsTensorInfo.
GetNumElements() / step; ++i)
1151 crops.emplace_back(cropsVector[i * step], cropsVector[i * step + 1]);
1161 auto layerName = boost::str(boost::format(
"BatchToSpaceND:%1%:%2%") % subgraphIndex % operatorIndex);
1162 IConnectableLayer* layer = m_Network->AddBatchToSpaceNdLayer(desc, layerName.c_str());
1166 auto inputTensorIndexes = AsUnsignedVector(
GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1167 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1169 auto outputTensorIndexes = AsUnsignedVector(
GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1170 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
// ParseL2Normalization: maps an L2_NORMALIZATION operator to an ArmNN
// L2Normalization layer; single data input, single output.
1173 void TfLiteParser::ParseL2Normalization(
size_t subgraphIndex,
size_t operatorIndex)
1175 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1177 auto inputs =
GetInputs(m_Model, subgraphIndex, operatorIndex);
1180 auto outputs =
GetOutputs(m_Model, subgraphIndex, operatorIndex);
1185 auto layerName = boost::str(boost::format(
"L2Normalization:%1%:%2%") % subgraphIndex % operatorIndex);
1186 IConnectableLayer* layer = m_Network->AddL2NormalizationLayer(desc, layerName.c_str());
1188 BOOST_ASSERT(layer !=
nullptr);
1193 auto inputTensorIndexes = AsUnsignedVector(
GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1194 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1196 auto outputTensorIndexes = AsUnsignedVector(
GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1197 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1200 void TfLiteParser::ParseMaxPool2D(
size_t subgraphIndex,
size_t operatorIndex)
1202 ParsePool(subgraphIndex, operatorIndex, PoolingAlgorithm::Max);
// ParseMaximum: element-wise max of two inputs. When the two inputs have
// different ranks, a broadcast Reshape is inserted (which takes over the
// input-slot registration); otherwise both inputs are wired directly.
1205 void TfLiteParser::ParseMaximum(
size_t subgraphIndex,
size_t operatorIndex)
1207 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1209 auto inputs =
GetInputs(m_Model, subgraphIndex, operatorIndex);
1212 auto outputs =
GetOutputs(m_Model, subgraphIndex, operatorIndex);
1218 auto layerName = boost::str(boost::format(
"Maximum:%1%:%2%") % subgraphIndex % operatorIndex);
1224 auto inputTensorIndexes = AsUnsignedVector(
GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1225 if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.
GetNumDimensions())
1227 AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer);
1231 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
1234 auto outputTensorIndexes = AsUnsignedVector(
GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1235 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
// ParseMinimum: identical structure to ParseMaximum, for element-wise min.
1238 void TfLiteParser::ParseMinimum(
size_t subgraphIndex,
size_t operatorIndex)
1240 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1242 auto inputs =
GetInputs(m_Model, subgraphIndex, operatorIndex);
1245 auto outputs =
GetOutputs(m_Model, subgraphIndex, operatorIndex);
1251 auto layerName = boost::str(boost::format(
"Minimum:%1%:%2%") % subgraphIndex % operatorIndex);
1257 auto inputTensorIndexes = AsUnsignedVector(
GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1258 if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.
GetNumDimensions())
1260 AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer);
1264 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
1267 auto outputTensorIndexes = AsUnsignedVector(
GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1268 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
// ParsePool: shared implementation behind ParseAveragePool2D and
// ParseMaxPool2D — builds a Pooling2d layer from the operator's Pool2DOptions
// and appends any fused activation before registering outputs.
1271 void TfLiteParser::ParsePool(
size_t subgraphIndex,
1272 size_t operatorIndex,
1275 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1277 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1278 const auto *
options = operatorPtr->builtin_options.AsPool2DOptions();
// Layer name reflects the concrete algorithm for readable graph dumps.
1282 std::string layerName;
1286 case PoolingAlgorithm::Average:
1288 boost::str(boost::format(
"AveragePool2D:%1%:%2%") % subgraphIndex % operatorIndex);
1290 case PoolingAlgorithm::Max:
1292 boost::str(boost::format(
"MaxPool2D:%1%:%2%") % subgraphIndex % operatorIndex);
// Any other algorithm is a programmer error, not a model error.
1295 BOOST_ASSERT_MSG(
false,
"Unsupported Pooling Algorithm");
1309 auto inputs =
GetInputs(m_Model, subgraphIndex, operatorIndex);
// Dims [1]/[2] are the spatial height/width used for padding computation.
1314 unsigned int inputHeight = inputTensorInfo.GetShape()[1];
1315 unsigned int inputWidth = inputTensorInfo.GetShape()[2];
1322 auto outputs =
GetOutputs(m_Model, subgraphIndex, operatorIndex);
1325 IConnectableLayer* layer = m_Network->AddPooling2dLayer(desc, layerName.c_str());
1327 BOOST_ASSERT(layer !=
nullptr);
1330 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1334 auto inputTensorIndexes = AsUnsignedVector(
GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1335 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
// Fused activation (if any) becomes the layer whose output is registered.
1337 layer = AddFusedActivationLayer(layer, 0,
options->fused_activation_function);
1339 auto outputTensorIndexes = AsUnsignedVector(
GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1340 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
// ParseSlice: reads constant 'begin' and 'size' tensors from their buffers
// and builds an ArmNN Slice layer.
1343 void TfLiteParser::ParseSlice(
size_t subgraphIndex,
size_t operatorIndex)
1345 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1347 auto inputs =
GetInputs(m_Model, subgraphIndex, operatorIndex);
1349 auto outputs =
GetOutputs(m_Model, subgraphIndex, operatorIndex);
// begin/size are constant inputs, copied out by raw bytes.
1358 std::vector<unsigned int> begin(beginTensorInfo.
GetNumElements());
1359 ::memcpy(begin.data(), beginBufferPtr->data.data(), beginTensorInfo.
GetNumBytes());
1365 std::vector<unsigned int> size(sizeTensorInfo.GetNumElements());
1366 ::memcpy(size.data(), sizeBufferPtr->data.data(), sizeTensorInfo.GetNumBytes());
1369 auto layerName = boost::str(boost::format(
"Slice:%1%:%2%") % subgraphIndex % operatorIndex);
1370 IConnectableLayer*
const layer = m_Network->AddSliceLayer(desc, layerName.c_str());
1377 auto inputTensorIndexes = AsUnsignedVector(
GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1378 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1381 auto outputTensorIndexes = AsUnsignedVector(
GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1382 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
// ParseSoftmax: maps a SOFTMAX operator (with its beta option) to an ArmNN
// Softmax layer.
1385 void TfLiteParser::ParseSoftmax(
size_t subgraphIndex,
size_t operatorIndex)
1387 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1388 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1389 const auto *
options = operatorPtr->builtin_options.AsSoftmaxOptions();
1394 auto inputs =
GetInputs(m_Model, subgraphIndex, operatorIndex);
1396 auto outputs =
GetOutputs(m_Model, subgraphIndex, operatorIndex);
1399 auto layerName = boost::str(boost::format(
"Softmax:%1%:%2%") % subgraphIndex % operatorIndex);
1400 IConnectableLayer*
const layer = m_Network->AddSoftmaxLayer(desc, layerName.c_str());
1407 auto inputTensorIndexes = AsUnsignedVector(
GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1408 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1411 auto outputTensorIndexes = AsUnsignedVector(
GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1412 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
// ParseSpaceToBatchND: mirror of ParseBatchToSpaceND — reads the constant
// block-shape and padding-list tensors, converts the pad list to
// (front, back) pairs, and adds an ArmNN SpaceToBatchNd layer.
1415 void TfLiteParser::ParseSpaceToBatchND(
size_t subgraphIndex,
size_t operatorIndex)
1417 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1419 auto inputs =
GetInputs(m_Model, subgraphIndex, operatorIndex);
1422 auto outputs =
GetOutputs(m_Model, subgraphIndex, operatorIndex);
1431 std::vector<unsigned int> blockShape(blockShapeTensorInfo.GetNumElements());
1432 ::memcpy(blockShape.data(), blockShapeBufferPtr->data.data(), blockShapeTensorInfo.GetNumBytes());
1434 std::vector<unsigned int> padListVector(padListTensorInfo.
GetNumElements());
1435 ::memcpy(padListVector.data(), padListBufferPtr->data.data(), padListTensorInfo.
GetNumBytes());
// Re-pack the flat pad data into (front, back) pairs per dimension;
// 'step' (defined outside this view) is presumably 2.
1438 std::vector<std::pair<unsigned int, unsigned int>> padList;
1439 for (
unsigned int i = 0; i < padListTensorInfo.
GetNumElements() / step; ++i)
1441 padList.emplace_back(padListVector[i * step], padListVector[i * step + 1]);
1451 auto layerName = boost::str(boost::format(
"SpaceToBatchND:%1%:%2%") % subgraphIndex % operatorIndex);
1452 IConnectableLayer* layer = m_Network->AddSpaceToBatchNdLayer(desc, layerName.c_str());
1456 auto inputTensorIndexes = AsUnsignedVector(
GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1457 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1459 auto outputTensorIndexes = AsUnsignedVector(
GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1460 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
// Interior of OutputShapeOfSqueeze (function signature elided by extraction).
// Computes the output TensorInfo of a SQUEEZE: dimensions listed in
// squeezeDims are removed when their extent is 1; an empty squeezeDims means
// "consider all of dims 0..3". Throws (via the stringstream messages) when
// the input or the resulting output has an unsupported number of dimensions.
1467 std::vector<uint32_t> squeezeDims = squeezeDimsIn;
// Default candidate set when the model specifies no squeeze dims.
1468 static const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };
1472 std::stringstream ss;
1473 ss <<
"Input tensor has unexpected number of dimensions:" << inputTensorInfo.
GetNumDimensions()
1474 <<
" shape:" << inputTensorInfo.
GetShape() <<
" " 1479 if (squeezeDims.empty())
1481 squeezeDims.assign(dimensionSequence,
// Keep a dimension if it is not listed for squeezing, or if its size != 1.
1485 std::vector<uint32_t> outputDims;
1488 bool skipSqueeze = (std::find(squeezeDims.begin(), squeezeDims.end(), i) == squeezeDims.end());
1489 auto currentDimension = inputTensorInfo.
GetShape()[i];
1490 if (skipSqueeze || currentDimension != 1)
1492 outputDims.push_back(currentDimension);
// More than 4 output dimensions is not representable here.
1496 if (outputDims.size() > 4)
1498 std::stringstream ss;
1499 ss <<
"Output tensor has unexpected number of dimensions:" << inputTensorInfo.
GetNumDimensions()
1500 <<
" shape:" << inputTensorInfo.
GetShape() <<
" " 1512 return outTensorInfo;
// Parses a TfLite SQUEEZE operator. Squeeze is implemented as an ArmNN
// reshape layer whose target shape comes from OutputShapeOfSqueeze.
// NOTE(review): the construction of `reshapeDesc` is elided by extraction
// between source lines 1526 and 1536 — confirm against the full file.
1515 void TfLiteParser::ParseSqueeze(
size_t subgraphIndex,
size_t operatorIndex)
1517 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1519 auto inputs =
GetInputs(m_Model, subgraphIndex, operatorIndex);
1522 auto outputs =
GetOutputs(m_Model, subgraphIndex, operatorIndex);
1525 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1526 const auto *
options = operatorPtr->builtin_options.AsSqueezeOptions();
1536 auto layerName = boost::str(boost::format(
"Squeeze:%1%:%2%") % subgraphIndex % operatorIndex);
// Squeeze == reshape with the squeezed-out dimensions removed.
1537 IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, layerName.c_str());
1540 auto inputTensorIndexes = AsUnsignedVector(
GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1541 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1543 auto outputTensorIndexes = AsUnsignedVector(
GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1544 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
// Parses a TfLite STRIDED_SLICE operator. The begin/end/stride vectors are
// constant tensors copied out of the model buffers into std::vector<int>,
// then used (via `desc`, whose setup is partially elided here) to build an
// ArmNN StridedSlice layer.
1547 void TfLiteParser::ParseStridedSlice(
size_t subgraphIndex,
size_t operatorIndex)
1549 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1551 auto inputs =
GetInputs(m_Model, subgraphIndex, operatorIndex);
1554 auto outputs =
GetOutputs(m_Model, subgraphIndex, operatorIndex);
1557 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1558 const auto *
options = operatorPtr->builtin_options.AsStridedSliceOptions();
// Copy begin indices (declaration of `begin`/`beginTensorInfo` elided above).
1572 ::memcpy(begin.data(), beginBufferPtr->data.data(), beginTensorInfo.
GetNumBytes());
1577 std::vector<int> end(endTensorInfo.GetNumElements());
1578 ::memcpy(end.data(), endBufferPtr->data.data(), endTensorInfo.GetNumBytes());
1583 std::vector<int> stride(strideTensorInfo.GetNumElements());
1584 ::memcpy(stride.data(), strideBufferPtr->data.data(), strideTensorInfo.GetNumBytes());
1590 auto layerName = boost::str(boost::format(
"StridedSlice:%1%:%2%") % subgraphIndex % operatorIndex);
1591 IConnectableLayer* layer = m_Network->AddStridedSliceLayer(desc, layerName.c_str());
1596 auto inputTensorIndexes = AsUnsignedVector(
GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1597 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1599 auto outputTensorIndexes = AsUnsignedVector(
GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1600 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
// Parses a TfLite SUB operator. Adds a subtraction layer (creation elided by
// extraction), inserts a broadcast-reshape when the two inputs have different
// ranks, registers both input slots, then appends the fused activation (from
// SubOptions) before registering the output slot.
1603 void TfLiteParser::ParseSub(
size_t subgraphIndex,
size_t operatorIndex)
1605 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1607 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1608 const auto *
options = operatorPtr->builtin_options.AsSubOptions();
1610 auto inputs =
GetInputs(m_Model, subgraphIndex, operatorIndex);
1613 auto outputs =
GetOutputs(m_Model, subgraphIndex, operatorIndex);
1619 auto layerName = boost::str(boost::format(
"Sub:%1%:%2%") % subgraphIndex % operatorIndex);
1625 auto inputTensorIndexes = AsUnsignedVector(
GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
// Rank mismatch between the two inputs requires an explicit reshape so the
// backend can broadcast.
1626 if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.
GetNumDimensions())
1628 AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer);
1632 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
// Fused activation (e.g. RELU) becomes a separate trailing layer; outputs
// are registered against that layer, not the arithmetic layer.
1635 layer = AddFusedActivationLayer(layer, 0,
options->fused_activation_function);
1637 auto outputTensorIndexes = AsUnsignedVector(
GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1638 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
// Parses a TfLite ADD operator. Mirrors ParseSub: addition layer (creation
// elided by extraction), optional broadcast reshape for rank-mismatched
// inputs, both inputs registered, fused activation appended, single output
// registered.
1641 void TfLiteParser::ParseAdd(
size_t subgraphIndex,
size_t operatorIndex)
1643 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1645 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1646 const auto *
options = operatorPtr->builtin_options.AsAddOptions();
1648 auto inputs =
GetInputs(m_Model, subgraphIndex, operatorIndex);
1651 auto outputs =
GetOutputs(m_Model, subgraphIndex, operatorIndex);
1657 auto layerName = boost::str(boost::format(
"Add:%1%:%2%") % subgraphIndex % operatorIndex);
1663 auto inputTensorIndexes = AsUnsignedVector(
GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
// Different ranks -> insert reshape so broadcasting is well-defined.
1664 if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.
GetNumDimensions())
1666 AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer)
1670 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
1673 layer = AddFusedActivationLayer(layer, 0,
options->fused_activation_function);
1675 auto outputTensorIndexes = AsUnsignedVector(
GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1676 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
// Parses a TfLite MUL operator: multiplication layer, optional broadcast
// reshape for rank-mismatched inputs, fused activation, single output.
1679 void TfLiteParser::ParseMul(
size_t subgraphIndex,
size_t operatorIndex)
1681 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1683 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1684 const auto *
options = operatorPtr->builtin_options.AsMulOptions();
1686 auto inputs =
GetInputs(m_Model, subgraphIndex, operatorIndex);
1689 auto outputs =
GetOutputs(m_Model, subgraphIndex, operatorIndex);
1695 auto layerName = boost::str(boost::format(
"Mul:%1%:%2%") % subgraphIndex % operatorIndex);
1696 IConnectableLayer* layer = m_Network->AddMultiplicationLayer(layerName.c_str());
1701 auto inputTensorIndexes = AsUnsignedVector(
GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
// Different ranks -> insert reshape so broadcasting is well-defined.
1702 if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.
GetNumDimensions())
1704 AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer);
1708 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
// Fused activation is appended as its own layer; register outputs there.
1711 layer = AddFusedActivationLayer(layer, 0,
options->fused_activation_function);
1713 auto outputTensorIndexes = AsUnsignedVector(
GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1714 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
// Parses a TfLite MEAN operator. The axis list is a constant tensor copied
// out of the model buffer.
// NOTE(review): `dimTensorInfo`, `bufferPtr`, the MeanDescriptor setup and
// the `layer` creation are elided by extraction — confirm against the full
// file. Also note axis values are copied into unsigned int from what TfLite
// stores as int32; presumably axes are non-negative here — verify upstream.
1717 void TfLiteParser::ParseMean(
size_t subgraphIndex,
size_t operatorIndex)
1719 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1721 auto inputs =
GetInputs(m_Model, subgraphIndex, operatorIndex);
1723 auto outputs =
GetOutputs(m_Model, subgraphIndex, operatorIndex);
1730 std::vector<unsigned int> axis(dimTensorInfo.GetNumElements());
1731 ::memcpy(axis.data(), bufferPtr->data.data(), dimTensorInfo.GetNumBytes());
1741 auto layerName = boost::str(boost::format(
"Mean:%1%:%2%") % subgraphIndex % operatorIndex);
1746 auto inputTensorIndexes = AsUnsignedVector(
GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1747 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1749 auto outputTensorIndexes = AsUnsignedVector(
GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1750 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
// Parses a TfLite PAD operator. The padding tensor is a constant buffer of
// (before, after) pairs; each pair is appended to desc.m_PadList.
// NOTE(review): `padTensorInfo`, `bufferPtr`, `step`, `desc` and the layer
// creation are elided by extraction — confirm against the full file.
1753 void TfLiteParser::ParsePad(
size_t subgraphIndex,
size_t operatorIndex)
1755 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1765 std::vector<unsigned int> padBuffer(padTensorInfo.GetNumElements());
1766 ::memcpy(padBuffer.data(), bufferPtr->data.data(), padTensorInfo.GetNumBytes());
// Flat buffer holds `step` values per dimension: (before, after).
1770 for (
unsigned int i = 0; i < padTensorInfo.GetNumElements() / step; ++i)
1772 desc.
m_PadList.emplace_back(padBuffer[i * step], padBuffer[i * step + 1]);
1775 auto layerName = boost::str(boost::format(
"Pad:%1%:%2%") % subgraphIndex % operatorIndex);
1781 auto inputTensorIndexes = AsUnsignedVector(
GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1782 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1784 auto outputTensorIndexes = AsUnsignedVector(
GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1785 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
// Parses a TfLite QUANTIZE operator. Layer creation is elided by extraction;
// the BOOST_ASSERT documents that the created layer must be non-null.
// Note: unlike most parsers here, ALL output tensor indexes are registered
// (outputTensorIndexes, not just element [0]).
1788 void TfLiteParser::ParseQuantize(
size_t subgraphIndex,
size_t operatorIndex)
1790 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1792 auto inputs =
GetInputs(m_Model, subgraphIndex, operatorIndex);
1795 auto outputs =
GetOutputs(m_Model, subgraphIndex, operatorIndex);
1798 auto layerName = boost::str(boost::format(
"Quantize:%1%:%2%") % subgraphIndex % operatorIndex);
1801 BOOST_ASSERT(layer !=
nullptr);
1806 auto inputTensorIndexes = AsUnsignedVector(
GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1807 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1809 auto outputTensorIndexes = AsUnsignedVector(
GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1810 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
1813 void TfLiteParser::ParseRelu(
size_t subgraphIndex,
size_t operatorIndex)
1815 ParseActivation(subgraphIndex,operatorIndex, ActivationFunction::ReLu);
1818 void TfLiteParser::ParseRelu6(
size_t subgraphIndex,
size_t operatorIndex)
1820 ParseActivation(subgraphIndex,operatorIndex, ActivationFunction::BoundedReLu);
1823 void TfLiteParser::ParseLogistic(
size_t subgraphIndex,
size_t operatorIndex)
1825 ParseActivation(subgraphIndex,operatorIndex,ActivationFunction::Sigmoid);
1828 void TfLiteParser::ParseTanH(
size_t subgraphIndex,
size_t operatorIndex)
1830 ParseActivation(subgraphIndex,operatorIndex,ActivationFunction::TanH);
// Shared implementation behind ParseRelu/ParseRelu6/ParseLogistic/ParseTanH.
// Builds an ActivationDescriptor for the requested function (setting m_A/m_B
// where the function needs parameters), names the layer
// "Activation:<FUNC>:<subgraph>:<operator>", and wires one input and one
// output slot. Unknown activation types raise a ParseException.
1834 void TfLiteParser::ParseActivation(
size_t subgraphIndex,
size_t operatorIndex,
ActivationFunction activationType)
1836 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1837 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1840 auto inputs =
GetInputs(m_Model, subgraphIndex, operatorIndex);
1843 auto outputs =
GetOutputs(m_Model, subgraphIndex, operatorIndex);
1846 auto layerName = str(boost::format(
"Activation:"));
1850 switch (activationType)
1852 case ActivationFunction::ReLu:
1854 layerName += str(boost::format(
"RELU:%1%:%2%") % subgraphIndex % operatorIndex);
1857 case ActivationFunction::BoundedReLu:
1859 layerName += str(boost::format(
"RELU6:%1%:%2%") % subgraphIndex % operatorIndex);
// RELU6 == BoundedReLu clamped to [0, 6].
1860 activationDesc.
m_A = 6.0f;
1861 activationDesc.
m_B = 0.0f;
1864 case ActivationFunction::Sigmoid:
1866 layerName += str(boost::format(
"SIGMOID:%1%:%2%") % subgraphIndex % operatorIndex);
1869 case ActivationFunction::TanH:
1871 layerName += str(boost::format(
"TANH:%1%:%2%") % subgraphIndex % operatorIndex);
// Standard tanh: scale (m_A) and steepness (m_B) both 1.
1872 activationDesc.
m_A = 1.0f;
1873 activationDesc.
m_B = 1.0f;
// Fallthrough default: unsupported activation -> ParseException (the throw
// statement itself is partially elided by extraction).
1879 boost::str(boost::format(
"Unexpected ActivationFunction[%1%] when creating layerName " 1880 " %2% ") %static_cast<int>(activationType)%
CHECK_LOCATION().AsString()));
1884 IConnectableLayer*
const layer = m_Network->AddActivationLayer(activationDesc, layerName.c_str());
1891 auto inputTensorIndexes = AsUnsignedVector(
GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1892 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1895 auto outputTensorIndexes = AsUnsignedVector(
GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1896 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
// Interior of OutputShapeOfReshape (signature elided by extraction).
// Resolves a TfLite reshape target: at most one dimension may be -1 (the
// "stretch" dimension), which is computed so the total element count matches
// the input. Throws ParseException when more than one -1 is present.
1899 const std::vector<int32_t> & targetDimsIn)
1901 std::vector<unsigned int> outputDims(targetDimsIn.begin(), targetDimsIn.end());
1902 const auto stretchDim = std::find(targetDimsIn.begin(), targetDimsIn.end(), -1);
1904 if (stretchDim != targetDimsIn.end())
// A second -1 after the first is an error.
1906 if (std::find(std::next(stretchDim), targetDimsIn.end(), -1) != targetDimsIn.end())
1910 boost::format(
"At most one component of shape can be -1 %1%") %
CHECK_LOCATION().AsString()));
// Product over all dims; the single -1 contributes the initial -1 factor,
// so the accumulated value is the (positive) product of the known dims.
1913 auto targetNumElements =
1915 std::accumulate(targetDimsIn.begin(), targetDimsIn.end(), -1, std::multiplies<int32_t>()));
1917 auto stretchIndex =
static_cast<size_t>(std::distance(targetDimsIn.begin(), stretchDim));
1918 outputDims[stretchIndex] = inputTensorInfo.
GetNumElements() / targetNumElements;
// Parses a TfLite RESHAPE operator. The target shape comes either from a
// second (constant, 1-D, INT32) input tensor or from ReshapeOptions'
// new_shape — exactly one of the two must be present. The resolved shape is
// validated against the model's declared output shape before the reshape
// layer is added.
1929 void TfLiteParser::ParseReshape(
size_t subgraphIndex,
size_t operatorIndex)
1931 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1933 auto inputs =
GetInputs(m_Model, subgraphIndex, operatorIndex);
1935 auto outputs =
GetOutputs(m_Model, subgraphIndex, operatorIndex);
1938 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1939 const auto *
options = operatorPtr->builtin_options.AsReshapeOptions();
1944 std::vector<int32_t> targetShape;
// Shape supplied as a second input tensor.
1945 if (inputs.size() > 1 && inputs[1] !=
nullptr)
1950 "Only one method expected");
// The shape tensor must be a non-variable, rank-1 INT32 constant.
1953 if (inputs[1]->is_variable)
1958 if (inputs[1]->shape.size() != 1)
1963 if (inputs[1]->type != tflite::TensorType_INT32)
1968 auto bufferPtr =
GetBuffer(m_Model, inputs[1]->buffer);
1969 auto vals =
reinterpret_cast<const int32_t*
>(bufferPtr->data.data());
1970 for (
int i=0; i < inputs[1]->shape[0]; i++)
1972 targetShape.push_back(vals[i]);
1980 "At least one method required");
// Shape supplied via ReshapeOptions.
1983 targetShape =
options->new_shape;
// Sanity check: when the shape came from an input tensor it must agree with
// the model's declared output shape.
1991 if (inputs.size() > 1 && !
CheckShape(reshapeOutputTensorShape, outputs[0]->shape))
1993 std::stringstream ss;
1994 ss <<
"New shape defined in reshape parameters " 1995 << reshapeOutputTensorShape
1996 <<
" does not equal output shape " 1997 << actualOutputTensorInfo.
GetShape()
2006 auto layerName = boost::str(boost::format(
"Reshape:%1%:%2%") % subgraphIndex % operatorIndex);
2007 IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, layerName.c_str());
2010 auto inputTensorIndexes = AsUnsignedVector(
GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2011 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
2013 auto outputTensorIndexes = AsUnsignedVector(
GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2014 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
2017 void TfLiteParser::ParseResizeBilinear(
size_t subgraphIndex,
size_t operatorIndex)
2019 ParseResize(subgraphIndex, operatorIndex, ResizeMethod::Bilinear);
2022 void TfLiteParser::ParseResizeNearestNeighbor(
size_t subgraphIndex,
size_t operatorIndex)
2024 ParseResize(subgraphIndex, operatorIndex, ResizeMethod::NearestNeighbor);
// Shared implementation behind ParseResizeBilinear/ParseResizeNearestNeighbor.
// Reads the constant size tensor ([height, width] as int32), fills a resize
// descriptor, and names the layer "Resize:<METHOD>:<subgraph>:<operator>".
// Bilinear additionally honours ResizeBilinearOptions::align_corners.
// Unknown methods raise a ParseException. All output tensor indexes are
// registered (not just element [0]).
2027 void TfLiteParser::ParseResize(
size_t subgraphIndex,
size_t operatorIndex,
ResizeMethod resizeMethod)
2029 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2031 auto inputs =
GetInputs(m_Model, subgraphIndex, operatorIndex);
2034 auto outputs =
GetOutputs(m_Model, subgraphIndex, operatorIndex);
// Size tensor holds the target [height, width].
2040 std::vector<int32_t> sizeTensorData(sizeTensorInfo.GetNumElements());
2043 ::memcpy(sizeTensorData.data(), sizeBufferPtr->data.data(), sizeTensorInfo.GetNumBytes());
2047 desc.m_TargetHeight =
static_cast<uint32_t
> (sizeTensorData[0]);
2048 desc.m_TargetWidth =
static_cast<uint32_t
> (sizeTensorData[1]);
2051 auto layerName = str(boost::format(
"Resize:"));
2053 switch (resizeMethod)
2055 case ResizeMethod::Bilinear:
2057 layerName += str(boost::format(
"BILINEAR:%1%:%2%") % subgraphIndex % operatorIndex);
2059 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2060 const auto *
options = operatorPtr->builtin_options.AsResizeBilinearOptions();
2062 desc.m_BilinearAlignCorners =
options->align_corners;
2065 case ResizeMethod::NearestNeighbor:
2067 layerName += str(boost::format(
"NEARESTNEIGHBOR:%1%:%2%") % subgraphIndex % operatorIndex);
// Default case: unsupported method -> ParseException (throw partially
// elided by extraction).
2073 boost::str(boost::format(
"Unexpected ResizeMethod[%1%] when creating layerName " 2074 " %2% ") %static_cast<int>(resizeMethod)%
CHECK_LOCATION().AsString()));
2083 auto inputTensorIndexes = AsUnsignedVector(
GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2084 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
2086 auto outputTensorIndexes = AsUnsignedVector(
GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2087 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
// Parses a TfLite CONCATENATION operator. The TfLite axis (which may be
// negative) is normalised modulo the input rank, an OriginsDescriptor is
// built with one view per input, every input slot is registered, and the
// fused activation from ConcatenationOptions is appended before output
// registration.
2090 void TfLiteParser::ParseConcatenation(
size_t subgraphIndex,
size_t operatorIndex)
2092 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2094 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2095 const auto *
options = operatorPtr->builtin_options.AsConcatenationOptions();
2099 auto inputs =
GetInputs(m_Model, subgraphIndex, operatorIndex);
2100 auto outputs =
GetOutputs(m_Model, subgraphIndex, operatorIndex);
2103 unsigned int numConcatView =
static_cast<unsigned int>(inputs.size());
// Normalise a possibly-negative TfLite axis into [0, inputRank).
2106 const unsigned int concatDimInput =
static_cast<unsigned int>(
2107 (
static_cast<int>(inputRank) +
options->axis) %
static_cast<int>(inputRank));
2109 OriginsDescriptor concatDescriptor(static_cast<uint32_t>(numConcatView), inputRank);
2112 unsigned int mergeDimOrigin = 0;
// One view origin per concatenated input (helper call partially elided).
2114 for (
unsigned int viewIndex = 0; viewIndex < numConcatView; ++viewIndex)
2120 inputTensorInfo, concatDescriptor, concatDimInput, viewIndex, mergeDimOrigin);
2123 auto layerName = boost::str(boost::format(
"Concatenation:%1%:%2%") % subgraphIndex % operatorIndex);
2124 IConnectableLayer* layer = m_Network->AddConcatLayer(concatDescriptor, layerName.c_str());
2126 BOOST_ASSERT(layer !=
nullptr);
2129 auto inputTensorIndexes = AsUnsignedVector(
GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
// All inputs are registered, not just the first.
2133 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes});
2136 layer = AddFusedActivationLayer(layer, 0,
options->fused_activation_function);
2138 auto outputTensorIndexes = AsUnsignedVector(
GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2139 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
// Parses a TfLite FULLY_CONNECTED operator. Weights must be rank-2; an
// optional third input supplies the bias. When the input tensor's element
// count does not directly match the weight layout, an intermediate reshape
// layer flattens the input to [N / K, K] (K = weight dim 1) before the
// fully-connected layer; otherwise the input connects directly. The fused
// activation is appended and outputs are registered against it.
2142 void TfLiteParser::ParseFullyConnected(
size_t subgraphIndex,
size_t operatorIndex)
2144 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2146 const auto & operatorRfr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2147 const auto options = operatorRfr->builtin_options.AsFullyConnectedOptions();
2155 auto inputs =
GetInputs(m_Model, subgraphIndex, operatorIndex);
2156 auto outputs =
GetOutputs(m_Model, subgraphIndex, operatorIndex);
// Only 2-D weight tensors are supported.
2162 int32_t weightsDimension =
static_cast<int32_t
>(filterTensorInfo.GetNumDimensions());
2163 if (weightsDimension != 2)
2168 "Dimension %1% for Fully Connected weights is not supported by Armnn. " 2174 auto filterTensorAndData = CreateConstTensor(inputs[1],
2178 auto layerName = boost::str(boost::format(
"FullyConnected:%1%:%2%") % subgraphIndex % operatorIndex);
// Three inputs -> bias present; two inputs -> no bias.
2180 if (inputs.size() == 3)
2184 auto biasTensorAndData = CreateConstTensor(inputs[2],
2187 layer = m_Network->AddFullyConnectedLayer(desc,
2188 filterTensorAndData.first,
2194 layer = m_Network->AddFullyConnectedLayer(desc,
2195 filterTensorAndData.first,
2199 BOOST_ASSERT(layer !=
nullptr);
2203 auto inputTensorIndexes = AsUnsignedVector(
GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
// Deduce a [batch, K] shape from the weight's K; the input element count
// must divide evenly by K or the model is malformed.
2211 std::vector<unsigned int> reshapedDimensions(2);
2212 reshapedDimensions[1] = filterTensorInfo.GetShape()[1];
2213 reshapedDimensions[0] = inputTensorInfo.
GetNumElements() / reshapedDimensions[1];
2215 if (inputTensorInfo.
GetNumElements() % reshapedDimensions[1] != 0)
2220 "Failed to deduce input tensor shape from filter size %1%")
2221 % reshapedDimensions[1]
// Insert a flattening reshape ahead of the fully-connected layer.
2228 std::string reshapeLayerName = boost::str(boost::format(
"Reshape_for:%1%") % layer->
GetName());
2230 desc.m_TargetShape = reshapedTensorInfo.
GetShape();
2236 RegisterInputSlots(subgraphIndex, operatorIndex, reshapeLayer, {inputTensorIndexes[0]});
2242 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
2250 options->fused_activation_function);
2253 auto outputTensorIndexes = AsUnsignedVector(
GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2254 RegisterOutputSlots(subgraphIndex, operatorIndex, fusedActivationLayer, {outputTensorIndexes[0]});
// Parses the custom TFLite_Detection_PostProcess operator. Its parameters
// arrive as a flexbuffers map in the operator's custom_options; scale
// factors and optional flags are read from it. The anchors (input 2) become
// a constant tensor baked into the layer. The four outputs (boxes, classes,
// scores, num_detections) have shapes that depend on the descriptor, so
// overridden shapes are recorded and all four output slots are registered.
2257 void TfLiteParser::ParseDetectionPostProcess(
size_t subgraphIndex,
size_t operatorIndex)
2259 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2261 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2263 auto inputs =
GetInputs(m_Model, subgraphIndex, operatorIndex);
2264 auto outputs =
GetOutputs(m_Model, subgraphIndex, operatorIndex);
// Custom options are a flexbuffers-encoded map, not builtin_options.
2268 auto custom_options = operatorPtr->custom_options;
2269 const flexbuffers::Map& m = flexbuffers::GetRoot(custom_options.data(), custom_options.size()).AsMap();
2278 desc.
m_ScaleH = m[
"h_scale"].AsFloat();
2279 desc.
m_ScaleW = m[
"w_scale"].AsFloat();
2280 desc.
m_ScaleX = m[
"x_scale"].AsFloat();
2281 desc.
m_ScaleY = m[
"y_scale"].AsFloat();
// Optional keys: only read when present in the map.
2283 if (!(m[
"use_regular_nms"].IsNull()))
2287 if (!(m[
"detections_per_class"].IsNull()))
2295 "must be positive and less than or equal to 1.");
// Anchors are input 2, baked in as a constant tensor.
2299 auto anchorTensorAndData = CreateConstTensor(inputs[2], anchorTensorInfo,
2302 auto layerName = boost::str(boost::format(
"DetectionPostProcess:%1%:%2%") % subgraphIndex % operatorIndex);
2303 IConnectableLayer* layer = m_Network->AddDetectionPostProcessLayer(desc, anchorTensorAndData.first,
2306 BOOST_ASSERT(layer !=
nullptr);
// Output shapes: [1, N, 4] boxes, [1, N] classes, [1, N] scores, [1] count.
2311 m_OverridenOutputShapes.push_back({ 1, numDetectedBox, 4 });
2312 m_OverridenOutputShapes.push_back({ 1, numDetectedBox });
2313 m_OverridenOutputShapes.push_back({ 1, numDetectedBox });
2314 m_OverridenOutputShapes.push_back({ 1 });
2316 for (
unsigned int i = 0 ; i < outputs.size() ; ++i)
// Only the first two inputs (boxes, scores) connect as input slots.
2324 auto inputTensorIndexes = AsUnsignedVector(
GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2325 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
2328 auto outputTensorIndexes = AsUnsignedVector(
GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2329 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0],
2330 outputTensorIndexes[1],
2331 outputTensorIndexes[2],
2332 outputTensorIndexes[3]});
// Parses a TfLite PACK operator (stack N tensors along a new axis). Requires
// at least one input; the descriptor's input count comes from the actual
// input list size. All input slots are registered; a single output slot is
// registered.
2336 void TfLiteParser::ParsePack(
size_t subgraphIndex,
size_t operatorIndex)
2338 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2340 auto inputs =
GetInputs(m_Model, subgraphIndex, operatorIndex);
2341 auto outputs =
GetOutputs(m_Model, subgraphIndex, operatorIndex);
// PACK with zero inputs is malformed (exception body elided by extraction).
2344 if (inputs.size() < 1)
2349 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2350 const auto*
options = operatorPtr->builtin_options.AsPackOptions();
2354 desc.
m_NumInputs =
static_cast<uint32_t
>(inputs.size());
2360 auto layerName = boost::str(boost::format(
"Pack:%1%:%2%") % subgraphIndex % operatorIndex);
2363 BOOST_ASSERT(layer !=
nullptr);
2368 auto inputTensorIndexes = AsUnsignedVector(
GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2369 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes});
2371 auto outputTensorIndexes = AsUnsignedVector(
GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2372 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
// Parses a TfLite UNPACK operator. Unpack is implemented as an ArmNN
// splitter: the unpack axis' extent must equal the number of outputs; each
// split view gets the input shape with that axis divided by unpackNum.
// Downstream reshape layers (partially elided here) drop the unpacked axis,
// and their outputs are registered as the producers of the result tensors.
2375 void TfLiteParser::ParseUnpack(
size_t subgraphIndex,
size_t operatorIndex)
2377 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2379 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2380 const auto *
options = operatorPtr->builtin_options.AsUnpackOptions();
2385 auto inputs =
GetInputs(m_Model, subgraphIndex, operatorIndex);
// Validate the axis against the input rank.
2390 if (unpackAxis >= inputTensorInfo.GetNumDimensions())
2395 "The unpack axis: %1% cannot be greater than or equal to " 2396 "the number of input dimension %2% %3%")
2398 % inputTensorInfo.GetNumDimensions()
// When `num` is unset, infer it from the axis extent.
2406 unpackNum = inputTensorInfo.GetShape()[unpackAxis];
2412 throw ParseException(
"Number to unpack must greater than zero.");
2415 auto outputs =
GetOutputs(m_Model, subgraphIndex, operatorIndex);
2418 auto inputDimSize = inputTensorInfo.GetNumDimensions();
2419 std::vector<unsigned int> unpackDimSizes(inputDimSize);
2422 for (
unsigned int i = 0; i < inputDimSize; ++i)
2424 unpackDimSizes[i] = inputTensorInfo.GetShape()[i];
2427 if (unpackDimSizes[unpackAxis] != unpackNum)
2429 throw ParseException(
"Number to unpack must be the same as length of the dimension to " 2433 unpackDimSizes[unpackAxis] /= unpackNum;
// One splitter view per unpacked slice; all views share the reduced sizes.
2435 SplitterDescriptor splitDesc(unpackNum, static_cast<unsigned int>(unpackDimSizes.size()));
2436 for (
unsigned int j = 0; j < unpackNum; ++j)
2439 for (
unsigned int dimIdx = 0; dimIdx < unpackDimSizes.size(); ++dimIdx)
2441 splitDesc.
SetViewSize(j, dimIdx, unpackDimSizes[dimIdx]);
2446 auto layerName = boost::str(boost::format(
"Unpack:%1%:%2%") % subgraphIndex % operatorIndex);
2447 IConnectableLayer* layer = m_Network->AddSplitterLayer(splitDesc, layerName.c_str());
2450 unpackDimSizes.data());
2452 auto inputTensorIndexes = AsUnsignedVector(
GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2453 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
2459 std::string reshapeLayerName = boost::str(boost::format(
"Reshape_for:%1%") % layer->
GetName());
2474 RegisterProducerOfTensor(subgraphIndex, reshapedOutputId, slot);
// Parses a TfLite SPLIT operator. Input 0 is the (constant) axis tensor,
// input 1 is the data tensor — hence inputTensorIndexes[1] below. The split
// dimension must divide evenly by num_splits; each view gets the input shape
// with that dimension divided. All output tensor indexes are registered.
2478 void TfLiteParser::ParseSplit(
size_t subgraphIndex,
size_t operatorIndex)
2480 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2482 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2483 const auto *
options = operatorPtr->builtin_options.AsSplitOptions();
2490 throw ParseException(
"Number to splits must greater than zero.");
2493 auto inputs =
GetInputs(m_Model, subgraphIndex, operatorIndex);
2495 auto outputs =
GetOutputs(m_Model, subgraphIndex, operatorIndex);
// Read the scalar split axis from the constant axis tensor.
2502 std::vector<unsigned int> axisData(axisTensorInfo.
GetNumElements());
2503 ::memcpy(axisData.data(), axisBufferPtr->data.data(), axisTensorInfo.
GetNumBytes());
2506 const unsigned int splitDim = axisData[0];
2508 auto inputDimSize = inputTensorInfo.GetNumDimensions();
// Rank limit check (exception text partially elided by extraction).
2514 "The number of dimensions: %1% for input tensors of the " 2515 "split op cannot be greater than %2% %3%")
2516 % inputTensorInfo.GetNumDimensions()
2521 std::vector<unsigned int> splitterDimSizes(inputDimSize);
2524 for (
unsigned int i = 0; i < inputDimSize; ++i)
2526 splitterDimSizes[i] = inputTensorInfo.GetShape()[i];
2529 if (splitterDimSizes[splitDim] % numSplits != 0)
2531 throw ParseException(
"Number of splits must evenly divide the dimension");
2533 splitterDimSizes[splitDim] /= numSplits;
// One view per split; all views share the reduced dimension sizes.
2536 for (
unsigned int j = 0; j < numSplits; ++j)
2539 for (
unsigned int dimIdx = 0; dimIdx < splitterDimSizes.size(); ++dimIdx)
2541 splitDesc.
SetViewSize(j, dimIdx, splitterDimSizes[dimIdx]);
2546 auto layerName = boost::str(boost::format(
"Split:%1%:%2%") % subgraphIndex % operatorIndex);
2547 IConnectableLayer* layer = m_Network->AddSplitterLayer(splitDesc, layerName.c_str());
2549 auto inputTensorIndexes = AsUnsignedVector(
GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
// Index [1] is intentional: the data tensor is the second operator input.
2550 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[1]});
2558 auto outputTensorIndexes = AsUnsignedVector(
GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2559 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
// Interior of AddFusedActivationLayer (signature elided by extraction).
// Converts a TfLite fused-activation enum into a trailing ArmNN activation
// layer connected to prevLayer's given output slot, returning the layer the
// caller should register outputs against. NONE returns early (return
// statement elided here); RELU_N1_TO_1 and SIGN_BIT are unsupported and
// raise a ParseException.
2563 unsigned int outputSlot,
2564 tflite::ActivationFunctionType activationType)
// New layer inherits the previous layer's name plus an activation suffix.
2567 std::string layerName = prevLayer->
GetName();
2569 switch(activationType)
2571 case tflite::ActivationFunctionType_NONE:
2576 case tflite::ActivationFunctionType_RELU:
2578 activationDesc.
m_Function = ActivationFunction::ReLu;
2579 layerName +=
":RELU";
2582 case tflite::ActivationFunctionType_RELU6:
// RELU6 == BoundedReLu clamped to [0, 6].
2584 activationDesc.
m_Function = ActivationFunction::BoundedReLu;
2585 activationDesc.
m_A = 6.0f;
2586 activationDesc.
m_B = 0.0f;
2587 layerName +=
":RELU6";
2590 case tflite::ActivationFunctionType_TANH:
2592 activationDesc.
m_Function = ActivationFunction::TanH;
2593 activationDesc.
m_A = 1.0f;
2594 activationDesc.
m_B = 1.0f;
2595 layerName +=
":TANH";
// Unsupported fused activations fall through to the ParseException below.
2600 case tflite::ActivationFunctionType_RELU_N1_TO_1:
2601 case tflite::ActivationFunctionType_SIGN_BIT:
2606 boost::format(
"TfLite parser doesn't suppport fused activation: " 2609 tflite::EnumNameActivationFunctionType(activationType) %
2616 m_Network->AddActivationLayer(activationDesc, layerName.c_str());
// Splice the activation between prevLayer's slot and its consumers.
2618 auto & prevOutputSlot = prevLayer->
GetOutputSlot(outputSlot);
2621 return activationLayer;
// Interior of LoadModelFromFile (signature elided by extraction). Validates
// the path with boost::filesystem, slurps the whole file as binary into a
// string, and hands the bytes to the binary loader (call partially elided).
2626 if (fileName ==
nullptr)
2631 boost::system::error_code errorCode;
2632 boost::filesystem::path pathToFile(fileName);
2633 if (!boost::filesystem::exists(pathToFile, errorCode))
2636 std::string msg = boost::str(boost::format(
"Cannot find the file (%1%) errorCode: %2% %3%") %
// Read the entire file into memory; flatbuffers parses from a byte buffer.
2642 std::ifstream file(fileName, std::ios::binary);
2643 std::string fileContent((std::istreambuf_iterator<char>(file)), std::istreambuf_iterator<char>());
2645 fileContent.size());
// Interior of LoadModelFromBinary (signature elided by extraction). Verifies
// the buffer is a well-formed tflite::Model flatbuffer before unpacking it
// into the object API representation.
2650 if (binaryContent ==
nullptr)
2655 flatbuffers::Verifier verifier(binaryContent, len);
2656 if (verifier.VerifyBuffer<tflite::Model>() ==
false)
2659 boost::str(boost::format(
"Buffer doesn't conform to the expected Tensorflow Lite " 2660 "flatbuffers format. size:%1% %2%") %
2664 return tflite::UnPackModel(binaryContent);
// GetInputs (name/return line elided by extraction): resolves an operator's
// input tensor ids to raw TensorRawPtr entries, in operator-input order.
2668 size_t subgraphIndex,
2669 size_t operatorIndex)
2673 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
2674 const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
2676 size_t inputCount = operatorPtr->inputs.size();
2678 for (
size_t i=0; i<inputCount; ++i)
// Map tensor id -> raw tensor pointer from the subgraph's tensor table.
2681 result[i] = subgraphPtr->tensors[inputId].get();
// GetOutputs (name/return line elided by extraction): resolves an operator's
// output tensor ids to raw TensorRawPtr entries, in operator-output order.
2687 size_t subgraphIndex,
2688 size_t operatorIndex)
2692 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
2693 const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
2695 size_t outputCount = operatorPtr->outputs.size();
2697 for (
size_t i=0; i<outputCount; ++i)
// Map tensor id -> raw tensor pointer from the subgraph's tensor table.
2701 result[i] = subgraphPtr->tensors[outputId].get();
// GetSubgraphInputs (name/return line elided by extraction): returns the
// subgraph's declared input tensors as (tensor id, raw tensor ptr) pairs.
2707 size_t subgraphIndex)
2710 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
2712 size_t inputCount = subgraphPtr->inputs.size();
2714 for (
size_t i=0; i<inputCount; ++i)
2718 result[i] = std::make_pair(inputId, subgraphPtr->tensors[inputId].get());
// GetSubgraphOutputs (name/return line elided by extraction): returns the
// subgraph's declared output tensors as (tensor id, raw tensor ptr) pairs.
2724 size_t subgraphIndex)
2727 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
2729 size_t outputCount = subgraphPtr->outputs.size();
2731 for (
size_t i=0; i<outputCount; ++i)
2734 result[i] = std::make_pair(outputId, subgraphPtr->tensors[outputId].get());
// GetInputTensorIds (name/return line elided by extraction): returns the raw
// tensor-id list of an operator's inputs, straight from the flatbuffer.
2740 size_t subgraphIndex,
2741 size_t operatorIndex)
2744 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
2745 const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
2746 return operatorPtr->inputs;
// GetOutputTensorIds (name/return line elided by extraction): returns the
// raw tensor-id list of an operator's outputs, straight from the flatbuffer.
2750 size_t subgraphIndex,
2751 size_t operatorIndex)
2754 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
2755 const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
2756 return operatorPtr->outputs;
// Binds a layer's input slots to the given tensor indexes: slot i consumes
// tensor tensorIndexes[i]. Throws (message partially elided) when the count
// of tensor indexes does not match the layer's input-slot count; each slot
// is then recorded as a consumer of its tensor for later graph wiring.
2759 void TfLiteParser::RegisterInputSlots(
size_t subgraphIndex,
2760 size_t operatorIndex,
2762 const std::vector<unsigned int>& tensorIndexes)
2764 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2765 BOOST_ASSERT(layer !=
nullptr);
2769 boost::str(boost::format(
"The number of tensor inputs (%1%) does not match the number expected (%2%)" 2770 " for subgraph:%3% operator index:%4% %5%") %
2771 tensorIndexes.size() %
2778 for (
unsigned int slotIndex = 0; slotIndex < layer->
GetNumInputSlots(); ++slotIndex)
2780 unsigned int tensorIndex = tensorIndexes[slotIndex];
2782 RegisterConsumerOfTensor(subgraphIndex, tensorIndex, slot);
2786 void TfLiteParser::RegisterOutputSlots(
size_t subgraphIndex,
2787 size_t operatorIndex,
2789 const std::vector<unsigned int>& tensorIndexes)
2791 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2792 BOOST_ASSERT(layer !=
nullptr);
2796 boost::str(boost::format(
"The number of tensor outputs (%1%) does not match the number expected (%2%)" 2797 " for subgraph:%3% operator index:%4% %5%") %
2798 tensorIndexes.size() %
2805 for (
unsigned int slotIndex = 0; slotIndex < layer->
GetNumOutputSlots(); ++slotIndex)
2807 unsigned int tensorIndex = tensorIndexes[slotIndex];
2809 RegisterProducerOfTensor(subgraphIndex, tensorIndex, slot);
2813 void TfLiteParser::SetupInputLayers(
size_t subgraphIndex)
2818 for (
auto const & tensorIdAndPtr : inputs)
2820 auto bindingId = GenerateLayerBindingId(subgraphIndex, tensorIdAndPtr.first);
2822 m_Network->AddInputLayer(bindingId, tensorIdAndPtr.second->name.c_str());
2827 RegisterOutputSlots(subgraphIndex,
2828 VIRTUAL_OPERATOR_ID,
2830 {
static_cast<uint32_t
>(tensorIdAndPtr.first) });
2834 void TfLiteParser::SetupOutputLayers(
size_t subgraphIndex)
2839 for (
auto const & tensorIdAndPtr : outputs)
2841 auto bindingId = GenerateLayerBindingId(subgraphIndex, tensorIdAndPtr.first);
2843 m_Network->AddOutputLayer(bindingId, tensorIdAndPtr.second->name.c_str());
2845 RegisterInputSlots(subgraphIndex,
2846 VIRTUAL_OPERATOR_ID,
2848 {
static_cast<uint32_t
>(tensorIdAndPtr.first) });
2852 void TfLiteParser::SetupConstantLayers(
size_t subgraphIndex)
2856 const auto & subgraphPtr = m_Model->subgraphs[subgraphIndex];
2857 for (
unsigned int subgraphIndex = 0; subgraphIndex < m_SubgraphConnections.size(); ++subgraphIndex)
2859 for (
unsigned int tensorIndex = 0; tensorIndex < m_SubgraphConnections[subgraphIndex].size(); ++tensorIndex)
2861 if (m_SubgraphConnections[subgraphIndex][tensorIndex].outputSlot ==
nullptr &&
2862 m_SubgraphConnections[subgraphIndex][tensorIndex].inputSlots.size() > 0)
2864 TensorRawPtr tensorPtr = subgraphPtr->tensors[tensorIndex].get();
2866 auto tensorAndData = CreateConstTensor(tensorPtr,
2870 std::string layerName = boost::str(boost::format(
"Constant:%1%") % tensorPtr->name);
2872 m_Network->AddConstantLayer(tensorAndData.first, layerName.c_str());
2875 RegisterOutputSlots(subgraphIndex,
2876 VIRTUAL_OPERATOR_ID,
2889 return model->buffers[bufferIndex].get();
2892 template<
typename T>
2893 std::pair<armnn::ConstTensor, TfLiteParser::SupportedDataStorage>
2899 auto constData = CreateConstTensorImpl<T>(bufferPtr,
2903 TfLiteParser::SupportedDataStorage storage(std::move(constData.second));
2904 return std::make_pair(constData.first, std::move(storage));
2907 std::pair<armnn::ConstTensor, TfLiteParser::SupportedDataStorage>
2908 TfLiteParser::CreateConstTensor(
TensorRawPtr tensorPtr,
2913 auto bufferPtr =
GetBuffer(m_Model, tensorPtr->buffer);
2919 return CreateConstTensorAndStoreData<float>(bufferPtr,
2924 return CreateConstTensorAndStoreData<uint8_t>(bufferPtr,
2929 return CreateConstTensorAndStoreData<int8_t>(bufferPtr,
2934 return CreateConstTensorAndStoreData<int8_t>(bufferPtr,
2939 return CreateConstTensorAndStoreData<int32_t>(bufferPtr,
2945 std::stringstream errString;
2946 errString <<
"Unexpected datatype when creating const tensor: " 2948 <<
" shape:" << tensorInfo.GetShape()
2956 const std::string& name)
const 2960 for (
auto const & input : inputs)
2962 if (input.second->name == name)
2964 auto bindingId = GenerateLayerBindingId(subgraphId, input.first);
2965 return std::make_pair(bindingId,
ToTensorInfo(input.second));
2969 std::stringstream bindings;
2970 for (
auto const & input : inputs)
2972 bindings <<
"'" << input.second->name <<
"' ";
2977 boost::format(
"No input binding found for subgraph:%1% and name:%2%. " 2978 "Possible inputs are: [%3%] %4%") %
2986 const std::string& name)
const 2990 for (
unsigned int i = 0; i < outputs.size(); ++i)
2992 auto const output = outputs[i];
2993 if (output.second->name == name)
2995 auto bindingId = GenerateLayerBindingId(subgraphId, output.first);
2996 std::vector<unsigned int> shape = m_OverridenOutputShapes.size() > 0 ?
2997 m_OverridenOutputShapes[i] : AsUnsignedVector(output.second->shape);
2998 return std::make_pair(bindingId,
ToTensorInfo(output.second, shape));
3002 std::stringstream bindings;
3003 for (
auto const & output : outputs)
3005 bindings <<
"'" << output.second->name <<
"' ";
3010 boost::format(
"No output binding found for subgraph:%1% and name:%2%. " 3011 "Possible outputs are: [%3%] %4%") %
3020 return m_Model->subgraphs.size();
3027 std::vector<std::string> result;
3028 result.reserve(inputs.size());
3029 for (
auto const & input : inputs)
3031 result.push_back(input.second->name);
3040 std::vector<std::string> result;
3041 result.reserve(outputs.size());
3042 for (
auto const & output : outputs)
3044 result.push_back(output.second->name);
3064 TfLiteParser::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<
float[]> && data)
3065 : m_FloatData(std::move(data))
3066 , m_Uint8Data(
nullptr)
3067 , m_Int8Data(
nullptr)
3068 , m_Int32Data(
nullptr)
3072 TfLiteParser::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<uint8_t[]> && data)
3073 : m_FloatData(
nullptr)
3074 , m_Uint8Data(std::move(data))
3075 , m_Int8Data(
nullptr)
3076 , m_Int32Data(
nullptr)
3080 TfLiteParser::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<int8_t[]> && data)
3081 : m_FloatData(
nullptr)
3082 , m_Uint8Data(
nullptr)
3083 , m_Int8Data(std::move(data))
3084 , m_Int32Data(
nullptr)
3088 TfLiteParser::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<int32_t[]> && data)
3089 : m_FloatData(
nullptr)
3090 , m_Uint8Data(
nullptr)
3091 , m_Int8Data(
nullptr)
3092 , m_Int32Data(std::move(data))
virtual armnn::INetworkPtr CreateNetworkFromBinaryFile(const char *graphFile) override
Create the network from a flatbuffers binary file on disk.
uint32_t m_PadBottom
Padding bottom value in the height dimension.
bool m_BiasEnabled
Enable/disable bias.
#define CHECK_MODEL(MODEL, SUBGRAPH_INDEX, OPERATOR_INDEX)
virtual unsigned int GetNumOutputSlots() const =0
Returns the number of connectable output slots.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
virtual BindingPointInfo GetNetworkOutputBindingInfo(size_t subgraphId, const std::string &name) const override
Retrieve binding info (layer id and tensor info) for the network output identified by the given layer...
uint32_t m_Axis
0-based axis along which to stack the input tensors.
std::unique_ptr< tflite::SubGraphT > SubgraphPtr
static BufferRawPtr GetBuffer(const ModelPtr &model, size_t bufferIndex)
A ViewsDescriptor for the SplitterLayer.
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
float m_ScaleW
Center size encoding scale weight.
uint32_t m_PadBottom
Padding bottom value in the height dimension.
bool m_BiasEnabled
Enable/disable bias.
static ModelPtr LoadModelFromBinary(const uint8_t *binaryContent, size_t len)
virtual unsigned int GetNumInputSlots() const =0
Returns the number of connectable input slots.
A TransposeConvolution2dDescriptor for the TransposeConvolution2dLayer.
#define ARMNN_THROW_PARSE_EXCEPTION(msg)
const TensorShape & GetShape() const
uint32_t m_PadBottom
Padding bottom value in the height dimension.
uint32_t m_PadLeft
Padding left value in the width dimension.
virtual armnn::INetworkPtr CreateNetworkFromBinary(const std::vector< uint8_t > &binaryContent) override
Create the network from a flatbuffers binary.
std::string AsString() const
int32_t m_ShrinkAxisMask
Shrink axis mask value. If set, the nth specification shrinks the dimensionality by 1...
A ReshapeDescriptor for the ReshapeLayer.
std::vector< int > m_Begin
Begin values for the input that will be sliced.
std::vector< TensorRawPtr > TensorRawPtrVector
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
float m_ScaleX
Center size encoding scale x.
TensorShape m_InputShape
Required shape of all input tensors.
bool m_TransposeWeightMatrix
Enable/disable transpose weight matrix.
uint32_t m_PoolWidth
Pooling width value.
A Convolution2dDescriptor for the Convolution2dLayer.
uint32_t m_PadLeft
Padding left value in the width dimension.
static ModelPtr LoadModelFromFile(const char *fileName)
bool m_BiasEnabled
Enable/disable bias.
unsigned int GetNumBytes() const
ResizeMethod m_Method
The Interpolation method to use (Bilinear, NearestNeighbor).
float m_Beta
Exponentiation value.
PaddingMethod m_PaddingMethod
The padding method to be used. (Exclude, IgnoreValue).
uint32_t m_DetectionsPerClass
Detections per classes, used in Regular NMS.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
#define CHECK_BUFFER(MODEL, BUFFER_INDEX)
static TensorRawPtrVector GetInputs(const ModelPtr &model, size_t subgraphIndex, size_t operatorIndex)
virtual const char * what() const noexcept override
#define ARMNN_LOG(severity)
uint32_t m_PadTop
Padding top value in the height dimension.
void ProcessConcatInputTensorInfo(armnn::TensorInfo &inputTensorInfo, armnn::OriginsDescriptor &concatDescriptor, const unsigned int &concatAxis, unsigned int inputIndex, unsigned int &mergeDimOrigin)
void CalcPadding(uint32_t input, uint32_t kernel, uint32_t stride, uint32_t &outPadHead, uint32_t &outPadTail, bool samePadding)
uint32_t m_PadRight
Padding right value in the width dimension.
std::vector< std::pair< unsigned int, unsigned int > > m_PadList
Specifies the padding for input dimension.
std::unique_ptr< ITfLiteParser, void(*)(ITfLiteParser *parser)> ITfLiteParserPtr
virtual BindingPointInfo GetNetworkInputBindingInfo(size_t subgraphId, const std::string &name) const override
Retrieve binding info (layer id and tensor info) for the network input identified by the given layer ...
Copyright (c) 2020 ARM Limited.
void IgnoreUnused(Ts &&...)
DataLayout::NCHW DataLayout::NCHW DataLayout::NHWC DataLayout::NHWC true
uint32_t m_PadBottom
Padding bottom value in the height dimension.
int32_t m_BeginMask
Begin mask value.
virtual size_t GetSubgraphCount() const override
Return the number of subgraphs in the parsed model.
uint32_t m_DilationY
Dilation along y axis.
int32_t m_EndMask
End mask value.
std::vector< std::pair< unsigned int, unsigned int > > m_PadList
Specifies the padding values for the input dimension: heightPad{top, bottom} widthPad{left, right}.
uint32_t m_DilationY
Dilation factor value for height dimension.
A BatchToSpaceNdDescriptor for the BatchToSpaceNdLayer.
static armnn::TensorInfo OutputShapeOfReshape(const armnn::TensorInfo &inputTensorInfo, const std::vector< int32_t > &targetDimsIn)
static ITfLiteParserPtr Create(const armnn::Optional< TfLiteParserOptions > &options=armnn::EmptyOptional())
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
int LayerBindingId
Type of identifiers for bindable layers (inputs, outputs).
virtual void SetTensorInfo(const TensorInfo &tensorInfo)=0
#define CHECK_TENSOR(MODEL, SUBGRAPH_INDEX, TENSOR_INDEX)
constexpr const char * GetDataTypeName(DataType dataType)
void SetShape(const TensorShape &newShape)
A ResizeDescriptor for the ResizeLayer.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
uint32_t m_MaxClassesPerDetection
Maximum numbers of classes per detection, used in Fast NMS.
std::vector< unsigned int > m_Axis
Values for the dimensions to reduce.
A StackDescriptor for the StackLayer.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
TensorShape m_TargetShape
Target shape value.
static TensorIdRawPtrVector GetSubgraphInputs(const ModelPtr &model, size_t subgraphIndex)
static TensorRawPtrVector GetOutputs(const ModelPtr &model, size_t subgraphIndex, size_t operatorIndex)
uint32_t m_PoolHeight
Pooling height value.
uint32_t m_PadTop
Padding top value in the height dimension.
uint32_t m_MaxDetections
Maximum numbers of detections.
A PadDescriptor for the PadLayer.
armnn::TensorInfo ToTensorInfo(Deserializer::TensorRawPtr tensorPtr)
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
std::unique_ptr< onnx::ModelProto > ModelPtr
#define CHECK_SUBGRAPH(MODEL, SUBGRAPH_INDEX)
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
void CheckTensor(const ConstTensor &t)
static TensorIdRawPtrVector GetSubgraphOutputs(const ModelPtr &model, size_t subgraphIndex)
bool CheckShape(const armnn::TensorShape &actual, const std::vector< uint32_t > &expected)
float m_NmsIouThreshold
Intersection over union threshold.
uint32_t m_PadRight
Padding right value in the width dimension.
uint32_t m_DilationX
Dilation factor value for width dimension.
uint32_t m_PadTop
Padding top value in the height dimension.
std::string FileLine() const
Status SetViewSize(uint32_t view, uint32_t coord, uint32_t value)
Set the size of the views.
int32_t m_NewAxisMask
New axis mask value.
bool m_KeepDims
Enable/disable keep dimensions. If true, then the reduced dimensions that are of length 1 are kept...
armnnSerializer::TensorInfo * TensorRawPtr
std::vector< unsigned int > m_BlockShape
Block shape values.
static void Destroy(ITfLiteParser *parser)
An output connection slot for a layer.
A L2NormalizationDescriptor for the L2NormalizationLayer.
int32_t GetQuantizationOffset() const
float GetQuantizationScale() const
static std::vector< int32_t > & GetOutputTensorIds(const ModelPtr &model, size_t subgraphIndex, size_t operatorIndex)
DataType GetDataType() const
An OriginsDescriptor for the ConcatLayer.
bool has_value() const noexcept
A FullyConnectedDescriptor for the FullyConnectedLayer.
int32_t m_EllipsisMask
Ellipsis mask value.
bool m_BiasEnabled
Enable/disable bias.
A tensor defined by a TensorInfo (shape and data type) and an immutable backing store.
static ITfLiteParser * CreateRaw(const armnn::Optional< TfLiteParserOptions > &options=armnn::EmptyOptional())
#define CHECK_VALID_SIZE(ACTUAL,...)
uint32_t m_NumClasses
Number of classes.
#define CHECKED_NON_NEGATIVE(VALUE)
uint32_t m_PadTop
Padding top value in the height dimension.
A StandInDescriptor for the StandIn layer.
std::enable_if_t< std::is_unsigned< Source >::value &&std::is_unsigned< Dest >::value, Dest > numeric_cast(Source source)
bool m_UseRegularNms
Use Regular NMS.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
std::vector< unsigned int > m_BlockShape
Block shape value.
std::vector< int > m_Stride
Stride values for the input that will be sliced.
bool IsActivationSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const ActivationDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
An ActivationDescriptor for the ActivationLayer.
uint32_t m_NumInputs
Number of input tensors.
A SliceDescriptor for the SliceLayer.
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
static std::vector< int32_t > & GetInputTensorIds(const ModelPtr &model, size_t subgraphIndex, size_t operatorIndex)
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
#define CHECK_TENSOR_PTR(TENSOR_PTR)
float m_ScaleH
Center size encoding scale height.
std::vector< int > m_End
End values for the input that will be sliced.
A SpaceToBatchNdDescriptor for the SpaceToBatchNdLayer.
float m_A
Alpha upper bound value used by the activation functions. (BoundedReLu, Linear, TanH).
uint32_t m_DilationX
Dilation along x axis.
uint32_t m_PadLeft
Padding left value in the width dimension.
EmptyOptional is used to initialize the Optional class in case we want to have default value for an O...
static armnn::TensorInfo OutputShapeOfSqueeze(const std::vector< uint32_t > &squeezeDims, const armnn::TensorInfo &inputTensorInfo)
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
#define CHECK_SUPPORTED_FUSED_ACTIVATION(OPTION, SUBGRAPH_INDEX, OPERATOR_INDEX)
PoolingAlgorithm m_PoolType
The pooling algorithm to use (Max. Average, L2).
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
std::vector< std::pair< unsigned int, unsigned int > > m_Crops
The values to crop from the input dimension.
unsigned int GetNumDimensions() const
OutputShapeRounding m_OutputShapeRounding
The rounding method for the output shape. (Floor, Ceiling).
void SetConcatAxis(unsigned int concatAxis)
Set the concatenation axis value.
const tflite::BufferT * BufferRawPtr
virtual const IInputSlot & GetInputSlot(unsigned int index) const =0
Get a const input slot handle by slot index.
A MeanDescriptor for the MeanLayer.
virtual std::vector< std::string > GetSubgraphInputTensorNames(size_t subgraphId) const override
Return the input tensor names for a given subgraph.
armnn::BindingPointInfo BindingPointInfo
uint32_t m_PadRight
Padding right value in the width dimension.
A TransposeDescriptor for the TransposeLayer.
A StridedSliceDescriptor for the StridedSliceLayer.
virtual const IOutputSlot & GetOutputSlot(unsigned int index) const =0
Get the const output slot handle by slot index.
virtual const char * GetName() const =0
Returns the name of the layer.
virtual std::vector< std::string > GetSubgraphOutputTensorNames(size_t subgraphId) const override
Return the output tensor names for a given subgraph.
float m_ScaleY
Center size encoding scale y.
float m_NmsScoreThreshold
NMS score threshold.
std::unique_ptr< INetwork, void(*)(INetwork *network)> INetworkPtr
virtual int Connect(IInputSlot &destination)=0
A Pooling2dDescriptor for the Pooling2dLayer.
armnn::Runtime::CreationOptions::ExternalProfilingOptions options
std::unique_ptr< tflite::ModelT > ModelPtr
unsigned int GetNumDimensions() const
#define CHECK_BUFFER_SIZE(BUFFER_PTR, TENSOR_INFO, BUFFER_ID)
float m_B
Beta lower bound value used by the activation functions. (BoundedReLu, Linear, TanH).
armnn::TensorShape Permuted(const armnn::TensorShape &srcShape, const armnn::PermutationVector &mappings)
A SoftmaxDescriptor for the SoftmaxLayer.
const tflite::TensorT * TensorRawPtr
Status SetViewOriginCoord(uint32_t view, uint32_t coord, uint32_t value)
Set the view origin coordinates.
ActivationFunction m_Function
The activation function to use (Sigmoid, TanH, Linear, ReLu, BoundedReLu, SoftReLu, LeakyReLu, Abs, Sqrt, Square).
std::unique_ptr< tflite::OperatorT > OperatorPtr
TfLiteParser(const armnn::Optional< ITfLiteParser::TfLiteParserOptions > &options=armnn::EmptyOptional())
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
A DepthwiseConvolution2dDescriptor for the DepthwiseConvolution2dLayer.
constexpr unsigned int MaxNumOfTensorDimensions
uint32_t m_PadLeft
Padding left value in the width dimension.
unsigned int GetNumElements() const
std::vector< TensorIdRawPtr > TensorIdRawPtrVector
uint32_t m_PadRight
Padding right value in the width dimension.