23 #include <schema_generated.h> 25 #include <flatbuffers/flexbuffers.h> 27 #include <boost/format.hpp> 28 #include <boost/numeric/conversion/cast.hpp> 29 #include <boost/filesystem.hpp> 37 #define ARMNN_THROW_PARSE_EXCEPTION(msg) \ 39 throw armnn::ParseException( static_cast<const std::stringstream&>( std::stringstream() << msg \ 41 << CHECK_LOCATION().AsString()).str()); \ 44 using namespace armnn;
// Sentinel operator index for operators the parser creates itself (no
// corresponding entry in the model); CheckModel below exempts this value
// from the operator-index bounds check.
51 const uint32_t VIRTUAL_OPERATOR_ID = std::numeric_limits<uint32_t>::max();
// CheckSubgraph / CheckModel validation helpers (garbled extract: embedded
// numbers are original file line numbers; signatures/braces were dropped).
// Both throw ParseException with a formatted message on invalid input.
// CheckSubgraph: null model or out-of-range subgraph index.
57 if (model.get() ==
nullptr)
61 boost::format(
"%1% was called with invalid (null) model. " 62 "Possible reason is that the model is not yet loaded and Unpack(ed). " 63 "subgraph:%2% at %3%") %
68 else if (subgraphIndex >= model->subgraphs.size())
72 boost::format(
"%1% was called with an invalid subgraph index. " 73 "subgraph:%2% at %3%") %
// CHECK_SUBGRAPH wraps CheckSubgraph with the call-site location.
// CheckModel additionally validates an operator index (see below).
80 #define CHECK_SUBGRAPH(MODEL, SUBGRAPH_INDEX) \ 81 CheckSubgraph(MODEL, SUBGRAPH_INDEX, CHECK_LOCATION()) 88 if (model.get() ==
nullptr)
92 boost::format(
"%1% was called with invalid (null) model. " 93 "Possible reason is that the model is not yet loaded and Unpack(ed). " 94 "subgraph:%2% operator:%3% at %4%") %
100 else if (subgraphIndex >= model->subgraphs.size())
104 boost::format(
"%1% was called with an invalid subgraph index. " 105 "subgraph:%2% operator:%3% at %4%") %
// Operator index must be in range unless it is the VIRTUAL_OPERATOR_ID
// sentinel (parser-synthesized operators carry no real index).
111 else if (operatorIndex >= model->subgraphs[subgraphIndex]->operators.size() &&
112 operatorIndex != VIRTUAL_OPERATOR_ID)
116 boost::format(
"%1% was called with an invalid operator index. " 117 "subgraph:%2% operator:%3% at %4%") %
// CHECK_MODEL wraps CheckModel with the call-site location.
// CheckTensor: asserts model/subgraph validity (programmer error), then
// throws ParseException for an out-of-range tensor index (input error).
125 #define CHECK_MODEL(MODEL, SUBGRAPH_INDEX, OPERATOR_INDEX) \ 126 CheckModel(MODEL, SUBGRAPH_INDEX, OPERATOR_INDEX, CHECK_LOCATION()) 129 size_t subgraphIndex,
135 ARMNN_ASSERT_MSG(model.get() !=
nullptr,
"Expecting a valid model in this function");
139 ARMNN_ASSERT_MSG(subgraphIndex < model->subgraphs.size(),
"Expecting a valid subgraph index");
142 if (tensorIndex >= model->subgraphs[subgraphIndex]->tensors.size())
146 boost::format(
"%1% was called with an invalid tensor index. " 147 "subgraph:%2% tensor:%3% at %4%") %
// CheckTensorPtr: rejects a null raw tensor pointer; CHECK_TENSOR_PTR
// wraps it with the call-site location. CheckBuffer starts below.
155 #define CHECK_TENSOR(MODEL, SUBGRAPH_INDEX, TENSOR_INDEX) \ 156 CheckTensor(MODEL, SUBGRAPH_INDEX, TENSOR_INDEX, CHECK_LOCATION()) 161 if (rawPtr ==
nullptr)
165 boost::format(
"%1% was called with a null tensor pointer. " 173 #define CHECK_TENSOR_PTR(TENSOR_PTR) \ 174 CheckTensorPtr(TENSOR_PTR, CHECK_LOCATION()) 180 if (model.get() ==
// CheckBuffer: rejects (in order) a null model, an out-of-range buffer
// index, and a null buffer entry inside the model's buffer table.
nullptr)
184 boost::format(
"%1% was called with invalid (null) model. " 185 "Possible reason is that the model is not yet loaded and Unpack(ed). " 186 "buffer:%2% at %3%") %
191 else if (bufferIndex >= model->buffers.size())
195 boost::format(
"%1% was called with an invalid buffer index. " 196 "buffer index:%2% at %3%") %
201 else if (model->buffers[bufferIndex].get() ==
nullptr)
205 boost::format(
// NOTE(review): format string uses %1% and %3% but apparently no %2% —
// verify the argument list at the dropped lines matches upstream.
"The buffer #%1% is null. %3%") %
// CHECK_BUFFER wraps CheckBuffer with the call-site location.
211 #define CHECK_BUFFER(MODEL, BUFFER_INDEX) \ 212 CheckBuffer(MODEL, BUFFER_INDEX, CHECK_LOCATION()) 214 void CheckBufferSize(TfLiteParser::BufferRawPtr bufferPtr,
// CheckBufferSize: a null buffer pointer throws immediately; otherwise the
// buffer's byte count is compared against tensorInfo.GetNumBytes() and a
// detailed diagnostic message is assembled on mismatch.
219 if (bufferPtr ==
nullptr)
223 boost::format(
"BufferPtr is null for buffer:%1%. %2%") %
// Builds the mismatch message: actual bytes vs expected bytes for the tensor.
230 std::stringstream ss;
231 ss <<
"Buffer #" << bufferId <<
" has " << bufferPtr->data.size() <<
" bytes. " 232 <<
"For tensor: " << tensorInfo.
GetShape()
233 <<
" expecting: " << tensorInfo.
GetNumBytes() <<
// IsActivationSupported: switch on the fused-activation enum starts here.
" bytes and " 239 #define CHECK_BUFFER_SIZE(BUFFER_PTR, TENSOR_INFO, BUFFER_ID) \ 240 CheckBufferSize(BUFFER_PTR, TENSOR_INFO, BUFFER_ID, CHECK_LOCATION()) 244 switch(activationType)
// Fused activations the parser can handle: NONE, RELU, RELU6 and TANH
// (presumably the dropped lines return true here and false otherwise —
// TODO confirm against upstream).
246 case tflite::ActivationFunctionType_NONE:
247 case tflite::ActivationFunctionType_RELU:
248 case tflite::ActivationFunctionType_RELU6:
249 case tflite::ActivationFunctionType_TANH:
// CHECK_SUPPORTED_FUSED_ACTIVATION throws for any other fused activation.
// NOTE(review): "suppport" typo lives in the runtime message — fix upstream,
// not here. AsUnsignedVector converts int32 shape vectors to unsigned.
260 #define CHECK_SUPPORTED_FUSED_ACTIVATION(OPTION, SUBGRAPH_INDEX, OPERATOR_INDEX) \ 262 if (IsActivationSupported(OPTION->fused_activation_function) == false) \ 264 throw ParseException( \ 266 boost::format("TfLite parser doesn't suppport fused activation: " \ 267 "%1%/%2% in %3% subgraph:%4% operator:%5% at %6%") % \ 268 OPTION->fused_activation_function % \ 269 tflite::EnumNameActivationFunctionType(\ 270 OPTION->fused_activation_function) % \ 274 CHECK_LOCATION().FileLine())); \ 279 std::vector<unsigned int> AsUnsignedVector(
const std::vector<int32_t> & in)
281 std::vector<unsigned int> result;
// reserve() avoids reallocation while the (dropped) copy loop fills result.
282 result.reserve(in.size());
294 uint32_t& paddingFront,
295 uint32_t& paddingBack,
296 tflite::Padding padding)
300 if (padding == tflite::Padding_SAME)
302 uint32_t outputSize = (inputSize + stride - 1) / stride;
303 uint32_t dilatedSize = filterSize + (dilation - 1) * (filterSize - 1);
304 uint32_t temp = (outputSize - 1) * stride + dilatedSize;
305 if (temp > inputSize)
307 paddingFront = (temp - inputSize) / 2;
308 paddingBack = (temp - inputSize) - paddingFront;
// ToTensorInfo: maps a tflite tensor's type to an armnn data type, then
// builds the TensorInfo including quantization parameters. Garbled extract:
// dropped lines include the mapped armnn type per case and the throw for
// unsupported types.
319 switch (tensorPtr->type)
321 case tflite::TensorType_UINT8:
324 case tflite::TensorType_FLOAT32:
327 case tflite::TensorType_INT8:
// INT8 handling branches on per-tensor (one zero point) vs per-axis quantization.
328 if (tensorPtr->quantization->zero_point.size() == 1)
339 case tflite::TensorType_INT16:
342 case tflite::TensorType_INT32:
351 boost::format(
"Unsupported data type %1% = %2% for tensor: %3%. %4%") %
353 tflite::EnumNameTensorType(tensorPtr->type) %
// A scalar (rank-0) tflite tensor is represented as shape {1} in armnn.
358 std::vector<unsigned int> safeShape = shapes;
359 if (safeShape.size() == 0)
361 safeShape.push_back(1);
// Per-tensor quantization: a single scale (optional) and zero point.
364 float quantizationScale = 0.0f;
365 int32_t quantizationOffset = 0;
367 if (tensorPtr->quantization.get())
369 if (tensorPtr->quantization->scale.size() <= 1)
374 if (tensorPtr->quantization->scale.size() == 1)
376 quantizationScale = tensorPtr->quantization->scale[0];
378 if (tensorPtr->quantization->zero_point.size() == 1)
// Per-axis (multiple scales) quantization: copy all scales/offsets and
// record the quantized dimension.
395 std::vector<float> quantizationScales;
396 std::vector<int32_t> quantizationOffsets;
399 std::copy(tensorPtr->quantization->scale.begin(),
400 tensorPtr->quantization->scale.end(),
401 std::back_inserter(quantizationScales));
409 tensorPtr->quantization->quantized_dimension)]);
// Convenience overload: derive dimensions from the tensor's own shape.
427 auto const & dimensions = AsUnsignedVector(tensorPtr->shape);
428 return ToTensorInfo(tensorPtr, dimensions, dimensionMappings);
// CreateConstTensorImpl<T>: copies a tflite buffer into freshly-allocated
// storage and wraps it in an armnn::ConstTensor. Returns the tensor paired
// with the owning unique_ptr so the caller keeps the data alive.
432 std::pair<armnn::ConstTensor, std::unique_ptr<T[]>>
433 CreateConstTensorImpl(TfLiteParser::BufferRawPtr bufferPtr,
442 boost::format(
"Buffer for buffer:%1% is null") % tensorPtr->buffer).c_str());
// One path copies through a typed pointer (dropped lines suggest a permuted
// copy), the other is a straight byte copy of the whole tensor.
450 reinterpret_cast<const T*
>(bufferPtr->data.data()), data.get(),
sizeof(T));
454 ::memcpy(data.get(), bufferPtr->data.data(), tensorInfo.
GetNumBytes());
457 return std::make_pair(
ConstTensor(tensorInfo, data.get()), std::move(data));
// Shape-comparison fragment: checks size, then that every expected entry is
// non-negative and equal to the actual dimension.
470 if (actualSize != expected.size())
475 for (
unsigned int i = 0u; i < actualSize; i++)
477 if (expected[i] < 0 ||
478 actual[i] != static_cast<unsigned int>(expected[i]))
// TfLiteParser constructor: every builtin operator defaults to
// ParseUnsupportedOperator, then supported builtins are registered into the
// dispatch table; CUSTOM ops dispatch again through m_CustomParserFunctions.
, m_Network(nullptr, nullptr)
492 , m_ParserFunctions(tflite::BuiltinOperator_MAX+1, &
TfLiteParser::ParseUnsupportedOperator)
495 m_ParserFunctions[tflite::BuiltinOperator_ADD] = &TfLiteParser::ParseAdd;
496 m_ParserFunctions[tflite::BuiltinOperator_AVERAGE_POOL_2D] = &TfLiteParser::ParseAveragePool2D;
497 m_ParserFunctions[tflite::BuiltinOperator_BATCH_TO_SPACE_ND] = &TfLiteParser::ParseBatchToSpaceND;
498 m_ParserFunctions[tflite::BuiltinOperator_CONCATENATION] = &TfLiteParser::ParseConcatenation;
499 m_ParserFunctions[tflite::BuiltinOperator_CONV_2D] = &TfLiteParser::ParseConv2D;
500 m_ParserFunctions[tflite::BuiltinOperator_CUSTOM] = &TfLiteParser::ParseCustomOperator;
501 m_ParserFunctions[tflite::BuiltinOperator_DEPTHWISE_CONV_2D] = &TfLiteParser::ParseDepthwiseConv2D;
502 m_ParserFunctions[tflite::BuiltinOperator_DEQUANTIZE] = &TfLiteParser::ParseDequantize;
503 m_ParserFunctions[tflite::BuiltinOperator_EXP] = &TfLiteParser::ParseExp;
504 m_ParserFunctions[tflite::BuiltinOperator_FULLY_CONNECTED] = &TfLiteParser::ParseFullyConnected;
505 m_ParserFunctions[tflite::BuiltinOperator_LOGISTIC] = &TfLiteParser::ParseLogistic;
506 m_ParserFunctions[tflite::BuiltinOperator_L2_NORMALIZATION] = &TfLiteParser::ParseL2Normalization;
507 m_ParserFunctions[tflite::BuiltinOperator_MAX_POOL_2D] = &TfLiteParser::ParseMaxPool2D;
508 m_ParserFunctions[tflite::BuiltinOperator_MAXIMUM] = &TfLiteParser::ParseMaximum;
509 m_ParserFunctions[tflite::BuiltinOperator_MEAN] = &TfLiteParser::ParseMean;
510 m_ParserFunctions[tflite::BuiltinOperator_MINIMUM] = &TfLiteParser::ParseMinimum;
511 m_ParserFunctions[tflite::BuiltinOperator_MUL] = &TfLiteParser::ParseMul;
512 m_ParserFunctions[tflite::BuiltinOperator_PACK] = &TfLiteParser::ParsePack;
513 m_ParserFunctions[tflite::BuiltinOperator_PAD] = &TfLiteParser::ParsePad;
514 m_ParserFunctions[tflite::BuiltinOperator_QUANTIZE] = &TfLiteParser::ParseQuantize;
515 m_ParserFunctions[tflite::BuiltinOperator_RELU] = &TfLiteParser::ParseRelu;
516 m_ParserFunctions[tflite::BuiltinOperator_RELU6] = &TfLiteParser::ParseRelu6;
517 m_ParserFunctions[tflite::BuiltinOperator_RESHAPE] = &TfLiteParser::ParseReshape;
518 m_ParserFunctions[tflite::BuiltinOperator_RESIZE_BILINEAR] = &TfLiteParser::ParseResizeBilinear;
519 m_ParserFunctions[tflite::BuiltinOperator_RESIZE_NEAREST_NEIGHBOR] = &TfLiteParser::ParseResizeNearestNeighbor;
520 m_ParserFunctions[tflite::BuiltinOperator_SLICE] = &TfLiteParser::ParseSlice;
521 m_ParserFunctions[tflite::BuiltinOperator_SOFTMAX] = &TfLiteParser::ParseSoftmax;
522 m_ParserFunctions[tflite::BuiltinOperator_SPACE_TO_BATCH_ND] = &TfLiteParser::ParseSpaceToBatchND;
523 m_ParserFunctions[tflite::BuiltinOperator_SPLIT] = &TfLiteParser::ParseSplit;
524 m_ParserFunctions[tflite::BuiltinOperator_SPLIT_V] = &TfLiteParser::ParseSplitV;
525 m_ParserFunctions[tflite::BuiltinOperator_SQUEEZE] = &TfLiteParser::ParseSqueeze;
526 m_ParserFunctions[tflite::BuiltinOperator_STRIDED_SLICE] = &TfLiteParser::ParseStridedSlice;
527 m_ParserFunctions[tflite::BuiltinOperator_SUB] = &TfLiteParser::ParseSub;
528 m_ParserFunctions[tflite::BuiltinOperator_TANH] = &TfLiteParser::ParseTanH;
529 m_ParserFunctions[tflite::BuiltinOperator_TRANSPOSE] = &TfLiteParser::ParseTranspose;
530 m_ParserFunctions[tflite::BuiltinOperator_TRANSPOSE_CONV] = &TfLiteParser::ParseTransposeConv;
531 m_ParserFunctions[tflite::BuiltinOperator_UNPACK] = &TfLiteParser::ParseUnpack;
// Custom (non-builtin) operator handlers, keyed by the op's custom_code string.
534 m_CustomParserFunctions[
"TFLite_Detection_PostProcess"] = &TfLiteParser::ParseDetectionPostProcess;
// ResetParser: clears per-parse state so the instance can be reused
// (dropped lines presumably also reset m_Model/m_Network — TODO confirm).
537 void TfLiteParser::ResetParser()
541 m_SubgraphConnections.clear();
// AddBroadcastReshapeLayer: for binary ops with mismatched ranks, inserts a
// Reshape in front of the lower-rank input so both inputs have equal rank
// (new leading dimensions are filled with 1s, original dims copied to the back).
544 void TfLiteParser::AddBroadcastReshapeLayer(
size_t subgraphIndex,
545 size_t operatorIndex,
548 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
551 const auto & subgraphPtr = m_Model->subgraphs[subgraphIndex];
552 const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
557 TensorRawPtr tensorPtr = subgraphPtr->tensors[reshapedInputId].get();
559 TensorRawPtr tensorPtr1 = subgraphPtr->tensors[inputId].get();
// Swap so that reshapedInputId always refers to the input that gets reshaped.
566 uint32_t
id = reshapedInputId;
567 reshapedInputId = inputId;
576 std::vector<unsigned> reshapedDim;
579 reshapedDim.push_back(reshapedTensorInfo.
GetShape()[i]);
// Right-align the original dims into a rank-numDimensions shape of 1s.
582 std::vector<unsigned int> reshapedDimensions(numDimensions, 1);
583 std::copy_backward (reshapedDim.begin(), reshapedDim.end(), reshapedDimensions.end());
587 std::string layerName = boost::str(boost::format(
"Reshape_for:%1%") % layer->
GetName());
// Re-route the reshaped tensor through the new layer's slots.
595 RegisterInputSlots(subgraphIndex, operatorIndex, reshapeLayer, {reshapedInputId});
598 RegisterConsumerOfTensor(subgraphIndex, inputId, input1Slot);
// CreateNetworkFromBinary / CreateNetworkFromBinaryFile tails: both funnel
// into CreateNetworkFromModel after loading m_Model (loading lines dropped).
605 return CreateNetworkFromModel();
612 return CreateNetworkFromModel();
// CreateNetworkFromModel: builds the armnn network from the unpacked model.
617 m_Network = INetwork::Create();
// Parse errors are accumulated per operator and reported together.
620 bool failedToCreate =
false;
621 std::stringstream errors;
// Only single-subgraph models are supported.
623 if (m_Model->subgraphs.size() != 1)
627 boost::format(
"Current TfLite parser only supports 1 subgraph. Current one has: %1% %2%") %
628 m_Model->subgraphs.size() %
632 size_t subgraphIndex = 0;
633 for (
SubgraphPtr const & subgraph : m_Model->subgraphs)
// One TensorSlots record per tensor in the subgraph.
635 m_SubgraphConnections.emplace_back(subgraph->tensors.size());
637 size_t operatorIndex = 0;
642 auto const & opCodePtr = m_Model->operator_codes[op->opcode_index];
643 auto builtinCode = opCodePtr->builtin_code;
645 if (builtinCode > tflite::BuiltinOperator_MAX)
649 boost::format(
"Operator code %1% is out of range 0-%2%. " 650 "subgraph:%3% operator idx:%4%. %5%") %
652 tflite::BuiltinOperator_MAX %
// Dispatch through the table built in the constructor.
659 auto & parserFunction = m_ParserFunctions[builtinCode];
660 (this->*parserFunction)(subgraphIndex, operatorIndex);
// On exception: record the failure but keep parsing remaining operators.
664 failedToCreate =
true;
665 std::stringstream errorString;
667 errorString <<
"Failed to parse operator #" << operatorIndex
668 <<
" within subgraph #" << subgraphIndex
669 <<
" error: " << e.
what();
672 errors << errorString.str() <<
"\n";
677 SetupInputLayers(subgraphIndex);
678 SetupOutputLayers(subgraphIndex);
679 SetupConstantLayers(subgraphIndex);
// Connection pass: wire every recorded producer output slot to all of its
// registered consumer input slots.
691 for (
size_t subgraphIndex = 0; subgraphIndex < m_SubgraphConnections.size(); ++subgraphIndex)
693 for (
size_t tensorIndex = 0; tensorIndex < m_SubgraphConnections[subgraphIndex].size(); ++tensorIndex)
695 if (m_SubgraphConnections[subgraphIndex][tensorIndex].outputSlot !=
nullptr)
697 for (
size_t inputSlotIdx = 0;
698 inputSlotIdx < m_SubgraphConnections[subgraphIndex][tensorIndex].inputSlots.size();
701 m_SubgraphConnections[subgraphIndex][tensorIndex].outputSlot->Connect(
702 *(m_SubgraphConnections[subgraphIndex][tensorIndex].inputSlots[inputSlotIdx]));
// Transfers ownership of the built network to the caller.
708 return std::move(m_Network);
// RegisterProducerOfTensor: records the single output slot that produces a
// tensor; a second registration for the same tensor is a parse error.
711 void TfLiteParser::RegisterProducerOfTensor(
size_t subgraphIndex,
716 ARMNN_ASSERT(m_SubgraphConnections.size() > subgraphIndex);
717 ARMNN_ASSERT(m_SubgraphConnections[subgraphIndex].size() > tensorIndex);
719 TensorSlots & tensorSlots = m_SubgraphConnections[subgraphIndex][tensorIndex];
// Each tensor may have exactly one producer.
722 if (tensorSlots.outputSlot !=
nullptr)
725 boost::format(
"Another layer has already registered itself as the producer of " 726 "subgraph:%1% tensor:%2% %3%") %
732 tensorSlots.outputSlot = slot;
735 void TfLiteParser::RegisterConsumerOfTensor(
size_t subgraphIndex,
740 ARMNN_ASSERT(m_SubgraphConnections.size() > subgraphIndex);
741 ARMNN_ASSERT(m_SubgraphConnections[subgraphIndex].size() > tensorIndex);
743 TensorSlots & tensorSlots = m_SubgraphConnections[subgraphIndex][tensorIndex];
744 tensorSlots.inputSlots.push_back(slot);
// ParseCustomOperator: looks up the op's custom_code in
// m_CustomParserFunctions; unknown custom ops fall back to
// ParseUnsupportedOperator.
747 void TfLiteParser::ParseCustomOperator(
size_t subgraphIndex,
size_t operatorIndex)
749 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
752 auto customParserFunction = &TfLiteParser::ParseUnsupportedOperator;
755 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
756 const auto& customCode = m_Model->operator_codes[operatorPtr->opcode_index]->custom_code;
759 auto iterator = m_CustomParserFunctions.find(customCode);
760 if (iterator != m_CustomParserFunctions.end())
762 customParserFunction = iterator->second;
// Dispatch through the selected member-function pointer.
766 (this->*customParserFunction)(subgraphIndex, operatorIndex);
// ParseUnsupportedOperator: throws unless the parser was constructed with
// m_StandInLayerForUnsupported, in which case a StandIn layer with the
// operator's input/output arity is inserted instead.
769 void TfLiteParser::ParseUnsupportedOperator(
size_t subgraphIndex,
size_t operatorIndex)
771 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
773 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
775 auto opcodeIndex = operatorPtr->opcode_index;
776 auto opcode = m_Model->operator_codes[opcodeIndex]->builtin_code;
// Without the stand-in option, unsupported operators are a hard error.
778 if (!m_Options || !m_Options.value().m_StandInLayerForUnsupported)
783 boost::format(
"Operator not supported. " 784 "subgraph:%1% operator:%2% " 785 "opcode_index:%3% opcode:%4% / %5% %6%") %
790 tflite::EnumNameBuiltinOperator(opcode) %
794 auto inputs =
GetInputs(m_Model, subgraphIndex, operatorIndex);
795 auto outputs =
GetOutputs(m_Model, subgraphIndex, operatorIndex);
801 auto layerName = boost::str(boost::format(
"StandIn:%1%:%2%:%3%") % subgraphIndex % operatorIndex % opcode);
804 IConnectableLayer* layer = m_Network->AddStandInLayer(descriptor, layerName.c_str());
// Propagate output tensor infos onto the stand-in layer's output slots.
805 for (
unsigned int i = 0u; i < numOutputs; ++i)
810 auto inputTensorIds = AsUnsignedVector(
GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
811 auto outputTensorIds = AsUnsignedVector(
GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
813 RegisterInputSlots(subgraphIndex, operatorIndex, layer, inputTensorIds);
814 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIds);
// ParseConv2D: builds a Convolution2d layer from a CONV_2D operator.
// Weights (inputs[1]) and optional bias (inputs[2]) become const tensors;
// SAME/VALID padding is computed from input and filter extents.
817 void TfLiteParser::ParseConv2D(
size_t subgraphIndex,
size_t operatorIndex)
819 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
821 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
822 const auto *
options = operatorPtr->builtin_options.AsConv2DOptions();
834 auto inputs =
GetInputs(m_Model, subgraphIndex, operatorIndex);
837 auto outputs =
GetOutputs(m_Model, subgraphIndex, operatorIndex);
// Input is NHWC: dims [1] and [2] are height and width.
844 unsigned int inputHeight = inputTensorInfo.GetShape()[1];
845 unsigned int inputWidth = inputTensorInfo.GetShape()[2];
849 unsigned int filterHeight = filterTensorInfo.
GetShape()[1];
850 unsigned int filterWidth = filterTensorInfo.
GetShape()[2];
857 auto filterTensorAndData = CreateConstTensor(inputs[1],
862 auto layerName = boost::str(boost::format(
"Conv2D:%1%:%2%") % subgraphIndex % operatorIndex);
// Three inputs means a bias tensor is present.
864 if (inputs.size() == 3)
868 auto biasTensorAndData = CreateConstTensor(inputs[2],
871 layer = m_Network->AddConvolution2dLayer(desc,
872 filterTensorAndData.first,
878 layer = m_Network->AddConvolution2dLayer(desc,
879 filterTensorAndData.first,
891 auto inputTensorIndexes = AsUnsignedVector(
GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
// Only the data input is registered; weights/bias are baked into the layer.
892 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
// The fused activation (if any) becomes a trailing activation layer.
894 layer = AddFusedActivationLayer(layer, 0,
options->fused_activation_function);
896 auto outputTensorIndexes = AsUnsignedVector(
GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
897 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
// ParseDepthwiseConv2D: like ParseConv2D but for DEPTHWISE_CONV_2D; the
// filter tensor is reshaped/permuted into armnn's depthwise weight layout
// before the const tensor is created.
900 void TfLiteParser::ParseDepthwiseConv2D(
size_t subgraphIndex,
size_t operatorIndex)
902 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
904 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
905 const auto *
options = operatorPtr->builtin_options.AsDepthwiseConv2DOptions();
916 auto inputs =
GetInputs(m_Model, subgraphIndex, operatorIndex);
918 auto outputs =
GetOutputs(m_Model, subgraphIndex, operatorIndex);
// NHWC input: dims [1]/[2] are height/width.
930 unsigned int inputHeight = inputTensorInfo.
GetShape()[1];
931 unsigned int inputWidth = inputTensorInfo.
GetShape()[2];
934 unsigned int filterHeight = filterTensorInfo.
GetShape()[1];
935 unsigned int filterWidth = filterTensorInfo.
GetShape()[2];
// Reshape the filter into the layout armnn expects for depthwise weights.
938 filterTensorInfo.
SetShape({ filterHeight,
948 auto filterTensorAndData = CreateConstTensor(inputs[1], filterTensorInfo, permutationVector);
950 auto layerName = boost::str(boost::format(
"DepthwiseConv2D:%1%:%2%") % subgraphIndex % operatorIndex);
// Three inputs means a bias tensor is present.
952 if (inputs.size() == 3)
956 auto biasTensorAndData = CreateConstTensor(inputs[2],
959 layer = m_Network->AddDepthwiseConvolution2dLayer(desc,
960 filterTensorAndData.first,
966 layer = m_Network->AddDepthwiseConvolution2dLayer(desc,
967 filterTensorAndData.first,
978 auto inputTensorIndexes = AsUnsignedVector(
GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
979 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
// The fused activation (if any) becomes a trailing activation layer.
981 layer = AddFusedActivationLayer(layer, 0,
options->fused_activation_function);
983 auto outputTensorIndexes = AsUnsignedVector(
GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
984 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
// ParseDequantize: single-input single-output DEQUANTIZE operator; the
// layer-creation lines were dropped by extraction.
987 void TfLiteParser::ParseDequantize(
size_t subgraphIndex,
size_t operatorIndex)
989 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
991 auto inputs =
GetInputs(m_Model, subgraphIndex, operatorIndex);
994 auto outputs =
GetOutputs(m_Model, subgraphIndex, operatorIndex);
997 auto layerName = boost::str(boost::format(
"Dequantize:%1%:%2%") % subgraphIndex % operatorIndex);
1005 auto inputTensorIndexes = AsUnsignedVector(
GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1006 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1008 auto outputTensorIndexes = AsUnsignedVector(
GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1009 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
// ParseExp: EXP maps to an ElementwiseUnary layer (exp operation in desc).
1012 void TfLiteParser::ParseExp(
size_t subgraphIndex,
size_t operatorIndex)
1014 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1016 auto inputs =
GetInputs(m_Model, subgraphIndex, operatorIndex);
1019 auto outputs =
GetOutputs(m_Model, subgraphIndex, operatorIndex);
1022 auto layerName = boost::str(boost::format(
"Exp:%1%:%2%") % subgraphIndex % operatorIndex);
1026 IConnectableLayer* layer = m_Network->AddElementwiseUnaryLayer(desc, layerName.c_str());
1032 auto inputTensorIndexes = AsUnsignedVector(
GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1033 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1035 auto outputTensorIndexes = AsUnsignedVector(
GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1036 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
// ParseTranspose: TRANSPOSE with an optional second input holding the
// permutation vector, read straight out of its constant buffer.
1039 void TfLiteParser::ParseTranspose(
size_t subgraphIndex,
size_t operatorIndex)
1041 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1043 auto inputs =
GetInputs(m_Model, subgraphIndex, operatorIndex);
1046 auto outputs =
GetOutputs(m_Model, subgraphIndex, operatorIndex);
1050 auto layerName = boost::str(boost::format(
"Transpose:%1%:%2%") % subgraphIndex % operatorIndex);
// Second input (if present) supplies the permutation as raw buffer bytes.
1054 if (inputs.size() == 2)
1059 std::vector<unsigned int> permuteShape(numPermVecElements);
1060 ::memcpy(permuteShape.data(), permuteBufferPtr->data.data(), permuteTensorInfo.
GetNumBytes());
1066 layer = m_Network->AddTransposeLayer(desc, layerName.c_str());
1071 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1073 auto inputTensorIndexes = AsUnsignedVector(
GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1074 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1076 auto outputTensorIndexes = AsUnsignedVector(
GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1077 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
// ParseTransposeConv: TRANSPOSE_CONV. Note the actual data input is
// inputs[2] (inputs[0] is the output shape, inputs[1] the weights), hence
// inputTensorIndexes[2] in the input-slot registration below.
1080 void TfLiteParser::ParseTransposeConv(
size_t subgraphIndex,
size_t operatorIndex)
1082 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1084 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1085 const auto *
options = operatorPtr->builtin_options.AsTransposeConvOptions();
1093 auto inputs =
GetInputs(m_Model, subgraphIndex, operatorIndex);
1096 auto outputs =
GetOutputs(m_Model, subgraphIndex, operatorIndex);
// NHWC input: dims [1]/[2] are height/width.
1103 const unsigned int inputHeight = inputTensorInfo.GetShape()[1];
1104 const unsigned int inputWidth = inputTensorInfo.GetShape()[2];
1106 const unsigned int filterHeight = filterTensorInfo.
GetShape()[1];
1107 const unsigned int filterWidth = filterTensorInfo.
GetShape()[2];
1125 auto filterTensorAndData = CreateConstTensor(inputs[1],
1130 auto layerName = boost::str(boost::format(
"TransposeConv:%1%:%2%") % subgraphIndex % operatorIndex);
1132 layer = m_Network->AddTransposeConvolution2dLayer(desc,
1133 filterTensorAndData.first,
1143 auto inputTensorIndexes = AsUnsignedVector(
GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1144 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[2]});
1146 auto outputTensorIndexes = AsUnsignedVector(
GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1147 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1150 void TfLiteParser::ParseAveragePool2D(
size_t subgraphIndex,
size_t operatorIndex)
1152 ParsePool(subgraphIndex, operatorIndex, PoolingAlgorithm::Average);
// ParseBatchToSpaceND: block-shape and crops come from constant buffers;
// crops are flattened pairs, re-grouped into (front, back) per dimension.
1155 void TfLiteParser::ParseBatchToSpaceND(
size_t subgraphIndex,
size_t operatorIndex)
1157 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1159 auto inputs =
GetInputs(m_Model, subgraphIndex, operatorIndex);
1162 auto outputs =
GetOutputs(m_Model, subgraphIndex, operatorIndex);
// Raw byte copies out of the constant buffers backing inputs[1]/inputs[2].
1171 std::vector<unsigned int> blockShape(blockShapeTensorInfo.GetNumElements());
1172 ::memcpy(blockShape.data(), blockShapeBufferPtr->data.data(), blockShapeTensorInfo.GetNumBytes());
1174 std::vector<unsigned int> cropsVector(cropsTensorInfo.
GetNumElements());
1175 ::memcpy(cropsVector.data(), cropsBufferPtr->data.data(), cropsTensorInfo.
GetNumBytes());
// Pair up consecutive crop values: (crop_front, crop_back) per dimension.
1178 std::vector<std::pair<unsigned int, unsigned int>> crops;
1179 for (
unsigned int i = 0; i < cropsTensorInfo.
GetNumElements() / step; ++i)
1181 crops.emplace_back(cropsVector[i * step], cropsVector[i * step + 1]);
1191 auto layerName = boost::str(boost::format(
"BatchToSpaceND:%1%:%2%") % subgraphIndex % operatorIndex);
1192 IConnectableLayer* layer = m_Network->AddBatchToSpaceNdLayer(desc, layerName.c_str());
1196 auto inputTensorIndexes = AsUnsignedVector(
GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1197 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1199 auto outputTensorIndexes = AsUnsignedVector(
GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1200 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
// ParseL2Normalization: single-input single-output L2_NORMALIZATION mapped
// directly onto an armnn L2Normalization layer.
1203 void TfLiteParser::ParseL2Normalization(
size_t subgraphIndex,
size_t operatorIndex)
1205 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1207 auto inputs =
GetInputs(m_Model, subgraphIndex, operatorIndex);
1210 auto outputs =
GetOutputs(m_Model, subgraphIndex, operatorIndex);
1215 auto layerName = boost::str(boost::format(
"L2Normalization:%1%:%2%") % subgraphIndex % operatorIndex);
1216 IConnectableLayer* layer = m_Network->AddL2NormalizationLayer(desc, layerName.c_str());
1223 auto inputTensorIndexes = AsUnsignedVector(
GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1224 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1226 auto outputTensorIndexes = AsUnsignedVector(
GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1227 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
1230 void TfLiteParser::ParseMaxPool2D(
size_t subgraphIndex,
size_t operatorIndex)
1232 ParsePool(subgraphIndex, operatorIndex, PoolingAlgorithm::Max);
// ParseMaximum: elementwise MAXIMUM; if the two input ranks differ, a
// broadcast reshape layer is inserted instead of registering both inputs
// directly.
1235 void TfLiteParser::ParseMaximum(
size_t subgraphIndex,
size_t operatorIndex)
1237 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1239 auto inputs =
GetInputs(m_Model, subgraphIndex, operatorIndex);
1242 auto outputs =
GetOutputs(m_Model, subgraphIndex, operatorIndex);
1248 auto layerName = boost::str(boost::format(
"Maximum:%1%:%2%") % subgraphIndex % operatorIndex);
1254 auto inputTensorIndexes = AsUnsignedVector(
GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
// Rank mismatch => broadcast path; equal ranks register both inputs directly.
1255 if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.
GetNumDimensions())
1257 AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer);
1261 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
1264 auto outputTensorIndexes = AsUnsignedVector(
GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1265 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
// ParseMinimum: identical structure for elementwise MINIMUM.
1268 void TfLiteParser::ParseMinimum(
size_t subgraphIndex,
size_t operatorIndex)
1270 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1272 auto inputs =
GetInputs(m_Model, subgraphIndex, operatorIndex);
1275 auto outputs =
GetOutputs(m_Model, subgraphIndex, operatorIndex);
1281 auto layerName = boost::str(boost::format(
"Minimum:%1%:%2%") % subgraphIndex % operatorIndex);
1287 auto inputTensorIndexes = AsUnsignedVector(
GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1288 if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.
GetNumDimensions())
1290 AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer);
1294 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
1297 auto outputTensorIndexes = AsUnsignedVector(
GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1298 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
// ParsePool: shared implementation for AVERAGE_POOL_2D and MAX_POOL_2D.
// The algorithm parameter selects the pooling type and the layer name.
1301 void TfLiteParser::ParsePool(
size_t subgraphIndex,
1302 size_t operatorIndex,
1305 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1307 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1308 const auto *
options = operatorPtr->builtin_options.AsPool2DOptions();
1312 std::string layerName;
// Name reflects the concrete pooling algorithm for debuggability.
1316 case PoolingAlgorithm::Average:
1318 boost::str(boost::format(
"AveragePool2D:%1%:%2%") % subgraphIndex % operatorIndex);
1320 case PoolingAlgorithm::Max:
1322 boost::str(boost::format(
"MaxPool2D:%1%:%2%") % subgraphIndex % operatorIndex);
1339 auto inputs =
GetInputs(m_Model, subgraphIndex, operatorIndex);
// NHWC input: dims [1]/[2] are height/width, used for SAME padding calc.
1344 unsigned int inputHeight = inputTensorInfo.GetShape()[1];
1345 unsigned int inputWidth = inputTensorInfo.GetShape()[2];
1352 auto outputs =
GetOutputs(m_Model, subgraphIndex, operatorIndex);
1355 IConnectableLayer* layer = m_Network->AddPooling2dLayer(desc, layerName.c_str());
1360 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1364 auto inputTensorIndexes = AsUnsignedVector(
GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1365 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
// The fused activation (if any) becomes a trailing activation layer.
1367 layer = AddFusedActivationLayer(layer, 0,
options->fused_activation_function);
1369 auto outputTensorIndexes = AsUnsignedVector(
GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1370 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
// ParseSlice: begin (inputs[1]) and size (inputs[2]) are read from their
// constant buffers into the Slice descriptor.
1373 void TfLiteParser::ParseSlice(
size_t subgraphIndex,
size_t operatorIndex)
1375 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1377 auto inputs =
GetInputs(m_Model, subgraphIndex, operatorIndex);
1379 auto outputs =
GetOutputs(m_Model, subgraphIndex, operatorIndex);
1388 std::vector<unsigned int> begin(beginTensorInfo.
GetNumElements());
1389 ::memcpy(begin.data(), beginBufferPtr->data.data(), beginTensorInfo.
GetNumBytes());
1395 std::vector<unsigned int> size(sizeTensorInfo.GetNumElements());
1396 ::memcpy(size.data(), sizeBufferPtr->data.data(), sizeTensorInfo.GetNumBytes());
1399 auto layerName = boost::str(boost::format(
"Slice:%1%:%2%") % subgraphIndex % operatorIndex);
1400 IConnectableLayer*
const layer = m_Network->AddSliceLayer(desc, layerName.c_str());
1407 auto inputTensorIndexes = AsUnsignedVector(
GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1408 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1411 auto outputTensorIndexes = AsUnsignedVector(
GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1412 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
// ParseSoftmax: SOFTMAX with its beta taken from the operator options.
1415 void TfLiteParser::ParseSoftmax(
size_t subgraphIndex,
size_t operatorIndex)
1417 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1418 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1419 const auto *
options = operatorPtr->builtin_options.AsSoftmaxOptions();
1424 auto inputs =
GetInputs(m_Model, subgraphIndex, operatorIndex);
1426 auto outputs =
GetOutputs(m_Model, subgraphIndex, operatorIndex);
1429 auto layerName = boost::str(boost::format(
"Softmax:%1%:%2%") % subgraphIndex % operatorIndex);
1430 IConnectableLayer*
const layer = m_Network->AddSoftmaxLayer(desc, layerName.c_str());
1437 auto inputTensorIndexes = AsUnsignedVector(
GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1438 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1441 auto outputTensorIndexes = AsUnsignedVector(
GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1442 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
// ParseSpaceToBatchND: mirror of ParseBatchToSpaceND — block shape and a
// flattened pad list are read from constant buffers and re-grouped into
// (front, back) pairs per dimension.
1445 void TfLiteParser::ParseSpaceToBatchND(
size_t subgraphIndex,
size_t operatorIndex)
1447 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1449 auto inputs =
GetInputs(m_Model, subgraphIndex, operatorIndex);
1452 auto outputs =
GetOutputs(m_Model, subgraphIndex, operatorIndex);
1461 std::vector<unsigned int> blockShape(blockShapeTensorInfo.GetNumElements());
1462 ::memcpy(blockShape.data(), blockShapeBufferPtr->data.data(), blockShapeTensorInfo.GetNumBytes());
1464 std::vector<unsigned int> padListVector(padListTensorInfo.
GetNumElements());
1465 ::memcpy(padListVector.data(), padListBufferPtr->data.data(), padListTensorInfo.
GetNumBytes());
// Pair up consecutive pad values: (pad_front, pad_back) per dimension.
1468 std::vector<std::pair<unsigned int, unsigned int>> padList;
1469 for (
unsigned int i = 0; i < padListTensorInfo.
GetNumElements() / step; ++i)
1471 padList.emplace_back(padListVector[i * step], padListVector[i * step + 1]);
1481 auto layerName = boost::str(boost::format(
"SpaceToBatchND:%1%:%2%") % subgraphIndex % operatorIndex);
1482 IConnectableLayer* layer = m_Network->AddSpaceToBatchNdLayer(desc, layerName.c_str());
1486 auto inputTensorIndexes = AsUnsignedVector(
GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1487 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1489 auto outputTensorIndexes = AsUnsignedVector(
GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1490 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
// Body of a helper that computes the output TensorInfo of a SQUEEZE op:
// dimensions listed in `squeezeDims` (or all of 0..3 when the list is empty)
// are dropped iff their extent is 1; everything else is kept.
// NOTE(review): the function signature is in lines missing from this
// extraction — presumably (squeezeDimsIn, inputTensorInfo) -> TensorInfo;
// confirm against the full source.
1497 std::vector<uint32_t> squeezeDims = squeezeDimsIn;
1498 static const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };
// Error-message construction for an unexpected input rank (the guarding
// condition is in a missing line).
1502 std::stringstream ss;
1503 ss <<
"Input tensor has unexpected number of dimensions:" << inputTensorInfo.
GetNumDimensions()
1504 <<
" shape:" << inputTensorInfo.
GetShape() <<
" " 1509 if (squeezeDims.empty())
// Empty squeeze list means "consider every dimension" (0..3).
1511 squeezeDims.assign(dimensionSequence,
1515 std::vector<uint32_t> outputDims;
// A dimension survives if it is not in squeezeDims, or if its size is not 1.
1518 bool skipSqueeze = (std::find(squeezeDims.begin(), squeezeDims.end(), i) == squeezeDims.end());
1519 auto currentDimension = inputTensorInfo.
GetShape()[i];
1520 if (skipSqueeze || currentDimension != 1)
1522 outputDims.push_back(currentDimension);
1526 if (outputDims.size() > 4)
1528 std::stringstream ss;
1529 ss <<
"Output tensor has unexpected number of dimensions:" << inputTensorInfo.
GetNumDimensions()
1530 <<
" shape:" << inputTensorInfo.
GetShape() <<
" " 1542 return outTensorInfo;
// Parses a TFLite SQUEEZE operator. Squeeze is implemented as an ArmNN
// reshape layer (note AddReshapeLayer below) named "Squeeze:<sg>:<op>".
// NOTE(review): partial extraction — the lines building `reshapeDesc` from
// the SqueezeOptions (1557-1565) are missing from this listing.
1545 void TfLiteParser::ParseSqueeze(
size_t subgraphIndex,
size_t operatorIndex)
1547 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1549 auto inputs =
GetInputs(m_Model, subgraphIndex, operatorIndex);
1552 auto outputs =
GetOutputs(m_Model, subgraphIndex, operatorIndex);
1555 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1556 const auto *
options = operatorPtr->builtin_options.AsSqueezeOptions();
1566 auto layerName = boost::str(boost::format(
"Squeeze:%1%:%2%") % subgraphIndex % operatorIndex);
1567 IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, layerName.c_str());
1570 auto inputTensorIndexes = AsUnsignedVector(
GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1571 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1573 auto outputTensorIndexes = AsUnsignedVector(
GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1574 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
// Parses a TFLite STRIDED_SLICE operator. The begin/end/stride vectors are
// copied out of the model's constant buffers, then a StridedSlice layer is
// added and wired to its single input and output.
// NOTE(review): partial extraction — declarations of begin/beginTensorInfo,
// the buffer pointers, and the descriptor setup from `options` are in lines
// missing from this listing.
1577 void TfLiteParser::ParseStridedSlice(
size_t subgraphIndex,
size_t operatorIndex)
1579 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1581 auto inputs =
GetInputs(m_Model, subgraphIndex, operatorIndex);
1584 auto outputs =
GetOutputs(m_Model, subgraphIndex, operatorIndex);
1587 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1588 const auto *
options = operatorPtr->builtin_options.AsStridedSliceOptions();
// Copy the three constant control tensors (begin, end, stride) to host vectors.
1602 ::memcpy(begin.data(), beginBufferPtr->data.data(), beginTensorInfo.
GetNumBytes());
1607 std::vector<int> end(endTensorInfo.GetNumElements());
1608 ::memcpy(end.data(), endBufferPtr->data.data(), endTensorInfo.GetNumBytes());
1613 std::vector<int> stride(strideTensorInfo.GetNumElements());
1614 ::memcpy(stride.data(), strideBufferPtr->data.data(), strideTensorInfo.GetNumBytes());
1620 auto layerName = boost::str(boost::format(
"StridedSlice:%1%:%2%") % subgraphIndex % operatorIndex);
1621 IConnectableLayer* layer = m_Network->AddStridedSliceLayer(desc, layerName.c_str());
1626 auto inputTensorIndexes = AsUnsignedVector(
GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1627 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1629 auto outputTensorIndexes = AsUnsignedVector(
GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1630 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
// Parses a TFLite SUB operator. Takes two inputs; when the input ranks
// differ, a broadcast-reshape layer is inserted instead of registering both
// slots directly. The SubOptions' fused activation (if any) is appended as a
// separate activation layer before the output is registered.
// NOTE(review): partial extraction — the creation of `layer` and the
// declarations of inputTensorInfo/input1TensorInfo are in missing lines.
1633 void TfLiteParser::ParseSub(
size_t subgraphIndex,
size_t operatorIndex)
1635 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1637 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1638 const auto *
options = operatorPtr->builtin_options.AsSubOptions();
1640 auto inputs =
GetInputs(m_Model, subgraphIndex, operatorIndex);
1643 auto outputs =
GetOutputs(m_Model, subgraphIndex, operatorIndex);
1649 auto layerName = boost::str(boost::format(
"Sub:%1%:%2%") % subgraphIndex % operatorIndex);
1655 auto inputTensorIndexes = AsUnsignedVector(
GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
// Rank mismatch => one side must be broadcast via an inserted reshape.
1656 if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.
GetNumDimensions())
1658 AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer);
1662 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
// May replace `layer` with a trailing fused-activation layer.
1665 layer = AddFusedActivationLayer(layer, 0,
options->fused_activation_function);
1667 auto outputTensorIndexes = AsUnsignedVector(
GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1668 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
// Parses a TFLite ADD operator. Mirrors ParseSub: two inputs, optional
// broadcast reshape when ranks differ, and an optional fused activation
// layer appended before the output slot is registered.
// NOTE(review): partial extraction — the creation of `layer` and the
// declarations of inputTensorInfo/input1TensorInfo are in missing lines.
1671 void TfLiteParser::ParseAdd(
size_t subgraphIndex,
size_t operatorIndex)
1673 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1675 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1676 const auto *
options = operatorPtr->builtin_options.AsAddOptions();
1678 auto inputs =
GetInputs(m_Model, subgraphIndex, operatorIndex);
1681 auto outputs =
GetOutputs(m_Model, subgraphIndex, operatorIndex);
1687 auto layerName = boost::str(boost::format(
"Add:%1%:%2%") % subgraphIndex % operatorIndex);
1693 auto inputTensorIndexes = AsUnsignedVector(
GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
// Rank mismatch => one side must be broadcast via an inserted reshape.
1694 if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.
GetNumDimensions())
1696 AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer);
1700 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
1703 layer = AddFusedActivationLayer(layer, 0,
options->fused_activation_function);
1705 auto outputTensorIndexes = AsUnsignedVector(
GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1706 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
// Parses a TFLite MUL operator: adds a multiplication layer, inserting a
// broadcast reshape when the two input ranks differ, then an optional fused
// activation layer from MulOptions.
// NOTE(review): partial extraction — the declarations of
// inputTensorInfo/input1TensorInfo are in lines missing from this listing.
1709 void TfLiteParser::ParseMul(
size_t subgraphIndex,
size_t operatorIndex)
1711 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1713 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1714 const auto *
options = operatorPtr->builtin_options.AsMulOptions();
1716 auto inputs =
GetInputs(m_Model, subgraphIndex, operatorIndex);
1719 auto outputs =
GetOutputs(m_Model, subgraphIndex, operatorIndex);
1725 auto layerName = boost::str(boost::format(
"Mul:%1%:%2%") % subgraphIndex % operatorIndex);
1726 IConnectableLayer* layer = m_Network->AddMultiplicationLayer(layerName.c_str());
1731 auto inputTensorIndexes = AsUnsignedVector(
GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
// Rank mismatch => one side must be broadcast via an inserted reshape.
1732 if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.
GetNumDimensions())
1734 AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer);
1738 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
1741 layer = AddFusedActivationLayer(layer, 0,
options->fused_activation_function);
1743 auto outputTensorIndexes = AsUnsignedVector(
GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1744 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
// Parses a TFLite MEAN operator. The reduction-axis tensor is copied out of
// the model's constant buffer; the layer creation itself is in lines missing
// from this extraction (presumably an AddMeanLayer call — TODO confirm).
1747 void TfLiteParser::ParseMean(
size_t subgraphIndex,
size_t operatorIndex)
1749 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1751 auto inputs =
GetInputs(m_Model, subgraphIndex, operatorIndex);
1753 auto outputs =
GetOutputs(m_Model, subgraphIndex, operatorIndex);
// Copy the constant axis tensor into a host vector.
1760 std::vector<unsigned int> axis(dimTensorInfo.GetNumElements());
1761 ::memcpy(axis.data(), bufferPtr->data.data(), dimTensorInfo.GetNumBytes());
1771 auto layerName = boost::str(boost::format(
"Mean:%1%:%2%") % subgraphIndex % operatorIndex);
1776 auto inputTensorIndexes = AsUnsignedVector(
GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1777 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1779 auto outputTensorIndexes = AsUnsignedVector(
GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1780 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
// Parses a TFLite PAD operator. The constant pad tensor is copied out and
// converted into (before, after) pairs on desc.m_PadList, `step` values per
// dimension.
// NOTE(review): partial extraction — padTensorInfo, bufferPtr, step, desc and
// the layer creation are declared in lines missing from this listing.
1783 void TfLiteParser::ParsePad(
size_t subgraphIndex,
size_t operatorIndex)
1785 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1795 std::vector<unsigned int> padBuffer(padTensorInfo.GetNumElements());
1796 ::memcpy(padBuffer.data(), bufferPtr->data.data(), padTensorInfo.GetNumBytes());
// Flat pad values -> per-dimension (pad-before, pad-after) pairs.
1800 for (
unsigned int i = 0; i < padTensorInfo.GetNumElements() / step; ++i)
1802 desc.
m_PadList.emplace_back(padBuffer[i * step], padBuffer[i * step + 1]);
1805 auto layerName = boost::str(boost::format(
"Pad:%1%:%2%") % subgraphIndex % operatorIndex);
1811 auto inputTensorIndexes = AsUnsignedVector(
GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1812 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1814 auto outputTensorIndexes = AsUnsignedVector(
GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1815 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
// Parses a TFLite QUANTIZE operator. Note that, unlike most parsers in this
// file, the full outputTensorIndexes vector is registered (not just index 0).
// NOTE(review): partial extraction — the layer creation (lines 1829-1835) is
// missing from this listing.
1818 void TfLiteParser::ParseQuantize(
size_t subgraphIndex,
size_t operatorIndex)
1820 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1822 auto inputs =
GetInputs(m_Model, subgraphIndex, operatorIndex);
1825 auto outputs =
GetOutputs(m_Model, subgraphIndex, operatorIndex);
1828 auto layerName = boost::str(boost::format(
"Quantize:%1%:%2%") % subgraphIndex % operatorIndex);
1836 auto inputTensorIndexes = AsUnsignedVector(
GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1837 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1839 auto outputTensorIndexes = AsUnsignedVector(
GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1840 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
// RELU operator: thin forwarder to the shared ParseActivation handler.
1843 void TfLiteParser::ParseRelu(
size_t subgraphIndex,
size_t operatorIndex)
1845 ParseActivation(subgraphIndex,operatorIndex, ActivationFunction::ReLu);
// RELU6 operator: forwards to ParseActivation as a BoundedReLu (the 6.0
// upper bound is set inside ParseActivation).
1848 void TfLiteParser::ParseRelu6(
size_t subgraphIndex,
size_t operatorIndex)
1850 ParseActivation(subgraphIndex,operatorIndex, ActivationFunction::BoundedReLu);
// LOGISTIC operator: thin forwarder to ParseActivation with Sigmoid.
1853 void TfLiteParser::ParseLogistic(
size_t subgraphIndex,
size_t operatorIndex)
1855 ParseActivation(subgraphIndex,operatorIndex,ActivationFunction::Sigmoid);
// TANH operator: thin forwarder to ParseActivation with TanH.
1858 void TfLiteParser::ParseTanH(
size_t subgraphIndex,
size_t operatorIndex)
1860 ParseActivation(subgraphIndex,operatorIndex,ActivationFunction::TanH);
// Shared handler for the activation-style TFLite operators (RELU, RELU6,
// LOGISTIC, TANH). Builds a layer name of the form
// "Activation:<TYPE>:<subgraph>:<operator>", fills the activation descriptor's
// m_A/m_B clamp parameters where the function needs them (RELU6: A=6, B=0;
// TANH: A=1, B=1), and throws for any unsupported ActivationFunction.
// NOTE(review): partial extraction — the declaration of `activationDesc`,
// the switch's break statements, and the default-case throw wrapper are in
// missing lines.
1864 void TfLiteParser::ParseActivation(
size_t subgraphIndex,
size_t operatorIndex,
ActivationFunction activationType)
1866 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1867 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1870 auto inputs =
GetInputs(m_Model, subgraphIndex, operatorIndex);
1873 auto outputs =
GetOutputs(m_Model, subgraphIndex, operatorIndex);
1876 auto layerName = str(boost::format(
"Activation:"));
1880 switch (activationType)
1882 case ActivationFunction::ReLu:
1884 layerName += str(boost::format(
"RELU:%1%:%2%") % subgraphIndex % operatorIndex);
1887 case ActivationFunction::BoundedReLu:
1889 layerName += str(boost::format(
"RELU6:%1%:%2%") % subgraphIndex % operatorIndex);
// BoundedReLu clamp range [0, 6] implements RELU6.
1890 activationDesc.
m_A = 6.0f;
1891 activationDesc.
m_B = 0.0f;
1894 case ActivationFunction::Sigmoid:
1896 layerName += str(boost::format(
"SIGMOID:%1%:%2%") % subgraphIndex % operatorIndex);
1899 case ActivationFunction::TanH:
1901 layerName += str(boost::format(
"TANH:%1%:%2%") % subgraphIndex % operatorIndex);
1902 activationDesc.
m_A = 1.0f;
1903 activationDesc.
m_B = 1.0f;
// Any other ActivationFunction is a programming error here.
1909 boost::str(boost::format(
"Unexpected ActivationFunction[%1%] when creating layerName " 1910 " %2% ") %static_cast<int>(activationType)%
CHECK_LOCATION().AsString()));
1914 IConnectableLayer*
const layer = m_Network->AddActivationLayer(activationDesc, layerName.c_str());
1921 auto inputTensorIndexes = AsUnsignedVector(
GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1922 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1925 auto outputTensorIndexes = AsUnsignedVector(
GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1926 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
// Body of a helper computing a RESHAPE op's output shape from a target-dims
// vector. At most one entry may be -1 ("stretch" dimension); its value is
// inferred as inputNumElements / product(other dims). The accumulate starts
// at -1 so the product of the remaining (non-stretch) dims comes out positive.
// NOTE(review): the function's name and first parameter are in lines missing
// from this extraction; only the trailing `targetDimsIn` parameter is visible.
1929 const std::vector<int32_t> & targetDimsIn)
1931 std::vector<unsigned int> outputDims(targetDimsIn.begin(), targetDimsIn.end());
1932 const auto stretchDim = std::find(targetDimsIn.begin(), targetDimsIn.end(), -1);
1934 if (stretchDim != targetDimsIn.end())
// Reject a second -1: the stretch dimension must be unique.
1936 if (std::find(std::next(stretchDim), targetDimsIn.end(), -1) != targetDimsIn.end())
1940 boost::format(
"At most one component of shape can be -1 %1%") %
CHECK_LOCATION().AsString()));
1943 auto targetNumElements =
1945 std::accumulate(targetDimsIn.begin(), targetDimsIn.end(), -1, std::multiplies<int32_t>()));
1947 auto stretchIndex =
static_cast<size_t>(std::distance(targetDimsIn.begin(), stretchDim));
// Infer the stretch dimension from the total element count.
1948 outputDims[stretchIndex] = inputTensorInfo.
GetNumElements() / targetNumElements;
// Parses a TFLite RESHAPE operator. The target shape can come from two
// places: an optional second (constant int32) input tensor, and/or
// ReshapeOptions::new_shape. When both are present they must agree; the
// resulting shape is also validated against the declared output tensor shape.
// NOTE(review): partial extraction — several validation branches' throw
// statements, the reshapeDesc setup, and the error-path tail are in missing
// lines.
1959 void TfLiteParser::ParseReshape(
size_t subgraphIndex,
size_t operatorIndex)
1961 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1963 auto inputs =
GetInputs(m_Model, subgraphIndex, operatorIndex);
1965 auto outputs =
GetOutputs(m_Model, subgraphIndex, operatorIndex);
1968 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1969 const auto *
options = operatorPtr->builtin_options.AsReshapeOptions();
1974 std::vector<int32_t> targetShape;
// Path 1: shape supplied as a second input tensor. It must be a constant
// (non-variable), rank-1, int32 tensor.
1975 if (inputs.size() > 1 && inputs[1] !=
nullptr)
1977 if (inputs[1]->is_variable)
1982 if (inputs[1]->shape.size() != 1)
1987 if (inputs[1]->type != tflite::TensorType_INT32)
1992 auto bufferPtr =
GetBuffer(m_Model, inputs[1]->buffer);
1993 auto vals =
reinterpret_cast<const int32_t*
>(bufferPtr->data.data());
1994 for (
int i=0; i < inputs[1]->shape[0]; i++)
1996 targetShape.push_back(vals[i]);
// If ReshapeOptions also carries a new_shape it must match the tensor's.
2000 options->new_shape.empty() ==
false &&
2001 options->new_shape != targetShape)
2004 "the values do not match");
2012 "At least one method required");
// Path 2: shape supplied only via ReshapeOptions.
2015 targetShape =
options->new_shape;
// Validate the computed shape against the declared output tensor.
2023 if (inputs.size() > 1 && !
CheckShape(reshapeOutputTensorShape, outputs[0]->shape))
2025 std::stringstream ss;
2026 ss <<
"New shape defined in reshape parameters " 2027 << reshapeOutputTensorShape
2028 <<
" does not equal output shape " 2029 << actualOutputTensorInfo.
GetShape()
2038 auto layerName = boost::str(boost::format(
"Reshape:%1%:%2%") % subgraphIndex % operatorIndex);
2039 IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, layerName.c_str());
2042 auto inputTensorIndexes = AsUnsignedVector(
GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2043 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
2045 auto outputTensorIndexes = AsUnsignedVector(
GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2046 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
// RESIZE_BILINEAR operator: thin forwarder to the shared ParseResize handler.
2049 void TfLiteParser::ParseResizeBilinear(
size_t subgraphIndex,
size_t operatorIndex)
2051 ParseResize(subgraphIndex, operatorIndex, ResizeMethod::Bilinear);
// RESIZE_NEAREST_NEIGHBOR operator: thin forwarder to ParseResize.
2054 void TfLiteParser::ParseResizeNearestNeighbor(
size_t subgraphIndex,
size_t operatorIndex)
2056 ParseResize(subgraphIndex, operatorIndex, ResizeMethod::NearestNeighbor);
// Shared handler for the resize operators. Reads the constant size tensor
// ([height, width]) into the descriptor, names the layer
// "Resize:<METHOD>:<sg>:<op>", and — for bilinear only — copies the
// align_corners flag out of ResizeBilinearOptions. Unknown methods throw.
// NOTE(review): partial extraction — sizeTensorInfo/sizeBufferPtr/desc
// declarations, the layer creation, and the switch's break statements are in
// missing lines. Note the full outputTensorIndexes vector is registered.
2059 void TfLiteParser::ParseResize(
size_t subgraphIndex,
size_t operatorIndex,
ResizeMethod resizeMethod)
2061 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2063 auto inputs =
GetInputs(m_Model, subgraphIndex, operatorIndex);
2066 auto outputs =
GetOutputs(m_Model, subgraphIndex, operatorIndex);
// Copy the constant [height, width] size tensor to the host.
2072 std::vector<int32_t> sizeTensorData(sizeTensorInfo.GetNumElements());
2075 ::memcpy(sizeTensorData.data(), sizeBufferPtr->data.data(), sizeTensorInfo.GetNumBytes());
2079 desc.m_TargetHeight =
static_cast<uint32_t
> (sizeTensorData[0]);
2080 desc.m_TargetWidth =
static_cast<uint32_t
> (sizeTensorData[1]);
2083 auto layerName = str(boost::format(
"Resize:"));
2085 switch (resizeMethod)
2087 case ResizeMethod::Bilinear:
2089 layerName += str(boost::format(
"BILINEAR:%1%:%2%") % subgraphIndex % operatorIndex);
// Only the bilinear method reads the align_corners option.
2091 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2092 const auto *
options = operatorPtr->builtin_options.AsResizeBilinearOptions();
2094 desc.m_BilinearAlignCorners =
options->align_corners;
2097 case ResizeMethod::NearestNeighbor:
2099 layerName += str(boost::format(
"NEARESTNEIGHBOR:%1%:%2%") % subgraphIndex % operatorIndex);
// Any other ResizeMethod is a programming error here.
2105 boost::str(boost::format(
"Unexpected ResizeMethod[%1%] when creating layerName " 2106 " %2% ") %static_cast<int>(resizeMethod)%
CHECK_LOCATION().AsString()));
2115 auto inputTensorIndexes = AsUnsignedVector(
GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2116 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
2118 auto outputTensorIndexes = AsUnsignedVector(
GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2119 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
// Parses a TFLite CONCATENATION operator. The concat axis from the options
// may be negative; it is normalized into [0, inputRank) with the
// (rank + axis) % rank arithmetic below. An OriginsDescriptor with one view
// per input is built, then a concat layer plus optional fused activation.
// NOTE(review): partial extraction — inputRank's declaration, the per-view
// loop body preamble, and the output setup lines are missing from this
// listing. All input slots are registered (vector form), one output.
2122 void TfLiteParser::ParseConcatenation(
size_t subgraphIndex,
size_t operatorIndex)
2124 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2126 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2127 const auto *
options = operatorPtr->builtin_options.AsConcatenationOptions();
2131 auto inputs =
GetInputs(m_Model, subgraphIndex, operatorIndex);
2132 auto outputs =
GetOutputs(m_Model, subgraphIndex, operatorIndex);
2135 unsigned int numConcatView =
static_cast<unsigned int>(inputs.size());
// Normalize a possibly-negative axis into [0, inputRank).
2138 const unsigned int concatDimInput =
static_cast<unsigned int>(
2139 (
static_cast<int>(inputRank) +
options->axis) %
static_cast<int>(inputRank));
2141 OriginsDescriptor concatDescriptor(static_cast<uint32_t>(numConcatView), inputRank);
2144 unsigned int mergeDimOrigin = 0;
2146 for (
unsigned int viewIndex = 0; viewIndex < numConcatView; ++viewIndex)
2152 inputTensorInfo, concatDescriptor, concatDimInput, viewIndex, mergeDimOrigin);
2155 auto layerName = boost::str(boost::format(
"Concatenation:%1%:%2%") % subgraphIndex % operatorIndex);
2156 IConnectableLayer* layer = m_Network->AddConcatLayer(concatDescriptor, layerName.c_str());
2161 auto inputTensorIndexes = AsUnsignedVector(
GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2165 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes});
2168 layer = AddFusedActivationLayer(layer, 0,
options->fused_activation_function);
2170 auto outputTensorIndexes = AsUnsignedVector(
GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2171 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
// Parses a TFLite FULLY_CONNECTED operator. Requires 2-D weights; creates a
// constant tensor for the weights (and, when a third input exists, for the
// bias). When the input tensor's shape does not match what the filter
// expects, a leading reshape layer ("Reshape_for:<layer-name>") is inserted
// to flatten the input to 2-D; the deduced first dimension is
// numElements / filterDim1 and must divide exactly. Output slots are
// registered on the trailing fused-activation layer.
// NOTE(review): partial extraction — the `desc` and `layer` declarations,
// the exception wrappers, and the reshape-layer insertion details are in
// missing lines.
2174 void TfLiteParser::ParseFullyConnected(
size_t subgraphIndex,
size_t operatorIndex)
2176 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2178 const auto & operatorRfr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2179 const auto options = operatorRfr->builtin_options.AsFullyConnectedOptions();
2187 auto inputs =
GetInputs(m_Model, subgraphIndex, operatorIndex);
2188 auto outputs =
GetOutputs(m_Model, subgraphIndex, operatorIndex);
// ArmNN only supports 2-D fully-connected weights.
2194 int32_t weightsDimension =
static_cast<int32_t
>(filterTensorInfo.GetNumDimensions());
2195 if (weightsDimension != 2)
2200 "Dimension %1% for Fully Connected weights is not supported by Armnn. " 2206 auto filterTensorAndData = CreateConstTensor(inputs[1],
2210 auto layerName = boost::str(boost::format(
"FullyConnected:%1%:%2%") % subgraphIndex % operatorIndex);
// Three inputs => optional bias is present.
2212 if (inputs.size() == 3)
2216 auto biasTensorAndData = CreateConstTensor(inputs[2],
2219 layer = m_Network->AddFullyConnectedLayer(desc,
2220 filterTensorAndData.first,
2226 layer = m_Network->AddFullyConnectedLayer(desc,
2227 filterTensorAndData.first,
2235 auto inputTensorIndexes = AsUnsignedVector(
GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
// Deduce a 2-D input shape [numElements / filterDim1, filterDim1] for the
// inserted flattening reshape.
2243 std::vector<unsigned int> reshapedDimensions(2);
2244 reshapedDimensions[1] = filterTensorInfo.GetShape()[1];
2245 reshapedDimensions[0] = inputTensorInfo.
GetNumElements() / reshapedDimensions[1];
2247 if (inputTensorInfo.
GetNumElements() % reshapedDimensions[1] != 0)
2252 "Failed to deduce input tensor shape from filter size %1%")
2253 % reshapedDimensions[1]
2260 std::string reshapeLayerName = boost::str(boost::format(
"Reshape_for:%1%") % layer->
GetName());
2262 desc.m_TargetShape = reshapedTensorInfo.
GetShape();
2268 RegisterInputSlots(subgraphIndex, operatorIndex, reshapeLayer, {inputTensorIndexes[0]});
2274 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
2282 options->fused_activation_function);
2285 auto outputTensorIndexes = AsUnsignedVector(
GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2286 RegisterOutputSlots(subgraphIndex, operatorIndex, fusedActivationLayer, {outputTensorIndexes[0]});
// Parses the custom TFLite_Detection_PostProcess operator. Its parameters
// arrive as a FlexBuffers map in the operator's custom_options (scales,
// optional use_regular_nms / detections_per_class, etc.). The anchors come
// in as a constant third input. The op has four outputs whose shapes are
// overridden here: boxes [1,N,4], classes [1,N], scores [1,N], count [1].
// NOTE(review): partial extraction — the descriptor fields read between the
// visible ones (score threshold etc.), numDetectedBox's computation, and the
// per-output loop body are in missing lines.
2289 void TfLiteParser::ParseDetectionPostProcess(
size_t subgraphIndex,
size_t operatorIndex)
2291 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2293 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2295 auto inputs =
GetInputs(m_Model, subgraphIndex, operatorIndex);
2296 auto outputs =
GetOutputs(m_Model, subgraphIndex, operatorIndex);
// The custom operator's attributes are FlexBuffers-encoded, not a builtin
// options table.
2300 auto custom_options = operatorPtr->custom_options;
2301 const flexbuffers::Map& m = flexbuffers::GetRoot(custom_options.data(), custom_options.size()).AsMap();
2310 desc.
m_ScaleH = m[
"h_scale"].AsFloat();
2311 desc.
m_ScaleW = m[
"w_scale"].AsFloat();
2312 desc.
m_ScaleX = m[
"x_scale"].AsFloat();
2313 desc.
m_ScaleY = m[
"y_scale"].AsFloat();
// These two attributes are optional in the flexbuffer map.
2315 if (!(m[
"use_regular_nms"].IsNull()))
2319 if (!(m[
"detections_per_class"].IsNull()))
2327 "must be positive and less than or equal to 1.");
2331 auto anchorTensorAndData = CreateConstTensor(inputs[2], anchorTensorInfo,
2334 auto layerName = boost::str(boost::format(
"DetectionPostProcess:%1%:%2%") % subgraphIndex % operatorIndex);
2335 IConnectableLayer* layer = m_Network->AddDetectionPostProcessLayer(desc, anchorTensorAndData.first,
// The four output tensors' shapes are fixed by the op, not by the model file.
2343 m_OverridenOutputShapes.push_back({ 1, numDetectedBox, 4 });
2344 m_OverridenOutputShapes.push_back({ 1, numDetectedBox });
2345 m_OverridenOutputShapes.push_back({ 1, numDetectedBox });
2346 m_OverridenOutputShapes.push_back({ 1 });
2348 for (
unsigned int i = 0 ; i < outputs.size() ; ++i)
2356 auto inputTensorIndexes = AsUnsignedVector(
GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
// Only the first two inputs (boxes, scores) are graph inputs; anchors are
// the constant tensor consumed above.
2357 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
2360 auto outputTensorIndexes = AsUnsignedVector(
GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2361 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0],
2362 outputTensorIndexes[1],
2363 outputTensorIndexes[2],
2364 outputTensorIndexes[3]});
// Parses a TFLite PACK operator: stacks all inputs into one output. The
// descriptor's m_NumInputs is taken from the actual input count; the axis
// and the layer creation are in lines missing from this extraction.
// All input slots are registered (vector form), one output.
2368 void TfLiteParser::ParsePack(
size_t subgraphIndex,
size_t operatorIndex)
2370 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2372 auto inputs =
GetInputs(m_Model, subgraphIndex, operatorIndex);
2373 auto outputs =
GetOutputs(m_Model, subgraphIndex, operatorIndex);
// PACK needs at least one input to stack.
2376 if (inputs.size() < 1)
2381 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2382 const auto*
options = operatorPtr->builtin_options.AsPackOptions();
2386 desc.
m_NumInputs =
static_cast<uint32_t
>(inputs.size());
2392 auto layerName = boost::str(boost::format(
"Pack:%1%:%2%") % subgraphIndex % operatorIndex);
2400 auto inputTensorIndexes = AsUnsignedVector(
GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2401 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes});
2403 auto outputTensorIndexes = AsUnsignedVector(
GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2404 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
// Parses a TFLite UNPACK operator. Implemented as an ArmNN splitter layer:
// the unpack axis must be a valid input dimension, the unpack count must
// equal (or default to) the extent along that axis, and each of the
// `unpackNum` views keeps every dimension except the unpack axis, which is
// divided by unpackNum. Each splitter output then feeds a per-output reshape
// ("Reshape_for:<name>") whose producer is registered for the original
// output tensor id.
// NOTE(review): partial extraction — the options validation that sets
// unpackAxis/unpackNum, the view-origin setup, and most of the per-output
// reshape loop are in missing lines.
2407 void TfLiteParser::ParseUnpack(
size_t subgraphIndex,
size_t operatorIndex)
2409 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2411 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2412 const auto *
options = operatorPtr->builtin_options.AsUnpackOptions();
2417 auto inputs =
GetInputs(m_Model, subgraphIndex, operatorIndex);
// The axis to unpack along must index an existing input dimension.
2422 if (unpackAxis >= inputTensorInfo.GetNumDimensions())
2427 "The unpack axis: %1% cannot be greater than or equal to " 2428 "the number of input dimension %2% %3%")
2430 % inputTensorInfo.GetNumDimensions()
// Default the unpack count to the extent of the unpack axis.
2438 unpackNum = inputTensorInfo.GetShape()[unpackAxis];
2444 throw ParseException(
"Number to unpack must greater than zero.");
2447 auto outputs =
GetOutputs(m_Model, subgraphIndex, operatorIndex);
2450 auto inputDimSize = inputTensorInfo.GetNumDimensions();
2451 std::vector<unsigned int> unpackDimSizes(inputDimSize);
// Start each view from the full input shape...
2454 for (
unsigned int i = 0; i < inputDimSize; ++i)
2456 unpackDimSizes[i] = inputTensorInfo.GetShape()[i];
2459 if (unpackDimSizes[unpackAxis] != unpackNum)
2461 throw ParseException(
"Number to unpack must be the same as length of the dimension to " 2465 unpackDimSizes[unpackAxis] /= unpackNum;
// ...then shrink the unpack axis so each view is one slice.
2467 SplitterDescriptor splitDesc(unpackNum, static_cast<unsigned int>(unpackDimSizes.size()));
2468 for (
unsigned int j = 0; j < unpackNum; ++j)
2471 for (
unsigned int dimIdx = 0; dimIdx < unpackDimSizes.size(); ++dimIdx)
2473 splitDesc.
SetViewSize(j, dimIdx, unpackDimSizes[dimIdx]);
2478 auto layerName = boost::str(boost::format(
"Unpack:%1%:%2%") % subgraphIndex % operatorIndex);
2479 IConnectableLayer* layer = m_Network->AddSplitterLayer(splitDesc, layerName.c_str());
2482 unpackDimSizes.data());
2484 auto inputTensorIndexes = AsUnsignedVector(
GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2485 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
2491 std::string reshapeLayerName = boost::str(boost::format(
"Reshape_for:%1%") % layer->
GetName());
2506 RegisterProducerOfTensor(subgraphIndex, reshapedOutputId, slot);
// Parses a TFLite SPLIT operator (equal-sized splits). The split axis is a
// constant tensor (first input); the split count comes from SplitOptions and
// must be > 0 and divide the axis extent exactly. Builds a splitter
// descriptor with numSplits equal views and registers all output slots.
// NOTE(review): partial extraction — axisTensorInfo/axisBufferPtr and the
// splitDesc declaration, plus the rank-limit throw, are in missing lines.
// The input slot registered is inputTensorIndexes[1] (the data tensor; index
// 0 is the axis tensor).
2510 void TfLiteParser::ParseSplit(
size_t subgraphIndex,
size_t operatorIndex)
2512 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2514 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2515 const auto *
options = operatorPtr->builtin_options.AsSplitOptions();
2522 throw ParseException(
"Number to splits must greater than zero.");
2525 auto inputs =
GetInputs(m_Model, subgraphIndex, operatorIndex);
2527 auto outputs =
GetOutputs(m_Model, subgraphIndex, operatorIndex);
// Copy the constant axis tensor; only its first element is used.
2534 std::vector<unsigned int> axisData(axisTensorInfo.
GetNumElements());
2535 ::memcpy(axisData.data(), axisBufferPtr->data.data(), axisTensorInfo.
GetNumBytes());
2538 const unsigned int splitDim = axisData[0];
2540 auto inputDimSize = inputTensorInfo.GetNumDimensions();
2546 "The number of dimensions: %1% for input tensors of the " 2547 "split op cannot be greater than %2% %3%")
2548 % inputTensorInfo.GetNumDimensions()
2553 std::vector<unsigned int> splitterDimSizes(inputDimSize);
// Start each view from the full input shape...
2556 for (
unsigned int i = 0; i < inputDimSize; ++i)
2558 splitterDimSizes[i] = inputTensorInfo.GetShape()[i];
2561 if (splitterDimSizes[splitDim] % numSplits != 0)
2563 throw ParseException(
"Number of splits must evenly divide the dimension");
// ...then shrink the split axis to the per-view size.
2565 splitterDimSizes[splitDim] /= numSplits;
2568 for (
unsigned int j = 0; j < numSplits; ++j)
2571 for (
unsigned int dimIdx = 0; dimIdx < splitterDimSizes.size(); ++dimIdx)
2573 splitDesc.
SetViewSize(j, dimIdx, splitterDimSizes[dimIdx]);
2578 auto layerName = boost::str(boost::format(
"Split:%1%:%2%") % subgraphIndex % operatorIndex);
2579 IConnectableLayer* layer = m_Network->AddSplitterLayer(splitDesc, layerName.c_str());
2581 auto inputTensorIndexes = AsUnsignedVector(
GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2582 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[1]});
2590 auto outputTensorIndexes = AsUnsignedVector(
GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2591 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
// Fragment of a helper that normalizes a possibly-negative axis index into
// [0, numDims) (negative values count from the end, Python-style).
// NOTE(review): the function signature and any range check between these two
// statements are in lines missing from this extraction.
2597 int v = idx < 0 ? numDims + idx : idx;
2601 return static_cast<unsigned int>(v);
// Parses a TFLite SPLIT_V operator (variable-sized splits). Three inputs:
// the data tensor, a constant splits-size tensor, and a constant axis
// tensor. Split sizes come either from SplitOptions (num_splits with
// equal-sized pieces that must divide the axis extent) or from the splits
// tensor, where at most one entry may be negative ("infer me") and is
// resolved as axisExtent - sum(specified sizes). Each view keeps all
// dimensions except the split axis, whose size and origin come from the
// accumulated split sizes. All output slots are registered.
// NOTE(review): partial extraction — the rank check, the branch structure
// around the two splits-data sources, the splitDesc declaration, and the
// loop recording inferIdx are in missing lines.
2604 void TfLiteParser::ParseSplitV(
size_t subgraphIndex,
size_t operatorIndex)
2606 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2608 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2611 auto inputs =
GetInputs(m_Model, subgraphIndex, operatorIndex);
2614 auto& inputTensor = inputs[0];
2615 auto& splitsTensor = inputs[1];
2616 auto& axisTensor = inputs[2];
2630 "The number of dimensions: %1% for input tensors of the " 2631 "split op cannot be greater than %2% %3%")
// Copy the constant axis tensor to the host.
2640 ::memcpy(axisData.data(), axisBufferPtr->data.data(), axisTensorInfo.
GetNumBytes());
2645 const auto *
options = operatorPtr->builtin_options.AsSplitOptions();
2647 unsigned int numSplits = 0;
2648 std::vector<int> splitsData(0);
// Source 1: equal splits from SplitOptions::num_splits.
2652 splitsData.resize(numSplits);
2654 if (inputTensorInfo.
GetShape()[splitDim] % numSplits != 0)
2656 throw ParseException(
"Number of splits must evenly divide the split axis");
2658 unsigned int splitSize = inputTensorInfo.
GetShape()[splitDim] / numSplits;
2659 for (
auto& split : splitsData)
// Source 2: explicit sizes from the splits tensor.
2666 numSplits = splitsInfo.
GetShape()[0];
2667 splitsData.resize(numSplits);
2670 ::memcpy(splitsData.data(), splitsBufferPtr->data.data(), splitsInfo.
GetNumBytes());
// Count negative ("infer") entries and sum the explicit sizes.
2672 int numInferred = 0;
2673 int specifiedSizes = 0;
2674 unsigned int inferIdx = 0;
2675 unsigned int idx = 0;
2676 for (
auto split : splitsData)
2685 specifiedSizes += split;
2690 if (numInferred > 0)
2692 if (numInferred > 1)
2694 throw ParseException(
"Cannot infer split size for more than one split");
// The single inferred size is the remainder of the split axis.
2696 splitsData[inferIdx] =
numeric_cast<
int>(inputTensorInfo.
GetShape()[splitDim]) - specifiedSizes;
2706 auto outputs =
GetOutputs(m_Model, subgraphIndex, operatorIndex);
// Build one view per split; origins along the split axis accumulate.
2711 unsigned int accumSplit = 0;
2712 for (
unsigned int j = 0; j < numSplits; ++j)
2714 unsigned int splitSize =
numeric_cast<
unsigned int>(splitsData[j]);
2717 for (
unsigned int dimIdx = 0; dimIdx < inputTensorInfo.
GetNumDimensions(); ++dimIdx)
2719 unsigned int dimSize = inputTensorInfo.
GetShape()[dimIdx];
2720 if (dimIdx == splitDim)
2722 dimSize = splitSize;
2724 splitDesc.SetViewSize(j, dimIdx, dimSize);
2727 splitDesc.SetViewOriginCoord(j, splitDim, accumSplit);
2728 accumSplit += splitSize;
2731 auto layerName = boost::str(boost::format(
"Split:%1%:%2%") % subgraphIndex % operatorIndex);
2732 IConnectableLayer* layer = m_Network->AddSplitterLayer(splitDesc, layerName.c_str());
2734 auto inputTensorIndexes = AsUnsignedVector(
GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2735 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
2743 auto outputTensorIndexes = AsUnsignedVector(
GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2744 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
2748 unsigned int outputSlot,
2749 tflite::ActivationFunctionType activationType)
2752 std::string layerName = prevLayer->
GetName();
2754 switch(activationType)
2756 case tflite::ActivationFunctionType_NONE:
2761 case tflite::ActivationFunctionType_RELU:
2763 activationDesc.
m_Function = ActivationFunction::ReLu;
2764 layerName +=
":RELU";
2767 case tflite::ActivationFunctionType_RELU6:
2769 activationDesc.
m_Function = ActivationFunction::BoundedReLu;
2770 activationDesc.
m_A = 6.0f;
2771 activationDesc.
m_B = 0.0f;
2772 layerName +=
":RELU6";
2775 case tflite::ActivationFunctionType_TANH:
2777 activationDesc.
m_Function = ActivationFunction::TanH;
2778 activationDesc.
m_A = 1.0f;
2779 activationDesc.
m_B = 1.0f;
2780 layerName +=
":TANH";
2785 case tflite::ActivationFunctionType_RELU_N1_TO_1:
2786 case tflite::ActivationFunctionType_SIGN_BIT:
2791 boost::format(
"TfLite parser doesn't suppport fused activation: " 2794 tflite::EnumNameActivationFunctionType(activationType) %
2801 m_Network->AddActivationLayer(activationDesc, layerName.c_str());
2803 auto & prevOutputSlot = prevLayer->
GetOutputSlot(outputSlot);
2806 return activationLayer;
2811 if (fileName ==
nullptr)
2816 boost::system::error_code errorCode;
2817 boost::filesystem::path pathToFile(fileName);
2818 if (!boost::filesystem::exists(pathToFile, errorCode))
2821 std::string msg = boost::str(boost::format(
"Cannot find the file (%1%) errorCode: %2% %3%") %
2827 std::ifstream file(fileName, std::ios::binary);
2828 std::string fileContent((std::istreambuf_iterator<char>(file)), std::istreambuf_iterator<char>());
2830 fileContent.size());
2835 if (binaryContent ==
nullptr)
2840 flatbuffers::Verifier verifier(binaryContent, len);
2841 if (verifier.VerifyBuffer<tflite::Model>() ==
false)
2844 boost::str(boost::format(
"Buffer doesn't conform to the expected Tensorflow Lite " 2845 "flatbuffers format. size:%1% %2%") %
2849 return tflite::UnPackModel(binaryContent);
2853 size_t subgraphIndex,
2854 size_t operatorIndex)
2858 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
2859 const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
2861 size_t inputCount = operatorPtr->inputs.size();
2863 for (
size_t i=0; i<inputCount; ++i)
2866 result[i] = subgraphPtr->tensors[inputId].get();
2872 size_t subgraphIndex,
2873 size_t operatorIndex)
2877 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
2878 const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
2880 size_t outputCount = operatorPtr->outputs.size();
2882 for (
size_t i=0; i<outputCount; ++i)
2886 result[i] = subgraphPtr->tensors[outputId].get();
2892 size_t subgraphIndex)
2895 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
2897 size_t inputCount = subgraphPtr->inputs.size();
2899 for (
size_t i=0; i<inputCount; ++i)
2903 result[i] = std::make_pair(inputId, subgraphPtr->tensors[inputId].get());
2909 size_t subgraphIndex)
2912 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
2914 size_t outputCount = subgraphPtr->outputs.size();
2916 for (
size_t i=0; i<outputCount; ++i)
2919 result[i] = std::make_pair(outputId, subgraphPtr->tensors[outputId].get());
2925 size_t subgraphIndex,
2926 size_t operatorIndex)
2929 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
2930 const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
2931 return operatorPtr->inputs;
2935 size_t subgraphIndex,
2936 size_t operatorIndex)
2939 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
2940 const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
2941 return operatorPtr->outputs;
2944 void TfLiteParser::RegisterInputSlots(
size_t subgraphIndex,
2945 size_t operatorIndex,
2947 const std::vector<unsigned int>& tensorIndexes)
2949 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2954 boost::str(boost::format(
"The number of tensor inputs (%1%) does not match the number expected (%2%)" 2955 " for subgraph:%3% operator index:%4% %5%") %
2956 tensorIndexes.size() %
2963 for (
unsigned int slotIndex = 0; slotIndex < layer->
GetNumInputSlots(); ++slotIndex)
2965 unsigned int tensorIndex = tensorIndexes[slotIndex];
2967 RegisterConsumerOfTensor(subgraphIndex, tensorIndex, slot);
2971 void TfLiteParser::RegisterOutputSlots(
size_t subgraphIndex,
2972 size_t operatorIndex,
2974 const std::vector<unsigned int>& tensorIndexes)
2976 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2981 boost::str(boost::format(
"The number of tensor outputs (%1%) does not match the number expected (%2%)" 2982 " for subgraph:%3% operator index:%4% %5%") %
2983 tensorIndexes.size() %
2990 for (
unsigned int slotIndex = 0; slotIndex < layer->
GetNumOutputSlots(); ++slotIndex)
2992 unsigned int tensorIndex = tensorIndexes[slotIndex];
2994 RegisterProducerOfTensor(subgraphIndex, tensorIndex, slot);
2998 void TfLiteParser::SetupInputLayers(
size_t subgraphIndex)
3003 for (
auto const & tensorIdAndPtr : inputs)
3005 auto bindingId = GenerateLayerBindingId(subgraphIndex, tensorIdAndPtr.first);
3007 m_Network->AddInputLayer(bindingId, tensorIdAndPtr.second->name.c_str());
3012 RegisterOutputSlots(subgraphIndex,
3013 VIRTUAL_OPERATOR_ID,
3015 {
static_cast<uint32_t
>(tensorIdAndPtr.first) });
3019 void TfLiteParser::SetupOutputLayers(
size_t subgraphIndex)
3024 for (
auto const & tensorIdAndPtr : outputs)
3026 auto bindingId = GenerateLayerBindingId(subgraphIndex, tensorIdAndPtr.first);
3028 m_Network->AddOutputLayer(bindingId, tensorIdAndPtr.second->name.c_str());
3030 RegisterInputSlots(subgraphIndex,
3031 VIRTUAL_OPERATOR_ID,
3033 {
static_cast<uint32_t
>(tensorIdAndPtr.first) });
3037 void TfLiteParser::SetupConstantLayers(
size_t subgraphIndex)
3041 const auto & subgraphPtr = m_Model->subgraphs[subgraphIndex];
3042 for (
unsigned int subgraphIndex = 0; subgraphIndex < m_SubgraphConnections.size(); ++subgraphIndex)
3044 for (
unsigned int tensorIndex = 0; tensorIndex < m_SubgraphConnections[subgraphIndex].size(); ++tensorIndex)
3046 if (m_SubgraphConnections[subgraphIndex][tensorIndex].outputSlot ==
nullptr &&
3047 m_SubgraphConnections[subgraphIndex][tensorIndex].inputSlots.size() > 0)
3049 TensorRawPtr tensorPtr = subgraphPtr->tensors[tensorIndex].get();
3051 auto tensorAndData = CreateConstTensor(tensorPtr,
3055 std::string layerName = boost::str(boost::format(
"Constant:%1%") % tensorPtr->name);
3057 m_Network->AddConstantLayer(tensorAndData.first, layerName.c_str());
3060 RegisterOutputSlots(subgraphIndex,
3061 VIRTUAL_OPERATOR_ID,
3074 return model->buffers[bufferIndex].get();
3077 template<
typename T>
3078 std::pair<armnn::ConstTensor, TfLiteParser::SupportedDataStorage>
3084 auto constData = CreateConstTensorImpl<T>(bufferPtr,
3088 TfLiteParser::SupportedDataStorage storage(std::move(constData.second));
3089 return std::make_pair(constData.first, std::move(storage));
3092 std::pair<armnn::ConstTensor, TfLiteParser::SupportedDataStorage>
3093 TfLiteParser::CreateConstTensor(
TensorRawPtr tensorPtr,
3098 auto bufferPtr =
GetBuffer(m_Model, tensorPtr->buffer);
3104 return CreateConstTensorAndStoreData<float>(bufferPtr,
3109 return CreateConstTensorAndStoreData<uint8_t>(bufferPtr,
3114 return CreateConstTensorAndStoreData<int8_t>(bufferPtr,
3119 return CreateConstTensorAndStoreData<int8_t>(bufferPtr,
3124 return CreateConstTensorAndStoreData<int32_t>(bufferPtr,
3130 std::stringstream errString;
3131 errString <<
"Unexpected datatype when creating const tensor: " 3133 <<
" shape:" << tensorInfo.GetShape()
3141 const std::string& name)
const 3145 for (
auto const & input : inputs)
3147 if (input.second->name == name)
3149 auto bindingId = GenerateLayerBindingId(subgraphId, input.first);
3150 return std::make_pair(bindingId,
ToTensorInfo(input.second));
3154 std::stringstream bindings;
3155 for (
auto const & input : inputs)
3157 bindings <<
"'" << input.second->name <<
"' ";
3162 boost::format(
"No input binding found for subgraph:%1% and name:%2%. " 3163 "Possible inputs are: [%3%] %4%") %
3171 const std::string& name)
const 3175 for (
unsigned int i = 0; i < outputs.size(); ++i)
3177 auto const output = outputs[i];
3178 if (output.second->name == name)
3180 auto bindingId = GenerateLayerBindingId(subgraphId, output.first);
3181 std::vector<unsigned int> shape = m_OverridenOutputShapes.size() > 0 ?
3182 m_OverridenOutputShapes[i] : AsUnsignedVector(output.second->shape);
3183 return std::make_pair(bindingId,
ToTensorInfo(output.second, shape));
3187 std::stringstream bindings;
3188 for (
auto const & output : outputs)
3190 bindings <<
"'" << output.second->name <<
"' ";
3195 boost::format(
"No output binding found for subgraph:%1% and name:%2%. " 3196 "Possible outputs are: [%3%] %4%") %
3205 return m_Model->subgraphs.size();
3212 std::vector<std::string> result;
3213 result.reserve(inputs.size());
3214 for (
auto const & input : inputs)
3216 result.push_back(input.second->name);
3225 std::vector<std::string> result;
3226 result.reserve(outputs.size());
3227 for (
auto const & output : outputs)
3229 result.push_back(output.second->name);
3249 TfLiteParser::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<
float[]> && data)
3250 : m_FloatData(std::move(data))
3251 , m_Uint8Data(
nullptr)
3252 , m_Int8Data(
nullptr)
3253 , m_Int32Data(
nullptr)
3257 TfLiteParser::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<uint8_t[]> && data)
3258 : m_FloatData(
nullptr)
3259 , m_Uint8Data(std::move(data))
3260 , m_Int8Data(
nullptr)
3261 , m_Int32Data(
nullptr)
3265 TfLiteParser::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<int8_t[]> && data)
3266 : m_FloatData(
nullptr)
3267 , m_Uint8Data(
nullptr)
3268 , m_Int8Data(std::move(data))
3269 , m_Int32Data(
nullptr)
3273 TfLiteParser::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<int32_t[]> && data)
3274 : m_FloatData(
nullptr)
3275 , m_Uint8Data(
nullptr)
3276 , m_Int8Data(
nullptr)
3277 , m_Int32Data(std::move(data))
virtual armnn::INetworkPtr CreateNetworkFromBinaryFile(const char *graphFile) override
Create the network from a flatbuffers binary file on disk.
uint32_t m_PadBottom
Padding bottom value in the height dimension.
bool m_BiasEnabled
Enable/disable bias.
#define CHECK_MODEL(MODEL, SUBGRAPH_INDEX, OPERATOR_INDEX)
virtual unsigned int GetNumOutputSlots() const =0
Returns the number of connectable output slots.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
virtual BindingPointInfo GetNetworkOutputBindingInfo(size_t subgraphId, const std::string &name) const override
Retrieve binding info (layer id and tensor info) for the network output identified by the given layer...
UnaryOperation m_Operation
Specifies the elementwiseUnary operation to execute.
uint32_t m_Axis
0-based axis along which to stack the input tensors.
std::unique_ptr< tflite::SubGraphT > SubgraphPtr
static BufferRawPtr GetBuffer(const ModelPtr &model, size_t bufferIndex)
A ViewsDescriptor for the SplitterLayer.
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
float m_ScaleW
Center size encoding scale weight.
uint32_t m_PadBottom
Padding bottom value in the height dimension.
bool m_BiasEnabled
Enable/disable bias.
static ModelPtr LoadModelFromBinary(const uint8_t *binaryContent, size_t len)
virtual unsigned int GetNumInputSlots() const =0
Returns the number of connectable input slots.
A TransposeConvolution2dDescriptor for the TransposeConvolution2dLayer.
#define ARMNN_THROW_PARSE_EXCEPTION(msg)
const TensorShape & GetShape() const
uint32_t m_PadBottom
Padding bottom value in the height dimension.
uint32_t m_PadLeft
Padding left value in the width dimension.
virtual armnn::INetworkPtr CreateNetworkFromBinary(const std::vector< uint8_t > &binaryContent) override
Create the network from a flatbuffers binary.
std::string AsString() const
int32_t m_ShrinkAxisMask
Shrink axis mask value. If set, the nth specification shrinks the dimensionality by 1...
A ReshapeDescriptor for the ReshapeLayer.
std::vector< int > m_Begin
Begin values for the input that will be sliced.
std::vector< TensorRawPtr > TensorRawPtrVector
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
float m_ScaleX
Center size encoding scale x.
TensorShape m_InputShape
Required shape of all input tensors.
bool m_TransposeWeightMatrix
Enable/disable transpose weight matrix.
uint32_t m_PoolWidth
Pooling width value.
A Convolution2dDescriptor for the Convolution2dLayer.
uint32_t m_PadLeft
Padding left value in the width dimension.
static ModelPtr LoadModelFromFile(const char *fileName)
bool m_BiasEnabled
Enable/disable bias.
unsigned int GetNumBytes() const
ResizeMethod m_Method
The Interpolation method to use (Bilinear, NearestNeighbor).
float m_Beta
Exponentiation value.
PaddingMethod m_PaddingMethod
The padding method to be used. (Exclude, IgnoreValue).
uint32_t m_DetectionsPerClass
Detections per classes, used in Regular NMS.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
#define CHECK_BUFFER(MODEL, BUFFER_INDEX)
static TensorRawPtrVector GetInputs(const ModelPtr &model, size_t subgraphIndex, size_t operatorIndex)
virtual const char * what() const noexcept override
#define ARMNN_LOG(severity)
uint32_t m_PadTop
Padding top value in the height dimension.
void ProcessConcatInputTensorInfo(armnn::TensorInfo &inputTensorInfo, armnn::OriginsDescriptor &concatDescriptor, const unsigned int &concatAxis, unsigned int inputIndex, unsigned int &mergeDimOrigin)
void CalcPadding(uint32_t input, uint32_t kernel, uint32_t stride, uint32_t &outPadHead, uint32_t &outPadTail, bool samePadding)
uint32_t m_PadRight
Padding right value in the width dimension.
std::vector< std::pair< unsigned int, unsigned int > > m_PadList
Specifies the padding for input dimension.
std::unique_ptr< ITfLiteParser, void(*)(ITfLiteParser *parser)> ITfLiteParserPtr
virtual BindingPointInfo GetNetworkInputBindingInfo(size_t subgraphId, const std::string &name) const override
Retrieve binding info (layer id and tensor info) for the network input identified by the given layer ...
unsigned int ComputeWrappedIndex(int idx, unsigned int numDimsIn)
Copyright (c) 2020 ARM Limited.
void IgnoreUnused(Ts &&...)
uint32_t m_PadBottom
Padding bottom value in the height dimension.
int32_t m_BeginMask
Begin mask value.
virtual size_t GetSubgraphCount() const override
Return the number of subgraphs in the parsed model.
uint32_t m_DilationY
Dilation along y axis.
int32_t m_EndMask
End mask value.
std::vector< std::pair< unsigned int, unsigned int > > m_PadList
Specifies the padding values for the input dimension: heightPad{top, bottom} widthPad{left, right}.
uint32_t m_DilationY
Dilation factor value for height dimension.
A BatchToSpaceNdDescriptor for the BatchToSpaceNdLayer.
static armnn::TensorInfo OutputShapeOfReshape(const armnn::TensorInfo &inputTensorInfo, const std::vector< int32_t > &targetDimsIn)
static ITfLiteParserPtr Create(const armnn::Optional< TfLiteParserOptions > &options=armnn::EmptyOptional())
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
int LayerBindingId
Type of identifiers for bindable layers (inputs, outputs).
virtual void SetTensorInfo(const TensorInfo &tensorInfo)=0
#define CHECK_TENSOR(MODEL, SUBGRAPH_INDEX, TENSOR_INDEX)
constexpr const char * GetDataTypeName(DataType dataType)
void SetShape(const TensorShape &newShape)
A ResizeDescriptor for the ResizeLayer.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
uint32_t m_MaxClassesPerDetection
Maximum numbers of classes per detection, used in Fast NMS.
std::vector< unsigned int > m_Axis
Values for the dimensions to reduce.
A StackDescriptor for the StackLayer.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
TensorShape m_TargetShape
Target shape value.
static TensorIdRawPtrVector GetSubgraphInputs(const ModelPtr &model, size_t subgraphIndex)
static TensorRawPtrVector GetOutputs(const ModelPtr &model, size_t subgraphIndex, size_t operatorIndex)
uint32_t m_PoolHeight
Pooling height value.
uint32_t m_PadTop
Padding top value in the height dimension.
uint32_t m_MaxDetections
Maximum numbers of detections.
A PadDescriptor for the PadLayer.
armnn::TensorInfo ToTensorInfo(Deserializer::TensorRawPtr tensorPtr)
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
std::unique_ptr< onnx::ModelProto > ModelPtr
#define CHECK_SUBGRAPH(MODEL, SUBGRAPH_INDEX)
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
void CheckTensor(const ConstTensor &t)
static TensorIdRawPtrVector GetSubgraphOutputs(const ModelPtr &model, size_t subgraphIndex)
bool CheckShape(const armnn::TensorShape &actual, const std::vector< uint32_t > &expected)
float m_NmsIouThreshold
Intersection over union threshold.
uint32_t m_PadRight
Padding right value in the width dimension.
uint32_t m_DilationX
Dilation factor value for width dimension.
uint32_t m_PadTop
Padding top value in the height dimension.
std::string FileLine() const
Status SetViewSize(uint32_t view, uint32_t coord, uint32_t value)
Set the size of the views.
#define ARMNN_ASSERT_MSG(COND, MSG)
int32_t m_NewAxisMask
New axis mask value.
bool m_KeepDims
Enable/disable keep dimensions. If true, then the reduced dimensions that are of length 1 are kept...
armnnSerializer::TensorInfo * TensorRawPtr
std::vector< unsigned int > m_BlockShape
Block shape values.
static void Destroy(ITfLiteParser *parser)
An output connection slot for a layer.
A L2NormalizationDescriptor for the L2NormalizationLayer.
int32_t GetQuantizationOffset() const
float GetQuantizationScale() const
static std::vector< int32_t > & GetOutputTensorIds(const ModelPtr &model, size_t subgraphIndex, size_t operatorIndex)
DataType GetDataType() const
An OriginsDescriptor for the ConcatLayer.
bool has_value() const noexcept
A FullyConnectedDescriptor for the FullyConnectedLayer.
int32_t m_EllipsisMask
Ellipsis mask value.
bool m_BiasEnabled
Enable/disable bias.
A tensor defined by a TensorInfo (shape and data type) and an immutable backing store.
static ITfLiteParser * CreateRaw(const armnn::Optional< TfLiteParserOptions > &options=armnn::EmptyOptional())
#define CHECK_VALID_SIZE(ACTUAL,...)
uint32_t m_NumClasses
Number of classes.
#define CHECKED_NON_NEGATIVE(VALUE)
uint32_t m_PadTop
Padding top value in the height dimension.
#define ARMNN_ASSERT(COND)
A StandInDescriptor for the StandIn layer.
std::enable_if_t< std::is_unsigned< Source >::value &&std::is_unsigned< Dest >::value, Dest > numeric_cast(Source source)
bool m_UseRegularNms
Use Regular NMS.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
std::vector< unsigned int > m_BlockShape
Block shape value.
std::vector< int > m_Stride
Stride values for the input that will be sliced.
bool IsActivationSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const ActivationDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
An ActivationDescriptor for the ActivationLayer.
uint32_t m_NumInputs
Number of input tensors.
A SliceDescriptor for the SliceLayer.
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
static std::vector< int32_t > & GetInputTensorIds(const ModelPtr &model, size_t subgraphIndex, size_t operatorIndex)
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
#define CHECK_TENSOR_PTR(TENSOR_PTR)
float m_ScaleH
Center size encoding scale height.
std::vector< int > m_End
End values for the input that will be sliced.
A SpaceToBatchNdDescriptor for the SpaceToBatchNdLayer.
float m_A
Alpha upper bound value used by the activation functions. (BoundedReLu, Linear, TanH, Elu).
uint32_t m_DilationX
Dilation along x axis.
uint32_t m_PadLeft
Padding left value in the width dimension.
EmptyOptional is used to initialize the Optional class in case we want to have default value for an O...
static armnn::TensorInfo OutputShapeOfSqueeze(const std::vector< uint32_t > &squeezeDims, const armnn::TensorInfo &inputTensorInfo)
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
#define CHECK_SUPPORTED_FUSED_ACTIVATION(OPTION, SUBGRAPH_INDEX, OPERATOR_INDEX)
A ElementwiseUnaryDescriptor for the ElementwiseUnaryLayer.
PoolingAlgorithm m_PoolType
The pooling algorithm to use (Max. Average, L2).
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
std::vector< std::pair< unsigned int, unsigned int > > m_Crops
The values to crop from the input dimension.
unsigned int GetNumDimensions() const
OutputShapeRounding m_OutputShapeRounding
The rounding method for the output shape. (Floor, Ceiling).
void SetConcatAxis(unsigned int concatAxis)
Set the concatenation axis value.
const tflite::BufferT * BufferRawPtr
virtual const IInputSlot & GetInputSlot(unsigned int index) const =0
Get a const input slot handle by slot index.
A MeanDescriptor for the MeanLayer.
virtual std::vector< std::string > GetSubgraphInputTensorNames(size_t subgraphId) const override
Return the input tensor names for a given subgraph.
armnn::BindingPointInfo BindingPointInfo
uint32_t m_PadRight
Padding right value in the width dimension.
A TransposeDescriptor for the TransposeLayer.
A StridedSliceDescriptor for the StridedSliceLayer.
virtual const IOutputSlot & GetOutputSlot(unsigned int index) const =0
Get the const output slot handle by slot index.
virtual const char * GetName() const =0
Returns the name of the layer.
virtual std::vector< std::string > GetSubgraphOutputTensorNames(size_t subgraphId) const override
Return the output tensor names for a given subgraph.
float m_ScaleY
Center size encoding scale y.
float m_NmsScoreThreshold
NMS score threshold.
std::unique_ptr< INetwork, void(*)(INetwork *network)> INetworkPtr
virtual int Connect(IInputSlot &destination)=0
A Pooling2dDescriptor for the Pooling2dLayer.
armnn::Runtime::CreationOptions::ExternalProfilingOptions options
std::unique_ptr< tflite::ModelT > ModelPtr
unsigned int GetNumDimensions() const
#define CHECK_BUFFER_SIZE(BUFFER_PTR, TENSOR_INFO, BUFFER_ID)
float m_B
Beta lower bound value used by the activation functions. (BoundedReLu, Linear, TanH).
armnn::TensorShape Permuted(const armnn::TensorShape &srcShape, const armnn::PermutationVector &mappings)
A SoftmaxDescriptor for the SoftmaxLayer.
const tflite::TensorT * TensorRawPtr
DataLayout::NCHW DataLayout::NCHW DataLayout::NHWC DataLayout::NHWC true
Status SetViewOriginCoord(uint32_t view, uint32_t coord, uint32_t value)
Set the view origin coordinates.
ActivationFunction m_Function
The activation function to use (Sigmoid, TanH, Linear, ReLu, BoundedReLu, SoftReLu, LeakyReLu, Abs, Sqrt, Square, Elu).
std::unique_ptr< tflite::OperatorT > OperatorPtr
TfLiteParser(const armnn::Optional< ITfLiteParser::TfLiteParserOptions > &options=armnn::EmptyOptional())
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
A DepthwiseConvolution2dDescriptor for the DepthwiseConvolution2dLayer.
constexpr unsigned int MaxNumOfTensorDimensions
uint32_t m_PadLeft
Padding left value in the width dimension.
unsigned int GetNumElements() const
std::vector< TensorIdRawPtr > TensorIdRawPtrVector
uint32_t m_PadRight
Padding right value in the width dimension.