12 #include <boost/filesystem.hpp> 21 #include <schema_generated.h> 23 #include <flatbuffers/flexbuffers.h> 25 #include <boost/core/ignore_unused.hpp> 26 #include <boost/assert.hpp> 27 #include <boost/format.hpp> 28 #include <boost/numeric/conversion/cast.hpp> 35 using namespace armnn;
// Sentinel operator index meaning "no real operator in the subgraph".
// CheckModel below accepts this value to bypass the operator-index range check.
42 const uint32_t VIRTUAL_OPERATOR_ID = std::numeric_limits<uint32_t>::max();
// NOTE(review): fragment of CheckSubgraph(model, subgraphIndex, location).
// Embedded numbers are original source line numbers; interior lines (the
// ParseException throws and braces) are missing from this extraction.
// Rejects a null model first, then an out-of-range subgraph index.
48 if (model.get() ==
nullptr)
52 boost::format(
"%1% was called with invalid (null) model. " 53 "Possible reason is that the model is not yet loaded and Unpack(ed). " 54 "subgraph:%2% at %3%") %
// Second validation: subgraph index must be within the model's subgraph list.
59 else if (subgraphIndex >= model->subgraphs.size())
63 boost::format(
"%1% was called with an invalid subgraph index. " 64 "subgraph:%2% at %3%") %
// CHECK_SUBGRAPH: convenience wrapper adding the call-site location automatically.
// Followed by a fragment of CheckModel(model, subgraphIndex, operatorIndex, location):
// validates model pointer, subgraph index, then operator index (allowing the
// VIRTUAL_OPERATOR_ID sentinel). Interior throw/brace lines are missing.
71 #define CHECK_SUBGRAPH(MODEL, SUBGRAPH_INDEX) \ 72 CheckSubgraph(MODEL, SUBGRAPH_INDEX, CHECK_LOCATION()) 79 if (model.get() ==
nullptr)
83 boost::format(
"%1% was called with invalid (null) model. " 84 "Possible reason is that the model is not yet loaded and Unpack(ed). " 85 "subgraph:%2% operator:%3% at %4%") %
91 else if (subgraphIndex >= model->subgraphs.size())
95 boost::format(
"%1% was called with an invalid subgraph index. " 96 "subgraph:%2% operator:%3% at %4%") %
// Operator index must be in range unless it is the virtual-operator sentinel.
102 else if (operatorIndex >= model->subgraphs[subgraphIndex]->operators.size() &&
103 operatorIndex != VIRTUAL_OPERATOR_ID)
107 boost::format(
"%1% was called with an invalid operator index. " 108 "subgraph:%2% operator:%3% at %4%") %
// CHECK_MODEL macro, then a fragment of CheckTensor(model, subgraphIndex,
// tensorIndex, location). Model/subgraph validity are asserted (programmer
// error), while a bad tensor index raises a parse error.
116 #define CHECK_MODEL(MODEL, SUBGRAPH_INDEX, OPERATOR_INDEX) \ 117 CheckModel(MODEL, SUBGRAPH_INDEX, OPERATOR_INDEX, CHECK_LOCATION()) 120 size_t subgraphIndex,
126 BOOST_ASSERT_MSG(model.get() !=
nullptr,
"Expecting a valid model in this function");
130 BOOST_ASSERT_MSG(subgraphIndex < model->subgraphs.size(),
"Expecting a valid subgraph index");
// Tensor index outside the subgraph's tensor list is a parse-time error.
133 if (tensorIndex >= model->subgraphs[subgraphIndex]->tensors.size())
137 boost::format(
"%1% was called with an invalid tensor index. " 138 "subgraph:%2% tensor:%3% at %4%") %
146 #define CHECK_TENSOR(MODEL, SUBGRAPH_INDEX, TENSOR_INDEX) \ 147 CheckTensor(MODEL, SUBGRAPH_INDEX, TENSOR_INDEX, CHECK_LOCATION()) 152 if (rawPtr ==
// Fragment of CheckTensorPtr(rawPtr, location): rejects a null tensor pointer.
// The CHECK_TENSOR_PTR macro wraps it with the call-site location.
nullptr)
156 boost::format(
"%1% was called with a null tensor pointer. " 164 #define CHECK_TENSOR_PTR(TENSOR_PTR) \ 165 CheckTensorPtr(TENSOR_PTR, CHECK_LOCATION()) 171 if (model.get() ==
// Fragment of CheckBuffer(model, bufferIndex, location): validates the model
// pointer, the buffer index range, and that the indexed buffer itself is non-null.
nullptr)
175 boost::format(
"%1% was called with invalid (null) model. " 176 "Possible reason is that the model is not yet loaded and Unpack(ed). " 177 "buffer:%2% at %3%") %
182 else if (bufferIndex >= model->buffers.size())
186 boost::format(
"%1% was called with an invalid buffer index. " 187 "buffer index:%2% at %3%") %
192 else if (model->buffers[bufferIndex].get() ==
nullptr)
196 boost::format(
// NOTE(review): this format string skips %2% and references %3%; boost::format
// requires all referenced placeholders to be fed — confirm the argument list
// matches, otherwise this throw path itself throws boost::io::too_few_args.
"The buffer #%1% is null. %3%") %
202 #define CHECK_BUFFER(MODEL, BUFFER_INDEX) \ 203 CheckBuffer(MODEL, BUFFER_INDEX, CHECK_LOCATION()) 205 void CheckBufferSize(TfLiteParser::BufferRawPtr bufferPtr,
// Fragment of CheckBufferSize(bufferPtr, tensorInfo, bufferId, location):
// rejects a null buffer, then (on a size mismatch — the comparison line is
// missing from this extraction) builds a diagnostic comparing the buffer's
// byte count against tensorInfo.GetNumBytes().
210 if (bufferPtr ==
nullptr)
214 boost::format(
"BufferPtr is null for buffer:%1%. %2%") %
221 std::stringstream ss;
222 ss <<
"Buffer #" << bufferId <<
" has " << bufferPtr->data.size() <<
" bytes. " 223 <<
"For tensor: " << tensorInfo.
GetShape()
224 <<
" expecting: " << tensorInfo.
GetNumBytes() <<
" bytes and " 230 #define CHECK_BUFFER_SIZE(BUFFER_PTR, TENSOR_INFO, BUFFER_ID) \ 231 CheckBufferSize(BUFFER_PTR, TENSOR_INFO, BUFFER_ID, CHECK_LOCATION()) 235 switch(activationType)
// Fragment of IsActivationSupported(activationType): the listed fused-activation
// kinds (NONE/RELU/RELU6/TANH) fall through to a shared "supported" result;
// the return statements and default branch are missing from this extraction.
237 case tflite::ActivationFunctionType_NONE:
238 case tflite::ActivationFunctionType_RELU:
239 case tflite::ActivationFunctionType_RELU6:
240 case tflite::ActivationFunctionType_TANH:
// CHECK_SUPPORTED_FUSED_ACTIVATION: throws ParseException for unsupported fused
// activations. NOTE(review): "suppport" typo lives in a runtime error string, so
// it is deliberately left untouched here; fixing it would change emitted messages.
251 #define CHECK_SUPPORTED_FUSED_ACTIVATION(OPTION, SUBGRAPH_INDEX, OPERATOR_INDEX) \ 253 if (IsActivationSupported(OPTION->fused_activation_function) == false) \ 255 throw ParseException( \ 257 boost::format("TfLite parser doesn't suppport fused activation: " \ 258 "%1%/%2% in %3% subgraph:%4% operator:%5% at %6%") % \ 259 OPTION->fused_activation_function % \ 260 tflite::EnumNameActivationFunctionType(\ 261 OPTION->fused_activation_function) % \ 265 CHECK_LOCATION().FileLine())); \ 270 std::vector<unsigned int> AsUnsignedVector(
// Fragment of AsUnsignedVector(in): converts a vector of int32_t tensor ids to
// unsigned int. Capacity is reserved up front; the element-conversion loop and
// return are missing from this extraction.
const std::vector<int32_t> & in)
272 std::vector<unsigned int> result;
273 result.reserve(in.size());
// Fragment of CalcPadding(...): computes TFLite SAME padding, split between
// front and back (front gets the smaller half when the total is odd).
// Leading parameters (inputSize, filterSize, stride, dilation) are missing
// from this extraction.
285 uint32_t& paddingFront,
286 uint32_t& paddingBack,
287 tflite::Padding padding)
291 if (padding == tflite::Padding_SAME)
// ceil(inputSize / stride) — TFLite SAME output size.
293 uint32_t outputSize = (inputSize + stride - 1) / stride;
// Effective filter extent once dilation gaps are included.
294 uint32_t dilatedSize = filterSize + (dilation - 1) * (filterSize - 1);
295 uint32_t temp = (outputSize - 1) * stride + dilatedSize;
296 if (temp > inputSize)
298 paddingFront = (temp - inputSize) / 2;
299 paddingBack = (temp - inputSize) - paddingFront;
// Fragment of ToTensorInfo(tensorPtr, shapes, dimensionMappings): maps a TFLite
// tensor type to an armnn DataType (default branch throws for unsupported
// types), then derives shape and quantization parameters.
310 switch (tensorPtr->type)
312 case tflite::TensorType_UINT8:
315 case tflite::TensorType_FLOAT32:
// INT8 appears to distinguish per-tensor (single zero-point) from per-channel
// quantization — TODO confirm; the branch bodies are missing here.
318 case tflite::TensorType_INT8:
319 if (tensorPtr->quantization->zero_point.size() == 1)
330 case tflite::TensorType_INT16:
333 case tflite::TensorType_INT32:
342 boost::format(
"Unsupported data type %1% = %2% for tensor: %3%. %4%") %
344 tflite::EnumNameTensorType(tensorPtr->type) %
// A rank-0 (scalar) shape is promoted to {1} so TensorInfo construction works.
349 std::vector<unsigned int> safeShape = shapes;
350 if (safeShape.size() == 0)
352 safeShape.push_back(1);
355 float quantizationScale = 0.0f;
356 int32_t quantizationOffset = 0;
// Per-tensor quantization: at most one scale / one zero point.
358 if (tensorPtr->quantization.get())
360 if (tensorPtr->quantization->scale.size() <= 1)
365 if (tensorPtr->quantization->scale.size() == 1)
367 quantizationScale = tensorPtr->quantization->scale[0];
369 if (tensorPtr->quantization->zero_point.size() == 1)
373 quantizationOffset = boost::numeric_cast<int32_t>(tensorPtr->quantization->zero_point[0]);
// Per-channel (per-axis) quantization: copy all scales and map the quantized
// dimension through the caller-supplied permutation.
386 std::vector<float> quantizationScales;
387 std::vector<int32_t> quantizationOffsets;
390 std::copy(tensorPtr->quantization->scale.begin(),
391 tensorPtr->quantization->scale.end(),
392 std::back_inserter(quantizationScales));
399 dimensionMappings[boost::numeric_cast<
unsigned int>(
400 tensorPtr->quantization->quantized_dimension)]);
// Convenience overload: derive dimensions from the tensor's own shape field.
418 auto const & dimensions = AsUnsignedVector(tensorPtr->shape);
419 return ToTensorInfo(tensorPtr, dimensions, dimensionMappings);
// Fragment of CreateConstTensorImpl<T>(bufferPtr, tensorPtr, tensorInfo, ...):
// copies the FlatBuffer-owned constant data into freshly allocated storage and
// returns the ConstTensor together with the owning unique_ptr (the ConstTensor
// itself does not own the data, so the pair keeps it alive).
423 std::pair<armnn::ConstTensor, std::unique_ptr<T[]>>
424 CreateConstTensorImpl(TfLiteParser::BufferRawPtr bufferPtr,
429 boost::ignore_unused(tensorPtr);
430 BOOST_ASSERT_MSG(tensorPtr !=
nullptr,
"tensorPtr is null");
431 BOOST_ASSERT_MSG(bufferPtr !=
nullptr,
433 boost::format(
"Buffer for buffer:%1% is null") % tensorPtr->buffer).c_str());
// One copy path appears to be element-wise/permuted (reinterpret_cast to T*),
// the other a straight byte copy of the whole tensor — the selecting condition
// is missing from this extraction; TODO confirm against the full source.
441 reinterpret_cast<const T*
>(bufferPtr->data.data()), data.get(),
sizeof(T));
445 ::memcpy(data.get(), bufferPtr->data.data(), tensorInfo.
GetNumBytes());
448 return std::make_pair(
ConstTensor(tensorInfo, data.get()), std::move(data));
// Fragment of a shape-comparison helper (actual armnn shape vs expected signed
// TFLite dims): sizes must match, every expected dim must be non-negative, and
// each element must compare equal after casting to unsigned.
461 if (actualSize != expected.size())
466 for (
unsigned int i = 0u; i < actualSize; i++)
468 if (expected[i] < 0 ||
469 actual[i] != static_cast<unsigned int>(expected[i]))
// Fragment of the TfLiteParser constructor (initializer list head is missing).
// The dispatch table is sized for every builtin operator and defaults each slot
// to ParseUnsupportedOperator; supported builtins are then registered below,
// so an unregistered opcode falls through to the unsupported-operator handler.
482 , m_Network(nullptr, nullptr)
483 , m_ParserFunctions(tflite::BuiltinOperator_MAX+1, &
TfLiteParser::ParseUnsupportedOperator)
// Register one parser member function per supported TFLite builtin operator.
486 m_ParserFunctions[tflite::BuiltinOperator_ADD] = &TfLiteParser::ParseAdd;
487 m_ParserFunctions[tflite::BuiltinOperator_AVERAGE_POOL_2D] = &TfLiteParser::ParseAveragePool2D;
488 m_ParserFunctions[tflite::BuiltinOperator_BATCH_TO_SPACE_ND] = &TfLiteParser::ParseBatchToSpaceND;
489 m_ParserFunctions[tflite::BuiltinOperator_CONCATENATION] = &TfLiteParser::ParseConcatenation;
490 m_ParserFunctions[tflite::BuiltinOperator_CONV_2D] = &TfLiteParser::ParseConv2D;
491 m_ParserFunctions[tflite::BuiltinOperator_CUSTOM] = &TfLiteParser::ParseCustomOperator;
492 m_ParserFunctions[tflite::BuiltinOperator_DEPTHWISE_CONV_2D] = &TfLiteParser::ParseDepthwiseConv2D;
493 m_ParserFunctions[tflite::BuiltinOperator_DEQUANTIZE] = &TfLiteParser::ParseDequantize;
494 m_ParserFunctions[tflite::BuiltinOperator_FULLY_CONNECTED] = &TfLiteParser::ParseFullyConnected;
495 m_ParserFunctions[tflite::BuiltinOperator_LOGISTIC] = &TfLiteParser::ParseLogistic;
496 m_ParserFunctions[tflite::BuiltinOperator_L2_NORMALIZATION] = &TfLiteParser::ParseL2Normalization;
497 m_ParserFunctions[tflite::BuiltinOperator_MAX_POOL_2D] = &TfLiteParser::ParseMaxPool2D;
498 m_ParserFunctions[tflite::BuiltinOperator_MAXIMUM] = &TfLiteParser::ParseMaximum;
499 m_ParserFunctions[tflite::BuiltinOperator_MEAN] = &TfLiteParser::ParseMean;
500 m_ParserFunctions[tflite::BuiltinOperator_MINIMUM] = &TfLiteParser::ParseMinimum;
501 m_ParserFunctions[tflite::BuiltinOperator_MUL] = &TfLiteParser::ParseMul;
502 m_ParserFunctions[tflite::BuiltinOperator_PACK] = &TfLiteParser::ParsePack;
503 m_ParserFunctions[tflite::BuiltinOperator_PAD] = &TfLiteParser::ParsePad;
504 m_ParserFunctions[tflite::BuiltinOperator_QUANTIZE] = &TfLiteParser::ParseQuantize;
505 m_ParserFunctions[tflite::BuiltinOperator_RELU] = &TfLiteParser::ParseRelu;
506 m_ParserFunctions[tflite::BuiltinOperator_RELU6] = &TfLiteParser::ParseRelu6;
507 m_ParserFunctions[tflite::BuiltinOperator_RESHAPE] = &TfLiteParser::ParseReshape;
508 m_ParserFunctions[tflite::BuiltinOperator_RESIZE_BILINEAR] = &TfLiteParser::ParseResizeBilinear;
509 m_ParserFunctions[tflite::BuiltinOperator_RESIZE_NEAREST_NEIGHBOR] = &TfLiteParser::ParseResizeNearestNeighbor;
510 m_ParserFunctions[tflite::BuiltinOperator_SLICE] = &TfLiteParser::ParseSlice;
511 m_ParserFunctions[tflite::BuiltinOperator_SOFTMAX] = &TfLiteParser::ParseSoftmax;
512 m_ParserFunctions[tflite::BuiltinOperator_SPACE_TO_BATCH_ND] = &TfLiteParser::ParseSpaceToBatchND;
513 m_ParserFunctions[tflite::BuiltinOperator_SPLIT] = &TfLiteParser::ParseSplit;
514 m_ParserFunctions[tflite::BuiltinOperator_SQUEEZE] = &TfLiteParser::ParseSqueeze;
515 m_ParserFunctions[tflite::BuiltinOperator_STRIDED_SLICE] = &TfLiteParser::ParseStridedSlice;
516 m_ParserFunctions[tflite::BuiltinOperator_SUB] = &TfLiteParser::ParseSub;
517 m_ParserFunctions[tflite::BuiltinOperator_TANH] = &TfLiteParser::ParseTanH;
518 m_ParserFunctions[tflite::BuiltinOperator_TRANSPOSE] = &TfLiteParser::ParseTranspose;
519 m_ParserFunctions[tflite::BuiltinOperator_TRANSPOSE_CONV] = &TfLiteParser::ParseTransposeConv;
520 m_ParserFunctions[tflite::BuiltinOperator_UNPACK] = &TfLiteParser::ParseUnpack;
// Custom (non-builtin) operators are dispatched by name via a separate map.
523 m_CustomParserFunctions[
"TFLite_Detection_PostProcess"] = &TfLiteParser::ParseDetectionPostProcess;
// Fragment of ResetParser(): clears per-model parse state between runs.
// Only the connection-tracking reset is visible in this extraction.
526 void TfLiteParser::ResetParser()
530 m_SubgraphConnections.clear();
// Fragment of AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer):
// inserts a Reshape in front of the lower-rank input of a binary elementwise
// layer so both inputs have the same number of dimensions (broadcast support).
533 void TfLiteParser::AddBroadcastReshapeLayer(
size_t subgraphIndex,
534 size_t operatorIndex,
537 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
538 BOOST_ASSERT(layer !=
nullptr);
540 const auto & subgraphPtr = m_Model->subgraphs[subgraphIndex];
541 const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
// A binary op is expected — two inputs to compare/reshape.
543 BOOST_ASSERT(operatorPtr->inputs.size() > 1);
546 TensorRawPtr tensorPtr = subgraphPtr->tensors[reshapedInputId].get();
548 TensorRawPtr tensorPtr1 = subgraphPtr->tensors[inputId].get();
// Swap so that reshapedInputId always refers to the input that gets reshaped
// (the swap condition itself is missing from this extraction).
555 uint32_t
id = reshapedInputId;
556 reshapedInputId = inputId;
// Build the target shape: existing dims right-aligned into a vector of 1s of
// the larger rank (leading dims padded with 1 for broadcasting).
565 std::vector<unsigned> reshapedDim;
568 reshapedDim.push_back(reshapedTensorInfo.
GetShape()[i]);
571 std::vector<unsigned int> reshapedDimensions(numDimensions, 1);
572 std::copy_backward (reshapedDim.begin(), reshapedDim.end(), reshapedDimensions.end());
576 std::string layerName = boost::str(boost::format(
"Reshape_for:%1%") % layer->
GetName());
584 RegisterInputSlots(subgraphIndex, operatorIndex, reshapeLayer, {reshapedInputId});
587 RegisterConsumerOfTensor(subgraphIndex, inputId, input1Slot);
// Tails of two public entry points (signatures missing from this extraction):
// both load the model (from file / from in-memory binary) and delegate to
// CreateNetworkFromModel().
594 return CreateNetworkFromModel();
601 return CreateNetworkFromModel();
// Fragment of CreateNetworkFromModel(): builds the armnn INetwork from the
// unpacked FlatBuffer model. Errors from individual operators are collected
// into a stringstream so one report covers all failures.
606 m_Network = INetwork::Create();
607 BOOST_ASSERT(m_Model.get() !=
nullptr);
609 bool failedToCreate =
false;
610 std::stringstream errors;
// Only single-subgraph models are supported.
612 if (m_Model->subgraphs.size() != 1)
616 boost::format(
"Current TfLite parser only supports 1 subgraph. Current one has: %1% %2%") %
617 m_Model->subgraphs.size() %
621 size_t subgraphIndex = 0;
622 for (
SubgraphPtr const & subgraph : m_Model->subgraphs)
// One connection-slot entry per tensor in the subgraph.
624 m_SubgraphConnections.emplace_back(subgraph->tensors.size());
626 size_t operatorIndex = 0;
631 auto const & opCodePtr = m_Model->operator_codes[op->opcode_index];
632 auto builtinCode = opCodePtr->builtin_code;
// Guard before indexing the dispatch table.
634 if (builtinCode > tflite::BuiltinOperator_MAX)
638 boost::format(
"Operator code %1% is out of range 0-%2%. " 639 "subgraph:%3% operator idx:%4%. %5%") %
641 tflite::BuiltinOperator_MAX %
// Dispatch through the member-function-pointer table set up in the ctor.
648 auto & parserFunction = m_ParserFunctions[builtinCode];
649 (this->*parserFunction)(subgraphIndex, operatorIndex);
// Per-operator failures are recorded, not fatal immediately.
653 failedToCreate =
true;
654 std::stringstream errorString;
656 errorString <<
"Failed to parse operator #" << operatorIndex
657 <<
" within subgraph #" << subgraphIndex
658 <<
" error: " << e.
what();
661 errors << errorString.str() <<
"\n";
666 SetupInputLayers(subgraphIndex);
667 SetupOutputLayers(subgraphIndex);
668 SetupConstantLayers(subgraphIndex);
// Wire every recorded producer output slot to all of its consumer input slots.
680 for (
size_t subgraphIndex = 0; subgraphIndex < m_SubgraphConnections.size(); ++subgraphIndex)
682 for (
size_t tensorIndex = 0; tensorIndex < m_SubgraphConnections[subgraphIndex].size(); ++tensorIndex)
684 if (m_SubgraphConnections[subgraphIndex][tensorIndex].outputSlot !=
nullptr)
686 for (
size_t inputSlotIdx = 0;
687 inputSlotIdx < m_SubgraphConnections[subgraphIndex][tensorIndex].inputSlots.size();
690 m_SubgraphConnections[subgraphIndex][tensorIndex].outputSlot->Connect(
691 *(m_SubgraphConnections[subgraphIndex][tensorIndex].inputSlots[inputSlotIdx]));
// Ownership of the built network is transferred to the caller.
697 return std::move(m_Network);
// Fragment of RegisterProducerOfTensor(subgraphIndex, tensorIndex, slot):
// records the single output slot that produces a tensor; a second registration
// for the same tensor is a parse error.
700 void TfLiteParser::RegisterProducerOfTensor(
size_t subgraphIndex,
705 BOOST_ASSERT(m_SubgraphConnections.size() > subgraphIndex);
706 BOOST_ASSERT(m_SubgraphConnections[subgraphIndex].size() > tensorIndex);
708 TensorSlots & tensorSlots = m_SubgraphConnections[subgraphIndex][tensorIndex];
// Exactly one producer allowed per tensor.
711 if (tensorSlots.outputSlot !=
nullptr)
714 boost::format(
"Another layer has already registered itself as the producer of " 715 "subgraph:%1% tensor:%2% %3%") %
721 tensorSlots.outputSlot = slot;
// Fragment of RegisterConsumerOfTensor(subgraphIndex, tensorIndex, slot):
// appends an input slot to the tensor's consumer list (many consumers allowed,
// unlike the single-producer rule above).
724 void TfLiteParser::RegisterConsumerOfTensor(
size_t subgraphIndex,
729 BOOST_ASSERT(m_SubgraphConnections.size() > subgraphIndex);
730 BOOST_ASSERT(m_SubgraphConnections[subgraphIndex].size() > tensorIndex);
732 TensorSlots & tensorSlots = m_SubgraphConnections[subgraphIndex][tensorIndex];
733 tensorSlots.inputSlots.push_back(slot);
// Fragment of ParseCustomOperator(subgraphIndex, operatorIndex): looks up the
// operator's custom_code string in m_CustomParserFunctions and dispatches;
// unknown custom ops fall back to ParseUnsupportedOperator.
736 void TfLiteParser::ParseCustomOperator(
size_t subgraphIndex,
size_t operatorIndex)
738 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
741 auto customParserFunction = &TfLiteParser::ParseUnsupportedOperator;
744 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
745 const auto& customCode = m_Model->operator_codes[operatorPtr->opcode_index]->custom_code;
748 auto iterator = m_CustomParserFunctions.find(customCode);
749 if (iterator != m_CustomParserFunctions.end())
751 customParserFunction = iterator->second;
755 (this->*customParserFunction)(subgraphIndex, operatorIndex);
// Fragment of ParseUnsupportedOperator(subgraphIndex, operatorIndex): either
// throws ParseException (default) or, when the StandInLayerForUnsupported
// option is enabled, inserts a StandIn layer with matching input/output counts
// so network construction can continue.
758 void TfLiteParser::ParseUnsupportedOperator(
size_t subgraphIndex,
size_t operatorIndex)
760 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
762 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
764 auto opcodeIndex = operatorPtr->opcode_index;
765 auto opcode = m_Model->operator_codes[opcodeIndex]->builtin_code;
// No stand-in requested: fail the parse with a descriptive error.
767 if (!m_Options || !m_Options.value().m_StandInLayerForUnsupported)
772 boost::format(
"Operator not supported. " 773 "subgraph:%1% operator:%2% " 774 "opcode_index:%3% opcode:%4% / %5% %6%") %
779 tflite::EnumNameBuiltinOperator(opcode) %
783 auto inputs =
GetInputs(m_Model, subgraphIndex, operatorIndex);
784 auto outputs =
GetOutputs(m_Model, subgraphIndex, operatorIndex);
786 const unsigned int numInputs = boost::numeric_cast<
unsigned int>(inputs.size());
787 const unsigned int numOutputs = boost::numeric_cast<
unsigned int>(outputs.size());
790 auto layerName = boost::str(boost::format(
"StandIn:%1%:%2%:%3%") % subgraphIndex % operatorIndex % opcode);
793 IConnectableLayer* layer = m_Network->AddStandInLayer(descriptor, layerName.c_str());
// Propagate output tensor infos onto the stand-in's output slots.
794 for (
unsigned int i = 0u; i < numOutputs; ++i)
799 auto inputTensorIds = AsUnsignedVector(
GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
800 auto outputTensorIds = AsUnsignedVector(
GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
802 RegisterInputSlots(subgraphIndex, operatorIndex, layer, inputTensorIds);
803 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIds);
// Fragment of ParseConv2D(subgraphIndex, operatorIndex): builds an armnn
// Convolution2d layer from a CONV_2D operator — weights (and optional bias,
// when a third input is present) become ConstTensors, SAME/VALID padding is
// computed, and the fused activation is appended as a separate layer.
806 void TfLiteParser::ParseConv2D(
size_t subgraphIndex,
size_t operatorIndex)
808 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
810 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
811 const auto *
options = operatorPtr->builtin_options.AsConv2DOptions();
823 auto inputs =
GetInputs(m_Model, subgraphIndex, operatorIndex);
826 auto outputs =
GetOutputs(m_Model, subgraphIndex, operatorIndex);
// NHWC layout is assumed here: index 1 = height, index 2 = width — TODO confirm.
833 unsigned int inputHeight = inputTensorInfo.GetShape()[1];
834 unsigned int inputWidth = inputTensorInfo.GetShape()[2];
838 unsigned int filterHeight = filterTensorInfo.
GetShape()[1];
839 unsigned int filterWidth = filterTensorInfo.
GetShape()[2];
846 auto filterTensorAndData = CreateConstTensor(inputs[1],
851 auto layerName = boost::str(boost::format(
"Conv2D:%1%:%2%") % subgraphIndex % operatorIndex);
// Three inputs means an explicit bias tensor is present.
853 if (inputs.size() == 3)
857 auto biasTensorAndData = CreateConstTensor(inputs[2],
860 layer = m_Network->AddConvolution2dLayer(desc,
861 filterTensorAndData.first,
867 layer = m_Network->AddConvolution2dLayer(desc,
868 filterTensorAndData.first,
873 BOOST_ASSERT(layer !=
nullptr);
880 auto inputTensorIndexes = AsUnsignedVector(
GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
// Only the data input (index 0) is wired; weights/bias are constant inputs.
881 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
// Fused activation becomes a trailing layer; outputs register against it.
883 layer = AddFusedActivationLayer(layer, 0,
options->fused_activation_function);
885 auto outputTensorIndexes = AsUnsignedVector(
GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
886 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
// Fragment of ParseDepthwiseConv2D: mirrors ParseConv2D but builds a
// DepthwiseConvolution2d layer; the filter tensor's shape is adjusted (and a
// permutation applied) to match armnn's expected depthwise weight layout.
889 void TfLiteParser::ParseDepthwiseConv2D(
size_t subgraphIndex,
size_t operatorIndex)
891 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
893 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
894 const auto *
options = operatorPtr->builtin_options.AsDepthwiseConv2DOptions();
905 auto inputs =
GetInputs(m_Model, subgraphIndex, operatorIndex);
907 auto outputs =
GetOutputs(m_Model, subgraphIndex, operatorIndex);
919 unsigned int inputHeight = inputTensorInfo.
GetShape()[1];
920 unsigned int inputWidth = inputTensorInfo.
GetShape()[2];
923 unsigned int filterHeight = filterTensorInfo.
GetShape()[1];
924 unsigned int filterWidth = filterTensorInfo.
GetShape()[2];
// Re-shape the filter tensor info before building the const tensor.
927 filterTensorInfo.
SetShape({ filterHeight,
937 auto filterTensorAndData = CreateConstTensor(inputs[1], filterTensorInfo, permutationVector);
939 auto layerName = boost::str(boost::format(
"DepthwiseConv2D:%1%:%2%") % subgraphIndex % operatorIndex);
// Optional bias as third input, as in ParseConv2D.
941 if (inputs.size() == 3)
945 auto biasTensorAndData = CreateConstTensor(inputs[2],
948 layer = m_Network->AddDepthwiseConvolution2dLayer(desc,
949 filterTensorAndData.first,
955 layer = m_Network->AddDepthwiseConvolution2dLayer(desc,
956 filterTensorAndData.first,
960 BOOST_ASSERT(layer !=
nullptr);
967 auto inputTensorIndexes = AsUnsignedVector(
GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
968 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
970 layer = AddFusedActivationLayer(layer, 0,
options->fused_activation_function);
972 auto outputTensorIndexes = AsUnsignedVector(
GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
973 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
// Fragment of ParseDequantize: adds a Dequantize layer and registers its single
// input and its outputs. Note: outputs are registered with the full id vector
// (not just index 0), unlike most other handlers in this file.
976 void TfLiteParser::ParseDequantize(
size_t subgraphIndex,
size_t operatorIndex)
978 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
980 auto inputs =
GetInputs(m_Model, subgraphIndex, operatorIndex);
983 auto outputs =
GetOutputs(m_Model, subgraphIndex, operatorIndex);
986 auto layerName = boost::str(boost::format(
"Dequantize:%1%:%2%") % subgraphIndex % operatorIndex);
989 BOOST_ASSERT(layer !=
nullptr);
994 auto inputTensorIndexes = AsUnsignedVector(
GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
995 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
997 auto outputTensorIndexes = AsUnsignedVector(
GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
998 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
// Fragment of ParseTranspose: maps a TRANSPOSE op to an armnn Permute layer.
// The optional second input holds the permutation vector, which is inverted
// (TFLite and armnn express permutations in opposite directions) before being
// placed in the Permute descriptor.
1001 void TfLiteParser::ParseTranspose(
size_t subgraphIndex,
size_t operatorIndex)
1003 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1005 auto inputs =
GetInputs(m_Model, subgraphIndex, operatorIndex);
1008 auto outputs =
GetOutputs(m_Model, subgraphIndex, operatorIndex);
1012 auto layerName = boost::str(boost::format(
"Transpose:%1%:%2%") % subgraphIndex % operatorIndex);
// Permutation supplied as a constant second input.
1016 if (inputs.size() == 2)
1021 std::vector<unsigned int> permuteShape(numPermVecElements);
1022 ::memcpy(permuteShape.data(), permuteBufferPtr->data.data(), permuteTensorInfo.
GetNumBytes());
// Invert the permutation: armnnPermuteShape[i] = position of i in permuteShape.
1026 std::vector<unsigned int> armnnPermuteShape(numPermVecElements);
1027 std::vector<unsigned int>::iterator it;
1028 for (
unsigned int i = 0u; i < numPermVecElements; ++i)
1030 it = std::find(permuteShape.begin(), permuteShape.end(), i);
1031 armnnPermuteShape[i] =
static_cast<unsigned int>(std::distance(permuteShape.begin(), it));
1039 layer = m_Network->AddPermuteLayer(desc, layerName.c_str());
1041 BOOST_ASSERT(layer !=
nullptr);
1044 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1046 auto inputTensorIndexes = AsUnsignedVector(
GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1047 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1049 auto outputTensorIndexes = AsUnsignedVector(
GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1050 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
// Fragment of ParseTransposeConv: builds a TransposeConvolution2d layer.
// TFLite's TRANSPOSE_CONV inputs are (output_shape, weights, data) — hence the
// data input being registered from inputTensorIndexes[2] below.
1053 void TfLiteParser::ParseTransposeConv(
size_t subgraphIndex,
size_t operatorIndex)
1055 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1057 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1058 const auto *
options = operatorPtr->builtin_options.AsTransposeConvOptions();
1066 auto inputs =
GetInputs(m_Model, subgraphIndex, operatorIndex);
1069 auto outputs =
GetOutputs(m_Model, subgraphIndex, operatorIndex);
1076 const unsigned int inputHeight = inputTensorInfo.GetShape()[1];
1077 const unsigned int inputWidth = inputTensorInfo.GetShape()[2];
1079 const unsigned int filterHeight = filterTensorInfo.
GetShape()[1];
1080 const unsigned int filterWidth = filterTensorInfo.
GetShape()[2];
1098 auto filterTensorAndData = CreateConstTensor(inputs[1],
1103 auto layerName = boost::str(boost::format(
"TransposeConv:%1%:%2%") % subgraphIndex % operatorIndex);
1105 layer = m_Network->AddTransposeConvolution2dLayer(desc,
1106 filterTensorAndData.first,
1110 BOOST_ASSERT(layer !=
nullptr);
1116 auto inputTensorIndexes = AsUnsignedVector(
GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
// Index 2 is the actual data tensor for TRANSPOSE_CONV (0 is output shape,
// 1 is weights).
1117 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[2]});
1119 auto outputTensorIndexes = AsUnsignedVector(
GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1120 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
// ParseAveragePool2D: thin wrapper delegating to the shared ParsePool with the
// Average pooling algorithm (braces lost in this extraction).
1123 void TfLiteParser::ParseAveragePool2D(
size_t subgraphIndex,
size_t operatorIndex)
1125 ParsePool(subgraphIndex, operatorIndex, PoolingAlgorithm::Average);
// Fragment of ParseBatchToSpaceND: reads block-shape and crops from constant
// input buffers, converts flat crops into (begin, end) pairs per spatial dim,
// and adds a BatchToSpaceNd layer.
1128 void TfLiteParser::ParseBatchToSpaceND(
size_t subgraphIndex,
size_t operatorIndex)
1130 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1132 auto inputs =
GetInputs(m_Model, subgraphIndex, operatorIndex);
1135 auto outputs =
GetOutputs(m_Model, subgraphIndex, operatorIndex);
// Raw copy of the constant block-shape and crops tensors out of the FlatBuffer.
1144 std::vector<unsigned int> blockShape(blockShapeTensorInfo.GetNumElements());
1145 ::memcpy(blockShape.data(), blockShapeBufferPtr->data.data(), blockShapeTensorInfo.GetNumBytes());
1147 std::vector<unsigned int> cropsVector(cropsTensorInfo.
GetNumElements());
1148 ::memcpy(cropsVector.data(), cropsBufferPtr->data.data(), cropsTensorInfo.
GetNumBytes());
// Pair up flat crop values: (front, back) per dimension, step presumably 2.
1151 std::vector<std::pair<unsigned int, unsigned int>> crops;
1152 for (
unsigned int i = 0; i < cropsTensorInfo.
GetNumElements() / step; ++i)
1154 crops.emplace_back(cropsVector[i * step], cropsVector[i * step + 1]);
1164 auto layerName = boost::str(boost::format(
"BatchToSpaceND:%1%:%2%") % subgraphIndex % operatorIndex);
1165 IConnectableLayer* layer = m_Network->AddBatchToSpaceNdLayer(desc, layerName.c_str());
1169 auto inputTensorIndexes = AsUnsignedVector(
GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1170 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1172 auto outputTensorIndexes = AsUnsignedVector(
GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1173 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
// Fragment of ParseL2Normalization: straightforward single-input/single-output
// layer addition with the usual slot registration pattern.
1176 void TfLiteParser::ParseL2Normalization(
size_t subgraphIndex,
size_t operatorIndex)
1178 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1180 auto inputs =
GetInputs(m_Model, subgraphIndex, operatorIndex);
1183 auto outputs =
GetOutputs(m_Model, subgraphIndex, operatorIndex);
1188 auto layerName = boost::str(boost::format(
"L2Normalization:%1%:%2%") % subgraphIndex % operatorIndex);
1189 IConnectableLayer* layer = m_Network->AddL2NormalizationLayer(desc, layerName.c_str());
1191 BOOST_ASSERT(layer !=
nullptr);
1196 auto inputTensorIndexes = AsUnsignedVector(
GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1197 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1199 auto outputTensorIndexes = AsUnsignedVector(
GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1200 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
// ParseMaxPool2D: thin wrapper delegating to the shared ParsePool with the Max
// pooling algorithm (braces lost in this extraction).
1203 void TfLiteParser::ParseMaxPool2D(
size_t subgraphIndex,
size_t operatorIndex)
1205 ParsePool(subgraphIndex, operatorIndex, PoolingAlgorithm::Max);
// Fragment of ParseMaximum: binary elementwise max. When the two inputs differ
// in rank, AddBroadcastReshapeLayer handles the input wiring (which includes a
// reshape on the lower-rank side); otherwise both inputs register directly.
1208 void TfLiteParser::ParseMaximum(
size_t subgraphIndex,
size_t operatorIndex)
1210 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1212 auto inputs =
GetInputs(m_Model, subgraphIndex, operatorIndex);
1215 auto outputs =
GetOutputs(m_Model, subgraphIndex, operatorIndex);
1221 auto layerName = boost::str(boost::format(
"Maximum:%1%:%2%") % subgraphIndex % operatorIndex);
1227 auto inputTensorIndexes = AsUnsignedVector(
GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
// Rank mismatch => broadcast path.
1228 if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.
GetNumDimensions())
1230 AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer);
1234 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
1237 auto outputTensorIndexes = AsUnsignedVector(
GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1238 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
// Fragment of ParseMinimum: identical structure to ParseMaximum, including the
// broadcast-reshape path for rank-mismatched inputs.
1241 void TfLiteParser::ParseMinimum(
size_t subgraphIndex,
size_t operatorIndex)
1243 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1245 auto inputs =
GetInputs(m_Model, subgraphIndex, operatorIndex);
1248 auto outputs =
GetOutputs(m_Model, subgraphIndex, operatorIndex);
1254 auto layerName = boost::str(boost::format(
"Minimum:%1%:%2%") % subgraphIndex % operatorIndex);
1260 auto inputTensorIndexes = AsUnsignedVector(
GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1261 if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.
GetNumDimensions())
1263 AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer);
1267 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
1270 auto outputTensorIndexes = AsUnsignedVector(
GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1271 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
// Fragment of ParsePool(subgraphIndex, operatorIndex, algorithm): shared
// implementation behind ParseAveragePool2D/ParseMaxPool2D. Picks a layer name
// from the algorithm, builds the Pooling2d descriptor from the TFLite Pool2D
// options, and appends any fused activation as a trailing layer.
1274 void TfLiteParser::ParsePool(
size_t subgraphIndex,
1275 size_t operatorIndex,
1278 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1280 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1281 const auto *
options = operatorPtr->builtin_options.AsPool2DOptions();
1285 std::string layerName;
1289 case PoolingAlgorithm::Average:
1291 boost::str(boost::format(
"AveragePool2D:%1%:%2%") % subgraphIndex % operatorIndex);
1293 case PoolingAlgorithm::Max:
1295 boost::str(boost::format(
"MaxPool2D:%1%:%2%") % subgraphIndex % operatorIndex);
// Any other algorithm is a programmer error, not a model error.
1298 BOOST_ASSERT_MSG(
false,
"Unsupported Pooling Algorithm");
1312 auto inputs =
GetInputs(m_Model, subgraphIndex, operatorIndex);
1317 unsigned int inputHeight = inputTensorInfo.GetShape()[1];
1318 unsigned int inputWidth = inputTensorInfo.GetShape()[2];
1325 auto outputs =
GetOutputs(m_Model, subgraphIndex, operatorIndex);
1328 IConnectableLayer* layer = m_Network->AddPooling2dLayer(desc, layerName.c_str());
1330 BOOST_ASSERT(layer !=
nullptr);
1333 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1337 auto inputTensorIndexes = AsUnsignedVector(
GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1338 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1340 layer = AddFusedActivationLayer(layer, 0,
options->fused_activation_function);
1342 auto outputTensorIndexes = AsUnsignedVector(
GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1343 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
// Fragment of ParseSlice: reads the constant begin and size tensors from their
// buffers into vectors, fills a Slice descriptor, and adds the layer.
1346 void TfLiteParser::ParseSlice(
size_t subgraphIndex,
size_t operatorIndex)
1348 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1350 auto inputs =
GetInputs(m_Model, subgraphIndex, operatorIndex);
1352 auto outputs =
GetOutputs(m_Model, subgraphIndex, operatorIndex);
// Constant "begin" input copied raw out of its FlatBuffer buffer.
1361 std::vector<unsigned int> begin(beginTensorInfo.
GetNumElements());
1362 ::memcpy(begin.data(), beginBufferPtr->data.data(), beginTensorInfo.
GetNumBytes());
// Constant "size" input likewise.
1368 std::vector<unsigned int> size(sizeTensorInfo.GetNumElements());
1369 ::memcpy(size.data(), sizeBufferPtr->data.data(), sizeTensorInfo.GetNumBytes());
1372 auto layerName = boost::str(boost::format(
"Slice:%1%:%2%") % subgraphIndex % operatorIndex);
1373 IConnectableLayer*
const layer = m_Network->AddSliceLayer(desc, layerName.c_str());
1380 auto inputTensorIndexes = AsUnsignedVector(
GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1381 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1384 auto outputTensorIndexes = AsUnsignedVector(
GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1385 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
// Fragment of ParseSoftmax: builds a Softmax layer from SoftmaxOptions (the
// beta parameter is read from options; that line is missing here).
1388 void TfLiteParser::ParseSoftmax(
size_t subgraphIndex,
size_t operatorIndex)
1390 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1391 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1392 const auto *
options = operatorPtr->builtin_options.AsSoftmaxOptions();
1397 auto inputs =
GetInputs(m_Model, subgraphIndex, operatorIndex);
1399 auto outputs =
GetOutputs(m_Model, subgraphIndex, operatorIndex);
1402 auto layerName = boost::str(boost::format(
"Softmax:%1%:%2%") % subgraphIndex % operatorIndex);
1403 IConnectableLayer*
const layer = m_Network->AddSoftmaxLayer(desc, layerName.c_str());
1410 auto inputTensorIndexes = AsUnsignedVector(
GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1411 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1414 auto outputTensorIndexes = AsUnsignedVector(
GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1415 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
// Fragment of ParseSpaceToBatchND: mirror of ParseBatchToSpaceND — reads block
// shape and pad list from constant buffers, pairs the flat pad values, and adds
// a SpaceToBatchNd layer.
1418 void TfLiteParser::ParseSpaceToBatchND(
size_t subgraphIndex,
size_t operatorIndex)
1420 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1422 auto inputs =
GetInputs(m_Model, subgraphIndex, operatorIndex);
1425 auto outputs =
GetOutputs(m_Model, subgraphIndex, operatorIndex);
1434 std::vector<unsigned int> blockShape(blockShapeTensorInfo.GetNumElements());
1435 ::memcpy(blockShape.data(), blockShapeBufferPtr->data.data(), blockShapeTensorInfo.GetNumBytes());
1437 std::vector<unsigned int> padListVector(padListTensorInfo.
GetNumElements());
1438 ::memcpy(padListVector.data(), padListBufferPtr->data.data(), padListTensorInfo.
GetNumBytes());
// Pair up flat pad values: (front, back) per dimension, step presumably 2.
1441 std::vector<std::pair<unsigned int, unsigned int>> padList;
1442 for (
unsigned int i = 0; i < padListTensorInfo.
GetNumElements() / step; ++i)
1444 padList.emplace_back(padListVector[i * step], padListVector[i * step + 1]);
1454 auto layerName = boost::str(boost::format(
"SpaceToBatchND:%1%:%2%") % subgraphIndex % operatorIndex);
1455 IConnectableLayer* layer = m_Network->AddSpaceToBatchNdLayer(desc, layerName.c_str());
1459 auto inputTensorIndexes = AsUnsignedVector(
GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1460 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1462 auto outputTensorIndexes = AsUnsignedVector(
GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1463 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
// Body fragment of OutputShapeOfSqueeze (the function signature was stripped
// from this extraction). Computes the output dims after removing size-1
// dimensions named in squeezeDimsIn; an empty squeeze list means "consider all
// of dims 0..3". Throws (in stripped lines) if input or output rank exceeds 4.
1470 std::vector<uint32_t> squeezeDims = squeezeDimsIn;
1471 static const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };
// Error path: input rank out of the supported range (condition stripped).
1475 std::stringstream ss;
1476 ss <<
"Input tensor has unexpected number of dimensions:" << inputTensorInfo.
GetNumDimensions()
1477 <<
" shape:" << inputTensorInfo.
GetShape() <<
" " 1482 if (squeezeDims.empty())
// Default: attempt to squeeze every dimension 0..3.
1484 squeezeDims.assign(dimensionSequence,
1488 std::vector<uint32_t> outputDims;
// Keep a dimension if it is not listed for squeezing, or if its size is not 1.
1491 bool skipSqueeze = (std::find(squeezeDims.begin(), squeezeDims.end(), i) == squeezeDims.end());
1492 auto currentDimension = inputTensorInfo.
GetShape()[i];
1493 if (skipSqueeze || currentDimension != 1)
1495 outputDims.push_back(currentDimension);
1499 if (outputDims.size() > 4)
1501 std::stringstream ss;
1502 ss <<
"Output tensor has unexpected number of dimensions:" << inputTensorInfo.
GetNumDimensions()
1503 <<
" shape:" << inputTensorInfo.
GetShape() <<
" " 1515 return outTensorInfo;
// Parses a TFLite SQUEEZE operator. Squeeze is implemented as an ArmNN
// Reshape layer. NOTE(review): lines 1530-1538 are stripped here — presumably
// they build 'reshapeDesc' from OutputShapeOfSqueeze(options->squeeze_dims, ...);
// verify against the full file.
1518 void TfLiteParser::ParseSqueeze(
size_t subgraphIndex,
size_t operatorIndex)
1520 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1522 auto inputs =
GetInputs(m_Model, subgraphIndex, operatorIndex);
1525 auto outputs =
GetOutputs(m_Model, subgraphIndex, operatorIndex);
1528 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1529 const auto *
options = operatorPtr->builtin_options.AsSqueezeOptions();
1539 auto layerName = boost::str(boost::format(
"Squeeze:%1%:%2%") % subgraphIndex % operatorIndex);
1540 IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, layerName.c_str());
1543 auto inputTensorIndexes = AsUnsignedVector(
GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1544 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1546 auto outputTensorIndexes = AsUnsignedVector(
GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1547 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
// Parses a TFLite STRIDED_SLICE operator. Begin/end/stride index vectors are
// read from constant buffers (inputs 1..3). NOTE(review): declarations of
// 'begin', the *TensorInfo/*BufferPtr locals and 'desc' live in lines stripped
// from this extraction — confirm against the full file.
1550 void TfLiteParser::ParseStridedSlice(
size_t subgraphIndex,
size_t operatorIndex)
1552 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1554 auto inputs =
GetInputs(m_Model, subgraphIndex, operatorIndex);
1557 auto outputs =
GetOutputs(m_Model, subgraphIndex, operatorIndex);
1560 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1561 const auto *
options = operatorPtr->builtin_options.AsStridedSliceOptions();
// Copy begin/end/stride data out of the flatbuffer constant buffers.
1575 ::memcpy(begin.data(), beginBufferPtr->data.data(), beginTensorInfo.
GetNumBytes());
1580 std::vector<int> end(endTensorInfo.GetNumElements());
1581 ::memcpy(end.data(), endBufferPtr->data.data(), endTensorInfo.GetNumBytes());
1586 std::vector<int> stride(strideTensorInfo.GetNumElements());
1587 ::memcpy(stride.data(), strideBufferPtr->data.data(), strideTensorInfo.GetNumBytes());
1593 auto layerName = boost::str(boost::format(
"StridedSlice:%1%:%2%") % subgraphIndex % operatorIndex);
1594 IConnectableLayer* layer = m_Network->AddStridedSliceLayer(desc, layerName.c_str());
1599 auto inputTensorIndexes = AsUnsignedVector(
GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1600 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1602 auto outputTensorIndexes = AsUnsignedVector(
GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1603 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
// Parses a TFLite SUB operator: adds a subtraction layer, inserting a
// broadcast reshape when the two inputs differ in rank, then applies the
// operator's fused activation (if any). NOTE(review): the AddSubtractionLayer
// call and the input TensorInfo declarations are in stripped lines — verify.
1606 void TfLiteParser::ParseSub(
size_t subgraphIndex,
size_t operatorIndex)
1608 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1610 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1611 const auto *
options = operatorPtr->builtin_options.AsSubOptions();
1613 auto inputs =
GetInputs(m_Model, subgraphIndex, operatorIndex);
1616 auto outputs =
GetOutputs(m_Model, subgraphIndex, operatorIndex);
1622 auto layerName = boost::str(boost::format(
"Sub:%1%:%2%") % subgraphIndex % operatorIndex);
1628 auto inputTensorIndexes = AsUnsignedVector(
GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
// Ranks differ -> insert a reshape so the smaller input broadcasts correctly.
1629 if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.
GetNumDimensions())
1631 AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer);
1635 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
// Fused activation (e.g. RELU) becomes a follow-on activation layer.
1638 layer = AddFusedActivationLayer(layer, 0,
options->fused_activation_function);
1640 auto outputTensorIndexes = AsUnsignedVector(
GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1641 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
// Parses a TFLite ADD operator; mirrors ParseSub but for addition.
// NOTE(review): the AddAdditionLayer call and input TensorInfo declarations
// are in lines stripped from this extraction — verify against the full file.
1644 void TfLiteParser::ParseAdd(
size_t subgraphIndex,
size_t operatorIndex)
1646 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1648 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1649 const auto *
options = operatorPtr->builtin_options.AsAddOptions();
1651 auto inputs =
GetInputs(m_Model, subgraphIndex, operatorIndex);
1654 auto outputs =
GetOutputs(m_Model, subgraphIndex, operatorIndex);
1660 auto layerName = boost::str(boost::format(
"Add:%1%:%2%") % subgraphIndex % operatorIndex);
1666 auto inputTensorIndexes = AsUnsignedVector(
GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
// Ranks differ -> insert a reshape so broadcasting lines up.
1667 if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.
GetNumDimensions())
1669 AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer);
1673 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
// Apply the operator's fused activation function, if any.
1676 layer = AddFusedActivationLayer(layer, 0,
options->fused_activation_function);
1678 auto outputTensorIndexes = AsUnsignedVector(
GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1679 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
// Parses a TFLite MUL operator into an ArmNN Multiplication layer, with the
// same broadcast-reshape and fused-activation handling as ParseAdd/ParseSub.
1682 void TfLiteParser::ParseMul(
size_t subgraphIndex,
size_t operatorIndex)
1684 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1686 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1687 const auto *
options = operatorPtr->builtin_options.AsMulOptions();
1689 auto inputs =
GetInputs(m_Model, subgraphIndex, operatorIndex);
1692 auto outputs =
GetOutputs(m_Model, subgraphIndex, operatorIndex);
1698 auto layerName = boost::str(boost::format(
"Mul:%1%:%2%") % subgraphIndex % operatorIndex);
1699 IConnectableLayer* layer = m_Network->AddMultiplicationLayer(layerName.c_str());
1704 auto inputTensorIndexes = AsUnsignedVector(
GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
// NOTE(review): inputTensorInfo/input1TensorInfo come from stripped lines.
1705 if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.
GetNumDimensions())
1707 AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer);
1711 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
1714 layer = AddFusedActivationLayer(layer, 0,
options->fused_activation_function);
1716 auto outputTensorIndexes = AsUnsignedVector(
GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1717 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
// Parses a TFLite MEAN operator. The reduction axes are read from a constant
// buffer (input 1). NOTE(review): dimTensorInfo/bufferPtr, the MeanDescriptor
// and the AddMeanLayer call are in lines stripped from this extraction.
1720 void TfLiteParser::ParseMean(
size_t subgraphIndex,
size_t operatorIndex)
1722 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1724 auto inputs =
GetInputs(m_Model, subgraphIndex, operatorIndex);
1726 auto outputs =
GetOutputs(m_Model, subgraphIndex, operatorIndex);
// Copy the axis tensor's raw bytes into a typed vector.
1733 std::vector<unsigned int> axis(dimTensorInfo.GetNumElements());
1734 ::memcpy(axis.data(), bufferPtr->data.data(), dimTensorInfo.GetNumBytes());
1744 auto layerName = boost::str(boost::format(
"Mean:%1%:%2%") % subgraphIndex % operatorIndex);
1749 auto inputTensorIndexes = AsUnsignedVector(
GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1750 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1752 auto outputTensorIndexes = AsUnsignedVector(
GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1753 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
// Parses a TFLite PAD operator. The padding tensor (input 1) is a constant
// buffer of (before, after) pairs, one pair per dimension, copied into
// desc.m_PadList. NOTE(review): padTensorInfo/bufferPtr, 'step', 'desc' and
// the AddPadLayer call are in lines stripped from this extraction.
1756 void TfLiteParser::ParsePad(
size_t subgraphIndex,
size_t operatorIndex)
1758 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1768 std::vector<unsigned int> padBuffer(padTensorInfo.GetNumElements());
1769 ::memcpy(padBuffer.data(), bufferPtr->data.data(), padTensorInfo.GetNumBytes());
// Flat pad buffer is interpreted as consecutive (before, after) pairs.
1773 for (
unsigned int i = 0; i < padTensorInfo.GetNumElements() / step; ++i)
1775 desc.
m_PadList.emplace_back(padBuffer[i * step], padBuffer[i * step + 1]);
1778 auto layerName = boost::str(boost::format(
"Pad:%1%:%2%") % subgraphIndex % operatorIndex);
1784 auto inputTensorIndexes = AsUnsignedVector(
GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1785 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1787 auto outputTensorIndexes = AsUnsignedVector(
GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1788 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
// Parses a TFLite QUANTIZE operator into an ArmNN Quantize layer.
// NOTE(review): the AddQuantizeLayer call that produces 'layer' is in a line
// stripped from this extraction — verify against the full file.
1791 void TfLiteParser::ParseQuantize(
size_t subgraphIndex,
size_t operatorIndex)
1793 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1795 auto inputs =
GetInputs(m_Model, subgraphIndex, operatorIndex);
1798 auto outputs =
GetOutputs(m_Model, subgraphIndex, operatorIndex);
1801 auto layerName = boost::str(boost::format(
"Quantize:%1%:%2%") % subgraphIndex % operatorIndex);
1804 BOOST_ASSERT(layer !=
nullptr);
1809 auto inputTensorIndexes = AsUnsignedVector(
GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1810 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
// All output tensor ids are registered (not just index 0), unlike most parsers above.
1812 auto outputTensorIndexes = AsUnsignedVector(
GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1813 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
// Thin wrappers: each TFLite activation operator delegates to ParseActivation
// with the matching ArmNN ActivationFunction.
1816 void TfLiteParser::ParseRelu(
size_t subgraphIndex,
size_t operatorIndex)
1818 ParseActivation(subgraphIndex,operatorIndex, ActivationFunction::ReLu);
// RELU6 maps to BoundedReLu (bounds are set inside ParseActivation).
1821 void TfLiteParser::ParseRelu6(
size_t subgraphIndex,
size_t operatorIndex)
1823 ParseActivation(subgraphIndex,operatorIndex, ActivationFunction::BoundedReLu);
1826 void TfLiteParser::ParseLogistic(
size_t subgraphIndex,
size_t operatorIndex)
1828 ParseActivation(subgraphIndex,operatorIndex,ActivationFunction::Sigmoid);
1831 void TfLiteParser::ParseTanH(
size_t subgraphIndex,
size_t operatorIndex)
1833 ParseActivation(subgraphIndex,operatorIndex,ActivationFunction::TanH);
// Shared implementation for all standalone activation operators. Builds an
// ActivationDescriptor (setting m_A/m_B for the bounded/TanH variants), names
// the layer after the activation type, and wires input/output slots. Throws
// (in stripped lines) for unexpected activation types.
1837 void TfLiteParser::ParseActivation(
size_t subgraphIndex,
size_t operatorIndex,
ActivationFunction activationType)
1839 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1840 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1841 boost::ignore_unused(operatorPtr);
1843 auto inputs =
GetInputs(m_Model, subgraphIndex, operatorIndex);
1846 auto outputs =
GetOutputs(m_Model, subgraphIndex, operatorIndex);
1849 auto layerName = str(boost::format(
"Activation:"));
1853 switch (activationType)
1855 case ActivationFunction::ReLu:
1857 layerName += str(boost::format(
"RELU:%1%:%2%") % subgraphIndex % operatorIndex);
1860 case ActivationFunction::BoundedReLu:
1862 layerName += str(boost::format(
"RELU6:%1%:%2%") % subgraphIndex % operatorIndex);
// RELU6 clamps to [0, 6]: m_A is the upper bound, m_B the lower.
1863 activationDesc.
m_A = 6.0f;
1864 activationDesc.
m_B = 0.0f;
1867 case ActivationFunction::Sigmoid:
1869 layerName += str(boost::format(
"SIGMOID:%1%:%2%") % subgraphIndex % operatorIndex);
1872 case ActivationFunction::TanH:
1874 layerName += str(boost::format(
"TANH:%1%:%2%") % subgraphIndex % operatorIndex);
// Standard tanh: scale (m_A) and steepness (m_B) both 1.
1875 activationDesc.
m_A = 1.0f;
1876 activationDesc.
m_B = 1.0f;
// Default case: unexpected activation -> exception with call-site location.
1882 boost::str(boost::format(
"Unexpected ActivationFunction[%1%] when creating layerName " 1883 " %2% ") %static_cast<int>(activationType)%
CHECK_LOCATION().AsString()));
1887 IConnectableLayer*
const layer = m_Network->AddActivationLayer(activationDesc, layerName.c_str());
1894 auto inputTensorIndexes = AsUnsignedVector(
GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1895 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1898 auto outputTensorIndexes = AsUnsignedVector(
GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1899 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
// Fragment of OutputShapeOfReshape (leading part of the signature was
// stripped by the extraction). Resolves a target shape that may contain a
// single -1 "stretch" dimension: at most one -1 is allowed, and its size is
// derived from the input element count divided by the product of the others.
1902 const std::vector<int32_t> & targetDimsIn)
1904 std::vector<unsigned int> outputDims(targetDimsIn.begin(), targetDimsIn.end());
1905 const auto stretchDim = std::find(targetDimsIn.begin(), targetDimsIn.end(), -1);
1907 if (stretchDim != targetDimsIn.end())
// More than one -1 in the target shape is ambiguous -> reject.
1909 if (std::find(std::next(stretchDim), targetDimsIn.end(), -1) != targetDimsIn.end())
1913 boost::format(
"At most one component of shape can be -1 %1%") %
CHECK_LOCATION().AsString()));
// Product of all target dims; the initial -1 cancels the stretch entry's -1,
// leaving the (positive) product of the fixed dimensions.
1916 auto targetNumElements =
1917 boost::numeric_cast<
unsigned int>(
1918 std::accumulate(targetDimsIn.begin(), targetDimsIn.end(), -1, std::multiplies<int32_t>()));
1920 auto stretchIndex =
static_cast<size_t>(std::distance(targetDimsIn.begin(), stretchDim));
1921 outputDims[stretchIndex] = inputTensorInfo.
GetNumElements() / targetNumElements;
// Parses a TFLite RESHAPE operator. When a second (shape) input exists, the
// computed shape must match the declared output shape, otherwise a
// ParseException is raised (tail of the throw is in stripped lines).
// NOTE(review): reshapeOutputTensorShape/actualOutputTensorInfo/reshapeDesc
// come from lines stripped from this extraction — verify against the full file.
1932 void TfLiteParser::ParseReshape(
size_t subgraphIndex,
size_t operatorIndex)
1934 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex)
;
1936 auto inputs =
GetInputs(m_Model, subgraphIndex, operatorIndex);
1938 auto outputs =
GetOutputs(m_Model, subgraphIndex, operatorIndex);
1941 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
1942 const auto *
options = operatorPtr->builtin_options.AsReshapeOptions();
// Sanity check: shape computed from the shape input vs the model's output shape.
1951 if (inputs.size() > 1 && !
CheckShape(reshapeOutputTensorShape, outputs[0]->shape))
1953 std::stringstream ss;
1954 ss <<
"New shape defined in reshape parameters " 1955 << reshapeOutputTensorShape
1956 <<
" does not equal output shape " 1957 << actualOutputTensorInfo.
GetShape()
1966 auto layerName = boost::str(boost::format(
"Reshape:%1%:%2%") % subgraphIndex % operatorIndex);
1967 IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, layerName.c_str());
1970 auto inputTensorIndexes = AsUnsignedVector(
GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
1971 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
1973 auto outputTensorIndexes = AsUnsignedVector(
GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
1974 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
// RESIZE_BILINEAR and RESIZE_NEAREST_NEIGHBOR both delegate to ParseResize
// with the corresponding ResizeMethod.
1977 void TfLiteParser::ParseResizeBilinear(
size_t subgraphIndex,
size_t operatorIndex)
1979 ParseResize(subgraphIndex, operatorIndex, ResizeMethod::Bilinear);
1982 void TfLiteParser::ParseResizeNearestNeighbor(
size_t subgraphIndex,
size_t operatorIndex)
1984 ParseResize(subgraphIndex, operatorIndex, ResizeMethod::NearestNeighbor);
// Shared RESIZE implementation: reads the target [height, width] from the
// constant size tensor (input 1), fills a ResizeDescriptor and adds the layer.
// NOTE(review): sizeTensorInfo/sizeBufferPtr, 'desc' and the AddResizeLayer
// call are in lines stripped from this extraction.
1987 void TfLiteParser::ParseResize(
size_t subgraphIndex,
size_t operatorIndex,
ResizeMethod resizeMethod)
1989 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
1991 auto inputs =
GetInputs(m_Model, subgraphIndex, operatorIndex);
1994 auto outputs =
GetOutputs(m_Model, subgraphIndex, operatorIndex);
2000 std::vector<int32_t> sizeTensorData(sizeTensorInfo.GetNumElements());
2003 ::memcpy(sizeTensorData.data(), sizeBufferPtr->data.data(), sizeTensorInfo.GetNumBytes());
// Size tensor layout is [height, width].
2007 desc.m_TargetHeight =
static_cast<uint32_t
> (sizeTensorData[0]);
2008 desc.m_TargetWidth =
static_cast<uint32_t
> (sizeTensorData[1]);
2011 auto layerName = str(boost::format(
"Resize:"));
2013 switch (resizeMethod)
2015 case ResizeMethod::Bilinear:
2017 layerName += str(boost::format(
"BILINEAR:%1%:%2%") % subgraphIndex % operatorIndex);
// Bilinear additionally honours the align_corners option.
2019 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2020 const auto *
options = operatorPtr->builtin_options.AsResizeBilinearOptions();
2022 desc.m_BilinearAlignCorners =
options->align_corners;
2025 case ResizeMethod::NearestNeighbor:
2027 layerName += str(boost::format(
"NEARESTNEIGHBOR:%1%:%2%") % subgraphIndex % operatorIndex);
// Default case: unexpected resize method -> exception with call-site location.
2033 boost::str(boost::format(
"Unexpected ResizeMethod[%1%] when creating layerName " 2034 " %2% ") %static_cast<int>(resizeMethod)%
CHECK_LOCATION().AsString()));
2043 auto inputTensorIndexes = AsUnsignedVector(
GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2044 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
2046 auto outputTensorIndexes = AsUnsignedVector(
GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2047 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
// Parses a TFLite CONCATENATION operator. Normalises a possibly-negative axis
// into [0, rank), builds an OriginsDescriptor describing where each input view
// starts along the concat dimension, adds a Concat layer, and applies the
// fused activation. NOTE(review): 'inputRank' and the per-view TensorInfo
// setup come from lines stripped from this extraction.
2050 void TfLiteParser::ParseConcatenation(
size_t subgraphIndex,
size_t operatorIndex)
2052 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2054 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2055 const auto *
options = operatorPtr->builtin_options.AsConcatenationOptions();
2059 auto inputs =
GetInputs(m_Model, subgraphIndex, operatorIndex);
2060 auto outputs =
GetOutputs(m_Model, subgraphIndex, operatorIndex);
2063 unsigned int numConcatView =
static_cast<unsigned int>(inputs.size());
// (rank + axis) % rank maps a negative TFLite axis to its positive equivalent.
2066 const unsigned int concatDimInput =
static_cast<unsigned int>(
2067 (
static_cast<int>(inputRank) +
options->axis) %
static_cast<int>(inputRank));
2069 OriginsDescriptor concatDescriptor(static_cast<uint32_t>(numConcatView), inputRank);
// mergeDimOrigin accumulates each view's offset along the concat dimension.
2072 unsigned int mergeDimOrigin = 0;
2074 for (
unsigned int viewIndex = 0; viewIndex < numConcatView; ++viewIndex)
2080 inputTensorInfo, concatDescriptor, concatDimInput, viewIndex, mergeDimOrigin);
2083 auto layerName = boost::str(boost::format(
"Concatenation:%1%:%2%") % subgraphIndex % operatorIndex);
2084 IConnectableLayer* layer = m_Network->AddConcatLayer(concatDescriptor, layerName.c_str());
2086 BOOST_ASSERT(layer !=
nullptr);
2089 auto inputTensorIndexes = AsUnsignedVector(
GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
// All concat inputs are registered, one per view.
2093 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes});
2096 layer = AddFusedActivationLayer(layer, 0,
options->fused_activation_function);
2098 auto outputTensorIndexes = AsUnsignedVector(
GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2099 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
// Parses a TFLite FULLY_CONNECTED operator. Weights must be rank-2; biases
// are optional (third input). If the runtime input is not already 2D, a
// reshape layer is inserted in front, deducing the batch dimension from the
// filter's second dimension. NOTE(review): 'desc', the TensorInfo locals, the
// reshape-layer construction and the fusedActivationLayer creation are in
// lines stripped from this extraction — verify against the full file.
2102 void TfLiteParser::ParseFullyConnected(
size_t subgraphIndex,
size_t operatorIndex)
2104 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2106 const auto & operatorRfr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2107 const auto options = operatorRfr->builtin_options.AsFullyConnectedOptions();
2115 auto inputs =
GetInputs(m_Model, subgraphIndex, operatorIndex);
2116 auto outputs =
GetOutputs(m_Model, subgraphIndex, operatorIndex);
// Only 2D weight tensors are supported.
2122 int32_t weightsDimension =
static_cast<int32_t
>(filterTensorInfo.GetNumDimensions());
2123 if (weightsDimension != 2)
2128 "Dimension %1% for Fully Connected weights is not supported by Armnn. " 2134 auto filterTensorAndData = CreateConstTensor(inputs[1],
2138 auto layerName = boost::str(boost::format(
"FullyConnected:%1%:%2%") % subgraphIndex % operatorIndex);
// Three inputs means an explicit bias tensor is present.
2140 if (inputs.size() == 3)
2144 auto biasTensorAndData = CreateConstTensor(inputs[2],
2147 layer = m_Network->AddFullyConnectedLayer(desc,
2148 filterTensorAndData.first,
2154 layer = m_Network->AddFullyConnectedLayer(desc,
2155 filterTensorAndData.first,
2159 BOOST_ASSERT(layer !=
nullptr);
2163 auto inputTensorIndexes = AsUnsignedVector(
GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
// Non-2D runtime input: flatten to [N, filter-width] via an inserted reshape.
2171 std::vector<unsigned int> reshapedDimensions(2);
2172 reshapedDimensions[1] = filterTensorInfo.GetShape()[1];
2173 reshapedDimensions[0] = inputTensorInfo.
GetNumElements() / reshapedDimensions[1];
// The element count must divide evenly, otherwise the shapes are incompatible.
2175 if (inputTensorInfo.
GetNumElements() % reshapedDimensions[1] != 0)
2180 "Failed to deduce input tensor shape from filter size %1%")
2181 % reshapedDimensions[1]
2188 std::string reshapeLayerName = boost::str(boost::format(
"Reshape_for:%1%") % layer->
GetName());
2190 desc.m_TargetShape = reshapedTensorInfo.
GetShape();
2196 RegisterInputSlots(subgraphIndex, operatorIndex, reshapeLayer, {inputTensorIndexes[0]});
2202 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
2210 options->fused_activation_function);
2213 auto outputTensorIndexes = AsUnsignedVector(
GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2214 RegisterOutputSlots(subgraphIndex, operatorIndex, fusedActivationLayer, {outputTensorIndexes[0]});
// Parses the TFLite custom DetectionPostProcess operator. Its parameters are
// carried as a flexbuffer map in custom_options rather than builtin options.
// Produces exactly four outputs (boxes, classes, scores, num-detections);
// their shapes are recorded in m_OverridenOutputShapes because the model's
// declared shapes are not used. NOTE(review): 'desc', 'numDetectedBox',
// anchorTensorInfo and several option reads are in stripped lines.
2217 void TfLiteParser::ParseDetectionPostProcess(
size_t subgraphIndex,
size_t operatorIndex)
2219 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2221 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2223 auto inputs =
GetInputs(m_Model, subgraphIndex, operatorIndex);
2224 auto outputs =
GetOutputs(m_Model, subgraphIndex, operatorIndex);
// Custom operator options come in as a flexbuffer map.
2228 auto custom_options = operatorPtr->custom_options;
2229 const flexbuffers::Map& m = flexbuffers::GetRoot(custom_options.data(), custom_options.size()).AsMap();
2238 desc.
m_ScaleH = m[
"h_scale"].AsFloat();
2239 desc.
m_ScaleW = m[
"w_scale"].AsFloat();
2240 desc.
m_ScaleX = m[
"x_scale"].AsFloat();
2241 desc.
m_ScaleY = m[
"y_scale"].AsFloat();
// Optional keys: only read when present in the flexbuffer map.
2243 if (!(m[
"use_regular_nms"].IsNull()))
2247 if (!(m[
"detections_per_class"].IsNull()))
2255 "must be positive and less than or equal to 1.");
2259 auto anchorTensorAndData = CreateConstTensor(inputs[2], anchorTensorInfo,
2262 auto layerName = boost::str(boost::format(
"DetectionPostProcess:%1%:%2%") % subgraphIndex % operatorIndex);
2263 IConnectableLayer* layer = m_Network->AddDetectionPostProcessLayer(desc, anchorTensorAndData.first,
2266 BOOST_ASSERT(layer !=
nullptr);
// The four outputs have fixed shapes derived from the max detection count.
2271 m_OverridenOutputShapes.push_back({ 1, numDetectedBox, 4 });
2272 m_OverridenOutputShapes.push_back({ 1, numDetectedBox });
2273 m_OverridenOutputShapes.push_back({ 1, numDetectedBox });
2274 m_OverridenOutputShapes.push_back({ 1 });
2276 for (
unsigned int i = 0 ; i < outputs.size() ; ++i)
2284 auto inputTensorIndexes = AsUnsignedVector(
GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2285 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
2288 auto outputTensorIndexes = AsUnsignedVector(
GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2289 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0],
2290 outputTensorIndexes[1],
2291 outputTensorIndexes[2],
2292 outputTensorIndexes[3]});
// Parses a TFLite PACK operator: stacks N inputs along a new axis. Requires
// at least one input (exception body is in stripped lines). NOTE(review):
// 'desc' (StackDescriptor) declaration and the AddStackLayer call are in
// lines stripped from this extraction.
2296 void TfLiteParser::ParsePack(
size_t subgraphIndex,
size_t operatorIndex)
2298 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2300 auto inputs =
GetInputs(m_Model, subgraphIndex, operatorIndex);
2301 auto outputs =
GetOutputs(m_Model, subgraphIndex, operatorIndex);
2304 if (inputs.size() < 1)
2309 const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2310 const auto*
options = operatorPtr->builtin_options.AsPackOptions();
2314 desc.
m_NumInputs =
static_cast<uint32_t
>(inputs.size());
2320 auto layerName = boost::str(boost::format(
"Pack:%1%:%2%") % subgraphIndex % operatorIndex);
2323 BOOST_ASSERT(layer !=
nullptr);
2328 auto inputTensorIndexes = AsUnsignedVector(
GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
// All pack inputs are registered.
2329 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes});
2331 auto outputTensorIndexes = AsUnsignedVector(
GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2332 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
// Parses a TFLite UNPACK operator. Unpack is implemented as a Splitter along
// the unpack axis followed by per-output reshape layers that drop the split
// dimension. Validates the axis against the input rank and that the split
// count matches the dimension being unpacked. NOTE(review): several locals
// ('unpackNum' initialisation path, the reshape construction, 'slot',
// 'reshapedOutputId') come from lines stripped from this extraction.
2335 void TfLiteParser::ParseUnpack(
size_t subgraphIndex,
size_t operatorIndex)
2337 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2339 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2340 const auto *
options = operatorPtr->builtin_options.AsUnpackOptions();
2345 auto inputs =
GetInputs(m_Model, subgraphIndex, operatorIndex);
// Axis must index an existing input dimension.
2350 if (unpackAxis >= inputTensorInfo.GetNumDimensions())
2355 "The unpack axis: %1% cannot be greater than or equal to " 2356 "the number of input dimension %2% %3%")
2358 % inputTensorInfo.GetNumDimensions()
// A zero 'num' option means: unpack the whole extent of the axis.
2366 unpackNum = inputTensorInfo.GetShape()[unpackAxis];
2372 throw ParseException(
"Number to unpack must greater than zero.");
2375 auto outputs =
GetOutputs(m_Model, subgraphIndex, operatorIndex);
2378 auto inputDimSize = inputTensorInfo.GetNumDimensions();
2379 std::vector<unsigned int> unpackDimSizes(inputDimSize);
// Start from the input shape, then shrink the unpack axis per split view.
2382 for (
unsigned int i = 0; i < inputDimSize; ++i)
2384 unpackDimSizes[i] = inputTensorInfo.GetShape()[i];
2387 if (unpackDimSizes[unpackAxis] != unpackNum)
2389 throw ParseException(
"Number to unpack must be the same as length of the dimension to " 2393 unpackDimSizes[unpackAxis] /= unpackNum;
2395 SplitterDescriptor splitDesc(unpackNum, static_cast<unsigned int>(unpackDimSizes.size()));
2396 for (
unsigned int j = 0; j < unpackNum; ++j)
// Every split view has the same (reduced) size on every dimension.
2399 for (
unsigned int dimIdx = 0; dimIdx < unpackDimSizes.size(); ++dimIdx)
2401 splitDesc.
SetViewSize(j, dimIdx, unpackDimSizes[dimIdx]);
2406 auto layerName = boost::str(boost::format(
"Unpack:%1%:%2%") % subgraphIndex % operatorIndex);
2407 IConnectableLayer* layer = m_Network->AddSplitterLayer(splitDesc, layerName.c_str());
2410 unpackDimSizes.data());
2412 auto inputTensorIndexes = AsUnsignedVector(
GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
2413 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
2419 std::string reshapeLayerName = boost::str(boost::format(
"Reshape_for:%1%") % layer->
GetName());
2434 RegisterProducerOfTensor(subgraphIndex, reshapedOutputId, slot);
// Parses a TFLite SPLIT operator into an ArmNN Splitter layer. The split axis
// comes from a constant tensor (input 0); the data tensor is input 1. The
// visible checks reject splitDim 0 or 2, require numSplits > 0 and that the
// split dimension divides evenly. NOTE(review): 'numSplits', 'splitDesc'
// construction, inputTensorInfo and the max-rank check text come from lines
// stripped from this extraction.
2438 void TfLiteParser::ParseSplit(
size_t subgraphIndex,
size_t operatorIndex)
2440 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2442 const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
2443 const auto *
options = operatorPtr->builtin_options.AsSplitOptions();
2450 throw ParseException(
"Number to splits must greater than zero.");
2453 auto inputs =
GetInputs(m_Model, subgraphIndex, operatorIndex);
2455 auto outputs =
GetOutputs(m_Model, subgraphIndex, operatorIndex);
// Read the scalar split axis out of the constant axis tensor.
2462 std::vector<unsigned int> axisData(axisTensorInfo.
GetNumElements());
2463 ::memcpy(axisData.data(), axisBufferPtr->data.data(), axisTensorInfo.
GetNumBytes());
2466 const unsigned int splitDim = axisData[0];
// Only certain axes are supported by the backend.
2469 if (splitDim == 0 || splitDim == 2)
2474 "Dimension %1% for split is not supported by Armnn. %2%")
2479 auto inputDimSize = inputTensorInfo.GetNumDimensions();
2485 "The number of dimensions: %1% for input tensors of the " 2486 "split op cannot be greater than %2% %3%")
2487 % inputTensorInfo.GetNumDimensions()
2492 std::vector<unsigned int> splitterDimSizes(inputDimSize);
// Start from the input shape, then shrink the split axis per view.
2495 for (
unsigned int i = 0; i < inputDimSize; ++i)
2497 splitterDimSizes[i] = inputTensorInfo.GetShape()[i];
2500 if (splitterDimSizes[splitDim] % numSplits != 0)
2502 throw ParseException(
"Number of splits must evenly divide the dimension");
2504 splitterDimSizes[splitDim] /= numSplits;
2507 for (
unsigned int j = 0; j < numSplits; ++j)
2510 for (
unsigned int dimIdx = 0; dimIdx < splitterDimSizes.size(); ++dimIdx)
2512 splitDesc.
SetViewSize(j, dimIdx, splitterDimSizes[dimIdx]);
2517 auto layerName = boost::str(boost::format(
"Split:%1%:%2%") % subgraphIndex % operatorIndex);
2518 IConnectableLayer* layer = m_Network->AddSplitterLayer(splitDesc, layerName.c_str());
2520 auto inputTensorIndexes = AsUnsignedVector(
GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
// Index 1 is the data tensor (index 0 is the axis tensor).
2521 RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[1]});
2529 auto outputTensorIndexes = AsUnsignedVector(
GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
2530 RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
// AddFusedActivationLayer (leading part of the signature stripped by the
// extraction). Appends an ArmNN Activation layer after prevLayer's given
// output slot to realise a TFLite fused activation; NONE returns (in stripped
// lines) without adding a layer, and unsupported types throw.
2534 unsigned int outputSlot,
2535 tflite::ActivationFunctionType activationType)
2538 std::string layerName = prevLayer->
GetName();
2540 switch(activationType)
2542 case tflite::ActivationFunctionType_NONE:
2547 case tflite::ActivationFunctionType_RELU:
2549 activationDesc.
m_Function = ActivationFunction::ReLu;
2550 layerName +=
":RELU";
2553 case tflite::ActivationFunctionType_RELU6:
// RELU6 = bounded ReLU clamped to [0, 6].
2555 activationDesc.
m_Function = ActivationFunction::BoundedReLu;
2556 activationDesc.
m_A = 6.0f;
2557 activationDesc.
m_B = 0.0f;
2558 layerName +=
":RELU6";
2561 case tflite::ActivationFunctionType_TANH:
2563 activationDesc.
m_Function = ActivationFunction::TanH;
2564 activationDesc.
m_A = 1.0f;
2565 activationDesc.
m_B = 1.0f;
2566 layerName +=
":TANH";
// Not supported by this parser -> ParseException (throw is in stripped lines).
2571 case tflite::ActivationFunctionType_RELU_N1_TO_1:
2572 case tflite::ActivationFunctionType_SIGN_BIT:
2577 boost::format(
"TfLite parser doesn't suppport fused activation: " 2580 tflite::EnumNameActivationFunctionType(activationType) %
2587 m_Network->AddActivationLayer(activationDesc, layerName.c_str());
// Splice the new activation layer onto the previous layer's output slot.
2589 auto & prevOutputSlot = prevLayer->
GetOutputSlot(outputSlot);
2592 return activationLayer;
// LoadModelFromFile / LoadModelFromBinary fragments (both signatures stripped
// by the extraction). File variant: null-checks the path, verifies existence
// via boost::filesystem, reads the whole file, then defers to the binary
// variant. Binary variant: null-checks the buffer, runs the flatbuffers
// verifier, and unpacks the TFLite model.
2597 if (fileName ==
nullptr)
2602 boost::system::error_code errorCode;
2603 boost::filesystem::path pathToFile(fileName);
2604 if (!boost::filesystem::exists(pathToFile, errorCode))
2606 throw FileNotFoundException(boost::str(boost::format(
"Cannot find the file (%1%) errorCode: %2% %3%") %
// Read the entire file into memory and parse it as a binary flatbuffer.
2611 std::ifstream file(fileName, std::ios::binary);
2612 std::string fileContent((std::istreambuf_iterator<char>(file)), std::istreambuf_iterator<char>());
2614 fileContent.size());
2619 if (binaryContent ==
nullptr)
// Verify the buffer is a structurally valid TFLite flatbuffer before unpacking.
2624 flatbuffers::Verifier verifier(binaryContent, len);
2628 boost::str(boost::format(
"Buffer doesn't conform to the expected Tensorflow Lite " 2629 "flatbuffers format. size:%1% %2%") %
2633 return tflite::UnPackModel(binaryContent);
// Accessor helpers (each signature's first line was stripped by the
// extraction): GetInputs/GetOutputs resolve an operator's input/output tensor
// pointers; GetSubgraphInputs/GetSubgraphOutputs pair subgraph-level tensor
// ids with their tensor pointers; GetInputTensorIds/GetOutputTensorIds return
// the raw id vectors from the operator.
2637 size_t subgraphIndex,
2638 size_t operatorIndex)
2642 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
2643 const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
2645 size_t inputCount = operatorPtr->inputs.size();
2647 for (
size_t i=0; i<inputCount; ++i)
2650 result[i] = subgraphPtr->tensors[inputId].get();
// GetOutputs: same pattern for the operator's output tensors.
2656 size_t subgraphIndex,
2657 size_t operatorIndex)
2661 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
2662 const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
2664 size_t outputCount = operatorPtr->outputs.size();
2666 for (
size_t i=0; i<outputCount; ++i)
2670 result[i] = subgraphPtr->tensors[outputId].get();
// GetSubgraphInputs: (tensor id, tensor*) pairs for the subgraph's inputs.
2676 size_t subgraphIndex)
2679 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
2681 size_t inputCount = subgraphPtr->inputs.size();
2683 for (
size_t i=0; i<inputCount; ++i)
2687 result[i] = std::make_pair(inputId, subgraphPtr->tensors[inputId].get());
// GetSubgraphOutputs: same for the subgraph's outputs.
2693 size_t subgraphIndex)
2696 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
2698 size_t outputCount = subgraphPtr->outputs.size();
2700 for (
size_t i=0; i<outputCount; ++i)
2703 result[i] = std::make_pair(outputId, subgraphPtr->tensors[outputId].get());
// GetInputTensorIds: the operator's raw input tensor-id vector.
2709 size_t subgraphIndex,
2710 size_t operatorIndex)
2713 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
2714 const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
2715 return operatorPtr->inputs;
// GetOutputTensorIds: the operator's raw output tensor-id vector.
2719 size_t subgraphIndex,
2720 size_t operatorIndex)
2723 const auto & subgraphPtr = model->subgraphs[subgraphIndex];
2724 const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
2725 return operatorPtr->outputs;
// RegisterInputSlots/RegisterOutputSlots: validate that the number of tensor
// indices matches the layer's slot count (throw text visible below), then
// record each slot as a consumer/producer of its tensor so connections can be
// made once all layers exist. SetupInputLayers/SetupOutputLayers create the
// network's bind-able Input/Output layers for each subgraph input/output,
// registering them under the sentinel VIRTUAL_OPERATOR_ID.
2728 void TfLiteParser::RegisterInputSlots(
size_t subgraphIndex,
2729 size_t operatorIndex,
2731 const std::vector<unsigned int>& tensorIndexes)
2733 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2734 BOOST_ASSERT(layer !=
nullptr);
2738 boost::str(boost::format(
"The number of tensor inputs (%1%) does not match the number expected (%2%)" 2739 " for subgraph:%3% operator index:%4% %5%") %
2740 tensorIndexes.size() %
// One consumer registration per input slot.
2747 for (
unsigned int slotIndex = 0; slotIndex < layer->
GetNumInputSlots(); ++slotIndex)
2749 unsigned int tensorIndex = tensorIndexes[slotIndex];
2751 RegisterConsumerOfTensor(subgraphIndex, tensorIndex, slot);
2755 void TfLiteParser::RegisterOutputSlots(
size_t subgraphIndex,
2756 size_t operatorIndex,
2758 const std::vector<unsigned int>& tensorIndexes)
2760 CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
2761 BOOST_ASSERT(layer !=
nullptr);
2765 boost::str(boost::format(
"The number of tensor outputs (%1%) does not match the number expected (%2%)" 2766 " for subgraph:%3% operator index:%4% %5%") %
2767 tensorIndexes.size() %
// One producer registration per output slot.
2774 for (
unsigned int slotIndex = 0; slotIndex < layer->
GetNumOutputSlots(); ++slotIndex)
2776 unsigned int tensorIndex = tensorIndexes[slotIndex];
2778 RegisterProducerOfTensor(subgraphIndex, tensorIndex, slot);
// Create an InputLayer per subgraph input; the binding id is derived from the
// subgraph and tensor ids so callers can bind data by id later.
2782 void TfLiteParser::SetupInputLayers(
size_t subgraphIndex)
2787 for (
auto const & tensorIdAndPtr : inputs)
2789 auto bindingId = GenerateLayerBindingId(subgraphIndex, tensorIdAndPtr.first);
2791 m_Network->AddInputLayer(bindingId, tensorIdAndPtr.second->name.c_str());
2796 RegisterOutputSlots(subgraphIndex,
2797 VIRTUAL_OPERATOR_ID,
2799 {
static_cast<uint32_t
>(tensorIdAndPtr.first) });
// Create an OutputLayer per subgraph output, mirroring SetupInputLayers.
2803 void TfLiteParser::SetupOutputLayers(
size_t subgraphIndex)
2808 for (
auto const & tensorIdAndPtr : outputs)
2810 auto bindingId = GenerateLayerBindingId(subgraphIndex, tensorIdAndPtr.first);
2812 m_Network->AddOutputLayer(bindingId, tensorIdAndPtr.second->name.c_str());
2814 RegisterInputSlots(subgraphIndex,
2815 VIRTUAL_OPERATOR_ID,
2817 {
static_cast<uint32_t
>(tensorIdAndPtr.first) });
2821 void TfLiteParser::SetupConstantLayers(
size_t subgraphIndex)
2825 const auto & subgraphPtr = m_Model->subgraphs[subgraphIndex];
2826 for (
unsigned int subgraphIndex = 0; subgraphIndex < m_SubgraphConnections.size(); ++subgraphIndex)
2828 for (
unsigned int tensorIndex = 0; tensorIndex < m_SubgraphConnections[subgraphIndex].size(); ++tensorIndex)
2830 if (m_SubgraphConnections[subgraphIndex][tensorIndex].outputSlot ==
nullptr &&
2831 m_SubgraphConnections[subgraphIndex][tensorIndex].inputSlots.size() > 0)
2833 TensorRawPtr tensorPtr = subgraphPtr->tensors[tensorIndex].get();
2835 auto tensorAndData = CreateConstTensor(tensorPtr,
2839 std::string layerName = boost::str(boost::format(
"Constant:%1%") % tensorPtr->name);
2841 m_Network->AddConstantLayer(tensorAndData.first, layerName.c_str());
2844 RegisterOutputSlots(subgraphIndex,
2845 VIRTUAL_OPERATOR_ID,
2858 return model->buffers[bufferIndex].get();
2861 template<
typename T>
2862 std::pair<armnn::ConstTensor, TfLiteParser::SupportedDataStorage>
2868 auto constData = CreateConstTensorImpl<T>(bufferPtr,
2872 TfLiteParser::SupportedDataStorage storage(std::move(constData.second));
2873 return std::make_pair(constData.first, std::move(storage));
2876 std::pair<armnn::ConstTensor, TfLiteParser::SupportedDataStorage>
2877 TfLiteParser::CreateConstTensor(
TensorRawPtr tensorPtr,
2882 auto bufferPtr =
GetBuffer(m_Model, tensorPtr->buffer);
2888 return CreateConstTensorAndStoreData<float>(bufferPtr,
2893 return CreateConstTensorAndStoreData<uint8_t>(bufferPtr,
2898 return CreateConstTensorAndStoreData<int8_t>(bufferPtr,
2903 return CreateConstTensorAndStoreData<int8_t>(bufferPtr,
2908 return CreateConstTensorAndStoreData<int32_t>(bufferPtr,
2914 std::stringstream errString;
2915 errString <<
"Unexpected datatype when creating const tensor: " 2917 <<
" shape:" << tensorInfo.GetShape()
2925 const std::string& name)
const 2929 for (
auto const & input : inputs)
2931 if (input.second->name == name)
2933 auto bindingId = GenerateLayerBindingId(subgraphId, input.first);
2934 return std::make_pair(bindingId,
ToTensorInfo(input.second));
2938 std::stringstream bindings;
2939 for (
auto const & input : inputs)
2941 bindings <<
"'" << input.second->name <<
"' ";
2946 boost::format(
"No input binding found for subgraph:%1% and name:%2%. " 2947 "Possible inputs are: [%3%] %4%") %
2955 const std::string& name)
const 2959 for (
unsigned int i = 0; i < outputs.size(); ++i)
2961 auto const output = outputs[i];
2962 if (output.second->name == name)
2964 auto bindingId = GenerateLayerBindingId(subgraphId, output.first);
2965 std::vector<unsigned int> shape = m_OverridenOutputShapes.size() > 0 ?
2966 m_OverridenOutputShapes[i] : AsUnsignedVector(output.second->shape);
2967 return std::make_pair(bindingId,
ToTensorInfo(output.second, shape));
2971 std::stringstream bindings;
2972 for (
auto const & output : outputs)
2974 bindings <<
"'" << output.second->name <<
"' ";
2979 boost::format(
"No output binding found for subgraph:%1% and name:%2%. " 2980 "Possible outputs are: [%3%] %4%") %
2989 return m_Model->subgraphs.size();
2996 std::vector<std::string> result;
2997 result.reserve(inputs.size());
2998 for (
auto const & input : inputs)
3000 result.push_back(input.second->name);
3009 std::vector<std::string> result;
3010 result.reserve(outputs.size());
3011 for (
auto const & output : outputs)
3013 result.push_back(output.second->name);
3033 TfLiteParser::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<
float[]> && data)
3034 : m_FloatData(std::move(data))
3035 , m_Uint8Data(
nullptr)
3036 , m_Int8Data(
nullptr)
3037 , m_Int32Data(
nullptr)
3041 TfLiteParser::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<uint8_t[]> && data)
3042 : m_FloatData(
nullptr)
3043 , m_Uint8Data(std::move(data))
3044 , m_Int8Data(
nullptr)
3045 , m_Int32Data(
nullptr)
3049 TfLiteParser::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<int8_t[]> && data)
3050 : m_FloatData(
nullptr)
3051 , m_Uint8Data(
nullptr)
3052 , m_Int8Data(std::move(data))
3053 , m_Int32Data(
nullptr)
3057 TfLiteParser::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<int32_t[]> && data)
3058 : m_FloatData(
nullptr)
3059 , m_Uint8Data(
nullptr)
3060 , m_Int8Data(
nullptr)
3061 , m_Int32Data(std::move(data))
void SetConcatAxis(unsigned int concatAxis)
Set the concatenation axis value.
#define CHECK_VALID_SIZE(ACTUAL,...)
float m_A
Alpha upper bound value used by the activation functions. (BoundedReLu, Linear, TanH).
const tflite::BufferT * BufferRawPtr
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
std::string FileLine() const
static std::vector< int32_t > & GetInputTensorIds(const ModelPtr &model, size_t subgraphIndex, size_t operatorIndex)
float m_ScaleX
Center size encoding scale x.
uint32_t m_PadBottom
Padding bottom value in the height dimension.
std::unique_ptr< onnx::ModelProto > ModelPtr
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
virtual std::vector< std::string > GetSubgraphInputTensorNames(size_t subgraphId) const override
Return the input tensor names for a given subgraph.
uint32_t m_PadBottom
Padding bottom value in the height dimension.
#define CHECK_TENSOR_PTR(TENSOR_PTR)
uint32_t m_MaxClassesPerDetection
Maximum numbers of classes per detection, used in Fast NMS.
void ProcessConcatInputTensorInfo(armnn::TensorInfo &inputTensorInfo, armnn::OriginsDescriptor &concatDescriptor, const unsigned int &concatAxis, unsigned int inputIndex, unsigned int &mergeDimOrigin)
virtual const IInputSlot & GetInputSlot(unsigned int index) const =0
uint32_t m_Axis
0-based axis along which to stack the input tensors.
int32_t GetQuantizationOffset() const
unsigned int GetNumDimensions() const
uint32_t m_PadRight
Padding right value in the width dimension.
A BatchToSpaceNdDescriptor for the BatchToSpaceNdLayer.
uint32_t m_NumClasses
Number of classes.
uint32_t m_DilationX
Dilation factor value for width dimension.
A tensor defined by a TensorInfo (shape and data type) and an immutable backing store.
float m_ScaleW
Center size encoding scale weight.
uint32_t m_PadTop
Padding top value in the height dimension.
std::vector< std::pair< unsigned int, unsigned int > > m_PadList
Specifies the padding values for the input dimension: heightPad{top, bottom} widthPad{left, right}.
static armnn::TensorInfo OutputShapeOfReshape(const armnn::TensorInfo &inputTensorInfo, const std::vector< int32_t > &targetDimsIn)
uint32_t m_PadLeft
Padding left value in the width dimension.
std::vector< int > m_Stride
Stride values for the input that will be sliced.
unsigned int GetNumDimensions() const
uint32_t m_PoolHeight
Pooling height value.
int32_t m_NewAxisMask
New axis mask value. If set, the begin, end and stride is disregarded and a new 1 dimension is insert...
std::vector< unsigned int > m_Axis
Values for the dimensions to reduce.
virtual BindingPointInfo GetNetworkOutputBindingInfo(size_t subgraphId, const std::string &name) const override
uint32_t m_PadTop
Padding top value in the height dimension.
A PadDescriptor for the PadLayer.
TensorShape m_InputShape
Required shape of all input tensors.
An ActivationDescriptor for the ActivationLayer.
uint32_t m_PadRight
Padding right value in the width dimension.
static TensorIdRawPtrVector GetSubgraphInputs(const ModelPtr &model, size_t subgraphIndex)
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
Status SetViewSize(uint32_t view, uint32_t coord, uint32_t value)
Set the size of the views. The arguments are: view, dimension, value. If the view is greater than or ...
bool IsActivationSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const ActivationDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
float m_ScaleY
Center size encoding scale y.
DataLayout::NCHW DataLayout::NCHW DataLayout::NHWC DataLayout::NHWC true
armnn::TensorShape Permuted(const armnn::TensorShape &srcShape, const armnn::PermutationVector &mappings)
std::vector< int > m_Begin
Begin values for the input that will be sliced.
static ITfLiteParserPtr Create(const armnn::Optional< TfLiteParserOptions > &options=armnn::EmptyOptional())
std::vector< TensorIdRawPtr > TensorIdRawPtrVector
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
Status SetViewOriginCoord(uint32_t view, uint32_t coord, uint32_t value)
uint32_t m_PadBottom
Padding bottom value in the height dimension.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
#define CHECK_BUFFER_SIZE(BUFFER_PTR, TENSOR_INFO, BUFFER_ID)
std::vector< unsigned int > m_BlockShape
Block shape value.
bool m_KeepDims
Enable/disable keep dimensions. If true, then the reduced dimensions that are of length 1 are kept...
uint32_t m_PadTop
Padding top value in the height dimension.
static armnn::TensorInfo OutputShapeOfSqueeze(const std::vector< uint32_t > &squeezeDims, const armnn::TensorInfo &inputTensorInfo)
virtual size_t GetSubgraphCount() const override
Return the number of subgraphs in the parsed model.
#define CHECK_SUBGRAPH(MODEL, SUBGRAPH_INDEX)
A L2NormalizationDescriptor for the L2NormalizationLayer.
bool m_BiasEnabled
Enable/disable bias.
A ViewsDescriptor for the SplitterLayer. Descriptor to configure the splitting process. Number of Views must be equal to the number of outputs, and their order must match - e.g. first view corresponds to the first output, second view to the second output, etc.
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
#define CHECK_MODEL(MODEL, SUBGRAPH_INDEX, OPERATOR_INDEX)
static void Destroy(ITfLiteParser *parser)
virtual const char * GetName() const =0
#define ARMNN_LOG(severity)
float m_Beta
Exponentiation value.
static ITfLiteParser * CreateRaw(const armnn::Optional< TfLiteParserOptions > &options=armnn::EmptyOptional())
virtual void SetTensorInfo(const TensorInfo &tensorInfo)=0
#define CHECKED_NON_NEGATIVE(VALUE)
bool m_UseRegularNms
Use Regular NMS.
A ReshapeDescriptor for the ReshapeLayer.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
A TransposeConvolution2dDescriptor for the TransposeConvolution2dLayer.
uint32_t m_PadTop
Padding top value in the height dimension.
TensorShape m_TargetShape
Target shape value.
uint32_t m_PadLeft
Padding left value in the width dimension.
uint32_t m_PoolWidth
Pooling width value.
uint32_t m_NumInputs
Number of input tensors.
std::unique_ptr< tflite::OperatorT > OperatorPtr
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
virtual const char * what() const noexcept override
float m_NmsIouThreshold
Intersection over union threshold.
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
virtual std::vector< std::string > GetSubgraphOutputTensorNames(size_t subgraphId) const override
Return the output tensor names for a given subgraph.
unsigned int GetNumBytes() const
float m_B
Beta lower bound value used by the activation functions. (BoundedReLu, Linear, TanH).
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
static TensorRawPtrVector GetInputs(const ModelPtr &model, size_t subgraphIndex, size_t operatorIndex)
uint32_t m_DetectionsPerClass
Detections per classes, used in Regular NMS.
static std::vector< int32_t > & GetOutputTensorIds(const ModelPtr &model, size_t subgraphIndex, size_t operatorIndex)
static TensorRawPtrVector GetOutputs(const ModelPtr &model, size_t subgraphIndex, size_t operatorIndex)
std::vector< int > m_End
End values for the input that will be sliced.
std::unique_ptr< ITfLiteParser, void(*)(ITfLiteParser *parser)> ITfLiteParserPtr
virtual unsigned int GetNumOutputSlots() const =0
armnnSerializer::TensorInfo * TensorRawPtr
uint32_t m_PadLeft
Padding left value in the width dimension.
virtual armnn::INetworkPtr CreateNetworkFromBinaryFile(const char *graphFile) override
Create the network from a flatbuffers binary file on disk.
A FullyConnectedDescriptor for the FullyConnectedLayer.
bool CheckShape(const armnn::TensorShape &actual, const std::vector< uint32_t > &expected)
bool m_TransposeWeightMatrix
Enable/disable transpose weight matrix.
float m_NmsScoreThreshold
NMS score threshold.
void SetShape(const TensorShape &newShape)
virtual armnn::INetworkPtr CreateNetworkFromBinary(const std::vector< uint8_t > &binaryContent) override
Create the network from a flatbuffers binary.
void CalcPadding(uint32_t input, uint32_t kernel, uint32_t stride, uint32_t &outPadHead, uint32_t &outPadTail, bool samePadding)
PaddingMethod m_PaddingMethod
The padding method to be used. (Exclude, IgnoreValue).
static TensorIdRawPtrVector GetSubgraphOutputs(const ModelPtr &model, size_t subgraphIndex)
std::unique_ptr< INetwork, void(*)(INetwork *network)> INetworkPtr
uint32_t m_PadRight
Padding right value in the width dimension.
bool m_BiasEnabled
Enable/disable bias.
int32_t m_EllipsisMask
Ellipsis mask value.
OutputShapeRounding m_OutputShapeRounding
The rounding method for the output shape. (Floor, Ceiling).
armnn::BindingPointInfo BindingPointInfo
A StackDescriptor for the StackLayer.
int32_t m_BeginMask
Begin mask value. If set, then the begin is disregarded and the fullest range is used for the dimensi...
ActivationFunction m_Function
The activation function to use (Sigmoid, TanH, Linear, ReLu, BoundedReLu, SoftReLu, LeakyReLu, Abs, Sqrt, Square).
A SoftmaxDescriptor for the SoftmaxLayer.
An output connection slot for a layer. The output slot may be connected to 1 or more input slots of s...
std::unique_ptr< tflite::ModelT > ModelPtr
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
uint32_t m_PadLeft
Padding left value in the width dimension.
constexpr unsigned int MaxNumOfTensorDimensions
PoolingAlgorithm m_PoolType
The pooling algorithm to use (Max. Average, L2).
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
std::vector< std::pair< unsigned int, unsigned int > > m_PadList
Specifies the padding for input dimension. First is the number of values to add before the tensor in ...
bool m_BiasEnabled
Enable/disable bias.
#define CHECK_TENSOR(MODEL, SUBGRAPH_INDEX, TENSOR_INDEX)
std::unique_ptr< tflite::SubGraphT > SubgraphPtr
const tflite::TensorT * TensorRawPtr
unsigned int GetNumElements() const
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
DataType GetDataType() const
static ModelPtr LoadModelFromFile(const char *fileName)
A Pooling2dDescriptor for the Pooling2dLayer.
virtual unsigned int GetNumInputSlots() const =0
uint32_t m_DilationY
Dilation along y axis.
A StandInDescriptor for the StandIn layer.
A SliceDescriptor for the SliceLayer.
virtual BindingPointInfo GetNetworkInputBindingInfo(size_t subgraphId, const std::string &name) const override
static BufferRawPtr GetBuffer(const ModelPtr &model, size_t bufferIndex)
#define CHECK_BUFFER(MODEL, BUFFER_INDEX)
bool m_BiasEnabled
Enable/disable bias.
#define CHECK_SUPPORTED_FUSED_ACTIVATION(OPTION, SUBGRAPH_INDEX, OPERATOR_INDEX)
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
A DepthwiseConvolution2dDescriptor for the DepthwiseConvolution2dLayer.
std::vector< TensorRawPtr > TensorRawPtrVector
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
int32_t m_ShrinkAxisMask
Shrink axis mask value. If set, the nth specification shrinks the dimensionality by 1...
A PermuteDescriptor for the PermuteLayer.
A Convolution2dDescriptor for the Convolution2dLayer.
armnn::TensorInfo ToTensorInfo(Deserializer::TensorRawPtr tensorPtr)
A MeanDescriptor for the MeanLayer.
void CheckTensor(const ConstTensor &t)
virtual int Connect(IInputSlot &destination)=0
uint32_t m_DilationX
Dilation along x axis.
armnn::Runtime::CreationOptions::ExternalProfilingOptions options
A SpaceToBatchNdDescriptor for the SpaceToBatchNdLayer.
std::vector< std::pair< unsigned int, unsigned int > > m_Crops
The values to crop from the input dimension.
uint32_t m_PadBottom
Padding bottom value in the height dimension.
const TensorShape & GetShape() const
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
bool has_value() const noexcept
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
int LayerBindingId
Type of identifiers for bindable layers (inputs, outputs).
float m_ScaleH
Center size encoding scale height.
constexpr const char * GetDataTypeName(DataType dataType)
int32_t m_EndMask
End mask value. If set, then the end is disregarded and the fullest range is used for the dimension...
A ResizeDescriptor for the ResizeLayer.
std::string AsString() const
TfLiteParser(const armnn::Optional< ITfLiteParser::TfLiteParserOptions > &options=armnn::EmptyOptional())
virtual const IOutputSlot & GetOutputSlot(unsigned int index) const =0
std::vector< unsigned int > m_BlockShape
Block shape values.
float GetQuantizationScale() const
static ModelPtr LoadModelFromBinary(const uint8_t *binaryContent, size_t len)
An OriginsDescriptor for the ConcatLayer. Descriptor to configure the concatenation process...
uint32_t m_DilationY
Dilation factor value for height dimension.
uint32_t m_MaxDetections
Maximum numbers of detections.
A StridedSliceDescriptor for the StridedSliceLayer.
uint32_t m_PadRight
Padding right value in the width dimension.