ArmNN
 20.11
TfLiteParser Class Reference

#include <TfLiteParser.hpp>

Inheritance diagram for TfLiteParser:
ITfLiteParser

Public Types

using ModelPtr = std::unique_ptr< tflite::ModelT >
 
using SubgraphPtr = std::unique_ptr< tflite::SubGraphT >
 
using OperatorPtr = std::unique_ptr< tflite::OperatorT >
 
using OperatorCodePtr = std::unique_ptr< tflite::OperatorCodeT >
 
using TensorPtr = std::unique_ptr< tflite::TensorT >
 
using TensorRawPtr = const tflite::TensorT *
 
using TensorRawPtrVector = std::vector< TensorRawPtr >
 
using TensorIdRawPtr = std::pair< size_t, TensorRawPtr >
 
using TensorIdRawPtrVector = std::vector< TensorIdRawPtr >
 
using BufferPtr = std::unique_ptr< tflite::BufferT >
 
using BufferRawPtr = const tflite::BufferT *
 

Public Member Functions

virtual armnn::INetworkPtr CreateNetworkFromBinaryFile (const char *graphFile) override
 Create the network from a flatbuffers binary file on disk. More...
 
virtual armnn::INetworkPtr CreateNetworkFromBinary (const std::vector< uint8_t > &binaryContent) override
 Create the network from a flatbuffers binary. More...
 
virtual BindingPointInfo GetNetworkInputBindingInfo (size_t subgraphId, const std::string &name) const override
 Retrieve binding info (layer id and tensor info) for the network input identified by the given layer name and subgraph id. More...
 
virtual BindingPointInfo GetNetworkOutputBindingInfo (size_t subgraphId, const std::string &name) const override
 Retrieve binding info (layer id and tensor info) for the network output identified by the given layer name and subgraph id. More...
 
virtual size_t GetSubgraphCount () const override
 Return the number of subgraphs in the parsed model. More...
 
virtual std::vector< std::string > GetSubgraphInputTensorNames (size_t subgraphId) const override
 Return the input tensor names for a given subgraph. More...
 
virtual std::vector< std::string > GetSubgraphOutputTensorNames (size_t subgraphId) const override
 Return the output tensor names for a given subgraph. More...
 
 TfLiteParser (const armnn::Optional< ITfLiteParser::TfLiteParserOptions > &options=armnn::EmptyOptional())
 
virtual ~TfLiteParser ()
 

Static Public Member Functions

static ModelPtr LoadModelFromFile (const char *fileName)
 
static ModelPtr LoadModelFromBinary (const uint8_t *binaryContent, size_t len)
 
static TensorRawPtrVector GetInputs (const ModelPtr &model, size_t subgraphIndex, size_t operatorIndex)
 
static TensorRawPtrVector GetOutputs (const ModelPtr &model, size_t subgraphIndex, size_t operatorIndex)
 
static TensorIdRawPtrVector GetSubgraphInputs (const ModelPtr &model, size_t subgraphIndex)
 
static TensorIdRawPtrVector GetSubgraphOutputs (const ModelPtr &model, size_t subgraphIndex)
 
static std::vector< int32_t > & GetInputTensorIds (const ModelPtr &model, size_t subgraphIndex, size_t operatorIndex)
 
static std::vector< int32_t > & GetOutputTensorIds (const ModelPtr &model, size_t subgraphIndex, size_t operatorIndex)
 
static BufferRawPtr GetBuffer (const ModelPtr &model, size_t bufferIndex)
 
static armnn::TensorInfo OutputShapeOfSqueeze (const std::vector< uint32_t > &squeezeDims, const armnn::TensorInfo &inputTensorInfo)
 
static armnn::TensorInfo OutputShapeOfReshape (const armnn::TensorInfo &inputTensorInfo, const std::vector< int32_t > &targetDimsIn)
 
- Static Public Member Functions inherited from ITfLiteParser
static ITfLiteParser * CreateRaw (const armnn::Optional< TfLiteParserOptions > &options=armnn::EmptyOptional())
 
static ITfLiteParserPtr Create (const armnn::Optional< TfLiteParserOptions > &options=armnn::EmptyOptional())
 
static void Destroy (ITfLiteParser *parser)
 

Additional Inherited Members

- Protected Member Functions inherited from ITfLiteParser
virtual ~ITfLiteParser ()
 

Detailed Description

Definition at line 19 of file TfLiteParser.hpp.

Member Typedef Documentation

◆ BufferPtr

using BufferPtr = std::unique_ptr<tflite::BufferT>

Definition at line 32 of file TfLiteParser.hpp.

◆ BufferRawPtr

using BufferRawPtr = const tflite::BufferT *

Definition at line 33 of file TfLiteParser.hpp.

◆ ModelPtr

using ModelPtr = std::unique_ptr<tflite::ModelT>

Definition at line 23 of file TfLiteParser.hpp.

◆ OperatorCodePtr

using OperatorCodePtr = std::unique_ptr<tflite::OperatorCodeT>

Definition at line 26 of file TfLiteParser.hpp.

◆ OperatorPtr

using OperatorPtr = std::unique_ptr<tflite::OperatorT>

Definition at line 25 of file TfLiteParser.hpp.

◆ SubgraphPtr

using SubgraphPtr = std::unique_ptr<tflite::SubGraphT>

Definition at line 24 of file TfLiteParser.hpp.

◆ TensorIdRawPtr

using TensorIdRawPtr = std::pair<size_t, TensorRawPtr>

Definition at line 30 of file TfLiteParser.hpp.

◆ TensorIdRawPtrVector

using TensorIdRawPtrVector = std::vector<TensorIdRawPtr>

Definition at line 31 of file TfLiteParser.hpp.

◆ TensorPtr

using TensorPtr = std::unique_ptr<tflite::TensorT>

Definition at line 27 of file TfLiteParser.hpp.

◆ TensorRawPtr

using TensorRawPtr = const tflite::TensorT *

Definition at line 28 of file TfLiteParser.hpp.

◆ TensorRawPtrVector

using TensorRawPtrVector = std::vector<TensorRawPtr>

Definition at line 29 of file TfLiteParser.hpp.

Constructor & Destructor Documentation

◆ TfLiteParser()

Definition at line 538 of file TfLiteParser.cpp.

Referenced by ITfLiteParser::CreateRaw(), and TfLiteParser::~TfLiteParser().

539 : m_Options(options)
540 , m_Network(nullptr, nullptr)
541 , m_ParserFunctions(tflite::BuiltinOperator_MAX+1, &TfLiteParser::ParseUnsupportedOperator)
542 {
543  // register supported operators
544  m_ParserFunctions[tflite::BuiltinOperator_ADD] = &TfLiteParser::ParseAdd;
545  m_ParserFunctions[tflite::BuiltinOperator_AVERAGE_POOL_2D] = &TfLiteParser::ParseAveragePool2D;
546  m_ParserFunctions[tflite::BuiltinOperator_BATCH_TO_SPACE_ND] = &TfLiteParser::ParseBatchToSpaceND;
547  m_ParserFunctions[tflite::BuiltinOperator_CONCATENATION] = &TfLiteParser::ParseConcatenation;
548  m_ParserFunctions[tflite::BuiltinOperator_CONV_2D] = &TfLiteParser::ParseConv2D;
549  m_ParserFunctions[tflite::BuiltinOperator_CUSTOM] = &TfLiteParser::ParseCustomOperator;
550  m_ParserFunctions[tflite::BuiltinOperator_DEPTHWISE_CONV_2D] = &TfLiteParser::ParseDepthwiseConv2D;
551  m_ParserFunctions[tflite::BuiltinOperator_DEQUANTIZE] = &TfLiteParser::ParseDequantize;
552  m_ParserFunctions[tflite::BuiltinOperator_EXP] = &TfLiteParser::ParseExp;
553  m_ParserFunctions[tflite::BuiltinOperator_FULLY_CONNECTED] = &TfLiteParser::ParseFullyConnected;
554  m_ParserFunctions[tflite::BuiltinOperator_HARD_SWISH] = &TfLiteParser::ParseHardSwish;
555  m_ParserFunctions[tflite::BuiltinOperator_LEAKY_RELU] = &TfLiteParser::ParseLeakyRelu;
556  m_ParserFunctions[tflite::BuiltinOperator_LOGISTIC] = &TfLiteParser::ParseLogistic;
557  m_ParserFunctions[tflite::BuiltinOperator_L2_NORMALIZATION] = &TfLiteParser::ParseL2Normalization;
558  m_ParserFunctions[tflite::BuiltinOperator_MAX_POOL_2D] = &TfLiteParser::ParseMaxPool2D;
559  m_ParserFunctions[tflite::BuiltinOperator_MAXIMUM] = &TfLiteParser::ParseMaximum;
560  m_ParserFunctions[tflite::BuiltinOperator_MEAN] = &TfLiteParser::ParseMean;
561  m_ParserFunctions[tflite::BuiltinOperator_MINIMUM] = &TfLiteParser::ParseMinimum;
562  m_ParserFunctions[tflite::BuiltinOperator_MUL] = &TfLiteParser::ParseMul;
563  m_ParserFunctions[tflite::BuiltinOperator_NEG] = &TfLiteParser::ParseNeg;
564  m_ParserFunctions[tflite::BuiltinOperator_PACK] = &TfLiteParser::ParsePack;
565  m_ParserFunctions[tflite::BuiltinOperator_PAD] = &TfLiteParser::ParsePad;
566  m_ParserFunctions[tflite::BuiltinOperator_QUANTIZE] = &TfLiteParser::ParseQuantize;
567  m_ParserFunctions[tflite::BuiltinOperator_RELU] = &TfLiteParser::ParseRelu;
568  m_ParserFunctions[tflite::BuiltinOperator_RELU6] = &TfLiteParser::ParseRelu6;
569  m_ParserFunctions[tflite::BuiltinOperator_RESHAPE] = &TfLiteParser::ParseReshape;
570  m_ParserFunctions[tflite::BuiltinOperator_RESIZE_BILINEAR] = &TfLiteParser::ParseResizeBilinear;
571  m_ParserFunctions[tflite::BuiltinOperator_RESIZE_NEAREST_NEIGHBOR] = &TfLiteParser::ParseResizeNearestNeighbor;
572  m_ParserFunctions[tflite::BuiltinOperator_SLICE] = &TfLiteParser::ParseSlice;
573  m_ParserFunctions[tflite::BuiltinOperator_SOFTMAX] = &TfLiteParser::ParseSoftmax;
574  m_ParserFunctions[tflite::BuiltinOperator_SPACE_TO_BATCH_ND] = &TfLiteParser::ParseSpaceToBatchND;
575  m_ParserFunctions[tflite::BuiltinOperator_SPLIT] = &TfLiteParser::ParseSplit;
576  m_ParserFunctions[tflite::BuiltinOperator_SPLIT_V] = &TfLiteParser::ParseSplitV;
577  m_ParserFunctions[tflite::BuiltinOperator_SQUEEZE] = &TfLiteParser::ParseSqueeze;
578  m_ParserFunctions[tflite::BuiltinOperator_STRIDED_SLICE] = &TfLiteParser::ParseStridedSlice;
579  m_ParserFunctions[tflite::BuiltinOperator_SUB] = &TfLiteParser::ParseSub;
580  m_ParserFunctions[tflite::BuiltinOperator_TANH] = &TfLiteParser::ParseTanH;
581  m_ParserFunctions[tflite::BuiltinOperator_TRANSPOSE] = &TfLiteParser::ParseTranspose;
582  m_ParserFunctions[tflite::BuiltinOperator_TRANSPOSE_CONV] = &TfLiteParser::ParseTransposeConv;
583  m_ParserFunctions[tflite::BuiltinOperator_UNPACK] = &TfLiteParser::ParseUnpack;
584  m_ParserFunctions[tflite::BuiltinOperator_DIV] = &TfLiteParser::ParseDiv;
585  m_ParserFunctions[tflite::BuiltinOperator_ARG_MAX] = &TfLiteParser::ParseArgMax;
586  // register supported custom operators
587  m_CustomParserFunctions["TFLite_Detection_PostProcess"] = &TfLiteParser::ParseDetectionPostProcess;
588 }

◆ ~TfLiteParser()

Member Function Documentation

◆ CreateNetworkFromBinary()

INetworkPtr CreateNetworkFromBinary ( const std::vector< uint8_t > &  binaryContent)
overridevirtual

Create the network from a flatbuffers binary.

Implements ITfLiteParser.

Definition at line 604 of file TfLiteParser.cpp.

References ARMNN_ASSERT, ARMNN_ASSERT_MSG, ARMNN_LOG, armnnTfParser::CalcPadding(), CHECK_LOCATION, CHECK_MODEL, CHECK_SUPPORTED_FUSED_ACTIVATION, CHECK_TENSOR, CHECK_VALID_SIZE, CHECKED_NON_NEGATIVE, armnn::error, TfLiteParser::GetBuffer(), TensorInfo::GetDataType(), TfLiteParser::GetInputs(), TfLiteParser::GetInputTensorIds(), TensorInfo::GetNumBytes(), TensorInfo::GetNumElements(), TfLiteParser::GetOutputs(), IConnectableLayer::GetOutputSlot(), TfLiteParser::GetOutputTensorIds(), TensorInfo::GetShape(), TfLiteParser::LoadModelFromBinary(), SoftmaxDescriptor::m_Beta, Convolution2dDescriptor::m_BiasEnabled, DepthwiseConvolution2dDescriptor::m_BiasEnabled, TransposeConvolution2dDescriptor::m_BiasEnabled, BatchToSpaceNdDescriptor::m_BlockShape, SpaceToBatchNdDescriptor::m_BlockShape, BatchToSpaceNdDescriptor::m_Crops, Pooling2dDescriptor::m_DataLayout, Convolution2dDescriptor::m_DataLayout, DepthwiseConvolution2dDescriptor::m_DataLayout, L2NormalizationDescriptor::m_DataLayout, BatchToSpaceNdDescriptor::m_DataLayout, SpaceToBatchNdDescriptor::m_DataLayout, TransposeConvolution2dDescriptor::m_DataLayout, Convolution2dDescriptor::m_DilationX, DepthwiseConvolution2dDescriptor::m_DilationX, Convolution2dDescriptor::m_DilationY, DepthwiseConvolution2dDescriptor::m_DilationY, ElementwiseUnaryDescriptor::m_Operation, TransposeConvolution2dDescriptor::m_OutputShape, TransposeConvolution2dDescriptor::m_OutputShapeEnabled, Pooling2dDescriptor::m_OutputShapeRounding, Pooling2dDescriptor::m_PadBottom, Convolution2dDescriptor::m_PadBottom, DepthwiseConvolution2dDescriptor::m_PadBottom, TransposeConvolution2dDescriptor::m_PadBottom, Pooling2dDescriptor::m_PaddingMethod, Pooling2dDescriptor::m_PadLeft, Convolution2dDescriptor::m_PadLeft, DepthwiseConvolution2dDescriptor::m_PadLeft, TransposeConvolution2dDescriptor::m_PadLeft, SpaceToBatchNdDescriptor::m_PadList, Pooling2dDescriptor::m_PadRight, Convolution2dDescriptor::m_PadRight, DepthwiseConvolution2dDescriptor::m_PadRight, 
TransposeConvolution2dDescriptor::m_PadRight, Pooling2dDescriptor::m_PadTop, Convolution2dDescriptor::m_PadTop, DepthwiseConvolution2dDescriptor::m_PadTop, TransposeConvolution2dDescriptor::m_PadTop, Pooling2dDescriptor::m_PoolHeight, Pooling2dDescriptor::m_PoolType, Pooling2dDescriptor::m_PoolWidth, Pooling2dDescriptor::m_StrideX, Convolution2dDescriptor::m_StrideX, DepthwiseConvolution2dDescriptor::m_StrideX, TransposeConvolution2dDescriptor::m_StrideX, Pooling2dDescriptor::m_StrideY, Convolution2dDescriptor::m_StrideY, DepthwiseConvolution2dDescriptor::m_StrideY, TransposeConvolution2dDescriptor::m_StrideY, armnn::NHWC, armnn::numeric_cast(), TensorInfo::SetShape(), IOutputSlot::SetTensorInfo(), armnnDeserializer::ToTensorInfo(), and Exception::what().

605 {
606  ResetParser();
607  m_Model = LoadModelFromBinary(binaryContent.data(), binaryContent.size());
608  return CreateNetworkFromModel();
609 }
static ModelPtr LoadModelFromBinary(const uint8_t *binaryContent, size_t len)

◆ CreateNetworkFromBinaryFile()

INetworkPtr CreateNetworkFromBinaryFile ( const char *  graphFile)
overridevirtual

Create the network from a flatbuffers binary file on disk.

Implements ITfLiteParser.

Definition at line 597 of file TfLiteParser.cpp.

References TfLiteParser::LoadModelFromFile().

598 {
599  ResetParser();
600  m_Model = LoadModelFromFile(graphFile);
601  return CreateNetworkFromModel();
602 }
static ModelPtr LoadModelFromFile(const char *fileName)

◆ GetBuffer()

TfLiteParser::BufferRawPtr GetBuffer ( const ModelPtr &  model,
size_t  bufferIndex 
)
static

◆ GetInputs()

TfLiteParser::TensorRawPtrVector GetInputs ( const ModelPtr &  model,
size_t  subgraphIndex,
size_t  operatorIndex 
)
static

Definition at line 2965 of file TfLiteParser.cpp.

References CHECK_MODEL, and CHECKED_NON_NEGATIVE.

Referenced by armnnTfLiteParser::ComputeWrappedIndex(), TfLiteParser::CreateNetworkFromBinary(), TfLiteParser::OutputShapeOfReshape(), TfLiteParser::OutputShapeOfSqueeze(), and TfLiteParser::~TfLiteParser().

2968 {
2969  CHECK_MODEL(model, subgraphIndex, operatorIndex);
2970 
2971  const auto & subgraphPtr = model->subgraphs[subgraphIndex];
2972  const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
2973 
2974  size_t inputCount = operatorPtr->inputs.size();
2975  TensorRawPtrVector result(inputCount);
2976  for (size_t i=0; i<inputCount; ++i)
2977  {
2978  uint32_t inputId = CHECKED_NON_NEGATIVE(operatorPtr->inputs[i]);
2979  result[i] = subgraphPtr->tensors[inputId].get();
2980  }
2981  return result;
2982 }
#define CHECK_MODEL(MODEL, SUBGRAPH_INDEX, OPERATOR_INDEX)
std::vector< TensorRawPtr > TensorRawPtrVector
#define CHECKED_NON_NEGATIVE(VALUE)

◆ GetInputTensorIds()

std::vector< int32_t > & GetInputTensorIds ( const ModelPtr &  model,
size_t  subgraphIndex,
size_t  operatorIndex 
)
static

Definition at line 3037 of file TfLiteParser.cpp.

References CHECK_MODEL.

Referenced by armnnTfLiteParser::ComputeWrappedIndex(), TfLiteParser::CreateNetworkFromBinary(), TfLiteParser::OutputShapeOfReshape(), TfLiteParser::OutputShapeOfSqueeze(), and TfLiteParser::~TfLiteParser().

3040 {
3041  CHECK_MODEL(model, subgraphIndex, operatorIndex);
3042  const auto & subgraphPtr = model->subgraphs[subgraphIndex];
3043  const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
3044  return operatorPtr->inputs;
3045 }
#define CHECK_MODEL(MODEL, SUBGRAPH_INDEX, OPERATOR_INDEX)

◆ GetNetworkInputBindingInfo()

BindingPointInfo GetNetworkInputBindingInfo ( size_t  subgraphId,
const std::string &  name 
) const
overridevirtual

Retrieve binding info (layer id and tensor info) for the network input identified by the given layer name and subgraph id.

Implements ITfLiteParser.

Definition at line 3253 of file TfLiteParser.cpp.

References CHECK_LOCATION, CHECK_SUBGRAPH, TfLiteParser::GetSubgraphInputs(), and armnnDeserializer::ToTensorInfo().

3255 {
3256  CHECK_SUBGRAPH(m_Model, subgraphId);
3257  auto inputs = GetSubgraphInputs(m_Model, subgraphId);
3258  for (auto const & input : inputs)
3259  {
3260  if (input.second->name == name)
3261  {
3262  auto bindingId = GenerateLayerBindingId(subgraphId, input.first);
3263  return std::make_pair(bindingId, ToTensorInfo(input.second));
3264  }
3265  }
3266 
3267  std::stringstream bindings;
3268  for (auto const & input : inputs)
3269  {
3270  bindings << "'" << input.second->name << "' ";
3271  }
3272 
3273  throw ParseException(
3274  fmt::format("No input binding found for subgraph:{} and name:{}. "
3275  "Possible inputs are: [{}] {}",
3276  subgraphId,
3277  name,
3278  bindings.str(),
3279  CHECK_LOCATION().AsString()));
3280 }
static TensorIdRawPtrVector GetSubgraphInputs(const ModelPtr &model, size_t subgraphIndex)
armnn::TensorInfo ToTensorInfo(Deserializer::TensorRawPtr tensorPtr)
#define CHECK_SUBGRAPH(MODEL, SUBGRAPH_INDEX)
#define CHECK_LOCATION()
Definition: Exceptions.hpp:197

◆ GetNetworkOutputBindingInfo()

BindingPointInfo GetNetworkOutputBindingInfo ( size_t  subgraphId,
const std::string &  name 
) const
overridevirtual

Retrieve binding info (layer id and tensor info) for the network output identified by the given layer name and subgraph id.

Implements ITfLiteParser.

Definition at line 3282 of file TfLiteParser.cpp.

References CHECK_LOCATION, CHECK_SUBGRAPH, TfLiteParser::GetSubgraphOutputs(), and armnnDeserializer::ToTensorInfo().

3284 {
3285  CHECK_SUBGRAPH(m_Model, subgraphId);
3286  auto outputs = GetSubgraphOutputs(m_Model, subgraphId);
3287  for (unsigned int i = 0; i < outputs.size(); ++i)
3288  {
3289  auto const output = outputs[i];
3290  if (output.second->name == name)
3291  {
3292  auto bindingId = GenerateLayerBindingId(subgraphId, output.first);
3293  std::vector<unsigned int> shape = m_OverridenOutputShapes.size() > 0 ?
3294  m_OverridenOutputShapes[i] : AsUnsignedVector(output.second->shape);
3295  return std::make_pair(bindingId, ToTensorInfo(output.second, shape));
3296  }
3297  }
3298 
3299  std::stringstream bindings;
3300  for (auto const & output : outputs)
3301  {
3302  bindings << "'" << output.second->name << "' ";
3303  }
3304 
3305  throw ParseException(
3306  fmt::format("No output binding found for subgraph:{} and name:{}. "
3307  "Possible outputs are: [{}] {}",
3308  subgraphId,
3309  name,
3310  bindings.str(),
3311  CHECK_LOCATION().AsString()));
3312 }
armnn::TensorInfo ToTensorInfo(Deserializer::TensorRawPtr tensorPtr)
#define CHECK_SUBGRAPH(MODEL, SUBGRAPH_INDEX)
static TensorIdRawPtrVector GetSubgraphOutputs(const ModelPtr &model, size_t subgraphIndex)
#define CHECK_LOCATION()
Definition: Exceptions.hpp:197

◆ GetOutputs()

TfLiteParser::TensorRawPtrVector GetOutputs ( const ModelPtr &  model,
size_t  subgraphIndex,
size_t  operatorIndex 
)
static

Definition at line 2984 of file TfLiteParser.cpp.

References CHECK_MODEL, CHECK_TENSOR, and CHECKED_NON_NEGATIVE.

Referenced by armnnTfLiteParser::ComputeWrappedIndex(), TfLiteParser::CreateNetworkFromBinary(), TfLiteParser::OutputShapeOfReshape(), TfLiteParser::OutputShapeOfSqueeze(), and TfLiteParser::~TfLiteParser().

2987 {
2988  CHECK_MODEL(model, subgraphIndex, operatorIndex);
2989 
2990  const auto & subgraphPtr = model->subgraphs[subgraphIndex];
2991  const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
2992 
2993  size_t outputCount = operatorPtr->outputs.size();
2994  TensorRawPtrVector result(outputCount);
2995  for (size_t i=0; i<outputCount; ++i)
2996  {
2997  uint32_t outputId = CHECKED_NON_NEGATIVE(operatorPtr->outputs[i]);
2998  CHECK_TENSOR(model, subgraphIndex, outputId);
2999  result[i] = subgraphPtr->tensors[outputId].get();
3000  }
3001  return result;
3002 }
#define CHECK_MODEL(MODEL, SUBGRAPH_INDEX, OPERATOR_INDEX)
std::vector< TensorRawPtr > TensorRawPtrVector
#define CHECK_TENSOR(MODEL, SUBGRAPH_INDEX, TENSOR_INDEX)
#define CHECKED_NON_NEGATIVE(VALUE)

◆ GetOutputTensorIds()

std::vector< int32_t > & GetOutputTensorIds ( const ModelPtr &  model,
size_t  subgraphIndex,
size_t  operatorIndex 
)
static

Definition at line 3047 of file TfLiteParser.cpp.

References ARMNN_ASSERT, CHECK_LOCATION, CHECK_MODEL, CHECK_SUBGRAPH, IConnectableLayer::GetInputSlot(), IConnectableLayer::GetNumInputSlots(), IConnectableLayer::GetNumOutputSlots(), IConnectableLayer::GetOutputSlot(), TfLiteParser::GetSubgraphInputs(), TfLiteParser::GetSubgraphOutputs(), IOutputSlot::SetTensorInfo(), and armnnDeserializer::ToTensorInfo().

Referenced by armnnTfLiteParser::ComputeWrappedIndex(), TfLiteParser::CreateNetworkFromBinary(), TfLiteParser::OutputShapeOfReshape(), TfLiteParser::OutputShapeOfSqueeze(), and TfLiteParser::~TfLiteParser().

3050 {
3051  CHECK_MODEL(model, subgraphIndex, operatorIndex);
3052  const auto & subgraphPtr = model->subgraphs[subgraphIndex];
3053  const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
3054  return operatorPtr->outputs;
3055 }
#define CHECK_MODEL(MODEL, SUBGRAPH_INDEX, OPERATOR_INDEX)

◆ GetSubgraphCount()

size_t GetSubgraphCount ( ) const
overridevirtual

Return the number of subgraphs in the parsed model.

Implements ITfLiteParser.

Definition at line 3314 of file TfLiteParser.cpp.

3315 {
3316  return m_Model->subgraphs.size();
3317 }

◆ GetSubgraphInputs()

TfLiteParser::TensorIdRawPtrVector GetSubgraphInputs ( const ModelPtr &  model,
size_t  subgraphIndex 
)
static

Definition at line 3004 of file TfLiteParser.cpp.

References CHECK_SUBGRAPH, CHECK_TENSOR, and CHECKED_NON_NEGATIVE.

Referenced by TfLiteParser::GetNetworkInputBindingInfo(), TfLiteParser::GetOutputTensorIds(), TfLiteParser::GetSubgraphInputTensorNames(), and TfLiteParser::~TfLiteParser().

3006 {
3007  CHECK_SUBGRAPH(model, subgraphIndex);
3008  const auto & subgraphPtr = model->subgraphs[subgraphIndex];
3009 
3010  size_t inputCount = subgraphPtr->inputs.size();
3011  TensorIdRawPtrVector result(inputCount);
3012  for (size_t i=0; i<inputCount; ++i)
3013  {
3014  uint32_t inputId = CHECKED_NON_NEGATIVE(subgraphPtr->inputs[i]);
3015  CHECK_TENSOR(model, subgraphIndex, inputId);
3016  result[i] = std::make_pair(inputId, subgraphPtr->tensors[inputId].get());
3017  }
3018  return result;
3019 }
#define CHECK_TENSOR(MODEL, SUBGRAPH_INDEX, TENSOR_INDEX)
#define CHECK_SUBGRAPH(MODEL, SUBGRAPH_INDEX)
#define CHECKED_NON_NEGATIVE(VALUE)
std::vector< TensorIdRawPtr > TensorIdRawPtrVector

◆ GetSubgraphInputTensorNames()

std::vector< std::string > GetSubgraphInputTensorNames ( size_t  subgraphId) const
overridevirtual

Return the input tensor names for a given subgraph.

Implements ITfLiteParser.

Definition at line 3319 of file TfLiteParser.cpp.

References CHECK_SUBGRAPH, and TfLiteParser::GetSubgraphInputs().

3320 {
3321  CHECK_SUBGRAPH(m_Model, subgraphId);
3322  auto inputs = GetSubgraphInputs(m_Model, subgraphId);
3323  std::vector<std::string> result;
3324  result.reserve(inputs.size());
3325  for (auto const & input : inputs)
3326  {
3327  result.push_back(input.second->name);
3328  }
3329  return result;
3330 }
static TensorIdRawPtrVector GetSubgraphInputs(const ModelPtr &model, size_t subgraphIndex)
#define CHECK_SUBGRAPH(MODEL, SUBGRAPH_INDEX)

◆ GetSubgraphOutputs()

TfLiteParser::TensorIdRawPtrVector GetSubgraphOutputs ( const ModelPtr &  model,
size_t  subgraphIndex 
)
static

Definition at line 3021 of file TfLiteParser.cpp.

References CHECK_SUBGRAPH, and CHECKED_NON_NEGATIVE.

Referenced by TfLiteParser::GetNetworkOutputBindingInfo(), TfLiteParser::GetOutputTensorIds(), TfLiteParser::GetSubgraphOutputTensorNames(), and TfLiteParser::~TfLiteParser().

3023 {
3024  CHECK_SUBGRAPH(model, subgraphIndex);
3025  const auto & subgraphPtr = model->subgraphs[subgraphIndex];
3026 
3027  size_t outputCount = subgraphPtr->outputs.size();
3028  TensorIdRawPtrVector result(outputCount);
3029  for (size_t i=0; i<outputCount; ++i)
3030  {
3031  uint32_t outputId = CHECKED_NON_NEGATIVE(subgraphPtr->outputs[i]);
3032  result[i] = std::make_pair(outputId, subgraphPtr->tensors[outputId].get());
3033  }
3034  return result;
3035 }
#define CHECK_SUBGRAPH(MODEL, SUBGRAPH_INDEX)
#define CHECKED_NON_NEGATIVE(VALUE)
std::vector< TensorIdRawPtr > TensorIdRawPtrVector

◆ GetSubgraphOutputTensorNames()

std::vector< std::string > GetSubgraphOutputTensorNames ( size_t  subgraphId) const
overridevirtual

Return the output tensor names for a given subgraph.

Implements ITfLiteParser.

Definition at line 3332 of file TfLiteParser.cpp.

References CHECK_SUBGRAPH, and TfLiteParser::GetSubgraphOutputs().

3333 {
3334  CHECK_SUBGRAPH(m_Model, subgraphId);
3335  auto outputs = GetSubgraphOutputs(m_Model, subgraphId);
3336  std::vector<std::string> result;
3337  result.reserve(outputs.size());
3338  for (auto const & output : outputs)
3339  {
3340  result.push_back(output.second->name);
3341  }
3342  return result;
3343 }
#define CHECK_SUBGRAPH(MODEL, SUBGRAPH_INDEX)
static TensorIdRawPtrVector GetSubgraphOutputs(const ModelPtr &model, size_t subgraphIndex)

◆ LoadModelFromBinary()

TfLiteParser::ModelPtr LoadModelFromBinary ( const uint8_t *  binaryContent,
size_t  len 
)
static

Definition at line 2946 of file TfLiteParser.cpp.

References CHECK_LOCATION.

Referenced by TfLiteParser::CreateNetworkFromBinary(), TfLiteParser::LoadModelFromFile(), and TfLiteParser::~TfLiteParser().

2947 {
2948  if (binaryContent == nullptr)
2949  {
2950  throw InvalidArgumentException(fmt::format("Invalid (null) binary content {}",
2951  CHECK_LOCATION().AsString()));
2952  }
2953  flatbuffers::Verifier verifier(binaryContent, len);
2954  if (verifier.VerifyBuffer<tflite::Model>() == false)
2955  {
2956  throw ParseException(
2957  fmt::format("Buffer doesn't conform to the expected Tensorflow Lite "
2958  "flatbuffers format. size:{} {}",
2959  len,
2960  CHECK_LOCATION().AsString()));
2961  }
2962  return tflite::UnPackModel(binaryContent);
2963 }
#define CHECK_LOCATION()
Definition: Exceptions.hpp:197

◆ LoadModelFromFile()

TfLiteParser::ModelPtr LoadModelFromFile ( const char *  fileName)
static

Definition at line 2922 of file TfLiteParser.cpp.

References CHECK_LOCATION, and TfLiteParser::LoadModelFromBinary().

Referenced by TfLiteParser::CreateNetworkFromBinaryFile(), and TfLiteParser::~TfLiteParser().

2923 {
2924  if (fileName == nullptr)
2925  {
2926  throw InvalidArgumentException(fmt::format("Invalid (null) file name {}",
2927  CHECK_LOCATION().AsString()));
2928  }
2929  std::error_code errorCode;
2930  fs::path pathToFile(fileName);
2931  if (!fs::exists(pathToFile, errorCode))
2932  {
2933  //fmt::format() could not be used here (format error)
2934  std::stringstream msg;
2935  msg << "Cannot find the file (" << fileName << ") errorCode: " << errorCode
2936  << " " << CHECK_LOCATION().AsString();
2937 
2938  throw FileNotFoundException(msg.str());
2939  }
2940  std::ifstream file(fileName, std::ios::binary);
2941  std::string fileContent((std::istreambuf_iterator<char>(file)), std::istreambuf_iterator<char>());
2942  return LoadModelFromBinary(reinterpret_cast<const uint8_t *>(fileContent.c_str()),
2943  fileContent.size());
2944 }
static ModelPtr LoadModelFromBinary(const uint8_t *binaryContent, size_t len)
#define CHECK_LOCATION()
Definition: Exceptions.hpp:197

◆ OutputShapeOfReshape()

armnn::TensorInfo OutputShapeOfReshape ( const armnn::TensorInfo &  inputTensorInfo,
const std::vector< int32_t > &  targetDimsIn 
)
static

Definition at line 2004 of file TfLiteParser.cpp.

References ARMNN_ASSERT, ARMNN_THROW_PARSE_EXCEPTION, CHECK_LOCATION, CHECK_MODEL, CHECK_SUPPORTED_FUSED_ACTIVATION, CHECK_VALID_SIZE, CHECKED_NON_NEGATIVE, armnnDeserializer::CheckShape(), IOutputSlot::Connect(), TfLiteParser::GetBuffer(), TensorInfo::GetDataType(), TfLiteParser::GetInputs(), IConnectableLayer::GetInputSlot(), TfLiteParser::GetInputTensorIds(), IConnectableLayer::GetName(), TensorInfo::GetNumBytes(), TensorInfo::GetNumDimensions(), TensorInfo::GetNumElements(), IConnectableLayer::GetNumOutputSlots(), TfLiteParser::GetOutputs(), IConnectableLayer::GetOutputSlot(), TfLiteParser::GetOutputTensorIds(), TensorInfo::GetQuantizationOffset(), TensorInfo::GetQuantizationScale(), TensorInfo::GetShape(), StackDescriptor::m_Axis, FullyConnectedDescriptor::m_BiasEnabled, DetectionPostProcessDescriptor::m_DetectionsPerClass, StackDescriptor::m_InputShape, DetectionPostProcessDescriptor::m_MaxClassesPerDetection, DetectionPostProcessDescriptor::m_MaxDetections, ResizeDescriptor::m_Method, DetectionPostProcessDescriptor::m_NmsIouThreshold, DetectionPostProcessDescriptor::m_NmsScoreThreshold, DetectionPostProcessDescriptor::m_NumClasses, StackDescriptor::m_NumInputs, DetectionPostProcessDescriptor::m_ScaleH, DetectionPostProcessDescriptor::m_ScaleW, DetectionPostProcessDescriptor::m_ScaleX, DetectionPostProcessDescriptor::m_ScaleY, ReshapeDescriptor::m_TargetShape, FullyConnectedDescriptor::m_TransposeWeightMatrix, DetectionPostProcessDescriptor::m_UseRegularNms, armnn::MaxNumOfTensorDimensions, armnn::NHWC, armnn::numeric_cast(), armnnUtils::ProcessConcatInputTensorInfo(), OriginsDescriptor::SetConcatAxis(), TensorInfo::SetShape(), IOutputSlot::SetTensorInfo(), ViewsDescriptor::SetViewOriginCoord(), ViewsDescriptor::SetViewSize(), and armnnDeserializer::ToTensorInfo().

Referenced by TfLiteParser::~TfLiteParser().

2006 {
2007  std::vector<unsigned int> outputDims(targetDimsIn.begin(), targetDimsIn.end());
2008  const auto stretchDim = std::find(targetDimsIn.begin(), targetDimsIn.end(), -1);
2009 
2010  if (stretchDim != targetDimsIn.end())
2011  {
2012  if (std::find(std::next(stretchDim), targetDimsIn.end(), -1) != targetDimsIn.end())
2013  {
2014  throw ParseException(
2015  fmt::format("At most one component of shape can be -1 {}", CHECK_LOCATION().AsString()));
2016  }
2017 
2018  auto targetNumElements =
2019  armnn::numeric_cast<unsigned int>(
2020  std::accumulate(targetDimsIn.begin(), targetDimsIn.end(), -1, std::multiplies<int32_t>()));
2021 
2022  auto stretchIndex = static_cast<size_t>(std::distance(targetDimsIn.begin(), stretchDim));
2023  outputDims[stretchIndex] = inputTensorInfo.GetNumElements() / targetNumElements;
2024  }
2025 
2026  TensorShape outputShape = TensorShape(static_cast<unsigned int>(outputDims.size()), outputDims.data());
2027 
2028  TensorInfo reshapeInfo = inputTensorInfo;
2029  reshapeInfo.SetShape(outputShape);
2030 
2031  return reshapeInfo;
2032 }
void SetShape(const TensorShape &newShape)
Definition: Tensor.hpp:189
#define CHECK_LOCATION()
Definition: Exceptions.hpp:197
std::enable_if_t< std::is_unsigned< Source >::value &&std::is_unsigned< Dest >::value, Dest > numeric_cast(Source source)
Definition: NumericCast.hpp:35
unsigned int GetNumElements() const
Definition: Tensor.hpp:192

◆ OutputShapeOfSqueeze()

armnn::TensorInfo OutputShapeOfSqueeze ( const std::vector< uint32_t > &  squeezeDims,
const armnn::TensorInfo &  inputTensorInfo 
)
static

Definition at line 1509 of file TfLiteParser.cpp.

References ARMNN_ASSERT, CHECK_LOCATION, CHECK_MODEL, CHECK_VALID_SIZE, TfLiteParser::GetBuffer(), TfLiteParser::GetInputs(), TfLiteParser::GetInputTensorIds(), TensorInfo::GetNumBytes(), TensorInfo::GetNumDimensions(), TensorInfo::GetNumElements(), TfLiteParser::GetOutputs(), IConnectableLayer::GetOutputSlot(), TfLiteParser::GetOutputTensorIds(), TensorInfo::GetShape(), armnn::IgnoreUnused(), ActivationDescriptor::m_A, MeanDescriptor::m_Axis, ActivationDescriptor::m_B, StridedSliceDescriptor::m_Begin, StridedSliceDescriptor::m_BeginMask, StridedSliceDescriptor::m_DataLayout, StridedSliceDescriptor::m_EllipsisMask, StridedSliceDescriptor::m_End, StridedSliceDescriptor::m_EndMask, ActivationDescriptor::m_Function, MeanDescriptor::m_KeepDims, StridedSliceDescriptor::m_NewAxisMask, PadDescriptor::m_PadList, StridedSliceDescriptor::m_ShrinkAxisMask, StridedSliceDescriptor::m_Stride, ReshapeDescriptor::m_TargetShape, armnn::Neg, armnn::NHWC, TensorInfo::SetShape(), IOutputSlot::SetTensorInfo(), armnnDeserializer::ToTensorInfo(), and true.

Referenced by BOOST_FIXTURE_TEST_CASE(), and TfLiteParser::~TfLiteParser().

1511 {
1512  CHECK_VALID_SIZE(squeezeDimsIn.size(), 0, 1, 2, 3, 4);
1513  std::vector<uint32_t> squeezeDims = squeezeDimsIn;
1514  static const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };
1515 
1516  if (inputTensorInfo.GetNumDimensions() > 4)
1517  {
1518  std::stringstream ss;
1519  ss << "Input tensor has unexpected number of dimensions:" << inputTensorInfo.GetNumDimensions()
1520  << " shape:" << inputTensorInfo.GetShape() << " "
1521  << CHECK_LOCATION().AsString();
1522  throw ParseException(ss.str());
1523  }
1524 
1525  if (squeezeDims.empty())
1526  {
1527  squeezeDims.assign(dimensionSequence,
1528  dimensionSequence+inputTensorInfo.GetNumDimensions());
1529  }
1530 
1531  std::vector<uint32_t> outputDims;
1532  for(unsigned int i = 0; i < inputTensorInfo.GetNumDimensions(); i++)
1533  {
1534  bool skipSqueeze = (std::find(squeezeDims.begin(), squeezeDims.end(), i) == squeezeDims.end());
1535  auto currentDimension = inputTensorInfo.GetShape()[i];
1536  if (skipSqueeze || currentDimension != 1)
1537  {
1538  outputDims.push_back(currentDimension);
1539  }
1540  }
1541 
1542  if (outputDims.size() > 4)
1543  {
1544  std::stringstream ss;
1545  ss << "Output tensor has unexpected number of dimensions:" << inputTensorInfo.GetNumDimensions()
1546  << " shape:" << inputTensorInfo.GetShape() << " "
1547  << CHECK_LOCATION().AsString();
1548  throw ParseException(ss.str());
1549  }
1550 
1551  TensorShape outShape = TensorShape(static_cast<unsigned int>(outputDims.size()),
1552  outputDims.data());
1553 
1554  // we need to preserve the tensor type and the quantization data as well
1555  TensorInfo outTensorInfo = inputTensorInfo;
1556  outTensorInfo.SetShape(outShape);
1557 
1558  return outTensorInfo;
1559 }
const TensorShape & GetShape() const
Definition: Tensor.hpp:187
void SetShape(const TensorShape &newShape)
Definition: Tensor.hpp:189
#define CHECK_VALID_SIZE(ACTUAL,...)
#define CHECK_LOCATION()
Definition: Exceptions.hpp:197
unsigned int GetNumDimensions() const
Definition: Tensor.hpp:191

The documentation for this class was generated from the following files: