ArmNN
 20.08
TfLiteParser Class Reference

#include <TfLiteParser.hpp>

Inheritance diagram for TfLiteParser:
ITfLiteParser

Public Types

using ModelPtr = std::unique_ptr< tflite::ModelT >
 
using SubgraphPtr = std::unique_ptr< tflite::SubGraphT >
 
using OperatorPtr = std::unique_ptr< tflite::OperatorT >
 
using OperatorCodePtr = std::unique_ptr< tflite::OperatorCodeT >
 
using TensorPtr = std::unique_ptr< tflite::TensorT >
 
using TensorRawPtr = const tflite::TensorT *
 
using TensorRawPtrVector = std::vector< TensorRawPtr >
 
using TensorIdRawPtr = std::pair< size_t, TensorRawPtr >
 
using TensorIdRawPtrVector = std::vector< TensorIdRawPtr >
 
using BufferPtr = std::unique_ptr< tflite::BufferT >
 
using BufferRawPtr = const tflite::BufferT *
 

Public Member Functions

virtual armnn::INetworkPtr CreateNetworkFromBinaryFile (const char *graphFile) override
 Create the network from a flatbuffers binary file on disk. More...
 
virtual armnn::INetworkPtr CreateNetworkFromBinary (const std::vector< uint8_t > &binaryContent) override
 Create the network from a flatbuffers binary. More...
 
virtual BindingPointInfo GetNetworkInputBindingInfo (size_t subgraphId, const std::string &name) const override
 Retrieve binding info (layer id and tensor info) for the network input identified by the given layer name and subgraph id. More...
 
virtual BindingPointInfo GetNetworkOutputBindingInfo (size_t subgraphId, const std::string &name) const override
 Retrieve binding info (layer id and tensor info) for the network output identified by the given layer name and subgraph id. More...
 
virtual size_t GetSubgraphCount () const override
 Return the number of subgraphs in the parsed model. More...
 
virtual std::vector< std::string > GetSubgraphInputTensorNames (size_t subgraphId) const override
 Return the input tensor names for a given subgraph. More...
 
virtual std::vector< std::string > GetSubgraphOutputTensorNames (size_t subgraphId) const override
 Return the output tensor names for a given subgraph. More...
 
 TfLiteParser (const armnn::Optional< ITfLiteParser::TfLiteParserOptions > &options=armnn::EmptyOptional())
 
virtual ~TfLiteParser ()
 

Static Public Member Functions

static ModelPtr LoadModelFromFile (const char *fileName)
 
static ModelPtr LoadModelFromBinary (const uint8_t *binaryContent, size_t len)
 
static TensorRawPtrVector GetInputs (const ModelPtr &model, size_t subgraphIndex, size_t operatorIndex)
 
static TensorRawPtrVector GetOutputs (const ModelPtr &model, size_t subgraphIndex, size_t operatorIndex)
 
static TensorIdRawPtrVector GetSubgraphInputs (const ModelPtr &model, size_t subgraphIndex)
 
static TensorIdRawPtrVector GetSubgraphOutputs (const ModelPtr &model, size_t subgraphIndex)
 
static std::vector< int32_t > & GetInputTensorIds (const ModelPtr &model, size_t subgraphIndex, size_t operatorIndex)
 
static std::vector< int32_t > & GetOutputTensorIds (const ModelPtr &model, size_t subgraphIndex, size_t operatorIndex)
 
static BufferRawPtr GetBuffer (const ModelPtr &model, size_t bufferIndex)
 
static armnn::TensorInfo OutputShapeOfSqueeze (const std::vector< uint32_t > &squeezeDims, const armnn::TensorInfo &inputTensorInfo)
 
static armnn::TensorInfo OutputShapeOfReshape (const armnn::TensorInfo &inputTensorInfo, const std::vector< int32_t > &targetDimsIn)
 
- Static Public Member Functions inherited from ITfLiteParser
static ITfLiteParser * CreateRaw (const armnn::Optional< TfLiteParserOptions > &options=armnn::EmptyOptional())
 
static ITfLiteParserPtr Create (const armnn::Optional< TfLiteParserOptions > &options=armnn::EmptyOptional())
 
static void Destroy (ITfLiteParser *parser)
 

Additional Inherited Members

- Protected Member Functions inherited from ITfLiteParser
virtual ~ITfLiteParser ()
 

Detailed Description

Definition at line 19 of file TfLiteParser.hpp.

Member Typedef Documentation

◆ BufferPtr

using BufferPtr = std::unique_ptr<tflite::BufferT>

Definition at line 32 of file TfLiteParser.hpp.

◆ BufferRawPtr

using BufferRawPtr = const tflite::BufferT *

Definition at line 33 of file TfLiteParser.hpp.

◆ ModelPtr

using ModelPtr = std::unique_ptr<tflite::ModelT>

Definition at line 23 of file TfLiteParser.hpp.

◆ OperatorCodePtr

using OperatorCodePtr = std::unique_ptr<tflite::OperatorCodeT>

Definition at line 26 of file TfLiteParser.hpp.

◆ OperatorPtr

using OperatorPtr = std::unique_ptr<tflite::OperatorT>

Definition at line 25 of file TfLiteParser.hpp.

◆ SubgraphPtr

using SubgraphPtr = std::unique_ptr<tflite::SubGraphT>

Definition at line 24 of file TfLiteParser.hpp.

◆ TensorIdRawPtr

using TensorIdRawPtr = std::pair<size_t, TensorRawPtr>

Definition at line 30 of file TfLiteParser.hpp.

◆ TensorIdRawPtrVector

using TensorIdRawPtrVector = std::vector<TensorIdRawPtr>

Definition at line 31 of file TfLiteParser.hpp.

◆ TensorPtr

using TensorPtr = std::unique_ptr<tflite::TensorT>

Definition at line 27 of file TfLiteParser.hpp.

◆ TensorRawPtr

using TensorRawPtr = const tflite::TensorT *

Definition at line 28 of file TfLiteParser.hpp.

◆ TensorRawPtrVector

using TensorRawPtrVector = std::vector<TensorRawPtr>

Definition at line 29 of file TfLiteParser.hpp.

Constructor & Destructor Documentation

◆ TfLiteParser()

Definition at line 555 of file TfLiteParser.cpp.

References ARMNN_ASSERT, CHECK_MODEL, CHECKED_NON_NEGATIVE, IOutputSlot::Connect(), IConnectableLayer::GetInputSlot(), IConnectableLayer::GetName(), TensorInfo::GetNumDimensions(), IConnectableLayer::GetOutputSlot(), TensorInfo::GetShape(), ReshapeDescriptor::m_TargetShape, TensorInfo::SetShape(), IOutputSlot::SetTensorInfo(), and armnnDeserializer::ToTensorInfo().

Referenced by ITfLiteParser::CreateRaw(), and TfLiteParser::~TfLiteParser().

556 : m_Options(options)
557 , m_Network(nullptr, nullptr)
558 , m_ParserFunctions(tflite::BuiltinOperator_MAX+1, &TfLiteParser::ParseUnsupportedOperator)
559 {
560  // register supported operators
561  m_ParserFunctions[tflite::BuiltinOperator_ADD] = &TfLiteParser::ParseAdd;
562  m_ParserFunctions[tflite::BuiltinOperator_AVERAGE_POOL_2D] = &TfLiteParser::ParseAveragePool2D;
563  m_ParserFunctions[tflite::BuiltinOperator_BATCH_TO_SPACE_ND] = &TfLiteParser::ParseBatchToSpaceND;
564  m_ParserFunctions[tflite::BuiltinOperator_CONCATENATION] = &TfLiteParser::ParseConcatenation;
565  m_ParserFunctions[tflite::BuiltinOperator_CONV_2D] = &TfLiteParser::ParseConv2D;
566  m_ParserFunctions[tflite::BuiltinOperator_CUSTOM] = &TfLiteParser::ParseCustomOperator;
567  m_ParserFunctions[tflite::BuiltinOperator_DEPTHWISE_CONV_2D] = &TfLiteParser::ParseDepthwiseConv2D;
568  m_ParserFunctions[tflite::BuiltinOperator_DEQUANTIZE] = &TfLiteParser::ParseDequantize;
569  m_ParserFunctions[tflite::BuiltinOperator_EXP] = &TfLiteParser::ParseExp;
570  m_ParserFunctions[tflite::BuiltinOperator_FULLY_CONNECTED] = &TfLiteParser::ParseFullyConnected;
571  m_ParserFunctions[tflite::BuiltinOperator_HARD_SWISH] = &TfLiteParser::ParseHardSwish;
572  m_ParserFunctions[tflite::BuiltinOperator_LEAKY_RELU] = &TfLiteParser::ParseLeakyRelu;
573  m_ParserFunctions[tflite::BuiltinOperator_LOGISTIC] = &TfLiteParser::ParseLogistic;
574  m_ParserFunctions[tflite::BuiltinOperator_L2_NORMALIZATION] = &TfLiteParser::ParseL2Normalization;
575  m_ParserFunctions[tflite::BuiltinOperator_MAX_POOL_2D] = &TfLiteParser::ParseMaxPool2D;
576  m_ParserFunctions[tflite::BuiltinOperator_MAXIMUM] = &TfLiteParser::ParseMaximum;
577  m_ParserFunctions[tflite::BuiltinOperator_MEAN] = &TfLiteParser::ParseMean;
578  m_ParserFunctions[tflite::BuiltinOperator_MINIMUM] = &TfLiteParser::ParseMinimum;
579  m_ParserFunctions[tflite::BuiltinOperator_MUL] = &TfLiteParser::ParseMul;
580  m_ParserFunctions[tflite::BuiltinOperator_NEG] = &TfLiteParser::ParseNeg;
581  m_ParserFunctions[tflite::BuiltinOperator_PACK] = &TfLiteParser::ParsePack;
582  m_ParserFunctions[tflite::BuiltinOperator_PAD] = &TfLiteParser::ParsePad;
583  m_ParserFunctions[tflite::BuiltinOperator_QUANTIZE] = &TfLiteParser::ParseQuantize;
584  m_ParserFunctions[tflite::BuiltinOperator_RELU] = &TfLiteParser::ParseRelu;
585  m_ParserFunctions[tflite::BuiltinOperator_RELU6] = &TfLiteParser::ParseRelu6;
586  m_ParserFunctions[tflite::BuiltinOperator_RESHAPE] = &TfLiteParser::ParseReshape;
587  m_ParserFunctions[tflite::BuiltinOperator_RESIZE_BILINEAR] = &TfLiteParser::ParseResizeBilinear;
588  m_ParserFunctions[tflite::BuiltinOperator_RESIZE_NEAREST_NEIGHBOR] = &TfLiteParser::ParseResizeNearestNeighbor;
589  m_ParserFunctions[tflite::BuiltinOperator_SLICE] = &TfLiteParser::ParseSlice;
590  m_ParserFunctions[tflite::BuiltinOperator_SOFTMAX] = &TfLiteParser::ParseSoftmax;
591  m_ParserFunctions[tflite::BuiltinOperator_SPACE_TO_BATCH_ND] = &TfLiteParser::ParseSpaceToBatchND;
592  m_ParserFunctions[tflite::BuiltinOperator_SPLIT] = &TfLiteParser::ParseSplit;
593  m_ParserFunctions[tflite::BuiltinOperator_SPLIT_V] = &TfLiteParser::ParseSplitV;
594  m_ParserFunctions[tflite::BuiltinOperator_SQUEEZE] = &TfLiteParser::ParseSqueeze;
595  m_ParserFunctions[tflite::BuiltinOperator_STRIDED_SLICE] = &TfLiteParser::ParseStridedSlice;
596  m_ParserFunctions[tflite::BuiltinOperator_SUB] = &TfLiteParser::ParseSub;
597  m_ParserFunctions[tflite::BuiltinOperator_TANH] = &TfLiteParser::ParseTanH;
598  m_ParserFunctions[tflite::BuiltinOperator_TRANSPOSE] = &TfLiteParser::ParseTranspose;
599  m_ParserFunctions[tflite::BuiltinOperator_TRANSPOSE_CONV] = &TfLiteParser::ParseTransposeConv;
600  m_ParserFunctions[tflite::BuiltinOperator_UNPACK] = &TfLiteParser::ParseUnpack;
601  m_ParserFunctions[tflite::BuiltinOperator_DIV] = &TfLiteParser::ParseDiv;
602  // register supported custom operators
603  m_CustomParserFunctions["TFLite_Detection_PostProcess"] = &TfLiteParser::ParseDetectionPostProcess;
604 }

◆ ~TfLiteParser()

Member Function Documentation

◆ CreateNetworkFromBinary()

INetworkPtr CreateNetworkFromBinary ( const std::vector< uint8_t > &  binaryContent)
overridevirtual

Create the network from a flatbuffers binary.

Implements ITfLiteParser.

Definition at line 683 of file TfLiteParser.cpp.

References ARMNN_ASSERT, ARMNN_ASSERT_MSG, ARMNN_LOG, armnnTfParser::CalcPadding(), CHECK_LOCATION, CHECK_MODEL, CHECK_SUPPORTED_FUSED_ACTIVATION, CHECK_TENSOR, CHECK_VALID_SIZE, CHECKED_NON_NEGATIVE, armnn::error, TfLiteParser::GetBuffer(), TensorInfo::GetDataType(), TfLiteParser::GetInputs(), TfLiteParser::GetInputTensorIds(), TensorInfo::GetNumBytes(), TensorInfo::GetNumDimensions(), TensorInfo::GetNumElements(), TfLiteParser::GetOutputs(), IConnectableLayer::GetOutputSlot(), TfLiteParser::GetOutputTensorIds(), TensorInfo::GetShape(), TfLiteParser::LoadModelFromBinary(), SoftmaxDescriptor::m_Beta, Convolution2dDescriptor::m_BiasEnabled, DepthwiseConvolution2dDescriptor::m_BiasEnabled, TransposeConvolution2dDescriptor::m_BiasEnabled, BatchToSpaceNdDescriptor::m_BlockShape, SpaceToBatchNdDescriptor::m_BlockShape, BatchToSpaceNdDescriptor::m_Crops, Pooling2dDescriptor::m_DataLayout, Convolution2dDescriptor::m_DataLayout, DepthwiseConvolution2dDescriptor::m_DataLayout, L2NormalizationDescriptor::m_DataLayout, BatchToSpaceNdDescriptor::m_DataLayout, SpaceToBatchNdDescriptor::m_DataLayout, TransposeConvolution2dDescriptor::m_DataLayout, Convolution2dDescriptor::m_DilationX, DepthwiseConvolution2dDescriptor::m_DilationX, Convolution2dDescriptor::m_DilationY, DepthwiseConvolution2dDescriptor::m_DilationY, ElementwiseUnaryDescriptor::m_Operation, TransposeConvolution2dDescriptor::m_OutputShape, TransposeConvolution2dDescriptor::m_OutputShapeEnabled, Pooling2dDescriptor::m_OutputShapeRounding, Pooling2dDescriptor::m_PadBottom, Convolution2dDescriptor::m_PadBottom, DepthwiseConvolution2dDescriptor::m_PadBottom, TransposeConvolution2dDescriptor::m_PadBottom, Pooling2dDescriptor::m_PaddingMethod, Pooling2dDescriptor::m_PadLeft, Convolution2dDescriptor::m_PadLeft, DepthwiseConvolution2dDescriptor::m_PadLeft, TransposeConvolution2dDescriptor::m_PadLeft, SpaceToBatchNdDescriptor::m_PadList, Pooling2dDescriptor::m_PadRight, Convolution2dDescriptor::m_PadRight, 
DepthwiseConvolution2dDescriptor::m_PadRight, TransposeConvolution2dDescriptor::m_PadRight, Pooling2dDescriptor::m_PadTop, Convolution2dDescriptor::m_PadTop, DepthwiseConvolution2dDescriptor::m_PadTop, TransposeConvolution2dDescriptor::m_PadTop, Pooling2dDescriptor::m_PoolHeight, Pooling2dDescriptor::m_PoolType, Pooling2dDescriptor::m_PoolWidth, Pooling2dDescriptor::m_StrideX, Convolution2dDescriptor::m_StrideX, DepthwiseConvolution2dDescriptor::m_StrideX, TransposeConvolution2dDescriptor::m_StrideX, Pooling2dDescriptor::m_StrideY, Convolution2dDescriptor::m_StrideY, DepthwiseConvolution2dDescriptor::m_StrideY, TransposeConvolution2dDescriptor::m_StrideY, armnn::NHWC, armnn::numeric_cast(), TensorInfo::SetShape(), IOutputSlot::SetTensorInfo(), armnnDeserializer::ToTensorInfo(), and Exception::what().

684 {
685  ResetParser();
686  m_Model = LoadModelFromBinary(binaryContent.data(), binaryContent.size());
687  return CreateNetworkFromModel();
688 }
static ModelPtr LoadModelFromBinary(const uint8_t *binaryContent, size_t len)

◆ CreateNetworkFromBinaryFile()

INetworkPtr CreateNetworkFromBinaryFile ( const char *  graphFile)
overridevirtual

Create the network from a flatbuffers binary file on disk.

Implements ITfLiteParser.

Definition at line 676 of file TfLiteParser.cpp.

References TfLiteParser::LoadModelFromFile().

677 {
678  ResetParser();
679  m_Model = LoadModelFromFile(graphFile);
680  return CreateNetworkFromModel();
681 }
static ModelPtr LoadModelFromFile(const char *fileName)

◆ GetBuffer()

TfLiteParser::BufferRawPtr GetBuffer ( const ModelPtr model,
size_t  bufferIndex 
)
static

◆ GetInputs()

TfLiteParser::TensorRawPtrVector GetInputs ( const ModelPtr model,
size_t  subgraphIndex,
size_t  operatorIndex 
)
static

Definition at line 3084 of file TfLiteParser.cpp.

References CHECK_MODEL, and CHECKED_NON_NEGATIVE.

Referenced by armnnTfLiteParser::ComputeWrappedIndex(), TfLiteParser::CreateNetworkFromBinary(), TfLiteParser::OutputShapeOfReshape(), TfLiteParser::OutputShapeOfSqueeze(), and TfLiteParser::~TfLiteParser().

3087 {
3088  CHECK_MODEL(model, subgraphIndex, operatorIndex);
3089 
3090  const auto & subgraphPtr = model->subgraphs[subgraphIndex];
3091  const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
3092 
3093  size_t inputCount = operatorPtr->inputs.size();
3094  TensorRawPtrVector result(inputCount);
3095  for (size_t i=0; i<inputCount; ++i)
3096  {
3097  uint32_t inputId = CHECKED_NON_NEGATIVE(operatorPtr->inputs[i]);
3098  result[i] = subgraphPtr->tensors[inputId].get();
3099  }
3100  return result;
3101 }
#define CHECK_MODEL(MODEL, SUBGRAPH_INDEX, OPERATOR_INDEX)
std::vector< TensorRawPtr > TensorRawPtrVector
#define CHECKED_NON_NEGATIVE(VALUE)

◆ GetInputTensorIds()

std::vector< int32_t > & GetInputTensorIds ( const ModelPtr model,
size_t  subgraphIndex,
size_t  operatorIndex 
)
static

Definition at line 3156 of file TfLiteParser.cpp.

References CHECK_MODEL.

Referenced by armnnTfLiteParser::ComputeWrappedIndex(), TfLiteParser::CreateNetworkFromBinary(), TfLiteParser::OutputShapeOfReshape(), TfLiteParser::OutputShapeOfSqueeze(), and TfLiteParser::~TfLiteParser().

3159 {
3160  CHECK_MODEL(model, subgraphIndex, operatorIndex);
3161  const auto & subgraphPtr = model->subgraphs[subgraphIndex];
3162  const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
3163  return operatorPtr->inputs;
3164 }
#define CHECK_MODEL(MODEL, SUBGRAPH_INDEX, OPERATOR_INDEX)

◆ GetNetworkInputBindingInfo()

BindingPointInfo GetNetworkInputBindingInfo ( size_t  subgraphId,
const std::string &  name 
) const
overridevirtual

Retrieve binding info (layer id and tensor info) for the network input identified by the given layer name and subgraph id.

Implements ITfLiteParser.

Definition at line 3372 of file TfLiteParser.cpp.

References CHECK_LOCATION, CHECK_SUBGRAPH, TfLiteParser::GetSubgraphInputs(), and armnnDeserializer::ToTensorInfo().

3374 {
3375  CHECK_SUBGRAPH(m_Model, subgraphId);
3376  auto inputs = GetSubgraphInputs(m_Model, subgraphId);
3377  for (auto const & input : inputs)
3378  {
3379  if (input.second->name == name)
3380  {
3381  auto bindingId = GenerateLayerBindingId(subgraphId, input.first);
3382  return std::make_pair(bindingId, ToTensorInfo(input.second));
3383  }
3384  }
3385 
3386  std::stringstream bindings;
3387  for (auto const & input : inputs)
3388  {
3389  bindings << "'" << input.second->name << "' ";
3390  }
3391 
3392  throw ParseException(
3393  boost::str(
3394  boost::format("No input binding found for subgraph:%1% and name:%2%. "
3395  "Possible inputs are: [%3%] %4%") %
3396  subgraphId %
3397  name %
3398  bindings.str() %
3399  CHECK_LOCATION().AsString()));
3400 }
static TensorIdRawPtrVector GetSubgraphInputs(const ModelPtr &model, size_t subgraphIndex)
armnn::TensorInfo ToTensorInfo(Deserializer::TensorRawPtr tensorPtr)
#define CHECK_SUBGRAPH(MODEL, SUBGRAPH_INDEX)
#define CHECK_LOCATION()
Definition: Exceptions.hpp:197

◆ GetNetworkOutputBindingInfo()

BindingPointInfo GetNetworkOutputBindingInfo ( size_t  subgraphId,
const std::string &  name 
) const
overridevirtual

Retrieve binding info (layer id and tensor info) for the network output identified by the given layer name and subgraph id.

Implements ITfLiteParser.

Definition at line 3402 of file TfLiteParser.cpp.

References CHECK_LOCATION, CHECK_SUBGRAPH, TfLiteParser::GetSubgraphOutputs(), and armnnDeserializer::ToTensorInfo().

3404 {
3405  CHECK_SUBGRAPH(m_Model, subgraphId);
3406  auto outputs = GetSubgraphOutputs(m_Model, subgraphId);
3407  for (unsigned int i = 0; i < outputs.size(); ++i)
3408  {
3409  auto const output = outputs[i];
3410  if (output.second->name == name)
3411  {
3412  auto bindingId = GenerateLayerBindingId(subgraphId, output.first);
3413  std::vector<unsigned int> shape = m_OverridenOutputShapes.size() > 0 ?
3414  m_OverridenOutputShapes[i] : AsUnsignedVector(output.second->shape);
3415  return std::make_pair(bindingId, ToTensorInfo(output.second, shape));
3416  }
3417  }
3418 
3419  std::stringstream bindings;
3420  for (auto const & output : outputs)
3421  {
3422  bindings << "'" << output.second->name << "' ";
3423  }
3424 
3425  throw ParseException(
3426  boost::str(
3427  boost::format("No output binding found for subgraph:%1% and name:%2%. "
3428  "Possible outputs are: [%3%] %4%") %
3429  subgraphId %
3430  name %
3431  bindings.str() %
3432  CHECK_LOCATION().AsString()));
3433 }
armnn::TensorInfo ToTensorInfo(Deserializer::TensorRawPtr tensorPtr)
#define CHECK_SUBGRAPH(MODEL, SUBGRAPH_INDEX)
static TensorIdRawPtrVector GetSubgraphOutputs(const ModelPtr &model, size_t subgraphIndex)
#define CHECK_LOCATION()
Definition: Exceptions.hpp:197

◆ GetOutputs()

TfLiteParser::TensorRawPtrVector GetOutputs ( const ModelPtr model,
size_t  subgraphIndex,
size_t  operatorIndex 
)
static

Definition at line 3103 of file TfLiteParser.cpp.

References CHECK_MODEL, CHECK_TENSOR, and CHECKED_NON_NEGATIVE.

Referenced by armnnTfLiteParser::ComputeWrappedIndex(), TfLiteParser::CreateNetworkFromBinary(), TfLiteParser::OutputShapeOfReshape(), TfLiteParser::OutputShapeOfSqueeze(), and TfLiteParser::~TfLiteParser().

3106 {
3107  CHECK_MODEL(model, subgraphIndex, operatorIndex);
3108 
3109  const auto & subgraphPtr = model->subgraphs[subgraphIndex];
3110  const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
3111 
3112  size_t outputCount = operatorPtr->outputs.size();
3113  TensorRawPtrVector result(outputCount);
3114  for (size_t i=0; i<outputCount; ++i)
3115  {
3116  uint32_t outputId = CHECKED_NON_NEGATIVE(operatorPtr->outputs[i]);
3117  CHECK_TENSOR(model, subgraphIndex, outputId);
3118  result[i] = subgraphPtr->tensors[outputId].get();
3119  }
3120  return result;
3121 }
#define CHECK_MODEL(MODEL, SUBGRAPH_INDEX, OPERATOR_INDEX)
std::vector< TensorRawPtr > TensorRawPtrVector
#define CHECK_TENSOR(MODEL, SUBGRAPH_INDEX, TENSOR_INDEX)
#define CHECKED_NON_NEGATIVE(VALUE)

◆ GetOutputTensorIds()

std::vector< int32_t > & GetOutputTensorIds ( const ModelPtr model,
size_t  subgraphIndex,
size_t  operatorIndex 
)
static

Definition at line 3166 of file TfLiteParser.cpp.

References ARMNN_ASSERT, CHECK_LOCATION, CHECK_MODEL, CHECK_SUBGRAPH, IConnectableLayer::GetInputSlot(), IConnectableLayer::GetNumInputSlots(), IConnectableLayer::GetNumOutputSlots(), IConnectableLayer::GetOutputSlot(), TfLiteParser::GetSubgraphInputs(), TfLiteParser::GetSubgraphOutputs(), IOutputSlot::SetTensorInfo(), and armnnDeserializer::ToTensorInfo().

Referenced by armnnTfLiteParser::ComputeWrappedIndex(), TfLiteParser::CreateNetworkFromBinary(), TfLiteParser::OutputShapeOfReshape(), TfLiteParser::OutputShapeOfSqueeze(), and TfLiteParser::~TfLiteParser().

3169 {
3170  CHECK_MODEL(model, subgraphIndex, operatorIndex);
3171  const auto & subgraphPtr = model->subgraphs[subgraphIndex];
3172  const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
3173  return operatorPtr->outputs;
3174 }
#define CHECK_MODEL(MODEL, SUBGRAPH_INDEX, OPERATOR_INDEX)

◆ GetSubgraphCount()

size_t GetSubgraphCount ( ) const
overridevirtual

Return the number of subgraphs in the parsed model.

Implements ITfLiteParser.

Definition at line 3435 of file TfLiteParser.cpp.

3436 {
3437  return m_Model->subgraphs.size();
3438 }

◆ GetSubgraphInputs()

TfLiteParser::TensorIdRawPtrVector GetSubgraphInputs ( const ModelPtr model,
size_t  subgraphIndex 
)
static

Definition at line 3123 of file TfLiteParser.cpp.

References CHECK_SUBGRAPH, CHECK_TENSOR, and CHECKED_NON_NEGATIVE.

Referenced by TfLiteParser::GetNetworkInputBindingInfo(), TfLiteParser::GetOutputTensorIds(), TfLiteParser::GetSubgraphInputTensorNames(), and TfLiteParser::~TfLiteParser().

3125 {
3126  CHECK_SUBGRAPH(model, subgraphIndex);
3127  const auto & subgraphPtr = model->subgraphs[subgraphIndex];
3128 
3129  size_t inputCount = subgraphPtr->inputs.size();
3130  TensorIdRawPtrVector result(inputCount);
3131  for (size_t i=0; i<inputCount; ++i)
3132  {
3133  uint32_t inputId = CHECKED_NON_NEGATIVE(subgraphPtr->inputs[i]);
3134  CHECK_TENSOR(model, subgraphIndex, inputId);
3135  result[i] = std::make_pair(inputId, subgraphPtr->tensors[inputId].get());
3136  }
3137  return result;
3138 }
#define CHECK_TENSOR(MODEL, SUBGRAPH_INDEX, TENSOR_INDEX)
#define CHECK_SUBGRAPH(MODEL, SUBGRAPH_INDEX)
#define CHECKED_NON_NEGATIVE(VALUE)
std::vector< TensorIdRawPtr > TensorIdRawPtrVector

◆ GetSubgraphInputTensorNames()

std::vector< std::string > GetSubgraphInputTensorNames ( size_t  subgraphId) const
overridevirtual

Return the input tensor names for a given subgraph.

Implements ITfLiteParser.

Definition at line 3440 of file TfLiteParser.cpp.

References CHECK_SUBGRAPH, and TfLiteParser::GetSubgraphInputs().

3441 {
3442  CHECK_SUBGRAPH(m_Model, subgraphId);
3443  auto inputs = GetSubgraphInputs(m_Model, subgraphId);
3444  std::vector<std::string> result;
3445  result.reserve(inputs.size());
3446  for (auto const & input : inputs)
3447  {
3448  result.push_back(input.second->name);
3449  }
3450  return result;
3451 }
static TensorIdRawPtrVector GetSubgraphInputs(const ModelPtr &model, size_t subgraphIndex)
#define CHECK_SUBGRAPH(MODEL, SUBGRAPH_INDEX)

◆ GetSubgraphOutputs()

TfLiteParser::TensorIdRawPtrVector GetSubgraphOutputs ( const ModelPtr model,
size_t  subgraphIndex 
)
static

Definition at line 3140 of file TfLiteParser.cpp.

References CHECK_SUBGRAPH, and CHECKED_NON_NEGATIVE.

Referenced by TfLiteParser::GetNetworkOutputBindingInfo(), TfLiteParser::GetOutputTensorIds(), TfLiteParser::GetSubgraphOutputTensorNames(), and TfLiteParser::~TfLiteParser().

3142 {
3143  CHECK_SUBGRAPH(model, subgraphIndex);
3144  const auto & subgraphPtr = model->subgraphs[subgraphIndex];
3145 
3146  size_t outputCount = subgraphPtr->outputs.size();
3147  TensorIdRawPtrVector result(outputCount);
3148  for (size_t i=0; i<outputCount; ++i)
3149  {
3150  uint32_t outputId = CHECKED_NON_NEGATIVE(subgraphPtr->outputs[i]);
3151  result[i] = std::make_pair(outputId, subgraphPtr->tensors[outputId].get());
3152  }
3153  return result;
3154 }
#define CHECK_SUBGRAPH(MODEL, SUBGRAPH_INDEX)
#define CHECKED_NON_NEGATIVE(VALUE)
std::vector< TensorIdRawPtr > TensorIdRawPtrVector

◆ GetSubgraphOutputTensorNames()

std::vector< std::string > GetSubgraphOutputTensorNames ( size_t  subgraphId) const
overridevirtual

Return the output tensor names for a given subgraph.

Implements ITfLiteParser.

Definition at line 3453 of file TfLiteParser.cpp.

References CHECK_SUBGRAPH, and TfLiteParser::GetSubgraphOutputs().

3454 {
3455  CHECK_SUBGRAPH(m_Model, subgraphId);
3456  auto outputs = GetSubgraphOutputs(m_Model, subgraphId);
3457  std::vector<std::string> result;
3458  result.reserve(outputs.size());
3459  for (auto const & output : outputs)
3460  {
3461  result.push_back(output.second->name);
3462  }
3463  return result;
3464 }
#define CHECK_SUBGRAPH(MODEL, SUBGRAPH_INDEX)
static TensorIdRawPtrVector GetSubgraphOutputs(const ModelPtr &model, size_t subgraphIndex)

◆ LoadModelFromBinary()

TfLiteParser::ModelPtr LoadModelFromBinary ( const uint8_t *  binaryContent,
size_t  len 
)
static

Definition at line 3065 of file TfLiteParser.cpp.

References CHECK_LOCATION.

Referenced by TfLiteParser::CreateNetworkFromBinary(), TfLiteParser::LoadModelFromFile(), and TfLiteParser::~TfLiteParser().

3066 {
3067  if (binaryContent == nullptr)
3068  {
3069  throw InvalidArgumentException(boost::str(boost::format("Invalid (null) binary content %1%") %
3070  CHECK_LOCATION().AsString()));
3071  }
3072  flatbuffers::Verifier verifier(binaryContent, len);
3073  if (verifier.VerifyBuffer<tflite::Model>() == false)
3074  {
3075  throw ParseException(
3076  boost::str(boost::format("Buffer doesn't conform to the expected Tensorflow Lite "
3077  "flatbuffers format. size:%1% %2%") %
3078  len %
3079  CHECK_LOCATION().AsString()));
3080  }
3081  return tflite::UnPackModel(binaryContent);
3082 }
#define CHECK_LOCATION()
Definition: Exceptions.hpp:197

◆ LoadModelFromFile()

TfLiteParser::ModelPtr LoadModelFromFile ( const char *  fileName)
static

Definition at line 3041 of file TfLiteParser.cpp.

References CHECK_LOCATION, and TfLiteParser::LoadModelFromBinary().

Referenced by TfLiteParser::CreateNetworkFromBinaryFile(), and TfLiteParser::~TfLiteParser().

3042 {
3043  if (fileName == nullptr)
3044  {
3045  throw InvalidArgumentException(boost::str(boost::format("Invalid (null) file name %1%") %
3046  CHECK_LOCATION().AsString()));
3047  }
3048  std::error_code errorCode;
3049  fs::path pathToFile(fileName);
3050  if (!fs::exists(pathToFile, errorCode))
3051  {
3052  std::string locationString = CHECK_LOCATION().AsString();
3053  std::string msg = boost::str(boost::format("Cannot find the file (%1%) errorCode: %2% %3%") %
3054  fileName %
3055  errorCode %
3056  locationString);
3057  throw FileNotFoundException(msg);
3058  }
3059  std::ifstream file(fileName, std::ios::binary);
3060  std::string fileContent((std::istreambuf_iterator<char>(file)), std::istreambuf_iterator<char>());
3061  return LoadModelFromBinary(reinterpret_cast<const uint8_t *>(fileContent.c_str()),
3062  fileContent.size());
3063 }
static ModelPtr LoadModelFromBinary(const uint8_t *binaryContent, size_t len)
#define CHECK_LOCATION()
Definition: Exceptions.hpp:197

◆ OutputShapeOfReshape()

armnn::TensorInfo OutputShapeOfReshape ( const armnn::TensorInfo inputTensorInfo,
const std::vector< int32_t > &  targetDimsIn 
)
static

Definition at line 2151 of file TfLiteParser.cpp.

References ARMNN_ASSERT, ARMNN_THROW_PARSE_EXCEPTION, CHECK_LOCATION, CHECK_MODEL, CHECK_SUPPORTED_FUSED_ACTIVATION, CHECK_VALID_SIZE, CHECKED_NON_NEGATIVE, armnnDeserializer::CheckShape(), IOutputSlot::Connect(), TfLiteParser::GetBuffer(), TensorInfo::GetDataType(), TfLiteParser::GetInputs(), IConnectableLayer::GetInputSlot(), TfLiteParser::GetInputTensorIds(), IConnectableLayer::GetName(), TensorInfo::GetNumBytes(), TensorInfo::GetNumDimensions(), TensorInfo::GetNumElements(), IConnectableLayer::GetNumOutputSlots(), TfLiteParser::GetOutputs(), IConnectableLayer::GetOutputSlot(), TfLiteParser::GetOutputTensorIds(), TensorInfo::GetQuantizationOffset(), TensorInfo::GetQuantizationScale(), TensorInfo::GetShape(), StackDescriptor::m_Axis, FullyConnectedDescriptor::m_BiasEnabled, DetectionPostProcessDescriptor::m_DetectionsPerClass, StackDescriptor::m_InputShape, DetectionPostProcessDescriptor::m_MaxClassesPerDetection, DetectionPostProcessDescriptor::m_MaxDetections, ResizeDescriptor::m_Method, DetectionPostProcessDescriptor::m_NmsIouThreshold, DetectionPostProcessDescriptor::m_NmsScoreThreshold, DetectionPostProcessDescriptor::m_NumClasses, StackDescriptor::m_NumInputs, DetectionPostProcessDescriptor::m_ScaleH, DetectionPostProcessDescriptor::m_ScaleW, DetectionPostProcessDescriptor::m_ScaleX, DetectionPostProcessDescriptor::m_ScaleY, ReshapeDescriptor::m_TargetShape, FullyConnectedDescriptor::m_TransposeWeightMatrix, DetectionPostProcessDescriptor::m_UseRegularNms, armnn::MaxNumOfTensorDimensions, armnn::NHWC, armnn::numeric_cast(), armnnUtils::ProcessConcatInputTensorInfo(), OriginsDescriptor::SetConcatAxis(), TensorInfo::SetShape(), IOutputSlot::SetTensorInfo(), ViewsDescriptor::SetViewOriginCoord(), ViewsDescriptor::SetViewSize(), and armnnDeserializer::ToTensorInfo().

Referenced by TfLiteParser::~TfLiteParser().

2153 {
2154  std::vector<unsigned int> outputDims(targetDimsIn.begin(), targetDimsIn.end());
2155  const auto stretchDim = std::find(targetDimsIn.begin(), targetDimsIn.end(), -1);
2156 
2157  if (stretchDim != targetDimsIn.end())
2158  {
2159  if (std::find(std::next(stretchDim), targetDimsIn.end(), -1) != targetDimsIn.end())
2160  {
2161  throw ParseException(
2162  boost::str(
2163  boost::format("At most one component of shape can be -1 %1%") % CHECK_LOCATION().AsString()));
2164  }
2165 
2166  auto targetNumElements =
2167  boost::numeric_cast<unsigned int>(
2168  std::accumulate(targetDimsIn.begin(), targetDimsIn.end(), -1, std::multiplies<int32_t>()));
2169 
2170  auto stretchIndex = static_cast<size_t>(std::distance(targetDimsIn.begin(), stretchDim));
2171  outputDims[stretchIndex] = inputTensorInfo.GetNumElements() / targetNumElements;
2172  }
2173 
2174  TensorShape outputShape = TensorShape(static_cast<unsigned int>(outputDims.size()), outputDims.data());
2175 
2176  TensorInfo reshapeInfo = inputTensorInfo;
2177  reshapeInfo.SetShape(outputShape);
2178 
2179  return reshapeInfo;
2180 }
void SetShape(const TensorShape &newShape)
Definition: Tensor.hpp:189
std::enable_if_t< std::is_unsigned< Source >::value &&std::is_unsigned< Dest >::value, Dest > numeric_cast(Source source)
Definition: NumericCast.hpp:33
#define CHECK_LOCATION()
Definition: Exceptions.hpp:197
unsigned int GetNumElements() const
Definition: Tensor.hpp:192

◆ OutputShapeOfSqueeze()

armnn::TensorInfo OutputShapeOfSqueeze ( const std::vector< uint32_t > &  squeezeDimsIn,
const armnn::TensorInfo inputTensorInfo 
)
static

Definition at line 1625 of file TfLiteParser.cpp.

References ARMNN_ASSERT, CHECK_LOCATION, CHECK_MODEL, CHECK_VALID_SIZE, TfLiteParser::GetBuffer(), TfLiteParser::GetInputs(), TfLiteParser::GetInputTensorIds(), TensorInfo::GetNumBytes(), TensorInfo::GetNumDimensions(), TensorInfo::GetNumElements(), TfLiteParser::GetOutputs(), IConnectableLayer::GetOutputSlot(), TfLiteParser::GetOutputTensorIds(), TensorInfo::GetShape(), armnn::IgnoreUnused(), ActivationDescriptor::m_A, MeanDescriptor::m_Axis, ActivationDescriptor::m_B, StridedSliceDescriptor::m_Begin, StridedSliceDescriptor::m_BeginMask, StridedSliceDescriptor::m_DataLayout, StridedSliceDescriptor::m_EllipsisMask, StridedSliceDescriptor::m_End, StridedSliceDescriptor::m_EndMask, ActivationDescriptor::m_Function, MeanDescriptor::m_KeepDims, StridedSliceDescriptor::m_NewAxisMask, PadDescriptor::m_PadList, StridedSliceDescriptor::m_ShrinkAxisMask, StridedSliceDescriptor::m_Stride, ReshapeDescriptor::m_TargetShape, armnn::Neg, armnn::NHWC, TensorInfo::SetShape(), IOutputSlot::SetTensorInfo(), and armnnDeserializer::ToTensorInfo().

Referenced by BOOST_FIXTURE_TEST_CASE(), and TfLiteParser::~TfLiteParser().

1627 {
1628  CHECK_VALID_SIZE(squeezeDimsIn.size(), 0, 1, 2, 3, 4);
1629  std::vector<uint32_t> squeezeDims = squeezeDimsIn;
1630  static const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };
1631 
1632  if (inputTensorInfo.GetNumDimensions() > 4)
1633  {
1634  std::stringstream ss;
1635  ss << "Input tensor has unexpected number of dimensions:" << inputTensorInfo.GetNumDimensions()
1636  << " shape:" << inputTensorInfo.GetShape() << " "
1637  << CHECK_LOCATION().AsString();
1638  throw ParseException(ss.str());
1639  }
1640 
1641  if (squeezeDims.empty())
1642  {
1643  squeezeDims.assign(dimensionSequence,
1644  dimensionSequence+inputTensorInfo.GetNumDimensions());
1645  }
1646 
1647  std::vector<uint32_t> outputDims;
1648  for(unsigned int i = 0; i < inputTensorInfo.GetNumDimensions(); i++)
1649  {
1650  bool skipSqueeze = (std::find(squeezeDims.begin(), squeezeDims.end(), i) == squeezeDims.end());
1651  auto currentDimension = inputTensorInfo.GetShape()[i];
1652  if (skipSqueeze || currentDimension != 1)
1653  {
1654  outputDims.push_back(currentDimension);
1655  }
1656  }
1657 
1658  if (outputDims.size() > 4)
1659  {
1660  std::stringstream ss;
1661  ss << "Output tensor has unexpected number of dimensions:" << inputTensorInfo.GetNumDimensions()
1662  << " shape:" << inputTensorInfo.GetShape() << " "
1663  << CHECK_LOCATION().AsString();
1664  throw ParseException(ss.str());
1665  }
1666 
1667  TensorShape outShape = TensorShape(static_cast<unsigned int>(outputDims.size()),
1668  outputDims.data());
1669 
1670  // we need to preserve the tensor type and the quantization data as well
1671  TensorInfo outTensorInfo = inputTensorInfo;
1672  outTensorInfo.SetShape(outShape);
1673 
1674  return outTensorInfo;
1675 }
const TensorShape & GetShape() const
Definition: Tensor.hpp:187
void SetShape(const TensorShape &newShape)
Definition: Tensor.hpp:189
#define CHECK_VALID_SIZE(ACTUAL,...)
#define CHECK_LOCATION()
Definition: Exceptions.hpp:197
unsigned int GetNumDimensions() const
Definition: Tensor.hpp:191

The documentation for this class was generated from the following files: