ArmNN
 20.05
TfLiteParser Class Reference

#include <TfLiteParser.hpp>

Inheritance diagram for TfLiteParser:
ITfLiteParser

Public Types

using ModelPtr = std::unique_ptr< tflite::ModelT >
 
using SubgraphPtr = std::unique_ptr< tflite::SubGraphT >
 
using OperatorPtr = std::unique_ptr< tflite::OperatorT >
 
using OperatorCodePtr = std::unique_ptr< tflite::OperatorCodeT >
 
using TensorPtr = std::unique_ptr< tflite::TensorT >
 
using TensorRawPtr = const tflite::TensorT *
 
using TensorRawPtrVector = std::vector< TensorRawPtr >
 
using TensorIdRawPtr = std::pair< size_t, TensorRawPtr >
 
using TensorIdRawPtrVector = std::vector< TensorIdRawPtr >
 
using BufferPtr = std::unique_ptr< tflite::BufferT >
 
using BufferRawPtr = const tflite::BufferT *
 

Public Member Functions

virtual armnn::INetworkPtr CreateNetworkFromBinaryFile (const char *graphFile) override
 Create the network from a flatbuffers binary file on disk. More...
 
virtual armnn::INetworkPtr CreateNetworkFromBinary (const std::vector< uint8_t > &binaryContent) override
 Create the network from a flatbuffers binary. More...
 
virtual BindingPointInfo GetNetworkInputBindingInfo (size_t subgraphId, const std::string &name) const override
 Retrieve binding info (layer id and tensor info) for the network input identified by the given layer name and subgraph id. More...
 
virtual BindingPointInfo GetNetworkOutputBindingInfo (size_t subgraphId, const std::string &name) const override
 Retrieve binding info (layer id and tensor info) for the network output identified by the given layer name and subgraph id. More...
 
virtual size_t GetSubgraphCount () const override
 Return the number of subgraphs in the parsed model. More...
 
virtual std::vector< std::string > GetSubgraphInputTensorNames (size_t subgraphId) const override
 Return the input tensor names for a given subgraph. More...
 
virtual std::vector< std::string > GetSubgraphOutputTensorNames (size_t subgraphId) const override
 Return the output tensor names for a given subgraph. More...
 
 TfLiteParser (const armnn::Optional< ITfLiteParser::TfLiteParserOptions > &options=armnn::EmptyOptional())
 
virtual ~TfLiteParser ()
 

Static Public Member Functions

static ModelPtr LoadModelFromFile (const char *fileName)
 
static ModelPtr LoadModelFromBinary (const uint8_t *binaryContent, size_t len)
 
static TensorRawPtrVector GetInputs (const ModelPtr &model, size_t subgraphIndex, size_t operatorIndex)
 
static TensorRawPtrVector GetOutputs (const ModelPtr &model, size_t subgraphIndex, size_t operatorIndex)
 
static TensorIdRawPtrVector GetSubgraphInputs (const ModelPtr &model, size_t subgraphIndex)
 
static TensorIdRawPtrVector GetSubgraphOutputs (const ModelPtr &model, size_t subgraphIndex)
 
static std::vector< int32_t > & GetInputTensorIds (const ModelPtr &model, size_t subgraphIndex, size_t operatorIndex)
 
static std::vector< int32_t > & GetOutputTensorIds (const ModelPtr &model, size_t subgraphIndex, size_t operatorIndex)
 
static BufferRawPtr GetBuffer (const ModelPtr &model, size_t bufferIndex)
 
static armnn::TensorInfo OutputShapeOfSqueeze (const std::vector< uint32_t > &squeezeDims, const armnn::TensorInfo &inputTensorInfo)
 
static armnn::TensorInfo OutputShapeOfReshape (const armnn::TensorInfo &inputTensorInfo, const std::vector< int32_t > &targetDimsIn)
 
- Static Public Member Functions inherited from ITfLiteParser
static ITfLiteParser * CreateRaw (const armnn::Optional< TfLiteParserOptions > &options=armnn::EmptyOptional())
 
static ITfLiteParserPtr Create (const armnn::Optional< TfLiteParserOptions > &options=armnn::EmptyOptional())
 
static void Destroy (ITfLiteParser *parser)
 

Additional Inherited Members

- Protected Member Functions inherited from ITfLiteParser
virtual ~ITfLiteParser ()
 

Detailed Description

Definition at line 19 of file TfLiteParser.hpp.

Member Typedef Documentation

◆ BufferPtr

using BufferPtr = std::unique_ptr<tflite::BufferT>

Definition at line 32 of file TfLiteParser.hpp.

◆ BufferRawPtr

using BufferRawPtr = const tflite::BufferT *

Definition at line 33 of file TfLiteParser.hpp.

◆ ModelPtr

using ModelPtr = std::unique_ptr<tflite::ModelT>

Definition at line 23 of file TfLiteParser.hpp.

◆ OperatorCodePtr

using OperatorCodePtr = std::unique_ptr<tflite::OperatorCodeT>

Definition at line 26 of file TfLiteParser.hpp.

◆ OperatorPtr

using OperatorPtr = std::unique_ptr<tflite::OperatorT>

Definition at line 25 of file TfLiteParser.hpp.

◆ SubgraphPtr

using SubgraphPtr = std::unique_ptr<tflite::SubGraphT>

Definition at line 24 of file TfLiteParser.hpp.

◆ TensorIdRawPtr

using TensorIdRawPtr = std::pair<size_t, TensorRawPtr>

Definition at line 30 of file TfLiteParser.hpp.

◆ TensorIdRawPtrVector

using TensorIdRawPtrVector = std::vector<TensorIdRawPtr>

Definition at line 31 of file TfLiteParser.hpp.

◆ TensorPtr

using TensorPtr = std::unique_ptr<tflite::TensorT>

Definition at line 27 of file TfLiteParser.hpp.

◆ TensorRawPtr

using TensorRawPtr = const tflite::TensorT *

Definition at line 28 of file TfLiteParser.hpp.

◆ TensorRawPtrVector

using TensorRawPtrVector = std::vector<TensorRawPtr>

Definition at line 29 of file TfLiteParser.hpp.

Constructor & Destructor Documentation

◆ TfLiteParser()

Definition at line 489 of file TfLiteParser.cpp.

References ARMNN_ASSERT, CHECK_MODEL, CHECKED_NON_NEGATIVE, IOutputSlot::Connect(), IConnectableLayer::GetInputSlot(), IConnectableLayer::GetName(), TensorInfo::GetNumDimensions(), IConnectableLayer::GetOutputSlot(), TensorInfo::GetShape(), ReshapeDescriptor::m_TargetShape, TensorInfo::SetShape(), IOutputSlot::SetTensorInfo(), and armnnDeserializer::ToTensorInfo().

Referenced by ITfLiteParser::CreateRaw(), and TfLiteParser::~TfLiteParser().

490 : m_Options(options)
491 , m_Network(nullptr, nullptr)
492 , m_ParserFunctions(tflite::BuiltinOperator_MAX+1, &TfLiteParser::ParseUnsupportedOperator)
493 {
494  // register supported operators
495  m_ParserFunctions[tflite::BuiltinOperator_ADD] = &TfLiteParser::ParseAdd;
496  m_ParserFunctions[tflite::BuiltinOperator_AVERAGE_POOL_2D] = &TfLiteParser::ParseAveragePool2D;
497  m_ParserFunctions[tflite::BuiltinOperator_BATCH_TO_SPACE_ND] = &TfLiteParser::ParseBatchToSpaceND;
498  m_ParserFunctions[tflite::BuiltinOperator_CONCATENATION] = &TfLiteParser::ParseConcatenation;
499  m_ParserFunctions[tflite::BuiltinOperator_CONV_2D] = &TfLiteParser::ParseConv2D;
500  m_ParserFunctions[tflite::BuiltinOperator_CUSTOM] = &TfLiteParser::ParseCustomOperator;
501  m_ParserFunctions[tflite::BuiltinOperator_DEPTHWISE_CONV_2D] = &TfLiteParser::ParseDepthwiseConv2D;
502  m_ParserFunctions[tflite::BuiltinOperator_DEQUANTIZE] = &TfLiteParser::ParseDequantize;
503  m_ParserFunctions[tflite::BuiltinOperator_EXP] = &TfLiteParser::ParseExp;
504  m_ParserFunctions[tflite::BuiltinOperator_FULLY_CONNECTED] = &TfLiteParser::ParseFullyConnected;
505  m_ParserFunctions[tflite::BuiltinOperator_LOGISTIC] = &TfLiteParser::ParseLogistic;
506  m_ParserFunctions[tflite::BuiltinOperator_L2_NORMALIZATION] = &TfLiteParser::ParseL2Normalization;
507  m_ParserFunctions[tflite::BuiltinOperator_MAX_POOL_2D] = &TfLiteParser::ParseMaxPool2D;
508  m_ParserFunctions[tflite::BuiltinOperator_MAXIMUM] = &TfLiteParser::ParseMaximum;
509  m_ParserFunctions[tflite::BuiltinOperator_MEAN] = &TfLiteParser::ParseMean;
510  m_ParserFunctions[tflite::BuiltinOperator_MINIMUM] = &TfLiteParser::ParseMinimum;
511  m_ParserFunctions[tflite::BuiltinOperator_MUL] = &TfLiteParser::ParseMul;
512  m_ParserFunctions[tflite::BuiltinOperator_PACK] = &TfLiteParser::ParsePack;
513  m_ParserFunctions[tflite::BuiltinOperator_PAD] = &TfLiteParser::ParsePad;
514  m_ParserFunctions[tflite::BuiltinOperator_QUANTIZE] = &TfLiteParser::ParseQuantize;
515  m_ParserFunctions[tflite::BuiltinOperator_RELU] = &TfLiteParser::ParseRelu;
516  m_ParserFunctions[tflite::BuiltinOperator_RELU6] = &TfLiteParser::ParseRelu6;
517  m_ParserFunctions[tflite::BuiltinOperator_RESHAPE] = &TfLiteParser::ParseReshape;
518  m_ParserFunctions[tflite::BuiltinOperator_RESIZE_BILINEAR] = &TfLiteParser::ParseResizeBilinear;
519  m_ParserFunctions[tflite::BuiltinOperator_RESIZE_NEAREST_NEIGHBOR] = &TfLiteParser::ParseResizeNearestNeighbor;
520  m_ParserFunctions[tflite::BuiltinOperator_SLICE] = &TfLiteParser::ParseSlice;
521  m_ParserFunctions[tflite::BuiltinOperator_SOFTMAX] = &TfLiteParser::ParseSoftmax;
522  m_ParserFunctions[tflite::BuiltinOperator_SPACE_TO_BATCH_ND] = &TfLiteParser::ParseSpaceToBatchND;
523  m_ParserFunctions[tflite::BuiltinOperator_SPLIT] = &TfLiteParser::ParseSplit;
524  m_ParserFunctions[tflite::BuiltinOperator_SPLIT_V] = &TfLiteParser::ParseSplitV;
525  m_ParserFunctions[tflite::BuiltinOperator_SQUEEZE] = &TfLiteParser::ParseSqueeze;
526  m_ParserFunctions[tflite::BuiltinOperator_STRIDED_SLICE] = &TfLiteParser::ParseStridedSlice;
527  m_ParserFunctions[tflite::BuiltinOperator_SUB] = &TfLiteParser::ParseSub;
528  m_ParserFunctions[tflite::BuiltinOperator_TANH] = &TfLiteParser::ParseTanH;
529  m_ParserFunctions[tflite::BuiltinOperator_TRANSPOSE] = &TfLiteParser::ParseTranspose;
530  m_ParserFunctions[tflite::BuiltinOperator_TRANSPOSE_CONV] = &TfLiteParser::ParseTransposeConv;
531  m_ParserFunctions[tflite::BuiltinOperator_UNPACK] = &TfLiteParser::ParseUnpack;
532 
533  // register supported custom operators
534  m_CustomParserFunctions["TFLite_Detection_PostProcess"] = &TfLiteParser::ParseDetectionPostProcess;
535 }

◆ ~TfLiteParser()

Member Function Documentation

◆ CreateNetworkFromBinary()

INetworkPtr CreateNetworkFromBinary ( const std::vector< uint8_t > &  binaryContent)
overridevirtual

Create the network from a flatbuffers binary.

Implements ITfLiteParser.

Definition at line 608 of file TfLiteParser.cpp.

References ARMNN_ASSERT, ARMNN_ASSERT_MSG, ARMNN_LOG, armnnTfParser::CalcPadding(), CHECK_LOCATION, CHECK_MODEL, CHECK_SUPPORTED_FUSED_ACTIVATION, CHECK_TENSOR, CHECK_VALID_SIZE, CHECKED_NON_NEGATIVE, armnn::error, TfLiteParser::GetBuffer(), TfLiteParser::GetInputs(), TfLiteParser::GetInputTensorIds(), TensorInfo::GetNumBytes(), TensorInfo::GetNumDimensions(), TensorInfo::GetNumElements(), TfLiteParser::GetOutputs(), IConnectableLayer::GetOutputSlot(), TfLiteParser::GetOutputTensorIds(), TensorInfo::GetShape(), TfLiteParser::LoadModelFromBinary(), SoftmaxDescriptor::m_Beta, Convolution2dDescriptor::m_BiasEnabled, DepthwiseConvolution2dDescriptor::m_BiasEnabled, TransposeConvolution2dDescriptor::m_BiasEnabled, BatchToSpaceNdDescriptor::m_BlockShape, SpaceToBatchNdDescriptor::m_BlockShape, BatchToSpaceNdDescriptor::m_Crops, Pooling2dDescriptor::m_DataLayout, Convolution2dDescriptor::m_DataLayout, DepthwiseConvolution2dDescriptor::m_DataLayout, L2NormalizationDescriptor::m_DataLayout, BatchToSpaceNdDescriptor::m_DataLayout, SpaceToBatchNdDescriptor::m_DataLayout, TransposeConvolution2dDescriptor::m_DataLayout, Convolution2dDescriptor::m_DilationX, DepthwiseConvolution2dDescriptor::m_DilationX, Convolution2dDescriptor::m_DilationY, DepthwiseConvolution2dDescriptor::m_DilationY, ElementwiseUnaryDescriptor::m_Operation, Pooling2dDescriptor::m_OutputShapeRounding, Pooling2dDescriptor::m_PadBottom, Convolution2dDescriptor::m_PadBottom, DepthwiseConvolution2dDescriptor::m_PadBottom, TransposeConvolution2dDescriptor::m_PadBottom, Pooling2dDescriptor::m_PaddingMethod, Pooling2dDescriptor::m_PadLeft, Convolution2dDescriptor::m_PadLeft, DepthwiseConvolution2dDescriptor::m_PadLeft, TransposeConvolution2dDescriptor::m_PadLeft, SpaceToBatchNdDescriptor::m_PadList, Pooling2dDescriptor::m_PadRight, Convolution2dDescriptor::m_PadRight, DepthwiseConvolution2dDescriptor::m_PadRight, TransposeConvolution2dDescriptor::m_PadRight, Pooling2dDescriptor::m_PadTop, 
Convolution2dDescriptor::m_PadTop, DepthwiseConvolution2dDescriptor::m_PadTop, TransposeConvolution2dDescriptor::m_PadTop, Pooling2dDescriptor::m_PoolHeight, Pooling2dDescriptor::m_PoolType, Pooling2dDescriptor::m_PoolWidth, Pooling2dDescriptor::m_StrideX, Convolution2dDescriptor::m_StrideX, DepthwiseConvolution2dDescriptor::m_StrideX, TransposeConvolution2dDescriptor::m_StrideX, Pooling2dDescriptor::m_StrideY, Convolution2dDescriptor::m_StrideY, DepthwiseConvolution2dDescriptor::m_StrideY, TransposeConvolution2dDescriptor::m_StrideY, armnn::NHWC, armnn::numeric_cast(), options, TensorInfo::SetShape(), IOutputSlot::SetTensorInfo(), armnnDeserializer::ToTensorInfo(), and Exception::what().

609 {
610  ResetParser();
611  m_Model = LoadModelFromBinary(binaryContent.data(), binaryContent.size());
612  return CreateNetworkFromModel();
613 }
static ModelPtr LoadModelFromBinary(const uint8_t *binaryContent, size_t len)

◆ CreateNetworkFromBinaryFile()

INetworkPtr CreateNetworkFromBinaryFile ( const char *  graphFile)
overridevirtual

Create the network from a flatbuffers binary file on disk.

Implements ITfLiteParser.

Definition at line 601 of file TfLiteParser.cpp.

References TfLiteParser::LoadModelFromFile().

602 {
603  ResetParser();
604  m_Model = LoadModelFromFile(graphFile);
605  return CreateNetworkFromModel();
606 }
static ModelPtr LoadModelFromFile(const char *fileName)

◆ GetBuffer()

TfLiteParser::BufferRawPtr GetBuffer ( const ModelPtr &  model,
size_t  bufferIndex 
)
static

◆ GetInputs()

TfLiteParser::TensorRawPtrVector GetInputs ( const ModelPtr &  model,
size_t  subgraphIndex,
size_t  operatorIndex 
)
static

Definition at line 2852 of file TfLiteParser.cpp.

References CHECK_MODEL, and CHECKED_NON_NEGATIVE.

Referenced by armnnTfLiteParser::ComputeWrappedIndex(), TfLiteParser::CreateNetworkFromBinary(), TfLiteParser::OutputShapeOfReshape(), TfLiteParser::OutputShapeOfSqueeze(), and TfLiteParser::~TfLiteParser().

2855 {
2856  CHECK_MODEL(model, subgraphIndex, operatorIndex);
2857 
2858  const auto & subgraphPtr = model->subgraphs[subgraphIndex];
2859  const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
2860 
2861  size_t inputCount = operatorPtr->inputs.size();
2862  TensorRawPtrVector result(inputCount);
2863  for (size_t i=0; i<inputCount; ++i)
2864  {
2865  uint32_t inputId = CHECKED_NON_NEGATIVE(operatorPtr->inputs[i]);
2866  result[i] = subgraphPtr->tensors[inputId].get();
2867  }
2868  return result;
2869 }
#define CHECK_MODEL(MODEL, SUBGRAPH_INDEX, OPERATOR_INDEX)
std::vector< TensorRawPtr > TensorRawPtrVector
#define CHECKED_NON_NEGATIVE(VALUE)

◆ GetInputTensorIds()

std::vector< int32_t > & GetInputTensorIds ( const ModelPtr &  model,
size_t  subgraphIndex,
size_t  operatorIndex 
)
static

Definition at line 2924 of file TfLiteParser.cpp.

References CHECK_MODEL.

Referenced by armnnTfLiteParser::ComputeWrappedIndex(), TfLiteParser::CreateNetworkFromBinary(), TfLiteParser::OutputShapeOfReshape(), TfLiteParser::OutputShapeOfSqueeze(), and TfLiteParser::~TfLiteParser().

2927 {
2928  CHECK_MODEL(model, subgraphIndex, operatorIndex);
2929  const auto & subgraphPtr = model->subgraphs[subgraphIndex];
2930  const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
2931  return operatorPtr->inputs;
2932 }
#define CHECK_MODEL(MODEL, SUBGRAPH_INDEX, OPERATOR_INDEX)

◆ GetNetworkInputBindingInfo()

BindingPointInfo GetNetworkInputBindingInfo ( size_t  subgraphId,
const std::string &  name 
) const
overridevirtual

Retrieve binding info (layer id and tensor info) for the network input identified by the given layer name and subgraph id.

Implements ITfLiteParser.

Definition at line 3140 of file TfLiteParser.cpp.

References CHECK_LOCATION, CHECK_SUBGRAPH, TfLiteParser::GetSubgraphInputs(), and armnnDeserializer::ToTensorInfo().

3142 {
3143  CHECK_SUBGRAPH(m_Model, subgraphId);
3144  auto inputs = GetSubgraphInputs(m_Model, subgraphId);
3145  for (auto const & input : inputs)
3146  {
3147  if (input.second->name == name)
3148  {
3149  auto bindingId = GenerateLayerBindingId(subgraphId, input.first);
3150  return std::make_pair(bindingId, ToTensorInfo(input.second));
3151  }
3152  }
3153 
3154  std::stringstream bindings;
3155  for (auto const & input : inputs)
3156  {
3157  bindings << "'" << input.second->name << "' ";
3158  }
3159 
3160  throw ParseException(
3161  boost::str(
3162  boost::format("No input binding found for subgraph:%1% and name:%2%. "
3163  "Possible inputs are: [%3%] %4%") %
3164  subgraphId %
3165  name %
3166  bindings.str() %
3167  CHECK_LOCATION().AsString()));
3168 }
static TensorIdRawPtrVector GetSubgraphInputs(const ModelPtr &model, size_t subgraphIndex)
armnn::TensorInfo ToTensorInfo(Deserializer::TensorRawPtr tensorPtr)
#define CHECK_SUBGRAPH(MODEL, SUBGRAPH_INDEX)
#define CHECK_LOCATION()
Definition: Exceptions.hpp:192

◆ GetNetworkOutputBindingInfo()

BindingPointInfo GetNetworkOutputBindingInfo ( size_t  subgraphId,
const std::string &  name 
) const
overridevirtual

Retrieve binding info (layer id and tensor info) for the network output identified by the given layer name and subgraph id.

Implements ITfLiteParser.

Definition at line 3170 of file TfLiteParser.cpp.

References CHECK_LOCATION, CHECK_SUBGRAPH, TfLiteParser::GetSubgraphOutputs(), and armnnDeserializer::ToTensorInfo().

3172 {
3173  CHECK_SUBGRAPH(m_Model, subgraphId);
3174  auto outputs = GetSubgraphOutputs(m_Model, subgraphId);
3175  for (unsigned int i = 0; i < outputs.size(); ++i)
3176  {
3177  auto const output = outputs[i];
3178  if (output.second->name == name)
3179  {
3180  auto bindingId = GenerateLayerBindingId(subgraphId, output.first);
3181  std::vector<unsigned int> shape = m_OverridenOutputShapes.size() > 0 ?
3182  m_OverridenOutputShapes[i] : AsUnsignedVector(output.second->shape);
3183  return std::make_pair(bindingId, ToTensorInfo(output.second, shape));
3184  }
3185  }
3186 
3187  std::stringstream bindings;
3188  for (auto const & output : outputs)
3189  {
3190  bindings << "'" << output.second->name << "' ";
3191  }
3192 
3193  throw ParseException(
3194  boost::str(
3195  boost::format("No output binding found for subgraph:%1% and name:%2%. "
3196  "Possible outputs are: [%3%] %4%") %
3197  subgraphId %
3198  name %
3199  bindings.str() %
3200  CHECK_LOCATION().AsString()));
3201 }
armnn::TensorInfo ToTensorInfo(Deserializer::TensorRawPtr tensorPtr)
#define CHECK_SUBGRAPH(MODEL, SUBGRAPH_INDEX)
static TensorIdRawPtrVector GetSubgraphOutputs(const ModelPtr &model, size_t subgraphIndex)
#define CHECK_LOCATION()
Definition: Exceptions.hpp:192

◆ GetOutputs()

TfLiteParser::TensorRawPtrVector GetOutputs ( const ModelPtr &  model,
size_t  subgraphIndex,
size_t  operatorIndex 
)
static

Definition at line 2871 of file TfLiteParser.cpp.

References CHECK_MODEL, CHECK_TENSOR, and CHECKED_NON_NEGATIVE.

Referenced by armnnTfLiteParser::ComputeWrappedIndex(), TfLiteParser::CreateNetworkFromBinary(), TfLiteParser::OutputShapeOfReshape(), TfLiteParser::OutputShapeOfSqueeze(), and TfLiteParser::~TfLiteParser().

2874 {
2875  CHECK_MODEL(model, subgraphIndex, operatorIndex);
2876 
2877  const auto & subgraphPtr = model->subgraphs[subgraphIndex];
2878  const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
2879 
2880  size_t outputCount = operatorPtr->outputs.size();
2881  TensorRawPtrVector result(outputCount);
2882  for (size_t i=0; i<outputCount; ++i)
2883  {
2884  uint32_t outputId = CHECKED_NON_NEGATIVE(operatorPtr->outputs[i]);
2885  CHECK_TENSOR(model, subgraphIndex, outputId);
2886  result[i] = subgraphPtr->tensors[outputId].get();
2887  }
2888  return result;
2889 }
#define CHECK_MODEL(MODEL, SUBGRAPH_INDEX, OPERATOR_INDEX)
std::vector< TensorRawPtr > TensorRawPtrVector
#define CHECK_TENSOR(MODEL, SUBGRAPH_INDEX, TENSOR_INDEX)
#define CHECKED_NON_NEGATIVE(VALUE)

◆ GetOutputTensorIds()

std::vector< int32_t > & GetOutputTensorIds ( const ModelPtr &  model,
size_t  subgraphIndex,
size_t  operatorIndex 
)
static

Definition at line 2934 of file TfLiteParser.cpp.

References ARMNN_ASSERT, CHECK_LOCATION, CHECK_MODEL, CHECK_SUBGRAPH, IConnectableLayer::GetInputSlot(), IConnectableLayer::GetNumInputSlots(), IConnectableLayer::GetNumOutputSlots(), IConnectableLayer::GetOutputSlot(), TfLiteParser::GetSubgraphInputs(), TfLiteParser::GetSubgraphOutputs(), IOutputSlot::SetTensorInfo(), and armnnDeserializer::ToTensorInfo().

Referenced by armnnTfLiteParser::ComputeWrappedIndex(), TfLiteParser::CreateNetworkFromBinary(), TfLiteParser::OutputShapeOfReshape(), TfLiteParser::OutputShapeOfSqueeze(), and TfLiteParser::~TfLiteParser().

2937 {
2938  CHECK_MODEL(model, subgraphIndex, operatorIndex);
2939  const auto & subgraphPtr = model->subgraphs[subgraphIndex];
2940  const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
2941  return operatorPtr->outputs;
2942 }
#define CHECK_MODEL(MODEL, SUBGRAPH_INDEX, OPERATOR_INDEX)

◆ GetSubgraphCount()

size_t GetSubgraphCount ( ) const
overridevirtual

Return the number of subgraphs in the parsed model.

Implements ITfLiteParser.

Definition at line 3203 of file TfLiteParser.cpp.

3204 {
3205  return m_Model->subgraphs.size();
3206 }

◆ GetSubgraphInputs()

TfLiteParser::TensorIdRawPtrVector GetSubgraphInputs ( const ModelPtr &  model,
size_t  subgraphIndex 
)
static

Definition at line 2891 of file TfLiteParser.cpp.

References CHECK_SUBGRAPH, CHECK_TENSOR, and CHECKED_NON_NEGATIVE.

Referenced by TfLiteParser::GetNetworkInputBindingInfo(), TfLiteParser::GetOutputTensorIds(), TfLiteParser::GetSubgraphInputTensorNames(), and TfLiteParser::~TfLiteParser().

2893 {
2894  CHECK_SUBGRAPH(model, subgraphIndex);
2895  const auto & subgraphPtr = model->subgraphs[subgraphIndex];
2896 
2897  size_t inputCount = subgraphPtr->inputs.size();
2898  TensorIdRawPtrVector result(inputCount);
2899  for (size_t i=0; i<inputCount; ++i)
2900  {
2901  uint32_t inputId = CHECKED_NON_NEGATIVE(subgraphPtr->inputs[i]);
2902  CHECK_TENSOR(model, subgraphIndex, inputId);
2903  result[i] = std::make_pair(inputId, subgraphPtr->tensors[inputId].get());
2904  }
2905  return result;
2906 }
#define CHECK_TENSOR(MODEL, SUBGRAPH_INDEX, TENSOR_INDEX)
#define CHECK_SUBGRAPH(MODEL, SUBGRAPH_INDEX)
#define CHECKED_NON_NEGATIVE(VALUE)
std::vector< TensorIdRawPtr > TensorIdRawPtrVector

◆ GetSubgraphInputTensorNames()

std::vector< std::string > GetSubgraphInputTensorNames ( size_t  subgraphId) const
overridevirtual

Return the input tensor names for a given subgraph.

Implements ITfLiteParser.

Definition at line 3208 of file TfLiteParser.cpp.

References CHECK_SUBGRAPH, and TfLiteParser::GetSubgraphInputs().

3209 {
3210  CHECK_SUBGRAPH(m_Model, subgraphId);
3211  auto inputs = GetSubgraphInputs(m_Model, subgraphId);
3212  std::vector<std::string> result;
3213  result.reserve(inputs.size());
3214  for (auto const & input : inputs)
3215  {
3216  result.push_back(input.second->name);
3217  }
3218  return result;
3219 }
static TensorIdRawPtrVector GetSubgraphInputs(const ModelPtr &model, size_t subgraphIndex)
#define CHECK_SUBGRAPH(MODEL, SUBGRAPH_INDEX)

◆ GetSubgraphOutputs()

TfLiteParser::TensorIdRawPtrVector GetSubgraphOutputs ( const ModelPtr &  model,
size_t  subgraphIndex 
)
static

Definition at line 2908 of file TfLiteParser.cpp.

References CHECK_SUBGRAPH, and CHECKED_NON_NEGATIVE.

Referenced by TfLiteParser::GetNetworkOutputBindingInfo(), TfLiteParser::GetOutputTensorIds(), TfLiteParser::GetSubgraphOutputTensorNames(), and TfLiteParser::~TfLiteParser().

2910 {
2911  CHECK_SUBGRAPH(model, subgraphIndex);
2912  const auto & subgraphPtr = model->subgraphs[subgraphIndex];
2913 
2914  size_t outputCount = subgraphPtr->outputs.size();
2915  TensorIdRawPtrVector result(outputCount);
2916  for (size_t i=0; i<outputCount; ++i)
2917  {
2918  uint32_t outputId = CHECKED_NON_NEGATIVE(subgraphPtr->outputs[i]);
2919  result[i] = std::make_pair(outputId, subgraphPtr->tensors[outputId].get());
2920  }
2921  return result;
2922 }
#define CHECK_SUBGRAPH(MODEL, SUBGRAPH_INDEX)
#define CHECKED_NON_NEGATIVE(VALUE)
std::vector< TensorIdRawPtr > TensorIdRawPtrVector

◆ GetSubgraphOutputTensorNames()

std::vector< std::string > GetSubgraphOutputTensorNames ( size_t  subgraphId) const
overridevirtual

Return the output tensor names for a given subgraph.

Implements ITfLiteParser.

Definition at line 3221 of file TfLiteParser.cpp.

References CHECK_SUBGRAPH, and TfLiteParser::GetSubgraphOutputs().

3222 {
3223  CHECK_SUBGRAPH(m_Model, subgraphId);
3224  auto outputs = GetSubgraphOutputs(m_Model, subgraphId);
3225  std::vector<std::string> result;
3226  result.reserve(outputs.size());
3227  for (auto const & output : outputs)
3228  {
3229  result.push_back(output.second->name);
3230  }
3231  return result;
3232 }
#define CHECK_SUBGRAPH(MODEL, SUBGRAPH_INDEX)
static TensorIdRawPtrVector GetSubgraphOutputs(const ModelPtr &model, size_t subgraphIndex)

◆ LoadModelFromBinary()

TfLiteParser::ModelPtr LoadModelFromBinary ( const uint8_t *  binaryContent,
size_t  len 
)
static

Definition at line 2833 of file TfLiteParser.cpp.

References CHECK_LOCATION.

Referenced by TfLiteParser::CreateNetworkFromBinary(), TfLiteParser::LoadModelFromFile(), and TfLiteParser::~TfLiteParser().

2834 {
2835  if (binaryContent == nullptr)
2836  {
2837  throw InvalidArgumentException(boost::str(boost::format("Invalid (null) binary content %1%") %
2838  CHECK_LOCATION().AsString()));
2839  }
2840  flatbuffers::Verifier verifier(binaryContent, len);
2841  if (verifier.VerifyBuffer<tflite::Model>() == false)
2842  {
2843  throw ParseException(
2844  boost::str(boost::format("Buffer doesn't conform to the expected Tensorflow Lite "
2845  "flatbuffers format. size:%1% %2%") %
2846  len %
2847  CHECK_LOCATION().AsString()));
2848  }
2849  return tflite::UnPackModel(binaryContent);
2850 }
#define CHECK_LOCATION()
Definition: Exceptions.hpp:192

◆ LoadModelFromFile()

TfLiteParser::ModelPtr LoadModelFromFile ( const char *  fileName)
static

Definition at line 2809 of file TfLiteParser.cpp.

References CHECK_LOCATION, and TfLiteParser::LoadModelFromBinary().

Referenced by TfLiteParser::CreateNetworkFromBinaryFile(), and TfLiteParser::~TfLiteParser().

2810 {
2811  if (fileName == nullptr)
2812  {
2813  throw InvalidArgumentException(boost::str(boost::format("Invalid (null) file name %1%") %
2814  CHECK_LOCATION().AsString()));
2815  }
2816  boost::system::error_code errorCode;
2817  boost::filesystem::path pathToFile(fileName);
2818  if (!boost::filesystem::exists(pathToFile, errorCode))
2819  {
2820  std::string locationString = CHECK_LOCATION().AsString();
2821  std::string msg = boost::str(boost::format("Cannot find the file (%1%) errorCode: %2% %3%") %
2822  fileName %
2823  errorCode %
2824  locationString);
2825  throw FileNotFoundException(msg);
2826  }
2827  std::ifstream file(fileName, std::ios::binary);
2828  std::string fileContent((std::istreambuf_iterator<char>(file)), std::istreambuf_iterator<char>());
2829  return LoadModelFromBinary(reinterpret_cast<const uint8_t *>(fileContent.c_str()),
2830  fileContent.size());
2831 }
static ModelPtr LoadModelFromBinary(const uint8_t *binaryContent, size_t len)
#define CHECK_LOCATION()
Definition: Exceptions.hpp:192

◆ OutputShapeOfReshape()

armnn::TensorInfo OutputShapeOfReshape ( const armnn::TensorInfo &  inputTensorInfo,
const std::vector< int32_t > &  targetDimsIn 
)
static

Definition at line 1928 of file TfLiteParser.cpp.

References ARMNN_ASSERT, ARMNN_THROW_PARSE_EXCEPTION, CHECK_LOCATION, CHECK_MODEL, CHECK_SUPPORTED_FUSED_ACTIVATION, CHECK_VALID_SIZE, CHECKED_NON_NEGATIVE, armnnDeserializer::CheckShape(), IOutputSlot::Connect(), TfLiteParser::GetBuffer(), TensorInfo::GetDataType(), TfLiteParser::GetInputs(), IConnectableLayer::GetInputSlot(), TfLiteParser::GetInputTensorIds(), IConnectableLayer::GetName(), TensorInfo::GetNumBytes(), TensorInfo::GetNumDimensions(), TensorInfo::GetNumElements(), IConnectableLayer::GetNumOutputSlots(), TfLiteParser::GetOutputs(), IConnectableLayer::GetOutputSlot(), TfLiteParser::GetOutputTensorIds(), TensorInfo::GetQuantizationOffset(), TensorInfo::GetQuantizationScale(), TensorInfo::GetShape(), StackDescriptor::m_Axis, FullyConnectedDescriptor::m_BiasEnabled, DetectionPostProcessDescriptor::m_DetectionsPerClass, StackDescriptor::m_InputShape, DetectionPostProcessDescriptor::m_MaxClassesPerDetection, DetectionPostProcessDescriptor::m_MaxDetections, ResizeDescriptor::m_Method, DetectionPostProcessDescriptor::m_NmsIouThreshold, DetectionPostProcessDescriptor::m_NmsScoreThreshold, DetectionPostProcessDescriptor::m_NumClasses, StackDescriptor::m_NumInputs, DetectionPostProcessDescriptor::m_ScaleH, DetectionPostProcessDescriptor::m_ScaleW, DetectionPostProcessDescriptor::m_ScaleX, DetectionPostProcessDescriptor::m_ScaleY, ReshapeDescriptor::m_TargetShape, FullyConnectedDescriptor::m_TransposeWeightMatrix, DetectionPostProcessDescriptor::m_UseRegularNms, armnn::MaxNumOfTensorDimensions, armnn::NHWC, armnn::numeric_cast(), options, armnnUtils::ProcessConcatInputTensorInfo(), OriginsDescriptor::SetConcatAxis(), TensorInfo::SetShape(), IOutputSlot::SetTensorInfo(), ViewsDescriptor::SetViewOriginCoord(), ViewsDescriptor::SetViewSize(), and armnnDeserializer::ToTensorInfo().

Referenced by TfLiteParser::~TfLiteParser().

1930 {
1931  std::vector<unsigned int> outputDims(targetDimsIn.begin(), targetDimsIn.end());
1932  const auto stretchDim = std::find(targetDimsIn.begin(), targetDimsIn.end(), -1);
1933 
1934  if (stretchDim != targetDimsIn.end())
1935  {
1936  if (std::find(std::next(stretchDim), targetDimsIn.end(), -1) != targetDimsIn.end())
1937  {
1938  throw ParseException(
1939  boost::str(
1940  boost::format("At most one component of shape can be -1 %1%") % CHECK_LOCATION().AsString()));
1941  }
1942 
1943  auto targetNumElements =
1944  boost::numeric_cast<unsigned int>(
1945  std::accumulate(targetDimsIn.begin(), targetDimsIn.end(), -1, std::multiplies<int32_t>()));
1946 
1947  auto stretchIndex = static_cast<size_t>(std::distance(targetDimsIn.begin(), stretchDim));
1948  outputDims[stretchIndex] = inputTensorInfo.GetNumElements() / targetNumElements;
1949  }
1950 
1951  TensorShape outputShape = TensorShape(static_cast<unsigned int>(outputDims.size()), outputDims.data());
1952 
1953  TensorInfo reshapeInfo = inputTensorInfo;
1954  reshapeInfo.SetShape(outputShape);
1955 
1956  return reshapeInfo;
1957 }
void SetShape(const TensorShape &newShape)
Definition: Tensor.hpp:90
std::enable_if_t< std::is_unsigned< Source >::value &&std::is_unsigned< Dest >::value, Dest > numeric_cast(Source source)
Definition: NumericCast.hpp:33
#define CHECK_LOCATION()
Definition: Exceptions.hpp:192
unsigned int GetNumElements() const
Definition: Tensor.hpp:93

◆ OutputShapeOfSqueeze()

armnn::TensorInfo OutputShapeOfSqueeze ( const std::vector< uint32_t > &  squeezeDims,
const armnn::TensorInfo &  inputTensorInfo 
)
static

Definition at line 1493 of file TfLiteParser.cpp.

References ARMNN_ASSERT, CHECK_LOCATION, CHECK_MODEL, CHECK_VALID_SIZE, TfLiteParser::GetBuffer(), TfLiteParser::GetInputs(), TfLiteParser::GetInputTensorIds(), TensorInfo::GetNumBytes(), TensorInfo::GetNumDimensions(), TensorInfo::GetNumElements(), TfLiteParser::GetOutputs(), IConnectableLayer::GetOutputSlot(), TfLiteParser::GetOutputTensorIds(), TensorInfo::GetShape(), armnn::IgnoreUnused(), ActivationDescriptor::m_A, MeanDescriptor::m_Axis, ActivationDescriptor::m_B, StridedSliceDescriptor::m_Begin, StridedSliceDescriptor::m_BeginMask, StridedSliceDescriptor::m_DataLayout, StridedSliceDescriptor::m_EllipsisMask, StridedSliceDescriptor::m_End, StridedSliceDescriptor::m_EndMask, ActivationDescriptor::m_Function, MeanDescriptor::m_KeepDims, StridedSliceDescriptor::m_NewAxisMask, PadDescriptor::m_PadList, StridedSliceDescriptor::m_ShrinkAxisMask, StridedSliceDescriptor::m_Stride, ReshapeDescriptor::m_TargetShape, armnn::NHWC, options, TensorInfo::SetShape(), IOutputSlot::SetTensorInfo(), armnnDeserializer::ToTensorInfo(), and true.

Referenced by BOOST_FIXTURE_TEST_CASE(), and TfLiteParser::~TfLiteParser().

1495 {
1496  CHECK_VALID_SIZE(squeezeDimsIn.size(), 0, 1, 2, 3, 4);
1497  std::vector<uint32_t> squeezeDims = squeezeDimsIn;
1498  static const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };
1499 
1500  if (inputTensorInfo.GetNumDimensions() > 4)
1501  {
1502  std::stringstream ss;
1503  ss << "Input tensor has unexpected number of dimensions:" << inputTensorInfo.GetNumDimensions()
1504  << " shape:" << inputTensorInfo.GetShape() << " "
1505  << CHECK_LOCATION().AsString();
1506  throw ParseException(ss.str());
1507  }
1508 
1509  if (squeezeDims.empty())
1510  {
1511  squeezeDims.assign(dimensionSequence,
1512  dimensionSequence+inputTensorInfo.GetNumDimensions());
1513  }
1514 
1515  std::vector<uint32_t> outputDims;
1516  for(unsigned int i = 0; i < inputTensorInfo.GetNumDimensions(); i++)
1517  {
1518  bool skipSqueeze = (std::find(squeezeDims.begin(), squeezeDims.end(), i) == squeezeDims.end());
1519  auto currentDimension = inputTensorInfo.GetShape()[i];
1520  if (skipSqueeze || currentDimension != 1)
1521  {
1522  outputDims.push_back(currentDimension);
1523  }
1524  }
1525 
1526  if (outputDims.size() > 4)
1527  {
1528  std::stringstream ss;
1529  ss << "Output tensor has unexpected number of dimensions:" << inputTensorInfo.GetNumDimensions()
1530  << " shape:" << inputTensorInfo.GetShape() << " "
1531  << CHECK_LOCATION().AsString();
1532  throw ParseException(ss.str());
1533  }
1534 
1535  TensorShape outShape = TensorShape(static_cast<unsigned int>(outputDims.size()),
1536  outputDims.data());
1537 
1538  // we need to preserve the tensor type and the quantization data as well
1539  TensorInfo outTensorInfo = inputTensorInfo;
1540  outTensorInfo.SetShape(outShape);
1541 
1542  return outTensorInfo;
1543 }
const TensorShape & GetShape() const
Definition: Tensor.hpp:88
void SetShape(const TensorShape &newShape)
Definition: Tensor.hpp:90
#define CHECK_VALID_SIZE(ACTUAL,...)
#define CHECK_LOCATION()
Definition: Exceptions.hpp:192
unsigned int GetNumDimensions() const
Definition: Tensor.hpp:92

The documentation for this class was generated from the following files: