ArmNN
 20.02
TfLiteParser Class Reference

#include <TfLiteParser.hpp>

Inheritance diagram for TfLiteParser:
ITfLiteParser

Public Types

using ModelPtr = std::unique_ptr< tflite::ModelT >
 
using SubgraphPtr = std::unique_ptr< tflite::SubGraphT >
 
using OperatorPtr = std::unique_ptr< tflite::OperatorT >
 
using OperatorCodePtr = std::unique_ptr< tflite::OperatorCodeT >
 
using TensorPtr = std::unique_ptr< tflite::TensorT >
 
using TensorRawPtr = const tflite::TensorT *
 
using TensorRawPtrVector = std::vector< TensorRawPtr >
 
using TensorIdRawPtr = std::pair< size_t, TensorRawPtr >
 
using TensorIdRawPtrVector = std::vector< TensorIdRawPtr >
 
using BufferPtr = std::unique_ptr< tflite::BufferT >
 
using BufferRawPtr = const tflite::BufferT *
 

Public Member Functions

virtual armnn::INetworkPtr CreateNetworkFromBinaryFile (const char *graphFile) override
 Create the network from a flatbuffers binary file on disk. More...
 
virtual armnn::INetworkPtr CreateNetworkFromBinary (const std::vector< uint8_t > &binaryContent) override
 Create the network from a flatbuffers binary. More...
 
virtual BindingPointInfo GetNetworkInputBindingInfo (size_t subgraphId, const std::string &name) const override
 Retrieve binding info (layer id and tensor info) for the network input identified by the given layer name and subgraph id. More...
 
virtual BindingPointInfo GetNetworkOutputBindingInfo (size_t subgraphId, const std::string &name) const override
 Retrieve binding info (layer id and tensor info) for the network output identified by the given layer name and subgraph id. More...
 
virtual size_t GetSubgraphCount () const override
 Return the number of subgraphs in the parsed model. More...
 
virtual std::vector< std::string > GetSubgraphInputTensorNames (size_t subgraphId) const override
 Return the input tensor names for a given subgraph. More...
 
virtual std::vector< std::string > GetSubgraphOutputTensorNames (size_t subgraphId) const override
 Return the output tensor names for a given subgraph. More...
 
 TfLiteParser (const armnn::Optional< ITfLiteParser::TfLiteParserOptions > &options=armnn::EmptyOptional())
 
virtual ~TfLiteParser ()
 

Static Public Member Functions

static ModelPtr LoadModelFromFile (const char *fileName)
 
static ModelPtr LoadModelFromBinary (const uint8_t *binaryContent, size_t len)
 
static TensorRawPtrVector GetInputs (const ModelPtr &model, size_t subgraphIndex, size_t operatorIndex)
 
static TensorRawPtrVector GetOutputs (const ModelPtr &model, size_t subgraphIndex, size_t operatorIndex)
 
static TensorIdRawPtrVector GetSubgraphInputs (const ModelPtr &model, size_t subgraphIndex)
 
static TensorIdRawPtrVector GetSubgraphOutputs (const ModelPtr &model, size_t subgraphIndex)
 
static std::vector< int32_t > & GetInputTensorIds (const ModelPtr &model, size_t subgraphIndex, size_t operatorIndex)
 
static std::vector< int32_t > & GetOutputTensorIds (const ModelPtr &model, size_t subgraphIndex, size_t operatorIndex)
 
static BufferRawPtr GetBuffer (const ModelPtr &model, size_t bufferIndex)
 
static armnn::TensorInfo OutputShapeOfSqueeze (const std::vector< uint32_t > &squeezeDims, const armnn::TensorInfo &inputTensorInfo)
 
static armnn::TensorInfo OutputShapeOfReshape (const armnn::TensorInfo &inputTensorInfo, const std::vector< int32_t > &targetDimsIn)
 
- Static Public Member Functions inherited from ITfLiteParser
static ITfLiteParser * CreateRaw (const armnn::Optional< TfLiteParserOptions > &options=armnn::EmptyOptional())
 
static ITfLiteParserPtr Create (const armnn::Optional< TfLiteParserOptions > &options=armnn::EmptyOptional())
 
static void Destroy (ITfLiteParser *parser)
 

Additional Inherited Members

- Protected Member Functions inherited from ITfLiteParser
virtual ~ITfLiteParser ()
 

Detailed Description

Definition at line 19 of file TfLiteParser.hpp.

Member Typedef Documentation

◆ BufferPtr

using BufferPtr = std::unique_ptr<tflite::BufferT>

Definition at line 32 of file TfLiteParser.hpp.

◆ BufferRawPtr

using BufferRawPtr = const tflite::BufferT *

Definition at line 33 of file TfLiteParser.hpp.

◆ ModelPtr

using ModelPtr = std::unique_ptr<tflite::ModelT>

Definition at line 23 of file TfLiteParser.hpp.

◆ OperatorCodePtr

using OperatorCodePtr = std::unique_ptr<tflite::OperatorCodeT>

Definition at line 26 of file TfLiteParser.hpp.

◆ OperatorPtr

using OperatorPtr = std::unique_ptr<tflite::OperatorT>

Definition at line 25 of file TfLiteParser.hpp.

◆ SubgraphPtr

using SubgraphPtr = std::unique_ptr<tflite::SubGraphT>

Definition at line 24 of file TfLiteParser.hpp.

◆ TensorIdRawPtr

using TensorIdRawPtr = std::pair<size_t, TensorRawPtr>

Definition at line 30 of file TfLiteParser.hpp.

◆ TensorIdRawPtrVector

using TensorIdRawPtrVector = std::vector<TensorIdRawPtr>

Definition at line 31 of file TfLiteParser.hpp.

◆ TensorPtr

using TensorPtr = std::unique_ptr<tflite::TensorT>

Definition at line 27 of file TfLiteParser.hpp.

◆ TensorRawPtr

using TensorRawPtr = const tflite::TensorT *

Definition at line 28 of file TfLiteParser.hpp.

◆ TensorRawPtrVector

using TensorRawPtrVector = std::vector<TensorRawPtr>

Definition at line 29 of file TfLiteParser.hpp.

Constructor & Destructor Documentation

◆ TfLiteParser()

Definition at line 488 of file TfLiteParser.cpp.

References CHECK_MODEL, CHECKED_NON_NEGATIVE, IOutputSlot::Connect(), IConnectableLayer::GetInputSlot(), IConnectableLayer::GetName(), TensorInfo::GetNumDimensions(), IConnectableLayer::GetOutputSlot(), TensorInfo::GetShape(), ReshapeDescriptor::m_TargetShape, TensorInfo::SetShape(), IOutputSlot::SetTensorInfo(), and armnnDeserializer::ToTensorInfo().

Referenced by ITfLiteParser::CreateRaw(), and TfLiteParser::~TfLiteParser().

489 : m_Options(options)
490 , m_Network(nullptr, nullptr)
491 , m_ParserFunctions(tflite::BuiltinOperator_MAX+1, &TfLiteParser::ParseUnsupportedOperator)
492 {
493  // register supported operators
494  m_ParserFunctions[tflite::BuiltinOperator_ADD] = &TfLiteParser::ParseAdd;
495  m_ParserFunctions[tflite::BuiltinOperator_AVERAGE_POOL_2D] = &TfLiteParser::ParseAveragePool2D;
496  m_ParserFunctions[tflite::BuiltinOperator_BATCH_TO_SPACE_ND] = &TfLiteParser::ParseBatchToSpaceND;
497  m_ParserFunctions[tflite::BuiltinOperator_CONCATENATION] = &TfLiteParser::ParseConcatenation;
498  m_ParserFunctions[tflite::BuiltinOperator_CONV_2D] = &TfLiteParser::ParseConv2D;
499  m_ParserFunctions[tflite::BuiltinOperator_CUSTOM] = &TfLiteParser::ParseCustomOperator;
500  m_ParserFunctions[tflite::BuiltinOperator_DEPTHWISE_CONV_2D] = &TfLiteParser::ParseDepthwiseConv2D;
501  m_ParserFunctions[tflite::BuiltinOperator_DEQUANTIZE] = &TfLiteParser::ParseDequantize;
502  m_ParserFunctions[tflite::BuiltinOperator_FULLY_CONNECTED] = &TfLiteParser::ParseFullyConnected;
503  m_ParserFunctions[tflite::BuiltinOperator_LOGISTIC] = &TfLiteParser::ParseLogistic;
504  m_ParserFunctions[tflite::BuiltinOperator_L2_NORMALIZATION] = &TfLiteParser::ParseL2Normalization;
505  m_ParserFunctions[tflite::BuiltinOperator_MAX_POOL_2D] = &TfLiteParser::ParseMaxPool2D;
506  m_ParserFunctions[tflite::BuiltinOperator_MAXIMUM] = &TfLiteParser::ParseMaximum;
507  m_ParserFunctions[tflite::BuiltinOperator_MEAN] = &TfLiteParser::ParseMean;
508  m_ParserFunctions[tflite::BuiltinOperator_MINIMUM] = &TfLiteParser::ParseMinimum;
509  m_ParserFunctions[tflite::BuiltinOperator_MUL] = &TfLiteParser::ParseMul;
510  m_ParserFunctions[tflite::BuiltinOperator_PACK] = &TfLiteParser::ParsePack;
511  m_ParserFunctions[tflite::BuiltinOperator_PAD] = &TfLiteParser::ParsePad;
512  m_ParserFunctions[tflite::BuiltinOperator_QUANTIZE] = &TfLiteParser::ParseQuantize;
513  m_ParserFunctions[tflite::BuiltinOperator_RELU] = &TfLiteParser::ParseRelu;
514  m_ParserFunctions[tflite::BuiltinOperator_RELU6] = &TfLiteParser::ParseRelu6;
515  m_ParserFunctions[tflite::BuiltinOperator_RESHAPE] = &TfLiteParser::ParseReshape;
516  m_ParserFunctions[tflite::BuiltinOperator_RESIZE_BILINEAR] = &TfLiteParser::ParseResizeBilinear;
517  m_ParserFunctions[tflite::BuiltinOperator_RESIZE_NEAREST_NEIGHBOR] = &TfLiteParser::ParseResizeNearestNeighbor;
518  m_ParserFunctions[tflite::BuiltinOperator_SLICE] = &TfLiteParser::ParseSlice;
519  m_ParserFunctions[tflite::BuiltinOperator_SOFTMAX] = &TfLiteParser::ParseSoftmax;
520  m_ParserFunctions[tflite::BuiltinOperator_SPACE_TO_BATCH_ND] = &TfLiteParser::ParseSpaceToBatchND;
521  m_ParserFunctions[tflite::BuiltinOperator_SPLIT] = &TfLiteParser::ParseSplit;
522  m_ParserFunctions[tflite::BuiltinOperator_SQUEEZE] = &TfLiteParser::ParseSqueeze;
523  m_ParserFunctions[tflite::BuiltinOperator_STRIDED_SLICE] = &TfLiteParser::ParseStridedSlice;
524  m_ParserFunctions[tflite::BuiltinOperator_SUB] = &TfLiteParser::ParseSub;
525  m_ParserFunctions[tflite::BuiltinOperator_TANH] = &TfLiteParser::ParseTanH;
526  m_ParserFunctions[tflite::BuiltinOperator_TRANSPOSE] = &TfLiteParser::ParseTranspose;
527  m_ParserFunctions[tflite::BuiltinOperator_TRANSPOSE_CONV] = &TfLiteParser::ParseTransposeConv;
528  m_ParserFunctions[tflite::BuiltinOperator_UNPACK] = &TfLiteParser::ParseUnpack;
529 
530  // register supported custom operators
531  m_CustomParserFunctions["TFLite_Detection_PostProcess"] = &TfLiteParser::ParseDetectionPostProcess;
532 }

◆ ~TfLiteParser()

Member Function Documentation

◆ CreateNetworkFromBinary()

INetworkPtr CreateNetworkFromBinary ( const std::vector< uint8_t > &  binaryContent)
overridevirtual

Create the network from a flatbuffers binary.

Implements ITfLiteParser.

Definition at line 605 of file TfLiteParser.cpp.

References ARMNN_LOG, armnnTfParser::CalcPadding(), CHECK_LOCATION, CHECK_MODEL, CHECK_SUPPORTED_FUSED_ACTIVATION, CHECK_TENSOR, CHECK_VALID_SIZE, CHECKED_NON_NEGATIVE, armnn::error, TfLiteParser::GetBuffer(), TfLiteParser::GetInputs(), TfLiteParser::GetInputTensorIds(), TensorInfo::GetNumBytes(), TensorInfo::GetNumDimensions(), TensorInfo::GetNumElements(), TfLiteParser::GetOutputs(), IConnectableLayer::GetOutputSlot(), TfLiteParser::GetOutputTensorIds(), TensorInfo::GetShape(), TfLiteParser::LoadModelFromBinary(), SoftmaxDescriptor::m_Beta, Convolution2dDescriptor::m_BiasEnabled, DepthwiseConvolution2dDescriptor::m_BiasEnabled, TransposeConvolution2dDescriptor::m_BiasEnabled, BatchToSpaceNdDescriptor::m_BlockShape, SpaceToBatchNdDescriptor::m_BlockShape, BatchToSpaceNdDescriptor::m_Crops, Pooling2dDescriptor::m_DataLayout, Convolution2dDescriptor::m_DataLayout, DepthwiseConvolution2dDescriptor::m_DataLayout, L2NormalizationDescriptor::m_DataLayout, BatchToSpaceNdDescriptor::m_DataLayout, SpaceToBatchNdDescriptor::m_DataLayout, TransposeConvolution2dDescriptor::m_DataLayout, Convolution2dDescriptor::m_DilationX, DepthwiseConvolution2dDescriptor::m_DilationX, Convolution2dDescriptor::m_DilationY, DepthwiseConvolution2dDescriptor::m_DilationY, Pooling2dDescriptor::m_OutputShapeRounding, Pooling2dDescriptor::m_PadBottom, Convolution2dDescriptor::m_PadBottom, DepthwiseConvolution2dDescriptor::m_PadBottom, TransposeConvolution2dDescriptor::m_PadBottom, Pooling2dDescriptor::m_PaddingMethod, Pooling2dDescriptor::m_PadLeft, Convolution2dDescriptor::m_PadLeft, DepthwiseConvolution2dDescriptor::m_PadLeft, TransposeConvolution2dDescriptor::m_PadLeft, SpaceToBatchNdDescriptor::m_PadList, Pooling2dDescriptor::m_PadRight, Convolution2dDescriptor::m_PadRight, DepthwiseConvolution2dDescriptor::m_PadRight, TransposeConvolution2dDescriptor::m_PadRight, Pooling2dDescriptor::m_PadTop, Convolution2dDescriptor::m_PadTop, DepthwiseConvolution2dDescriptor::m_PadTop, 
TransposeConvolution2dDescriptor::m_PadTop, Pooling2dDescriptor::m_PoolHeight, Pooling2dDescriptor::m_PoolType, Pooling2dDescriptor::m_PoolWidth, Pooling2dDescriptor::m_StrideX, Convolution2dDescriptor::m_StrideX, DepthwiseConvolution2dDescriptor::m_StrideX, TransposeConvolution2dDescriptor::m_StrideX, Pooling2dDescriptor::m_StrideY, Convolution2dDescriptor::m_StrideY, DepthwiseConvolution2dDescriptor::m_StrideY, TransposeConvolution2dDescriptor::m_StrideY, armnn::NHWC, armnn::numeric_cast(), options, TensorInfo::SetShape(), IOutputSlot::SetTensorInfo(), armnnDeserializer::ToTensorInfo(), and Exception::what().

606 {
607  ResetParser();
608  m_Model = LoadModelFromBinary(binaryContent.data(), binaryContent.size());
609  return CreateNetworkFromModel();
610 }
static ModelPtr LoadModelFromBinary(const uint8_t *binaryContent, size_t len)

◆ CreateNetworkFromBinaryFile()

INetworkPtr CreateNetworkFromBinaryFile ( const char *  graphFile)
overridevirtual

Create the network from a flatbuffers binary file on disk.

Implements ITfLiteParser.

Definition at line 598 of file TfLiteParser.cpp.

References TfLiteParser::LoadModelFromFile().

599 {
600  ResetParser();
601  m_Model = LoadModelFromFile(graphFile);
602  return CreateNetworkFromModel();
603 }
static ModelPtr LoadModelFromFile(const char *fileName)

◆ GetBuffer()

TfLiteParser::BufferRawPtr GetBuffer ( const ModelPtr &  model,
size_t  bufferIndex 
)
static

◆ GetInputs()

TfLiteParser::TensorRawPtrVector GetInputs ( const ModelPtr &  model,
size_t  subgraphIndex,
size_t  operatorIndex 
)
static

Definition at line 2667 of file TfLiteParser.cpp.

References CHECK_MODEL, and CHECKED_NON_NEGATIVE.

Referenced by TfLiteParser::CreateNetworkFromBinary(), TfLiteParser::OutputShapeOfReshape(), TfLiteParser::OutputShapeOfSqueeze(), and TfLiteParser::~TfLiteParser().

2670 {
2671  CHECK_MODEL(model, subgraphIndex, operatorIndex);
2672 
2673  const auto & subgraphPtr = model->subgraphs[subgraphIndex];
2674  const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
2675 
2676  size_t inputCount = operatorPtr->inputs.size();
2677  TensorRawPtrVector result(inputCount);
2678  for (size_t i=0; i<inputCount; ++i)
2679  {
2680  uint32_t inputId = CHECKED_NON_NEGATIVE(operatorPtr->inputs[i]);
2681  result[i] = subgraphPtr->tensors[inputId].get();
2682  }
2683  return result;
2684 }
#define CHECK_MODEL(MODEL, SUBGRAPH_INDEX, OPERATOR_INDEX)
std::vector< TensorRawPtr > TensorRawPtrVector
#define CHECKED_NON_NEGATIVE(VALUE)

◆ GetInputTensorIds()

std::vector< int32_t > & GetInputTensorIds ( const ModelPtr &  model,
size_t  subgraphIndex,
size_t  operatorIndex 
)
static

Definition at line 2739 of file TfLiteParser.cpp.

References CHECK_MODEL.

Referenced by TfLiteParser::CreateNetworkFromBinary(), TfLiteParser::OutputShapeOfReshape(), TfLiteParser::OutputShapeOfSqueeze(), and TfLiteParser::~TfLiteParser().

2742 {
2743  CHECK_MODEL(model, subgraphIndex, operatorIndex);
2744  const auto & subgraphPtr = model->subgraphs[subgraphIndex];
2745  const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
2746  return operatorPtr->inputs;
2747 }
#define CHECK_MODEL(MODEL, SUBGRAPH_INDEX, OPERATOR_INDEX)

◆ GetNetworkInputBindingInfo()

BindingPointInfo GetNetworkInputBindingInfo ( size_t  subgraphId,
const std::string &  name 
) const
overridevirtual

Retrieve binding info (layer id and tensor info) for the network input identified by the given layer name and subgraph id.

Implements ITfLiteParser.

Definition at line 2955 of file TfLiteParser.cpp.

References CHECK_LOCATION, CHECK_SUBGRAPH, TfLiteParser::GetSubgraphInputs(), and armnnDeserializer::ToTensorInfo().

2957 {
2958  CHECK_SUBGRAPH(m_Model, subgraphId);
2959  auto inputs = GetSubgraphInputs(m_Model, subgraphId);
2960  for (auto const & input : inputs)
2961  {
2962  if (input.second->name == name)
2963  {
2964  auto bindingId = GenerateLayerBindingId(subgraphId, input.first);
2965  return std::make_pair(bindingId, ToTensorInfo(input.second));
2966  }
2967  }
2968 
2969  std::stringstream bindings;
2970  for (auto const & input : inputs)
2971  {
2972  bindings << "'" << input.second->name << "' ";
2973  }
2974 
2975  throw ParseException(
2976  boost::str(
2977  boost::format("No input binding found for subgraph:%1% and name:%2%. "
2978  "Possible inputs are: [%3%] %4%") %
2979  subgraphId %
2980  name %
2981  bindings.str() %
2982  CHECK_LOCATION().AsString()));
2983 }
static TensorIdRawPtrVector GetSubgraphInputs(const ModelPtr &model, size_t subgraphIndex)
armnn::TensorInfo ToTensorInfo(Deserializer::TensorRawPtr tensorPtr)
#define CHECK_SUBGRAPH(MODEL, SUBGRAPH_INDEX)
#define CHECK_LOCATION()
Definition: Exceptions.hpp:192

◆ GetNetworkOutputBindingInfo()

BindingPointInfo GetNetworkOutputBindingInfo ( size_t  subgraphId,
const std::string &  name 
) const
overridevirtual

Retrieve binding info (layer id and tensor info) for the network output identified by the given layer name and subgraph id.

Implements ITfLiteParser.

Definition at line 2985 of file TfLiteParser.cpp.

References CHECK_LOCATION, CHECK_SUBGRAPH, TfLiteParser::GetSubgraphOutputs(), and armnnDeserializer::ToTensorInfo().

2987 {
2988  CHECK_SUBGRAPH(m_Model, subgraphId);
2989  auto outputs = GetSubgraphOutputs(m_Model, subgraphId);
2990  for (unsigned int i = 0; i < outputs.size(); ++i)
2991  {
2992  auto const output = outputs[i];
2993  if (output.second->name == name)
2994  {
2995  auto bindingId = GenerateLayerBindingId(subgraphId, output.first);
2996  std::vector<unsigned int> shape = m_OverridenOutputShapes.size() > 0 ?
2997  m_OverridenOutputShapes[i] : AsUnsignedVector(output.second->shape);
2998  return std::make_pair(bindingId, ToTensorInfo(output.second, shape));
2999  }
3000  }
3001 
3002  std::stringstream bindings;
3003  for (auto const & output : outputs)
3004  {
3005  bindings << "'" << output.second->name << "' ";
3006  }
3007 
3008  throw ParseException(
3009  boost::str(
3010  boost::format("No output binding found for subgraph:%1% and name:%2%. "
3011  "Possible outputs are: [%3%] %4%") %
3012  subgraphId %
3013  name %
3014  bindings.str() %
3015  CHECK_LOCATION().AsString()));
3016 }
armnn::TensorInfo ToTensorInfo(Deserializer::TensorRawPtr tensorPtr)
#define CHECK_SUBGRAPH(MODEL, SUBGRAPH_INDEX)
static TensorIdRawPtrVector GetSubgraphOutputs(const ModelPtr &model, size_t subgraphIndex)
#define CHECK_LOCATION()
Definition: Exceptions.hpp:192

◆ GetOutputs()

TfLiteParser::TensorRawPtrVector GetOutputs ( const ModelPtr &  model,
size_t  subgraphIndex,
size_t  operatorIndex 
)
static

Definition at line 2686 of file TfLiteParser.cpp.

References CHECK_MODEL, CHECK_TENSOR, and CHECKED_NON_NEGATIVE.

Referenced by TfLiteParser::CreateNetworkFromBinary(), TfLiteParser::OutputShapeOfReshape(), TfLiteParser::OutputShapeOfSqueeze(), and TfLiteParser::~TfLiteParser().

2689 {
2690  CHECK_MODEL(model, subgraphIndex, operatorIndex);
2691 
2692  const auto & subgraphPtr = model->subgraphs[subgraphIndex];
2693  const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
2694 
2695  size_t outputCount = operatorPtr->outputs.size();
2696  TensorRawPtrVector result(outputCount);
2697  for (size_t i=0; i<outputCount; ++i)
2698  {
2699  uint32_t outputId = CHECKED_NON_NEGATIVE(operatorPtr->outputs[i]);
2700  CHECK_TENSOR(model, subgraphIndex, outputId);
2701  result[i] = subgraphPtr->tensors[outputId].get();
2702  }
2703  return result;
2704 }
#define CHECK_MODEL(MODEL, SUBGRAPH_INDEX, OPERATOR_INDEX)
std::vector< TensorRawPtr > TensorRawPtrVector
#define CHECK_TENSOR(MODEL, SUBGRAPH_INDEX, TENSOR_INDEX)
#define CHECKED_NON_NEGATIVE(VALUE)

◆ GetOutputTensorIds()

std::vector< int32_t > & GetOutputTensorIds ( const ModelPtr &  model,
size_t  subgraphIndex,
size_t  operatorIndex 
)
static

Definition at line 2749 of file TfLiteParser.cpp.

References CHECK_LOCATION, CHECK_MODEL, CHECK_SUBGRAPH, IConnectableLayer::GetInputSlot(), IConnectableLayer::GetNumInputSlots(), IConnectableLayer::GetNumOutputSlots(), IConnectableLayer::GetOutputSlot(), TfLiteParser::GetSubgraphInputs(), TfLiteParser::GetSubgraphOutputs(), IOutputSlot::SetTensorInfo(), and armnnDeserializer::ToTensorInfo().

Referenced by TfLiteParser::CreateNetworkFromBinary(), TfLiteParser::OutputShapeOfReshape(), TfLiteParser::OutputShapeOfSqueeze(), and TfLiteParser::~TfLiteParser().

2752 {
2753  CHECK_MODEL(model, subgraphIndex, operatorIndex);
2754  const auto & subgraphPtr = model->subgraphs[subgraphIndex];
2755  const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
2756  return operatorPtr->outputs;
2757 }
#define CHECK_MODEL(MODEL, SUBGRAPH_INDEX, OPERATOR_INDEX)

◆ GetSubgraphCount()

size_t GetSubgraphCount ( ) const
overridevirtual

Return the number of subgraphs in the parsed model.

Implements ITfLiteParser.

Definition at line 3018 of file TfLiteParser.cpp.

3019 {
3020  return m_Model->subgraphs.size();
3021 }

◆ GetSubgraphInputs()

TfLiteParser::TensorIdRawPtrVector GetSubgraphInputs ( const ModelPtr &  model,
size_t  subgraphIndex 
)
static

Definition at line 2706 of file TfLiteParser.cpp.

References CHECK_SUBGRAPH, CHECK_TENSOR, and CHECKED_NON_NEGATIVE.

Referenced by TfLiteParser::GetNetworkInputBindingInfo(), TfLiteParser::GetOutputTensorIds(), TfLiteParser::GetSubgraphInputTensorNames(), and TfLiteParser::~TfLiteParser().

2708 {
2709  CHECK_SUBGRAPH(model, subgraphIndex);
2710  const auto & subgraphPtr = model->subgraphs[subgraphIndex];
2711 
2712  size_t inputCount = subgraphPtr->inputs.size();
2713  TensorIdRawPtrVector result(inputCount);
2714  for (size_t i=0; i<inputCount; ++i)
2715  {
2716  uint32_t inputId = CHECKED_NON_NEGATIVE(subgraphPtr->inputs[i]);
2717  CHECK_TENSOR(model, subgraphIndex, inputId);
2718  result[i] = std::make_pair(inputId, subgraphPtr->tensors[inputId].get());
2719  }
2720  return result;
2721 }
#define CHECK_TENSOR(MODEL, SUBGRAPH_INDEX, TENSOR_INDEX)
#define CHECK_SUBGRAPH(MODEL, SUBGRAPH_INDEX)
#define CHECKED_NON_NEGATIVE(VALUE)
std::vector< TensorIdRawPtr > TensorIdRawPtrVector

◆ GetSubgraphInputTensorNames()

std::vector< std::string > GetSubgraphInputTensorNames ( size_t  subgraphId) const
overridevirtual

Return the input tensor names for a given subgraph.

Implements ITfLiteParser.

Definition at line 3023 of file TfLiteParser.cpp.

References CHECK_SUBGRAPH, and TfLiteParser::GetSubgraphInputs().

3024 {
3025  CHECK_SUBGRAPH(m_Model, subgraphId);
3026  auto inputs = GetSubgraphInputs(m_Model, subgraphId);
3027  std::vector<std::string> result;
3028  result.reserve(inputs.size());
3029  for (auto const & input : inputs)
3030  {
3031  result.push_back(input.second->name);
3032  }
3033  return result;
3034 }
static TensorIdRawPtrVector GetSubgraphInputs(const ModelPtr &model, size_t subgraphIndex)
#define CHECK_SUBGRAPH(MODEL, SUBGRAPH_INDEX)

◆ GetSubgraphOutputs()

TfLiteParser::TensorIdRawPtrVector GetSubgraphOutputs ( const ModelPtr &  model,
size_t  subgraphIndex 
)
static

Definition at line 2723 of file TfLiteParser.cpp.

References CHECK_SUBGRAPH, and CHECKED_NON_NEGATIVE.

Referenced by TfLiteParser::GetNetworkOutputBindingInfo(), TfLiteParser::GetOutputTensorIds(), TfLiteParser::GetSubgraphOutputTensorNames(), and TfLiteParser::~TfLiteParser().

2725 {
2726  CHECK_SUBGRAPH(model, subgraphIndex);
2727  const auto & subgraphPtr = model->subgraphs[subgraphIndex];
2728 
2729  size_t outputCount = subgraphPtr->outputs.size();
2730  TensorIdRawPtrVector result(outputCount);
2731  for (size_t i=0; i<outputCount; ++i)
2732  {
2733  uint32_t outputId = CHECKED_NON_NEGATIVE(subgraphPtr->outputs[i]);
2734  result[i] = std::make_pair(outputId, subgraphPtr->tensors[outputId].get());
2735  }
2736  return result;
2737 }
#define CHECK_SUBGRAPH(MODEL, SUBGRAPH_INDEX)
#define CHECKED_NON_NEGATIVE(VALUE)
std::vector< TensorIdRawPtr > TensorIdRawPtrVector

◆ GetSubgraphOutputTensorNames()

std::vector< std::string > GetSubgraphOutputTensorNames ( size_t  subgraphId) const
overridevirtual

Return the output tensor names for a given subgraph.

Implements ITfLiteParser.

Definition at line 3036 of file TfLiteParser.cpp.

References CHECK_SUBGRAPH, and TfLiteParser::GetSubgraphOutputs().

3037 {
3038  CHECK_SUBGRAPH(m_Model, subgraphId);
3039  auto outputs = GetSubgraphOutputs(m_Model, subgraphId);
3040  std::vector<std::string> result;
3041  result.reserve(outputs.size());
3042  for (auto const & output : outputs)
3043  {
3044  result.push_back(output.second->name);
3045  }
3046  return result;
3047 }
#define CHECK_SUBGRAPH(MODEL, SUBGRAPH_INDEX)
static TensorIdRawPtrVector GetSubgraphOutputs(const ModelPtr &model, size_t subgraphIndex)

◆ LoadModelFromBinary()

TfLiteParser::ModelPtr LoadModelFromBinary ( const uint8_t *  binaryContent,
size_t  len 
)
static

Definition at line 2648 of file TfLiteParser.cpp.

References CHECK_LOCATION.

Referenced by TfLiteParser::CreateNetworkFromBinary(), TfLiteParser::LoadModelFromFile(), and TfLiteParser::~TfLiteParser().

2649 {
2650  if (binaryContent == nullptr)
2651  {
2652  throw InvalidArgumentException(boost::str(boost::format("Invalid (null) binary content %1%") %
2653  CHECK_LOCATION().AsString()));
2654  }
2655  flatbuffers::Verifier verifier(binaryContent, len);
2656  if (verifier.VerifyBuffer<tflite::Model>() == false)
2657  {
2658  throw ParseException(
2659  boost::str(boost::format("Buffer doesn't conform to the expected Tensorflow Lite "
2660  "flatbuffers format. size:%1% %2%") %
2661  len %
2662  CHECK_LOCATION().AsString()));
2663  }
2664  return tflite::UnPackModel(binaryContent);
2665 }
#define CHECK_LOCATION()
Definition: Exceptions.hpp:192

◆ LoadModelFromFile()

TfLiteParser::ModelPtr LoadModelFromFile ( const char *  fileName)
static

Definition at line 2624 of file TfLiteParser.cpp.

References CHECK_LOCATION, and TfLiteParser::LoadModelFromBinary().

Referenced by TfLiteParser::CreateNetworkFromBinaryFile(), and TfLiteParser::~TfLiteParser().

2625 {
2626  if (fileName == nullptr)
2627  {
2628  throw InvalidArgumentException(boost::str(boost::format("Invalid (null) file name %1%") %
2629  CHECK_LOCATION().AsString()));
2630  }
2631  boost::system::error_code errorCode;
2632  boost::filesystem::path pathToFile(fileName);
2633  if (!boost::filesystem::exists(pathToFile, errorCode))
2634  {
2635  std::string locationString = CHECK_LOCATION().AsString();
2636  std::string msg = boost::str(boost::format("Cannot find the file (%1%) errorCode: %2% %3%") %
2637  fileName %
2638  errorCode %
2639  locationString);
2640  throw FileNotFoundException(msg);
2641  }
2642  std::ifstream file(fileName, std::ios::binary);
2643  std::string fileContent((std::istreambuf_iterator<char>(file)), std::istreambuf_iterator<char>());
2644  return LoadModelFromBinary(reinterpret_cast<const uint8_t *>(fileContent.c_str()),
2645  fileContent.size());
2646 }
static ModelPtr LoadModelFromBinary(const uint8_t *binaryContent, size_t len)
#define CHECK_LOCATION()
Definition: Exceptions.hpp:192

◆ OutputShapeOfReshape()

armnn::TensorInfo OutputShapeOfReshape ( const armnn::TensorInfo &  inputTensorInfo,
const std::vector< int32_t > &  targetDimsIn 
)
static

Definition at line 1898 of file TfLiteParser.cpp.

References ARMNN_THROW_PARSE_EXCEPTION, CHECK_LOCATION, CHECK_MODEL, CHECK_SUPPORTED_FUSED_ACTIVATION, CHECK_VALID_SIZE, CHECKED_NON_NEGATIVE, armnnDeserializer::CheckShape(), IOutputSlot::Connect(), TfLiteParser::GetBuffer(), TensorInfo::GetDataType(), TfLiteParser::GetInputs(), IConnectableLayer::GetInputSlot(), TfLiteParser::GetInputTensorIds(), IConnectableLayer::GetName(), TensorInfo::GetNumBytes(), TensorInfo::GetNumDimensions(), TensorInfo::GetNumElements(), IConnectableLayer::GetNumOutputSlots(), TfLiteParser::GetOutputs(), IConnectableLayer::GetOutputSlot(), TfLiteParser::GetOutputTensorIds(), TensorInfo::GetQuantizationOffset(), TensorInfo::GetQuantizationScale(), TensorInfo::GetShape(), ActivationDescriptor::m_A, StackDescriptor::m_Axis, ActivationDescriptor::m_B, FullyConnectedDescriptor::m_BiasEnabled, DetectionPostProcessDescriptor::m_DetectionsPerClass, ActivationDescriptor::m_Function, StackDescriptor::m_InputShape, DetectionPostProcessDescriptor::m_MaxClassesPerDetection, DetectionPostProcessDescriptor::m_MaxDetections, ResizeDescriptor::m_Method, DetectionPostProcessDescriptor::m_NmsIouThreshold, DetectionPostProcessDescriptor::m_NmsScoreThreshold, DetectionPostProcessDescriptor::m_NumClasses, StackDescriptor::m_NumInputs, DetectionPostProcessDescriptor::m_ScaleH, DetectionPostProcessDescriptor::m_ScaleW, DetectionPostProcessDescriptor::m_ScaleX, DetectionPostProcessDescriptor::m_ScaleY, ReshapeDescriptor::m_TargetShape, FullyConnectedDescriptor::m_TransposeWeightMatrix, DetectionPostProcessDescriptor::m_UseRegularNms, armnn::MaxNumOfTensorDimensions, armnn::NHWC, armnn::numeric_cast(), options, armnnUtils::ProcessConcatInputTensorInfo(), OriginsDescriptor::SetConcatAxis(), TensorInfo::SetShape(), IOutputSlot::SetTensorInfo(), ViewsDescriptor::SetViewOriginCoord(), ViewsDescriptor::SetViewSize(), and armnnDeserializer::ToTensorInfo().

Referenced by TfLiteParser::~TfLiteParser().

1900 {
1901  std::vector<unsigned int> outputDims(targetDimsIn.begin(), targetDimsIn.end());
1902  const auto stretchDim = std::find(targetDimsIn.begin(), targetDimsIn.end(), -1);
1903 
1904  if (stretchDim != targetDimsIn.end())
1905  {
1906  if (std::find(std::next(stretchDim), targetDimsIn.end(), -1) != targetDimsIn.end())
1907  {
1908  throw ParseException(
1909  boost::str(
1910  boost::format("At most one component of shape can be -1 %1%") % CHECK_LOCATION().AsString()));
1911  }
1912 
1913  auto targetNumElements =
1914  boost::numeric_cast<unsigned int>(
1915  std::accumulate(targetDimsIn.begin(), targetDimsIn.end(), -1, std::multiplies<int32_t>()));
1916 
1917  auto stretchIndex = static_cast<size_t>(std::distance(targetDimsIn.begin(), stretchDim));
1918  outputDims[stretchIndex] = inputTensorInfo.GetNumElements() / targetNumElements;
1919  }
1920 
1921  TensorShape outputShape = TensorShape(static_cast<unsigned int>(outputDims.size()), outputDims.data());
1922 
1923  TensorInfo reshapeInfo = inputTensorInfo;
1924  reshapeInfo.SetShape(outputShape);
1925 
1926  return reshapeInfo;
1927 }
void SetShape(const TensorShape &newShape)
Definition: Tensor.hpp:90
std::enable_if_t< std::is_unsigned< Source >::value &&std::is_unsigned< Dest >::value, Dest > numeric_cast(Source source)
Definition: NumericCast.hpp:33
#define CHECK_LOCATION()
Definition: Exceptions.hpp:192
unsigned int GetNumElements() const
Definition: Tensor.hpp:93

◆ OutputShapeOfSqueeze()

armnn::TensorInfo OutputShapeOfSqueeze ( const std::vector< uint32_t > &  squeezeDimsIn,
const armnn::TensorInfo inputTensorInfo 
)
static

Definition at line 1463 of file TfLiteParser.cpp.

References CHECK_LOCATION, CHECK_MODEL, CHECK_VALID_SIZE, TfLiteParser::GetBuffer(), TfLiteParser::GetInputs(), TfLiteParser::GetInputTensorIds(), TensorInfo::GetNumBytes(), TensorInfo::GetNumDimensions(), TensorInfo::GetNumElements(), TfLiteParser::GetOutputs(), IConnectableLayer::GetOutputSlot(), TfLiteParser::GetOutputTensorIds(), TensorInfo::GetShape(), armnn::IgnoreUnused(), ActivationDescriptor::m_A, MeanDescriptor::m_Axis, ActivationDescriptor::m_B, StridedSliceDescriptor::m_Begin, StridedSliceDescriptor::m_BeginMask, StridedSliceDescriptor::m_DataLayout, StridedSliceDescriptor::m_EllipsisMask, StridedSliceDescriptor::m_End, StridedSliceDescriptor::m_EndMask, ActivationDescriptor::m_Function, MeanDescriptor::m_KeepDims, StridedSliceDescriptor::m_NewAxisMask, PadDescriptor::m_PadList, StridedSliceDescriptor::m_ShrinkAxisMask, StridedSliceDescriptor::m_Stride, ReshapeDescriptor::m_TargetShape, armnn::NHWC, options, TensorInfo::SetShape(), IOutputSlot::SetTensorInfo(), armnnDeserializer::ToTensorInfo(), and true.

Referenced by BOOST_FIXTURE_TEST_CASE(), and TfLiteParser::~TfLiteParser().

1465 {
1466  CHECK_VALID_SIZE(squeezeDimsIn.size(), 0, 1, 2, 3, 4);
1467  std::vector<uint32_t> squeezeDims = squeezeDimsIn;
1468  static const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };
1469 
1470  if (inputTensorInfo.GetNumDimensions() > 4)
1471  {
1472  std::stringstream ss;
1473  ss << "Input tensor has unexpected number of dimensions:" << inputTensorInfo.GetNumDimensions()
1474  << " shape:" << inputTensorInfo.GetShape() << " "
1475  << CHECK_LOCATION().AsString();
1476  throw ParseException(ss.str());
1477  }
1478 
1479  if (squeezeDims.empty())
1480  {
1481  squeezeDims.assign(dimensionSequence,
1482  dimensionSequence+inputTensorInfo.GetNumDimensions());
1483  }
1484 
1485  std::vector<uint32_t> outputDims;
1486  for(unsigned int i = 0; i < inputTensorInfo.GetNumDimensions(); i++)
1487  {
1488  bool skipSqueeze = (std::find(squeezeDims.begin(), squeezeDims.end(), i) == squeezeDims.end());
1489  auto currentDimension = inputTensorInfo.GetShape()[i];
1490  if (skipSqueeze || currentDimension != 1)
1491  {
1492  outputDims.push_back(currentDimension);
1493  }
1494  }
1495 
1496  if (outputDims.size() > 4)
1497  {
1498  std::stringstream ss;
1499  ss << "Output tensor has unexpected number of dimensions:" << inputTensorInfo.GetNumDimensions()
1500  << " shape:" << inputTensorInfo.GetShape() << " "
1501  << CHECK_LOCATION().AsString();
1502  throw ParseException(ss.str());
1503  }
1504 
1505  TensorShape outShape = TensorShape(static_cast<unsigned int>(outputDims.size()),
1506  outputDims.data());
1507 
1508  // we need to preserve the tensor type and the quantization data as well
1509  TensorInfo outTensorInfo = inputTensorInfo;
1510  outTensorInfo.SetShape(outShape);
1511 
1512  return outTensorInfo;
1513 }
const TensorShape & GetShape() const
Definition: Tensor.hpp:88
void SetShape(const TensorShape &newShape)
Definition: Tensor.hpp:90
#define CHECK_VALID_SIZE(ACTUAL,...)
#define CHECK_LOCATION()
Definition: Exceptions.hpp:192
unsigned int GetNumDimensions() const
Definition: Tensor.hpp:92

The documentation for this class was generated from the following files: