ArmNN  NotReleased
TfLiteParser Class Reference

#include <TfLiteParser.hpp>

Inheritance diagram for TfLiteParser:
ITfLiteParser

Public Types

using ModelPtr = std::unique_ptr< tflite::ModelT >
 
using SubgraphPtr = std::unique_ptr< tflite::SubGraphT >
 
using OperatorPtr = std::unique_ptr< tflite::OperatorT >
 
using OperatorCodePtr = std::unique_ptr< tflite::OperatorCodeT >
 
using TensorPtr = std::unique_ptr< tflite::TensorT >
 
using TensorRawPtr = const tflite::TensorT *
 
using TensorRawPtrVector = std::vector< TensorRawPtr >
 
using TensorIdRawPtr = std::pair< size_t, TensorRawPtr >
 
using TensorIdRawPtrVector = std::vector< TensorIdRawPtr >
 
using BufferPtr = std::unique_ptr< tflite::BufferT >
 
using BufferRawPtr = const tflite::BufferT *
 

Public Member Functions

virtual armnn::INetworkPtr CreateNetworkFromBinaryFile (const char *graphFile) override
 Create the network from a flatbuffers binary file on disk. More...
 
virtual armnn::INetworkPtr CreateNetworkFromBinary (const std::vector< uint8_t > &binaryContent) override
 Create the network from a flatbuffers binary. More...
 
virtual BindingPointInfo GetNetworkInputBindingInfo (size_t subgraphId, const std::string &name) const override
 
virtual BindingPointInfo GetNetworkOutputBindingInfo (size_t subgraphId, const std::string &name) const override
 
virtual size_t GetSubgraphCount () const override
 Return the number of subgraphs in the parsed model. More...
 
virtual std::vector< std::string > GetSubgraphInputTensorNames (size_t subgraphId) const override
 Return the input tensor names for a given subgraph. More...
 
virtual std::vector< std::string > GetSubgraphOutputTensorNames (size_t subgraphId) const override
 Return the output tensor names for a given subgraph. More...
 
 TfLiteParser (const armnn::Optional< ITfLiteParser::TfLiteParserOptions > &options=armnn::EmptyOptional())
 
virtual ~TfLiteParser ()
 

Static Public Member Functions

static ModelPtr LoadModelFromFile (const char *fileName)
 
static ModelPtr LoadModelFromBinary (const uint8_t *binaryContent, size_t len)
 
static TensorRawPtrVector GetInputs (const ModelPtr &model, size_t subgraphIndex, size_t operatorIndex)
 
static TensorRawPtrVector GetOutputs (const ModelPtr &model, size_t subgraphIndex, size_t operatorIndex)
 
static TensorIdRawPtrVector GetSubgraphInputs (const ModelPtr &model, size_t subgraphIndex)
 
static TensorIdRawPtrVector GetSubgraphOutputs (const ModelPtr &model, size_t subgraphIndex)
 
static std::vector< int32_t > & GetInputTensorIds (const ModelPtr &model, size_t subgraphIndex, size_t operatorIndex)
 
static std::vector< int32_t > & GetOutputTensorIds (const ModelPtr &model, size_t subgraphIndex, size_t operatorIndex)
 
static BufferRawPtr GetBuffer (const ModelPtr &model, size_t bufferIndex)
 
static armnn::TensorInfo OutputShapeOfSqueeze (const std::vector< uint32_t > &squeezeDims, const armnn::TensorInfo &inputTensorInfo)
 
static armnn::TensorInfo OutputShapeOfReshape (const armnn::TensorInfo &inputTensorInfo, const std::vector< int32_t > &targetDimsIn)
 
- Static Public Member Functions inherited from ITfLiteParser
static ITfLiteParser * CreateRaw (const armnn::Optional< TfLiteParserOptions > &options=armnn::EmptyOptional())
 
static ITfLiteParserPtr Create (const armnn::Optional< TfLiteParserOptions > &options=armnn::EmptyOptional())
 
static void Destroy (ITfLiteParser *parser)
 

Additional Inherited Members

- Protected Member Functions inherited from ITfLiteParser
virtual ~ITfLiteParser ()
 

Detailed Description

Definition at line 19 of file TfLiteParser.hpp.

Member Typedef Documentation

◆ BufferPtr

using BufferPtr = std::unique_ptr<tflite::BufferT>

Definition at line 32 of file TfLiteParser.hpp.

◆ BufferRawPtr

using BufferRawPtr = const tflite::BufferT *

Definition at line 33 of file TfLiteParser.hpp.

◆ ModelPtr

using ModelPtr = std::unique_ptr<tflite::ModelT>

Definition at line 23 of file TfLiteParser.hpp.

◆ OperatorCodePtr

using OperatorCodePtr = std::unique_ptr<tflite::OperatorCodeT>

Definition at line 26 of file TfLiteParser.hpp.

◆ OperatorPtr

using OperatorPtr = std::unique_ptr<tflite::OperatorT>

Definition at line 25 of file TfLiteParser.hpp.

◆ SubgraphPtr

using SubgraphPtr = std::unique_ptr<tflite::SubGraphT>

Definition at line 24 of file TfLiteParser.hpp.

◆ TensorIdRawPtr

using TensorIdRawPtr = std::pair<size_t, TensorRawPtr>

Definition at line 30 of file TfLiteParser.hpp.

◆ TensorIdRawPtrVector

using TensorIdRawPtrVector = std::vector<TensorIdRawPtr>

Definition at line 31 of file TfLiteParser.hpp.

◆ TensorPtr

using TensorPtr = std::unique_ptr<tflite::TensorT>

Definition at line 27 of file TfLiteParser.hpp.

◆ TensorRawPtr

using TensorRawPtr = const tflite::TensorT *

Definition at line 28 of file TfLiteParser.hpp.

◆ TensorRawPtrVector

using TensorRawPtrVector = std::vector<TensorRawPtr>

Definition at line 29 of file TfLiteParser.hpp.

Constructor & Destructor Documentation

◆ TfLiteParser()

Definition at line 480 of file TfLiteParser.cpp.

References CHECK_MODEL, CHECKED_NON_NEGATIVE, IOutputSlot::Connect(), IConnectableLayer::GetInputSlot(), IConnectableLayer::GetName(), TensorInfo::GetNumDimensions(), IConnectableLayer::GetOutputSlot(), TensorInfo::GetShape(), ReshapeDescriptor::m_TargetShape, TensorInfo::SetShape(), IOutputSlot::SetTensorInfo(), and armnnDeserializer::ToTensorInfo().

Referenced by ITfLiteParser::CreateRaw(), and TfLiteParser::~TfLiteParser().

481 : m_Options(options)
482 , m_Network(nullptr, nullptr)
483 , m_ParserFunctions(tflite::BuiltinOperator_MAX+1, &TfLiteParser::ParseUnsupportedOperator)
484 {
485  // register supported operators
486  m_ParserFunctions[tflite::BuiltinOperator_ADD] = &TfLiteParser::ParseAdd;
487  m_ParserFunctions[tflite::BuiltinOperator_AVERAGE_POOL_2D] = &TfLiteParser::ParseAveragePool2D;
488  m_ParserFunctions[tflite::BuiltinOperator_BATCH_TO_SPACE_ND] = &TfLiteParser::ParseBatchToSpaceND;
489  m_ParserFunctions[tflite::BuiltinOperator_CONCATENATION] = &TfLiteParser::ParseConcatenation;
490  m_ParserFunctions[tflite::BuiltinOperator_CONV_2D] = &TfLiteParser::ParseConv2D;
491  m_ParserFunctions[tflite::BuiltinOperator_CUSTOM] = &TfLiteParser::ParseCustomOperator;
492  m_ParserFunctions[tflite::BuiltinOperator_DEPTHWISE_CONV_2D] = &TfLiteParser::ParseDepthwiseConv2D;
493  m_ParserFunctions[tflite::BuiltinOperator_DEQUANTIZE] = &TfLiteParser::ParseDequantize;
494  m_ParserFunctions[tflite::BuiltinOperator_FULLY_CONNECTED] = &TfLiteParser::ParseFullyConnected;
495  m_ParserFunctions[tflite::BuiltinOperator_LOGISTIC] = &TfLiteParser::ParseLogistic;
496  m_ParserFunctions[tflite::BuiltinOperator_L2_NORMALIZATION] = &TfLiteParser::ParseL2Normalization;
497  m_ParserFunctions[tflite::BuiltinOperator_MAX_POOL_2D] = &TfLiteParser::ParseMaxPool2D;
498  m_ParserFunctions[tflite::BuiltinOperator_MAXIMUM] = &TfLiteParser::ParseMaximum;
499  m_ParserFunctions[tflite::BuiltinOperator_MEAN] = &TfLiteParser::ParseMean;
500  m_ParserFunctions[tflite::BuiltinOperator_MINIMUM] = &TfLiteParser::ParseMinimum;
501  m_ParserFunctions[tflite::BuiltinOperator_MUL] = &TfLiteParser::ParseMul;
502  m_ParserFunctions[tflite::BuiltinOperator_PACK] = &TfLiteParser::ParsePack;
503  m_ParserFunctions[tflite::BuiltinOperator_PAD] = &TfLiteParser::ParsePad;
504  m_ParserFunctions[tflite::BuiltinOperator_QUANTIZE] = &TfLiteParser::ParseQuantize;
505  m_ParserFunctions[tflite::BuiltinOperator_RELU] = &TfLiteParser::ParseRelu;
506  m_ParserFunctions[tflite::BuiltinOperator_RELU6] = &TfLiteParser::ParseRelu6;
507  m_ParserFunctions[tflite::BuiltinOperator_RESHAPE] = &TfLiteParser::ParseReshape;
508  m_ParserFunctions[tflite::BuiltinOperator_RESIZE_BILINEAR] = &TfLiteParser::ParseResizeBilinear;
509  m_ParserFunctions[tflite::BuiltinOperator_RESIZE_NEAREST_NEIGHBOR] = &TfLiteParser::ParseResizeNearestNeighbor;
510  m_ParserFunctions[tflite::BuiltinOperator_SLICE] = &TfLiteParser::ParseSlice;
511  m_ParserFunctions[tflite::BuiltinOperator_SOFTMAX] = &TfLiteParser::ParseSoftmax;
512  m_ParserFunctions[tflite::BuiltinOperator_SPACE_TO_BATCH_ND] = &TfLiteParser::ParseSpaceToBatchND;
513  m_ParserFunctions[tflite::BuiltinOperator_SPLIT] = &TfLiteParser::ParseSplit;
514  m_ParserFunctions[tflite::BuiltinOperator_SQUEEZE] = &TfLiteParser::ParseSqueeze;
515  m_ParserFunctions[tflite::BuiltinOperator_STRIDED_SLICE] = &TfLiteParser::ParseStridedSlice;
516  m_ParserFunctions[tflite::BuiltinOperator_SUB] = &TfLiteParser::ParseSub;
517  m_ParserFunctions[tflite::BuiltinOperator_TANH] = &TfLiteParser::ParseTanH;
518  m_ParserFunctions[tflite::BuiltinOperator_TRANSPOSE] = &TfLiteParser::ParseTranspose;
519  m_ParserFunctions[tflite::BuiltinOperator_TRANSPOSE_CONV] = &TfLiteParser::ParseTransposeConv;
520  m_ParserFunctions[tflite::BuiltinOperator_UNPACK] = &TfLiteParser::ParseUnpack;
521 
522  // register supported custom operators
523  m_CustomParserFunctions["TFLite_Detection_PostProcess"] = &TfLiteParser::ParseDetectionPostProcess;
524 }

◆ ~TfLiteParser()

Member Function Documentation

◆ CreateNetworkFromBinary()

INetworkPtr CreateNetworkFromBinary ( const std::vector< uint8_t > &  binaryContent)
overridevirtual

Create the network from a flatbuffers binary.

Implements ITfLiteParser.

Definition at line 597 of file TfLiteParser.cpp.

References ARMNN_LOG, armnnTfParser::CalcPadding(), CHECK_LOCATION, CHECK_MODEL, CHECK_SUPPORTED_FUSED_ACTIVATION, CHECK_TENSOR, CHECK_VALID_SIZE, CHECKED_NON_NEGATIVE, armnn::error, TfLiteParser::GetBuffer(), TfLiteParser::GetInputs(), TfLiteParser::GetInputTensorIds(), TensorInfo::GetNumBytes(), TensorInfo::GetNumDimensions(), TensorInfo::GetNumElements(), TfLiteParser::GetOutputs(), IConnectableLayer::GetOutputSlot(), TfLiteParser::GetOutputTensorIds(), TensorInfo::GetShape(), TfLiteParser::LoadModelFromBinary(), SoftmaxDescriptor::m_Beta, Convolution2dDescriptor::m_BiasEnabled, DepthwiseConvolution2dDescriptor::m_BiasEnabled, TransposeConvolution2dDescriptor::m_BiasEnabled, BatchToSpaceNdDescriptor::m_BlockShape, SpaceToBatchNdDescriptor::m_BlockShape, BatchToSpaceNdDescriptor::m_Crops, Pooling2dDescriptor::m_DataLayout, Convolution2dDescriptor::m_DataLayout, DepthwiseConvolution2dDescriptor::m_DataLayout, L2NormalizationDescriptor::m_DataLayout, BatchToSpaceNdDescriptor::m_DataLayout, SpaceToBatchNdDescriptor::m_DataLayout, TransposeConvolution2dDescriptor::m_DataLayout, Convolution2dDescriptor::m_DilationX, DepthwiseConvolution2dDescriptor::m_DilationX, Convolution2dDescriptor::m_DilationY, DepthwiseConvolution2dDescriptor::m_DilationY, Pooling2dDescriptor::m_OutputShapeRounding, Pooling2dDescriptor::m_PadBottom, Convolution2dDescriptor::m_PadBottom, DepthwiseConvolution2dDescriptor::m_PadBottom, TransposeConvolution2dDescriptor::m_PadBottom, Pooling2dDescriptor::m_PaddingMethod, Pooling2dDescriptor::m_PadLeft, Convolution2dDescriptor::m_PadLeft, DepthwiseConvolution2dDescriptor::m_PadLeft, TransposeConvolution2dDescriptor::m_PadLeft, SpaceToBatchNdDescriptor::m_PadList, Pooling2dDescriptor::m_PadRight, Convolution2dDescriptor::m_PadRight, DepthwiseConvolution2dDescriptor::m_PadRight, TransposeConvolution2dDescriptor::m_PadRight, Pooling2dDescriptor::m_PadTop, Convolution2dDescriptor::m_PadTop, DepthwiseConvolution2dDescriptor::m_PadTop, 
TransposeConvolution2dDescriptor::m_PadTop, Pooling2dDescriptor::m_PoolHeight, Pooling2dDescriptor::m_PoolType, Pooling2dDescriptor::m_PoolWidth, Pooling2dDescriptor::m_StrideX, Convolution2dDescriptor::m_StrideX, DepthwiseConvolution2dDescriptor::m_StrideX, TransposeConvolution2dDescriptor::m_StrideX, Pooling2dDescriptor::m_StrideY, Convolution2dDescriptor::m_StrideY, DepthwiseConvolution2dDescriptor::m_StrideY, TransposeConvolution2dDescriptor::m_StrideY, armnn::NHWC, options, TensorInfo::SetShape(), IOutputSlot::SetTensorInfo(), armnnDeserializer::ToTensorInfo(), and Exception::what().

598 {
599  ResetParser();
600  m_Model = LoadModelFromBinary(binaryContent.data(), binaryContent.size());
601  return CreateNetworkFromModel();
602 }
static ModelPtr LoadModelFromBinary(const uint8_t *binaryContent, size_t len)

◆ CreateNetworkFromBinaryFile()

INetworkPtr CreateNetworkFromBinaryFile ( const char *  graphFile)
overridevirtual

Create the network from a flatbuffers binary file on disk.

Implements ITfLiteParser.

Definition at line 590 of file TfLiteParser.cpp.

References TfLiteParser::LoadModelFromFile().

591 {
592  ResetParser();
593  m_Model = LoadModelFromFile(graphFile);
594  return CreateNetworkFromModel();
595 }
static ModelPtr LoadModelFromFile(const char *fileName)

◆ GetBuffer()

TfLiteParser::BufferRawPtr GetBuffer ( const ModelPtr &  model,
size_t  bufferIndex 
)
static

◆ GetInputs()

TfLiteParser::TensorRawPtrVector GetInputs ( const ModelPtr &  model,
size_t  subgraphIndex,
size_t  operatorIndex 
)
static

Definition at line 2636 of file TfLiteParser.cpp.

References CHECK_MODEL, and CHECKED_NON_NEGATIVE.

Referenced by TfLiteParser::CreateNetworkFromBinary(), TfLiteParser::OutputShapeOfReshape(), TfLiteParser::OutputShapeOfSqueeze(), and TfLiteParser::~TfLiteParser().

2639 {
2640  CHECK_MODEL(model, subgraphIndex, operatorIndex);
2641 
2642  const auto & subgraphPtr = model->subgraphs[subgraphIndex];
2643  const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
2644 
2645  size_t inputCount = operatorPtr->inputs.size();
2646  TensorRawPtrVector result(inputCount);
2647  for (size_t i=0; i<inputCount; ++i)
2648  {
2649  uint32_t inputId = CHECKED_NON_NEGATIVE(operatorPtr->inputs[i]);
2650  result[i] = subgraphPtr->tensors[inputId].get();
2651  }
2652  return result;
2653 }
#define CHECK_MODEL(MODEL, SUBGRAPH_INDEX, OPERATOR_INDEX)
#define CHECKED_NON_NEGATIVE(VALUE)
std::vector< TensorRawPtr > TensorRawPtrVector

◆ GetInputTensorIds()

std::vector< int32_t > & GetInputTensorIds ( const ModelPtr &  model,
size_t  subgraphIndex,
size_t  operatorIndex 
)
static

Definition at line 2708 of file TfLiteParser.cpp.

References CHECK_MODEL.

Referenced by TfLiteParser::CreateNetworkFromBinary(), TfLiteParser::OutputShapeOfReshape(), TfLiteParser::OutputShapeOfSqueeze(), and TfLiteParser::~TfLiteParser().

2711 {
2712  CHECK_MODEL(model, subgraphIndex, operatorIndex);
2713  const auto & subgraphPtr = model->subgraphs[subgraphIndex];
2714  const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
2715  return operatorPtr->inputs;
2716 }
#define CHECK_MODEL(MODEL, SUBGRAPH_INDEX, OPERATOR_INDEX)

◆ GetNetworkInputBindingInfo()

BindingPointInfo GetNetworkInputBindingInfo ( size_t  subgraphId,
const std::string &  name 
) const
overridevirtual

Retrieve binding info (layer id and tensor info) for the network input identified by the given layer name and subgraph id.

Implements ITfLiteParser.

Definition at line 2924 of file TfLiteParser.cpp.

References CHECK_LOCATION, CHECK_SUBGRAPH, TfLiteParser::GetSubgraphInputs(), and armnnDeserializer::ToTensorInfo().

2926 {
2927  CHECK_SUBGRAPH(m_Model, subgraphId);
2928  auto inputs = GetSubgraphInputs(m_Model, subgraphId);
2929  for (auto const & input : inputs)
2930  {
2931  if (input.second->name == name)
2932  {
2933  auto bindingId = GenerateLayerBindingId(subgraphId, input.first);
2934  return std::make_pair(bindingId, ToTensorInfo(input.second));
2935  }
2936  }
2937 
2938  std::stringstream bindings;
2939  for (auto const & input : inputs)
2940  {
2941  bindings << "'" << input.second->name << "' ";
2942  }
2943 
2944  throw ParseException(
2945  boost::str(
2946  boost::format("No input binding found for subgraph:%1% and name:%2%. "
2947  "Possible inputs are: [%3%] %4%") %
2948  subgraphId %
2949  name %
2950  bindings.str() %
2951  CHECK_LOCATION().AsString()));
2952 }
static TensorIdRawPtrVector GetSubgraphInputs(const ModelPtr &model, size_t subgraphIndex)
#define CHECK_SUBGRAPH(MODEL, SUBGRAPH_INDEX)
#define CHECK_LOCATION()
Definition: Exceptions.hpp:169
armnn::TensorInfo ToTensorInfo(Deserializer::TensorRawPtr tensorPtr)

◆ GetNetworkOutputBindingInfo()

BindingPointInfo GetNetworkOutputBindingInfo ( size_t  subgraphId,
const std::string &  name 
) const
overridevirtual

Retrieve binding info (layer id and tensor info) for the network output identified by the given layer name and subgraph id.

Implements ITfLiteParser.

Definition at line 2954 of file TfLiteParser.cpp.

References CHECK_LOCATION, CHECK_SUBGRAPH, TfLiteParser::GetSubgraphOutputs(), and armnnDeserializer::ToTensorInfo().

2956 {
2957  CHECK_SUBGRAPH(m_Model, subgraphId);
2958  auto outputs = GetSubgraphOutputs(m_Model, subgraphId);
2959  for (unsigned int i = 0; i < outputs.size(); ++i)
2960  {
2961  auto const output = outputs[i];
2962  if (output.second->name == name)
2963  {
2964  auto bindingId = GenerateLayerBindingId(subgraphId, output.first);
2965  std::vector<unsigned int> shape = m_OverridenOutputShapes.size() > 0 ?
2966  m_OverridenOutputShapes[i] : AsUnsignedVector(output.second->shape);
2967  return std::make_pair(bindingId, ToTensorInfo(output.second, shape));
2968  }
2969  }
2970 
2971  std::stringstream bindings;
2972  for (auto const & output : outputs)
2973  {
2974  bindings << "'" << output.second->name << "' ";
2975  }
2976 
2977  throw ParseException(
2978  boost::str(
2979  boost::format("No output binding found for subgraph:%1% and name:%2%. "
2980  "Possible outputs are: [%3%] %4%") %
2981  subgraphId %
2982  name %
2983  bindings.str() %
2984  CHECK_LOCATION().AsString()));
2985 }
#define CHECK_SUBGRAPH(MODEL, SUBGRAPH_INDEX)
#define CHECK_LOCATION()
Definition: Exceptions.hpp:169
static TensorIdRawPtrVector GetSubgraphOutputs(const ModelPtr &model, size_t subgraphIndex)
armnn::TensorInfo ToTensorInfo(Deserializer::TensorRawPtr tensorPtr)

◆ GetOutputs()

TfLiteParser::TensorRawPtrVector GetOutputs ( const ModelPtr &  model,
size_t  subgraphIndex,
size_t  operatorIndex 
)
static

Definition at line 2655 of file TfLiteParser.cpp.

References CHECK_MODEL, CHECK_TENSOR, and CHECKED_NON_NEGATIVE.

Referenced by TfLiteParser::CreateNetworkFromBinary(), TfLiteParser::OutputShapeOfReshape(), TfLiteParser::OutputShapeOfSqueeze(), and TfLiteParser::~TfLiteParser().

2658 {
2659  CHECK_MODEL(model, subgraphIndex, operatorIndex);
2660 
2661  const auto & subgraphPtr = model->subgraphs[subgraphIndex];
2662  const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
2663 
2664  size_t outputCount = operatorPtr->outputs.size();
2665  TensorRawPtrVector result(outputCount);
2666  for (size_t i=0; i<outputCount; ++i)
2667  {
2668  uint32_t outputId = CHECKED_NON_NEGATIVE(operatorPtr->outputs[i]);
2669  CHECK_TENSOR(model, subgraphIndex, outputId);
2670  result[i] = subgraphPtr->tensors[outputId].get();
2671  }
2672  return result;
2673 }
#define CHECK_MODEL(MODEL, SUBGRAPH_INDEX, OPERATOR_INDEX)
#define CHECKED_NON_NEGATIVE(VALUE)
#define CHECK_TENSOR(MODEL, SUBGRAPH_INDEX, TENSOR_INDEX)
std::vector< TensorRawPtr > TensorRawPtrVector

◆ GetOutputTensorIds()

std::vector< int32_t > & GetOutputTensorIds ( const ModelPtr &  model,
size_t  subgraphIndex,
size_t  operatorIndex 
)
static

Definition at line 2718 of file TfLiteParser.cpp.

References CHECK_LOCATION, CHECK_MODEL, CHECK_SUBGRAPH, IConnectableLayer::GetInputSlot(), IConnectableLayer::GetNumInputSlots(), IConnectableLayer::GetNumOutputSlots(), IConnectableLayer::GetOutputSlot(), TfLiteParser::GetSubgraphInputs(), TfLiteParser::GetSubgraphOutputs(), IOutputSlot::SetTensorInfo(), and armnnDeserializer::ToTensorInfo().

Referenced by TfLiteParser::CreateNetworkFromBinary(), TfLiteParser::OutputShapeOfReshape(), TfLiteParser::OutputShapeOfSqueeze(), and TfLiteParser::~TfLiteParser().

2721 {
2722  CHECK_MODEL(model, subgraphIndex, operatorIndex);
2723  const auto & subgraphPtr = model->subgraphs[subgraphIndex];
2724  const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
2725  return operatorPtr->outputs;
2726 }
#define CHECK_MODEL(MODEL, SUBGRAPH_INDEX, OPERATOR_INDEX)

◆ GetSubgraphCount()

size_t GetSubgraphCount ( ) const
overridevirtual

Return the number of subgraphs in the parsed model.

Implements ITfLiteParser.

Definition at line 2987 of file TfLiteParser.cpp.

2988 {
2989  return m_Model->subgraphs.size();
2990 }

◆ GetSubgraphInputs()

TfLiteParser::TensorIdRawPtrVector GetSubgraphInputs ( const ModelPtr &  model,
size_t  subgraphIndex 
)
static

Definition at line 2675 of file TfLiteParser.cpp.

References CHECK_SUBGRAPH, CHECK_TENSOR, and CHECKED_NON_NEGATIVE.

Referenced by TfLiteParser::GetNetworkInputBindingInfo(), TfLiteParser::GetOutputTensorIds(), TfLiteParser::GetSubgraphInputTensorNames(), and TfLiteParser::~TfLiteParser().

2677 {
2678  CHECK_SUBGRAPH(model, subgraphIndex);
2679  const auto & subgraphPtr = model->subgraphs[subgraphIndex];
2680 
2681  size_t inputCount = subgraphPtr->inputs.size();
2682  TensorIdRawPtrVector result(inputCount);
2683  for (size_t i=0; i<inputCount; ++i)
2684  {
2685  uint32_t inputId = CHECKED_NON_NEGATIVE(subgraphPtr->inputs[i]);
2686  CHECK_TENSOR(model, subgraphIndex, inputId);
2687  result[i] = std::make_pair(inputId, subgraphPtr->tensors[inputId].get());
2688  }
2689  return result;
2690 }
std::vector< TensorIdRawPtr > TensorIdRawPtrVector
#define CHECK_SUBGRAPH(MODEL, SUBGRAPH_INDEX)
#define CHECKED_NON_NEGATIVE(VALUE)
#define CHECK_TENSOR(MODEL, SUBGRAPH_INDEX, TENSOR_INDEX)

◆ GetSubgraphInputTensorNames()

std::vector< std::string > GetSubgraphInputTensorNames ( size_t  subgraphId) const
overridevirtual

Return the input tensor names for a given subgraph.

Implements ITfLiteParser.

Definition at line 2992 of file TfLiteParser.cpp.

References CHECK_SUBGRAPH, and TfLiteParser::GetSubgraphInputs().

2993 {
2994  CHECK_SUBGRAPH(m_Model, subgraphId);
2995  auto inputs = GetSubgraphInputs(m_Model, subgraphId);
2996  std::vector<std::string> result;
2997  result.reserve(inputs.size());
2998  for (auto const & input : inputs)
2999  {
3000  result.push_back(input.second->name);
3001  }
3002  return result;
3003 }
static TensorIdRawPtrVector GetSubgraphInputs(const ModelPtr &model, size_t subgraphIndex)
#define CHECK_SUBGRAPH(MODEL, SUBGRAPH_INDEX)

◆ GetSubgraphOutputs()

TfLiteParser::TensorIdRawPtrVector GetSubgraphOutputs ( const ModelPtr &  model,
size_t  subgraphIndex 
)
static

Definition at line 2692 of file TfLiteParser.cpp.

References CHECK_SUBGRAPH, and CHECKED_NON_NEGATIVE.

Referenced by TfLiteParser::GetNetworkOutputBindingInfo(), TfLiteParser::GetOutputTensorIds(), TfLiteParser::GetSubgraphOutputTensorNames(), and TfLiteParser::~TfLiteParser().

2694 {
2695  CHECK_SUBGRAPH(model, subgraphIndex);
2696  const auto & subgraphPtr = model->subgraphs[subgraphIndex];
2697 
2698  size_t outputCount = subgraphPtr->outputs.size();
2699  TensorIdRawPtrVector result(outputCount);
2700  for (size_t i=0; i<outputCount; ++i)
2701  {
2702  uint32_t outputId = CHECKED_NON_NEGATIVE(subgraphPtr->outputs[i]);
2703  result[i] = std::make_pair(outputId, subgraphPtr->tensors[outputId].get());
2704  }
2705  return result;
2706 }
std::vector< TensorIdRawPtr > TensorIdRawPtrVector
#define CHECK_SUBGRAPH(MODEL, SUBGRAPH_INDEX)
#define CHECKED_NON_NEGATIVE(VALUE)

◆ GetSubgraphOutputTensorNames()

std::vector< std::string > GetSubgraphOutputTensorNames ( size_t  subgraphId) const
overridevirtual

Return the output tensor names for a given subgraph.

Implements ITfLiteParser.

Definition at line 3005 of file TfLiteParser.cpp.

References CHECK_SUBGRAPH, and TfLiteParser::GetSubgraphOutputs().

3006 {
3007  CHECK_SUBGRAPH(m_Model, subgraphId);
3008  auto outputs = GetSubgraphOutputs(m_Model, subgraphId);
3009  std::vector<std::string> result;
3010  result.reserve(outputs.size());
3011  for (auto const & output : outputs)
3012  {
3013  result.push_back(output.second->name);
3014  }
3015  return result;
3016 }
#define CHECK_SUBGRAPH(MODEL, SUBGRAPH_INDEX)
static TensorIdRawPtrVector GetSubgraphOutputs(const ModelPtr &model, size_t subgraphIndex)

◆ LoadModelFromBinary()

TfLiteParser::ModelPtr LoadModelFromBinary ( const uint8_t *  binaryContent,
size_t  len 
)
static

Definition at line 2617 of file TfLiteParser.cpp.

References CHECK_LOCATION.

Referenced by TfLiteParser::CreateNetworkFromBinary(), TfLiteParser::LoadModelFromFile(), and TfLiteParser::~TfLiteParser().

2618 {
2619  if (binaryContent == nullptr)
2620  {
2621  throw InvalidArgumentException(boost::str(boost::format("Invalid (null) binary content %1%") %
2622  CHECK_LOCATION().AsString()));
2623  }
2624  flatbuffers::Verifier verifier(binaryContent, len);
2625  if (verifier.VerifyBuffer<tflite::Model>() == false)
2626  {
2627  throw ParseException(
2628  boost::str(boost::format("Buffer doesn't conform to the expected Tensorflow Lite "
2629  "flatbuffers format. size:%1% %2%") %
2630  len %
2631  CHECK_LOCATION().AsString()));
2632  }
2633  return tflite::UnPackModel(binaryContent);
2634 }
#define CHECK_LOCATION()
Definition: Exceptions.hpp:169
struct Model Model

◆ LoadModelFromFile()

TfLiteParser::ModelPtr LoadModelFromFile ( const char *  fileName)
static

Definition at line 2595 of file TfLiteParser.cpp.

References CHECK_LOCATION, and TfLiteParser::LoadModelFromBinary().

Referenced by TfLiteParser::CreateNetworkFromBinaryFile(), and TfLiteParser::~TfLiteParser().

2596 {
2597  if (fileName == nullptr)
2598  {
2599  throw InvalidArgumentException(boost::str(boost::format("Invalid (null) file name %1%") %
2600  CHECK_LOCATION().AsString()));
2601  }
2602  boost::system::error_code errorCode;
2603  boost::filesystem::path pathToFile(fileName);
2604  if (!boost::filesystem::exists(pathToFile, errorCode))
2605  {
2606  throw FileNotFoundException(boost::str(boost::format("Cannot find the file (%1%) errorCode: %2% %3%") %
2607  fileName %
2608  errorCode %
2609  CHECK_LOCATION().AsString()));
2610  }
2611  std::ifstream file(fileName, std::ios::binary);
2612  std::string fileContent((std::istreambuf_iterator<char>(file)), std::istreambuf_iterator<char>());
2613  return LoadModelFromBinary(reinterpret_cast<const uint8_t *>(fileContent.c_str()),
2614  fileContent.size());
2615 }
#define CHECK_LOCATION()
Definition: Exceptions.hpp:169
static ModelPtr LoadModelFromBinary(const uint8_t *binaryContent, size_t len)

◆ OutputShapeOfReshape()

armnn::TensorInfo OutputShapeOfReshape ( const armnn::TensorInfo inputTensorInfo,
const std::vector< int32_t > &  targetDimsIn 
)
static

Definition at line 1901 of file TfLiteParser.cpp.

References CHECK_LOCATION, CHECK_MODEL, CHECK_SUPPORTED_FUSED_ACTIVATION, CHECK_VALID_SIZE, CHECKED_NON_NEGATIVE, armnnDeserializer::CheckShape(), IOutputSlot::Connect(), TfLiteParser::GetBuffer(), TensorInfo::GetDataType(), TfLiteParser::GetInputs(), IConnectableLayer::GetInputSlot(), TfLiteParser::GetInputTensorIds(), IConnectableLayer::GetName(), TensorInfo::GetNumBytes(), TensorInfo::GetNumDimensions(), TensorInfo::GetNumElements(), IConnectableLayer::GetNumOutputSlots(), TfLiteParser::GetOutputs(), IConnectableLayer::GetOutputSlot(), TfLiteParser::GetOutputTensorIds(), TensorInfo::GetQuantizationOffset(), TensorInfo::GetQuantizationScale(), TensorInfo::GetShape(), ActivationDescriptor::m_A, StackDescriptor::m_Axis, ActivationDescriptor::m_B, FullyConnectedDescriptor::m_BiasEnabled, DetectionPostProcessDescriptor::m_DetectionsPerClass, ActivationDescriptor::m_Function, StackDescriptor::m_InputShape, DetectionPostProcessDescriptor::m_MaxClassesPerDetection, DetectionPostProcessDescriptor::m_MaxDetections, ResizeDescriptor::m_Method, DetectionPostProcessDescriptor::m_NmsIouThreshold, DetectionPostProcessDescriptor::m_NmsScoreThreshold, DetectionPostProcessDescriptor::m_NumClasses, StackDescriptor::m_NumInputs, DetectionPostProcessDescriptor::m_ScaleH, DetectionPostProcessDescriptor::m_ScaleW, DetectionPostProcessDescriptor::m_ScaleX, DetectionPostProcessDescriptor::m_ScaleY, ReshapeDescriptor::m_TargetShape, FullyConnectedDescriptor::m_TransposeWeightMatrix, DetectionPostProcessDescriptor::m_UseRegularNms, armnn::MaxNumOfTensorDimensions, armnn::NHWC, options, armnnUtils::ProcessConcatInputTensorInfo(), OriginsDescriptor::SetConcatAxis(), TensorInfo::SetShape(), IOutputSlot::SetTensorInfo(), ViewsDescriptor::SetViewOriginCoord(), ViewsDescriptor::SetViewSize(), and armnnDeserializer::ToTensorInfo().

Referenced by TfLiteParser::~TfLiteParser().

1903 {
1904  std::vector<unsigned int> outputDims(targetDimsIn.begin(), targetDimsIn.end());
1905  const auto stretchDim = std::find(targetDimsIn.begin(), targetDimsIn.end(), -1);
1906 
1907  if (stretchDim != targetDimsIn.end())
1908  {
1909  if (std::find(std::next(stretchDim), targetDimsIn.end(), -1) != targetDimsIn.end())
1910  {
1911  throw ParseException(
1912  boost::str(
1913  boost::format("At most one component of shape can be -1 %1%") % CHECK_LOCATION().AsString()));
1914  }
1915 
1916  auto targetNumElements =
1917  boost::numeric_cast<unsigned int>(
1918  std::accumulate(targetDimsIn.begin(), targetDimsIn.end(), -1, std::multiplies<int32_t>()));
1919 
1920  auto stretchIndex = static_cast<size_t>(std::distance(targetDimsIn.begin(), stretchDim));
1921  outputDims[stretchIndex] = inputTensorInfo.GetNumElements() / targetNumElements;
1922  }
1923 
1924  TensorShape outputShape = TensorShape(static_cast<unsigned int>(outputDims.size()), outputDims.data());
1925 
1926  TensorInfo reshapeInfo = inputTensorInfo;
1927  reshapeInfo.SetShape(outputShape);
1928 
1929  return reshapeInfo;
1930 }
#define CHECK_LOCATION()
Definition: Exceptions.hpp:169
void SetShape(const TensorShape &newShape)
Definition: Tensor.hpp:90
unsigned int GetNumElements() const
Definition: Tensor.hpp:93

◆ OutputShapeOfSqueeze()

armnn::TensorInfo OutputShapeOfSqueeze ( const std::vector< uint32_t > &  squeezeDimsIn,
const armnn::TensorInfo inputTensorInfo 
)
static

Definition at line 1466 of file TfLiteParser.cpp.

References CHECK_LOCATION, CHECK_MODEL, CHECK_VALID_SIZE, TfLiteParser::GetBuffer(), TfLiteParser::GetInputs(), TfLiteParser::GetInputTensorIds(), TensorInfo::GetNumBytes(), TensorInfo::GetNumDimensions(), TensorInfo::GetNumElements(), TfLiteParser::GetOutputs(), IConnectableLayer::GetOutputSlot(), TfLiteParser::GetOutputTensorIds(), TensorInfo::GetShape(), ActivationDescriptor::m_A, MeanDescriptor::m_Axis, ActivationDescriptor::m_B, StridedSliceDescriptor::m_Begin, StridedSliceDescriptor::m_BeginMask, StridedSliceDescriptor::m_DataLayout, StridedSliceDescriptor::m_EllipsisMask, StridedSliceDescriptor::m_End, StridedSliceDescriptor::m_EndMask, ActivationDescriptor::m_Function, MeanDescriptor::m_KeepDims, StridedSliceDescriptor::m_NewAxisMask, PadDescriptor::m_PadList, StridedSliceDescriptor::m_ShrinkAxisMask, StridedSliceDescriptor::m_Stride, ReshapeDescriptor::m_TargetShape, armnn::NHWC, options, TensorInfo::SetShape(), IOutputSlot::SetTensorInfo(), armnnDeserializer::ToTensorInfo(), and true.

Referenced by BOOST_FIXTURE_TEST_CASE(), and TfLiteParser::~TfLiteParser().

1468 {
1469  CHECK_VALID_SIZE(squeezeDimsIn.size(), 0, 1, 2, 3, 4);
1470  std::vector<uint32_t> squeezeDims = squeezeDimsIn;
1471  static const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };
1472 
1473  if (inputTensorInfo.GetNumDimensions() > 4)
1474  {
1475  std::stringstream ss;
1476  ss << "Input tensor has unexpected number of dimensions:" << inputTensorInfo.GetNumDimensions()
1477  << " shape:" << inputTensorInfo.GetShape() << " "
1478  << CHECK_LOCATION().AsString();
1479  throw ParseException(ss.str());
1480  }
1481 
1482  if (squeezeDims.empty())
1483  {
1484  squeezeDims.assign(dimensionSequence,
1485  dimensionSequence+inputTensorInfo.GetNumDimensions());
1486  }
1487 
1488  std::vector<uint32_t> outputDims;
1489  for(unsigned int i = 0; i < inputTensorInfo.GetNumDimensions(); i++)
1490  {
1491  bool skipSqueeze = (std::find(squeezeDims.begin(), squeezeDims.end(), i) == squeezeDims.end());
1492  auto currentDimension = inputTensorInfo.GetShape()[i];
1493  if (skipSqueeze || currentDimension != 1)
1494  {
1495  outputDims.push_back(currentDimension);
1496  }
1497  }
1498 
1499  if (outputDims.size() > 4)
1500  {
1501  std::stringstream ss;
1502  ss << "Output tensor has unexpected number of dimensions:" << inputTensorInfo.GetNumDimensions()
1503  << " shape:" << inputTensorInfo.GetShape() << " "
1504  << CHECK_LOCATION().AsString();
1505  throw ParseException(ss.str());
1506  }
1507 
1508  TensorShape outShape = TensorShape(static_cast<unsigned int>(outputDims.size()),
1509  outputDims.data());
1510 
1511  // we need to preserve the tensor type and the quantization data as well
1512  TensorInfo outTensorInfo = inputTensorInfo;
1513  outTensorInfo.SetShape(outShape);
1514 
1515  return outTensorInfo;
1516 }
#define CHECK_VALID_SIZE(ACTUAL,...)
unsigned int GetNumDimensions() const
Definition: Tensor.hpp:92
#define CHECK_LOCATION()
Definition: Exceptions.hpp:169
void SetShape(const TensorShape &newShape)
Definition: Tensor.hpp:90
const TensorShape & GetShape() const
Definition: Tensor.hpp:88

The documentation for this class was generated from the following files: