ArmNN
 21.08
TfLiteParserImpl Class Reference

#include <TfLiteParser.hpp>

Public Types

using ModelPtr = std::unique_ptr< tflite::ModelT >
 
using SubgraphPtr = std::unique_ptr< tflite::SubGraphT >
 
using OperatorPtr = std::unique_ptr< tflite::OperatorT >
 
using OperatorCodePtr = std::unique_ptr< tflite::OperatorCodeT >
 
using TensorPtr = std::unique_ptr< tflite::TensorT >
 
using TensorRawPtr = const tflite::TensorT *
 
using TensorRawPtrVector = std::vector< TensorRawPtr >
 
using TensorIdRawPtr = std::pair< size_t, TensorRawPtr >
 
using TensorIdRawPtrVector = std::vector< TensorIdRawPtr >
 
using BufferPtr = std::unique_ptr< tflite::BufferT >
 
using BufferRawPtr = const tflite::BufferT *
 

Public Member Functions

armnn::INetworkPtr CreateNetworkFromBinaryFile (const char *graphFile)
 Create the network from a flatbuffers binary file on disk. More...
 
armnn::INetworkPtr CreateNetworkFromBinary (const std::vector< uint8_t > &binaryContent)
 Create the network from a flatbuffers binary. More...
 
BindingPointInfo GetNetworkInputBindingInfo (size_t subgraphId, const std::string &name) const
 Retrieve binding info (layer id and tensor info) for the network input identified by the given layer name and subgraph id. More...
 
BindingPointInfo GetNetworkOutputBindingInfo (size_t subgraphId, const std::string &name) const
 Retrieve binding info (layer id and tensor info) for the network output identified by the given layer name and subgraph id. More...
 
size_t GetSubgraphCount () const
 Return the number of subgraphs in the parsed model. More...
 
std::vector< std::string > GetSubgraphInputTensorNames (size_t subgraphId) const
 Return the input tensor names for a given subgraph. More...
 
std::vector< std::string > GetSubgraphOutputTensorNames (size_t subgraphId) const
 Return the output tensor names for a given subgraph. More...
 
 TfLiteParserImpl (const armnn::Optional< ITfLiteParser::TfLiteParserOptions > &options=armnn::EmptyOptional())
 
 ~TfLiteParserImpl ()=default
 
armnn::INetworkPtr CreateNetworkFromBinaryAsDynamic (const std::vector< uint8_t > &binaryContent)
 
armnn::INetworkPtr LoadModel (std::unique_ptr< tflite::ModelT > model)
 

Static Public Member Functions

static ModelPtr LoadModelFromFile (const char *fileName)
 
static ModelPtr LoadModelFromBinary (const uint8_t *binaryContent, size_t len)
 
static TensorRawPtrVector GetInputs (const ModelPtr &model, size_t subgraphIndex, size_t operatorIndex)
 
static TensorRawPtrVector GetOutputs (const ModelPtr &model, size_t subgraphIndex, size_t operatorIndex)
 
static TensorIdRawPtrVector GetSubgraphInputs (const ModelPtr &model, size_t subgraphIndex)
 
static TensorIdRawPtrVector GetSubgraphOutputs (const ModelPtr &model, size_t subgraphIndex)
 
static std::vector< int32_t > & GetInputTensorIds (const ModelPtr &model, size_t subgraphIndex, size_t operatorIndex)
 
static std::vector< int32_t > & GetOutputTensorIds (const ModelPtr &model, size_t subgraphIndex, size_t operatorIndex)
 
static BufferRawPtr GetBuffer (const ModelPtr &model, size_t bufferIndex)
 
static armnn::TensorInfo OutputShapeOfSqueeze (std::vector< uint32_t > squeezeDims, const armnn::TensorInfo &inputTensorInfo)
 
static armnn::TensorInfo OutputShapeOfReshape (const armnn::TensorInfo &inputTensorInfo, const std::vector< int32_t > &targetDimsIn)
 
static const std::string GetVersion ()
 Retrieve version in X.Y.Z form. More...
 

Detailed Description

Definition at line 19 of file TfLiteParser.hpp.

Member Typedef Documentation

◆ BufferPtr

using BufferPtr = std::unique_ptr<tflite::BufferT>

Definition at line 32 of file TfLiteParser.hpp.

◆ BufferRawPtr

using BufferRawPtr = const tflite::BufferT *

Definition at line 33 of file TfLiteParser.hpp.

◆ ModelPtr

using ModelPtr = std::unique_ptr<tflite::ModelT>

Definition at line 23 of file TfLiteParser.hpp.

◆ OperatorCodePtr

using OperatorCodePtr = std::unique_ptr<tflite::OperatorCodeT>

Definition at line 26 of file TfLiteParser.hpp.

◆ OperatorPtr

using OperatorPtr = std::unique_ptr<tflite::OperatorT>

Definition at line 25 of file TfLiteParser.hpp.

◆ SubgraphPtr

using SubgraphPtr = std::unique_ptr<tflite::SubGraphT>

Definition at line 24 of file TfLiteParser.hpp.

◆ TensorIdRawPtr

using TensorIdRawPtr = std::pair<size_t, TensorRawPtr>

Definition at line 30 of file TfLiteParser.hpp.

◆ TensorIdRawPtrVector

using TensorIdRawPtrVector = std::vector<TensorIdRawPtr>

Definition at line 31 of file TfLiteParser.hpp.

◆ TensorPtr

using TensorPtr = std::unique_ptr<tflite::TensorT>

Definition at line 27 of file TfLiteParser.hpp.

◆ TensorRawPtr

using TensorRawPtr = const tflite::TensorT *

Definition at line 28 of file TfLiteParser.hpp.

◆ TensorRawPtrVector

using TensorRawPtrVector = std::vector<TensorRawPtr>

Definition at line 29 of file TfLiteParser.hpp.

Constructor & Destructor Documentation

◆ TfLiteParserImpl()

Definition at line 628 of file TfLiteParser.cpp.

629 : m_Options(options)
630 , m_Network(nullptr, nullptr)
631 , m_ParserFunctions(tflite::BuiltinOperator_MAX+1, &TfLiteParserImpl::ParseUnsupportedOperator)
632 {
633  // register supported operators
634  m_ParserFunctions[tflite::BuiltinOperator_ABS] = &TfLiteParserImpl::ParseAbs;
635  m_ParserFunctions[tflite::BuiltinOperator_ADD] = &TfLiteParserImpl::ParseAdd;
636  m_ParserFunctions[tflite::BuiltinOperator_ARG_MIN] = &TfLiteParserImpl::ParseArgMin;
637  m_ParserFunctions[tflite::BuiltinOperator_ARG_MAX] = &TfLiteParserImpl::ParseArgMax;
638  m_ParserFunctions[tflite::BuiltinOperator_AVERAGE_POOL_2D] = &TfLiteParserImpl::ParseAveragePool2D;
639  m_ParserFunctions[tflite::BuiltinOperator_BATCH_TO_SPACE_ND] = &TfLiteParserImpl::ParseBatchToSpaceND;
640  m_ParserFunctions[tflite::BuiltinOperator_CAST] = &TfLiteParserImpl::ParseCast;
641  m_ParserFunctions[tflite::BuiltinOperator_CONCATENATION] = &TfLiteParserImpl::ParseConcatenation;
642  m_ParserFunctions[tflite::BuiltinOperator_CONV_2D] = &TfLiteParserImpl::ParseConv2D;
643  m_ParserFunctions[tflite::BuiltinOperator_CUSTOM] = &TfLiteParserImpl::ParseCustomOperator;
644  m_ParserFunctions[tflite::BuiltinOperator_DEPTH_TO_SPACE] = &TfLiteParserImpl::ParseDepthToSpace;
645  m_ParserFunctions[tflite::BuiltinOperator_DEPTHWISE_CONV_2D] = &TfLiteParserImpl::ParseDepthwiseConv2D;
646  m_ParserFunctions[tflite::BuiltinOperator_DEQUANTIZE] = &TfLiteParserImpl::ParseDequantize;
647  m_ParserFunctions[tflite::BuiltinOperator_DIV] = &TfLiteParserImpl::ParseDiv;
648  m_ParserFunctions[tflite::BuiltinOperator_ELU] = &TfLiteParserImpl::ParseElu;
649  m_ParserFunctions[tflite::BuiltinOperator_EQUAL] = &TfLiteParserImpl::ParseEqual;
650  m_ParserFunctions[tflite::BuiltinOperator_EXP] = &TfLiteParserImpl::ParseExp;
651  m_ParserFunctions[tflite::BuiltinOperator_EXPAND_DIMS] = &TfLiteParserImpl::ParseExpandDims;
652  m_ParserFunctions[tflite::BuiltinOperator_FULLY_CONNECTED] = &TfLiteParserImpl::ParseFullyConnected;
653  m_ParserFunctions[tflite::BuiltinOperator_GATHER] = &TfLiteParserImpl::ParseGather;
654  m_ParserFunctions[tflite::BuiltinOperator_GREATER] = &TfLiteParserImpl::ParseGreater;
655  m_ParserFunctions[tflite::BuiltinOperator_GREATER_EQUAL] = &TfLiteParserImpl::ParseGreaterOrEqual;
656  m_ParserFunctions[tflite::BuiltinOperator_HARD_SWISH] = &TfLiteParserImpl::ParseHardSwish;
657  m_ParserFunctions[tflite::BuiltinOperator_LEAKY_RELU] = &TfLiteParserImpl::ParseLeakyRelu;
658  m_ParserFunctions[tflite::BuiltinOperator_LESS] = &TfLiteParserImpl::ParseLess;
659  m_ParserFunctions[tflite::BuiltinOperator_LESS_EQUAL] = &TfLiteParserImpl::ParseLessOrEqual;
660  m_ParserFunctions[tflite::BuiltinOperator_LOGICAL_NOT] = &TfLiteParserImpl::ParseLogicalNot;
661  m_ParserFunctions[tflite::BuiltinOperator_LOGISTIC] = &TfLiteParserImpl::ParseLogistic;
662  m_ParserFunctions[tflite::BuiltinOperator_L2_NORMALIZATION] = &TfLiteParserImpl::ParseL2Normalization;
663  m_ParserFunctions[tflite::BuiltinOperator_MAX_POOL_2D] = &TfLiteParserImpl::ParseMaxPool2D;
664  m_ParserFunctions[tflite::BuiltinOperator_MAXIMUM] = &TfLiteParserImpl::ParseMaximum;
665  m_ParserFunctions[tflite::BuiltinOperator_MEAN] = &TfLiteParserImpl::ParseMean;
666  m_ParserFunctions[tflite::BuiltinOperator_MINIMUM] = &TfLiteParserImpl::ParseMinimum;
667  m_ParserFunctions[tflite::BuiltinOperator_MUL] = &TfLiteParserImpl::ParseMul;
668  m_ParserFunctions[tflite::BuiltinOperator_NEG] = &TfLiteParserImpl::ParseNeg;
669  m_ParserFunctions[tflite::BuiltinOperator_NOT_EQUAL] = &TfLiteParserImpl::ParseNotEqual;
670  m_ParserFunctions[tflite::BuiltinOperator_PACK] = &TfLiteParserImpl::ParsePack;
671  m_ParserFunctions[tflite::BuiltinOperator_PAD] = &TfLiteParserImpl::ParsePad;
672  m_ParserFunctions[tflite::BuiltinOperator_PRELU] = &TfLiteParserImpl::ParsePrelu;
673  m_ParserFunctions[tflite::BuiltinOperator_QUANTIZE] = &TfLiteParserImpl::ParseQuantize;
674  m_ParserFunctions[tflite::BuiltinOperator_RELU] = &TfLiteParserImpl::ParseRelu;
675  m_ParserFunctions[tflite::BuiltinOperator_RELU6] = &TfLiteParserImpl::ParseRelu6;
676  m_ParserFunctions[tflite::BuiltinOperator_REDUCE_MAX] = &TfLiteParserImpl::ParseReduceMax;
677  m_ParserFunctions[tflite::BuiltinOperator_REDUCE_MIN] = &TfLiteParserImpl::ParseReduceMin;
678  m_ParserFunctions[tflite::BuiltinOperator_RESHAPE] = &TfLiteParserImpl::ParseReshape;
679  m_ParserFunctions[tflite::BuiltinOperator_RESIZE_BILINEAR] = &TfLiteParserImpl::ParseResizeBilinear;
680  m_ParserFunctions[tflite::BuiltinOperator_RESIZE_NEAREST_NEIGHBOR] = &TfLiteParserImpl::ParseResizeNearestNeighbor;
681  m_ParserFunctions[tflite::BuiltinOperator_RSQRT] = &TfLiteParserImpl::ParseRsqrt;
682  m_ParserFunctions[tflite::BuiltinOperator_SHAPE] = &TfLiteParserImpl::ParseShape;
683  m_ParserFunctions[tflite::BuiltinOperator_SLICE] = &TfLiteParserImpl::ParseSlice;
684  m_ParserFunctions[tflite::BuiltinOperator_SOFTMAX] = &TfLiteParserImpl::ParseSoftmax;
685  m_ParserFunctions[tflite::BuiltinOperator_SPACE_TO_BATCH_ND] = &TfLiteParserImpl::ParseSpaceToBatchND;
686  m_ParserFunctions[tflite::BuiltinOperator_SPLIT] = &TfLiteParserImpl::ParseSplit;
687  m_ParserFunctions[tflite::BuiltinOperator_SPLIT_V] = &TfLiteParserImpl::ParseSplitV;
688  m_ParserFunctions[tflite::BuiltinOperator_SQUEEZE] = &TfLiteParserImpl::ParseSqueeze;
689  m_ParserFunctions[tflite::BuiltinOperator_STRIDED_SLICE] = &TfLiteParserImpl::ParseStridedSlice;
690  m_ParserFunctions[tflite::BuiltinOperator_SUB] = &TfLiteParserImpl::ParseSub;
691  m_ParserFunctions[tflite::BuiltinOperator_SUM] = &TfLiteParserImpl::ParseSum;
692  m_ParserFunctions[tflite::BuiltinOperator_TANH] = &TfLiteParserImpl::ParseTanH;
693  m_ParserFunctions[tflite::BuiltinOperator_TRANSPOSE] = &TfLiteParserImpl::ParseTranspose;
694  m_ParserFunctions[tflite::BuiltinOperator_TRANSPOSE_CONV] = &TfLiteParserImpl::ParseTransposeConv;
695  m_ParserFunctions[tflite::BuiltinOperator_UNPACK] = &TfLiteParserImpl::ParseUnpack;
696 
697  // register supported custom operators
698  m_CustomParserFunctions["TFLite_Detection_PostProcess"] = &TfLiteParserImpl::ParseDetectionPostProcess;
699 }

◆ ~TfLiteParserImpl()

~TfLiteParserImpl ( )
default

Member Function Documentation

◆ CreateNetworkFromBinary()

INetworkPtr CreateNetworkFromBinary ( const std::vector< uint8_t > &  binaryContent)

Create the network from a flatbuffers binary.

Definition at line 715 of file TfLiteParser.cpp.

References TfLiteParserImpl::LoadModelFromBinary().

716 {
717  ResetParser();
718  m_Model = LoadModelFromBinary(binaryContent.data(), binaryContent.size());
719  return CreateNetworkFromModel();
720 }
static ModelPtr LoadModelFromBinary(const uint8_t *binaryContent, size_t len)

◆ CreateNetworkFromBinaryAsDynamic()

armnn::INetworkPtr CreateNetworkFromBinaryAsDynamic ( const std::vector< uint8_t > &  binaryContent)

◆ CreateNetworkFromBinaryFile()

INetworkPtr CreateNetworkFromBinaryFile ( const char *  graphFile)

Create the network from a flatbuffers binary file on disk.

Definition at line 708 of file TfLiteParser.cpp.

References TfLiteParserImpl::LoadModelFromFile().

709 {
710  ResetParser();
711  m_Model = LoadModelFromFile(graphFile);
712  return CreateNetworkFromModel();
713 }
static ModelPtr LoadModelFromFile(const char *fileName)

◆ GetBuffer()

TfLiteParserImpl::BufferRawPtr GetBuffer ( const ModelPtr model,
size_t  bufferIndex 
)
static

◆ GetInputs()

TfLiteParserImpl::TensorRawPtrVector GetInputs ( const ModelPtr model,
size_t  subgraphIndex,
size_t  operatorIndex 
)
static

Definition at line 3614 of file TfLiteParser.cpp.

References CHECK_MODEL, and CHECKED_NON_NEGATIVE.

Referenced by armnnTfLiteParser::ComputeWrappedIndex(), TfLiteParserImpl::LoadModel(), TfLiteParserImpl::OutputShapeOfReshape(), and TfLiteParserImpl::OutputShapeOfSqueeze().

3617 {
3618  CHECK_MODEL(model, subgraphIndex, operatorIndex);
3619 
3620  const auto & subgraphPtr = model->subgraphs[subgraphIndex];
3621  const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
3622 
3623  size_t inputCount = operatorPtr->inputs.size();
3624  TensorRawPtrVector result;
3625  for (size_t i=0; i<inputCount; ++i)
3626  {
3627  // If the input location is -1 then assume input is turned off.
3628  if (operatorPtr->inputs[i] == -1)
3629  {
3630  continue;
3631  }
3632  else
3633  {
3634  uint32_t inputId = CHECKED_NON_NEGATIVE(operatorPtr->inputs[i]);
3635  result.push_back(subgraphPtr->tensors[inputId].get());
3636  }
3637  }
3638  return result;
3639 }
#define CHECK_MODEL(MODEL, SUBGRAPH_INDEX, OPERATOR_INDEX)
#define CHECKED_NON_NEGATIVE(VALUE)
std::vector< TensorRawPtr > TensorRawPtrVector

◆ GetInputTensorIds()

std::vector< int32_t > & GetInputTensorIds ( const ModelPtr model,
size_t  subgraphIndex,
size_t  operatorIndex 
)
static

Definition at line 3694 of file TfLiteParser.cpp.

References CHECK_MODEL.

Referenced by armnnTfLiteParser::ComputeWrappedIndex(), TfLiteParserImpl::LoadModel(), TfLiteParserImpl::OutputShapeOfReshape(), and TfLiteParserImpl::OutputShapeOfSqueeze().

3697 {
3698  CHECK_MODEL(model, subgraphIndex, operatorIndex);
3699  const auto & subgraphPtr = model->subgraphs[subgraphIndex];
3700  const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
3701  return operatorPtr->inputs;
3702 }
#define CHECK_MODEL(MODEL, SUBGRAPH_INDEX, OPERATOR_INDEX)

◆ GetNetworkInputBindingInfo()

BindingPointInfo GetNetworkInputBindingInfo ( size_t  subgraphId,
const std::string &  name 
) const

Retrieve binding info (layer id and tensor info) for the network input identified by the given layer name and subgraph id.

Definition at line 3951 of file TfLiteParser.cpp.

References CHECK_LOCATION, CHECK_SUBGRAPH, TfLiteParserImpl::GetSubgraphInputs(), and armnnDeserializer::ToTensorInfo().

3953 {
3954  CHECK_SUBGRAPH(m_Model, subgraphId);
3955  auto inputs = GetSubgraphInputs(m_Model, subgraphId);
3956  for (auto const & input : inputs)
3957  {
3958  if (input.second->name == name)
3959  {
3960  auto bindingId = GenerateLayerBindingId(subgraphId, input.first);
3961  return std::make_pair(bindingId, ToTensorInfo(input.second));
3962  }
3963  }
3964 
3965  std::stringstream bindings;
3966  for (auto const & input : inputs)
3967  {
3968  bindings << "'" << input.second->name << "' ";
3969  }
3970 
3971  throw ParseException(
3972  fmt::format("No input binding found for subgraph:{} and name:{}. "
3973  "Possible inputs are: [{}] {}",
3974  subgraphId,
3975  name,
3976  bindings.str(),
3977  CHECK_LOCATION().AsString()));
3978 }
#define CHECK_SUBGRAPH(MODEL, SUBGRAPH_INDEX)
#define CHECK_LOCATION()
Definition: Exceptions.hpp:197
static TensorIdRawPtrVector GetSubgraphInputs(const ModelPtr &model, size_t subgraphIndex)
armnn::TensorInfo ToTensorInfo(TensorRawPtr tensorPtr)

◆ GetNetworkOutputBindingInfo()

BindingPointInfo GetNetworkOutputBindingInfo ( size_t  subgraphId,
const std::string &  name 
) const

Retrieve binding info (layer id and tensor info) for the network output identified by the given layer name and subgraph id.

Definition at line 3980 of file TfLiteParser.cpp.

References CHECK_LOCATION, CHECK_SUBGRAPH, TfLiteParserImpl::GetSubgraphOutputs(), and armnnDeserializer::ToTensorInfo().

3982 {
3983  CHECK_SUBGRAPH(m_Model, subgraphId);
3984  auto outputs = GetSubgraphOutputs(m_Model, subgraphId);
3985  for (unsigned int i = 0; i < outputs.size(); ++i)
3986  {
3987  auto const output = outputs[i];
3988  if (output.second->name == name)
3989  {
3990  auto bindingId = GenerateLayerBindingId(subgraphId, output.first);
3991  std::vector<unsigned int> shape = m_OverridenOutputShapes.size() > 0 ?
3992  m_OverridenOutputShapes[i] : AsUnsignedVector(output.second->shape);
3993  return std::make_pair(bindingId, ToTensorInfo(output.second, shape));
3994  }
3995  }
3996 
3997  std::stringstream bindings;
3998  for (auto const & output : outputs)
3999  {
4000  bindings << "'" << output.second->name << "' ";
4001  }
4002 
4003  throw ParseException(
4004  fmt::format("No output binding found for subgraph:{} and name:{}. "
4005  "Possible outputs are: [{}] {}",
4006  subgraphId,
4007  name,
4008  bindings.str(),
4009  CHECK_LOCATION().AsString()));
4010 }
static TensorIdRawPtrVector GetSubgraphOutputs(const ModelPtr &model, size_t subgraphIndex)
#define CHECK_SUBGRAPH(MODEL, SUBGRAPH_INDEX)
#define CHECK_LOCATION()
Definition: Exceptions.hpp:197
armnn::TensorInfo ToTensorInfo(TensorRawPtr tensorPtr)

◆ GetOutputs()

TfLiteParserImpl::TensorRawPtrVector GetOutputs ( const ModelPtr model,
size_t  subgraphIndex,
size_t  operatorIndex 
)
static

Definition at line 3641 of file TfLiteParser.cpp.

References CHECK_MODEL, CHECK_TENSOR, and CHECKED_NON_NEGATIVE.

Referenced by armnnTfLiteParser::ComputeWrappedIndex(), TfLiteParserImpl::LoadModel(), TfLiteParserImpl::OutputShapeOfReshape(), and TfLiteParserImpl::OutputShapeOfSqueeze().

3644 {
3645  CHECK_MODEL(model, subgraphIndex, operatorIndex);
3646 
3647  const auto & subgraphPtr = model->subgraphs[subgraphIndex];
3648  const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
3649 
3650  size_t outputCount = operatorPtr->outputs.size();
3651  TensorRawPtrVector result(outputCount);
3652  for (size_t i=0; i<outputCount; ++i)
3653  {
3654  uint32_t outputId = CHECKED_NON_NEGATIVE(operatorPtr->outputs[i]);
3655  CHECK_TENSOR(model, subgraphIndex, outputId);
3656  result[i] = subgraphPtr->tensors[outputId].get();
3657  }
3658  return result;
3659 }
#define CHECK_MODEL(MODEL, SUBGRAPH_INDEX, OPERATOR_INDEX)
#define CHECK_TENSOR(MODEL, SUBGRAPH_INDEX, TENSOR_INDEX)
#define CHECKED_NON_NEGATIVE(VALUE)
std::vector< TensorRawPtr > TensorRawPtrVector

◆ GetOutputTensorIds()

std::vector< int32_t > & GetOutputTensorIds ( const ModelPtr model,
size_t  subgraphIndex,
size_t  operatorIndex 
)
static

Definition at line 3704 of file TfLiteParser.cpp.

References ARMNN_ASSERT, CHECK_LOCATION, CHECK_MODEL, CHECK_SUBGRAPH, IConnectableLayer::GetInputSlot(), IConnectableLayer::GetNumInputSlots(), IConnectableLayer::GetNumOutputSlots(), IConnectableLayer::GetOutputSlot(), TfLiteParserImpl::GetSubgraphInputs(), TfLiteParserImpl::GetSubgraphOutputs(), IOutputSlot::SetTensorInfo(), and armnnDeserializer::ToTensorInfo().

Referenced by armnnTfLiteParser::ComputeWrappedIndex(), TfLiteParserImpl::LoadModel(), TfLiteParserImpl::OutputShapeOfReshape(), and TfLiteParserImpl::OutputShapeOfSqueeze().

3707 {
3708  CHECK_MODEL(model, subgraphIndex, operatorIndex);
3709  const auto & subgraphPtr = model->subgraphs[subgraphIndex];
3710  const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
3711  return operatorPtr->outputs;
3712 }
#define CHECK_MODEL(MODEL, SUBGRAPH_INDEX, OPERATOR_INDEX)

◆ GetSubgraphCount()

size_t GetSubgraphCount ( ) const

Return the number of subgraphs in the parsed model.

Definition at line 4012 of file TfLiteParser.cpp.

4013 {
4014  return m_Model->subgraphs.size();
4015 }

◆ GetSubgraphInputs()

TfLiteParserImpl::TensorIdRawPtrVector GetSubgraphInputs ( const ModelPtr model,
size_t  subgraphIndex 
)
static

Definition at line 3661 of file TfLiteParser.cpp.

References CHECK_SUBGRAPH, CHECK_TENSOR, and CHECKED_NON_NEGATIVE.

Referenced by TfLiteParserImpl::GetNetworkInputBindingInfo(), TfLiteParserImpl::GetOutputTensorIds(), and TfLiteParserImpl::GetSubgraphInputTensorNames().

3663 {
3664  CHECK_SUBGRAPH(model, subgraphIndex);
3665  const auto & subgraphPtr = model->subgraphs[subgraphIndex];
3666 
3667  size_t inputCount = subgraphPtr->inputs.size();
3668  TensorIdRawPtrVector result(inputCount);
3669  for (size_t i=0; i<inputCount; ++i)
3670  {
3671  uint32_t inputId = CHECKED_NON_NEGATIVE(subgraphPtr->inputs[i]);
3672  CHECK_TENSOR(model, subgraphIndex, inputId);
3673  result[i] = std::make_pair(inputId, subgraphPtr->tensors[inputId].get());
3674  }
3675  return result;
3676 }
#define CHECK_TENSOR(MODEL, SUBGRAPH_INDEX, TENSOR_INDEX)
#define CHECK_SUBGRAPH(MODEL, SUBGRAPH_INDEX)
std::vector< TensorIdRawPtr > TensorIdRawPtrVector
#define CHECKED_NON_NEGATIVE(VALUE)

◆ GetSubgraphInputTensorNames()

std::vector< std::string > GetSubgraphInputTensorNames ( size_t  subgraphId) const

Return the input tensor names for a given subgraph.

Definition at line 4017 of file TfLiteParser.cpp.

References CHECK_SUBGRAPH, and TfLiteParserImpl::GetSubgraphInputs().

4018 {
4019  CHECK_SUBGRAPH(m_Model, subgraphId);
4020  auto inputs = GetSubgraphInputs(m_Model, subgraphId);
4021  std::vector<std::string> result;
4022  result.reserve(inputs.size());
4023  for (auto const & input : inputs)
4024  {
4025  result.push_back(input.second->name);
4026  }
4027  return result;
4028 }
#define CHECK_SUBGRAPH(MODEL, SUBGRAPH_INDEX)
static TensorIdRawPtrVector GetSubgraphInputs(const ModelPtr &model, size_t subgraphIndex)

◆ GetSubgraphOutputs()

TfLiteParserImpl::TensorIdRawPtrVector GetSubgraphOutputs ( const ModelPtr model,
size_t  subgraphIndex 
)
static

Definition at line 3678 of file TfLiteParser.cpp.

References CHECK_SUBGRAPH, and CHECKED_NON_NEGATIVE.

Referenced by TfLiteParserImpl::GetNetworkOutputBindingInfo(), TfLiteParserImpl::GetOutputTensorIds(), and TfLiteParserImpl::GetSubgraphOutputTensorNames().

3680 {
3681  CHECK_SUBGRAPH(model, subgraphIndex);
3682  const auto & subgraphPtr = model->subgraphs[subgraphIndex];
3683 
3684  size_t outputCount = subgraphPtr->outputs.size();
3685  TensorIdRawPtrVector result(outputCount);
3686  for (size_t i=0; i<outputCount; ++i)
3687  {
3688  uint32_t outputId = CHECKED_NON_NEGATIVE(subgraphPtr->outputs[i]);
3689  result[i] = std::make_pair(outputId, subgraphPtr->tensors[outputId].get());
3690  }
3691  return result;
3692 }
#define CHECK_SUBGRAPH(MODEL, SUBGRAPH_INDEX)
std::vector< TensorIdRawPtr > TensorIdRawPtrVector
#define CHECKED_NON_NEGATIVE(VALUE)

◆ GetSubgraphOutputTensorNames()

std::vector< std::string > GetSubgraphOutputTensorNames ( size_t  subgraphId) const

Return the output tensor names for a given subgraph.

Definition at line 4030 of file TfLiteParser.cpp.

References CHECK_SUBGRAPH, and TfLiteParserImpl::GetSubgraphOutputs().

4031 {
4032  CHECK_SUBGRAPH(m_Model, subgraphId);
4033  auto outputs = GetSubgraphOutputs(m_Model, subgraphId);
4034  std::vector<std::string> result;
4035  result.reserve(outputs.size());
4036  for (auto const & output : outputs)
4037  {
4038  result.push_back(output.second->name);
4039  }
4040  return result;
4041 }
static TensorIdRawPtrVector GetSubgraphOutputs(const ModelPtr &model, size_t subgraphIndex)
#define CHECK_SUBGRAPH(MODEL, SUBGRAPH_INDEX)

◆ GetVersion()

const std::string GetVersion ( )
static

Retrieve version in X.Y.Z form.

Definition at line 4043 of file TfLiteParser.cpp.

References TFLITE_PARSER_VERSION.

4044 {
4045  return TFLITE_PARSER_VERSION;
4046 }
#define TFLITE_PARSER_VERSION
TFLITE_PARSER_VERSION: "X.Y.Z" where: X = Major version number Y = Minor version number Z = Patch version number
Definition: Version.hpp:25

◆ LoadModel()

armnn::INetworkPtr LoadModel ( std::unique_ptr< tflite::ModelT >  model)

Definition at line 723 of file TfLiteParser.cpp.

References TensorShape::AreAllDimensionsSpecified(), ARMNN_ASSERT, ARMNN_ASSERT_MSG, ARMNN_LOG, CHECK_LOCATION, CHECK_MODEL, CHECK_SUPPORTED_FUSED_ACTIVATION, CHECK_TENSOR, CHECK_VALID_SIZE, CHECKED_NON_NEGATIVE, armnn::error, TfLiteParserImpl::GetBuffer(), TensorInfo::GetDataType(), TfLiteParserImpl::GetInputs(), TfLiteParserImpl::GetInputTensorIds(), TensorInfo::GetNumBytes(), TensorShape::GetNumDimensions(), TensorInfo::GetNumElements(), TfLiteParserImpl::GetOutputs(), IConnectableLayer::GetOutputSlot(), TfLiteParserImpl::GetOutputTensorIds(), TensorInfo::GetShape(), SoftmaxDescriptor::m_Beta, Convolution2dDescriptor::m_BiasEnabled, DepthwiseConvolution2dDescriptor::m_BiasEnabled, TransposeConvolution2dDescriptor::m_BiasEnabled, BatchToSpaceNdDescriptor::m_BlockShape, SpaceToBatchNdDescriptor::m_BlockShape, BatchToSpaceNdDescriptor::m_Crops, Pooling2dDescriptor::m_DataLayout, Convolution2dDescriptor::m_DataLayout, DepthwiseConvolution2dDescriptor::m_DataLayout, L2NormalizationDescriptor::m_DataLayout, BatchToSpaceNdDescriptor::m_DataLayout, SpaceToBatchNdDescriptor::m_DataLayout, TransposeConvolution2dDescriptor::m_DataLayout, Convolution2dDescriptor::m_DilationX, DepthwiseConvolution2dDescriptor::m_DilationX, Convolution2dDescriptor::m_DilationY, DepthwiseConvolution2dDescriptor::m_DilationY, TransposeConvolution2dDescriptor::m_OutputShape, TransposeConvolution2dDescriptor::m_OutputShapeEnabled, Pooling2dDescriptor::m_OutputShapeRounding, Pooling2dDescriptor::m_PadBottom, Convolution2dDescriptor::m_PadBottom, DepthwiseConvolution2dDescriptor::m_PadBottom, TransposeConvolution2dDescriptor::m_PadBottom, Pooling2dDescriptor::m_PaddingMethod, Pooling2dDescriptor::m_PadLeft, Convolution2dDescriptor::m_PadLeft, DepthwiseConvolution2dDescriptor::m_PadLeft, TransposeConvolution2dDescriptor::m_PadLeft, SpaceToBatchNdDescriptor::m_PadList, Pooling2dDescriptor::m_PadRight, Convolution2dDescriptor::m_PadRight, DepthwiseConvolution2dDescriptor::m_PadRight, 
TransposeConvolution2dDescriptor::m_PadRight, Pooling2dDescriptor::m_PadTop, Convolution2dDescriptor::m_PadTop, DepthwiseConvolution2dDescriptor::m_PadTop, TransposeConvolution2dDescriptor::m_PadTop, Pooling2dDescriptor::m_PoolHeight, Pooling2dDescriptor::m_PoolType, Pooling2dDescriptor::m_PoolWidth, Pooling2dDescriptor::m_StrideX, Convolution2dDescriptor::m_StrideX, DepthwiseConvolution2dDescriptor::m_StrideX, TransposeConvolution2dDescriptor::m_StrideX, Pooling2dDescriptor::m_StrideY, Convolution2dDescriptor::m_StrideY, DepthwiseConvolution2dDescriptor::m_StrideY, TransposeConvolution2dDescriptor::m_StrideY, ReshapeDescriptor::m_TargetShape, armnn::NHWC, armnn::numeric_cast(), IOutputSlot::SetTensorInfo(), armnnDeserializer::ToTensorInfo(), and Exception::what().

724 {
725  ResetParser();
726  m_Model = std::move(model);
727 
728  return CreateNetworkFromModel();
729 }

◆ LoadModelFromBinary()

TfLiteParserImpl::ModelPtr LoadModelFromBinary ( const uint8_t *  binaryContent,
size_t  len 
)
static

Definition at line 3595 of file TfLiteParser.cpp.

References CHECK_LOCATION.

Referenced by TfLiteParserImpl::CreateNetworkFromBinary(), and TfLiteParserImpl::LoadModelFromFile().

3596 {
3597  if (binaryContent == nullptr)
3598  {
3599  throw InvalidArgumentException(fmt::format("Invalid (null) binary content {}",
3600  CHECK_LOCATION().AsString()));
3601  }
3602  flatbuffers::Verifier verifier(binaryContent, len);
3603  if (verifier.VerifyBuffer<tflite::Model>() == false)
3604  {
3605  throw ParseException(
3606  fmt::format("Buffer doesn't conform to the expected Tensorflow Lite "
3607  "flatbuffers format. size:{} {}",
3608  len,
3609  CHECK_LOCATION().AsString()));
3610  }
3611  return tflite::UnPackModel(binaryContent);
3612 }
#define CHECK_LOCATION()
Definition: Exceptions.hpp:197

◆ LoadModelFromFile()

TfLiteParserImpl::ModelPtr LoadModelFromFile ( const char *  fileName)
static

Definition at line 3571 of file TfLiteParser.cpp.

References CHECK_LOCATION, and TfLiteParserImpl::LoadModelFromBinary().

Referenced by TfLiteParserImpl::CreateNetworkFromBinaryFile().

3572 {
3573  if (fileName == nullptr)
3574  {
3575  throw InvalidArgumentException(fmt::format("Invalid (null) file name {}",
3576  CHECK_LOCATION().AsString()));
3577  }
3578  std::error_code errorCode;
3579  fs::path pathToFile(fileName);
3580  if (!fs::exists(pathToFile, errorCode))
3581  {
3582  //fmt::format() could not be used here (format error)
3583  std::stringstream msg;
3584  msg << "Cannot find the file (" << fileName << ") errorCode: " << errorCode
3585  << " " << CHECK_LOCATION().AsString();
3586 
3587  throw FileNotFoundException(msg.str());
3588  }
3589  std::ifstream file(fileName, std::ios::binary);
3590  std::string fileContent((std::istreambuf_iterator<char>(file)), std::istreambuf_iterator<char>());
3591  return LoadModelFromBinary(reinterpret_cast<const uint8_t *>(fileContent.c_str()),
3592  fileContent.size());
3593 }
static ModelPtr LoadModelFromBinary(const uint8_t *binaryContent, size_t len)
#define CHECK_LOCATION()
Definition: Exceptions.hpp:197

◆ OutputShapeOfReshape()

armnn::TensorInfo OutputShapeOfReshape ( const armnn::TensorInfo inputTensorInfo,
const std::vector< int32_t > &  targetDimsIn 
)
static

Definition at line 2283 of file TfLiteParser.cpp.

References ARMNN_ASSERT, ARMNN_THROW_PARSE_EXCEPTION, CHECK_LOCATION, CHECK_MODEL, CHECK_SUPPORTED_FUSED_ACTIVATION, CHECK_VALID_SIZE, CHECKED_NON_NEGATIVE, armnnDeserializer::CheckShape(), IOutputSlot::Connect(), TfLiteParserImpl::GetBuffer(), TensorInfo::GetDataType(), TfLiteParserImpl::GetInputs(), IConnectableLayer::GetInputSlot(), TfLiteParserImpl::GetInputTensorIds(), IConnectableLayer::GetName(), TensorInfo::GetNumBytes(), TensorShape::GetNumDimensions(), TensorInfo::GetNumDimensions(), TensorInfo::GetNumElements(), IConnectableLayer::GetNumOutputSlots(), TfLiteParserImpl::GetOutputs(), IConnectableLayer::GetOutputSlot(), TfLiteParserImpl::GetOutputTensorIds(), TensorInfo::GetQuantizationOffset(), TensorInfo::GetQuantizationScale(), TensorInfo::GetShape(), armnnUtils::GetUnsignedAxis(), StackDescriptor::m_Axis, FullyConnectedDescriptor::m_BiasEnabled, FullyConnectedDescriptor::m_ConstantWeights, DetectionPostProcessDescriptor::m_DetectionsPerClass, StackDescriptor::m_InputShape, DetectionPostProcessDescriptor::m_MaxClassesPerDetection, DetectionPostProcessDescriptor::m_MaxDetections, ResizeDescriptor::m_Method, DetectionPostProcessDescriptor::m_NmsIouThreshold, DetectionPostProcessDescriptor::m_NmsScoreThreshold, DetectionPostProcessDescriptor::m_NumClasses, StackDescriptor::m_NumInputs, DetectionPostProcessDescriptor::m_ScaleH, DetectionPostProcessDescriptor::m_ScaleW, DetectionPostProcessDescriptor::m_ScaleX, DetectionPostProcessDescriptor::m_ScaleY, ReshapeDescriptor::m_TargetShape, FullyConnectedDescriptor::m_TransposeWeightMatrix, DetectionPostProcessDescriptor::m_UseRegularNms, armnn::MaxNumOfTensorDimensions, armnn::NHWC, armnn::numeric_cast(), armnnUtils::ProcessConcatInputTensorInfo(), OriginsDescriptor::SetConcatAxis(), TensorInfo::SetShape(), IOutputSlot::SetTensorInfo(), ViewsDescriptor::SetViewOriginCoord(), ViewsDescriptor::SetViewSize(), and armnnDeserializer::ToTensorInfo().

2285 {
2286  std::vector<unsigned int> outputDims(targetDimsIn.begin(), targetDimsIn.end());
2287  const auto stretchDim = std::find(targetDimsIn.begin(), targetDimsIn.end(), -1);
2288 
2289  if (stretchDim != targetDimsIn.end())
2290  {
2291  if (std::find(std::next(stretchDim), targetDimsIn.end(), -1) != targetDimsIn.end())
2292  {
2293  throw ParseException(
2294  fmt::format("At most one component of shape can be -1 {}", CHECK_LOCATION().AsString()));
2295  }
2296 
2297  auto targetNumElements =
2298  armnn::numeric_cast<unsigned int>(
2299  std::accumulate(targetDimsIn.begin(), targetDimsIn.end(), -1, std::multiplies<int32_t>()));
2300 
2301  auto stretchIndex = static_cast<size_t>(std::distance(targetDimsIn.begin(), stretchDim));
2302  outputDims[stretchIndex] = inputTensorInfo.GetNumElements() / targetNumElements;
2303  }
2304 
2305  TensorShape outputShape = TensorShape(static_cast<unsigned int>(outputDims.size()), outputDims.data());
2306 
2307  TensorInfo reshapeInfo = inputTensorInfo;
2308  reshapeInfo.SetShape(outputShape);
2309 
2310  return reshapeInfo;
2311 }
void SetShape(const TensorShape &newShape)
Definition: Tensor.hpp:193
#define CHECK_LOCATION()
Definition: Exceptions.hpp:197
std::enable_if_t< std::is_unsigned< Source >::value &&std::is_unsigned< Dest >::value, Dest > numeric_cast(Source source)
Definition: NumericCast.hpp:35
unsigned int GetNumElements() const
Definition: Tensor.hpp:196

◆ OutputShapeOfSqueeze()

armnn::TensorInfo OutputShapeOfSqueeze ( std::vector< uint32_t >  squeezeDims,
const armnn::TensorInfo inputTensorInfo 
)
static

Definition at line 1697 of file TfLiteParser.cpp.

References ARMNN_ASSERT, CHECK_LOCATION, CHECK_MODEL, CHECK_VALID_SIZE, IOutputSlot::Connect(), TfLiteParserImpl::GetBuffer(), TensorInfo::GetDataType(), TfLiteParserImpl::GetInputs(), IConnectableLayer::GetInputSlot(), TfLiteParserImpl::GetInputTensorIds(), TensorInfo::GetNumBytes(), TensorShape::GetNumDimensions(), TensorInfo::GetNumDimensions(), TensorInfo::GetNumElements(), TfLiteParserImpl::GetOutputs(), IConnectableLayer::GetOutputSlot(), TfLiteParserImpl::GetOutputTensorIds(), TensorInfo::GetShape(), armnn::IgnoreUnused(), ActivationDescriptor::m_A, MeanDescriptor::m_Axis, ActivationDescriptor::m_B, StridedSliceDescriptor::m_Begin, StridedSliceDescriptor::m_BeginMask, StridedSliceDescriptor::m_DataLayout, StridedSliceDescriptor::m_EllipsisMask, StridedSliceDescriptor::m_End, StridedSliceDescriptor::m_EndMask, ActivationDescriptor::m_Function, MeanDescriptor::m_KeepDims, StridedSliceDescriptor::m_NewAxisMask, PadDescriptor::m_PadList, PadDescriptor::m_PadValue, StridedSliceDescriptor::m_ShrinkAxisMask, StridedSliceDescriptor::m_Stride, ReshapeDescriptor::m_TargetShape, armnn::NHWC, TensorInfo::SetShape(), IOutputSlot::SetTensorInfo(), armnn::Signed32, armnn::Signed64, and armnnDeserializer::ToTensorInfo().

1699 {
1700  CHECK_VALID_SIZE(squeezeDims.size(), 0, 1, 2, 3, 4);
1701  static const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };
1702 
1703  if (inputTensorInfo.GetNumDimensions() > 4)
1704  {
1705  std::stringstream ss;
1706  ss << "Input tensor has unexpected number of dimensions:" << inputTensorInfo.GetNumDimensions()
1707  << " shape:" << inputTensorInfo.GetShape() << " "
1708  << CHECK_LOCATION().AsString();
1709  throw ParseException(ss.str());
1710  }
1711 
1712  if (squeezeDims.empty())
1713  {
1714  squeezeDims.assign(dimensionSequence,
1715  dimensionSequence+inputTensorInfo.GetNumDimensions());
1716  }
1717 
1718  std::vector<uint32_t> outputDims;
1719  for(unsigned int i = 0; i < inputTensorInfo.GetNumDimensions(); i++)
1720  {
1721  bool skipSqueeze = (std::find(squeezeDims.begin(), squeezeDims.end(), i) == squeezeDims.end());
1722  auto currentDimension = inputTensorInfo.GetShape()[i];
1723  if (skipSqueeze || currentDimension != 1)
1724  {
1725  outputDims.push_back(currentDimension);
1726  }
1727  }
1728 
1729  if (outputDims.size() > 4)
1730  {
1731  std::stringstream ss;
1732  ss << "Output tensor has unexpected number of dimensions:" << inputTensorInfo.GetNumDimensions()
1733  << " shape:" << inputTensorInfo.GetShape() << " "
1734  << CHECK_LOCATION().AsString();
1735  throw ParseException(ss.str());
1736  }
1737 
1738  TensorShape outShape = TensorShape(static_cast<unsigned int>(outputDims.size()),
1739  outputDims.data());
1740 
1741  // we need to preserve the tensor type and the quantization data as well
1742  TensorInfo outTensorInfo = inputTensorInfo;
1743  outTensorInfo.SetShape(outShape);
1744 
1745  return outTensorInfo;
1746 }
const TensorShape & GetShape() const
Definition: Tensor.hpp:191
void SetShape(const TensorShape &newShape)
Definition: Tensor.hpp:193
#define CHECK_VALID_SIZE(ACTUAL,...)
#define CHECK_LOCATION()
Definition: Exceptions.hpp:197
unsigned int GetNumDimensions() const
Definition: Tensor.hpp:195

The documentation for this class was generated from the following files: