ArmNN 21.11
TfLiteParserImpl Class Reference

#include <TfLiteParser.hpp>

Public Types

using ModelPtr = std::unique_ptr< tflite::ModelT >
 
using SubgraphPtr = std::unique_ptr< tflite::SubGraphT >
 
using OperatorPtr = std::unique_ptr< tflite::OperatorT >
 
using OperatorCodePtr = std::unique_ptr< tflite::OperatorCodeT >
 
using TensorPtr = std::unique_ptr< tflite::TensorT >
 
using TensorRawPtr = const tflite::TensorT *
 
using TensorRawPtrVector = std::vector< TensorRawPtr >
 
using TensorIdRawPtr = std::pair< size_t, TensorRawPtr >
 
using TensorIdRawPtrVector = std::vector< TensorIdRawPtr >
 
using BufferPtr = std::unique_ptr< tflite::BufferT >
 
using BufferRawPtr = const tflite::BufferT *
 

Public Member Functions

armnn::INetworkPtr CreateNetworkFromBinaryFile (const char *graphFile)
 Create the network from a flatbuffers binary file on disk.
 
armnn::INetworkPtr CreateNetworkFromBinary (const std::vector< uint8_t > &binaryContent)
 Create the network from a flatbuffers binary.
 
BindingPointInfo GetNetworkInputBindingInfo (size_t subgraphId, const std::string &name) const
 Retrieve binding info (layer id and tensor info) for the network input identified by the given layer name and subgraph id.
 
BindingPointInfo GetNetworkOutputBindingInfo (size_t subgraphId, const std::string &name) const
 Retrieve binding info (layer id and tensor info) for the network output identified by the given layer name and subgraph id.
 
size_t GetSubgraphCount () const
 Return the number of subgraphs in the parsed model.
 
std::vector< std::string > GetSubgraphInputTensorNames (size_t subgraphId) const
 Return the input tensor names for a given subgraph.
 
std::vector< std::string > GetSubgraphOutputTensorNames (size_t subgraphId) const
 Return the output tensor names for a given subgraph.
 
 TfLiteParserImpl (const armnn::Optional< ITfLiteParser::TfLiteParserOptions > &options=armnn::EmptyOptional())
 
 ~TfLiteParserImpl ()=default
 
armnn::INetworkPtr CreateNetworkFromBinaryAsDynamic (const std::vector< uint8_t > &binaryContent)
 
armnn::INetworkPtr LoadModel (std::unique_ptr< tflite::ModelT > model)
 

Static Public Member Functions

static ModelPtr LoadModelFromFile (const char *fileName)
 
static ModelPtr LoadModelFromBinary (const uint8_t *binaryContent, size_t len)
 
static TensorRawPtrVector GetInputs (const ModelPtr &model, size_t subgraphIndex, size_t operatorIndex)
 
static TensorRawPtrVector GetOutputs (const ModelPtr &model, size_t subgraphIndex, size_t operatorIndex)
 
static TensorIdRawPtrVector GetSubgraphInputs (const ModelPtr &model, size_t subgraphIndex)
 
static TensorIdRawPtrVector GetSubgraphOutputs (const ModelPtr &model, size_t subgraphIndex)
 
static std::vector< int32_t > & GetInputTensorIds (const ModelPtr &model, size_t subgraphIndex, size_t operatorIndex)
 
static std::vector< int32_t > & GetOutputTensorIds (const ModelPtr &model, size_t subgraphIndex, size_t operatorIndex)
 
static BufferRawPtr GetBuffer (const ModelPtr &model, size_t bufferIndex)
 
static armnn::TensorInfo OutputShapeOfSqueeze (std::vector< uint32_t > squeezeDims, const armnn::TensorInfo &inputTensorInfo)
 
static armnn::TensorInfo OutputShapeOfReshape (const armnn::TensorInfo &inputTensorInfo, const std::vector< int32_t > &targetDimsIn)
 
static const std::string GetVersion ()
 Retrieve version in X.Y.Z form.
 

Detailed Description

Definition at line 25 of file TfLiteParser.hpp.
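
The parser converts a TensorFlow Lite flatbuffer model into an armnn::INetwork and exposes the input/output binding points needed to run it. A minimal usage sketch follows; the model path and the tensor names "input" and "output" are placeholders, not part of the API:

    #include <TfLiteParser.hpp>

    using namespace armnnTfLiteParser;

    TfLiteParserImpl parser;
    armnn::INetworkPtr network = parser.CreateNetworkFromBinaryFile("model.tflite");

    // A BindingPointInfo pairs a layer binding id with the tensor's TensorInfo.
    BindingPointInfo inputBinding  = parser.GetNetworkInputBindingInfo(0, "input");
    BindingPointInfo outputBinding = parser.GetNetworkOutputBindingInfo(0, "output");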

Member Typedef Documentation

◆ BufferPtr

using BufferPtr = std::unique_ptr<tflite::BufferT>

Definition at line 38 of file TfLiteParser.hpp.

◆ BufferRawPtr

using BufferRawPtr = const tflite::BufferT *

Definition at line 39 of file TfLiteParser.hpp.

◆ ModelPtr

using ModelPtr = std::unique_ptr<tflite::ModelT>

Definition at line 29 of file TfLiteParser.hpp.

◆ OperatorCodePtr

using OperatorCodePtr = std::unique_ptr<tflite::OperatorCodeT>

Definition at line 32 of file TfLiteParser.hpp.

◆ OperatorPtr

using OperatorPtr = std::unique_ptr<tflite::OperatorT>

Definition at line 31 of file TfLiteParser.hpp.

◆ SubgraphPtr

using SubgraphPtr = std::unique_ptr<tflite::SubGraphT>

Definition at line 30 of file TfLiteParser.hpp.

◆ TensorIdRawPtr

using TensorIdRawPtr = std::pair<size_t, TensorRawPtr>

Definition at line 36 of file TfLiteParser.hpp.

◆ TensorIdRawPtrVector

using TensorIdRawPtrVector = std::vector<TensorIdRawPtr>

Definition at line 37 of file TfLiteParser.hpp.

◆ TensorPtr

using TensorPtr = std::unique_ptr<tflite::TensorT>

Definition at line 33 of file TfLiteParser.hpp.

◆ TensorRawPtr

using TensorRawPtr = const tflite::TensorT *

Definition at line 34 of file TfLiteParser.hpp.

◆ TensorRawPtrVector

using TensorRawPtrVector = std::vector<TensorRawPtr>

Definition at line 35 of file TfLiteParser.hpp.

Constructor & Destructor Documentation

◆ TfLiteParserImpl()

TfLiteParserImpl (const armnn::Optional< ITfLiteParser::TfLiteParserOptions > & options = armnn::EmptyOptional())

Definition at line 628 of file TfLiteParser.cpp.

: m_Options(options)
, m_Network(nullptr, nullptr)
, m_ParserFunctions(tflite::BuiltinOperator_MAX+1, &TfLiteParserImpl::ParseUnsupportedOperator)
{
    // register supported operators
    m_ParserFunctions[tflite::BuiltinOperator_ABS] = &TfLiteParserImpl::ParseAbs;
    m_ParserFunctions[tflite::BuiltinOperator_ADD] = &TfLiteParserImpl::ParseAdd;
    m_ParserFunctions[tflite::BuiltinOperator_ARG_MIN] = &TfLiteParserImpl::ParseArgMin;
    m_ParserFunctions[tflite::BuiltinOperator_ARG_MAX] = &TfLiteParserImpl::ParseArgMax;
    m_ParserFunctions[tflite::BuiltinOperator_AVERAGE_POOL_2D] = &TfLiteParserImpl::ParseAveragePool2D;
    m_ParserFunctions[tflite::BuiltinOperator_BATCH_TO_SPACE_ND] = &TfLiteParserImpl::ParseBatchToSpaceND;
    m_ParserFunctions[tflite::BuiltinOperator_CAST] = &TfLiteParserImpl::ParseCast;
    m_ParserFunctions[tflite::BuiltinOperator_CONCATENATION] = &TfLiteParserImpl::ParseConcatenation;
    m_ParserFunctions[tflite::BuiltinOperator_CONV_2D] = &TfLiteParserImpl::ParseConv2D;
    // Conv3D support was added in TF 2.5, so for backwards compatibility a hash define is needed.
    #if defined(ARMNN_POST_TFLITE_2_3)
    m_ParserFunctions[tflite::BuiltinOperator_CONV_3D] = &TfLiteParserImpl::ParseConv3D;
    #endif
    m_ParserFunctions[tflite::BuiltinOperator_CUSTOM] = &TfLiteParserImpl::ParseCustomOperator;
    m_ParserFunctions[tflite::BuiltinOperator_DEPTH_TO_SPACE] = &TfLiteParserImpl::ParseDepthToSpace;
    m_ParserFunctions[tflite::BuiltinOperator_DEPTHWISE_CONV_2D] = &TfLiteParserImpl::ParseDepthwiseConv2D;
    m_ParserFunctions[tflite::BuiltinOperator_DEQUANTIZE] = &TfLiteParserImpl::ParseDequantize;
    m_ParserFunctions[tflite::BuiltinOperator_DIV] = &TfLiteParserImpl::ParseDiv;
    m_ParserFunctions[tflite::BuiltinOperator_ELU] = &TfLiteParserImpl::ParseElu;
    m_ParserFunctions[tflite::BuiltinOperator_EQUAL] = &TfLiteParserImpl::ParseEqual;
    m_ParserFunctions[tflite::BuiltinOperator_EXP] = &TfLiteParserImpl::ParseExp;
    m_ParserFunctions[tflite::BuiltinOperator_EXPAND_DIMS] = &TfLiteParserImpl::ParseExpandDims;
    m_ParserFunctions[tflite::BuiltinOperator_FULLY_CONNECTED] = &TfLiteParserImpl::ParseFullyConnected;
    m_ParserFunctions[tflite::BuiltinOperator_GATHER] = &TfLiteParserImpl::ParseGather;
    m_ParserFunctions[tflite::BuiltinOperator_GREATER] = &TfLiteParserImpl::ParseGreater;
    m_ParserFunctions[tflite::BuiltinOperator_GREATER_EQUAL] = &TfLiteParserImpl::ParseGreaterOrEqual;
    m_ParserFunctions[tflite::BuiltinOperator_HARD_SWISH] = &TfLiteParserImpl::ParseHardSwish;
    m_ParserFunctions[tflite::BuiltinOperator_LEAKY_RELU] = &TfLiteParserImpl::ParseLeakyRelu;
    m_ParserFunctions[tflite::BuiltinOperator_LESS] = &TfLiteParserImpl::ParseLess;
    m_ParserFunctions[tflite::BuiltinOperator_LESS_EQUAL] = &TfLiteParserImpl::ParseLessOrEqual;
    m_ParserFunctions[tflite::BuiltinOperator_LOCAL_RESPONSE_NORMALIZATION]
        = &TfLiteParserImpl::ParseLocalResponseNormalization;
    m_ParserFunctions[tflite::BuiltinOperator_LOGICAL_NOT] = &TfLiteParserImpl::ParseLogicalNot;
    m_ParserFunctions[tflite::BuiltinOperator_LOGISTIC] = &TfLiteParserImpl::ParseLogistic;
    m_ParserFunctions[tflite::BuiltinOperator_L2_NORMALIZATION] = &TfLiteParserImpl::ParseL2Normalization;
    m_ParserFunctions[tflite::BuiltinOperator_MAX_POOL_2D] = &TfLiteParserImpl::ParseMaxPool2D;
    m_ParserFunctions[tflite::BuiltinOperator_MAXIMUM] = &TfLiteParserImpl::ParseMaximum;
    m_ParserFunctions[tflite::BuiltinOperator_MEAN] = &TfLiteParserImpl::ParseMean;
    m_ParserFunctions[tflite::BuiltinOperator_MINIMUM] = &TfLiteParserImpl::ParseMinimum;
    m_ParserFunctions[tflite::BuiltinOperator_MIRROR_PAD] = &TfLiteParserImpl::ParseMirrorPad;
    m_ParserFunctions[tflite::BuiltinOperator_MUL] = &TfLiteParserImpl::ParseMul;
    m_ParserFunctions[tflite::BuiltinOperator_NEG] = &TfLiteParserImpl::ParseNeg;
    m_ParserFunctions[tflite::BuiltinOperator_NOT_EQUAL] = &TfLiteParserImpl::ParseNotEqual;
    m_ParserFunctions[tflite::BuiltinOperator_PACK] = &TfLiteParserImpl::ParsePack;
    m_ParserFunctions[tflite::BuiltinOperator_PAD] = &TfLiteParserImpl::ParsePad;
    m_ParserFunctions[tflite::BuiltinOperator_PRELU] = &TfLiteParserImpl::ParsePrelu;
    m_ParserFunctions[tflite::BuiltinOperator_QUANTIZE] = &TfLiteParserImpl::ParseQuantize;
    m_ParserFunctions[tflite::BuiltinOperator_RELU] = &TfLiteParserImpl::ParseRelu;
    m_ParserFunctions[tflite::BuiltinOperator_RELU6] = &TfLiteParserImpl::ParseRelu6;
    m_ParserFunctions[tflite::BuiltinOperator_REDUCE_MAX] = &TfLiteParserImpl::ParseReduceMax;
    m_ParserFunctions[tflite::BuiltinOperator_REDUCE_MIN] = &TfLiteParserImpl::ParseReduceMin;
    m_ParserFunctions[tflite::BuiltinOperator_REDUCE_PROD] = &TfLiteParserImpl::ParseReduceProd;
    m_ParserFunctions[tflite::BuiltinOperator_RESHAPE] = &TfLiteParserImpl::ParseReshape;
    m_ParserFunctions[tflite::BuiltinOperator_RESIZE_BILINEAR] = &TfLiteParserImpl::ParseResizeBilinear;
    m_ParserFunctions[tflite::BuiltinOperator_RESIZE_NEAREST_NEIGHBOR] = &TfLiteParserImpl::ParseResizeNearestNeighbor;
    m_ParserFunctions[tflite::BuiltinOperator_RSQRT] = &TfLiteParserImpl::ParseRsqrt;
    m_ParserFunctions[tflite::BuiltinOperator_SHAPE] = &TfLiteParserImpl::ParseShape;
    m_ParserFunctions[tflite::BuiltinOperator_SLICE] = &TfLiteParserImpl::ParseSlice;
    m_ParserFunctions[tflite::BuiltinOperator_SOFTMAX] = &TfLiteParserImpl::ParseSoftmax;
    m_ParserFunctions[tflite::BuiltinOperator_SPACE_TO_BATCH_ND] = &TfLiteParserImpl::ParseSpaceToBatchND;
    m_ParserFunctions[tflite::BuiltinOperator_SPLIT] = &TfLiteParserImpl::ParseSplit;
    m_ParserFunctions[tflite::BuiltinOperator_SPLIT_V] = &TfLiteParserImpl::ParseSplitV;
    m_ParserFunctions[tflite::BuiltinOperator_SQUEEZE] = &TfLiteParserImpl::ParseSqueeze;
    m_ParserFunctions[tflite::BuiltinOperator_STRIDED_SLICE] = &TfLiteParserImpl::ParseStridedSlice;
    m_ParserFunctions[tflite::BuiltinOperator_SUB] = &TfLiteParserImpl::ParseSub;
    m_ParserFunctions[tflite::BuiltinOperator_SUM] = &TfLiteParserImpl::ParseSum;
    m_ParserFunctions[tflite::BuiltinOperator_TANH] = &TfLiteParserImpl::ParseTanH;
    m_ParserFunctions[tflite::BuiltinOperator_TRANSPOSE] = &TfLiteParserImpl::ParseTranspose;
    m_ParserFunctions[tflite::BuiltinOperator_TRANSPOSE_CONV] = &TfLiteParserImpl::ParseTransposeConv;
    m_ParserFunctions[tflite::BuiltinOperator_UNPACK] = &TfLiteParserImpl::ParseUnpack;

    // register supported custom operators
    m_CustomParserFunctions["TFLite_Detection_PostProcess"] = &TfLiteParserImpl::ParseDetectionPostProcess;
}
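
Any builtin operator that is not registered above is routed to ParseUnsupportedOperator. A sketch of constructing a parser with explicit options; the option field names below are taken from ITfLiteParser::TfLiteParserOptions and should be treated as assumptions for this release:

    ITfLiteParser::TfLiteParserOptions options;
    options.m_StandInLayerForUnsupported = true; // assumed field: map unknown operators to StandIn layers
    options.m_InferAndValidate = true;           // assumed field: infer missing tensor shapes and validate them

    TfLiteParserImpl parser(armnn::Optional<ITfLiteParser::TfLiteParserOptions>(options));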

◆ ~TfLiteParserImpl()

~TfLiteParserImpl ()=default

Member Function Documentation

◆ CreateNetworkFromBinary()

INetworkPtr CreateNetworkFromBinary ( const std::vector< uint8_t > &  binaryContent)

Create the network from a flatbuffers binary.

Definition at line 723 of file TfLiteParser.cpp.

References TfLiteParserImpl::LoadModelFromBinary().

{
    ResetParser();
    m_Model = LoadModelFromBinary(binaryContent.data(), binaryContent.size());
    return CreateNetworkFromModel();
}
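
A sketch of the typical call pattern when the caller already owns the model bytes; the file path is a placeholder:

    #include <fstream>
    #include <iterator>
    #include <vector>

    std::ifstream file("model.tflite", std::ios::binary);
    std::vector<uint8_t> content((std::istreambuf_iterator<char>(file)),
                                 std::istreambuf_iterator<char>());

    TfLiteParserImpl parser;
    armnn::INetworkPtr network = parser.CreateNetworkFromBinary(content);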

◆ CreateNetworkFromBinaryAsDynamic()

armnn::INetworkPtr CreateNetworkFromBinaryAsDynamic ( const std::vector< uint8_t > &  binaryContent)

◆ CreateNetworkFromBinaryFile()

INetworkPtr CreateNetworkFromBinaryFile ( const char *  graphFile)

Create the network from a flatbuffers binary file on disk.

Definition at line 716 of file TfLiteParser.cpp.

References TfLiteParserImpl::LoadModelFromFile().

{
    ResetParser();
    m_Model = LoadModelFromFile(graphFile);
    return CreateNetworkFromModel();
}

◆ GetBuffer()

static TfLiteParserImpl::BufferRawPtr GetBuffer (const ModelPtr & model, size_t bufferIndex)

◆ GetInputs()

static TfLiteParserImpl::TensorRawPtrVector GetInputs (const ModelPtr & model, size_t subgraphIndex, size_t operatorIndex)

Definition at line 3861 of file TfLiteParser.cpp.

References CHECK_MODEL, and CHECKED_NON_NEGATIVE.

Referenced by armnnTfLiteParser::ComputeWrappedIndex(), TfLiteParserImpl::LoadModel(), TfLiteParserImpl::OutputShapeOfReshape(), and TfLiteParserImpl::OutputShapeOfSqueeze().

{
    CHECK_MODEL(model, subgraphIndex, operatorIndex);

    const auto & subgraphPtr = model->subgraphs[subgraphIndex];
    const auto & operatorPtr = subgraphPtr->operators[operatorIndex];

    size_t inputCount = operatorPtr->inputs.size();
    TensorRawPtrVector result;
    for (size_t i=0; i<inputCount; ++i)
    {
        // If the input location is -1 then assume input is turned off.
        if (operatorPtr->inputs[i] == -1)
        {
            continue;
        }
        else
        {
            uint32_t inputId = CHECKED_NON_NEGATIVE(operatorPtr->inputs[i]);
            result.push_back(subgraphPtr->tensors[inputId].get());
        }
    }
    return result;
}
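
Note that optional inputs encoded as -1 are skipped, so the returned vector can be shorter than the operator's input list. A sketch enumerating the input tensor names of every operator in subgraph 0; the model path is a placeholder:

    #include <iostream>

    TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromFile("model.tflite");
    size_t operatorCount = model->subgraphs[0]->operators.size();
    for (size_t op = 0; op < operatorCount; ++op)
    {
        for (TfLiteParserImpl::TensorRawPtr tensor : TfLiteParserImpl::GetInputs(model, 0, op))
        {
            std::cout << tensor->name << "\n";
        }
    }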

◆ GetInputTensorIds()

static std::vector< int32_t > & GetInputTensorIds (const ModelPtr & model, size_t subgraphIndex, size_t operatorIndex)

Definition at line 3941 of file TfLiteParser.cpp.

References CHECK_MODEL.

Referenced by armnnTfLiteParser::ComputeWrappedIndex(), TfLiteParserImpl::LoadModel(), TfLiteParserImpl::OutputShapeOfReshape(), and TfLiteParserImpl::OutputShapeOfSqueeze().

{
    CHECK_MODEL(model, subgraphIndex, operatorIndex);
    const auto & subgraphPtr = model->subgraphs[subgraphIndex];
    const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
    return operatorPtr->inputs;
}

◆ GetNetworkInputBindingInfo()

BindingPointInfo GetNetworkInputBindingInfo (size_t subgraphId, const std::string & name) const

Retrieve binding info (layer id and tensor info) for the network input identified by the given layer name and subgraph id.

Definition at line 4198 of file TfLiteParser.cpp.

References CHECK_LOCATION, CHECK_SUBGRAPH, TfLiteParserImpl::GetSubgraphInputs(), and armnnDeserializer::ToTensorInfo().

{
    CHECK_SUBGRAPH(m_Model, subgraphId);
    auto inputs = GetSubgraphInputs(m_Model, subgraphId);
    for (auto const & input : inputs)
    {
        if (input.second->name == name)
        {
            auto bindingId = GenerateLayerBindingId(subgraphId, input.first);
            auto inputTensorInfo = ToTensorInfo(input.second);
            // Input tensors are always treated as constant tensors during network execution.
            inputTensorInfo.SetConstant(true);
            return std::make_pair(bindingId, inputTensorInfo);
        }
    }

    std::stringstream bindings;
    for (auto const & input : inputs)
    {
        bindings << "'" << input.second->name << "' ";
    }

    throw ParseException(
        fmt::format("No input binding found for subgraph:{} and name:{}. "
                    "Possible inputs are: [{}] {}",
                    subgraphId,
                    name,
                    bindings.str(),
                    CHECK_LOCATION().AsString()));
}

◆ GetNetworkOutputBindingInfo()

BindingPointInfo GetNetworkOutputBindingInfo (size_t subgraphId, const std::string & name) const

Retrieve binding info (layer id and tensor info) for the network output identified by the given layer name and subgraph id.

Definition at line 4230 of file TfLiteParser.cpp.

References CHECK_LOCATION, CHECK_SUBGRAPH, TfLiteParserImpl::GetSubgraphOutputs(), and armnnDeserializer::ToTensorInfo().

{
    CHECK_SUBGRAPH(m_Model, subgraphId);
    auto outputs = GetSubgraphOutputs(m_Model, subgraphId);
    for (unsigned int i = 0; i < outputs.size(); ++i)
    {
        auto const output = outputs[i];
        if (output.second->name == name)
        {
            auto bindingId = GenerateLayerBindingId(subgraphId, output.first);
            std::vector<unsigned int> shape = m_OverridenOutputShapes.size() > 0 ?
                                              m_OverridenOutputShapes[i] : AsUnsignedVector(output.second->shape);
            return std::make_pair(bindingId, ToTensorInfo(output.second, shape));
        }
    }

    std::stringstream bindings;
    for (auto const & output : outputs)
    {
        bindings << "'" << output.second->name << "' ";
    }

    throw ParseException(
        fmt::format("No output binding found for subgraph:{} and name:{}. "
                    "Possible outputs are: [{}] {}",
                    subgraphId,
                    name,
                    bindings.str(),
                    CHECK_LOCATION().AsString()));
}
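
The returned pair is what the ArmNN runtime consumes when scheduling inference: the binding id selects the network input/output and the TensorInfo describes the user buffer. A sketch, assuming a runtime with a loaded network (runtime, networkId) and caller-owned inputData/outputData buffers of the correct size:

    // inputBinding / outputBinding retrieved as shown above.
    armnn::InputTensors inputTensors
    {
        { inputBinding.first, armnn::ConstTensor(inputBinding.second, inputData) }
    };
    armnn::OutputTensors outputTensors
    {
        { outputBinding.first, armnn::Tensor(outputBinding.second, outputData) }
    };
    runtime->EnqueueWorkload(networkId, inputTensors, outputTensors);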

◆ GetOutputs()

static TfLiteParserImpl::TensorRawPtrVector GetOutputs (const ModelPtr & model, size_t subgraphIndex, size_t operatorIndex)

Definition at line 3888 of file TfLiteParser.cpp.

References CHECK_MODEL, CHECK_TENSOR, and CHECKED_NON_NEGATIVE.

Referenced by armnnTfLiteParser::ComputeWrappedIndex(), TfLiteParserImpl::LoadModel(), TfLiteParserImpl::OutputShapeOfReshape(), and TfLiteParserImpl::OutputShapeOfSqueeze().

{
    CHECK_MODEL(model, subgraphIndex, operatorIndex);

    const auto & subgraphPtr = model->subgraphs[subgraphIndex];
    const auto & operatorPtr = subgraphPtr->operators[operatorIndex];

    size_t outputCount = operatorPtr->outputs.size();
    TensorRawPtrVector result(outputCount);
    for (size_t i=0; i<outputCount; ++i)
    {
        uint32_t outputId = CHECKED_NON_NEGATIVE(operatorPtr->outputs[i]);
        CHECK_TENSOR(model, subgraphIndex, outputId);
        result[i] = subgraphPtr->tensors[outputId].get();
    }
    return result;
}

◆ GetOutputTensorIds()

static std::vector< int32_t > & GetOutputTensorIds (const ModelPtr & model, size_t subgraphIndex, size_t operatorIndex)

Definition at line 3951 of file TfLiteParser.cpp.

References ARMNN_ASSERT, CHECK_LOCATION, CHECK_MODEL, CHECK_SUBGRAPH, IConnectableLayer::GetInputSlot(), IConnectableLayer::GetNumInputSlots(), IConnectableLayer::GetNumOutputSlots(), IConnectableLayer::GetOutputSlot(), TfLiteParserImpl::GetSubgraphInputs(), TfLiteParserImpl::GetSubgraphOutputs(), IOutputSlot::SetTensorInfo(), and armnnDeserializer::ToTensorInfo().

Referenced by armnnTfLiteParser::ComputeWrappedIndex(), TfLiteParserImpl::LoadModel(), TfLiteParserImpl::OutputShapeOfReshape(), and TfLiteParserImpl::OutputShapeOfSqueeze().

{
    CHECK_MODEL(model, subgraphIndex, operatorIndex);
    const auto & subgraphPtr = model->subgraphs[subgraphIndex];
    const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
    return operatorPtr->outputs;
}

◆ GetSubgraphCount()

size_t GetSubgraphCount ( ) const

Return the number of subgraphs in the parsed model.

Definition at line 4262 of file TfLiteParser.cpp.

{
    return m_Model->subgraphs.size();
}
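
Together with the tensor-name getters below, this supports a simple inspection loop over the whole model; a sketch, assuming parser has already parsed a model:

    #include <iostream>

    for (size_t i = 0; i < parser.GetSubgraphCount(); ++i)
    {
        for (const std::string& name : parser.GetSubgraphInputTensorNames(i))
        {
            std::cout << "subgraph " << i << " input:  " << name << "\n";
        }
        for (const std::string& name : parser.GetSubgraphOutputTensorNames(i))
        {
            std::cout << "subgraph " << i << " output: " << name << "\n";
        }
    }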

◆ GetSubgraphInputs()

static TfLiteParserImpl::TensorIdRawPtrVector GetSubgraphInputs (const ModelPtr & model, size_t subgraphIndex)

Definition at line 3908 of file TfLiteParser.cpp.

References CHECK_SUBGRAPH, CHECK_TENSOR, and CHECKED_NON_NEGATIVE.

Referenced by TfLiteParserImpl::GetNetworkInputBindingInfo(), TfLiteParserImpl::GetOutputTensorIds(), and TfLiteParserImpl::GetSubgraphInputTensorNames().

{
    CHECK_SUBGRAPH(model, subgraphIndex);
    const auto & subgraphPtr = model->subgraphs[subgraphIndex];

    size_t inputCount = subgraphPtr->inputs.size();
    TensorIdRawPtrVector result(inputCount);
    for (size_t i=0; i<inputCount; ++i)
    {
        uint32_t inputId = CHECKED_NON_NEGATIVE(subgraphPtr->inputs[i]);
        CHECK_TENSOR(model, subgraphIndex, inputId);
        result[i] = std::make_pair(inputId, subgraphPtr->tensors[inputId].get());
    }
    return result;
}

◆ GetSubgraphInputTensorNames()

std::vector< std::string > GetSubgraphInputTensorNames ( size_t  subgraphId) const

Return the input tensor names for a given subgraph.

Definition at line 4267 of file TfLiteParser.cpp.

References CHECK_SUBGRAPH, and TfLiteParserImpl::GetSubgraphInputs().

{
    CHECK_SUBGRAPH(m_Model, subgraphId);
    auto inputs = GetSubgraphInputs(m_Model, subgraphId);
    std::vector<std::string> result;
    result.reserve(inputs.size());
    for (auto const & input : inputs)
    {
        result.push_back(input.second->name);
    }
    return result;
}

◆ GetSubgraphOutputs()

static TfLiteParserImpl::TensorIdRawPtrVector GetSubgraphOutputs (const ModelPtr & model, size_t subgraphIndex)

Definition at line 3925 of file TfLiteParser.cpp.

References CHECK_SUBGRAPH, and CHECKED_NON_NEGATIVE.

Referenced by TfLiteParserImpl::GetNetworkOutputBindingInfo(), TfLiteParserImpl::GetOutputTensorIds(), and TfLiteParserImpl::GetSubgraphOutputTensorNames().

{
    CHECK_SUBGRAPH(model, subgraphIndex);
    const auto & subgraphPtr = model->subgraphs[subgraphIndex];

    size_t outputCount = subgraphPtr->outputs.size();
    TensorIdRawPtrVector result(outputCount);
    for (size_t i=0; i<outputCount; ++i)
    {
        uint32_t outputId = CHECKED_NON_NEGATIVE(subgraphPtr->outputs[i]);
        result[i] = std::make_pair(outputId, subgraphPtr->tensors[outputId].get());
    }
    return result;
}
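
Each TensorIdRawPtr pairs the tensor's index within the subgraph with a raw pointer to its tflite::TensorT. A sketch printing both for subgraph 0, assuming model was obtained from LoadModelFromFile as in the earlier sketches:

    for (const TfLiteParserImpl::TensorIdRawPtr& output : TfLiteParserImpl::GetSubgraphOutputs(model, 0))
    {
        std::cout << "tensor #" << output.first << ": " << output.second->name << "\n";
    }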

◆ GetSubgraphOutputTensorNames()

std::vector< std::string > GetSubgraphOutputTensorNames ( size_t  subgraphId) const

Return the output tensor names for a given subgraph.

Definition at line 4280 of file TfLiteParser.cpp.

References CHECK_SUBGRAPH, and TfLiteParserImpl::GetSubgraphOutputs().

{
    CHECK_SUBGRAPH(m_Model, subgraphId);
    auto outputs = GetSubgraphOutputs(m_Model, subgraphId);
    std::vector<std::string> result;
    result.reserve(outputs.size());
    for (auto const & output : outputs)
    {
        result.push_back(output.second->name);
    }
    return result;
}

◆ GetVersion()

static const std::string GetVersion ()

Retrieve the parser version in X.Y.Z form, where X is the major, Y the minor, and Z the patch version number.

Definition at line 4293 of file TfLiteParser.cpp.

References TFLITE_PARSER_VERSION.

{
    return TFLITE_PARSER_VERSION;
}
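
A one-line sketch of querying the version at runtime:

    std::cout << "TfLite parser version: " << TfLiteParserImpl::GetVersion() << "\n";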

◆ LoadModel()

armnn::INetworkPtr LoadModel ( std::unique_ptr< tflite::ModelT >  model)

Definition at line 731 of file TfLiteParser.cpp.

References TensorShape::AreAllDimensionsSpecified(), ARMNN_ASSERT, ARMNN_ASSERT_MSG, ARMNN_LOG, CHECK_LOCATION, CHECK_MODEL, CHECK_SUPPORTED_FUSED_ACTIVATION, CHECK_TENSOR, CHECK_VALID_SIZE, CHECKED_NON_NEGATIVE, armnn::error, TfLiteParserImpl::GetBuffer(), TensorInfo::GetDataType(), TfLiteParserImpl::GetInputs(), TfLiteParserImpl::GetInputTensorIds(), TensorInfo::GetNumBytes(), TensorShape::GetNumDimensions(), TensorInfo::GetNumElements(), TfLiteParserImpl::GetOutputs(), IConnectableLayer::GetOutputSlot(), TfLiteParserImpl::GetOutputTensorIds(), TensorInfo::GetShape(), SoftmaxDescriptor::m_Beta, Convolution2dDescriptor::m_BiasEnabled, Convolution3dDescriptor::m_BiasEnabled, DepthwiseConvolution2dDescriptor::m_BiasEnabled, TransposeConvolution2dDescriptor::m_BiasEnabled, BatchToSpaceNdDescriptor::m_BlockShape, SpaceToBatchNdDescriptor::m_BlockShape, BatchToSpaceNdDescriptor::m_Crops, Pooling2dDescriptor::m_DataLayout, Convolution2dDescriptor::m_DataLayout, Convolution3dDescriptor::m_DataLayout, DepthwiseConvolution2dDescriptor::m_DataLayout, L2NormalizationDescriptor::m_DataLayout, BatchToSpaceNdDescriptor::m_DataLayout, SpaceToBatchNdDescriptor::m_DataLayout, TransposeConvolution2dDescriptor::m_DataLayout, Convolution2dDescriptor::m_DilationX, Convolution3dDescriptor::m_DilationX, DepthwiseConvolution2dDescriptor::m_DilationX, Convolution2dDescriptor::m_DilationY, Convolution3dDescriptor::m_DilationY, DepthwiseConvolution2dDescriptor::m_DilationY, Convolution3dDescriptor::m_DilationZ, TransposeConvolution2dDescriptor::m_OutputShape, TransposeConvolution2dDescriptor::m_OutputShapeEnabled, Pooling2dDescriptor::m_OutputShapeRounding, Convolution3dDescriptor::m_PadBack, Pooling2dDescriptor::m_PadBottom, Convolution2dDescriptor::m_PadBottom, Convolution3dDescriptor::m_PadBottom, DepthwiseConvolution2dDescriptor::m_PadBottom, TransposeConvolution2dDescriptor::m_PadBottom, Pooling2dDescriptor::m_PaddingMethod, Convolution3dDescriptor::m_PadFront, Pooling2dDescriptor::m_PadLeft, Convolution2dDescriptor::m_PadLeft, Convolution3dDescriptor::m_PadLeft, DepthwiseConvolution2dDescriptor::m_PadLeft, TransposeConvolution2dDescriptor::m_PadLeft, SpaceToBatchNdDescriptor::m_PadList, Pooling2dDescriptor::m_PadRight, Convolution2dDescriptor::m_PadRight, Convolution3dDescriptor::m_PadRight, DepthwiseConvolution2dDescriptor::m_PadRight, TransposeConvolution2dDescriptor::m_PadRight, Pooling2dDescriptor::m_PadTop, Convolution2dDescriptor::m_PadTop, Convolution3dDescriptor::m_PadTop, DepthwiseConvolution2dDescriptor::m_PadTop, TransposeConvolution2dDescriptor::m_PadTop, Pooling2dDescriptor::m_PoolHeight, Pooling2dDescriptor::m_PoolType, Pooling2dDescriptor::m_PoolWidth, Pooling2dDescriptor::m_StrideX, Convolution2dDescriptor::m_StrideX, Convolution3dDescriptor::m_StrideX, DepthwiseConvolution2dDescriptor::m_StrideX, TransposeConvolution2dDescriptor::m_StrideX, Pooling2dDescriptor::m_StrideY, Convolution2dDescriptor::m_StrideY, Convolution3dDescriptor::m_StrideY, DepthwiseConvolution2dDescriptor::m_StrideY, TransposeConvolution2dDescriptor::m_StrideY, Convolution3dDescriptor::m_StrideZ, ReshapeDescriptor::m_TargetShape, armnn::NDHWC, armnn::NHWC, armnn::numeric_cast(), IOutputSlot::SetTensorInfo(), armnnDeserializer::ToTensorInfo(), and Exception::what().

{
    ResetParser();
    m_Model = std::move(model);

    return CreateNetworkFromModel();
}
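
This overload is useful when the flatbuffer model needs to be inspected or modified before the ArmNN network is built. A sketch combining it with LoadModelFromFile (documented below); the path is a placeholder:

    TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromFile("model.tflite");
    std::cout << "subgraphs: " << model->subgraphs.size() << "\n";

    TfLiteParserImpl parser;
    armnn::INetworkPtr network = parser.LoadModel(std::move(model));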

◆ LoadModelFromBinary()

static TfLiteParserImpl::ModelPtr LoadModelFromBinary (const uint8_t * binaryContent, size_t len)

Definition at line 3842 of file TfLiteParser.cpp.

References CHECK_LOCATION.

Referenced by TfLiteParserImpl::CreateNetworkFromBinary(), and TfLiteParserImpl::LoadModelFromFile().

{
    if (binaryContent == nullptr)
    {
        throw InvalidArgumentException(fmt::format("Invalid (null) binary content {}",
                                                   CHECK_LOCATION().AsString()));
    }
    flatbuffers::Verifier verifier(binaryContent, len);
    if (verifier.VerifyBuffer<tflite::Model>() == false)
    {
        throw ParseException(
            fmt::format("Buffer doesn't conform to the expected Tensorflow Lite "
                        "flatbuffers format. size:{} {}",
                        len,
                        CHECK_LOCATION().AsString()));
    }
    return tflite::UnPackModel(binaryContent);
}

◆ LoadModelFromFile()

static TfLiteParserImpl::ModelPtr LoadModelFromFile (const char * fileName)

Definition at line 3818 of file TfLiteParser.cpp.

References CHECK_LOCATION, and TfLiteParserImpl::LoadModelFromBinary().

Referenced by TfLiteParserImpl::CreateNetworkFromBinaryFile().

{
    if (fileName == nullptr)
    {
        throw InvalidArgumentException(fmt::format("Invalid (null) file name {}",
                                                   CHECK_LOCATION().AsString()));
    }
    std::error_code errorCode;
    fs::path pathToFile(fileName);
    if (!fs::exists(pathToFile, errorCode))
    {
        //fmt::format() could not be used here (format error)
        std::stringstream msg;
        msg << "Cannot find the file (" << fileName << ") errorCode: " << errorCode
            << " " << CHECK_LOCATION().AsString();

        throw FileNotFoundException(msg.str());
    }
    std::ifstream file(fileName, std::ios::binary);
    std::string fileContent((std::istreambuf_iterator<char>(file)), std::istreambuf_iterator<char>());
    return LoadModelFromBinary(reinterpret_cast<const uint8_t *>(fileContent.c_str()),
                               fileContent.size());
}

◆ OutputShapeOfReshape()

static armnn::TensorInfo OutputShapeOfReshape (const armnn::TensorInfo & inputTensorInfo, const std::vector< int32_t > & targetDimsIn)

Definition at line 2481 of file TfLiteParser.cpp.

References ARMNN_ASSERT, ARMNN_THROW_PARSE_EXCEPTION, CHECK_LOCATION, CHECK_MODEL, CHECK_SUPPORTED_FUSED_ACTIVATION, CHECK_VALID_SIZE, CHECKED_NON_NEGATIVE, armnnDeserializer::CheckShape(), IOutputSlot::Connect(), TfLiteParserImpl::GetBuffer(), TensorInfo::GetDataType(), TfLiteParserImpl::GetInputs(), IConnectableLayer::GetInputSlot(), TfLiteParserImpl::GetInputTensorIds(), IConnectableLayer::GetName(), TensorInfo::GetNumBytes(), TensorShape::GetNumDimensions(), TensorInfo::GetNumDimensions(), TensorInfo::GetNumElements(), IConnectableLayer::GetNumOutputSlots(), TfLiteParserImpl::GetOutputs(), IConnectableLayer::GetOutputSlot(), TfLiteParserImpl::GetOutputTensorIds(), TensorInfo::GetQuantizationOffset(), TensorInfo::GetQuantizationScale(), TensorInfo::GetShape(), armnnUtils::GetUnsignedAxis(), StackDescriptor::m_Axis, FullyConnectedDescriptor::m_BiasEnabled, FullyConnectedDescriptor::m_ConstantWeights, DetectionPostProcessDescriptor::m_DetectionsPerClass, StackDescriptor::m_InputShape, DetectionPostProcessDescriptor::m_MaxClassesPerDetection, DetectionPostProcessDescriptor::m_MaxDetections, ResizeDescriptor::m_Method, DetectionPostProcessDescriptor::m_NmsIouThreshold, DetectionPostProcessDescriptor::m_NmsScoreThreshold, DetectionPostProcessDescriptor::m_NumClasses, StackDescriptor::m_NumInputs, DetectionPostProcessDescriptor::m_ScaleH, DetectionPostProcessDescriptor::m_ScaleW, DetectionPostProcessDescriptor::m_ScaleX, DetectionPostProcessDescriptor::m_ScaleY, ReshapeDescriptor::m_TargetShape, FullyConnectedDescriptor::m_TransposeWeightMatrix, DetectionPostProcessDescriptor::m_UseRegularNms, armnn::MaxNumOfTensorDimensions, armnn::NHWC, armnn::numeric_cast(), armnnUtils::ProcessConcatInputTensorInfo(), OriginsDescriptor::SetConcatAxis(), TensorInfo::SetShape(), IOutputSlot::SetTensorInfo(), ViewsDescriptor::SetViewOriginCoord(), ViewsDescriptor::SetViewSize(), and armnnDeserializer::ToTensorInfo().

{
    std::vector<unsigned int> outputDims(targetDimsIn.begin(), targetDimsIn.end());
    const auto stretchDim = std::find(targetDimsIn.begin(), targetDimsIn.end(), -1);

    if (stretchDim != targetDimsIn.end())
    {
        if (std::find(std::next(stretchDim), targetDimsIn.end(), -1) != targetDimsIn.end())
        {
            throw ParseException(
                fmt::format("At most one component of shape can be -1 {}", CHECK_LOCATION().AsString()));
        }

        auto targetNumElements =
            armnn::numeric_cast<unsigned int>(
                std::accumulate(targetDimsIn.begin(), targetDimsIn.end(), -1, std::multiplies<int32_t>()));

        auto stretchIndex = static_cast<size_t>(std::distance(targetDimsIn.begin(), stretchDim));
        outputDims[stretchIndex] = inputTensorInfo.GetNumElements() / targetNumElements;
    }

    TensorShape outputShape = TensorShape(static_cast<unsigned int>(outputDims.size()), outputDims.data());

    TensorInfo reshapeInfo = inputTensorInfo;
    reshapeInfo.SetShape(outputShape);

    return reshapeInfo;
}
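
A worked example of the stretch-dimension rule: the accumulate over { -1, 6 } starts at -1, giving (-1 * -1 * 6) = 6 known elements, so for a 24-element input the -1 resolves to 24 / 6 = 4:

    armnn::TensorInfo inputInfo({ 1, 2, 3, 4 }, armnn::DataType::Float32); // 24 elements
    armnn::TensorInfo outInfo = TfLiteParserImpl::OutputShapeOfReshape(inputInfo, { -1, 6 });
    // outInfo.GetShape() is (4, 6); data type and quantization parameters are preserved.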

◆ OutputShapeOfSqueeze()

static armnn::TensorInfo OutputShapeOfSqueeze (std::vector< uint32_t > squeezeDims, const armnn::TensorInfo & inputTensorInfo)

Definition at line 1824 of file TfLiteParser.cpp.

References ARMNN_ASSERT, ARMNN_THROW_PARSE_EXCEPTION, CHECK_LOCATION, CHECK_MODEL, CHECK_VALID_SIZE, IOutputSlot::Connect(), TfLiteParserImpl::GetBuffer(), TensorInfo::GetDataType(), TfLiteParserImpl::GetInputs(), IConnectableLayer::GetInputSlot(), TfLiteParserImpl::GetInputTensorIds(), TensorInfo::GetNumBytes(), TensorShape::GetNumDimensions(), TensorInfo::GetNumDimensions(), TensorInfo::GetNumElements(), TfLiteParserImpl::GetOutputs(), IConnectableLayer::GetOutputSlot(), TfLiteParserImpl::GetOutputTensorIds(), TensorInfo::GetShape(), armnn::IgnoreUnused(), ActivationDescriptor::m_A, MeanDescriptor::m_Axis, ActivationDescriptor::m_B, StridedSliceDescriptor::m_Begin, StridedSliceDescriptor::m_BeginMask, StridedSliceDescriptor::m_DataLayout, StridedSliceDescriptor::m_EllipsisMask, StridedSliceDescriptor::m_End, StridedSliceDescriptor::m_EndMask, ActivationDescriptor::m_Function, MeanDescriptor::m_KeepDims, StridedSliceDescriptor::m_NewAxisMask, PadDescriptor::m_PaddingMode, PadDescriptor::m_PadList, PadDescriptor::m_PadValue, StridedSliceDescriptor::m_ShrinkAxisMask, StridedSliceDescriptor::m_Stride, ReshapeDescriptor::m_TargetShape, armnn::NHWC, TensorInfo::SetShape(), IOutputSlot::SetTensorInfo(), armnn::Signed32, armnn::Signed64, and armnnDeserializer::ToTensorInfo().

{
    CHECK_VALID_SIZE(squeezeDims.size(), 0, 1, 2, 3, 4);
    static const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };

    if (inputTensorInfo.GetNumDimensions() > 4)
    {
        std::stringstream ss;
        ss << "Input tensor has unexpected number of dimensions:" << inputTensorInfo.GetNumDimensions()
           << " shape:" << inputTensorInfo.GetShape() << " "
           << CHECK_LOCATION().AsString();
        throw ParseException(ss.str());
    }

    if (squeezeDims.empty())
    {
        squeezeDims.assign(dimensionSequence,
                           dimensionSequence+inputTensorInfo.GetNumDimensions());
    }

    std::vector<uint32_t> outputDims;
    for(unsigned int i = 0; i < inputTensorInfo.GetNumDimensions(); i++)
    {
        bool skipSqueeze = (std::find(squeezeDims.begin(), squeezeDims.end(), i) == squeezeDims.end());
        auto currentDimension = inputTensorInfo.GetShape()[i];
        if (skipSqueeze || currentDimension != 1)
        {
            outputDims.push_back(currentDimension);
        }
    }

    if (outputDims.size() > 4)
    {
        std::stringstream ss;
        ss << "Output tensor has unexpected number of dimensions:" << inputTensorInfo.GetNumDimensions()
           << " shape:" << inputTensorInfo.GetShape() << " "
           << CHECK_LOCATION().AsString();
        throw ParseException(ss.str());
    }

    TensorShape outShape = TensorShape(static_cast<unsigned int>(outputDims.size()),
                                       outputDims.data());

    // we need to preserve the tensor type and the quantization data as well
    TensorInfo outTensorInfo = inputTensorInfo;
    outTensorInfo.SetShape(outShape);

    return outTensorInfo;
}
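
A worked example: with an empty squeezeDims every dimension is a candidate, so all size-1 dimensions are removed and shape (1, 3, 1, 5) becomes (3, 5):

    armnn::TensorInfo inputInfo({ 1, 3, 1, 5 }, armnn::DataType::Float32);
    armnn::TensorInfo outInfo = TfLiteParserImpl::OutputShapeOfSqueeze({}, inputInfo);
    // outInfo.GetShape() is (3, 5); data type and quantization parameters carry over.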

The documentation for this class was generated from the following files:

TfLiteParser.hpp
TfLiteParser.cpp