ArmNN
 22.11
TfLiteParserImpl Class Reference

#include <TfLiteParser.hpp>

Public Types

using ModelPtr = std::unique_ptr< tflite::ModelT >
 
using SubgraphPtr = std::unique_ptr< tflite::SubGraphT >
 
using OperatorPtr = std::unique_ptr< tflite::OperatorT >
 
using OperatorCodePtr = std::unique_ptr< tflite::OperatorCodeT >
 
using TensorPtr = std::unique_ptr< tflite::TensorT >
 
using TensorRawPtr = const tflite::TensorT *
 
using TensorRawPtrVector = std::vector< TensorRawPtr >
 
using TensorIdRawPtr = std::pair< size_t, TensorRawPtr >
 
using TensorIdRawPtrVector = std::vector< TensorIdRawPtr >
 
using BufferPtr = std::unique_ptr< tflite::BufferT >
 
using BufferRawPtr = const tflite::BufferT *
 

Public Member Functions

armnn::INetworkPtr CreateNetworkFromBinaryFile (const char *graphFile)
 Create the network from a flatbuffers binary file on disk. More...
 
armnn::INetworkPtr CreateNetworkFromBinary (const std::vector< uint8_t > &binaryContent)
 Create the network from a flatbuffers binary. More...
 
BindingPointInfo GetNetworkInputBindingInfo (size_t subgraphId, const std::string &name) const
 Retrieve binding info (layer id and tensor info) for the network input identified by the given layer name and subgraph id. More...
 
BindingPointInfo GetNetworkOutputBindingInfo (size_t subgraphId, const std::string &name) const
 Retrieve binding info (layer id and tensor info) for the network output identified by the given layer name and subgraph id. More...
 
size_t GetSubgraphCount () const
 Return the number of subgraphs in the parsed model. More...
 
std::vector< std::string > GetSubgraphInputTensorNames (size_t subgraphId) const
 Return the input tensor names for a given subgraph. More...
 
std::vector< std::string > GetSubgraphOutputTensorNames (size_t subgraphId) const
 Return the output tensor names for a given subgraph. More...
 
 TfLiteParserImpl (const armnn::Optional< ITfLiteParser::TfLiteParserOptions > &options=armnn::EmptyOptional())
 
 ~TfLiteParserImpl ()=default
 
armnn::INetworkPtr CreateNetworkFromBinaryAsDynamic (const std::vector< uint8_t > &binaryContent)
 
armnn::INetworkPtr LoadModel (std::unique_ptr< tflite::ModelT > model)
 

Static Public Member Functions

static ModelPtr LoadModelFromFile (const char *fileName)
 
static ModelPtr LoadModelFromBinary (const uint8_t *binaryContent, size_t len)
 
static TensorRawPtrVector GetInputs (const ModelPtr &model, size_t subgraphIndex, size_t operatorIndex)
 
static TensorRawPtrVector GetOutputs (const ModelPtr &model, size_t subgraphIndex, size_t operatorIndex)
 
static TensorIdRawPtrVector GetSubgraphInputs (const ModelPtr &model, size_t subgraphIndex)
 
static TensorIdRawPtrVector GetSubgraphOutputs (const ModelPtr &model, size_t subgraphIndex)
 
static std::vector< int32_t > & GetInputTensorIds (const ModelPtr &model, size_t subgraphIndex, size_t operatorIndex)
 
static std::vector< int32_t > & GetOutputTensorIds (const ModelPtr &model, size_t subgraphIndex, size_t operatorIndex)
 
static BufferRawPtr GetBuffer (const ModelPtr &model, size_t bufferIndex)
 
static armnn::TensorInfo OutputShapeOfSqueeze (std::vector< uint32_t > squeezeDims, const armnn::TensorInfo &inputTensorInfo)
 
static armnn::TensorInfo OutputShapeOfReshape (const armnn::TensorInfo &inputTensorInfo, const std::vector< int32_t > &targetDimsIn)
 
static const std::string GetVersion ()
 Retrieve version in X.Y.Z form. More...
 

Detailed Description

Definition at line 25 of file TfLiteParser.hpp.

Member Typedef Documentation

◆ BufferPtr

using BufferPtr = std::unique_ptr<tflite::BufferT>

Definition at line 38 of file TfLiteParser.hpp.

◆ BufferRawPtr

using BufferRawPtr = const tflite::BufferT *

Definition at line 39 of file TfLiteParser.hpp.

◆ ModelPtr

using ModelPtr = std::unique_ptr<tflite::ModelT>

Definition at line 29 of file TfLiteParser.hpp.

◆ OperatorCodePtr

using OperatorCodePtr = std::unique_ptr<tflite::OperatorCodeT>

Definition at line 32 of file TfLiteParser.hpp.

◆ OperatorPtr

using OperatorPtr = std::unique_ptr<tflite::OperatorT>

Definition at line 31 of file TfLiteParser.hpp.

◆ SubgraphPtr

using SubgraphPtr = std::unique_ptr<tflite::SubGraphT>

Definition at line 30 of file TfLiteParser.hpp.

◆ TensorIdRawPtr

using TensorIdRawPtr = std::pair<size_t, TensorRawPtr>

Definition at line 36 of file TfLiteParser.hpp.

◆ TensorIdRawPtrVector

using TensorIdRawPtrVector = std::vector<TensorIdRawPtr>

Definition at line 37 of file TfLiteParser.hpp.

◆ TensorPtr

using TensorPtr = std::unique_ptr<tflite::TensorT>

Definition at line 33 of file TfLiteParser.hpp.

◆ TensorRawPtr

using TensorRawPtr = const tflite::TensorT *

Definition at line 34 of file TfLiteParser.hpp.

◆ TensorRawPtrVector

using TensorRawPtrVector = std::vector<TensorRawPtr>

Definition at line 35 of file TfLiteParser.hpp.

Constructor & Destructor Documentation

◆ TfLiteParserImpl()

Definition at line 681 of file TfLiteParser.cpp.

682 : m_Options(options)
683 , m_Network(nullptr, nullptr)
684 , m_ParserFunctions(tflite::BuiltinOperator_MAX+1, &TfLiteParserImpl::ParseUnsupportedOperator)
685 {
686  // register supported operators
687  m_ParserFunctions[tflite::BuiltinOperator_ABS] = &TfLiteParserImpl::ParseAbs;
688  m_ParserFunctions[tflite::BuiltinOperator_ADD] = &TfLiteParserImpl::ParseAdd;
689  m_ParserFunctions[tflite::BuiltinOperator_ARG_MIN] = &TfLiteParserImpl::ParseArgMin;
690  m_ParserFunctions[tflite::BuiltinOperator_ARG_MAX] = &TfLiteParserImpl::ParseArgMax;
691  m_ParserFunctions[tflite::BuiltinOperator_AVERAGE_POOL_2D] = &TfLiteParserImpl::ParseAveragePool2D;
692  m_ParserFunctions[tflite::BuiltinOperator_BATCH_TO_SPACE_ND] = &TfLiteParserImpl::ParseBatchToSpaceND;
693  m_ParserFunctions[tflite::BuiltinOperator_BATCH_MATMUL] = &TfLiteParserImpl::ParseBatchMatMul;
694  m_ParserFunctions[tflite::BuiltinOperator_CAST] = &TfLiteParserImpl::ParseCast;
695  m_ParserFunctions[tflite::BuiltinOperator_CONCATENATION] = &TfLiteParserImpl::ParseConcatenation;
696  m_ParserFunctions[tflite::BuiltinOperator_CONV_2D] = &TfLiteParserImpl::ParseConv2D;
697  // Conv3D support was added in TF 2.5, so for backwards compatibility a hash define is needed.
698  #if defined(ARMNN_POST_TFLITE_2_4)
699  m_ParserFunctions[tflite::BuiltinOperator_CONV_3D] = &TfLiteParserImpl::ParseConv3D;
700  #endif
701  m_ParserFunctions[tflite::BuiltinOperator_CUSTOM] = &TfLiteParserImpl::ParseCustomOperator;
702  m_ParserFunctions[tflite::BuiltinOperator_DEPTH_TO_SPACE] = &TfLiteParserImpl::ParseDepthToSpace;
703  m_ParserFunctions[tflite::BuiltinOperator_DEPTHWISE_CONV_2D] = &TfLiteParserImpl::ParseDepthwiseConv2D;
704  m_ParserFunctions[tflite::BuiltinOperator_DEQUANTIZE] = &TfLiteParserImpl::ParseDequantize;
705  m_ParserFunctions[tflite::BuiltinOperator_DIV] = &TfLiteParserImpl::ParseDiv;
706  m_ParserFunctions[tflite::BuiltinOperator_ELU] = &TfLiteParserImpl::ParseElu;
707  m_ParserFunctions[tflite::BuiltinOperator_EQUAL] = &TfLiteParserImpl::ParseEqual;
708  m_ParserFunctions[tflite::BuiltinOperator_EXP] = &TfLiteParserImpl::ParseExp;
709  m_ParserFunctions[tflite::BuiltinOperator_EXPAND_DIMS] = &TfLiteParserImpl::ParseExpandDims;
710  m_ParserFunctions[tflite::BuiltinOperator_FLOOR_DIV] = &TfLiteParserImpl::ParseFloorDiv;
711  m_ParserFunctions[tflite::BuiltinOperator_FULLY_CONNECTED] = &TfLiteParserImpl::ParseFullyConnected;
712  m_ParserFunctions[tflite::BuiltinOperator_GATHER] = &TfLiteParserImpl::ParseGather;
713  m_ParserFunctions[tflite::BuiltinOperator_GATHER_ND] = &TfLiteParserImpl::ParseGatherNd;
714  m_ParserFunctions[tflite::BuiltinOperator_GREATER] = &TfLiteParserImpl::ParseGreater;
715  m_ParserFunctions[tflite::BuiltinOperator_GREATER_EQUAL] = &TfLiteParserImpl::ParseGreaterOrEqual;
716  m_ParserFunctions[tflite::BuiltinOperator_HARD_SWISH] = &TfLiteParserImpl::ParseHardSwish;
717  m_ParserFunctions[tflite::BuiltinOperator_LEAKY_RELU] = &TfLiteParserImpl::ParseLeakyRelu;
718  m_ParserFunctions[tflite::BuiltinOperator_LESS] = &TfLiteParserImpl::ParseLess;
719  m_ParserFunctions[tflite::BuiltinOperator_LESS_EQUAL] = &TfLiteParserImpl::ParseLessOrEqual;
720  m_ParserFunctions[tflite::BuiltinOperator_LOCAL_RESPONSE_NORMALIZATION]
721  = &TfLiteParserImpl::ParseLocalResponseNormalization;
722  m_ParserFunctions[tflite::BuiltinOperator_LOG] = &TfLiteParserImpl::ParseLog;
723  m_ParserFunctions[tflite::BuiltinOperator_LOGICAL_NOT] = &TfLiteParserImpl::ParseLogicalNot;
724  m_ParserFunctions[tflite::BuiltinOperator_LOGISTIC] = &TfLiteParserImpl::ParseLogistic;
725  m_ParserFunctions[tflite::BuiltinOperator_LOG_SOFTMAX] = &TfLiteParserImpl::ParseLogSoftmax;
726  m_ParserFunctions[tflite::BuiltinOperator_L2_NORMALIZATION] = &TfLiteParserImpl::ParseL2Normalization;
727  m_ParserFunctions[tflite::BuiltinOperator_MAX_POOL_2D] = &TfLiteParserImpl::ParseMaxPool2D;
728  m_ParserFunctions[tflite::BuiltinOperator_MAXIMUM] = &TfLiteParserImpl::ParseMaximum;
729  m_ParserFunctions[tflite::BuiltinOperator_MEAN] = &TfLiteParserImpl::ParseMean;
730  m_ParserFunctions[tflite::BuiltinOperator_MINIMUM] = &TfLiteParserImpl::ParseMinimum;
731  m_ParserFunctions[tflite::BuiltinOperator_MIRROR_PAD] = &TfLiteParserImpl::ParseMirrorPad;
732  m_ParserFunctions[tflite::BuiltinOperator_MUL] = &TfLiteParserImpl::ParseMul;
733  m_ParserFunctions[tflite::BuiltinOperator_NEG] = &TfLiteParserImpl::ParseNeg;
734  m_ParserFunctions[tflite::BuiltinOperator_NOT_EQUAL] = &TfLiteParserImpl::ParseNotEqual;
735  m_ParserFunctions[tflite::BuiltinOperator_PACK] = &TfLiteParserImpl::ParsePack;
736  m_ParserFunctions[tflite::BuiltinOperator_PAD] = &TfLiteParserImpl::ParsePad;
737  m_ParserFunctions[tflite::BuiltinOperator_PADV2] = &TfLiteParserImpl::ParsePad;
738  m_ParserFunctions[tflite::BuiltinOperator_PRELU] = &TfLiteParserImpl::ParsePrelu;
739  m_ParserFunctions[tflite::BuiltinOperator_QUANTIZE] = &TfLiteParserImpl::ParseQuantize;
740  m_ParserFunctions[tflite::BuiltinOperator_RELU] = &TfLiteParserImpl::ParseRelu;
741  m_ParserFunctions[tflite::BuiltinOperator_RELU6] = &TfLiteParserImpl::ParseRelu6;
742  m_ParserFunctions[tflite::BuiltinOperator_REDUCE_MAX] = &TfLiteParserImpl::ParseReduceMax;
743  m_ParserFunctions[tflite::BuiltinOperator_REDUCE_MIN] = &TfLiteParserImpl::ParseReduceMin;
744  m_ParserFunctions[tflite::BuiltinOperator_REDUCE_PROD] = &TfLiteParserImpl::ParseReduceProd;
745  m_ParserFunctions[tflite::BuiltinOperator_RESHAPE] = &TfLiteParserImpl::ParseReshape;
746  m_ParserFunctions[tflite::BuiltinOperator_RESIZE_BILINEAR] = &TfLiteParserImpl::ParseResizeBilinear;
747  m_ParserFunctions[tflite::BuiltinOperator_RESIZE_NEAREST_NEIGHBOR] = &TfLiteParserImpl::ParseResizeNearestNeighbor;
748  m_ParserFunctions[tflite::BuiltinOperator_RSQRT] = &TfLiteParserImpl::ParseRsqrt;
749  m_ParserFunctions[tflite::BuiltinOperator_SQRT] = &TfLiteParserImpl::ParseSqrt;
750  m_ParserFunctions[tflite::BuiltinOperator_SHAPE] = &TfLiteParserImpl::ParseShape;
751  m_ParserFunctions[tflite::BuiltinOperator_SIN] = &TfLiteParserImpl::ParseSin;
752  m_ParserFunctions[tflite::BuiltinOperator_SLICE] = &TfLiteParserImpl::ParseSlice;
753  m_ParserFunctions[tflite::BuiltinOperator_SOFTMAX] = &TfLiteParserImpl::ParseSoftmax;
754  m_ParserFunctions[tflite::BuiltinOperator_SPACE_TO_BATCH_ND] = &TfLiteParserImpl::ParseSpaceToBatchND;
755  m_ParserFunctions[tflite::BuiltinOperator_SPLIT] = &TfLiteParserImpl::ParseSplit;
756  m_ParserFunctions[tflite::BuiltinOperator_SPLIT_V] = &TfLiteParserImpl::ParseSplitV;
757  m_ParserFunctions[tflite::BuiltinOperator_SQUEEZE] = &TfLiteParserImpl::ParseSqueeze;
758  m_ParserFunctions[tflite::BuiltinOperator_STRIDED_SLICE] = &TfLiteParserImpl::ParseStridedSlice;
759  m_ParserFunctions[tflite::BuiltinOperator_SUB] = &TfLiteParserImpl::ParseSub;
760  m_ParserFunctions[tflite::BuiltinOperator_SUM] = &TfLiteParserImpl::ParseSum;
761  m_ParserFunctions[tflite::BuiltinOperator_TANH] = &TfLiteParserImpl::ParseTanH;
762  m_ParserFunctions[tflite::BuiltinOperator_TRANSPOSE] = &TfLiteParserImpl::ParseTranspose;
763  m_ParserFunctions[tflite::BuiltinOperator_TRANSPOSE_CONV] = &TfLiteParserImpl::ParseTransposeConv;
764  m_ParserFunctions[tflite::BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_LSTM]
765  = &TfLiteParserImpl::ParseUnidirectionalSequenceLSTM;
766  m_ParserFunctions[tflite::BuiltinOperator_UNPACK] = &TfLiteParserImpl::ParseUnpack;
767 
768  // register supported custom operators
769  m_CustomParserFunctions["TFLite_Detection_PostProcess"] = &TfLiteParserImpl::ParseDetectionPostProcess;
770 }

◆ ~TfLiteParserImpl()

~TfLiteParserImpl ( )
default

Member Function Documentation

◆ CreateNetworkFromBinary()

INetworkPtr CreateNetworkFromBinary ( const std::vector< uint8_t > &  binaryContent)

Create the network from a flatbuffers binary.

Definition at line 789 of file TfLiteParser.cpp.

References TfLiteParserImpl::LoadModelFromBinary().

790 {
791  ResetParser();
792  m_Model = LoadModelFromBinary(binaryContent.data(), binaryContent.size());
793  return CreateNetworkFromModel();
794 }
static ModelPtr LoadModelFromBinary(const uint8_t *binaryContent, size_t len)

◆ CreateNetworkFromBinaryAsDynamic()

armnn::INetworkPtr CreateNetworkFromBinaryAsDynamic ( const std::vector< uint8_t > &  binaryContent)

◆ CreateNetworkFromBinaryFile()

INetworkPtr CreateNetworkFromBinaryFile ( const char *  graphFile)

Create the network from a flatbuffers binary file on disk.

Definition at line 782 of file TfLiteParser.cpp.

References TfLiteParserImpl::LoadModelFromFile().

783 {
784  ResetParser();
785  m_Model = LoadModelFromFile(graphFile);
786  return CreateNetworkFromModel();
787 }
static ModelPtr LoadModelFromFile(const char *fileName)

◆ GetBuffer()

◆ GetInputs()

TfLiteParserImpl::TensorRawPtrVector GetInputs ( const ModelPtr model,
size_t  subgraphIndex,
size_t  operatorIndex 
)
static

Definition at line 4540 of file TfLiteParser.cpp.

References CHECK_MODEL, and CHECKED_NON_NEGATIVE.

Referenced by armnnTfLiteParser::AsFloatArray(), armnnTfLiteParser::ComputeWrappedIndex(), TfLiteParserImpl::OutputShapeOfReshape(), and TfLiteParserImpl::OutputShapeOfSqueeze().

4543 {
4544  CHECK_MODEL(model, subgraphIndex, operatorIndex);
4545 
4546  const auto& subgraphPtr = model->subgraphs[subgraphIndex];
4547  const auto& operatorPtr = subgraphPtr->operators[operatorIndex];
4548 
4549  size_t inputCount = operatorPtr->inputs.size();
4550  TensorRawPtrVector result;
4551  for (size_t i = 0; i < inputCount; ++i)
4552  {
4553  // If the input location is -1 then assume input is turned off.
4554  if (operatorPtr->inputs[i] == -1)
4555  {
4556  continue;
4557  }
4558  else
4559  {
4560  uint32_t inputId = CHECKED_NON_NEGATIVE(operatorPtr->inputs[i]);
4561  result.push_back(subgraphPtr->tensors[inputId].get());
4562  }
4563  }
4564  return result;
4565 }
#define CHECK_MODEL(MODEL, SUBGRAPH_INDEX, OPERATOR_INDEX)
#define CHECKED_NON_NEGATIVE(VALUE)
std::vector< TensorRawPtr > TensorRawPtrVector

◆ GetInputTensorIds()

std::vector< int32_t > & GetInputTensorIds ( const ModelPtr model,
size_t  subgraphIndex,
size_t  operatorIndex 
)
static

Definition at line 4620 of file TfLiteParser.cpp.

References CHECK_MODEL.

Referenced by armnnTfLiteParser::AsFloatArray(), armnnTfLiteParser::ComputeWrappedIndex(), TfLiteParserImpl::OutputShapeOfReshape(), and TfLiteParserImpl::OutputShapeOfSqueeze().

4623 {
4624  CHECK_MODEL(model, subgraphIndex, operatorIndex);
4625  const auto& subgraphPtr = model->subgraphs[subgraphIndex];
4626  const auto& operatorPtr = subgraphPtr->operators[operatorIndex];
4627  return operatorPtr->inputs;
4628 }
#define CHECK_MODEL(MODEL, SUBGRAPH_INDEX, OPERATOR_INDEX)

◆ GetNetworkInputBindingInfo()

BindingPointInfo GetNetworkInputBindingInfo ( size_t  subgraphId,
const std::string &  name 
) const

Retrieve binding info (layer id and tensor info) for the network input identified by the given layer name and subgraph id.

Definition at line 4963 of file TfLiteParser.cpp.

References CHECK_LOCATION, CHECK_SUBGRAPH, TfLiteParserImpl::GetSubgraphInputs(), TensorInfo::SetConstant(), and armnnDeserializer::ToTensorInfo().

4965 {
4966  CHECK_SUBGRAPH(m_Model, subgraphId);
4967  auto inputs = GetSubgraphInputs(m_Model, subgraphId);
4968  for (auto const& input : inputs)
4969  {
4970  if (input.second->name == name)
4971  {
4972  auto bindingId = GenerateLayerBindingId(subgraphId, input.first);
4973  auto inputTensorInfo = ToTensorInfo(input.second);
4974  // Input tensors are always treated as constant tensors during network execution.
4975  inputTensorInfo.SetConstant(true);
4976  return std::make_pair(bindingId, inputTensorInfo);
4977  }
4978  }
4979 
4980  std::stringstream bindings;
4981  for (auto const& input : inputs)
4982  {
4983  bindings << "'" << input.second->name << "' ";
4984  }
4985 
4986  throw ParseException(
4987  fmt::format("No input binding found for subgraph:{} and name:{}. "
4988  "Possible inputs are: [{}] {}",
4989  subgraphId,
4990  name,
4991  bindings.str(),
4992  CHECK_LOCATION().AsString()));
4993 }
#define CHECK_SUBGRAPH(MODEL, SUBGRAPH_INDEX)
#define CHECK_LOCATION()
Definition: Exceptions.hpp:203
static TensorIdRawPtrVector GetSubgraphInputs(const ModelPtr &model, size_t subgraphIndex)
armnn::TensorInfo ToTensorInfo(TensorRawPtr tensorPtr)

◆ GetNetworkOutputBindingInfo()

BindingPointInfo GetNetworkOutputBindingInfo ( size_t  subgraphId,
const std::string &  name 
) const

Retrieve binding info (layer id and tensor info) for the network output identified by the given layer name and subgraph id.

Definition at line 4995 of file TfLiteParser.cpp.

References CHECK_LOCATION, CHECK_SUBGRAPH, TfLiteParserImpl::GetSubgraphOutputs(), and armnnDeserializer::ToTensorInfo().

4997 {
4998  CHECK_SUBGRAPH(m_Model, subgraphId);
4999  auto outputs = GetSubgraphOutputs(m_Model, subgraphId);
5000  for (unsigned int i = 0; i < outputs.size(); ++i)
5001  {
5002  auto const output = outputs[i];
5003  if (output.second->name == name)
5004  {
5005  auto bindingId = GenerateLayerBindingId(subgraphId, output.first);
5006  std::vector<unsigned int> shape = m_OverridenOutputShapes.size() > 0 ?
5007  m_OverridenOutputShapes[i] : AsUnsignedVector(output.second->shape);
5008  return std::make_pair(bindingId, ToTensorInfo(output.second, shape));
5009  }
5010  }
5011 
5012  std::stringstream bindings;
5013  for (auto const& output : outputs)
5014  {
5015  bindings << "'" << output.second->name << "' ";
5016  }
5017 
5018  throw ParseException(
5019  fmt::format("No output binding found for subgraph:{} and name:{}. "
5020  "Possible outputs are: [{}] {}",
5021  subgraphId,
5022  name,
5023  bindings.str(),
5024  CHECK_LOCATION().AsString()));
5025 }
static TensorIdRawPtrVector GetSubgraphOutputs(const ModelPtr &model, size_t subgraphIndex)
#define CHECK_SUBGRAPH(MODEL, SUBGRAPH_INDEX)
#define CHECK_LOCATION()
Definition: Exceptions.hpp:203
armnn::TensorInfo ToTensorInfo(TensorRawPtr tensorPtr)

◆ GetOutputs()

TfLiteParserImpl::TensorRawPtrVector GetOutputs ( const ModelPtr model,
size_t  subgraphIndex,
size_t  operatorIndex 
)
static

Definition at line 4567 of file TfLiteParser.cpp.

References CHECK_MODEL, CHECK_TENSOR, and CHECKED_NON_NEGATIVE.

Referenced by armnnTfLiteParser::AsFloatArray(), armnnTfLiteParser::ComputeWrappedIndex(), TfLiteParserImpl::OutputShapeOfReshape(), and TfLiteParserImpl::OutputShapeOfSqueeze().

4570 {
4571  CHECK_MODEL(model, subgraphIndex, operatorIndex);
4572 
4573  const auto& subgraphPtr = model->subgraphs[subgraphIndex];
4574  const auto& operatorPtr = subgraphPtr->operators[operatorIndex];
4575 
4576  size_t outputCount = operatorPtr->outputs.size();
4577  TensorRawPtrVector result(outputCount);
4578  for (size_t i = 0; i < outputCount; ++i)
4579  {
4580  uint32_t outputId = CHECKED_NON_NEGATIVE(operatorPtr->outputs[i]);
4581  CHECK_TENSOR(model, subgraphIndex, outputId);
4582  result[i] = subgraphPtr->tensors[outputId].get();
4583  }
4584  return result;
4585 }
#define CHECK_MODEL(MODEL, SUBGRAPH_INDEX, OPERATOR_INDEX)
#define CHECK_TENSOR(MODEL, SUBGRAPH_INDEX, TENSOR_INDEX)
#define CHECKED_NON_NEGATIVE(VALUE)
std::vector< TensorRawPtr > TensorRawPtrVector

◆ GetOutputTensorIds()

std::vector< int32_t > & GetOutputTensorIds ( const ModelPtr model,
size_t  subgraphIndex,
size_t  operatorIndex 
)
static

◆ GetSubgraphCount()

size_t GetSubgraphCount ( ) const

Return the number of subgraphs in the parsed model.

Definition at line 5027 of file TfLiteParser.cpp.

5028 {
5029  return m_Model->subgraphs.size();
5030 }

◆ GetSubgraphInputs()

TfLiteParserImpl::TensorIdRawPtrVector GetSubgraphInputs ( const ModelPtr model,
size_t  subgraphIndex 
)
static

Definition at line 4587 of file TfLiteParser.cpp.

References CHECK_SUBGRAPH, CHECK_TENSOR, and CHECKED_NON_NEGATIVE.

Referenced by TfLiteParserImpl::GetNetworkInputBindingInfo(), TfLiteParserImpl::GetOutputTensorIds(), and TfLiteParserImpl::GetSubgraphInputTensorNames().

4589 {
4590  CHECK_SUBGRAPH(model, subgraphIndex);
4591  const auto& subgraphPtr = model->subgraphs[subgraphIndex];
4592 
4593  size_t inputCount = subgraphPtr->inputs.size();
4594  TensorIdRawPtrVector result(inputCount);
4595  for (size_t i = 0; i < inputCount; ++i)
4596  {
4597  uint32_t inputId = CHECKED_NON_NEGATIVE(subgraphPtr->inputs[i]);
4598  CHECK_TENSOR(model, subgraphIndex, inputId);
4599  result[i] = std::make_pair(inputId, subgraphPtr->tensors[inputId].get());
4600  }
4601  return result;
4602 }
#define CHECK_TENSOR(MODEL, SUBGRAPH_INDEX, TENSOR_INDEX)
#define CHECK_SUBGRAPH(MODEL, SUBGRAPH_INDEX)
std::vector< TensorIdRawPtr > TensorIdRawPtrVector
#define CHECKED_NON_NEGATIVE(VALUE)

◆ GetSubgraphInputTensorNames()

std::vector< std::string > GetSubgraphInputTensorNames ( size_t  subgraphId) const

Return the input tensor names for a given subgraph.

Definition at line 5032 of file TfLiteParser.cpp.

References CHECK_SUBGRAPH, and TfLiteParserImpl::GetSubgraphInputs().

5033 {
5034  CHECK_SUBGRAPH(m_Model, subgraphId);
5035  auto inputs = GetSubgraphInputs(m_Model, subgraphId);
5036  std::vector<std::string> result;
5037  result.reserve(inputs.size());
5038  for (auto const& input : inputs)
5039  {
5040  result.push_back(input.second->name);
5041  }
5042  return result;
5043 }
#define CHECK_SUBGRAPH(MODEL, SUBGRAPH_INDEX)
static TensorIdRawPtrVector GetSubgraphInputs(const ModelPtr &model, size_t subgraphIndex)

◆ GetSubgraphOutputs()

TfLiteParserImpl::TensorIdRawPtrVector GetSubgraphOutputs ( const ModelPtr model,
size_t  subgraphIndex 
)
static

Definition at line 4604 of file TfLiteParser.cpp.

References CHECK_SUBGRAPH, and CHECKED_NON_NEGATIVE.

Referenced by TfLiteParserImpl::GetNetworkOutputBindingInfo(), TfLiteParserImpl::GetOutputTensorIds(), and TfLiteParserImpl::GetSubgraphOutputTensorNames().

4606 {
4607  CHECK_SUBGRAPH(model, subgraphIndex);
4608  const auto& subgraphPtr = model->subgraphs[subgraphIndex];
4609 
4610  size_t outputCount = subgraphPtr->outputs.size();
4611  TensorIdRawPtrVector result(outputCount);
4612  for (size_t i = 0; i < outputCount; ++i)
4613  {
4614  uint32_t outputId = CHECKED_NON_NEGATIVE(subgraphPtr->outputs[i]);
4615  result[i] = std::make_pair(outputId, subgraphPtr->tensors[outputId].get());
4616  }
4617  return result;
4618 }
#define CHECK_SUBGRAPH(MODEL, SUBGRAPH_INDEX)
std::vector< TensorIdRawPtr > TensorIdRawPtrVector
#define CHECKED_NON_NEGATIVE(VALUE)

◆ GetSubgraphOutputTensorNames()

std::vector< std::string > GetSubgraphOutputTensorNames ( size_t  subgraphId) const

Return the output tensor names for a given subgraph.

Definition at line 5045 of file TfLiteParser.cpp.

References CHECK_SUBGRAPH, and TfLiteParserImpl::GetSubgraphOutputs().

5046 {
5047  CHECK_SUBGRAPH(m_Model, subgraphId);
5048  auto outputs = GetSubgraphOutputs(m_Model, subgraphId);
5049  std::vector<std::string> result;
5050  result.reserve(outputs.size());
5051  for (auto const& output : outputs)
5052  {
5053  result.push_back(output.second->name);
5054  }
5055  return result;
5056 }
static TensorIdRawPtrVector GetSubgraphOutputs(const ModelPtr &model, size_t subgraphIndex)
#define CHECK_SUBGRAPH(MODEL, SUBGRAPH_INDEX)

◆ GetVersion()

const std::string GetVersion ( )
static

Retrieve version in X.Y.Z form.

Definition at line 5058 of file TfLiteParser.cpp.

References TFLITE_PARSER_VERSION.

5059 {
5060  return TFLITE_PARSER_VERSION;
5061 }
#define TFLITE_PARSER_VERSION
TFLITE_PARSER_VERSION: "X.Y.Z" where: X = Major version number Y = Minor version number Z = Patch version number
Definition: Version.hpp:25

◆ LoadModel()

armnn::INetworkPtr LoadModel ( std::unique_ptr< tflite::ModelT >  model)

Definition at line 797 of file TfLiteParser.cpp.

References ARMNN_ASSERT, ARMNN_LOG, CHECK_LOCATION, armnn::error, and Exception::what().

798 {
799  ResetParser();
800  m_Model = std::move(model);
801 
802  return CreateNetworkFromModel();
803 }

◆ LoadModelFromBinary()

TfLiteParserImpl::ModelPtr LoadModelFromBinary ( const uint8_t *  binaryContent,
size_t  len 
)
static

Definition at line 4521 of file TfLiteParser.cpp.

References CHECK_LOCATION.

Referenced by TfLiteParserImpl::CreateNetworkFromBinary(), and TfLiteParserImpl::LoadModelFromFile().

4522 {
4523  if (binaryContent == nullptr)
4524  {
4525  throw InvalidArgumentException(fmt::format("Invalid (null) binary content {}",
4526  CHECK_LOCATION().AsString()));
4527  }
4528  flatbuffers::Verifier verifier(binaryContent, len);
4529  if (verifier.VerifyBuffer<tflite::Model>() == false)
4530  {
4531  throw ParseException(
4532  fmt::format("Buffer doesn't conform to the expected Tensorflow Lite "
4533  "flatbuffers format. size:{} {}",
4534  len,
4535  CHECK_LOCATION().AsString()));
4536  }
4537  return tflite::UnPackModel(binaryContent);
4538 }
::android::nn::Model Model
Helper classes.
#define CHECK_LOCATION()
Definition: Exceptions.hpp:203

◆ LoadModelFromFile()

TfLiteParserImpl::ModelPtr LoadModelFromFile ( const char *  fileName)
static

Definition at line 4497 of file TfLiteParser.cpp.

References CHECK_LOCATION, and TfLiteParserImpl::LoadModelFromBinary().

Referenced by TfLiteParserImpl::CreateNetworkFromBinaryFile().

4498 {
4499  if (fileName == nullptr)
4500  {
4501  throw InvalidArgumentException(fmt::format("Invalid (null) file name {}",
4502  CHECK_LOCATION().AsString()));
4503  }
4504  std::error_code errorCode;
4505  fs::path pathToFile(fileName);
4506  if (!fs::exists(pathToFile, errorCode))
4507  {
4508  //fmt::format() could not be used here (format error)
4509  std::stringstream msg;
4510  msg << "Cannot find the file (" << fileName << ") errorCode: " << errorCode
4511  << " " << CHECK_LOCATION().AsString();
4512 
4513  throw FileNotFoundException(msg.str());
4514  }
4515  std::ifstream file(fileName, std::ios::binary);
4516  std::string fileContent((std::istreambuf_iterator<char>(file)), std::istreambuf_iterator<char>());
4517  return LoadModelFromBinary(reinterpret_cast<const uint8_t *>(fileContent.c_str()),
4518  fileContent.size());
4519 }
static ModelPtr LoadModelFromBinary(const uint8_t *binaryContent, size_t len)
#define CHECK_LOCATION()
Definition: Exceptions.hpp:203

◆ OutputShapeOfReshape()

armnn::TensorInfo OutputShapeOfReshape ( const armnn::TensorInfo inputTensorInfo,
const std::vector< int32_t > &  targetDimsIn 
)
static

Definition at line 2760 of file TfLiteParser.cpp.

References ARMNN_ASSERT, ARMNN_THROW_PARSE_EXCEPTION, CHECK_LOCATION, CHECK_MODEL, CHECK_SUPPORTED_FUSED_ACTIVATION, CHECK_VALID_SIZE, CHECKED_NON_NEGATIVE, armnnDeserializer::CheckShape(), IOutputSlot::Connect(), TfLiteParserImpl::GetBuffer(), TensorInfo::GetDataType(), BaseTensor< MemoryType >::GetInfo(), TfLiteParserImpl::GetInputs(), IConnectableLayer::GetInputSlot(), TfLiteParserImpl::GetInputTensorIds(), IConnectableLayer::GetName(), TensorInfo::GetNumBytes(), TensorShape::GetNumDimensions(), TensorInfo::GetNumDimensions(), TensorInfo::GetNumElements(), IConnectableLayer::GetNumOutputSlots(), TfLiteParserImpl::GetOutputs(), IConnectableLayer::GetOutputSlot(), TfLiteParserImpl::GetOutputTensorIds(), TensorInfo::GetQuantizationOffset(), TensorInfo::GetQuantizationScale(), TensorInfo::GetShape(), armnnUtils::GetUnsignedAxis(), LstmDescriptor::m_ActivationFunc, StackDescriptor::m_Axis, FullyConnectedDescriptor::m_BiasEnabled, LstmInputParams::m_CellBias, LstmInputParams::m_CellLayerNormWeights, LstmInputParams::m_CellToForgetWeights, LstmInputParams::m_CellToInputWeights, LstmInputParams::m_CellToOutputWeights, FullyConnectedDescriptor::m_ConstantWeights, DetectionPostProcessDescriptor::m_DetectionsPerClass, LstmInputParams::m_ForgetGateBias, LstmInputParams::m_ForgetLayerNormWeights, LstmInputParams::m_InputGateBias, LstmInputParams::m_InputLayerNormWeights, StackDescriptor::m_InputShape, LstmInputParams::m_InputToCellWeights, LstmInputParams::m_InputToForgetWeights, LstmInputParamsInfo::m_InputToForgetWeights, LstmInputParams::m_InputToInputWeights, LstmInputParams::m_InputToOutputWeights, DetectionPostProcessDescriptor::m_MaxClassesPerDetection, DetectionPostProcessDescriptor::m_MaxDetections, ResizeDescriptor::m_Method, DetectionPostProcessDescriptor::m_NmsIouThreshold, DetectionPostProcessDescriptor::m_NmsScoreThreshold, DetectionPostProcessDescriptor::m_NumClasses, StackDescriptor::m_NumInputs, LstmInputParams::m_OutputGateBias, 
LstmInputParams::m_OutputLayerNormWeights, LstmInputParams::m_ProjectionBias, LstmInputParams::m_ProjectionWeights, LstmInputParams::m_RecurrentToCellWeights, LstmInputParams::m_RecurrentToForgetWeights, LstmInputParams::m_RecurrentToInputWeights, LstmInputParams::m_RecurrentToOutputWeights, DetectionPostProcessDescriptor::m_ScaleH, DetectionPostProcessDescriptor::m_ScaleW, DetectionPostProcessDescriptor::m_ScaleX, DetectionPostProcessDescriptor::m_ScaleY, ReshapeDescriptor::m_TargetShape, FullyConnectedDescriptor::m_TransposeWeightMatrix, DetectionPostProcessDescriptor::m_UseRegularNms, armnn::MaxNumOfTensorDimensions, armnn::NHWC, armnn::numeric_cast(), armnnUtils::ProcessConcatInputTensorInfo(), OriginsDescriptor::SetConcatAxis(), TensorInfo::SetShape(), IOutputSlot::SetTensorInfo(), ViewsDescriptor::SetViewOriginCoord(), ViewsDescriptor::SetViewSize(), and armnnDeserializer::ToTensorInfo().

2762 {
2763  std::vector<unsigned int> outputDims(targetDimsIn.begin(), targetDimsIn.end());
2764  const auto stretchDim = std::find(targetDimsIn.begin(), targetDimsIn.end(), -1);
2765 
2766  if (stretchDim != targetDimsIn.end())
2767  {
2768  if (std::find(std::next(stretchDim), targetDimsIn.end(), -1) != targetDimsIn.end())
2769  {
2770  throw ParseException(
2771  fmt::format("At most one component of shape can be -1 {}", CHECK_LOCATION().AsString()));
2772  }
2773 
2774  auto targetNumElements =
2775  armnn::numeric_cast<unsigned int>(
2776  std::accumulate(targetDimsIn.begin(), targetDimsIn.end(), -1, std::multiplies<int32_t>()));
2777 
2778  auto stretchIndex = static_cast<size_t>(std::distance(targetDimsIn.begin(), stretchDim));
2779  outputDims[stretchIndex] = inputTensorInfo.GetNumElements() / targetNumElements;
2780  }
2781 
2782  TensorShape outputShape = TensorShape(static_cast<unsigned int>(outputDims.size()), outputDims.data());
2783 
2784  TensorInfo reshapeInfo = inputTensorInfo;
2785  reshapeInfo.SetShape(outputShape);
2786 
2787  return reshapeInfo;
2788 }
void SetShape(const TensorShape &newShape)
Definition: Tensor.hpp:193
#define CHECK_LOCATION()
Definition: Exceptions.hpp:203
std::enable_if_t< std::is_unsigned< Source >::value &&std::is_unsigned< Dest >::value, Dest > numeric_cast(Source source)
Definition: NumericCast.hpp:35
unsigned int GetNumElements() const
Definition: Tensor.hpp:196

◆ OutputShapeOfSqueeze()

armnn::TensorInfo OutputShapeOfSqueeze ( std::vector< uint32_t >  squeezeDims,
const armnn::TensorInfo inputTensorInfo 
)
static

Definition at line 2017 of file TfLiteParser.cpp.

References ARMNN_ASSERT, ARMNN_THROW_PARSE_EXCEPTION, CHECK_LOCATION, CHECK_MODEL, CHECK_VALID_SIZE, IOutputSlot::Connect(), armnn::Float32, TfLiteParserImpl::GetBuffer(), TensorInfo::GetDataType(), TfLiteParserImpl::GetInputs(), IConnectableLayer::GetInputSlot(), TfLiteParserImpl::GetInputTensorIds(), TensorInfo::GetNumBytes(), TensorShape::GetNumDimensions(), TensorInfo::GetNumDimensions(), TensorInfo::GetNumElements(), TfLiteParserImpl::GetOutputs(), IConnectableLayer::GetOutputSlot(), TfLiteParserImpl::GetOutputTensorIds(), TensorInfo::GetShape(), armnn::IgnoreUnused(), ActivationDescriptor::m_A, MeanDescriptor::m_Axis, ActivationDescriptor::m_B, StridedSliceDescriptor::m_Begin, StridedSliceDescriptor::m_BeginMask, StridedSliceDescriptor::m_DataLayout, StridedSliceDescriptor::m_EllipsisMask, StridedSliceDescriptor::m_End, StridedSliceDescriptor::m_EndMask, ActivationDescriptor::m_Function, MeanDescriptor::m_KeepDims, StridedSliceDescriptor::m_NewAxisMask, PadDescriptor::m_PaddingMode, PadDescriptor::m_PadList, PadDescriptor::m_PadValue, StridedSliceDescriptor::m_ShrinkAxisMask, StridedSliceDescriptor::m_Stride, ReshapeDescriptor::m_TargetShape, armnn::NHWC, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS8, TensorInfo::SetShape(), IOutputSlot::SetTensorInfo(), armnn::Signed32, armnn::Signed64, and armnnDeserializer::ToTensorInfo().

2019 {
2020  CHECK_VALID_SIZE(squeezeDims.size(), 0, 1, 2, 3, 4);
2021  static const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };
2022 
2023  if (inputTensorInfo.GetNumDimensions() > 4)
2024  {
2025  std::stringstream ss;
2026  ss << "Input tensor has unexpected number of dimensions:" << inputTensorInfo.GetNumDimensions()
2027  << " shape:" << inputTensorInfo.GetShape() << " "
2028  << CHECK_LOCATION().AsString();
2029  throw ParseException(ss.str());
2030  }
2031 
2032  if (squeezeDims.empty())
2033  {
2034  squeezeDims.assign(dimensionSequence,
2035  dimensionSequence+inputTensorInfo.GetNumDimensions());
2036  }
2037 
2038  std::vector<uint32_t> outputDims;
2039  for(unsigned int i = 0; i < inputTensorInfo.GetNumDimensions(); i++)
2040  {
2041  bool skipSqueeze = (std::find(squeezeDims.begin(), squeezeDims.end(), i) == squeezeDims.end());
2042  auto currentDimension = inputTensorInfo.GetShape()[i];
2043  if (skipSqueeze || currentDimension != 1)
2044  {
2045  outputDims.push_back(currentDimension);
2046  }
2047  }
2048 
2049  if (outputDims.size() > 4)
2050  {
2051  std::stringstream ss;
2052  ss << "Output tensor has unexpected number of dimensions:" << inputTensorInfo.GetNumDimensions()
2053  << " shape:" << inputTensorInfo.GetShape() << " "
2054  << CHECK_LOCATION().AsString();
2055  throw ParseException(ss.str());
2056  }
2057 
2058  TensorShape outShape = TensorShape(static_cast<unsigned int>(outputDims.size()),
2059  outputDims.data());
2060 
2061  // we need to preserve the tensor type and the quantization data as well
2062  TensorInfo outTensorInfo = inputTensorInfo;
2063  outTensorInfo.SetShape(outShape);
2064 
2065  return outTensorInfo;
2066 }
const TensorShape & GetShape() const
Definition: Tensor.hpp:191
void SetShape(const TensorShape &newShape)
Definition: Tensor.hpp:193
#define CHECK_VALID_SIZE(ACTUAL,...)
#define CHECK_LOCATION()
Definition: Exceptions.hpp:203
unsigned int GetNumDimensions() const
Definition: Tensor.hpp:195

The documentation for this class was generated from the following files: