ArmNN 24.05
TfLiteParserImpl Class Reference

#include <TfLiteParser.hpp>

Public Types

using ModelPtr = std::unique_ptr< tflite::ModelT >
 
using SubgraphPtr = std::unique_ptr< tflite::SubGraphT >
 
using OperatorPtr = std::unique_ptr< tflite::OperatorT >
 
using OperatorCodePtr = std::unique_ptr< tflite::OperatorCodeT >
 
using TensorPtr = std::unique_ptr< tflite::TensorT >
 
using TensorRawPtr = const tflite::TensorT *
 
using TensorRawPtrVector = std::vector< TensorRawPtr >
 
using TensorIdRawPtr = std::pair< size_t, TensorRawPtr >
 
using TensorIdRawPtrVector = std::vector< TensorIdRawPtr >
 
using BufferPtr = std::unique_ptr< tflite::BufferT >
 
using BufferRawPtr = const tflite::BufferT *
 

Public Member Functions

armnn::INetworkPtr CreateNetworkFromBinaryFile (const char *graphFile)
 Create the network from a flatbuffers binary file on disk.
 
armnn::INetworkPtr CreateNetworkFromBinary (const std::vector< uint8_t > &binaryContent)
 Create the network from a flatbuffers binary.
 
BindingPointInfo GetNetworkInputBindingInfo (size_t subgraphId, const std::string &name) const
 Retrieve binding info (layer id and tensor info) for the network input identified by the given layer name and subgraph id.
 
BindingPointInfo GetNetworkOutputBindingInfo (size_t subgraphId, const std::string &name) const
 Retrieve binding info (layer id and tensor info) for the network output identified by the given layer name and subgraph id.
 
size_t GetSubgraphCount () const
 Return the number of subgraphs in the parsed model.
 
std::vector< std::string > GetSubgraphInputTensorNames (size_t subgraphId) const
 Return the input tensor names for a given subgraph.
 
std::vector< std::string > GetSubgraphOutputTensorNames (size_t subgraphId) const
 Return the output tensor names for a given subgraph.
 
 TfLiteParserImpl (const armnn::Optional< ITfLiteParser::TfLiteParserOptions > &options=armnn::EmptyOptional())
 
 ~TfLiteParserImpl ()=default
 
armnn::INetworkPtr CreateNetworkFromBinaryAsDynamic (const std::vector< uint8_t > &binaryContent)
 
armnn::INetworkPtr LoadModel (std::unique_ptr< tflite::ModelT > model)
 

Static Public Member Functions

static ModelPtr LoadModelFromFile (const char *fileName)
 
static ModelPtr LoadModelFromBinary (const uint8_t *binaryContent, size_t len)
 
static TensorRawPtrVector GetInputs (const ModelPtr &model, size_t subgraphIndex, size_t operatorIndex)
 
static TensorRawPtrVector GetOutputs (const ModelPtr &model, size_t subgraphIndex, size_t operatorIndex)
 
static TensorIdRawPtrVector GetSubgraphInputs (const ModelPtr &model, size_t subgraphIndex)
 
static TensorIdRawPtrVector GetSubgraphOutputs (const ModelPtr &model, size_t subgraphIndex)
 
static std::vector< int32_t > & GetInputTensorIds (const ModelPtr &model, size_t subgraphIndex, size_t operatorIndex)
 
static std::vector< int32_t > & GetOutputTensorIds (const ModelPtr &model, size_t subgraphIndex, size_t operatorIndex)
 
static BufferRawPtr GetBuffer (const ModelPtr &model, size_t bufferIndex)
 
static armnn::TensorInfo OutputShapeOfSqueeze (std::vector< uint32_t > squeezeDims, const armnn::TensorInfo &inputTensorInfo)
 
static armnn::TensorInfo OutputShapeOfReshape (const armnn::TensorInfo &inputTensorInfo, const std::vector< int32_t > &targetDimsIn)
 
static const std::string GetVersion ()
 Retrieve version in X.Y.Z form.
 

Detailed Description

Definition at line 26 of file TfLiteParser.hpp.
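
The generated documentation carries no prose description for this class. As a minimal usage sketch of how the members above fit together (the file path and tensor names are placeholders, not part of the ArmNN API), the parser reads a flatbuffers model from disk and then exposes the binding information needed to feed inputs and read outputs:

#include <TfLiteParser.hpp>

int main()
{
    using namespace armnnTfLiteParser;

    // Parse a TfLite flatbuffers model from disk into an armnn::INetwork.
    TfLiteParserImpl parser;
    armnn::INetworkPtr network = parser.CreateNetworkFromBinaryFile("model.tflite");

    // BindingPointInfo is a pair of layer binding id and armnn::TensorInfo.
    // "input" and "output" are placeholder names; the real names can be queried with
    // GetSubgraphInputTensorNames() / GetSubgraphOutputTensorNames().
    BindingPointInfo inputBinding  = parser.GetNetworkInputBindingInfo(0, "input");
    BindingPointInfo outputBinding = parser.GetNetworkOutputBindingInfo(0, "output");

    // The returned network can then be optimised and loaded into an ArmNN runtime.
    return 0;
}
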

Member Typedef Documentation

◆ BufferPtr

using BufferPtr = std::unique_ptr<tflite::BufferT>

Definition at line 39 of file TfLiteParser.hpp.

◆ BufferRawPtr

using BufferRawPtr = const tflite::BufferT *

Definition at line 40 of file TfLiteParser.hpp.

◆ ModelPtr

using ModelPtr = std::unique_ptr<tflite::ModelT>

Definition at line 30 of file TfLiteParser.hpp.

◆ OperatorCodePtr

using OperatorCodePtr = std::unique_ptr<tflite::OperatorCodeT>

Definition at line 33 of file TfLiteParser.hpp.

◆ OperatorPtr

using OperatorPtr = std::unique_ptr<tflite::OperatorT>

Definition at line 32 of file TfLiteParser.hpp.

◆ SubgraphPtr

using SubgraphPtr = std::unique_ptr<tflite::SubGraphT>

Definition at line 31 of file TfLiteParser.hpp.

◆ TensorIdRawPtr

using TensorIdRawPtr = std::pair<size_t, TensorRawPtr>

Definition at line 37 of file TfLiteParser.hpp.

◆ TensorIdRawPtrVector

using TensorIdRawPtrVector = std::vector<TensorIdRawPtr>

Definition at line 38 of file TfLiteParser.hpp.

◆ TensorPtr

using TensorPtr = std::unique_ptr<tflite::TensorT>

Definition at line 34 of file TfLiteParser.hpp.

◆ TensorRawPtr

using TensorRawPtr = const tflite::TensorT *

Definition at line 35 of file TfLiteParser.hpp.

◆ TensorRawPtrVector

using TensorRawPtrVector = std::vector<TensorRawPtr>

Definition at line 36 of file TfLiteParser.hpp.

Constructor & Destructor Documentation

◆ TfLiteParserImpl()

Definition at line 744 of file TfLiteParser.cpp.

745 : m_Options(options)
746 , m_Network(nullptr, nullptr)
747 , m_ParserFunctions(tflite::BuiltinOperator_MAX+1, &TfLiteParserImpl::ParseUnsupportedOperator)
748 {
749  // register supported operators
750  m_ParserFunctions[tflite::BuiltinOperator_ABS] = &TfLiteParserImpl::ParseAbs;
751  m_ParserFunctions[tflite::BuiltinOperator_ADD] = &TfLiteParserImpl::ParseAdd;
752  m_ParserFunctions[tflite::BuiltinOperator_ARG_MIN] = &TfLiteParserImpl::ParseArgMin;
753  m_ParserFunctions[tflite::BuiltinOperator_ARG_MAX] = &TfLiteParserImpl::ParseArgMax;
754  m_ParserFunctions[tflite::BuiltinOperator_AVERAGE_POOL_2D] = &TfLiteParserImpl::ParseAveragePool2D;
755  m_ParserFunctions[tflite::BuiltinOperator_BATCH_TO_SPACE_ND] = &TfLiteParserImpl::ParseBatchToSpaceND;
756  m_ParserFunctions[tflite::BuiltinOperator_BATCH_MATMUL] = &TfLiteParserImpl::ParseBatchMatMul;
757  m_ParserFunctions[tflite::BuiltinOperator_BROADCAST_TO] = &TfLiteParserImpl::ParseBroadcastTo;
758  m_ParserFunctions[tflite::BuiltinOperator_CEIL] = &TfLiteParserImpl::ParseCeil;
759  m_ParserFunctions[tflite::BuiltinOperator_CAST] = &TfLiteParserImpl::ParseCast;
760  m_ParserFunctions[tflite::BuiltinOperator_CONCATENATION] = &TfLiteParserImpl::ParseConcatenation;
761  m_ParserFunctions[tflite::BuiltinOperator_CONV_2D] = &TfLiteParserImpl::ParseConv2D;
762  // Conv3D support was added in TF 2.5, so for backwards compatibility a hash define is needed.
763  #if defined(ARMNN_POST_TFLITE_2_4)
764  m_ParserFunctions[tflite::BuiltinOperator_CONV_3D] = &TfLiteParserImpl::ParseConv3D;
765  #endif
766  m_ParserFunctions[tflite::BuiltinOperator_CUSTOM] = &TfLiteParserImpl::ParseCustomOperator;
767  m_ParserFunctions[tflite::BuiltinOperator_DEPTH_TO_SPACE] = &TfLiteParserImpl::ParseDepthToSpace;
768  m_ParserFunctions[tflite::BuiltinOperator_DEPTHWISE_CONV_2D] = &TfLiteParserImpl::ParseDepthwiseConv2D;
769  m_ParserFunctions[tflite::BuiltinOperator_DEQUANTIZE] = &TfLiteParserImpl::ParseDequantize;
770  m_ParserFunctions[tflite::BuiltinOperator_DIV] = &TfLiteParserImpl::ParseDiv;
771  m_ParserFunctions[tflite::BuiltinOperator_ELU] = &TfLiteParserImpl::ParseElu;
772  m_ParserFunctions[tflite::BuiltinOperator_EQUAL] = &TfLiteParserImpl::ParseEqual;
773  m_ParserFunctions[tflite::BuiltinOperator_EXP] = &TfLiteParserImpl::ParseExp;
774  m_ParserFunctions[tflite::BuiltinOperator_EXPAND_DIMS] = &TfLiteParserImpl::ParseExpandDims;
775  m_ParserFunctions[tflite::BuiltinOperator_FLOOR_DIV] = &TfLiteParserImpl::ParseFloorDiv;
776  m_ParserFunctions[tflite::BuiltinOperator_FULLY_CONNECTED] = &TfLiteParserImpl::ParseFullyConnected;
777  m_ParserFunctions[tflite::BuiltinOperator_GATHER] = &TfLiteParserImpl::ParseGather;
778  m_ParserFunctions[tflite::BuiltinOperator_GELU] = &TfLiteParserImpl::ParseGelu;
779  m_ParserFunctions[tflite::BuiltinOperator_GATHER_ND] = &TfLiteParserImpl::ParseGatherNd;
780  m_ParserFunctions[tflite::BuiltinOperator_GREATER] = &TfLiteParserImpl::ParseGreater;
781  m_ParserFunctions[tflite::BuiltinOperator_GREATER_EQUAL] = &TfLiteParserImpl::ParseGreaterOrEqual;
782  m_ParserFunctions[tflite::BuiltinOperator_HARD_SWISH] = &TfLiteParserImpl::ParseHardSwish;
783  m_ParserFunctions[tflite::BuiltinOperator_LEAKY_RELU] = &TfLiteParserImpl::ParseLeakyRelu;
784  m_ParserFunctions[tflite::BuiltinOperator_LESS] = &TfLiteParserImpl::ParseLess;
785  m_ParserFunctions[tflite::BuiltinOperator_LESS_EQUAL] = &TfLiteParserImpl::ParseLessOrEqual;
786  m_ParserFunctions[tflite::BuiltinOperator_LOCAL_RESPONSE_NORMALIZATION]
787  = &TfLiteParserImpl::ParseLocalResponseNormalization;
788  m_ParserFunctions[tflite::BuiltinOperator_LOG] = &TfLiteParserImpl::ParseLog;
789  m_ParserFunctions[tflite::BuiltinOperator_LOGICAL_NOT] = &TfLiteParserImpl::ParseLogicalNot;
790  m_ParserFunctions[tflite::BuiltinOperator_LOGISTIC] = &TfLiteParserImpl::ParseLogistic;
791  m_ParserFunctions[tflite::BuiltinOperator_LOG_SOFTMAX] = &TfLiteParserImpl::ParseLogSoftmax;
792  m_ParserFunctions[tflite::BuiltinOperator_L2_NORMALIZATION] = &TfLiteParserImpl::ParseL2Normalization;
793  m_ParserFunctions[tflite::BuiltinOperator_MAX_POOL_2D] = &TfLiteParserImpl::ParseMaxPool2D;
794  m_ParserFunctions[tflite::BuiltinOperator_MAXIMUM] = &TfLiteParserImpl::ParseMaximum;
795  m_ParserFunctions[tflite::BuiltinOperator_MEAN] = &TfLiteParserImpl::ParseMean;
796  m_ParserFunctions[tflite::BuiltinOperator_MINIMUM] = &TfLiteParserImpl::ParseMinimum;
797  m_ParserFunctions[tflite::BuiltinOperator_MIRROR_PAD] = &TfLiteParserImpl::ParseMirrorPad;
798  m_ParserFunctions[tflite::BuiltinOperator_MUL] = &TfLiteParserImpl::ParseMul;
799  m_ParserFunctions[tflite::BuiltinOperator_NEG] = &TfLiteParserImpl::ParseNeg;
800  m_ParserFunctions[tflite::BuiltinOperator_NOT_EQUAL] = &TfLiteParserImpl::ParseNotEqual;
801  m_ParserFunctions[tflite::BuiltinOperator_PACK] = &TfLiteParserImpl::ParsePack;
802  m_ParserFunctions[tflite::BuiltinOperator_PAD] = &TfLiteParserImpl::ParsePad;
803  m_ParserFunctions[tflite::BuiltinOperator_PADV2] = &TfLiteParserImpl::ParsePad;
804  m_ParserFunctions[tflite::BuiltinOperator_POW] = &TfLiteParserImpl::ParsePower;
805  m_ParserFunctions[tflite::BuiltinOperator_PRELU] = &TfLiteParserImpl::ParsePrelu;
806  m_ParserFunctions[tflite::BuiltinOperator_QUANTIZE] = &TfLiteParserImpl::ParseQuantize;
807  m_ParserFunctions[tflite::BuiltinOperator_RELU] = &TfLiteParserImpl::ParseRelu;
808  m_ParserFunctions[tflite::BuiltinOperator_RELU6] = &TfLiteParserImpl::ParseRelu6;
809  m_ParserFunctions[tflite::BuiltinOperator_REDUCE_MAX] = &TfLiteParserImpl::ParseReduceMax;
810  m_ParserFunctions[tflite::BuiltinOperator_REDUCE_MIN] = &TfLiteParserImpl::ParseReduceMin;
811  m_ParserFunctions[tflite::BuiltinOperator_REDUCE_PROD] = &TfLiteParserImpl::ParseReduceProd;
812  m_ParserFunctions[tflite::BuiltinOperator_RESHAPE] = &TfLiteParserImpl::ParseReshape;
813  m_ParserFunctions[tflite::BuiltinOperator_RESIZE_BILINEAR] = &TfLiteParserImpl::ParseResizeBilinear;
814  m_ParserFunctions[tflite::BuiltinOperator_RESIZE_NEAREST_NEIGHBOR] = &TfLiteParserImpl::ParseResizeNearestNeighbor;
815  m_ParserFunctions[tflite::BuiltinOperator_REVERSE_V2] = &TfLiteParserImpl::ParseReverseV2;
816  m_ParserFunctions[tflite::BuiltinOperator_RSQRT] = &TfLiteParserImpl::ParseRsqrt;
817  m_ParserFunctions[tflite::BuiltinOperator_SCATTER_ND] = &TfLiteParserImpl::ParseScatterNd;
818  m_ParserFunctions[tflite::BuiltinOperator_SQRT] = &TfLiteParserImpl::ParseSqrt;
819  m_ParserFunctions[tflite::BuiltinOperator_SHAPE] = &TfLiteParserImpl::ParseShape;
820  m_ParserFunctions[tflite::BuiltinOperator_SIN] = &TfLiteParserImpl::ParseSin;
821  m_ParserFunctions[tflite::BuiltinOperator_SLICE] = &TfLiteParserImpl::ParseSlice;
822  m_ParserFunctions[tflite::BuiltinOperator_SOFTMAX] = &TfLiteParserImpl::ParseSoftmax;
823  m_ParserFunctions[tflite::BuiltinOperator_SPACE_TO_BATCH_ND] = &TfLiteParserImpl::ParseSpaceToBatchND;
824  m_ParserFunctions[tflite::BuiltinOperator_SPACE_TO_DEPTH] = &TfLiteParserImpl::ParseSpaceToDepth;
825  m_ParserFunctions[tflite::BuiltinOperator_SPLIT] = &TfLiteParserImpl::ParseSplit;
826  m_ParserFunctions[tflite::BuiltinOperator_SPLIT_V] = &TfLiteParserImpl::ParseSplitV;
827  m_ParserFunctions[tflite::BuiltinOperator_SQUEEZE] = &TfLiteParserImpl::ParseSqueeze;
828  m_ParserFunctions[tflite::BuiltinOperator_SQUARE] = &TfLiteParserImpl::ParseSquare;
829  m_ParserFunctions[tflite::BuiltinOperator_SQUARED_DIFFERENCE] = &TfLiteParserImpl::ParseSquaredDifference;
830  m_ParserFunctions[tflite::BuiltinOperator_STRIDED_SLICE] = &TfLiteParserImpl::ParseStridedSlice;
831  m_ParserFunctions[tflite::BuiltinOperator_SUB] = &TfLiteParserImpl::ParseSub;
832  m_ParserFunctions[tflite::BuiltinOperator_SUM] = &TfLiteParserImpl::ParseSum;
833  m_ParserFunctions[tflite::BuiltinOperator_TANH] = &TfLiteParserImpl::ParseTanH;
834  m_ParserFunctions[tflite::BuiltinOperator_TILE] = &TfLiteParserImpl::ParseTile;
835  m_ParserFunctions[tflite::BuiltinOperator_TRANSPOSE] = &TfLiteParserImpl::ParseTranspose;
836  m_ParserFunctions[tflite::BuiltinOperator_TRANSPOSE_CONV] = &TfLiteParserImpl::ParseTransposeConv;
837  m_ParserFunctions[tflite::BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_LSTM]
838  = &TfLiteParserImpl::ParseUnidirectionalSequenceLSTM;
839  m_ParserFunctions[tflite::BuiltinOperator_UNPACK] = &TfLiteParserImpl::ParseUnpack;
840 
841  // register supported custom operators
842  m_CustomParserFunctions["TFLite_Detection_PostProcess"] = &TfLiteParserImpl::ParseDetectionPostProcess;
843 }

◆ ~TfLiteParserImpl()

~TfLiteParserImpl ( )
default

Member Function Documentation

◆ CreateNetworkFromBinary()

INetworkPtr CreateNetworkFromBinary ( const std::vector< uint8_t > &  binaryContent)

Create the network from a flatbuffers binary.

Definition at line 964 of file TfLiteParser.cpp.

965 {
966  ResetParser();
967  m_Model = LoadModelFromBinary(binaryContent.data(), binaryContent.size());
968  return CreateNetworkFromModel();
969 }

References TfLiteParserImpl::LoadModelFromBinary().
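
A short sketch of feeding the parser an in-memory flatbuffer instead of a file path; reading the file into the vector is ordinary C++ and the path argument is a placeholder:

#include <TfLiteParser.hpp>
#include <cstdint>
#include <fstream>
#include <iterator>
#include <vector>

armnn::INetworkPtr ParseFromMemory(const char* path)
{
    // Read the whole .tflite file into memory, e.g. because it is embedded
    // in the application or arrives over the network.
    std::ifstream file(path, std::ios::binary);
    std::vector<uint8_t> content((std::istreambuf_iterator<char>(file)),
                                 std::istreambuf_iterator<char>());

    armnnTfLiteParser::TfLiteParserImpl parser;
    return parser.CreateNetworkFromBinary(content);
}
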

◆ CreateNetworkFromBinaryAsDynamic()

armnn::INetworkPtr CreateNetworkFromBinaryAsDynamic ( const std::vector< uint8_t > &  binaryContent)

◆ CreateNetworkFromBinaryFile()

INetworkPtr CreateNetworkFromBinaryFile ( const char *  graphFile)

Create the network from a flatbuffers binary file on disk.

Definition at line 957 of file TfLiteParser.cpp.

958 {
959  ResetParser();
960  m_Model = LoadModelFromFile(graphFile);
961  return CreateNetworkFromModel();
962 }

References TfLiteParserImpl::LoadModelFromFile().

◆ GetBuffer()

TfLiteParserImpl::BufferRawPtr GetBuffer ( const ModelPtr &  model,
size_t  bufferIndex 
)
static

Definition at line 5763 of file TfLiteParser.cpp.

5764 {
5765  CHECK_BUFFER(model, bufferIndex);
5766  return model->buffers[bufferIndex].get();
5767 }

References CHECK_BUFFER.

◆ GetInputs()

TfLiteParserImpl::TensorRawPtrVector GetInputs ( const ModelPtr &  model,
size_t  subgraphIndex,
size_t  operatorIndex 
)
static

Definition at line 5451 of file TfLiteParser.cpp.

5454 {
5455  CHECK_MODEL(model, subgraphIndex, operatorIndex);
5456 
5457  const auto& subgraphPtr = model->subgraphs[subgraphIndex];
5458  const auto& operatorPtr = subgraphPtr->operators[operatorIndex];
5459 
5460  size_t inputCount = operatorPtr->inputs.size();
5461  TensorRawPtrVector result;
5462  for (size_t i = 0; i < inputCount; ++i)
5463  {
5464  // If the input location is -1 then assume input is turned off.
5465  if (operatorPtr->inputs[i] == -1)
5466  {
5467  continue;
5468  }
5469  else
5470  {
5471  uint32_t inputId = CHECKED_NON_NEGATIVE(operatorPtr->inputs[i]);
5472  result.push_back(subgraphPtr->tensors[inputId].get());
5473  }
5474  }
5475  return result;
5476 }

References CHECK_MODEL, and CHECKED_NON_NEGATIVE.

◆ GetInputTensorIds()

std::vector< int32_t > & GetInputTensorIds ( const ModelPtr &  model,
size_t  subgraphIndex,
size_t  operatorIndex 
)
static

Definition at line 5531 of file TfLiteParser.cpp.

5534 {
5535  CHECK_MODEL(model, subgraphIndex, operatorIndex);
5536  const auto& subgraphPtr = model->subgraphs[subgraphIndex];
5537  const auto& operatorPtr = subgraphPtr->operators[operatorIndex];
5538  return operatorPtr->inputs;
5539 }

References CHECK_MODEL.

◆ GetNetworkInputBindingInfo()

BindingPointInfo GetNetworkInputBindingInfo ( size_t  subgraphId,
const std::string &  name 
) const

Retrieve binding info (layer id and tensor info) for the network input identified by the given layer name and subgraph id.

Definition at line 5941 of file TfLiteParser.cpp.

5943 {
5944  CHECK_SUBGRAPH(m_Model, subgraphId);
5945  auto inputs = GetSubgraphInputs(m_Model, subgraphId);
5946  for (auto const& input : inputs)
5947  {
5948  if (input.second->name == name)
5949  {
5950  auto bindingId = GenerateLayerBindingId(subgraphId, input.first);
5951  auto inputTensorInfo = ToTensorInfo(input.second);
5952  // Input tensors are always treated as constant tensors during network execution.
5953  inputTensorInfo.SetConstant(true);
5954  return std::make_pair(bindingId, inputTensorInfo);
5955  }
5956  }
5957 
5958  std::stringstream bindings;
5959  for (auto const& input : inputs)
5960  {
5961  bindings << "'" << input.second->name << "' ";
5962  }
5963 
5964  throw ParseException(
5965  fmt::format("No input binding found for subgraph:{} and name:{}. "
5966  "Possible inputs are: [{}] {}",
5967  subgraphId,
5968  name,
5969  bindings.str(),
5970  CHECK_LOCATION().AsString()));
5971 }

References CHECK_LOCATION, CHECK_SUBGRAPH, TfLiteParserImpl::GetSubgraphInputs(), TensorInfo::SetConstant(), and armnnDeserializer::ToTensorInfo().
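
The returned BindingPointInfo is a pair of layer binding id and armnn::TensorInfo; a sketch of unpacking it (the tensor name below is only an example):

#include <TfLiteParser.hpp>
#include <iostream>

void DescribeInput(const armnnTfLiteParser::TfLiteParserImpl& parser)
{
    // Subgraph 0; "serving_default_input:0" is a placeholder tensor name.
    armnnTfLiteParser::BindingPointInfo info =
        parser.GetNetworkInputBindingInfo(0, "serving_default_input:0");

    armnn::LayerBindingId id = info.first;           // used later when binding input tensors
    const armnn::TensorInfo& tensorInfo = info.second;

    std::cout << "binding id " << id
              << ", dimensions " << tensorInfo.GetNumDimensions()
              << ", elements "   << tensorInfo.GetNumElements() << std::endl;
}
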

◆ GetNetworkOutputBindingInfo()

BindingPointInfo GetNetworkOutputBindingInfo ( size_t  subgraphId,
const std::string &  name 
) const

Retrieve binding info (layer id and tensor info) for the network output identified by the given layer name and subgraph id.

Definition at line 5973 of file TfLiteParser.cpp.

5975 {
5976  CHECK_SUBGRAPH(m_Model, subgraphId);
5977  auto outputs = GetSubgraphOutputs(m_Model, subgraphId);
5978  for (unsigned int i = 0; i < outputs.size(); ++i)
5979  {
5980  auto const output = outputs[i];
5981  if (output.second->name == name)
5982  {
5983  auto bindingId = GenerateLayerBindingId(subgraphId, output.first);
5984  std::vector<unsigned int> shape = m_OverriddenOutputShapes.size() > 0 ?
5985  m_OverriddenOutputShapes[i] : AsUnsignedVector(output.second->shape);
5986  return std::make_pair(bindingId, ToTensorInfo(output.second, shape));
5987  }
5988  }
5989 
5990  std::stringstream bindings;
5991  for (auto const& output : outputs)
5992  {
5993  bindings << "'" << output.second->name << "' ";
5994  }
5995 
5996  throw ParseException(
5997  fmt::format("No output binding found for subgraph:{} and name:{}. "
5998  "Possible outputs are: [{}] {}",
5999  subgraphId,
6000  name,
6001  bindings.str(),
6002  CHECK_LOCATION().AsString()));
6003 }

References CHECK_LOCATION, CHECK_SUBGRAPH, TfLiteParserImpl::GetSubgraphOutputs(), and armnnDeserializer::ToTensorInfo().
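
A corresponding sketch for an output tensor, printing its shape (the tensor name is again a placeholder):

#include <TfLiteParser.hpp>
#include <iostream>

void DescribeOutput(const armnnTfLiteParser::TfLiteParserImpl& parser)
{
    armnnTfLiteParser::BindingPointInfo info =
        parser.GetNetworkOutputBindingInfo(0, "StatefulPartitionedCall:0");

    const armnn::TensorShape& shape = info.second.GetShape();
    for (unsigned int d = 0; d < shape.GetNumDimensions(); ++d)
    {
        std::cout << shape[d] << (d + 1 < shape.GetNumDimensions() ? " x " : "\n");
    }
}
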

◆ GetOutputs()

TfLiteParserImpl::TensorRawPtrVector GetOutputs ( const ModelPtr &  model,
size_t  subgraphIndex,
size_t  operatorIndex 
)
static

Definition at line 5478 of file TfLiteParser.cpp.

5481 {
5482  CHECK_MODEL(model, subgraphIndex, operatorIndex);
5483 
5484  const auto& subgraphPtr = model->subgraphs[subgraphIndex];
5485  const auto& operatorPtr = subgraphPtr->operators[operatorIndex];
5486 
5487  size_t outputCount = operatorPtr->outputs.size();
5488  TensorRawPtrVector result(outputCount);
5489  for (size_t i = 0; i < outputCount; ++i)
5490  {
5491  uint32_t outputId = CHECKED_NON_NEGATIVE(operatorPtr->outputs[i]);
5492  CHECK_TENSOR(model, subgraphIndex, outputId);
5493  result[i] = subgraphPtr->tensors[outputId].get();
5494  }
5495  return result;
5496 }

References CHECK_MODEL, CHECK_TENSOR, and CHECKED_NON_NEGATIVE.
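
GetInputs() and GetOutputs() operate on the unpacked tflite::ModelT (see LoadModelFromFile()/LoadModelFromBinary()). A sketch that lists the tensors attached to one operator; subgraph index 0 and operator index 0 are arbitrary choices:

#include <TfLiteParser.hpp>
#include <iostream>

void PrintOperatorTensors(const armnnTfLiteParser::TfLiteParserImpl::ModelPtr& model)
{
    using Parser = armnnTfLiteParser::TfLiteParserImpl;

    // Optional inputs (location -1 in the flatbuffer) are skipped by GetInputs().
    for (Parser::TensorRawPtr tensor : Parser::GetInputs(model, 0, 0))
    {
        std::cout << "input:  " << tensor->name << std::endl;
    }
    for (Parser::TensorRawPtr tensor : Parser::GetOutputs(model, 0, 0))
    {
        std::cout << "output: " << tensor->name << std::endl;
    }
}
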

◆ GetOutputTensorIds()

std::vector< int32_t > & GetOutputTensorIds ( const ModelPtr &  model,
size_t  subgraphIndex,
size_t  operatorIndex 
)
static

Definition at line 5541 of file TfLiteParser.cpp.

5544 {
5545  CHECK_MODEL(model, subgraphIndex, operatorIndex);
5546  const auto& subgraphPtr = model->subgraphs[subgraphIndex];
5547  const auto& operatorPtr = subgraphPtr->operators[operatorIndex];
5548  return operatorPtr->outputs;
5549 }

References CHECK_MODEL.

◆ GetSubgraphCount()

size_t GetSubgraphCount ( ) const

Return the number of subgraphs in the parsed model.

Definition at line 6005 of file TfLiteParser.cpp.

6006 {
6007  return m_Model->subgraphs.size();
6008 }

◆ GetSubgraphInputs()

TfLiteParserImpl::TensorIdRawPtrVector GetSubgraphInputs ( const ModelPtr &  model,
size_t  subgraphIndex 
)
static

Definition at line 5498 of file TfLiteParser.cpp.

5500 {
5501  CHECK_SUBGRAPH(model, subgraphIndex);
5502  const auto& subgraphPtr = model->subgraphs[subgraphIndex];
5503 
5504  size_t inputCount = subgraphPtr->inputs.size();
5505  TensorIdRawPtrVector result(inputCount);
5506  for (size_t i = 0; i < inputCount; ++i)
5507  {
5508  uint32_t inputId = CHECKED_NON_NEGATIVE(subgraphPtr->inputs[i]);
5509  CHECK_TENSOR(model, subgraphIndex, inputId);
5510  result[i] = std::make_pair(inputId, subgraphPtr->tensors[inputId].get());
5511  }
5512  return result;
5513 }

References CHECK_SUBGRAPH, CHECK_TENSOR, and CHECKED_NON_NEGATIVE.

Referenced by TfLiteParserImpl::GetNetworkInputBindingInfo(), and TfLiteParserImpl::GetSubgraphInputTensorNames().
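
Each element of the returned TensorIdRawPtrVector pairs the tensor index within the subgraph with the raw tensor pointer; a sketch that lists a subgraph's inputs:

#include <TfLiteParser.hpp>
#include <iostream>

void PrintSubgraphInputs(const armnnTfLiteParser::TfLiteParserImpl::ModelPtr& model,
                         size_t subgraphIndex)
{
    auto inputs = armnnTfLiteParser::TfLiteParserImpl::GetSubgraphInputs(model, subgraphIndex);
    for (const auto& input : inputs)
    {
        // input.first is the tensor index, input.second the tflite::TensorT pointer.
        std::cout << input.first << ": " << input.second->name << std::endl;
    }
}
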

◆ GetSubgraphInputTensorNames()

std::vector< std::string > GetSubgraphInputTensorNames ( size_t  subgraphId) const

Return the input tensor names for a given subgraph.

Definition at line 6010 of file TfLiteParser.cpp.

6011 {
6012  CHECK_SUBGRAPH(m_Model, subgraphId);
6013  auto inputs = GetSubgraphInputs(m_Model, subgraphId);
6014  std::vector<std::string> result;
6015  result.reserve(inputs.size());
6016  for (auto const& input : inputs)
6017  {
6018  result.push_back(input.second->name);
6019  }
6020  return result;
6021 }

References CHECK_SUBGRAPH, and TfLiteParserImpl::GetSubgraphInputs().

◆ GetSubgraphOutputs()

TfLiteParserImpl::TensorIdRawPtrVector GetSubgraphOutputs ( const ModelPtr &  model,
size_t  subgraphIndex 
)
static

Definition at line 5515 of file TfLiteParser.cpp.

5517 {
5518  CHECK_SUBGRAPH(model, subgraphIndex);
5519  const auto& subgraphPtr = model->subgraphs[subgraphIndex];
5520 
5521  size_t outputCount = subgraphPtr->outputs.size();
5522  TensorIdRawPtrVector result(outputCount);
5523  for (size_t i = 0; i < outputCount; ++i)
5524  {
5525  uint32_t outputId = CHECKED_NON_NEGATIVE(subgraphPtr->outputs[i]);
5526  result[i] = std::make_pair(outputId, subgraphPtr->tensors[outputId].get());
5527  }
5528  return result;
5529 }

References CHECK_SUBGRAPH, and CHECKED_NON_NEGATIVE.

Referenced by TfLiteParserImpl::GetNetworkOutputBindingInfo(), and TfLiteParserImpl::GetSubgraphOutputTensorNames().

◆ GetSubgraphOutputTensorNames()

std::vector< std::string > GetSubgraphOutputTensorNames ( size_t  subgraphId) const

Return the output tensor names for a given subgraph.

Definition at line 6023 of file TfLiteParser.cpp.

6024 {
6025  CHECK_SUBGRAPH(m_Model, subgraphId);
6026  auto outputs = GetSubgraphOutputs(m_Model, subgraphId);
6027  std::vector<std::string> result;
6028  result.reserve(outputs.size());
6029  for (auto const& output : outputs)
6030  {
6031  result.push_back(output.second->name);
6032  }
6033  return result;
6034 }

References CHECK_SUBGRAPH, and TfLiteParserImpl::GetSubgraphOutputs().

◆ GetVersion()

const std::string GetVersion ( )
static

Retrieve version in X.Y.Z form.

Definition at line 6036 of file TfLiteParser.cpp.

6037 {
6038  return TFLITE_PARSER_VERSION;
6039 }

References TFLITE_PARSER_VERSION.

◆ LoadModel()

armnn::INetworkPtr LoadModel ( std::unique_ptr< tflite::ModelT >  model)

Definition at line 972 of file TfLiteParser.cpp.

973 {
974  ResetParser();
975  m_Model = std::move(model);
976 
977  return CreateNetworkFromModel();
978 }
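
LoadModel() enables a two-step flow: unpack the flatbuffer first with the static LoadModelFromFile()/LoadModelFromBinary(), optionally inspect the tflite::ModelT, then build the ArmNN network from it. A minimal sketch:

#include <TfLiteParser.hpp>
#include <utility>

armnn::INetworkPtr BuildNetwork(const char* path)
{
    using Parser = armnnTfLiteParser::TfLiteParserImpl;

    // Static helper: no parser instance is needed to unpack the model.
    Parser::ModelPtr model = Parser::LoadModelFromFile(path);

    // ... the unpacked tflite::ModelT could be inspected or patched here ...

    Parser parser;
    return parser.LoadModel(std::move(model));   // resets the parser and builds the INetwork
}
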

◆ LoadModelFromBinary()

TfLiteParserImpl::ModelPtr LoadModelFromBinary ( const uint8_t *  binaryContent,
size_t  len 
)
static

Definition at line 5432 of file TfLiteParser.cpp.

5433 {
5434  if (binaryContent == nullptr)
5435  {
5436  throw InvalidArgumentException(fmt::format("Invalid (null) binary content {}",
5437  CHECK_LOCATION().AsString()));
5438  }
5439  flatbuffers::Verifier verifier(binaryContent, len);
5440  if (verifier.VerifyBuffer<tflite::Model>() == false)
5441  {
5442  throw ParseException(
5443  fmt::format("Buffer doesn't conform to the expected Tensorflow Lite "
5444  "flatbuffers format. size:{} {}",
5445  len,
5446  CHECK_LOCATION().AsString()));
5447  }
5448  return tflite::UnPackModel(binaryContent);
5449 }

References CHECK_LOCATION.

Referenced by TfLiteParserImpl::CreateNetworkFromBinary(), and TfLiteParserImpl::LoadModelFromFile().

◆ LoadModelFromFile()

TfLiteParserImpl::ModelPtr LoadModelFromFile ( const char *  fileName)
static

Definition at line 5402 of file TfLiteParser.cpp.

5403 {
5404  if (fileName == nullptr)
5405  {
5406  throw InvalidArgumentException(fmt::format("Invalid (null) file name {}",
5407  CHECK_LOCATION().AsString()));
5408  }
5409  std::error_code errorCode;
5410  fs::path pathToFile(fileName);
5411  if (!fs::exists(pathToFile, errorCode))
5412  {
5413  //fmt::format() could not be used here (format error)
5414  std::stringstream msg;
5415  msg << "Cannot find the file (" << fileName << ") errorCode: " << errorCode
5416  << " " << CHECK_LOCATION().AsString();
5417  throw FileNotFoundException(msg.str());
5418  }
5419  if (!fs::is_regular_file(pathToFile))
5420  {
5421  // Exclude non regular files.
5422  throw InvalidArgumentException(fmt::format("File \"{}\" is not a regular file and cannot be loaded.",
5423  pathToFile.c_str()));
5424  }
5425 
5426  std::ifstream file(fileName, std::ios::binary);
5427  std::string fileContent((std::istreambuf_iterator<char>(file)), std::istreambuf_iterator<char>());
5428  return LoadModelFromBinary(reinterpret_cast<const uint8_t *>(fileContent.c_str()),
5429  fileContent.size());
5430 }

References CHECK_LOCATION, and TfLiteParserImpl::LoadModelFromBinary().

Referenced by TfLiteParserImpl::CreateNetworkFromBinaryFile().

◆ OutputShapeOfReshape()

armnn::TensorInfo OutputShapeOfReshape ( const armnn::TensorInfo &  inputTensorInfo,
const std::vector< int32_t > &  targetDimsIn 
)
static

Definition at line 3311 of file TfLiteParser.cpp.

3313 {
3314  std::vector<unsigned int> outputDims(targetDimsIn.begin(), targetDimsIn.end());
3315  const auto stretchDim = std::find(targetDimsIn.begin(), targetDimsIn.end(), -1);
3316 
3317  if (stretchDim != targetDimsIn.end())
3318  {
3319  if (std::find(std::next(stretchDim), targetDimsIn.end(), -1) != targetDimsIn.end())
3320  {
3321  throw ParseException(
3322  fmt::format("At most one component of shape can be -1 {}", CHECK_LOCATION().AsString()));
3323  }
3324 
3325  auto targetNumElements =
3326  armnn::numeric_cast<unsigned int>(
3327  std::accumulate(targetDimsIn.begin(), targetDimsIn.end(), -1, std::multiplies<int32_t>()));
3328 
3329  auto stretchIndex = static_cast<size_t>(std::distance(targetDimsIn.begin(), stretchDim));
3330 
3331  if (targetNumElements == 0)
3332  {
3333  if (inputTensorInfo.GetNumElements() == 0)
3334  {
3335  outputDims[stretchIndex] = 0;
3336  }
3337  else
3338  {
3339  throw ParseException(
3340  fmt::format("Input to reshape is a tensor with elements, but the requested shape has 0. {}",
3341  CHECK_LOCATION().AsString()));
3342  }
3343  }
3344  else
3345  {
3346  outputDims[stretchIndex] = inputTensorInfo.GetNumElements() / targetNumElements;
3347  }
3348  }
3349 
3350  TensorShape outputShape = TensorShape(static_cast<unsigned int>(outputDims.size()), outputDims.data());
3351 
3352  TensorInfo reshapeInfo = inputTensorInfo;
3353  reshapeInfo.SetShape(outputShape);
3354 
3355  return reshapeInfo;
3356 }

References CHECK_LOCATION, TensorInfo::GetNumElements(), and TensorInfo::SetShape().
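
A worked example of the -1 stretch dimension handled above, assuming a 1x2x3x4 float input (24 elements) and a target shape of {-1, 6}: the -1 component is replaced by 24 / 6 = 4, giving a 4x6 output.

#include <TfLiteParser.hpp>
#include <cassert>

void ReshapeShapeExample()
{
    armnn::TensorInfo input(armnn::TensorShape({1, 2, 3, 4}), armnn::DataType::Float32);

    armnn::TensorInfo output =
        armnnTfLiteParser::TfLiteParserImpl::OutputShapeOfReshape(input, {-1, 6});

    // Data type and quantization info are carried over; only the shape changes.
    assert(output.GetShape()[0] == 4);
    assert(output.GetShape()[1] == 6);
}
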

◆ OutputShapeOfSqueeze()

armnn::TensorInfo OutputShapeOfSqueeze ( std::vector< uint32_t >  squeezeDims,
const armnn::TensorInfo &  inputTensorInfo 
)
static

Definition at line 2430 of file TfLiteParser.cpp.

2432 {
2433  CHECK_VALID_SIZE(squeezeDims.size(), 0, 1, 2, 3, 4);
2434  static const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };
2435 
2436  if (inputTensorInfo.GetNumDimensions() > 4)
2437  {
2438  std::stringstream ss;
2439  ss << "Input tensor has unexpected number of dimensions:" << inputTensorInfo.GetNumDimensions()
2440  << " shape:" << inputTensorInfo.GetShape() << " "
2441  << CHECK_LOCATION().AsString();
2442  throw ParseException(ss.str());
2443  }
2444 
2445  if (squeezeDims.empty())
2446  {
2447  squeezeDims.assign(dimensionSequence,
2448  dimensionSequence+inputTensorInfo.GetNumDimensions());
2449  }
2450 
2451  std::vector<uint32_t> outputDims;
2452  for(unsigned int i = 0; i < inputTensorInfo.GetNumDimensions(); i++)
2453  {
2454  bool skipSqueeze = (std::find(squeezeDims.begin(), squeezeDims.end(), i) == squeezeDims.end());
2455  auto currentDimension = inputTensorInfo.GetShape()[i];
2456  if (skipSqueeze || currentDimension != 1)
2457  {
2458  outputDims.push_back(currentDimension);
2459  }
2460  }
2461 
2462  if (outputDims.size() > 4)
2463  {
2464  std::stringstream ss;
2465  ss << "Output tensor has unexpected number of dimensions:" << inputTensorInfo.GetNumDimensions()
2466  << " shape:" << inputTensorInfo.GetShape() << " "
2467  << CHECK_LOCATION().AsString();
2468  throw ParseException(ss.str());
2469  }
2470 
2471  TensorShape outShape = TensorShape(static_cast<unsigned int>(outputDims.size()),
2472  outputDims.data());
2473 
2474  // we need to preserve the tensor type and the quantization data as well
2475  TensorInfo outTensorInfo = inputTensorInfo;
2476  outTensorInfo.SetShape(outShape);
2477 
2478  return outTensorInfo;
2479 }

References CHECK_LOCATION, CHECK_VALID_SIZE, TensorInfo::GetNumDimensions(), TensorInfo::GetShape(), and TensorInfo::SetShape().
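
A worked example of the squeeze logic above: squeezing axes 0 and 2 of a 1x3x1x2 input removes those size-1 dimensions and yields a 3x2 output; passing an empty squeezeDims would remove every dimension of size 1.

#include <TfLiteParser.hpp>
#include <cassert>

void SqueezeShapeExample()
{
    armnn::TensorInfo input(armnn::TensorShape({1, 3, 1, 2}), armnn::DataType::Float32);

    armnn::TensorInfo output =
        armnnTfLiteParser::TfLiteParserImpl::OutputShapeOfSqueeze({0, 2}, input);

    assert(output.GetNumDimensions() == 2);
    assert(output.GetShape()[0] == 3);
    assert(output.GetShape()[1] == 2);
}
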


The documentation for this class was generated from the following files:
TfLiteParser.hpp
TfLiteParser.cpp