ArmNN
 22.05
TfLiteParserImpl Class Reference

#include <TfLiteParser.hpp>

Public Types

using ModelPtr = std::unique_ptr< tflite::ModelT >
 
using SubgraphPtr = std::unique_ptr< tflite::SubGraphT >
 
using OperatorPtr = std::unique_ptr< tflite::OperatorT >
 
using OperatorCodePtr = std::unique_ptr< tflite::OperatorCodeT >
 
using TensorPtr = std::unique_ptr< tflite::TensorT >
 
using TensorRawPtr = const tflite::TensorT *
 
using TensorRawPtrVector = std::vector< TensorRawPtr >
 
using TensorIdRawPtr = std::pair< size_t, TensorRawPtr >
 
using TensorIdRawPtrVector = std::vector< TensorIdRawPtr >
 
using BufferPtr = std::unique_ptr< tflite::BufferT >
 
using BufferRawPtr = const tflite::BufferT *
 

Public Member Functions

armnn::INetworkPtr CreateNetworkFromBinaryFile (const char *graphFile)
 Create the network from a flatbuffers binary file on disk. More...
 
armnn::INetworkPtr CreateNetworkFromBinary (const std::vector< uint8_t > &binaryContent)
 Create the network from a flatbuffers binary. More...
 
BindingPointInfo GetNetworkInputBindingInfo (size_t subgraphId, const std::string &name) const
 Retrieve binding info (layer id and tensor info) for the network input identified by the given layer name and subgraph id. More...
 
BindingPointInfo GetNetworkOutputBindingInfo (size_t subgraphId, const std::string &name) const
 Retrieve binding info (layer id and tensor info) for the network output identified by the given layer name and subgraph id. More...
 
size_t GetSubgraphCount () const
 Return the number of subgraphs in the parsed model. More...
 
std::vector< std::string > GetSubgraphInputTensorNames (size_t subgraphId) const
 Return the input tensor names for a given subgraph. More...
 
std::vector< std::string > GetSubgraphOutputTensorNames (size_t subgraphId) const
 Return the output tensor names for a given subgraph. More...
 
 TfLiteParserImpl (const armnn::Optional< ITfLiteParser::TfLiteParserOptions > &options=armnn::EmptyOptional())
 
 ~TfLiteParserImpl ()=default
 
armnn::INetworkPtr CreateNetworkFromBinaryAsDynamic (const std::vector< uint8_t > &binaryContent)
 
armnn::INetworkPtr LoadModel (std::unique_ptr< tflite::ModelT > model)
 

Static Public Member Functions

static ModelPtr LoadModelFromFile (const char *fileName)
 
static ModelPtr LoadModelFromBinary (const uint8_t *binaryContent, size_t len)
 
static TensorRawPtrVector GetInputs (const ModelPtr &model, size_t subgraphIndex, size_t operatorIndex)
 
static TensorRawPtrVector GetOutputs (const ModelPtr &model, size_t subgraphIndex, size_t operatorIndex)
 
static TensorIdRawPtrVector GetSubgraphInputs (const ModelPtr &model, size_t subgraphIndex)
 
static TensorIdRawPtrVector GetSubgraphOutputs (const ModelPtr &model, size_t subgraphIndex)
 
static std::vector< int32_t > & GetInputTensorIds (const ModelPtr &model, size_t subgraphIndex, size_t operatorIndex)
 
static std::vector< int32_t > & GetOutputTensorIds (const ModelPtr &model, size_t subgraphIndex, size_t operatorIndex)
 
static BufferRawPtr GetBuffer (const ModelPtr &model, size_t bufferIndex)
 
static armnn::TensorInfo OutputShapeOfSqueeze (std::vector< uint32_t > squeezeDims, const armnn::TensorInfo &inputTensorInfo)
 
static armnn::TensorInfo OutputShapeOfReshape (const armnn::TensorInfo &inputTensorInfo, const std::vector< int32_t > &targetDimsIn)
 
static const std::string GetVersion ()
 Retrieve version in X.Y.Z form. More...
 

Detailed Description

Definition at line 25 of file TfLiteParser.hpp.

Member Typedef Documentation

◆ BufferPtr

using BufferPtr = std::unique_ptr<tflite::BufferT>

Definition at line 38 of file TfLiteParser.hpp.

◆ BufferRawPtr

using BufferRawPtr = const tflite::BufferT *

Definition at line 39 of file TfLiteParser.hpp.

◆ ModelPtr

using ModelPtr = std::unique_ptr<tflite::ModelT>

Definition at line 29 of file TfLiteParser.hpp.

◆ OperatorCodePtr

using OperatorCodePtr = std::unique_ptr<tflite::OperatorCodeT>

Definition at line 32 of file TfLiteParser.hpp.

◆ OperatorPtr

using OperatorPtr = std::unique_ptr<tflite::OperatorT>

Definition at line 31 of file TfLiteParser.hpp.

◆ SubgraphPtr

using SubgraphPtr = std::unique_ptr<tflite::SubGraphT>

Definition at line 30 of file TfLiteParser.hpp.

◆ TensorIdRawPtr

using TensorIdRawPtr = std::pair<size_t, TensorRawPtr>

Definition at line 36 of file TfLiteParser.hpp.

◆ TensorIdRawPtrVector

using TensorIdRawPtrVector = std::vector<TensorIdRawPtr>

Definition at line 37 of file TfLiteParser.hpp.

◆ TensorPtr

using TensorPtr = std::unique_ptr<tflite::TensorT>

Definition at line 33 of file TfLiteParser.hpp.

◆ TensorRawPtr

using TensorRawPtr = const tflite::TensorT *

Definition at line 34 of file TfLiteParser.hpp.

◆ TensorRawPtrVector

using TensorRawPtrVector = std::vector<TensorRawPtr>

Definition at line 35 of file TfLiteParser.hpp.

Constructor & Destructor Documentation

◆ TfLiteParserImpl()

Definition at line 671 of file TfLiteParser.cpp.

672 : m_Options(options)
673 , m_Network(nullptr, nullptr)
674 , m_ParserFunctions(tflite::BuiltinOperator_MAX+1, &TfLiteParserImpl::ParseUnsupportedOperator)
675 {
676  // register supported operators
677  m_ParserFunctions[tflite::BuiltinOperator_ABS] = &TfLiteParserImpl::ParseAbs;
678  m_ParserFunctions[tflite::BuiltinOperator_ADD] = &TfLiteParserImpl::ParseAdd;
679  m_ParserFunctions[tflite::BuiltinOperator_ARG_MIN] = &TfLiteParserImpl::ParseArgMin;
680  m_ParserFunctions[tflite::BuiltinOperator_ARG_MAX] = &TfLiteParserImpl::ParseArgMax;
681  m_ParserFunctions[tflite::BuiltinOperator_AVERAGE_POOL_2D] = &TfLiteParserImpl::ParseAveragePool2D;
682  m_ParserFunctions[tflite::BuiltinOperator_BATCH_TO_SPACE_ND] = &TfLiteParserImpl::ParseBatchToSpaceND;
683  m_ParserFunctions[tflite::BuiltinOperator_CAST] = &TfLiteParserImpl::ParseCast;
684  m_ParserFunctions[tflite::BuiltinOperator_CONCATENATION] = &TfLiteParserImpl::ParseConcatenation;
685  m_ParserFunctions[tflite::BuiltinOperator_CONV_2D] = &TfLiteParserImpl::ParseConv2D;
686  // Conv3D support was added in TF 2.5, so for backwards compatibility a hash define is needed.
687  #if defined(ARMNN_POST_TFLITE_2_3)
688  m_ParserFunctions[tflite::BuiltinOperator_CONV_3D] = &TfLiteParserImpl::ParseConv3D;
689  #endif
690  m_ParserFunctions[tflite::BuiltinOperator_CUSTOM] = &TfLiteParserImpl::ParseCustomOperator;
691  m_ParserFunctions[tflite::BuiltinOperator_DEPTH_TO_SPACE] = &TfLiteParserImpl::ParseDepthToSpace;
692  m_ParserFunctions[tflite::BuiltinOperator_DEPTHWISE_CONV_2D] = &TfLiteParserImpl::ParseDepthwiseConv2D;
693  m_ParserFunctions[tflite::BuiltinOperator_DEQUANTIZE] = &TfLiteParserImpl::ParseDequantize;
694  m_ParserFunctions[tflite::BuiltinOperator_DIV] = &TfLiteParserImpl::ParseDiv;
695  m_ParserFunctions[tflite::BuiltinOperator_ELU] = &TfLiteParserImpl::ParseElu;
696  m_ParserFunctions[tflite::BuiltinOperator_EQUAL] = &TfLiteParserImpl::ParseEqual;
697  m_ParserFunctions[tflite::BuiltinOperator_EXP] = &TfLiteParserImpl::ParseExp;
698  m_ParserFunctions[tflite::BuiltinOperator_EXPAND_DIMS] = &TfLiteParserImpl::ParseExpandDims;
699  m_ParserFunctions[tflite::BuiltinOperator_FLOOR_DIV] = &TfLiteParserImpl::ParseFloorDiv;
700  m_ParserFunctions[tflite::BuiltinOperator_FULLY_CONNECTED] = &TfLiteParserImpl::ParseFullyConnected;
701  m_ParserFunctions[tflite::BuiltinOperator_GATHER] = &TfLiteParserImpl::ParseGather;
702  m_ParserFunctions[tflite::BuiltinOperator_GATHER_ND] = &TfLiteParserImpl::ParseGatherNd;
703  m_ParserFunctions[tflite::BuiltinOperator_GREATER] = &TfLiteParserImpl::ParseGreater;
704  m_ParserFunctions[tflite::BuiltinOperator_GREATER_EQUAL] = &TfLiteParserImpl::ParseGreaterOrEqual;
705  m_ParserFunctions[tflite::BuiltinOperator_HARD_SWISH] = &TfLiteParserImpl::ParseHardSwish;
706  m_ParserFunctions[tflite::BuiltinOperator_LEAKY_RELU] = &TfLiteParserImpl::ParseLeakyRelu;
707  m_ParserFunctions[tflite::BuiltinOperator_LESS] = &TfLiteParserImpl::ParseLess;
708  m_ParserFunctions[tflite::BuiltinOperator_LESS_EQUAL] = &TfLiteParserImpl::ParseLessOrEqual;
709  m_ParserFunctions[tflite::BuiltinOperator_LOCAL_RESPONSE_NORMALIZATION]
710  = &TfLiteParserImpl::ParseLocalResponseNormalization;
711  m_ParserFunctions[tflite::BuiltinOperator_LOGICAL_NOT] = &TfLiteParserImpl::ParseLogicalNot;
712  m_ParserFunctions[tflite::BuiltinOperator_LOGISTIC] = &TfLiteParserImpl::ParseLogistic;
713  m_ParserFunctions[tflite::BuiltinOperator_L2_NORMALIZATION] = &TfLiteParserImpl::ParseL2Normalization;
714  m_ParserFunctions[tflite::BuiltinOperator_MAX_POOL_2D] = &TfLiteParserImpl::ParseMaxPool2D;
715  m_ParserFunctions[tflite::BuiltinOperator_MAXIMUM] = &TfLiteParserImpl::ParseMaximum;
716  m_ParserFunctions[tflite::BuiltinOperator_MEAN] = &TfLiteParserImpl::ParseMean;
717  m_ParserFunctions[tflite::BuiltinOperator_MINIMUM] = &TfLiteParserImpl::ParseMinimum;
718  m_ParserFunctions[tflite::BuiltinOperator_MIRROR_PAD] = &TfLiteParserImpl::ParseMirrorPad;
719  m_ParserFunctions[tflite::BuiltinOperator_MUL] = &TfLiteParserImpl::ParseMul;
720  m_ParserFunctions[tflite::BuiltinOperator_NEG] = &TfLiteParserImpl::ParseNeg;
721  m_ParserFunctions[tflite::BuiltinOperator_NOT_EQUAL] = &TfLiteParserImpl::ParseNotEqual;
722  m_ParserFunctions[tflite::BuiltinOperator_PACK] = &TfLiteParserImpl::ParsePack;
723  m_ParserFunctions[tflite::BuiltinOperator_PAD] = &TfLiteParserImpl::ParsePad;
724  m_ParserFunctions[tflite::BuiltinOperator_PADV2] = &TfLiteParserImpl::ParsePad;
725  m_ParserFunctions[tflite::BuiltinOperator_PRELU] = &TfLiteParserImpl::ParsePrelu;
726  m_ParserFunctions[tflite::BuiltinOperator_QUANTIZE] = &TfLiteParserImpl::ParseQuantize;
727  m_ParserFunctions[tflite::BuiltinOperator_RELU] = &TfLiteParserImpl::ParseRelu;
728  m_ParserFunctions[tflite::BuiltinOperator_RELU6] = &TfLiteParserImpl::ParseRelu6;
729  m_ParserFunctions[tflite::BuiltinOperator_REDUCE_MAX] = &TfLiteParserImpl::ParseReduceMax;
730  m_ParserFunctions[tflite::BuiltinOperator_REDUCE_MIN] = &TfLiteParserImpl::ParseReduceMin;
731  m_ParserFunctions[tflite::BuiltinOperator_REDUCE_PROD] = &TfLiteParserImpl::ParseReduceProd;
732  m_ParserFunctions[tflite::BuiltinOperator_RESHAPE] = &TfLiteParserImpl::ParseReshape;
733  m_ParserFunctions[tflite::BuiltinOperator_RESIZE_BILINEAR] = &TfLiteParserImpl::ParseResizeBilinear;
734  m_ParserFunctions[tflite::BuiltinOperator_RESIZE_NEAREST_NEIGHBOR] = &TfLiteParserImpl::ParseResizeNearestNeighbor;
735  m_ParserFunctions[tflite::BuiltinOperator_RSQRT] = &TfLiteParserImpl::ParseRsqrt;
736  m_ParserFunctions[tflite::BuiltinOperator_SQRT] = &TfLiteParserImpl::ParseSqrt;
737  m_ParserFunctions[tflite::BuiltinOperator_SHAPE] = &TfLiteParserImpl::ParseShape;
738  m_ParserFunctions[tflite::BuiltinOperator_SLICE] = &TfLiteParserImpl::ParseSlice;
739  m_ParserFunctions[tflite::BuiltinOperator_SOFTMAX] = &TfLiteParserImpl::ParseSoftmax;
740  m_ParserFunctions[tflite::BuiltinOperator_SPACE_TO_BATCH_ND] = &TfLiteParserImpl::ParseSpaceToBatchND;
741  m_ParserFunctions[tflite::BuiltinOperator_SPLIT] = &TfLiteParserImpl::ParseSplit;
742  m_ParserFunctions[tflite::BuiltinOperator_SPLIT_V] = &TfLiteParserImpl::ParseSplitV;
743  m_ParserFunctions[tflite::BuiltinOperator_SQUEEZE] = &TfLiteParserImpl::ParseSqueeze;
744  m_ParserFunctions[tflite::BuiltinOperator_STRIDED_SLICE] = &TfLiteParserImpl::ParseStridedSlice;
745  m_ParserFunctions[tflite::BuiltinOperator_SUB] = &TfLiteParserImpl::ParseSub;
746  m_ParserFunctions[tflite::BuiltinOperator_SUM] = &TfLiteParserImpl::ParseSum;
747  m_ParserFunctions[tflite::BuiltinOperator_TANH] = &TfLiteParserImpl::ParseTanH;
748  m_ParserFunctions[tflite::BuiltinOperator_TRANSPOSE] = &TfLiteParserImpl::ParseTranspose;
749  m_ParserFunctions[tflite::BuiltinOperator_TRANSPOSE_CONV] = &TfLiteParserImpl::ParseTransposeConv;
750  m_ParserFunctions[tflite::BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_LSTM]
751  = &TfLiteParserImpl::ParseUnidirectionalSequenceLSTM;
752  m_ParserFunctions[tflite::BuiltinOperator_UNPACK] = &TfLiteParserImpl::ParseUnpack;
753 
754  // register supported custom operators
755  m_CustomParserFunctions["TFLite_Detection_PostProcess"] = &TfLiteParserImpl::ParseDetectionPostProcess;
756 }

◆ ~TfLiteParserImpl()

~TfLiteParserImpl ( )
default

Member Function Documentation

◆ CreateNetworkFromBinary()

INetworkPtr CreateNetworkFromBinary ( const std::vector< uint8_t > &  binaryContent)

Create the network from a flatbuffers binary.

Definition at line 775 of file TfLiteParser.cpp.

References TfLiteParserImpl::LoadModelFromBinary().

776 {
777  ResetParser();
778  m_Model = LoadModelFromBinary(binaryContent.data(), binaryContent.size());
779  return CreateNetworkFromModel();
780 }
static ModelPtr LoadModelFromBinary(const uint8_t *binaryContent, size_t len)

◆ CreateNetworkFromBinaryAsDynamic()

armnn::INetworkPtr CreateNetworkFromBinaryAsDynamic ( const std::vector< uint8_t > &  binaryContent)

◆ CreateNetworkFromBinaryFile()

INetworkPtr CreateNetworkFromBinaryFile ( const char *  graphFile)

Create the network from a flatbuffers binary file on disk.

Definition at line 768 of file TfLiteParser.cpp.

References TfLiteParserImpl::LoadModelFromFile().

769 {
770  ResetParser();
771  m_Model = LoadModelFromFile(graphFile);
772  return CreateNetworkFromModel();
773 }
static ModelPtr LoadModelFromFile(const char *fileName)

◆ GetBuffer()

◆ GetInputs()

TfLiteParserImpl::TensorRawPtrVector GetInputs ( const ModelPtr model,
size_t  subgraphIndex,
size_t  operatorIndex 
)
static

Definition at line 4423 of file TfLiteParser.cpp.

References CHECK_MODEL, and CHECKED_NON_NEGATIVE.

Referenced by armnnTfLiteParser::AsFloatArray(), armnnTfLiteParser::ComputeWrappedIndex(), TfLiteParserImpl::OutputShapeOfReshape(), and TfLiteParserImpl::OutputShapeOfSqueeze().

4426 {
4427  CHECK_MODEL(model, subgraphIndex, operatorIndex);
4428 
4429  const auto& subgraphPtr = model->subgraphs[subgraphIndex];
4430  const auto& operatorPtr = subgraphPtr->operators[operatorIndex];
4431 
4432  size_t inputCount = operatorPtr->inputs.size();
4433  TensorRawPtrVector result;
4434  for (size_t i = 0; i < inputCount; ++i)
4435  {
4436  // If the input location is -1 then assume input is turned off.
4437  if (operatorPtr->inputs[i] == -1)
4438  {
4439  continue;
4440  }
4441  else
4442  {
4443  uint32_t inputId = CHECKED_NON_NEGATIVE(operatorPtr->inputs[i]);
4444  result.push_back(subgraphPtr->tensors[inputId].get());
4445  }
4446  }
4447  return result;
4448 }
#define CHECK_MODEL(MODEL, SUBGRAPH_INDEX, OPERATOR_INDEX)
#define CHECKED_NON_NEGATIVE(VALUE)
std::vector< TensorRawPtr > TensorRawPtrVector

◆ GetInputTensorIds()

std::vector< int32_t > & GetInputTensorIds ( const ModelPtr model,
size_t  subgraphIndex,
size_t  operatorIndex 
)
static

Definition at line 4503 of file TfLiteParser.cpp.

References CHECK_MODEL.

Referenced by armnnTfLiteParser::AsFloatArray(), armnnTfLiteParser::ComputeWrappedIndex(), TfLiteParserImpl::OutputShapeOfReshape(), and TfLiteParserImpl::OutputShapeOfSqueeze().

4506 {
4507  CHECK_MODEL(model, subgraphIndex, operatorIndex);
4508  const auto& subgraphPtr = model->subgraphs[subgraphIndex];
4509  const auto& operatorPtr = subgraphPtr->operators[operatorIndex];
4510  return operatorPtr->inputs;
4511 }
#define CHECK_MODEL(MODEL, SUBGRAPH_INDEX, OPERATOR_INDEX)

◆ GetNetworkInputBindingInfo()

BindingPointInfo GetNetworkInputBindingInfo ( size_t  subgraphId,
const std::string &  name 
) const

Retrieve binding info (layer id and tensor info) for the network input identified by the given layer name and subgraph id.

Definition at line 4846 of file TfLiteParser.cpp.

References CHECK_LOCATION, CHECK_SUBGRAPH, TfLiteParserImpl::GetSubgraphInputs(), TensorInfo::SetConstant(), and armnnDeserializer::ToTensorInfo().

4848 {
4849  CHECK_SUBGRAPH(m_Model, subgraphId);
4850  auto inputs = GetSubgraphInputs(m_Model, subgraphId);
4851  for (auto const& input : inputs)
4852  {
4853  if (input.second->name == name)
4854  {
4855  auto bindingId = GenerateLayerBindingId(subgraphId, input.first);
4856  auto inputTensorInfo = ToTensorInfo(input.second);
4857  // Input tensors are always treated as constant tensors during network execution.
4858  inputTensorInfo.SetConstant(true);
4859  return std::make_pair(bindingId, inputTensorInfo);
4860  }
4861  }
4862 
4863  std::stringstream bindings;
4864  for (auto const& input : inputs)
4865  {
4866  bindings << "'" << input.second->name << "' ";
4867  }
4868 
4869  throw ParseException(
4870  fmt::format("No input binding found for subgraph:{} and name:{}. "
4871  "Possible inputs are: [{}] {}",
4872  subgraphId,
4873  name,
4874  bindings.str(),
4875  CHECK_LOCATION().AsString()));
4876 }
#define CHECK_SUBGRAPH(MODEL, SUBGRAPH_INDEX)
#define CHECK_LOCATION()
Definition: Exceptions.hpp:203
static TensorIdRawPtrVector GetSubgraphInputs(const ModelPtr &model, size_t subgraphIndex)
armnn::TensorInfo ToTensorInfo(TensorRawPtr tensorPtr)

◆ GetNetworkOutputBindingInfo()

BindingPointInfo GetNetworkOutputBindingInfo ( size_t  subgraphId,
const std::string &  name 
) const

Retrieve binding info (layer id and tensor info) for the network output identified by the given layer name and subgraph id.

Definition at line 4878 of file TfLiteParser.cpp.

References CHECK_LOCATION, CHECK_SUBGRAPH, TfLiteParserImpl::GetSubgraphOutputs(), and armnnDeserializer::ToTensorInfo().

4880 {
4881  CHECK_SUBGRAPH(m_Model, subgraphId);
4882  auto outputs = GetSubgraphOutputs(m_Model, subgraphId);
4883  for (unsigned int i = 0; i < outputs.size(); ++i)
4884  {
4885  auto const output = outputs[i];
4886  if (output.second->name == name)
4887  {
4888  auto bindingId = GenerateLayerBindingId(subgraphId, output.first);
4889  std::vector<unsigned int> shape = m_OverridenOutputShapes.size() > 0 ?
4890  m_OverridenOutputShapes[i] : AsUnsignedVector(output.second->shape);
4891  return std::make_pair(bindingId, ToTensorInfo(output.second, shape));
4892  }
4893  }
4894 
4895  std::stringstream bindings;
4896  for (auto const& output : outputs)
4897  {
4898  bindings << "'" << output.second->name << "' ";
4899  }
4900 
4901  throw ParseException(
4902  fmt::format("No output binding found for subgraph:{} and name:{}. "
4903  "Possible outputs are: [{}] {}",
4904  subgraphId,
4905  name,
4906  bindings.str(),
4907  CHECK_LOCATION().AsString()));
4908 }
static TensorIdRawPtrVector GetSubgraphOutputs(const ModelPtr &model, size_t subgraphIndex)
#define CHECK_SUBGRAPH(MODEL, SUBGRAPH_INDEX)
#define CHECK_LOCATION()
Definition: Exceptions.hpp:203
armnn::TensorInfo ToTensorInfo(TensorRawPtr tensorPtr)

◆ GetOutputs()

TfLiteParserImpl::TensorRawPtrVector GetOutputs ( const ModelPtr model,
size_t  subgraphIndex,
size_t  operatorIndex 
)
static

Definition at line 4450 of file TfLiteParser.cpp.

References CHECK_MODEL, CHECK_TENSOR, and CHECKED_NON_NEGATIVE.

Referenced by armnnTfLiteParser::AsFloatArray(), armnnTfLiteParser::ComputeWrappedIndex(), TfLiteParserImpl::OutputShapeOfReshape(), and TfLiteParserImpl::OutputShapeOfSqueeze().

4453 {
4454  CHECK_MODEL(model, subgraphIndex, operatorIndex);
4455 
4456  const auto& subgraphPtr = model->subgraphs[subgraphIndex];
4457  const auto& operatorPtr = subgraphPtr->operators[operatorIndex];
4458 
4459  size_t outputCount = operatorPtr->outputs.size();
4460  TensorRawPtrVector result(outputCount);
4461  for (size_t i = 0; i < outputCount; ++i)
4462  {
4463  uint32_t outputId = CHECKED_NON_NEGATIVE(operatorPtr->outputs[i]);
4464  CHECK_TENSOR(model, subgraphIndex, outputId);
4465  result[i] = subgraphPtr->tensors[outputId].get();
4466  }
4467  return result;
4468 }
#define CHECK_MODEL(MODEL, SUBGRAPH_INDEX, OPERATOR_INDEX)
#define CHECK_TENSOR(MODEL, SUBGRAPH_INDEX, TENSOR_INDEX)
#define CHECKED_NON_NEGATIVE(VALUE)
std::vector< TensorRawPtr > TensorRawPtrVector

◆ GetOutputTensorIds()

std::vector< int32_t > & GetOutputTensorIds ( const ModelPtr model,
size_t  subgraphIndex,
size_t  operatorIndex 
)
static

◆ GetSubgraphCount()

size_t GetSubgraphCount ( ) const

Return the number of subgraphs in the parsed model.

Definition at line 4910 of file TfLiteParser.cpp.

4911 {
4912  return m_Model->subgraphs.size();
4913 }

◆ GetSubgraphInputs()

TfLiteParserImpl::TensorIdRawPtrVector GetSubgraphInputs ( const ModelPtr model,
size_t  subgraphIndex 
)
static

Definition at line 4470 of file TfLiteParser.cpp.

References CHECK_SUBGRAPH, CHECK_TENSOR, and CHECKED_NON_NEGATIVE.

Referenced by TfLiteParserImpl::GetNetworkInputBindingInfo(), TfLiteParserImpl::GetOutputTensorIds(), and TfLiteParserImpl::GetSubgraphInputTensorNames().

4472 {
4473  CHECK_SUBGRAPH(model, subgraphIndex);
4474  const auto& subgraphPtr = model->subgraphs[subgraphIndex];
4475 
4476  size_t inputCount = subgraphPtr->inputs.size();
4477  TensorIdRawPtrVector result(inputCount);
4478  for (size_t i = 0; i < inputCount; ++i)
4479  {
4480  uint32_t inputId = CHECKED_NON_NEGATIVE(subgraphPtr->inputs[i]);
4481  CHECK_TENSOR(model, subgraphIndex, inputId);
4482  result[i] = std::make_pair(inputId, subgraphPtr->tensors[inputId].get());
4483  }
4484  return result;
4485 }
#define CHECK_TENSOR(MODEL, SUBGRAPH_INDEX, TENSOR_INDEX)
#define CHECK_SUBGRAPH(MODEL, SUBGRAPH_INDEX)
std::vector< TensorIdRawPtr > TensorIdRawPtrVector
#define CHECKED_NON_NEGATIVE(VALUE)

◆ GetSubgraphInputTensorNames()

std::vector< std::string > GetSubgraphInputTensorNames ( size_t  subgraphId) const

Return the input tensor names for a given subgraph.

Definition at line 4915 of file TfLiteParser.cpp.

References CHECK_SUBGRAPH, and TfLiteParserImpl::GetSubgraphInputs().

4916 {
4917  CHECK_SUBGRAPH(m_Model, subgraphId);
4918  auto inputs = GetSubgraphInputs(m_Model, subgraphId);
4919  std::vector<std::string> result;
4920  result.reserve(inputs.size());
4921  for (auto const& input : inputs)
4922  {
4923  result.push_back(input.second->name);
4924  }
4925  return result;
4926 }
#define CHECK_SUBGRAPH(MODEL, SUBGRAPH_INDEX)
static TensorIdRawPtrVector GetSubgraphInputs(const ModelPtr &model, size_t subgraphIndex)

◆ GetSubgraphOutputs()

TfLiteParserImpl::TensorIdRawPtrVector GetSubgraphOutputs ( const ModelPtr model,
size_t  subgraphIndex 
)
static

Definition at line 4487 of file TfLiteParser.cpp.

References CHECK_SUBGRAPH, and CHECKED_NON_NEGATIVE.

Referenced by TfLiteParserImpl::GetNetworkOutputBindingInfo(), TfLiteParserImpl::GetOutputTensorIds(), and TfLiteParserImpl::GetSubgraphOutputTensorNames().

4489 {
4490  CHECK_SUBGRAPH(model, subgraphIndex);
4491  const auto& subgraphPtr = model->subgraphs[subgraphIndex];
4492 
4493  size_t outputCount = subgraphPtr->outputs.size();
4494  TensorIdRawPtrVector result(outputCount);
4495  for (size_t i = 0; i < outputCount; ++i)
4496  {
4497  uint32_t outputId = CHECKED_NON_NEGATIVE(subgraphPtr->outputs[i]);
4498  result[i] = std::make_pair(outputId, subgraphPtr->tensors[outputId].get());
4499  }
4500  return result;
4501 }
#define CHECK_SUBGRAPH(MODEL, SUBGRAPH_INDEX)
std::vector< TensorIdRawPtr > TensorIdRawPtrVector
#define CHECKED_NON_NEGATIVE(VALUE)

◆ GetSubgraphOutputTensorNames()

std::vector< std::string > GetSubgraphOutputTensorNames ( size_t  subgraphId) const

Return the output tensor names for a given subgraph.

Definition at line 4928 of file TfLiteParser.cpp.

References CHECK_SUBGRAPH, and TfLiteParserImpl::GetSubgraphOutputs().

4929 {
4930  CHECK_SUBGRAPH(m_Model, subgraphId);
4931  auto outputs = GetSubgraphOutputs(m_Model, subgraphId);
4932  std::vector<std::string> result;
4933  result.reserve(outputs.size());
4934  for (auto const& output : outputs)
4935  {
4936  result.push_back(output.second->name);
4937  }
4938  return result;
4939 }
static TensorIdRawPtrVector GetSubgraphOutputs(const ModelPtr &model, size_t subgraphIndex)
#define CHECK_SUBGRAPH(MODEL, SUBGRAPH_INDEX)

◆ GetVersion()

const std::string GetVersion ( )
static

Retrieve version in X.Y.Z form.

Definition at line 4941 of file TfLiteParser.cpp.

References TFLITE_PARSER_VERSION.

4942 {
4943  return TFLITE_PARSER_VERSION;
4944 }
#define TFLITE_PARSER_VERSION
TFLITE_PARSER_VERSION: "X.Y.Z" where: X = Major version number Y = Minor version number Z = Patch version number
Definition: Version.hpp:25

◆ LoadModel()

armnn::INetworkPtr LoadModel ( std::unique_ptr< tflite::ModelT >  model)

Definition at line 783 of file TfLiteParser.cpp.

References ARMNN_ASSERT, ARMNN_LOG, CHECK_LOCATION, armnn::error, and Exception::what().

784 {
785  ResetParser();
786  m_Model = std::move(model);
787 
788  return CreateNetworkFromModel();
789 }

◆ LoadModelFromBinary()

TfLiteParserImpl::ModelPtr LoadModelFromBinary ( const uint8_t *  binaryContent,
size_t  len 
)
static

Definition at line 4404 of file TfLiteParser.cpp.

References CHECK_LOCATION.

Referenced by TfLiteParserImpl::CreateNetworkFromBinary(), and TfLiteParserImpl::LoadModelFromFile().

4405 {
4406  if (binaryContent == nullptr)
4407  {
4408  throw InvalidArgumentException(fmt::format("Invalid (null) binary content {}",
4409  CHECK_LOCATION().AsString()));
4410  }
4411  flatbuffers::Verifier verifier(binaryContent, len);
4412  if (verifier.VerifyBuffer<tflite::Model>() == false)
4413  {
4414  throw ParseException(
4415  fmt::format("Buffer doesn't conform to the expected Tensorflow Lite "
4416  "flatbuffers format. size:{} {}",
4417  len,
4418  CHECK_LOCATION().AsString()));
4419  }
4420  return tflite::UnPackModel(binaryContent);
4421 }
#define CHECK_LOCATION()
Definition: Exceptions.hpp:203

◆ LoadModelFromFile()

TfLiteParserImpl::ModelPtr LoadModelFromFile ( const char *  fileName)
static

Definition at line 4380 of file TfLiteParser.cpp.

References CHECK_LOCATION, and TfLiteParserImpl::LoadModelFromBinary().

Referenced by TfLiteParserImpl::CreateNetworkFromBinaryFile().

4381 {
4382  if (fileName == nullptr)
4383  {
4384  throw InvalidArgumentException(fmt::format("Invalid (null) file name {}",
4385  CHECK_LOCATION().AsString()));
4386  }
4387  std::error_code errorCode;
4388  fs::path pathToFile(fileName);
4389  if (!fs::exists(pathToFile, errorCode))
4390  {
4391  //fmt::format() could not be used here (format error)
4392  std::stringstream msg;
4393  msg << "Cannot find the file (" << fileName << ") errorCode: " << errorCode
4394  << " " << CHECK_LOCATION().AsString();
4395 
4396  throw FileNotFoundException(msg.str());
4397  }
4398  std::ifstream file(fileName, std::ios::binary);
4399  std::string fileContent((std::istreambuf_iterator<char>(file)), std::istreambuf_iterator<char>());
4400  return LoadModelFromBinary(reinterpret_cast<const uint8_t *>(fileContent.c_str()),
4401  fileContent.size());
4402 }
static ModelPtr LoadModelFromBinary(const uint8_t *binaryContent, size_t len)
#define CHECK_LOCATION()
Definition: Exceptions.hpp:203

◆ OutputShapeOfReshape()

armnn::TensorInfo OutputShapeOfReshape ( const armnn::TensorInfo inputTensorInfo,
const std::vector< int32_t > &  targetDimsIn 
)
static

Definition at line 2671 of file TfLiteParser.cpp.

References ARMNN_ASSERT, ARMNN_THROW_PARSE_EXCEPTION, CHECK_LOCATION, CHECK_MODEL, CHECK_SUPPORTED_FUSED_ACTIVATION, CHECK_VALID_SIZE, CHECKED_NON_NEGATIVE, armnnDeserializer::CheckShape(), IOutputSlot::Connect(), TfLiteParserImpl::GetBuffer(), TensorInfo::GetDataType(), BaseTensor< MemoryType >::GetInfo(), TfLiteParserImpl::GetInputs(), IConnectableLayer::GetInputSlot(), TfLiteParserImpl::GetInputTensorIds(), IConnectableLayer::GetName(), TensorInfo::GetNumBytes(), TensorShape::GetNumDimensions(), TensorInfo::GetNumDimensions(), TensorInfo::GetNumElements(), IConnectableLayer::GetNumOutputSlots(), TfLiteParserImpl::GetOutputs(), IConnectableLayer::GetOutputSlot(), TfLiteParserImpl::GetOutputTensorIds(), TensorInfo::GetQuantizationOffset(), TensorInfo::GetQuantizationScale(), TensorInfo::GetShape(), armnnUtils::GetUnsignedAxis(), LstmDescriptor::m_ActivationFunc, StackDescriptor::m_Axis, FullyConnectedDescriptor::m_BiasEnabled, LstmInputParams::m_CellBias, LstmInputParams::m_CellLayerNormWeights, LstmInputParams::m_CellToForgetWeights, LstmInputParams::m_CellToInputWeights, LstmInputParams::m_CellToOutputWeights, FullyConnectedDescriptor::m_ConstantWeights, DetectionPostProcessDescriptor::m_DetectionsPerClass, LstmInputParams::m_ForgetGateBias, LstmInputParams::m_ForgetLayerNormWeights, LstmInputParams::m_InputGateBias, LstmInputParams::m_InputLayerNormWeights, StackDescriptor::m_InputShape, LstmInputParams::m_InputToCellWeights, LstmInputParams::m_InputToForgetWeights, LstmInputParamsInfo::m_InputToForgetWeights, LstmInputParams::m_InputToInputWeights, LstmInputParams::m_InputToOutputWeights, DetectionPostProcessDescriptor::m_MaxClassesPerDetection, DetectionPostProcessDescriptor::m_MaxDetections, ResizeDescriptor::m_Method, DetectionPostProcessDescriptor::m_NmsIouThreshold, DetectionPostProcessDescriptor::m_NmsScoreThreshold, DetectionPostProcessDescriptor::m_NumClasses, StackDescriptor::m_NumInputs, LstmInputParams::m_OutputGateBias, 
LstmInputParams::m_OutputLayerNormWeights, LstmInputParams::m_ProjectionBias, LstmInputParams::m_ProjectionWeights, LstmInputParams::m_RecurrentToCellWeights, LstmInputParams::m_RecurrentToForgetWeights, LstmInputParams::m_RecurrentToInputWeights, LstmInputParams::m_RecurrentToOutputWeights, DetectionPostProcessDescriptor::m_ScaleH, DetectionPostProcessDescriptor::m_ScaleW, DetectionPostProcessDescriptor::m_ScaleX, DetectionPostProcessDescriptor::m_ScaleY, ReshapeDescriptor::m_TargetShape, FullyConnectedDescriptor::m_TransposeWeightMatrix, DetectionPostProcessDescriptor::m_UseRegularNms, armnn::MaxNumOfTensorDimensions, armnn::NHWC, armnn::numeric_cast(), armnnUtils::ProcessConcatInputTensorInfo(), OriginsDescriptor::SetConcatAxis(), TensorInfo::SetShape(), IOutputSlot::SetTensorInfo(), ViewsDescriptor::SetViewOriginCoord(), ViewsDescriptor::SetViewSize(), and armnnDeserializer::ToTensorInfo().

2673 {
2674  std::vector<unsigned int> outputDims(targetDimsIn.begin(), targetDimsIn.end());
2675  const auto stretchDim = std::find(targetDimsIn.begin(), targetDimsIn.end(), -1);
2676 
2677  if (stretchDim != targetDimsIn.end())
2678  {
2679  if (std::find(std::next(stretchDim), targetDimsIn.end(), -1) != targetDimsIn.end())
2680  {
2681  throw ParseException(
2682  fmt::format("At most one component of shape can be -1 {}", CHECK_LOCATION().AsString()));
2683  }
2684 
2685  auto targetNumElements =
2686  armnn::numeric_cast<unsigned int>(
2687  std::accumulate(targetDimsIn.begin(), targetDimsIn.end(), -1, std::multiplies<int32_t>()));
2688 
2689  auto stretchIndex = static_cast<size_t>(std::distance(targetDimsIn.begin(), stretchDim));
2690  outputDims[stretchIndex] = inputTensorInfo.GetNumElements() / targetNumElements;
2691  }
2692 
2693  TensorShape outputShape = TensorShape(static_cast<unsigned int>(outputDims.size()), outputDims.data());
2694 
2695  TensorInfo reshapeInfo = inputTensorInfo;
2696  reshapeInfo.SetShape(outputShape);
2697 
2698  return reshapeInfo;
2699 }
void SetShape(const TensorShape &newShape)
Definition: Tensor.hpp:193
#define CHECK_LOCATION()
Definition: Exceptions.hpp:203
std::enable_if_t< std::is_unsigned< Source >::value &&std::is_unsigned< Dest >::value, Dest > numeric_cast(Source source)
Definition: NumericCast.hpp:35
unsigned int GetNumElements() const
Definition: Tensor.hpp:196

◆ OutputShapeOfSqueeze()

armnn::TensorInfo OutputShapeOfSqueeze ( std::vector< uint32_t >  squeezeDims,
const armnn::TensorInfo inputTensorInfo 
)
static

Definition at line 1928 of file TfLiteParser.cpp.

References ARMNN_ASSERT, ARMNN_THROW_PARSE_EXCEPTION, CHECK_LOCATION, CHECK_MODEL, CHECK_VALID_SIZE, IOutputSlot::Connect(), armnn::Float32, TfLiteParserImpl::GetBuffer(), TensorInfo::GetDataType(), TfLiteParserImpl::GetInputs(), IConnectableLayer::GetInputSlot(), TfLiteParserImpl::GetInputTensorIds(), TensorInfo::GetNumBytes(), TensorShape::GetNumDimensions(), TensorInfo::GetNumDimensions(), TensorInfo::GetNumElements(), TfLiteParserImpl::GetOutputs(), IConnectableLayer::GetOutputSlot(), TfLiteParserImpl::GetOutputTensorIds(), TensorInfo::GetShape(), armnn::IgnoreUnused(), ActivationDescriptor::m_A, MeanDescriptor::m_Axis, ActivationDescriptor::m_B, StridedSliceDescriptor::m_Begin, StridedSliceDescriptor::m_BeginMask, StridedSliceDescriptor::m_DataLayout, StridedSliceDescriptor::m_EllipsisMask, StridedSliceDescriptor::m_End, StridedSliceDescriptor::m_EndMask, ActivationDescriptor::m_Function, MeanDescriptor::m_KeepDims, StridedSliceDescriptor::m_NewAxisMask, PadDescriptor::m_PaddingMode, PadDescriptor::m_PadList, PadDescriptor::m_PadValue, StridedSliceDescriptor::m_ShrinkAxisMask, StridedSliceDescriptor::m_Stride, ReshapeDescriptor::m_TargetShape, armnn::NHWC, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS8, TensorInfo::SetShape(), IOutputSlot::SetTensorInfo(), armnn::Signed32, armnn::Signed64, and armnnDeserializer::ToTensorInfo().

1930 {
1931  CHECK_VALID_SIZE(squeezeDims.size(), 0, 1, 2, 3, 4);
1932  static const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };
1933 
1934  if (inputTensorInfo.GetNumDimensions() > 4)
1935  {
1936  std::stringstream ss;
1937  ss << "Input tensor has unexpected number of dimensions:" << inputTensorInfo.GetNumDimensions()
1938  << " shape:" << inputTensorInfo.GetShape() << " "
1939  << CHECK_LOCATION().AsString();
1940  throw ParseException(ss.str());
1941  }
1942 
1943  if (squeezeDims.empty())
1944  {
1945  squeezeDims.assign(dimensionSequence,
1946  dimensionSequence+inputTensorInfo.GetNumDimensions());
1947  }
1948 
1949  std::vector<uint32_t> outputDims;
1950  for(unsigned int i = 0; i < inputTensorInfo.GetNumDimensions(); i++)
1951  {
1952  bool skipSqueeze = (std::find(squeezeDims.begin(), squeezeDims.end(), i) == squeezeDims.end());
1953  auto currentDimension = inputTensorInfo.GetShape()[i];
1954  if (skipSqueeze || currentDimension != 1)
1955  {
1956  outputDims.push_back(currentDimension);
1957  }
1958  }
1959 
1960  if (outputDims.size() > 4)
1961  {
1962  std::stringstream ss;
1963  ss << "Output tensor has unexpected number of dimensions:" << inputTensorInfo.GetNumDimensions()
1964  << " shape:" << inputTensorInfo.GetShape() << " "
1965  << CHECK_LOCATION().AsString();
1966  throw ParseException(ss.str());
1967  }
1968 
1969  TensorShape outShape = TensorShape(static_cast<unsigned int>(outputDims.size()),
1970  outputDims.data());
1971 
1972  // we need to preserve the tensor type and the quantization data as well
1973  TensorInfo outTensorInfo = inputTensorInfo;
1974  outTensorInfo.SetShape(outShape);
1975 
1976  return outTensorInfo;
1977 }
const TensorShape & GetShape() const
Definition: Tensor.hpp:191
void SetShape(const TensorShape &newShape)
Definition: Tensor.hpp:193
#define CHECK_VALID_SIZE(ACTUAL,...)
#define CHECK_LOCATION()
Definition: Exceptions.hpp:203
unsigned int GetNumDimensions() const
Definition: Tensor.hpp:195

The documentation for this class was generated from the following files: