ArmNN 21.02
TfLiteParserImpl Class Reference

#include <TfLiteParser.hpp>

Public Types

using ModelPtr = std::unique_ptr< tflite::ModelT >
 
using SubgraphPtr = std::unique_ptr< tflite::SubGraphT >
 
using OperatorPtr = std::unique_ptr< tflite::OperatorT >
 
using OperatorCodePtr = std::unique_ptr< tflite::OperatorCodeT >
 
using TensorPtr = std::unique_ptr< tflite::TensorT >
 
using TensorRawPtr = const tflite::TensorT *
 
using TensorRawPtrVector = std::vector< TensorRawPtr >
 
using TensorIdRawPtr = std::pair< size_t, TensorRawPtr >
 
using TensorIdRawPtrVector = std::vector< TensorIdRawPtr >
 
using BufferPtr = std::unique_ptr< tflite::BufferT >
 
using BufferRawPtr = const tflite::BufferT *
 

Public Member Functions

armnn::INetworkPtr CreateNetworkFromBinaryFile (const char *graphFile)
 Create the network from a FlatBuffers binary file on disk.
 
armnn::INetworkPtr CreateNetworkFromBinary (const std::vector< uint8_t > &binaryContent)
 Create the network from a FlatBuffers binary.
 
BindingPointInfo GetNetworkInputBindingInfo (size_t subgraphId, const std::string &name) const
 Retrieve binding info (layer id and tensor info) for the network input identified by the given layer name and subgraph id.
 
BindingPointInfo GetNetworkOutputBindingInfo (size_t subgraphId, const std::string &name) const
 Retrieve binding info (layer id and tensor info) for the network output identified by the given layer name and subgraph id.
 
size_t GetSubgraphCount () const
 Return the number of subgraphs in the parsed model.
 
std::vector< std::string > GetSubgraphInputTensorNames (size_t subgraphId) const
 Return the input tensor names for a given subgraph.
 
std::vector< std::string > GetSubgraphOutputTensorNames (size_t subgraphId) const
 Return the output tensor names for a given subgraph.
 
 TfLiteParserImpl (const armnn::Optional< ITfLiteParser::TfLiteParserOptions > &options=armnn::EmptyOptional())
 
 ~TfLiteParserImpl ()=default
 

Static Public Member Functions

static ModelPtr LoadModelFromFile (const char *fileName)
 
static ModelPtr LoadModelFromBinary (const uint8_t *binaryContent, size_t len)
 
static TensorRawPtrVector GetInputs (const ModelPtr &model, size_t subgraphIndex, size_t operatorIndex)
 
static TensorRawPtrVector GetOutputs (const ModelPtr &model, size_t subgraphIndex, size_t operatorIndex)
 
static TensorIdRawPtrVector GetSubgraphInputs (const ModelPtr &model, size_t subgraphIndex)
 
static TensorIdRawPtrVector GetSubgraphOutputs (const ModelPtr &model, size_t subgraphIndex)
 
static std::vector< int32_t > & GetInputTensorIds (const ModelPtr &model, size_t subgraphIndex, size_t operatorIndex)
 
static std::vector< int32_t > & GetOutputTensorIds (const ModelPtr &model, size_t subgraphIndex, size_t operatorIndex)
 
static BufferRawPtr GetBuffer (const ModelPtr &model, size_t bufferIndex)
 
static armnn::TensorInfo OutputShapeOfSqueeze (const std::vector< uint32_t > &squeezeDims, const armnn::TensorInfo &inputTensorInfo)
 
static armnn::TensorInfo OutputShapeOfReshape (const armnn::TensorInfo &inputTensorInfo, const std::vector< int32_t > &targetDimsIn)
 
static const std::string GetVersion ()
 Retrieve version in X.Y.Z form.
 

Detailed Description

Definition at line 19 of file TfLiteParser.hpp.
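
TfLiteParserImpl is the implementation behind the public ITfLiteParser facade. The following is a minimal, hedged sketch of the typical workflow through that facade; the model path and the tensor names ("model.tflite", "input", "output") are placeholders, not part of this API.

#include <armnn/ArmNN.hpp>
#include <armnnTfLiteParser/ITfLiteParser.hpp>

int main()
{
    using namespace armnnTfLiteParser;

    // Parse the FlatBuffers model into an armnn::INetwork.
    ITfLiteParserPtr parser = ITfLiteParser::Create();
    armnn::INetworkPtr network = parser->CreateNetworkFromBinaryFile("model.tflite");

    // Look up binding points on subgraph 0; the names depend on the model.
    BindingPointInfo inputBinding  = parser->GetNetworkInputBindingInfo(0, "input");
    BindingPointInfo outputBinding = parser->GetNetworkOutputBindingInfo(0, "output");

    // Optimize for a backend and load into the runtime as usual.
    armnn::IRuntimePtr runtime = armnn::IRuntime::Create(armnn::IRuntime::CreationOptions());
    armnn::IOptimizedNetworkPtr optNet =
        armnn::Optimize(*network, { armnn::Compute::CpuRef }, runtime->GetDeviceSpec());
    armnn::NetworkId networkId = 0;
    runtime->LoadNetwork(networkId, std::move(optNet));
    // ... build input/output tensors from the bindings and call EnqueueWorkload().
    return 0;
}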

Member Typedef Documentation

◆ BufferPtr

using BufferPtr = std::unique_ptr<tflite::BufferT>

Definition at line 32 of file TfLiteParser.hpp.

◆ BufferRawPtr

using BufferRawPtr = const tflite::BufferT *

Definition at line 33 of file TfLiteParser.hpp.

◆ ModelPtr

using ModelPtr = std::unique_ptr<tflite::ModelT>

Definition at line 23 of file TfLiteParser.hpp.

◆ OperatorCodePtr

using OperatorCodePtr = std::unique_ptr<tflite::OperatorCodeT>

Definition at line 26 of file TfLiteParser.hpp.

◆ OperatorPtr

using OperatorPtr = std::unique_ptr<tflite::OperatorT>

Definition at line 25 of file TfLiteParser.hpp.

◆ SubgraphPtr

using SubgraphPtr = std::unique_ptr<tflite::SubGraphT>

Definition at line 24 of file TfLiteParser.hpp.

◆ TensorIdRawPtr

using TensorIdRawPtr = std::pair<size_t, TensorRawPtr>

Definition at line 30 of file TfLiteParser.hpp.

◆ TensorIdRawPtrVector

using TensorIdRawPtrVector = std::vector<TensorIdRawPtr>

Definition at line 31 of file TfLiteParser.hpp.

◆ TensorPtr

using TensorPtr = std::unique_ptr<tflite::TensorT>

Definition at line 27 of file TfLiteParser.hpp.

◆ TensorRawPtr

using TensorRawPtr = const tflite::TensorT *

Definition at line 28 of file TfLiteParser.hpp.

◆ TensorRawPtrVector

using TensorRawPtrVector = std::vector<TensorRawPtr>

Definition at line 29 of file TfLiteParser.hpp.

Constructor & Destructor Documentation

◆ TfLiteParserImpl()

Definition at line 600 of file TfLiteParser.cpp.

601 : m_Options(options)
602 , m_Network(nullptr, nullptr)
603 , m_ParserFunctions(tflite::BuiltinOperator_MAX+1, &TfLiteParserImpl::ParseUnsupportedOperator)
604 {
605  // register supported operators
606  m_ParserFunctions[tflite::BuiltinOperator_ADD] = &TfLiteParserImpl::ParseAdd;
607  m_ParserFunctions[tflite::BuiltinOperator_AVERAGE_POOL_2D] = &TfLiteParserImpl::ParseAveragePool2D;
608  m_ParserFunctions[tflite::BuiltinOperator_BATCH_TO_SPACE_ND] = &TfLiteParserImpl::ParseBatchToSpaceND;
609  m_ParserFunctions[tflite::BuiltinOperator_CONCATENATION] = &TfLiteParserImpl::ParseConcatenation;
610  m_ParserFunctions[tflite::BuiltinOperator_CONV_2D] = &TfLiteParserImpl::ParseConv2D;
611  m_ParserFunctions[tflite::BuiltinOperator_CUSTOM] = &TfLiteParserImpl::ParseCustomOperator;
612  m_ParserFunctions[tflite::BuiltinOperator_DEPTH_TO_SPACE] = &TfLiteParserImpl::ParseDepthToSpace;
613  m_ParserFunctions[tflite::BuiltinOperator_DEPTHWISE_CONV_2D] = &TfLiteParserImpl::ParseDepthwiseConv2D;
614  m_ParserFunctions[tflite::BuiltinOperator_DEQUANTIZE] = &TfLiteParserImpl::ParseDequantize;
615  m_ParserFunctions[tflite::BuiltinOperator_ELU] = &TfLiteParserImpl::ParseElu;
616  m_ParserFunctions[tflite::BuiltinOperator_EXP] = &TfLiteParserImpl::ParseExp;
617  m_ParserFunctions[tflite::BuiltinOperator_FULLY_CONNECTED] = &TfLiteParserImpl::ParseFullyConnected;
618  m_ParserFunctions[tflite::BuiltinOperator_GATHER] = &TfLiteParserImpl::ParseGather;
619  m_ParserFunctions[tflite::BuiltinOperator_HARD_SWISH] = &TfLiteParserImpl::ParseHardSwish;
620  m_ParserFunctions[tflite::BuiltinOperator_LEAKY_RELU] = &TfLiteParserImpl::ParseLeakyRelu;
621  m_ParserFunctions[tflite::BuiltinOperator_LOGISTIC] = &TfLiteParserImpl::ParseLogistic;
622  m_ParserFunctions[tflite::BuiltinOperator_L2_NORMALIZATION] = &TfLiteParserImpl::ParseL2Normalization;
623  m_ParserFunctions[tflite::BuiltinOperator_MAX_POOL_2D] = &TfLiteParserImpl::ParseMaxPool2D;
624  m_ParserFunctions[tflite::BuiltinOperator_MAXIMUM] = &TfLiteParserImpl::ParseMaximum;
625  m_ParserFunctions[tflite::BuiltinOperator_MEAN] = &TfLiteParserImpl::ParseMean;
626  m_ParserFunctions[tflite::BuiltinOperator_MINIMUM] = &TfLiteParserImpl::ParseMinimum;
627  m_ParserFunctions[tflite::BuiltinOperator_MUL] = &TfLiteParserImpl::ParseMul;
628  m_ParserFunctions[tflite::BuiltinOperator_NEG] = &TfLiteParserImpl::ParseNeg;
629  m_ParserFunctions[tflite::BuiltinOperator_PACK] = &TfLiteParserImpl::ParsePack;
630  m_ParserFunctions[tflite::BuiltinOperator_PAD] = &TfLiteParserImpl::ParsePad;
631  m_ParserFunctions[tflite::BuiltinOperator_QUANTIZE] = &TfLiteParserImpl::ParseQuantize;
632  m_ParserFunctions[tflite::BuiltinOperator_RELU] = &TfLiteParserImpl::ParseRelu;
633  m_ParserFunctions[tflite::BuiltinOperator_RELU6] = &TfLiteParserImpl::ParseRelu6;
634  m_ParserFunctions[tflite::BuiltinOperator_REDUCE_MAX] = &TfLiteParserImpl::ParseReduceMax;
635  m_ParserFunctions[tflite::BuiltinOperator_REDUCE_MIN] = &TfLiteParserImpl::ParseReduceMin;
636  m_ParserFunctions[tflite::BuiltinOperator_RESHAPE] = &TfLiteParserImpl::ParseReshape;
637  m_ParserFunctions[tflite::BuiltinOperator_RESIZE_BILINEAR] = &TfLiteParserImpl::ParseResizeBilinear;
638  m_ParserFunctions[tflite::BuiltinOperator_RESIZE_NEAREST_NEIGHBOR] = &TfLiteParserImpl::ParseResizeNearestNeighbor;
639  m_ParserFunctions[tflite::BuiltinOperator_SLICE] = &TfLiteParserImpl::ParseSlice;
640  m_ParserFunctions[tflite::BuiltinOperator_SOFTMAX] = &TfLiteParserImpl::ParseSoftmax;
641  m_ParserFunctions[tflite::BuiltinOperator_SPACE_TO_BATCH_ND] = &TfLiteParserImpl::ParseSpaceToBatchND;
642  m_ParserFunctions[tflite::BuiltinOperator_SPLIT] = &TfLiteParserImpl::ParseSplit;
643  m_ParserFunctions[tflite::BuiltinOperator_SPLIT_V] = &TfLiteParserImpl::ParseSplitV;
644  m_ParserFunctions[tflite::BuiltinOperator_SQUEEZE] = &TfLiteParserImpl::ParseSqueeze;
645  m_ParserFunctions[tflite::BuiltinOperator_STRIDED_SLICE] = &TfLiteParserImpl::ParseStridedSlice;
646  m_ParserFunctions[tflite::BuiltinOperator_SUB] = &TfLiteParserImpl::ParseSub;
647  m_ParserFunctions[tflite::BuiltinOperator_SUM] = &TfLiteParserImpl::ParseSum;
648  m_ParserFunctions[tflite::BuiltinOperator_TANH] = &TfLiteParserImpl::ParseTanH;
649  m_ParserFunctions[tflite::BuiltinOperator_TRANSPOSE] = &TfLiteParserImpl::ParseTranspose;
650  m_ParserFunctions[tflite::BuiltinOperator_TRANSPOSE_CONV] = &TfLiteParserImpl::ParseTransposeConv;
651  m_ParserFunctions[tflite::BuiltinOperator_UNPACK] = &TfLiteParserImpl::ParseUnpack;
652  m_ParserFunctions[tflite::BuiltinOperator_DIV] = &TfLiteParserImpl::ParseDiv;
653  m_ParserFunctions[tflite::BuiltinOperator_ARG_MAX] = &TfLiteParserImpl::ParseArgMax;
654  // register supported custom operators
655  m_CustomParserFunctions["TFLite_Detection_PostProcess"] = &TfLiteParserImpl::ParseDetectionPostProcess;
656 }
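
The constructor fills m_ParserFunctions with one entry per TfLite builtin operator, defaulting every slot to ParseUnsupportedOperator. A hedged sketch of passing parser options through the public facade follows; the m_StandInLayerForUnsupported field name is taken from the 21.02 ITfLiteParser::TfLiteParserOptions and should be treated as an assumption if your tree differs.

#include <armnnTfLiteParser/ITfLiteParser.hpp>

// Ask the parser to insert StandIn layers for unregistered operators
// instead of throwing from ParseUnsupportedOperator.
armnnTfLiteParser::ITfLiteParser::TfLiteParserOptions options;
options.m_StandInLayerForUnsupported = true;

auto parser = armnnTfLiteParser::ITfLiteParser::Create(
    armnn::Optional<armnnTfLiteParser::ITfLiteParser::TfLiteParserOptions>(options));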

◆ ~TfLiteParserImpl()

~TfLiteParserImpl ( )=default

Member Function Documentation

◆ CreateNetworkFromBinary()

INetworkPtr CreateNetworkFromBinary ( const std::vector< uint8_t > &  binaryContent)

Create the network from a FlatBuffers binary.

Definition at line 672 of file TfLiteParser.cpp.

References ARMNN_ASSERT, ARMNN_ASSERT_MSG, ARMNN_LOG, armnnTfParser::CalcPadding(), CHECK_LOCATION, CHECK_MODEL, CHECK_SUPPORTED_FUSED_ACTIVATION, CHECK_TENSOR, CHECK_VALID_SIZE, CHECKED_NON_NEGATIVE, armnn::error, TfLiteParserImpl::GetBuffer(), TensorInfo::GetDataType(), TfLiteParserImpl::GetInputs(), TfLiteParserImpl::GetInputTensorIds(), TensorInfo::GetNumBytes(), TensorInfo::GetNumElements(), TfLiteParserImpl::GetOutputs(), IConnectableLayer::GetOutputSlot(), TfLiteParserImpl::GetOutputTensorIds(), TensorInfo::GetShape(), TfLiteParserImpl::LoadModelFromBinary(), SoftmaxDescriptor::m_Beta, Convolution2dDescriptor::m_BiasEnabled, DepthwiseConvolution2dDescriptor::m_BiasEnabled, TransposeConvolution2dDescriptor::m_BiasEnabled, BatchToSpaceNdDescriptor::m_BlockShape, SpaceToBatchNdDescriptor::m_BlockShape, BatchToSpaceNdDescriptor::m_Crops, Pooling2dDescriptor::m_DataLayout, Convolution2dDescriptor::m_DataLayout, DepthwiseConvolution2dDescriptor::m_DataLayout, L2NormalizationDescriptor::m_DataLayout, BatchToSpaceNdDescriptor::m_DataLayout, SpaceToBatchNdDescriptor::m_DataLayout, TransposeConvolution2dDescriptor::m_DataLayout, Convolution2dDescriptor::m_DilationX, DepthwiseConvolution2dDescriptor::m_DilationX, Convolution2dDescriptor::m_DilationY, DepthwiseConvolution2dDescriptor::m_DilationY, ElementwiseUnaryDescriptor::m_Operation, TransposeConvolution2dDescriptor::m_OutputShape, TransposeConvolution2dDescriptor::m_OutputShapeEnabled, Pooling2dDescriptor::m_OutputShapeRounding, Pooling2dDescriptor::m_PadBottom, Convolution2dDescriptor::m_PadBottom, DepthwiseConvolution2dDescriptor::m_PadBottom, TransposeConvolution2dDescriptor::m_PadBottom, Pooling2dDescriptor::m_PaddingMethod, Pooling2dDescriptor::m_PadLeft, Convolution2dDescriptor::m_PadLeft, DepthwiseConvolution2dDescriptor::m_PadLeft, TransposeConvolution2dDescriptor::m_PadLeft, SpaceToBatchNdDescriptor::m_PadList, Pooling2dDescriptor::m_PadRight, Convolution2dDescriptor::m_PadRight, DepthwiseConvolution2dDescriptor::m_PadRight, TransposeConvolution2dDescriptor::m_PadRight, Pooling2dDescriptor::m_PadTop, Convolution2dDescriptor::m_PadTop, DepthwiseConvolution2dDescriptor::m_PadTop, TransposeConvolution2dDescriptor::m_PadTop, Pooling2dDescriptor::m_PoolHeight, Pooling2dDescriptor::m_PoolType, Pooling2dDescriptor::m_PoolWidth, Pooling2dDescriptor::m_StrideX, Convolution2dDescriptor::m_StrideX, DepthwiseConvolution2dDescriptor::m_StrideX, TransposeConvolution2dDescriptor::m_StrideX, Pooling2dDescriptor::m_StrideY, Convolution2dDescriptor::m_StrideY, DepthwiseConvolution2dDescriptor::m_StrideY, TransposeConvolution2dDescriptor::m_StrideY, armnn::NHWC, armnn::numeric_cast(), TensorInfo::SetShape(), IOutputSlot::SetTensorInfo(), armnnDeserializer::ToTensorInfo(), and Exception::what().

673 {
674  ResetParser();
675  m_Model = LoadModelFromBinary(binaryContent.data(), binaryContent.size());
676  return CreateNetworkFromModel();
677 }
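
A hedged usage sketch: reading a model file into memory and handing the bytes to CreateNetworkFromBinary. The helper name and path are hypothetical; this is equivalent to CreateNetworkFromBinaryFile, which performs the same read internally.

#include <armnnTfLiteParser/ITfLiteParser.hpp>
#include <fstream>
#include <iterator>
#include <vector>

armnn::INetworkPtr ParseFromMemory(const char* path)   // hypothetical helper
{
    // Slurp the whole .tflite file into a byte vector.
    std::ifstream file(path, std::ios::binary);
    std::vector<uint8_t> content((std::istreambuf_iterator<char>(file)),
                                 std::istreambuf_iterator<char>());

    auto parser = armnnTfLiteParser::ITfLiteParser::Create();
    return parser->CreateNetworkFromBinary(content);
}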

◆ CreateNetworkFromBinaryFile()

INetworkPtr CreateNetworkFromBinaryFile ( const char *  graphFile)

Create the network from a FlatBuffers binary file on disk.

Definition at line 665 of file TfLiteParser.cpp.

References TfLiteParserImpl::LoadModelFromFile().

666 {
667  ResetParser();
668  m_Model = LoadModelFromFile(graphFile);
669  return CreateNetworkFromModel();
670 }

◆ GetBuffer()

TfLiteParserImpl::BufferRawPtr GetBuffer ( const ModelPtr model,
size_t  bufferIndex 
)
static

◆ GetInputs()

TfLiteParserImpl::TensorRawPtrVector GetInputs ( const ModelPtr model,
size_t  subgraphIndex,
size_t  operatorIndex 
)
static

Definition at line 3243 of file TfLiteParser.cpp.

References CHECK_MODEL, and CHECKED_NON_NEGATIVE.

Referenced by armnnTfLiteParser::ComputeWrappedIndex(), TfLiteParserImpl::CreateNetworkFromBinary(), TfLiteParserImpl::OutputShapeOfReshape(), and TfLiteParserImpl::OutputShapeOfSqueeze().

3246 {
3247  CHECK_MODEL(model, subgraphIndex, operatorIndex);
3248 
3249  const auto & subgraphPtr = model->subgraphs[subgraphIndex];
3250  const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
3251 
3252  size_t inputCount = operatorPtr->inputs.size();
3253  TensorRawPtrVector result(inputCount);
3254  for (size_t i=0; i<inputCount; ++i)
3255  {
3256  uint32_t inputId = CHECKED_NON_NEGATIVE(operatorPtr->inputs[i]);
3257  result[i] = subgraphPtr->tensors[inputId].get();
3258  }
3259  return result;
3260 }
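
GetInputs and GetOutputs resolve an operator's tensor indices into raw tflite::TensorT pointers. A sketch walking every operator of subgraph 0 follows; TfLiteParser.hpp is the implementation header rather than the installed facade, so this pattern is mainly useful in tests and tooling, and the helper name is hypothetical.

#include <TfLiteParser.hpp>   // implementation header, not the public facade
#include <iostream>

void DumpOperatorTensors(const char* path)   // hypothetical helper
{
    using armnnTfLiteParser::TfLiteParserImpl;

    TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromFile(path);
    size_t numOperators = model->subgraphs[0]->operators.size();
    for (size_t op = 0; op < numOperators; ++op)
    {
        for (auto* tensor : TfLiteParserImpl::GetInputs(model, 0, op))
        {
            std::cout << "op " << op << " input:  " << tensor->name << "\n";
        }
        for (auto* tensor : TfLiteParserImpl::GetOutputs(model, 0, op))
        {
            std::cout << "op " << op << " output: " << tensor->name << "\n";
        }
    }
}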

◆ GetInputTensorIds()

std::vector< int32_t > & GetInputTensorIds ( const ModelPtr model,
size_t  subgraphIndex,
size_t  operatorIndex 
)
static

Definition at line 3315 of file TfLiteParser.cpp.

References CHECK_MODEL.

Referenced by armnnTfLiteParser::ComputeWrappedIndex(), TfLiteParserImpl::CreateNetworkFromBinary(), TfLiteParserImpl::OutputShapeOfReshape(), and TfLiteParserImpl::OutputShapeOfSqueeze().

3318 {
3319  CHECK_MODEL(model, subgraphIndex, operatorIndex);
3320  const auto & subgraphPtr = model->subgraphs[subgraphIndex];
3321  const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
3322  return operatorPtr->inputs;
3323 }

◆ GetNetworkInputBindingInfo()

BindingPointInfo GetNetworkInputBindingInfo ( size_t  subgraphId,
const std::string &  name 
) const

Retrieve binding info (layer id and tensor info) for the network input identified by the given layer name and subgraph id.

Definition at line 3531 of file TfLiteParser.cpp.

References CHECK_LOCATION, CHECK_SUBGRAPH, TfLiteParserImpl::GetSubgraphInputs(), and armnnDeserializer::ToTensorInfo().

3533 {
3534  CHECK_SUBGRAPH(m_Model, subgraphId);
3535  auto inputs = GetSubgraphInputs(m_Model, subgraphId);
3536  for (auto const & input : inputs)
3537  {
3538  if (input.second->name == name)
3539  {
3540  auto bindingId = GenerateLayerBindingId(subgraphId, input.first);
3541  return std::make_pair(bindingId, ToTensorInfo(input.second));
3542  }
3543  }
3544 
3545  std::stringstream bindings;
3546  for (auto const & input : inputs)
3547  {
3548  bindings << "'" << input.second->name << "' ";
3549  }
3550 
3551  throw ParseException(
3552  fmt::format("No input binding found for subgraph:{} and name:{}. "
3553  "Possible inputs are: [{}] {}",
3554  subgraphId,
3555  name,
3556  bindings.str(),
3557  CHECK_LOCATION().AsString()));
3558 }
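
The returned BindingPointInfo is a pair of armnn::LayerBindingId and armnn::TensorInfo. A hedged sketch turning it into an entry of armnn::InputTensors follows; 'parser' is the handle from the earlier sketches, and the tensor name "input" and float element type are assumptions about the model.

armnnTfLiteParser::BindingPointInfo inputBinding =
    parser->GetNetworkInputBindingInfo(0, "input");

// Allocate a buffer matching the bound tensor and wrap it as a ConstTensor.
std::vector<float> inputData(inputBinding.second.GetNumElements(), 0.0f);
armnn::InputTensors inputTensors{
    { inputBinding.first, armnn::ConstTensor(inputBinding.second, inputData.data()) }
};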

◆ GetNetworkOutputBindingInfo()

BindingPointInfo GetNetworkOutputBindingInfo ( size_t  subgraphId,
const std::string &  name 
) const

Retrieve binding info (layer id and tensor info) for the network output identified by the given layer name and subgraph id.

Definition at line 3560 of file TfLiteParser.cpp.

References CHECK_LOCATION, CHECK_SUBGRAPH, TfLiteParserImpl::GetSubgraphOutputs(), and armnnDeserializer::ToTensorInfo().

3562 {
3563  CHECK_SUBGRAPH(m_Model, subgraphId);
3564  auto outputs = GetSubgraphOutputs(m_Model, subgraphId);
3565  for (unsigned int i = 0; i < outputs.size(); ++i)
3566  {
3567  auto const output = outputs[i];
3568  if (output.second->name == name)
3569  {
3570  auto bindingId = GenerateLayerBindingId(subgraphId, output.first);
3571  std::vector<unsigned int> shape = m_OverridenOutputShapes.size() > 0 ?
3572  m_OverridenOutputShapes[i] : AsUnsignedVector(output.second->shape);
3573  return std::make_pair(bindingId, ToTensorInfo(output.second, shape));
3574  }
3575  }
3576 
3577  std::stringstream bindings;
3578  for (auto const & output : outputs)
3579  {
3580  bindings << "'" << output.second->name << "' ";
3581  }
3582 
3583  throw ParseException(
3584  fmt::format("No output binding found for subgraph:{} and name:{}. "
3585  "Possible outputs are: [{}] {}",
3586  subgraphId,
3587  name,
3588  bindings.str(),
3589  CHECK_LOCATION().AsString()));
3590 }
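
The output counterpart, feeding armnn::OutputTensors for EnqueueWorkload; "output" is again a model-dependent placeholder name.

armnnTfLiteParser::BindingPointInfo outputBinding =
    parser->GetNetworkOutputBindingInfo(0, "output");

// Allocate a destination buffer and wrap it as a writable Tensor.
std::vector<float> outputData(outputBinding.second.GetNumElements());
armnn::OutputTensors outputTensors{
    { outputBinding.first, armnn::Tensor(outputBinding.second, outputData.data()) }
};
// runtime->EnqueueWorkload(networkId, inputTensors, outputTensors);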

◆ GetOutputs()

TfLiteParserImpl::TensorRawPtrVector GetOutputs ( const ModelPtr model,
size_t  subgraphIndex,
size_t  operatorIndex 
)
static

Definition at line 3262 of file TfLiteParser.cpp.

References CHECK_MODEL, CHECK_TENSOR, and CHECKED_NON_NEGATIVE.

Referenced by armnnTfLiteParser::ComputeWrappedIndex(), TfLiteParserImpl::CreateNetworkFromBinary(), TfLiteParserImpl::OutputShapeOfReshape(), and TfLiteParserImpl::OutputShapeOfSqueeze().

3265 {
3266  CHECK_MODEL(model, subgraphIndex, operatorIndex);
3267 
3268  const auto & subgraphPtr = model->subgraphs[subgraphIndex];
3269  const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
3270 
3271  size_t outputCount = operatorPtr->outputs.size();
3272  TensorRawPtrVector result(outputCount);
3273  for (size_t i=0; i<outputCount; ++i)
3274  {
3275  uint32_t outputId = CHECKED_NON_NEGATIVE(operatorPtr->outputs[i]);
3276  CHECK_TENSOR(model, subgraphIndex, outputId);
3277  result[i] = subgraphPtr->tensors[outputId].get();
3278  }
3279  return result;
3280 }

◆ GetOutputTensorIds()

std::vector< int32_t > & GetOutputTensorIds ( const ModelPtr model,
size_t  subgraphIndex,
size_t  operatorIndex 
)
static

Definition at line 3325 of file TfLiteParser.cpp.

References ARMNN_ASSERT, CHECK_LOCATION, CHECK_MODEL, CHECK_SUBGRAPH, IConnectableLayer::GetInputSlot(), IConnectableLayer::GetNumInputSlots(), IConnectableLayer::GetNumOutputSlots(), IConnectableLayer::GetOutputSlot(), TfLiteParserImpl::GetSubgraphInputs(), TfLiteParserImpl::GetSubgraphOutputs(), IOutputSlot::SetTensorInfo(), and armnnDeserializer::ToTensorInfo().

Referenced by armnnTfLiteParser::ComputeWrappedIndex(), TfLiteParserImpl::CreateNetworkFromBinary(), TfLiteParserImpl::OutputShapeOfReshape(), and TfLiteParserImpl::OutputShapeOfSqueeze().

3328 {
3329  CHECK_MODEL(model, subgraphIndex, operatorIndex);
3330  const auto & subgraphPtr = model->subgraphs[subgraphIndex];
3331  const auto & operatorPtr = subgraphPtr->operators[operatorIndex];
3332  return operatorPtr->outputs;
3333 }

◆ GetSubgraphCount()

size_t GetSubgraphCount ( ) const

Return the number of subgraphs in the parsed model.

Definition at line 3592 of file TfLiteParser.cpp.

3593 {
3594  return m_Model->subgraphs.size();
3595 }

◆ GetSubgraphInputs()

TfLiteParserImpl::TensorIdRawPtrVector GetSubgraphInputs ( const ModelPtr model,
size_t  subgraphIndex 
)
static

Definition at line 3282 of file TfLiteParser.cpp.

References CHECK_SUBGRAPH, CHECK_TENSOR, and CHECKED_NON_NEGATIVE.

Referenced by TfLiteParserImpl::GetNetworkInputBindingInfo(), TfLiteParserImpl::GetOutputTensorIds(), and TfLiteParserImpl::GetSubgraphInputTensorNames().

3284 {
3285  CHECK_SUBGRAPH(model, subgraphIndex);
3286  const auto & subgraphPtr = model->subgraphs[subgraphIndex];
3287 
3288  size_t inputCount = subgraphPtr->inputs.size();
3289  TensorIdRawPtrVector result(inputCount);
3290  for (size_t i=0; i<inputCount; ++i)
3291  {
3292  uint32_t inputId = CHECKED_NON_NEGATIVE(subgraphPtr->inputs[i]);
3293  CHECK_TENSOR(model, subgraphIndex, inputId);
3294  result[i] = std::make_pair(inputId, subgraphPtr->tensors[inputId].get());
3295  }
3296  return result;
3297 }

◆ GetSubgraphInputTensorNames()

std::vector< std::string > GetSubgraphInputTensorNames ( size_t  subgraphId) const

Return the input tensor names for a given subgraph.

Definition at line 3597 of file TfLiteParser.cpp.

References CHECK_SUBGRAPH, and TfLiteParserImpl::GetSubgraphInputs().

3598 {
3599  CHECK_SUBGRAPH(m_Model, subgraphId);
3600  auto inputs = GetSubgraphInputs(m_Model, subgraphId);
3601  std::vector<std::string> result;
3602  result.reserve(inputs.size());
3603  for (auto const & input : inputs)
3604  {
3605  result.push_back(input.second->name);
3606  }
3607  return result;
3608 }

◆ GetSubgraphOutputs()

TfLiteParserImpl::TensorIdRawPtrVector GetSubgraphOutputs ( const ModelPtr model,
size_t  subgraphIndex 
)
static

Definition at line 3299 of file TfLiteParser.cpp.

References CHECK_SUBGRAPH, and CHECKED_NON_NEGATIVE.

Referenced by TfLiteParserImpl::GetNetworkOutputBindingInfo(), TfLiteParserImpl::GetOutputTensorIds(), and TfLiteParserImpl::GetSubgraphOutputTensorNames().

3301 {
3302  CHECK_SUBGRAPH(model, subgraphIndex);
3303  const auto & subgraphPtr = model->subgraphs[subgraphIndex];
3304 
3305  size_t outputCount = subgraphPtr->outputs.size();
3306  TensorIdRawPtrVector result(outputCount);
3307  for (size_t i=0; i<outputCount; ++i)
3308  {
3309  uint32_t outputId = CHECKED_NON_NEGATIVE(subgraphPtr->outputs[i]);
3310  result[i] = std::make_pair(outputId, subgraphPtr->tensors[outputId].get());
3311  }
3312  return result;
3313 }

◆ GetSubgraphOutputTensorNames()

std::vector< std::string > GetSubgraphOutputTensorNames ( size_t  subgraphId) const

Return the output tensor names for a given subgraph.

Definition at line 3610 of file TfLiteParser.cpp.

References CHECK_SUBGRAPH, and TfLiteParserImpl::GetSubgraphOutputs().

3611 {
3612  CHECK_SUBGRAPH(m_Model, subgraphId);
3613  auto outputs = GetSubgraphOutputs(m_Model, subgraphId);
3614  std::vector<std::string> result;
3615  result.reserve(outputs.size());
3616  for (auto const & output : outputs)
3617  {
3618  result.push_back(output.second->name);
3619  }
3620  return result;
3621 }
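
Together, GetSubgraphCount and the two tensor-name getters above let a caller discover every binding point without prior knowledge of the model layout. A brief sketch, assuming the 'parser' handle from the earlier examples and <iostream>:

for (size_t i = 0; i < parser->GetSubgraphCount(); ++i)
{
    for (const std::string& name : parser->GetSubgraphInputTensorNames(i))
    {
        std::cout << "subgraph " << i << " input:  " << name << "\n";
    }
    for (const std::string& name : parser->GetSubgraphOutputTensorNames(i))
    {
        std::cout << "subgraph " << i << " output: " << name << "\n";
    }
}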

◆ GetVersion()

const std::string GetVersion ( )
static

Retrieve version in X.Y.Z form.

Definition at line 3623 of file TfLiteParser.cpp.

References TFLITE_PARSER_VERSION.

3624 {
3625  return TFLITE_PARSER_VERSION;
3626 }

◆ LoadModelFromBinary()

TfLiteParserImpl::ModelPtr LoadModelFromBinary ( const uint8_t *  binaryContent,
size_t  len 
)
static

Definition at line 3224 of file TfLiteParser.cpp.

References CHECK_LOCATION.

Referenced by TfLiteParserImpl::CreateNetworkFromBinary(), and TfLiteParserImpl::LoadModelFromFile().

3225 {
3226  if (binaryContent == nullptr)
3227  {
3228  throw InvalidArgumentException(fmt::format("Invalid (null) binary content {}",
3229  CHECK_LOCATION().AsString()));
3230  }
3231  flatbuffers::Verifier verifier(binaryContent, len);
3232  if (verifier.VerifyBuffer<tflite::Model>() == false)
3233  {
3234  throw ParseException(
3235  fmt::format("Buffer doesn't conform to the expected Tensorflow Lite "
3236  "flatbuffers format. size:{} {}",
3237  len,
3238  CHECK_LOCATION().AsString()));
3239  }
3240  return tflite::UnPackModel(binaryContent);
3241 }
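
A hedged sketch of the failure modes visible above: a null buffer raises InvalidArgumentException, a buffer that fails FlatBuffers verification raises ParseException, and a valid buffer is unpacked into the FlatBuffers object API (tflite::ModelT). The helper name is hypothetical.

#include <TfLiteParser.hpp>   // implementation header
#include <armnn/Exceptions.hpp>
#include <iostream>

void InspectBuffer(const uint8_t* data, size_t len)   // hypothetical helper
{
    try
    {
        auto model = armnnTfLiteParser::TfLiteParserImpl::LoadModelFromBinary(data, len);
        std::cout << "subgraphs: " << model->subgraphs.size() << "\n";
    }
    catch (const armnn::Exception& e)
    {
        std::cerr << e.what() << "\n";   // null or malformed buffer
    }
}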

◆ LoadModelFromFile()

TfLiteParserImpl::ModelPtr LoadModelFromFile ( const char *  fileName)
static

Definition at line 3200 of file TfLiteParser.cpp.

References CHECK_LOCATION, and TfLiteParserImpl::LoadModelFromBinary().

Referenced by TfLiteParserImpl::CreateNetworkFromBinaryFile().

3201 {
3202  if (fileName == nullptr)
3203  {
3204  throw InvalidArgumentException(fmt::format("Invalid (null) file name {}",
3205  CHECK_LOCATION().AsString()));
3206  }
3207  std::error_code errorCode;
3208  fs::path pathToFile(fileName);
3209  if (!fs::exists(pathToFile, errorCode))
3210  {
3211  //fmt::format() could not be used here (format error)
3212  std::stringstream msg;
3213  msg << "Cannot find the file (" << fileName << ") errorCode: " << errorCode
3214  << " " << CHECK_LOCATION().AsString();
3215 
3216  throw FileNotFoundException(msg.str());
3217  }
3218  std::ifstream file(fileName, std::ios::binary);
3219  std::string fileContent((std::istreambuf_iterator<char>(file)), std::istreambuf_iterator<char>());
3220  return LoadModelFromBinary(reinterpret_cast<const uint8_t *>(fileContent.c_str()),
3221  fileContent.size());
3222 }

◆ OutputShapeOfReshape()

armnn::TensorInfo OutputShapeOfReshape ( const armnn::TensorInfo inputTensorInfo,
const std::vector< int32_t > &  targetDimsIn 
)
static

Definition at line 2112 of file TfLiteParser.cpp.

References ARMNN_ASSERT, ARMNN_THROW_PARSE_EXCEPTION, CHECK_LOCATION, CHECK_MODEL, CHECK_SUPPORTED_FUSED_ACTIVATION, CHECK_VALID_SIZE, CHECKED_NON_NEGATIVE, armnnDeserializer::CheckShape(), IOutputSlot::Connect(), TfLiteParserImpl::GetBuffer(), TensorInfo::GetDataType(), TfLiteParserImpl::GetInputs(), IConnectableLayer::GetInputSlot(), TfLiteParserImpl::GetInputTensorIds(), IConnectableLayer::GetName(), TensorInfo::GetNumBytes(), TensorInfo::GetNumDimensions(), TensorInfo::GetNumElements(), IConnectableLayer::GetNumOutputSlots(), TfLiteParserImpl::GetOutputs(), IConnectableLayer::GetOutputSlot(), TfLiteParserImpl::GetOutputTensorIds(), TensorInfo::GetQuantizationOffset(), TensorInfo::GetQuantizationScale(), TensorInfo::GetShape(), StackDescriptor::m_Axis, FullyConnectedDescriptor::m_BiasEnabled, DetectionPostProcessDescriptor::m_DetectionsPerClass, StackDescriptor::m_InputShape, DetectionPostProcessDescriptor::m_MaxClassesPerDetection, DetectionPostProcessDescriptor::m_MaxDetections, ResizeDescriptor::m_Method, DetectionPostProcessDescriptor::m_NmsIouThreshold, DetectionPostProcessDescriptor::m_NmsScoreThreshold, DetectionPostProcessDescriptor::m_NumClasses, StackDescriptor::m_NumInputs, DetectionPostProcessDescriptor::m_ScaleH, DetectionPostProcessDescriptor::m_ScaleW, DetectionPostProcessDescriptor::m_ScaleX, DetectionPostProcessDescriptor::m_ScaleY, ReshapeDescriptor::m_TargetShape, FullyConnectedDescriptor::m_TransposeWeightMatrix, DetectionPostProcessDescriptor::m_UseRegularNms, armnn::MaxNumOfTensorDimensions, armnn::NHWC, armnn::numeric_cast(), armnnUtils::ProcessConcatInputTensorInfo(), OriginsDescriptor::SetConcatAxis(), TensorInfo::SetShape(), IOutputSlot::SetTensorInfo(), ViewsDescriptor::SetViewOriginCoord(), ViewsDescriptor::SetViewSize(), and armnnDeserializer::ToTensorInfo().

2114 {
2115  std::vector<unsigned int> outputDims(targetDimsIn.begin(), targetDimsIn.end());
2116  const auto stretchDim = std::find(targetDimsIn.begin(), targetDimsIn.end(), -1);
2117 
2118  if (stretchDim != targetDimsIn.end())
2119  {
2120  if (std::find(std::next(stretchDim), targetDimsIn.end(), -1) != targetDimsIn.end())
2121  {
2122  throw ParseException(
2123  fmt::format("At most one component of shape can be -1 {}", CHECK_LOCATION().AsString()));
2124  }
2125 
2126  auto targetNumElements =
2127  armnn::numeric_cast<unsigned int>(
2128  std::accumulate(targetDimsIn.begin(), targetDimsIn.end(), -1, std::multiplies<int32_t>()));
2129 
2130  auto stretchIndex = static_cast<size_t>(std::distance(targetDimsIn.begin(), stretchDim));
2131  outputDims[stretchIndex] = inputTensorInfo.GetNumElements() / targetNumElements;
2132  }
2133 
2134  TensorShape outputShape = TensorShape(static_cast<unsigned int>(outputDims.size()), outputDims.data());
2135 
2136  TensorInfo reshapeInfo = inputTensorInfo;
2137  reshapeInfo.SetShape(outputShape);
2138 
2139  return reshapeInfo;
2140 }
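
A worked example of the stretch-dimension handling above: for a 2x3x4 input (24 elements) and target shape {4, -1}, std::accumulate starts at -1, so the two -1 factors cancel and targetNumElements folds to 4; the stretch dimension then becomes 24 / 4 = 6.

#include <TfLiteParser.hpp>   // implementation header
#include <armnn/Tensor.hpp>

armnn::TensorInfo inputInfo(armnn::TensorShape({ 2, 3, 4 }), armnn::DataType::Float32);

armnn::TensorInfo outInfo =
    armnnTfLiteParser::TfLiteParserImpl::OutputShapeOfReshape(inputInfo, { 4, -1 });
// outInfo.GetShape() is { 4, 6 }; data type and quantization are carried over.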

◆ OutputShapeOfSqueeze()

armnn::TensorInfo OutputShapeOfSqueeze ( const std::vector< uint32_t > &  squeezeDimsIn,
const armnn::TensorInfo inputTensorInfo 
)
static

Definition at line 1598 of file TfLiteParser.cpp.

References ARMNN_ASSERT, CHECK_LOCATION, CHECK_MODEL, CHECK_VALID_SIZE, TfLiteParserImpl::GetBuffer(), TfLiteParserImpl::GetInputs(), TfLiteParserImpl::GetInputTensorIds(), TensorInfo::GetNumBytes(), TensorInfo::GetNumDimensions(), TensorInfo::GetNumElements(), TfLiteParserImpl::GetOutputs(), IConnectableLayer::GetOutputSlot(), TfLiteParserImpl::GetOutputTensorIds(), TensorInfo::GetShape(), armnn::IgnoreUnused(), ActivationDescriptor::m_A, MeanDescriptor::m_Axis, ActivationDescriptor::m_B, StridedSliceDescriptor::m_Begin, StridedSliceDescriptor::m_BeginMask, StridedSliceDescriptor::m_DataLayout, StridedSliceDescriptor::m_EllipsisMask, StridedSliceDescriptor::m_End, StridedSliceDescriptor::m_EndMask, ActivationDescriptor::m_Function, MeanDescriptor::m_KeepDims, StridedSliceDescriptor::m_NewAxisMask, PadDescriptor::m_PadList, PadDescriptor::m_PadValue, StridedSliceDescriptor::m_ShrinkAxisMask, StridedSliceDescriptor::m_Stride, ReshapeDescriptor::m_TargetShape, armnn::Neg, armnn::NHWC, TensorInfo::SetShape(), IOutputSlot::SetTensorInfo(), and armnnDeserializer::ToTensorInfo().

1600 {
1601  CHECK_VALID_SIZE(squeezeDimsIn.size(), 0, 1, 2, 3, 4);
1602  std::vector<uint32_t> squeezeDims = squeezeDimsIn;
1603  static const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };
1604 
1605  if (inputTensorInfo.GetNumDimensions() > 4)
1606  {
1607  std::stringstream ss;
1608  ss << "Input tensor has unexpected number of dimensions:" << inputTensorInfo.GetNumDimensions()
1609  << " shape:" << inputTensorInfo.GetShape() << " "
1610  << CHECK_LOCATION().AsString();
1611  throw ParseException(ss.str());
1612  }
1613 
1614  if (squeezeDims.empty())
1615  {
1616  squeezeDims.assign(dimensionSequence,
1617  dimensionSequence+inputTensorInfo.GetNumDimensions());
1618  }
1619 
1620  std::vector<uint32_t> outputDims;
1621  for(unsigned int i = 0; i < inputTensorInfo.GetNumDimensions(); i++)
1622  {
1623  bool skipSqueeze = (std::find(squeezeDims.begin(), squeezeDims.end(), i) == squeezeDims.end());
1624  auto currentDimension = inputTensorInfo.GetShape()[i];
1625  if (skipSqueeze || currentDimension != 1)
1626  {
1627  outputDims.push_back(currentDimension);
1628  }
1629  }
1630 
1631  if (outputDims.size() > 4)
1632  {
1633  std::stringstream ss;
1634  ss << "Output tensor has unexpected number of dimensions:" << inputTensorInfo.GetNumDimensions()
1635  << " shape:" << inputTensorInfo.GetShape() << " "
1636  << CHECK_LOCATION().AsString();
1637  throw ParseException(ss.str());
1638  }
1639 
1640  TensorShape outShape = TensorShape(static_cast<unsigned int>(outputDims.size()),
1641  outputDims.data());
1642 
1643  // we need to preserve the tensor type and the quantization data as well
1644  TensorInfo outTensorInfo = inputTensorInfo;
1645  outTensorInfo.SetShape(outShape);
1646 
1647  return outTensorInfo;
1648 }
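
A worked example: squeezing dimensions {0, 2} of a {1, 2, 1, 3} tensor drops the two size-1 axes while, as the code comments note, preserving the data type and quantization parameters.

#include <TfLiteParser.hpp>   // implementation header
#include <armnn/Tensor.hpp>

armnn::TensorInfo inputInfo(armnn::TensorShape({ 1, 2, 1, 3 }), armnn::DataType::Float32);

armnn::TensorInfo outInfo =
    armnnTfLiteParser::TfLiteParserImpl::OutputShapeOfSqueeze({ 0, 2 }, inputInfo);
// outInfo.GetShape() is { 2, 3 }.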

The documentation for this class was generated from the following files:

TfLiteParser.hpp
TfLiteParser.cpp