ArmNN 21.05
TfLiteParserImpl Class Reference

#include <TfLiteParser.hpp>

Public Types

using ModelPtr = std::unique_ptr< tflite::ModelT >
 
using SubgraphPtr = std::unique_ptr< tflite::SubGraphT >
 
using OperatorPtr = std::unique_ptr< tflite::OperatorT >
 
using OperatorCodePtr = std::unique_ptr< tflite::OperatorCodeT >
 
using TensorPtr = std::unique_ptr< tflite::TensorT >
 
using TensorRawPtr = const tflite::TensorT *
 
using TensorRawPtrVector = std::vector< TensorRawPtr >
 
using TensorIdRawPtr = std::pair< size_t, TensorRawPtr >
 
using TensorIdRawPtrVector = std::vector< TensorIdRawPtr >
 
using BufferPtr = std::unique_ptr< tflite::BufferT >
 
using BufferRawPtr = const tflite::BufferT *
 

Public Member Functions

armnn::INetworkPtr CreateNetworkFromBinaryFile (const char *graphFile)
 Create the network from a flatbuffers binary file on disk.
 
armnn::INetworkPtr CreateNetworkFromBinary (const std::vector< uint8_t > &binaryContent)
 Create the network from a flatbuffers binary.
 
BindingPointInfo GetNetworkInputBindingInfo (size_t subgraphId, const std::string &name) const
 Retrieve binding info (layer id and tensor info) for the network input identified by the given layer name and subgraph id.
 
BindingPointInfo GetNetworkOutputBindingInfo (size_t subgraphId, const std::string &name) const
 Retrieve binding info (layer id and tensor info) for the network output identified by the given layer name and subgraph id.
 
size_t GetSubgraphCount () const
 Return the number of subgraphs in the parsed model.
 
std::vector< std::string > GetSubgraphInputTensorNames (size_t subgraphId) const
 Return the input tensor names for a given subgraph.
 
std::vector< std::string > GetSubgraphOutputTensorNames (size_t subgraphId) const
 Return the output tensor names for a given subgraph.
 
 TfLiteParserImpl (const armnn::Optional< ITfLiteParser::TfLiteParserOptions > &options=armnn::EmptyOptional())
 
 ~TfLiteParserImpl ()=default
 

Static Public Member Functions

static ModelPtr LoadModelFromFile (const char *fileName)
 
static ModelPtr LoadModelFromBinary (const uint8_t *binaryContent, size_t len)
 
static TensorRawPtrVector GetInputs (const ModelPtr &model, size_t subgraphIndex, size_t operatorIndex)
 
static TensorRawPtrVector GetOutputs (const ModelPtr &model, size_t subgraphIndex, size_t operatorIndex)
 
static TensorIdRawPtrVector GetSubgraphInputs (const ModelPtr &model, size_t subgraphIndex)
 
static TensorIdRawPtrVector GetSubgraphOutputs (const ModelPtr &model, size_t subgraphIndex)
 
static std::vector< int32_t > & GetInputTensorIds (const ModelPtr &model, size_t subgraphIndex, size_t operatorIndex)
 
static std::vector< int32_t > & GetOutputTensorIds (const ModelPtr &model, size_t subgraphIndex, size_t operatorIndex)
 
static BufferRawPtr GetBuffer (const ModelPtr &model, size_t bufferIndex)
 
static armnn::TensorInfo OutputShapeOfSqueeze (const std::vector< uint32_t > &squeezeDimsIn, const armnn::TensorInfo &inputTensorInfo)
 
static armnn::TensorInfo OutputShapeOfReshape (const armnn::TensorInfo &inputTensorInfo, const std::vector< int32_t > &targetDimsIn)
 
static const std::string GetVersion ()
 Retrieve version in X.Y.Z form.
 

Detailed Description

Definition at line 19 of file TfLiteParser.hpp.
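
This class is the implementation behind the public armnnTfLiteParser::ITfLiteParser interface. The following is a minimal usage sketch, assuming the public facade; the model path and tensor names are placeholders, not part of the ArmNN API:

#include <armnnTfLiteParser/ITfLiteParser.hpp>
#include <armnn/INetwork.hpp>
#include <iostream>

int main()
{
    using namespace armnnTfLiteParser;

    // ITfLiteParser is the public facade that forwards to TfLiteParserImpl.
    ITfLiteParserPtr parser = ITfLiteParser::Create();

    // "model.tflite" is a placeholder path.
    armnn::INetworkPtr network = parser->CreateNetworkFromBinaryFile("model.tflite");
    (void)network; // would normally be optimized and loaded into a runtime

    // Binding info for subgraph 0; the tensor names depend on the model.
    BindingPointInfo inputInfo = parser->GetNetworkInputBindingInfo(0, "input");
    std::cout << "subgraphs: " << parser->GetSubgraphCount()
              << ", input elements: " << inputInfo.second.GetNumElements() << "\n";
    return 0;
}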

Member Typedef Documentation

◆ BufferPtr

using BufferPtr = std::unique_ptr<tflite::BufferT>

Definition at line 32 of file TfLiteParser.hpp.

◆ BufferRawPtr

using BufferRawPtr = const tflite::BufferT *

Definition at line 33 of file TfLiteParser.hpp.

◆ ModelPtr

using ModelPtr = std::unique_ptr<tflite::ModelT>

Definition at line 23 of file TfLiteParser.hpp.

◆ OperatorCodePtr

using OperatorCodePtr = std::unique_ptr<tflite::OperatorCodeT>

Definition at line 26 of file TfLiteParser.hpp.

◆ OperatorPtr

using OperatorPtr = std::unique_ptr<tflite::OperatorT>

Definition at line 25 of file TfLiteParser.hpp.

◆ SubgraphPtr

using SubgraphPtr = std::unique_ptr<tflite::SubGraphT>

Definition at line 24 of file TfLiteParser.hpp.

◆ TensorIdRawPtr

using TensorIdRawPtr = std::pair<size_t, TensorRawPtr>

Definition at line 30 of file TfLiteParser.hpp.

◆ TensorIdRawPtrVector

using TensorIdRawPtrVector = std::vector<TensorIdRawPtr>

Definition at line 31 of file TfLiteParser.hpp.

◆ TensorPtr

using TensorPtr = std::unique_ptr<tflite::TensorT>

Definition at line 27 of file TfLiteParser.hpp.

◆ TensorRawPtr

using TensorRawPtr = const tflite::TensorT *

Definition at line 28 of file TfLiteParser.hpp.

◆ TensorRawPtrVector

using TensorRawPtrVector = std::vector<TensorRawPtr>

Definition at line 29 of file TfLiteParser.hpp.

Constructor & Destructor Documentation

◆ TfLiteParserImpl()

TfLiteParserImpl (const armnn::Optional< ITfLiteParser::TfLiteParserOptions > &  options = armnn::EmptyOptional())

Definition at line 604 of file TfLiteParser.cpp.

TfLiteParserImpl::TfLiteParserImpl(const armnn::Optional<ITfLiteParser::TfLiteParserOptions>& options)
: m_Options(options)
, m_Network(nullptr, nullptr)
, m_ParserFunctions(tflite::BuiltinOperator_MAX+1, &TfLiteParserImpl::ParseUnsupportedOperator)
{
    // register supported operators
    m_ParserFunctions[tflite::BuiltinOperator_ABS] = &TfLiteParserImpl::ParseAbs;
    m_ParserFunctions[tflite::BuiltinOperator_ADD] = &TfLiteParserImpl::ParseAdd;
    m_ParserFunctions[tflite::BuiltinOperator_ARG_MIN] = &TfLiteParserImpl::ParseArgMin;
    m_ParserFunctions[tflite::BuiltinOperator_ARG_MAX] = &TfLiteParserImpl::ParseArgMax;
    m_ParserFunctions[tflite::BuiltinOperator_AVERAGE_POOL_2D] = &TfLiteParserImpl::ParseAveragePool2D;
    m_ParserFunctions[tflite::BuiltinOperator_BATCH_TO_SPACE_ND] = &TfLiteParserImpl::ParseBatchToSpaceND;
    m_ParserFunctions[tflite::BuiltinOperator_CAST] = &TfLiteParserImpl::ParseCast;
    m_ParserFunctions[tflite::BuiltinOperator_CONCATENATION] = &TfLiteParserImpl::ParseConcatenation;
    m_ParserFunctions[tflite::BuiltinOperator_CONV_2D] = &TfLiteParserImpl::ParseConv2D;
    m_ParserFunctions[tflite::BuiltinOperator_CUSTOM] = &TfLiteParserImpl::ParseCustomOperator;
    m_ParserFunctions[tflite::BuiltinOperator_DEPTH_TO_SPACE] = &TfLiteParserImpl::ParseDepthToSpace;
    m_ParserFunctions[tflite::BuiltinOperator_DEPTHWISE_CONV_2D] = &TfLiteParserImpl::ParseDepthwiseConv2D;
    m_ParserFunctions[tflite::BuiltinOperator_DEQUANTIZE] = &TfLiteParserImpl::ParseDequantize;
    m_ParserFunctions[tflite::BuiltinOperator_DIV] = &TfLiteParserImpl::ParseDiv;
    m_ParserFunctions[tflite::BuiltinOperator_ELU] = &TfLiteParserImpl::ParseElu;
    m_ParserFunctions[tflite::BuiltinOperator_EXP] = &TfLiteParserImpl::ParseExp;
    m_ParserFunctions[tflite::BuiltinOperator_FULLY_CONNECTED] = &TfLiteParserImpl::ParseFullyConnected;
    m_ParserFunctions[tflite::BuiltinOperator_GATHER] = &TfLiteParserImpl::ParseGather;
    m_ParserFunctions[tflite::BuiltinOperator_HARD_SWISH] = &TfLiteParserImpl::ParseHardSwish;
    m_ParserFunctions[tflite::BuiltinOperator_LEAKY_RELU] = &TfLiteParserImpl::ParseLeakyRelu;
    m_ParserFunctions[tflite::BuiltinOperator_LOGICAL_NOT] = &TfLiteParserImpl::ParseLogicalNot;
    m_ParserFunctions[tflite::BuiltinOperator_LOGISTIC] = &TfLiteParserImpl::ParseLogistic;
    m_ParserFunctions[tflite::BuiltinOperator_L2_NORMALIZATION] = &TfLiteParserImpl::ParseL2Normalization;
    m_ParserFunctions[tflite::BuiltinOperator_MAX_POOL_2D] = &TfLiteParserImpl::ParseMaxPool2D;
    m_ParserFunctions[tflite::BuiltinOperator_MAXIMUM] = &TfLiteParserImpl::ParseMaximum;
    m_ParserFunctions[tflite::BuiltinOperator_MEAN] = &TfLiteParserImpl::ParseMean;
    m_ParserFunctions[tflite::BuiltinOperator_MINIMUM] = &TfLiteParserImpl::ParseMinimum;
    m_ParserFunctions[tflite::BuiltinOperator_MUL] = &TfLiteParserImpl::ParseMul;
    m_ParserFunctions[tflite::BuiltinOperator_NEG] = &TfLiteParserImpl::ParseNeg;
    m_ParserFunctions[tflite::BuiltinOperator_PACK] = &TfLiteParserImpl::ParsePack;
    m_ParserFunctions[tflite::BuiltinOperator_PAD] = &TfLiteParserImpl::ParsePad;
    m_ParserFunctions[tflite::BuiltinOperator_QUANTIZE] = &TfLiteParserImpl::ParseQuantize;
    m_ParserFunctions[tflite::BuiltinOperator_RELU] = &TfLiteParserImpl::ParseRelu;
    m_ParserFunctions[tflite::BuiltinOperator_RELU6] = &TfLiteParserImpl::ParseRelu6;
    m_ParserFunctions[tflite::BuiltinOperator_REDUCE_MAX] = &TfLiteParserImpl::ParseReduceMax;
    m_ParserFunctions[tflite::BuiltinOperator_REDUCE_MIN] = &TfLiteParserImpl::ParseReduceMin;
    m_ParserFunctions[tflite::BuiltinOperator_RESHAPE] = &TfLiteParserImpl::ParseReshape;
    m_ParserFunctions[tflite::BuiltinOperator_RESIZE_BILINEAR] = &TfLiteParserImpl::ParseResizeBilinear;
    m_ParserFunctions[tflite::BuiltinOperator_RESIZE_NEAREST_NEIGHBOR] = &TfLiteParserImpl::ParseResizeNearestNeighbor;
    m_ParserFunctions[tflite::BuiltinOperator_RSQRT] = &TfLiteParserImpl::ParseRsqrt;
    m_ParserFunctions[tflite::BuiltinOperator_SLICE] = &TfLiteParserImpl::ParseSlice;
    m_ParserFunctions[tflite::BuiltinOperator_SOFTMAX] = &TfLiteParserImpl::ParseSoftmax;
    m_ParserFunctions[tflite::BuiltinOperator_SPACE_TO_BATCH_ND] = &TfLiteParserImpl::ParseSpaceToBatchND;
    m_ParserFunctions[tflite::BuiltinOperator_SPLIT] = &TfLiteParserImpl::ParseSplit;
    m_ParserFunctions[tflite::BuiltinOperator_SPLIT_V] = &TfLiteParserImpl::ParseSplitV;
    m_ParserFunctions[tflite::BuiltinOperator_SQUEEZE] = &TfLiteParserImpl::ParseSqueeze;
    m_ParserFunctions[tflite::BuiltinOperator_STRIDED_SLICE] = &TfLiteParserImpl::ParseStridedSlice;
    m_ParserFunctions[tflite::BuiltinOperator_SUB] = &TfLiteParserImpl::ParseSub;
    m_ParserFunctions[tflite::BuiltinOperator_SUM] = &TfLiteParserImpl::ParseSum;
    m_ParserFunctions[tflite::BuiltinOperator_TANH] = &TfLiteParserImpl::ParseTanH;
    m_ParserFunctions[tflite::BuiltinOperator_TRANSPOSE] = &TfLiteParserImpl::ParseTranspose;
    m_ParserFunctions[tflite::BuiltinOperator_TRANSPOSE_CONV] = &TfLiteParserImpl::ParseTransposeConv;
    m_ParserFunctions[tflite::BuiltinOperator_UNPACK] = &TfLiteParserImpl::ParseUnpack;

    // register supported custom operators
    m_CustomParserFunctions["TFLite_Detection_PostProcess"] = &TfLiteParserImpl::ParseDetectionPostProcess;
}
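
Operators without a registered parse function fall through to ParseUnsupportedOperator. As a hedged sketch, the options argument can soften this by substituting StandIn layers; the field names below are those of ITfLiteParser::TfLiteParserOptions in this release, but verify them against your headers:

#include <armnnTfLiteParser/ITfLiteParser.hpp>

armnnTfLiteParser::ITfLiteParserPtr MakeParser()
{
    // Both fields default to false.
    armnnTfLiteParser::ITfLiteParser::TfLiteParserOptions options;
    options.m_StandInLayerForUnsupported = true; // map unknown builtins to StandIn layers
    options.m_InferAndValidate = true;           // infer missing tensor shapes and validate

    return armnnTfLiteParser::ITfLiteParser::Create(options);
}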

◆ ~TfLiteParserImpl()

~TfLiteParserImpl ( )
default

Member Function Documentation

◆ CreateNetworkFromBinary()

INetworkPtr CreateNetworkFromBinary ( const std::vector< uint8_t > &  binaryContent)

Create the network from a flatbuffers binary.

Definition at line 682 of file TfLiteParser.cpp.

References ARMNN_ASSERT, ARMNN_ASSERT_MSG, ARMNN_LOG, CHECK_LOCATION, CHECK_MODEL, CHECK_SUPPORTED_FUSED_ACTIVATION, CHECK_TENSOR, CHECK_VALID_SIZE, CHECKED_NON_NEGATIVE, armnn::error, TfLiteParserImpl::GetBuffer(), TensorInfo::GetDataType(), TfLiteParserImpl::GetInputs(), TfLiteParserImpl::GetInputTensorIds(), TensorInfo::GetNumBytes(), TensorInfo::GetNumElements(), TfLiteParserImpl::GetOutputs(), IConnectableLayer::GetOutputSlot(), TfLiteParserImpl::GetOutputTensorIds(), TensorInfo::GetShape(), TfLiteParserImpl::LoadModelFromBinary(), SoftmaxDescriptor::m_Beta, Convolution2dDescriptor::m_BiasEnabled, DepthwiseConvolution2dDescriptor::m_BiasEnabled, TransposeConvolution2dDescriptor::m_BiasEnabled, BatchToSpaceNdDescriptor::m_BlockShape, SpaceToBatchNdDescriptor::m_BlockShape, BatchToSpaceNdDescriptor::m_Crops, Pooling2dDescriptor::m_DataLayout, Convolution2dDescriptor::m_DataLayout, DepthwiseConvolution2dDescriptor::m_DataLayout, L2NormalizationDescriptor::m_DataLayout, BatchToSpaceNdDescriptor::m_DataLayout, SpaceToBatchNdDescriptor::m_DataLayout, TransposeConvolution2dDescriptor::m_DataLayout, Convolution2dDescriptor::m_DilationX, DepthwiseConvolution2dDescriptor::m_DilationX, Convolution2dDescriptor::m_DilationY, DepthwiseConvolution2dDescriptor::m_DilationY, TransposeConvolution2dDescriptor::m_OutputShape, TransposeConvolution2dDescriptor::m_OutputShapeEnabled, Pooling2dDescriptor::m_OutputShapeRounding, Pooling2dDescriptor::m_PadBottom, Convolution2dDescriptor::m_PadBottom, DepthwiseConvolution2dDescriptor::m_PadBottom, TransposeConvolution2dDescriptor::m_PadBottom, Pooling2dDescriptor::m_PaddingMethod, Pooling2dDescriptor::m_PadLeft, Convolution2dDescriptor::m_PadLeft, DepthwiseConvolution2dDescriptor::m_PadLeft, TransposeConvolution2dDescriptor::m_PadLeft, SpaceToBatchNdDescriptor::m_PadList, Pooling2dDescriptor::m_PadRight, Convolution2dDescriptor::m_PadRight, DepthwiseConvolution2dDescriptor::m_PadRight, TransposeConvolution2dDescriptor::m_PadRight, Pooling2dDescriptor::m_PadTop, Convolution2dDescriptor::m_PadTop, DepthwiseConvolution2dDescriptor::m_PadTop, TransposeConvolution2dDescriptor::m_PadTop, Pooling2dDescriptor::m_PoolHeight, Pooling2dDescriptor::m_PoolType, Pooling2dDescriptor::m_PoolWidth, Pooling2dDescriptor::m_StrideX, Convolution2dDescriptor::m_StrideX, DepthwiseConvolution2dDescriptor::m_StrideX, TransposeConvolution2dDescriptor::m_StrideX, Pooling2dDescriptor::m_StrideY, Convolution2dDescriptor::m_StrideY, DepthwiseConvolution2dDescriptor::m_StrideY, TransposeConvolution2dDescriptor::m_StrideY, armnn::NHWC, armnn::numeric_cast(), TensorInfo::SetShape(), IOutputSlot::SetTensorInfo(), armnnDeserializer::ToTensorInfo(), and Exception::what().

{
    ResetParser();
    m_Model = LoadModelFromBinary(binaryContent.data(), binaryContent.size());
    return CreateNetworkFromModel();
}
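
A short sketch of this overload for a model already held in memory; the helper name and surrounding code are illustrative:

#include <armnnTfLiteParser/ITfLiteParser.hpp>
#include <vector>

armnn::INetworkPtr ParseFromMemory(const std::vector<uint8_t>& flatbufferBytes)
{
    auto parser = armnnTfLiteParser::ITfLiteParser::Create();
    // Throws a ParseException if the bytes are not a valid TfLite flatbuffer.
    return parser->CreateNetworkFromBinary(flatbufferBytes);
}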

◆ CreateNetworkFromBinaryFile()

INetworkPtr CreateNetworkFromBinaryFile ( const char *  graphFile)

Create the network from a flatbuffers binary file on disk.

Definition at line 675 of file TfLiteParser.cpp.

References TfLiteParserImpl::LoadModelFromFile().

{
    ResetParser();
    m_Model = LoadModelFromFile(graphFile);
    return CreateNetworkFromModel();
}

◆ GetBuffer()

TfLiteParserImpl::BufferRawPtr GetBuffer ( const ModelPtr model,
size_t  bufferIndex 
)
static

◆ GetInputs()

TfLiteParserImpl::TensorRawPtrVector GetInputs ( const ModelPtr model,
size_t  subgraphIndex,
size_t  operatorIndex 
)
static

Definition at line 3357 of file TfLiteParser.cpp.

References CHECK_MODEL, and CHECKED_NON_NEGATIVE.

Referenced by armnnTfLiteParser::ComputeWrappedIndex(), TfLiteParserImpl::CreateNetworkFromBinary(), TfLiteParserImpl::OutputShapeOfReshape(), and TfLiteParserImpl::OutputShapeOfSqueeze().

{
    CHECK_MODEL(model, subgraphIndex, operatorIndex);

    const auto& subgraphPtr = model->subgraphs[subgraphIndex];
    const auto& operatorPtr = subgraphPtr->operators[operatorIndex];

    size_t inputCount = operatorPtr->inputs.size();
    TensorRawPtrVector result;
    for (size_t i = 0; i < inputCount; ++i)
    {
        // If the input location is -1 then assume input is turned off.
        if (operatorPtr->inputs[i] == -1)
        {
            continue;
        }
        else
        {
            uint32_t inputId = CHECKED_NON_NEGATIVE(operatorPtr->inputs[i]);
            result.push_back(subgraphPtr->tensors[inputId].get());
        }
    }
    return result;
}
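
Because GetInputs() and the related static helpers take the unpacked ModelPtr directly, a model can be inspected without constructing a network. A hedged sketch (the indices and path are illustrative, and TfLiteParser.hpp is an internal header):

#include <TfLiteParser.hpp>
#include <iostream>

void PrintOperatorInputs(const char* path)
{
    using armnnTfLiteParser::TfLiteParserImpl;

    TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromFile(path);

    // Inputs of operator 0 in subgraph 0; optional (-1) inputs are skipped by GetInputs().
    for (const tflite::TensorT* tensor : TfLiteParserImpl::GetInputs(model, 0, 0))
    {
        std::cout << tensor->name << "\n";
    }
}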

◆ GetInputTensorIds()

std::vector< int32_t > & GetInputTensorIds ( const ModelPtr model,
size_t  subgraphIndex,
size_t  operatorIndex 
)
static

Definition at line 3437 of file TfLiteParser.cpp.

References CHECK_MODEL.

Referenced by armnnTfLiteParser::ComputeWrappedIndex(), TfLiteParserImpl::CreateNetworkFromBinary(), TfLiteParserImpl::OutputShapeOfReshape(), and TfLiteParserImpl::OutputShapeOfSqueeze().

{
    CHECK_MODEL(model, subgraphIndex, operatorIndex);
    const auto& subgraphPtr = model->subgraphs[subgraphIndex];
    const auto& operatorPtr = subgraphPtr->operators[operatorIndex];
    return operatorPtr->inputs;
}

◆ GetNetworkInputBindingInfo()

BindingPointInfo GetNetworkInputBindingInfo ( size_t  subgraphId,
const std::string &  name 
) const

Retrieve binding info (layer id and tensor info) for the network input identified by the given layer name and subgraph id.

Definition at line 3677 of file TfLiteParser.cpp.

References CHECK_LOCATION, CHECK_SUBGRAPH, TfLiteParserImpl::GetSubgraphInputs(), and armnnDeserializer::ToTensorInfo().

{
    CHECK_SUBGRAPH(m_Model, subgraphId);
    auto inputs = GetSubgraphInputs(m_Model, subgraphId);
    for (auto const& input : inputs)
    {
        if (input.second->name == name)
        {
            auto bindingId = GenerateLayerBindingId(subgraphId, input.first);
            return std::make_pair(bindingId, ToTensorInfo(input.second));
        }
    }

    std::stringstream bindings;
    for (auto const& input : inputs)
    {
        bindings << "'" << input.second->name << "' ";
    }

    throw ParseException(
        fmt::format("No input binding found for subgraph:{} and name:{}. "
                    "Possible inputs are: [{}] {}",
                    subgraphId,
                    name,
                    bindings.str(),
                    CHECK_LOCATION().AsString()));
}
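
BindingPointInfo is a (binding id, armnn::TensorInfo) pair; the id is what is later handed to the runtime when binding input and output tensors. A minimal sketch, assuming placeholder tensor names:

#include <armnnTfLiteParser/ITfLiteParser.hpp>
#include <iostream>

void ShowBindings(armnnTfLiteParser::ITfLiteParser& parser)
{
    // "input" and "output" stand in for real tensor names from the model.
    auto inBinding  = parser.GetNetworkInputBindingInfo(0, "input");
    auto outBinding = parser.GetNetworkOutputBindingInfo(0, "output");

    std::cout << "input id: "  << inBinding.first
              << " elements: " << inBinding.second.GetNumElements() << "\n";
    std::cout << "output id: " << outBinding.first
              << " elements: " << outBinding.second.GetNumElements() << "\n";
}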

◆ GetNetworkOutputBindingInfo()

BindingPointInfo GetNetworkOutputBindingInfo ( size_t  subgraphId,
const std::string &  name 
) const

Retrieve binding info (layer id and tensor info) for the network output identified by the given layer name and subgraph id.

Definition at line 3706 of file TfLiteParser.cpp.

References CHECK_LOCATION, CHECK_SUBGRAPH, TfLiteParserImpl::GetSubgraphOutputs(), and armnnDeserializer::ToTensorInfo().

{
    CHECK_SUBGRAPH(m_Model, subgraphId);
    auto outputs = GetSubgraphOutputs(m_Model, subgraphId);
    for (unsigned int i = 0; i < outputs.size(); ++i)
    {
        auto const output = outputs[i];
        if (output.second->name == name)
        {
            auto bindingId = GenerateLayerBindingId(subgraphId, output.first);
            std::vector<unsigned int> shape = m_OverridenOutputShapes.size() > 0 ?
                                              m_OverridenOutputShapes[i] : AsUnsignedVector(output.second->shape);
            return std::make_pair(bindingId, ToTensorInfo(output.second, shape));
        }
    }

    std::stringstream bindings;
    for (auto const& output : outputs)
    {
        bindings << "'" << output.second->name << "' ";
    }

    throw ParseException(
        fmt::format("No output binding found for subgraph:{} and name:{}. "
                    "Possible outputs are: [{}] {}",
                    subgraphId,
                    name,
                    bindings.str(),
                    CHECK_LOCATION().AsString()));
}

◆ GetOutputs()

TfLiteParserImpl::TensorRawPtrVector GetOutputs ( const ModelPtr model,
size_t  subgraphIndex,
size_t  operatorIndex 
)
static

Definition at line 3384 of file TfLiteParser.cpp.

References CHECK_MODEL, CHECK_TENSOR, and CHECKED_NON_NEGATIVE.

Referenced by armnnTfLiteParser::ComputeWrappedIndex(), TfLiteParserImpl::CreateNetworkFromBinary(), TfLiteParserImpl::OutputShapeOfReshape(), and TfLiteParserImpl::OutputShapeOfSqueeze().

{
    CHECK_MODEL(model, subgraphIndex, operatorIndex);

    const auto& subgraphPtr = model->subgraphs[subgraphIndex];
    const auto& operatorPtr = subgraphPtr->operators[operatorIndex];

    size_t outputCount = operatorPtr->outputs.size();
    TensorRawPtrVector result(outputCount);
    for (size_t i = 0; i < outputCount; ++i)
    {
        uint32_t outputId = CHECKED_NON_NEGATIVE(operatorPtr->outputs[i]);
        CHECK_TENSOR(model, subgraphIndex, outputId);
        result[i] = subgraphPtr->tensors[outputId].get();
    }
    return result;
}

◆ GetOutputTensorIds()

std::vector< int32_t > & GetOutputTensorIds ( const ModelPtr model,
size_t  subgraphIndex,
size_t  operatorIndex 
)
static

Definition at line 3447 of file TfLiteParser.cpp.

References ARMNN_ASSERT, CHECK_LOCATION, CHECK_MODEL, CHECK_SUBGRAPH, IConnectableLayer::GetInputSlot(), IConnectableLayer::GetNumInputSlots(), IConnectableLayer::GetNumOutputSlots(), IConnectableLayer::GetOutputSlot(), TfLiteParserImpl::GetSubgraphInputs(), TfLiteParserImpl::GetSubgraphOutputs(), IOutputSlot::SetTensorInfo(), and armnnDeserializer::ToTensorInfo().

Referenced by armnnTfLiteParser::ComputeWrappedIndex(), TfLiteParserImpl::CreateNetworkFromBinary(), TfLiteParserImpl::OutputShapeOfReshape(), and TfLiteParserImpl::OutputShapeOfSqueeze().

{
    CHECK_MODEL(model, subgraphIndex, operatorIndex);
    const auto& subgraphPtr = model->subgraphs[subgraphIndex];
    const auto& operatorPtr = subgraphPtr->operators[operatorIndex];
    return operatorPtr->outputs;
}

◆ GetSubgraphCount()

size_t GetSubgraphCount ( ) const

Return the number of subgraphs in the parsed model.

Definition at line 3738 of file TfLiteParser.cpp.

{
    return m_Model->subgraphs.size();
}

◆ GetSubgraphInputs()

TfLiteParserImpl::TensorIdRawPtrVector GetSubgraphInputs ( const ModelPtr model,
size_t  subgraphIndex 
)
static

Definition at line 3404 of file TfLiteParser.cpp.

References CHECK_SUBGRAPH, CHECK_TENSOR, and CHECKED_NON_NEGATIVE.

Referenced by TfLiteParserImpl::GetNetworkInputBindingInfo(), TfLiteParserImpl::GetOutputTensorIds(), and TfLiteParserImpl::GetSubgraphInputTensorNames().

{
    CHECK_SUBGRAPH(model, subgraphIndex);
    const auto& subgraphPtr = model->subgraphs[subgraphIndex];

    size_t inputCount = subgraphPtr->inputs.size();
    TensorIdRawPtrVector result(inputCount);
    for (size_t i = 0; i < inputCount; ++i)
    {
        uint32_t inputId = CHECKED_NON_NEGATIVE(subgraphPtr->inputs[i]);
        CHECK_TENSOR(model, subgraphIndex, inputId);
        result[i] = std::make_pair(inputId, subgraphPtr->tensors[inputId].get());
    }
    return result;
}

◆ GetSubgraphInputTensorNames()

std::vector< std::string > GetSubgraphInputTensorNames ( size_t  subgraphId) const

Return the input tensor names for a given subgraph.

Definition at line 3743 of file TfLiteParser.cpp.

References CHECK_SUBGRAPH, and TfLiteParserImpl::GetSubgraphInputs().

{
    CHECK_SUBGRAPH(m_Model, subgraphId);
    auto inputs = GetSubgraphInputs(m_Model, subgraphId);
    std::vector<std::string> result;
    result.reserve(inputs.size());
    for (auto const& input : inputs)
    {
        result.push_back(input.second->name);
    }
    return result;
}

◆ GetSubgraphOutputs()

TfLiteParserImpl::TensorIdRawPtrVector GetSubgraphOutputs ( const ModelPtr model,
size_t  subgraphIndex 
)
static

Definition at line 3421 of file TfLiteParser.cpp.

References CHECK_SUBGRAPH, and CHECKED_NON_NEGATIVE.

Referenced by TfLiteParserImpl::GetNetworkOutputBindingInfo(), TfLiteParserImpl::GetOutputTensorIds(), and TfLiteParserImpl::GetSubgraphOutputTensorNames().

{
    CHECK_SUBGRAPH(model, subgraphIndex);
    const auto& subgraphPtr = model->subgraphs[subgraphIndex];

    size_t outputCount = subgraphPtr->outputs.size();
    TensorIdRawPtrVector result(outputCount);
    for (size_t i = 0; i < outputCount; ++i)
    {
        uint32_t outputId = CHECKED_NON_NEGATIVE(subgraphPtr->outputs[i]);
        result[i] = std::make_pair(outputId, subgraphPtr->tensors[outputId].get());
    }
    return result;
}

◆ GetSubgraphOutputTensorNames()

std::vector< std::string > GetSubgraphOutputTensorNames ( size_t  subgraphId) const

Return the output tensor names for a given subgraph.

Definition at line 3756 of file TfLiteParser.cpp.

References CHECK_SUBGRAPH, and TfLiteParserImpl::GetSubgraphOutputs().

{
    CHECK_SUBGRAPH(m_Model, subgraphId);
    auto outputs = GetSubgraphOutputs(m_Model, subgraphId);
    std::vector<std::string> result;
    result.reserve(outputs.size());
    for (auto const& output : outputs)
    {
        result.push_back(output.second->name);
    }
    return result;
}
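
Combined with GetSubgraphCount(), the tensor-name queries let a caller enumerate every binding point without prior knowledge of the model; a brief sketch:

#include <armnnTfLiteParser/ITfLiteParser.hpp>
#include <iostream>

void DumpTensorNames(armnnTfLiteParser::ITfLiteParser& parser)
{
    for (size_t s = 0; s < parser.GetSubgraphCount(); ++s)
    {
        for (const std::string& name : parser.GetSubgraphInputTensorNames(s))
        {
            std::cout << "subgraph " << s << " input:  " << name << "\n";
        }
        for (const std::string& name : parser.GetSubgraphOutputTensorNames(s))
        {
            std::cout << "subgraph " << s << " output: " << name << "\n";
        }
    }
}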

◆ GetVersion()

const std::string GetVersion ( )
static

Retrieve version in X.Y.Z form.

Definition at line 3769 of file TfLiteParser.cpp.

References TFLITE_PARSER_VERSION.

{
    return TFLITE_PARSER_VERSION;
}

◆ LoadModelFromBinary()

TfLiteParserImpl::ModelPtr LoadModelFromBinary ( const uint8_t *  binaryContent,
size_t  len 
)
static

Definition at line 3338 of file TfLiteParser.cpp.

References CHECK_LOCATION.

Referenced by TfLiteParserImpl::CreateNetworkFromBinary(), and TfLiteParserImpl::LoadModelFromFile().

{
    if (binaryContent == nullptr)
    {
        throw InvalidArgumentException(fmt::format("Invalid (null) binary content {}",
                                                   CHECK_LOCATION().AsString()));
    }
    flatbuffers::Verifier verifier(binaryContent, len);
    if (verifier.VerifyBuffer<tflite::Model>() == false)
    {
        throw ParseException(
            fmt::format("Buffer doesn't conform to the expected Tensorflow Lite "
                        "flatbuffers format. size:{} {}",
                        len,
                        CHECK_LOCATION().AsString()));
    }
    return tflite::UnPackModel(binaryContent);
}

◆ LoadModelFromFile()

TfLiteParserImpl::ModelPtr LoadModelFromFile ( const char *  fileName)
static

Definition at line 3314 of file TfLiteParser.cpp.

References CHECK_LOCATION, and TfLiteParserImpl::LoadModelFromBinary().

Referenced by TfLiteParserImpl::CreateNetworkFromBinaryFile().

{
    if (fileName == nullptr)
    {
        throw InvalidArgumentException(fmt::format("Invalid (null) file name {}",
                                                   CHECK_LOCATION().AsString()));
    }
    std::error_code errorCode;
    fs::path pathToFile(fileName);
    if (!fs::exists(pathToFile, errorCode))
    {
        // fmt::format() could not be used here (format error)
        std::stringstream msg;
        msg << "Cannot find the file (" << fileName << ") errorCode: " << errorCode
            << " " << CHECK_LOCATION().AsString();

        throw FileNotFoundException(msg.str());
    }
    std::ifstream file(fileName, std::ios::binary);
    std::string fileContent((std::istreambuf_iterator<char>(file)), std::istreambuf_iterator<char>());
    return LoadModelFromBinary(reinterpret_cast<const uint8_t*>(fileContent.c_str()),
                               fileContent.size());
}

◆ OutputShapeOfReshape()

armnn::TensorInfo OutputShapeOfReshape ( const armnn::TensorInfo inputTensorInfo,
const std::vector< int32_t > &  targetDimsIn 
)
static

Definition at line 2084 of file TfLiteParser.cpp.

References ARMNN_ASSERT, ARMNN_THROW_PARSE_EXCEPTION, CHECK_LOCATION, CHECK_MODEL, CHECK_SUPPORTED_FUSED_ACTIVATION, CHECK_VALID_SIZE, CHECKED_NON_NEGATIVE, armnnDeserializer::CheckShape(), IOutputSlot::Connect(), TfLiteParserImpl::GetBuffer(), TensorInfo::GetDataType(), TfLiteParserImpl::GetInputs(), IConnectableLayer::GetInputSlot(), TfLiteParserImpl::GetInputTensorIds(), IConnectableLayer::GetName(), TensorInfo::GetNumBytes(), TensorInfo::GetNumDimensions(), TensorInfo::GetNumElements(), IConnectableLayer::GetNumOutputSlots(), TfLiteParserImpl::GetOutputs(), IConnectableLayer::GetOutputSlot(), TfLiteParserImpl::GetOutputTensorIds(), TensorInfo::GetQuantizationOffset(), TensorInfo::GetQuantizationScale(), TensorInfo::GetShape(), armnnUtils::GetUnsignedAxis(), StackDescriptor::m_Axis, FullyConnectedDescriptor::m_BiasEnabled, FullyConnectedDescriptor::m_ConstantWeights, DetectionPostProcessDescriptor::m_DetectionsPerClass, StackDescriptor::m_InputShape, DetectionPostProcessDescriptor::m_MaxClassesPerDetection, DetectionPostProcessDescriptor::m_MaxDetections, ResizeDescriptor::m_Method, DetectionPostProcessDescriptor::m_NmsIouThreshold, DetectionPostProcessDescriptor::m_NmsScoreThreshold, DetectionPostProcessDescriptor::m_NumClasses, StackDescriptor::m_NumInputs, DetectionPostProcessDescriptor::m_ScaleH, DetectionPostProcessDescriptor::m_ScaleW, DetectionPostProcessDescriptor::m_ScaleX, DetectionPostProcessDescriptor::m_ScaleY, ReshapeDescriptor::m_TargetShape, FullyConnectedDescriptor::m_TransposeWeightMatrix, DetectionPostProcessDescriptor::m_UseRegularNms, armnn::MaxNumOfTensorDimensions, armnn::NHWC, armnn::numeric_cast(), armnnUtils::ProcessConcatInputTensorInfo(), OriginsDescriptor::SetConcatAxis(), TensorInfo::SetShape(), IOutputSlot::SetTensorInfo(), ViewsDescriptor::SetViewOriginCoord(), ViewsDescriptor::SetViewSize(), and armnnDeserializer::ToTensorInfo().

{
    std::vector<unsigned int> outputDims(targetDimsIn.begin(), targetDimsIn.end());
    const auto stretchDim = std::find(targetDimsIn.begin(), targetDimsIn.end(), -1);

    if (stretchDim != targetDimsIn.end())
    {
        if (std::find(std::next(stretchDim), targetDimsIn.end(), -1) != targetDimsIn.end())
        {
            throw ParseException(
                fmt::format("At most one component of shape can be -1 {}", CHECK_LOCATION().AsString()));
        }

        auto targetNumElements =
            armnn::numeric_cast<unsigned int>(
                std::accumulate(targetDimsIn.begin(), targetDimsIn.end(), -1, std::multiplies<int32_t>()));

        auto stretchIndex = static_cast<size_t>(std::distance(targetDimsIn.begin(), stretchDim));
        outputDims[stretchIndex] = inputTensorInfo.GetNumElements() / targetNumElements;
    }

    TensorShape outputShape = TensorShape(static_cast<unsigned int>(outputDims.size()), outputDims.data());

    TensorInfo reshapeInfo = inputTensorInfo;
    reshapeInfo.SetShape(outputShape);

    return reshapeInfo;
}

◆ OutputShapeOfSqueeze()

armnn::TensorInfo OutputShapeOfSqueeze ( const std::vector< uint32_t > &  squeezeDimsIn,
const armnn::TensorInfo inputTensorInfo 
)
static

Definition at line 1595 of file TfLiteParser.cpp.

References ARMNN_ASSERT, CHECK_LOCATION, CHECK_MODEL, CHECK_VALID_SIZE, TfLiteParserImpl::GetBuffer(), TfLiteParserImpl::GetInputs(), TfLiteParserImpl::GetInputTensorIds(), TensorInfo::GetNumBytes(), TensorInfo::GetNumDimensions(), TensorInfo::GetNumElements(), TfLiteParserImpl::GetOutputs(), IConnectableLayer::GetOutputSlot(), TfLiteParserImpl::GetOutputTensorIds(), TensorInfo::GetShape(), armnn::IgnoreUnused(), ActivationDescriptor::m_A, MeanDescriptor::m_Axis, ActivationDescriptor::m_B, StridedSliceDescriptor::m_Begin, StridedSliceDescriptor::m_BeginMask, StridedSliceDescriptor::m_DataLayout, StridedSliceDescriptor::m_EllipsisMask, StridedSliceDescriptor::m_End, StridedSliceDescriptor::m_EndMask, ActivationDescriptor::m_Function, MeanDescriptor::m_KeepDims, StridedSliceDescriptor::m_NewAxisMask, PadDescriptor::m_PadList, PadDescriptor::m_PadValue, StridedSliceDescriptor::m_ShrinkAxisMask, StridedSliceDescriptor::m_Stride, ReshapeDescriptor::m_TargetShape, armnn::NHWC, TensorInfo::SetShape(), IOutputSlot::SetTensorInfo(), and armnnDeserializer::ToTensorInfo().

{
    CHECK_VALID_SIZE(squeezeDimsIn.size(), 0, 1, 2, 3, 4);
    std::vector<uint32_t> squeezeDims = squeezeDimsIn;
    static const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };

    if (inputTensorInfo.GetNumDimensions() > 4)
    {
        std::stringstream ss;
        ss << "Input tensor has unexpected number of dimensions:" << inputTensorInfo.GetNumDimensions()
           << " shape:" << inputTensorInfo.GetShape() << " "
           << CHECK_LOCATION().AsString();
        throw ParseException(ss.str());
    }

    if (squeezeDims.empty())
    {
        squeezeDims.assign(dimensionSequence,
                           dimensionSequence + inputTensorInfo.GetNumDimensions());
    }

    std::vector<uint32_t> outputDims;
    for (unsigned int i = 0; i < inputTensorInfo.GetNumDimensions(); i++)
    {
        bool skipSqueeze = (std::find(squeezeDims.begin(), squeezeDims.end(), i) == squeezeDims.end());
        auto currentDimension = inputTensorInfo.GetShape()[i];
        if (skipSqueeze || currentDimension != 1)
        {
            outputDims.push_back(currentDimension);
        }
    }

    if (outputDims.size() > 4)
    {
        std::stringstream ss;
        ss << "Output tensor has unexpected number of dimensions:" << outputDims.size()
           << " shape:" << inputTensorInfo.GetShape() << " "
           << CHECK_LOCATION().AsString();
        throw ParseException(ss.str());
    }

    TensorShape outShape = TensorShape(static_cast<unsigned int>(outputDims.size()),
                                       outputDims.data());

    // we need to preserve the tensor type and the quantization data as well
    TensorInfo outTensorInfo = inputTensorInfo;
    outTensorInfo.SetShape(outShape);

    return outTensorInfo;
}

The documentation for this class was generated from the following files:
TfLiteParser.hpp
TfLiteParser.cpp