ArmNN 22.05
armnnTfLiteParser Namespace Reference

Classes

class  ITfLiteParser
 
class  TfLiteParserImpl
 

Typedefs

using BindingPointInfo = armnn::BindingPointInfo
 
using ITfLiteParserPtr = std::unique_ptr< ITfLiteParser, void(*)(ITfLiteParser *parser)>
 

Functions

std::unique_ptr< float[]> AsFloatArray (TfLiteParserImpl::BufferRawPtr bufferPtr, const TensorInfo &tensorInfo)
 
unsigned int ComputeWrappedIndex (int idx, unsigned int numDimsIn)
 

Typedef Documentation

◆ BindingPointInfo

using BindingPointInfo = armnn::BindingPointInfo

Definition at line 20 of file ITfLiteParser.hpp.

◆ ITfLiteParserPtr

using ITfLiteParserPtr = std::unique_ptr<ITfLiteParser, void(*)(ITfLiteParser* parser)>

Definition at line 24 of file ITfLiteParser.hpp.
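
A minimal usage sketch of the two typedefs, assuming the ITfLiteParser::Create(), CreateNetworkFromBinaryFile() and GetNetworkInputBindingInfo() members documented on the ITfLiteParser page; the file path, tensor name and subgraph index are placeholder values, not taken from this reference:

#include <armnnTfLiteParser/ITfLiteParser.hpp>

int main()
{
    using namespace armnnTfLiteParser;

    // ITfLiteParserPtr owns the parser together with its custom deleter.
    ITfLiteParserPtr parser = ITfLiteParser::Create();

    // Parse a .tflite file into an armnn::INetwork ("model.tflite" is a placeholder path).
    armnn::INetworkPtr network = parser->CreateNetworkFromBinaryFile("model.tflite");

    // BindingPointInfo (an alias of armnn::BindingPointInfo) pairs the layer binding id
    // with the TensorInfo of the named tensor; "input" and subgraph 0 are placeholders.
    BindingPointInfo inputBinding = parser->GetNetworkInputBindingInfo(0, "input");

    (void)network;
    (void)inputBinding;
    return 0;
}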

Function Documentation

◆ AsFloatArray()

std::unique_ptr<float[]> armnnTfLiteParser::AsFloatArray (TfLiteParserImpl::BufferRawPtr bufferPtr, const TensorInfo & tensorInfo)

Definition at line 900 of file TfLiteParser.cpp.

References CHECK_LOCATION, TensorInfo::GetDataType(), armnn::GetDataTypeName(), TensorInfo::GetNumDimensions(), TensorInfo::GetNumElements(), armnnUtils::GetNumElementsAfter(), TensorInfo::GetQuantizationDim(), TensorInfo::GetQuantizationOffset(), TensorInfo::GetQuantizationScale(), TensorInfo::GetQuantizationScales(), TensorInfo::GetShape(), and TensorInfo::HasPerAxisQuantization().

Referenced by TfLiteParserImpl::GetBuffer().

{
    if (tensorInfo.GetDataType() == DataType::QAsymmS8 || tensorInfo.GetDataType() == DataType::QSymmS8 ||
        tensorInfo.GetDataType() == DataType::QAsymmU8)
    {
        std::unique_ptr<float[]> buffer(new float[tensorInfo.GetNumElements()]);

        if (tensorInfo.HasPerAxisQuantization())
        {
            unsigned int axis = tensorInfo.GetQuantizationDim().value();
            auto axisDimensionality = tensorInfo.GetShape()[axis];
            auto axisFactor = armnnUtils::GetNumElementsAfter(tensorInfo.GetShape(), axis);

            for (unsigned int i = 0; i < tensorInfo.GetNumDimensions(); ++i)
            {
                unsigned int axisIndex = (i / axisFactor) % axisDimensionality;
                buffer[i] = Dequantize<int8_t>(bufferPtr->data[i], tensorInfo.GetQuantizationScales()[axisIndex],
                                               tensorInfo.GetQuantizationOffset());
            }
        }
        else
        {
            for (unsigned int i = 0; i < tensorInfo.GetNumElements(); ++i)
            {
                buffer[i] = Dequantize<int8_t>(bufferPtr->data[i], tensorInfo.GetQuantizationScale(),
                                               tensorInfo.GetQuantizationOffset());
            }
        }
        return buffer;
    }
    throw ParseException(
        fmt::format("Unsupported input/weights combination: Input {} not supported with Weights {}",
                    GetDataTypeName(DataType::Float32),
                    GetDataTypeName(tensorInfo.GetDataType()),
                    CHECK_LOCATION().AsString()));
}
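
The per-axis branch maps each flattened element index onto the slice it occupies along the quantization axis and picks the matching scale. A standalone sketch of that index arithmetic, independent of the ArmNN headers and assuming the usual affine dequantization real = scale * (quantized - offset); the shape, scales and quantized values are made-up illustration data:

#include <cstdint>
#include <cstdio>
#include <vector>

int main()
{
    // Hypothetical per-axis quantized tensor of shape [2, 3], quantized along axis 0,
    // so each of the two rows has its own scale; the offset is shared.
    const unsigned int shape[2]     = { 2, 3 };
    const unsigned int axis         = 0;
    const unsigned int axisDim      = shape[axis];   // 2 slices along the quantization axis
    const unsigned int axisFactor   = shape[1];      // number of elements after the axis: 3
    const std::vector<float> scales = { 0.5f, 0.25f };
    const int32_t offset            = 0;
    const int8_t quantized[6]       = { 2, 4, 6, 8, 10, 12 };

    float dequantized[6];
    for (unsigned int i = 0; i < 6; ++i)
    {
        // Same mapping as AsFloatArray: which slice along the quantization axis
        // does flattened element i belong to?
        unsigned int axisIndex = (i / axisFactor) % axisDim;
        dequantized[i] = scales[axisIndex] * static_cast<float>(quantized[i] - offset);
        std::printf("q=%d -> %f (scale %f)\n", quantized[i], dequantized[i], scales[axisIndex]);
    }
    return 0;
}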

◆ ComputeWrappedIndex()

unsigned int armnnTfLiteParser::ComputeWrappedIndex (int idx, unsigned int numDimsIn)

Definition at line 3692 of file TfLiteParser.cpp.

References ARMNN_ASSERT and armnn::numeric_cast().

{
    int numDims = armnn::numeric_cast<int>(numDimsIn);
    int v = idx < 0 ? numDims + idx : idx;
    ARMNN_ASSERT(v >= 0);
    ARMNN_ASSERT(v < numDims);

    return static_cast<unsigned int>(v);
}
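
The helper wraps Python/TFLite-style negative axis indices into the range [0, numDimsIn). Illustrative results (example values, not taken from the ArmNN sources):

// For a 4-dimensional input (numDimsIn == 4):
//   ComputeWrappedIndex(-1, 4) == 3   // last dimension
//   ComputeWrappedIndex( 0, 4) == 0
//   ComputeWrappedIndex( 2, 4) == 2
// Any idx outside [-4, 3] trips the ARMNN_ASSERT checks above.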