ArmNN 22.11
TensorInfo Class Reference

#include <Tensor.hpp>

Public Member Functions

 TensorInfo ()
 Empty (invalid) constructor.
 
 TensorInfo (const TensorShape &shape, DataType dataType, float quantizationScale=0.0f, int32_t quantizationOffset=0, bool isConstant=false)
 
 TensorInfo (unsigned int numDimensions, const unsigned int *dimensionSizes, DataType dataType, float quantizationScale=0.0f, int32_t quantizationOffset=0, bool isConstant=false)
 
 TensorInfo (const TensorShape &shape, DataType dataType, const std::vector< float > &quantizationScales, unsigned int quantizationDim, bool isConstant=false)
 
 TensorInfo (unsigned int numDimensions, const unsigned int *dimensionSizes, DataType dataType, const std::vector< float > &quantizationScales, unsigned int quantizationDim, bool isConstant=false)
 
 TensorInfo (const TensorInfo &other)
 
TensorInfo & operator= (const TensorInfo &other)
 
bool operator== (const TensorInfo &other) const
 
bool operator!= (const TensorInfo &other) const
 
const TensorShape & GetShape () const
 
TensorShape & GetShape ()
 
void SetShape (const TensorShape &newShape)
 
unsigned int GetNumDimensions () const
 
unsigned int GetNumElements () const
 
DataType GetDataType () const
 
void SetDataType (DataType type)
 
bool HasMultipleQuantizationScales () const
 
bool HasPerAxisQuantization () const
 
std::vector< float > GetQuantizationScales () const
 
void SetQuantizationScales (const std::vector< float > &scales)
 
float GetQuantizationScale () const
 
void SetQuantizationScale (float scale)
 
int32_t GetQuantizationOffset () const
 
void SetQuantizationOffset (int32_t offset)
 
Optional< unsigned int > GetQuantizationDim () const
 
void SetQuantizationDim (const Optional< unsigned int > &quantizationDim)
 
bool IsQuantized () const
 
bool IsConstant () const
 
void SetConstant (const bool IsConstant=true)
 Marks the data corresponding to this tensor info as constant.
 
bool IsTypeSpaceMatch (const TensorInfo &other) const
 Check that the types are the same and, if quantized, that the quantization parameters are the same.
 
unsigned int GetNumBytes () const
 

Detailed Description

Constructor & Destructor Documentation

◆ TensorInfo() [1/6]

TensorInfo ( )

Empty (invalid) constructor.

Definition at line 341 of file Tensor.cpp.

    : m_DataType(DataType::Float32), m_IsConstant(false)
{
}
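A minimal usage sketch (assuming <armnn/Tensor.hpp> is included; the shape below is made up): a default-constructed TensorInfo is a placeholder whose shape and data type must be set before it describes a real tensor.

#include <armnn/Tensor.hpp>

armnn::TensorInfo info;                               // invalid placeholder
info.SetShape(armnn::TensorShape({1, 224, 224, 3}));  // now describes a 4-d tensor
info.SetDataType(armnn::DataType::Float32);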

◆ TensorInfo() [2/6]

TensorInfo (const TensorShape & shape, DataType dataType, float quantizationScale = 0.0f, int32_t quantizationOffset = 0, bool isConstant = false)

Definition at line 346 of file Tensor.cpp.

References TensorInfo::SetQuantizationOffset(), and TensorInfo::SetQuantizationScale().

    : m_Shape(shape)
    , m_DataType(dataType)
    , m_IsConstant(isConstant)
{
    SetQuantizationScale(quantizationScale);
    SetQuantizationOffset(quantizationOffset);
}
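For illustration, a minimal sketch of this overload describing a per-tensor quantized tensor (the shape and quantization values are made up):

// 8-bit asymmetric quantization: real = scale * (quantized - offset)
armnn::TensorInfo qInfo(armnn::TensorShape({1, 16, 16, 3}),
                        armnn::DataType::QAsymmU8,
                        /*quantizationScale=*/0.05f,
                        /*quantizationOffset=*/128);
// qInfo.IsQuantized() returns true for quantized data types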

◆ TensorInfo() [3/6]

TensorInfo (unsigned int numDimensions, const unsigned int * dimensionSizes, DataType dataType, float quantizationScale = 0.0f, int32_t quantizationOffset = 0, bool isConstant = false)

Definition at line 359 of file Tensor.cpp.

References TensorInfo::SetQuantizationOffset(), and TensorInfo::SetQuantizationScale().

    : m_Shape(numDimensions, dimensionSizes), m_DataType(dataType), m_IsConstant(isConstant)
{
    SetQuantizationScale(quantizationScale);
    SetQuantizationOffset(quantizationOffset);
}

◆ TensorInfo() [4/6]

TensorInfo (const TensorShape & shape, DataType dataType, const std::vector< float > & quantizationScales, unsigned int quantizationDim, bool isConstant = false)

Definition at line 371 of file Tensor.cpp.

References TensorInfo::SetQuantizationDim(), and TensorInfo::SetQuantizationScales().

    : m_Shape(shape)
    , m_DataType(dataType)
    , m_IsConstant(isConstant)
{
    SetQuantizationScales(quantizationScales);
    SetQuantizationDim(MakeOptional<unsigned int>(quantizationDim));
}
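A sketch of per-axis (per-channel) quantization, assuming symmetric 8-bit weights quantized along dimension 0; all values below are illustrative:

// One scale per output channel; quantizationDim selects the axis the scales run along
std::vector<float> scales = { 0.1f, 0.2f, 0.15f, 0.25f };
armnn::TensorInfo weightInfo(armnn::TensorShape({4, 3, 3, 3}),
                             armnn::DataType::QSymmS8,
                             scales,
                             /*quantizationDim=*/0,
                             /*isConstant=*/true);
// weightInfo.HasPerAxisQuantization() returns true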

◆ TensorInfo() [5/6]

TensorInfo (unsigned int numDimensions, const unsigned int * dimensionSizes, DataType dataType, const std::vector< float > & quantizationScales, unsigned int quantizationDim, bool isConstant = false)

Definition at line 384 of file Tensor.cpp.

References TensorInfo::SetQuantizationDim(), and TensorInfo::SetQuantizationScales().

    : m_Shape(numDimensions, dimensionSizes)
    , m_DataType(dataType)
    , m_IsConstant(isConstant)
{
    SetQuantizationScales(quantizationScales);
    SetQuantizationDim(MakeOptional<unsigned int>(quantizationDim));
}

◆ TensorInfo() [6/6]

TensorInfo (const TensorInfo & other)

Definition at line 398 of file Tensor.cpp.

    : m_Shape(other.m_Shape)
    , m_DataType(other.m_DataType)
    , m_IsConstant(other.m_IsConstant)
    , m_Quantization(other.m_Quantization)
{}

Member Function Documentation

◆ GetDataType()

DataType GetDataType ( ) const
inline

Definition at line 198 of file Tensor.hpp.

Referenced by armnnTfLiteParser::AsFloatArray(), BiasAndWeightsTypesCompatible::BiasAndWeightsTypesCompatible(), BiasAndWeightsTypesMatch::BiasAndWeightsTypesMatch(), armnnUtils::CalculateReducedOutputTensoInfo(), armnnUtils::CalculateStridedSliceOutputTensorInfo(), RefTensorHandle::CanBeImported(), TosaRefTensorHandle::CanBeImported(), armnn::CheckScaleSetOnQuantizedType(), armnn::ClConvertFp16ToFp32WorkloadValidate(), armnn::ClConvertFp32ToFp16WorkloadValidate(), armnnTfLiteParser::ComputeWrappedIndex(), armnn::ConvertBf16ToFp32Weight(), Converter::ConvertOperation(), armnn_driver::ConvertToLayerInputHandle(), armnn::optimizations::ConvertWeight(), RefDepthToSpaceWorkload::ExecuteAsync(), RefStridedSliceWorkload::ExecuteAsync(), RefArgMinMaxWorkload::ExecuteAsync(), RefSliceWorkload::ExecuteAsync(), RefShapeWorkload::ExecuteAsync(), BFloat16ToFloat32::Func(), Float16ToFloat32::Func(), Float32ToBFloat16::Func(), Float32ToFloat16::Func(), armnn::GetBiasDataType(), TfLiteParserImpl::GetBuffer(), Layer::GetDataType(), armnnSerializer::GetFlatBufferArgMinMaxFunction(), armnn::optimizations::pad_fold::GetLowestElement(), TfLiteParserImpl::GetOutputTensorIds(), armnnUtils::GetPerAxisParams(), armnn::GetUnpaddedTensorStrides(), armnn::InitializeArmComputeTensorData(), armnn::InsertConvertBf16ToFp32LayersBefore(), armnn::InsertConvertFp16ToFp32LayersBefore(), armnn::InsertConvertFp32ToBf16LayersAfter(), armnn::InsertConvertFp32ToBf16LayersBefore(), armnn::InsertConvertFp32ToFp16LayersAfter(), SampleDynamicLayerSupport::IsAdditionSupported(), RefLayerSupport::IsConvertFp16ToFp32Supported(), RefLayerSupport::IsConvertFp32ToFp16Supported(), RefLayerSupport::IsConvolution2dSupported(), RefLayerSupport::IsConvolution3dSupported(), RefLayerSupport::IsDepthwiseConvolutionSupported(), NeonLayerSupport::IsFloorSupported(), RefLayerSupport::IsFullyConnectedSupported(), ClLayerSupport::IsQLstmSupported(), NeonLayerSupport::IsQLstmSupported(), RefLayerSupport::IsTransposeConvolution2dSupported(), NeonLayerSupport::IsUnidirectionalSequenceLstmSupported(), armnn::LstmImpl(), armnn::MakeDecoder(), armnn::MakeEncoder(), TfLiteParserImpl::OutputShapeOfReshape(), TfLiteParserImpl::OutputShapeOfSqueeze(), armnn::PermuteTensor(), ConvertFp32NetworkToFp16Impl::Run(), ConvertConstPermuteLayersToConstLayers::Run(), ConvertConstDequantisationLayersToConstLayersImpl::Run(), armnn_driver::SwizzleAndroidNn4dTensorToArmNn(), TypeAnyOf::TypeAnyOf(), TypeIs::TypeIs(), ArgMinMaxQueueDescriptor::Validate(), FullyConnectedQueueDescriptor::Validate(), Convolution2dQueueDescriptor::Validate(), Convolution3dQueueDescriptor::Validate(), DepthwiseConvolution2dQueueDescriptor::Validate(), QuantizeQueueDescriptor::Validate(), EqualQueueDescriptor::Validate(), ConvertBf16ToFp32QueueDescriptor::Validate(), ConvertFp32ToBf16QueueDescriptor::Validate(), ConvertFp16ToFp32QueueDescriptor::Validate(), ConvertFp32ToFp16QueueDescriptor::Validate(), GreaterQueueDescriptor::Validate(), GatherNdQueueDescriptor::Validate(), GatherQueueDescriptor::Validate(), TransposeConvolution2dQueueDescriptor::Validate(), ComparisonQueueDescriptor::Validate(), LogicalBinaryQueueDescriptor::Validate(), and armnn::VerifyTensorInfoDataType().

{ return m_DataType; }

◆ GetNumBytes()

◆ GetNumDimensions()

unsigned int GetNumDimensions ( ) const
inline

Definition at line 195 of file Tensor.hpp.

Referenced by armnn::ArgMinMax(), armnnTfLiteParser::AsFloatArray(), BatchMatMul::BatchMatMul(), armnn::CalculateGatherNdKeyIndices(), armnnUtils::CalculateReducedOutputTensoInfo(), armnnUtils::CalculateStridedSliceOutputTensorInfo(), armnn::ComputeAclAxis(), armnn::ComputePositiveAxis(), armnn::ComputeReductionTensorShape(), armnn::ComputeSoftmaxAclAxis(), armnnTfLiteParser::ComputeWrappedIndex(), armnn::Concatenate(), Converter::ConvertOperation(), armnn_driver::ConvertReduce(), RefShapeWorkload::ExecuteAsync(), RefRankWorkload::ExecuteAsync(), armnn::Gather(), armnn::GetBiasDataType(), armnn::GetNumActivations(), RefLayerSupport::IsBatchToSpaceNdSupported(), armnn_driver::IsDynamicTensor(), RefLayerSupport::IsMeanSupported(), armnn::LogSoftmax(), TfLiteParserImpl::OutputShapeOfReshape(), TfLiteParserImpl::OutputShapeOfSqueeze(), armnn::PrintOutput(), armnnUtils::ProcessConcatInputTensorInfo(), armnn::Reduce(), PermuteAndBatchToSpaceAsDepthToSpaceImpl< PermuteType >::Run(), AddBroadcastReshapeLayerImpl::Run(), armnn::Softmax(), armnn::Split(), armnn::Splitter(), armnn::Stack(), armnn_driver::SwizzleAndroidNn4dTensorToArmNn(), TensorNumDimensionsAreCorrect::TensorNumDimensionsAreCorrect(), TensorNumDimensionsAreGreaterOrEqualTo::TensorNumDimensionsAreGreaterOrEqualTo(), FullyConnectedQueueDescriptor::Validate(), MeanQueueDescriptor::Validate(), PadQueueDescriptor::Validate(), InstanceNormalizationQueueDescriptor::Validate(), L2NormalizationQueueDescriptor::Validate(), StridedSliceQueueDescriptor::Validate(), GatherNdQueueDescriptor::Validate(), GatherQueueDescriptor::Validate(), SliceQueueDescriptor::Validate(), BatchMatMulQueueDescriptor::Validate(), QueueDescriptor::ValidateTensorNumDimensions(), FillLayer::ValidateTensorShapesFromInputs(), ReduceLayer::ValidateTensorShapesFromInputs(), and MeanLayer::ValidateTensorShapesFromInputs().

{ return m_Shape.GetNumDimensions(); }

◆ GetNumElements()

unsigned int GetNumElements ( ) const
inline
Examples:
CustomMemoryAllocatorSample.cpp.

Definition at line 196 of file Tensor.hpp.

Referenced by armnn::Activation(), armnnTfLiteParser::AsFloatArray(), armnnTfLiteParser::ComputeWrappedIndex(), armnn::ConvertBf16ToFp32Weight(), armnn::optimizations::ConvertWeight(), armnnOnnxParser::CreateConstTensorImpl(), armnn::Dequantize(), armnn::DetectionPostProcess(), SampleDynamicAdditionWorkload::Execute(), RefGatherNdWorkload::ExecuteAsync(), RefConvertFp32ToBf16Workload::ExecuteAsync(), RefConvertFp32ToFp16Workload::ExecuteAsync(), RefFakeQuantizationFloat32Workload::ExecuteAsync(), RefFloorWorkload::ExecuteAsync(), RefConvertBf16ToFp32Workload::ExecuteAsync(), RefConvertFp16ToFp32Workload::ExecuteAsync(), RefCastWorkload::ExecuteAsync(), RefUnidirectionalSequenceLstmWorkload::ExecuteAsync(), RefDebugWorkload< DataType >::ExecuteAsync(), BFloat16ToFloat32::Func(), Float16ToFloat32::Func(), Float32ToBFloat16::Func(), Float32ToFloat16::Func(), armnn::Gather(), armnn::GetBiasDataType(), TensorInfo::GetNumBytes(), armnn::MirrorPad(), IDeserializer::DeserializerImpl::OutputShapeOfReshape(), TfLiteParserImpl::OutputShapeOfReshape(), TfLiteParserImpl::OutputShapeOfSqueeze(), armnn::Pad(), armnn::PrintOutput(), armnn::Quantize(), armnn::Reduce(), FuseConvertFp32ToBf16IntoConstLayers::Run(), ConvertConstPermuteLayersToConstLayers::Run(), ConvertConstDequantisationLayersToConstLayersImpl::Run(), ShapesAreSameTotalSize::ShapesAreSameTotalSize(), armnn::Splitter(), armnn::Stack(), armnnDeserializer::ToConstTensor(), and SpaceToBatchNdQueueDescriptor::Validate().

{ return m_Shape.GetNumElements(); }
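Taken together with GetNumDimensions() and GetNumBytes(), the counts follow directly from the shape and data type; a small sketch (the sizes are illustrative):

armnn::TensorInfo info(armnn::TensorShape({2, 3, 4}), armnn::DataType::Float32);
unsigned int rank  = info.GetNumDimensions(); // 3
unsigned int elems = info.GetNumElements();   // 2 * 3 * 4 = 24
unsigned int bytes = info.GetNumBytes();      // 24 elements * 4 bytes each = 96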

◆ GetQuantizationDim()

Optional< unsigned int > GetQuantizationDim ( ) const

◆ GetQuantizationOffset()

◆ GetQuantizationScale()

◆ GetQuantizationScales()

std::vector< float > GetQuantizationScales ( ) const

◆ GetShape() [1/2]

const TensorShape& GetShape ( ) const
inline

Definition at line 191 of file Tensor.hpp.

Referenced by armnn::ArgMinMax(), ArmnnPreparedModel::ArmnnPreparedModel(), armnnTfLiteParser::AsFloatArray(), BatchMatMul::BatchMatMul(), armnn::BatchNormImpl(), armnn::BatchToSpaceNd(), armnn::CalculateGatherNdKeyIndices(), armnnUtils::CalculateReducedOutputTensoInfo(), armnnUtils::CalculateStridedSliceOutputTensorInfo(), armnn::ClUnidirectionalSequenceLstmFloatWorkloadValidate(), armnn_driver::ComputeShape(), armnnTfLiteParser::ComputeWrappedIndex(), armnn::Concatenate(), armnn::Convert1HWOTensorInfoToAcl(), armnn::Convert1HWOTensorToAcl(), armnn::Convert1HWOtoMIHW(), armnn::ConvertBf16ToFp32Weight(), Converter::ConvertOperation(), armnn_driver::ConvertPooling2d(), armnn::ConvertWeightTensorFromArmnnToAcl(), armnn::CreateAclNormalizationLayerInfoForL2Normalization(), armnnOnnxParser::CreateConstTensorImpl(), OnnxParserImpl::CreateNetworkFromString(), ConcatLayer::CreateWorkload(), SplitterLayer::CreateWorkload(), armnn::DepthToSpace(), armnn::DetectionPostProcess(), RefChannelShuffleWorkload::ExecuteAsync(), RefDepthwiseConvolution2dWorkload::ExecuteAsync(), RefLstmWorkload::ExecuteAsync(), RefQLstmWorkload::ExecuteAsync(), RefConvolution3dWorkload::ExecuteAsync(), RefElementwiseUnaryWorkload::ExecuteAsync(), RefLogicalUnaryWorkload::ExecuteAsync(), RefLogicalBinaryWorkload::ExecuteAsync(), RefShapeWorkload::ExecuteAsync(), RefComparisonWorkload::ExecuteAsync(), RefElementwiseWorkload< Functor, ParentDescriptor, DebugString >::ExecuteAsync(), RefUnidirectionalSequenceLstmWorkload::ExecuteAsync(), BFloat16ToFloat32::Func(), Float16ToFloat32::Func(), Float32ToBFloat16::Func(), Float32ToFloat16::Func(), armnn::Gather(), armnn::GetBiasDataType(), TfLiteParserImpl::GetBuffer(), armnnSerializer::GetFlatBufferArgMinMaxFunction(), armnn::GetNumActivations(), armnnUtils::GetPerAxisParams(), IDeserializer::DeserializerImpl::GetQLstmDescriptor(), RefTensorHandle::GetShape(), TosaRefTensorHandle::GetShape(), SampleTensorHandle::GetShape(), ConstTensorHandle::GetShape(), armnn::GetUnpaddedTensorStrides(), armnn::InstanceNorm(), armnn_driver::IsDynamicTensor(), ClLayerSupport::IsSplitterSupported(), NeonLayerSupport::IsSplitterSupported(), armnn::LogSoftmax(), armnn::LstmImpl(), armnn::MirrorPad(), armnn::NeonUnidirectionalSequenceLstmFloatWorkloadValidate(), armnn::NeonUnidirectionalSequenceLstmWorkloadValidate(), IDeserializer::DeserializerImpl::OutputShapeOfReshape(), TfLiteParserImpl::OutputShapeOfReshape(), TfLiteParserImpl::OutputShapeOfSqueeze(), armnn::Pad(), armnnUtils::Permuted(), armnn::PermuteTensor(), armnn::Pooling2d(), armnn::Pooling3d(), armnn::PreluImpl(), armnn::PrintOutput(), armnnUtils::ProcessConcatInputTensorInfo(), armnn::Reduce(), armnn::ReshapeWeightsForAcl(), armnn::Resize(), TransposeAsReshapeImpl::Run(), PermuteAsReshapeImpl::Run(), OptimizeConsecutiveReshapesImpl::Run(), ConvertConstPermuteLayersToConstLayers::Run(), ConvertConstDequantisationLayersToConstLayersImpl::Run(), AddBroadcastReshapeLayerImpl::Run(), FuseBatchNorm< ConvLayer, ArmnnType, T >::Run(), Convolution3dLayer::SerializeLayerParameters(), DepthwiseConvolution2dLayer::SerializeLayerParameters(), Convolution2dLayer::SerializeLayerParameters(), Graph::SerializeToDot(), ShapesAreBroadcastCompatible::ShapesAreBroadcastCompatible(), ShapesAreSameRank::ShapesAreSameRank(), armnn::Slice(), armnn::Softmax(), armnn::SpaceToBatchNd(), armnn::SpaceToDepth(), armnn::Split(), armnn::Splitter(), armnn::Stack(), armnn::StridedSlice(), armnn_driver::SwizzleAndroidNn4dTensorToArmNn(), armnnUtils::TransposeTensorShape(), 
ArgMinMaxQueueDescriptor::Validate(), PermuteQueueDescriptor::Validate(), DepthwiseConvolution2dQueueDescriptor::Validate(), DetectionPostProcessQueueDescriptor::Validate(), ResizeQueueDescriptor::Validate(), SpaceToBatchNdQueueDescriptor::Validate(), SpaceToDepthQueueDescriptor::Validate(), TransposeQueueDescriptor::Validate(), SliceQueueDescriptor::Validate(), DepthToSpaceQueueDescriptor::Validate(), BatchMatMulQueueDescriptor::Validate(), QueueDescriptor::ValidateTensorNumDimensions(), ElementwiseBaseLayer::ValidateTensorShapesFromInputs(), RankLayer::ValidateTensorShapesFromInputs(), QuantizeLayer::ValidateTensorShapesFromInputs(), ChannelShuffleLayer::ValidateTensorShapesFromInputs(), ActivationLayer::ValidateTensorShapesFromInputs(), FillLayer::ValidateTensorShapesFromInputs(), ConvertFp32ToFp16Layer::ValidateTensorShapesFromInputs(), ReduceLayer::ValidateTensorShapesFromInputs(), DequantizeLayer::ValidateTensorShapesFromInputs(), RsqrtLayer::ValidateTensorShapesFromInputs(), FakeQuantizationLayer::ValidateTensorShapesFromInputs(), DebugLayer::ValidateTensorShapesFromInputs(), BatchToSpaceNdLayer::ValidateTensorShapesFromInputs(), Pooling2dLayer::ValidateTensorShapesFromInputs(), InstanceNormalizationLayer::ValidateTensorShapesFromInputs(), StackLayer::ValidateTensorShapesFromInputs(), SwitchLayer::ValidateTensorShapesFromInputs(), ConvertFp32ToBf16Layer::ValidateTensorShapesFromInputs(), MemImportLayer::ValidateTensorShapesFromInputs(), SliceLayer::ValidateTensorShapesFromInputs(), L2NormalizationLayer::ValidateTensorShapesFromInputs(), ConvertFp16ToFp32Layer::ValidateTensorShapesFromInputs(), AbsLayer::ValidateTensorShapesFromInputs(), ResizeLayer::ValidateTensorShapesFromInputs(), ShapeLayer::ValidateTensorShapesFromInputs(), MemCopyLayer::ValidateTensorShapesFromInputs(), Pooling3dLayer::ValidateTensorShapesFromInputs(), MergeLayer::ValidateTensorShapesFromInputs(), ConvertBf16ToFp32Layer::ValidateTensorShapesFromInputs(), CastLayer::ValidateTensorShapesFromInputs(), FloorLayer::ValidateTensorShapesFromInputs(), SoftmaxLayer::ValidateTensorShapesFromInputs(), NormalizationLayer::ValidateTensorShapesFromInputs(), LogSoftmaxLayer::ValidateTensorShapesFromInputs(), PadLayer::ValidateTensorShapesFromInputs(), MeanLayer::ValidateTensorShapesFromInputs(), TransposeLayer::ValidateTensorShapesFromInputs(), ConstantLayer::ValidateTensorShapesFromInputs(), PermuteLayer::ValidateTensorShapesFromInputs(), ReshapeLayer::ValidateTensorShapesFromInputs(), Convolution3dLayer::ValidateTensorShapesFromInputs(), GatherNdLayer::ValidateTensorShapesFromInputs(), BatchMatMulLayer::ValidateTensorShapesFromInputs(), StridedSliceLayer::ValidateTensorShapesFromInputs(), GatherLayer::ValidateTensorShapesFromInputs(), DetectionPostProcessLayer::ValidateTensorShapesFromInputs(), ElementwiseUnaryLayer::ValidateTensorShapesFromInputs(), ArgMinMaxLayer::ValidateTensorShapesFromInputs(), DepthToSpaceLayer::ValidateTensorShapesFromInputs(), SpaceToBatchNdLayer::ValidateTensorShapesFromInputs(), SpaceToDepthLayer::ValidateTensorShapesFromInputs(), TransposeConvolution2dLayer::ValidateTensorShapesFromInputs(), PreluLayer::ValidateTensorShapesFromInputs(), LogicalBinaryLayer::ValidateTensorShapesFromInputs(), ComparisonLayer::ValidateTensorShapesFromInputs(), FullyConnectedLayer::ValidateTensorShapesFromInputs(), SplitterLayer::ValidateTensorShapesFromInputs(), DepthwiseConvolution2dLayer::ValidateTensorShapesFromInputs(), ConcatLayer::ValidateTensorShapesFromInputs(), LstmLayer::ValidateTensorShapesFromInputs(), 
UnidirectionalSequenceLstmLayer::ValidateTensorShapesFromInputs(), Convolution2dLayer::ValidateTensorShapesFromInputs(), BatchNormalizationLayer::ValidateTensorShapesFromInputs(), QuantizedLstmLayer::ValidateTensorShapesFromInputs(), QLstmLayer::ValidateTensorShapesFromInputs(), and armnn::VerifyTensorInfoDataType().

{ return m_Shape; }
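For example, the returned TensorShape can be queried for its rank and indexed per dimension (a minimal sketch; the shape is made up):

armnn::TensorInfo info(armnn::TensorShape({1, 16, 16, 3}), armnn::DataType::Float32);
const armnn::TensorShape& shape = info.GetShape();
for (unsigned int i = 0; i < shape.GetNumDimensions(); ++i)
{
    unsigned int dimSize = shape[i]; // size of dimension i
}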

◆ GetShape() [2/2]

TensorShape& GetShape ( )
inline

Definition at line 192 of file Tensor.hpp.

{ return m_Shape; }

◆ HasMultipleQuantizationScales()

bool HasMultipleQuantizationScales ( ) const
inline

Definition at line 201 of file Tensor.hpp.

References TensorShape::operator=(), and TensorShape::operator==().

Referenced by armnn::GetBiasDataType(), TensorInfo::GetQuantizationScale(), TensorInfo::HasPerAxisQuantization(), and TensorInfo::IsTypeSpaceMatch().

{ return m_Quantization.m_Scales.size() > 1; }

◆ HasPerAxisQuantization()

◆ IsConstant()

◆ IsQuantized()

◆ IsTypeSpaceMatch()

bool IsTypeSpaceMatch (const TensorInfo & other) const

Check that the types are the same and, if quantized, that the quantization parameters are the same.

Definition at line 432 of file Tensor.cpp.

References TensorInfo::GetQuantizationOffset(), TensorInfo::GetQuantizationScale(), TensorInfo::HasMultipleQuantizationScales(), and TensorInfo::IsQuantized().

Referenced by ConcatLayer::CreateWorkload(), SplitterLayer::CreateWorkload(), armnn::GetBiasDataType(), ClLayerSupport::IsConcatSupported(), NeonLayerSupport::IsConcatSupported(), ClLayerSupport::IsSplitterSupported(), and NeonLayerSupport::IsSplitterSupported().

{
    bool match = true;

    match &= m_DataType == other.m_DataType;

    if (IsQuantized() && !HasMultipleQuantizationScales())
    {
        match &= GetQuantizationScale() == other.GetQuantizationScale() &&
                 GetQuantizationOffset() == other.GetQuantizationOffset();
    }
    return match;
}
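A usage sketch (values illustrative): two infos with the same data type and matching per-tensor quantization parameters are a type-space match even when their shapes differ, whereas operator== below also compares shapes.

armnn::TensorInfo a(armnn::TensorShape({1, 8}), armnn::DataType::QAsymmU8, 0.1f, 0);
armnn::TensorInfo b(armnn::TensorShape({1, 4}), armnn::DataType::QAsymmU8, 0.1f, 0);
bool typeSpaceMatch = a.IsTypeSpaceMatch(b); // true: same type, scale and offset
bool fullyEqual     = (a == b);              // false: the shapes differ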

◆ operator!=()

bool operator!= (const TensorInfo & other) const

Definition at line 422 of file Tensor.cpp.

{
    return !(*this == other);
}

◆ operator=()

TensorInfo & operator= (const TensorInfo & other)

Definition at line 405 of file Tensor.cpp.

{
    m_Shape = other.m_Shape;
    m_DataType = other.m_DataType;
    m_Quantization = other.m_Quantization;
    m_IsConstant = other.m_IsConstant;
    return *this;
}

◆ operator==()

bool operator== (const TensorInfo & other) const

Definition at line 414 of file Tensor.cpp.

{
    return ((m_Shape == other.m_Shape) &&
            (m_DataType == other.m_DataType) &&
            (m_Quantization == other.m_Quantization) &&
            (m_IsConstant == other.m_IsConstant));
}

◆ SetConstant()

◆ SetDataType()

◆ SetQuantizationDim()

void SetQuantizationDim ( const Optional< unsigned int > &  quantizationDim)

Definition at line 499 of file Tensor.cpp.

Referenced by armnnUtils::Permuted(), and TensorInfo::TensorInfo().

{
    m_Quantization.m_QuantizationDim = quantizationDim;
}

◆ SetQuantizationOffset()

void SetQuantizationOffset ( int32_t  offset)

Definition at line 489 of file Tensor.cpp.

Referenced by armnn::CheckScaleSetOnQuantizedType(), Converter::ConvertOperation(), RefCastWorkload::ExecuteAsync(), and TensorInfo::TensorInfo().

{
    m_Quantization.m_Offset = MakeOptional<int32_t>(offset);
}

◆ SetQuantizationScale()

void SetQuantizationScale ( float  scale)

Definition at line 473 of file Tensor.cpp.

Referenced by armnn::CheckScaleSetOnQuantizedType(), Converter::ConvertOperation(), RefCastWorkload::ExecuteAsync(), and TensorInfo::TensorInfo().

{
    m_Quantization.m_Scales = { scale };
}
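Per-tensor quantization parameters can also be applied after construction; a minimal sketch (values illustrative):

armnn::TensorInfo info(armnn::TensorShape({1, 8}), armnn::DataType::QAsymmS8);
info.SetQuantizationScale(0.02f); // replaces any existing scales with a single scale
info.SetQuantizationOffset(-5);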

◆ SetQuantizationScales()

void SetQuantizationScales ( const std::vector< float > &  scales)

Definition at line 456 of file Tensor.cpp.

Referenced by Converter::ConvertOperation(), and TensorInfo::TensorInfo().

{
    m_Quantization.m_Scales = scales;
}

◆ SetShape()


The documentation for this class was generated from the following files:
Tensor.hpp
Tensor.cpp