ArmNN
 21.02
TensorInfo Class Reference

#include <Tensor.hpp>

Public Member Functions

 TensorInfo ()
 Empty (invalid) constructor. More...
 
 TensorInfo (const TensorShape &shape, DataType dataType, float quantizationScale=0.0f, int32_t quantizationOffset=0)
 
 TensorInfo (unsigned int numDimensions, const unsigned int *dimensionSizes, DataType dataType, float quantizationScale=0.0f, int32_t quantizationOffset=0)
 
 TensorInfo (const TensorShape &shape, DataType dataType, const std::vector< float > &quantizationScales, unsigned int quantizationDim)
 
 TensorInfo (unsigned int numDimensions, const unsigned int *dimensionSizes, DataType dataType, const std::vector< float > &quantizationScales, unsigned int quantizationDim)
 
 TensorInfo (const TensorInfo &other)
 
TensorInfo & operator= (const TensorInfo &other)
 
bool operator== (const TensorInfo &other) const
 
bool operator!= (const TensorInfo &other) const
 
const TensorShape & GetShape () const
 
TensorShape & GetShape ()
 
void SetShape (const TensorShape &newShape)
 
unsigned int GetNumDimensions () const
 
unsigned int GetNumElements () const
 
DataType GetDataType () const
 
void SetDataType (DataType type)
 
bool HasMultipleQuantizationScales () const
 
bool HasPerAxisQuantization () const
 
std::vector< float > GetQuantizationScales () const
 
void SetQuantizationScales (const std::vector< float > &scales)
 
float GetQuantizationScale () const
 
void SetQuantizationScale (float scale)
 
int32_t GetQuantizationOffset () const
 
void SetQuantizationOffset (int32_t offset)
 
Optional< unsigned int > GetQuantizationDim () const
 
void SetQuantizationDim (const Optional< unsigned int > &quantizationDim)
 
bool IsQuantized () const
 
bool IsTypeSpaceMatch (const TensorInfo &other) const
 Check that the types are the same and, if quantize, that the quantization parameters are the same. More...
 
unsigned int GetNumBytes () const
 

Detailed Description

Definition at line 152 of file Tensor.hpp.

Constructor & Destructor Documentation

◆ TensorInfo() [1/6]

Empty (invalid) constructor.

Definition at line 341 of file Tensor.cpp.

342 : m_DataType(DataType::Float32)
343 {
344 }

◆ TensorInfo() [2/6]

TensorInfo ( const TensorShape & shape,
DataType  dataType,
float  quantizationScale = 0.0f,
int32_t  quantizationOffset = 0 
)

Definition at line 346 of file Tensor.cpp.

References TensorInfo::SetQuantizationOffset(), and TensorInfo::SetQuantizationScale().

350  : m_Shape(shape)
351  , m_DataType(dataType)
352 {
353  SetQuantizationScale(quantizationScale);
354  SetQuantizationOffset(quantizationOffset);
355 }
void SetQuantizationScale(float scale)
Definition: Tensor.cpp:464
void SetQuantizationOffset(int32_t offset)
Definition: Tensor.cpp:480

◆ TensorInfo() [3/6]

TensorInfo ( unsigned int  numDimensions,
const unsigned int *  dimensionSizes,
DataType  dataType,
float  quantizationScale = 0.0f,
int32_t  quantizationOffset = 0 
)

Definition at line 357 of file Tensor.cpp.

References TensorInfo::SetQuantizationOffset(), and TensorInfo::SetQuantizationScale().

362  : m_Shape(numDimensions, dimensionSizes)
363  , m_DataType(dataType)
364 {
365  SetQuantizationScale(quantizationScale);
366  SetQuantizationOffset(quantizationOffset);
367 }
void SetQuantizationScale(float scale)
Definition: Tensor.cpp:464
void SetQuantizationOffset(int32_t offset)
Definition: Tensor.cpp:480

◆ TensorInfo() [4/6]

TensorInfo ( const TensorShape & shape,
DataType  dataType,
const std::vector< float > &  quantizationScales,
unsigned int  quantizationDim 
)

Definition at line 369 of file Tensor.cpp.

References TensorInfo::SetQuantizationDim(), and TensorInfo::SetQuantizationScales().

373  : m_Shape(shape)
374  , m_DataType(dataType)
375 {
376  SetQuantizationScales(quantizationScales);
377  SetQuantizationDim(MakeOptional<unsigned int>(quantizationDim));
378 }
void SetQuantizationDim(const Optional< unsigned int > &quantizationDim)
Definition: Tensor.cpp:490
void SetQuantizationScales(const std::vector< float > &scales)
Definition: Tensor.cpp:447

◆ TensorInfo() [5/6]

TensorInfo ( unsigned int  numDimensions,
const unsigned int *  dimensionSizes,
DataType  dataType,
const std::vector< float > &  quantizationScales,
unsigned int  quantizationDim 
)

Definition at line 380 of file Tensor.cpp.

References TensorInfo::SetQuantizationDim(), and TensorInfo::SetQuantizationScales().

385  : m_Shape(numDimensions, dimensionSizes)
386  , m_DataType(dataType)
387 {
388  SetQuantizationScales(quantizationScales);
389  SetQuantizationDim(MakeOptional<unsigned int>(quantizationDim));
390 }
void SetQuantizationDim(const Optional< unsigned int > &quantizationDim)
Definition: Tensor.cpp:490
void SetQuantizationScales(const std::vector< float > &scales)
Definition: Tensor.cpp:447

◆ TensorInfo() [6/6]

TensorInfo ( const TensorInfo & other)

Definition at line 392 of file Tensor.cpp.

393 : m_Shape(other.m_Shape)
394 , m_DataType(other.m_DataType)
395 , m_Quantization(other.m_Quantization)
396 {}

Member Function Documentation

◆ GetDataType()

DataType GetDataType ( ) const
inline

Definition at line 194 of file Tensor.hpp.

Referenced by BiasAndWeightsTypesCompatible::BiasAndWeightsTypesCompatible(), BiasAndWeightsTypesMatch::BiasAndWeightsTypesMatch(), BOOST_AUTO_TEST_CASE(), armnn::BOOST_AUTO_TEST_CASE(), armnnUtils::CalculateReducedOutputTensoInfo(), armnnUtils::CalculateStridedSliceOutputTensorInfo(), armnn::CheckScaleSetOnQuantizedType(), armnn::CheckTensorDataTypesEqual(), armnn::ClConvertFp16ToFp32WorkloadValidate(), armnn::ClConvertFp32ToFp16WorkloadValidate(), armnn::ConvertBf16ToFp32Weight(), armnn::optimizations::ConvertWeight(), TfLiteParserImpl::CreateNetworkFromBinary(), armnn::CreateQuantizedConst(), RefDepthToSpaceWorkload::Execute(), RefStridedSliceWorkload::Execute(), RefSliceWorkload::Execute(), RefLstmWorkload::Execute(), QuantizerStrategy::ExecuteStrategy(), BFloat16ToFloat32::Func(), Float16ToFloat32::Func(), Float32ToBFloat16::Func(), Float32ToFloat16::Func(), armnn::GetBiasDataType(), TfLiteParserImpl::GetBuffer(), Layer::GetDataType(), armnnSerializer::GetFlatBufferArgMinMaxFunction(), armnn::GetInputTensorInfo(), armnnUtils::GetPerAxisParams(), armnn::GetUnpaddedTensorStrides(), armnn::InitializeArmComputeTensorData(), armnn::InsertConvertBf16ToFp32LayersBefore(), armnn::InsertConvertFp16ToFp32LayersBefore(), armnn::InsertConvertFp32ToBf16LayersAfter(), armnn::InsertConvertFp32ToBf16LayersBefore(), armnn::InsertConvertFp32ToFp16LayersAfter(), SampleDynamicLayerSupport::IsAdditionSupported(), RefLayerSupport::IsConvertFp16ToFp32Supported(), RefLayerSupport::IsConvertFp32ToFp16Supported(), RefLayerSupport::IsConvolution2dSupported(), RefLayerSupport::IsDepthwiseConvolutionSupported(), NeonLayerSupport::IsFloorSupported(), armnn::IsFloorSupported(), RefLayerSupport::IsFullyConnectedSupported(), ClLayerSupport::IsQLstmSupported(), NeonLayerSupport::IsQLstmSupported(), ClLayerSupport::IsSplitterSupported(), NeonLayerSupport::IsSplitterSupported(), RefLayerSupport::IsTransposeConvolution2dSupported(), main(), armnn::MakeDecoder(), armnn::MakeEncoder(), 
TfLiteParserImpl::OutputShapeOfReshape(), ITfParser::TfParserImpl::ParseExpandDims(), ITfParser::TfParserImpl::ParseGather(), ITfParser::TfParserImpl::ParseSplit(), ITfParser::TfParserImpl::ParseStack(), armnn::PermuteTensor(), QuantizerStrategy::QuantizerStrategy(), ConvertFp32NetworkToFp16Impl::Run(), TypeAnyOf::TypeAnyOf(), TypeIs::TypeIs(), ArgMinMaxQueueDescriptor::Validate(), FullyConnectedQueueDescriptor::Validate(), Convolution2dQueueDescriptor::Validate(), DepthwiseConvolution2dQueueDescriptor::Validate(), QuantizeQueueDescriptor::Validate(), EqualQueueDescriptor::Validate(), ConvertBf16ToFp32QueueDescriptor::Validate(), ConvertFp32ToBf16QueueDescriptor::Validate(), ConvertFp16ToFp32QueueDescriptor::Validate(), ConvertFp32ToFp16QueueDescriptor::Validate(), GreaterQueueDescriptor::Validate(), GatherQueueDescriptor::Validate(), DequantizeQueueDescriptor::Validate(), TransposeConvolution2dQueueDescriptor::Validate(), ComparisonQueueDescriptor::Validate(), LogicalBinaryQueueDescriptor::Validate(), LayerVerifierBase::VerifyConstTensors(), LayerVerifierBase::VerifyNameAndConnections(), and armnn::VerifyTensorInfoDataType().

194 { return m_DataType; }

◆ GetNumBytes()

◆ GetNumDimensions()

unsigned int GetNumDimensions ( ) const
inline

Definition at line 191 of file Tensor.hpp.

Referenced by ITfParser::TfParserImpl::AddAdditionLayer(), ITfParser::TfParserImpl::AddMaximumLayer(), ITfParser::TfParserImpl::AddMultiplicationLayer(), ITfParser::TfParserImpl::AddRealDivLayer(), armnn::ArgMinMax(), BOOST_AUTO_TEST_CASE(), BOOST_FIXTURE_TEST_CASE(), armnn::boost_test_print_type(), armnnTfParser::CalculatePaddedOutputTensorInfo(), armnnUtils::CalculateReducedOutputTensoInfo(), armnnUtils::CalculateStridedSliceOutputTensorInfo(), armnnTfParser::CheckPaddingTensor(), armnn::ComputeAclAxis(), armnn::ComputePositiveAxis(), armnn::ComputeSoftmaxAclAxis(), armnnTfLiteParser::ComputeWrappedIndex(), armnn::Concatenate(), ITfParser::TfParserImpl::CreateAdditionLayer(), armnn::Debug(), RefRankWorkload::Execute(), armnn::Gather(), armnn::GetBiasDataType(), ITfParser::GetNetworkOutputBindingInfo(), GetTensorShapeAsArray(), RefLayerSupport::IsBatchToSpaceNdSupported(), RefLayerSupport::IsMeanSupported(), armnn::LogSoftmax(), MakeTensor(), armnnTfParser::OutputShapeOfExpandDims(), TfLiteParserImpl::OutputShapeOfReshape(), TfLiteParserImpl::OutputShapeOfSqueeze(), armnnTfParser::OutputShapeOfSqueeze(), ITfParser::TfParserImpl::ParseConcat(), ICaffeParser::CaffeParserImpl::ParseConcatLayer(), ITfParser::TfParserImpl::ParseGather(), ICaffeParser::CaffeParserImpl::ParseInnerProductLayer(), ITfParser::TfParserImpl::ParseMean(), ITfParser::TfParserImpl::ParseShape(), ITfParser::TfParserImpl::ParseSplit(), ITfParser::TfParserImpl::ParseStack(), ITfParser::TfParserImpl::ParseSub(), RefFullyConnectedWorkload::PostAllocationConfigure(), armnnUtils::ProcessConcatInputTensorInfo(), ITfParser::TfParserImpl::ProcessElementwiseInputSlots(), armnn::Reduce(), PermuteAndBatchToSpaceAsDepthToSpaceImpl< PermuteType >::Run(), AddBroadcastReshapeLayerImpl::Run(), ParserFlatbuffersFixture::RunTest(), armnn::Softmax(), armnn::Split(), armnn::Splitter(), armnn::Stack(), armnnCaffeParser::TensorDescToBlobShape(), TensorNumDimensionsAreCorrect::TensorNumDimensionsAreCorrect(), 
FullyConnectedQueueDescriptor::Validate(), MeanQueueDescriptor::Validate(), PadQueueDescriptor::Validate(), InstanceNormalizationQueueDescriptor::Validate(), L2NormalizationQueueDescriptor::Validate(), StridedSliceQueueDescriptor::Validate(), GatherQueueDescriptor::Validate(), SliceQueueDescriptor::Validate(), FillLayer::ValidateTensorShapesFromInputs(), ReduceLayer::ValidateTensorShapesFromInputs(), MeanLayer::ValidateTensorShapesFromInputs(), and GatherLayer::ValidateTensorShapesFromInputs().

191 { return m_Shape.GetNumDimensions(); }
unsigned int GetNumDimensions() const
Function that returns the tensor rank.
Definition: Tensor.cpp:174

◆ GetNumElements()

unsigned int GetNumElements ( ) const
inline

Definition at line 192 of file Tensor.hpp.

Referenced by armnn::Activation(), BOOST_FIXTURE_TEST_CASE(), armnnTfLiteParser::ComputeWrappedIndex(), Concat2dTestImpl(), Concat3dTestImpl(), Concat4dTestImpl(), armnn::ConvertBf16ToFp32Weight(), armnn::optimizations::ConvertWeight(), TfLiteParserImpl::CreateNetworkFromBinary(), armnn::CreateQuantizedConst(), armnn::Debug(), armnn::Dequantize(), armnn::DetectionPostProcess(), RefConvertFp32ToFp16Workload::Execute(), RefConvertBf16ToFp32Workload::Execute(), RefConvertFp16ToFp32Workload::Execute(), RefConvertFp32ToBf16Workload::Execute(), RefFakeQuantizationFloat32Workload::Execute(), RefFloorWorkload::Execute(), SampleDynamicAdditionWorkload::Execute(), RefStackWorkload::Execute(), RefDebugWorkload< DataType >::Execute(), BFloat16ToFloat32::Func(), Float16ToFloat32::Func(), Float32ToBFloat16::Func(), Float32ToFloat16::Func(), armnn::Gather(), armnn::GetBiasDataType(), ITfParser::GetNetworkOutputBindingInfo(), TensorInfo::GetNumBytes(), MakeRandomTensor(), MakeTensor(), OnnxParserImpl::OnnxParserImpl(), IDeserializer::DeserializerImpl::OutputShapeOfReshape(), TfLiteParserImpl::OutputShapeOfReshape(), TfLiteParserImpl::OutputShapeOfSqueeze(), armnn::Pad(), ITfParser::TfParserImpl::ParseConv2D(), ITfParser::TfParserImpl::ParseDepthwiseConv2D(), ITfParser::TfParserImpl::ParseExpandDims(), ITfParser::TfParserImpl::ParseIdentity(), PermuteOutputForConcat(), PermuteTensorData(), armnn::Quantize(), QuantizeData(), armnn::Reduce(), ShapesAreSameTotalSize::ShapesAreSameTotalSize(), armnn::Splitter(), armnnDeserializer::ToConstTensor(), and SpaceToBatchNdQueueDescriptor::Validate().

192 { return m_Shape.GetNumElements(); }
unsigned int GetNumElements() const
Function that calculates the number of tensor elements by multiplying all dimension sizes which are specified...
Definition: Tensor.cpp:181

◆ GetQuantizationDim()

Optional< unsigned int > GetQuantizationDim ( ) const

Definition at line 485 of file Tensor.cpp.

Referenced by BOOST_AUTO_TEST_CASE(), armnn::GetBiasDataType(), armnnSerializer::GetFlatBufferArgMinMaxFunction(), armnnUtils::GetPerAxisParams(), and armnnUtils::Permuted().

486 {
487  return m_Quantization.m_QuantizationDim;
488 }

◆ GetQuantizationOffset()

◆ GetQuantizationScale()

◆ GetQuantizationScales()

std::vector< float > GetQuantizationScales ( ) const

Definition at line 442 of file Tensor.cpp.

Referenced by armnn::GetBiasDataType(), armnnSerializer::GetFlatBufferArgMinMaxFunction(), and armnnUtils::GetPerAxisParams().

443 {
444  return m_Quantization.m_Scales;
445 }

◆ GetShape() [1/2]

const TensorShape& GetShape ( ) const
inline

Definition at line 187 of file Tensor.hpp.

Referenced by ITfParser::TfParserImpl::AddAdditionLayer(), ITfParser::TfParserImpl::AddMaximumLayer(), armnn::ArgMinMax(), armnn::BatchNormImpl(), armnn::BatchToSpaceNd(), BOOST_AUTO_TEST_CASE(), armnn::BOOST_AUTO_TEST_CASE(), BOOST_FIXTURE_TEST_CASE(), armnn::boost_test_print_type(), armnnTfParser::CalculatePaddedOutputTensorInfo(), armnnUtils::CalculateReducedOutputTensoInfo(), armnnUtils::CalculateStridedSliceOutputTensorInfo(), armnnTfLiteParser::ComputeWrappedIndex(), armnn::Concatenate(), Concatenate(), armnn::ConvertBf16ToFp32Weight(), armnn::ConvertWeightTensorFromArmnnToAcl(), armnn::CreateAclNormalizationLayerInfoForL2Normalization(), ITfParser::TfParserImpl::CreateAdditionLayer(), TfLiteParserImpl::CreateNetworkFromBinary(), OnnxParserImpl::CreateNetworkFromString(), armnn::CreateQuantizedConst(), ConcatLayer::CreateWorkload(), SplitterLayer::CreateWorkload(), armnn::Debug(), armnn::DepthToSpace(), DepthwiseConvolution2dAsymmetricTestImpl(), DepthwiseConvolution2dDepthMul1TestImpl(), DepthwiseConvolution2dTestImpl(), armnn::DetectionPostProcess(), RefQLstmWorkload::Execute(), RefLstmWorkload::Execute(), RefLogicalUnaryWorkload::Execute(), RefComparisonWorkload::Execute(), RefElementwiseUnaryWorkload::Execute(), RefLogicalBinaryWorkload::Execute(), RefElementwiseWorkload< Functor, ParentDescriptor, DebugString >::Execute(), BFloat16ToFloat32::Func(), Float16ToFloat32::Func(), Float32ToBFloat16::Func(), Float32ToFloat16::Func(), armnn::Gather(), GetBias(), armnn::GetBiasDataType(), armnnSerializer::GetFlatBufferArgMinMaxFunction(), armnn::GetInputTensorInfo(), ITfParser::GetNetworkOutputBindingInfo(), armnnUtils::GetPerAxisParams(), IDeserializer::DeserializerImpl::GetQLstmDescriptor(), RefTensorHandle::GetShape(), SampleTensorHandle::GetShape(), ConstCpuTensorHandle::GetShape(), GetTensorShapeAsArray(), armnn::GetUnpaddedTensorStrides(), armnn::InstanceNorm(), armnn::IsFloorSupported(), ClLayerSupport::IsResizeBilinearSupported(), 
NeonLayerSupport::IsResizeBilinearSupported(), ClLayerSupport::IsSplitterSupported(), NeonLayerSupport::IsSplitterSupported(), armnn::LogSoftmax(), main(), MakeTensor(), armnnTfParser::OutputShapeOfExpandDims(), IDeserializer::DeserializerImpl::OutputShapeOfReshape(), TfLiteParserImpl::OutputShapeOfReshape(), TfLiteParserImpl::OutputShapeOfSqueeze(), armnnTfParser::OutputShapeOfSqueeze(), armnn::Pad(), ICaffeParser::CaffeParserImpl::ParseBatchNormLayer(), ITfParser::TfParserImpl::ParseConcat(), ICaffeParser::CaffeParserImpl::ParseConcatLayer(), ITfParser::TfParserImpl::ParseConv2D(), ITfParser::TfParserImpl::ParseDepthwiseConv2D(), ITfParser::TfParserImpl::ParseExpandDims(), ITfParser::TfParserImpl::ParseGather(), ICaffeParser::CaffeParserImpl::ParseInnerProductLayer(), ITfParser::TfParserImpl::ParsePooling2d(), ICaffeParser::CaffeParserImpl::ParsePoolingLayer(), ITfParser::TfParserImpl::ParseReshape(), ITfParser::TfParserImpl::ParseResizeBilinear(), ICaffeParser::CaffeParserImpl::ParseScaleLayer(), ITfParser::TfParserImpl::ParseShape(), ITfParser::TfParserImpl::ParseSplit(), ITfParser::TfParserImpl::ParseSqueeze(), ITfParser::TfParserImpl::ParseStack(), armnnUtils::Permuted(), PermuteInputsForConcat(), armnn::PermuteTensor(), PermuteTensorNchwToNhwc(), PermuteTensorNhwcToNchw(), armnn::Pooling2d(), RefDepthwiseConvolution2dWorkload::PostAllocationConfigure(), RefConvolution2dWorkload::PostAllocationConfigure(), RefFullyConnectedWorkload::PostAllocationConfigure(), RefTransposeConvolution2dWorkload::PostAllocationConfigure(), armnn::PreluImpl(), ITfParser::TfParserImpl::ProcessComparisonLayer(), armnnUtils::ProcessConcatInputTensorInfo(), ITfParser::TfParserImpl::ProcessElementwiseLayer(), armnn::Reduce(), RefConvolution2dWorkload::RefConvolution2dWorkload(), RefDepthwiseConvolution2dWorkload::RefDepthwiseConvolution2dWorkload(), RefFullyConnectedWorkload::RefFullyConnectedWorkload(), armnn::ReshapeWeightsForAcl(), armnn::Resize(), PermuteAsReshapeImpl::Run(), 
TransposeAsReshapeImpl::Run(), OptimizeConsecutiveReshapesImpl::Run(), AddBroadcastReshapeLayerImpl::Run(), DepthwiseConvolution2dLayer::SerializeLayerParameters(), Convolution2dLayer::SerializeLayerParameters(), Graph::SerializeToDot(), ShapesAreBroadcastCompatible::ShapesAreBroadcastCompatible(), ShapesAreSameRank::ShapesAreSameRank(), SimpleConvolution2dTestImpl(), armnn::Slice(), armnn::Softmax(), armnn::SpaceToBatchNd(), armnn::SpaceToDepth(), armnn::Split(), armnn::Splitter(), armnn::Stack(), armnn::StridedSlice(), armnnCaffeParser::TensorDescToBlobShape(), TransposeConvolution2dEndToEnd(), armnnUtils::TransposeTensorShape(), ArgMinMaxQueueDescriptor::Validate(), PermuteQueueDescriptor::Validate(), DepthwiseConvolution2dQueueDescriptor::Validate(), DetectionPostProcessQueueDescriptor::Validate(), ResizeBilinearQueueDescriptor::Validate(), ResizeQueueDescriptor::Validate(), SpaceToBatchNdQueueDescriptor::Validate(), SpaceToDepthQueueDescriptor::Validate(), TransposeQueueDescriptor::Validate(), SliceQueueDescriptor::Validate(), DepthToSpaceQueueDescriptor::Validate(), ElementwiseBaseLayer::ValidateTensorShapesFromInputs(), RankLayer::ValidateTensorShapesFromInputs(), QuantizeLayer::ValidateTensorShapesFromInputs(), ActivationLayer::ValidateTensorShapesFromInputs(), ReduceLayer::ValidateTensorShapesFromInputs(), ConvertFp32ToFp16Layer::ValidateTensorShapesFromInputs(), FillLayer::ValidateTensorShapesFromInputs(), StackLayer::ValidateTensorShapesFromInputs(), L2NormalizationLayer::ValidateTensorShapesFromInputs(), FakeQuantizationLayer::ValidateTensorShapesFromInputs(), ConvertFp32ToBf16Layer::ValidateTensorShapesFromInputs(), MemImportLayer::ValidateTensorShapesFromInputs(), DebugLayer::ValidateTensorShapesFromInputs(), DequantizeLayer::ValidateTensorShapesFromInputs(), MemCopyLayer::ValidateTensorShapesFromInputs(), ResizeLayer::ValidateTensorShapesFromInputs(), ConvertFp16ToFp32Layer::ValidateTensorShapesFromInputs(), 
MergeLayer::ValidateTensorShapesFromInputs(), InstanceNormalizationLayer::ValidateTensorShapesFromInputs(), NormalizationLayer::ValidateTensorShapesFromInputs(), SliceLayer::ValidateTensorShapesFromInputs(), SoftmaxLayer::ValidateTensorShapesFromInputs(), RsqrtLayer::ValidateTensorShapesFromInputs(), AbsLayer::ValidateTensorShapesFromInputs(), FloorLayer::ValidateTensorShapesFromInputs(), ConvertBf16ToFp32Layer::ValidateTensorShapesFromInputs(), BatchToSpaceNdLayer::ValidateTensorShapesFromInputs(), SwitchLayer::ValidateTensorShapesFromInputs(), Pooling2dLayer::ValidateTensorShapesFromInputs(), LogSoftmaxLayer::ValidateTensorShapesFromInputs(), MeanLayer::ValidateTensorShapesFromInputs(), GatherLayer::ValidateTensorShapesFromInputs(), TransposeLayer::ValidateTensorShapesFromInputs(), PadLayer::ValidateTensorShapesFromInputs(), ReshapeLayer::ValidateTensorShapesFromInputs(), PermuteLayer::ValidateTensorShapesFromInputs(), ConstantLayer::ValidateTensorShapesFromInputs(), ElementwiseUnaryLayer::ValidateTensorShapesFromInputs(), StridedSliceLayer::ValidateTensorShapesFromInputs(), DetectionPostProcessLayer::ValidateTensorShapesFromInputs(), ArgMinMaxLayer::ValidateTensorShapesFromInputs(), TransposeConvolution2dLayer::ValidateTensorShapesFromInputs(), FullyConnectedLayer::ValidateTensorShapesFromInputs(), LogicalBinaryLayer::ValidateTensorShapesFromInputs(), SpaceToDepthLayer::ValidateTensorShapesFromInputs(), DepthwiseConvolution2dLayer::ValidateTensorShapesFromInputs(), SpaceToBatchNdLayer::ValidateTensorShapesFromInputs(), ComparisonLayer::ValidateTensorShapesFromInputs(), DepthToSpaceLayer::ValidateTensorShapesFromInputs(), PreluLayer::ValidateTensorShapesFromInputs(), Convolution2dLayer::ValidateTensorShapesFromInputs(), ConcatLayer::ValidateTensorShapesFromInputs(), SplitterLayer::ValidateTensorShapesFromInputs(), BatchNormalizationLayer::ValidateTensorShapesFromInputs(), QuantizedLstmLayer::ValidateTensorShapesFromInputs(), 
LstmLayer::ValidateTensorShapesFromInputs(), QLstmLayer::ValidateTensorShapesFromInputs(), LayerVerifierBase::VerifyConstTensors(), LayerVerifierBase::VerifyNameAndConnections(), and armnn::VerifyTensorInfoDataType().

187 { return m_Shape; }

◆ GetShape() [2/2]

TensorShape& GetShape ( )
inline

Definition at line 188 of file Tensor.hpp.

188 { return m_Shape; }

◆ HasMultipleQuantizationScales()

bool HasMultipleQuantizationScales ( ) const
inline

◆ HasPerAxisQuantization()

bool HasPerAxisQuantization ( ) const

◆ IsQuantized()

bool IsQuantized ( ) const

Definition at line 495 of file Tensor.cpp.

References armnn::IsQuantizedType().

Referenced by armnn::GetBiasDataType(), TensorInfo::IsTypeSpaceMatch(), and TypeNotPerAxisQuantized::TypeNotPerAxisQuantized().

496 {
497  return IsQuantizedType(m_DataType);
498 }
constexpr bool IsQuantizedType()
Definition: TypesUtils.hpp:249

◆ IsTypeSpaceMatch()

bool IsTypeSpaceMatch ( const TensorInfo & other) const

Check that the types are the same and, if quantize, that the quantization parameters are the same.

Definition at line 423 of file Tensor.cpp.

References TensorInfo::GetQuantizationOffset(), TensorInfo::GetQuantizationScale(), TensorInfo::HasMultipleQuantizationScales(), and TensorInfo::IsQuantized().

Referenced by ConcatLayer::CreateWorkload(), SplitterLayer::CreateWorkload(), armnn::GetBiasDataType(), ClLayerSupport::IsConcatSupported(), NeonLayerSupport::IsConcatSupported(), ClLayerSupport::IsSplitterSupported(), and NeonLayerSupport::IsSplitterSupported().

424 {
425  bool match = true;
426 
427  match &= m_DataType == other.m_DataType;
428 
429  if (IsQuantized() && !HasMultipleQuantizationScales())
430  {
431  match &= GetQuantizationScale() == other.GetQuantizationScale() &&
432  GetQuantizationOffset() == other.GetQuantizationOffset();
433  }
434  return match;
435 }
bool HasMultipleQuantizationScales() const
Definition: Tensor.hpp:197
int32_t GetQuantizationOffset() const
Definition: Tensor.cpp:469
float GetQuantizationScale() const
Definition: Tensor.cpp:452
bool IsQuantized() const
Definition: Tensor.cpp:495

◆ operator!=()

bool operator!= ( const TensorInfo & other) const

Definition at line 413 of file Tensor.cpp.

414 {
415  return !(*this == other);
416 }

◆ operator=()

TensorInfo & operator= ( const TensorInfo & other)

Definition at line 398 of file Tensor.cpp.

399 {
400  m_Shape = other.m_Shape;
401  m_DataType = other.m_DataType;
402  m_Quantization = other.m_Quantization;
403  return *this;
404 }

◆ operator==()

bool operator== ( const TensorInfo & other) const

Definition at line 406 of file Tensor.cpp.

407 {
408  return ((m_Shape == other.m_Shape) &&
409  (m_DataType == other.m_DataType) &&
410  (m_Quantization == other.m_Quantization));
411 }

◆ SetDataType()

◆ SetQuantizationDim()

void SetQuantizationDim ( const Optional< unsigned int > &  quantizationDim)

Definition at line 490 of file Tensor.cpp.

Referenced by BOOST_AUTO_TEST_CASE(), armnnUtils::Permuted(), and TensorInfo::TensorInfo().

491 {
492  m_Quantization.m_QuantizationDim = quantizationDim;
493 }

◆ SetQuantizationOffset()

◆ SetQuantizationScale()

void SetQuantizationScale ( float  scale)

Definition at line 464 of file Tensor.cpp.

Referenced by AdditionBroadcast1ElementTestImpl(), AdditionBroadcastTestImpl(), ArgMaxChannelTest(), ArgMaxHeightTest(), ArgMaxSimpleTest(), ArgMinChannelTest(), ArgMinSimpleTest(), ArgMinWidthTest(), BOOST_AUTO_TEST_CASE(), BoundedReLuTestCommon(), armnn::CheckScaleSetOnQuantizedType(), CompareActivationTestImpl(), ConcatUint16Test(), ConcatUint8Test(), ConstantLinearActivationTestCommon(), Convolution1dTestImpl(), Convolution2d3x3DilationTestCommon(), DepthwiseConvolution2d3x3DilationTestCommon(), DepthwiseConvolution2dAsymmetricTestImpl(), DepthwiseConvolution2dDepthMul1TestImpl(), DepthwiseConvolution2dTestImpl(), DetectionPostProcessFastNmsQuantizedTest(), DetectionPostProcessRegularNmsQuantizedTest(), ElementwiseTestHelper(), ElementwiseUnaryTestHelper(), FullyConnectedLargeTestCommon(), FullyConnectedTest(), GetSoftmaxProfilerJson(), PermuteValueSet1Test(), PermuteValueSet2Test(), PermuteValueSet3Test(), PreluTest(), QuantizeData(), ReduceMaxNegativeAxisTest(), ReduceMaxSimpleTest(), ReduceMaxSimpleTest2(), ReduceMinNegativeAxisTest(), ReduceMinSimpleTest(), ReduceSumMultipleAxisTest(), ReduceSumSimpleTest(), ReduceSumSingleAxisTest1(), ReduceSumSingleAxisTest2(), ReduceSumSingleAxisTest3(), Reshape5dTest(), ReshapeBooleanTest(), armnn::SetupQuantize(), SimpleActivationTest(), SimpleConvolution2dTestImpl(), SimpleFloorTest(), SimplePermuteTest(), SimpleReshapeTest(), SimpleTransposeTest(), TensorInfo::TensorInfo(), TransposeValueSet1Test(), TransposeValueSet2Test(), and TransposeValueSet3Test().

465 {
466  m_Quantization.m_Scales = { scale };
467 }

◆ SetQuantizationScales()

void SetQuantizationScales ( const std::vector< float > &  scales)

Definition at line 447 of file Tensor.cpp.

Referenced by BOOST_AUTO_TEST_CASE(), and TensorInfo::TensorInfo().

448 {
449  m_Quantization.m_Scales = scales;
450 }

◆ SetShape()


The documentation for this class was generated from the following files: