ArmNN
 20.08
TensorInfo Class Reference

#include <Tensor.hpp>

Public Member Functions

 TensorInfo ()
 Empty (invalid) constructor. More...
 
 TensorInfo (const TensorShape &shape, DataType dataType, float quantizationScale=0.0f, int32_t quantizationOffset=0)
 
 TensorInfo (unsigned int numDimensions, const unsigned int *dimensionSizes, DataType dataType, float quantizationScale=0.0f, int32_t quantizationOffset=0)
 
 TensorInfo (const TensorShape &shape, DataType dataType, const std::vector< float > &quantizationScales, unsigned int quantizationDim)
 
 TensorInfo (unsigned int numDimensions, const unsigned int *dimensionSizes, DataType dataType, const std::vector< float > &quantizationScales, unsigned int quantizationDim)
 
 TensorInfo (const TensorInfo &other)
 
TensorInfo & operator= (const TensorInfo &other)
 
bool operator== (const TensorInfo &other) const
 
bool operator!= (const TensorInfo &other) const
 
const TensorShape & GetShape () const
 
TensorShape & GetShape ()
 
void SetShape (const TensorShape &newShape)
 
unsigned int GetNumDimensions () const
 
unsigned int GetNumElements () const
 
DataType GetDataType () const
 
void SetDataType (DataType type)
 
bool HasMultipleQuantizationScales () const
 
bool HasPerAxisQuantization () const
 
std::vector< float > GetQuantizationScales () const
 
void SetQuantizationScales (const std::vector< float > &scales)
 
float GetQuantizationScale () const
 
void SetQuantizationScale (float scale)
 
int32_t GetQuantizationOffset () const
 
void SetQuantizationOffset (int32_t offset)
 
Optional< unsigned int > GetQuantizationDim () const
 
void SetQuantizationDim (const Optional< unsigned int > &quantizationDim)
 
bool IsQuantized () const
 
bool IsTypeSpaceMatch (const TensorInfo &other) const
 Check that the types are the same and, if quantized, that the quantization parameters are the same. More...
 
unsigned int GetNumBytes () const
 

Detailed Description

Definition at line 152 of file Tensor.hpp.

Constructor & Destructor Documentation

◆ TensorInfo() [1/6]

Empty (invalid) constructor.

Definition at line 342 of file Tensor.cpp.

343 : m_DataType(DataType::Float32)
344 {
345 }

◆ TensorInfo() [2/6]

TensorInfo ( const TensorShape & shape,
DataType  dataType,
float  quantizationScale = 0.0f,
int32_t  quantizationOffset = 0 
)

Definition at line 347 of file Tensor.cpp.

References TensorInfo::SetQuantizationOffset(), and TensorInfo::SetQuantizationScale().

351  : m_Shape(shape)
352  , m_DataType(dataType)
353 {
354  SetQuantizationScale(quantizationScale);
355  SetQuantizationOffset(quantizationOffset);
356 }
void SetQuantizationScale(float scale)
Definition: Tensor.cpp:465
void SetQuantizationOffset(int32_t offset)
Definition: Tensor.cpp:481

◆ TensorInfo() [3/6]

TensorInfo ( unsigned int  numDimensions,
const unsigned int *  dimensionSizes,
DataType  dataType,
float  quantizationScale = 0.0f,
int32_t  quantizationOffset = 0 
)

Definition at line 358 of file Tensor.cpp.

References TensorInfo::SetQuantizationOffset(), and TensorInfo::SetQuantizationScale().

363  : m_Shape(numDimensions, dimensionSizes)
364  , m_DataType(dataType)
365 {
366  SetQuantizationScale(quantizationScale);
367  SetQuantizationOffset(quantizationOffset);
368 }
void SetQuantizationScale(float scale)
Definition: Tensor.cpp:465
void SetQuantizationOffset(int32_t offset)
Definition: Tensor.cpp:481

◆ TensorInfo() [4/6]

TensorInfo ( const TensorShape & shape,
DataType  dataType,
const std::vector< float > &  quantizationScales,
unsigned int  quantizationDim 
)

Definition at line 370 of file Tensor.cpp.

References TensorInfo::SetQuantizationDim(), and TensorInfo::SetQuantizationScales().

374  : m_Shape(shape)
375  , m_DataType(dataType)
376 {
377  SetQuantizationScales(quantizationScales);
378  SetQuantizationDim(MakeOptional<unsigned int>(quantizationDim));
379 }
void SetQuantizationDim(const Optional< unsigned int > &quantizationDim)
Definition: Tensor.cpp:491
void SetQuantizationScales(const std::vector< float > &scales)
Definition: Tensor.cpp:448

◆ TensorInfo() [5/6]

TensorInfo ( unsigned int  numDimensions,
const unsigned int *  dimensionSizes,
DataType  dataType,
const std::vector< float > &  quantizationScales,
unsigned int  quantizationDim 
)

Definition at line 381 of file Tensor.cpp.

References TensorInfo::SetQuantizationDim(), and TensorInfo::SetQuantizationScales().

386  : m_Shape(numDimensions, dimensionSizes)
387  , m_DataType(dataType)
388 {
389  SetQuantizationScales(quantizationScales);
390  SetQuantizationDim(MakeOptional<unsigned int>(quantizationDim));
391 }
void SetQuantizationDim(const Optional< unsigned int > &quantizationDim)
Definition: Tensor.cpp:491
void SetQuantizationScales(const std::vector< float > &scales)
Definition: Tensor.cpp:448

◆ TensorInfo() [6/6]

TensorInfo ( const TensorInfo & other)

Definition at line 393 of file Tensor.cpp.

394 : m_Shape(other.m_Shape)
395 , m_DataType(other.m_DataType)
396 , m_Quantization(other.m_Quantization)
397 {}

Member Function Documentation

◆ GetDataType()

DataType GetDataType ( ) const
inline

Definition at line 194 of file Tensor.hpp.

Referenced by BiasAndWeightsTypesCompatible::BiasAndWeightsTypesCompatible(), BiasAndWeightsTypesMatch::BiasAndWeightsTypesMatch(), armnn::BOOST_AUTO_TEST_CASE(), BOOST_AUTO_TEST_CASE(), armnnUtils::CalculateReducedOutputTensoInfo(), armnnUtils::CalculateStridedSliceOutputTensorInfo(), armnn::CheckScaleSetOnQuantizedType(), armnn::CheckTensorDataTypesEqual(), armnn::ClConvertFp16ToFp32WorkloadValidate(), armnn::ClConvertFp32ToFp16WorkloadValidate(), armnn::ConvertBf16ToFp32Weight(), armnn::optimizations::ConvertWeight(), TfLiteParser::CreateNetworkFromBinary(), armnn::CreateQuantizedConst(), RefDepthToSpaceWorkload::Execute(), RefStridedSliceWorkload::Execute(), RefSliceWorkload::Execute(), RefLstmWorkload::Execute(), BFloat16ToFloat32::Func(), Float16ToFloat32::Func(), Float32ToBFloat16::Func(), Float32ToFloat16::Func(), armnn::GetBiasDataType(), TfLiteParser::GetBuffer(), Layer::GetDataType(), armnnUtils::GetPerAxisParams(), armnn::GetUnpaddedTensorStrides(), armnn::InitializeArmComputeTensorData(), armnn::InsertConvertBf16ToFp32LayersBefore(), armnn::InsertConvertFp16ToFp32LayersBefore(), armnn::InsertConvertFp32ToBf16LayersAfter(), armnn::InsertConvertFp32ToBf16LayersBefore(), armnn::InsertConvertFp32ToFp16LayersAfter(), SampleDynamicLayerSupport::IsAdditionSupported(), RefLayerSupport::IsConvertFp16ToFp32Supported(), RefLayerSupport::IsConvertFp32ToFp16Supported(), RefLayerSupport::IsConvolution2dSupported(), RefLayerSupport::IsDepthwiseConvolutionSupported(), NeonLayerSupport::IsFloorSupported(), armnn::IsFloorSupported(), RefLayerSupport::IsFullyConnectedSupported(), ClLayerSupport::IsQLstmSupported(), NeonLayerSupport::IsQLstmSupported(), ClLayerSupport::IsSplitterSupported(), NeonLayerSupport::IsSplitterSupported(), RefLayerSupport::IsTransposeConvolution2dSupported(), main(), armnn::MakeDecoder(), armnn::MakeEncoder(), armnnTfParser::OutputShapeOfExpandDims(), TfLiteParser::OutputShapeOfReshape(), armnnTfParser::OutputShapeOfSqueeze(), 
armnn::PermuteTensor(), ConvertFp32NetworkToFp16Impl::Run(), TypeAnyOf::TypeAnyOf(), TypeIs::TypeIs(), ArgMinMaxQueueDescriptor::Validate(), FullyConnectedQueueDescriptor::Validate(), Convolution2dQueueDescriptor::Validate(), DepthwiseConvolution2dQueueDescriptor::Validate(), QuantizeQueueDescriptor::Validate(), EqualQueueDescriptor::Validate(), ConvertBf16ToFp32QueueDescriptor::Validate(), ConvertFp32ToBf16QueueDescriptor::Validate(), ConvertFp16ToFp32QueueDescriptor::Validate(), ConvertFp32ToFp16QueueDescriptor::Validate(), GreaterQueueDescriptor::Validate(), GatherQueueDescriptor::Validate(), DequantizeQueueDescriptor::Validate(), TransposeConvolution2dQueueDescriptor::Validate(), ComparisonQueueDescriptor::Validate(), armnn::VerifyTensorInfoDataType(), QuantizerVisitor::VisitInputLayer(), QuantizerVisitor::VisitOutputLayer(), and SerializerVisitor::VisitQuantizedLstmLayer().

194 { return m_DataType; }

◆ GetNumBytes()

◆ GetNumDimensions()

unsigned int GetNumDimensions ( ) const
inline

Definition at line 191 of file Tensor.hpp.

Referenced by armnn::ArgMinMax(), BOOST_AUTO_TEST_CASE(), BOOST_FIXTURE_TEST_CASE(), armnn::boost_test_print_type(), armnnTfParser::CalculatePaddedOutputTensorInfo(), armnnUtils::CalculateReducedOutputTensoInfo(), armnnUtils::CalculateStridedSliceOutputTensorInfo(), armnnTfParser::CheckPaddingTensor(), armnn::ComputeAclAxis(), armnn::ComputePositiveAxis(), armnn::ComputeSoftmaxAclAxis(), armnnTfLiteParser::ComputeWrappedIndex(), armnn::Concatenate(), TfLiteParser::CreateNetworkFromBinary(), armnn::Debug(), RefRankWorkload::Execute(), armnn::Gather(), armnn::GetBiasDataType(), GetTensorShapeAsArray(), RefLayerSupport::IsBatchToSpaceNdSupported(), RefLayerSupport::IsMeanSupported(), armnn::LogSoftmax(), MakeTensor(), armnn::Mean(), armnnTfParser::OutputShapeOfExpandDims(), TfLiteParser::OutputShapeOfReshape(), TfLiteParser::OutputShapeOfSqueeze(), armnnTfParser::OutputShapeOfSqueeze(), CaffeParserBase::ParseConcatLayer(), CaffeParserBase::ParseInnerProductLayer(), RefFullyConnectedWorkload::PostAllocationConfigure(), armnnUtils::ProcessConcatInputTensorInfo(), PermuteAndBatchToSpaceAsDepthToSpaceImpl< PermuteType >::Run(), ParserFlatbuffersFixture::RunTest(), armnn::Softmax(), armnn::Split(), armnn::Splitter(), armnn::Stack(), armnnCaffeParser::TensorDescToBlobShape(), TensorNumDimensionsAreCorrect::TensorNumDimensionsAreCorrect(), TfLiteParser::TfLiteParser(), TfParser::TfParser(), FullyConnectedQueueDescriptor::Validate(), MeanQueueDescriptor::Validate(), PadQueueDescriptor::Validate(), InstanceNormalizationQueueDescriptor::Validate(), L2NormalizationQueueDescriptor::Validate(), StridedSliceQueueDescriptor::Validate(), GatherQueueDescriptor::Validate(), SliceQueueDescriptor::Validate(), FillLayer::ValidateTensorShapesFromInputs(), MeanLayer::ValidateTensorShapesFromInputs(), and GatherLayer::ValidateTensorShapesFromInputs().

191 { return m_Shape.GetNumDimensions(); }
unsigned int GetNumDimensions() const
Function that returns the tensor rank.
Definition: Tensor.cpp:175

◆ GetNumElements()

unsigned int GetNumElements ( ) const
inline

Definition at line 192 of file Tensor.hpp.

Referenced by armnn::Activation(), BOOST_FIXTURE_TEST_CASE(), armnnTfLiteParser::ComputeWrappedIndex(), Concat2dTestImpl(), Concat3dTestImpl(), Concat4dTestImpl(), armnn::ConvertBf16ToFp32Weight(), armnnTfParser::ConvertTfTensorDataType(), armnn::optimizations::ConvertWeight(), TfLiteParser::CreateNetworkFromBinary(), armnn::CreateQuantizedConst(), armnn::Debug(), armnn::Dequantize(), armnn::DetectionPostProcess(), RefConvertBf16ToFp32Workload::Execute(), RefConvertFp16ToFp32Workload::Execute(), RefConvertFp32ToBf16Workload::Execute(), RefFakeQuantizationFloat32Workload::Execute(), RefFloorWorkload::Execute(), SampleDynamicAdditionWorkload::Execute(), RefConvertFp32ToFp16Workload::Execute(), RefStackWorkload::Execute(), RefDebugWorkload< DataType >::Execute(), BFloat16ToFloat32::Func(), Float16ToFloat32::Func(), Float32ToBFloat16::Func(), Float32ToFloat16::Func(), armnn::Gather(), armnn::GetBiasDataType(), TensorInfo::GetNumBytes(), MakeRandomTensor(), MakeTensor(), OnnxParser::OnnxParser(), Deserializer::OutputShapeOfReshape(), TfLiteParser::OutputShapeOfReshape(), TfLiteParser::OutputShapeOfSqueeze(), armnn::Pad(), PermuteOutputForConcat(), PermuteTensorData(), armnn::Quantize(), QuantizeData(), ShapesAreSameTotalSize::ShapesAreSameTotalSize(), armnn::Splitter(), TfParser::TfParser(), armnnDeserializer::ToConstTensor(), and SpaceToBatchNdQueueDescriptor::Validate().

192 { return m_Shape.GetNumElements(); }
unsigned int GetNumElements() const
Function that calculates the tensor elements by multiplying all dimension sizes which are specified.
Definition: Tensor.cpp:182

◆ GetQuantizationDim()

Optional< unsigned int > GetQuantizationDim ( ) const

Definition at line 486 of file Tensor.cpp.

Referenced by armnn::GetBiasDataType(), armnnUtils::GetPerAxisParams(), armnnUtils::Permuted(), and SerializerVisitor::VisitQuantizedLstmLayer().

487 {
488  return m_Quantization.m_QuantizationDim;
489 }

◆ GetQuantizationOffset()

◆ GetQuantizationScale()

◆ GetQuantizationScales()

std::vector< float > GetQuantizationScales ( ) const

Definition at line 443 of file Tensor.cpp.

Referenced by armnn::GetBiasDataType(), armnnUtils::GetPerAxisParams(), and SerializerVisitor::VisitQuantizedLstmLayer().

444 {
445  return m_Quantization.m_Scales;
446 }

◆ GetShape() [1/2]

const TensorShape& GetShape ( ) const
inline

Definition at line 187 of file Tensor.hpp.

Referenced by armnn::ArgMinMax(), armnn::BatchNormImpl(), armnn::BatchToSpaceNd(), BOOST_AUTO_TEST_CASE(), armnn::BOOST_AUTO_TEST_CASE(), BOOST_FIXTURE_TEST_CASE(), armnn::boost_test_print_type(), armnnTfParser::CalculatePaddedOutputTensorInfo(), armnnUtils::CalculateReducedOutputTensoInfo(), armnnUtils::CalculateStridedSliceOutputTensorInfo(), armnnTfLiteParser::ComputeWrappedIndex(), armnn::Concatenate(), Concatenate(), armnn::ConvertBf16ToFp32Weight(), armnnTfParser::ConvertTfTensorDataType(), armnn::optimizations::ConvertWeight(), armnn::ConvertWeightTensorFromArmnnToAcl(), armnn::CreateAclNormalizationLayerInfoForL2Normalization(), TfLiteParser::CreateNetworkFromBinary(), OnnxParser::CreateNetworkFromString(), armnn::CreateQuantizedConst(), ConcatLayer::CreateWorkload(), SplitterLayer::CreateWorkload(), armnn::Debug(), armnn::DepthToSpace(), DepthwiseConvolution2dAsymmetricTestImpl(), DepthwiseConvolution2dDepthMul1TestImpl(), DepthwiseConvolution2dTestImpl(), armnn::DetectionPostProcess(), RefLstmWorkload::Execute(), RefQLstmWorkload::Execute(), RefComparisonWorkload::Execute(), RefElementwiseUnaryWorkload::Execute(), RefElementwiseWorkload< Functor, ParentDescriptor, DebugString >::Execute(), BFloat16ToFloat32::Func(), Float16ToFloat32::Func(), Float32ToBFloat16::Func(), Float32ToFloat16::Func(), armnn::Gather(), GetBias(), armnn::GetBiasDataType(), armnnUtils::GetPerAxisParams(), Deserializer::GetQLstmDescriptor(), RefTensorHandle::GetShape(), SampleTensorHandle::GetShape(), ConstCpuTensorHandle::GetShape(), GetTensorShapeAsArray(), armnn::GetUnpaddedTensorStrides(), armnn::InstanceNorm(), armnn::IsFloorSupported(), ClLayerSupport::IsResizeBilinearSupported(), NeonLayerSupport::IsResizeBilinearSupported(), ClLayerSupport::IsSplitterSupported(), NeonLayerSupport::IsSplitterSupported(), armnn::LogSoftmax(), main(), MakeTensor(), armnn::Mean(), armnnTfParser::OutputShapeOfExpandDims(), Deserializer::OutputShapeOfReshape(), TfLiteParser::OutputShapeOfReshape(), 
TfLiteParser::OutputShapeOfSqueeze(), armnnTfParser::OutputShapeOfSqueeze(), armnn::Pad(), CaffeParserBase::ParseBatchNormLayer(), CaffeParserBase::ParseConcatLayer(), CaffeParserBase::ParseInnerProductLayer(), CaffeParserBase::ParsePoolingLayer(), CaffeParserBase::ParseScaleLayer(), armnnUtils::Permuted(), PermuteInputsForConcat(), armnn::PermuteTensor(), PermuteTensorNchwToNhwc(), PermuteTensorNhwcToNchw(), armnn::Pooling2d(), RefDepthwiseConvolution2dWorkload::PostAllocationConfigure(), RefConvolution2dWorkload::PostAllocationConfigure(), RefFullyConnectedWorkload::PostAllocationConfigure(), RefTransposeConvolution2dWorkload::PostAllocationConfigure(), armnn::PreluImpl(), armnnUtils::ProcessConcatInputTensorInfo(), RefConvolution2dWorkload::RefConvolution2dWorkload(), RefDepthwiseConvolution2dWorkload::RefDepthwiseConvolution2dWorkload(), RefFullyConnectedWorkload::RefFullyConnectedWorkload(), armnn::ReshapeWeightsForAcl(), armnn::Resize(), PermuteAsReshapeImpl::Run(), TransposeAsReshapeImpl::Run(), OptimizeConsecutiveReshapesImpl::Run(), DepthwiseConvolution2dLayer::SerializeLayerParameters(), Convolution2dLayer::SerializeLayerParameters(), Graph::SerializeToDot(), ShapesAreBroadcastCompatible::ShapesAreBroadcastCompatible(), ShapesAreSameRank::ShapesAreSameRank(), SimpleConvolution2dTestImpl(), armnn::Slice(), armnn::Softmax(), armnn::SpaceToBatchNd(), armnn::SpaceToDepth(), armnn::Split(), armnn::Splitter(), armnn::Stack(), armnn::StridedSlice(), armnnCaffeParser::TensorDescToBlobShape(), TfLiteParser::TfLiteParser(), TfParser::TfParser(), TransposeConvolution2dEndToEnd(), armnnUtils::TransposeTensorShape(), ArgMinMaxQueueDescriptor::Validate(), PermuteQueueDescriptor::Validate(), DepthwiseConvolution2dQueueDescriptor::Validate(), DetectionPostProcessQueueDescriptor::Validate(), ResizeBilinearQueueDescriptor::Validate(), ResizeQueueDescriptor::Validate(), SpaceToBatchNdQueueDescriptor::Validate(), SpaceToDepthQueueDescriptor::Validate(), 
TransposeQueueDescriptor::Validate(), SliceQueueDescriptor::Validate(), DepthToSpaceQueueDescriptor::Validate(), ElementwiseBaseLayer::ValidateTensorShapesFromInputs(), RankLayer::ValidateTensorShapesFromInputs(), QuantizeLayer::ValidateTensorShapesFromInputs(), ActivationLayer::ValidateTensorShapesFromInputs(), ConvertFp32ToFp16Layer::ValidateTensorShapesFromInputs(), FillLayer::ValidateTensorShapesFromInputs(), AbsLayer::ValidateTensorShapesFromInputs(), MemCopyLayer::ValidateTensorShapesFromInputs(), InstanceNormalizationLayer::ValidateTensorShapesFromInputs(), FloorLayer::ValidateTensorShapesFromInputs(), MemImportLayer::ValidateTensorShapesFromInputs(), SliceLayer::ValidateTensorShapesFromInputs(), L2NormalizationLayer::ValidateTensorShapesFromInputs(), NormalizationLayer::ValidateTensorShapesFromInputs(), Pooling2dLayer::ValidateTensorShapesFromInputs(), DebugLayer::ValidateTensorShapesFromInputs(), ConvertBf16ToFp32Layer::ValidateTensorShapesFromInputs(), RsqrtLayer::ValidateTensorShapesFromInputs(), MergeLayer::ValidateTensorShapesFromInputs(), SoftmaxLayer::ValidateTensorShapesFromInputs(), SwitchLayer::ValidateTensorShapesFromInputs(), ConvertFp16ToFp32Layer::ValidateTensorShapesFromInputs(), DequantizeLayer::ValidateTensorShapesFromInputs(), ConvertFp32ToBf16Layer::ValidateTensorShapesFromInputs(), StackLayer::ValidateTensorShapesFromInputs(), FakeQuantizationLayer::ValidateTensorShapesFromInputs(), ResizeLayer::ValidateTensorShapesFromInputs(), BatchToSpaceNdLayer::ValidateTensorShapesFromInputs(), TransposeLayer::ValidateTensorShapesFromInputs(), PadLayer::ValidateTensorShapesFromInputs(), GatherLayer::ValidateTensorShapesFromInputs(), MeanLayer::ValidateTensorShapesFromInputs(), LogSoftmaxLayer::ValidateTensorShapesFromInputs(), ReshapeLayer::ValidateTensorShapesFromInputs(), ConstantLayer::ValidateTensorShapesFromInputs(), PermuteLayer::ValidateTensorShapesFromInputs(), DetectionPostProcessLayer::ValidateTensorShapesFromInputs(), 
ArgMinMaxLayer::ValidateTensorShapesFromInputs(), StridedSliceLayer::ValidateTensorShapesFromInputs(), ElementwiseUnaryLayer::ValidateTensorShapesFromInputs(), DepthToSpaceLayer::ValidateTensorShapesFromInputs(), SpaceToDepthLayer::ValidateTensorShapesFromInputs(), TransposeConvolution2dLayer::ValidateTensorShapesFromInputs(), DepthwiseConvolution2dLayer::ValidateTensorShapesFromInputs(), FullyConnectedLayer::ValidateTensorShapesFromInputs(), SpaceToBatchNdLayer::ValidateTensorShapesFromInputs(), ComparisonLayer::ValidateTensorShapesFromInputs(), PreluLayer::ValidateTensorShapesFromInputs(), Convolution2dLayer::ValidateTensorShapesFromInputs(), ConcatLayer::ValidateTensorShapesFromInputs(), SplitterLayer::ValidateTensorShapesFromInputs(), BatchNormalizationLayer::ValidateTensorShapesFromInputs(), QuantizedLstmLayer::ValidateTensorShapesFromInputs(), LstmLayer::ValidateTensorShapesFromInputs(), QLstmLayer::ValidateTensorShapesFromInputs(), armnn::VerifyTensorInfoDataType(), and SerializerVisitor::VisitQuantizedLstmLayer().

187 { return m_Shape; }

◆ GetShape() [2/2]

TensorShape& GetShape ( )
inline

Definition at line 188 of file Tensor.hpp.

188 { return m_Shape; }

◆ HasMultipleQuantizationScales()

bool HasMultipleQuantizationScales ( ) const
inline

◆ HasPerAxisQuantization()

bool HasPerAxisQuantization ( ) const

◆ IsQuantized()

bool IsQuantized ( ) const

Definition at line 496 of file Tensor.cpp.

References armnn::IsQuantizedType().

Referenced by armnn::GetBiasDataType(), TensorInfo::IsTypeSpaceMatch(), and TypeNotPerAxisQuantized::TypeNotPerAxisQuantized().

497 {
498  return IsQuantizedType(m_DataType);
499 }
constexpr bool IsQuantizedType()
Definition: TypesUtils.hpp:236

◆ IsTypeSpaceMatch()

bool IsTypeSpaceMatch ( const TensorInfo & other) const

Check that the types are the same and, if quantized, that the quantization parameters are the same.

Definition at line 424 of file Tensor.cpp.

References TensorInfo::GetQuantizationOffset(), TensorInfo::GetQuantizationScale(), TensorInfo::HasMultipleQuantizationScales(), and TensorInfo::IsQuantized().

Referenced by ConcatLayer::CreateWorkload(), SplitterLayer::CreateWorkload(), armnn::GetBiasDataType(), ClLayerSupport::IsConcatSupported(), NeonLayerSupport::IsConcatSupported(), ClLayerSupport::IsSplitterSupported(), and NeonLayerSupport::IsSplitterSupported().

425 {
426  bool match = true;
427 
428  match &= m_DataType == other.m_DataType;
429 
430  if (IsQuantized() && !HasMultipleQuantizationScales())
431  {
432  match &= GetQuantizationScale() == other.GetQuantizationScale() &&
433  GetQuantizationOffset() == other.GetQuantizationOffset();
434  }
435  return match;
436 }
bool HasMultipleQuantizationScales() const
Definition: Tensor.hpp:197
int32_t GetQuantizationOffset() const
Definition: Tensor.cpp:470
float GetQuantizationScale() const
Definition: Tensor.cpp:453
bool IsQuantized() const
Definition: Tensor.cpp:496

◆ operator!=()

bool operator!= ( const TensorInfo & other) const

Definition at line 414 of file Tensor.cpp.

415 {
416  return !(*this == other);
417 }

◆ operator=()

TensorInfo & operator= ( const TensorInfo & other)

Definition at line 399 of file Tensor.cpp.

400 {
401  m_Shape = other.m_Shape;
402  m_DataType = other.m_DataType;
403  m_Quantization = other.m_Quantization;
404  return *this;
405 }

◆ operator==()

bool operator== ( const TensorInfo & other) const

Definition at line 407 of file Tensor.cpp.

408 {
409  return ((m_Shape == other.m_Shape) &&
410  (m_DataType == other.m_DataType) &&
411  (m_Quantization == other.m_Quantization));
412 }

◆ SetDataType()

◆ SetQuantizationDim()

void SetQuantizationDim ( const Optional< unsigned int > &  quantizationDim)

Definition at line 491 of file Tensor.cpp.

Referenced by armnnUtils::Permuted(), and TensorInfo::TensorInfo().

492 {
493  m_Quantization.m_QuantizationDim = quantizationDim;
494 }

◆ SetQuantizationOffset()

◆ SetQuantizationScale()

◆ SetQuantizationScales()

void SetQuantizationScales ( const std::vector< float > &  scales)

Definition at line 448 of file Tensor.cpp.

Referenced by BOOST_AUTO_TEST_CASE(), and TensorInfo::TensorInfo().

449 {
450  m_Quantization.m_Scales = scales;
451 }

◆ SetShape()


The documentation for this class was generated from the following files: