ArmNN
 20.05
TensorInfo Class Reference

#include <Tensor.hpp>

Public Member Functions

 TensorInfo ()
 Empty (invalid) constructor. More...
 
 TensorInfo (const TensorShape &shape, DataType dataType, float quantizationScale=0.0f, int32_t quantizationOffset=0)
 
 TensorInfo (unsigned int numDimensions, const unsigned int *dimensionSizes, DataType dataType, float quantizationScale=0.0f, int32_t quantizationOffset=0)
 
 TensorInfo (const TensorShape &shape, DataType dataType, const std::vector< float > &quantizationScales, unsigned int quantizationDim)
 
 TensorInfo (unsigned int numDimensions, const unsigned int *dimensionSizes, DataType dataType, const std::vector< float > &quantizationScales, unsigned int quantizationDim)
 
 TensorInfo (const TensorInfo &other)
 
TensorInfo & operator= (const TensorInfo &other)
 
bool operator== (const TensorInfo &other) const
 
bool operator!= (const TensorInfo &other) const
 
const TensorShape & GetShape () const
 
TensorShape & GetShape ()
 
void SetShape (const TensorShape &newShape)
 
unsigned int GetNumDimensions () const
 
unsigned int GetNumElements () const
 
DataType GetDataType () const
 
void SetDataType (DataType type)
 
bool HasMultipleQuantizationScales () const
 
bool HasPerAxisQuantization () const
 
std::vector< float > GetQuantizationScales () const
 
void SetQuantizationScales (const std::vector< float > &scales)
 
float GetQuantizationScale () const
 
void SetQuantizationScale (float scale)
 
int32_t GetQuantizationOffset () const
 
void SetQuantizationOffset (int32_t offset)
 
Optional< unsigned int > GetQuantizationDim () const
 
void SetQuantizationDim (const Optional< unsigned int > &quantizationDim)
 
bool IsQuantized () const
 
bool IsTypeSpaceMatch (const TensorInfo &other) const
 Check that the types are the same and, if quantized, that the quantization parameters are the same. More...
 
unsigned int GetNumBytes () const
 

Detailed Description

Definition at line 53 of file Tensor.hpp.

Constructor & Destructor Documentation

◆ TensorInfo() [1/6]

Empty (invalid) constructor.

Definition at line 137 of file Tensor.cpp.

138 : m_DataType(DataType::Float32)
139 {
140 }

◆ TensorInfo() [2/6]

TensorInfo ( const TensorShape &  shape,
DataType  dataType,
float  quantizationScale = 0.0f,
int32_t  quantizationOffset = 0 
)

Definition at line 142 of file Tensor.cpp.

References TensorInfo::SetQuantizationOffset(), and TensorInfo::SetQuantizationScale().

146  : m_Shape(shape)
147  , m_DataType(dataType)
148 {
149  SetQuantizationScale(quantizationScale);
150  SetQuantizationOffset(quantizationOffset);
151 }
void SetQuantizationScale(float scale)
Definition: Tensor.cpp:260
void SetQuantizationOffset(int32_t offset)
Definition: Tensor.cpp:276

◆ TensorInfo() [3/6]

TensorInfo ( unsigned int  numDimensions,
const unsigned int *  dimensionSizes,
DataType  dataType,
float  quantizationScale = 0.0f,
int32_t  quantizationOffset = 0 
)

Definition at line 153 of file Tensor.cpp.

References TensorInfo::SetQuantizationOffset(), and TensorInfo::SetQuantizationScale().

158  : m_Shape(numDimensions, dimensionSizes)
159  , m_DataType(dataType)
160 {
161  SetQuantizationScale(quantizationScale);
162  SetQuantizationOffset(quantizationOffset);
163 }
void SetQuantizationScale(float scale)
Definition: Tensor.cpp:260
void SetQuantizationOffset(int32_t offset)
Definition: Tensor.cpp:276

◆ TensorInfo() [4/6]

TensorInfo ( const TensorShape &  shape,
DataType  dataType,
const std::vector< float > &  quantizationScales,
unsigned int  quantizationDim 
)

Definition at line 165 of file Tensor.cpp.

References TensorInfo::SetQuantizationDim(), and TensorInfo::SetQuantizationScales().

169  : m_Shape(shape)
170  , m_DataType(dataType)
171 {
172  SetQuantizationScales(quantizationScales);
173  SetQuantizationDim(MakeOptional<unsigned int>(quantizationDim));
174 }
void SetQuantizationDim(const Optional< unsigned int > &quantizationDim)
Definition: Tensor.cpp:286
void SetQuantizationScales(const std::vector< float > &scales)
Definition: Tensor.cpp:243

◆ TensorInfo() [5/6]

TensorInfo ( unsigned int  numDimensions,
const unsigned int *  dimensionSizes,
DataType  dataType,
const std::vector< float > &  quantizationScales,
unsigned int  quantizationDim 
)

Definition at line 176 of file Tensor.cpp.

References TensorInfo::SetQuantizationDim(), and TensorInfo::SetQuantizationScales().

181  : m_Shape(numDimensions, dimensionSizes)
182  , m_DataType(dataType)
183 {
184  SetQuantizationScales(quantizationScales);
185  SetQuantizationDim(MakeOptional<unsigned int>(quantizationDim));
186 }
void SetQuantizationDim(const Optional< unsigned int > &quantizationDim)
Definition: Tensor.cpp:286
void SetQuantizationScales(const std::vector< float > &scales)
Definition: Tensor.cpp:243

◆ TensorInfo() [6/6]

TensorInfo ( const TensorInfo &  other)

Definition at line 188 of file Tensor.cpp.

189 : m_Shape(other.m_Shape)
190 , m_DataType(other.m_DataType)
191 , m_Quantization(other.m_Quantization)
192 {}

Member Function Documentation

◆ GetDataType()

DataType GetDataType ( ) const
inline

Definition at line 95 of file Tensor.hpp.

Referenced by BiasAndWeightsTypesCompatible::BiasAndWeightsTypesCompatible(), BiasAndWeightsTypesMatch::BiasAndWeightsTypesMatch(), armnn::BOOST_AUTO_TEST_CASE(), BOOST_AUTO_TEST_CASE(), armnnUtils::CalculateReducedOutputTensoInfo(), armnnUtils::CalculateStridedSliceOutputTensorInfo(), armnn::CheckScaleSetOnQuantizedType(), armnn::CheckTensorDataTypesEqual(), armnn::ClConvertFp16ToFp32WorkloadValidate(), armnn::ClConvertFp32ToFp16WorkloadValidate(), armnn::ConvertBf16ToFp32Weight(), armnn::optimizations::ConvertWeight(), armnn::CreateQuantizedConst(), RefDepthToSpaceWorkload::Execute(), RefStridedSliceWorkload::Execute(), RefSliceWorkload::Execute(), RefLstmWorkload::Execute(), BFloat16ToFloat32::Func(), Float16ToFloat32::Func(), Float32ToBFloat16::Func(), Float32ToFloat16::Func(), armnn::GetBiasDataType(), TfLiteParser::GetBuffer(), Layer::GetDataType(), armnnUtils::GetPerAxisParams(), armnn::GetUnpaddedTensorStrides(), armnn::InitializeArmComputeTensorData(), armnn::InsertConvertBf16ToFp32LayersBefore(), armnn::InsertConvertFp16ToFp32LayersBefore(), armnn::InsertConvertFp32ToBf16LayersAfter(), armnn::InsertConvertFp32ToBf16LayersBefore(), armnn::InsertConvertFp32ToFp16LayersAfter(), SampleDynamicLayerSupport::IsAdditionSupported(), RefLayerSupport::IsConvertFp16ToFp32Supported(), RefLayerSupport::IsConvertFp32ToFp16Supported(), RefLayerSupport::IsConvolution2dSupported(), RefLayerSupport::IsDepthwiseConvolutionSupported(), NeonLayerSupport::IsFloorSupported(), armnn::IsFloorSupported(), RefLayerSupport::IsFullyConnectedSupported(), ClLayerSupport::IsQLstmSupported(), NeonLayerSupport::IsQLstmSupported(), ClLayerSupport::IsSplitterSupported(), NeonLayerSupport::IsSplitterSupported(), RefLayerSupport::IsTransposeConvolution2dSupported(), main(), armnn::MakeDecoder(), armnn::MakeEncoder(), armnnTfParser::OutputShapeOfExpandDims(), TfLiteParser::OutputShapeOfReshape(), armnnTfParser::OutputShapeOfSqueeze(), armnn::PermuteTensor(), ConvertFp32NetworkToFp16Impl::Run(), 
TypeAnyOf::TypeAnyOf(), TypeIs::TypeIs(), ArgMinMaxQueueDescriptor::Validate(), FullyConnectedQueueDescriptor::Validate(), Convolution2dQueueDescriptor::Validate(), DepthwiseConvolution2dQueueDescriptor::Validate(), QuantizeQueueDescriptor::Validate(), EqualQueueDescriptor::Validate(), ConvertBf16ToFp32QueueDescriptor::Validate(), ConvertFp32ToBf16QueueDescriptor::Validate(), ConvertFp16ToFp32QueueDescriptor::Validate(), ConvertFp32ToFp16QueueDescriptor::Validate(), GreaterQueueDescriptor::Validate(), GatherQueueDescriptor::Validate(), DequantizeQueueDescriptor::Validate(), TransposeConvolution2dQueueDescriptor::Validate(), ComparisonQueueDescriptor::Validate(), armnn::VerifyTensorInfoDataType(), QuantizerVisitor::VisitInputLayer(), QuantizerVisitor::VisitOutputLayer(), and SerializerVisitor::VisitQuantizedLstmLayer().

95 { return m_DataType; }

◆ GetNumBytes()

◆ GetNumDimensions()

unsigned int GetNumDimensions ( ) const
inline

Definition at line 92 of file Tensor.hpp.

Referenced by armnn::ArgMinMax(), BOOST_AUTO_TEST_CASE(), BOOST_FIXTURE_TEST_CASE(), armnn::boost_test_print_type(), armnnTfParser::CalculatePaddedOutputTensorInfo(), armnnUtils::CalculateReducedOutputTensoInfo(), armnnUtils::CalculateStridedSliceOutputTensorInfo(), armnnTfParser::CheckPaddingTensor(), armnn::ComputeSoftmaxAclAxis(), armnnTfLiteParser::ComputeWrappedIndex(), armnn::Concatenate(), TfLiteParser::CreateNetworkFromBinary(), armnn::Debug(), armnn::Gather(), armnn::GetBiasDataType(), GetTensorShapeAsArray(), RefLayerSupport::IsBatchToSpaceNdSupported(), RefLayerSupport::IsMeanSupported(), armnn::LogSoftmax(), MakeTensor(), armnn::Mean(), armnnTfParser::OutputShapeOfExpandDims(), TfLiteParser::OutputShapeOfReshape(), TfLiteParser::OutputShapeOfSqueeze(), armnnTfParser::OutputShapeOfSqueeze(), CaffeParserBase::ParseConcatLayer(), CaffeParserBase::ParseInnerProductLayer(), RefFullyConnectedWorkload::PostAllocationConfigure(), armnnUtils::ProcessConcatInputTensorInfo(), PermuteAndBatchToSpaceAsDepthToSpaceImpl< PermuteType >::Run(), ParserFlatbuffersFixture::RunTest(), armnn::Softmax(), armnn::Split(), armnn::Splitter(), armnn::Stack(), armnnCaffeParser::TensorDescToBlobShape(), TensorNumDimensionsAreCorrect::TensorNumDimensionsAreCorrect(), TfLiteParser::TfLiteParser(), TfParser::TfParser(), FullyConnectedQueueDescriptor::Validate(), MeanQueueDescriptor::Validate(), PadQueueDescriptor::Validate(), InstanceNormalizationQueueDescriptor::Validate(), L2NormalizationQueueDescriptor::Validate(), StridedSliceQueueDescriptor::Validate(), GatherQueueDescriptor::Validate(), SliceQueueDescriptor::Validate(), MeanLayer::ValidateTensorShapesFromInputs(), and GatherLayer::ValidateTensorShapesFromInputs().

92 { return m_Shape.GetNumDimensions(); }
unsigned int GetNumDimensions() const
Definition: Tensor.hpp:43

◆ GetNumElements()

unsigned int GetNumElements ( ) const
inline

Definition at line 93 of file Tensor.hpp.

Referenced by armnn::Activation(), BOOST_FIXTURE_TEST_CASE(), armnnTfLiteParser::ComputeWrappedIndex(), Concat2dTestImpl(), Concat3dTestImpl(), Concat4dTestImpl(), armnn::ConvertBf16ToFp32Weight(), armnnTfParser::ConvertTfTensorDataType(), armnn::optimizations::ConvertWeight(), TfLiteParser::CreateNetworkFromBinary(), armnn::CreateQuantizedConst(), armnn::Debug(), armnn::Dequantize(), armnn::DetectionPostProcess(), RefConvertBf16ToFp32Workload::Execute(), RefConvertFp16ToFp32Workload::Execute(), RefConvertFp32ToBf16Workload::Execute(), RefFakeQuantizationFloat32Workload::Execute(), RefFloorWorkload::Execute(), SampleDynamicAdditionWorkload::Execute(), RefConvertFp32ToFp16Workload::Execute(), RefStackWorkload::Execute(), RefDebugWorkload< DataType >::Execute(), BFloat16ToFloat32::Func(), Float16ToFloat32::Func(), Float32ToBFloat16::Func(), Float32ToFloat16::Func(), armnn::Gather(), armnn::GetBiasDataType(), TensorInfo::GetNumBytes(), MakeRandomTensor(), MakeTensor(), OnnxParser::OnnxParser(), Deserializer::OutputShapeOfReshape(), TfLiteParser::OutputShapeOfReshape(), TfLiteParser::OutputShapeOfSqueeze(), armnn::Pad(), PermuteOutputForConcat(), PermuteTensorData(), armnn::Quantize(), QuantizeData(), ShapesAreSameTotalSize::ShapesAreSameTotalSize(), armnn::Splitter(), TfParser::TfParser(), armnnDeserializer::ToConstTensor(), and SpaceToBatchNdQueueDescriptor::Validate().

93 { return m_Shape.GetNumElements(); }
unsigned int GetNumElements() const
Definition: Tensor.cpp:107

◆ GetQuantizationDim()

Optional< unsigned int > GetQuantizationDim ( ) const

Definition at line 281 of file Tensor.cpp.

Referenced by armnn::GetBiasDataType(), armnnUtils::GetPerAxisParams(), and SerializerVisitor::VisitQuantizedLstmLayer().

282 {
283  return m_Quantization.m_QuantizationDim;
284 }

◆ GetQuantizationOffset()

◆ GetQuantizationScale()

◆ GetQuantizationScales()

std::vector< float > GetQuantizationScales ( ) const

Definition at line 238 of file Tensor.cpp.

Referenced by armnn::GetBiasDataType(), armnnUtils::GetPerAxisParams(), and SerializerVisitor::VisitQuantizedLstmLayer().

239 {
240  return m_Quantization.m_Scales;
241 }

◆ GetShape() [1/2]

const TensorShape& GetShape ( ) const
inline

Definition at line 88 of file Tensor.hpp.

Referenced by armnn::ArgMinMax(), armnn::BatchNormImpl(), armnn::BatchToSpaceNd(), BOOST_AUTO_TEST_CASE(), armnn::BOOST_AUTO_TEST_CASE(), BOOST_FIXTURE_TEST_CASE(), armnn::boost_test_print_type(), armnnTfParser::CalculatePaddedOutputTensorInfo(), armnnUtils::CalculateReducedOutputTensoInfo(), armnnUtils::CalculateStridedSliceOutputTensorInfo(), armnnTfLiteParser::ComputeWrappedIndex(), armnn::Concatenate(), Concatenate(), armnn::ConvertBf16ToFp32Weight(), armnnTfParser::ConvertTfTensorDataType(), armnn::optimizations::ConvertWeight(), armnn::ConvertWeightTensorFromArmnnToAcl(), armnn::CreateAclNormalizationLayerInfoForL2Normalization(), TfLiteParser::CreateNetworkFromBinary(), OnnxParser::CreateNetworkFromString(), armnn::CreateQuantizedConst(), ConcatLayer::CreateWorkload(), SplitterLayer::CreateWorkload(), armnn::Debug(), armnn::DepthToSpace(), DepthwiseConvolution2dAsymmetricTestImpl(), DepthwiseConvolution2dDepthMul1TestImpl(), DepthwiseConvolution2dTestImpl(), armnn::DetectionPostProcess(), RefLstmWorkload::Execute(), RefQLstmWorkload::Execute(), RefComparisonWorkload::Execute(), RefElementwiseUnaryWorkload::Execute(), RefElementwiseWorkload< Functor, ParentDescriptor, DebugString >::Execute(), BFloat16ToFloat32::Func(), Float16ToFloat32::Func(), Float32ToBFloat16::Func(), Float32ToFloat16::Func(), armnn::Gather(), GetBias(), armnn::GetBiasDataType(), armnnUtils::GetPerAxisParams(), Deserializer::GetQLstmDescriptor(), RefTensorHandle::GetShape(), SampleTensorHandle::GetShape(), ConstCpuTensorHandle::GetShape(), GetTensorShapeAsArray(), armnn::GetUnpaddedTensorStrides(), armnn::InstanceNorm(), armnn::IsFloorSupported(), ClLayerSupport::IsResizeBilinearSupported(), NeonLayerSupport::IsResizeBilinearSupported(), ClLayerSupport::IsSplitterSupported(), NeonLayerSupport::IsSplitterSupported(), armnn::LogSoftmax(), main(), MakeTensor(), armnn::Mean(), armnnTfParser::OutputShapeOfExpandDims(), Deserializer::OutputShapeOfReshape(), TfLiteParser::OutputShapeOfReshape(), 
TfLiteParser::OutputShapeOfSqueeze(), armnnTfParser::OutputShapeOfSqueeze(), armnn::Pad(), CaffeParserBase::ParseBatchNormLayer(), CaffeParserBase::ParseConcatLayer(), CaffeParserBase::ParseInnerProductLayer(), CaffeParserBase::ParsePoolingLayer(), CaffeParserBase::ParseScaleLayer(), armnnUtils::Permuted(), PermuteInputsForConcat(), armnn::PermuteTensor(), PermuteTensorNchwToNhwc(), PermuteTensorNhwcToNchw(), armnn::Pooling2d(), RefDepthwiseConvolution2dWorkload::PostAllocationConfigure(), RefConvolution2dWorkload::PostAllocationConfigure(), RefFullyConnectedWorkload::PostAllocationConfigure(), RefTransposeConvolution2dWorkload::PostAllocationConfigure(), armnn::PreluImpl(), armnnUtils::ProcessConcatInputTensorInfo(), RefConvolution2dWorkload::RefConvolution2dWorkload(), RefDepthwiseConvolution2dWorkload::RefDepthwiseConvolution2dWorkload(), RefFullyConnectedWorkload::RefFullyConnectedWorkload(), armnn::ReshapeWeightsForAcl(), armnn::Resize(), PermuteAsReshapeImpl::Run(), TransposeAsReshapeImpl::Run(), OptimizeConsecutiveReshapesImpl::Run(), DepthwiseConvolution2dLayer::SerializeLayerParameters(), Convolution2dLayer::SerializeLayerParameters(), Graph::SerializeToDot(), ShapesAreBroadcastCompatible::ShapesAreBroadcastCompatible(), ShapesAreSameRank::ShapesAreSameRank(), SimpleConvolution2dTestImpl(), armnn::Slice(), armnn::Softmax(), armnn::SpaceToBatchNd(), armnn::SpaceToDepth(), armnn::Split(), armnn::Splitter(), armnn::Stack(), armnn::StridedSlice(), armnnCaffeParser::TensorDescToBlobShape(), TfLiteParser::TfLiteParser(), TfParser::TfParser(), TransposeConvolution2dEndToEnd(), armnnUtils::TransposeTensorShape(), ArgMinMaxQueueDescriptor::Validate(), PermuteQueueDescriptor::Validate(), DepthwiseConvolution2dQueueDescriptor::Validate(), DetectionPostProcessQueueDescriptor::Validate(), ResizeBilinearQueueDescriptor::Validate(), ResizeQueueDescriptor::Validate(), SpaceToBatchNdQueueDescriptor::Validate(), SpaceToDepthQueueDescriptor::Validate(), 
TransposeQueueDescriptor::Validate(), SliceQueueDescriptor::Validate(), DepthToSpaceQueueDescriptor::Validate(), ElementwiseBaseLayer::ValidateTensorShapesFromInputs(), QuantizeLayer::ValidateTensorShapesFromInputs(), ActivationLayer::ValidateTensorShapesFromInputs(), ConvertFp32ToFp16Layer::ValidateTensorShapesFromInputs(), NormalizationLayer::ValidateTensorShapesFromInputs(), DebugLayer::ValidateTensorShapesFromInputs(), SliceLayer::ValidateTensorShapesFromInputs(), MemImportLayer::ValidateTensorShapesFromInputs(), FloorLayer::ValidateTensorShapesFromInputs(), BatchToSpaceNdLayer::ValidateTensorShapesFromInputs(), AbsLayer::ValidateTensorShapesFromInputs(), DequantizeLayer::ValidateTensorShapesFromInputs(), InstanceNormalizationLayer::ValidateTensorShapesFromInputs(), Pooling2dLayer::ValidateTensorShapesFromInputs(), SoftmaxLayer::ValidateTensorShapesFromInputs(), MemCopyLayer::ValidateTensorShapesFromInputs(), ConvertBf16ToFp32Layer::ValidateTensorShapesFromInputs(), SwitchLayer::ValidateTensorShapesFromInputs(), ConvertFp16ToFp32Layer::ValidateTensorShapesFromInputs(), FakeQuantizationLayer::ValidateTensorShapesFromInputs(), ResizeLayer::ValidateTensorShapesFromInputs(), ConvertFp32ToBf16Layer::ValidateTensorShapesFromInputs(), MergeLayer::ValidateTensorShapesFromInputs(), StackLayer::ValidateTensorShapesFromInputs(), RsqrtLayer::ValidateTensorShapesFromInputs(), L2NormalizationLayer::ValidateTensorShapesFromInputs(), GatherLayer::ValidateTensorShapesFromInputs(), TransposeLayer::ValidateTensorShapesFromInputs(), MeanLayer::ValidateTensorShapesFromInputs(), LogSoftmaxLayer::ValidateTensorShapesFromInputs(), PermuteLayer::ValidateTensorShapesFromInputs(), ConstantLayer::ValidateTensorShapesFromInputs(), ReshapeLayer::ValidateTensorShapesFromInputs(), DetectionPostProcessLayer::ValidateTensorShapesFromInputs(), StridedSliceLayer::ValidateTensorShapesFromInputs(), ArgMinMaxLayer::ValidateTensorShapesFromInputs(), 
ElementwiseUnaryLayer::ValidateTensorShapesFromInputs(), DepthwiseConvolution2dLayer::ValidateTensorShapesFromInputs(), TransposeConvolution2dLayer::ValidateTensorShapesFromInputs(), FullyConnectedLayer::ValidateTensorShapesFromInputs(), SpaceToDepthLayer::ValidateTensorShapesFromInputs(), PreluLayer::ValidateTensorShapesFromInputs(), SpaceToBatchNdLayer::ValidateTensorShapesFromInputs(), ComparisonLayer::ValidateTensorShapesFromInputs(), DepthToSpaceLayer::ValidateTensorShapesFromInputs(), Convolution2dLayer::ValidateTensorShapesFromInputs(), SplitterLayer::ValidateTensorShapesFromInputs(), BatchNormalizationLayer::ValidateTensorShapesFromInputs(), QuantizedLstmLayer::ValidateTensorShapesFromInputs(), LstmLayer::ValidateTensorShapesFromInputs(), QLstmLayer::ValidateTensorShapesFromInputs(), armnn::VerifyTensorInfoDataType(), and SerializerVisitor::VisitQuantizedLstmLayer().

88 { return m_Shape; }

◆ GetShape() [2/2]

TensorShape& GetShape ( )
inline

Definition at line 89 of file Tensor.hpp.

89 { return m_Shape; }

◆ HasMultipleQuantizationScales()

bool HasMultipleQuantizationScales ( ) const
inline

◆ HasPerAxisQuantization()

bool HasPerAxisQuantization ( ) const

◆ IsQuantized()

bool IsQuantized ( ) const

Definition at line 291 of file Tensor.cpp.

References armnn::IsQuantizedType().

Referenced by armnn::GetBiasDataType(), TensorInfo::IsTypeSpaceMatch(), and TypeNotPerAxisQuantized::TypeNotPerAxisQuantized().

292 {
293  return IsQuantizedType(m_DataType);
294 }
constexpr bool IsQuantizedType()
Definition: TypesUtils.hpp:236

◆ IsTypeSpaceMatch()

bool IsTypeSpaceMatch ( const TensorInfo &  other) const

Check that the types are the same and, if quantized, that the quantization parameters are the same.

Definition at line 219 of file Tensor.cpp.

References TensorInfo::GetQuantizationOffset(), TensorInfo::GetQuantizationScale(), TensorInfo::HasMultipleQuantizationScales(), and TensorInfo::IsQuantized().

Referenced by ConcatLayer::CreateWorkload(), SplitterLayer::CreateWorkload(), armnn::GetBiasDataType(), ClLayerSupport::IsConcatSupported(), NeonLayerSupport::IsConcatSupported(), ClLayerSupport::IsSplitterSupported(), and NeonLayerSupport::IsSplitterSupported().

220 {
221  bool match = true;
222 
223  match &= m_DataType == other.m_DataType;
224 
225  if (IsQuantized() && !HasMultipleQuantizationScales())
226  {
227  match &= GetQuantizationScale() == other.GetQuantizationScale() &&
228  GetQuantizationOffset() == other.GetQuantizationOffset();
229  }
230  return match;
231 }
bool HasMultipleQuantizationScales() const
Definition: Tensor.hpp:98
int32_t GetQuantizationOffset() const
Definition: Tensor.cpp:265
float GetQuantizationScale() const
Definition: Tensor.cpp:248
bool IsQuantized() const
Definition: Tensor.cpp:291

◆ operator!=()

bool operator!= ( const TensorInfo &  other) const

Definition at line 209 of file Tensor.cpp.

210 {
211  return !(*this == other);
212 }

◆ operator=()

TensorInfo & operator= ( const TensorInfo &  other)

Definition at line 194 of file Tensor.cpp.

195 {
196  m_Shape = other.m_Shape;
197  m_DataType = other.m_DataType;
198  m_Quantization = other.m_Quantization;
199  return *this;
200 }

◆ operator==()

bool operator== ( const TensorInfo &  other) const

Definition at line 202 of file Tensor.cpp.

203 {
204  return ((m_Shape == other.m_Shape) &&
205  (m_DataType == other.m_DataType) &&
206  (m_Quantization == other.m_Quantization));
207 }

◆ SetDataType()

◆ SetQuantizationDim()

void SetQuantizationDim ( const Optional< unsigned int > &  quantizationDim)

Definition at line 286 of file Tensor.cpp.

Referenced by TensorInfo::TensorInfo().

287 {
288  m_Quantization.m_QuantizationDim = quantizationDim;
289 }

◆ SetQuantizationOffset()

◆ SetQuantizationScale()

◆ SetQuantizationScales()

void SetQuantizationScales ( const std::vector< float > &  scales)

Definition at line 243 of file Tensor.cpp.

Referenced by TensorInfo::TensorInfo().

244 {
245  m_Quantization.m_Scales = scales;
246 }

◆ SetShape()


The documentation for this class was generated from the following files: