From 6940dd720ebb6b3d1df8ca203ab696daefe58189 Mon Sep 17 00:00:00 2001
From: Jim Flynn
Date: Fri, 20 Mar 2020 12:25:56 +0000
Subject: renamed Documentation folder 20.02 and added .nojekyll file

Signed-off-by: Jim Flynn
---
 Documentation/classarmnn_1_1_tensor_info.xhtml | 1004 ------------------------
 1 file changed, 1004 deletions(-)
 delete mode 100644 Documentation/classarmnn_1_1_tensor_info.xhtml

diff --git a/Documentation/classarmnn_1_1_tensor_info.xhtml b/Documentation/classarmnn_1_1_tensor_info.xhtml
deleted file mode 100644
index 69222379b5..0000000000
--- a/Documentation/classarmnn_1_1_tensor_info.xhtml
+++ /dev/null
@@ -1,1004 +0,0 @@

ArmNN: TensorInfo Class Reference
TensorInfo Class Reference

#include <Tensor.hpp>

Public Member Functions

TensorInfo ()
    Empty (invalid) constructor.
TensorInfo (const TensorShape &shape, DataType dataType, float quantizationScale=0.0f, int32_t quantizationOffset=0)
TensorInfo (unsigned int numDimensions, const unsigned int *dimensionSizes, DataType dataType, float quantizationScale=0.0f, int32_t quantizationOffset=0)
TensorInfo (const TensorShape &shape, DataType dataType, const std::vector< float > &quantizationScales, unsigned int quantizationDim)
TensorInfo (unsigned int numDimensions, const unsigned int *dimensionSizes, DataType dataType, const std::vector< float > &quantizationScales, unsigned int quantizationDim)
TensorInfo (const TensorInfo &other)
TensorInfo & operator= (const TensorInfo &other)
bool operator== (const TensorInfo &other) const
bool operator!= (const TensorInfo &other) const
const TensorShape & GetShape () const
TensorShape & GetShape ()
void SetShape (const TensorShape &newShape)
unsigned int GetNumDimensions () const
unsigned int GetNumElements () const
DataType GetDataType () const
void SetDataType (DataType type)
bool HasMultipleQuantizationScales () const
bool HasPerAxisQuantization () const
std::vector< float > GetQuantizationScales () const
void SetQuantizationScales (const std::vector< float > &scales)
float GetQuantizationScale () const
void SetQuantizationScale (float scale)
int32_t GetQuantizationOffset () const
void SetQuantizationOffset (int32_t offset)
Optional< unsigned int > GetQuantizationDim () const
void SetQuantizationDim (const Optional< unsigned int > &quantizationDim)
bool IsQuantized () const
bool IsTypeSpaceMatch (const TensorInfo &other) const
    Check that the types are the same and, if quantized, that the quantization parameters are the same.
unsigned int GetNumBytes () const
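The sketch below is illustrative only and was not part of the generated reference: it constructs a Float32 TensorInfo and queries the size accessors listed above. The header paths and the N x C x H x W shape are assumptions made for the example.

#include <armnn/Tensor.hpp>
#include <armnn/Types.hpp>
#include <iostream>

int main()
{
    // A 4-D Float32 tensor described as N x C x H x W (shape chosen for the example).
    armnn::TensorShape shape({ 1, 3, 224, 224 });
    armnn::TensorInfo  info(shape, armnn::DataType::Float32);

    std::cout << info.GetNumDimensions() << " dimensions\n"   // 4
              << info.GetNumElements()   << " elements\n"     // 150528
              << info.GetNumBytes()      << " bytes\n";       // 602112, Float32 is 4 bytes per element
    return 0;
}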

Detailed Description

Definition at line 53 of file Tensor.hpp.

Constructor & Destructor Documentation

◆ TensorInfo() [1/6]

TensorInfo ()

Empty (invalid) constructor.

Definition at line 136 of file Tensor.cpp.

137 : m_DataType(DataType::Float32)
138 {
139 }
◆ TensorInfo() [2/6]

TensorInfo (const TensorShape & shape,
            DataType dataType,
            float quantizationScale = 0.0f,
            int32_t quantizationOffset = 0)

Definition at line 141 of file Tensor.cpp.

References TensorInfo::SetQuantizationOffset(), and TensorInfo::SetQuantizationScale().

145  : m_Shape(shape)
146  , m_DataType(dataType)
147 {
148     SetQuantizationScale(quantizationScale);
149     SetQuantizationOffset(quantizationOffset);
150 }
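As an illustration of this constructor (a fragment, assuming the includes from the sketch further up; the scale, offset and the QAsymmU8 enumerator are assumptions for the example):

// Per-tensor quantised 8-bit tensor; dequantised value = scale * (quantised - offset).
armnn::TensorInfo qInfo(armnn::TensorShape({ 1, 8 }),
                        armnn::DataType::QAsymmU8,
                        0.05f,   // quantizationScale
                        128);    // quantizationOffset
// qInfo.IsQuantized() == true and qInfo.GetQuantizationScale() == 0.05f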
◆ TensorInfo() [3/6]

TensorInfo (unsigned int numDimensions,
            const unsigned int * dimensionSizes,
            DataType dataType,
            float quantizationScale = 0.0f,
            int32_t quantizationOffset = 0)

Definition at line 152 of file Tensor.cpp.

References TensorInfo::SetQuantizationOffset(), and TensorInfo::SetQuantizationScale().

157  : m_Shape(numDimensions, dimensionSizes)
158  , m_DataType(dataType)
159 {
160     SetQuantizationScale(quantizationScale);
161     SetQuantizationOffset(quantizationOffset);
162 }
◆ TensorInfo() [4/6]

TensorInfo (const TensorShape & shape,
            DataType dataType,
            const std::vector< float > & quantizationScales,
            unsigned int quantizationDim)

Definition at line 164 of file Tensor.cpp.

References TensorInfo::SetQuantizationDim(), and TensorInfo::SetQuantizationScales().

168  : m_Shape(shape)
169  , m_DataType(dataType)
170 {
171     SetQuantizationScales(quantizationScales);
172     SetQuantizationDim(MakeOptional<unsigned int>(quantizationDim));
173 }
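A hedged sketch of per-axis (per-channel) quantization using this constructor; the QSymmS8 data type, the scale values and the choice of dimension 0 are assumptions for the example:

// One scale per output channel, quantised along dimension 0 (4 channels here).
std::vector<float> channelScales = { 0.1f, 0.2f, 0.15f, 0.25f };
armnn::TensorInfo perAxisInfo(armnn::TensorShape({ 4, 3, 3, 3 }),
                              armnn::DataType::QSymmS8,
                              channelScales,
                              0 /* quantizationDim */);
// perAxisInfo.HasPerAxisQuantization() and perAxisInfo.HasMultipleQuantizationScales() are both true.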
◆ TensorInfo() [5/6]

TensorInfo (unsigned int numDimensions,
            const unsigned int * dimensionSizes,
            DataType dataType,
            const std::vector< float > & quantizationScales,
            unsigned int quantizationDim)

Definition at line 175 of file Tensor.cpp.

References TensorInfo::SetQuantizationDim(), and TensorInfo::SetQuantizationScales().

180  : m_Shape(numDimensions, dimensionSizes)
181  , m_DataType(dataType)
182 {
183     SetQuantizationScales(quantizationScales);
184     SetQuantizationDim(MakeOptional<unsigned int>(quantizationDim));
185 }
◆ TensorInfo() [6/6]

TensorInfo (const TensorInfo & other)

Definition at line 187 of file Tensor.cpp.

188 : m_Shape(other.m_Shape)
189 , m_DataType(other.m_DataType)
190 , m_Quantization(other.m_Quantization)
191 {}

Member Function Documentation

◆ GetDataType()

DataType GetDataType () const   [inline]

Definition at line 95 of file Tensor.hpp.

Referenced by BiasAndWeightsTypesCompatible::BiasAndWeightsTypesCompatible(), BiasAndWeightsTypesMatch::BiasAndWeightsTypesMatch(), armnn::BOOST_AUTO_TEST_CASE(), BOOST_AUTO_TEST_CASE(), armnnUtils::CalculateReducedOutputTensoInfo(), armnnUtils::CalculateStridedSliceOutputTensorInfo(), armnn::CheckScaleSetOnQuantizedType(), armnn::CheckTensorDataTypesEqual(), armnn::ClConvertFp16ToFp32WorkloadValidate(), armnn::ClConvertFp32ToFp16WorkloadValidate(), armnn::CreateQuantizedConst(), RefStridedSliceWorkload::Execute(), RefDepthToSpaceWorkload::Execute(), RefSliceWorkload::Execute(), RefLstmWorkload::Execute(), Float16ToFloat32::Func(), Float32ToFloat16::Func(), armnn::GetBiasDataType(), TfLiteParser::GetBuffer(), Layer::GetDataType(), armnnUtils::GetPerAxisParams(), armnn::GetUnpaddedTensorStrides(), SerializerVisitor::GetVersionTable(), armnn::InitializeArmComputeTensorData(), armnn::InsertConvertFp16ToFp32LayersBefore(), armnn::InsertConvertFp32ToFp16LayersAfter(), SampleDynamicLayerSupport::IsAdditionSupported(), ClLayerSupport::IsConstantSupported(), NeonLayerSupport::IsConstantSupported(), RefLayerSupport::IsConvertFp16ToFp32Supported(), RefLayerSupport::IsConvertFp32ToFp16Supported(), RefLayerSupport::IsConvolution2dSupported(), RefLayerSupport::IsDepthwiseConvolutionSupported(), NeonLayerSupport::IsFloorSupported(), armnn::IsFloorSupported(), RefLayerSupport::IsFullyConnectedSupported(), ClLayerSupport::IsSplitterSupported(), NeonLayerSupport::IsSplitterSupported(), RefLayerSupport::IsTransposeConvolution2dSupported(), main(), armnn::MakeDecoder(), armnn::MakeEncoder(), armnnTfParser::OutputShapeOfExpandDims(), TfLiteParser::OutputShapeOfReshape(), armnnTfParser::OutputShapeOfSqueeze(), armnn::PermuteTensor(), ConvertFp32NetworkToFp16Impl::Run(), TypeAnyOf::TypeAnyOf(), TypeIs::TypeIs(), ArgMinMaxQueueDescriptor::Validate(), FullyConnectedQueueDescriptor::Validate(), Convolution2dQueueDescriptor::Validate(), DepthwiseConvolution2dQueueDescriptor::Validate(), QuantizeQueueDescriptor::Validate(), EqualQueueDescriptor::Validate(), ConvertFp16ToFp32QueueDescriptor::Validate(), ConvertFp32ToFp16QueueDescriptor::Validate(), GreaterQueueDescriptor::Validate(), GatherQueueDescriptor::Validate(), DequantizeQueueDescriptor::Validate(), TransposeConvolution2dQueueDescriptor::Validate(), ComparisonQueueDescriptor::Validate(), armnn::VerifyTensorInfoDataType(), QuantizerVisitor::VisitInputLayer(), QuantizerVisitor::VisitOutputLayer(), and SerializerVisitor::VisitQuantizedLstmLayer().

95 { return m_DataType; }

◆ GetNumBytes()

◆ GetNumDimensions()

unsigned int GetNumDimensions () const   [inline]

Definition at line 92 of file Tensor.hpp.

Referenced by armnn::ArgMinMax(), BOOST_AUTO_TEST_CASE(), BOOST_FIXTURE_TEST_CASE(), armnn::boost_test_print_type(), armnnTfParser::CalculatePaddedOutputTensorInfo(), armnnUtils::CalculateReducedOutputTensoInfo(), armnnUtils::CalculateStridedSliceOutputTensorInfo(), armnnTfParser::CheckPaddingTensor(), armnn::ComputeSoftmaxAclAxis(), armnn::Concatenate(), TfLiteParser::CreateNetworkFromBinary(), armnn::Debug(), armnn::Gather(), armnn::GetBiasDataType(), GetTensorShapeAsArray(), RefLayerSupport::IsBatchToSpaceNdSupported(), RefLayerSupport::IsMeanSupported(), armnn::LogSoftmax(), MakeTensor(), armnn::Mean(), armnnTfParser::OutputShapeOfExpandDims(), TfLiteParser::OutputShapeOfReshape(), TfLiteParser::OutputShapeOfSqueeze(), armnnTfParser::OutputShapeOfSqueeze(), CaffeParserBase::ParseConcatLayer(), CaffeParserBase::ParseInnerProductLayer(), RefFullyConnectedWorkload::PostAllocationConfigure(), armnnUtils::ProcessConcatInputTensorInfo(), PermuteAndBatchToSpaceAsDepthToSpaceImpl< PermuteType >::Run(), ParserFlatbuffersFixture::RunTest(), armnn::Softmax(), armnn::Split(), armnn::Splitter(), armnn::Stack(), armnnCaffeParser::TensorDescToBlobShape(), TensorNumDimensionsAreCorrect::TensorNumDimensionsAreCorrect(), TfLiteParser::TfLiteParser(), TfParser::TfParser(), FullyConnectedQueueDescriptor::Validate(), MeanQueueDescriptor::Validate(), PadQueueDescriptor::Validate(), InstanceNormalizationQueueDescriptor::Validate(), L2NormalizationQueueDescriptor::Validate(), StridedSliceQueueDescriptor::Validate(), GatherQueueDescriptor::Validate(), SliceQueueDescriptor::Validate(), GatherLayer::ValidateTensorShapesFromInputs(), and MeanLayer::ValidateTensorShapesFromInputs().

92 { return m_Shape.GetNumDimensions(); }

◆ GetNumElements()

◆ GetQuantizationDim()

Optional< unsigned int > GetQuantizationDim () const

Definition at line 280 of file Tensor.cpp.

Referenced by armnn::GetBiasDataType(), and armnnUtils::GetPerAxisParams().

281 {
282     return m_Quantization.m_QuantizationDim;
283 }

◆ GetQuantizationOffset()

◆ GetQuantizationScale()

◆ GetQuantizationScales()

std::vector< float > GetQuantizationScales () const

Definition at line 237 of file Tensor.cpp.


Referenced by armnn::GetBiasDataType(), and armnnUtils::GetPerAxisParams().

238 {
239     return m_Quantization.m_Scales;
240 }

◆ GetShape() [1/2]

const TensorShape & GetShape () const   [inline]

Definition at line 88 of file Tensor.hpp.

Referenced by armnn::ArgMinMax(), armnn::BatchNormImpl(), armnn::BatchToSpaceNd(), BOOST_AUTO_TEST_CASE(), armnn::BOOST_AUTO_TEST_CASE(), BOOST_FIXTURE_TEST_CASE(), armnn::boost_test_print_type(), armnnTfParser::CalculatePaddedOutputTensorInfo(), armnnUtils::CalculateReducedOutputTensoInfo(), armnnUtils::CalculateStridedSliceOutputTensorInfo(), armnn::Concatenate(), Concatenate(), armnnTfParser::ConvertTfTensorDataType(), armnn::ConvertWeightTensorFromArmnnToAcl(), armnn::CreateAclNormalizationLayerInfoForL2Normalization(), TfLiteParser::CreateNetworkFromBinary(), OnnxParser::CreateNetworkFromString(), armnn::CreateQuantizedConst(), ConcatLayer::CreateWorkload(), SplitterLayer::CreateWorkload(), armnn::Debug(), armnn::DepthToSpace(), DepthwiseConvolution2dAsymmetricTestImpl(), DepthwiseConvolution2dDepthMul1TestImpl(), DepthwiseConvolution2dTestImpl(), armnn::DetectionPostProcess(), RefLstmWorkload::Execute(), RefComparisonWorkload::Execute(), RefElementwiseUnaryWorkload::Execute(), RefElementwiseWorkload< Functor, ParentDescriptor, DebugString >::Execute(), Float16ToFloat32::Func(), Float32ToFloat16::Func(), armnn::Gather(), GetBias(), armnn::GetBiasDataType(), Deserializer::GetLstmDescriptor(), armnnUtils::GetPerAxisParams(), RefTensorHandle::GetShape(), SampleTensorHandle::GetShape(), ConstCpuTensorHandle::GetShape(), GetTensorShapeAsArray(), armnn::GetUnpaddedTensorStrides(), SerializerVisitor::GetVersionTable(), armnn::InstanceNorm(), armnn::IsFloorSupported(), ClLayerSupport::IsResizeBilinearSupported(), NeonLayerSupport::IsResizeBilinearSupported(), ClLayerSupport::IsSplitterSupported(), NeonLayerSupport::IsSplitterSupported(), armnn::LogSoftmax(), main(), MakeTensor(), armnn::Mean(), armnnTfParser::OutputShapeOfExpandDims(), Deserializer::OutputShapeOfReshape(), TfLiteParser::OutputShapeOfReshape(), TfLiteParser::OutputShapeOfSqueeze(), armnnTfParser::OutputShapeOfSqueeze(), armnn::Pad(), CaffeParserBase::ParseBatchNormLayer(), CaffeParserBase::ParseConcatLayer(), CaffeParserBase::ParseInnerProductLayer(), CaffeParserBase::ParsePoolingLayer(), CaffeParserBase::ParseScaleLayer(), armnnUtils::Permuted(), PermuteInputsForConcat(), armnn::PermuteTensor(), PermuteTensorNchwToNhwc(), PermuteTensorNhwcToNchw(), armnn::Pooling2d(), RefDepthwiseConvolution2dWorkload::PostAllocationConfigure(), RefConvolution2dWorkload::PostAllocationConfigure(), RefFullyConnectedWorkload::PostAllocationConfigure(), RefTransposeConvolution2dWorkload::PostAllocationConfigure(), armnn::PreluImpl(), armnnUtils::ProcessConcatInputTensorInfo(), RefConvolution2dWorkload::RefConvolution2dWorkload(), RefDepthwiseConvolution2dWorkload::RefDepthwiseConvolution2dWorkload(), RefFullyConnectedWorkload::RefFullyConnectedWorkload(), armnn::ReshapeWeightsForAcl(), armnn::Resize(), TransposeAsReshapeImpl::Run(), PermuteAsReshapeImpl::Run(), OptimizeConsecutiveReshapesImpl::Run(), DepthwiseConvolution2dLayer::SerializeLayerParameters(), Convolution2dLayer::SerializeLayerParameters(), Graph::SerializeToDot(), ShapesAreBroadcastCompatible::ShapesAreBroadcastCompatible(), ShapesAreSameRank::ShapesAreSameRank(), SimpleConvolution2dTestImpl(), armnn::Slice(), armnn::Softmax(), armnn::SpaceToBatchNd(), armnn::SpaceToDepth(), armnn::Split(), armnn::Splitter(), armnn::Stack(), armnn::StridedSlice(), armnnCaffeParser::TensorDescToBlobShape(), TfLiteParser::TfLiteParser(), TfParser::TfParser(), TransposeConvolution2dEndToEnd(), armnnUtils::TransposeTensorShape(), ArgMinMaxQueueDescriptor::Validate(), PermuteQueueDescriptor::Validate(), 
DepthwiseConvolution2dQueueDescriptor::Validate(), DetectionPostProcessQueueDescriptor::Validate(), ResizeBilinearQueueDescriptor::Validate(), ResizeQueueDescriptor::Validate(), SpaceToBatchNdQueueDescriptor::Validate(), SpaceToDepthQueueDescriptor::Validate(), TransposeQueueDescriptor::Validate(), SliceQueueDescriptor::Validate(), DepthToSpaceQueueDescriptor::Validate(), ElementwiseBaseLayer::ValidateTensorShapesFromInputs(), QuantizeLayer::ValidateTensorShapesFromInputs(), ActivationLayer::ValidateTensorShapesFromInputs(), TransposeLayer::ValidateTensorShapesFromInputs(), RsqrtLayer::ValidateTensorShapesFromInputs(), ResizeLayer::ValidateTensorShapesFromInputs(), BatchToSpaceNdLayer::ValidateTensorShapesFromInputs(), PermuteLayer::ValidateTensorShapesFromInputs(), MergeLayer::ValidateTensorShapesFromInputs(), MemCopyLayer::ValidateTensorShapesFromInputs(), SwitchLayer::ValidateTensorShapesFromInputs(), NormalizationLayer::ValidateTensorShapesFromInputs(), DequantizeLayer::ValidateTensorShapesFromInputs(), MemImportLayer::ValidateTensorShapesFromInputs(), StackLayer::ValidateTensorShapesFromInputs(), DebugLayer::ValidateTensorShapesFromInputs(), Pooling2dLayer::ValidateTensorShapesFromInputs(), ReshapeLayer::ValidateTensorShapesFromInputs(), FakeQuantizationLayer::ValidateTensorShapesFromInputs(), FloorLayer::ValidateTensorShapesFromInputs(), AbsLayer::ValidateTensorShapesFromInputs(), L2NormalizationLayer::ValidateTensorShapesFromInputs(), SliceLayer::ValidateTensorShapesFromInputs(), SoftmaxLayer::ValidateTensorShapesFromInputs(), InstanceNormalizationLayer::ValidateTensorShapesFromInputs(), ConvertFp32ToFp16Layer::ValidateTensorShapesFromInputs(), GatherLayer::ValidateTensorShapesFromInputs(), MeanLayer::ValidateTensorShapesFromInputs(), LogSoftmaxLayer::ValidateTensorShapesFromInputs(), ConvertFp16ToFp32Layer::ValidateTensorShapesFromInputs(), ConstantLayer::ValidateTensorShapesFromInputs(), ArgMinMaxLayer::ValidateTensorShapesFromInputs(), StridedSliceLayer::ValidateTensorShapesFromInputs(), ElementwiseUnaryLayer::ValidateTensorShapesFromInputs(), DetectionPostProcessLayer::ValidateTensorShapesFromInputs(), DepthwiseConvolution2dLayer::ValidateTensorShapesFromInputs(), FullyConnectedLayer::ValidateTensorShapesFromInputs(), TransposeConvolution2dLayer::ValidateTensorShapesFromInputs(), SpaceToBatchNdLayer::ValidateTensorShapesFromInputs(), PreluLayer::ValidateTensorShapesFromInputs(), DepthToSpaceLayer::ValidateTensorShapesFromInputs(), SpaceToDepthLayer::ValidateTensorShapesFromInputs(), ComparisonLayer::ValidateTensorShapesFromInputs(), Convolution2dLayer::ValidateTensorShapesFromInputs(), SplitterLayer::ValidateTensorShapesFromInputs(), BatchNormalizationLayer::ValidateTensorShapesFromInputs(), QuantizedLstmLayer::ValidateTensorShapesFromInputs(), LstmLayer::ValidateTensorShapesFromInputs(), armnn::VerifyTensorInfoDataType(), and SerializerVisitor::VisitQuantizedLstmLayer().

88 { return m_Shape; }

◆ GetShape() [2/2]

TensorShape & GetShape ()   [inline]

Definition at line 89 of file Tensor.hpp.

89 { return m_Shape; }

◆ HasMultipleQuantizationScales()

bool HasMultipleQuantizationScales () const   [inline]

Definition at line 98 of file Tensor.hpp.

References TensorShape::operator==(), SetQuantizationOffset(), and SetQuantizationScale().

Referenced by armnn::GetBiasDataType(), TensorInfo::GetQuantizationScale(), TensorInfo::HasPerAxisQuantization(), and TensorInfo::IsTypeSpaceMatch().

98 { return m_Quantization.m_Scales.size() > 1; }

◆ HasPerAxisQuantization()

bool HasPerAxisQuantization () const

Definition at line 232 of file Tensor.cpp.

References TensorInfo::HasMultipleQuantizationScales().

Referenced by armnn::GetBiasDataType(), armnnUtils::GetPerAxisParams(), armnn::MakeDecoder(), armnn::MakeEncoder(), and TypeNotPerAxisQuantized::TypeNotPerAxisQuantized().

233 {
234     return HasMultipleQuantizationScales() || m_Quantization.m_QuantizationDim.has_value();
235 }
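For illustration only (not from the deleted page), a caller might branch on HasPerAxisQuantization() as follows; the helper name PrintQuantisation is hypothetical and the snippet assumes <armnn/Tensor.hpp> and <iostream> are included:

// Print either the per-axis scales or the single per-tensor scale/offset pair.
void PrintQuantisation(const armnn::TensorInfo& info)
{
    if (info.HasPerAxisQuantization())
    {
        // One scale per slice along GetQuantizationDim().
        for (float scale : info.GetQuantizationScales())
        {
            std::cout << scale << ' ';
        }
        std::cout << '\n';
    }
    else if (info.IsQuantized())
    {
        std::cout << info.GetQuantizationScale() << " / "
                  << info.GetQuantizationOffset() << '\n';
    }
}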

◆ IsQuantized()

bool IsQuantized () const

Definition at line 290 of file Tensor.cpp.

References armnn::IsQuantizedType().

Referenced by armnn::GetBiasDataType(), TensorInfo::IsTypeSpaceMatch(), and TypeNotPerAxisQuantized::TypeNotPerAxisQuantized().

291 {
292     return IsQuantizedType(m_DataType);
293 }

◆ IsTypeSpaceMatch()

bool IsTypeSpaceMatch (const TensorInfo & other) const

Check that the types are the same and, if quantized, that the quantization parameters are the same.

Definition at line 218 of file Tensor.cpp.

References TensorInfo::GetQuantizationOffset(), TensorInfo::GetQuantizationScale(), TensorInfo::HasMultipleQuantizationScales(), and TensorInfo::IsQuantized().

Referenced by ConcatLayer::CreateWorkload(), SplitterLayer::CreateWorkload(), armnn::GetBiasDataType(), ClLayerSupport::IsConcatSupported(), NeonLayerSupport::IsConcatSupported(), ClLayerSupport::IsSplitterSupported(), and NeonLayerSupport::IsSplitterSupported().

219 {
220     bool match = true;
221 
222     match &= m_DataType == other.m_DataType;
223 
224     if (IsQuantized() && !HasMultipleQuantizationScales())
225     {
226         match &= GetQuantizationScale() == other.GetQuantizationScale() &&
227                  GetQuantizationOffset() == other.GetQuantizationOffset();
228     }
229     return match;
230 }
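An illustrative sketch (values invented, fragment assuming the includes from the first sketch): IsTypeSpaceMatch() ignores shape, so two differently shaped tensors with the same data type and quantization parameters still match:

armnn::TensorInfo a(armnn::TensorShape({ 1, 16 }), armnn::DataType::QAsymmU8, 0.1f, 10);
armnn::TensorInfo b(armnn::TensorShape({ 4, 4 }),  armnn::DataType::QAsymmU8, 0.1f, 10);
bool compatible = a.IsTypeSpaceMatch(b);   // true: the shape is not part of the check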

◆ operator!=()

bool operator!= (const TensorInfo & other) const

Definition at line 208 of file Tensor.cpp.

209 {
210     return !(*this == other);
211 }

◆ operator=()

TensorInfo & operator= (const TensorInfo & other)

Definition at line 193 of file Tensor.cpp.

194 {
195     m_Shape = other.m_Shape;
196     m_DataType = other.m_DataType;
197     m_Quantization = other.m_Quantization;
198     return *this;
199 }

◆ operator==()

bool operator== (const TensorInfo & other) const

Definition at line 201 of file Tensor.cpp.

202 {
203     return ((m_Shape == other.m_Shape) &&
204             (m_DataType == other.m_DataType) &&
205             (m_Quantization == other.m_Quantization));
206 }
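By contrast, the comparison operators do take the shape into account; another invented fragment, assuming the includes from the first sketch:

armnn::TensorInfo x(armnn::TensorShape({ 2, 3 }), armnn::DataType::Float32);
armnn::TensorInfo y(armnn::TensorShape({ 3, 2 }), armnn::DataType::Float32);
bool sameTypeSpace = x.IsTypeSpaceMatch(y);   // true:  only type and quantization are checked
bool identical     = (x == y);                // false: the shapes differ
bool different     = (x != y);                // true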

◆ SetDataType()

void SetDataType (DataType type)   [inline]

◆ SetQuantizationDim()

void SetQuantizationDim (const Optional< unsigned int > & quantizationDim)

Definition at line 285 of file Tensor.cpp.

Referenced by TensorInfo::TensorInfo().

286 {
287     m_Quantization.m_QuantizationDim = quantizationDim;
288 }

◆ SetQuantizationOffset()

◆ SetQuantizationScale()

◆ SetQuantizationScales()

void SetQuantizationScales (const std::vector< float > & scales)

Definition at line 242 of file Tensor.cpp.

Referenced by TensorInfo::TensorInfo().

243 {
244     m_Quantization.m_Scales = scales;
245 }
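A minimal sketch (values invented, fragment assuming the includes from the first sketch) of mutating an existing TensorInfo with the setters documented in this section:

armnn::TensorInfo info(armnn::TensorShape({ 2, 2 }), armnn::DataType::Float32);
info.SetShape(armnn::TensorShape({ 1, 4 }));   // same element count, new shape
info.SetDataType(armnn::DataType::QAsymmU8);   // reinterpret as a quantised type
info.SetQuantizationScale(0.02f);
info.SetQuantizationOffset(0);
// info.GetNumBytes() now reflects 4 elements of one byte each.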

◆ SetShape()

The documentation for this class was generated from the following files:

    Tensor.hpp
    Tensor.cpp

--
cgit v1.2.1