22.11
|
#include <Tensor.hpp>
Public Member Functions | |
TensorInfo () | |
Empty (invalid) constructor. More... | |
TensorInfo (const TensorShape &shape, DataType dataType, float quantizationScale=0.0f, int32_t quantizationOffset=0, bool isConstant=false) | |
TensorInfo (unsigned int numDimensions, const unsigned int *dimensionSizes, DataType dataType, float quantizationScale=0.0f, int32_t quantizationOffset=0, bool isConstant=false) | |
TensorInfo (const TensorShape &shape, DataType dataType, const std::vector< float > &quantizationScales, unsigned int quantizationDim, bool isConstant=false) | |
TensorInfo (unsigned int numDimensions, const unsigned int *dimensionSizes, DataType dataType, const std::vector< float > &quantizationScales, unsigned int quantizationDim, bool isConstant=false) | |
TensorInfo (const TensorInfo &other) | |
TensorInfo & | operator= (const TensorInfo &other) |
bool | operator== (const TensorInfo &other) const |
bool | operator!= (const TensorInfo &other) const |
const TensorShape & | GetShape () const |
TensorShape & | GetShape () |
void | SetShape (const TensorShape &newShape) |
unsigned int | GetNumDimensions () const |
unsigned int | GetNumElements () const |
DataType | GetDataType () const |
void | SetDataType (DataType type) |
bool | HasMultipleQuantizationScales () const |
bool | HasPerAxisQuantization () const |
std::vector< float > | GetQuantizationScales () const |
void | SetQuantizationScales (const std::vector< float > &scales) |
float | GetQuantizationScale () const |
void | SetQuantizationScale (float scale) |
int32_t | GetQuantizationOffset () const |
void | SetQuantizationOffset (int32_t offset) |
Optional< unsigned int > | GetQuantizationDim () const |
void | SetQuantizationDim (const Optional< unsigned int > &quantizationDim) |
bool | IsQuantized () const |
bool | IsConstant () const |
void | SetConstant (const bool IsConstant=true) |
Marks the data corresponding to this tensor info as constant. More... | |
bool | IsTypeSpaceMatch (const TensorInfo &other) const |
Check that the types are the same and, if quantized, that the quantization parameters are the same. More... | |
unsigned int | GetNumBytes () const |
Definition at line 152 of file Tensor.hpp.
TensorInfo | ( | ) |
Empty (invalid) constructor.
Definition at line 341 of file Tensor.cpp.
TensorInfo | ( | const TensorShape & | shape, |
DataType | dataType, | ||
float | quantizationScale = 0.0f , |
||
int32_t | quantizationOffset = 0 , |
||
bool | isConstant = false |
||
) |
Definition at line 346 of file Tensor.cpp.
References TensorInfo::SetQuantizationOffset(), and TensorInfo::SetQuantizationScale().
TensorInfo | ( | unsigned int | numDimensions, |
const unsigned int * | dimensionSizes, | ||
DataType | dataType, | ||
float | quantizationScale = 0.0f , |
||
int32_t | quantizationOffset = 0 , |
||
bool | isConstant = false |
||
) |
Definition at line 359 of file Tensor.cpp.
References TensorInfo::SetQuantizationOffset(), and TensorInfo::SetQuantizationScale().
TensorInfo | ( | const TensorShape & | shape, |
DataType | dataType, | ||
const std::vector< float > & | quantizationScales, | ||
unsigned int | quantizationDim, | ||
bool | isConstant = false |
||
) |
Definition at line 371 of file Tensor.cpp.
References TensorInfo::SetQuantizationDim(), and TensorInfo::SetQuantizationScales().
TensorInfo | ( | unsigned int | numDimensions, |
const unsigned int * | dimensionSizes, | ||
DataType | dataType, | ||
const std::vector< float > & | quantizationScales, | ||
unsigned int | quantizationDim, | ||
bool | isConstant = false |
||
) |
Definition at line 384 of file Tensor.cpp.
References TensorInfo::SetQuantizationDim(), and TensorInfo::SetQuantizationScales().
TensorInfo | ( | const TensorInfo & | other | ) |
Definition at line 398 of file Tensor.cpp.
|
inline |
Definition at line 198 of file Tensor.hpp.
Referenced by armnnTfLiteParser::AsFloatArray(), BiasAndWeightsTypesCompatible::BiasAndWeightsTypesCompatible(), BiasAndWeightsTypesMatch::BiasAndWeightsTypesMatch(), armnnUtils::CalculateReducedOutputTensoInfo(), armnnUtils::CalculateStridedSliceOutputTensorInfo(), RefTensorHandle::CanBeImported(), TosaRefTensorHandle::CanBeImported(), armnn::CheckScaleSetOnQuantizedType(), armnn::ClConvertFp16ToFp32WorkloadValidate(), armnn::ClConvertFp32ToFp16WorkloadValidate(), armnnTfLiteParser::ComputeWrappedIndex(), armnn::ConvertBf16ToFp32Weight(), Converter::ConvertOperation(), armnn_driver::ConvertToLayerInputHandle(), armnn::optimizations::ConvertWeight(), RefDepthToSpaceWorkload::ExecuteAsync(), RefStridedSliceWorkload::ExecuteAsync(), RefArgMinMaxWorkload::ExecuteAsync(), RefSliceWorkload::ExecuteAsync(), RefShapeWorkload::ExecuteAsync(), BFloat16ToFloat32::Func(), Float16ToFloat32::Func(), Float32ToBFloat16::Func(), Float32ToFloat16::Func(), armnn::GetBiasDataType(), TfLiteParserImpl::GetBuffer(), Layer::GetDataType(), armnnSerializer::GetFlatBufferArgMinMaxFunction(), armnn::optimizations::pad_fold::GetLowestElement(), TfLiteParserImpl::GetOutputTensorIds(), armnnUtils::GetPerAxisParams(), armnn::GetUnpaddedTensorStrides(), armnn::InitializeArmComputeTensorData(), armnn::InsertConvertBf16ToFp32LayersBefore(), armnn::InsertConvertFp16ToFp32LayersBefore(), armnn::InsertConvertFp32ToBf16LayersAfter(), armnn::InsertConvertFp32ToBf16LayersBefore(), armnn::InsertConvertFp32ToFp16LayersAfter(), SampleDynamicLayerSupport::IsAdditionSupported(), RefLayerSupport::IsConvertFp16ToFp32Supported(), RefLayerSupport::IsConvertFp32ToFp16Supported(), RefLayerSupport::IsConvolution2dSupported(), RefLayerSupport::IsConvolution3dSupported(), RefLayerSupport::IsDepthwiseConvolutionSupported(), NeonLayerSupport::IsFloorSupported(), RefLayerSupport::IsFullyConnectedSupported(), ClLayerSupport::IsQLstmSupported(), NeonLayerSupport::IsQLstmSupported(), 
RefLayerSupport::IsTransposeConvolution2dSupported(), NeonLayerSupport::IsUnidirectionalSequenceLstmSupported(), armnn::LstmImpl(), armnn::MakeDecoder(), armnn::MakeEncoder(), TfLiteParserImpl::OutputShapeOfReshape(), TfLiteParserImpl::OutputShapeOfSqueeze(), armnn::PermuteTensor(), ConvertFp32NetworkToFp16Impl::Run(), ConvertConstPermuteLayersToConstLayers::Run(), ConvertConstDequantisationLayersToConstLayersImpl::Run(), armnn_driver::SwizzleAndroidNn4dTensorToArmNn(), TypeAnyOf::TypeAnyOf(), TypeIs::TypeIs(), ArgMinMaxQueueDescriptor::Validate(), FullyConnectedQueueDescriptor::Validate(), Convolution2dQueueDescriptor::Validate(), Convolution3dQueueDescriptor::Validate(), DepthwiseConvolution2dQueueDescriptor::Validate(), QuantizeQueueDescriptor::Validate(), EqualQueueDescriptor::Validate(), ConvertBf16ToFp32QueueDescriptor::Validate(), ConvertFp32ToBf16QueueDescriptor::Validate(), ConvertFp16ToFp32QueueDescriptor::Validate(), ConvertFp32ToFp16QueueDescriptor::Validate(), GreaterQueueDescriptor::Validate(), GatherNdQueueDescriptor::Validate(), GatherQueueDescriptor::Validate(), TransposeConvolution2dQueueDescriptor::Validate(), ComparisonQueueDescriptor::Validate(), LogicalBinaryQueueDescriptor::Validate(), and armnn::VerifyTensorInfoDataType().
unsigned int GetNumBytes | ( | ) | const |
Definition at line 427 of file Tensor.cpp.
References armnn::GetDataTypeSize(), and TensorInfo::GetNumElements().
Referenced by RefTensorHandle::Allocate(), TosaRefTensorHandle::Allocate(), SampleTensorHandle::Allocate(), ScopedTensorHandle::Allocate(), ArmnnPreparedModel::ArmnnPreparedModel(), armnnTfLiteParser::AsFloatArray(), armnnTfLiteParser::ComputeWrappedIndex(), ConstTensor::ConstTensor(), ConstTensorPin::ConstTensorPin(), armnnOnnxParser::CreateConstTensorImpl(), RefReshapeWorkload::ExecuteAsync(), ArmnnPreparedModel::ExecuteWithDummyInputs(), IDeserializer::DeserializerImpl::GetNetworkOutputBindingInfo(), TfLiteParserImpl::GetOutputTensorIds(), SampleTensorHandle::Import(), SampleTensorHandle::Manage(), RefTensorHandle::Manage(), TosaRefTensorHandle::Manage(), RefTensorHandle::Map(), TosaRefTensorHandle::Map(), TfLiteParserImpl::OutputShapeOfReshape(), TfLiteParserImpl::OutputShapeOfSqueeze(), armnn::PermuteTensor(), armnn::ReorderWeightChannelsForAcl(), and ScopedTensorHandle::ScopedTensorHandle().
|
inline |
Definition at line 195 of file Tensor.hpp.
Referenced by armnn::ArgMinMax(), armnnTfLiteParser::AsFloatArray(), BatchMatMul::BatchMatMul(), armnn::CalculateGatherNdKeyIndices(), armnnUtils::CalculateReducedOutputTensoInfo(), armnnUtils::CalculateStridedSliceOutputTensorInfo(), armnn::ComputeAclAxis(), armnn::ComputePositiveAxis(), armnn::ComputeReductionTensorShape(), armnn::ComputeSoftmaxAclAxis(), armnnTfLiteParser::ComputeWrappedIndex(), armnn::Concatenate(), Converter::ConvertOperation(), armnn_driver::ConvertReduce(), RefShapeWorkload::ExecuteAsync(), RefRankWorkload::ExecuteAsync(), armnn::Gather(), armnn::GetBiasDataType(), armnn::GetNumActivations(), RefLayerSupport::IsBatchToSpaceNdSupported(), armnn_driver::IsDynamicTensor(), RefLayerSupport::IsMeanSupported(), armnn::LogSoftmax(), TfLiteParserImpl::OutputShapeOfReshape(), TfLiteParserImpl::OutputShapeOfSqueeze(), armnn::PrintOutput(), armnnUtils::ProcessConcatInputTensorInfo(), armnn::Reduce(), PermuteAndBatchToSpaceAsDepthToSpaceImpl< PermuteType >::Run(), AddBroadcastReshapeLayerImpl::Run(), armnn::Softmax(), armnn::Split(), armnn::Splitter(), armnn::Stack(), armnn_driver::SwizzleAndroidNn4dTensorToArmNn(), TensorNumDimensionsAreCorrect::TensorNumDimensionsAreCorrect(), TensorNumDimensionsAreGreaterOrEqualTo::TensorNumDimensionsAreGreaterOrEqualTo(), FullyConnectedQueueDescriptor::Validate(), MeanQueueDescriptor::Validate(), PadQueueDescriptor::Validate(), InstanceNormalizationQueueDescriptor::Validate(), L2NormalizationQueueDescriptor::Validate(), StridedSliceQueueDescriptor::Validate(), GatherNdQueueDescriptor::Validate(), GatherQueueDescriptor::Validate(), SliceQueueDescriptor::Validate(), BatchMatMulQueueDescriptor::Validate(), QueueDescriptor::ValidateTensorNumDimensions(), FillLayer::ValidateTensorShapesFromInputs(), ReduceLayer::ValidateTensorShapesFromInputs(), and MeanLayer::ValidateTensorShapesFromInputs().
|
inline |
Definition at line 196 of file Tensor.hpp.
Referenced by armnn::Activation(), armnnTfLiteParser::AsFloatArray(), armnnTfLiteParser::ComputeWrappedIndex(), armnn::ConvertBf16ToFp32Weight(), armnn::optimizations::ConvertWeight(), armnnOnnxParser::CreateConstTensorImpl(), armnn::Dequantize(), armnn::DetectionPostProcess(), SampleDynamicAdditionWorkload::Execute(), RefGatherNdWorkload::ExecuteAsync(), RefConvertFp32ToBf16Workload::ExecuteAsync(), RefConvertFp32ToFp16Workload::ExecuteAsync(), RefFakeQuantizationFloat32Workload::ExecuteAsync(), RefFloorWorkload::ExecuteAsync(), RefConvertBf16ToFp32Workload::ExecuteAsync(), RefConvertFp16ToFp32Workload::ExecuteAsync(), RefCastWorkload::ExecuteAsync(), RefUnidirectionalSequenceLstmWorkload::ExecuteAsync(), RefDebugWorkload< DataType >::ExecuteAsync(), BFloat16ToFloat32::Func(), Float16ToFloat32::Func(), Float32ToBFloat16::Func(), Float32ToFloat16::Func(), armnn::Gather(), armnn::GetBiasDataType(), TensorInfo::GetNumBytes(), armnn::MirrorPad(), IDeserializer::DeserializerImpl::OutputShapeOfReshape(), TfLiteParserImpl::OutputShapeOfReshape(), TfLiteParserImpl::OutputShapeOfSqueeze(), armnn::Pad(), armnn::PrintOutput(), armnn::Quantize(), armnn::Reduce(), FuseConvertFp32ToBf16IntoConstLayers::Run(), ConvertConstPermuteLayersToConstLayers::Run(), ConvertConstDequantisationLayersToConstLayersImpl::Run(), ShapesAreSameTotalSize::ShapesAreSameTotalSize(), armnn::Splitter(), armnn::Stack(), armnnDeserializer::ToConstTensor(), and SpaceToBatchNdQueueDescriptor::Validate().
Optional< unsigned int > GetQuantizationDim | ( | ) | const |
Definition at line 494 of file Tensor.cpp.
Referenced by armnnTfLiteParser::AsFloatArray(), Converter::ConvertOperation(), armnn::GetBiasDataType(), armnnSerializer::GetFlatBufferArgMinMaxFunction(), armnnUtils::GetPerAxisParams(), and armnnUtils::Permuted().
int32_t GetQuantizationOffset | ( | ) | const |
Definition at line 478 of file Tensor.cpp.
Referenced by armnnTfLiteParser::AsFloatArray(), armnn::CheckScaleSetOnQuantizedType(), Converter::ConvertOperation(), armnn::Dequantize(), armnn::GetBiasDataType(), armnnSerializer::GetFlatBufferArgMinMaxFunction(), armnn::optimizations::pad_fold::GetLowestElement(), armnn::optimizations::pad_fold::GetZeroElement(), TensorInfo::IsTypeSpaceMatch(), armnn::MakeDecoder(), armnn::MakeEncoder(), TfLiteParserImpl::OutputShapeOfReshape(), QuantizationParametersAreEqual::QuantizationParametersAreEqual(), armnn::Quantize(), and ConvertConstDequantisationLayersToConstLayersImpl::Run().
float GetQuantizationScale | ( | ) | const |
Definition at line 461 of file Tensor.cpp.
References ARMNN_ASSERT, and TensorInfo::HasMultipleQuantizationScales().
Referenced by armnnTfLiteParser::AsFloatArray(), armnn::CheckScaleSetOnQuantizedType(), Converter::ConvertOperation(), armnn::Dequantize(), armnn::GetBiasDataType(), armnnSerializer::GetFlatBufferArgMinMaxFunction(), armnn::optimizations::pad_fold::GetLowestElement(), TensorInfo::IsTypeSpaceMatch(), armnn::MakeDecoder(), armnn::MakeEncoder(), TfLiteParserImpl::OutputShapeOfReshape(), QuantizationParametersAreEqual::QuantizationParametersAreEqual(), armnn::Quantize(), and ConvertConstDequantisationLayersToConstLayersImpl::Run().
std::vector< float > GetQuantizationScales | ( | ) | const |
Definition at line 451 of file Tensor.cpp.
Referenced by armnnTfLiteParser::AsFloatArray(), Converter::ConvertOperation(), armnn::GetBiasDataType(), armnnSerializer::GetFlatBufferArgMinMaxFunction(), and armnnUtils::GetPerAxisParams().
|
inline |
Definition at line 191 of file Tensor.hpp.
Referenced by armnn::ArgMinMax(), ArmnnPreparedModel::ArmnnPreparedModel(), armnnTfLiteParser::AsFloatArray(), BatchMatMul::BatchMatMul(), armnn::BatchNormImpl(), armnn::BatchToSpaceNd(), armnn::CalculateGatherNdKeyIndices(), armnnUtils::CalculateReducedOutputTensoInfo(), armnnUtils::CalculateStridedSliceOutputTensorInfo(), armnn::ClUnidirectionalSequenceLstmFloatWorkloadValidate(), armnn_driver::ComputeShape(), armnnTfLiteParser::ComputeWrappedIndex(), armnn::Concatenate(), armnn::Convert1HWOTensorInfoToAcl(), armnn::Convert1HWOTensorToAcl(), armnn::Convert1HWOtoMIHW(), armnn::ConvertBf16ToFp32Weight(), Converter::ConvertOperation(), armnn_driver::ConvertPooling2d(), armnn::ConvertWeightTensorFromArmnnToAcl(), armnn::CreateAclNormalizationLayerInfoForL2Normalization(), armnnOnnxParser::CreateConstTensorImpl(), OnnxParserImpl::CreateNetworkFromString(), ConcatLayer::CreateWorkload(), SplitterLayer::CreateWorkload(), armnn::DepthToSpace(), armnn::DetectionPostProcess(), RefChannelShuffleWorkload::ExecuteAsync(), RefDepthwiseConvolution2dWorkload::ExecuteAsync(), RefLstmWorkload::ExecuteAsync(), RefQLstmWorkload::ExecuteAsync(), RefConvolution3dWorkload::ExecuteAsync(), RefElementwiseUnaryWorkload::ExecuteAsync(), RefLogicalUnaryWorkload::ExecuteAsync(), RefLogicalBinaryWorkload::ExecuteAsync(), RefShapeWorkload::ExecuteAsync(), RefComparisonWorkload::ExecuteAsync(), RefElementwiseWorkload< Functor, ParentDescriptor, DebugString >::ExecuteAsync(), RefUnidirectionalSequenceLstmWorkload::ExecuteAsync(), BFloat16ToFloat32::Func(), Float16ToFloat32::Func(), Float32ToBFloat16::Func(), Float32ToFloat16::Func(), armnn::Gather(), armnn::GetBiasDataType(), TfLiteParserImpl::GetBuffer(), armnnSerializer::GetFlatBufferArgMinMaxFunction(), armnn::GetNumActivations(), armnnUtils::GetPerAxisParams(), IDeserializer::DeserializerImpl::GetQLstmDescriptor(), RefTensorHandle::GetShape(), TosaRefTensorHandle::GetShape(), SampleTensorHandle::GetShape(), ConstTensorHandle::GetShape(), 
armnn::GetUnpaddedTensorStrides(), armnn::InstanceNorm(), armnn_driver::IsDynamicTensor(), ClLayerSupport::IsSplitterSupported(), NeonLayerSupport::IsSplitterSupported(), armnn::LogSoftmax(), armnn::LstmImpl(), armnn::MirrorPad(), armnn::NeonUnidirectionalSequenceLstmFloatWorkloadValidate(), armnn::NeonUnidirectionalSequenceLstmWorkloadValidate(), IDeserializer::DeserializerImpl::OutputShapeOfReshape(), TfLiteParserImpl::OutputShapeOfReshape(), TfLiteParserImpl::OutputShapeOfSqueeze(), armnn::Pad(), armnnUtils::Permuted(), armnn::PermuteTensor(), armnn::Pooling2d(), armnn::Pooling3d(), armnn::PreluImpl(), armnn::PrintOutput(), armnnUtils::ProcessConcatInputTensorInfo(), armnn::Reduce(), armnn::ReshapeWeightsForAcl(), armnn::Resize(), TransposeAsReshapeImpl::Run(), PermuteAsReshapeImpl::Run(), OptimizeConsecutiveReshapesImpl::Run(), ConvertConstPermuteLayersToConstLayers::Run(), ConvertConstDequantisationLayersToConstLayersImpl::Run(), AddBroadcastReshapeLayerImpl::Run(), FuseBatchNorm< ConvLayer, ArmnnType, T >::Run(), Convolution3dLayer::SerializeLayerParameters(), DepthwiseConvolution2dLayer::SerializeLayerParameters(), Convolution2dLayer::SerializeLayerParameters(), Graph::SerializeToDot(), ShapesAreBroadcastCompatible::ShapesAreBroadcastCompatible(), ShapesAreSameRank::ShapesAreSameRank(), armnn::Slice(), armnn::Softmax(), armnn::SpaceToBatchNd(), armnn::SpaceToDepth(), armnn::Split(), armnn::Splitter(), armnn::Stack(), armnn::StridedSlice(), armnn_driver::SwizzleAndroidNn4dTensorToArmNn(), armnnUtils::TransposeTensorShape(), ArgMinMaxQueueDescriptor::Validate(), PermuteQueueDescriptor::Validate(), DepthwiseConvolution2dQueueDescriptor::Validate(), DetectionPostProcessQueueDescriptor::Validate(), ResizeQueueDescriptor::Validate(), SpaceToBatchNdQueueDescriptor::Validate(), SpaceToDepthQueueDescriptor::Validate(), TransposeQueueDescriptor::Validate(), SliceQueueDescriptor::Validate(), DepthToSpaceQueueDescriptor::Validate(), 
BatchMatMulQueueDescriptor::Validate(), QueueDescriptor::ValidateTensorNumDimensions(), ElementwiseBaseLayer::ValidateTensorShapesFromInputs(), RankLayer::ValidateTensorShapesFromInputs(), QuantizeLayer::ValidateTensorShapesFromInputs(), ChannelShuffleLayer::ValidateTensorShapesFromInputs(), ActivationLayer::ValidateTensorShapesFromInputs(), FillLayer::ValidateTensorShapesFromInputs(), ConvertFp32ToFp16Layer::ValidateTensorShapesFromInputs(), ReduceLayer::ValidateTensorShapesFromInputs(), DequantizeLayer::ValidateTensorShapesFromInputs(), RsqrtLayer::ValidateTensorShapesFromInputs(), FakeQuantizationLayer::ValidateTensorShapesFromInputs(), DebugLayer::ValidateTensorShapesFromInputs(), BatchToSpaceNdLayer::ValidateTensorShapesFromInputs(), Pooling2dLayer::ValidateTensorShapesFromInputs(), InstanceNormalizationLayer::ValidateTensorShapesFromInputs(), StackLayer::ValidateTensorShapesFromInputs(), SwitchLayer::ValidateTensorShapesFromInputs(), ConvertFp32ToBf16Layer::ValidateTensorShapesFromInputs(), MemImportLayer::ValidateTensorShapesFromInputs(), SliceLayer::ValidateTensorShapesFromInputs(), L2NormalizationLayer::ValidateTensorShapesFromInputs(), ConvertFp16ToFp32Layer::ValidateTensorShapesFromInputs(), AbsLayer::ValidateTensorShapesFromInputs(), ResizeLayer::ValidateTensorShapesFromInputs(), ShapeLayer::ValidateTensorShapesFromInputs(), MemCopyLayer::ValidateTensorShapesFromInputs(), Pooling3dLayer::ValidateTensorShapesFromInputs(), MergeLayer::ValidateTensorShapesFromInputs(), ConvertBf16ToFp32Layer::ValidateTensorShapesFromInputs(), CastLayer::ValidateTensorShapesFromInputs(), FloorLayer::ValidateTensorShapesFromInputs(), SoftmaxLayer::ValidateTensorShapesFromInputs(), NormalizationLayer::ValidateTensorShapesFromInputs(), LogSoftmaxLayer::ValidateTensorShapesFromInputs(), PadLayer::ValidateTensorShapesFromInputs(), MeanLayer::ValidateTensorShapesFromInputs(), TransposeLayer::ValidateTensorShapesFromInputs(), ConstantLayer::ValidateTensorShapesFromInputs(), 
PermuteLayer::ValidateTensorShapesFromInputs(), ReshapeLayer::ValidateTensorShapesFromInputs(), Convolution3dLayer::ValidateTensorShapesFromInputs(), GatherNdLayer::ValidateTensorShapesFromInputs(), BatchMatMulLayer::ValidateTensorShapesFromInputs(), StridedSliceLayer::ValidateTensorShapesFromInputs(), GatherLayer::ValidateTensorShapesFromInputs(), DetectionPostProcessLayer::ValidateTensorShapesFromInputs(), ElementwiseUnaryLayer::ValidateTensorShapesFromInputs(), ArgMinMaxLayer::ValidateTensorShapesFromInputs(), DepthToSpaceLayer::ValidateTensorShapesFromInputs(), SpaceToBatchNdLayer::ValidateTensorShapesFromInputs(), SpaceToDepthLayer::ValidateTensorShapesFromInputs(), TransposeConvolution2dLayer::ValidateTensorShapesFromInputs(), PreluLayer::ValidateTensorShapesFromInputs(), LogicalBinaryLayer::ValidateTensorShapesFromInputs(), ComparisonLayer::ValidateTensorShapesFromInputs(), FullyConnectedLayer::ValidateTensorShapesFromInputs(), SplitterLayer::ValidateTensorShapesFromInputs(), DepthwiseConvolution2dLayer::ValidateTensorShapesFromInputs(), ConcatLayer::ValidateTensorShapesFromInputs(), LstmLayer::ValidateTensorShapesFromInputs(), UnidirectionalSequenceLstmLayer::ValidateTensorShapesFromInputs(), Convolution2dLayer::ValidateTensorShapesFromInputs(), BatchNormalizationLayer::ValidateTensorShapesFromInputs(), QuantizedLstmLayer::ValidateTensorShapesFromInputs(), QLstmLayer::ValidateTensorShapesFromInputs(), and armnn::VerifyTensorInfoDataType().
|
inline |
Definition at line 192 of file Tensor.hpp.
|
inline |
Definition at line 201 of file Tensor.hpp.
References TensorShape::operator=(), and TensorShape::operator==().
Referenced by armnn::GetBiasDataType(), TensorInfo::GetQuantizationScale(), TensorInfo::HasPerAxisQuantization(), and TensorInfo::IsTypeSpaceMatch().
bool HasPerAxisQuantization | ( | ) | const |
Definition at line 446 of file Tensor.cpp.
References TensorInfo::HasMultipleQuantizationScales().
Referenced by armnnTfLiteParser::AsFloatArray(), armnn::Convert1HWOtoMIHW(), Converter::ConvertOperation(), armnn::GetBiasDataType(), armnnSerializer::GetFlatBufferArgMinMaxFunction(), armnnUtils::GetPerAxisParams(), armnn::MakeDecoder(), armnn::MakeEncoder(), and TypeNotPerAxisQuantized::TypeNotPerAxisQuantized().
bool IsConstant | ( | ) | const |
Definition at line 509 of file Tensor.cpp.
Referenced by LayerWithParameters< Pooling3dDescriptor >::GetConnectedConstantAsInputTensors(), LayerSupportHandle::IsConvolution2dSupported(), LayerSupportHandle::IsDepthwiseConvolutionSupported(), LayerSupportHandle::IsDilatedDepthwiseConvolutionSupported(), LayerSupportHandle::IsFullyConnectedSupported(), FuseConvertFp32ToBf16IntoConstLayers::Run(), ConvertConstPermuteLayersToConstLayers::Run(), ConvertConstDequantisationLayersToConstLayersImpl::Run(), and TensorInfo::SetConstant().
bool IsQuantized | ( | ) | const |
Definition at line 504 of file Tensor.cpp.
References armnn::IsQuantizedType().
Referenced by RefCastWorkload::ExecuteAsync(), armnn::GetBiasDataType(), armnn::optimizations::pad_fold::GetZeroElement(), TensorInfo::IsTypeSpaceMatch(), armnn::optimizations::pad_fold::TryFoldPadIntoLayer2d(), and TypeNotPerAxisQuantized::TypeNotPerAxisQuantized().
bool IsTypeSpaceMatch | ( | const TensorInfo & | other | ) | const |
Check that the types are the same and, if quantized, that the quantization parameters are the same.
Definition at line 432 of file Tensor.cpp.
References TensorInfo::GetQuantizationOffset(), TensorInfo::GetQuantizationScale(), TensorInfo::HasMultipleQuantizationScales(), and TensorInfo::IsQuantized().
Referenced by ConcatLayer::CreateWorkload(), SplitterLayer::CreateWorkload(), armnn::GetBiasDataType(), ClLayerSupport::IsConcatSupported(), NeonLayerSupport::IsConcatSupported(), ClLayerSupport::IsSplitterSupported(), and NeonLayerSupport::IsSplitterSupported().
bool operator!= | ( | const TensorInfo & | other | ) | const |
Definition at line 422 of file Tensor.cpp.
TensorInfo & operator= | ( | const TensorInfo & | other | ) |
Definition at line 405 of file Tensor.cpp.
bool operator== | ( | const TensorInfo & | other | ) | const |
Definition at line 414 of file Tensor.cpp.
void SetConstant | ( | const bool | IsConstant = true | ) |
Marks the data corresponding to this tensor info as constant.
Note: This can allow further optimization on execution. Note: The user has to ensure that the underlying data actually is constant.
Definition at line 514 of file Tensor.cpp.
References TensorInfo::IsConstant().
Referenced by ArmnnPreparedModel::ArmnnPreparedModel(), armnn_driver::ConvertOperandToConstTensorPin(), armnnOnnxParser::CreateConstTensorImpl(), OnnxParserImpl::CreateNetworkFromString(), ArmnnPreparedModel::ExecuteWithDummyInputs(), TfLiteParserImpl::GetBuffer(), TfLiteParserImpl::GetNetworkInputBindingInfo(), IDeserializer::DeserializerImpl::GetNetworkOutputBindingInfo(), TfLiteParserImpl::GetOutputTensorIds(), armnnUtils::MakeInputTensors(), armnn::PermuteTensor(), armnn::RevertConstantWeightsToFP32(), ConvertConstPermuteLayersToConstLayers::Run(), FuseConvertFp32ToBf16IntoConstLayers::Run(), ConvertConstDequantisationLayersToConstLayersImpl::Run(), and armnnDeserializer::ToConstTensor().
|
inline |
Definition at line 199 of file Tensor.hpp.
Referenced by armnn_driver::ConvertOperandToConstTensorPin(), ConvertToDataType(), TfLiteParserImpl::GetOutputTensorIds(), armnn::InsertConvertBf16ToFp32LayersBefore(), armnn::InsertConvertFp16ToFp32LayersBefore(), armnn::InsertConvertFp32ToBf16LayersAfter(), armnn::InsertConvertFp32ToBf16LayersBefore(), armnn::InsertConvertFp32ToFp16LayersAfter(), and ConvertFp32NetworkToFp16Impl::Run().
void SetQuantizationDim | ( | const Optional< unsigned int > & | quantizationDim | ) |
Definition at line 499 of file Tensor.cpp.
Referenced by armnnUtils::Permuted(), and TensorInfo::TensorInfo().
void SetQuantizationOffset | ( | int32_t | offset | ) |
Definition at line 489 of file Tensor.cpp.
Referenced by armnn::CheckScaleSetOnQuantizedType(), Converter::ConvertOperation(), RefCastWorkload::ExecuteAsync(), and TensorInfo::TensorInfo().
void SetQuantizationScale | ( | float | scale | ) |
Definition at line 473 of file Tensor.cpp.
Referenced by armnn::CheckScaleSetOnQuantizedType(), Converter::ConvertOperation(), RefCastWorkload::ExecuteAsync(), and TensorInfo::TensorInfo().
void SetQuantizationScales | ( | const std::vector< float > & | scales | ) |
Definition at line 456 of file Tensor.cpp.
Referenced by Converter::ConvertOperation(), and TensorInfo::TensorInfo().
|
inline |
Definition at line 193 of file Tensor.hpp.
Referenced by ClGatherNdWorkload::ClGatherNdWorkload(), armnn::ClGatherNdWorkloadValidate(), armnn::Convert1HWOtoMIHW(), armnn_driver::ConvertOperandToConstTensorPin(), Converter::ConvertOperation(), OnnxParserImpl::CreateNetworkFromString(), RefGatherNdWorkload::ExecuteAsync(), RefUnidirectionalSequenceLstmWorkload::ExecuteAsync(), NeonGatherNdWorkload::NeonGatherNdWorkload(), armnn::NeonGatherNdWorkloadValidate(), IDeserializer::DeserializerImpl::OutputShapeOfReshape(), TfLiteParserImpl::OutputShapeOfReshape(), TfLiteParserImpl::OutputShapeOfSqueeze(), armnnUtils::Permuted(), armnn::ReshapeWeightsForAcl(), AddBroadcastReshapeLayerImpl::Run(), and armnnUtils::TransposeTensorShape().