ArmNN
 21.11
Conv2dTestImpl.cpp File Reference

Go to the source code of this file.

Functions

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
std::vector< T > GetBias2 (bool biasEnabled, float qScale)
 
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
std::vector< T > GetBias4 (bool biasEnabled, float qScale)
 
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
std::vector< T > GetBias8 (bool biasEnabled, float qScale)
 
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
std::vector< T > GetBias (bool biasEnabled, float qScale, armnn::TensorInfo outputInfo, armnn::DataLayout layout)
 
template<typename T , typename B >
void ApplyBias (std::vector< T > &v, float vScale, int32_t vOffset, const std::vector< B > &bias, float bScale, int32_t bOffset, uint32_t w, uint32_t h)
 
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>, typename B = armnn::ResolveType<ArmnnBType>>
LayerTestResult< T, 4 > SimpleConvolution2dTestImpl (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const std::vector< T > &originalInput, const std::vector< T > &originalKernel, const std::vector< B > &bias, const std::vector< T > &originalOutputExpected, const armnn::TensorShape &originalInputShape, const armnn::TensorShape &originalKernelShape, const armnn::TensorShape &originalOutputExpectedShape, float qScale, int32_t qOffset, const armnn::DataLayout layout=armnn::DataLayout::NCHW, uint32_t padLeft=0, uint32_t padTop=0, uint32_t padRight=0, uint32_t padBottom=0, uint32_t strideX=1, uint32_t strideY=1, uint32_t dilationX=1, uint32_t dilationY=1)
 
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>, typename B = armnn::ResolveType<ArmnnBType>, armnn::DataType OutType = ArmnnType, typename O = armnn::ResolveType<OutType>>
LayerTestResult< O, 4 > SimpleConvolution2dNhwcTestImpl (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const std::vector< T > &input, const std::vector< T > &kernel, const std::vector< B > &bias, const std::vector< O > &outputExpected, const armnn::TensorShape &inputShape, const armnn::TensorShape &kernelShape, const armnn::TensorShape &outputExpectedShape, const armnn::DataLayout dataLayout, float qScale, int32_t qOffset, uint32_t padLeft=1, uint32_t padTop=1, uint32_t padRight=1, uint32_t padBottom=1, uint32_t strideX=1, uint32_t strideY=1)
 
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult< T, 4 > Convolution1dTestImpl (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, float qScale, int32_t qOffset, bool biasEnabled)
 
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult< T, 4 > SimpleConvolution2d3x3NhwcTestCommon (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, float qScale, int32_t qOffset, bool biasEnabled, armnn::DataLayout dataLayout)
 
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult< T, 4 > SimpleConvolution2d3x3Stride2x2TestCommon (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, float qScale, int32_t qOffset, bool biasEnabled, const armnn::DataLayout &dataLayout)
 
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult< T, 4 > SimpleConvolution2d3x5TestCommon (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, float qScale, int32_t qOffset, bool biasEnabled, const armnn::DataLayout layout)
 
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult< T, 4 > SimpleConvolution2d3x3TestCommon (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, float qScale, int32_t qOffset, bool biasEnabled, const armnn::DataLayout layout)
 
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult< T, 4 > Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTestCommon (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout layout, float qScale, int32_t qOffset)
 
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult< T, 4 > SimpleConvolution2dAsymmetricPaddingTestCommon (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout layout, float qScale, int32_t qOffset)
 
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult< T, 4 > Convolution2d3x3DilationTestCommon (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const std::vector< float > &inputNoQuantizedValues, armnn::TensorInfo &inputTensorInfo, const std::vector< float > &kernelNoQuantizedValues, armnn::TensorInfo &kernelTensorInfo, const std::vector< float > &outputExpectedNoQuantizedValues, armnn::TensorInfo &outputTensorInfo, uint32_t dilationX, uint32_t dilationY, armnn::DataLayout layout=armnn::DataLayout::NCHW, uint32_t padLeft=0, uint32_t padTop=0, uint32_t padRight=0, uint32_t padBottom=0, uint32_t strideX=1, uint32_t strideY=1, bool biasEnabled=false)
 
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T >
LayerTestResult< T, 4 > Convolution2d3x3Dilation3x3Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool biasEnabled, const armnn::DataLayout layout)
 
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T >
LayerTestResult< T, 4 > Convolution2d2x3x3Dilation3x3Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool biasEnabled, const armnn::DataLayout layout)
 
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T >
LayerTestResult< T, 4 > Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool biasEnabled, const armnn::DataLayout layout)
 
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult< T, 4 > CompareConvolution2dTestImpl (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, armnn::IWorkloadFactory &refWorkloadFactory, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::ITensorHandleFactory &refTensorHandleFactory)
 
LayerTestResult< float, 4 > Convolution2d3x3Stride2x2BFloat16Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool biasEnabled, const armnn::DataLayout &dataLayout)
 
LayerTestResult< float, 4 > Convolution2d3x3Stride2x2BFloat16SmallValueTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool biasEnabled, const armnn::DataLayout &dataLayout)
 
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>, typename B = armnn::ResolveType<ArmnnBType>>
LayerTestResult< T, 4 > DepthwiseConvolution2dAsymmetricTestImpl (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const std::vector< T > &input, const std::vector< T > &kernel, const std::vector< B > &bias, const std::vector< T > &outputExpected, const armnn::TensorShape &inputShape, const armnn::TensorShape &kernelShape, const armnn::TensorShape &outputExpectedShape, float qScale, int32_t qOffset, const armnn::DataLayout layout, uint32_t padLeft=0, uint32_t padTop=0, uint32_t padRight=0, uint32_t padBottom=0, uint32_t strideX=1, uint32_t strideY=1)
 
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult< T, 4 > DepthwiseConvolution2dDepthMul1TestImpl (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, float qScale, int32_t qOffset, bool biasEnabled, const armnn::DataLayout layout)
 
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult< T, 4 > DepthwiseConvolution2dTestImpl (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, float qScale, int32_t qOffset, bool biasEnabled, const armnn::DataLayout layout)
 
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>, typename B = armnn::ResolveType<ArmnnBType>>
LayerTestResult< T, 4 > DepthwiseConvolution2dTestImpl (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const std::vector< T > &originalInput, const std::vector< T > &originalKernel, const std::vector< B > &bias, const std::vector< T > &originalOutputExpected, const armnn::TensorShape &originalInputShape, const armnn::TensorShape &originalKernelShape, const armnn::TensorShape &originalOutputExpectedShape, float qScale, int32_t qOffset, const armnn::DataLayout layout=armnn::DataLayout::NCHW, uint32_t padLeft=0, uint32_t padTop=0, uint32_t padRight=0, uint32_t padBottom=0, uint32_t strideX=1, uint32_t strideY=1, uint32_t dilationX=1, uint32_t dilationY=1)
 
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult< T, 4 > DepthwiseConvolution2dAsymmetricTestCommon (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, float qScale, int32_t qOffset, bool biasEnabled, const armnn::DataLayout layout)
 
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult< T, 4 > DepthwiseConvolution2dNhwcTestCommon (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, float qScale, int32_t qOffset, bool biasEnabled)
 
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult< T, 4 > SimpleDepthwiseConvolution2d3x3Dilation3x3NhwcTestCommon (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, float qScale, int32_t qOffset, bool biasEnabled)
 
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult< T, 4 > DepthwiseConvolution2d3x3DilationTestCommon (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const std::vector< float > &inputNoQuantizedValues, armnn::TensorInfo &inputTensorInfo, const std::vector< float > &kernelNoQuantizedValues, armnn::TensorInfo &kernelTensorInfo, const std::vector< float > &outputExpectedNoQuantizedValues, armnn::TensorInfo &outputTensorInfo, uint32_t dilationX, uint32_t dilationY, armnn::DataLayout layout=armnn::DataLayout::NCHW, bool biasEnabled=false)
 
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T >
LayerTestResult< T, 4 > DepthwiseConvolution2d3x3Dilation3x3Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool biasEnabled, const armnn::DataLayout layout)
 
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T >
LayerTestResult< T, 4 > DepthwiseConvolution2d2x3x3Dilation3x3Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool biasEnabled, const armnn::DataLayout layout)
 
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T >
LayerTestResult< T, 4 > DepthwiseConvolution2dMult4Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool biasEnabled, const armnn::DataLayout layout)
 
template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T >
LayerTestResult< T, 4 > DepthwiseConvolution2dMult2Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool biasEnabled, const armnn::DataLayout layout)
 
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult< T, 4 > CompareDepthwiseConvolution2dTestImpl (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, armnn::IWorkloadFactory &refWorkloadFactory, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::ITensorHandleFactory &refTensorHandleFactory, const armnnUtils::DataLayoutIndexed &layout)
 
template LayerTestResult< armnn::ResolveType< armnn::DataType::BFloat16 >, 4 > Convolution2d3x3Dilation3x3Test< armnn::DataType::BFloat16, armnn::DataType::BFloat16 > (armnn::IWorkloadFactory &, const armnn::IBackendInternal::IMemoryManagerSharedPtr &, const armnn::ITensorHandleFactory &, bool, armnn::DataLayout)
 
template LayerTestResult< armnn::ResolveType< armnn::DataType::Float32 >, 4 > Convolution2d3x3Dilation3x3Test< armnn::DataType::Float32, armnn::DataType::Float32 > (armnn::IWorkloadFactory &, const armnn::IBackendInternal::IMemoryManagerSharedPtr &, const armnn::ITensorHandleFactory &, bool, armnn::DataLayout)
 
template LayerTestResult< armnn::ResolveType< armnn::DataType::QAsymmS8 >, 4 > Convolution2d3x3Dilation3x3Test< armnn::DataType::QAsymmS8, armnn::DataType::Signed32 > (armnn::IWorkloadFactory &, const armnn::IBackendInternal::IMemoryManagerSharedPtr &, const armnn::ITensorHandleFactory &, bool, armnn::DataLayout)
 
template LayerTestResult< armnn::ResolveType< armnn::DataType::QAsymmU8 >, 4 > Convolution2d3x3Dilation3x3Test< armnn::DataType::QAsymmU8, armnn::DataType::Signed32 > (armnn::IWorkloadFactory &, const armnn::IBackendInternal::IMemoryManagerSharedPtr &, const armnn::ITensorHandleFactory &, bool, armnn::DataLayout)
 
template LayerTestResult< armnn::ResolveType< armnn::DataType::QSymmS16 >, 4 > Convolution2d3x3Dilation3x3Test< armnn::DataType::QSymmS16, armnn::DataType::Signed32 > (armnn::IWorkloadFactory &, const armnn::IBackendInternal::IMemoryManagerSharedPtr &, const armnn::ITensorHandleFactory &, bool, armnn::DataLayout)
 
template LayerTestResult< armnn::ResolveType< armnn::DataType::Float32 >, 4 > Convolution2d2x3x3Dilation3x3Test< armnn::DataType::Float32, armnn::DataType::Float32 > (armnn::IWorkloadFactory &, const armnn::IBackendInternal::IMemoryManagerSharedPtr &, const armnn::ITensorHandleFactory &, bool, armnn::DataLayout)
 
template LayerTestResult< armnn::ResolveType< armnn::DataType::BFloat16 >, 4 > Convolution2d2x3x3Dilation3x3Test< armnn::DataType::BFloat16, armnn::DataType::BFloat16 > (armnn::IWorkloadFactory &, const armnn::IBackendInternal::IMemoryManagerSharedPtr &, const armnn::ITensorHandleFactory &, bool, armnn::DataLayout)
 
template LayerTestResult< armnn::ResolveType< armnn::DataType::QAsymmS8 >, 4 > Convolution2d2x3x3Dilation3x3Test< armnn::DataType::QAsymmS8, armnn::DataType::Signed32 > (armnn::IWorkloadFactory &, const armnn::IBackendInternal::IMemoryManagerSharedPtr &, const armnn::ITensorHandleFactory &, bool, armnn::DataLayout)
 
template LayerTestResult< armnn::ResolveType< armnn::DataType::QAsymmU8 >, 4 > Convolution2d2x3x3Dilation3x3Test< armnn::DataType::QAsymmU8, armnn::DataType::Signed32 > (armnn::IWorkloadFactory &, const armnn::IBackendInternal::IMemoryManagerSharedPtr &, const armnn::ITensorHandleFactory &, bool, armnn::DataLayout)
 
template LayerTestResult< armnn::ResolveType< armnn::DataType::QSymmS16 >, 4 > Convolution2d2x3x3Dilation3x3Test< armnn::DataType::QSymmS16, armnn::DataType::Signed32 > (armnn::IWorkloadFactory &, const armnn::IBackendInternal::IMemoryManagerSharedPtr &, const armnn::ITensorHandleFactory &, bool, armnn::DataLayout)
 
template LayerTestResult< armnn::ResolveType< armnn::DataType::BFloat16 >, 4 > Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test< armnn::DataType::BFloat16, armnn::DataType::BFloat16 > (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool biasEnabled, const armnn::DataLayout layout)
 
template LayerTestResult< armnn::ResolveType< armnn::DataType::Float32 >, 4 > Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test< armnn::DataType::Float32, armnn::DataType::Float32 > (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool biasEnabled, const armnn::DataLayout layout)
 
template LayerTestResult< armnn::ResolveType< armnn::DataType::QAsymmS8 >, 4 > Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test< armnn::DataType::QAsymmS8, armnn::DataType::Signed32 > (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool biasEnabled, const armnn::DataLayout layout)
 
template LayerTestResult< armnn::ResolveType< armnn::DataType::QAsymmU8 >, 4 > Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test< armnn::DataType::QAsymmU8, armnn::DataType::Signed32 > (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool biasEnabled, const armnn::DataLayout layout)
 
template LayerTestResult< armnn::ResolveType< armnn::DataType::QSymmS16 >, 4 > Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test< armnn::DataType::QSymmS16, armnn::DataType::Signed32 > (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool biasEnabled, const armnn::DataLayout layout)
 
template LayerTestResult< armnn::ResolveType< armnn::DataType::BFloat16 >, 4 > DepthwiseConvolution2d3x3Dilation3x3Test< armnn::DataType::BFloat16, armnn::DataType::BFloat16 > (armnn::IWorkloadFactory &, const armnn::IBackendInternal::IMemoryManagerSharedPtr &, const armnn::ITensorHandleFactory &, bool, armnn::DataLayout)
 
template LayerTestResult< armnn::ResolveType< armnn::DataType::Float32 >, 4 > DepthwiseConvolution2d3x3Dilation3x3Test< armnn::DataType::Float32, armnn::DataType::Float32 > (armnn::IWorkloadFactory &, const armnn::IBackendInternal::IMemoryManagerSharedPtr &, const armnn::ITensorHandleFactory &, bool, armnn::DataLayout)
 
template LayerTestResult< armnn::ResolveType< armnn::DataType::QAsymmS8 >, 4 > DepthwiseConvolution2d3x3Dilation3x3Test< armnn::DataType::QAsymmS8, armnn::DataType::Signed32 > (armnn::IWorkloadFactory &, const armnn::IBackendInternal::IMemoryManagerSharedPtr &, const armnn::ITensorHandleFactory &, bool, armnn::DataLayout)
 
template LayerTestResult< armnn::ResolveType< armnn::DataType::QAsymmU8 >, 4 > DepthwiseConvolution2d3x3Dilation3x3Test< armnn::DataType::QAsymmU8, armnn::DataType::Signed32 > (armnn::IWorkloadFactory &, const armnn::IBackendInternal::IMemoryManagerSharedPtr &, const armnn::ITensorHandleFactory &, bool, armnn::DataLayout)
 
template LayerTestResult< armnn::ResolveType< armnn::DataType::QSymmS16 >, 4 > DepthwiseConvolution2d3x3Dilation3x3Test< armnn::DataType::QSymmS16, armnn::DataType::Signed32 > (armnn::IWorkloadFactory &, const armnn::IBackendInternal::IMemoryManagerSharedPtr &, const armnn::ITensorHandleFactory &, bool, armnn::DataLayout)
 
template LayerTestResult< armnn::ResolveType< armnn::DataType::BFloat16 >, 4 > DepthwiseConvolution2d2x3x3Dilation3x3Test< armnn::DataType::BFloat16, armnn::DataType::BFloat16 > (armnn::IWorkloadFactory &, const armnn::IBackendInternal::IMemoryManagerSharedPtr &, const armnn::ITensorHandleFactory &, bool, armnn::DataLayout)
 
template LayerTestResult< armnn::ResolveType< armnn::DataType::Float32 >, 4 > DepthwiseConvolution2d2x3x3Dilation3x3Test< armnn::DataType::Float32, armnn::DataType::Float32 > (armnn::IWorkloadFactory &, const armnn::IBackendInternal::IMemoryManagerSharedPtr &, const armnn::ITensorHandleFactory &, bool, armnn::DataLayout)
 
template LayerTestResult< armnn::ResolveType< armnn::DataType::QAsymmS8 >, 4 > DepthwiseConvolution2d2x3x3Dilation3x3Test< armnn::DataType::QAsymmS8, armnn::DataType::Signed32 > (armnn::IWorkloadFactory &, const armnn::IBackendInternal::IMemoryManagerSharedPtr &, const armnn::ITensorHandleFactory &, bool, armnn::DataLayout)
 
template LayerTestResult< armnn::ResolveType< armnn::DataType::QAsymmU8 >, 4 > DepthwiseConvolution2d2x3x3Dilation3x3Test< armnn::DataType::QAsymmU8, armnn::DataType::Signed32 > (armnn::IWorkloadFactory &, const armnn::IBackendInternal::IMemoryManagerSharedPtr &, const armnn::ITensorHandleFactory &, bool, armnn::DataLayout)
 
template LayerTestResult< armnn::ResolveType< armnn::DataType::QSymmS16 >, 4 > DepthwiseConvolution2d2x3x3Dilation3x3Test< armnn::DataType::QSymmS16, armnn::DataType::Signed32 > (armnn::IWorkloadFactory &, const armnn::IBackendInternal::IMemoryManagerSharedPtr &, const armnn::ITensorHandleFactory &, bool, armnn::DataLayout)
 
template LayerTestResult< armnn::ResolveType< armnn::DataType::BFloat16 >, 4 > DepthwiseConvolution2dMult4Test< armnn::DataType::BFloat16, armnn::DataType::BFloat16 > (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool biasEnabled, const armnn::DataLayout layout)
 
template LayerTestResult< armnn::ResolveType< armnn::DataType::Float32 >, 4 > DepthwiseConvolution2dMult4Test< armnn::DataType::Float32, armnn::DataType::Float32 > (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool biasEnabled, const armnn::DataLayout layout)
 
template LayerTestResult< armnn::ResolveType< armnn::DataType::BFloat16 >, 4 > DepthwiseConvolution2dMult2Test< armnn::DataType::BFloat16, armnn::DataType::BFloat16 > (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool biasEnabled, const armnn::DataLayout layout)
 
template LayerTestResult< armnn::ResolveType< armnn::DataType::Float32 >, 4 > DepthwiseConvolution2dMult2Test< armnn::DataType::Float32, armnn::DataType::Float32 > (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool biasEnabled, const armnn::DataLayout layout)
 
LayerTestResult< float, 4 > SimpleConvolution2d3x5Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool biasEnabled, const armnn::DataLayout layout)
 
LayerTestResult< uint8_t, 4 > SimpleConvolution2d3x5Uint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool biasEnabled, const armnn::DataLayout layout)
 
LayerTestResult< float, 4 > SimpleConvolution2d3x3Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool biasEnabled, const armnn::DataLayout layout)
 
LayerTestResult< float, 4 > SimpleConvolution2d3x3NhwcTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool biasEnabled)
 
LayerTestResult< float, 4 > SimpleConvolution2d3x3Stride2x2Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool biasEnabled, const armnn::DataLayout layout)
 
LayerTestResult< uint8_t, 4 > SimpleConvolution2d3x3Uint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool biasEnabled, const armnn::DataLayout layout)
 
LayerTestResult< int16_t, 4 > SimpleConvolution2d3x5QSymm16Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool biasEnabled, const armnn::DataLayout layout)
 
LayerTestResult< int16_t, 4 > SimpleConvolution2d3x3QSymm16Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool biasEnabled, const armnn::DataLayout layout)
 
LayerTestResult< float, 4 > Convolution2dAsymmetricPaddingTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, armnn::DataLayout layout)
 
LayerTestResult< float, 4 > Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, armnn::DataLayout layout)
 
LayerTestResult< float, 4 > Convolution1dTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool biasEnabled)
 
LayerTestResult< uint8_t, 4 > Convolution1dUint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool biasEnabled)
 
LayerTestResult< uint8_t, 4 > Convolution2dPerAxisQuantTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout layout)
 
LayerTestResult< float, 4 > CompareConvolution2dTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, armnn::IWorkloadFactory &refWorkloadFactory, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::ITensorHandleFactory &refTensorHandleFactory)
 
LayerTestResult< float, 4 > DepthwiseConvolution2dTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool biasEnabled, const armnn::DataLayout layout)
 
LayerTestResult< float, 4 > DepthwiseConvolution2dDepthNhwcTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool biasEnabled)
 
LayerTestResult< float, 4 > DepthwiseConvolution2dDepthMul1Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool biasEnabled, const armnn::DataLayout layout)
 
LayerTestResult< float, 4 > DepthwiseConvolution2dDepthMul64Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< float, 4 > DepthwiseConvolution2dAsymmetricTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool biasEnabled, const armnn::DataLayout layout)
 
LayerTestResult< uint8_t, 4 > DepthwiseConvolution2dUint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool biasEnabled, const armnn::DataLayout layout)
 
LayerTestResult< uint8_t, 4 > DepthwiseConvolution2dDepthMul1Uint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool biasEnabled, const armnn::DataLayout layout)
 
LayerTestResult< float, 4 > SimpleDepthwiseConvolution2d3x3Dilation3x3NhwcTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< int16_t, 4 > DepthwiseConvolution2dInt16Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool biasEnabled, const armnn::DataLayout layout)
 
LayerTestResult< int16_t, 4 > DepthwiseConvolution2dDepthMul1Int16Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool biasEnabled, const armnn::DataLayout layout)
 
LayerTestResult< uint8_t, 4 > DepthwiseConvolution2dPerAxisQuantTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout layout)
 
LayerTestResult< float, 4 > CompareDepthwiseConvolution2dFloatTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, armnn::IWorkloadFactory &refWorkloadFactory, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::ITensorHandleFactory &refTensorHandleFactory, const armnn::DataLayout layout)
 
LayerTestResult< uint8_t, 4 > CompareDepthwiseConvolution2dUint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, armnn::IWorkloadFactory &refWorkloadFactory, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::ITensorHandleFactory &refTensorHandleFactory, const armnn::DataLayout layout)
 

Function Documentation

◆ ApplyBias()

void ApplyBias ( std::vector< T > &  v,
float  vScale,
int32_t  vOffset,
const std::vector< B > &  bias,
float  bScale,
int32_t  bOffset,
uint32_t  w,
uint32_t  h 
)

Definition at line 162 of file Conv2dTestImpl.cpp.

References ARMNN_ASSERT, ARMNN_ASSERT_MSG, B, and armnnUtils::SelectiveDequantize().

Referenced by Convolution1dTestImpl(), DepthwiseConvolution2dAsymmetricTestImpl(), DepthwiseConvolution2dDepthMul1TestImpl(), DepthwiseConvolution2dTestImpl(), and SimpleConvolution2dTestImpl().

164 {
165  ARMNN_ASSERT_MSG((armnn::IsQuantizedType<T>() && vScale != 0.0f) || (!armnn::IsQuantizedType<T>()),
166  "Invalid type and parameter combination.");
167  ARMNN_ASSERT_MSG((armnn::IsQuantizedType<B>() && bScale != 0.0f) || (!armnn::IsQuantizedType<B>()),
168  "Invalid type and parameter combination.");
169 
170  // Note we need to dequantize and re-quantize the image value and the bias.
171  for (uint32_t i = 0; i < bias.size(); ++i)
172  {
173  float dBias = SelectiveDequantize(bias[i], bScale, bOffset);
174  for (uint32_t y = 0; y < h; ++y)
175  {
176  for (uint32_t x = 0; x < w; ++x)
177  {
178  uint32_t offset = (i * h + y) * w + x;
179  ARMNN_ASSERT(offset < v.size());
180  T& outRef = v[offset];
181  float dOutput = SelectiveDequantize(outRef, vScale, vOffset);
182  outRef = SelectiveQuantize<T>(dOutput + dBias, vScale, vOffset);
183  }
184  }
185  }
186 }
#define ARMNN_ASSERT_MSG(COND, MSG)
Definition: Assert.hpp:15
#define ARMNN_ASSERT(COND)
Definition: Assert.hpp:14
float SelectiveDequantize(T value, float scale, int32_t offset)

◆ CompareConvolution2dTest()

LayerTestResult<float,4> CompareConvolution2dTest ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
armnn::IWorkloadFactory refWorkloadFactory,
const armnn::ITensorHandleFactory tensorHandleFactory,
const armnn::ITensorHandleFactory refTensorHandleFactory 
)

Definition at line 3493 of file Conv2dTestImpl.cpp.

Referenced by TEST_SUITE().

3499 {
3500  return CompareConvolution2dTestImpl<armnn::DataType::Float32>(
3501  workloadFactory, memoryManager, refWorkloadFactory, tensorHandleFactory, refTensorHandleFactory);
3502 }

◆ CompareConvolution2dTestImpl()

LayerTestResult<T,4> CompareConvolution2dTestImpl ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
armnn::IWorkloadFactory refWorkloadFactory,
const armnn::ITensorHandleFactory tensorHandleFactory,
const armnn::ITensorHandleFactory refTensorHandleFactory 
)

Definition at line 1313 of file Conv2dTestImpl.cpp.

References AllocateAndCopyDataToITensorHandle(), CopyDataFromITensorHandle(), CopyDataToITensorHandle(), IWorkloadFactory::CreateConvolution2d(), ITensorHandleFactory::CreateTensorHandle(), TensorInfo::GetNumElements(), TensorInfo::GetShape(), armnn::info, Convolution2dQueueDescriptor::m_Bias, Convolution2dDescriptor::m_BiasEnabled, Convolution2dDescriptor::m_PadBottom, Convolution2dDescriptor::m_PadLeft, Convolution2dDescriptor::m_PadRight, Convolution2dDescriptor::m_PadTop, QueueDescriptorWithParameters< LayerDescriptor >::m_Parameters, Convolution2dDescriptor::m_StrideX, Convolution2dDescriptor::m_StrideY, and Convolution2dQueueDescriptor::m_Weight.

1319 {
1320  unsigned int inputHeight = 8;
1321  unsigned int inputWidth = 16;
1322  unsigned int inputChannels = 3;
1323  unsigned int inputNum = 5;
1324 
1325  unsigned int kernelHeight = 3;
1326  unsigned int kernelWidth = 3;
1327 
1328  unsigned int strideX = 2;
1329  unsigned int strideY = 3;
1330  unsigned int padX = 1;
1331  unsigned int padY = 1;
1332 
1333  unsigned int outputNum = inputNum;
1334  unsigned int outputChannels = 2;
1335  unsigned int outputHeight = (inputHeight + 2 * padY - kernelHeight + strideY) / strideY;
1336  unsigned int outputWidth = (inputWidth + 2 * padX - kernelWidth + strideX) / strideX;
1337 
1338  armnn::TensorInfo inputTensorInfo;
1339  armnn::TensorInfo outputTensorInfo;
1340  armnn::TensorInfo kernelDesc;
1341  armnn::TensorInfo biasDesc;
1342 
1343  unsigned int inputShape[] = {inputNum, inputChannels, inputHeight, inputWidth};
1344  unsigned int outputShape[] = {outputNum, outputChannels, outputHeight, outputWidth};
1345  unsigned int kernelShape[] = {outputChannels, inputChannels, kernelHeight, kernelWidth};
1346  unsigned int biasShape[] = {outputChannels};
1347 
1348  inputTensorInfo = armnn::TensorInfo(4, inputShape, ArmnnType);
1349  outputTensorInfo = armnn::TensorInfo(4, outputShape, ArmnnType);
1350  kernelDesc = armnn::TensorInfo(4, kernelShape, ArmnnType);
1351  biasDesc = armnn::TensorInfo(1, biasShape, ArmnnType);
1352 
1353  auto input = MakeRandomTensor<T>(inputTensorInfo, 124908);
1354  auto kernel = MakeRandomTensor<T>(kernelDesc, 891234);
1355  auto bias = MakeRandomTensor<T>(biasDesc, 1028);
1356 
1357  std::vector<T> actualOutput(outputTensorInfo.GetNumElements());
1358  std::vector<T> expectedOutput(outputTensorInfo.GetNumElements());
1359 
1360  std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
1361  std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
1362 
1363  armnn::Convolution2dQueueDescriptor data;
1364  armnn::WorkloadInfo info;
1365  armnn::ScopedTensorHandle weightsTensor(kernelDesc);
1366  armnn::ScopedTensorHandle biasTensor(biasDesc);
1367 
1368  AllocateAndCopyDataToITensorHandle(&weightsTensor, kernel.data());
1369  AllocateAndCopyDataToITensorHandle(&biasTensor, bias.data());
1370 
1371  AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
1372  AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
1373  data.m_Weight = &weightsTensor;
1374  data.m_Bias = &biasTensor;
1375  data.m_Parameters.m_StrideX = strideX;
1376  data.m_Parameters.m_StrideY = strideY;
1377  data.m_Parameters.m_PadLeft = padX;
1378  data.m_Parameters.m_PadRight = padX;
1379  data.m_Parameters.m_PadTop = padY;
1380  data.m_Parameters.m_PadBottom = padY;
1381  data.m_Parameters.m_BiasEnabled = true;
1382 
1383  std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refTensorHandleFactory.CreateTensorHandle(outputTensorInfo);
1384  std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refTensorHandleFactory.CreateTensorHandle(inputTensorInfo);
1385 
1386  armnn::Convolution2dQueueDescriptor refData = data;
1387  armnn::WorkloadInfo refInfo = info;
1388  SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
1389  SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
1390 
1391  std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConvolution2d(data, info);
1392  std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateConvolution2d(refData, refInfo);
1393 
1394  outputHandleRef->Allocate();
1395  inputHandleRef->Allocate();
1396 
1397  inputHandle->Allocate();
1398  outputHandle->Allocate();
1399 
1400  CopyDataToITensorHandle(inputHandle.get(), input.data());
1401  CopyDataToITensorHandle(inputHandleRef.get(), input.data());
1402 
1403  ExecuteWorkload(*workload, memoryManager);
1404 
1405  workloadRef->PostAllocationConfigure();
1406  workloadRef->Execute();
1407 
1408  CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
1409  CopyDataFromITensorHandle(expectedOutput.data(), outputHandleRef.get());
1410 
1411  return LayerTestResult<T, 4>(actualOutput,
1412  expectedOutput,
1413  outputHandle->GetShape(),
1414  outputTensorInfo.GetShape());
1415 }
uint32_t m_PadBottom
Padding bottom value in the height dimension.
bool m_BiasEnabled
Enable/disable bias.
const TensorShape & GetShape() const
Definition: Tensor.hpp:191
const ConstTensorHandle * m_Weight
const ConstTensorHandle * m_Bias
uint32_t m_PadRight
Padding right value in the width dimension.
uint32_t m_PadTop
Padding top value in the height dimension.
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
void AllocateAndCopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)
void CopyDataFromITensorHandle(void *memory, const armnn::ITensorHandle *tensorHandle)
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
Contains information about TensorInfos of a layer.
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo) const =0
uint32_t m_PadLeft
Padding left value in the width dimension.
unsigned int GetNumElements() const
Definition: Tensor.hpp:196
virtual std::unique_ptr< IWorkload > CreateConvolution2d(const Convolution2dQueueDescriptor &descriptor, const WorkloadInfo &info) const
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)

◆ CompareDepthwiseConvolution2dFloatTest()

LayerTestResult<float, 4> CompareDepthwiseConvolution2dFloatTest ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
armnn::IWorkloadFactory refWorkloadFactory,
const armnn::ITensorHandleFactory tensorHandleFactory,
const armnn::ITensorHandleFactory refTensorHandleFactory,
const armnn::DataLayout  layout 
)

Definition at line 3761 of file Conv2dTestImpl.cpp.

Referenced by TEST_SUITE().

3768 {
3769  return CompareDepthwiseConvolution2dTestImpl<armnn::DataType::Float32>(
3770  workloadFactory, memoryManager, refWorkloadFactory, tensorHandleFactory, refTensorHandleFactory, layout);
3771 }

◆ CompareDepthwiseConvolution2dTestImpl()

LayerTestResult<T, 4> CompareDepthwiseConvolution2dTestImpl ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
armnn::IWorkloadFactory refWorkloadFactory,
const armnn::ITensorHandleFactory tensorHandleFactory,
const armnn::ITensorHandleFactory refTensorHandleFactory,
const armnnUtils::DataLayoutIndexed layout 
)

Definition at line 2893 of file Conv2dTestImpl.cpp.

References AllocateAndCopyDataToITensorHandle(), CopyDataFromITensorHandle(), CopyDataToITensorHandle(), IWorkloadFactory::CreateDepthwiseConvolution2d(), ITensorHandleFactory::CreateTensorHandle(), armnn::GetBiasDataType(), DataLayoutIndexed::GetDataLayout(), TensorInfo::GetNumElements(), TensorInfo::GetShape(), armnn::info, DepthwiseConvolution2dQueueDescriptor::m_Bias, DepthwiseConvolution2dDescriptor::m_BiasEnabled, DepthwiseConvolution2dDescriptor::m_DataLayout, DepthwiseConvolution2dDescriptor::m_PadBottom, DepthwiseConvolution2dDescriptor::m_PadLeft, DepthwiseConvolution2dDescriptor::m_PadRight, DepthwiseConvolution2dDescriptor::m_PadTop, QueueDescriptorWithParameters< LayerDescriptor >::m_Parameters, DepthwiseConvolution2dDescriptor::m_StrideX, DepthwiseConvolution2dDescriptor::m_StrideY, DepthwiseConvolution2dQueueDescriptor::m_Weight, armnn::NCHW, and armnn::NHWC.

2900 {
2901  unsigned int inputHeight = 8;
2902  unsigned int inputWidth = 16;
2903  unsigned int inputChannels = 3;
2904  unsigned int inputNum = 5;
2905 
2906  unsigned int kernelHeight = 3;
2907  unsigned int kernelWidth = 3;
2908  unsigned int channelMultiplier = 1;
2909 
2910  unsigned int strideX = 2;
2911  unsigned int strideY = 3;
2912  unsigned int padX = 1;
2913  unsigned int padY = 1;
2914 
2915  unsigned int outputNum = inputNum;
2916  unsigned int outputChannels = inputChannels * channelMultiplier;
2917  unsigned int outputHeight = (inputHeight + 2 * padY - kernelHeight + strideY) / strideY;
2918  unsigned int outputWidth = (inputWidth + 2 * padX - kernelWidth + strideX) / strideX;
2919 
2920  armnn::TensorInfo inputTensorInfo;
2921  armnn::TensorInfo outputTensorInfo;
2922  armnn::TensorInfo kernelDesc;
2923  armnn::TensorInfo biasDesc;
2924 
2925  std::vector<unsigned int> inputShape;
2926  std::vector<unsigned int> outputShape;
2927  std::vector<unsigned int> kernelShape{ 1, kernelHeight, kernelWidth, outputChannels };
2928  std::vector<unsigned int> biasShape{ outputChannels };
2929  switch (layout.GetDataLayout())
2930  {
2931  case armnn::DataLayout::NCHW:
2932  inputShape = { inputNum, inputChannels, inputHeight, inputWidth };
2933  outputShape = { outputNum, outputChannels, outputHeight, outputWidth };
2934  break;
2935  case armnn::DataLayout::NHWC:
2936  inputShape = { inputNum, inputHeight, inputWidth, inputChannels };
2937  outputShape = { outputNum, outputHeight, outputWidth, outputChannels };
2938  break;
2939  default:
2940  throw armnn::InvalidArgumentException("unknown data layout ["
2941  + std::to_string(static_cast<int>(layout.GetDataLayout())) + "]");
2942  }
2943 
2944  float inputsQScale = armnn::IsQuantizedType<T>() ? 1.0f : 0;
2945  float outputQScale = armnn::IsQuantizedType<T>() ? 2.0f : 0;
2946  int32_t qOffset = 0;
2947 
2948  inputTensorInfo = armnn::TensorInfo(4, inputShape.data(), ArmnnType, inputsQScale, qOffset);
2949  outputTensorInfo = armnn::TensorInfo(4, outputShape.data(), ArmnnType, outputQScale, qOffset);
2950  kernelDesc = armnn::TensorInfo(4, kernelShape.data(), ArmnnType, inputsQScale, qOffset);
2951  biasDesc = armnn::TensorInfo(1, biasShape.data(), armnn::GetBiasDataType(ArmnnType), inputsQScale, qOffset);
2952 
2953  auto input = MakeRandomTensor<T>(inputTensorInfo, 124908, 0.0f, 255.0f);
2954  auto kernel = MakeRandomTensor<T>(kernelDesc, 891234, 0.0f, 255.0f);
2955  auto bias = MakeRandomTensor<typename FullyConnectedBiasTypeForInputType<T>::Type>(biasDesc, 1028, 0.0f, 255.0f);
2956 
2957  std::vector<T> actualOutput(outputTensorInfo.GetNumElements());
2958  std::vector<T> expectedOutput(outputTensorInfo.GetNumElements());
2959 
2960  std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
2961  std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
2962 
2963  armnn::DepthwiseConvolution2dQueueDescriptor data;
2964  armnn::WorkloadInfo info;
2965  armnn::ScopedTensorHandle weightsTensor(kernelDesc);
2966  armnn::ScopedTensorHandle biasTensor(biasDesc);
2967 
2968  AllocateAndCopyDataToITensorHandle(&weightsTensor, kernel.data());
2969  AllocateAndCopyDataToITensorHandle(&biasTensor, bias.data());
2970 
2971  AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
2972  AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
2973  data.m_Weight = &weightsTensor;
2974  data.m_Bias = &biasTensor;
2975  data.m_Parameters.m_StrideX = strideX;
2976  data.m_Parameters.m_StrideY = strideY;
2977  data.m_Parameters.m_PadLeft = padX;
2978  data.m_Parameters.m_PadRight = padX;
2979  data.m_Parameters.m_PadTop = padY;
2980  data.m_Parameters.m_PadBottom = padY;
2981  data.m_Parameters.m_BiasEnabled = true;
2982  data.m_Parameters.m_DataLayout = layout.GetDataLayout();
2983 
2984  std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refTensorHandleFactory.CreateTensorHandle(outputTensorInfo);
2985  std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refTensorHandleFactory.CreateTensorHandle(inputTensorInfo);
2986 
2987  armnn::DepthwiseConvolution2dQueueDescriptor refData = data;
2988  armnn::WorkloadInfo refInfo = info;
2989  SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
2990  SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
2991 
2992  std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateDepthwiseConvolution2d(data, info);
2993  std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateDepthwiseConvolution2d(refData, refInfo);
2994 
2995  outputHandleRef->Allocate();
2996  inputHandleRef->Allocate();
2997 
2998  inputHandle->Allocate();
2999  outputHandle->Allocate();
3000 
3001  CopyDataToITensorHandle(inputHandle.get(), input.data());
3002  CopyDataToITensorHandle(inputHandleRef.get(), input.data());
3003 
3004  ExecuteWorkload(*workload, memoryManager);
3005 
3006  workloadRef->PostAllocationConfigure();
3007  workloadRef->Execute();
3008 
3009  CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
3010  CopyDataFromITensorHandle(expectedOutput.data(), outputHandleRef.get());
3011 
3012  return LayerTestResult<T, 4>(actualOutput,
3013  expectedOutput,
3014  outputHandle->GetShape(),
3015  outputTensorInfo.GetShape());
3016 }
bool m_BiasEnabled
Enable/disable bias.
const TensorShape & GetShape() const
Definition: Tensor.hpp:191
uint32_t m_PadBottom
Padding bottom value in the height dimension.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
uint32_t m_PadLeft
Padding left value in the width dimension.
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
uint32_t m_PadTop
Padding top value in the height dimension.
void AllocateAndCopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)
armnn::DataLayout GetDataLayout() const
void CopyDataFromITensorHandle(void *memory, const armnn::ITensorHandle *tensorHandle)
DataType GetBiasDataType(DataType inputDataType)
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
Contains information about TensorInfos of a layer.
virtual std::unique_ptr< IWorkload > CreateDepthwiseConvolution2d(const DepthwiseConvolution2dQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo) const =0
Depthwise Convolution 2D layer workload data.
unsigned int GetNumElements() const
Definition: Tensor.hpp:196
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)
uint32_t m_PadRight
Padding right value in the width dimension.

◆ CompareDepthwiseConvolution2dUint8Test()

LayerTestResult<uint8_t, 4> CompareDepthwiseConvolution2dUint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
armnn::IWorkloadFactory refWorkloadFactory,
const armnn::ITensorHandleFactory tensorHandleFactory,
const armnn::ITensorHandleFactory refTensorHandleFactory,
const armnn::DataLayout  layout 
)

Definition at line 3773 of file Conv2dTestImpl.cpp.

Referenced by TEST_SUITE().

3780 {
3781  return CompareDepthwiseConvolution2dTestImpl<armnn::DataType::QAsymmU8>(
3782  workloadFactory, memoryManager, refWorkloadFactory, tensorHandleFactory, refTensorHandleFactory, layout);
3783 }

◆ Convolution1dTest()

LayerTestResult<float, 4> Convolution1dTest ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory,
bool  biasEnabled 
)

Definition at line 3377 of file Conv2dTestImpl.cpp.

Referenced by TEST_SUITE().

3382 {
3383  return Convolution1dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
3384  workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0, biasEnabled);
3385 }

◆ Convolution1dTestImpl()

LayerTestResult<T,4> Convolution1dTestImpl ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory,
float  qScale,
int32_t  qOffset,
bool  biasEnabled 
)

Definition at line 464 of file Conv2dTestImpl.cpp.

References AllocateAndCopyDataToITensorHandle(), ApplyBias(), CopyDataFromITensorHandle(), CopyDataToITensorHandle(), IWorkloadFactory::CreateConvolution2d(), ITensorHandleFactory::CreateTensorHandle(), armnn::info, Convolution2dQueueDescriptor::m_Bias, Convolution2dDescriptor::m_BiasEnabled, Convolution2dDescriptor::m_PadBottom, Convolution2dDescriptor::m_PadLeft, Convolution2dDescriptor::m_PadRight, Convolution2dDescriptor::m_PadTop, QueueDescriptorWithParameters< LayerDescriptor >::m_Parameters, Convolution2dDescriptor::m_StrideX, Convolution2dDescriptor::m_StrideY, Convolution2dQueueDescriptor::m_Weight, and TensorInfo::SetQuantizationScale().

471 {
472  using B = armnn::ResolveType<ArmnnBType>;
473  // Until we have a specialist 1D convolution layer, we can fake one using
474  // 2D convolution with the final dimension set to 1.
475  // I don't anticipate this being particularly slow, given that convolution is implemented
476  // as a matrix multiplication, at which point dimension doesn't matter.
477 
478  unsigned int batchSize = 1;
479  unsigned int inputChannels = 2;
480  unsigned int outputChannels = 3;
481  unsigned int inputSize = 5; // The 1D size (could view as 'width' or 'height').
482  unsigned int kernelSize = 3;
483  unsigned int padSize = 2;
484  unsigned int stride = 1;
485  unsigned int outputSize = 7; // (inputSize + 2 * padSize - kernelSize + 1) / stride.
486 
487  armnn::TensorInfo inputInfo({batchSize, inputChannels, inputSize, 1}, ArmnnType);
488  armnn::TensorInfo outputInfo({batchSize, outputChannels, outputSize, 1}, ArmnnType);
489  armnn::TensorInfo kernelInfo({outputChannels, inputChannels, kernelSize, 1}, ArmnnType);
490  armnn::TensorInfo biasInfo({outputChannels}, ArmnnBType);
491 
492  // Set quantization parameters if the requested type is a quantized type.
493  if(armnn::IsQuantizedType<T>())
494  {
495  inputInfo.SetQuantizationScale(qScale);
496  inputInfo.SetQuantizationOffset(qOffset);
497  outputInfo.SetQuantizationScale(qScale);
498  outputInfo.SetQuantizationOffset(qOffset);
499  kernelInfo.SetQuantizationScale(qScale);
500  kernelInfo.SetQuantizationOffset(qOffset);
501  biasInfo.SetQuantizationScale(inputInfo.GetQuantizationScale()*kernelInfo.GetQuantizationScale());
502  biasInfo.SetQuantizationOffset(0);
503  }
504 
505  std::vector<T> inputData = QuantizedVector<T>(
506  {
507  5.0f, -2.0f, 2.5f, 0.0f, 1.0f,
508  -3.0f, 3.2f, 5.0f, 2.0f, 3.0f,
509  },
510  inputInfo.GetQuantizationScale(),
511  inputInfo.GetQuantizationOffset());
512 
513  std::vector<T> kernelData = QuantizedVector<T>(
514  {
515  1.0f, 0.0f, 0.0f,
516  0.0f, 2.0f, -1.5f,
517 
518  0.0f, 0.0f, 0.0f,
519  0.2f, 0.2f, 0.2f,
520 
521  0.5f, 0.0f, 0.5f,
522  0.0f, -1.0f, 0.0f
523  },
524  kernelInfo.GetQuantizationScale(),
525  kernelInfo.GetQuantizationOffset());
526 
527  std::vector<B> biasData =
528  QuantizedVector<B>({ 1.0f, 0.0f, 0.0f }, biasInfo.GetQuantizationScale(), biasInfo.GetQuantizationOffset());
529 
530  std::vector<T> outputData = QuantizedVector<T>(
531  {
532  4.5f, -10.8f, 5.0f + 6.4f - 7.5f, -2.0f + 10.0f -3.0f, 2.5f + 4.0f - 4.5f, 6.0f, 1.0f,
533  -0.6f, -0.6f + 0.64f, -0.6f + 0.64f + 1.0f, 0.64f + 1.0f + 0.4f, 1.0f + 0.4f + 0.6f, 0.4f + 0.6f, 0.6f,
534  2.5f, -1.0f + 3.0f, 1.25f - 3.2f + 2.5f, -1.0f - 5.0f, 1.25f + 0.5f - 2.0f, -3.0f, 0.5f
535  },
536  outputInfo.GetQuantizationScale(),
537  outputInfo.GetQuantizationOffset());
538 
539  std::vector<T> actualOutput(outputInfo.GetNumElements());
540 
541  // Optionally apply bias to output image.
542  if(biasEnabled)
543  {
544  ApplyBias(outputData, outputInfo.GetQuantizationScale(), outputInfo.GetQuantizationOffset(),
545  biasData, biasInfo.GetQuantizationScale(), biasInfo.GetQuantizationOffset(),
546  1, outputSize);
547  }
548 
549  std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputInfo);
550  std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputInfo);
551 
552  armnn::Convolution2dQueueDescriptor data;
553  armnn::WorkloadInfo info;
554  armnn::ScopedTensorHandle weightsTensor(kernelInfo);
555  armnn::ScopedTensorHandle biasTensor(biasInfo);
556 
557  AllocateAndCopyDataToITensorHandle(&weightsTensor, kernelData.data());
558  AllocateAndCopyDataToITensorHandle(&biasTensor, biasData.data());
559 
560  AddInputToWorkload(data, info, inputInfo, inputHandle.get());
561  AddOutputToWorkload(data, info, outputInfo, outputHandle.get());
562 
563  data.m_Weight = &weightsTensor;
564  data.m_Bias = &biasTensor;
565  data.m_Parameters.m_StrideX = 1;
566  data.m_Parameters.m_StrideY = stride;
567  data.m_Parameters.m_PadLeft = 0;
568  data.m_Parameters.m_PadRight = 0;
569  data.m_Parameters.m_PadTop = padSize;
570  data.m_Parameters.m_PadBottom = padSize;
571  data.m_Parameters.m_BiasEnabled = biasEnabled;
572 
573  std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConvolution2d(data, info);
574  inputHandle->Allocate();
575  outputHandle->Allocate();
576 
577  CopyDataToITensorHandle(inputHandle.get(), inputData.data());
578 
579  ExecuteWorkload(*workload, memoryManager);
580 
581  CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
582 
583  return LayerTestResult<T, 4>(actualOutput,
584  outputData,
585  outputHandle->GetShape(),
586  outputInfo.GetShape());
587 }
uint32_t m_PadBottom
Padding bottom value in the height dimension.
bool m_BiasEnabled
Enable/disable bias.
void ApplyBias(std::vector< T > &v, float vScale, int32_t vOffset, const std::vector< B > &bias, float bScale, int32_t bOffset, uint32_t w, uint32_t h)
const ConstTensorHandle * m_Weight
typename ResolveTypeImpl< DT >::Type ResolveType
Definition: ResolveType.hpp:79
const ConstTensorHandle * m_Bias
uint32_t m_PadRight
Padding right value in the width dimension.
uint32_t m_PadTop
Padding top value in the height dimension.
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
void SetQuantizationScale(float scale)
Definition: Tensor.cpp:475
void AllocateAndCopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)
void CopyDataFromITensorHandle(void *memory, const armnn::ITensorHandle *tensorHandle)
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
Contains information about TensorInfos of a layer.
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo) const =0
uint32_t m_PadLeft
Padding left value in the width dimension.
virtual std::unique_ptr< IWorkload > CreateConvolution2d(const Convolution2dQueueDescriptor &descriptor, const WorkloadInfo &info) const
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)

◆ Convolution1dUint8Test()

LayerTestResult<uint8_t, 4> Convolution1dUint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory,
bool  biasEnabled 
)

Definition at line 3387 of file Conv2dTestImpl.cpp.

Referenced by TEST_SUITE().

3392 {
3393  return Convolution1dTestImpl<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
3394  workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 128, biasEnabled);
3395 }

◆ Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test()

LayerTestResult<T, 4> Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory,
bool  biasEnabled,
const armnn::DataLayout  layout 
)

Definition at line 1244 of file Conv2dTestImpl.cpp.

Referenced by TEST_SUITE().

1250 {
1251  armnn::TensorInfo inputTensorInfo({ 1, 1, 10, 10 }, ArmnnType);
1252  std::vector<float> inputNoQuantizedValues =
1253  {
1254  1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1255  1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1256  1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1257  1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1258  1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1259  1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1260  1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1261  1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1262  1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1263  1, 1, 1, 1, 1, 1, 1, 1, 1, 1
1264  };
1265 
1266  armnn::TensorInfo kernelTensorInfo({ 1, 1, 2, 2 }, ArmnnType);
1267  std::vector<float> kernelNoQuantizedValues =
1268  {
1269  1, 2,
1270  3, 4
1271  };
1272 
1273  // Since the dilation rate is 2 this will dilate the kernel to be like 3x3: d(K-1)+1 --> 2 x (2-1) + 1 = 3,
1274  // therefore the output will be 4x4: (I − K + 2P)/S +1 => trunc ( (10 - 3 + 2x2 ) / 3 + 1 )
1275  // where, dilation size = d = 2; kernel size = K = 2; input size = I = 10; padding size = P = 2; stride = S = 3
1276  armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4}, ArmnnType);
1277  std::vector<float> outputExpectedNoQuantizedValues =
1278  {
1279  4, 7, 7, 3,
1280  6, 10, 10, 4,
1281  6, 10, 10, 4,
1282  2, 3, 3, 1
1283  };
1284  uint32_t padLeft = 1;
1285  uint32_t padTop = 1;
1286  uint32_t padRight = 1;
1287  uint32_t padBottom = 1;
1288 
1289  return Convolution2d3x3DilationTestCommon<ArmnnType, ArmnnBType>(
1290  workloadFactory,
1291  memoryManager,
1292  tensorHandleFactory,
1293  inputNoQuantizedValues,
1294  inputTensorInfo,
1295  kernelNoQuantizedValues,
1296  kernelTensorInfo,
1297  outputExpectedNoQuantizedValues,
1298  outputTensorInfo,
1299  2,
1300  2,
1301  layout,
1302  padLeft,
1303  padTop,
1304  padRight,
1305  padBottom,
1306  3,
1307  3,
1308  biasEnabled
1309  );
1310 }

◆ Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test< armnn::DataType::BFloat16, armnn::DataType::BFloat16 >()

◆ Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test< armnn::DataType::Float32, armnn::DataType::Float32 >()

◆ Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test< armnn::DataType::QAsymmS8, armnn::DataType::Signed32 >()

◆ Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test< armnn::DataType::QAsymmU8, armnn::DataType::Signed32 >()

◆ Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test< armnn::DataType::QSymmS16, armnn::DataType::Signed32 >()

◆ Convolution2d2x3x3Dilation3x3Test()

LayerTestResult<T, 4> Convolution2d2x3x3Dilation3x3Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory,
bool  biasEnabled,
const armnn::DataLayout  layout 
)

Definition at line 1171 of file Conv2dTestImpl.cpp.

1177 {
1178  armnn::TensorInfo inputTensorInfo({ 1, 2, 10, 10 }, ArmnnType);
1179  std::vector<float> inputNoQuantizedValues =
1180  {
1181  0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1182  0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1183  0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1184  0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
1185  0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
1186  0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
1187  0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1188  0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1189  0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1190  0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1191 
1192  0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1193  0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1194  0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1195  0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
1196  0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
1197  0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
1198  0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1199  0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1200  0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1201  0, 0, 0, 0, 0, 0, 0, 0, 0, 0
1202  };
1203 
1204  armnn::TensorInfo kernelTensorInfo({ 1, 2, 3, 3 }, ArmnnType);
1205  std::vector<float> kernelNoQuantizedValues =
1206  {
1207  1, 2, 3,
1208  4, 5, 6,
1209  7, 8, 9,
1210 
1211  1, 2, 3,
1212  4, 5, 6,
1213  7, 8, 9
1214  };
1215 
1216  // Since the dilation rate is 3 this will dilate the kernel to be like 7x7,
1217  // therefore the output will be 4x4: (I−K+2P)/S +1 => (10-7 +0)/1 +1
1218  armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
1219  std::vector<float> outputExpectedNoQuantizedValues =
1220  {
1221  12., 10., 10., 10.,
1222  12., 10., 10., 10.,
1223  12., 10., 10., 10.,
1224  6., 4., 4., 4.
1225  };
1226 
1227  return Convolution2d3x3DilationTestCommon<ArmnnType, ArmnnBType>(
1228  workloadFactory,
1229  memoryManager,
1230  tensorHandleFactory,
1231  inputNoQuantizedValues,
1232  inputTensorInfo,
1233  kernelNoQuantizedValues,
1234  kernelTensorInfo,
1235  outputExpectedNoQuantizedValues,
1236  outputTensorInfo,
1237  3,
1238  3,
1239  layout,
1240  biasEnabled);
1241 }

◆ Convolution2d2x3x3Dilation3x3Test< armnn::DataType::BFloat16, armnn::DataType::BFloat16 >()

◆ Convolution2d2x3x3Dilation3x3Test< armnn::DataType::Float32, armnn::DataType::Float32 >()

◆ Convolution2d2x3x3Dilation3x3Test< armnn::DataType::QAsymmS8, armnn::DataType::Signed32 >()

◆ Convolution2d2x3x3Dilation3x3Test< armnn::DataType::QAsymmU8, armnn::DataType::Signed32 >()

◆ Convolution2d2x3x3Dilation3x3Test< armnn::DataType::QSymmS16, armnn::DataType::Signed32 >()

◆ Convolution2d3x3Dilation3x3Test()

LayerTestResult<T, 4> Convolution2d3x3Dilation3x3Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory,
bool  biasEnabled,
const armnn::DataLayout  layout 
)

Definition at line 1113 of file Conv2dTestImpl.cpp.

1119 {
1120  armnn::TensorInfo inputTensorInfo({ 1, 1, 10, 10 }, ArmnnType);
1121  std::vector<float> inputNoQuantizedValues =
1122  {
1123  0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1124  0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1125  0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1126  0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
1127  0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
1128  0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
1129  0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1130  0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1131  0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1132  0, 0, 0, 0, 0, 0, 0, 0, 0, 0
1133  };
1134 
1135  armnn::TensorInfo kernelTensorInfo({ 1, 1, 3, 3}, ArmnnType);
1136  std::vector<float> kernelNoQuantizedValues =
1137  {
1138  1, 2, 3,
1139  4, 5, 6,
1140  7, 8, 9
1141  };
1142 
1143  // Since the dilation rate is 3 this will dilate the kernel to be like 7x7,
1144  // therefore the output will be 4x4: (I−K+2P)/S +1 => (10-7 +0)/1 +1
1145  armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4}, ArmnnType);
1146  std::vector<float> outputExpectedNoQuantizedValues =
1147  {
1148  6., 5., 5., 5.,
1149  6., 5., 5., 5.,
1150  6., 5., 5., 5.,
1151  3., 2., 2., 2.
1152  };
1153 
1154  return Convolution2d3x3DilationTestCommon<ArmnnType, ArmnnBType>(
1155  workloadFactory,
1156  memoryManager,
1157  tensorHandleFactory,
1158  inputNoQuantizedValues,
1159  inputTensorInfo,
1160  kernelNoQuantizedValues,
1161  kernelTensorInfo,
1162  outputExpectedNoQuantizedValues,
1163  outputTensorInfo,
1164  3,
1165  3,
1166  layout,
1167  biasEnabled);
1168 }

◆ Convolution2d3x3Dilation3x3Test< armnn::DataType::BFloat16, armnn::DataType::BFloat16 >()

◆ Convolution2d3x3Dilation3x3Test< armnn::DataType::Float32, armnn::DataType::Float32 >()

◆ Convolution2d3x3Dilation3x3Test< armnn::DataType::QAsymmS8, armnn::DataType::Signed32 >()

◆ Convolution2d3x3Dilation3x3Test< armnn::DataType::QAsymmU8, armnn::DataType::Signed32 >()

◆ Convolution2d3x3Dilation3x3Test< armnn::DataType::QSymmS16, armnn::DataType::Signed32 >()

◆ Convolution2d3x3DilationTestCommon()

LayerTestResult<T, 4> Convolution2d3x3DilationTestCommon ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory,
const std::vector< float > &  inputNoQuantizedValues,
armnn::TensorInfo inputTensorInfo,
const std::vector< float > &  kernelNoQuantizedValues,
armnn::TensorInfo kernelTensorInfo,
const std::vector< float > &  outputExpectedNoQuantizedValues,
armnn::TensorInfo outputTensorInfo,
uint32_t  dilationX,
uint32_t  dilationY,
armnn::DataLayout  layout = armnn::DataLayout::NCHW,
uint32_t  padLeft = 0,
uint32_t  padTop = 0,
uint32_t  padRight = 0,
uint32_t  padBottom = 0,
uint32_t  strideX = 1,
uint32_t  strideY = 1,
bool  biasEnabled = false 
)

Definition at line 1023 of file Conv2dTestImpl.cpp.

References armnn::Float32, TensorInfo::GetQuantizationOffset(), TensorInfo::GetQuantizationScale(), TensorInfo::GetShape(), armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, TensorInfo::SetQuantizationOffset(), and TensorInfo::SetQuantizationScale().

1044 {
1045  float qScale;
1046  int32_t qOffset;
1047  switch (ArmnnType)
1048  {
1049  case armnn::DataType::QAsymmS8:
1050  case armnn::DataType::QAsymmU8:
1051  {
1052  qScale = 0.1f;
1053  qOffset = 128;
1054  break;
1055  }
1056  case armnn::DataType::QSymmS16:
1057  {
1058  qScale = 0.1f;
1059  qOffset = 0;
1060  break;
1061  }
1062  case armnn::DataType::Float32:
1063  default:
1064  {
1065  qScale = 0.f;
1066  qOffset = 0;
1067  break;
1068  }
1069  }
1070 
1071  inputTensorInfo.SetQuantizationScale(qScale);
1072  inputTensorInfo.SetQuantizationOffset(qOffset);
1073  kernelTensorInfo.SetQuantizationScale(qScale);
1074  kernelTensorInfo.SetQuantizationOffset(qOffset);
1075  outputTensorInfo.SetQuantizationScale(qScale);
1076  outputTensorInfo.SetQuantizationOffset(qOffset);
1077 
1078  auto input = QuantizedVector<T>(inputNoQuantizedValues,
1079  inputTensorInfo.GetQuantizationScale(),
1080  inputTensorInfo.GetQuantizationOffset());
1081  auto kernel = QuantizedVector<T>(kernelNoQuantizedValues,
1082  kernelTensorInfo.GetQuantizationScale(),
1083  kernelTensorInfo.GetQuantizationOffset());
1084  auto expectedOutput = QuantizedVector<T>(outputExpectedNoQuantizedValues,
1085  outputTensorInfo.GetQuantizationScale(),
1086  outputTensorInfo.GetQuantizationOffset());
1087 
1088  return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
1089  workloadFactory,
1090  memoryManager,
1091  tensorHandleFactory,
1092  input,
1093  kernel,
1094  GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
1095  expectedOutput,
1096  inputTensorInfo.GetShape(),
1097  kernelTensorInfo.GetShape(),
1098  outputTensorInfo.GetShape(),
1099  qScale,
1100  qOffset,
1101  layout,
1102  padLeft,
1103  padTop,
1104  padRight,
1105  padBottom,
1106  strideX,
1107  strideY,
1108  dilationX,
1109  dilationY);
1110 }
const TensorShape & GetShape() const
Definition: Tensor.hpp:191
int32_t GetQuantizationOffset() const
Definition: Tensor.cpp:480
float GetQuantizationScale() const
Definition: Tensor.cpp:463
void SetQuantizationScale(float scale)
Definition: Tensor.cpp:475
void SetQuantizationOffset(int32_t offset)
Definition: Tensor.cpp:491

◆ Convolution2d3x3Stride2x2BFloat16SmallValueTest()

LayerTestResult<float, 4> Convolution2d3x3Stride2x2BFloat16SmallValueTest ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory,
bool  biasEnabled,
const armnn::DataLayout dataLayout 
)

Definition at line 1524 of file Conv2dTestImpl.cpp.

References armnn::BFloat16, armnn::Float32, armnn::IgnoreUnused(), and SimpleConvolution2dNhwcTestImpl().

Referenced by TEST_SUITE().

1530 {
1531  // BFloat16 input and weight, Float32 output
1532  armnn::IgnoreUnused(biasEnabled);
1533 
1534  // Input is a single-batch, 1 channel, 5x5 image.
1535  armnn::TensorInfo inputDesc({1, 5, 5, 1}, armnn::DataType::BFloat16);
1536 
1537  std::vector<armnn::BFloat16> inputValues = armnnUtils::QuantizedVector<armnn::BFloat16>(
1538  {
1539  0.0367984f, // 0.0368652
1540  0.0380895f, // 0.0380859
1541  0.0420157f, // 0.0419922
1542  0.0675631f, // 0.0673828
1543  0.0938920f, // 0.09375
1544  0.0476106f, // 0.0476074
1545  0.1035490f, // 0.103516
1546  0.1260370f, // 0.125977
1547  0.0461647f, // 0.0461426
1548  0.0883828f, // 0.0883789
1549  0.1159540f, // 0.115723
1550  0.0498519f, // 0.0498047
1551  0.0104630f, // 0.010437
1552  0.0154114f, // 0.0154419
1553  0.00137681f, // 0.00137329
1554  0.0344238f, // 0.0344616
1555  0.0356445f, // 0.0355693
1556  0.0495605f, // 0.0495018
1557  0.0683594f, // 0.0683308
1558  0.0991211f, // 0.0988837
1559  0.0461426f, // 0.0461838
1560  0.0996094f, // 0.0997546
1561  0.1269530f, // 0.127099
1562  0.0393066f, // 0.0392791
1563  0.103516f // 0.103641
1564  },
1565  1.0f, 0);
1566 
1567  // Use a 3x3 kernel.
1568  armnn::TensorInfo kernelDesc({1, 3, 3, 1}, armnn::DataType::BFloat16);
1569 
1570  std::vector<armnn::BFloat16> kernelValues = armnnUtils::QuantizedVector<armnn::BFloat16>(
1571  {
1572  -0.126184f, // -0.125977
1573  -0.150468f, // -0.150391
1574  -0.101412f, // -0.101562
1575  -0.0586369f,// -0.0585938
1576  -0.0865864f,// -0.0864258
1577  -0.0435089f,// -0.043457
1578  0.0347555f, // 0.034668
1579  0.0323111f, // 0.0322266
1580  0.0385381f // 0.0385742
1581  },
1582  1.0f, 0);
1583 
1584  // Expected output is a single-batch, 1 channel, 3x3 image.
1585  armnn::TensorInfo outputDesc({1, 3, 3, 1}, armnn::DataType::Float32);
1586 
1587  // Expected output (with results if calculated as FP32 in the comments)
1588  const std::vector<float> outputData =
1589  {
1590  0.000686645508f, // 0.000685
1591  0.000640869141f, // 0.000639
1592  -0.00759887695f, // -0.007631
1593  -0.02734375f, // -0.027388
1594  -0.0356445312f, // -0.035737
1595  -0.0145874023f, // -0.014568
1596  -0.0170898438f, // -0.017124
1597  -0.0373535156f, // -0.037431
1598  -0.0346679688f // -0.034808
1599  };
1600 
1601  uint32_t padLeft = 1;
1602  uint32_t padTop = 1;
1603  uint32_t padRight = 1;
1604  uint32_t padBottom = 1;
1605  uint32_t strideX = 2;
1606  uint32_t strideY = 2;
1607 
1608  return SimpleConvolution2dNhwcTestImpl<armnn::DataType::BFloat16, armnn::DataType::Float32,
1609  armnn::DataType::Float32>(
1610  workloadFactory,
1611  memoryManager,
1612  tensorHandleFactory,
1613  inputValues,
1614  kernelValues,
1615  std::vector<float>(),
1616  outputData,
1617  inputDesc.GetShape(),
1618  kernelDesc.GetShape(),
1619  outputDesc.GetShape(),
1620  dataLayout,
1621  1.0f,
1622  0,
1623  padLeft,
1624  padTop,
1625  padRight,
1626  padBottom,
1627  strideX,
1628  strideY);
1629 }
LayerTestResult< O, 4 > SimpleConvolution2dNhwcTestImpl(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const std::vector< T > &input, const std::vector< T > &kernel, const std::vector< B > &bias, const std::vector< O > &outputExpected, const armnn::TensorShape &inputShape, const armnn::TensorShape &kernelShape, const armnn::TensorShape &outputExpectedShape, const armnn::DataLayout dataLayout, float qScale, int32_t qOffset, uint32_t padLeft=1, uint32_t padTop=1, uint32_t padRight=1, uint32_t padBottom=1, uint32_t strideX=1, uint32_t strideY=1)
void IgnoreUnused(Ts &&...)

◆ Convolution2d3x3Stride2x2BFloat16Test()

LayerTestResult<float, 4> Convolution2d3x3Stride2x2BFloat16Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory,
bool  biasEnabled,
const armnn::DataLayout dataLayout 
)

Definition at line 1417 of file Conv2dTestImpl.cpp.

References armnn::BFloat16, armnn::Float32, armnn::IgnoreUnused(), and SimpleConvolution2dNhwcTestImpl().

Referenced by TEST_SUITE().

1423 {
1424  // BFloat16 input and weight, Float32 output
1425  armnn::IgnoreUnused(biasEnabled);
1426 
1427  // Input is a single-batch, 1 channel, 5x5 image.
1428  armnn::TensorInfo inputDesc({ 1, 5, 5, 1 }, armnn::DataType::BFloat16);
1429 
1430  std::vector<armnn::BFloat16> inputValues = armnnUtils::QuantizedVector<armnn::BFloat16>(
1431  {
1432  10.0367984f, // 10.0625
1433  2.0380895f, // 2.03125
1434  15.0420157f, // 15.0625
1435  22.0675631f, // 22.125
1436  8.0938920f, // 8.125
1437  5.0476106f, // 5.0625
1438  80.1035490f, // 80
1439  100.1260370f, // 100
1440  55.0461647f, // 55
1441  120.0883828f, // 120
1442  9.1159540f, // 9.125
1443  90.0498519f, // 90
1444  200.0104630f, // 200
1445  30.0154114f, // 30
1446  75.00137681f, // 75
1447  30.0344238f, // 30
1448  25.0356445f, // 25
1449  130.0495605f, // 130
1450  60.0683594f, // 60
1451  35.0991211f, // 35
1452  8.0461426f, // 8.0625
1453  12.0996094f, // 12.125
1454  98.1269530f, // 98
1455  125.0393066f, // 125
1456  5.103516f // 5.0937
1457  },
1458  1.0f, 0);
1459 
1460  // Use a 3x3 kernel.
1461  armnn::TensorInfo kernelDesc({1, 3, 3, 1}, armnn::DataType::BFloat16);
1462 
1463  std::vector<armnn::BFloat16> kernelValues = armnnUtils::QuantizedVector<armnn::BFloat16>(
1464  {
1465  -0.126184f, // -0.125977
1466  -0.150468f, // -0.150391
1467  -0.101412f, // -0.101562
1468  -0.0586369f,// -0.0585938
1469  -0.0865864f,// -0.0864258
1470  -0.0435089f,// -0.043457
1471  0.0347555f, // 0.034668
1472  0.0323111f, // 0.0322266
1473  0.0385381f // 0.0385742
1474  },
1475  1.0f, 0);
1476 
1477  // Expected output is a single-batch, 1 channel, 3x3 image.
1478  armnn::TensorInfo outputDesc({ 1, 3, 3, 1 }, armnn::DataType::Float32);
1479 
1480  // Expected output (with results if calculated as FP32 in the comments)
1481  const std::vector<float> outputData =
1482  {
1483  2.296875f, // 2.29240716
1484  5.75f, // 5.75851926
1485  3.78125f, // 3.79855026
1486  -11.625f, // -11.65498118
1487  -47.25f, // -47.27316893
1488  -30.0f, // -30.04771684
1489  -8.25f, // -8.28126168
1490  -43.5f, // -43.46531337
1491  -20.625f // -20.63477281
1492  };
1493 
1494  uint32_t padLeft = 1;
1495  uint32_t padTop = 1;
1496  uint32_t padRight = 1;
1497  uint32_t padBottom = 1;
1498  uint32_t strideX = 2;
1499  uint32_t strideY = 2;
1500 
1501  return SimpleConvolution2dNhwcTestImpl<armnn::DataType::BFloat16, armnn::DataType::Float32,
1502  armnn::DataType::Float32>(
1503  workloadFactory,
1504  memoryManager,
1505  tensorHandleFactory,
1506  inputValues,
1507  kernelValues,
1508  std::vector<float>(),
1509  outputData,
1510  inputDesc.GetShape(),
1511  kernelDesc.GetShape(),
1512  outputDesc.GetShape(),
1513  dataLayout,
1514  1.0f,
1515  0,
1516  padLeft,
1517  padTop,
1518  padRight,
1519  padBottom,
1520  strideX,
1521  strideY);
1522 }
LayerTestResult< O, 4 > SimpleConvolution2dNhwcTestImpl(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const std::vector< T > &input, const std::vector< T > &kernel, const std::vector< B > &bias, const std::vector< O > &outputExpected, const armnn::TensorShape &inputShape, const armnn::TensorShape &kernelShape, const armnn::TensorShape &outputExpectedShape, const armnn::DataLayout dataLayout, float qScale, int32_t qOffset, uint32_t padLeft=1, uint32_t padTop=1, uint32_t padRight=1, uint32_t padBottom=1, uint32_t strideX=1, uint32_t strideY=1)
void IgnoreUnused(Ts &&...)

◆ Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTest()

LayerTestResult<float, 4> Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTest ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory,
armnn::DataLayout  layout 
)

Definition at line 3366 of file Conv2dTestImpl.cpp.

References Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTestCommon(), and armnn::Float32.

Referenced by TEST_SUITE().

3371 {
3372  return Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTestCommon
3373  <armnn::DataType::Float32, armnn::DataType::Float32>(
3374  workloadFactory, memoryManager, tensorHandleFactory, layout, 0.0f, 0);
3375 }
LayerTestResult< T, 4 > Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTestCommon(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout layout, float qScale, int32_t qOffset)

◆ Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTestCommon()

LayerTestResult<T, 4> Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTestCommon ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory,
const armnn::DataLayout  layout,
float  qScale,
int32_t  qOffset 
)

Definition at line 888 of file Conv2dTestImpl.cpp.

Referenced by Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTest().

895 {
896  // Use a single-batch 1-channel 3x3 image as input.
897  armnn::TensorInfo inputDesc({ 1, 1, 3, 3 }, ArmnnType);
898  std::vector<T> input =
899  QuantizedVector<T>({
900  11,21,31,
901  12,22,32,
902  13,23,33
903  },
904  qScale, qOffset);
905 
906  // Use 1 batch of a 1-channel 2x2 kernel.
907  armnn::TensorInfo kernelDesc({ 1, 1, 2, 2 }, ArmnnType);
908  std::vector<T> kernel =
909  QuantizedVector<T>({
910  -11,-21,
911  -12,-22,
912  },
913  qScale, qOffset);
914 
915 // Expected output is 1 batch of a 1-channel 6x8 image.
916 // Manually calculated like this:
917 //[-11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ..]
918 //[-11*0 -21*0 -12*0 -22*11 ; -11*0 -21*0 -12*11 -22*21 ; -11*0 -21*0 -12*21 -22*31 ; -11*0 -21*0 -12*31 -22*0 ..]
919 //[-11*0 -21*11 -12*0 -22*12 ; -11*11 -21*21 -12*12 -22*22 ; -11*21 -21*31 -12*22 -22*32 ; -11*31 -21*0 -12*32 -22*0 ..]
920 //[-11*0 -21*12 -12*0 -22*13 ; -11*12 -21*22 -12*13 -22*23 ; -11*22 -21*32 -12*23 -22*33 ; -11*32 -21*0 -12*33 -22*0 ..]
921 //[-11*0 -21*13 -12*0 -22*0 ; -11*13 -21*23 -12*0 -22*0 ; -11*23 -21*33 -12*0 -22*0 ; -11*33 -21*0 -12*0 -22*0 ..]
922 //[-11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ..]
923 //[..... ..... ..... ..... ; ..... ..... ..... ..... ; ..... ..... ..... ..... ; ..... ..... ..... ..... ..]
924  armnn::TensorInfo outputDesc({ 1, 1, 8, 6 }, ArmnnType);
925  std::vector<T> expectedOutput =
926  QuantizedVector<T>({
927  0, 0, 0, 0, 0, 0,
928  -242, -594, -934, -372, 0, 0,
929  -495, -1190, -1850, -725, 0, 0,
930  -538, -1256, -1916, -748, 0, 0,
931  -273, -626, -946, -363, 0, 0,
932  0, 0, 0, 0, 0, 0,
933  0, 0, 0, 0, 0, 0,
934  0, 0, 0, 0, 0, 0
935  },
936  qScale, qOffset);
937 
938  return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
939  workloadFactory,
940  memoryManager,
941  tensorHandleFactory,
942  input,
943  kernel,
944  GetBias2<ArmnnBType>(false, qScale * qScale),
945  expectedOutput,
946  inputDesc.GetShape(),
947  kernelDesc.GetShape(),
948  outputDesc.GetShape(),
949  qScale,
950  qOffset,
951  layout,
952  1, // Padding left.
953  2, // Padding top.
954  3, // Padding right.
955  4); // Padding bottom.
956 }

◆ Convolution2dAsymmetricPaddingTest()

LayerTestResult<float, 4> Convolution2dAsymmetricPaddingTest ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory,
armnn::DataLayout  layout 
)

Definition at line 3356 of file Conv2dTestImpl.cpp.

Referenced by TEST_SUITE().

3361 {
3362  return SimpleConvolution2dAsymmetricPaddingTestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
3363  workloadFactory, memoryManager, tensorHandleFactory, layout, 0.0f, 0);
3364 }

◆ Convolution2dPerAxisQuantTest()

LayerTestResult<uint8_t, 4> Convolution2dPerAxisQuantTest ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory,
const armnn::DataLayout  layout 
)

Definition at line 3397 of file Conv2dTestImpl.cpp.

References AllocateAndCopyDataToITensorHandle(), CopyDataFromITensorHandle(), CopyDataToITensorHandle(), IWorkloadFactory::CreateConvolution2d(), ITensorHandleFactory::CreateTensorHandle(), QueueDescriptorWithParameters< LayerDescriptor >::m_Parameters, Convolution2dDescriptor::m_StrideX, armnn::NCHW, and PermuteTensorNhwcToNchw().

Referenced by TEST_SUITE().

3402 {
3403  using namespace armnn;
3404 
3405  const DataType inputType = DataType::QAsymmU8;
3406  const DataType kernelType = DataType::QSymmS8;
3407  const DataType biasType = DataType::Signed32;
3408 
3409  TensorInfo inputInfo ({ 1, 3, 1, 2 }, inputType, 0.5f, 128);
3410  TensorInfo outputInfo({ 1, 3, 1, 3 }, inputType, 1.0f, 128);
3411 
3412  const std::vector<float> quantScales{ 0.5f, 0.75f, 1.0f };
3413  constexpr unsigned int quantDimension = 0;
3414 
3415  TensorInfo kernelInfo({ 3, 1, 1, 2 }, kernelType, quantScales, quantDimension);
3416 
3417  const std::vector<float> biasQuantScales{ 0.25f, 0.375f, 0.5f };
3418  TensorInfo biasInfo({ 3 }, biasType, biasQuantScales, quantDimension);
3419 
3420  std::vector<uint8_t> inputData =
3421  {
3422  138, 108, 138, 108, 138, 108
3423  };
3424 
3425  std::vector<int8_t> kernelData =
3426  {
3427  1, 2, 1, 2, 1, 2
3428  };
3429 
3430  std::vector<int32_t> biasData =
3431  {
3432  4, 4, 4
3433  };
3434 
3435  std::vector<uint8_t> expectedOutputData =
3436  {
3437  121, 118, 115, 121, 118, 115, 121, 118, 115
3438  };
3439 
3440  if (layout == DataLayout::NCHW)
3441  {
3442  PermuteTensorNhwcToNchw(inputInfo, inputData);
3443  PermuteTensorNhwcToNchw(kernelInfo, kernelData);
3444  PermuteTensorNhwcToNchw(outputInfo, expectedOutputData);
3445  }
3446 
3447  std::vector<uint8_t> actualOutput(outputInfo.GetNumElements());
3448 
3449  Convolution2dDescriptor descriptor;
3450  descriptor.m_StrideX = 1;
3451  descriptor.m_StrideY = 1;
3452  descriptor.m_PadLeft = 0;
3453  descriptor.m_PadRight = 0;
3454  descriptor.m_PadTop = 0;
3455  descriptor.m_PadBottom = 0;
3456  descriptor.m_BiasEnabled = true;
3457  descriptor.m_DataLayout = layout;
3458 
3459  std::unique_ptr<ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputInfo);
3460  std::unique_ptr<ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputInfo);
3461 
3462  WorkloadInfo workloadInfo;
3463  ScopedTensorHandle weightTensor(kernelInfo);
3464  ScopedTensorHandle biasTensor(biasInfo);
3465 
3466  AllocateAndCopyDataToITensorHandle(&weightTensor, kernelData.data());
3467  AllocateAndCopyDataToITensorHandle(&biasTensor, biasData.data());
3468 
3469  Convolution2dQueueDescriptor queueDescriptor;
3470  queueDescriptor.m_Parameters = descriptor;
3471  queueDescriptor.m_Weight = &weightTensor;
3472  queueDescriptor.m_Bias = &biasTensor;
3473 
3474  AddInputToWorkload(queueDescriptor, workloadInfo, inputInfo, inputHandle.get());
3475  AddOutputToWorkload(queueDescriptor, workloadInfo, outputInfo, outputHandle.get());
3476 
3477  std::unique_ptr<IWorkload> workload = workloadFactory.CreateConvolution2d(queueDescriptor, workloadInfo);
3478  inputHandle->Allocate();
3479  outputHandle->Allocate();
3480 
3481  CopyDataToITensorHandle(inputHandle.get(), inputData.data());
3482 
3483  ExecuteWorkload(*workload, memoryManager);
3484 
3485  CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
3486 
3487  return LayerTestResult<uint8_t, 4>(actualOutput,
3488  expectedOutputData,
3489  outputHandle->GetShape(),
3490  outputInfo.GetShape());
3491 }
A Convolution2dDescriptor for the Convolution2dLayer.
Copyright (c) 2021 ARM Limited and Contributors.
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
DataType
Definition: Types.hpp:35
void AllocateAndCopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)
void CopyDataFromITensorHandle(void *memory, const armnn::ITensorHandle *tensorHandle)
Contains information about TensorInfos of a layer.
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo) const =0
virtual std::unique_ptr< IWorkload > CreateConvolution2d(const Convolution2dQueueDescriptor &descriptor, const WorkloadInfo &info) const
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)
void PermuteTensorNhwcToNchw(armnn::TensorInfo &tensorInfo, std::vector< T > &tensorData)

◆ DepthwiseConvolution2d2x3x3Dilation3x3Test()

LayerTestResult<T, 4> DepthwiseConvolution2d2x3x3Dilation3x3Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory,
bool  biasEnabled,
const armnn::DataLayout  layout 
)

Definition at line 2682 of file Conv2dTestImpl.cpp.

2688 {
2689  armnn::TensorInfo inputTensorInfo({1, 2, 10, 10}, ArmnnType);
2690  std::vector<float> inputNoQuantizedValues =
2691  {
2692  0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2693  0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2694  0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2695  0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
2696  0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
2697  0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
2698  0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2699  0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2700  0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2701  0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2702 
2703  0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2704  0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2705  0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2706  0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
2707  0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
2708  0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
2709  0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2710  0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2711  0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2712  0, 0, 0, 0, 0, 0, 0, 0, 0, 0
2713  };
2714 
2715  armnn::TensorInfo kernelTensorInfo({ 1, 3, 3, 2}, ArmnnType);
2716  std::vector<float> kernelNoQuantizedValues =
2717  {
2718  1, 2, 3,
2719  4, 5, 6,
2720  7, 8, 9,
2721 
2722  1, 2, 3,
2723  4, 5, 6,
2724  7, 8, 9
2725  };
2726 
2727  // Since the dilation rate is 3 this will dilate the kernel to be like 7x7,
2728  // therefore the output will be 2x4x4: (I−K+2P)/S +1 => (10-7 +0)/1 +1
2729  armnn::TensorInfo outputTensorInfo({ 1, 2, 4, 4}, ArmnnType);
2730  std::vector<float> outputExpectedNoQuantizedValues =
2731  {
2732  2, 9, 9, 9, 2, 9, 9, 9, 2, 9, 9, 9, 5, 3, 3, 3,
2733 
2734  3, 1, 1, 1, 3, 1, 1, 1, 3, 1, 1, 1, 6, 4, 4, 4
2735  };
2736 
2737  return DepthwiseConvolution2d3x3DilationTestCommon<ArmnnType, ArmnnBType>(
2738  workloadFactory,
2739  memoryManager,
2740  tensorHandleFactory,
2741  inputNoQuantizedValues,
2742  inputTensorInfo,
2743  kernelNoQuantizedValues,
2744  kernelTensorInfo,
2745  outputExpectedNoQuantizedValues,
2746  outputTensorInfo,
2747  3,
2748  3,
2749  layout,
2750  biasEnabled);
2751 }

◆ DepthwiseConvolution2d2x3x3Dilation3x3Test< armnn::DataType::BFloat16, armnn::DataType::BFloat16 >()

◆ DepthwiseConvolution2d2x3x3Dilation3x3Test< armnn::DataType::Float32, armnn::DataType::Float32 >()

◆ DepthwiseConvolution2d2x3x3Dilation3x3Test< armnn::DataType::QAsymmS8, armnn::DataType::Signed32 >()

◆ DepthwiseConvolution2d2x3x3Dilation3x3Test< armnn::DataType::QAsymmU8, armnn::DataType::Signed32 >()

◆ DepthwiseConvolution2d2x3x3Dilation3x3Test< armnn::DataType::QSymmS16, armnn::DataType::Signed32 >()

◆ DepthwiseConvolution2d3x3Dilation3x3Test()

LayerTestResult<T, 4> DepthwiseConvolution2d3x3Dilation3x3Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory,
bool  biasEnabled,
const armnn::DataLayout  layout 
)

Definition at line 2624 of file Conv2dTestImpl.cpp.

2630 {
2631  armnn::TensorInfo inputTensorInfo({1, 1, 10, 10}, ArmnnType);
2632  std::vector<float> inputNoQuantizedValues =
2633  {
2634  0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2635  0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2636  0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2637  0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
2638  0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
2639  0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
2640  0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2641  0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2642  0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2643  0, 0, 0, 0, 0, 0, 0, 0, 0, 0
2644  };
2645 
2646  armnn::TensorInfo kernelTensorInfo({ 1, 3, 3, 1}, ArmnnType);
2647  std::vector<float> kernelNoQuantizedValues =
2648  {
2649  1, 2, 3,
2650  4, 5, 6,
2651  7, 8, 9
2652  };
2653 
2654  // Since the dilation rate is 3 this will dilate the kernel to be like 7x7,
2655  // therefore the output will be 4x4: (I−K+2P)/S +1 => (10-7 +0)/1 +1
2656  armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4}, ArmnnType);
2657  std::vector<float> outputExpectedNoQuantizedValues =
2658  {
2659  6., 5., 5., 5.,
2660  6., 5., 5., 5.,
2661  6., 5., 5., 5.,
2662  3., 2., 2., 2.
2663  };
2664 
2665  return DepthwiseConvolution2d3x3DilationTestCommon<ArmnnType, ArmnnBType>(
2666  workloadFactory,
2667  memoryManager,
2668  tensorHandleFactory,
2669  inputNoQuantizedValues,
2670  inputTensorInfo,
2671  kernelNoQuantizedValues,
2672  kernelTensorInfo,
2673  outputExpectedNoQuantizedValues,
2674  outputTensorInfo,
2675  3,
2676  3,
2677  layout,
2678  biasEnabled);
2679 }

◆ DepthwiseConvolution2d3x3Dilation3x3Test< armnn::DataType::BFloat16, armnn::DataType::BFloat16 >()

◆ DepthwiseConvolution2d3x3Dilation3x3Test< armnn::DataType::Float32, armnn::DataType::Float32 >()

◆ DepthwiseConvolution2d3x3Dilation3x3Test< armnn::DataType::QAsymmS8, armnn::DataType::Signed32 >()

◆ DepthwiseConvolution2d3x3Dilation3x3Test< armnn::DataType::QAsymmU8, armnn::DataType::Signed32 >()

◆ DepthwiseConvolution2d3x3Dilation3x3Test< armnn::DataType::QSymmS16, armnn::DataType::Signed32 >()

◆ DepthwiseConvolution2d3x3DilationTestCommon()

LayerTestResult<T, 4> DepthwiseConvolution2d3x3DilationTestCommon ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory,
const std::vector< float > &  inputNoQuantizedValues,
armnn::TensorInfo inputTensorInfo,
const std::vector< float > &  kernelNoQuantizedValues,
armnn::TensorInfo kernelTensorInfo,
const std::vector< float > &  outputExpectedNoQuantizedValues,
armnn::TensorInfo outputTensorInfo,
uint32_t  dilationX,
uint32_t  dilationY,
armnn::DataLayout  layout = armnn::DataLayout::NCHW,
bool  biasEnabled = false 
)

Definition at line 2534 of file Conv2dTestImpl.cpp.

References armnn::Float32, TensorInfo::GetQuantizationOffset(), TensorInfo::GetQuantizationScale(), TensorInfo::GetShape(), armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, TensorInfo::SetQuantizationOffset(), and TensorInfo::SetQuantizationScale().

2548 {
2549  float qScale;
2550  int32_t qOffset;
2551  switch (ArmnnType)
2552  {
2553  case armnn::DataType::QAsymmS8:
2554  case armnn::DataType::QAsymmU8:
2555  {
2556  qScale = 0.1f;
2557  qOffset = 128;
2558  break;
2559  }
2560  case armnn::DataType::QSymmS16:
2561  {
2562  qScale = 0.1f;
2563  qOffset = 0;
2564  break;
2565  }
2566  case armnn::DataType::Float32:
2567  default:
2568  {
2569  qScale = 0.f;
2570  qOffset = 0;
2571  break;
2572  }
2573  }
2574 
2575  inputTensorInfo.SetQuantizationScale(qScale);
2576  inputTensorInfo.SetQuantizationOffset(qOffset);
2577  kernelTensorInfo.SetQuantizationScale(qScale);
2578  kernelTensorInfo.SetQuantizationOffset(qOffset);
2579  outputTensorInfo.SetQuantizationScale(qScale);
2580  outputTensorInfo.SetQuantizationOffset(qOffset);
2581 
2582  auto input = QuantizedVector<T>(inputNoQuantizedValues,
2583  inputTensorInfo.GetQuantizationScale(),
2584  inputTensorInfo.GetQuantizationOffset());
2585  auto kernel = QuantizedVector<T>(kernelNoQuantizedValues,
2586  kernelTensorInfo.GetQuantizationScale(),
2587  kernelTensorInfo.GetQuantizationOffset());
2588  auto expectedOutput = QuantizedVector<T>(outputExpectedNoQuantizedValues,
2589  outputTensorInfo.GetQuantizationScale(),
2590  outputTensorInfo.GetQuantizationOffset());
2591 
2592  uint32_t padLeft = 0;
2593  uint32_t padTop = 0;
2594  uint32_t padRight = 0;
2595  uint32_t padBottom = 0;
2596  uint32_t strideX = 1;
2597  uint32_t strideY = 1;
2598 
2599  return DepthwiseConvolution2dTestImpl<ArmnnType, ArmnnBType>(
2600  workloadFactory,
2601  memoryManager,
2602  tensorHandleFactory,
2603  input,
2604  kernel,
2605  GetBias<ArmnnBType>(biasEnabled, qScale * qScale, outputTensorInfo, layout),
2606  expectedOutput,
2607  inputTensorInfo.GetShape(),
2608  kernelTensorInfo.GetShape(),
2609  outputTensorInfo.GetShape(),
2610  qScale,
2611  qOffset,
2612  layout,
2613  padLeft,
2614  padTop,
2615  padRight,
2616  padBottom,
2617  strideX,
2618  strideY,
2619  dilationX,
2620  dilationY);
2621 }
const TensorShape & GetShape() const
Definition: Tensor.hpp:191
int32_t GetQuantizationOffset() const
Definition: Tensor.cpp:480
float GetQuantizationScale() const
Definition: Tensor.cpp:463
void SetQuantizationScale(float scale)
Definition: Tensor.cpp:475
void SetQuantizationOffset(int32_t offset)
Definition: Tensor.cpp:491

◆ DepthwiseConvolution2dAsymmetricTest()

LayerTestResult<float, 4> DepthwiseConvolution2dAsymmetricTest ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory,
bool  biasEnabled,
const armnn::DataLayout  layout 
)

Definition at line 3579 of file Conv2dTestImpl.cpp.

Referenced by TEST_SUITE().

3585 {
3586  return DepthwiseConvolution2dAsymmetricTestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
3587  workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0, biasEnabled, layout);
3588 }

◆ DepthwiseConvolution2dAsymmetricTestCommon()

LayerTestResult<T, 4> DepthwiseConvolution2dAsymmetricTestCommon ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory,
float  qScale,
int32_t  qOffset,
bool  biasEnabled,
const armnn::DataLayout  layout 
)

Definition at line 2287 of file Conv2dTestImpl.cpp.

2295 {
2296  // Use a single-batch 2-channel 5x5 image as input.
2297  armnn::TensorInfo inputTensorInfo({ 1, 2, 5, 5 }, ArmnnType);
2298  auto input = QuantizedVector<T>(
2299  {
2300  0, 1, 2, 3, 4,
2301  5, 6, 7, 8, 9,
2302  10, 11, 12, 13, 14,
2303  15, 16, 17, 18, 19,
2304  20, 21, 22, 23, 24,
2305 
2306  25, 26, 27, 28, 29,
2307  30, 31, 32, 33, 34,
2308  35, 36, 37, 38, 39,
2309  40, 41, 42, 43, 44,
2310  45, 46, 47, 48, 49
2311  },
2312  inputTensorInfo.GetQuantizationScale(),
2313  inputTensorInfo.GetQuantizationOffset());
2314 
2315  // Use a depth multiplier of 1 on a 2-channel 4x4 kernel.
2316  // Weights layout for depthwise: [1,H,W,I*M]
2317  armnn::TensorInfo kernelTensorInfo({ 1, 4, 4, 2 }, ArmnnType);
2318  auto kernel = QuantizedVector<T>({
2319  32, 31, 30, 29,
2320  28, 27, 26, 25,
2321  24, 23, 22, 21,
2322  20, 19, 18, 17,
2323 
2324  16, 15, 14, 13,
2325  12, 11, 10, 9,
2326  8, 7, 6, 5,
2327  4, 3, 2, 1
2328  },
2329  kernelTensorInfo.GetQuantizationScale(),
2330  kernelTensorInfo.GetQuantizationOffset());
2331 
2332  // Expected output is 1 batch of a 2-channel 5x5 image.
2333  // Calculated using the python tensorflow library with strideX=1, strideY=1.
2334  armnn::TensorInfo outputTensorInfo({ 1, 2, 5, 5 }, ArmnnType);
2335  auto expectedOutput = QuantizedVector<T>(
2336  {
2337  396, 664, 820, 756, 602, 1016, 1608, 1880, 1652, 1268, 1976, 2968, 3240, 2732,
2338  2028, 2628, 3808, 4060, 3312, 2390, 2596, 3700, 3900, 3130, 2226, 2817, 4186,
2339  4330, 3609, 2651, 5414, 7864, 8120, 6626, 4780, 6314, 9144, 9400, 7646, 5500,
2340  6759, 9610, 9850, 7875, 5579, 5935, 8348, 8540, 6757, 4742
2341  },
2342  outputTensorInfo.GetQuantizationScale(),
2343  outputTensorInfo.GetQuantizationOffset());
2344 
2345  return DepthwiseConvolution2dAsymmetricTestImpl<ArmnnType, ArmnnBType>(
2346  workloadFactory,
2347  memoryManager,
2348  tensorHandleFactory,
2349  input,
2350  kernel,
2351  GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
2352  expectedOutput,
2353  inputTensorInfo.GetShape(),
2354  kernelTensorInfo.GetShape(),
2355  outputTensorInfo.GetShape(),
2356  qScale,
2357  qOffset,
2358  layout,
2359  1, // Padding left.
2360  1, // Padding top.
2361  2, // Padding right.
2362  2, // Padding bottom.
2363  1, // strideX
2364  1); // strideY
2365 }

◆ DepthwiseConvolution2dAsymmetricTestImpl()

LayerTestResult<T, 4> DepthwiseConvolution2dAsymmetricTestImpl ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory,
const std::vector< T > &  input,
const std::vector< T > &  kernel,
const std::vector< B > &  bias,
const std::vector< T > &  outputExpected,
const armnn::TensorShape inputShape,
const armnn::TensorShape kernelShape,
const armnn::TensorShape outputExpectedShape,
float  qScale,
int32_t  qOffset,
const armnn::DataLayout  layout,
uint32_t  padLeft = 0,
uint32_t  padTop = 0,
uint32_t  padRight = 0,
uint32_t  padBottom = 0,
uint32_t  strideX = 1,
uint32_t  strideY = 1 
)

Definition at line 1637 of file Conv2dTestImpl.cpp.

References AllocateAndCopyDataToITensorHandle(), ApplyBias(), ARMNN_ASSERT, CopyDataFromITensorHandle(), CopyDataToITensorHandle(), IWorkloadFactory::CreateDepthwiseConvolution2d(), ITensorHandleFactory::CreateTensorHandle(), TensorInfo::GetNumElements(), TensorInfo::GetQuantizationOffset(), TensorInfo::GetQuantizationScale(), TensorInfo::GetShape(), armnnUtils::GetTensorInfo(), armnn::info, DepthwiseConvolution2dQueueDescriptor::m_Weight, armnn::NHWC, armnn::numeric_cast(), armnnUtils::Permute(), TensorInfo::SetQuantizationOffset(), and TensorInfo::SetQuantizationScale().

1657 {
1658  unsigned int inputNum = armnn::numeric_cast<unsigned int>(inputShape[0]);
1659  unsigned int inputChannels = armnn::numeric_cast<unsigned int>(inputShape[1]);
1660  unsigned int inputHeight = armnn::numeric_cast<unsigned int>(inputShape[2]);
1661  unsigned int inputWidth = armnn::numeric_cast<unsigned int>(inputShape[3]);
1662  unsigned int kernelHeight = armnn::numeric_cast<unsigned int>(kernelShape[1]);
1663  unsigned int kernelWidth = armnn::numeric_cast<unsigned int>(kernelShape[2]);
1664  unsigned int kernelChannels = armnn::numeric_cast<unsigned int>(kernelShape[3]);
1665  unsigned int outputNum = armnn::numeric_cast<unsigned int>(outputExpectedShape[0]);
1666  unsigned int outputChannels = armnn::numeric_cast<unsigned int>(outputExpectedShape[1]);
1667  unsigned int outputHeight = armnn::numeric_cast<unsigned int>(outputExpectedShape[2]);
1668  unsigned int outputWidth = armnn::numeric_cast<unsigned int>(outputExpectedShape[3]);
1669 
1670  // If a bias is used, its size must equal the number of output channels.
1671  bool biasEnabled = bias.size() > 0;
1672  ARMNN_ASSERT(!biasEnabled || bias.size() == outputChannels);
1673 
1674  // Creates the tensors.
1675  armnn::TensorInfo inputTensorInfo =
1676  armnnUtils::GetTensorInfo(inputNum, inputChannels, inputHeight, inputWidth, layout, ArmnnType);
1677  armnn::TensorInfo outputTensorInfo =
1678  armnnUtils::GetTensorInfo(outputNum, outputChannels, outputHeight, outputWidth, layout, ArmnnType);
1679  armnn::TensorInfo kernelDesc({1, kernelHeight, kernelWidth, kernelChannels}, ArmnnType);
1680  armnn::TensorInfo biasDesc({static_cast<unsigned int>(bias.size())}, ArmnnBType);
1681 
1682  // Set quantization parameters if the requested type is a quantized type.
1683  if (armnn::IsQuantizedType<T>())
1684  {
1685  inputTensorInfo.SetQuantizationScale(qScale);
1686  inputTensorInfo.SetQuantizationOffset(qOffset);
1687  outputTensorInfo.SetQuantizationScale(qScale);
1688  outputTensorInfo.SetQuantizationOffset(qOffset);
1689  kernelDesc.SetQuantizationScale(qScale);
1690  kernelDesc.SetQuantizationOffset(qOffset);
1691  biasDesc.SetQuantizationScale(qScale*qScale);
1692  biasDesc.SetQuantizationOffset(0);
1693  }
1694 
1695  // Construct the input data.
1696  std::vector<T> inputData;
1697  inputData.assign(input.data(), input.data() + inputChannels*inputHeight*inputWidth);
1698 
1699  // At this point if we require it permute the input data
1700  const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
1701  if (layout == armnn::DataLayout::NHWC)
1702  {
1703  std::vector<T> tmp(inputData.size());
1704  armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(T));
1705  inputData = tmp;
1706  }
1707 
1708  // Construct the output data, with bias applied, as appropriate.
1709  std::vector<T> outputData;
1710  outputData.assign(outputExpected.data(), outputExpected.data() + outputChannels*outputHeight*outputWidth);
1711  if (biasEnabled)
1712  {
1713  std::vector<T> biasV;
1714  biasV.assign(bias.data(), bias.data() + outputChannels);
1715  ApplyBias(outputData, outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(),
1716  biasV, biasDesc.GetQuantizationScale(), biasDesc.GetQuantizationOffset(),
1717  outputWidth, outputHeight);
1718  }
1719 
1720  std::vector<T> actualOutput(outputTensorInfo.GetNumElements());
1721 
1722  // At this point if we require it permute the expected output
1723  if (layout == armnn::DataLayout::NHWC)
1724  {
1725  std::vector<T> tmp(outputData.size());
1726  armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp.data(), sizeof(T));
1727  outputData = tmp;
1728  }
1729 
1730  std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
1731  std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
1732 
1733  armnn::ScopedTensorHandle weightsTensor(kernelDesc);
1734 
1735  AllocateAndCopyDataToITensorHandle(&weightsTensor, kernel.data());
1736 
1737  armnn::ScopedTensorHandle biasTensor(biasDesc);
1738  if (biasEnabled)
1739  {
1740  AllocateAndCopyDataToITensorHandle(&biasTensor, bias.data());
1741  }
1742 
1743  armnn::DepthwiseConvolution2dQueueDescriptor data;
1744  data.m_Weight = &weightsTensor;
1745  data.m_Bias = &biasTensor; // Still set this whether or not bias is enabled - it can be a source of bugs.
1746  data.m_Parameters.m_StrideX = strideX;
1747  data.m_Parameters.m_StrideY = strideY;
1748  data.m_Parameters.m_PadLeft = padLeft;
1749  data.m_Parameters.m_PadRight = padRight;
1750  data.m_Parameters.m_PadTop = padTop;
1751  data.m_Parameters.m_PadBottom = padBottom;
1752  data.m_Parameters.m_BiasEnabled = biasEnabled;
1753  data.m_Parameters.m_DataLayout = layout;
1754 
1755  armnn::WorkloadInfo info;
1756  AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
1757  AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
1758 
1759  std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateDepthwiseConvolution2d(data, info);
1760  inputHandle->Allocate();
1761  outputHandle->Allocate();
1762 
1763  CopyDataToITensorHandle(inputHandle.get(), inputData.data());
1764 
1765  ExecuteWorkload(*workload, memoryManager);
1766 
1767  CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
1768 
1769  return LayerTestResult<T, 4>(actualOutput,
1770  outputData,
1771  outputHandle->GetShape(),
1772  outputTensorInfo.GetShape());
1773 }
const TensorShape & GetShape() const
Definition: Tensor.hpp:191
void ApplyBias(std::vector< T > &v, float vScale, int32_t vOffset, const std::vector< B > &bias, float bScale, int32_t bOffset, uint32_t w, uint32_t h)
void Permute(const armnn::TensorShape &dstShape, const armnn::PermutationVector &mappings, const void *src, void *dst, size_t dataTypeSize)
Definition: Permute.cpp:131
int32_t GetQuantizationOffset() const
Definition: Tensor.cpp:480
float GetQuantizationScale() const
Definition: Tensor.cpp:463
void SetQuantizationScale(float scale)
Definition: Tensor.cpp:475
#define ARMNN_ASSERT(COND)
Definition: Assert.hpp:14
void AllocateAndCopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)
void CopyDataFromITensorHandle(void *memory, const armnn::ITensorHandle *tensorHandle)
std::enable_if_t< std::is_unsigned< Source >::value &&std::is_unsigned< Dest >::value, Dest > numeric_cast(Source source)
Definition: NumericCast.hpp:35
armnn::TensorInfo GetTensorInfo(unsigned int numberOfBatches, unsigned int numberOfChannels, unsigned int height, unsigned int width, const armnn::DataLayout dataLayout, const armnn::DataType dataType)
Definition: TensorUtils.cpp:38
Contains information about TensorInfos of a layer.
void SetQuantizationOffset(int32_t offset)
Definition: Tensor.cpp:491
virtual std::unique_ptr< IWorkload > CreateDepthwiseConvolution2d(const DepthwiseConvolution2dQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo) const =0
Depthwise Convolution 2D layer workload data.
unsigned int GetNumElements() const
Definition: Tensor.hpp:196
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)

◆ DepthwiseConvolution2dDepthMul1Int16Test()

LayerTestResult<int16_t, 4> DepthwiseConvolution2dDepthMul1Int16Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory,
bool  biasEnabled,
const armnn::DataLayout  layout 
)

Definition at line 3637 of file Conv2dTestImpl.cpp.

Referenced by TEST_SUITE().

3643 {
3644  return DepthwiseConvolution2dDepthMul1TestImpl<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
3645  workloadFactory, memoryManager, tensorHandleFactory, 0.5f, 50, biasEnabled, layout);
3646 }

◆ DepthwiseConvolution2dDepthMul1Test()

LayerTestResult<float, 4> DepthwiseConvolution2dDepthMul1Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory,
bool  biasEnabled,
const armnn::DataLayout  layout 
)

Definition at line 3525 of file Conv2dTestImpl.cpp.

Referenced by TEST_SUITE().

3531 {
3532  return DepthwiseConvolution2dDepthMul1TestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
3533  workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0, biasEnabled, layout);
3534 }

◆ DepthwiseConvolution2dDepthMul1TestImpl()

LayerTestResult<T, 4> DepthwiseConvolution2dDepthMul1TestImpl ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory,
float  qScale,
int32_t  qOffset,
bool  biasEnabled,
const armnn::DataLayout  layout 
)

Definition at line 1776 of file Conv2dTestImpl.cpp.

References AllocateAndCopyDataToITensorHandle(), ApplyBias(), CopyDataFromITensorHandle(), CopyDataToITensorHandle(), IWorkloadFactory::CreateDepthwiseConvolution2d(), ITensorHandleFactory::CreateTensorHandle(), TensorInfo::GetNumElements(), TensorInfo::GetQuantizationOffset(), TensorInfo::GetQuantizationScale(), TensorInfo::GetShape(), armnnUtils::GetTensorInfo(), armnn::info, DepthwiseConvolution2dQueueDescriptor::m_Bias, DepthwiseConvolution2dDescriptor::m_BiasEnabled, DepthwiseConvolution2dDescriptor::m_DataLayout, DepthwiseConvolution2dDescriptor::m_PadBottom, DepthwiseConvolution2dDescriptor::m_PadLeft, DepthwiseConvolution2dDescriptor::m_PadRight, DepthwiseConvolution2dDescriptor::m_PadTop, QueueDescriptorWithParameters< LayerDescriptor >::m_Parameters, DepthwiseConvolution2dDescriptor::m_StrideX, DepthwiseConvolution2dDescriptor::m_StrideY, DepthwiseConvolution2dQueueDescriptor::m_Weight, armnn::NHWC, armnnUtils::Permute(), TensorInfo::SetQuantizationOffset(), and TensorInfo::SetQuantizationScale().

1784 {
1785  using B = armnn::ResolveType<ArmnnBType>;
1786 
1787  unsigned int inputHeight = 3;
1788  unsigned int inputWidth = 3;
1789  unsigned int inputChannels = 2;
1790  unsigned int inputNum = 1;
1791 
1792  unsigned int kernelHeight = 3;
1793  unsigned int kernelWidth = 3;
1794 
1795  unsigned int outputHeight = 1;
1796  unsigned int outputWidth = 1;
1797  unsigned int outputChannels = inputChannels;
1798  unsigned int outputNum = inputNum;
1799 
1800  armnn::TensorInfo inputTensorInfo =
1801  armnnUtils::GetTensorInfo(inputNum, inputChannels, inputHeight, inputWidth, layout, ArmnnType);
1802  armnn::TensorInfo outputTensorInfo =
1803  armnnUtils::GetTensorInfo(outputNum, outputChannels, outputHeight, outputWidth, layout, ArmnnType);
1804  armnn::TensorInfo kernelDesc({1, kernelHeight, kernelWidth, outputChannels},
1805  ArmnnType);
1806  armnn::TensorInfo biasDesc({ outputChannels }, ArmnnBType);
1807 
1808  // Set quantization parameters if the requested type is a quantized type.
1809  if(armnn::IsQuantizedType<T>())
1810  {
1811  inputTensorInfo.SetQuantizationScale(qScale);
1812  inputTensorInfo.SetQuantizationOffset(qOffset);
1813  outputTensorInfo.SetQuantizationScale(qScale);
1814  outputTensorInfo.SetQuantizationOffset(qOffset);
1815  kernelDesc.SetQuantizationScale(qScale);
1816  kernelDesc.SetQuantizationOffset(qOffset);
1817  biasDesc.SetQuantizationScale(qScale*qScale);
1818  biasDesc.SetQuantizationOffset(0);
1819  }
1820  std::vector<T> inputData = std::vector<T>(
1821  QuantizedVector<T>({
1822  1.f, 2.f, 1.f,
1823  2.f, 1.f, 2.f,
1824  1.f, 2.f, 1.f,
1825 
1826  1.f, 2.f, 1.f,
1827  2.f, 1.f, 2.f,
1828  1.f, 2.f, 1.f,
1829  },
1830  inputTensorInfo.GetQuantizationScale(),
1831  inputTensorInfo.GetQuantizationOffset()));
1832 
1833  // at this point if we require it permute the input data
1834  const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
1835  if (layout == armnn::DataLayout::NHWC)
1836  {
1837  std::vector<T> tmp(inputData.size());
1838  armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(T));
1839  inputData = tmp;
1840  }
1841 
1842  std::vector<B> biasV(QuantizedVector<B>({ 0, 2 },
1843  biasDesc.GetQuantizationScale(),
1844  biasDesc.GetQuantizationOffset()));
1845 
1846  std::vector<T> kernelData = std::vector<T>(
1847  QuantizedVector<T>({
1848  1.f, 0.f, 1.f,
1849  0.f, 0.f, 0.f,
1850  -1.f, 0.f, -1.f,
1851 
1852  1.f, 0.f, 1.f,
1853  0.f, 0.f, 0.f,
1854  -1.f, 0.f, -1.f,
1855  },
1856  kernelDesc.GetQuantizationScale(),
1857  kernelDesc.GetQuantizationOffset()));
1858 
1859  // Manually calculated.
1860  std::vector<T> outputImage(
1861  QuantizedVector<T>({ 0.f, 0.f },
1862  outputTensorInfo.GetQuantizationScale(),
1863  outputTensorInfo.GetQuantizationOffset())
1864  );
1865 
1866  // Optionally apply bias to output image.
1867  if(biasEnabled)
1868  {
1869  ApplyBias(outputImage, outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(),
1870  biasV, biasDesc.GetQuantizationScale(), biasDesc.GetQuantizationOffset(),
1871  outputWidth, outputHeight);
1872  }
1873 
1874  if (layout == armnn::DataLayout::NHWC)
1875  {
1876  std::vector<T> tmp(outputImage.size());
1877  armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputImage.data(), tmp.data(), sizeof(T));
1878  outputImage = tmp;
1879  }
1880 
1881  std::vector<T> actualOutput(outputTensorInfo.GetNumElements());
1882 
1883  std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
1884  std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
1885 
1886  armnn::DepthwiseConvolution2dQueueDescriptor data;
1887  armnn::WorkloadInfo info;
1888  armnn::ScopedTensorHandle weightsTensor(kernelDesc);
1889  armnn::ScopedTensorHandle biasTensor(biasDesc);
1890 
1891  AllocateAndCopyDataToITensorHandle(&weightsTensor, kernelData.data());
1892  AllocateAndCopyDataToITensorHandle(&biasTensor, biasV.data());
1893 
1894  AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
1895  AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
1896 
1897  data.m_Weight = &weightsTensor;
1898  data.m_Bias = &biasTensor; // Still set this whether or not bias is enabled.
1899  data.m_Parameters.m_StrideX = 1;
1900  data.m_Parameters.m_StrideY = 1;
1901  data.m_Parameters.m_PadLeft = 0;
1902  data.m_Parameters.m_PadRight = 0;
1903  data.m_Parameters.m_PadTop = 0;
1904  data.m_Parameters.m_PadBottom = 0;
1905  data.m_Parameters.m_BiasEnabled = biasEnabled;
1906  data.m_Parameters.m_DataLayout = layout;
1907 
1908  std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateDepthwiseConvolution2d(data, info);
1909  inputHandle->Allocate();
1910  outputHandle->Allocate();
1911 
1912  CopyDataToITensorHandle(inputHandle.get(), inputData.data());
1913 
1914  ExecuteWorkload(*workload, memoryManager);
1915 
1916  CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
1917 
1918  return LayerTestResult<T, 4>(actualOutput,
1919  outputImage,
1920  outputHandle->GetShape(),
1921  outputTensorInfo.GetShape());
1922 }
bool m_BiasEnabled
Enable/disable bias.
const TensorShape & GetShape() const
Definition: Tensor.hpp:191
uint32_t m_PadBottom
Padding bottom value in the height dimension.
void ApplyBias(std::vector< T > &v, float vScale, int32_t vOffset, const std::vector< B > &bias, float bScale, int32_t bOffset, uint32_t w, uint32_t h)
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
uint32_t m_PadLeft
Padding left value in the width dimension.
typename ResolveTypeImpl< DT >::Type ResolveType
Definition: ResolveType.hpp:79
void Permute(const armnn::TensorShape &dstShape, const armnn::PermutationVector &mappings, const void *src, void *dst, size_t dataTypeSize)
Definition: Permute.cpp:131
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
uint32_t m_PadTop
Padding top value in the height dimension.
int32_t GetQuantizationOffset() const
Definition: Tensor.cpp:480
float GetQuantizationScale() const
Definition: Tensor.cpp:463
void SetQuantizationScale(float scale)
Definition: Tensor.cpp:475
void AllocateAndCopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)
void CopyDataFromITensorHandle(void *memory, const armnn::ITensorHandle *tensorHandle)
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
armnn::TensorInfo GetTensorInfo(unsigned int numberOfBatches, unsigned int numberOfChannels, unsigned int height, unsigned int width, const armnn::DataLayout dataLayout, const armnn::DataType dataType)
Definition: TensorUtils.cpp:38
Contains information about TensorInfos of a layer.
void SetQuantizationOffset(int32_t offset)
Definition: Tensor.cpp:491
virtual std::unique_ptr< IWorkload > CreateDepthwiseConvolution2d(const DepthwiseConvolution2dQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo) const =0
Depthwise Convolution 2D layer workload data.
unsigned int GetNumElements() const
Definition: Tensor.hpp:196
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)
uint32_t m_PadRight
Padding right value in the width dimension.

◆ DepthwiseConvolution2dDepthMul1Uint8Test()

LayerTestResult<uint8_t, 4> DepthwiseConvolution2dDepthMul1Uint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory,
bool  biasEnabled,
const armnn::DataLayout  layout 
)

Definition at line 3601 of file Conv2dTestImpl.cpp.

Referenced by TEST_SUITE().

3607 {
3608  return DepthwiseConvolution2dDepthMul1TestImpl<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
3609  workloadFactory, memoryManager, tensorHandleFactory, 0.5f, 50, biasEnabled, layout);
3610 }

◆ DepthwiseConvolution2dDepthMul64Test()

LayerTestResult<float, 4> DepthwiseConvolution2dDepthMul64Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 3536 of file Conv2dTestImpl.cpp.

References armnn::Float32, armnn::GetDataTypeSize(), armnn::NCHW, armnnUtils::Permute(), and armnnUtils::Permuted().

Referenced by TEST_SUITE().

3540 {
3541  armnn::TensorInfo inputTensorInfo({ 1, 1, 2, 2 }, armnn::DataType::Float32);
3542  std::vector<float> input = { 1.f, 2.f, 3.f, 4.f };
3543 
3544  std::vector<float> kernelData;
3545  std::vector<float> singleDepthKernel{ 1.f, -1.f, -1.f, 1.f };
3546  for (unsigned int i = 0; i < 64; ++i)
3547  {
3548  kernelData.insert(kernelData.end(), singleDepthKernel.begin(), singleDepthKernel.end());
3549  }
3550  armnn::TensorInfo kernelTensorInfo({ 64, 1, 2, 2 }, armnn::DataType::Float32);
3551 
3552  // permute from [O,1,H,W] --> [1,H,W,O]
3553  armnn::PermutationVector permutationVector {3,0,1,2};
3554  kernelTensorInfo = armnnUtils::Permuted(kernelTensorInfo, permutationVector);
3555  std::vector<float> kernelPermuted(kernelTensorInfo.GetNumElements());
3556  armnnUtils::Permute(kernelTensorInfo.GetShape(), permutationVector,
3557  kernelData.data(), kernelPermuted.data(),
3558  GetDataTypeSize(kernelTensorInfo.GetDataType()));
3559 
3560  std::vector<float> expectedOutputData(64, 0.f);
3561  armnn::TensorInfo outputTensorInfo({ 1, 64, 1, 1 }, armnn::DataType::Float32);
3562 
3563  return DepthwiseConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
3564  workloadFactory,
3565  memoryManager,
3566  tensorHandleFactory,
3567  input,
3568  kernelPermuted,
3569  std::vector<float>(),
3570  expectedOutputData,
3571  inputTensorInfo.GetShape(),
3572  kernelTensorInfo.GetShape(),
3573  outputTensorInfo.GetShape(),
3574  0.f,
3575  0,
3576  armnn::DataLayout::NCHW);
3577 }
void Permute(const armnn::TensorShape &dstShape, const armnn::PermutationVector &mappings, const void *src, void *dst, size_t dataTypeSize)
Definition: Permute.cpp:131
armnn::TensorShape Permuted(const armnn::TensorShape &srcShape, const armnn::PermutationVector &mappings)
Definition: Permute.cpp:98
constexpr unsigned int GetDataTypeSize(DataType dataType)
Definition: TypesUtils.hpp:151

◆ DepthwiseConvolution2dDepthNhwcTest()

LayerTestResult<float, 4> DepthwiseConvolution2dDepthNhwcTest ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory,
bool  biasEnabled 
)

Definition at line 3515 of file Conv2dTestImpl.cpp.

Referenced by TEST_SUITE().

3520 {
3521  return DepthwiseConvolution2dNhwcTestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
3522  workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0, biasEnabled);
3523 }

◆ DepthwiseConvolution2dInt16Test()

LayerTestResult<int16_t, 4> DepthwiseConvolution2dInt16Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory,
bool  biasEnabled,
const armnn::DataLayout  layout 
)

Definition at line 3626 of file Conv2dTestImpl.cpp.

Referenced by TEST_SUITE().

3632 {
3633  return DepthwiseConvolution2dTestImpl<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
3634  workloadFactory, memoryManager, tensorHandleFactory, 0.5f, 50, biasEnabled, layout);
3635 }

◆ DepthwiseConvolution2dMult2Test()

LayerTestResult<T, 4> DepthwiseConvolution2dMult2Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory,
bool  biasEnabled,
const armnn::DataLayout  layout 
)

Definition at line 2829 of file Conv2dTestImpl.cpp.

2835 {
2836  armnn::TensorInfo inputTensorInfo({1, 2, 3, 3}, ArmnnType);
2837  std::vector<float> inputNoQuantizedValues =
2838  {
2839  10.0, 10.0, 10.0,
2840  10.0, 10.0, 10.0,
2841  10.0, 10.0, 10.0,
2842 
2843  21.0, 22.0, 23.0,
2844  24.0, 25.0, 26.0,
2845  27.0, 28.0, 29.0
2846  };
2847 
2848  armnn::TensorInfo kernelTensorInfo({ 1, 2, 2, 4}, ArmnnType);
2849 
2850  std::vector<float> kernelNoQuantizedValues =
2851  {
2852  0.25f, 0.25f,
2853  0.25f, 0.25f,
2854 
2855  0.2f , 0.0f,
2856  0.0f , 0.0f,
2857 
2858  0.0f , 0.0f,
2859  0.0f , 0.1f,
2860 
2861  0.0f , 0.3f,
2862  0.0f , 0.0f
2863 
2864  };
2865 
2866  armnn::TensorInfo outputTensorInfo({ 1, 4, 2, 2}, ArmnnType);
2867  std::vector<float> outputExpectedNoQuantizedValues =
2868  {
2869  4.5f, 4.5f, 4.5f, 4.5f,
2870  5.5f, 5.5f, 5.5f, 5.5f,
2871  5.25f, 5.5f, 6.0f, 6.25f,
2872  7.65f, 8.0f, 8.7f, 9.05f
2873  };
2874 
2875 
2876  return DepthwiseConvolution2d3x3DilationTestCommon<ArmnnType, ArmnnBType>(
2877  workloadFactory,
2878  memoryManager,
2879  tensorHandleFactory,
2880  inputNoQuantizedValues,
2881  inputTensorInfo,
2882  kernelNoQuantizedValues,
2883  kernelTensorInfo,
2884  outputExpectedNoQuantizedValues,
2885  outputTensorInfo,
2886  1,
2887  1,
2888  layout,
2889  biasEnabled);
2890 }

◆ DepthwiseConvolution2dMult2Test< armnn::DataType::BFloat16, armnn::DataType::BFloat16 >()

◆ DepthwiseConvolution2dMult2Test< armnn::DataType::Float32, armnn::DataType::Float32 >()

◆ DepthwiseConvolution2dMult4Test()

LayerTestResult<T, 4> DepthwiseConvolution2dMult4Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory,
bool  biasEnabled,
const armnn::DataLayout  layout 
)

Definition at line 2754 of file Conv2dTestImpl.cpp.

2760 {
2761  armnn::TensorInfo inputTensorInfo({1, 2, 3, 3}, ArmnnType);
2762  std::vector<float> inputNoQuantizedValues =
2763  {
2764  10.0, 10.0, 10.0,
2765  10.0, 10.0, 10.0,
2766  10.0, 10.0, 10.0,
2767 
2768  21.0, 22.0, 23.0,
2769  24.0, 25.0, 26.0,
2770  27.0, 28.0, 29.0
2771  };
2772 
2773  armnn::TensorInfo kernelTensorInfo({ 1, 2, 2, 8}, ArmnnType);
2774 
2775  std::vector<float> kernelNoQuantizedValues =
2776  {
2777  0.25f, 0.25f,
2778  0.25f, 0.25f,
2779 
2780  0.25f, 0.25f,
2781  0.25f, 0.25f,
2782 
2783  0.0f , 0.0f,
2784  0.0f , 0.1f,
2785 
2786  0.0f , 0.0f,
2787  0.0f , 0.1f,
2788 
2789  0.2f , 0.0f,
2790  0.0f , 0.0f,
2791 
2792  0.2f , 0.0f,
2793  0.0f , 0.0f,
2794 
2795  0.0f , 0.3f,
2796  0.0f , 0.0f,
2797 
2798  0.0f , 0.3f,
2799  0.0f , 0.0f
2800  };
2801 
2802  armnn::TensorInfo outputTensorInfo({ 1, 8, 2, 2}, ArmnnType);
2803  std::vector<float> outputExpectedNoQuantizedValues =
2804  {
2805  4.5f, 4.5f, 4.5f, 4.5f, 5.5f, 5.5f, 5.5f, 5.5f,
2806  2.5f, 2.5f, 2.5f, 2.5f, 3.5f, 3.5f, 3.5f, 3.5f,
2807  10.05f, 10.5f, 11.4f, 11.85f, 12.75f, 13.3f, 14.4f, 14.95f,
2808  5.25f, 5.5f, 6.0f, 6.25f, 7.45f, 7.8f, 8.5f, 8.85f
2809  };
2810 
2811 
2812  return DepthwiseConvolution2d3x3DilationTestCommon<ArmnnType, ArmnnBType>(
2813  workloadFactory,
2814  memoryManager,
2815  tensorHandleFactory,
2816  inputNoQuantizedValues,
2817  inputTensorInfo,
2818  kernelNoQuantizedValues,
2819  kernelTensorInfo,
2820  outputExpectedNoQuantizedValues,
2821  outputTensorInfo,
2822  1,
2823  1,
2824  layout,
2825  biasEnabled);
2826 }

◆ DepthwiseConvolution2dMult4Test< armnn::DataType::BFloat16, armnn::DataType::BFloat16 >()

◆ DepthwiseConvolution2dMult4Test< armnn::DataType::Float32, armnn::DataType::Float32 >()

◆ DepthwiseConvolution2dNhwcTestCommon()

LayerTestResult<T, 4> DepthwiseConvolution2dNhwcTestCommon ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory,
float  qScale,
int32_t  qOffset,
bool  biasEnabled 
)

Definition at line 2369 of file Conv2dTestImpl.cpp.

References armnn::NHWC.

2376 {
2377  auto layout = armnn::DataLayout::NHWC;
2378 
2379  armnn::TensorInfo inputTensorInfo({ 1, 2, 5, 5}, ArmnnType);
2380  auto input = QuantizedVector<T>(
2381  {
2382  0, 1, 2, 3, 4,
2383  5, 6, 7, 8, 9,
2384  10, 11, 12, 13, 14,
2385  15, 16, 17, 18, 19,
2386  20, 21, 22, 23, 24,
2387 
2388  25, 26, 27, 28, 29,
2389  30, 31, 32, 33, 34,
2390  35, 36, 37, 38, 39,
2391  40, 41, 42, 43, 44,
2392  45, 46, 47, 48, 49
2393  },
2394  inputTensorInfo.GetQuantizationScale(),
2395  inputTensorInfo.GetQuantizationOffset());
2396 
2397  armnn::TensorInfo kernelTensorInfo({ 1, 4, 4, 2 }, ArmnnType);
2398  auto kernel = QuantizedVector<T>({
2399  32, 31, 30, 29,
2400  28, 27, 26, 25,
2401  24, 23, 22, 21,
2402  20, 19, 18, 17,
2403 
2404  16, 15, 14, 13,
2405  12, 11, 10, 9,
2406  8, 7, 6, 5,
2407  4, 3, 2, 1
2408  },
2409  kernelTensorInfo.GetQuantizationScale(),
2410  kernelTensorInfo.GetQuantizationOffset());
2411 
2412  armnn::TensorInfo outputTensorInfo({ 1, 2, 5, 5}, ArmnnType);
2413  auto expectedOutput = QuantizedVector<T>(
2414  {
2415  396,664,820,756,602,
2416  1016,1608,1880,1652,1268,
2417  1976,2968,3240,2732,2028,
2418  2628,3808,4060,3312,2390,
2419  2596,3700,3900,3130,2226,
2420 
2421  2817,4186,4330,3609,2651,
2422  5414,7864,8120,6626,4780,
2423  6314,9144,9400,7646,5500,
2424  6759,9610,9850,7875,5579,
2425  5935,8348,8540,6757,4742
2426  },
2427  outputTensorInfo.GetQuantizationScale(),
2428  outputTensorInfo.GetQuantizationOffset());
2429 
2430  return DepthwiseConvolution2dTestImpl<ArmnnType, ArmnnBType>(
2431  workloadFactory,
2432  memoryManager,
2433  tensorHandleFactory,
2434  input,
2435  kernel,
2436  GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
2437  expectedOutput,
2438  inputTensorInfo.GetShape(),
2439  kernelTensorInfo.GetShape(),
2440  outputTensorInfo.GetShape(),
2441  qScale,
2442  qOffset,
2443  layout,
2444  1, // Padding left.
2445  1, // Padding top.
2446  2, // Padding right.
2447  2, // Padding bottom.
2448  1, // strideX
2449  1); // strideY
2450 }

◆ DepthwiseConvolution2dPerAxisQuantTest()

LayerTestResult<uint8_t, 4> DepthwiseConvolution2dPerAxisQuantTest ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory,
const armnn::DataLayout  layout 
)

Definition at line 3648 of file Conv2dTestImpl.cpp.

References AllocateAndCopyDataToITensorHandle(), CopyDataFromITensorHandle(), CopyDataToITensorHandle(), IWorkloadFactory::CreateDepthwiseConvolution2d(), ITensorHandleFactory::CreateTensorHandle(), QueueDescriptorWithParameters< LayerDescriptor >::m_Parameters, DepthwiseConvolution2dDescriptor::m_StrideX, armnn::NCHW, and PermuteTensorNhwcToNchw().

Referenced by TEST_SUITE().

3653 {
3654  using namespace armnn;
3655 
3656  const DataType inputType = DataType::QAsymmU8;
3657  const DataType kernelType = DataType::QSymmS8;
3658  const DataType biasType = DataType::Signed32;
3659 
3660  TensorInfo inputInfo ({ 1, 3, 3, 2 }, inputType, 0.5f, 128); // N H W C
3661  TensorInfo outputInfo({ 1, 2, 2, 4 }, inputType, 1.0f, 128); // N H W C
3662 
3663  const std::vector<float> quantScales{ 1.0f, 0.5f, 1.0f, 0.5f };
3664  const unsigned int quantDimension = 3;
3665  TensorInfo kernelInfo({ 1, 2, 2, 4 }, kernelType, quantScales, quantDimension); // [1, H, W, I*M]
3666 
3667  const std::vector<float> biasQuantScales{ 0.5f, 0.25f, 0.5f, 0.25f };
3668  constexpr unsigned int biasQuantDimension = 0;
3669  TensorInfo biasInfo({ 4 }, biasType, biasQuantScales, biasQuantDimension);
3670 
3671  std::vector<uint8_t> inputData =
3672  {
3673  129, 130,
3674  129, 130,
3675  129, 130,
3676  129, 130,
3677  129, 130,
3678  129, 130,
3679  129, 130,
3680  129, 130,
3681  129, 130
3682  };
3683 
3684  std::vector<int8_t> kernelData =
3685  {
3686  1, 1, 1, 1,
3687  1, 1, 1, 1,
3688  1, 1, 1, 1,
3689  1, 1, 1, 1
3690  };
3691 
3692  std::vector<int32_t> biasData =
3693  {
3694  4, 4, 4, 4
3695  };
3696 
3697  std::vector<uint8_t> expectedOutputData =
3698  {
3699  132, 130, 134, 131,
3700  132, 130, 134, 131,
3701  132, 130, 134, 131,
3702  132, 130, 134, 131
3703  };
3704 
3705  if (layout == DataLayout::NCHW)
3706  {
3707  PermuteTensorNhwcToNchw(inputInfo, inputData);
3708  PermuteTensorNhwcToNchw(outputInfo, expectedOutputData);
3709  }
3710 
3711  std::vector<uint8_t> actualOutput(outputInfo.GetNumElements());
3712 
3713  DepthwiseConvolution2dDescriptor descriptor;
3714  descriptor.m_StrideX = 1;
3715  descriptor.m_StrideY = 1;
3716  descriptor.m_PadLeft = 0;
3717  descriptor.m_PadRight = 0;
3718  descriptor.m_PadTop = 0;
3719  descriptor.m_PadBottom = 0;
3720  descriptor.m_DilationX = 1;
3721  descriptor.m_DilationY = 1;
3722  descriptor.m_BiasEnabled = true;
3723  descriptor.m_DataLayout = layout;
3724 
3725  std::unique_ptr<ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputInfo);
3726  std::unique_ptr<ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputInfo);
3727 
3728  WorkloadInfo workloadInfo;
3729  ScopedTensorHandle weightTensor(kernelInfo);
3730  ScopedTensorHandle biasTensor(biasInfo);
3731 
3732  AllocateAndCopyDataToITensorHandle(&weightTensor, kernelData.data());
3733  AllocateAndCopyDataToITensorHandle(&biasTensor, biasData.data());
3734 
3735  DepthwiseConvolution2dQueueDescriptor queueDescriptor;
3736  queueDescriptor.m_Parameters = descriptor;
3737  queueDescriptor.m_Weight = &weightTensor;
3738  queueDescriptor.m_Bias = &biasTensor;
3739 
3740  AddInputToWorkload(queueDescriptor, workloadInfo, inputInfo, inputHandle.get());
3741  AddOutputToWorkload(queueDescriptor, workloadInfo, outputInfo, outputHandle.get());
3742 
3743  std::unique_ptr<IWorkload> workload = workloadFactory.CreateDepthwiseConvolution2d(queueDescriptor, workloadInfo);
3744  inputHandle->Allocate();
3745  outputHandle->Allocate();
3746 
3747  CopyDataToITensorHandle(inputHandle.get(), inputData.data());
3748 
3749  ExecuteWorkload(*workload, memoryManager);
3750 
3751  LayerTestResult<uint8_t, 4> ret(outputInfo);
3752 
3753  CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
3754 
3755  return LayerTestResult<uint8_t, 4>(actualOutput,
3756  expectedOutputData,
3757  outputHandle->GetShape(),
3758  outputInfo.GetShape());
3759 }
Copyright (c) 2021 ARM Limited and Contributors.
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
DataType
Definition: Types.hpp:35
void AllocateAndCopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)
void CopyDataFromITensorHandle(void *memory, const armnn::ITensorHandle *tensorHandle)
Contains information about TensorInfos of a layer.
virtual std::unique_ptr< IWorkload > CreateDepthwiseConvolution2d(const DepthwiseConvolution2dQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo) const =0
A DepthwiseConvolution2dDescriptor for the DepthwiseConvolution2dLayer.
Depthwise Convolution 2D layer workload data.
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)
void PermuteTensorNhwcToNchw(armnn::TensorInfo &tensorInfo, std::vector< T > &tensorData)

◆ DepthwiseConvolution2dTest()

LayerTestResult<float, 4> DepthwiseConvolution2dTest ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory,
bool  biasEnabled,
const armnn::DataLayout  layout 
)

Definition at line 3504 of file Conv2dTestImpl.cpp.

Referenced by TEST_SUITE().

3510 {
3511  return DepthwiseConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
3512  workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0, biasEnabled, layout);
3513 }

◆ DepthwiseConvolution2dTestImpl() [1/2]

LayerTestResult<T, 4> DepthwiseConvolution2dTestImpl ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory,
float  qScale,
int32_t  qOffset,
bool  biasEnabled,
const armnn::DataLayout  layout 
)

Definition at line 1925 of file Conv2dTestImpl.cpp.

References AllocateAndCopyDataToITensorHandle(), ApplyBias(), CopyDataFromITensorHandle(), CopyDataToITensorHandle(), IWorkloadFactory::CreateDepthwiseConvolution2d(), ITensorHandleFactory::CreateTensorHandle(), TensorInfo::GetNumElements(), TensorInfo::GetQuantizationOffset(), TensorInfo::GetQuantizationScale(), TensorInfo::GetShape(), armnnUtils::GetTensorInfo(), armnn::info, DepthwiseConvolution2dQueueDescriptor::m_Bias, DepthwiseConvolution2dDescriptor::m_BiasEnabled, DepthwiseConvolution2dDescriptor::m_DataLayout, DepthwiseConvolution2dDescriptor::m_PadBottom, DepthwiseConvolution2dDescriptor::m_PadLeft, DepthwiseConvolution2dDescriptor::m_PadRight, DepthwiseConvolution2dDescriptor::m_PadTop, QueueDescriptorWithParameters< LayerDescriptor >::m_Parameters, DepthwiseConvolution2dDescriptor::m_StrideX, DepthwiseConvolution2dDescriptor::m_StrideY, DepthwiseConvolution2dQueueDescriptor::m_Weight, armnn::NHWC, armnnUtils::Permute(), TensorInfo::SetQuantizationOffset(), and TensorInfo::SetQuantizationScale().

1933 {
1935 
1936  unsigned int depthMultiplier = 2;
1937 
1938  unsigned int inputHeight = 8;
1939  unsigned int inputWidth = 16;
1940  unsigned int inputChannels = 2;
1941  unsigned int inputBatchSize = 1;
1942 
1943  unsigned int kernelHeight = 5;
1944  unsigned int kernelWidth = 3;
1945 
1946  unsigned int outputHeight = inputHeight - kernelHeight + 1 + 2;
1947  unsigned int outputWidth = (inputWidth - kernelWidth + 1)/2;
1948  unsigned int outputChannels = inputChannels * depthMultiplier;
1949  unsigned int outputBatchSize = inputBatchSize;
1950 
1951  armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo(
1952  inputBatchSize, inputChannels, inputHeight, inputWidth, layout, ArmnnType);
1953  armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo(
1954  outputBatchSize, outputChannels, outputHeight, outputWidth, layout, ArmnnType);
1955  armnn::TensorInfo kernelDesc({1, kernelHeight, kernelWidth, outputChannels},
1956  ArmnnType);
1957  armnn::TensorInfo biasDesc({outputChannels}, ArmnnBType);
1958 
1959  // Set quantization parameters if the requested type is a quantized type.
1960  if(armnn::IsQuantizedType<T>())
1961  {
1962  inputTensorInfo.SetQuantizationScale(qScale);
1963  inputTensorInfo.SetQuantizationOffset(qOffset);
1964  outputTensorInfo.SetQuantizationScale(qScale);
1965  outputTensorInfo.SetQuantizationOffset(qOffset);
1966  kernelDesc.SetQuantizationScale(qScale);
1967  kernelDesc.SetQuantizationOffset(qOffset);
1968  biasDesc.SetQuantizationScale(qScale*qScale);
1969  biasDesc.SetQuantizationOffset(0);
1970  }
1971 
1972  // NOTE: originalInputData is in NCHW format
1973  std::vector<T> originalInputData = std::vector<T>(
1974  QuantizedVector<T>({
1975  0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
1976  0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
1977  0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
1978  0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
1979  0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
1980  0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
1981  0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
1982  0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
1983  0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
1984  0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
1985  0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
1986  0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
1987  0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
1988  0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
1989  0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
1990  0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f
1991  },
1992  inputTensorInfo.GetQuantizationScale(),
1993  inputTensorInfo.GetQuantizationOffset()));
1994 
1995  std::vector<T> inputData = originalInputData;
1996  // at this point if we require it permute the input data
1997  const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
1998  if (layout == armnn::DataLayout::NHWC)
1999  {
2000  armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC,
2001  originalInputData.data(), inputData.data(), sizeof(T));
2002  }
2003 
2004  std::vector<B> biasV = QuantizedVector<B>({ 0, 2, 1, -1 },
2005  biasDesc.GetQuantizationScale(),
2006  biasDesc.GetQuantizationOffset());
2007 
2008  std::vector<T> kernelData = std::vector<T>(
2009  QuantizedVector<T>({
2010  1, 1, 1,
2011  1, -1, 1,
2012  1, 1, 1,
2013  1, 1, 1,
2014  1, 1, 1,
2015 
2016  2, 2, 2,
2017  2, 2, 2,
2018  2, 2, 2,
2019  2, 2, 2,
2020  2, 2, 2,
2021 
2022  0, 0, 0,
2023  0, -1, 0,
2024  0, 0, 0,
2025  0, 0, 0,
2026  0, 0, 0,
2027 
2028  0, 0, 0,
2029  0, 0, 0,
2030  0, 1, 0,
2031  0, 0, 0,
2032  0, 0, 0
2033  },
2034  kernelDesc.GetQuantizationScale(),
2035  kernelDesc.GetQuantizationOffset()));
2036 
2037  // Manually calculated.
2038  std::vector<T> originalOutputImage = std::vector<T>(
2039  QuantizedVector<T>({
2040  3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
2041  5, 5, 5, 5, 5, 5, 5, 5.5, 5.5, 5.5, 5.5, 5.5, 5.5, 5.5,
2042  5.5, 5.5, 5.5, 5.5, 5.5, 5.5, 5.5, 5, 5, 5, 5, 5, 5, 5,
2043  2.5, 2.5, 2.5, 2.5, 2.5, 2.5, 2.5, 3.5, 3.5, 3.5, 3.5, 3.5, 3.5, 3.5,
2044  4.5, 4.5, 4.5, 4.5, 4.5, 4.5, 4.5, 6, 6, 6, 6, 6, 6, 6,
2045  6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
2046  1, 3, 0, 0, 0, 0, 0, 2, 4, 0, 0, 0, 0, 0,
2047  2, 4, 0, 0, 0, 0, 0, 2, 4, 0, 0, 0, 0, 0,
2048  2, 4, 0, 0, 0, 0, 0, 2, 4, 0, 0, 0, 0, 0,
2049  2, 4, 0, 0, 0, 0, 0, 3, 5, 0, 0, 0, 0, 0,
2050  3, 5, 0, 0, 0, 0, 0, 3, 5, 0, 0, 0, 0, 0,
2051  3, 5, 0, 0, 0, 0, 0, 3, 5, 0, 0, 0, 0, 0
2052  },
2053  outputTensorInfo.GetQuantizationScale(),
2054  outputTensorInfo.GetQuantizationOffset()));
2055 
2056  // Optionally apply bias to output image.
2057  if(biasEnabled)
2058  {
2059  ApplyBias(originalOutputImage,
2060  outputTensorInfo.GetQuantizationScale(),
2061  outputTensorInfo.GetQuantizationOffset(),
2062  biasV,
2063  biasDesc.GetQuantizationScale(),
2064  biasDesc.GetQuantizationOffset(),
2065  outputWidth,
2066  outputHeight);
2067  }
2068 
2069  std::vector<T> outputImage = originalOutputImage;
2070  if (layout == armnn::DataLayout::NHWC)
2071  {
2072  armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC,
2073  originalOutputImage.data(), outputImage.data(), sizeof(T));
2074  }
2075 
2076  std::vector<T> actualOutput(outputTensorInfo.GetNumElements());
2077 
2078  std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
2079  std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
2080 
2081  armnn::DepthwiseConvolution2dQueueDescriptor data;
2082  armnn::WorkloadInfo info;
2083  armnn::ScopedTensorHandle weightsTensor(kernelDesc);
2084  armnn::ScopedTensorHandle biasTensor(biasDesc);
2085 
2086  AllocateAndCopyDataToITensorHandle(&weightsTensor, kernelData.data());
2087  AllocateAndCopyDataToITensorHandle(&biasTensor, biasV.data());
2088 
2089  AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
2090  AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
2091 
2092  data.m_Weight = &weightsTensor;
2093  data.m_Bias = &biasTensor; // Still set this whether or not bias is enabled.
2094  data.m_Parameters.m_StrideX = 2;
2095  data.m_Parameters.m_StrideY = 1;
2096  data.m_Parameters.m_PadLeft = 0;
2097  data.m_Parameters.m_PadRight = 0;
2098  data.m_Parameters.m_PadTop = 1;
2099  data.m_Parameters.m_PadBottom = 1;
2100  data.m_Parameters.m_BiasEnabled = biasEnabled;
2101  data.m_Parameters.m_DataLayout = layout;
2102 
2103  std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateDepthwiseConvolution2d(data, info);
2104  inputHandle->Allocate();
2105  outputHandle->Allocate();
2106 
2107  CopyDataToITensorHandle(inputHandle.get(), inputData.data());
2108 
2109  ExecuteWorkload(*workload, memoryManager);
2110 
2111  CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
2112 
2113  return LayerTestResult<T, 4>(actualOutput,
2114  outputImage,
2115  outputHandle->GetShape(),
2116  outputTensorInfo.GetShape());
2117 
2118 }
bool m_BiasEnabled
Enable/disable bias.
const TensorShape & GetShape() const
Definition: Tensor.hpp:191
uint32_t m_PadBottom
Padding bottom value in the height dimension.
void ApplyBias(std::vector< T > &v, float vScale, int32_t vOffset, const std::vector< B > &bias, float bScale, int32_t bOffset, uint32_t w, uint32_t h)
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
uint32_t m_PadLeft
Padding left value in the width dimension.
typename ResolveTypeImpl< DT >::Type ResolveType
Definition: ResolveType.hpp:79
void Permute(const armnn::TensorShape &dstShape, const armnn::PermutationVector &mappings, const void *src, void *dst, size_t dataTypeSize)
Definition: Permute.cpp:131
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
uint32_t m_PadTop
Padding top value in the height dimension.
int32_t GetQuantizationOffset() const
Definition: Tensor.cpp:480
float GetQuantizationScale() const
Definition: Tensor.cpp:463
void SetQuantizationScale(float scale)
Definition: Tensor.cpp:475
void AllocateAndCopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)
void CopyDataFromITensorHandle(void *memory, const armnn::ITensorHandle *tensorHandle)
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
armnn::TensorInfo GetTensorInfo(unsigned int numberOfBatches, unsigned int numberOfChannels, unsigned int height, unsigned int width, const armnn::DataLayout dataLayout, const armnn::DataType dataType)
Definition: TensorUtils.cpp:38
Contains information about TensorInfos of a layer.
void SetQuantizationOffset(int32_t offset)
Definition: Tensor.cpp:491
virtual std::unique_ptr< IWorkload > CreateDepthwiseConvolution2d(const DepthwiseConvolution2dQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo) const =0
Depthwise Convolution 2D layer workload data.
unsigned int GetNumElements() const
Definition: Tensor.hpp:196
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)
uint32_t m_PadRight
Padding right value in the width dimension.

◆ DepthwiseConvolution2dTestImpl() [2/2]

LayerTestResult<T, 4> DepthwiseConvolution2dTestImpl ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory,
const std::vector< T > &  originalInput,
const std::vector< T > &  originalKernel,
const std::vector< B > &  bias,
const std::vector< T > &  originalOutputExpected,
const armnn::TensorShape originalInputShape,
const armnn::TensorShape originalKernelShape,
const armnn::TensorShape originalOutputExpectedShape,
float  qScale,
int32_t  qOffset,
const armnn::DataLayout  layout = armnn::DataLayout::NCHW,
uint32_t  padLeft = 0,
uint32_t  padTop = 0,
uint32_t  padRight = 0,
uint32_t  padBottom = 0,
uint32_t  strideX = 1,
uint32_t  strideY = 1,
uint32_t  dilationX = 1,
uint32_t  dilationY = 1 
)

Definition at line 2122 of file Conv2dTestImpl.cpp.

References AllocateAndCopyDataToITensorHandle(), ApplyBias(), ARMNN_ASSERT, CopyDataFromITensorHandle(), CopyDataToITensorHandle(), IWorkloadFactory::CreateDepthwiseConvolution2d(), ITensorHandleFactory::CreateTensorHandle(), TensorInfo::GetNumElements(), TensorInfo::GetQuantizationOffset(), TensorInfo::GetQuantizationScale(), TensorInfo::GetShape(), armnnUtils::GetTensorInfo(), armnn::info, DepthwiseConvolution2dQueueDescriptor::m_Bias, DepthwiseConvolution2dDescriptor::m_BiasEnabled, DepthwiseConvolution2dDescriptor::m_DataLayout, DepthwiseConvolution2dDescriptor::m_DilationX, DepthwiseConvolution2dDescriptor::m_DilationY, DepthwiseConvolution2dDescriptor::m_PadBottom, DepthwiseConvolution2dDescriptor::m_PadLeft, DepthwiseConvolution2dDescriptor::m_PadRight, DepthwiseConvolution2dDescriptor::m_PadTop, QueueDescriptorWithParameters< LayerDescriptor >::m_Parameters, DepthwiseConvolution2dDescriptor::m_StrideX, DepthwiseConvolution2dDescriptor::m_StrideY, DepthwiseConvolution2dQueueDescriptor::m_Weight, armnn::NHWC, armnn::numeric_cast(), armnnUtils::Permute(), TensorInfo::SetQuantizationOffset(), and TensorInfo::SetQuantizationScale().

2144 {
2145  unsigned int inputHeight = armnn::numeric_cast<unsigned int>(originalInputShape[2]);
2146  unsigned int inputWidth = armnn::numeric_cast<unsigned int>(originalInputShape[3]);
2147  unsigned int inputChannels = armnn::numeric_cast<unsigned int>(originalInputShape[1]);
2148  unsigned int inputNum = armnn::numeric_cast<unsigned int>(originalInputShape[0]);
2149 
2150  unsigned int outputHeight = armnn::numeric_cast<unsigned int>(originalOutputExpectedShape[2]);
2151  unsigned int outputWidth = armnn::numeric_cast<unsigned int>(originalOutputExpectedShape[3]);
2152  unsigned int outputChannels = armnn::numeric_cast<unsigned int>(originalOutputExpectedShape[1]);
2153  unsigned int outputNum = armnn::numeric_cast<unsigned int>(originalOutputExpectedShape[0]);
2154 
2155  unsigned int kernelHeight = armnn::numeric_cast<unsigned int>(originalKernelShape[1]);
2156  unsigned int kernelWidth = armnn::numeric_cast<unsigned int>(originalKernelShape[2]);
2157  unsigned int kernelChannels = armnn::numeric_cast<unsigned int>(originalKernelShape[3]);
2158 
2159  bool biasEnabled = bias.size() > 0;
2160 
2161  // This function currently assumes 1 batch of input/output (and duplicates this into 2 batches).
2162  ARMNN_ASSERT(inputNum == 1);
2163  ARMNN_ASSERT(outputNum == 1);
2164 
2165  // If a bias is used, its size must equal the number of output channels.
2166  ARMNN_ASSERT(!biasEnabled || bias.size() == outputChannels);
2167 
2168 
2169  // Note these tensors will use two (identical) batches.
2170  armnn::TensorInfo inputTensorInfo =
2171  armnnUtils::GetTensorInfo(2*inputNum, inputChannels, inputHeight, inputWidth, layout, ArmnnType);
2172  armnn::TensorInfo outputTensorInfo =
2173  armnnUtils::GetTensorInfo(2*outputNum, outputChannels, outputHeight, outputWidth, layout, ArmnnType);
2174 
2175  // Kernel must always use the [1, H, W, I*M] layout, independently of the layout of the input and output for depthwise convolution.
2176  armnn::TensorInfo kernelDesc({1, kernelHeight, kernelWidth, kernelChannels}, ArmnnType);
2177 
2178  armnn::TensorInfo biasDesc({static_cast<unsigned int>(bias.size())}, ArmnnBType);
2179 
2180  // Set quantization parameters if the requested type is a quantized type.
2181  if(armnn::IsQuantizedType<T>())
2182  {
2183  inputTensorInfo.SetQuantizationScale(qScale);
2184  inputTensorInfo.SetQuantizationOffset(qOffset);
2185  outputTensorInfo.SetQuantizationScale(qScale);
2186  outputTensorInfo.SetQuantizationOffset(qOffset);
2187  kernelDesc.SetQuantizationScale(qScale);
2188  kernelDesc.SetQuantizationOffset(qOffset);
2189  biasDesc.SetQuantizationScale(qScale*qScale);
2190  biasDesc.SetQuantizationOffset(0);
2191  }
2192 
2193  // Construct input data
2194  std::vector<T> input;
2195  input.assign(originalInput.data(), originalInput.data() + 1*inputChannels*inputHeight*inputWidth);
2196  std::vector<T> inputData;
2197  inputData.insert(inputData.end(), input.begin(), input.end());
2198  inputData.insert(inputData.end(), input.begin(), input.end());
2199 
2200  // at this point if we require it permute the input data
2201  const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
2202  if (layout == armnn::DataLayout::NHWC)
2203  {
2204  std::vector<T> tmp(inputData.size());
2205  armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(T));
2206  inputData = tmp;
2207  }
2208 
2209  std::vector<T> output;
2210  output.assign(originalOutputExpected.data(),
2211  originalOutputExpected.data() + outputChannels*outputHeight*outputWidth);
2212 
2213  // Apply bias to output data if it is enabled.
2214  if(biasEnabled)
2215  {
2216  std::vector<T> biasV;
2217  biasV.assign(bias.data(), bias.data() + outputChannels);
2218  ApplyBias(output, outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(),
2219  biasV, biasDesc.GetQuantizationScale(), biasDesc.GetQuantizationOffset(),
2220  outputWidth, outputHeight);
2221  }
2222 
2223  std::vector<T> actualOutput(outputTensorInfo.GetNumElements());
2224 
2225  // Construct expected output data
2226  std::vector<T> outputData;
2227  outputData.insert(outputData.end(), output.begin(), output.end());
2228  outputData.insert(outputData.end(), output.begin(), output.end());
2229 
2230  // at this point if we require it permute the expected output
2231  if (layout == armnn::DataLayout::NHWC)
2232  {
2233  std::vector<T> tmp(outputData.size());
2234  armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp.data(), sizeof(T));
2235  outputData = tmp;
2236  }
2237 
2238  std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
2239  std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
2240 
2241  armnn::DepthwiseConvolution2dQueueDescriptor data;
2242  armnn::WorkloadInfo info;
2243  armnn::ScopedTensorHandle weightsTensor(kernelDesc);
2244  armnn::ScopedTensorHandle biasTensor(biasDesc);
2245 
2246  AllocateAndCopyDataToITensorHandle(&weightsTensor, originalKernel.data());
2247 
2248  if(biasEnabled)
2249  {
2250  AllocateAndCopyDataToITensorHandle(&biasTensor, bias.data());
2251  }
2252 
2253  AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
2254  AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
2255 
2256  data.m_Weight = &weightsTensor;
2257  data.m_Bias = &biasTensor; // Still set this whether or not bias is enabled - can be a source of bugs.
2258  data.m_Parameters.m_StrideX = strideX;
2259  data.m_Parameters.m_StrideY = strideY;
2260  data.m_Parameters.m_PadLeft = padLeft;
2261  data.m_Parameters.m_PadRight = padRight;
2262  data.m_Parameters.m_PadTop = padTop;
2263  data.m_Parameters.m_PadBottom = padBottom;
2264  data.m_Parameters.m_BiasEnabled = biasEnabled;
2265  data.m_Parameters.m_DataLayout = layout;
2266  data.m_Parameters.m_DilationX = dilationX;
2267  data.m_Parameters.m_DilationY = dilationY;
2268 
2269  std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateDepthwiseConvolution2d(data, info);
2270  inputHandle->Allocate();
2271  outputHandle->Allocate();
2272 
2273  CopyDataToITensorHandle(inputHandle.get(), inputData.data());
2274 
2275  ExecuteWorkload(*workload, memoryManager);
2276 
2277  CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
2278 
2279  return LayerTestResult<T, 4>(actualOutput,
2280  outputData,
2281  outputHandle->GetShape(),
2282  outputTensorInfo.GetShape());
2283 }
bool m_BiasEnabled
Enable/disable bias.
const TensorShape & GetShape() const
Definition: Tensor.hpp:191
uint32_t m_PadBottom
Padding bottom value in the height dimension.
void ApplyBias(std::vector< T > &v, float vScale, int32_t vOffset, const std::vector< B > &bias, float bScale, int32_t bOffset, uint32_t w, uint32_t h)
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
uint32_t m_PadLeft
Padding left value in the width dimension.
uint32_t m_DilationY
Dilation factor value for height dimension.
void Permute(const armnn::TensorShape &dstShape, const armnn::PermutationVector &mappings, const void *src, void *dst, size_t dataTypeSize)
Definition: Permute.cpp:131
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
uint32_t m_DilationX
Dilation factor value for width dimension.
uint32_t m_PadTop
Padding top value in the height dimension.
int32_t GetQuantizationOffset() const
Definition: Tensor.cpp:480
float GetQuantizationScale() const
Definition: Tensor.cpp:463
void SetQuantizationScale(float scale)
Definition: Tensor.cpp:475
#define ARMNN_ASSERT(COND)
Definition: Assert.hpp:14
void AllocateAndCopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)
void CopyDataFromITensorHandle(void *memory, const armnn::ITensorHandle *tensorHandle)
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
std::enable_if_t< std::is_unsigned< Source >::value &&std::is_unsigned< Dest >::value, Dest > numeric_cast(Source source)
Definition: NumericCast.hpp:35
armnn::TensorInfo GetTensorInfo(unsigned int numberOfBatches, unsigned int numberOfChannels, unsigned int height, unsigned int width, const armnn::DataLayout dataLayout, const armnn::DataType dataType)
Definition: TensorUtils.cpp:38
Contains information about TensorInfos of a layer.
void SetQuantizationOffset(int32_t offset)
Definition: Tensor.cpp:491
virtual std::unique_ptr< IWorkload > CreateDepthwiseConvolution2d(const DepthwiseConvolution2dQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo) const =0
Depthwise Convolution 2D layer workload data.
unsigned int GetNumElements() const
Definition: Tensor.hpp:196
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)
uint32_t m_PadRight
Padding right value in the width dimension.

◆ DepthwiseConvolution2dUint8Test()

LayerTestResult<uint8_t, 4> DepthwiseConvolution2dUint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory,
bool  biasEnabled,
const armnn::DataLayout  layout 
)

Definition at line 3590 of file Conv2dTestImpl.cpp.

Referenced by TEST_SUITE().

3596 {
3597  return DepthwiseConvolution2dTestImpl<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
3598  workloadFactory, memoryManager, tensorHandleFactory, 0.5f, 50, biasEnabled, layout);
3599 }

◆ GetBias()

std::vector<T> GetBias ( bool  biasEnabled,
float  qScale,
armnn::TensorInfo  outputInfo,
armnn::DataLayout  layout 
)

Definition at line 115 of file Conv2dTestImpl.cpp.

References DataLayoutIndexed::GetChannelsIndex(), and TensorInfo::GetShape().

116 {
117  const armnnUtils::DataLayoutIndexed dataLayoutIndexed(layout);
118  const unsigned int channelsIndex = dataLayoutIndexed.GetChannelsIndex();
119  const unsigned int outputChannels = outputInfo.GetShape()[channelsIndex];
120 
121  switch (outputChannels)
122  {
123  case 2:
124  default:
125  {
126  return GetBias2<ArmnnType>(biasEnabled, qScale);
127  }
128  case 4:
129  {
130  return GetBias4<ArmnnType>(biasEnabled, qScale);
131  }
132  case 8:
133  {
134  return GetBias8<ArmnnType>(biasEnabled, qScale);
135  }
136  }
137 }
const TensorShape & GetShape() const
Definition: Tensor.hpp:191
Provides access to the appropriate indexes for Channels, Height and Width based on DataLayout...

◆ GetBias2()

std::vector<T> GetBias2 ( bool  biasEnabled,
float  qScale 
)

Definition at line 73 of file Conv2dTestImpl.cpp.

74 {
75  if(biasEnabled)
76  {
77  return QuantizedVector<T>(Bias2, qScale, 0);
78  }
79  else
80  {
81  return std::vector<T>();
82  }
83 }

◆ GetBias4()

std::vector<T> GetBias4 ( bool  biasEnabled,
float  qScale 
)

Definition at line 87 of file Conv2dTestImpl.cpp.

88 {
89  if(biasEnabled)
90  {
91  return QuantizedVector<T>(Bias4, qScale, 0);
92  }
93  else
94  {
95  return std::vector<T>();
96  }
97 }

◆ GetBias8()

std::vector<T> GetBias8 ( bool  biasEnabled,
float  qScale 
)

Definition at line 101 of file Conv2dTestImpl.cpp.

102 {
103  if(biasEnabled)
104  {
105  return QuantizedVector<T>(Bias8, qScale, 0);
106  }
107  else
108  {
109  return std::vector<T>();
110  }
111 }

◆ SimpleConvolution2d3x3NhwcTest()

LayerTestResult<float, 4> SimpleConvolution2d3x3NhwcTest ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory,
bool  biasEnabled 
)

Definition at line 3290 of file Conv2dTestImpl.cpp.

References armnn::NHWC.

Referenced by TEST_SUITE().

3295 {
3296  return SimpleConvolution2d3x3NhwcTestCommon<armnn::DataType::Float32>(
3297  workloadFactory,
3298  memoryManager,
3299  tensorHandleFactory,
3300  0.f,
3301  0,
3302  biasEnabled,
3303  armnn::DataLayout::NHWC);
3304 }

◆ SimpleConvolution2d3x3NhwcTestCommon()

LayerTestResult<T, 4> SimpleConvolution2d3x3NhwcTestCommon ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory,
float  qScale,
int32_t  qOffset,
bool  biasEnabled,
armnn::DataLayout  dataLayout 
)

Definition at line 590 of file Conv2dTestImpl.cpp.

References armnn::IgnoreUnused().

598 {
599  armnn::IgnoreUnused(biasEnabled);
600  // Use a single-batch, 1-channel 3x4 input image.
601 
602  armnn::TensorInfo inputDesc({ 1, 3, 4, 1 }, ArmnnType);
603  std::vector<T> input =
604  {
605  1, 5, 2, 3,
606  8, 7, 3, 6,
607  3, 3, 9, 1
608  };
609 
610  // Use a single 1-channel 3x3 kernel.
611  armnn::TensorInfo kernelDesc({ 1, 3, 3, 1 }, ArmnnType);
612  std::vector<T> kernel =
613  {
614  4, 5, 6,
615  0, 0, 0,
616  3, 2, 1
617  };
618 
619  // Expected output is 1 batch of a 1-channel 3x4 image.
620  armnn::TensorInfo outputDesc({ 1, 3, 4, 1 }, ArmnnType);
621  const std::vector<float> outputData =
622  {
623  23, 41, 33, 21,
624  44, 65, 76, 52,
625  82, 85, 79, 42
626  };
627 
628  return SimpleConvolution2dNhwcTestImpl<ArmnnType, ArmnnType>(
629  workloadFactory,
630  memoryManager,
631  tensorHandleFactory,
632  input,
633  kernel,
634  std::vector<T>(),
635  outputData,
636  inputDesc.GetShape(),
637  kernelDesc.GetShape(),
638  outputDesc.GetShape(),
639  dataLayout,
640  qScale,
641  qOffset);
642 }
void IgnoreUnused(Ts &&...)

◆ SimpleConvolution2d3x3QSymm16Test()

LayerTestResult<int16_t, 4> SimpleConvolution2d3x3QSymm16Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory,
bool  biasEnabled,
const armnn::DataLayout  layout 
)

Definition at line 3345 of file Conv2dTestImpl.cpp.

Referenced by TEST_SUITE().

3351 {
3352  return SimpleConvolution2d3x3TestCommon<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
3353  workloadFactory, memoryManager, tensorHandleFactory, 0.5f, 50, biasEnabled, layout);
3354 }

◆ SimpleConvolution2d3x3Stride2x2Test()

LayerTestResult<float, 4> SimpleConvolution2d3x3Stride2x2Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory,
bool  biasEnabled,
const armnn::DataLayout  layout 
)

Definition at line 3306 of file Conv2dTestImpl.cpp.

Referenced by TEST_SUITE().

3312 {
3313  return SimpleConvolution2d3x3Stride2x2TestCommon<armnn::DataType::Float32>(
3314  workloadFactory,
3315  memoryManager,
3316  tensorHandleFactory,
3317  0.f,
3318  0,
3319  biasEnabled,
3320  layout);
3321 }

◆ SimpleConvolution2d3x3Stride2x2TestCommon()

LayerTestResult<T, 4> SimpleConvolution2d3x3Stride2x2TestCommon ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory,
float  qScale,
int32_t  qOffset,
bool  biasEnabled,
const armnn::DataLayout dataLayout 
)

Definition at line 645 of file Conv2dTestImpl.cpp.

References armnn::IgnoreUnused().

653 {
654  armnn::IgnoreUnused(biasEnabled);
655 
656  // Input is a single-batch, 1 channel, 5x5 image.
657  armnn::TensorInfo inputDesc({ 1, 5, 5, 1 }, ArmnnType);
658  std::vector<T> input =
659  {
660  1, 5, 2, 3, 5,
661  8, 7, 3, 6, 3,
662  3, 3, 9, 1, 9,
663  4, 1, 8, 1, 3,
664  6, 8, 1, 9, 2
665  };
666 
667  // Use a 3x3 kernel.
668  armnn::TensorInfo kernelDesc({ 1, 3, 3, 1 }, ArmnnType);
669  std::vector<T> kernel =
670  {
671  4, 5, 6,
672  0, 0, 0,
673  3, 2, 1
674  };
675 
676  // Expected output is a single-batch, 1 channel, 3x3 image.
677  armnn::TensorInfo outputDesc({ 1, 3, 3, 1 }, ArmnnType);
678  std::vector<T> outputData =
679  {
680  23, 33, 24,
681  91, 99, 48,
682  26, 50, 19
683  };
684 
685  uint32_t padLeft = 1;
686  uint32_t padTop = 1;
687  uint32_t padRight = 1;
688  uint32_t padBottom = 1;
689  uint32_t strideX = 2;
690  uint32_t strideY = 2;
691 
692  return SimpleConvolution2dNhwcTestImpl<ArmnnType, ArmnnType>(
693  workloadFactory,
694  memoryManager,
695  tensorHandleFactory,
696  input,
697  kernel,
698  std::vector<T>(),
699  outputData,
700  inputDesc.GetShape(),
701  kernelDesc.GetShape(),
702  outputDesc.GetShape(),
703  dataLayout,
704  qScale,
705  qOffset,
706  padLeft,
707  padTop,
708  padRight,
709  padBottom,
710  strideX,
711  strideY);
712 }
void IgnoreUnused(Ts &&...)

◆ SimpleConvolution2d3x3Test()

LayerTestResult<float, 4> SimpleConvolution2d3x3Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory,
bool  biasEnabled,
const armnn::DataLayout  layout 
)

Definition at line 3279 of file Conv2dTestImpl.cpp.

Referenced by TEST_SUITE().

3285 {
3286  return SimpleConvolution2d3x3TestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
3287  workloadFactory, memoryManager, tensorHandleFactory, 0.f, 0, biasEnabled, layout);
3288 }

◆ SimpleConvolution2d3x3TestCommon()

LayerTestResult<T, 4> SimpleConvolution2d3x3TestCommon ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory,
float  qScale,
int32_t  qOffset,
bool  biasEnabled,
const armnn::DataLayout  layout 
)

Definition at line 805 of file Conv2dTestImpl.cpp.

813 {
814  // Use a 3x3 kernel, which exercises ArmCompute's direct convolution path.
815 
816  // Use common single-batch 3-channel 16x8 image.
817  armnn::TensorInfo inputDesc({ 1, 3, 8, 16 }, ArmnnType);
818  std::vector<unsigned int> inputShape = { 1, 3, 8, 16 };
819  std::vector<T> input = QuantizedVector<T>(ConvInput3x8x16, qScale, qOffset);
820 
821  // Use a 2-element batch of 3-channel 3x3 kernels.
822  armnn::TensorInfo kernelDesc({ 2, 3, 3, 3 }, ArmnnType);
823  std::vector<T> kernel = QuantizedVector<T>({
824  1, 1, 1,
825  1, -1, 1,
826  1, 1, 1,
827 
828  0, 0, 0,
829  0, 0, 0,
830  0, 0, 0,
831 
832  2, 2, 2,
833  2, 2, 2,
834  2, 2, 2,
835 
836 
837  0, 0, 0,
838  0, 0, 0,
839  0, 0, 0,
840 
841  1, 1, 1,
842  1, 1, 1,
843  1, 1, 1,
844 
845  0, 0, 0,
846  0, 0, 0,
847  0, 0, 0
848  },
849  qScale, qOffset);
850 
851  // Expected output is 1 batch of a 2-channel 14x6 image.
852  armnn::TensorInfo outputDesc({ 1, 2, 6, 14 }, ArmnnType);
853  std::vector<T> expectedOutput = QuantizedVector<T>({
854  -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15,
855  -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16,
856  -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,
857  -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,
858  -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,
859  -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,
860 
861  3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
862  3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
863  3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
864  3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
865  3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
866  3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
867  },
868  qScale, qOffset);
869 
870  return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
871  workloadFactory,
872  memoryManager,
873  tensorHandleFactory,
874  input,
875  kernel,
876  GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
877  expectedOutput,
878  inputDesc.GetShape(),
879  kernelDesc.GetShape(),
880  outputDesc.GetShape(),
881  qScale,
882  qOffset,
883  layout);
884 }

◆ SimpleConvolution2d3x3Uint8Test()

LayerTestResult<uint8_t, 4> SimpleConvolution2d3x3Uint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory,
bool  biasEnabled,
const armnn::DataLayout  layout 
)

Definition at line 3323 of file Conv2dTestImpl.cpp.

Referenced by TEST_SUITE().

3329 {
3330  return SimpleConvolution2d3x3TestCommon<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
3331  workloadFactory, memoryManager, tensorHandleFactory, 0.5f, 50, biasEnabled, layout);
3332 }

◆ SimpleConvolution2d3x5QSymm16Test()

LayerTestResult<int16_t, 4> SimpleConvolution2d3x5QSymm16Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory,
bool  biasEnabled,
const armnn::DataLayout  layout 
)

Definition at line 3334 of file Conv2dTestImpl.cpp.

Referenced by TEST_SUITE().

3340 {
3341  return SimpleConvolution2d3x5TestCommon<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
3342  workloadFactory, memoryManager, tensorHandleFactory, 0.5f, 50, biasEnabled, layout);
3343 }

◆ SimpleConvolution2d3x5Test()

LayerTestResult<float, 4> SimpleConvolution2d3x5Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory,
bool  biasEnabled,
const armnn::DataLayout  layout 
)

Definition at line 3257 of file Conv2dTestImpl.cpp.

Referenced by TEST_SUITE().

3263 {
3264  return SimpleConvolution2d3x5TestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
3265  workloadFactory, memoryManager, tensorHandleFactory, 0.f, 0, biasEnabled, layout);
3266 }

◆ SimpleConvolution2d3x5TestCommon()

LayerTestResult<T, 4> SimpleConvolution2d3x5TestCommon ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory,
float  qScale,
int32_t  qOffset,
bool  biasEnabled,
const armnn::DataLayout  layout 
)

Definition at line 715 of file Conv2dTestImpl.cpp.

723 {
724  // Use common single-batch 3-channel 16x8 image.
725  armnn::TensorInfo inputDesc({ 1, 3, 8, 16 }, ArmnnType);
726  std::vector<T> input = QuantizedVector<T>(ConvInput3x8x16, qScale, qOffset);
727 
728  // Use a 2-element batch with 3-channel 3x5 kernels.
729  armnn::TensorInfo kernelDesc({ 2, 3, 5, 3 }, ArmnnType);
730  std::vector<T> kernel = QuantizedVector<T>({
731  1, 1, 1,
732  1, -1, 1,
733  1, 1, 1,
734  1, 1, 1,
735  1, 1, 1,
736 
737  0, 0, 0,
738  0, 0, 0,
739  0, 0, 0,
740  0, 0, 0,
741  0, 0, 0,
742 
743  2, 2, 2,
744  2, 2, 2,
745  2, 2, 2,
746  2, 2, 2,
747  2, 2, 2,
748 
749 
750  0, 0, 0,
751  0, 0, 0,
752  0, 0, 0,
753  0, 0, 0,
754  0, 0, 0,
755 
756  1, 1, 1,
757  1, 1, 1,
758  1, 1, 1,
759  1, 1, 1,
760  1, 1, 1,
761 
762  0, 0, 0,
763  0, 0, 0,
764  0, 0, 0,
765  0, 0, 0,
766  0, 0, 0
767  },
768  qScale, qOffset);
769 
770  // Expected output is 1 batch of a 2-channel 14x4 image.
771  armnn::TensorInfo outputDesc({ 1, 2, 4, 14 }, ArmnnType);
772  std::vector<T> expectedOutput = QuantizedVector<T>({
773  -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24,
774  -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25,
775  -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f,
776  -23.5f, -23.5f, -23.5f,
777  -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f,
778  -23.5f, -23.5f, -23.5f,
779 
780  5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
781  5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
782  5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
783  5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
784  },
785  qScale, qOffset);
786 
787  return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
788  workloadFactory,
789  memoryManager,
790  tensorHandleFactory,
791  input,
792  kernel,
793  GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
794  expectedOutput,
795  inputDesc.GetShape(),
796  kernelDesc.GetShape(),
797  outputDesc.GetShape(),
798  qScale,
799  qOffset,
800  layout);
801 }

◆ SimpleConvolution2d3x5Uint8Test()

LayerTestResult<uint8_t, 4> SimpleConvolution2d3x5Uint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory,
bool  biasEnabled,
const armnn::DataLayout  layout 
)

Definition at line 3268 of file Conv2dTestImpl.cpp.

Referenced by TEST_SUITE().

3274 {
3275  return SimpleConvolution2d3x5TestCommon<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
3276  workloadFactory, memoryManager, tensorHandleFactory, 0.5f, 50, biasEnabled, layout);
3277 }

◆ SimpleConvolution2dAsymmetricPaddingTestCommon()

LayerTestResult<T, 4> SimpleConvolution2dAsymmetricPaddingTestCommon ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory,
const armnn::DataLayout  layout,
float  qScale,
int32_t  qOffset 
)

Definition at line 960 of file Conv2dTestImpl.cpp.

967 {
968  // Use a single-batch 1-channel 5x5 image as input.
969  armnn::TensorInfo inputDesc({ 1, 1, 5, 5 }, ArmnnType);
970  std::vector<T> input =
971  QuantizedVector<T>({
972  11,21,31,41,51,
973  12,22,32,42,52,
974  13,23,33,43,53,
975  14,24,34,44,54,
976  15,25,35,45,55,
977  }, qScale, qOffset);
978 
979  // Use 1 batch of a 1-channel 4x4 kernel.
980  armnn::TensorInfo kernelDesc({ 1, 1, 4, 4 }, ArmnnType);
981  std::vector<T> kernel =
982  QuantizedVector<T>({
983  -11,-21,-31,-41,
984  -12,-22,-32,-42,
985  -13,-23,-33,-43,
986  -14,-24,-34,-44,
987  },
988  qScale, qOffset);
989 
990  // Expected output is 1 batch of a 1-channel 5x5 image.
991  armnn::TensorInfo outputDesc({ 1, 1, 5, 5 }, ArmnnType);
992  std::vector<T> expectedOutput =
993  QuantizedVector<T>({
994  -7140, -10580, -13940, -9300, -5230,
995  -9590, -14120, -18520, -12290, -6860,
996  -9980, -14560, -18960, -12560, -7000,
997  -7518, -10904, -14144, -9318, -5152,
998  -5032, -7256, -9376, -6142, -3368,
999  },
1000  qScale, qOffset);
1001 
1002  return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
1003  workloadFactory,
1004  memoryManager,
1005  tensorHandleFactory,
1006  input,
1007  kernel,
1008  GetBias2<ArmnnBType>(false, qScale * qScale),
1009  expectedOutput,
1010  inputDesc.GetShape(),
1011  kernelDesc.GetShape(),
1012  outputDesc.GetShape(),
1013  qScale,
1014  qOffset,
1015  layout,
1016  1, // Padding left.
1017  1, // Padding top.
1018  2, // Padding right.
1019  2); // Padding bottom.
1020 }

◆ SimpleConvolution2dNhwcTestImpl()

LayerTestResult<O, 4> SimpleConvolution2dNhwcTestImpl ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory,
const std::vector< T > &  input,
const std::vector< T > &  kernel,
const std::vector< B > &  bias,
const std::vector< O > &  outputExpected,
const armnn::TensorShape inputShape,
const armnn::TensorShape kernelShape,
const armnn::TensorShape outputExpectedShape,
const armnn::DataLayout  dataLayout,
float  qScale,
int32_t  qOffset,
uint32_t  padLeft = 1,
uint32_t  padTop = 1,
uint32_t  padRight = 1,
uint32_t  padBottom = 1,
uint32_t  strideX = 1,
uint32_t  strideY = 1 
)

Definition at line 366 of file Conv2dTestImpl.cpp.

References AllocateAndCopyDataToITensorHandle(), CopyDataFromITensorHandle(), CopyDataToITensorHandle(), IWorkloadFactory::CreateConvolution2d(), ITensorHandleFactory::CreateTensorHandle(), armnn::IgnoreUnused(), armnn::info, Convolution2dQueueDescriptor::m_Bias, Convolution2dDescriptor::m_BiasEnabled, Convolution2dDescriptor::m_DataLayout, Convolution2dDescriptor::m_PadBottom, Convolution2dDescriptor::m_PadLeft, Convolution2dDescriptor::m_PadRight, Convolution2dDescriptor::m_PadTop, QueueDescriptorWithParameters< LayerDescriptor >::m_Parameters, Convolution2dDescriptor::m_StrideX, Convolution2dDescriptor::m_StrideY, Convolution2dQueueDescriptor::m_Weight, and armnn::numeric_cast().

Referenced by Convolution2d3x3Stride2x2BFloat16SmallValueTest(), and Convolution2d3x3Stride2x2BFloat16Test().

386 {
387  armnn::IgnoreUnused(qScale, qOffset);
388  unsigned int inputNum = armnn::numeric_cast<unsigned int>(inputShape[0]);
389  unsigned int inputChannels = armnn::numeric_cast<unsigned int>(inputShape[3]);
390  unsigned int inputHeight = armnn::numeric_cast<unsigned int>(inputShape[1]);
391  unsigned int inputWidth = armnn::numeric_cast<unsigned int>(inputShape[2]);
392 
393  unsigned int kernelChanMul = armnn::numeric_cast<unsigned int>(kernelShape[0]);
394  unsigned int kernelChannels = armnn::numeric_cast<unsigned int>(kernelShape[3]);
395  unsigned int kernelHeight = armnn::numeric_cast<unsigned int>(kernelShape[1]);
396  unsigned int kernelWidth = armnn::numeric_cast<unsigned int>(kernelShape[2]);
397 
398  unsigned int outputNum = armnn::numeric_cast<unsigned int>(outputExpectedShape[0]);
399  unsigned int outputChannels = armnn::numeric_cast<unsigned int>(outputExpectedShape[3]);
400  unsigned int outputHeight = armnn::numeric_cast<unsigned int>(outputExpectedShape[1]);
401  unsigned int outputWidth = armnn::numeric_cast<unsigned int>(outputExpectedShape[2]);
402 
403  bool biasEnabled = bias.size() > 0;
404 
405  // Creates the tensors.
406  armnn::TensorInfo inputTensorInfo({inputNum, inputHeight, inputWidth, inputChannels}, ArmnnType);
407  armnn::TensorInfo outputTensorInfo({outputNum, outputHeight, outputWidth, outputChannels},
408  OutType);
409  armnn::TensorInfo kernelDesc({kernelChanMul, kernelHeight, kernelWidth, kernelChannels}, ArmnnType);
410  armnn::TensorInfo biasDesc({static_cast<unsigned int>(bias.size())}, ArmnnBType);
411 
412  // Construct the input data.
413  std::vector<T> inputData;
414  inputData.assign(input.data(), input.data() + inputHeight*inputWidth*inputChannels);
415 
416  // Construct the output data, with bias applied, as appropriate.
417  std::vector<O> outputData;
418  outputData.assign(outputExpected.data(), outputExpected.data() + outputHeight*outputWidth*outputChannels);
419 
420  std::vector<O> actualOutput(outputTensorInfo.GetNumElements());
421 
422  std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
423  std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
424 
425  armnn::ScopedTensorHandle weightsTensor(kernelDesc);
426  AllocateAndCopyDataToITensorHandle(&weightsTensor, kernel.data());
427 
428  armnn::ScopedTensorHandle biasTensor(biasDesc);
429 
430  armnn::Convolution2dQueueDescriptor data;
431 
432  data.m_Weight = &weightsTensor;
433  data.m_Bias = &biasTensor; // Still set this whether or not bias is enabled - can be a source of bugs.
434  data.m_Parameters.m_StrideX = strideX;
435  data.m_Parameters.m_StrideY = strideY;
436  data.m_Parameters.m_PadLeft = padLeft;
437  data.m_Parameters.m_PadRight = padRight;
438  data.m_Parameters.m_PadTop = padTop;
439  data.m_Parameters.m_PadBottom = padBottom;
440  data.m_Parameters.m_BiasEnabled = biasEnabled;
441  data.m_Parameters.m_DataLayout = dataLayout;
442 
443  armnn::WorkloadInfo info;
444  AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
445  AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
446 
447  std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConvolution2d(data, info);
448  inputHandle->Allocate();
449  outputHandle->Allocate();
450 
451  CopyDataToITensorHandle(inputHandle.get(), inputData.data());
452 
453  ExecuteWorkload(*workload, memoryManager);
454 
455  CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
456 
457  return LayerTestResult<O, 4>(actualOutput,
458  outputData,
459  outputHandle->GetShape(),
460  outputTensorInfo.GetShape());
461 }
uint32_t m_PadBottom
Padding bottom value in the height dimension.
bool m_BiasEnabled
Enable/disable bias.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
const ConstTensorHandle * m_Weight
const ConstTensorHandle * m_Bias
uint32_t m_PadRight
Padding right value in the width dimension.
void IgnoreUnused(Ts &&...)
uint32_t m_PadTop
Padding top value in the height dimension.
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
void AllocateAndCopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)
void CopyDataFromITensorHandle(void *memory, const armnn::ITensorHandle *tensorHandle)
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
std::enable_if_t< std::is_unsigned< Source >::value &&std::is_unsigned< Dest >::value, Dest > numeric_cast(Source source)
Definition: NumericCast.hpp:35
Contains information about TensorInfos of a layer.
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo) const =0
uint32_t m_PadLeft
Padding left value in the width dimension.
virtual std::unique_ptr< IWorkload > CreateConvolution2d(const Convolution2dQueueDescriptor &descriptor, const WorkloadInfo &info) const
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)

◆ SimpleConvolution2dTestImpl()

LayerTestResult<T, 4> SimpleConvolution2dTestImpl ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory,
const std::vector< T > &  originalInput,
const std::vector< T > &  originalKernel,
const std::vector< B > &  bias,
const std::vector< T > &  originalOutputExpected,
const armnn::TensorShape originalInputShape,
const armnn::TensorShape originalKernelShape,
const armnn::TensorShape originalOutputExpectedShape,
float  qScale,
int32_t  qOffset,
const armnn::DataLayout  layout = armnn::DataLayout::NCHW,
uint32_t  padLeft = 0,
uint32_t  padTop = 0,
uint32_t  padRight = 0,
uint32_t  padBottom = 0,
uint32_t  strideX = 1,
uint32_t  strideY = 1,
uint32_t  dilationX = 1,
uint32_t  dilationY = 1 
)

Definition at line 194 of file Conv2dTestImpl.cpp.

References AllocateAndCopyDataToITensorHandle(), ApplyBias(), ARMNN_ASSERT, CopyDataFromITensorHandle(), CopyDataToITensorHandle(), IWorkloadFactory::CreateConvolution2d(), ITensorHandleFactory::CreateTensorHandle(), TensorInfo::GetNumElements(), TensorInfo::GetQuantizationOffset(), TensorInfo::GetQuantizationScale(), TensorInfo::GetShape(), armnnUtils::GetTensorInfo(), armnn::IgnoreUnused(), armnn::info, Convolution2dQueueDescriptor::m_Bias, Convolution2dDescriptor::m_BiasEnabled, Convolution2dDescriptor::m_DataLayout, Convolution2dDescriptor::m_DilationX, Convolution2dDescriptor::m_DilationY, Convolution2dDescriptor::m_PadBottom, Convolution2dDescriptor::m_PadLeft, Convolution2dDescriptor::m_PadRight, Convolution2dDescriptor::m_PadTop, QueueDescriptorWithParameters< LayerDescriptor >::m_Parameters, Convolution2dDescriptor::m_StrideX, Convolution2dDescriptor::m_StrideY, Convolution2dQueueDescriptor::m_Weight, armnn::NHWC, armnn::numeric_cast(), armnnUtils::Permute(), TensorInfo::SetQuantizationOffset(), and TensorInfo::SetQuantizationScale().

216 {
217  armnn::IgnoreUnused(memoryManager);
218  unsigned int inputHeight = armnn::numeric_cast<unsigned int>(originalInputShape[2]);
219  unsigned int inputWidth = armnn::numeric_cast<unsigned int>(originalInputShape[3]);
220  unsigned int inputChannels = armnn::numeric_cast<unsigned int>(originalInputShape[1]);
221  unsigned int inputNum = armnn::numeric_cast<unsigned int>(originalInputShape[0]);
222 
223  unsigned int outputHeight = armnn::numeric_cast<unsigned int>(originalOutputExpectedShape[2]);
224  unsigned int outputWidth = armnn::numeric_cast<unsigned int>(originalOutputExpectedShape[3]);
225  unsigned int outputChannels = armnn::numeric_cast<unsigned int>(originalOutputExpectedShape[1]);
226  unsigned int outputNum = armnn::numeric_cast<unsigned int>(originalOutputExpectedShape[0]);
227 
228  unsigned int kernelHeight = armnn::numeric_cast<unsigned int>(originalKernelShape[2]);
229  unsigned int kernelWidth = armnn::numeric_cast<unsigned int>(originalKernelShape[3]);
230  unsigned int kernelChannels = armnn::numeric_cast<unsigned int>(originalKernelShape[1]);
231  unsigned int kernelDepthMul = armnn::numeric_cast<unsigned int>(originalKernelShape[0]);
232 
233  bool biasEnabled = bias.size() > 0;
234 
235  // This function currently assumes 1 batch of input/output (and duplicates this into 2 batches).
236  ARMNN_ASSERT(inputNum == 1);
237  ARMNN_ASSERT(outputNum == 1);
238 
239  // If a bias is used, its size must equal the number of output channels.
240  ARMNN_ASSERT(!biasEnabled || bias.size() == outputChannels);
241 
242  // Note these tensors will use two (identical) batches.
243  armnn::TensorInfo inputTensorInfo =
244  armnnUtils::GetTensorInfo(2*inputNum, inputChannels, inputHeight, inputWidth, layout, ArmnnType);
245  armnn::TensorInfo outputTensorInfo =
246  armnnUtils::GetTensorInfo(2*outputNum, outputChannels, outputHeight, outputWidth, layout, ArmnnType);
247  armnn::TensorInfo kernelDesc =
248  armnnUtils::GetTensorInfo(kernelDepthMul, kernelChannels, kernelHeight, kernelWidth, layout, ArmnnType);
249  armnn::TensorInfo biasDesc({static_cast<unsigned int>(bias.size())}, ArmnnBType);
250 
251  // Set quantization parameters if the requested type is a quantized type.
252  if(armnn::IsQuantizedType<T>())
253  {
254  inputTensorInfo.SetQuantizationScale(qScale);
255  inputTensorInfo.SetQuantizationOffset(qOffset);
256  outputTensorInfo.SetQuantizationScale(qScale);
257  outputTensorInfo.SetQuantizationOffset(qOffset);
258  kernelDesc.SetQuantizationScale(qScale);
259  kernelDesc.SetQuantizationOffset(qOffset);
260  biasDesc.SetQuantizationScale(qScale*qScale);
261  biasDesc.SetQuantizationOffset(0);
262  }
263 
264  // Construct input data - two batches of the same input image.
265  std::vector<T> inputImage;
266  inputImage.assign(originalInput.data(), originalInput.data() + 1*inputChannels*inputHeight*inputWidth);
267  std::vector<T> inputData;
268  inputData.insert(inputData.end(), inputImage.begin(), inputImage.end());
269  inputData.insert(inputData.end(), inputImage.begin(), inputImage.end());
270 
271  // at this point if we require it permute the input data
272  const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
273  if (layout == armnn::DataLayout::NHWC)
274  {
275  std::vector<T> tmp(inputData.size());
276  armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(T));
277  inputData = tmp;
278  }
279 
280  std::vector<T> outputImage;
281  outputImage.assign(originalOutputExpected.data(),
282  originalOutputExpected.data() + outputChannels*outputHeight*outputWidth);
283 
284  // Apply bias to output image if it is enabled.
285  if(biasEnabled)
286  {
287  std::vector<T> biasV;
288  biasV.assign(bias.data(), bias.data() + outputChannels);
289  ApplyBias(outputImage, outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(),
290  biasV, biasDesc.GetQuantizationScale(), biasDesc.GetQuantizationOffset(),
291  outputWidth, outputHeight);
292  }
293 
294  // Data will be copied from outputHandle
295  std::vector<T> actualOutput(outputTensorInfo.GetNumElements());
296 
297  // Construct expected output data - two identical images.
298  std::vector<T> expectedOutput;
299  expectedOutput.insert(expectedOutput.end(), outputImage.begin(), outputImage.end());
300  expectedOutput.insert(expectedOutput.end(), outputImage.begin(), outputImage.end());
301 
302  // at this point if we require it permute the expected output
303  if (layout == armnn::DataLayout::NHWC)
304  {
305  std::vector<T> tmp(expectedOutput.size());
306  armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, expectedOutput.data(), tmp.data(), sizeof(T));
307  expectedOutput = tmp;
308  }
309 
310  std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
311  std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
312 
313  armnn::Convolution2dQueueDescriptor data;
314  armnn::WorkloadInfo info;
315  armnn::ScopedTensorHandle weightsTensor(kernelDesc);
316  armnn::ScopedTensorHandle biasTensor(biasDesc);
317 
318  // Permute the kernel if necessary
319  std::vector<T> kernel = originalKernel;
320  if (layout == armnn::DataLayout::NHWC)
321  {
322  armnnUtils::Permute(kernelDesc.GetShape(), NCHWToNHWC, originalKernel.data(), kernel.data(), sizeof(T));
323  }
324  AllocateAndCopyDataToITensorHandle(&weightsTensor, kernel.data());
325 
326  if(biasEnabled)
327  {
328  AllocateAndCopyDataToITensorHandle(&biasTensor, bias.data());
329  }
330 
331  AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
332  AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
333 
334  data.m_Weight = &weightsTensor;
335  data.m_Bias = &biasTensor; // Still set this whether or not bias is enabled - can be a source of bugs.
336  data.m_Parameters.m_StrideX = strideX;
337  data.m_Parameters.m_StrideY = strideY;
338  data.m_Parameters.m_PadLeft = padLeft;
339  data.m_Parameters.m_PadRight = padRight;
340  data.m_Parameters.m_PadTop = padTop;
341  data.m_Parameters.m_PadBottom = padBottom;
342  data.m_Parameters.m_BiasEnabled = biasEnabled;
343  data.m_Parameters.m_DataLayout = layout;
344  data.m_Parameters.m_DilationX = dilationX;
345  data.m_Parameters.m_DilationY = dilationY;
346 
347  std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConvolution2d(data, info);
348  inputHandle->Allocate();
349  outputHandle->Allocate();
350 
351  CopyDataToITensorHandle(inputHandle.get(), inputData.data());
352 
353  ExecuteWorkload(*workload, memoryManager);
354 
355  CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
356 
357  return LayerTestResult<T, 4>(actualOutput,
358  expectedOutput,
359  outputHandle->GetShape(),
360  outputTensorInfo.GetShape());
361 }
uint32_t m_PadBottom
Padding bottom value in the height dimension.
bool m_BiasEnabled
Enable/disable bias.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
const TensorShape & GetShape() const
Definition: Tensor.hpp:191
void ApplyBias(std::vector< T > &v, float vScale, int32_t vOffset, const std::vector< B > &bias, float bScale, int32_t bOffset, uint32_t w, uint32_t h)
const ConstTensorHandle * m_Weight
const ConstTensorHandle * m_Bias
uint32_t m_PadRight
Padding right value in the width dimension.
void IgnoreUnused(Ts &&...)
uint32_t m_DilationY
Dilation along y axis.
uint32_t m_PadTop
Padding top value in the height dimension.
void Permute(const armnn::TensorShape &dstShape, const armnn::PermutationVector &mappings, const void *src, void *dst, size_t dataTypeSize)
Definition: Permute.cpp:131
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
int32_t GetQuantizationOffset() const
Definition: Tensor.cpp:480
float GetQuantizationScale() const
Definition: Tensor.cpp:463
void SetQuantizationScale(float scale)
Definition: Tensor.cpp:475
#define ARMNN_ASSERT(COND)
Definition: Assert.hpp:14
void AllocateAndCopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)
void CopyDataFromITensorHandle(void *memory, const armnn::ITensorHandle *tensorHandle)
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
uint32_t m_DilationX
Dilation along x axis.
std::enable_if_t< std::is_unsigned< Source >::value &&std::is_unsigned< Dest >::value, Dest > numeric_cast(Source source)
Definition: NumericCast.hpp:35
armnn::TensorInfo GetTensorInfo(unsigned int numberOfBatches, unsigned int numberOfChannels, unsigned int height, unsigned int width, const armnn::DataLayout dataLayout, const armnn::DataType dataType)
Definition: TensorUtils.cpp:38
Contains information about TensorInfos of a layer.
void SetQuantizationOffset(int32_t offset)
Definition: Tensor.cpp:491
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo) const =0
uint32_t m_PadLeft
Padding left value in the width dimension.
unsigned int GetNumElements() const
Definition: Tensor.hpp:196
virtual std::unique_ptr< IWorkload > CreateConvolution2d(const Convolution2dQueueDescriptor &descriptor, const WorkloadInfo &info) const
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)

◆ SimpleDepthwiseConvolution2d3x3Dilation3x3NhwcTest()

LayerTestResult<float, 4> SimpleDepthwiseConvolution2d3x3Dilation3x3NhwcTest ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 3612 of file Conv2dTestImpl.cpp.

Referenced by TEST_SUITE().

3616 {
3617  return SimpleDepthwiseConvolution2d3x3Dilation3x3NhwcTestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
3618  workloadFactory,
3619  memoryManager,
3620  tensorHandleFactory,
3621  0.f,
3622  0,
3623  false);
3624 }

◆ SimpleDepthwiseConvolution2d3x3Dilation3x3NhwcTestCommon()

LayerTestResult<T, 4> SimpleDepthwiseConvolution2d3x3Dilation3x3NhwcTestCommon ( armnn::IWorkloadFactory &  workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr &  memoryManager,
const armnn::ITensorHandleFactory &  tensorHandleFactory,
float  qScale,
int32_t  qOffset,
bool  biasEnabled 
)

Definition at line 2454 of file Conv2dTestImpl.cpp.

References armnn::NHWC.

2461 {
2462  auto layout = armnn::DataLayout::NHWC;
2463 
2464  armnn::TensorInfo inputTensorInfo({ 1, 1, 9, 9 }, ArmnnType);
2465  auto input = QuantizedVector<T>(
2466  {
2467  0, 0, 0, 0, 0, 0, 0, 0, 0,
2468  0, 0, 0, 0, 0, 0, 0, 0, 0,
2469  0, 0, 0, 0, 0, 0, 0, 0, 0,
2470  0, 0, 0, 1, 1, 1, 0, 0, 0,
2471  0, 0, 0, 1, 1, 1, 0, 0, 0,
2472  0, 0, 0, 1, 1, 1, 0, 0, 0,
2473  0, 0, 0, 0, 0, 0, 0, 0, 0,
2474  0, 0, 0, 0, 0, 0, 0, 0, 0,
2475  0, 0, 0, 0, 0, 0, 0, 0, 0
2476  },
2477  inputTensorInfo.GetQuantizationScale(),
2478  inputTensorInfo.GetQuantizationOffset());
2479 
2480  armnn::TensorInfo kernelTensorInfo({ 1, 3, 3, 1}, ArmnnType);
2481  auto kernel = QuantizedVector<T>({
2482  1, 2, 3,
2483  4, 5, 6,
2484  7, 8, 9
2485  },
2486  kernelTensorInfo.GetQuantizationScale(),
2487  kernelTensorInfo.GetQuantizationOffset());
2488 
2489  uint32_t padLeft = 0;
2490  uint32_t padTop = 0;
2491  uint32_t padRight = 0;
2492  uint32_t padBottom = 0;
2493  uint32_t strideX = 1;
2494  uint32_t strideY = 1;
2495  uint32_t dilationX = 3;
2496  uint32_t dilationY = 3;
2497 
2498  // Since the dilation rate is 3 this will reduce the size of the output from 9x9 to 3x3 of all 5s.
2499  armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, ArmnnType);
2500  auto expectedOutput = QuantizedVector<T>(
2501  {
2502  5, 5, 5,
2503  5, 5, 5,
2504  5, 5, 5
2505  },
2506  outputTensorInfo.GetQuantizationScale(),
2507  outputTensorInfo.GetQuantizationOffset());
2508 
2509  return DepthwiseConvolution2dTestImpl<ArmnnType, ArmnnBType>(
2510  workloadFactory,
2511  memoryManager,
2512  tensorHandleFactory,
2513  input,
2514  kernel,
2515  GetBias2<ArmnnBType>(biasEnabled, qScale * qScale),
2516  expectedOutput,
2517  inputTensorInfo.GetShape(),
2518  kernelTensorInfo.GetShape(),
2519  outputTensorInfo.GetShape(),
2520  qScale,
2521  qOffset,
2522  layout,
2523  padLeft,
2524  padTop,
2525  padRight,
2526  padBottom,
2527  strideX,
2528  strideY,
2529  dilationX,
2530  dilationY);
2531 }