From 747ef82c88f9afe14a8b80b6b3b34118353e97f2 Mon Sep 17 00:00:00 2001
From: Matteo Martincigh <matteo.martincigh@arm.com>
Date: Tue, 18 Dec 2018 09:26:39 +0000
Subject: MLCE-77 Depthwise Convolution with depth multiplier > 1 doesn't work

 * Unified ArmNN's weight format to [ M, I, H, W ] for the depthwise
   convolution
 * Added conversion utilities to permute/reshape the weights as appropriate
   when using CL and Neon backends
 * Updated the reference implementation of the convolution
 * Updated the relevant unit tests accordingly

!android-nn-driver:459

Change-Id: I07d0818efa9d1ca1e5dad82983aac1fe78eadb18
---
 .../backendsCommon/test/Conv2dTestImpl.hpp      | 64 ++++++++--------------
 src/backends/backendsCommon/test/LayerTests.cpp | 30 ++++------
 2 files changed, 33 insertions(+), 61 deletions(-)

(limited to 'src/backends/backendsCommon/test')

diff --git a/src/backends/backendsCommon/test/Conv2dTestImpl.hpp b/src/backends/backendsCommon/test/Conv2dTestImpl.hpp
index 37fa0f63d6..2ff66b08d5 100755
--- a/src/backends/backendsCommon/test/Conv2dTestImpl.hpp
+++ b/src/backends/backendsCommon/test/Conv2dTestImpl.hpp
@@ -327,7 +327,7 @@ LayerTestResult<T, 4> DepthwiseConvolution2dAsymmetricTestImpl(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     const boost::multi_array<T, 4>& input,
-    const boost::multi_array<T, 4>& originalKernel,
+    const boost::multi_array<T, 4>& kernel,
     const boost::multi_array<B, 1>& bias,
     const boost::multi_array<T, 4>& outputExpected,
     float qScale,
@@ -344,10 +344,10 @@ LayerTestResult<T, 4> DepthwiseConvolution2dAsymmetricTestImpl(
     unsigned int inputChannels  = boost::numeric_cast<unsigned int>(input.shape()[1]);
     unsigned int inputHeight    = boost::numeric_cast<unsigned int>(input.shape()[2]);
     unsigned int inputWidth     = boost::numeric_cast<unsigned int>(input.shape()[3]);
-    unsigned int kernelChanMul  = boost::numeric_cast<unsigned int>(originalKernel.shape()[0]);
-    unsigned int kernelChannels = boost::numeric_cast<unsigned int>(originalKernel.shape()[1]);
-    unsigned int kernelHeight   = boost::numeric_cast<unsigned int>(originalKernel.shape()[2]);
-    unsigned int kernelWidth    = boost::numeric_cast<unsigned int>(originalKernel.shape()[3]);
+    unsigned int kernelChanMul  = boost::numeric_cast<unsigned int>(kernel.shape()[0]);
+    unsigned int kernelChannels = boost::numeric_cast<unsigned int>(kernel.shape()[1]);
+    unsigned int kernelHeight   = boost::numeric_cast<unsigned int>(kernel.shape()[2]);
+    unsigned int kernelWidth    = boost::numeric_cast<unsigned int>(kernel.shape()[3]);
     unsigned int outputNum      = boost::numeric_cast<unsigned int>(outputExpected.shape()[0]);
     unsigned int outputChannels = boost::numeric_cast<unsigned int>(outputExpected.shape()[1]);
     unsigned int outputHeight   = boost::numeric_cast<unsigned int>(outputExpected.shape()[2]);
@@ -362,8 +362,7 @@ LayerTestResult<T, 4> DepthwiseConvolution2dAsymmetricTestImpl(
         armnnUtils::GetTensorInfo(inputNum, inputChannels, inputHeight, inputWidth, layout);
     armnn::TensorInfo outputTensorInfo =
         armnnUtils::GetTensorInfo(outputNum, outputChannels, outputHeight, outputWidth, layout);
-    armnn::TensorInfo kernelDesc =
-        armnnUtils::GetTensorInfo(kernelChanMul, kernelChannels, kernelHeight, kernelWidth, layout);
+    armnn::TensorInfo kernelDesc({kernelChanMul, kernelChannels, kernelHeight, kernelWidth}, armnn::GetDataType<T>());
     armnn::TensorInfo biasDesc({static_cast<unsigned int>(bias.size())}, armnn::GetDataType<B>());
 
     // Set quantization parameters if the requested type is a quantized type.
@@ -423,13 +422,6 @@ LayerTestResult<T, 4> DepthwiseConvolution2dAsymmetricTestImpl(
 
     armnn::ScopedCpuTensorHandle weightsTensor(kernelDesc);
 
-    // Permute the kernel if necessary
-    boost::multi_array<T, 4> kernel = boost::multi_array<T, 4>(originalKernel);
-    if (layout == armnn::DataLayout::NHWC)
-    {
-        armnnUtils::Permute(kernelDesc.GetShape(), NCHWToNHWC, originalKernel.data(), kernel.data());
-    }
-
     AllocateAndCopyDataToITensorHandle(&weightsTensor, &kernel[0][0][0][0]);
 
     armnn::ScopedCpuTensorHandle biasTensor(biasDesc);
@@ -484,6 +476,7 @@ LayerTestResult<T, 4> DepthwiseConvolution2dDepthMul1TestImpl(
     unsigned int kernelHeight = 3;
     unsigned int kernelWidth = 3;
     unsigned int kernelChannels = inputChannels;
+    unsigned int kernelDepthMultiplier = 1;
 
     unsigned int outputHeight = 1;
     unsigned int outputWidth = 1;
@@ -494,7 +487,8 @@ LayerTestResult<T, 4> DepthwiseConvolution2dDepthMul1TestImpl(
         armnnUtils::GetTensorInfo(inputNum, inputChannels, inputHeight, inputWidth, layout);
     armnn::TensorInfo outputTensorInfo =
         armnnUtils::GetTensorInfo(outputNum, outputChannels, outputHeight, outputWidth, layout);
-    armnn::TensorInfo kernelDesc = armnnUtils::GetTensorInfo(1, outputChannels, kernelHeight, kernelWidth, layout);
+    armnn::TensorInfo kernelDesc({kernelDepthMultiplier, kernelChannels, kernelHeight, kernelWidth},
+                                 armnn::GetDataType<T>());
     armnn::TensorInfo biasDesc({ outputChannels }, armnn::GetDataType<B>());
 
     // Set quantization parameters if the requested type is a quantized type.
@@ -543,12 +537,6 @@ LayerTestResult<T, 4> DepthwiseConvolution2dDepthMul1TestImpl(
         0.f, 0.f, 0.f,
         -1.f, 0.f, -1.f,
     }));
-    if (layout == armnn::DataLayout::NHWC)
-    {
-        std::vector<T> tmp(kernelData.size());
-        armnnUtils::Permute(kernelDesc.GetShape(), NCHWToNHWC, kernelData.data(), tmp.data());
-        kernelData = tmp;
-    }
     auto kernel = MakeTensor<T, 4>(kernelDesc, kernelData);
 
     // Manually calculated.
@@ -642,8 +630,8 @@ LayerTestResult<T, 4> DepthwiseConvolution2dTestImpl(
         inputBatchSize, inputChannels, inputHeight, inputWidth, layout);
     armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo(
         outputBatchSize, outputChannels, outputHeight, outputWidth, layout);
-    armnn::TensorInfo kernelDesc = armnnUtils::GetTensorInfo(
-        depthMultiplier, inputChannels, kernelHeight, kernelWidth, layout);
+    armnn::TensorInfo kernelDesc({depthMultiplier, inputChannels, kernelHeight, kernelWidth},
+                                 armnn::GetDataType<T>());
     armnn::TensorInfo biasDesc({outputChannels}, armnn::GetDataType<B>());
 
     // Set quantization parameters if the requested type is a quantized type.
@@ -692,7 +680,7 @@ LayerTestResult<T, 4> DepthwiseConvolution2dTestImpl(
         {0, 2, 1, -1}));
     auto bias = MakeTensor<B, 1>(biasDesc, biasV);
 
-    std::vector<T> originalKernelData = std::vector<T>(
+    std::vector<T> kernelData = std::vector<T>(
         QuantizedVector<T>(kernelDesc.GetQuantizationScale(), kernelDesc.GetQuantizationOffset(), {
             1, 1, 1,
             1, -1, 1,
@@ -717,12 +705,8 @@ LayerTestResult<T, 4> DepthwiseConvolution2dTestImpl(
             0, 1, 0,
             0, 0, 0,
             0, 0, 0
+        }));
 
-    std::vector<T> kernelData = originalKernelData;
-    if (layout == armnn::DataLayout::NHWC)
-    {
-        armnnUtils::Permute(kernelDesc.GetShape(), NCHWToNHWC, originalKernelData.data(), kernelData.data());
-    }
     auto kernel = MakeTensor<T, 4>(kernelDesc, kernelData);
 
     // Manually calculated.
@@ -840,9 +824,9 @@ LayerTestResult<T, 4> DepthwiseConvolution2dNhwcTestImpl(
     unsigned int inputWidth     = boost::numeric_cast<unsigned int>(input.shape()[2]);
 
     unsigned int kernelChanMul  = boost::numeric_cast<unsigned int>(kernel.shape()[0]);
-    unsigned int kernelChannels = boost::numeric_cast<unsigned int>(kernel.shape()[3]);
-    unsigned int kernelHeight   = boost::numeric_cast<unsigned int>(kernel.shape()[1]);
-    unsigned int kernelWidth    = boost::numeric_cast<unsigned int>(kernel.shape()[2]);
+    unsigned int kernelChannels = boost::numeric_cast<unsigned int>(kernel.shape()[1]);
+    unsigned int kernelHeight   = boost::numeric_cast<unsigned int>(kernel.shape()[2]);
+    unsigned int kernelWidth    = boost::numeric_cast<unsigned int>(kernel.shape()[3]);
 
     unsigned int outputNum      = boost::numeric_cast<unsigned int>(outputExpected.shape()[0]);
     unsigned int outputChannels = boost::numeric_cast<unsigned int>(outputExpected.shape()[3]);
@@ -853,7 +837,7 @@ LayerTestResult<T, 4> DepthwiseConvolution2dNhwcTestImpl(
 
     armnn::TensorInfo inputTensorInfo({inputNum, inputHeight, inputWidth, inputChannels}, armnn::GetDataType<T>());
     armnn::TensorInfo outputTensorInfo({outputNum, outputHeight, outputWidth, outputChannels}, armnn::GetDataType<T>());
-    armnn::TensorInfo kernelDesc({kernelChanMul, kernelHeight, kernelWidth, kernelChannels}, armnn::GetDataType<T>());
+    armnn::TensorInfo kernelDesc({kernelChanMul, kernelChannels, kernelHeight, kernelWidth}, armnn::GetDataType<T>());
     armnn::TensorInfo biasDesc({static_cast<unsigned int>(bias.size())}, armnn::GetDataType<B>());
 
     // Set quantization parameters if the requested type is a quantized type.
@@ -1068,10 +1052,10 @@ LayerTestResult<T, 4> CompareConvolution2dTestImpl(
     armnn::TensorInfo kernelDesc;
     armnn::TensorInfo biasDesc;
 
-    unsigned int inputShape[]  = {inputNum, inputChannels, inputHeight, inputWidth};
-    unsigned int outputShape[] = {outputNum, outputChannels, outputHeight, outputWidth};
-    unsigned int kernelShape[] = {outputChannels, inputChannels, kernelHeight, kernelWidth};
-    unsigned int biasShape[]   = {outputChannels};
+    unsigned int inputShape[]  = {inputNum, inputChannels, inputHeight, inputWidth};
+    unsigned int outputShape[] = {outputNum, outputChannels, outputHeight, outputWidth};
+    unsigned int kernelShape[] = {outputChannels, inputChannels, kernelHeight, kernelWidth};
+    unsigned int biasShape[]   = {outputChannels};
 
     inputTensorInfo  = armnn::TensorInfo(4, inputShape, armnn::GetDataType<T>());
     outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::GetDataType<T>());
@@ -1171,19 +1155,17 @@ LayerTestResult<T, 4> CompareDepthwiseConvolution2dTestImpl(
 
     std::vector<unsigned int> inputShape;
     std::vector<unsigned int> outputShape;
-    std::vector<unsigned int> kernelShape;
-    std::vector<unsigned int> biasShape= { outputChannels };
+    std::vector<unsigned int> kernelShape{ channelMultiplier, inputChannels, kernelHeight, kernelWidth };
+    std::vector<unsigned int> biasShape{ outputChannels };
     switch (layout.GetDataLayout())
     {
         case armnn::DataLayout::NCHW:
             inputShape  = { inputNum, inputChannels, inputHeight, inputWidth };
             outputShape = { outputNum, outputChannels, outputHeight, outputWidth };
-            kernelShape = { channelMultiplier, inputChannels, kernelHeight, kernelWidth };
             break;
        case armnn::DataLayout ::NHWC:
             inputShape  = { inputNum, inputHeight, inputWidth, inputChannels };
             outputShape = { outputNum, outputHeight, outputWidth, outputChannels };
-            kernelShape = { channelMultiplier, kernelHeight, kernelWidth, inputChannels };
             break;
        default:
            throw armnn::InvalidArgumentException("unknown data layout ["
diff --git a/src/backends/backendsCommon/test/LayerTests.cpp b/src/backends/backendsCommon/test/LayerTests.cpp
index ddf0d0b587..819b9d6e37 100755
--- a/src/backends/backendsCommon/test/LayerTests.cpp
+++ b/src/backends/backendsCommon/test/LayerTests.cpp
@@ -661,28 +661,18 @@ LayerTestResult<T, 4> DepthwiseConvolution2dNhwcTestCommon(
         24, 49
     })));
 
-    armnn::TensorInfo kernelTensorInfo({ 1, 4, 4, 2}, armnn::GetDataType<T>());
+    armnn::TensorInfo kernelTensorInfo({ 1, 2, 4, 4 }, armnn::GetDataType<T>());
     auto kernel = MakeTensor<T, 4>(kernelTensorInfo, std::vector<T>(
         QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(), kernelTensorInfo.GetQuantizationOffset(), {
-            32, 16,
-            31, 15,
-            30, 14,
-            29, 13,
-
-            28, 12,
-            27, 11,
-            26, 10,
-            25,  9,
-
-            24,  8,
-            23,  7,
-            22,  6,
-            21,  5,
-
-            20,  4,
-            19,  3,
-            18,  2,
-            17,  1
+            32, 31, 30, 29,
+            28, 27, 26, 25,
+            24, 23, 22, 21,
+            20, 19, 18, 17,
+
+            16, 15, 14, 13,
+            12, 11, 10,  9,
+             8,  7,  6,  5,
+             4,  3,  2,  1
     })));
 
     armnn::TensorInfo outputTensorInfo({ 1, 5, 5, 2}, armnn::GetDataType<T>());
-- 
cgit v1.2.1
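
The weight re-layout this commit standardises can be illustrated in isolation. Android NN (and TensorFlow Lite) hand depthwise weights over as [ 1, H, W, I*M ], with the depth multiplier interleaved innermost in the last dimension, while ArmNN's unified format is [ M, I, H, W ]. The following is a minimal standalone sketch of that permutation under those layout assumptions; RearrangeDepthwiseWeights is a hypothetical name chosen for illustration, not one of the conversion utilities this commit actually adds to the CL and Neon backends.

    // Illustrative only: permute depthwise weights from the Android NN /
    // TensorFlow Lite layout [ 1, H, W, I*M ] into ArmNN's [ M, I, H, W ].
    // The function name and signature are hypothetical.
    #include <cstddef>
    #include <iostream>
    #include <vector>

    std::vector<float> RearrangeDepthwiseWeights(const std::vector<float>& src,
                                                 std::size_t height,      // H
                                                 std::size_t width,       // W
                                                 std::size_t channels,    // I
                                                 std::size_t multiplier)  // M
    {
        std::vector<float> dst(src.size());
        for (std::size_t h = 0; h < height; ++h)
        {
            for (std::size_t w = 0; w < width; ++w)
            {
                for (std::size_t i = 0; i < channels; ++i)
                {
                    for (std::size_t m = 0; m < multiplier; ++m)
                    {
                        // Source: [ 1, H, W, I*M ], multiplier innermost per channel.
                        const std::size_t srcIdx = ((h * width + w) * channels + i) * multiplier + m;
                        // Destination: [ M, I, H, W ].
                        const std::size_t dstIdx = ((m * channels + i) * height + h) * width + w;
                        dst[dstIdx] = src[srcIdx];
                    }
                }
            }
        }
        return dst;
    }

    int main()
    {
        // 2x2 kernel, 2 input channels, depth multiplier 2 -> 16 weights.
        const std::vector<float> src = { 0, 1, 2,  3,  4,  5,  6,  7,
                                         8, 9, 10, 11, 12, 13, 14, 15 };
        for (float v : RearrangeDepthwiseWeights(src, 2, 2, 2, 2))
        {
            std::cout << v << ' ';
        }
        std::cout << '\n';
        return 0;
    }

With H = W = I = M = 2, the example regroups the sixteen interleaved source weights into four contiguous (multiplier, channel) planes of four values each, which is the ordering the unified [ M, I, H, W ] tests above rely on.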