From 20b1f88309903b576ae030888022f38cce2bbc82 Mon Sep 17 00:00:00 2001
From: Teresa Charlin
Date: Wed, 19 Jun 2019 09:34:37 +0100
Subject: IVGCVSW-3271 Add unit test for dilated DepthwiseConvolution2d

Signed-off-by: Teresa Charlin
Change-Id: I0e7132f61001f7b2a9fad3d7b21acf2558c01df4
---
 .../backendsCommon/test/Conv2dTestImpl.hpp      | 131 +++++--
 src/backends/backendsCommon/test/LayerTests.cpp | 401 +++++++++++++++++----
 src/backends/backendsCommon/test/LayerTests.hpp |  19 +-
 src/backends/reference/test/RefLayerTests.cpp   |  60 ++-
 4 files changed, 492 insertions(+), 119 deletions(-)

diff --git a/src/backends/backendsCommon/test/Conv2dTestImpl.hpp b/src/backends/backendsCommon/test/Conv2dTestImpl.hpp
index 5f66d2ec85..98e5090e27 100644
--- a/src/backends/backendsCommon/test/Conv2dTestImpl.hpp
+++ b/src/backends/backendsCommon/test/Conv2dTestImpl.hpp
@@ -816,16 +816,17 @@ LayerTestResult<T, 4> DepthwiseConvolution2dTestImpl(
 }
 
 template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
          typename T = armnn::ResolveType<ArmnnType>, typename B = armnn::ResolveType<ArmnnBType>>
-LayerTestResult<T, 4> DepthwiseConvolution2dNhwcTestImpl(
+LayerTestResult<T, 4> DepthwiseConvolution2dTestImpl(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    const boost::multi_array<T, 4>& input,
-    const boost::multi_array<T, 4>& kernel,
+    const boost::multi_array<T, 4>& originalInput,
+    const boost::multi_array<T, 4>& originalKernel,
     const boost::multi_array<B, 1>& bias,
-    const boost::multi_array<T, 4>& outputExpected,
+    const boost::multi_array<T, 4>& originalOutputExpected,
     float qScale,
     int32_t qOffset,
+    const armnn::DataLayout layout = armnn::DataLayout::NCHW,
     uint32_t padLeft = 0,
     uint32_t padTop = 0,
     uint32_t padRight = 0,
@@ -835,30 +836,44 @@ LayerTestResult<T, 4> DepthwiseConvolution2dNhwcTestImpl(
     uint32_t dilationX = 1,
     uint32_t dilationY = 1)
 {
-    unsigned int inputNum = boost::numeric_cast<unsigned int>(input.shape()[0]);
-    unsigned int inputChannels = boost::numeric_cast<unsigned int>(input.shape()[3]);
-    unsigned int inputHeight = boost::numeric_cast<unsigned int>(input.shape()[1]);
-    unsigned int inputWidth = boost::numeric_cast<unsigned int>(input.shape()[2]);
+    unsigned int inputHeight = boost::numeric_cast<unsigned int>(originalInput.shape()[2]);
+    unsigned int inputWidth = boost::numeric_cast<unsigned int>(originalInput.shape()[3]);
+    unsigned int inputChannels = boost::numeric_cast<unsigned int>(originalInput.shape()[1]);
+    unsigned int inputNum = boost::numeric_cast<unsigned int>(originalInput.shape()[0]);
 
-    unsigned int kernelChanMul = boost::numeric_cast<unsigned int>(kernel.shape()[0]);
-    unsigned int kernelChannels = boost::numeric_cast<unsigned int>(kernel.shape()[1]);
-    unsigned int kernelHeight = boost::numeric_cast<unsigned int>(kernel.shape()[2]);
-    unsigned int kernelWidth = boost::numeric_cast<unsigned int>(kernel.shape()[3]);
+    unsigned int outputHeight = boost::numeric_cast<unsigned int>(originalOutputExpected.shape()[2]);
+    unsigned int outputWidth = boost::numeric_cast<unsigned int>(originalOutputExpected.shape()[3]);
+    unsigned int outputChannels = boost::numeric_cast<unsigned int>(originalOutputExpected.shape()[1]);
+    unsigned int outputNum = boost::numeric_cast<unsigned int>(originalOutputExpected.shape()[0]);
 
-    unsigned int outputNum = boost::numeric_cast<unsigned int>(outputExpected.shape()[0]);
-    unsigned int outputChannels = boost::numeric_cast<unsigned int>(outputExpected.shape()[3]);
-    unsigned int outputHeight = boost::numeric_cast<unsigned int>(outputExpected.shape()[1]);
-    unsigned int outputWidth = boost::numeric_cast<unsigned int>(outputExpected.shape()[2]);
+    unsigned int kernelHeight = boost::numeric_cast<unsigned int>(originalKernel.shape()[2]);
+    unsigned int kernelWidth = boost::numeric_cast<unsigned int>(originalKernel.shape()[3]);
+    unsigned int kernelChannels = boost::numeric_cast<unsigned int>(originalKernel.shape()[1]);
+    unsigned int kernelDepthMul = boost::numeric_cast<unsigned int>(originalKernel.shape()[0]);
+
+    bool biasEnabled = bias.size() > 0;
+
+    // This function currently assumes 1 batch of input/output (and duplicates this into 2 batches).
+    BOOST_ASSERT(inputNum == 1);
+    BOOST_ASSERT(outputNum == 1);
+
+    // If a bias is used, its size must equal the number of output channels.
+    BOOST_ASSERT(!biasEnabled || bias.size() == outputChannels);
+
+
+    // Note these tensors will use two (identical) batches.
+    armnn::TensorInfo inputTensorInfo =
+            armnnUtils::GetTensorInfo(2*inputNum, inputChannels, inputHeight, inputWidth, layout, ArmnnType);
+    armnn::TensorInfo outputTensorInfo =
+            armnnUtils::GetTensorInfo(2*outputNum, outputChannels, outputHeight, outputWidth, layout, ArmnnType);
+
+    // For depthwise convolution the kernel is always in NCHW layout, regardless of the input/output layout.
+    armnn::TensorInfo kernelDesc({kernelDepthMul, kernelChannels, kernelHeight, kernelWidth}, ArmnnType);
 
-    // Creates the tensors.
-    armnn::TensorInfo inputTensorInfo({inputNum, inputHeight, inputWidth, inputChannels}, ArmnnType);
-    armnn::TensorInfo outputTensorInfo({outputNum, outputHeight, outputWidth, outputChannels}, ArmnnType);
-    armnn::TensorInfo kernelDesc({kernelChanMul, kernelChannels, kernelHeight, kernelWidth}, ArmnnType);
     armnn::TensorInfo biasDesc({static_cast<unsigned int>(bias.size())}, ArmnnBType);
 
     // Set quantization parameters if the requested type is a quantized type.
-    if (armnn::IsQuantizedType<T>())
+    if(armnn::IsQuantizedType<T>())
     {
         inputTensorInfo.SetQuantizationScale(qScale);
         inputTensorInfo.SetQuantizationOffset(qOffset);
@@ -870,45 +885,87 @@ LayerTestResult<T, 4> DepthwiseConvolution2dNhwcTestImpl(
         biasDesc.SetQuantizationOffset(0);
     }
 
-    // Construct the input data.
+    LayerTestResult<T, 4> ret(outputTensorInfo);
+
+    // Construct the input data.
+    std::vector<T> input;
+    input.assign(originalInput.data(), originalInput.data() + 1*inputChannels*inputHeight*inputWidth);
     std::vector<T> inputData;
-    inputData.assign(input.data(), input.data() + inputHeight*inputWidth*inputChannels);
+    inputData.insert(inputData.end(), input.begin(), input.end());
+    inputData.insert(inputData.end(), input.begin(), input.end());
+
+    // If NHWC is requested, permute the input data at this point.
+    const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
+    if (layout == armnn::DataLayout::NHWC)
+    {
+        std::vector<T> tmp(inputData.size());
+        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(T));
+        inputData = tmp;
+    }
+
+    auto batchedInput = MakeTensor<T, 4>(inputTensorInfo, inputData);
 
-    // Construct the output data, with bias applied, as appropriate.
+    std::vector<T> output;
+    output.assign(originalOutputExpected.data(),
+            originalOutputExpected.data() + outputChannels*outputHeight*outputWidth);
+
+    // Apply bias to output data if it is enabled.
+ if(biasEnabled) + { + std::vector biasV; + biasV.assign(bias.data(), bias.data() + outputChannels); + ApplyBias(output, outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(), + biasV, biasDesc.GetQuantizationScale(), biasDesc.GetQuantizationOffset(), + outputWidth, outputHeight); + } + + // Construct expected output data std::vector outputData; - outputData.assign(outputExpected.data(), outputExpected.data() + outputHeight*outputWidth*outputChannels); + outputData.insert(outputData.end(), output.begin(), output.end()); + outputData.insert(outputData.end(), output.begin(), output.end()); - LayerTestResult ret(outputTensorInfo); + // at this point if we require it permute the expected output + if (layout == armnn::DataLayout::NHWC) + { + std::vector tmp(outputData.size()); + armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp.data(), sizeof(T)); + outputData = tmp; + } ret.outputExpected = MakeTensor(outputTensorInfo, outputData); std::unique_ptr inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo); std::unique_ptr outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo); + armnn::DepthwiseConvolution2dQueueDescriptor data; + armnn::WorkloadInfo info; armnn::ScopedCpuTensorHandle weightsTensor(kernelDesc); + armnn::ScopedCpuTensorHandle biasTensor(biasDesc); + + boost::multi_array kernel = boost::multi_array(originalKernel); AllocateAndCopyDataToITensorHandle(&weightsTensor, &kernel[0][0][0][0]); - armnn::ScopedCpuTensorHandle biasTensor(biasDesc); + if(biasEnabled) + { + AllocateAndCopyDataToITensorHandle(&biasTensor, &bias[0]); + } + + AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get()); + AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get()); - armnn::DepthwiseConvolution2dQueueDescriptor data; data.m_Weight = &weightsTensor; - data.m_Bias = &biasTensor; // Still set this whether or not bias is enabled - it can be a source of bugs. + data.m_Bias = &biasTensor; // Still set this whether or not bias is enabled - can be a source of bugs. data.m_Parameters.m_StrideX = strideX; data.m_Parameters.m_StrideY = strideY; data.m_Parameters.m_PadLeft = padLeft; data.m_Parameters.m_PadRight = padRight; data.m_Parameters.m_PadTop = padTop; data.m_Parameters.m_PadBottom = padBottom; - data.m_Parameters.m_DataLayout = armnn::DataLayout::NHWC; + data.m_Parameters.m_BiasEnabled = biasEnabled; + data.m_Parameters.m_DataLayout = layout; data.m_Parameters.m_DilationX = dilationX; data.m_Parameters.m_DilationY = dilationY; - armnn::WorkloadInfo info; - AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get()); - AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get()); - std::unique_ptr workload = workloadFactory.CreateDepthwiseConvolution2d(data, info); - inputHandle->Allocate(); outputHandle->Allocate(); diff --git a/src/backends/backendsCommon/test/LayerTests.cpp b/src/backends/backendsCommon/test/LayerTests.cpp index 45791e50f2..d9ae546739 100644 --- a/src/backends/backendsCommon/test/LayerTests.cpp +++ b/src/backends/backendsCommon/test/LayerTests.cpp @@ -826,7 +826,7 @@ LayerTestResult Convolution2d2x3x3Dilation3x3Test( 12., 10., 10., 10., 12., 10., 10., 10., 12., 10., 10., 10., - 6., 4., 4., 4. + 6., 4., 4., 4. }; return Convolution2d3x3DilationTestCommon( @@ -899,7 +899,8 @@ LayerTestResult DepthwiseConvolution2dAsymmetricTestCommon( // Use a single-batch 2-channel 5x5 image as input. 
armnn::TensorInfo inputTensorInfo({ 1, 2, 5, 5 }, ArmnnType); auto input = MakeTensor(inputTensorInfo, std::vector( - QuantizedVector(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(), { + QuantizedVector(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(), + { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, @@ -916,7 +917,8 @@ LayerTestResult DepthwiseConvolution2dAsymmetricTestCommon( // Use a depth multiplier of 1 on a 2-channel 4x4 kernel. armnn::TensorInfo kernelTensorInfo({ 1, 2, 4, 4 }, ArmnnType); auto kernel = MakeTensor(kernelTensorInfo, std::vector( - QuantizedVector(kernelTensorInfo.GetQuantizationScale(), kernelTensorInfo.GetQuantizationOffset(), { + QuantizedVector(kernelTensorInfo.GetQuantizationScale(), kernelTensorInfo.GetQuantizationOffset(), + { 32, 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, 21, @@ -932,12 +934,14 @@ LayerTestResult DepthwiseConvolution2dAsymmetricTestCommon( // Calculated using the python tensorflow library with strideX=1, strideY=1. armnn::TensorInfo outputTensorInfo({ 1, 2, 5, 5 }, ArmnnType); boost::multi_array expectedOutput = MakeTensor(outputTensorInfo, std::vector( - QuantizedVector(outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(), { + QuantizedVector(outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(), + { 1062, 1580, 1850, 1530, 1117, 2140, 3108, 3500, 2842, 2042, 3580, 5068, 5460, 4342, 3062, 3618, 5072, 5390, 4248, 2971, 3074, 4282, 4510, 3533, 2457, + 1550, 2284, 2362, 1955, 1428, 2910, 4206, 4342, 3528, 2536, 3390, 4886, 5022, 4068, 2916, @@ -972,43 +976,29 @@ LayerTestResult DepthwiseConvolution2dNhwcTestCommon( int32_t qOffset, bool biasEnabled) { - armnn::TensorInfo inputTensorInfo({ 1, 5, 5, 2}, ArmnnType); + auto layout = armnn::DataLayout::NHWC; + + armnn::TensorInfo inputTensorInfo({ 1, 2, 5, 5}, ArmnnType); auto input = MakeTensor(inputTensorInfo, std::vector( - QuantizedVector(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(), { - 0, 25, - 1, 26, - 2, 27, - 3, 28, - 4, 29, - - 5, 30, - 6, 31, - 7, 32, - 8, 33, - 9, 34, - - 10, 35, - 11, 36, - 12, 37, - 13, 38, - 14, 39, - - 15, 40, - 16, 41, - 17, 42, - 18, 43, - 19, 44, - - 20, 45, - 21, 46, - 22, 47, - 23, 48, - 24, 49 + QuantizedVector(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(), + { + 0, 1, 2, 3, 4, + 5, 6, 7, 8, 9, + 10, 11, 12, 13, 14, + 15, 16, 17, 18, 19, + 20, 21, 22, 23, 24, + + 25, 26, 27, 28, 29, + 30, 31, 32, 33, 34, + 35, 36, 37, 38, 39, + 40, 41, 42, 43, 44, + 45, 46, 47, 48, 49 }))); armnn::TensorInfo kernelTensorInfo({ 1, 2, 4, 4 }, ArmnnType); auto kernel = MakeTensor(kernelTensorInfo, std::vector( - QuantizedVector(kernelTensorInfo.GetQuantizationScale(), kernelTensorInfo.GetQuantizationOffset(), { + QuantizedVector(kernelTensorInfo.GetQuantizationScale(), kernelTensorInfo.GetQuantizationOffset(), + { 32, 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, 21, @@ -1020,41 +1010,24 @@ LayerTestResult DepthwiseConvolution2dNhwcTestCommon( 4, 3, 2, 1 }))); - armnn::TensorInfo outputTensorInfo({ 1, 5, 5, 2}, ArmnnType); + armnn::TensorInfo outputTensorInfo({ 1, 2, 5, 5}, ArmnnType); boost::multi_array expectedOutput = MakeTensor(outputTensorInfo, std::vector( - QuantizedVector(outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(), { - 1062, 1550, - 1580, 2284, - 1850, 2362, - 1530, 1955, - 1117, 1428, - - 2140, 2910, - 3108, 4206, - 3500, 4342, - 2842, 3528, - 2042, 
2536, - - 3580, 3390, - 5068, 4886, - 5460, 5022, - 4342, 4068, - 3062, 2916, - - 3618, 3566, - 5072, 5056, - 5390, 5182, - 4248, 4133, - 2971, 2922, - - 3074, 3100, - 4282, 4352, - 4510, 4452, - 3533, 3517, - 2457, 2465 + QuantizedVector(outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(), + { + 1062, 1580, 1850, 1530, 1117, + 2140, 3108, 3500, 2842, 2042, + 3580, 5068, 5460, 4342, 3062, + 3618, 5072, 5390, 4248, 2971, + 3074, 4282, 4510, 3533, 2457, + + 1550, 2284, 2362, 1955, 1428, + 2910, 4206, 4342, 3528, 2536, + 3390, 4886, 5022, 4068, 2916, + 3566, 5056, 5182, 4133, 2922, + 3100, 4352, 4452, 3517, 2465 }))); - return DepthwiseConvolution2dNhwcTestImpl( + return DepthwiseConvolution2dTestImpl( workloadFactory, memoryManager, input, @@ -1063,6 +1036,7 @@ LayerTestResult DepthwiseConvolution2dNhwcTestCommon( expectedOutput, qScale, qOffset, + layout, 1, // Padding left. 1, // Padding top. 2, // Padding right. @@ -1080,9 +1054,12 @@ LayerTestResult SimpleDepthwiseConvolution2d3x3Dilation3x3NhwcTestCommon( int32_t qOffset, bool biasEnabled) { - armnn::TensorInfo inputTensorInfo({ 1, 9, 9, 1}, ArmnnType); + auto layout = armnn::DataLayout::NHWC; + + armnn::TensorInfo inputTensorInfo({ 1, 1, 9, 9}, ArmnnType); auto input = MakeTensor(inputTensorInfo, std::vector( - QuantizedVector(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(), { + QuantizedVector(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(), + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, @@ -1096,7 +1073,8 @@ LayerTestResult SimpleDepthwiseConvolution2d3x3Dilation3x3NhwcTestCommon( armnn::TensorInfo kernelTensorInfo({ 1, 1, 3, 3}, ArmnnType); auto kernel = MakeTensor(kernelTensorInfo, std::vector( - QuantizedVector(kernelTensorInfo.GetQuantizationScale(), kernelTensorInfo.GetQuantizationOffset(), { + QuantizedVector(kernelTensorInfo.GetQuantizationScale(), kernelTensorInfo.GetQuantizationOffset(), + { 1, 2, 3, 4, 5, 6, 7, 8, 9 @@ -1112,15 +1090,16 @@ LayerTestResult SimpleDepthwiseConvolution2d3x3Dilation3x3NhwcTestCommon( uint32_t dilationY = 3; // Since the dilation rate is 3 this will reduce the size of the output from 9x9 to 3x3 of all 5s. 
- armnn::TensorInfo outputTensorInfo({ 1, 3, 3, 1}, ArmnnType); + armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3}, ArmnnType); boost::multi_array expectedOutput = MakeTensor(outputTensorInfo, std::vector( - QuantizedVector(outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(), { + QuantizedVector(outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(), + { 5, 5, 5, 5, 5, 5, 5, 5, 5 }))); - return DepthwiseConvolution2dNhwcTestImpl( + return DepthwiseConvolution2dTestImpl( workloadFactory, memoryManager, input, @@ -1129,6 +1108,7 @@ LayerTestResult SimpleDepthwiseConvolution2d3x3Dilation3x3NhwcTestCommon( expectedOutput, qScale, qOffset, + layout, padLeft, padTop, padRight, @@ -1139,6 +1119,269 @@ LayerTestResult SimpleDepthwiseConvolution2d3x3Dilation3x3NhwcTestCommon( dilationY); } + +template> +LayerTestResult DepthwiseConvolution2d3x3DilationTestCommon( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const std::vector& inputNoQuantizedValues, + armnn::TensorInfo& inputTensorInfo, + const std::vector& kernelNoQuantizedValues, + armnn::TensorInfo& kernelTensorInfo, + const std::vector& outputExpectedNoQuantizedValues, + armnn::TensorInfo& outputTensorInfo, + uint32_t dilationX, + uint32_t dilationY, + armnn::DataLayout layout = armnn::DataLayout::NCHW, + bool biasEnabled = false) +{ + float qScale; + int32_t qOffset; + switch (ArmnnType) + { + case armnn::DataType::QuantisedAsymm8: + { + qScale = 0.1f; + qOffset = 128; + break; + } + case armnn::DataType::QuantisedSymm16: + { + qScale = 0.1f; + qOffset = 0; + break; + } + case armnn::DataType::Float32: + default: + { + qScale = 0.f; + qOffset = 0; + break; + } + } + + inputTensorInfo.SetQuantizationScale(qScale); + inputTensorInfo.SetQuantizationOffset(qOffset); + kernelTensorInfo.SetQuantizationScale(qScale); + kernelTensorInfo.SetQuantizationOffset(qOffset); + outputTensorInfo.SetQuantizationScale(qScale); + outputTensorInfo.SetQuantizationOffset(qOffset); + + auto input = MakeTensor(inputTensorInfo, + std::vector(QuantizedVector(inputTensorInfo.GetQuantizationScale(), + inputTensorInfo.GetQuantizationOffset(), + inputNoQuantizedValues))); + auto kernel = MakeTensor(kernelTensorInfo, + std::vector(QuantizedVector(kernelTensorInfo.GetQuantizationScale(), + kernelTensorInfo.GetQuantizationOffset(), + kernelNoQuantizedValues))); + auto expectedOutput = MakeTensor(outputTensorInfo, + std::vector(QuantizedVector(outputTensorInfo.GetQuantizationScale(), + outputTensorInfo.GetQuantizationOffset(), + outputExpectedNoQuantizedValues))); + + uint32_t padLeft = 0; + uint32_t padTop = 0; + uint32_t padRight = 0; + uint32_t padBottom = 0; + uint32_t strideX = 1; + uint32_t strideY = 1; + + return DepthwiseConvolution2dTestImpl( + workloadFactory, + memoryManager, + input, + kernel, + GetBias2(biasEnabled, qScale * qScale), + expectedOutput, + qScale, + qOffset, + layout, + padLeft, + padTop, + padRight, + padBottom, + strideX, + strideY, + dilationX, + dilationY); +} + +template +LayerTestResult DepthwiseConvolution2d3x3Dilation3x3Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + bool biasEnabled, + const armnn::DataLayout layout) +{ + armnn::TensorInfo inputTensorInfo({1, 1, 10, 10}, ArmnnType); + std::vector inputNoQuantizedValues = + { + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 1, 1, 
1, 0, 0, 0, + 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, + 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 + }; + + armnn::TensorInfo kernelTensorInfo({ 1, 1, 3, 3}, ArmnnType); + std::vector kernelNoQuantizedValues = + { + 1, 2, 3, + 4, 5, 6, + 7, 8, 9 + }; + + // Since the dilation rate is 3 this will dilate the kernel to be like 7x7, + // therefore the output will be 4x4: (I−K+2P)/S +1 => (10-7 +0)/1 +1 + armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4}, ArmnnType); + std::vector outputExpectedNoQuantizedValues = + { + 6., 5., 5., 5., + 6., 5., 5., 5., + 6., 5., 5., 5., + 3., 2., 2., 2. + }; + + return DepthwiseConvolution2d3x3DilationTestCommon( + workloadFactory, + memoryManager, + inputNoQuantizedValues, + inputTensorInfo, + kernelNoQuantizedValues, + kernelTensorInfo, + outputExpectedNoQuantizedValues, + outputTensorInfo, + 3, + 3, + layout, + biasEnabled); +} + +template +LayerTestResult DepthwiseConvolution2d2x3x3Dilation3x3Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + bool biasEnabled, + const armnn::DataLayout layout) +{ + armnn::TensorInfo inputTensorInfo({1, 2, 10, 10}, ArmnnType); + std::vector inputNoQuantizedValues = + { + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, + 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, + 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, + 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, + 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 + }; + + armnn::TensorInfo kernelTensorInfo({ 1, 2, 3, 3}, ArmnnType); + std::vector kernelNoQuantizedValues = + { + 1, 2, 3, + 4, 5, 6, + 7, 8, 9, + + 1, 2, 3, + 4, 5, 6, + 7, 8, 9 + }; + + // Since the dilation rate is 3 this will dilate the kernel to be like 7x7, + // therefore the output will be 2x4x4: (I−K+2P)/S +1 => (10-7 +0)/1 +1 + armnn::TensorInfo outputTensorInfo({ 1, 2, 4, 4}, ArmnnType); + std::vector outputExpectedNoQuantizedValues = + { + 6., 5., 5., 5., + 6., 5., 5., 5., + 6., 5., 5., 5., + 3., 2., 2., 2., + + 6., 5., 5., 5., + 6., 5., 5., 5., + 6., 5., 5., 5., + 3., 2., 2., 2. 
+ }; + + return DepthwiseConvolution2d3x3DilationTestCommon( + workloadFactory, + memoryManager, + inputNoQuantizedValues, + inputTensorInfo, + kernelNoQuantizedValues, + kernelTensorInfo, + outputExpectedNoQuantizedValues, + outputTensorInfo, + 3, + 3, + layout, + biasEnabled); +} + + +template LayerTestResult, 4> +DepthwiseConvolution2d3x3Dilation3x3Test( + armnn::IWorkloadFactory&, + const armnn::IBackendInternal::IMemoryManagerSharedPtr&, + bool, + armnn::DataLayout); + +template LayerTestResult, 4> +DepthwiseConvolution2d3x3Dilation3x3Test( + armnn::IWorkloadFactory&, + const armnn::IBackendInternal::IMemoryManagerSharedPtr&, + bool, + armnn::DataLayout); + +template LayerTestResult, 4> +DepthwiseConvolution2d3x3Dilation3x3Test( + armnn::IWorkloadFactory&, + const armnn::IBackendInternal::IMemoryManagerSharedPtr&, + bool, + armnn::DataLayout); + +template LayerTestResult, 4> +DepthwiseConvolution2d2x3x3Dilation3x3Test( + armnn::IWorkloadFactory&, + const armnn::IBackendInternal::IMemoryManagerSharedPtr&, + bool, + armnn::DataLayout); + +template LayerTestResult, 4> +DepthwiseConvolution2d2x3x3Dilation3x3Test( + armnn::IWorkloadFactory&, + const armnn::IBackendInternal::IMemoryManagerSharedPtr&, + bool, + armnn::DataLayout); + +template LayerTestResult, 4> +DepthwiseConvolution2d2x3x3Dilation3x3Test( + armnn::IWorkloadFactory&, + const armnn::IBackendInternal::IMemoryManagerSharedPtr&, + bool, + armnn::DataLayout); + LayerTestResult DepthwiseConvolution2dTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, @@ -1203,11 +1446,11 @@ LayerTestResult SimpleDepthwiseConvolution2d3x3Dilation3x3NhwcTest( const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { return SimpleDepthwiseConvolution2d3x3Dilation3x3NhwcTestCommon( - workloadFactory, - memoryManager, - 0.f, - 0, - false); + workloadFactory, + memoryManager, + 0.f, + 0, + false); } LayerTestResult DepthwiseConvolution2dInt16Test( diff --git a/src/backends/backendsCommon/test/LayerTests.hpp b/src/backends/backendsCommon/test/LayerTests.hpp index e0b0273d9d..25ccfa09f0 100644 --- a/src/backends/backendsCommon/test/LayerTests.hpp +++ b/src/backends/backendsCommon/test/LayerTests.hpp @@ -127,6 +127,20 @@ LayerTestResult Convolution2d2x3x3Dilation3x3Test( bool biasEnabled, const armnn::DataLayout layout); +template> +LayerTestResult DepthwiseConvolution2d3x3Dilation3x3Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + bool biasEnabled, + const armnn::DataLayout layout); + +template> +LayerTestResult DepthwiseConvolution2d2x3x3Dilation3x3Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + bool biasEnabled, + const armnn::DataLayout layout); + LayerTestResult DepthwiseConvolution2dDepthNhwcTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, @@ -144,8 +158,9 @@ LayerTestResult DepthwiseConvolution2dAsymmetricTest( bool biasEnabled, const armnn::DataLayout layout); -LayerTestResult SimpleDepthwiseConvolution2d3x3Dilation3x3NhwcTest(armnn::IWorkloadFactory& workloadFactory, - const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); +LayerTestResult SimpleDepthwiseConvolution2d3x3Dilation3x3NhwcTest( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); LayerTestResult 
 CompareDepthwiseConvolution2dFloatTest(
     armnn::IWorkloadFactory& workloadFactory,
diff --git a/src/backends/reference/test/RefLayerTests.cpp b/src/backends/reference/test/RefLayerTests.cpp
index ccb1dc2d5d..cb9ee4b5a0 100644
--- a/src/backends/reference/test/RefLayerTests.cpp
+++ b/src/backends/reference/test/RefLayerTests.cpp
@@ -138,9 +138,68 @@ ARMNN_AUTO_TEST_CASE(UnbiasedDepthwiseConvolution2dUint8Nhwc,
                      DepthwiseConvolution2dUint8Test,
                      false,
                      armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2dDepthNhwc, DepthwiseConvolution2dDepthNhwcTest, false)
 ARMNN_AUTO_TEST_CASE(SimpleDepthwiseConvolution2d3x3Dilation3x3Nhwc,
                      SimpleDepthwiseConvolution2d3x3Dilation3x3NhwcTest)
+ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2d3x3Dilation3x3,
+                     DepthwiseConvolution2d3x3Dilation3x3Test<armnn::DataType::Float32, armnn::DataType::Float32>,
+                     false,
+                     armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2d3x3Dilation3x3Nhwc,
+                     DepthwiseConvolution2d3x3Dilation3x3Test<armnn::DataType::Float32, armnn::DataType::Float32>,
+                     false,
+                     armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2d3x3Dilation3x3Uint8,
+                     DepthwiseConvolution2d3x3Dilation3x3Test
+                     <armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>,
+                     false,
+                     armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2d3x3Dilation3x3NhwcUint8,
+                     DepthwiseConvolution2d3x3Dilation3x3Test
+                     <armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>,
+                     false,
+                     armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2d3x3Dilation3x3Int16,
+                     DepthwiseConvolution2d3x3Dilation3x3Test
+                     <armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>,
+                     false,
+                     armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2d3x3Dilation3x3NhwcInt16,
+                     DepthwiseConvolution2d3x3Dilation3x3Test
+                     <armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>,
+                     false,
+                     armnn::DataLayout::NHWC)
+
+ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2d2x3x3Dilation3x3,
+                     DepthwiseConvolution2d2x3x3Dilation3x3Test
+                     <armnn::DataType::Float32, armnn::DataType::Float32>,
+                     false,
+                     armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2d2x3x3Dilation3x3Nhwc,
+                     DepthwiseConvolution2d2x3x3Dilation3x3Test<armnn::DataType::Float32, armnn::DataType::Float32>,
+                     false,
+                     armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2d2x3x3Dilation3x3Uint8,
+                     DepthwiseConvolution2d2x3x3Dilation3x3Test
+                     <armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>,
+                     false,
+                     armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2d2x3x3Dilation3x3NhwcUint8,
+                     DepthwiseConvolution2d2x3x3Dilation3x3Test
+                     <armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>,
+                     false,
+                     armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2d2x3x3Dilation3x3Int16,
+                     DepthwiseConvolution2d2x3x3Dilation3x3Test
+                     <armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>,
+                     false,
+                     armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2d2x3x3Dilation3x3NhwcInt16,
+                     DepthwiseConvolution2d2x3x3Dilation3x3Test
+                     <armnn::DataType::QuantisedSymm16, armnn::DataType::Signed32>,
+                     false,
+                     armnn::DataLayout::NHWC)
 ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2dDepthMul1,
                      DepthwiseConvolution2dDepthMul1Test,
                      true,
                      armnn::DataLayout::NCHW)
@@ -173,7 +232,6 @@ ARMNN_AUTO_TEST_CASE(DepthwiseConvolution2dAsymmetricNhwc,
 ARMNN_AUTO_TEST_CASE(UnbiasedDepthwiseConvolution2dAsymmetricNhwc,
                      DepthwiseConvolution2dAsymmetricTest, false, armnn::DataLayout::NHWC)
-
 // Pooling
 //MaxPooling
 ARMNN_AUTO_TEST_CASE(SimpleMaxPooling2dSize2x2Stride2x2, SimpleMaxPooling2dSize2x2Stride2x2Test, false)
-- 
cgit v1.2.1
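
Editor's note on the expected shapes: the comment "(I−K+2P)/S +1 => (10-7 +0)/1 +1" in the new dilation tests is the usual convolution output-size formula with the kernel replaced by its dilated extent. The following standalone C++ sketch (not part of the patch; the helper names are invented for illustration) reproduces that arithmetic for the shapes used in the tests.

#include <cassert>
#include <cstdint>
#include <iostream>

// Effective kernel extent once dilation is applied: K + (K - 1) * (D - 1).
uint32_t DilatedKernelSize(uint32_t kernelSize, uint32_t dilation)
{
    return kernelSize + (kernelSize - 1) * (dilation - 1);
}

// Output extent for one spatial dimension: (I - Keff + padBegin + padEnd) / S + 1.
uint32_t ConvOutputSize(uint32_t inputSize, uint32_t kernelSize, uint32_t dilation,
                        uint32_t stride, uint32_t padBegin, uint32_t padEnd)
{
    const uint32_t dilatedKernel = DilatedKernelSize(kernelSize, dilation);
    assert(inputSize + padBegin + padEnd >= dilatedKernel);
    return (inputSize + padBegin + padEnd - dilatedKernel) / stride + 1;
}

int main()
{
    // DepthwiseConvolution2d3x3Dilation3x3Test: 10x10 input, 3x3 kernel, dilation 3,
    // stride 1, no padding -> dilated kernel 7x7 -> output (10 - 7) / 1 + 1 = 4.
    std::cout << ConvOutputSize(10, 3, 3, 1, 0, 0) << "\n"; // prints 4

    // SimpleDepthwiseConvolution2d3x3Dilation3x3Nhwc: 9x9 input, same kernel -> 3x3 output.
    std::cout << ConvOutputSize(9, 3, 3, 1, 0, 0) << "\n";  // prints 3
    return 0;
}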
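The reworked DepthwiseConvolution2dTestImpl builds all reference data in NCHW and, when NHWC is under test, permutes it with armnnUtils::Permute and the mapping NCHWToNHWC = { 0, 3, 1, 2 }. The sketch below is a simplified stand-in for that call, written only to show how the flat buffer is reordered; it assumes the convention that mappings[i] gives the destination dimension of source dimension i, and it is not the ArmNN implementation.

#include <cstddef>
#include <iostream>
#include <vector>

template <typename T>
std::vector<T> PermuteNchwToNhwc(const std::vector<T>& src,
                                 size_t n, size_t c, size_t h, size_t w)
{
    std::vector<T> dst(src.size());
    for (size_t in = 0; in < n; ++in)
    {
        for (size_t ic = 0; ic < c; ++ic)
        {
            for (size_t ih = 0; ih < h; ++ih)
            {
                for (size_t iw = 0; iw < w; ++iw)
                {
                    // Source index in NCHW order.
                    const size_t srcIdx = ((in * c + ic) * h + ih) * w + iw;
                    // Destination index in NHWC order: N stays outermost, C becomes innermost.
                    const size_t dstIdx = ((in * h + ih) * w + iw) * c + ic;
                    dst[dstIdx] = src[srcIdx];
                }
            }
        }
    }
    return dst;
}

int main()
{
    // 1 batch, 2 channels, 2x2 spatial: per-channel planes in NCHW interleave per pixel in NHWC.
    std::vector<int> nchw = { 0, 1, 2, 3,   10, 11, 12, 13 };
    for (int v : PermuteNchwToNhwc(nchw, 1, 2, 2, 2))
    {
        std::cout << v << ' '; // 0 10 1 11 2 12 3 13
    }
    std::cout << '\n';
    return 0;
}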
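DepthwiseConvolution2d3x3DilationTestCommon chooses a quantisation scale and offset per data type (0.1 and 128 for QuantisedAsymm8, 0.1 and 0 for QuantisedSymm16) and passes the float reference values through QuantizedVector. A minimal sketch of the affine quantisation this implies is given below; it mirrors the usual q = round(x / scale) + offset behaviour, clamped to the target type's range, and is an illustration rather than the library code.

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <limits>
#include <vector>

template <typename QuantizedType>
std::vector<QuantizedType> Quantize(const std::vector<float>& values, float scale, int32_t offset)
{
    std::vector<QuantizedType> out;
    out.reserve(values.size());
    for (float v : values)
    {
        // Affine quantisation: scale, shift by the zero-point offset, then clamp to the type's range.
        const int32_t q  = static_cast<int32_t>(std::round(v / scale)) + offset;
        const int32_t lo = std::numeric_limits<QuantizedType>::min();
        const int32_t hi = std::numeric_limits<QuantizedType>::max();
        out.push_back(static_cast<QuantizedType>(std::min(hi, std::max(lo, q))));
    }
    return out;
}

// Example: with the QuantisedAsymm8 parameters used in the dilation tests
// (scale = 0.1f, offset = 128), the kernel value 9.0f maps to 90 + 128 = 218.
// auto q = Quantize<uint8_t>({ 9.0f }, 0.1f, 128);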