diff options
author | Kevin May <kevin.may@arm.com> | 2019-08-21 16:53:50 +0100 |
---|---|---|
committer | mike.kelly <mike.kelly@arm.com> | 2019-08-26 16:22:55 +0000 |
commit | 665a964aab8858447c3e1261f2c38d59b150da82 (patch) | |
tree | 1a752fe37dec8e639ccea9bdab0993f20e1acd25 /src/backends/backendsCommon/test | |
parent | dcaa6109c95034aa3b945acd50a2882e40f13370 (diff) | |
download | armnn-665a964aab8858447c3e1261f2c38d59b150da82.tar.gz |
IVGCVSW-3575 Fix DepthwiseConvolution VTS Test Failures
Failing VTS tests were "NeuralnetworksHidlTest.depthwise_conv2d_*"
In depthwise convolution there was a difference in weight tensor channel
order between the reference and ACL implementations. This specifically related
to NCHW. This commit:
* Adds ReorderWeightChannelsForAcl to WorkloadUtils which will correct the weight tensor channel order.
* Adds unit tests to detect this problem.
Signed-off-by: Colm Donelan <Colm.Donelan@arm.com>
Change-Id: Icaeac08e14b3d5da9e222ad2f118db55ebb15d09
Diffstat (limited to 'src/backends/backendsCommon/test')
-rw-r--r-- | src/backends/backendsCommon/test/LayerTests.cpp | 239 | ||||
-rw-r--r-- | src/backends/backendsCommon/test/LayerTests.hpp | 14 |
2 files changed, 252 insertions, 1 deletion
diff --git a/src/backends/backendsCommon/test/LayerTests.cpp b/src/backends/backendsCommon/test/LayerTests.cpp index 561e526345..1504806998 100644 --- a/src/backends/backendsCommon/test/LayerTests.cpp +++ b/src/backends/backendsCommon/test/LayerTests.cpp @@ -75,6 +75,10 @@ static std::vector<float> ConvInput3x8x16({ // 2-channel bias used by a number of Conv2d tests. static std::vector<float> Bias2({0, 2}); +static std::vector<float> Bias4({1, 2, 3, 4}); + +static std::vector<float> Bias8({1, 2, 3, 4, 1, 2, 3, 4}); + struct Simple3dSoftmaxOutputData { const std::vector<float> outputData = @@ -121,6 +125,65 @@ boost::multi_array<T, 1> GetBias2(bool biasEnabled, float qScale) } } +// Helper function that returns either Bias4 or an empty vector depending on whether bias is enabled. +template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>> +boost::multi_array<T, 1> GetBias4(bool biasEnabled, float qScale) +{ + if(biasEnabled) + { + armnn::TensorInfo biasDesc({static_cast<unsigned int>(Bias4.size())}, ArmnnType); + boost::multi_array<T, 1> bias = MakeTensor<T, 1>(biasDesc, QuantizedVector<T>(qScale, 0.0f, Bias4)); + return bias; + } + else + { + return boost::multi_array<T, 1>(); + } +} + +// Helper function that returns either Bias8 or an empty vector depending on whether bias is enabled. +template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>> +boost::multi_array<T, 1> GetBias8(bool biasEnabled, float qScale) +{ + if(biasEnabled) + { + armnn::TensorInfo biasDesc({static_cast<unsigned int>(Bias4.size())}, ArmnnType); + boost::multi_array<T, 1> bias = MakeTensor<T, 1>(biasDesc, QuantizedVector<T>(qScale, 0.0f, Bias8)); + return bias; + } + else + { + return boost::multi_array<T, 1>(); + } +} + +// Helper function that returns either Bias4 or an empty vector depending on whether bias is enabled. 
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>> +boost::multi_array<T, 1> GetBias(bool biasEnabled, float qScale, armnn::TensorInfo outputInfo, armnn::DataLayout layout) +{ + const armnnUtils::DataLayoutIndexed dataLayoutIndexed(layout); + const unsigned int channelsIndex = dataLayoutIndexed.GetChannelsIndex(); + const unsigned int outputChannels = outputInfo.GetShape()[channelsIndex]; + + switch (outputChannels) + { + case 2: + default: + { + return GetBias2<ArmnnType>(biasEnabled, qScale); + } + case 4: + { + return GetBias4<ArmnnType>(biasEnabled, qScale); + } + case 8: + { + return GetBias8<ArmnnType>(biasEnabled, qScale); + } + } +} + + template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>> LayerTestResult<T, 4> SimpleConvolution2d3x5TestCommon( armnn::IWorkloadFactory& workloadFactory, @@ -1307,7 +1370,7 @@ LayerTestResult<T, 4> DepthwiseConvolution2d3x3DilationTestCommon( memoryManager, input, kernel, - GetBias2<ArmnnBType>(biasEnabled, qScale * qScale), + GetBias<ArmnnBType>(biasEnabled, qScale * qScale, outputTensorInfo, layout), expectedOutput, qScale, qOffset, @@ -1454,6 +1517,166 @@ LayerTestResult<T, 4> DepthwiseConvolution2d2x3x3Dilation3x3Test( biasEnabled); } +template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T> +LayerTestResult<T, 4> DepthwiseConvolution2dMult4Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + bool biasEnabled, + const armnn::DataLayout layout) +{ + armnn::TensorInfo inputTensorInfo({1, 2, 3, 3}, ArmnnType); + std::vector<float> inputNoQuantizedValues = + { + 10.0, 10.0, 10.0, + 10.0, 10.0, 10.0, + 10.0, 10.0, 10.0, + + 21.0, 22.0, 23.0, + 24.0, 25.0, 26.0, + 27.0, 28.0, 29.0 + }; + + armnn::TensorInfo kernelTensorInfo({ 4, 2, 2, 2}, ArmnnType); + + std::vector<float> kernelNoQuantizedValues = + { + 0.25f, 0.25f, + 0.25f, 0.25f, + + 0.25f, 0.25f, + 
0.25f, 0.25f, + + 0.0f , 0.0f, + 0.0f , 0.1f, + + 0.0f , 0.0f, + 0.0f , 0.1f, + + 0.2f , 0.0f, + 0.0f , 0.0f, + + 0.2f , 0.0f, + 0.0f , 0.0f, + + 0.0f , 0.3f, + 0.0f , 0.0f, + + 0.0f , 0.3f, + 0.0f , 0.0f + }; + + armnn::TensorInfo outputTensorInfo({ 1, 8, 2, 2}, ArmnnType); + std::vector<float> outputExpectedNoQuantizedValues = + { + 10.f, 10.f, + 10.f, 10.f, + + 1.f, 1.f, + 1.f, 1.f, + + 2.f, 2.f, + 2.f, 2.f, + + 3.f, 3.f, + 3.f, 3.f, + + 23.f, 24.f, + 26.f, 27.f, + + 2.5f, 2.6000001f, + 2.8f, 2.9f, + + 4.2000003f, 4.4f, + 4.8f, 5.f, + + 6.6000004f, 6.9f, + 7.5000005f, 7.8f + }; + + + return DepthwiseConvolution2d3x3DilationTestCommon<ArmnnType, ArmnnBType>( + workloadFactory, + memoryManager, + inputNoQuantizedValues, + inputTensorInfo, + kernelNoQuantizedValues, + kernelTensorInfo, + outputExpectedNoQuantizedValues, + outputTensorInfo, + 1, + 1, + layout, + biasEnabled); +} + +template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T> +LayerTestResult<T, 4> DepthwiseConvolution2dMult2Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + bool biasEnabled, + const armnn::DataLayout layout) +{ + armnn::TensorInfo inputTensorInfo({1, 2, 3, 3}, ArmnnType); + std::vector<float> inputNoQuantizedValues = + { + 10.0, 10.0, 10.0, + 10.0, 10.0, 10.0, + 10.0, 10.0, 10.0, + + 21.0, 22.0, 23.0, + 24.0, 25.0, 26.0, + 27.0, 28.0, 29.0 + }; + + armnn::TensorInfo kernelTensorInfo({ 2, 2, 2, 2}, ArmnnType); + + std::vector<float> kernelNoQuantizedValues = + { + 0.25f, 0.25f, + 0.25f, 0.25f, + + 0.2f , 0.0f, + 0.0f , 0.0f, + + 0.0f , 0.0f, + 0.0f , 0.1f, + + 0.0f , 0.3f, + 0.0f , 0.0f + + }; + + armnn::TensorInfo outputTensorInfo({ 1, 4, 2, 2}, ArmnnType); + std::vector<float> outputExpectedNoQuantizedValues = + { + 10.f, 10.f, + 10.f, 10.f, + + 1.f, 1.f, + 1.f, 1.f, + + 4.2000003f, 4.4f, + 4.8f, 5.f, + + 6.6000004f, 6.9f, + 7.5000005f, 7.8f + }; + + + return 
DepthwiseConvolution2d3x3DilationTestCommon<ArmnnType, ArmnnBType>( + workloadFactory, + memoryManager, + inputNoQuantizedValues, + inputTensorInfo, + kernelNoQuantizedValues, + kernelTensorInfo, + outputExpectedNoQuantizedValues, + outputTensorInfo, + 1, + 1, + layout, + biasEnabled); +} template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4> DepthwiseConvolution2d3x3Dilation3x3Test<armnn::DataType::Float32, armnn::DataType::Float32>( @@ -1497,6 +1720,20 @@ DepthwiseConvolution2d2x3x3Dilation3x3Test<armnn::DataType::QuantisedSymm16, arm bool, armnn::DataLayout); +template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4> +DepthwiseConvolution2dMult4Test<armnn::DataType::Float32, armnn::DataType::Float32>( + armnn::IWorkloadFactory &workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, + bool biasEnabled, + const armnn::DataLayout layout); + +template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4> +DepthwiseConvolution2dMult2Test<armnn::DataType::Float32, armnn::DataType::Float32>( + armnn::IWorkloadFactory &workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, + bool biasEnabled, + const armnn::DataLayout layout); + LayerTestResult<float, 4> DepthwiseConvolution2dTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, diff --git a/src/backends/backendsCommon/test/LayerTests.hpp b/src/backends/backendsCommon/test/LayerTests.hpp index df33aa192e..235c5dcbd2 100644 --- a/src/backends/backendsCommon/test/LayerTests.hpp +++ b/src/backends/backendsCommon/test/LayerTests.hpp @@ -148,6 +148,20 @@ LayerTestResult<T, 4> DepthwiseConvolution2d2x3x3Dilation3x3Test( bool biasEnabled, const armnn::DataLayout layout); +template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>> +LayerTestResult<T, 4> DepthwiseConvolution2dMult4Test( + 
armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + bool biasEnabled, + const armnn::DataLayout layout); + +template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>> +LayerTestResult<T, 4> DepthwiseConvolution2dMult2Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + bool biasEnabled, + const armnn::DataLayout layout); + LayerTestResult<float, 4> DepthwiseConvolution2dDepthNhwcTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, |