From 45a9b775bf63283320315d90e4e9a6c641df6e20 Mon Sep 17 00:00:00 2001 From: James Conroy Date: Wed, 31 Oct 2018 11:47:53 +0000 Subject: IVGCVSW-2102: Fix Pooling2D CpuRef indexing bug * Fixes bug when calculating indexes for NHWC in Pooling2D CpuRef implementation, it now uses TensorBufferArrayView. * Adds 2-Channel unit tests for Pooling2d on CpuRef, Cl and Neon. The single channel tests were not properly exercising Pooling2d using NHWC data layout. * Refactors Pooling2D NHWC tests so that the input and output data are permuted to NHWC when necessary, instead of hard coding the data in NHWC format. Change-Id: I5b9d41ed425ff283ea8c8ef6b1266ae0bc80f43b --- src/armnn/test/TensorHelpers.hpp | 19 +++ src/backends/cl/test/ClLayerTests.cpp | 19 ++- src/backends/neon/test/NeonLayerTests.cpp | 19 ++- src/backends/reference/test/RefLayerTests.cpp | 20 ++- src/backends/reference/workloads/Pooling2d.cpp | 27 ++-- src/backends/test/Conv2dTestImpl.hpp | 21 --- src/backends/test/LayerTests.cpp | 30 ++-- src/backends/test/LayerTests.hpp | 20 ++- src/backends/test/Pooling2dTestImpl.hpp | 215 +++++++++++++++++-------- 9 files changed, 255 insertions(+), 135 deletions(-) diff --git a/src/armnn/test/TensorHelpers.hpp b/src/armnn/test/TensorHelpers.hpp index 7f3ac9ec95..f1ab6c99b5 100644 --- a/src/armnn/test/TensorHelpers.hpp +++ b/src/armnn/test/TensorHelpers.hpp @@ -210,3 +210,22 @@ boost::multi_array MakeRandomTensor(const armnn::TensorInfo& tensorInfo, int32_t qOffset = tensorInfo.GetQuantizationOffset(); return MakeTensor(tensorInfo, QuantizedVector(qScale, qOffset, init)); } + +template +armnn::TensorInfo GetTensorInfo(unsigned int numberOfBatches, + unsigned int numberOfChannels, + unsigned int height, + unsigned int width, + const armnn::DataLayoutIndexed& dataLayout) +{ + switch (dataLayout.GetDataLayout()) + { + case armnn::DataLayout::NCHW: + return armnn::TensorInfo({numberOfBatches, numberOfChannels, height, width}, armnn::GetDataType()); + case armnn::DataLayout::NHWC: + 
return armnn::TensorInfo({numberOfBatches, height, width, numberOfChannels}, armnn::GetDataType()); + default: + throw armnn::InvalidArgumentException("unknown data layout [" + + std::to_string(static_cast(dataLayout.GetDataLayout())) + "]"); + } +} diff --git a/src/backends/cl/test/ClLayerTests.cpp b/src/backends/cl/test/ClLayerTests.cpp index beb523f84e..198bddd1e4 100755 --- a/src/backends/cl/test/ClLayerTests.cpp +++ b/src/backends/cl/test/ClLayerTests.cpp @@ -137,9 +137,16 @@ ARMNN_AUTO_TEST_CASE(UNSUPPORTED_IgnorePaddingSimpleL2Pooling2dUint8, IgnorePadd ARMNN_AUTO_TEST_CASE(IgnorePaddingL2Pooling2dSize3, IgnorePaddingL2Pooling2dSize3Test) ARMNN_AUTO_TEST_CASE(UNSUPPORTED_IgnorePaddingL2Pooling2dSize3Uint8, IgnorePaddingL2Pooling2dSize3Uint8Test) -ARMNN_AUTO_TEST_CASE(SimpleAveragePooling2d, SimpleAveragePooling2dTest) -ARMNN_AUTO_TEST_CASE(SimpleAveragePooling2dNhwc, SimpleAveragePooling2dNhwcTest) -ARMNN_AUTO_TEST_CASE(SimpleAveragePooling2dUint8, SimpleAveragePooling2dUint8Test) +ARMNN_AUTO_TEST_CASE(SimpleMaxPooling2d, SimpleMaxPooling2dTest, armnn::DataLayout::NCHW) +ARMNN_AUTO_TEST_CASE(SimpleMaxPooling2dNhwc, SimpleMaxPooling2dTest, armnn::DataLayout::NHWC) +ARMNN_AUTO_TEST_CASE(SimpleMaxPooling2dUint8, SimpleMaxPooling2dUint8Test, armnn::DataLayout::NCHW) +ARMNN_AUTO_TEST_CASE(SimpleMaxPooling2dUint8Nhwc, SimpleMaxPooling2dUint8Test, armnn::DataLayout::NHWC) + +ARMNN_AUTO_TEST_CASE(SimpleAveragePooling2d, SimpleAveragePooling2dTest, armnn::DataLayout::NCHW) +ARMNN_AUTO_TEST_CASE(SimpleAveragePooling2dNhwc, SimpleAveragePooling2dTest, armnn::DataLayout::NHWC) +ARMNN_AUTO_TEST_CASE(SimpleAveragePooling2dUint8, SimpleAveragePooling2dUint8Test, armnn::DataLayout::NCHW) +ARMNN_AUTO_TEST_CASE(SimpleAveragePooling2dUint8Nhwc, SimpleAveragePooling2dUint8Test, armnn::DataLayout::NHWC) + ARMNN_AUTO_TEST_CASE(IgnorePaddingAveragePooling2dSize3x2Stride2x2, IgnorePaddingAveragePooling2dSize3x2Stride2x2Test, false) @@ -149,8 +156,10 @@ 
ARMNN_AUTO_TEST_CASE(IgnorePaddingAveragePooling2dSize3x2Stride2x2NoPadding, ARMNN_AUTO_TEST_CASE(LargeTensorsAveragePooling2d, LargeTensorsAveragePooling2dTest) ARMNN_AUTO_TEST_CASE(LargeTensorsAveragePooling2dUint8, LargeTensorsAveragePooling2dUint8Test) -ARMNN_AUTO_TEST_CASE(SimpleL2Pooling2d, SimpleL2Pooling2dTest) -ARMNN_AUTO_TEST_CASE(UNSUPPORTED_SimpleL2Pooling2dUint8, SimpleL2Pooling2dUint8Test) +ARMNN_AUTO_TEST_CASE(SimpleL2Pooling2d, SimpleL2Pooling2dTest, armnn::DataLayout::NCHW) +ARMNN_AUTO_TEST_CASE(SimpleL2Pooling2dNhwc, SimpleL2Pooling2dTest, armnn::DataLayout::NHWC) +ARMNN_AUTO_TEST_CASE(UNSUPPORTED_SimpleL2Pooling2dUint8, SimpleL2Pooling2dUint8Test, armnn::DataLayout::NCHW) + ARMNN_AUTO_TEST_CASE(L2Pooling2dSize3Stride1, L2Pooling2dSize3Stride1Test) ARMNN_AUTO_TEST_CASE(UNSUPPORTED_L2Pooling2dSize3Stride1Uint8, L2Pooling2dSize3Stride1Uint8Test) ARMNN_AUTO_TEST_CASE(L2Pooling2dSize3Stride3, L2Pooling2dSize3Stride3Test) diff --git a/src/backends/neon/test/NeonLayerTests.cpp b/src/backends/neon/test/NeonLayerTests.cpp index c7b0050311..d242245ba0 100644 --- a/src/backends/neon/test/NeonLayerTests.cpp +++ b/src/backends/neon/test/NeonLayerTests.cpp @@ -247,15 +247,24 @@ BOOST_AUTO_TEST_CASE(DepthwiseConv2dUtils) // Pooling ARMNN_AUTO_TEST_CASE(SimpleMaxPooling2dSize3x3Stride2x4, SimpleMaxPooling2dSize3x3Stride2x4Test, true) ARMNN_AUTO_TEST_CASE(SimpleMaxPooling2dSize3x3Stride2x4Uint8, SimpleMaxPooling2dSize3x3Stride2x4Uint8Test, true) -ARMNN_AUTO_TEST_CASE(SimpleAveragePooling2d, SimpleAveragePooling2dTest) -ARMNN_AUTO_TEST_CASE(SimpleAveragePooling2dNhwc, SimpleAveragePooling2dNhwcTest) -ARMNN_AUTO_TEST_CASE(SimpleAveragePooling2dUint8, SimpleAveragePooling2dUint8Test) + +ARMNN_AUTO_TEST_CASE(SimpleMaxPooling2d, SimpleMaxPooling2dTest, armnn::DataLayout::NCHW) +ARMNN_AUTO_TEST_CASE(SimpleMaxPooling2dNhwc, SimpleMaxPooling2dTest, armnn::DataLayout::NHWC) +ARMNN_AUTO_TEST_CASE(SimpleMaxPooling2dUint8, SimpleMaxPooling2dUint8Test, 
armnn::DataLayout::NCHW) +ARMNN_AUTO_TEST_CASE(SimpleMaxPooling2dUint8Nhwc, SimpleMaxPooling2dUint8Test, armnn::DataLayout::NHWC) + +ARMNN_AUTO_TEST_CASE(SimpleAveragePooling2d, SimpleAveragePooling2dTest, armnn::DataLayout::NCHW) +ARMNN_AUTO_TEST_CASE(SimpleAveragePooling2dNhwc, SimpleAveragePooling2dTest, armnn::DataLayout::NHWC) +ARMNN_AUTO_TEST_CASE(SimpleAveragePooling2dUint8, SimpleAveragePooling2dUint8Test, armnn::DataLayout::NCHW) +ARMNN_AUTO_TEST_CASE(SimpleAveragePooling2dUint8Nhwc, SimpleAveragePooling2dUint8Test, armnn::DataLayout::NHWC) ARMNN_AUTO_TEST_CASE(LargeTensorsAveragePooling2d, LargeTensorsAveragePooling2dTest) ARMNN_AUTO_TEST_CASE(LargeTensorsAveragePooling2dUint8, LargeTensorsAveragePooling2dUint8Test) -ARMNN_AUTO_TEST_CASE(SimpleL2Pooling2d, SimpleL2Pooling2dTest) -ARMNN_AUTO_TEST_CASE(UNSUPPORTED_SimpleL2Pooling2dUint8, SimpleL2Pooling2dUint8Test) +ARMNN_AUTO_TEST_CASE(SimpleL2Pooling2d, SimpleL2Pooling2dTest, armnn::DataLayout::NCHW) +ARMNN_AUTO_TEST_CASE(SimpleL2Pooling2dNhwc, SimpleL2Pooling2dTest, armnn::DataLayout::NHWC) +ARMNN_AUTO_TEST_CASE(UNSUPPORTED_SimpleL2Pooling2dUint8, SimpleL2Pooling2dUint8Test, armnn::DataLayout::NCHW) + ARMNN_AUTO_TEST_CASE(L2Pooling2dSize3Stride1, L2Pooling2dSize3Stride1Test) ARMNN_AUTO_TEST_CASE(UNSUPPORTED_L2Pooling2dSize3Stride1Uint8, L2Pooling2dSize3Stride1Uint8Test) ARMNN_AUTO_TEST_CASE(L2Pooling2dSize3Stride3, L2Pooling2dSize3Stride3Test) diff --git a/src/backends/reference/test/RefLayerTests.cpp b/src/backends/reference/test/RefLayerTests.cpp index 40944bf5a6..f5884aee17 100644 --- a/src/backends/reference/test/RefLayerTests.cpp +++ b/src/backends/reference/test/RefLayerTests.cpp @@ -114,10 +114,16 @@ ARMNN_AUTO_TEST_CASE(IgnorePaddingSimpleL2Pooling2dUint8, IgnorePaddingSimpleL2P ARMNN_AUTO_TEST_CASE(IgnorePaddingL2Pooling2dSize3, IgnorePaddingL2Pooling2dSize3Test) ARMNN_AUTO_TEST_CASE(IgnorePaddingL2Pooling2dSize3Uint8, IgnorePaddingL2Pooling2dSize3Uint8Test) 
-ARMNN_AUTO_TEST_CASE(SimpleAveragePooling2d, SimpleAveragePooling2dTest) -ARMNN_AUTO_TEST_CASE(SimpleAveragePooling2dNhwc, SimpleAveragePooling2dNhwcTest) -ARMNN_AUTO_TEST_CASE(SimpleAveragePooling2dUint8, SimpleAveragePooling2dUint8Test) -ARMNN_AUTO_TEST_CASE(SimpleAveragePooling2dUint8Nhwc, SimpleAveragePooling2dUint8NhwcTest) +ARMNN_AUTO_TEST_CASE(SimpleMaxPooling2d, SimpleMaxPooling2dTest, armnn::DataLayout::NCHW) +ARMNN_AUTO_TEST_CASE(SimpleMaxPooling2dNhwc, SimpleMaxPooling2dTest, armnn::DataLayout::NHWC) +ARMNN_AUTO_TEST_CASE(SimpleMaxPooling2dUint8, SimpleMaxPooling2dUint8Test, armnn::DataLayout::NCHW) +ARMNN_AUTO_TEST_CASE(SimpleMaxPooling2dUint8Nhwc, SimpleMaxPooling2dUint8Test, armnn::DataLayout::NHWC) + +ARMNN_AUTO_TEST_CASE(SimpleAveragePooling2d, SimpleAveragePooling2dTest, armnn::DataLayout::NCHW) +ARMNN_AUTO_TEST_CASE(SimpleAveragePooling2dNhwc, SimpleAveragePooling2dTest, armnn::DataLayout::NHWC) +ARMNN_AUTO_TEST_CASE(SimpleAveragePooling2dUint8, SimpleAveragePooling2dUint8Test, armnn::DataLayout::NCHW) +ARMNN_AUTO_TEST_CASE(SimpleAveragePooling2dUint8Nhwc, SimpleAveragePooling2dUint8Test, armnn::DataLayout::NHWC) + ARMNN_AUTO_TEST_CASE(IgnorePaddingAveragePooling2dSize3x2Stride2x2, IgnorePaddingAveragePooling2dSize3x2Stride2x2Test, false) ARMNN_AUTO_TEST_CASE(IgnorePaddingAveragePooling2dSize3x2Stride2x2NoPadding, @@ -126,8 +132,10 @@ ARMNN_AUTO_TEST_CASE(IgnorePaddingAveragePooling2dSize3x2Stride2x2NoPadding, ARMNN_AUTO_TEST_CASE(LargeTensorsAveragePooling2d, LargeTensorsAveragePooling2dTest) ARMNN_AUTO_TEST_CASE(LargeTensorsAveragePooling2dUint8, LargeTensorsAveragePooling2dUint8Test) -ARMNN_AUTO_TEST_CASE(SimpleL2Pooling2d, SimpleL2Pooling2dTest) -ARMNN_AUTO_TEST_CASE(SimpleL2Pooling2dUint8, SimpleL2Pooling2dUint8Test) +ARMNN_AUTO_TEST_CASE(SimpleL2Pooling2d, SimpleL2Pooling2dTest, armnn::DataLayout::NCHW) +ARMNN_AUTO_TEST_CASE(SimpleL2Pooling2dNhwc, SimpleL2Pooling2dTest, armnn::DataLayout::NHWC) +ARMNN_AUTO_TEST_CASE(SimpleL2Pooling2dUint8, 
SimpleL2Pooling2dUint8Test, armnn::DataLayout::NCHW) +ARMNN_AUTO_TEST_CASE(SimpleL2Pooling2dNhwcUint8, SimpleL2Pooling2dUint8Test, armnn::DataLayout::NHWC) ARMNN_AUTO_TEST_CASE(L2Pooling2dSize7, L2Pooling2dSize7Test) ARMNN_AUTO_TEST_CASE(L2Pooling2dSize7Uint8, L2Pooling2dSize7Uint8Test) diff --git a/src/backends/reference/workloads/Pooling2d.cpp b/src/backends/reference/workloads/Pooling2d.cpp index 9890920113..d2fd0da42c 100644 --- a/src/backends/reference/workloads/Pooling2d.cpp +++ b/src/backends/reference/workloads/Pooling2d.cpp @@ -4,6 +4,7 @@ // #include "Pooling2d.hpp" +#include "TensorBufferArrayView.hpp" #include #include @@ -143,9 +144,10 @@ void Pooling2d(const float* in, const TensorInfo& outputInfo, const Pooling2dDescriptor& params) { - const unsigned int channelsIndex = params.m_DataLayout.GetChannelsIndex(); - const unsigned int heightIndex = params.m_DataLayout.GetHeightIndex(); - const unsigned int widthIndex = params.m_DataLayout.GetWidthIndex(); + const armnn::DataLayoutIndexed dataLayout = params.m_DataLayout; + auto channelsIndex = dataLayout.GetChannelsIndex(); + auto heightIndex = dataLayout.GetHeightIndex(); + auto widthIndex = dataLayout.GetWidthIndex(); const int batchSize = boost::numeric_cast(outputInfo.GetShape()[0]); const int channels = boost::numeric_cast(outputInfo.GetShape()[channelsIndex]); @@ -167,6 +169,9 @@ void Pooling2d(const float* in, Accumulator accumulate = GetAccumulator(params.m_PoolType); Executor execute = GetExecutor(params.m_PoolType); + TensorBufferArrayView input(inputInfo.GetShape(), in, dataLayout); + TensorBufferArrayView output(outputInfo.GetShape(), out, dataLayout); + // Check supported padding methods outside the loop to simplify // the inner loop. 
if (params.m_PaddingMethod != PaddingMethod::Exclude && @@ -221,10 +226,10 @@ void Pooling2d(const float* in, { for (auto xInput = wstart; xInput < wend; xInput++) { - float inval = in[n * widthInput * heightInput * channels + - c * widthInput * heightInput + - yInput * widthInput + - xInput]; + float inval = input.Get(boost::numeric_cast(n), + boost::numeric_cast(c), + boost::numeric_cast(yInput), + boost::numeric_cast(xInput)); accumulate(result, inval); } @@ -232,10 +237,10 @@ void Pooling2d(const float* in, execute(result, poolAreaSize); - out[n * widthOutput * heightOutput * channels + - c * widthOutput * heightOutput + - yOutput * widthOutput + - xOutput] = result; + output.Get(boost::numeric_cast(n), + boost::numeric_cast(c), + boost::numeric_cast(yOutput), + boost::numeric_cast(xOutput)) = result; } } } diff --git a/src/backends/test/Conv2dTestImpl.hpp b/src/backends/test/Conv2dTestImpl.hpp index 7a3f452515..ce3e435d1a 100755 --- a/src/backends/test/Conv2dTestImpl.hpp +++ b/src/backends/test/Conv2dTestImpl.hpp @@ -62,27 +62,6 @@ void ApplyBias(std::vector& v, float vScale, int32_t vOffset, } } -template -armnn::TensorInfo GetTensorInfo(unsigned int numberOfBatches, - unsigned int numberOfChannels, - unsigned int height, - unsigned int width, - const armnn::DataLayoutIndexed& layout) -{ - switch (layout.GetDataLayout()) - { - case armnn::DataLayout::NCHW: - return armnn::TensorInfo({numberOfBatches, numberOfChannels, height, width}, armnn::GetDataType()); - case armnn::DataLayout ::NHWC: - return armnn::TensorInfo({numberOfBatches, height, width, numberOfChannels}, armnn::GetDataType()); - default: - throw armnn::InvalidArgumentException("unknown data layout [" - + std::to_string(static_cast(layout.GetDataLayout())) + "]"); - } -} - - - template LayerTestResult SimpleConvolution2dTestImpl(armnn::IWorkloadFactory& workloadFactory, const boost::multi_array& originalInput, diff --git a/src/backends/test/LayerTests.cpp b/src/backends/test/LayerTests.cpp index 
ae6d16c755..abe3704a17 100755 --- a/src/backends/test/LayerTests.cpp +++ b/src/backends/test/LayerTests.cpp @@ -5602,24 +5602,28 @@ LayerTestResult SimpleMaxPooling2dSize3x3Stride2x4Uint8Test(armnn::I return SimpleMaxPooling2dSize3x3Stride2x4TestCommon(workloadFactory, forceNoPadding, 0.1f, 128); } -LayerTestResult SimpleAveragePooling2dTest(armnn::IWorkloadFactory& workloadFactory) +LayerTestResult SimpleMaxPooling2dTest(armnn::IWorkloadFactory& workloadFactory, + const armnn::DataLayoutIndexed& dataLayout) { - return SimpleAveragePooling2dTest(workloadFactory); + return SimpleMaxPooling2dTestCommon(workloadFactory, dataLayout); } -LayerTestResult SimpleAveragePooling2dNhwcTest(armnn::IWorkloadFactory& workloadFactory) +LayerTestResult SimpleMaxPooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory, + const armnn::DataLayoutIndexed& dataLayout) { - return SimpleAveragePooling2dNhwcTest(workloadFactory); + return SimpleMaxPooling2dTestCommon(workloadFactory, dataLayout); } -LayerTestResult SimpleAveragePooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory) +LayerTestResult SimpleAveragePooling2dTest(armnn::IWorkloadFactory& workloadFactory, + const armnn::DataLayoutIndexed& dataLayout) { - return SimpleAveragePooling2dTest(workloadFactory, 0.5, -1); + return SimpleAveragePooling2dTestCommon(workloadFactory, dataLayout); } -LayerTestResult SimpleAveragePooling2dUint8NhwcTest(armnn::IWorkloadFactory& workloadFactory) +LayerTestResult SimpleAveragePooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory, + const armnn::DataLayoutIndexed& dataLayout) { - return SimpleAveragePooling2dNhwcTest(workloadFactory, 0.5, -1); + return SimpleAveragePooling2dTestCommon(workloadFactory, dataLayout, 0.5, -1); } LayerTestResult IgnorePaddingAveragePooling2dSize3x2Stride2x2Test(armnn::IWorkloadFactory& workloadFactory, @@ -5638,14 +5642,16 @@ LayerTestResult LargeTensorsAveragePooling2dUint8Test(armnn::IWorklo return 
LargeTensorsAveragePooling2dTestCommon(workloadFactory, 0.5, -1); } -LayerTestResult SimpleL2Pooling2dTest(armnn::IWorkloadFactory& workloadFactory) +LayerTestResult SimpleL2Pooling2dTest(armnn::IWorkloadFactory& workloadFactory, + const armnn::DataLayoutIndexed& dataLayout) { - return SimpleL2Pooling2dTestCommon(workloadFactory); + return SimpleL2Pooling2dTestCommon(workloadFactory, dataLayout); } -LayerTestResult SimpleL2Pooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory) +LayerTestResult SimpleL2Pooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory, + const armnn::DataLayoutIndexed& dataLayout) { - return SimpleL2Pooling2dTestCommon(workloadFactory); + return SimpleL2Pooling2dTestCommon(workloadFactory, dataLayout); } LayerTestResult L2Pooling2dSize3Stride1Test(armnn::IWorkloadFactory& workloadFactory) diff --git a/src/backends/test/LayerTests.hpp b/src/backends/test/LayerTests.hpp index 0e45024e22..392d3bf34e 100644 --- a/src/backends/test/LayerTests.hpp +++ b/src/backends/test/LayerTests.hpp @@ -99,10 +99,16 @@ LayerTestResult IgnorePaddingSimpleMaxPooling2dUint8Test(armnn::IWor LayerTestResult IgnorePaddingMaxPooling2dSize3Test(armnn::IWorkloadFactory& workloadFactory); LayerTestResult IgnorePaddingMaxPooling2dSize3Uint8Test(armnn::IWorkloadFactory& workloadFactory); -LayerTestResult SimpleAveragePooling2dTest(armnn::IWorkloadFactory& workloadFactory); -LayerTestResult SimpleAveragePooling2dNhwcTest(armnn::IWorkloadFactory& workloadFactory); -LayerTestResult SimpleAveragePooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory); -LayerTestResult SimpleAveragePooling2dUint8NhwcTest(armnn::IWorkloadFactory& workloadFactory); +LayerTestResult SimpleMaxPooling2dTest(armnn::IWorkloadFactory& workloadFactory, + const armnn::DataLayoutIndexed& dataLayout); +LayerTestResult SimpleMaxPooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory, + const armnn::DataLayoutIndexed& dataLayout); + +LayerTestResult 
SimpleAveragePooling2dTest(armnn::IWorkloadFactory& workloadFactory, + const armnn::DataLayoutIndexed& dataLayout); +LayerTestResult SimpleAveragePooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory, + const armnn::DataLayoutIndexed& dataLayout); + LayerTestResult IgnorePaddingAveragePooling2dSize3x2Stride2x2Test(armnn::IWorkloadFactory& workloadFactory, bool forceNoPadding); LayerTestResult IgnorePaddingSimpleAveragePooling2dTest(armnn::IWorkloadFactory& workloadFactory); @@ -113,8 +119,10 @@ LayerTestResult IgnorePaddingSimpleAveragePooling2dNoPaddingUint8Tes LayerTestResult IgnorePaddingAveragePooling2dSize3Test(armnn::IWorkloadFactory& workloadFactory); LayerTestResult IgnorePaddingAveragePooling2dSize3Uint8Test(armnn::IWorkloadFactory& workloadFactory); -LayerTestResult SimpleL2Pooling2dTest(armnn::IWorkloadFactory& workloadFactory); -LayerTestResult SimpleL2Pooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory); +LayerTestResult SimpleL2Pooling2dTest(armnn::IWorkloadFactory& workloadFactory, + const armnn::DataLayoutIndexed& dataLayout); +LayerTestResult SimpleL2Pooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory, + const armnn::DataLayoutIndexed& dataLayout); LayerTestResult L2Pooling2dSize3Stride1Test(armnn::IWorkloadFactory& workloadFactory); LayerTestResult L2Pooling2dSize3Stride1Uint8Test(armnn::IWorkloadFactory& workloadFactory); diff --git a/src/backends/test/Pooling2dTestImpl.hpp b/src/backends/test/Pooling2dTestImpl.hpp index 90be2897e8..eea423275c 100644 --- a/src/backends/test/Pooling2dTestImpl.hpp +++ b/src/backends/test/Pooling2dTestImpl.hpp @@ -4,6 +4,7 @@ // #pragma once +#include #include #include @@ -13,6 +14,8 @@ #include #include #include +#include "Permute.hpp" +#include template LayerTestResult SimplePooling2dTestImpl(armnn::IWorkloadFactory& workloadFactory, @@ -22,9 +25,10 @@ LayerTestResult SimplePooling2dTestImpl(armnn::IWorkloadFactory& workloadF const boost::multi_array& input, const boost::multi_array& 
outputExpected) { - const unsigned int channelsIndex = descriptor.m_DataLayout.GetChannelsIndex(); - const unsigned int heightIndex = descriptor.m_DataLayout.GetHeightIndex(); - const unsigned int widthIndex = descriptor.m_DataLayout.GetWidthIndex(); + const armnn::DataLayoutIndexed dataLayout = descriptor.m_DataLayout; + auto heightIndex = dataLayout.GetHeightIndex(); + auto widthIndex = dataLayout.GetWidthIndex(); + auto channelsIndex = dataLayout.GetChannelsIndex(); unsigned int inputHeight = boost::numeric_cast(input.shape()[heightIndex]); unsigned int inputWidth = boost::numeric_cast(input.shape()[widthIndex]); @@ -36,23 +40,10 @@ LayerTestResult SimplePooling2dTestImpl(armnn::IWorkloadFactory& workloadF unsigned int outputChannels = boost::numeric_cast(outputExpected.shape()[channelsIndex]); unsigned int outputBatchSize = boost::numeric_cast(outputExpected.shape()[0]); - armnn::TensorShape inputTensorShape; - armnn::TensorShape outputTensorShape; - - switch (descriptor.m_DataLayout.GetDataLayout()) - { - case armnn::DataLayout::NHWC: - inputTensorShape = { inputBatchSize, inputHeight, inputWidth, inputChannels }; - outputTensorShape = { outputBatchSize, outputHeight, outputWidth, outputChannels }; - break; - case armnn::DataLayout::NCHW: - default: - inputTensorShape = { inputBatchSize, inputChannels, inputHeight, inputWidth }; - outputTensorShape = { outputBatchSize, outputChannels, outputHeight, outputWidth }; - } - - armnn::TensorInfo inputTensorInfo(inputTensorShape, armnn::GetDataType()); - armnn::TensorInfo outputTensorInfo(outputTensorShape, armnn::GetDataType()); + armnn::TensorInfo inputTensorInfo = GetTensorInfo(inputBatchSize, inputChannels, inputHeight, + inputWidth, dataLayout); + armnn::TensorInfo outputTensorInfo = GetTensorInfo(outputBatchSize, outputChannels, outputHeight, + outputWidth, dataLayout); // Set quantization parameters if the requested type is a quantized type. 
if(armnn::IsQuantizedType()) @@ -70,7 +61,7 @@ LayerTestResult SimplePooling2dTestImpl(armnn::IWorkloadFactory& workloadF armnn::Pooling2dQueueDescriptor queueDescriptor; queueDescriptor.m_Parameters = descriptor; - queueDescriptor.m_Parameters.m_DataLayout = descriptor.m_DataLayout; + queueDescriptor.m_Parameters.m_DataLayout = dataLayout; armnn::WorkloadInfo workloadInfo; AddInputToWorkload(queueDescriptor, workloadInfo, inputTensorInfo, inputHandle.get()); @@ -234,26 +225,20 @@ LayerTestResult SimpleMaxPooling2dSize3x3Stride2x4TestCommon(armnn::IWorkl } template -LayerTestResult SimpleAveragePooling2dTestCommon(armnn::IWorkloadFactory& workloadFactory, - const armnn::TensorShape& inputTensorShape, - const armnn::TensorShape& outputTensorShape, - armnn::DataLayout dataLayout, - float qScale = 1.0f, - int32_t qOffset = 0) +LayerTestResult SimpleMaxPooling2dTestCommon(armnn::IWorkloadFactory& workloadFactory, + const armnn::DataLayoutIndexed& dataLayout = armnn::DataLayout::NCHW, + float qScale = 1.0f, + int32_t qOffset = 0) { armnn::Pooling2dDescriptor descriptor; - descriptor.m_PoolType = armnn::PoolingAlgorithm::Average; + descriptor.m_PoolType = armnn::PoolingAlgorithm::Max; descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2; descriptor.m_StrideX = descriptor.m_StrideY = 2; - descriptor.m_PadLeft = 1; - descriptor.m_PadRight = 1; - descriptor.m_PadTop = 1; - descriptor.m_PadBottom = 1; descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude; descriptor.m_DataLayout = dataLayout; - armnn::TensorInfo inputTensorInfo(inputTensorShape, armnn::GetDataType()); - armnn::TensorInfo outputTensorInfo(outputTensorShape, armnn::GetDataType()); + armnn::TensorInfo inputTensorInfo = GetTensorInfo(1, 2, 4, 4, dataLayout); + armnn::TensorInfo outputTensorInfo = GetTensorInfo(1, 2, 2, 2, dataLayout); // Set quantization parameters if the requested type is a quantized type. 
if(armnn::IsQuantizedType()) @@ -264,46 +249,111 @@ LayerTestResult SimpleAveragePooling2dTestCommon(armnn::IWorkloadFactory& outputTensorInfo.SetQuantizationOffset(qOffset); } - auto input = MakeTensor(inputTensorInfo, + std::vector inputData( QuantizedVector(qScale, qOffset, { - 1.0f, 2.0f, 3.0f, 4.0f, - 1.0f, 2.0f, 3.0f, 4.0f, - 1.0f, 2.0f, 3.0f, 4.0f, - 1.0f, 2.0f, 3.0f, 4.0f, + 1.0f, 2.0f, 5.0f, 6.0f, + 3.0f, 4.0f, 7.0f, 8.0f, + 9.0f, 10.0f, 13.0f, 14.0f, + 11.0f, 12.0f, 15.0f, 16.0f, + + 17.0f, 18.0f, 21.0f, 22.0f, + 19.0f, 20.0f, 23.0f, 24.0f, + 25.0f, 26.0f, 29.0f, 30.0f, + 27.0f, 28.0f, 31.0f, 32.0f, })); - auto outputExpected = MakeTensor(outputTensorInfo, + std::vector outputData( QuantizedVector(qScale, qOffset, { - 1.0f, 2.5f, 4.0f, - 1.0f, 2.5f, 4.0f, - 1.0f, 2.5f, 4.0f, + 4.0f, 8.0f, + 12.0f, 16.0f, + + 20.0f, 24.0f, + 28.0f, 32.0f, })); + const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 }; + if (dataLayout.GetDataLayout() == armnn::DataLayout::NHWC) + { + std::vector tmp(inputData.size()); + armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data()); + inputData = tmp; + + std::vector tmp1(outputData.size()); + armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data()); + outputData = tmp1; + } + + auto input = MakeTensor(inputTensorInfo, inputData); + + auto outputExpected = MakeTensor(outputTensorInfo, outputData); + return SimplePooling2dTestImpl(workloadFactory, descriptor, qScale, qOffset, input, outputExpected); } template -LayerTestResult SimpleAveragePooling2dTest(armnn::IWorkloadFactory& workloadFactory, - float qScale = 1.0f, - int32_t qOffset = 0) +LayerTestResult SimpleAveragePooling2dTestCommon(armnn::IWorkloadFactory& workloadFactory, + armnn::DataLayoutIndexed dataLayout = armnn::DataLayout::NCHW, + float qScale = 1.0f, + int32_t qOffset = 0) { - const armnn::TensorShape inputTensorShape { 1, 1, 4, 4 }; - const armnn::TensorShape outputTensorShape { 1, 1, 3, 3 }; 
+ armnn::Pooling2dDescriptor descriptor; + descriptor.m_PoolType = armnn::PoolingAlgorithm::Average; + descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2; + descriptor.m_StrideX = descriptor.m_StrideY = 2; + descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude; + descriptor.m_DataLayout = dataLayout; - return SimpleAveragePooling2dTestCommon(workloadFactory, inputTensorShape, outputTensorShape, - armnn::DataLayout::NCHW, qScale, qOffset); -} + armnn::TensorInfo inputTensorInfo = GetTensorInfo(1, 2, 4, 4, dataLayout); + armnn::TensorInfo outputTensorInfo = GetTensorInfo(1, 2, 2, 2, dataLayout); -template -LayerTestResult SimpleAveragePooling2dNhwcTest(armnn::IWorkloadFactory& workloadFactory, - float qScale = 1.0f, - int32_t qOffset = 0) -{ - const armnn::TensorShape inputTensorShape { 1, 4, 4, 1 }; - const armnn::TensorShape outputTensorShape { 1, 3, 3, 1 }; + // Set quantization parameters if the requested type is a quantized type. + if(armnn::IsQuantizedType()) + { + inputTensorInfo.SetQuantizationScale(qScale); + inputTensorInfo.SetQuantizationOffset(qOffset); + outputTensorInfo.SetQuantizationScale(qScale); + outputTensorInfo.SetQuantizationOffset(qOffset); + } + + std::vector inputData( + QuantizedVector(qScale, qOffset, { + 2.0f, 2.0f, 6.0f, 6.0f, + 4.0f, 4.0f, 8.0f, 8.0f, + 10.0f, 12.0f, 14.0f, 16.0f, + 10.0f, 12.0f, 16.0f, 14.0f, + + 18.0f, 20.0f, 24.0f, 22.0f, + 20.0f, 18.0f, 22.0f, 24.0f, + 26.0f, 28.0f, 0.0f, 0.0f, + 26.0f, 28.0f, 0.0f, 0.0f, + })); + + std::vector outputData( + QuantizedVector(qScale, qOffset, { + 3.0f, 7.0f, + 11.0f, 15.0f, + + 19.0f, 23.0f, + 27.0f, 0.0f, + })); + + const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 }; + if (dataLayout.GetDataLayout() == armnn::DataLayout::NHWC) + { + std::vector tmp(inputData.size()); + armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data()); + inputData = tmp; - return SimpleAveragePooling2dTestCommon(workloadFactory, inputTensorShape, 
outputTensorShape, - armnn::DataLayout::NHWC, qScale, qOffset); + std::vector tmp1(outputData.size()); + armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data()); + outputData = tmp1; + } + + auto input = MakeTensor(inputTensorInfo, inputData); + + auto outputExpected = MakeTensor(outputTensorInfo, outputData); + + return SimplePooling2dTestImpl(workloadFactory, descriptor, qScale, qOffset, input, outputExpected); } template @@ -356,6 +406,7 @@ LayerTestResult LargeTensorsAveragePooling2dTestCommon(armnn::IWorkloadFac template LayerTestResult SimpleL2Pooling2dTestCommon(armnn::IWorkloadFactory& workloadFactory, + armnn::DataLayoutIndexed dataLayout = armnn::DataLayout::NCHW, float qScale = 1.0f, int32_t qOffset = 0) { @@ -364,23 +415,49 @@ LayerTestResult SimpleL2Pooling2dTestCommon(armnn::IWorkloadFactory& workl descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2; descriptor.m_StrideX = descriptor.m_StrideY = 2; descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude; + descriptor.m_DataLayout = dataLayout; - armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, armnn::GetDataType()); - auto input = MakeTensor(inputTensorInfo, + armnn::TensorInfo inputTensorInfo = GetTensorInfo(1, 2, 4, 4, dataLayout); + armnn::TensorInfo outputTensorInfo = GetTensorInfo(1, 2, 2, 2, dataLayout); + + std::vector inputData( QuantizedVector(qScale, qOffset, { - 1.0f, 7.0f, 1.0f, 7.0f, - 1.0f, 7.0f, 1.0f, 7.0f, - 1.0f, 7.0f, 1.0f, 7.0f, - 1.0f, 7.0f, 1.0f, 7.0f, + 1.0f, 7.0f, 5.0f, 5.0f, + 1.0f, 7.0f, 5.0f, 5.0f, + 3.0f, 3.0f, 1.0f, 1.0f, + 3.0f, 3.0f, 1.0f, 1.0f, + + 1.0f, 7.0f, 0.0f, 0.0f, + 1.0f, 7.0f, 2.0f, 0.0f, + 0.0f, 2.0f, 1.0f, 1.0f, + 0.0f, 0.0f, 1.0f, 1.0f, })); - armnn::TensorInfo outputTensorInfo({ 1, 1, 2, 2 }, armnn::GetDataType()); - auto outputExpected = MakeTensor(outputTensorInfo, + std::vector outputData( QuantizedVector(qScale, qOffset, { 5.0f, 5.0f, - 5.0f, 5.0f, + 3.0f, 1.0f, + + 5.0f, 1.0f, + 1.0f, 1.0f, })); + const 
armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 }; + if (dataLayout.GetDataLayout() == armnn::DataLayout::NHWC) + { + std::vector tmp(inputData.size()); + armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data()); + inputData = tmp; + + std::vector tmp1(outputData.size()); + armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data()); + outputData = tmp1; + } + + auto input = MakeTensor(inputTensorInfo, inputData); + + auto outputExpected = MakeTensor(outputTensorInfo, outputData); + return SimplePooling2dTestImpl(workloadFactory, descriptor, qScale, qOffset, input, outputExpected); } -- cgit v1.2.1