author     James Conroy <james.conroy@arm.com>    2018-10-31 11:47:53 +0000
committer  James Conroy <james.conroy@arm.com>    2018-10-31 12:06:53 +0000
commit     45a9b775bf63283320315d90e4e9a6c641df6e20 (patch)
tree       e1f0d33d98410255a6804ea9cccf16805fc6080f
parent     d84216a013445e86183e39c8b5b904836c71a95b (diff)
download   armnn-45a9b775bf63283320315d90e4e9a6c641df6e20.tar.gz
IVGCVSW-2102: Fix Pooling2D CpuRef indexing bug
* Fixes a bug when calculating indexes for NHWC in the Pooling2D CpuRef implementation; it now uses TensorBufferArrayView.
* Adds 2-channel unit tests for Pooling2d on CpuRef, Cl and Neon. The single-channel tests were not properly exercising Pooling2d with the NHWC data layout.
* Refactors the Pooling2D NHWC tests so that the input and output data are permuted to NHWC when necessary, instead of hard-coding the data in NHWC format.

Change-Id: I5b9d41ed425ff283ea8c8ef6b1266ae0bc80f43b
-rw-r--r--   src/armnn/test/TensorHelpers.hpp                  19
-rwxr-xr-x   src/backends/cl/test/ClLayerTests.cpp             19
-rw-r--r--   src/backends/neon/test/NeonLayerTests.cpp         19
-rw-r--r--   src/backends/reference/test/RefLayerTests.cpp     20
-rw-r--r--   src/backends/reference/workloads/Pooling2d.cpp    27
-rwxr-xr-x   src/backends/test/Conv2dTestImpl.hpp              21
-rwxr-xr-x   src/backends/test/LayerTests.cpp                  30
-rw-r--r--   src/backends/test/LayerTests.hpp                  20
-rw-r--r--   src/backends/test/Pooling2dTestImpl.hpp          215
9 files changed, 255 insertions, 135 deletions
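To illustrate the indexing bug described in the commit message, here is a minimal standalone C++ sketch (names and the layout enum are assumptions for illustration, not the actual armnn::TensorBufferArrayView API) showing why a hard-coded NCHW offset reads the wrong element from an NHWC buffer as soon as there is more than one channel, which is exactly what the new 2-channel tests exercise:

// Hypothetical standalone sketch; not the ArmNN implementation.
#include <cstddef>
#include <iostream>
#include <vector>

enum class Layout { NCHW, NHWC };

// Compute the flat offset of element (n, c, h, w) for the given layout.
std::size_t Offset(Layout layout, std::size_t channels, std::size_t height, std::size_t width,
                   std::size_t n, std::size_t c, std::size_t h, std::size_t w)
{
    if (layout == Layout::NCHW)
    {
        return ((n * channels + c) * height + h) * width + w;
    }
    // NHWC: channels vary fastest.
    return ((n * height + h) * width + w) * channels + c;
}

int main()
{
    const std::size_t N = 1, C = 2, H = 4, W = 4;
    std::vector<float> nchw(N * C * H * W), nhwc(N * C * H * W);

    // Fill both buffers so that the logical element (n, c, h, w) holds the same value in each.
    for (std::size_t c = 0; c < C; ++c)
        for (std::size_t h = 0; h < H; ++h)
            for (std::size_t w = 0; w < W; ++w)
            {
                float value = static_cast<float>(c * H * W + h * W + w);
                nchw[Offset(Layout::NCHW, C, H, W, 0, c, h, w)] = value;
                nhwc[Offset(Layout::NHWC, C, H, W, 0, c, h, w)] = value;
            }

    // The old reference workload always used the NCHW formula; applied to NHWC
    // data it reads the wrong element once channels > 1 (prints "29 != 27").
    std::cout << nhwc[Offset(Layout::NCHW, C, H, W, 0, 1, 2, 3)] << " != "
              << nhwc[Offset(Layout::NHWC, C, H, W, 0, 1, 2, 3)] << "\n";
    return 0;
}

A layout-aware accessor such as TensorBufferArrayView hides this choice of formula behind a Get(n, c, h, w) call, which is the approach the reference workload switches to below.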
diff --git a/src/armnn/test/TensorHelpers.hpp b/src/armnn/test/TensorHelpers.hpp
index 7f3ac9ec95..f1ab6c99b5 100644
--- a/src/armnn/test/TensorHelpers.hpp
+++ b/src/armnn/test/TensorHelpers.hpp
@@ -210,3 +210,22 @@ boost::multi_array<T, n> MakeRandomTensor(const armnn::TensorInfo& tensorInfo,
int32_t qOffset = tensorInfo.GetQuantizationOffset();
return MakeTensor<T, n>(tensorInfo, QuantizedVector<T>(qScale, qOffset, init));
}
+
+template<typename T>
+armnn::TensorInfo GetTensorInfo(unsigned int numberOfBatches,
+ unsigned int numberOfChannels,
+ unsigned int height,
+ unsigned int width,
+ const armnn::DataLayoutIndexed& dataLayout)
+{
+ switch (dataLayout.GetDataLayout())
+ {
+ case armnn::DataLayout::NCHW:
+ return armnn::TensorInfo({numberOfBatches, numberOfChannels, height, width}, armnn::GetDataType<T>());
+ case armnn::DataLayout::NHWC:
+ return armnn::TensorInfo({numberOfBatches, height, width, numberOfChannels}, armnn::GetDataType<T>());
+ default:
+ throw armnn::InvalidArgumentException("unknown data layout ["
+ + std::to_string(static_cast<int>(dataLayout.GetDataLayout())) + "]");
+ }
+}
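The GetTensorInfo helper above only reorders the shape according to the layout. A short usage note (a fragment assuming the ArmNN test headers above, not standalone code; DataLayout converts implicitly to DataLayoutIndexed, as the test macros rely on):

// Hypothetical usage inside a test body.
armnn::TensorInfo nchwInfo = GetTensorInfo<float>(1, 2, 4, 4, armnn::DataLayout::NCHW);
armnn::TensorInfo nhwcInfo = GetTensorInfo<float>(1, 2, 4, 4, armnn::DataLayout::NHWC);
// nchwInfo.GetShape() is { 1, 2, 4, 4 }  (batches, channels, height, width)
// nhwcInfo.GetShape() is { 1, 4, 4, 2 }  (batches, height, width, channels)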
diff --git a/src/backends/cl/test/ClLayerTests.cpp b/src/backends/cl/test/ClLayerTests.cpp
index beb523f84e..198bddd1e4 100755
--- a/src/backends/cl/test/ClLayerTests.cpp
+++ b/src/backends/cl/test/ClLayerTests.cpp
@@ -137,9 +137,16 @@ ARMNN_AUTO_TEST_CASE(UNSUPPORTED_IgnorePaddingSimpleL2Pooling2dUint8, IgnorePadd
ARMNN_AUTO_TEST_CASE(IgnorePaddingL2Pooling2dSize3, IgnorePaddingL2Pooling2dSize3Test)
ARMNN_AUTO_TEST_CASE(UNSUPPORTED_IgnorePaddingL2Pooling2dSize3Uint8, IgnorePaddingL2Pooling2dSize3Uint8Test)
-ARMNN_AUTO_TEST_CASE(SimpleAveragePooling2d, SimpleAveragePooling2dTest)
-ARMNN_AUTO_TEST_CASE(SimpleAveragePooling2dNhwc, SimpleAveragePooling2dNhwcTest)
-ARMNN_AUTO_TEST_CASE(SimpleAveragePooling2dUint8, SimpleAveragePooling2dUint8Test)
+ARMNN_AUTO_TEST_CASE(SimpleMaxPooling2d, SimpleMaxPooling2dTest, armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(SimpleMaxPooling2dNhwc, SimpleMaxPooling2dTest, armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(SimpleMaxPooling2dUint8, SimpleMaxPooling2dUint8Test, armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(SimpleMaxPooling2dUint8Nhwc, SimpleMaxPooling2dUint8Test, armnn::DataLayout::NHWC)
+
+ARMNN_AUTO_TEST_CASE(SimpleAveragePooling2d, SimpleAveragePooling2dTest, armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(SimpleAveragePooling2dNhwc, SimpleAveragePooling2dTest, armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(SimpleAveragePooling2dUint8, SimpleAveragePooling2dUint8Test, armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(SimpleAveragePooling2dUint8Nhwc, SimpleAveragePooling2dUint8Test, armnn::DataLayout::NHWC)
+
ARMNN_AUTO_TEST_CASE(IgnorePaddingAveragePooling2dSize3x2Stride2x2,
IgnorePaddingAveragePooling2dSize3x2Stride2x2Test,
false)
@@ -149,8 +156,10 @@ ARMNN_AUTO_TEST_CASE(IgnorePaddingAveragePooling2dSize3x2Stride2x2NoPadding,
ARMNN_AUTO_TEST_CASE(LargeTensorsAveragePooling2d, LargeTensorsAveragePooling2dTest)
ARMNN_AUTO_TEST_CASE(LargeTensorsAveragePooling2dUint8, LargeTensorsAveragePooling2dUint8Test)
-ARMNN_AUTO_TEST_CASE(SimpleL2Pooling2d, SimpleL2Pooling2dTest)
-ARMNN_AUTO_TEST_CASE(UNSUPPORTED_SimpleL2Pooling2dUint8, SimpleL2Pooling2dUint8Test)
+ARMNN_AUTO_TEST_CASE(SimpleL2Pooling2d, SimpleL2Pooling2dTest, armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(SimpleL2Pooling2dNhwc, SimpleL2Pooling2dTest, armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(UNSUPPORTED_SimpleL2Pooling2dUint8, SimpleL2Pooling2dUint8Test, armnn::DataLayout::NCHW)
+
ARMNN_AUTO_TEST_CASE(L2Pooling2dSize3Stride1, L2Pooling2dSize3Stride1Test)
ARMNN_AUTO_TEST_CASE(UNSUPPORTED_L2Pooling2dSize3Stride1Uint8, L2Pooling2dSize3Stride1Uint8Test)
ARMNN_AUTO_TEST_CASE(L2Pooling2dSize3Stride3, L2Pooling2dSize3Stride3Test)
diff --git a/src/backends/neon/test/NeonLayerTests.cpp b/src/backends/neon/test/NeonLayerTests.cpp
index c7b0050311..d242245ba0 100644
--- a/src/backends/neon/test/NeonLayerTests.cpp
+++ b/src/backends/neon/test/NeonLayerTests.cpp
@@ -247,15 +247,24 @@ BOOST_AUTO_TEST_CASE(DepthwiseConv2dUtils)
// Pooling
ARMNN_AUTO_TEST_CASE(SimpleMaxPooling2dSize3x3Stride2x4, SimpleMaxPooling2dSize3x3Stride2x4Test, true)
ARMNN_AUTO_TEST_CASE(SimpleMaxPooling2dSize3x3Stride2x4Uint8, SimpleMaxPooling2dSize3x3Stride2x4Uint8Test, true)
-ARMNN_AUTO_TEST_CASE(SimpleAveragePooling2d, SimpleAveragePooling2dTest)
-ARMNN_AUTO_TEST_CASE(SimpleAveragePooling2dNhwc, SimpleAveragePooling2dNhwcTest)
-ARMNN_AUTO_TEST_CASE(SimpleAveragePooling2dUint8, SimpleAveragePooling2dUint8Test)
+
+ARMNN_AUTO_TEST_CASE(SimpleMaxPooling2d, SimpleMaxPooling2dTest, armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(SimpleMaxPooling2dNhwc, SimpleMaxPooling2dTest, armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(SimpleMaxPooling2dUint8, SimpleMaxPooling2dUint8Test, armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(SimpleMaxPooling2dUint8Nhwc, SimpleMaxPooling2dUint8Test, armnn::DataLayout::NHWC)
+
+ARMNN_AUTO_TEST_CASE(SimpleAveragePooling2d, SimpleAveragePooling2dTest, armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(SimpleAveragePooling2dNhwc, SimpleAveragePooling2dTest, armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(SimpleAveragePooling2dUint8, SimpleAveragePooling2dUint8Test, armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(SimpleAveragePooling2dUint8Nhwc, SimpleAveragePooling2dUint8Test, armnn::DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(LargeTensorsAveragePooling2d, LargeTensorsAveragePooling2dTest)
ARMNN_AUTO_TEST_CASE(LargeTensorsAveragePooling2dUint8, LargeTensorsAveragePooling2dUint8Test)
-ARMNN_AUTO_TEST_CASE(SimpleL2Pooling2d, SimpleL2Pooling2dTest)
-ARMNN_AUTO_TEST_CASE(UNSUPPORTED_SimpleL2Pooling2dUint8, SimpleL2Pooling2dUint8Test)
+ARMNN_AUTO_TEST_CASE(SimpleL2Pooling2d, SimpleL2Pooling2dTest, armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(SimpleL2Pooling2dNhwc, SimpleL2Pooling2dTest, armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(UNSUPPORTED_SimpleL2Pooling2dUint8, SimpleL2Pooling2dUint8Test, armnn::DataLayout::NCHW)
+
ARMNN_AUTO_TEST_CASE(L2Pooling2dSize3Stride1, L2Pooling2dSize3Stride1Test)
ARMNN_AUTO_TEST_CASE(UNSUPPORTED_L2Pooling2dSize3Stride1Uint8, L2Pooling2dSize3Stride1Uint8Test)
ARMNN_AUTO_TEST_CASE(L2Pooling2dSize3Stride3, L2Pooling2dSize3Stride3Test)
diff --git a/src/backends/reference/test/RefLayerTests.cpp b/src/backends/reference/test/RefLayerTests.cpp
index 40944bf5a6..f5884aee17 100644
--- a/src/backends/reference/test/RefLayerTests.cpp
+++ b/src/backends/reference/test/RefLayerTests.cpp
@@ -114,10 +114,16 @@ ARMNN_AUTO_TEST_CASE(IgnorePaddingSimpleL2Pooling2dUint8, IgnorePaddingSimpleL2P
ARMNN_AUTO_TEST_CASE(IgnorePaddingL2Pooling2dSize3, IgnorePaddingL2Pooling2dSize3Test)
ARMNN_AUTO_TEST_CASE(IgnorePaddingL2Pooling2dSize3Uint8, IgnorePaddingL2Pooling2dSize3Uint8Test)
-ARMNN_AUTO_TEST_CASE(SimpleAveragePooling2d, SimpleAveragePooling2dTest)
-ARMNN_AUTO_TEST_CASE(SimpleAveragePooling2dNhwc, SimpleAveragePooling2dNhwcTest)
-ARMNN_AUTO_TEST_CASE(SimpleAveragePooling2dUint8, SimpleAveragePooling2dUint8Test)
-ARMNN_AUTO_TEST_CASE(SimpleAveragePooling2dUint8Nhwc, SimpleAveragePooling2dUint8NhwcTest)
+ARMNN_AUTO_TEST_CASE(SimpleMaxPooling2d, SimpleMaxPooling2dTest, armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(SimpleMaxPooling2dNhwc, SimpleMaxPooling2dTest, armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(SimpleMaxPooling2dUint8, SimpleMaxPooling2dUint8Test, armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(SimpleMaxPooling2dUint8Nhwc, SimpleMaxPooling2dUint8Test, armnn::DataLayout::NHWC)
+
+ARMNN_AUTO_TEST_CASE(SimpleAveragePooling2d, SimpleAveragePooling2dTest, armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(SimpleAveragePooling2dNhwc, SimpleAveragePooling2dTest, armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(SimpleAveragePooling2dUint8, SimpleAveragePooling2dUint8Test, armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(SimpleAveragePooling2dUint8Nhwc, SimpleAveragePooling2dUint8Test, armnn::DataLayout::NHWC)
+
ARMNN_AUTO_TEST_CASE(IgnorePaddingAveragePooling2dSize3x2Stride2x2,
IgnorePaddingAveragePooling2dSize3x2Stride2x2Test, false)
ARMNN_AUTO_TEST_CASE(IgnorePaddingAveragePooling2dSize3x2Stride2x2NoPadding,
@@ -126,8 +132,10 @@ ARMNN_AUTO_TEST_CASE(IgnorePaddingAveragePooling2dSize3x2Stride2x2NoPadding,
ARMNN_AUTO_TEST_CASE(LargeTensorsAveragePooling2d, LargeTensorsAveragePooling2dTest)
ARMNN_AUTO_TEST_CASE(LargeTensorsAveragePooling2dUint8, LargeTensorsAveragePooling2dUint8Test)
-ARMNN_AUTO_TEST_CASE(SimpleL2Pooling2d, SimpleL2Pooling2dTest)
-ARMNN_AUTO_TEST_CASE(SimpleL2Pooling2dUint8, SimpleL2Pooling2dUint8Test)
+ARMNN_AUTO_TEST_CASE(SimpleL2Pooling2d, SimpleL2Pooling2dTest, armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(SimpleL2Pooling2dNhwc, SimpleL2Pooling2dTest, armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(SimpleL2Pooling2dUint8, SimpleL2Pooling2dUint8Test, armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(SimpleL2Pooling2dNhwcUint8, SimpleL2Pooling2dUint8Test, armnn::DataLayout::NHWC)
ARMNN_AUTO_TEST_CASE(L2Pooling2dSize7, L2Pooling2dSize7Test)
ARMNN_AUTO_TEST_CASE(L2Pooling2dSize7Uint8, L2Pooling2dSize7Uint8Test)
diff --git a/src/backends/reference/workloads/Pooling2d.cpp b/src/backends/reference/workloads/Pooling2d.cpp
index 9890920113..d2fd0da42c 100644
--- a/src/backends/reference/workloads/Pooling2d.cpp
+++ b/src/backends/reference/workloads/Pooling2d.cpp
@@ -4,6 +4,7 @@
//
#include "Pooling2d.hpp"
+#include "TensorBufferArrayView.hpp"
#include <armnn/Exceptions.hpp>
#include <armnn/Types.hpp>
@@ -143,9 +144,10 @@ void Pooling2d(const float* in,
const TensorInfo& outputInfo,
const Pooling2dDescriptor& params)
{
- const unsigned int channelsIndex = params.m_DataLayout.GetChannelsIndex();
- const unsigned int heightIndex = params.m_DataLayout.GetHeightIndex();
- const unsigned int widthIndex = params.m_DataLayout.GetWidthIndex();
+ const armnn::DataLayoutIndexed dataLayout = params.m_DataLayout;
+ auto channelsIndex = dataLayout.GetChannelsIndex();
+ auto heightIndex = dataLayout.GetHeightIndex();
+ auto widthIndex = dataLayout.GetWidthIndex();
const int batchSize = boost::numeric_cast<int>(outputInfo.GetShape()[0]);
const int channels = boost::numeric_cast<int>(outputInfo.GetShape()[channelsIndex]);
@@ -167,6 +169,9 @@ void Pooling2d(const float* in,
Accumulator accumulate = GetAccumulator(params.m_PoolType);
Executor execute = GetExecutor(params.m_PoolType);
+ TensorBufferArrayView<const float> input(inputInfo.GetShape(), in, dataLayout);
+ TensorBufferArrayView<float> output(outputInfo.GetShape(), out, dataLayout);
+
// Check supported padding methods outside the loop to simplify
// the inner loop.
if (params.m_PaddingMethod != PaddingMethod::Exclude &&
@@ -221,10 +226,10 @@ void Pooling2d(const float* in,
{
for (auto xInput = wstart; xInput < wend; xInput++)
{
- float inval = in[n * widthInput * heightInput * channels +
- c * widthInput * heightInput +
- yInput * widthInput +
- xInput];
+ float inval = input.Get(boost::numeric_cast<unsigned int>(n),
+ boost::numeric_cast<unsigned int>(c),
+ boost::numeric_cast<unsigned int>(yInput),
+ boost::numeric_cast<unsigned int>(xInput));
accumulate(result, inval);
}
@@ -232,10 +237,10 @@ void Pooling2d(const float* in,
execute(result, poolAreaSize);
- out[n * widthOutput * heightOutput * channels +
- c * widthOutput * heightOutput +
- yOutput * widthOutput +
- xOutput] = result;
+ output.Get(boost::numeric_cast<unsigned int>(n),
+ boost::numeric_cast<unsigned int>(c),
+ boost::numeric_cast<unsigned int>(yOutput),
+ boost::numeric_cast<unsigned int>(xOutput)) = result;
}
}
}
diff --git a/src/backends/test/Conv2dTestImpl.hpp b/src/backends/test/Conv2dTestImpl.hpp
index 7a3f452515..ce3e435d1a 100755
--- a/src/backends/test/Conv2dTestImpl.hpp
+++ b/src/backends/test/Conv2dTestImpl.hpp
@@ -62,27 +62,6 @@ void ApplyBias(std::vector<T>& v, float vScale, int32_t vOffset,
}
}
-template<typename T>
-armnn::TensorInfo GetTensorInfo(unsigned int numberOfBatches,
- unsigned int numberOfChannels,
- unsigned int height,
- unsigned int width,
- const armnn::DataLayoutIndexed& layout)
-{
- switch (layout.GetDataLayout())
- {
- case armnn::DataLayout::NCHW:
- return armnn::TensorInfo({numberOfBatches, numberOfChannels, height, width}, armnn::GetDataType<T>());
- case armnn::DataLayout ::NHWC:
- return armnn::TensorInfo({numberOfBatches, height, width, numberOfChannels}, armnn::GetDataType<T>());
- default:
- throw armnn::InvalidArgumentException("unknown data layout ["
- + std::to_string(static_cast<int>(layout.GetDataLayout())) + "]");
- }
-}
-
-
-
template<typename T, typename B>
LayerTestResult<T, 4> SimpleConvolution2dTestImpl(armnn::IWorkloadFactory& workloadFactory,
const boost::multi_array<T, 4>& originalInput,
diff --git a/src/backends/test/LayerTests.cpp b/src/backends/test/LayerTests.cpp
index ae6d16c755..abe3704a17 100755
--- a/src/backends/test/LayerTests.cpp
+++ b/src/backends/test/LayerTests.cpp
@@ -5602,24 +5602,28 @@ LayerTestResult<uint8_t, 4> SimpleMaxPooling2dSize3x3Stride2x4Uint8Test(armnn::I
return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<uint8_t>(workloadFactory, forceNoPadding, 0.1f, 128);
}
-LayerTestResult<float, 4> SimpleAveragePooling2dTest(armnn::IWorkloadFactory& workloadFactory)
+LayerTestResult<float, 4> SimpleMaxPooling2dTest(armnn::IWorkloadFactory& workloadFactory,
+ const armnn::DataLayoutIndexed& dataLayout)
{
- return SimpleAveragePooling2dTest<float>(workloadFactory);
+ return SimpleMaxPooling2dTestCommon<float>(workloadFactory, dataLayout);
}
-LayerTestResult<float, 4> SimpleAveragePooling2dNhwcTest(armnn::IWorkloadFactory& workloadFactory)
+LayerTestResult<uint8_t, 4> SimpleMaxPooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory,
+ const armnn::DataLayoutIndexed& dataLayout)
{
- return SimpleAveragePooling2dNhwcTest<float>(workloadFactory);
+ return SimpleMaxPooling2dTestCommon<uint8_t>(workloadFactory, dataLayout);
}
-LayerTestResult<uint8_t, 4> SimpleAveragePooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory)
+LayerTestResult<float, 4> SimpleAveragePooling2dTest(armnn::IWorkloadFactory& workloadFactory,
+ const armnn::DataLayoutIndexed& dataLayout)
{
- return SimpleAveragePooling2dTest<uint8_t>(workloadFactory, 0.5, -1);
+ return SimpleAveragePooling2dTestCommon<float>(workloadFactory, dataLayout);
}
-LayerTestResult<uint8_t, 4> SimpleAveragePooling2dUint8NhwcTest(armnn::IWorkloadFactory& workloadFactory)
+LayerTestResult<uint8_t, 4> SimpleAveragePooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory,
+ const armnn::DataLayoutIndexed& dataLayout)
{
- return SimpleAveragePooling2dNhwcTest<uint8_t>(workloadFactory, 0.5, -1);
+ return SimpleAveragePooling2dTestCommon<uint8_t>(workloadFactory, dataLayout, 0.5, -1);
}
LayerTestResult<float, 4> IgnorePaddingAveragePooling2dSize3x2Stride2x2Test(armnn::IWorkloadFactory& workloadFactory,
@@ -5638,14 +5642,16 @@ LayerTestResult<uint8_t, 4> LargeTensorsAveragePooling2dUint8Test(armnn::IWorklo
return LargeTensorsAveragePooling2dTestCommon<uint8_t>(workloadFactory, 0.5, -1);
}
-LayerTestResult<float, 4> SimpleL2Pooling2dTest(armnn::IWorkloadFactory& workloadFactory)
+LayerTestResult<float, 4> SimpleL2Pooling2dTest(armnn::IWorkloadFactory& workloadFactory,
+ const armnn::DataLayoutIndexed& dataLayout)
{
- return SimpleL2Pooling2dTestCommon<float>(workloadFactory);
+ return SimpleL2Pooling2dTestCommon<float>(workloadFactory, dataLayout);
}
-LayerTestResult<uint8_t, 4> SimpleL2Pooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory)
+LayerTestResult<uint8_t, 4> SimpleL2Pooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory,
+ const armnn::DataLayoutIndexed& dataLayout)
{
- return SimpleL2Pooling2dTestCommon<uint8_t>(workloadFactory);
+ return SimpleL2Pooling2dTestCommon<uint8_t>(workloadFactory, dataLayout);
}
LayerTestResult<float, 4> L2Pooling2dSize3Stride1Test(armnn::IWorkloadFactory& workloadFactory)
diff --git a/src/backends/test/LayerTests.hpp b/src/backends/test/LayerTests.hpp
index 0e45024e22..392d3bf34e 100644
--- a/src/backends/test/LayerTests.hpp
+++ b/src/backends/test/LayerTests.hpp
@@ -99,10 +99,16 @@ LayerTestResult<uint8_t, 4> IgnorePaddingSimpleMaxPooling2dUint8Test(armnn::IWor
LayerTestResult<float, 4> IgnorePaddingMaxPooling2dSize3Test(armnn::IWorkloadFactory& workloadFactory);
LayerTestResult<uint8_t, 4> IgnorePaddingMaxPooling2dSize3Uint8Test(armnn::IWorkloadFactory& workloadFactory);
-LayerTestResult<float, 4> SimpleAveragePooling2dTest(armnn::IWorkloadFactory& workloadFactory);
-LayerTestResult<float, 4> SimpleAveragePooling2dNhwcTest(armnn::IWorkloadFactory& workloadFactory);
-LayerTestResult<uint8_t, 4> SimpleAveragePooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory);
-LayerTestResult<uint8_t, 4> SimpleAveragePooling2dUint8NhwcTest(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<float, 4> SimpleMaxPooling2dTest(armnn::IWorkloadFactory& workloadFactory,
+ const armnn::DataLayoutIndexed& dataLayout);
+LayerTestResult<uint8_t, 4> SimpleMaxPooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory,
+ const armnn::DataLayoutIndexed& dataLayout);
+
+LayerTestResult<float, 4> SimpleAveragePooling2dTest(armnn::IWorkloadFactory& workloadFactory,
+ const armnn::DataLayoutIndexed& dataLayout);
+LayerTestResult<uint8_t, 4> SimpleAveragePooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory,
+ const armnn::DataLayoutIndexed& dataLayout);
+
LayerTestResult<float, 4> IgnorePaddingAveragePooling2dSize3x2Stride2x2Test(armnn::IWorkloadFactory& workloadFactory,
bool forceNoPadding);
LayerTestResult<float, 4> IgnorePaddingSimpleAveragePooling2dTest(armnn::IWorkloadFactory& workloadFactory);
@@ -113,8 +119,10 @@ LayerTestResult<uint8_t, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingUint8Tes
LayerTestResult<float, 4> IgnorePaddingAveragePooling2dSize3Test(armnn::IWorkloadFactory& workloadFactory);
LayerTestResult<uint8_t, 4> IgnorePaddingAveragePooling2dSize3Uint8Test(armnn::IWorkloadFactory& workloadFactory);
-LayerTestResult<float, 4> SimpleL2Pooling2dTest(armnn::IWorkloadFactory& workloadFactory);
-LayerTestResult<uint8_t, 4> SimpleL2Pooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<float, 4> SimpleL2Pooling2dTest(armnn::IWorkloadFactory& workloadFactory,
+ const armnn::DataLayoutIndexed& dataLayout);
+LayerTestResult<uint8_t, 4> SimpleL2Pooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory,
+ const armnn::DataLayoutIndexed& dataLayout);
LayerTestResult<float, 4> L2Pooling2dSize3Stride1Test(armnn::IWorkloadFactory& workloadFactory);
LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride1Uint8Test(armnn::IWorkloadFactory& workloadFactory);
diff --git a/src/backends/test/Pooling2dTestImpl.hpp b/src/backends/test/Pooling2dTestImpl.hpp
index 90be2897e8..eea423275c 100644
--- a/src/backends/test/Pooling2dTestImpl.hpp
+++ b/src/backends/test/Pooling2dTestImpl.hpp
@@ -4,6 +4,7 @@
//
#pragma once
+#include <string>
#include <armnn/ArmNN.hpp>
#include <test/TensorHelpers.hpp>
@@ -13,6 +14,8 @@
#include <backends/WorkloadFactory.hpp>
#include <backends/WorkloadInfo.hpp>
#include <algorithm>
+#include "Permute.hpp"
+#include <boost/numeric/conversion/cast.hpp>
template<typename T>
LayerTestResult<T, 4> SimplePooling2dTestImpl(armnn::IWorkloadFactory& workloadFactory,
@@ -22,9 +25,10 @@ LayerTestResult<T, 4> SimplePooling2dTestImpl(armnn::IWorkloadFactory& workloadF
const boost::multi_array<T, 4>& input,
const boost::multi_array<T, 4>& outputExpected)
{
- const unsigned int channelsIndex = descriptor.m_DataLayout.GetChannelsIndex();
- const unsigned int heightIndex = descriptor.m_DataLayout.GetHeightIndex();
- const unsigned int widthIndex = descriptor.m_DataLayout.GetWidthIndex();
+ const armnn::DataLayoutIndexed dataLayout = descriptor.m_DataLayout;
+ auto heightIndex = dataLayout.GetHeightIndex();
+ auto widthIndex = dataLayout.GetWidthIndex();
+ auto channelsIndex = dataLayout.GetChannelsIndex();
unsigned int inputHeight = boost::numeric_cast<unsigned int>(input.shape()[heightIndex]);
unsigned int inputWidth = boost::numeric_cast<unsigned int>(input.shape()[widthIndex]);
@@ -36,23 +40,10 @@ LayerTestResult<T, 4> SimplePooling2dTestImpl(armnn::IWorkloadFactory& workloadF
unsigned int outputChannels = boost::numeric_cast<unsigned int>(outputExpected.shape()[channelsIndex]);
unsigned int outputBatchSize = boost::numeric_cast<unsigned int>(outputExpected.shape()[0]);
- armnn::TensorShape inputTensorShape;
- armnn::TensorShape outputTensorShape;
-
- switch (descriptor.m_DataLayout.GetDataLayout())
- {
- case armnn::DataLayout::NHWC:
- inputTensorShape = { inputBatchSize, inputHeight, inputWidth, inputChannels };
- outputTensorShape = { outputBatchSize, outputHeight, outputWidth, outputChannels };
- break;
- case armnn::DataLayout::NCHW:
- default:
- inputTensorShape = { inputBatchSize, inputChannels, inputHeight, inputWidth };
- outputTensorShape = { outputBatchSize, outputChannels, outputHeight, outputWidth };
- }
-
- armnn::TensorInfo inputTensorInfo(inputTensorShape, armnn::GetDataType<T>());
- armnn::TensorInfo outputTensorInfo(outputTensorShape, armnn::GetDataType<T>());
+ armnn::TensorInfo inputTensorInfo = GetTensorInfo<T>(inputBatchSize, inputChannels, inputHeight,
+ inputWidth, dataLayout);
+ armnn::TensorInfo outputTensorInfo = GetTensorInfo<T>(outputBatchSize, outputChannels, outputHeight,
+ outputWidth, dataLayout);
// Set quantization parameters if the requested type is a quantized type.
if(armnn::IsQuantizedType<T>())
@@ -70,7 +61,7 @@ LayerTestResult<T, 4> SimplePooling2dTestImpl(armnn::IWorkloadFactory& workloadF
armnn::Pooling2dQueueDescriptor queueDescriptor;
queueDescriptor.m_Parameters = descriptor;
- queueDescriptor.m_Parameters.m_DataLayout = descriptor.m_DataLayout;
+ queueDescriptor.m_Parameters.m_DataLayout = dataLayout;
armnn::WorkloadInfo workloadInfo;
AddInputToWorkload(queueDescriptor, workloadInfo, inputTensorInfo, inputHandle.get());
@@ -234,26 +225,20 @@ LayerTestResult<T, 4> SimpleMaxPooling2dSize3x3Stride2x4TestCommon(armnn::IWorkl
}
template<typename T>
-LayerTestResult<T, 4> SimpleAveragePooling2dTestCommon(armnn::IWorkloadFactory& workloadFactory,
- const armnn::TensorShape& inputTensorShape,
- const armnn::TensorShape& outputTensorShape,
- armnn::DataLayout dataLayout,
- float qScale = 1.0f,
- int32_t qOffset = 0)
+LayerTestResult<T, 4> SimpleMaxPooling2dTestCommon(armnn::IWorkloadFactory& workloadFactory,
+ const armnn::DataLayoutIndexed& dataLayout = armnn::DataLayout::NCHW,
+ float qScale = 1.0f,
+ int32_t qOffset = 0)
{
armnn::Pooling2dDescriptor descriptor;
- descriptor.m_PoolType = armnn::PoolingAlgorithm::Average;
+ descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;
descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
descriptor.m_StrideX = descriptor.m_StrideY = 2;
- descriptor.m_PadLeft = 1;
- descriptor.m_PadRight = 1;
- descriptor.m_PadTop = 1;
- descriptor.m_PadBottom = 1;
descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
descriptor.m_DataLayout = dataLayout;
- armnn::TensorInfo inputTensorInfo(inputTensorShape, armnn::GetDataType<T>());
- armnn::TensorInfo outputTensorInfo(outputTensorShape, armnn::GetDataType<T>());
+ armnn::TensorInfo inputTensorInfo = GetTensorInfo<T>(1, 2, 4, 4, dataLayout);
+ armnn::TensorInfo outputTensorInfo = GetTensorInfo<T>(1, 2, 2, 2, dataLayout);
// Set quantization parameters if the requested type is a quantized type.
if(armnn::IsQuantizedType<T>())
@@ -264,46 +249,111 @@ LayerTestResult<T, 4> SimpleAveragePooling2dTestCommon(armnn::IWorkloadFactory&
outputTensorInfo.SetQuantizationOffset(qOffset);
}
- auto input = MakeTensor<T, 4>(inputTensorInfo,
+ std::vector<T> inputData(
QuantizedVector<T>(qScale, qOffset, {
- 1.0f, 2.0f, 3.0f, 4.0f,
- 1.0f, 2.0f, 3.0f, 4.0f,
- 1.0f, 2.0f, 3.0f, 4.0f,
- 1.0f, 2.0f, 3.0f, 4.0f,
+ 1.0f, 2.0f, 5.0f, 6.0f,
+ 3.0f, 4.0f, 7.0f, 8.0f,
+ 9.0f, 10.0f, 13.0f, 14.0f,
+ 11.0f, 12.0f, 15.0f, 16.0f,
+
+ 17.0f, 18.0f, 21.0f, 22.0f,
+ 19.0f, 20.0f, 23.0f, 24.0f,
+ 25.0f, 26.0f, 29.0f, 30.0f,
+ 27.0f, 28.0f, 31.0f, 32.0f,
}));
- auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
+ std::vector<T> outputData(
QuantizedVector<T>(qScale, qOffset, {
- 1.0f, 2.5f, 4.0f,
- 1.0f, 2.5f, 4.0f,
- 1.0f, 2.5f, 4.0f,
+ 4.0f, 8.0f,
+ 12.0f, 16.0f,
+
+ 20.0f, 24.0f,
+ 28.0f, 32.0f,
}));
+ const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
+ if (dataLayout.GetDataLayout() == armnn::DataLayout::NHWC)
+ {
+ std::vector<T> tmp(inputData.size());
+ armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data());
+ inputData = tmp;
+
+ std::vector<T> tmp1(outputData.size());
+ armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data());
+ outputData = tmp1;
+ }
+
+ auto input = MakeTensor<T, 4>(inputTensorInfo, inputData);
+
+ auto outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputData);
+
return SimplePooling2dTestImpl<T>(workloadFactory, descriptor, qScale, qOffset, input, outputExpected);
}
template<typename T>
-LayerTestResult<T, 4> SimpleAveragePooling2dTest(armnn::IWorkloadFactory& workloadFactory,
- float qScale = 1.0f,
- int32_t qOffset = 0)
+LayerTestResult<T, 4> SimpleAveragePooling2dTestCommon(armnn::IWorkloadFactory& workloadFactory,
+ armnn::DataLayoutIndexed dataLayout = armnn::DataLayout::NCHW,
+ float qScale = 1.0f,
+ int32_t qOffset = 0)
{
- const armnn::TensorShape inputTensorShape { 1, 1, 4, 4 };
- const armnn::TensorShape outputTensorShape { 1, 1, 3, 3 };
+ armnn::Pooling2dDescriptor descriptor;
+ descriptor.m_PoolType = armnn::PoolingAlgorithm::Average;
+ descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
+ descriptor.m_StrideX = descriptor.m_StrideY = 2;
+ descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
+ descriptor.m_DataLayout = dataLayout;
- return SimpleAveragePooling2dTestCommon<T>(workloadFactory, inputTensorShape, outputTensorShape,
- armnn::DataLayout::NCHW, qScale, qOffset);
-}
+ armnn::TensorInfo inputTensorInfo = GetTensorInfo<T>(1, 2, 4, 4, dataLayout);
+ armnn::TensorInfo outputTensorInfo = GetTensorInfo<T>(1, 2, 2, 2, dataLayout);
-template<typename T>
-LayerTestResult<T, 4> SimpleAveragePooling2dNhwcTest(armnn::IWorkloadFactory& workloadFactory,
- float qScale = 1.0f,
- int32_t qOffset = 0)
-{
- const armnn::TensorShape inputTensorShape { 1, 4, 4, 1 };
- const armnn::TensorShape outputTensorShape { 1, 3, 3, 1 };
+ // Set quantization parameters if the requested type is a quantized type.
+ if(armnn::IsQuantizedType<T>())
+ {
+ inputTensorInfo.SetQuantizationScale(qScale);
+ inputTensorInfo.SetQuantizationOffset(qOffset);
+ outputTensorInfo.SetQuantizationScale(qScale);
+ outputTensorInfo.SetQuantizationOffset(qOffset);
+ }
+
+ std::vector<T> inputData(
+ QuantizedVector<T>(qScale, qOffset, {
+ 2.0f, 2.0f, 6.0f, 6.0f,
+ 4.0f, 4.0f, 8.0f, 8.0f,
+ 10.0f, 12.0f, 14.0f, 16.0f,
+ 10.0f, 12.0f, 16.0f, 14.0f,
+
+ 18.0f, 20.0f, 24.0f, 22.0f,
+ 20.0f, 18.0f, 22.0f, 24.0f,
+ 26.0f, 28.0f, 0.0f, 0.0f,
+ 26.0f, 28.0f, 0.0f, 0.0f,
+ }));
+
+ std::vector<T> outputData(
+ QuantizedVector<T>(qScale, qOffset, {
+ 3.0f, 7.0f,
+ 11.0f, 15.0f,
+
+ 19.0f, 23.0f,
+ 27.0f, 0.0f,
+ }));
+
+ const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
+ if (dataLayout.GetDataLayout() == armnn::DataLayout::NHWC)
+ {
+ std::vector<T> tmp(inputData.size());
+ armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data());
+ inputData = tmp;
- return SimpleAveragePooling2dTestCommon<T>(workloadFactory, inputTensorShape, outputTensorShape,
- armnn::DataLayout::NHWC, qScale, qOffset);
+ std::vector<T> tmp1(outputData.size());
+ armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data());
+ outputData = tmp1;
+ }
+
+ auto input = MakeTensor<T, 4>(inputTensorInfo, inputData);
+
+ auto outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputData);
+
+ return SimplePooling2dTestImpl<T>(workloadFactory, descriptor, qScale, qOffset, input, outputExpected);
}
template<typename T>
@@ -356,6 +406,7 @@ LayerTestResult<T, 4> LargeTensorsAveragePooling2dTestCommon(armnn::IWorkloadFac
template<typename T>
LayerTestResult<T, 4> SimpleL2Pooling2dTestCommon(armnn::IWorkloadFactory& workloadFactory,
+ armnn::DataLayoutIndexed dataLayout = armnn::DataLayout::NCHW,
float qScale = 1.0f,
int32_t qOffset = 0)
{
@@ -364,23 +415,49 @@ LayerTestResult<T, 4> SimpleL2Pooling2dTestCommon(armnn::IWorkloadFactory& workl
descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
descriptor.m_StrideX = descriptor.m_StrideY = 2;
descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
+ descriptor.m_DataLayout = dataLayout;
- armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, armnn::GetDataType<T>());
- auto input = MakeTensor<T, 4>(inputTensorInfo,
+ armnn::TensorInfo inputTensorInfo = GetTensorInfo<T>(1, 2, 4, 4, dataLayout);
+ armnn::TensorInfo outputTensorInfo = GetTensorInfo<T>(1, 2, 2, 2, dataLayout);
+
+ std::vector<T> inputData(
QuantizedVector<T>(qScale, qOffset, {
- 1.0f, 7.0f, 1.0f, 7.0f,
- 1.0f, 7.0f, 1.0f, 7.0f,
- 1.0f, 7.0f, 1.0f, 7.0f,
- 1.0f, 7.0f, 1.0f, 7.0f,
+ 1.0f, 7.0f, 5.0f, 5.0f,
+ 1.0f, 7.0f, 5.0f, 5.0f,
+ 3.0f, 3.0f, 1.0f, 1.0f,
+ 3.0f, 3.0f, 1.0f, 1.0f,
+
+ 1.0f, 7.0f, 0.0f, 0.0f,
+ 1.0f, 7.0f, 2.0f, 0.0f,
+ 0.0f, 2.0f, 1.0f, 1.0f,
+ 0.0f, 0.0f, 1.0f, 1.0f,
}));
- armnn::TensorInfo outputTensorInfo({ 1, 1, 2, 2 }, armnn::GetDataType<T>());
- auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
+ std::vector<T> outputData(
QuantizedVector<T>(qScale, qOffset, {
5.0f, 5.0f,
- 5.0f, 5.0f,
+ 3.0f, 1.0f,
+
+ 5.0f, 1.0f,
+ 1.0f, 1.0f,
}));
+ const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
+ if (dataLayout.GetDataLayout() == armnn::DataLayout::NHWC)
+ {
+ std::vector<T> tmp(inputData.size());
+ armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data());
+ inputData = tmp;
+
+ std::vector<T> tmp1(outputData.size());
+ armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data());
+ outputData = tmp1;
+ }
+
+ auto input = MakeTensor<T, 4>(inputTensorInfo, inputData);
+
+ auto outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputData);
+
return SimplePooling2dTestImpl<T>(workloadFactory, descriptor, qScale, qOffset, input, outputExpected);
}
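Finally, a standalone sketch of the permutation the refactored tests rely on (an assumption-labelled stand-in, not armnnUtils::Permute itself). With the convention that mappings[i] gives the destination dimension of source dimension i, the vector { 0, 3, 1, 2 } moves NCHW reference data into NHWC order while keeping every logical element value the same, which is why the tests can hold one NCHW data set and permute it when the NHWC layout is requested:

// Minimal stand-in for the NCHW -> NHWC data permutation used by the tests.
// The mapping { 0, 3, 1, 2 } means: N stays at 0, C moves to 3, H to 1, W to 2.
#include <cstddef>
#include <iostream>
#include <vector>

std::vector<float> PermuteNCHWToNHWC(const std::vector<float>& src,
                                     std::size_t n, std::size_t c,
                                     std::size_t h, std::size_t w)
{
    std::vector<float> dst(src.size());
    for (std::size_t in = 0; in < n; ++in)
        for (std::size_t ic = 0; ic < c; ++ic)
            for (std::size_t ih = 0; ih < h; ++ih)
                for (std::size_t iw = 0; iw < w; ++iw)
                {
                    const std::size_t srcIdx = ((in * c + ic) * h + ih) * w + iw; // NCHW order
                    const std::size_t dstIdx = ((in * h + ih) * w + iw) * c + ic; // NHWC order
                    dst[dstIdx] = src[srcIdx];
                }
    return dst;
}

int main()
{
    // 1 batch, 2 channels, 2x2 spatial: channel 0 = {1,2,3,4}, channel 1 = {5,6,7,8}.
    std::vector<float> nchw = { 1, 2, 3, 4, 5, 6, 7, 8 };
    std::vector<float> nhwc = PermuteNCHWToNHWC(nchw, 1, 2, 2, 2);
    for (float v : nhwc) { std::cout << v << ' '; }   // prints: 1 5 2 6 3 7 4 8
    std::cout << '\n';
    return 0;
}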