author     James Conroy <james.conroy@arm.com>                  2018-11-01 11:33:09 +0000
committer  Narumol Prangnawarat <narumol.prangnawarat@arm.com>  2018-11-01 15:45:50 +0000
commit     6b9658239d377372523fe49c71fde31701d986e3 (patch)
tree       0b14b3e81a1321c8e5e85d2800a6e969d6cb724f
parent     b9c8963c3d393baf27edf37ab732fa76ee53af50 (diff)
download   armnn-6b9658239d377372523fe49c71fde31701d986e3.tar.gz
IVGCVSW-2103: Add 2-Channel unit tests for ResizeBilinear
* Modifies the ResizeBilinear unit tests to use 2-channel tensor shapes for input
  and output data, to improve test coverage when exercising the NHWC data layout.
* Refactors the unit tests to permute input and output data when exercising the
  NHWC data layout.

Change-Id: Ib7fb438cac23e78ff0104c895c3b7596bf7c3aa7
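The NHWC paths in the refactored tests keep the reference data authored in NCHW order and permute it with armnnUtils::Permute and a { 0, 3, 1, 2 } mapping (dimension i of the source moves to dimension mapping[i] of the destination). As an illustration only, not the helper the patch uses, here is a standalone plain-C++ sketch of the same NCHW-to-NHWC reordering, applied to the 1x2x2x2 input of SimpleResizeBilinearTest:

#include <cstdio>
#include <vector>

// Reorders NCHW-flattened data into NHWC-flattened data for an N x C x H x W tensor.
std::vector<float> PermuteNchwToNhwc(const std::vector<float>& nchw,
                                     unsigned int n, unsigned int c,
                                     unsigned int h, unsigned int w)
{
    std::vector<float> nhwc(nchw.size());
    for (unsigned int b = 0; b < n; ++b)
    {
        for (unsigned int ch = 0; ch < c; ++ch)
        {
            for (unsigned int y = 0; y < h; ++y)
            {
                for (unsigned int x = 0; x < w; ++x)
                {
                    nhwc[((b * h + y) * w + x) * c + ch] = nchw[((b * c + ch) * h + y) * w + x];
                }
            }
        }
    }
    return nhwc;
}

int main()
{
    // Channel 0 then channel 1 of the 1x2x2x2 SimpleResizeBilinearTest input, in NCHW order.
    const std::vector<float> nchw = { 1.0f, 255.0f, 200.0f, 250.0f,
                                      250.0f, 200.0f, 250.0f, 1.0f };
    for (float v : PermuteNchwToNhwc(nchw, 1, 2, 2, 2))
    {
        std::printf("%.0f ", v); // prints: 1 250 255 200 200 250 250 1
    }
    std::printf("\n");
    return 0;
}

Interleaving the two channels per pixel is what makes the new 2-channel shapes a stronger NHWC test than the previous single-channel ones, where the NCHW and NHWC memory layouts coincide.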
-rwxr-xr-x  src/backends/cl/test/ClLayerTests.cpp           20
-rw-r--r--  src/backends/reference/test/RefLayerTests.cpp   20
-rwxr-xr-x  src/backends/test/LayerTests.cpp               303
-rw-r--r--  src/backends/test/LayerTests.hpp                22
4 files changed, 179 insertions, 186 deletions
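The refactored tests construct their TensorInfo objects through a GetTensorInfo<float>(batch, channels, height, width, dataLayout) helper that is not part of this patch. A minimal sketch of what such a helper presumably does is shown below; the name MakeTensorInfo, the header paths, and the use of plain armnn::DataLayout instead of armnn::DataLayoutIndexed are assumptions made to keep the sketch self-contained.

#include <armnn/Tensor.hpp>
#include <armnn/Types.hpp>

// Sketch only: builds a 4-D Float32 TensorInfo with its dimensions ordered for
// the requested layout, which is all the refactored tests need from the helper.
inline armnn::TensorInfo MakeTensorInfo(unsigned int batch, unsigned int channels,
                                        unsigned int height, unsigned int width,
                                        armnn::DataLayout layout)
{
    const armnn::TensorShape shape = (layout == armnn::DataLayout::NHWC)
        ? armnn::TensorShape({ batch, height, width, channels })
        : armnn::TensorShape({ batch, channels, height, width });
    return armnn::TensorInfo(shape, armnn::DataType::Float32);
}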
diff --git a/src/backends/cl/test/ClLayerTests.cpp b/src/backends/cl/test/ClLayerTests.cpp
index 4e7f785bbb..3ff3c093be 100755
--- a/src/backends/cl/test/ClLayerTests.cpp
+++ b/src/backends/cl/test/ClLayerTests.cpp
@@ -231,18 +231,18 @@ ARMNN_AUTO_TEST_CASE(L2Normalization3dNhwc, L2Normalization3dNhwcTest)
ARMNN_AUTO_TEST_CASE(L2Normalization4dNhwc, L2Normalization4dNhwcTest)
// Resize Bilinear - NCHW data layout
-ARMNN_AUTO_TEST_CASE(ResizeBilinearNop, ResizeBilinearNopTest)
-ARMNN_AUTO_TEST_CASE(SimpleResizeBilinear, SimpleResizeBilinearTest)
-ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMin, ResizeBilinearSqMinTest)
-ARMNN_AUTO_TEST_CASE(ResizeBilinearMin, ResizeBilinearMinTest)
-ARMNN_AUTO_TEST_CASE(ResizeBilinearMag, ResizeBilinearMagTest)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearNop, ResizeBilinearNopTest, armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(SimpleResizeBilinear, SimpleResizeBilinearTest, armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMin, ResizeBilinearSqMinTest, armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearMin, ResizeBilinearMinTest, armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearMag, ResizeBilinearMagTest, armnn::DataLayout::NCHW)
// Resize Bilinear - NHWC data layout
-ARMNN_AUTO_TEST_CASE(ResizeBilinearNopNhwc, ResizeBilinearNopNhwcTest)
-ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearNhwc, SimpleResizeBilinearNhwcTest)
-ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinNhwc, ResizeBilinearSqMinNhwcTest)
-ARMNN_AUTO_TEST_CASE(ResizeBilinearMinNhwc, ResizeBilinearMinNhwcTest)
-ARMNN_AUTO_TEST_CASE(ResizeBilinearMagNhwc, ResizeBilinearMagNhwcTest)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearNopNhwc, ResizeBilinearNopTest, armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearNhwc, SimpleResizeBilinearTest, armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinNhwc, ResizeBilinearSqMinTest, armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearMinNhwc, ResizeBilinearMinTest, armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearMagNhwc, ResizeBilinearMagTest, armnn::DataLayout::NHWC)
// Constant
ARMNN_AUTO_TEST_CASE(Constant, ConstantTest)
diff --git a/src/backends/reference/test/RefLayerTests.cpp b/src/backends/reference/test/RefLayerTests.cpp
index 00fba20c5d..d481b827e0 100644
--- a/src/backends/reference/test/RefLayerTests.cpp
+++ b/src/backends/reference/test/RefLayerTests.cpp
@@ -246,23 +246,23 @@ ARMNN_AUTO_TEST_CASE(BatchNormUint8, BatchNormUint8Test)
ARMNN_AUTO_TEST_CASE(BatchNormUint8Nhwc, BatchNormUint8NhwcTest)
// Resize Bilinear - NCHW
-ARMNN_AUTO_TEST_CASE(SimpleResizeBilinear, SimpleResizeBilinearTest)
+ARMNN_AUTO_TEST_CASE(SimpleResizeBilinear, SimpleResizeBilinearTest, armnn::DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearUint8, SimpleResizeBilinearUint8Test)
-ARMNN_AUTO_TEST_CASE(ResizeBilinearNop, ResizeBilinearNopTest)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearNop, ResizeBilinearNopTest, armnn::DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(ResizeBilinearNopUint8, ResizeBilinearNopUint8Test)
-ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMin, ResizeBilinearSqMinTest)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMin, ResizeBilinearSqMinTest, armnn::DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinUint8, ResizeBilinearSqMinUint8Test)
-ARMNN_AUTO_TEST_CASE(ResizeBilinearMin, ResizeBilinearMinTest)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearMin, ResizeBilinearMinTest, armnn::DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(ResizeBilinearMinUint8, ResizeBilinearMinUint8Test)
-ARMNN_AUTO_TEST_CASE(ResizeBilinearMag, ResizeBilinearMagTest)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearMag, ResizeBilinearMagTest, armnn::DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(ResizeBilinearMagUint8, ResizeBilinearMagUint8Test)
// Resize Bilinear - NHWC
-ARMNN_AUTO_TEST_CASE(ResizeBilinearNopNhwc, ResizeBilinearNopNhwcTest)
-ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearNhwc, SimpleResizeBilinearNhwcTest)
-ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinNhwc, ResizeBilinearSqMinNhwcTest)
-ARMNN_AUTO_TEST_CASE(ResizeBilinearMinNhwc, ResizeBilinearMinNhwcTest)
-ARMNN_AUTO_TEST_CASE(ResizeBilinearMagNhwc, ResizeBilinearMagNhwcTest)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearNopNhwc, ResizeBilinearNopTest, armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearNhwc, SimpleResizeBilinearTest, armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinNhwc, ResizeBilinearSqMinTest, armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearMinNhwc, ResizeBilinearMinTest, armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearMagNhwc, ResizeBilinearMagTest, armnn::DataLayout::NHWC)
// Fake Quantization
ARMNN_AUTO_TEST_CASE(FakeQuantization, FakeQuantizationTest)
diff --git a/src/backends/test/LayerTests.cpp b/src/backends/test/LayerTests.cpp
index 3a4e95ce94..b5fd629d66 100755
--- a/src/backends/test/LayerTests.cpp
+++ b/src/backends/test/LayerTests.cpp
@@ -3041,19 +3041,33 @@ LayerTestResult<float, 3> Concatenation3dDim2DiffInputDimsTest(armnn::IWorkloadF
return Concatenation3dDim2DiffInputDimsTestImpl<float>(workloadFactory, 0.0f, 0);
}
-LayerTestResult<float, 4> ResizeBilinearNopTestImpl(armnn::IWorkloadFactory& workloadFactory,
- const armnn::TensorShape& inputOutputTensorShape,
- armnn::DataLayout dataLayout)
+LayerTestResult<float, 4> ResizeBilinearNopTest(armnn::IWorkloadFactory& workloadFactory,
+ const armnn::DataLayoutIndexed& dataLayout)
{
- const armnn::TensorInfo inputTensorInfo(inputOutputTensorShape, armnn::DataType::Float32);
- const armnn::TensorInfo outputTensorInfo(inputOutputTensorShape, armnn::DataType::Float32);
+ const armnn::TensorInfo inputTensorInfo = GetTensorInfo<float>(1, 2, 4, 4, dataLayout);
+ const armnn::TensorInfo outputTensorInfo = GetTensorInfo<float>(1, 2, 4, 4, dataLayout);
+
+ std::vector<float> inputData({
+ 1.0f, 2.0f, 3.0f, 4.0f,
+ 2.0f, 3.0f, 4.0f, 5.0f,
+ 3.0f, 4.0f, 5.0f, 6.0f,
+ 4.0f, 5.0f, 6.0f, 7.0f,
- auto input = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>({
1.0f, 2.0f, 3.0f, 4.0f,
2.0f, 3.0f, 4.0f, 5.0f,
3.0f, 4.0f, 5.0f, 6.0f,
4.0f, 5.0f, 6.0f, 7.0f
- }));
+ });
+
+ const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
+ if (dataLayout.GetDataLayout() == armnn::DataLayout::NHWC)
+ {
+ std::vector<float> tmp(inputData.size());
+ armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data());
+ inputData = tmp;
+ }
+
+ auto input = MakeTensor<float, 4>(inputTensorInfo, inputData);
LayerTestResult<float, 4> result(outputTensorInfo);
result.outputExpected = input;
@@ -3080,44 +3094,48 @@ LayerTestResult<float, 4> ResizeBilinearNopTestImpl(armnn::IWorkloadFactory& wor
return result;
}
-LayerTestResult<float, 4> ResizeBilinearNopTest(armnn::IWorkloadFactory& workloadFactory)
+LayerTestResult<float, 4> SimpleResizeBilinearTest(armnn::IWorkloadFactory& workloadFactory,
+ const armnn::DataLayoutIndexed& dataLayout)
{
- // BatchSize = 1, Channels = 1, Height = 4, Width = 4
- const armnn::TensorShape inputOutputShape{ 1, 1, 4, 4 };
+ const armnn::TensorInfo inputTensorInfo = GetTensorInfo<float>(1, 2, 2, 2, dataLayout);
+ const armnn::TensorInfo outputTensorInfo = GetTensorInfo<float>(1, 2, 1, 1, dataLayout);
- return ResizeBilinearNopTestImpl(workloadFactory, inputOutputShape, armnn::DataLayout::NCHW);
-}
+ std::vector<float> inputData({
+ 1.0f, 255.0f,
+ 200.0f, 250.0f,
-LayerTestResult<float, 4> ResizeBilinearNopNhwcTest(armnn::IWorkloadFactory& workloadFactory)
-{
- // BatchSize = 1, Height = 4, Width = 4, Channels = 1
- const armnn::TensorShape inputOutputShape{ 1, 4, 4, 1 };
+ 250.0f, 200.0f,
+ 250.0f, 1.0f
+ });
- return ResizeBilinearNopTestImpl(workloadFactory, inputOutputShape, armnn::DataLayout::NHWC);
-}
+ // The 'resize bilinear' operation projects the top-left corner of output texels into the input image,
+ // then figures out the interpolants and weights. Note this is different to projecting the centre of the
+ // output texel. Thus, for an input matrix of 2x2, we'll expect the output 1x1 matrix to contain, as
+ // its single element, the value that was at position (0,0) of the input matrix (rather than an average,
+ // which we would expect if projecting the centre).
-LayerTestResult<float, 4> SimpleResizeBilinearTestImpl(armnn::IWorkloadFactory& workloadFactory,
- const armnn::TensorShape& inputTensorShape,
- const armnn::TensorShape& outputTensorShape,
- armnn::DataLayout dataLayout)
-{
- const armnn::TensorInfo inputTensorInfo(inputTensorShape, armnn::DataType::Float32);
- const armnn::TensorInfo outputTensorInfo(outputTensorShape, armnn::DataType::Float32);
+ std::vector<float> outputData({
+ 1.0f,
- auto input = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>({
- 1.0f, 255.0f,
- 200.0f, 250.0f
- }));
+ 250.0f
+ });
+
+ const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
+ if (dataLayout.GetDataLayout() == armnn::DataLayout::NHWC)
+ {
+ std::vector<float> tmp(inputData.size());
+ armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data());
+ inputData = tmp;
+
+ std::vector<float> tmp1(outputData.size());
+ armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data());
+ outputData = tmp1;
+ }
+
+ auto input = MakeTensor<float, 4>(inputTensorInfo, inputData);
- // The 'resize bilinear' operation projects the top-left corner of output texels into the input image,
- // then figures out the interpolants and weights. Note this is different to projecting the centre of the
- // output texel - and thus we'll expect the output 1x1 matrix to contain, as its single element, the value
- // that was at position (0,0) of the input matrix (rather than an average, which we would expect if projecting
- // the centre).
LayerTestResult<float, 4> result(outputTensorInfo);
- result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, std::vector<float>({
- 1.0f
- }));
+ result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outputData);
std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
@@ -3141,48 +3159,48 @@ LayerTestResult<float, 4> SimpleResizeBilinearTestImpl(armnn::IWorkloadFactory&
return result;
}
-LayerTestResult<float, 4> SimpleResizeBilinearTest(armnn::IWorkloadFactory& workloadFactory)
+LayerTestResult<float, 4> ResizeBilinearSqMinTest(armnn::IWorkloadFactory& workloadFactory,
+ const armnn::DataLayoutIndexed& dataLayout)
{
- // inputShape: BatchSize = 1, Channels = 1, Height = 2, Width = 2
- const armnn::TensorShape inputShape{ 1, 1, 2, 2 };
+ const armnn::TensorInfo inputTensorInfo = GetTensorInfo<float>(1, 2, 4, 4, dataLayout);
+ const armnn::TensorInfo outputTensorInfo = GetTensorInfo<float>(1, 2, 2, 2, dataLayout);
- // outputShape: BatchSize = 1, Channels = 1, Height = 1, Width = 1
- const armnn::TensorShape outputShape{ 1, 1, 1, 1 };
+ std::vector<float> inputData({
+ 1.0f, 2.0f, 3.0f, 4.0f,
+ 2.0f, 3.0f, 4.0f, 5.0f,
+ 3.0f, 4.0f, 5.0f, 6.0f,
+ 4.0f, 5.0f, 6.0f, 7.0f,
- return SimpleResizeBilinearTestImpl(workloadFactory, inputShape, outputShape, armnn::DataLayout::NCHW);
-}
+ 7.0f, 6.0f, 5.0f, 4.0f,
+ 6.0f, 5.0f, 4.0f, 3.0f,
+ 5.0f, 4.0f, 3.0f, 2.0f,
+ 4.0f, 3.0f, 2.0f, 1.0f
+ });
-LayerTestResult<float, 4> SimpleResizeBilinearNhwcTest(armnn::IWorkloadFactory& workloadFactory)
-{
- // inputShape: BatchSize = 1, Height = 2, Width = 2, Channels = 1
- const armnn::TensorShape inputShape{ 1, 2, 2, 1 };
+ std::vector<float> outputData({
+ 1.0f, 3.0f,
+ 3.0f, 5.0f,
- // outputShape: BatchSize = 1, Height = 1, Width = 1, Channels = 1
- const armnn::TensorShape outputShape{ 1, 1, 1, 1 };
+ 7.0f, 5.0f,
+ 5.0f, 3.0f
+ });
- return SimpleResizeBilinearTestImpl(workloadFactory, inputShape, outputShape, armnn::DataLayout::NHWC);
-}
+ const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
+ if (dataLayout.GetDataLayout() == armnn::DataLayout::NHWC)
+ {
+ std::vector<float> tmp(inputData.size());
+ armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data());
+ inputData = tmp;
-LayerTestResult<float, 4> ResizeBilinearSqMinTestImpl(armnn::IWorkloadFactory& workloadFactory,
- const armnn::TensorShape& inputTensorShape,
- const armnn::TensorShape& outputTensorShape,
- armnn::DataLayout dataLayout)
-{
- const armnn::TensorInfo inputTensorInfo(inputTensorShape, armnn::DataType::Float32);
- const armnn::TensorInfo outputTensorInfo(outputTensorShape, armnn::DataType::Float32);
+ std::vector<float> tmp1(outputData.size());
+ armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data());
+ outputData = tmp1;
+ }
- auto input = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>({
- 1.0f, 2.0f, 3.0f, 4.0f,
- 2.0f, 3.0f, 4.0f, 5.0f,
- 3.0f, 4.0f, 5.0f, 6.0f,
- 4.0f, 5.0f, 6.0f, 7.0f
- }));
+ auto input = MakeTensor<float, 4>(inputTensorInfo, inputData);
LayerTestResult<float, 4> result(outputTensorInfo);
- result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, std::vector<float>({
- 1.0f, 3.0f,
- 3.0f, 5.0f
- }));
+ result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outputData);
std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
@@ -3206,47 +3224,46 @@ LayerTestResult<float, 4> ResizeBilinearSqMinTestImpl(armnn::IWorkloadFactory& w
return result;
}
-LayerTestResult<float, 4> ResizeBilinearSqMinTest(armnn::IWorkloadFactory& workloadFactory)
+LayerTestResult<float, 4> ResizeBilinearMinTest(armnn::IWorkloadFactory& workloadFactory,
+ const armnn::DataLayoutIndexed& dataLayout)
{
- // inputShape: BatchSize = 1, Channels = 1, Height = 4, Width = 4
- const armnn::TensorShape inputShape{ 1, 1, 4, 4 };
+ const armnn::TensorInfo inputTensorInfo = GetTensorInfo<float>(1, 2, 3, 5, dataLayout);
+ const armnn::TensorInfo outputTensorInfo = GetTensorInfo<float>(1, 2, 2, 3, dataLayout);
- // outputShape: BatchSize = 1, Channels = 1, Height = 2, Width = 2
- const armnn::TensorShape outputShape{ 1, 1, 2, 2 };
+ std::vector<float> inputData({
+ 1.0f, 2.0f, 3.0f, 5.0f, 8.0f,
+ 13.0f, 21.0f, 34.0f, 55.0f, 89.0f,
+ 144.0f, 233.0f, 377.0f, 610.0f, 987.0f,
- return ResizeBilinearSqMinTestImpl(workloadFactory, inputShape, outputShape, armnn::DataLayout::NCHW);
-}
+ 987.0f, 610.0f, 377.0f, 233.0f, 144.0f,
+ 89.0f, 55.0f, 34.0f, 21.0f, 13.0f,
+ 8.0f, 5.0f, 3.0f, 2.0f, 1.0f
+ });
-LayerTestResult<float, 4> ResizeBilinearSqMinNhwcTest(armnn::IWorkloadFactory& workloadFactory)
-{
- // inputShape: BatchSize = 1, Height = 4, Width = 4, Channels = 1
- const armnn::TensorShape inputShape{ 1, 4, 4, 1 };
+ std::vector<float> outputData({
+ 1.0f, 2.6666f, 6.00f,
+ 78.5f, 179.3333f, 401.00f,
- // outputShape: BatchSize = 1, Height = 2, Width = 2, Channels = 1
- const armnn::TensorShape outputShape{ 1, 2, 2, 1 };
+ 987.0f, 454.6670f, 203.33f,
+ 48.5f, 22.3333f, 10.00f
+ });
- return ResizeBilinearSqMinTestImpl(workloadFactory, inputShape, outputShape, armnn::DataLayout::NHWC);
-}
+ const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
+ if (dataLayout.GetDataLayout() == armnn::DataLayout::NHWC)
+ {
+ std::vector<float> tmp(inputData.size());
+ armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data());
+ inputData = tmp;
-LayerTestResult<float, 4> ResizeBilinearMinTestImpl(armnn::IWorkloadFactory& workloadFactory,
- const armnn::TensorShape& inputTensorShape,
- const armnn::TensorShape& outputTensorShape,
- armnn::DataLayout dataLayout)
-{
- const armnn::TensorInfo inputTensorInfo(inputTensorShape, armnn::DataType::Float32);
- const armnn::TensorInfo outputTensorInfo(outputTensorShape, armnn::DataType::Float32);
+ std::vector<float> tmp1(outputData.size());
+ armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data());
+ outputData = tmp1;
+ }
- auto input = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>({
- 1.0f, 2.0f, 3.0f, 5.0f, 8.0f,
- 13.0f, 21.0f, 34.0f, 55.0f, 89.0f,
- 144.0f, 233.0f, 377.0f, 610.0f, 987.0f
- }));
+ auto input = MakeTensor<float, 4>(inputTensorInfo, inputData);
LayerTestResult<float, 4> result(outputTensorInfo);
- result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, std::vector<float>({
- 1.0f, 2.6666f, 6.0f,
- 78.5f, 179.3333f, 401.0f
- }));
+ result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outputData);
std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
@@ -3270,48 +3287,48 @@ LayerTestResult<float, 4> ResizeBilinearMinTestImpl(armnn::IWorkloadFactory& wor
return result;
}
-LayerTestResult<float, 4> ResizeBilinearMinTest(armnn::IWorkloadFactory& workloadFactory)
+LayerTestResult<float, 4> ResizeBilinearMagTest(armnn::IWorkloadFactory& workloadFactory,
+ const armnn::DataLayoutIndexed& dataLayout)
{
- // inputShape: BatchSize = 1, Channels = 1, Height = 3, Width = 5
- const armnn::TensorShape inputShape{ 1, 1, 3, 5 };
+ const armnn::TensorInfo inputTensorInfo = GetTensorInfo<float>(1, 2, 3, 2, dataLayout);
+ const armnn::TensorInfo outputTensorInfo = GetTensorInfo<float>(1, 2, 3, 5, dataLayout);
- // outputShape: BatchSize = 1, Channels = 1, Height = 2, Width = 3
- const armnn::TensorShape outputShape{ 1, 1, 2, 3 };
+ std::vector<float> inputData({
+ 1.0f, 2.0f,
+ 13.0f, 21.0f,
+ 144.0f, 233.0f,
- return ResizeBilinearMinTestImpl(workloadFactory, inputShape, outputShape, armnn::DataLayout::NCHW);
-}
+ 233.0f, 144.0f,
+ 21.0f, 13.0f,
+ 2.0f, 1.0f
+ });
-LayerTestResult<float, 4> ResizeBilinearMinNhwcTest(armnn::IWorkloadFactory& workloadFactory)
-{
- // inputShape: BatchSize = 1, Height = 3, Width = 5, Channels = 1
- const armnn::TensorShape inputShape{ 1, 3, 5, 1 };
+ std::vector<float> outputData({
+ 1.0f, 1.4f, 1.8f, 2.0f, 2.0f,
+ 13.0f, 16.2f, 19.4f, 21.0f, 21.0f,
+ 144.0f, 179.6f, 215.2f, 233.0f, 233.0f,
- // outputShape: BatchSize = 1, Height = 2, Width = 3, Channels = 1
- const armnn::TensorShape outputShape{ 1, 2, 3, 1 };
+ 233.0f, 197.4f, 161.8f, 144.0f, 144.0f,
+ 21.0f, 17.8f, 14.6f, 13.0f, 13.0f,
+ 2.0f, 1.6f, 1.2f, 1.0f, 1.0f
+ });
- return ResizeBilinearMinTestImpl(workloadFactory, inputShape, outputShape, armnn::DataLayout::NHWC);
-}
+ const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
+ if (dataLayout.GetDataLayout() == armnn::DataLayout::NHWC)
+ {
+ std::vector<float> tmp(inputData.size());
+ armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data());
+ inputData = tmp;
-LayerTestResult<float, 4> ResizeBilinearMagTestImpl(armnn::IWorkloadFactory& workloadFactory,
- const armnn::TensorShape& inputTensorShape,
- const armnn::TensorShape& outputTensorShape,
- armnn::DataLayout dataLayout)
-{
- const armnn::TensorInfo inputTensorInfo(inputTensorShape, armnn::DataType::Float32);
- const armnn::TensorInfo outputTensorInfo(outputTensorShape, armnn::DataType::Float32);
+ std::vector<float> tmp1(outputData.size());
+ armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data());
+ outputData = tmp1;
+ }
- auto input = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>({
- 1.0f, 2.0f,
- 13.0f, 21.0f,
- 144.0f, 233.0f
- }));
+ auto input = MakeTensor<float, 4>(inputTensorInfo, inputData);
LayerTestResult<float, 4> result(outputTensorInfo);
- result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, std::vector<float>({
- 1.0f, 1.4f, 1.8f, 2.0f, 2.0f,
- 13.0f, 16.2f, 19.4f, 21.0f, 21.0f,
- 144.0f, 179.6f, 215.2f, 233.0f, 233.0f
- }));
+ result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outputData);
std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
@@ -3335,28 +3352,6 @@ LayerTestResult<float, 4> ResizeBilinearMagTestImpl(armnn::IWorkloadFactory& wor
return result;
}
-LayerTestResult<float, 4> ResizeBilinearMagTest(armnn::IWorkloadFactory& workloadFactory)
-{
- // inputShape: BatchSize = 1, Channels = 1, Height = 3, Width = 2
- const armnn::TensorShape inputShape{ 1, 1, 3, 2 };
-
- // outputShape: BatchSize = 1, Channels = 1, Height = 3, Width = 5
- const armnn::TensorShape outputShape{ 1, 1, 3, 5 };
-
- return ResizeBilinearMagTestImpl(workloadFactory, inputShape, outputShape, armnn::DataLayout::NCHW);
-}
-
-LayerTestResult<float, 4> ResizeBilinearMagNhwcTest(armnn::IWorkloadFactory& workloadFactory)
-{
- // inputShape: BatchSize = 1, Height = 3, Width = 2, Channels = 1
- const armnn::TensorShape inputShape{ 1, 3, 2, 1 };
-
- // outputShape: BatchSize = 1, Height = 3, Width = 5, Channels = 1
- const armnn::TensorShape outputShape{ 1, 3, 5, 1 };
-
- return ResizeBilinearMagTestImpl(workloadFactory, inputShape, outputShape, armnn::DataLayout::NHWC);
-}
-
LayerTestResult<float, 2> FakeQuantizationTest(armnn::IWorkloadFactory& workloadFactory)
{
constexpr unsigned int width = 2;
diff --git a/src/backends/test/LayerTests.hpp b/src/backends/test/LayerTests.hpp
index 39cd4c4a0b..26dec60e0b 100644
--- a/src/backends/test/LayerTests.hpp
+++ b/src/backends/test/LayerTests.hpp
@@ -253,27 +253,25 @@ LayerTestResult<float, 4> CompareBoundedReLuTest(armnn::IWorkloadFactory& worklo
float lowerBound);
// Tests that the output should be identical to the input when the output dimensions match the input ones.
-LayerTestResult<float, 4> ResizeBilinearNopTest(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<float, 4> ResizeBilinearNopTest(armnn::IWorkloadFactory& workloadFactory,
+ const armnn::DataLayoutIndexed& dataLayout);
// Tests the behaviour of the resize bilinear operation when rescaling a 2x2 image into a 1x1 image.
-LayerTestResult<float, 4> SimpleResizeBilinearTest(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<float, 4> SimpleResizeBilinearTest(armnn::IWorkloadFactory& workloadFactory,
+ const armnn::DataLayoutIndexed& dataLayout);
// Tests the resize bilinear for minification of a square input matrix (also: input dimensions are a
// multiple of output dimensions).
-LayerTestResult<float, 4> ResizeBilinearSqMinTest(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<float, 4> ResizeBilinearSqMinTest(armnn::IWorkloadFactory& workloadFactory,
+ const armnn::DataLayoutIndexed& dataLayout);
// Tests the resize bilinear for minification (output dimensions smaller than input dimensions).
-LayerTestResult<float, 4> ResizeBilinearMinTest(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<float, 4> ResizeBilinearMinTest(armnn::IWorkloadFactory& workloadFactory,
+ const armnn::DataLayoutIndexed& dataLayout);
// Tests the resize bilinear for magnification (output dimensions bigger than input dimensions).
-LayerTestResult<float, 4> ResizeBilinearMagTest(armnn::IWorkloadFactory& workloadFactory);
-
-// Tests that execute Resize Bilinear with NHWC data layout
-LayerTestResult<float, 4> ResizeBilinearNopNhwcTest(armnn::IWorkloadFactory& workloadFactory);
-LayerTestResult<float, 4> SimpleResizeBilinearNhwcTest(armnn::IWorkloadFactory& workloadFactory);
-LayerTestResult<float, 4> ResizeBilinearSqMinNhwcTest(armnn::IWorkloadFactory& workloadFactory);
-LayerTestResult<float, 4> ResizeBilinearMinNhwcTest(armnn::IWorkloadFactory& workloadFactory);
-LayerTestResult<float, 4> ResizeBilinearMagNhwcTest(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<float, 4> ResizeBilinearMagTest(armnn::IWorkloadFactory& workloadFactory,
+ const armnn::DataLayoutIndexed& dataLayout);
LayerTestResult<float, 4> BatchNormTest(armnn::IWorkloadFactory& workloadFactory);
LayerTestResult<float, 4> BatchNormNhwcTest(armnn::IWorkloadFactory& workloadFactory);
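
For reference, the expected values in SimpleResizeBilinearTest follow directly from the top-left-corner projection described in the comment added to LayerTests.cpp: with a 2x2 input and a 1x1 output, the scale factor is 2 in both dimensions, so output coordinate (0, 0) projects to input coordinate (0, 0) and each channel's single output element is that channel's (0, 0) input. A small standalone check (plain C++, not Arm NN's resize implementation):

#include <cstdio>

int main()
{
    // The two channels of the 1x2x2x2 SimpleResizeBilinearTest input.
    const float channel0[2][2] = { { 1.0f, 255.0f }, { 200.0f, 250.0f } };
    const float channel1[2][2] = { { 250.0f, 200.0f }, { 250.0f, 1.0f } };

    const float scaleY = 2.0f / 1.0f; // inputHeight / outputHeight
    const float scaleX = 2.0f / 1.0f; // inputWidth  / outputWidth

    // Top-left-corner projection: no half-texel offset before scaling.
    const unsigned int y = static_cast<unsigned int>(0.0f * scaleY);
    const unsigned int x = static_cast<unsigned int>(0.0f * scaleX);

    // Prints: channel 0 -> 1.0, channel 1 -> 250.0, matching the test's expected output.
    std::printf("channel 0 -> %.1f, channel 1 -> %.1f\n", channel0[y][x], channel1[y][x]);
    return 0;
}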