From 6b9658239d377372523fe49c71fde31701d986e3 Mon Sep 17 00:00:00 2001
From: James Conroy
Date: Thu, 1 Nov 2018 11:33:09 +0000
Subject: IVGCVSW-2103: Add 2-Channel unit tests for ResizeBilinear

* Modifies ResizeBilinear unit tests to use 2-Channel tensor shapes
  for input and output data, to improve test coverage when exercising
  NHWC data layout.
* Refactors unit tests to permute input and output data when
  exercising NHWC data layout.

Change-Id: Ib7fb438cac23e78ff0104c895c3b7596bf7c3aa7
---
 src/backends/cl/test/ClLayerTests.cpp         |  20 +-
 src/backends/reference/test/RefLayerTests.cpp |  20 +-
 src/backends/test/LayerTests.cpp              | 303 +++++++++++++-------------
 src/backends/test/LayerTests.hpp              |  22 +-
 4 files changed, 179 insertions(+), 186 deletions(-)

diff --git a/src/backends/cl/test/ClLayerTests.cpp b/src/backends/cl/test/ClLayerTests.cpp
index 4e7f785bbb..3ff3c093be 100755
--- a/src/backends/cl/test/ClLayerTests.cpp
+++ b/src/backends/cl/test/ClLayerTests.cpp
@@ -231,18 +231,18 @@ ARMNN_AUTO_TEST_CASE(L2Normalization3dNhwc, L2Normalization3dNhwcTest)
 ARMNN_AUTO_TEST_CASE(L2Normalization4dNhwc, L2Normalization4dNhwcTest)
 
 // Resize Bilinear - NCHW data layout
-ARMNN_AUTO_TEST_CASE(ResizeBilinearNop, ResizeBilinearNopTest)
-ARMNN_AUTO_TEST_CASE(SimpleResizeBilinear, SimpleResizeBilinearTest)
-ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMin, ResizeBilinearSqMinTest)
-ARMNN_AUTO_TEST_CASE(ResizeBilinearMin, ResizeBilinearMinTest)
-ARMNN_AUTO_TEST_CASE(ResizeBilinearMag, ResizeBilinearMagTest)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearNop, ResizeBilinearNopTest, armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(SimpleResizeBilinear, SimpleResizeBilinearTest, armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMin, ResizeBilinearSqMinTest, armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearMin, ResizeBilinearMinTest, armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearMag, ResizeBilinearMagTest, armnn::DataLayout::NCHW)
 
 // Resize Bilinear - NHWC data layout
-ARMNN_AUTO_TEST_CASE(ResizeBilinearNopNhwc, ResizeBilinearNopNhwcTest)
-ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearNhwc, SimpleResizeBilinearNhwcTest)
-ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinNhwc, ResizeBilinearSqMinNhwcTest)
-ARMNN_AUTO_TEST_CASE(ResizeBilinearMinNhwc, ResizeBilinearMinNhwcTest)
-ARMNN_AUTO_TEST_CASE(ResizeBilinearMagNhwc, ResizeBilinearMagNhwcTest)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearNopNhwc, ResizeBilinearNopTest, armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearNhwc, SimpleResizeBilinearTest, armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinNhwc, ResizeBilinearSqMinTest, armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearMinNhwc, ResizeBilinearMinTest, armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearMagNhwc, ResizeBilinearMagTest, armnn::DataLayout::NHWC)
 
 // Constant
 ARMNN_AUTO_TEST_CASE(Constant, ConstantTest)
diff --git a/src/backends/reference/test/RefLayerTests.cpp b/src/backends/reference/test/RefLayerTests.cpp
index 00fba20c5d..d481b827e0 100644
--- a/src/backends/reference/test/RefLayerTests.cpp
+++ b/src/backends/reference/test/RefLayerTests.cpp
@@ -246,23 +246,23 @@ ARMNN_AUTO_TEST_CASE(BatchNormUint8, BatchNormUint8Test)
 ARMNN_AUTO_TEST_CASE(BatchNormUint8Nhwc, BatchNormUint8NhwcTest)
 
 // Resize Bilinear - NCHW
-ARMNN_AUTO_TEST_CASE(SimpleResizeBilinear, SimpleResizeBilinearTest)
+ARMNN_AUTO_TEST_CASE(SimpleResizeBilinear, SimpleResizeBilinearTest, armnn::DataLayout::NCHW)
 ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearUint8, SimpleResizeBilinearUint8Test)
-ARMNN_AUTO_TEST_CASE(ResizeBilinearNop, ResizeBilinearNopTest)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearNop, ResizeBilinearNopTest, armnn::DataLayout::NCHW)
 ARMNN_AUTO_TEST_CASE(ResizeBilinearNopUint8, ResizeBilinearNopUint8Test)
-ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMin, ResizeBilinearSqMinTest)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMin, ResizeBilinearSqMinTest, armnn::DataLayout::NCHW)
 ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinUint8, ResizeBilinearSqMinUint8Test)
-ARMNN_AUTO_TEST_CASE(ResizeBilinearMin, ResizeBilinearMinTest)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearMin, ResizeBilinearMinTest, armnn::DataLayout::NCHW)
 ARMNN_AUTO_TEST_CASE(ResizeBilinearMinUint8, ResizeBilinearMinUint8Test)
-ARMNN_AUTO_TEST_CASE(ResizeBilinearMag, ResizeBilinearMagTest)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearMag, ResizeBilinearMagTest, armnn::DataLayout::NCHW)
 ARMNN_AUTO_TEST_CASE(ResizeBilinearMagUint8, ResizeBilinearMagUint8Test)
 
 // Resize Bilinear - NHWC
-ARMNN_AUTO_TEST_CASE(ResizeBilinearNopNhwc, ResizeBilinearNopNhwcTest)
-ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearNhwc, SimpleResizeBilinearNhwcTest)
-ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinNhwc, ResizeBilinearSqMinNhwcTest)
-ARMNN_AUTO_TEST_CASE(ResizeBilinearMinNhwc, ResizeBilinearMinNhwcTest)
-ARMNN_AUTO_TEST_CASE(ResizeBilinearMagNhwc, ResizeBilinearMagNhwcTest)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearNopNhwc, ResizeBilinearNopTest, armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearNhwc, SimpleResizeBilinearTest, armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinNhwc, ResizeBilinearSqMinTest, armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearMinNhwc, ResizeBilinearMinTest, armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearMagNhwc, ResizeBilinearMagTest, armnn::DataLayout::NHWC)
 
 // Fake Quantization
 ARMNN_AUTO_TEST_CASE(FakeQuantization, FakeQuantizationTest)
diff --git a/src/backends/test/LayerTests.cpp b/src/backends/test/LayerTests.cpp
index 3a4e95ce94..b5fd629d66 100755
--- a/src/backends/test/LayerTests.cpp
+++ b/src/backends/test/LayerTests.cpp
@@ -3041,19 +3041,33 @@ LayerTestResult<float, 3> Concatenation3dDim2DiffInputDimsTest(armnn::IWorkloadF
     return Concatenation3dDim2DiffInputDimsTestImpl<float>(workloadFactory, 0.0f, 0);
 }
 
-LayerTestResult<float, 4> ResizeBilinearNopTestImpl(armnn::IWorkloadFactory& workloadFactory,
-                                                    const armnn::TensorShape& inputOutputTensorShape,
-                                                    armnn::DataLayout dataLayout)
+LayerTestResult<float, 4> ResizeBilinearNopTest(armnn::IWorkloadFactory& workloadFactory,
+                                                const armnn::DataLayoutIndexed& dataLayout)
 {
-    const armnn::TensorInfo inputTensorInfo(inputOutputTensorShape, armnn::DataType::Float32);
-    const armnn::TensorInfo outputTensorInfo(inputOutputTensorShape, armnn::DataType::Float32);
+    const armnn::TensorInfo inputTensorInfo = GetTensorInfo(1, 2, 4, 4, dataLayout);
+    const armnn::TensorInfo outputTensorInfo = GetTensorInfo(1, 2, 4, 4, dataLayout);
+
+    std::vector<float> inputData({
+        1.0f, 2.0f, 3.0f, 4.0f,
+        2.0f, 3.0f, 4.0f, 5.0f,
+        3.0f, 4.0f, 5.0f, 6.0f,
+        4.0f, 5.0f, 6.0f, 7.0f,
 
-    auto input = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>({
         1.0f, 2.0f, 3.0f, 4.0f,
         2.0f, 3.0f, 4.0f, 5.0f,
         3.0f, 4.0f, 5.0f, 6.0f,
         4.0f, 5.0f, 6.0f, 7.0f
-    }));
+    });
+
+    const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
+    if (dataLayout.GetDataLayout() == armnn::DataLayout::NHWC)
+    {
+        std::vector<float> tmp(inputData.size());
+        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data());
+        inputData = tmp;
+    }
+
+    auto input = MakeTensor<float, 4>(inputTensorInfo, inputData);
 
     LayerTestResult<float, 4> result(outputTensorInfo);
     result.outputExpected = input;
@@ -3080,44 +3094,48 @@ LayerTestResult<float, 4> ResizeBilinearNopTestImpl(armnn::IWorkloadFactory& wor
     return result;
 }
 
-LayerTestResult<float, 4> ResizeBilinearNopTest(armnn::IWorkloadFactory& workloadFactory)
+LayerTestResult<float, 4> SimpleResizeBilinearTest(armnn::IWorkloadFactory& workloadFactory,
+                                                   const armnn::DataLayoutIndexed& dataLayout)
 {
-    // BatchSize = 1, Channels = 1, Height = 4, Width = 4
-    const armnn::TensorShape inputOutputShape{ 1, 1, 4, 4 };
+    const armnn::TensorInfo inputTensorInfo = GetTensorInfo(1, 2, 2, 2, dataLayout);
+    const armnn::TensorInfo outputTensorInfo = GetTensorInfo(1, 2, 1, 1, dataLayout);
 
-    return ResizeBilinearNopTestImpl(workloadFactory, inputOutputShape, armnn::DataLayout::NCHW);
-}
+    std::vector<float> inputData({
+        1.0f, 255.0f,
+        200.0f, 250.0f,
 
-LayerTestResult<float, 4> ResizeBilinearNopNhwcTest(armnn::IWorkloadFactory& workloadFactory)
-{
-    // BatchSize = 1, Height = 4, Width = 4, Channels = 1
-    const armnn::TensorShape inputOutputShape{ 1, 4, 4, 1 };
+        250.0f, 200.0f,
+        250.0f, 1.0f
+    });
 
-    return ResizeBilinearNopTestImpl(workloadFactory, inputOutputShape, armnn::DataLayout::NHWC);
-}
+    // The 'resize bilinear' operation projects the top-left corner of output texels into the input image,
+    // then figures out the interpolants and weights. Note this is different to projecting the centre of the
+    // output texel. Thus, for an input matrix of 2x2, we'll expect the output 1x1 matrix to contain, as
+    // its single element, the value that was at position (0,0) of the input matrix (rather than an average,
+    // which we would expect if projecting the centre).
 
-LayerTestResult<float, 4> SimpleResizeBilinearTestImpl(armnn::IWorkloadFactory& workloadFactory,
-                                                       const armnn::TensorShape& inputTensorShape,
-                                                       const armnn::TensorShape& outputTensorShape,
-                                                       armnn::DataLayout dataLayout)
-{
-    const armnn::TensorInfo inputTensorInfo(inputTensorShape, armnn::DataType::Float32);
-    const armnn::TensorInfo outputTensorInfo(outputTensorShape, armnn::DataType::Float32);
+    std::vector<float> outputData({
+        1.0f,
 
-    auto input = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>({
-        1.0f, 255.0f,
-        200.0f, 250.0f
-    }));
+        250.0f
+    });
+
+    const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
+    if (dataLayout.GetDataLayout() == armnn::DataLayout::NHWC)
+    {
+        std::vector<float> tmp(inputData.size());
+        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data());
+        inputData = tmp;
+
+        std::vector<float> tmp1(outputData.size());
+        armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data());
+        outputData = tmp1;
+    }
+
+    auto input = MakeTensor<float, 4>(inputTensorInfo, inputData);
 
-    // The 'resize bilinear' operation projects the top-left corner of output texels into the input image,
-    // then figures out the interpolants and weights. Note this is different to projecting the centre of the
-    // output texel - and thus we'll expect the output 1x1 matrix to contain, as its single element, the value
-    // that was at position (0,0) of the input matrix (rather than an average, which we would expect if projecting
-    // the centre).
     LayerTestResult<float, 4> result(outputTensorInfo);
-    result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, std::vector<float>({
-        1.0f
-    }));
+    result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outputData);
 
     std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
@@ -3141,48 +3159,48 @@ LayerTestResult<float, 4> SimpleResizeBilinearTestImpl(armnn::IWorkloadFactory&
     return result;
 }
 
-LayerTestResult<float, 4> SimpleResizeBilinearTest(armnn::IWorkloadFactory& workloadFactory)
+LayerTestResult<float, 4> ResizeBilinearSqMinTest(armnn::IWorkloadFactory& workloadFactory,
+                                                  const armnn::DataLayoutIndexed& dataLayout)
 {
-    // inputShape: BatchSize = 1, Channels = 1, Height = 2, Width = 2
-    const armnn::TensorShape inputShape{ 1, 1, 2, 2 };
+    const armnn::TensorInfo inputTensorInfo = GetTensorInfo(1, 2, 4, 4, dataLayout);
+    const armnn::TensorInfo outputTensorInfo = GetTensorInfo(1, 2, 2, 2, dataLayout);
 
-    // outputShape: BatchSize = 1, Channels = 1, Height = 1, Width = 1
-    const armnn::TensorShape outputShape{ 1, 1, 1, 1 };
+    std::vector<float> inputData({
+        1.0f, 2.0f, 3.0f, 4.0f,
+        2.0f, 3.0f, 4.0f, 5.0f,
+        3.0f, 4.0f, 5.0f, 6.0f,
+        4.0f, 5.0f, 6.0f, 7.0f,
 
-    return SimpleResizeBilinearTestImpl(workloadFactory, inputShape, outputShape, armnn::DataLayout::NCHW);
-}
+        7.0f, 6.0f, 5.0f, 4.0f,
+        6.0f, 5.0f, 4.0f, 3.0f,
+        5.0f, 4.0f, 3.0f, 2.0f,
+        4.0f, 3.0f, 2.0f, 1.0f
+    });
 
-LayerTestResult<float, 4> SimpleResizeBilinearNhwcTest(armnn::IWorkloadFactory& workloadFactory)
-{
-    // inputShape: BatchSize = 1, Height = 2, Width = 2, Channels = 1
-    const armnn::TensorShape inputShape{ 1, 2, 2, 1 };
+    std::vector<float> outputData({
+        1.0f, 3.0f,
+        3.0f, 5.0f,
 
-    // outputShape: BatchSize = 1, Height = 1, Width = 1, Channels = 1
-    const armnn::TensorShape outputShape{ 1, 1, 1, 1 };
+        7.0f, 5.0f,
+        5.0f, 3.0f
+    });
 
-    return SimpleResizeBilinearTestImpl(workloadFactory, inputShape, outputShape, armnn::DataLayout::NHWC);
-}
+    const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
+    if (dataLayout.GetDataLayout() == armnn::DataLayout::NHWC)
+    {
+        std::vector<float> tmp(inputData.size());
+        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data());
+        inputData = tmp;
 
-LayerTestResult<float, 4> ResizeBilinearSqMinTestImpl(armnn::IWorkloadFactory& workloadFactory,
-                                                      const armnn::TensorShape& inputTensorShape,
-                                                      const armnn::TensorShape& outputTensorShape,
-                                                      armnn::DataLayout dataLayout)
-{
-    const armnn::TensorInfo inputTensorInfo(inputTensorShape, armnn::DataType::Float32);
-    const armnn::TensorInfo outputTensorInfo(outputTensorShape, armnn::DataType::Float32);
+        std::vector<float> tmp1(outputData.size());
+        armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data());
+        outputData = tmp1;
+    }
 
-    auto input = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>({
-        1.0f, 2.0f, 3.0f, 4.0f,
-        2.0f, 3.0f, 4.0f, 5.0f,
-        3.0f, 4.0f, 5.0f, 6.0f,
-        4.0f, 5.0f, 6.0f, 7.0f
-    }));
+    auto input = MakeTensor<float, 4>(inputTensorInfo, inputData);
 
     LayerTestResult<float, 4> result(outputTensorInfo);
-    result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, std::vector<float>({
-        1.0f, 3.0f,
-        3.0f, 5.0f
-    }));
+    result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outputData);
 
     std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
@@ -3206,47 +3224,46 @@ LayerTestResult<float, 4> ResizeBilinearSqMinTestImpl(armnn::IWorkloadFactory&
     return result;
 }
 
-LayerTestResult<float, 4> ResizeBilinearSqMinTest(armnn::IWorkloadFactory& workloadFactory)
+LayerTestResult<float, 4> ResizeBilinearMinTest(armnn::IWorkloadFactory& workloadFactory,
+                                                const armnn::DataLayoutIndexed& dataLayout)
 {
-    // inputShape: BatchSize = 1, Channels = 1, Height = 4, Width = 4
-    const armnn::TensorShape inputShape{ 1, 1, 4, 4 };
+    const armnn::TensorInfo inputTensorInfo = GetTensorInfo(1, 2, 3, 5, dataLayout);
+    const armnn::TensorInfo outputTensorInfo = GetTensorInfo(1, 2, 2, 3, dataLayout);
 
-    // outputShape: BatchSize = 1, Channels = 1, Height = 2, Width = 2
-    const armnn::TensorShape outputShape{ 1, 1, 2, 2 };
+    std::vector<float> inputData({
+        1.0f, 2.0f, 3.0f, 5.0f, 8.0f,
+        13.0f, 21.0f, 34.0f, 55.0f, 89.0f,
+        144.0f, 233.0f, 377.0f, 610.0f, 987.0f,
 
-    return ResizeBilinearSqMinTestImpl(workloadFactory, inputShape, outputShape, armnn::DataLayout::NCHW);
-}
+        987.0f, 610.0f, 377.0f, 233.0f, 144.0f,
+        89.0f, 55.0f, 34.0f, 21.0f, 13.0f,
+        8.0f, 5.0f, 3.0f, 2.0f, 1.0f
+    });
 
-LayerTestResult<float, 4> ResizeBilinearSqMinNhwcTest(armnn::IWorkloadFactory& workloadFactory)
-{
-    // inputShape: BatchSize = 1, Height = 4, Width = 4, Channels = 1
-    const armnn::TensorShape inputShape{ 1, 4, 4, 1 };
+    std::vector<float> outputData({
+        1.0f, 2.6666f, 6.00f,
+        78.5f, 179.3333f, 401.00f,
 
-    // outputShape: BatchSize = 1, Height = 2, Width = 2, Channels = 1
-    const armnn::TensorShape outputShape{ 1, 2, 2, 1 };
+        987.0f, 454.6670f, 203.33f,
+        48.5f, 22.3333f, 10.00f
+    });
 
-    return ResizeBilinearSqMinTestImpl(workloadFactory, inputShape, outputShape, armnn::DataLayout::NHWC);
-}
+    const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
+    if (dataLayout.GetDataLayout() == armnn::DataLayout::NHWC)
+    {
+        std::vector<float> tmp(inputData.size());
+        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data());
+        inputData = tmp;
 
-LayerTestResult<float, 4> ResizeBilinearMinTestImpl(armnn::IWorkloadFactory& workloadFactory,
-                                                    const armnn::TensorShape& inputTensorShape,
-                                                    const armnn::TensorShape& outputTensorShape,
-                                                    armnn::DataLayout dataLayout)
-{
-    const armnn::TensorInfo inputTensorInfo(inputTensorShape, armnn::DataType::Float32);
-    const armnn::TensorInfo outputTensorInfo(outputTensorShape, armnn::DataType::Float32);
+        std::vector<float> tmp1(outputData.size());
+        armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data());
+        outputData = tmp1;
+    }
 
-    auto input = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>({
-        1.0f, 2.0f, 3.0f, 5.0f, 8.0f,
-        13.0f, 21.0f, 34.0f, 55.0f, 89.0f,
-        144.0f, 233.0f, 377.0f, 610.0f, 987.0f
-    }));
+    auto input = MakeTensor<float, 4>(inputTensorInfo, inputData);
 
     LayerTestResult<float, 4> result(outputTensorInfo);
-    result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, std::vector<float>({
-        1.0f, 2.6666f, 6.0f,
-        78.5f, 179.3333f, 401.0f
-    }));
+    result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outputData);
 
     std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
@@ -3270,48 +3287,48 @@ LayerTestResult<float, 4> ResizeBilinearMinTestImpl(armnn::IWorkloadFactory& wor
     return result;
 }
 
-LayerTestResult<float, 4> ResizeBilinearMinTest(armnn::IWorkloadFactory& workloadFactory)
+LayerTestResult<float, 4> ResizeBilinearMagTest(armnn::IWorkloadFactory& workloadFactory,
+                                                const armnn::DataLayoutIndexed& dataLayout)
 {
-    // inputShape: BatchSize = 1, Channels = 1, Height = 3, Width = 5
-    const armnn::TensorShape inputShape{ 1, 1, 3, 5 };
+    const armnn::TensorInfo inputTensorInfo = GetTensorInfo(1, 2, 3, 2, dataLayout);
+    const armnn::TensorInfo outputTensorInfo = GetTensorInfo(1, 2, 3, 5, dataLayout);
 
-    // outputShape: BatchSize = 1, Channels = 1, Height = 2, Width = 3
-    const armnn::TensorShape outputShape{ 1, 1, 2, 3 };
+    std::vector<float> inputData({
+        1.0f, 2.0f,
+        13.0f, 21.0f,
+        144.0f, 233.0f,
 
-    return ResizeBilinearMinTestImpl(workloadFactory, inputShape, outputShape, armnn::DataLayout::NCHW);
-}
+        233.0f, 144.0f,
+        21.0f, 13.0f,
+        2.0f, 1.0f
+    });
 
-LayerTestResult<float, 4> ResizeBilinearMinNhwcTest(armnn::IWorkloadFactory& workloadFactory)
-{
-    // inputShape: BatchSize = 1, Height = 3, Width = 5, Channels = 1
-    const armnn::TensorShape inputShape{ 1, 3, 5, 1 };
+    std::vector<float> outputData({
+        1.0f, 1.4f, 1.8f, 2.0f, 2.0f,
+        13.0f, 16.2f, 19.4f, 21.0f, 21.0f,
+        144.0f, 179.6f, 215.2f, 233.0f, 233.0f,
 
-    // outputShape: BatchSize = 1, Height = 2, Width = 3, Channels = 1
-    const armnn::TensorShape outputShape{ 1, 2, 3, 1 };
+        233.0f, 197.4f, 161.8f, 144.0f, 144.0f,
+        21.0f, 17.8f, 14.6f, 13.0f, 13.0f,
+        2.0f, 1.6f, 1.2f, 1.0f, 1.0f
+    });
 
-    return ResizeBilinearMinTestImpl(workloadFactory, inputShape, outputShape, armnn::DataLayout::NHWC);
-}
+    const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
+    if (dataLayout.GetDataLayout() == armnn::DataLayout::NHWC)
+    {
+        std::vector<float> tmp(inputData.size());
+        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data());
+        inputData = tmp;
 
-LayerTestResult<float, 4> ResizeBilinearMagTestImpl(armnn::IWorkloadFactory& workloadFactory,
-                                                    const armnn::TensorShape& inputTensorShape,
-                                                    const armnn::TensorShape& outputTensorShape,
-                                                    armnn::DataLayout dataLayout)
-{
-    const armnn::TensorInfo inputTensorInfo(inputTensorShape, armnn::DataType::Float32);
-    const armnn::TensorInfo outputTensorInfo(outputTensorShape, armnn::DataType::Float32);
+        std::vector<float> tmp1(outputData.size());
+        armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data());
+        outputData = tmp1;
+    }
 
-    auto input = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>({
-        1.0f, 2.0f,
-        13.0f, 21.0f,
-        144.0f, 233.0f
-    }));
+    auto input = MakeTensor<float, 4>(inputTensorInfo, inputData);
 
     LayerTestResult<float, 4> result(outputTensorInfo);
-    result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, std::vector<float>({
-        1.0f, 1.4f, 1.8f, 2.0f, 2.0f,
-        13.0f, 16.2f, 19.4f, 21.0f, 21.0f,
-        144.0f, 179.6f, 215.2f, 233.0f, 233.0f
-    }));
+    result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outputData);
 
     std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
@@ -3335,28 +3352,6 @@ LayerTestResult<float, 4> ResizeBilinearMagTestImpl(armnn::IWorkloadFactory& wor
     return result;
 }
 
-LayerTestResult<float, 4> ResizeBilinearMagTest(armnn::IWorkloadFactory& workloadFactory)
-{
-    // inputShape: BatchSize = 1, Channels = 1, Height = 3, Width = 2
-    const armnn::TensorShape inputShape{ 1, 1, 3, 2 };
-
-    // outputShape: BatchSize = 1, Channels = 1, Height = 3, Width = 5
-    const armnn::TensorShape outputShape{ 1, 1, 3, 5 };
-
-    return ResizeBilinearMagTestImpl(workloadFactory, inputShape, outputShape, armnn::DataLayout::NCHW);
-}
-
-LayerTestResult<float, 4> ResizeBilinearMagNhwcTest(armnn::IWorkloadFactory& workloadFactory)
-{
-    // inputShape: BatchSize = 1, Height = 3, Width = 2, Channels = 1
-    const armnn::TensorShape inputShape{ 1, 3, 2, 1 };
-
-    // outputShape: BatchSize = 1, Height = 3, Width = 5, Channels = 1
-    const armnn::TensorShape outputShape{ 1, 3, 5, 1 };
-
-    return ResizeBilinearMagTestImpl(workloadFactory, inputShape, outputShape, armnn::DataLayout::NHWC);
-}
-
 LayerTestResult<float, 2> FakeQuantizationTest(armnn::IWorkloadFactory& workloadFactory)
 {
     constexpr unsigned int width = 2;
diff --git a/src/backends/test/LayerTests.hpp b/src/backends/test/LayerTests.hpp
index 39cd4c4a0b..26dec60e0b 100644
--- a/src/backends/test/LayerTests.hpp
+++ b/src/backends/test/LayerTests.hpp
@@ -253,27 +253,25 @@ LayerTestResult<float, 4> CompareBoundedReLuTest(armnn::IWorkloadFactory& worklo
                                                  float lowerBound);
 
 // Tests that the output should be identical to the input when the output dimensions match the input ones.
-LayerTestResult<float, 4> ResizeBilinearNopTest(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<float, 4> ResizeBilinearNopTest(armnn::IWorkloadFactory& workloadFactory,
+                                                const armnn::DataLayoutIndexed& dataLayout);
 
 // Tests the behaviour of the resize bilinear operation when rescaling a 2x2 image into a 1x1 image.
-LayerTestResult<float, 4> SimpleResizeBilinearTest(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<float, 4> SimpleResizeBilinearTest(armnn::IWorkloadFactory& workloadFactory,
+                                                   const armnn::DataLayoutIndexed& dataLayout);
 
 // Tests the resize bilinear for minification of a square input matrix (also: input dimensions are a
 // multiple of output dimensions).
-LayerTestResult<float, 4> ResizeBilinearSqMinTest(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<float, 4> ResizeBilinearSqMinTest(armnn::IWorkloadFactory& workloadFactory,
+                                                  const armnn::DataLayoutIndexed& dataLayout);
 
 // Tests the resize bilinear for minification (output dimensions smaller than input dimensions).
-LayerTestResult<float, 4> ResizeBilinearMinTest(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<float, 4> ResizeBilinearMinTest(armnn::IWorkloadFactory& workloadFactory,
+                                                const armnn::DataLayoutIndexed& dataLayout);
 
 // Tests the resize bilinear for magnification (output dimensions bigger than input dimensions).
-LayerTestResult<float, 4> ResizeBilinearMagTest(armnn::IWorkloadFactory& workloadFactory);
-
-// Tests that execute Resize Bilinear with NHWC data layout
-LayerTestResult<float, 4> ResizeBilinearNopNhwcTest(armnn::IWorkloadFactory& workloadFactory);
-LayerTestResult<float, 4> SimpleResizeBilinearNhwcTest(armnn::IWorkloadFactory& workloadFactory);
-LayerTestResult<float, 4> ResizeBilinearSqMinNhwcTest(armnn::IWorkloadFactory& workloadFactory);
-LayerTestResult<float, 4> ResizeBilinearMinNhwcTest(armnn::IWorkloadFactory& workloadFactory);
-LayerTestResult<float, 4> ResizeBilinearMagNhwcTest(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<float, 4> ResizeBilinearMagTest(armnn::IWorkloadFactory& workloadFactory,
+                                                const armnn::DataLayoutIndexed& dataLayout);
 
 LayerTestResult<float, 4> BatchNormTest(armnn::IWorkloadFactory& workloadFactory);
 LayerTestResult<float, 4> BatchNormNhwcTest(armnn::IWorkloadFactory& workloadFactory);
--
cgit v1.2.1
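
Note on the NHWC handling above: the refactored tests keep a single set of reference data in NCHW order and only permute it with the mapping { 0, 3, 1, 2 } when the NHWC layout is exercised (source dimension i moves to destination dimension mapping[i], so the channel dimension becomes innermost). The standalone sketch below illustrates that mapping on a buffer shaped like the 1x2x4x4 ResizeBilinearNop data; the PermuteNchwToNhwc helper and the explicit loops are illustrative assumptions for this note only and are not part of ArmNN or of this patch.

// Illustrative sketch (assumed helper, not ArmNN code): applies the NCHW -> NHWC
// mapping { 0, 3, 1, 2 } by recomputing each element's linear offset.
#include <cstddef>
#include <iostream>
#include <vector>

// Copies a dense NCHW float buffer into NHWC order.
std::vector<float> PermuteNchwToNhwc(const std::vector<float>& src,
                                     std::size_t n, std::size_t c, std::size_t h, std::size_t w)
{
    std::vector<float> dst(src.size());
    for (std::size_t in = 0; in < n; ++in)
    {
        for (std::size_t ic = 0; ic < c; ++ic)
        {
            for (std::size_t ih = 0; ih < h; ++ih)
            {
                for (std::size_t iw = 0; iw < w; ++iw)
                {
                    const std::size_t srcIndex = ((in * c + ic) * h + ih) * w + iw; // NCHW offset
                    const std::size_t dstIndex = ((in * h + ih) * w + iw) * c + ic; // NHWC offset
                    dst[dstIndex] = src[srcIndex];
                }
            }
        }
    }
    return dst;
}

int main()
{
    // 1 batch, 2 channels, 4x4 spatial data, matching the ResizeBilinearNop test shape.
    const std::size_t n = 1, c = 2, h = 4, w = 4;
    std::vector<float> nchw(n * c * h * w);
    for (std::size_t i = 0; i < nchw.size(); ++i)
    {
        nchw[i] = static_cast<float>(i);
    }

    const std::vector<float> nhwc = PermuteNchwToNhwc(nchw, n, c, h, w);

    // Element (n=0, c=1, h=2, w=3) keeps its value but moves to a new linear offset.
    const std::size_t nhwcIndex = ((0 * h + 2) * w + 3) * c + 1;
    std::cout << "NCHW[27] = " << nchw[27] << ", NHWC[" << nhwcIndex << "] = " << nhwc[nhwcIndex] << "\n";
    return 0;
}

Keeping the reference data in NCHW and permuting on demand, as the patch does, means one dataset serves both layouts and the expected outputs stay directly comparable between the NCHW and NHWC test cases.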