From 7420e55aefe545452639992ab1972fd355a9ed30 Mon Sep 17 00:00:00 2001
From: Mohamed Nour Abouelseoud
Date: Fri, 12 Oct 2018 12:26:24 +0100
Subject: IVGCVSW-1885 add RefPadWorkload implementation and associated unit
 tests

* Added RefPadWorkload implementation
* Added unit tests and applied them to CL and Ref backends
* Fixed a bug in ClPadWorkload

Change-Id: I8cb76bc9d60ae8a39b08d40f05d628e3b72f6410
---
 src/backends/test/LayerTests.cpp | 402 +++++++++++++++++++++++++++++++++++++++
 src/backends/test/LayerTests.hpp |   5 +
 2 files changed, 407 insertions(+)

(limited to 'src/backends/test')
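Note (not part of the applied patch): the three tests below all exercise zero padding, so
every expected-output array encodes the same mapping. Given a pad list of
(before_i, after_i) pairs, the output element at index (o_0, ..., o_n) takes the input
element at (o_i - before_i) whenever every o_i lands inside the un-padded region, and
0.0f otherwise. A minimal standalone sketch of that mapping for the 2-D case, assuming
plain row-major std::vector storage rather than ArmNN tensor handles (the Pad2d helper
below is illustrative only, not code from this commit):

    #include <cstddef>
    #include <utility>
    #include <vector>

    // Zero-pad a row-major height x width buffer the way Pad2dTest builds its
    // expected values (padList = { {2,2}, {2,2} } turns the 3x3 input into 7x7).
    std::vector<float> Pad2d(const std::vector<float>& input,
                             std::size_t height, std::size_t width,
                             const std::vector<std::pair<std::size_t, std::size_t>>& padList)
    {
        const std::size_t outHeight = padList[0].first + height + padList[0].second;
        const std::size_t outWidth  = padList[1].first + width  + padList[1].second;
        std::vector<float> output(outHeight * outWidth, 0.0f); // pad value is 0.0f in these tests

        for (std::size_t y = 0; y < height; ++y)
        {
            for (std::size_t x = 0; x < width; ++x)
            {
                // Copy each input element to its shifted position; every other
                // output element keeps the zero pad value.
                output[(y + padList[0].first) * outWidth + (x + padList[1].first)] =
                    input[y * width + x];
            }
        }
        return output;
    }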
diff --git a/src/backends/test/LayerTests.cpp b/src/backends/test/LayerTests.cpp
index d955e42c36..c28a1d46ad 100755
--- a/src/backends/test/LayerTests.cpp
+++ b/src/backends/test/LayerTests.cpp
@@ -3443,6 +3443,408 @@ float CalcInvL2Norm(std::initializer_list<float> elements)
 
 } // anonymous namespace
 
+LayerTestResult<float, 2> Pad2dTest(armnn::IWorkloadFactory& workloadFactory)
+{
+    const armnn::TensorShape inputShape{ 3, 3 };
+    const armnn::TensorShape outputShape{ 7, 7 };
+
+    const armnn::TensorInfo inputTensorInfo(inputShape, armnn::DataType::Float32);
+    const armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);
+
+    std::vector<float> inputValues
+    {
+        // Height (3) x Width (3)
+        4.0f, 8.0f, 6.0f,
+        7.0f, 4.0f, 4.0f,
+        3.0f, 2.0f, 4.0f
+    };
+
+    std::vector<float> expectedOutputValues
+    {
+        0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 4.0f, 8.0f, 6.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 7.0f, 4.0f, 4.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 3.0f, 2.0f, 4.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f
+    };
+
+    auto inputTensor = MakeTensor<float, 2>(inputTensorInfo, std::vector<float>(inputValues));
+
+    LayerTestResult<float, 2> result(outputTensorInfo);
+    result.outputExpected = MakeTensor<float, 2>(outputTensorInfo, std::vector<float>(expectedOutputValues));
+
+    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
+    armnn::PadQueueDescriptor descriptor;
+
+    std::vector<std::pair<unsigned int, unsigned int>> PadList;
+    PadList.push_back(std::pair<unsigned int, unsigned int>(2,2));
+    PadList.push_back(std::pair<unsigned int, unsigned int>(2,2));
+
+    descriptor.m_Parameters.m_PadList = PadList;
+    armnn::WorkloadInfo info;
+
+    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
+    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
+
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);
+
+    inputHandle->Allocate();
+    outputHandle->Allocate();
+
+    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]);
+
+    workloadFactory.Finalize();
+    workload->Execute();
+
+    CopyDataFromITensorHandle(&result.output[0][0], outputHandle.get());
+
+    return result;
+};
+
+LayerTestResult<float, 3> Pad3dTest(armnn::IWorkloadFactory& workloadFactory)
+{
+    const armnn::TensorShape inputShape{ 2, 2, 2 };
+    const armnn::TensorShape outputShape{ 3, 5, 6 };
+
+    const armnn::TensorInfo inputTensorInfo(inputShape, armnn::DataType::Float32);
+    const armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);
+
+    std::vector<float> inputValues
+    {
+        // Channel 0, Height (2) x Width (2)
+        0.0f, 4.0f,
+        2.0f, 5.0f,
+
+        // Channel 1, Height (2) x Width (2)
+        6.0f, 1.0f,
+        5.0f, 2.0f
+    };
+
+    std::vector<float> expectedOutputValues
+    {
+        0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f, 4.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 2.0f, 5.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
+
+        0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 6.0f, 1.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 5.0f, 2.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
+
+        0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f
+    };
+
+    auto inputTensor = MakeTensor<float, 3>(inputTensorInfo, std::vector<float>(inputValues));
+
+    LayerTestResult<float, 3> result(outputTensorInfo);
+    result.outputExpected = MakeTensor<float, 3>(outputTensorInfo, std::vector<float>(expectedOutputValues));
+
+    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
+    armnn::PadQueueDescriptor descriptor;
+
+    std::vector<std::pair<unsigned int, unsigned int>> PadList;
+    PadList.push_back(std::pair<unsigned int, unsigned int>(0,1));
+    PadList.push_back(std::pair<unsigned int, unsigned int>(2,1));
+    PadList.push_back(std::pair<unsigned int, unsigned int>(2,2));
+
+    descriptor.m_Parameters.m_PadList = PadList;
+    armnn::WorkloadInfo info;
+
+    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
+    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
+
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);
+
+    inputHandle->Allocate();
+    outputHandle->Allocate();
+
+    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0]);
+
+    workloadFactory.Finalize();
+    workload->Execute();
+
+    CopyDataFromITensorHandle(&result.output[0][0][0], outputHandle.get());
+
+    return result;
+};
+
+LayerTestResult<float, 4> Pad4dTest(armnn::IWorkloadFactory& workloadFactory)
+{
+    const armnn::TensorShape inputShape{ 2, 2, 3, 2 };
+    const armnn::TensorShape outputShape{ 4, 5, 7, 4 };
+
+    const armnn::TensorInfo inputTensorInfo(inputShape, armnn::DataType::Float32);
+    const armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);
+
+    std::vector<float> inputValues
+    {
+        // Batch 0, Channel 0, Height (3) x Width (2)
+        0.0f, 1.0f,
+        2.0f, 3.0f,
+        4.0f, 5.0f,
+
+        // Batch 0, Channel 1, Height (3) x Width (2)
+        6.0f, 7.0f,
+        8.0f, 9.0f,
+        10.0f, 11.0f,
+
+        // Batch 1, Channel 0, Height (3) x Width (2)
+        12.0f, 13.0f,
+        14.0f, 15.0f,
+        16.0f, 17.0f,
+
+        // Batch 1, Channel 1, Height (3) x Width (2)
+        18.0f, 19.0f,
+        20.0f, 21.0f,
+        22.0f, 23.0f
+    };
+
+    std::vector<float> expectedOutputValues
+    {
+        0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f, 0.0f,
+
+        0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f, 0.0f,
+
+        0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f, 0.0f,
+
+        0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f, 0.0f,
+
+        0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f, 0.0f,
+
+        0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f, 0.0f,
+
+        0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f, 0.0f,
+
+        0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 1.0f, 0.0f,
+        0.0f, 2.0f, 3.0f, 0.0f,
+        0.0f, 4.0f, 5.0f, 0.0f,
+        0.0f, 0.0f, 0.0f, 0.0f,
+
+        0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 6.0f, 7.0f, 0.0f,
+        0.0f, 8.0f, 9.0f, 0.0f,
+        0.0f, 10.0f, 11.0f, 0.0f,
+        0.0f, 0.0f, 0.0f, 0.0f,
+
+        0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f, 0.0f,
+
+        0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f, 0.0f,
+
+        0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f, 0.0f,
+
+        0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 12.0f, 13.0f, 0.0f,
+        0.0f, 14.0f, 15.0f, 0.0f,
+        0.0f, 16.0f, 17.0f, 0.0f,
+        0.0f, 0.0f, 0.0f, 0.0f,
+
+        0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 18.0f, 19.0f, 0.0f,
+        0.0f, 20.0f, 21.0f, 0.0f,
+        0.0f, 22.0f, 23.0f, 0.0f,
+        0.0f, 0.0f, 0.0f, 0.0f,
+
+        0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f, 0.0f,
+
+        0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f, 0.0f,
+
+        0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f, 0.0f,
+
+        0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f, 0.0f,
+
+        0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f, 0.0f,
+
+        0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f, 0.0f,
+        0.0f, 0.0f, 0.0f, 0.0f
+    };
+
+    auto inputTensor = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>(inputValues));
+
+    LayerTestResult<float, 4> result(outputTensorInfo);
+    result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, std::vector<float>(expectedOutputValues));
+
+    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
+    armnn::PadQueueDescriptor descriptor;
+
+    std::vector<std::pair<unsigned int, unsigned int>> PadList;
+    PadList.push_back(std::pair<unsigned int, unsigned int>(1,1));
+    PadList.push_back(std::pair<unsigned int, unsigned int>(2,1));
+    PadList.push_back(std::pair<unsigned int, unsigned int>(3,1));
+    PadList.push_back(std::pair<unsigned int, unsigned int>(1,1));
+
+    descriptor.m_Parameters.m_PadList = PadList;
+    armnn::WorkloadInfo info;
+
+    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
+    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
+
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);
+
+    inputHandle->Allocate();
+    outputHandle->Allocate();
+
+    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]);
+
+    workloadFactory.Finalize();
+
+    workload->Execute();
+
+    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
+
+    return result;
+};
+
 LayerTestResult<float, 4> L2Normalization1dTest(armnn::IWorkloadFactory& workloadFactory)
 {
     // Width: 1
diff --git a/src/backends/test/LayerTests.hpp b/src/backends/test/LayerTests.hpp
index 6687439ddf..d9d4fb909e 100644
--- a/src/backends/test/LayerTests.hpp
+++ b/src/backends/test/LayerTests.hpp
@@ -350,6 +350,11 @@ LayerTestResult<float, 2> FullyConnectedLargeTest(armnn::IWorkloadFactory& workl
 LayerTestResult<float, 4> SimplePermuteFloat32Test(armnn::IWorkloadFactory& workloadFactory);
 LayerTestResult<uint8_t, 4> SimplePermuteUint8Test(armnn::IWorkloadFactory& workloadFactory);
 
+LayerTestResult<float, 2> Pad2dTest(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<float, 3> Pad3dTest(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<float, 4> Pad4dTest(armnn::IWorkloadFactory& workloadFactory);
+
+
 LayerTestResult<float, 4> PermuteFloat32ValueSet1Test(armnn::IWorkloadFactory& workloadFactory);
 LayerTestResult<float, 4> PermuteFloat32ValueSet2Test(armnn::IWorkloadFactory& workloadFactory);
 LayerTestResult<float, 4> PermuteFloat32ValueSet3Test(armnn::IWorkloadFactory& workloadFactory);
-- 
cgit v1.2.1
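The CL and Ref backend hook-up mentioned in the commit message falls outside the
'src/backends/test' filter of this view, so it is not shown above. If it follows the
existing pattern in the backend unit-test translation units, the new functions would be
registered roughly as sketched below; this assumes ArmNN's ARMNN_AUTO_TEST_CASE macro,
and the test-case names are illustrative rather than taken from this patch:

    // Ref backend test file (the CL equivalent would register the same three cases,
    // so both backends run against the expected values defined in LayerTests.cpp)
    ARMNN_AUTO_TEST_CASE(Pad2d, Pad2dTest)
    ARMNN_AUTO_TEST_CASE(Pad3d, Pad3dTest)
    ARMNN_AUTO_TEST_CASE(Pad4d, Pad4dTest)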