From 2e5d0b2e2a212ceb803681b717cbaf821f5e0929 Mon Sep 17 00:00:00 2001 From: Matthew Sloyan Date: Thu, 21 Oct 2021 14:05:31 +0100 Subject: IVGCVSW-6469 Add MirrorPad FrontEnd and Ref Support * Added PaddingMode enum to PaddingDescriptor to enable Symmetric and Reflect padding. * Added Symmetric and Reflect Ref implementation. * Added Serializer & Deserializer support. * Added unit tests. Signed-off-by: Matthew Sloyan Change-Id: I4bed907b31742b32ccefe5e8ca39a6f1e5bd9dee --- .../test/layerTests/MirrorPadTestImpl.cpp | 1091 ++++++++++++++++++++ .../test/layerTests/MirrorPadTestImpl.hpp | 117 +++ 2 files changed, 1208 insertions(+) create mode 100644 src/backends/backendsCommon/test/layerTests/MirrorPadTestImpl.cpp create mode 100644 src/backends/backendsCommon/test/layerTests/MirrorPadTestImpl.hpp (limited to 'src/backends/backendsCommon/test/layerTests') diff --git a/src/backends/backendsCommon/test/layerTests/MirrorPadTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/MirrorPadTestImpl.cpp new file mode 100644 index 0000000000..61899db00e --- /dev/null +++ b/src/backends/backendsCommon/test/layerTests/MirrorPadTestImpl.cpp @@ -0,0 +1,1091 @@ +// +// Copyright © 2021 Arm Ltd and Contributors. All rights reserved. +// SPDX-License-Identifier: MIT +// + +#include "MirrorPadTestImpl.hpp" + +#include + +#include +#include + +#include + +// +// Implementation templates +// + +template +LayerTestResult MirrorPad2dTestCommon( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory, + const armnn::TensorInfo& inputTensorInfo, + const armnn::TensorInfo& outputTensorInfo, + const std::vector& inputValues, + const std::vector& expectedOutputValues, + const std::vector>& padList, + const armnn::PaddingMode paddingMode) +{ + IgnoreUnused(memoryManager); + std::vector actualOutput(outputTensorInfo.GetNumElements()); + + std::unique_ptr inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo); + std::unique_ptr outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo); + + armnn::PadQueueDescriptor descriptor; + + descriptor.m_Parameters.m_PadList = padList; + descriptor.m_Parameters.m_PaddingMode = paddingMode; + armnn::WorkloadInfo info; + + AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get()); + AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get()); + + std::unique_ptr workload = workloadFactory.CreatePad(descriptor, info); + + inputHandle->Allocate(); + outputHandle->Allocate(); + + CopyDataToITensorHandle(inputHandle.get(), inputValues.data()); + + ExecuteWorkload(*workload, memoryManager); + + CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get()); + + return LayerTestResult(actualOutput, + expectedOutputValues, + outputHandle->GetShape(), + outputTensorInfo.GetShape()); +} + +template +LayerTestResult MirrorPad3dTestCommon( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory, + const armnn::TensorInfo& inputTensorInfo, + const armnn::TensorInfo& outputTensorInfo, + const std::vector& inputValues, + const std::vector& expectedOutputValues, + const std::vector>& padList, + const armnn::PaddingMode paddingMode) +{ + IgnoreUnused(memoryManager); + std::vector actualOutput(outputTensorInfo.GetNumElements()); + + std::unique_ptr inputHandle = 
tensorHandleFactory.CreateTensorHandle(inputTensorInfo); + std::unique_ptr outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo); + + armnn::PadQueueDescriptor descriptor; + descriptor.m_Parameters.m_PadList = padList; + descriptor.m_Parameters.m_PaddingMode = paddingMode; + + armnn::WorkloadInfo info; + AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get()); + AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get()); + + std::unique_ptr workload = workloadFactory.CreatePad(descriptor, info); + + inputHandle->Allocate(); + outputHandle->Allocate(); + + CopyDataToITensorHandle(inputHandle.get(), inputValues.data()); + + ExecuteWorkload(*workload, memoryManager); + + CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get()); + + return LayerTestResult(actualOutput, + expectedOutputValues, + outputHandle->GetShape(), + outputTensorInfo.GetShape()); +} + +template +LayerTestResult MirrorPad4dTestCommon( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory, + const armnn::TensorInfo& inputTensorInfo, + const armnn::TensorInfo& outputTensorInfo, + const std::vector& inputValues, + const std::vector& expectedOutputValues, + const std::vector>& padList, + const armnn::PaddingMode paddingMode) +{ + IgnoreUnused(memoryManager); + std::vector actualOutput(outputTensorInfo.GetNumElements()); + + std::unique_ptr inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo); + std::unique_ptr outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo); + + armnn::PadQueueDescriptor descriptor; + descriptor.m_Parameters.m_PadList = padList; + descriptor.m_Parameters.m_PaddingMode = paddingMode; + + armnn::WorkloadInfo info; + AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get()); + AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get()); + + std::unique_ptr workload = workloadFactory.CreatePad(descriptor, info); + + inputHandle->Allocate(); + outputHandle->Allocate(); + + CopyDataToITensorHandle(inputHandle.get(), inputValues.data()); + + ExecuteWorkload(*workload, memoryManager); + + CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get()); + + return LayerTestResult(actualOutput, + expectedOutputValues, + outputHandle->GetShape(), + outputTensorInfo.GetShape()); +} + +template> +LayerTestResult PadSymmetric2dTest( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory, + float qScale, + int32_t qOffset) +{ + const armnn::TensorShape inputShape{ 3, 3 }; + const armnn::TensorShape outputShape{ 7, 7 }; + + const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType, qScale, qOffset); + const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType, qScale, qOffset); + + std::vector inputValues = armnnUtils::QuantizedVector( + { + // Height (3) x Width (3) + 1, 2, 3, + 4, 5, 6, + 7, 8, 9 + }, + qScale, qOffset); + + std::vector expectedOutputValues = armnnUtils::QuantizedVector( + { + 5, 4, 4, 5, 6, 6, 5, + 2, 1, 1, 2, 3, 3, 2, + 2, 1, 1, 2, 3, 3, 2, + 5, 4, 4, 5, 6, 6, 5, + 8, 7, 7, 8, 9, 9, 8, + 8, 7, 7, 8, 9, 9, 8, + 5, 4, 4, 5, 6, 6, 5 + }, + qScale, qOffset); + + std::vector> padList; + padList.push_back(std::pair(2,2)); + padList.push_back(std::pair(2,2)); + + return MirrorPad2dTestCommon(workloadFactory, + memoryManager, + 
tensorHandleFactory, + inputTensorInfo, + outputTensorInfo, + inputValues, + expectedOutputValues, + padList, + armnn::PaddingMode::Symmetric); +} + +template> +LayerTestResult PadReflect2dTest( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory, + float qScale, + int32_t qOffset) +{ + const armnn::TensorShape inputShape{ 3, 3 }; + const armnn::TensorShape outputShape{ 7, 7 }; + + const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType, qScale, qOffset); + const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType, qScale, qOffset); + + std::vector inputValues = armnnUtils::QuantizedVector( + { + // Height (3) x Width (3) + 1, 2, 3, + 4, 5, 6, + 7, 8, 9 + }, + qScale, qOffset); + + std::vector expectedOutputValues = armnnUtils::QuantizedVector( + { + 9, 8, 7, 8, 9, 8, 7, + 6, 5, 4, 5, 6, 5, 4, + 3, 2, 1, 2, 3, 2, 1, + 6, 5, 4, 5, 6, 5, 4, + 9, 8, 7, 8, 9, 8, 7, + 6, 5, 4, 5, 6, 5, 4, + 3, 2, 1, 2, 3, 2, 1 + }, + qScale, qOffset); + + std::vector> padList; + padList.push_back(std::pair(2,2)); + padList.push_back(std::pair(2,2)); + + return MirrorPad2dTestCommon(workloadFactory, + memoryManager, + tensorHandleFactory, + inputTensorInfo, + outputTensorInfo, + inputValues, + expectedOutputValues, + padList, + armnn::PaddingMode::Reflect); +} + +template> +LayerTestResult PadSymmetric3dTest( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory, + float qScale, + int32_t qOffset) +{ + const armnn::TensorShape inputShape{ 2, 2, 2 }; + const armnn::TensorShape outputShape{ 4, 4, 4 }; + + const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType, qScale, qOffset); + const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType, qScale, qOffset); + + std::vector inputValues = armnnUtils::QuantizedVector( + { + // Channel 0, Height (2) x Width (2) + 1, 2, + 3, 4, + + // Channel 1, Height (2) x Width (2) + 5, 6, + 7, 8 + }, + qScale, qOffset); + + std::vector expectedOutputValues = armnnUtils::QuantizedVector( + { + 1, 1, 2, 2, + 1, 1, 2, 2, + 3, 3, 4, 4, + 3, 3, 4, 4, + + 1, 1, 2, 2, + 1, 1, 2, 2, + 3, 3, 4, 4, + 3, 3, 4, 4, + + 5, 5, 6, 6, + 5, 5, 6, 6, + 7, 7, 8, 8, + 7, 7, 8, 8, + + 5, 5, 6, 6, + 5, 5, 6, 6, + 7, 7, 8, 8, + 7, 7, 8, 8 + }, + qScale, qOffset); + + std::vector> padList; + padList.push_back(std::pair(1,1)); + padList.push_back(std::pair(1,1)); + padList.push_back(std::pair(1,1)); + + return MirrorPad3dTestCommon(workloadFactory, + memoryManager, + tensorHandleFactory, + inputTensorInfo, + outputTensorInfo, + inputValues, + expectedOutputValues, + padList, + armnn::PaddingMode::Symmetric); +} + +template> +LayerTestResult PadReflect3dTest( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory, + float qScale, + int32_t qOffset) +{ + const armnn::TensorShape inputShape{ 2, 2, 2 }; + const armnn::TensorShape outputShape{ 4, 4, 4 }; + + const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType, qScale, qOffset); + const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType, qScale, qOffset); + + std::vector inputValues = armnnUtils::QuantizedVector( + { + // Channel 0, Height (2) x Width (2) + 1, 2, + 3, 4, + + // Channel 1, Height (2) x Width (2) + 5, 6, + 7, 8 + }, + qScale, qOffset); + + std::vector 
expectedOutputValues = armnnUtils::QuantizedVector( + { + 8, 7, 8, 7, + 6, 5, 6, 5, + 8, 7, 8, 7, + 6, 5, 6, 5, + + 4, 3, 4, 3, + 2, 1, 2, 1, + 4, 3, 4, 3, + 2, 1, 2, 1, + + 8, 7, 8, 7, + 6, 5, 6, 5, + 8, 7, 8, 7, + 6, 5, 6, 5, + + 4, 3, 4, 3, + 2, 1, 2, 1, + 4, 3, 4, 3, + 2, 1, 2, 1 + }, + qScale, qOffset); + + std::vector> padList; + padList.push_back(std::pair(1,1)); + padList.push_back(std::pair(1,1)); + padList.push_back(std::pair(1,1)); + + return MirrorPad3dTestCommon(workloadFactory, + memoryManager, + tensorHandleFactory, + inputTensorInfo, + outputTensorInfo, + inputValues, + expectedOutputValues, + padList, + armnn::PaddingMode::Reflect); +} + +template> +LayerTestResult PadSymmetric4dTest( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory, + float qScale, + int32_t qOffset) +{ + const armnn::TensorShape inputShape{ 2, 2, 2, 2 }; + const armnn::TensorShape outputShape{ 6, 6, 6, 6 }; + + const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType, qScale, qOffset); + const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType, qScale, qOffset); + + std::vector inputValues = armnnUtils::QuantizedVector( + { + // Batch 0, Channel 0, Height (2) x Width (2) + 1, 2, + 3, 4, + + // Batch 0, Channel 1, Height (2) x Width (2) + 5, 6, + 7, 8, + + // Batch 1, Channel 0, Height (2) x Width (2) + 9, 10, + 11, 12, + + // Batch 1, Channel 1, Height (2) x Width (2) + 13, 14, + 15, 16, + }, + qScale, qOffset); + + std::vector expectedOutputValues = armnnUtils::QuantizedVector( + { + 16, 15, 15, 16, 16, 15, + 14, 13, 13, 14, 14, 13, + 14, 13, 13, 14, 14, 13, + 16, 15, 15, 16, 16, 15, + 16, 15, 15, 16, 16, 15, + 14, 13, 13, 14, 14, 13, + + 12, 11, 11, 12, 12, 11, + 10, 9, 9, 10, 10, 9, + 10, 9, 9, 10, 10, 9, + 12, 11, 11, 12, 12, 11, + 12, 11, 11, 12, 12, 11, + 10, 9, 9, 10, 10, 9, + + 12, 11, 11, 12, 12, 11, + 10, 9, 9, 10, 10, 9, + 10, 9, 9, 10, 10, 9, + 12, 11, 11, 12, 12, 11, + 12, 11, 11, 12, 12, 11, + 10, 9, 9, 10, 10, 9, + + 16, 15, 15, 16, 16, 15, + 14, 13, 13, 14, 14, 13, + 14, 13, 13, 14, 14, 13, + 16, 15, 15, 16, 16, 15, + 16, 15, 15, 16, 16, 15, + 14, 13, 13, 14, 14, 13, + + 16, 15, 15, 16, 16, 15, + 14, 13, 13, 14, 14, 13, + 14, 13, 13, 14, 14, 13, + 16, 15, 15, 16, 16, 15, + 16, 15, 15, 16, 16, 15, + 14, 13, 13, 14, 14, 13, + + 12, 11, 11, 12, 12, 11, + 10, 9, 9, 10, 10, 9, + 10, 9, 9, 10, 10, 9, + 12, 11, 11, 12, 12, 11, + 12, 11, 11, 12, 12, 11, + 10, 9, 9, 10, 10, 9, + + + 8, 7, 7, 8, 8, 7, + 6, 5, 5, 6, 6, 5, + 6, 5, 5, 6, 6, 5, + 8, 7, 7, 8, 8, 7, + 8, 7, 7, 8, 8, 7, + 6, 5, 5, 6, 6, 5, + + 4, 3, 3, 4, 4, 3, + 2, 1, 1, 2, 2, 1, + 2, 1, 1, 2, 2, 1, + 4, 3, 3, 4, 4, 3, + 4, 3, 3, 4, 4, 3, + 2, 1, 1, 2, 2, 1, + + 4, 3, 3, 4, 4, 3, + 2, 1, 1, 2, 2, 1, + 2, 1, 1, 2, 2, 1, + 4, 3, 3, 4, 4, 3, + 4, 3, 3, 4, 4, 3, + 2, 1, 1, 2, 2, 1, + + 8, 7, 7, 8, 8, 7, + 6, 5, 5, 6, 6, 5, + 6, 5, 5, 6, 6, 5, + 8, 7, 7, 8, 8, 7, + 8, 7, 7, 8, 8, 7, + 6, 5, 5, 6, 6, 5, + + 8, 7, 7, 8, 8, 7, + 6, 5, 5, 6, 6, 5, + 6, 5, 5, 6, 6, 5, + 8, 7, 7, 8, 8, 7, + 8, 7, 7, 8, 8, 7, + 6, 5, 5, 6, 6, 5, + + 4, 3, 3, 4, 4, 3, + 2, 1, 1, 2, 2, 1, + 2, 1, 1, 2, 2, 1, + 4, 3, 3, 4, 4, 3, + 4, 3, 3, 4, 4, 3, + 2, 1, 1, 2, 2, 1, + + + 8, 7, 7, 8, 8, 7, + 6, 5, 5, 6, 6, 5, + 6, 5, 5, 6, 6, 5, + 8, 7, 7, 8, 8, 7, + 8, 7, 7, 8, 8, 7, + 6, 5, 5, 6, 6, 5, + + 4, 3, 3, 4, 4, 3, + 2, 1, 1, 2, 2, 1, + 2, 1, 1, 2, 2, 1, + 4, 3, 3, 4, 4, 3, + 4, 3, 3, 4, 4, 3, + 2, 1, 1, 2, 2, 1, + + 4, 3, 3, 4, 4, 3, + 
2, 1, 1, 2, 2, 1, + 2, 1, 1, 2, 2, 1, + 4, 3, 3, 4, 4, 3, + 4, 3, 3, 4, 4, 3, + 2, 1, 1, 2, 2, 1, + + 8, 7, 7, 8, 8, 7, + 6, 5, 5, 6, 6, 5, + 6, 5, 5, 6, 6, 5, + 8, 7, 7, 8, 8, 7, + 8, 7, 7, 8, 8, 7, + 6, 5, 5, 6, 6, 5, + + 8, 7, 7, 8, 8, 7, + 6, 5, 5, 6, 6, 5, + 6, 5, 5, 6, 6, 5, + 8, 7, 7, 8, 8, 7, + 8, 7, 7, 8, 8, 7, + 6, 5, 5, 6, 6, 5, + + 4, 3, 3, 4, 4, 3, + 2, 1, 1, 2, 2, 1, + 2, 1, 1, 2, 2, 1, + 4, 3, 3, 4, 4, 3, + 4, 3, 3, 4, 4, 3, + 2, 1, 1, 2, 2, 1, + + + 16, 15, 15, 16, 16, 15, + 14, 13, 13, 14, 14, 13, + 14, 13, 13, 14, 14, 13, + 16, 15, 15, 16, 16, 15, + 16, 15, 15, 16, 16, 15, + 14, 13, 13, 14, 14, 13, + + 12, 11, 11, 12, 12, 11, + 10, 9, 9, 10, 10, 9, + 10, 9, 9, 10, 10, 9, + 12, 11, 11, 12, 12, 11, + 12, 11, 11, 12, 12, 11, + 10, 9, 9, 10, 10, 9, + + 12, 11, 11, 12, 12, 11, + 10, 9, 9, 10, 10, 9, + 10, 9, 9, 10, 10, 9, + 12, 11, 11, 12, 12, 11, + 12, 11, 11, 12, 12, 11, + 10, 9, 9, 10, 10, 9, + + 16, 15, 15, 16, 16, 15, + 14, 13, 13, 14, 14, 13, + 14, 13, 13, 14, 14, 13, + 16, 15, 15, 16, 16, 15, + 16, 15, 15, 16, 16, 15, + 14, 13, 13, 14, 14, 13, + + 16, 15, 15, 16, 16, 15, + 14, 13, 13, 14, 14, 13, + 14, 13, 13, 14, 14, 13, + 16, 15, 15, 16, 16, 15, + 16, 15, 15, 16, 16, 15, + 14, 13, 13, 14, 14, 13, + + 12, 11, 11, 12, 12, 11, + 10, 9, 9, 10, 10, 9, + 10, 9, 9, 10, 10, 9, + 12, 11, 11, 12, 12, 11, + 12, 11, 11, 12, 12, 11, + 10, 9, 9, 10, 10, 9, + + + 16, 15, 15, 16, 16, 15, + 14, 13, 13, 14, 14, 13, + 14, 13, 13, 14, 14, 13, + 16, 15, 15, 16, 16, 15, + 16, 15, 15, 16, 16, 15, + 14, 13, 13, 14, 14, 13, + + 12, 11, 11, 12, 12, 11, + 10, 9, 9, 10, 10, 9, + 10, 9, 9, 10, 10, 9, + 12, 11, 11, 12, 12, 11, + 12, 11, 11, 12, 12, 11, + 10, 9, 9, 10, 10, 9, + + 12, 11, 11, 12, 12, 11, + 10, 9, 9, 10, 10, 9, + 10, 9, 9, 10, 10, 9, + 12, 11, 11, 12, 12, 11, + 12, 11, 11, 12, 12, 11, + 10, 9, 9, 10, 10, 9, + + 16, 15, 15, 16, 16, 15, + 14, 13, 13, 14, 14, 13, + 14, 13, 13, 14, 14, 13, + 16, 15, 15, 16, 16, 15, + 16, 15, 15, 16, 16, 15, + 14, 13, 13, 14, 14, 13, + + 16, 15, 15, 16, 16, 15, + 14, 13, 13, 14, 14, 13, + 14, 13, 13, 14, 14, 13, + 16, 15, 15, 16, 16, 15, + 16, 15, 15, 16, 16, 15, + 14, 13, 13, 14, 14, 13, + + 12, 11, 11, 12, 12, 11, + 10, 9, 9, 10, 10, 9, + 10, 9, 9, 10, 10, 9, + 12, 11, 11, 12, 12, 11, + 12, 11, 11, 12, 12, 11, + 10, 9, 9, 10, 10, 9, + + + 8, 7, 7, 8, 8, 7, + 6, 5, 5, 6, 6, 5, + 6, 5, 5, 6, 6, 5, + 8, 7, 7, 8, 8, 7, + 8, 7, 7, 8, 8, 7, + 6, 5, 5, 6, 6, 5, + + 4, 3, 3, 4, 4, 3, + 2, 1, 1, 2, 2, 1, + 2, 1, 1, 2, 2, 1, + 4, 3, 3, 4, 4, 3, + 4, 3, 3, 4, 4, 3, + 2, 1, 1, 2, 2, 1, + + 4, 3, 3, 4, 4, 3, + 2, 1, 1, 2, 2, 1, + 2, 1, 1, 2, 2, 1, + 4, 3, 3, 4, 4, 3, + 4, 3, 3, 4, 4, 3, + 2, 1, 1, 2, 2, 1, + + 8, 7, 7, 8, 8, 7, + 6, 5, 5, 6, 6, 5, + 6, 5, 5, 6, 6, 5, + 8, 7, 7, 8, 8, 7, + 8, 7, 7, 8, 8, 7, + 6, 5, 5, 6, 6, 5, + + 8, 7, 7, 8, 8, 7, + 6, 5, 5, 6, 6, 5, + 6, 5, 5, 6, 6, 5, + 8, 7, 7, 8, 8, 7, + 8, 7, 7, 8, 8, 7, + 6, 5, 5, 6, 6, 5, + + 4, 3, 3, 4, 4, 3, + 2, 1, 1, 2, 2, 1, + 2, 1, 1, 2, 2, 1, + 4, 3, 3, 4, 4, 3, + 4, 3, 3, 4, 4, 3, + 2, 1, 1, 2, 2, 1 + }, + qScale, qOffset); + + std::vector> padList; + padList.push_back(std::pair(2,2)); + padList.push_back(std::pair(2,2)); + padList.push_back(std::pair(2,2)); + padList.push_back(std::pair(2,2)); + + return MirrorPad4dTestCommon(workloadFactory, + memoryManager, + tensorHandleFactory, + inputTensorInfo, + outputTensorInfo, + inputValues, + expectedOutputValues, + padList, + armnn::PaddingMode::Symmetric); +} + +template> +LayerTestResult PadReflect4dTest( + armnn::IWorkloadFactory& workloadFactory, + 
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory, + float qScale, + int32_t qOffset) +{ + const armnn::TensorShape inputShape{ 2, 2, 2, 2 }; + const armnn::TensorShape outputShape{ 4, 4, 4, 4 }; + + const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType, qScale, qOffset); + const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType, qScale, qOffset); + + std::vector inputValues = armnnUtils::QuantizedVector( + { + // Batch 0, Channel 0, Height (2) x Width (2) + 1, 2, + 3, 4, + + // Batch 0, Channel 1, Height (2) x Width (2) + 5, 6, + 7, 8, + + // Batch 1, Channel 0, Height (2) x Width (2) + 9, 10, + 11, 12, + + // Batch 1, Channel 1, Height (2) x Width (2) + 13, 14, + 15, 16, + }, + qScale, qOffset); + + std::vector expectedOutputValues = armnnUtils::QuantizedVector( + { + 16, 15, 16, 15, + 14, 13, 14, 13, + 16, 15, 16, 15, + 14, 13, 14, 13, + + 12, 11, 12, 11, + 10, 9, 10, 9, + 12, 11, 12, 11, + 10, 9, 10, 9, + + 16, 15, 16, 15, + 14, 13, 14, 13, + 16, 15, 16, 15, + 14, 13, 14, 13, + + 12, 11, 12, 11, + 10, 9, 10, 9, + 12, 11, 12, 11, + 10, 9, 10, 9, + + + 8, 7, 8, 7, + 6, 5, 6, 5, + 8, 7, 8, 7, + 6, 5, 6, 5, + + 4, 3, 4, 3, + 2, 1, 2, 1, + 4, 3, 4, 3, + 2, 1, 2, 1, + + 8, 7, 8, 7, + 6, 5, 6, 5, + 8, 7, 8, 7, + 6, 5, 6, 5, + + 4, 3, 4, 3, + 2, 1, 2, 1, + 4, 3, 4, 3, + 2, 1, 2, 1, + + + 16, 15, 16, 15, + 14, 13, 14, 13, + 16, 15, 16, 15, + 14, 13, 14, 13, + + 12, 11, 12, 11, + 10, 9, 10, 9, + 12, 11, 12, 11, + 10, 9, 10, 9, + + 16, 15, 16, 15, + 14, 13, 14, 13, + 16, 15, 16, 15, + 14, 13, 14, 13, + + 12, 11, 12, 11, + 10, 9, 10, 9, + 12, 11, 12, 11, + 10, 9, 10, 9, + + + 8, 7, 8, 7, + 6, 5, 6, 5, + 8, 7, 8, 7, + 6, 5, 6, 5, + + 4, 3, 4, 3, + 2, 1, 2, 1, + 4, 3, 4, 3, + 2, 1, 2, 1, + + 8, 7, 8, 7, + 6, 5, 6, 5, + 8, 7, 8, 7, + 6, 5, 6, 5, + + 4, 3, 4, 3, + 2, 1, 2, 1, + 4, 3, 4, 3, + 2, 1, 2, 1 + }, + qScale, qOffset); + + std::vector> padList; + padList.push_back(std::pair(1,1)); + padList.push_back(std::pair(1,1)); + padList.push_back(std::pair(1,1)); + padList.push_back(std::pair(1,1)); + + return MirrorPad4dTestCommon(workloadFactory, + memoryManager, + tensorHandleFactory, + inputTensorInfo, + outputTensorInfo, + inputValues, + expectedOutputValues, + padList, + armnn::PaddingMode::Reflect); +} + +LayerTestResult PadSymmetricFloat16( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory) +{ + using namespace half_float::literal; + + const armnn::TensorShape inputShape{ 3, 3 }; + const armnn::TensorShape outputShape{ 5, 7 }; + + const armnn::TensorInfo inputTensorInfo(inputShape, armnn::DataType::Float16); + const armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float16); + + const std::vector inputValues = + { + 1._h, 2._h, 3._h, + 4._h, 5._h, 6._h, + 7._h, 8._h, 9._h + }; + + std::vector expectedOutputValues = + { + 2._h, 1._h, 1._h, 2._h, 3._h, 3._h, 2._h, + 2._h, 1._h, 1._h, 2._h, 3._h, 3._h, 2._h, + 5._h, 4._h, 4._h, 5._h, 6._h, 6._h, 5._h, + 8._h, 7._h, 7._h, 8._h, 9._h, 9._h, 8._h, + 8._h, 7._h, 7._h, 8._h, 9._h, 9._h, 8._h, + }; + + std::vector> padList; + padList.push_back(std::pair(1,1)); + padList.push_back(std::pair(2,2)); + + return MirrorPad2dTestCommon(workloadFactory, + memoryManager, + tensorHandleFactory, + inputTensorInfo, + outputTensorInfo, + inputValues, + expectedOutputValues, + padList, + armnn::PaddingMode::Symmetric); +} + 
+LayerTestResult PadReflectFloat16( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory) +{ + using namespace half_float::literal; + + const armnn::TensorShape inputShape{ 3, 3 }; + const armnn::TensorShape outputShape{ 7, 5 }; + + const armnn::TensorInfo inputTensorInfo(inputShape, armnn::DataType::Float16); + const armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float16); + + const std::vector inputValues = + { + 1._h, 2._h, 3._h, + 4._h, 5._h, 6._h, + 7._h, 8._h, 9._h + }; + + std::vector expectedOutputValues = + { + 8._h, 7._h, 8._h, 9._h, 8._h, + 5._h, 4._h, 5._h, 6._h, 5._h, + 2._h, 1._h, 2._h, 3._h, 2._h, + 5._h, 4._h, 5._h, 6._h, 5._h, + 8._h, 7._h, 8._h, 9._h, 8._h, + 5._h, 4._h, 5._h, 6._h, 5._h, + 2._h, 1._h, 2._h, 3._h, 2._h, + }; + + std::vector> padList; + padList.push_back(std::pair(2,2)); + padList.push_back(std::pair(1,1)); + + return MirrorPad2dTestCommon(workloadFactory, + memoryManager, + tensorHandleFactory, + inputTensorInfo, + outputTensorInfo, + inputValues, + expectedOutputValues, + padList, + armnn::PaddingMode::Reflect); +} + +// +// Implementation functions +// + +LayerTestResult PadSymmetric2dFloat32Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory) +{ + return PadSymmetric2dTest(workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0); +} + +LayerTestResult PadReflect2dFloat32Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory) +{ + return PadReflect2dTest(workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0); +} + +LayerTestResult PadSymmetric3dFloat32Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory) +{ + return PadSymmetric3dTest(workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0); +} + +LayerTestResult PadReflect3dFloat32Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory) +{ + return PadReflect3dTest(workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0); +} + +LayerTestResult PadSymmetric3dUint8Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory) +{ + return PadSymmetric3dTest( + workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 128); +} + +LayerTestResult PadReflect3dUint8Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory) +{ + return PadReflect3dTest(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 128); +} + +LayerTestResult PadSymmetric3dInt8Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory) +{ + return PadSymmetric3dTest(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 64); +} + +LayerTestResult PadReflect3dInt8Test( + armnn::IWorkloadFactory& workloadFactory, + const 
armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory) +{ + return PadReflect3dTest(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 64); +} + +LayerTestResult PadSymmetric4dFloat32Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory) +{ + return PadSymmetric4dTest(workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0); +} + +LayerTestResult PadReflect4dFloat32Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory) +{ + return PadReflect4dTest(workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0); +} + +LayerTestResult PadSymmetric4dBFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory) +{ + return PadSymmetric4dTest(workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0); +} + +LayerTestResult PadReflect4dBFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory) +{ + return PadReflect4dTest(workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0); +} + +LayerTestResult PadSymmetric4dUint8Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory) +{ + return PadSymmetric4dTest( + workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 128); +} + +LayerTestResult PadReflect4dUint8Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory) +{ + return PadReflect4dTest(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 128); +} + +LayerTestResult PadSymmetric4dInt8Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory) +{ + return PadSymmetric4dTest(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 64); +} + +LayerTestResult PadReflect4dInt8Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory) +{ + return PadReflect4dTest(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 64); +} + +LayerTestResult PadSymmetric4dInt16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory) +{ + return PadSymmetric4dTest(workloadFactory, memoryManager, tensorHandleFactory, 2.0f, 0); +} + +LayerTestResult PadReflect4dInt16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory) +{ + return PadReflect4dTest(workloadFactory, memoryManager, tensorHandleFactory, 2.0f, 0); +} + +LayerTestResult PadSymmetricFloat16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const 
armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+    return PadSymmetricFloat16(workloadFactory, memoryManager, tensorHandleFactory);
+}
+
+LayerTestResult<armnn::Half, 2> PadReflectFloat16Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+    return PadReflectFloat16(workloadFactory, memoryManager, tensorHandleFactory);
+}
diff --git a/src/backends/backendsCommon/test/layerTests/MirrorPadTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/MirrorPadTestImpl.hpp
new file mode 100644
index 0000000000..52898b820c
--- /dev/null
+++ b/src/backends/backendsCommon/test/layerTests/MirrorPadTestImpl.hpp
@@ -0,0 +1,117 @@
+//
+// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "LayerTestResult.hpp"
+
+#include
+
+#include
+
+#include
+
+#include
+#include
+
+LayerTestResult<float, 2> PadSymmetric2dFloat32Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+LayerTestResult<float, 2> PadReflect2dFloat32Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+LayerTestResult<float, 3> PadSymmetric3dFloat32Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+LayerTestResult<float, 3> PadReflect3dFloat32Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+LayerTestResult<uint8_t, 3> PadSymmetric3dUint8Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+LayerTestResult<uint8_t, 3> PadReflect3dUint8Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+LayerTestResult<int8_t, 3> PadSymmetric3dInt8Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+LayerTestResult<int8_t, 3> PadReflect3dInt8Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+LayerTestResult<float, 4> PadSymmetric4dFloat32Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+LayerTestResult<float, 4> PadReflect4dFloat32Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+LayerTestResult<armnn::BFloat16, 4> PadSymmetric4dBFloat16Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+LayerTestResult<armnn::BFloat16, 4> PadReflect4dBFloat16Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+LayerTestResult<uint8_t, 4> PadSymmetric4dUint8Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+LayerTestResult<uint8_t, 4> PadReflect4dUint8Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+LayerTestResult<int8_t, 4> PadSymmetric4dInt8Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+LayerTestResult<int8_t, 4> PadReflect4dInt8Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+LayerTestResult<int16_t, 4> PadSymmetric4dInt16Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+LayerTestResult<int16_t, 4> PadReflect4dInt16Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+LayerTestResult<armnn::Half, 2> PadSymmetricFloat16Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+LayerTestResult<armnn::Half, 2> PadReflectFloat16Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
\ No newline at end of file
--
cgit v1.2.1
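
Note on the expected values: the test vectors above follow TensorFlow-style MIRROR_PAD semantics. Reflect mirrors about the edge element, so the edge value is not repeated; Symmetric mirrors about the tensor boundary, so the edge value is repeated. The sketch below is illustrative only: the local PaddingMode enum and the MirrorIndex helper are stand-ins for explanation, not the Arm NN reference workload added by this patch. Assuming each pad amount does not exceed the dimension size (dimension size minus one for Reflect), which holds for every pad list used in these tests, it shows how a padded output index maps back to a source index.

#include <cstddef>
#include <initializer_list>
#include <iostream>
#include <vector>

// Local stand-in for armnn::PaddingMode (illustrative only).
enum class PaddingMode { Reflect, Symmetric };

// Map a padded-output index back to a source index for one dimension.
// padBefore: padding added before the data; dimSize: unpadded extent.
// Valid while the pad amounts do not exceed dimSize (dimSize - 1 for Reflect).
std::size_t MirrorIndex(std::size_t outIdx,
                        std::size_t padBefore,
                        std::size_t dimSize,
                        PaddingMode mode)
{
    // Reflect mirrors about the edge element itself, so the edge is not
    // repeated; Symmetric mirrors about the tensor boundary, so it is.
    const long long skipEdge = (mode == PaddingMode::Reflect) ? 1 : 0;
    const long long size     = static_cast<long long>(dimSize);
    long long idx = static_cast<long long>(outIdx) - static_cast<long long>(padBefore);

    if (idx < 0)                  // in the "before" padding region
    {
        idx = -idx - 1 + skipEdge;
    }
    else if (idx >= size)         // in the "after" padding region
    {
        idx = 2 * size - idx - 1 - skipEdge;
    }
    return static_cast<std::size_t>(idx);
}

int main()
{
    const std::vector<int> row { 1, 2, 3 };   // one row of the 3x3 input above
    const std::size_t pad = 2;                // pad list entry (2,2)

    for (PaddingMode mode : { PaddingMode::Symmetric, PaddingMode::Reflect })
    {
        for (std::size_t out = 0; out < row.size() + 2 * pad; ++out)
        {
            std::cout << row[MirrorIndex(out, pad, row.size(), mode)] << ' ';
        }
        std::cout << '\n';
    }
    // Prints:
    //   2 1 1 2 3 3 2   (Symmetric: matches the middle rows of the 2-D Symmetric test)
    //   3 2 1 2 3 2 1   (Reflect: matches the middle row of the 2-D Reflect test)
}

Applying the same per-dimension mapping independently to each axis reproduces the 3-D and 4-D expected tensors listed above.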