aboutsummaryrefslogtreecommitdiff
path: root/src/backends
diff options
context:
space:
mode:
Diffstat (limited to 'src/backends')
-rw-r--r--src/backends/aclCommon/ArmComputeTensorUtils.cpp6
-rw-r--r--src/backends/aclCommon/ArmComputeTensorUtils.hpp4
-rw-r--r--src/backends/aclCommon/ArmComputeUtils.hpp11
-rw-r--r--src/backends/backendsCommon/common.mk1
-rw-r--r--src/backends/backendsCommon/test/CMakeLists.txt2
-rw-r--r--src/backends/backendsCommon/test/LayerTests.hpp1
-rw-r--r--src/backends/backendsCommon/test/layerTests/MirrorPadTestImpl.cpp1091
-rw-r--r--src/backends/backendsCommon/test/layerTests/MirrorPadTestImpl.hpp117
-rw-r--r--src/backends/cl/workloads/ClFillWorkload.cpp2
-rw-r--r--src/backends/cl/workloads/ClPadWorkload.cpp2
-rw-r--r--src/backends/neon/workloads/NeonFillWorkload.cpp2
-rw-r--r--src/backends/neon/workloads/NeonPadWorkload.cpp2
-rw-r--r--src/backends/reference/backend.mk1
-rw-r--r--src/backends/reference/test/RefLayerTests.cpp27
-rw-r--r--src/backends/reference/workloads/CMakeLists.txt2
-rw-r--r--src/backends/reference/workloads/MirrorPad.cpp199
-rw-r--r--src/backends/reference/workloads/MirrorPad.hpp22
-rw-r--r--src/backends/reference/workloads/RefPadWorkload.cpp19
18 files changed, 1496 insertions, 15 deletions
diff --git a/src/backends/aclCommon/ArmComputeTensorUtils.cpp b/src/backends/aclCommon/ArmComputeTensorUtils.cpp
index 62f3263a0c..8bbaea71b3 100644
--- a/src/backends/aclCommon/ArmComputeTensorUtils.cpp
+++ b/src/backends/aclCommon/ArmComputeTensorUtils.cpp
@@ -254,9 +254,9 @@ arm_compute::Size2D BuildArmComputeSize2D(const unsigned int width, const unsign
return arm_compute::Size2D(width, height);
}
-arm_compute::PixelValue GetPixelValue(arm_compute::ITensor& input, float pixelValue)
+arm_compute::PixelValue GetPixelValue(const arm_compute::ITensorInfo* tensorInfo, float pixelValue)
{
- switch (input.info()->data_type())
+ switch (tensorInfo->data_type())
{
case arm_compute::DataType::F16:
return arm_compute::PixelValue(static_cast<Half>(pixelValue));
@@ -273,7 +273,7 @@ arm_compute::PixelValue GetPixelValue(arm_compute::ITensor& input, float pixelVa
return arm_compute::PixelValue(static_cast<int32_t>(pixelValue));
default:
throw InvalidArgumentException("Unsupported DataType: [" +
- std::to_string(static_cast<int>(input.info()->data_type())) + "]");
+ std::to_string(static_cast<int>(tensorInfo->data_type())) + "]");
}
}
diff --git a/src/backends/aclCommon/ArmComputeTensorUtils.hpp b/src/backends/aclCommon/ArmComputeTensorUtils.hpp
index ad5d4614fe..30df31b79d 100644
--- a/src/backends/aclCommon/ArmComputeTensorUtils.hpp
+++ b/src/backends/aclCommon/ArmComputeTensorUtils.hpp
@@ -65,8 +65,8 @@ arm_compute::PermutationVector BuildArmComputeTransposeVector(const armnn::Permu
/// Utility function used to setup an arm_compute::Size2D object from width and height values.
arm_compute::Size2D BuildArmComputeSize2D(const unsigned int width, const unsigned int height);
-/// Gets the appropriate PixelValue for the input DataType
-arm_compute::PixelValue GetPixelValue(arm_compute::ITensor& input, float pixelValue);
+/// Gets the appropriate PixelValue for the TensorInfo DataType
+arm_compute::PixelValue GetPixelValue(const arm_compute::ITensorInfo* tensorInfo, float pixelValue);
/// Utility function used to setup an arm_compute::PadStrideInfo object from an armnn layer descriptor.
template <typename Descriptor>
diff --git a/src/backends/aclCommon/ArmComputeUtils.hpp b/src/backends/aclCommon/ArmComputeUtils.hpp
index 2f767891a1..f096346c38 100644
--- a/src/backends/aclCommon/ArmComputeUtils.hpp
+++ b/src/backends/aclCommon/ArmComputeUtils.hpp
@@ -300,6 +300,17 @@ inline arm_compute::Conv3dInfo ComputeConv3DInfo(const armnn::Convolution3dQueue
return arm_compute::Conv3dInfo{stride, padding, activationInfo, dilation, roundType, isFastMathEnabled};
}
+inline arm_compute::PaddingMode ConvertPaddingModeToAcl(const PaddingMode& paddingMode)
+{
+ switch (paddingMode)
+ {
+ case PaddingMode::Constant: return arm_compute::PaddingMode::CONSTANT;
+ case PaddingMode::Reflect: return arm_compute::PaddingMode::REFLECT;
+ case PaddingMode::Symmetric: return arm_compute::PaddingMode::SYMMETRIC;
+ default: throw InvalidArgumentException("Unsupported Padding Mode");
+ }
+}
+
inline arm_compute::ReductionOperation ConvertReductionOperationToAcl(const ReduceDescriptor& descriptor)
{
switch (descriptor.m_ReduceOperation)
diff --git a/src/backends/backendsCommon/common.mk b/src/backends/backendsCommon/common.mk
index f90a7c855e..a77ec06035 100644
--- a/src/backends/backendsCommon/common.mk
+++ b/src/backends/backendsCommon/common.mk
@@ -77,6 +77,7 @@ COMMON_TEST_SOURCES := \
test/layerTests/LstmTestImpl.cpp \
test/layerTests/MaximumTestImpl.cpp \
test/layerTests/MinimumTestImpl.cpp \
+ test/layerTests/MirrorPadTestImpl.cpp \
test/layerTests/MultiplicationTestImpl.cpp \
test/layerTests/NegTestImpl.cpp \
test/layerTests/NormalizationTestImpl.cpp \
diff --git a/src/backends/backendsCommon/test/CMakeLists.txt b/src/backends/backendsCommon/test/CMakeLists.txt
index 9272ae749c..cd62242421 100644
--- a/src/backends/backendsCommon/test/CMakeLists.txt
+++ b/src/backends/backendsCommon/test/CMakeLists.txt
@@ -129,6 +129,8 @@ list(APPEND armnnBackendsCommonUnitTests_sources
layerTests/MeanTestImpl.hpp
layerTests/MinimumTestImpl.cpp
layerTests/MinimumTestImpl.hpp
+ layerTests/MirrorPadTestImpl.cpp
+ layerTests/MirrorPadTestImpl.hpp
layerTests/MultiplicationTestImpl.cpp
layerTests/MultiplicationTestImpl.hpp
layerTests/NegTestImpl.cpp
diff --git a/src/backends/backendsCommon/test/LayerTests.hpp b/src/backends/backendsCommon/test/LayerTests.hpp
index 0dcd3d1564..b51ff3357f 100644
--- a/src/backends/backendsCommon/test/LayerTests.hpp
+++ b/src/backends/backendsCommon/test/LayerTests.hpp
@@ -43,6 +43,7 @@
#include <backendsCommon/test/layerTests/MaximumTestImpl.hpp>
#include <backendsCommon/test/layerTests/MeanTestImpl.hpp>
#include <backendsCommon/test/layerTests/MinimumTestImpl.hpp>
+#include <backendsCommon/test/layerTests/MirrorPadTestImpl.hpp>
#include <backendsCommon/test/layerTests/MultiplicationTestImpl.hpp>
#include <backendsCommon/test/layerTests/NegTestImpl.hpp>
#include <backendsCommon/test/layerTests/NormalizationTestImpl.hpp>
diff --git a/src/backends/backendsCommon/test/layerTests/MirrorPadTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/MirrorPadTestImpl.cpp
new file mode 100644
index 0000000000..61899db00e
--- /dev/null
+++ b/src/backends/backendsCommon/test/layerTests/MirrorPadTestImpl.cpp
@@ -0,0 +1,1091 @@
+//
+// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "MirrorPadTestImpl.hpp"
+
+#include <QuantizeHelper.hpp>
+
+#include <backendsCommon/test/TensorCopyUtils.hpp>
+#include <backendsCommon/test/WorkloadTestUtils.hpp>
+
+#include <test/TensorHelpers.hpp>
+
+//
+// Implementation templates
+//
+
+template<typename T>
+LayerTestResult<T, 2> MirrorPad2dTestCommon(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
+ const armnn::TensorInfo& inputTensorInfo,
+ const armnn::TensorInfo& outputTensorInfo,
+ const std::vector<T>& inputValues,
+ const std::vector<T>& expectedOutputValues,
+ const std::vector<std::pair<unsigned int, unsigned int>>& padList,
+ const armnn::PaddingMode paddingMode)
+{
+ IgnoreUnused(memoryManager);
+ std::vector<T> actualOutput(outputTensorInfo.GetNumElements());
+
+ std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
+ std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
+
+ armnn::PadQueueDescriptor descriptor;
+
+ descriptor.m_Parameters.m_PadList = padList;
+ descriptor.m_Parameters.m_PaddingMode = paddingMode;
+ armnn::WorkloadInfo info;
+
+ AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
+ AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
+
+ std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);
+
+ inputHandle->Allocate();
+ outputHandle->Allocate();
+
+ CopyDataToITensorHandle(inputHandle.get(), inputValues.data());
+
+ ExecuteWorkload(*workload, memoryManager);
+
+ CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
+
+ return LayerTestResult<T, 2>(actualOutput,
+ expectedOutputValues,
+ outputHandle->GetShape(),
+ outputTensorInfo.GetShape());
+}
+
+template<typename T>
+LayerTestResult<T, 3> MirrorPad3dTestCommon(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
+ const armnn::TensorInfo& inputTensorInfo,
+ const armnn::TensorInfo& outputTensorInfo,
+ const std::vector<T>& inputValues,
+ const std::vector<T>& expectedOutputValues,
+ const std::vector<std::pair<unsigned int, unsigned int>>& padList,
+ const armnn::PaddingMode paddingMode)
+{
+ IgnoreUnused(memoryManager);
+ std::vector<T> actualOutput(outputTensorInfo.GetNumElements());
+
+ std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
+ std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
+
+ armnn::PadQueueDescriptor descriptor;
+ descriptor.m_Parameters.m_PadList = padList;
+ descriptor.m_Parameters.m_PaddingMode = paddingMode;
+
+ armnn::WorkloadInfo info;
+ AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
+ AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
+
+ std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);
+
+ inputHandle->Allocate();
+ outputHandle->Allocate();
+
+ CopyDataToITensorHandle(inputHandle.get(), inputValues.data());
+
+ ExecuteWorkload(*workload, memoryManager);
+
+ CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
+
+ return LayerTestResult<T, 3>(actualOutput,
+ expectedOutputValues,
+ outputHandle->GetShape(),
+ outputTensorInfo.GetShape());
+}
+
+template<typename T>
+LayerTestResult<T, 4> MirrorPad4dTestCommon(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
+ const armnn::TensorInfo& inputTensorInfo,
+ const armnn::TensorInfo& outputTensorInfo,
+ const std::vector<T>& inputValues,
+ const std::vector<T>& expectedOutputValues,
+ const std::vector<std::pair<unsigned int, unsigned int>>& padList,
+ const armnn::PaddingMode paddingMode)
+{
+ IgnoreUnused(memoryManager);
+ std::vector<T> actualOutput(outputTensorInfo.GetNumElements());
+
+ std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
+ std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
+
+ armnn::PadQueueDescriptor descriptor;
+ descriptor.m_Parameters.m_PadList = padList;
+ descriptor.m_Parameters.m_PaddingMode = paddingMode;
+
+ armnn::WorkloadInfo info;
+ AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
+ AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
+
+ std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);
+
+ inputHandle->Allocate();
+ outputHandle->Allocate();
+
+ CopyDataToITensorHandle(inputHandle.get(), inputValues.data());
+
+ ExecuteWorkload(*workload, memoryManager);
+
+ CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
+
+ return LayerTestResult<T, 4>(actualOutput,
+ expectedOutputValues,
+ outputHandle->GetShape(),
+ outputTensorInfo.GetShape());
+}
+
+template<armnn::DataType ArmnnType,
+ typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 2> PadSymmetric2dTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
+ float qScale,
+ int32_t qOffset)
+{
+ const armnn::TensorShape inputShape{ 3, 3 };
+ const armnn::TensorShape outputShape{ 7, 7 };
+
+ const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType, qScale, qOffset);
+ const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType, qScale, qOffset);
+
+ std::vector<T> inputValues = armnnUtils::QuantizedVector<T>(
+ {
+ // Height (3) x Width (3)
+ 1, 2, 3,
+ 4, 5, 6,
+ 7, 8, 9
+ },
+ qScale, qOffset);
+
+ std::vector<T> expectedOutputValues = armnnUtils::QuantizedVector<T>(
+ {
+ 5, 4, 4, 5, 6, 6, 5,
+ 2, 1, 1, 2, 3, 3, 2,
+ 2, 1, 1, 2, 3, 3, 2,
+ 5, 4, 4, 5, 6, 6, 5,
+ 8, 7, 7, 8, 9, 9, 8,
+ 8, 7, 7, 8, 9, 9, 8,
+ 5, 4, 4, 5, 6, 6, 5
+ },
+ qScale, qOffset);
+
+ std::vector<std::pair<unsigned int, unsigned int>> padList;
+ padList.push_back(std::pair<unsigned int, unsigned int>(2,2));
+ padList.push_back(std::pair<unsigned int, unsigned int>(2,2));
+
+ return MirrorPad2dTestCommon<T>(workloadFactory,
+ memoryManager,
+ tensorHandleFactory,
+ inputTensorInfo,
+ outputTensorInfo,
+ inputValues,
+ expectedOutputValues,
+ padList,
+ armnn::PaddingMode::Symmetric);
+}
+
+template<armnn::DataType ArmnnType,
+ typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 2> PadReflect2dTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
+ float qScale,
+ int32_t qOffset)
+{
+ const armnn::TensorShape inputShape{ 3, 3 };
+ const armnn::TensorShape outputShape{ 7, 7 };
+
+ const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType, qScale, qOffset);
+ const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType, qScale, qOffset);
+
+ std::vector<T> inputValues = armnnUtils::QuantizedVector<T>(
+ {
+ // Height (3) x Width (3)
+ 1, 2, 3,
+ 4, 5, 6,
+ 7, 8, 9
+ },
+ qScale, qOffset);
+
+ std::vector<T> expectedOutputValues = armnnUtils::QuantizedVector<T>(
+ {
+ 9, 8, 7, 8, 9, 8, 7,
+ 6, 5, 4, 5, 6, 5, 4,
+ 3, 2, 1, 2, 3, 2, 1,
+ 6, 5, 4, 5, 6, 5, 4,
+ 9, 8, 7, 8, 9, 8, 7,
+ 6, 5, 4, 5, 6, 5, 4,
+ 3, 2, 1, 2, 3, 2, 1
+ },
+ qScale, qOffset);
+
+ std::vector<std::pair<unsigned int, unsigned int>> padList;
+ padList.push_back(std::pair<unsigned int, unsigned int>(2,2));
+ padList.push_back(std::pair<unsigned int, unsigned int>(2,2));
+
+ return MirrorPad2dTestCommon<T>(workloadFactory,
+ memoryManager,
+ tensorHandleFactory,
+ inputTensorInfo,
+ outputTensorInfo,
+ inputValues,
+ expectedOutputValues,
+ padList,
+ armnn::PaddingMode::Reflect);
+}
+
+template<armnn::DataType ArmnnType,
+ typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 3> PadSymmetric3dTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
+ float qScale,
+ int32_t qOffset)
+{
+ const armnn::TensorShape inputShape{ 2, 2, 2 };
+ const armnn::TensorShape outputShape{ 4, 4, 4 };
+
+ const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType, qScale, qOffset);
+ const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType, qScale, qOffset);
+
+ std::vector<T> inputValues = armnnUtils::QuantizedVector<T>(
+ {
+ // Channel 0, Height (2) x Width (2)
+ 1, 2,
+ 3, 4,
+
+ // Channel 1, Height (2) x Width (2)
+ 5, 6,
+ 7, 8
+ },
+ qScale, qOffset);
+
+ std::vector<T> expectedOutputValues = armnnUtils::QuantizedVector<T>(
+ {
+ 1, 1, 2, 2,
+ 1, 1, 2, 2,
+ 3, 3, 4, 4,
+ 3, 3, 4, 4,
+
+ 1, 1, 2, 2,
+ 1, 1, 2, 2,
+ 3, 3, 4, 4,
+ 3, 3, 4, 4,
+
+ 5, 5, 6, 6,
+ 5, 5, 6, 6,
+ 7, 7, 8, 8,
+ 7, 7, 8, 8,
+
+ 5, 5, 6, 6,
+ 5, 5, 6, 6,
+ 7, 7, 8, 8,
+ 7, 7, 8, 8
+ },
+ qScale, qOffset);
+
+ std::vector<std::pair<unsigned int, unsigned int>> padList;
+ padList.push_back(std::pair<unsigned int, unsigned int>(1,1));
+ padList.push_back(std::pair<unsigned int, unsigned int>(1,1));
+ padList.push_back(std::pair<unsigned int, unsigned int>(1,1));
+
+ return MirrorPad3dTestCommon<T>(workloadFactory,
+ memoryManager,
+ tensorHandleFactory,
+ inputTensorInfo,
+ outputTensorInfo,
+ inputValues,
+ expectedOutputValues,
+ padList,
+ armnn::PaddingMode::Symmetric);
+}
+
+template<armnn::DataType ArmnnType,
+ typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 3> PadReflect3dTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
+ float qScale,
+ int32_t qOffset)
+{
+ const armnn::TensorShape inputShape{ 2, 2, 2 };
+ const armnn::TensorShape outputShape{ 4, 4, 4 };
+
+ const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType, qScale, qOffset);
+ const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType, qScale, qOffset);
+
+ std::vector<T> inputValues = armnnUtils::QuantizedVector<T>(
+ {
+ // Channel 0, Height (2) x Width (2)
+ 1, 2,
+ 3, 4,
+
+ // Channel 1, Height (2) x Width (2)
+ 5, 6,
+ 7, 8
+ },
+ qScale, qOffset);
+
+ std::vector<T> expectedOutputValues = armnnUtils::QuantizedVector<T>(
+ {
+ 8, 7, 8, 7,
+ 6, 5, 6, 5,
+ 8, 7, 8, 7,
+ 6, 5, 6, 5,
+
+ 4, 3, 4, 3,
+ 2, 1, 2, 1,
+ 4, 3, 4, 3,
+ 2, 1, 2, 1,
+
+ 8, 7, 8, 7,
+ 6, 5, 6, 5,
+ 8, 7, 8, 7,
+ 6, 5, 6, 5,
+
+ 4, 3, 4, 3,
+ 2, 1, 2, 1,
+ 4, 3, 4, 3,
+ 2, 1, 2, 1
+ },
+ qScale, qOffset);
+
+ std::vector<std::pair<unsigned int, unsigned int>> padList;
+ padList.push_back(std::pair<unsigned int, unsigned int>(1,1));
+ padList.push_back(std::pair<unsigned int, unsigned int>(1,1));
+ padList.push_back(std::pair<unsigned int, unsigned int>(1,1));
+
+ return MirrorPad3dTestCommon<T>(workloadFactory,
+ memoryManager,
+ tensorHandleFactory,
+ inputTensorInfo,
+ outputTensorInfo,
+ inputValues,
+ expectedOutputValues,
+ padList,
+ armnn::PaddingMode::Reflect);
+}
+
+template<armnn::DataType ArmnnType,
+ typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 4> PadSymmetric4dTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
+ float qScale,
+ int32_t qOffset)
+{
+ const armnn::TensorShape inputShape{ 2, 2, 2, 2 };
+ const armnn::TensorShape outputShape{ 6, 6, 6, 6 };
+
+ const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType, qScale, qOffset);
+ const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType, qScale, qOffset);
+
+ std::vector<T> inputValues = armnnUtils::QuantizedVector<T>(
+ {
+ // Batch 0, Channel 0, Height (2) x Width (2)
+ 1, 2,
+ 3, 4,
+
+ // Batch 0, Channel 1, Height (2) x Width (2)
+ 5, 6,
+ 7, 8,
+
+ // Batch 1, Channel 0, Height (2) x Width (2)
+ 9, 10,
+ 11, 12,
+
+ // Batch 1, Channel 1, Height (2) x Width (2)
+ 13, 14,
+ 15, 16,
+ },
+ qScale, qOffset);
+
+ std::vector<T> expectedOutputValues = armnnUtils::QuantizedVector<T>(
+ {
+ 16, 15, 15, 16, 16, 15,
+ 14, 13, 13, 14, 14, 13,
+ 14, 13, 13, 14, 14, 13,
+ 16, 15, 15, 16, 16, 15,
+ 16, 15, 15, 16, 16, 15,
+ 14, 13, 13, 14, 14, 13,
+
+ 12, 11, 11, 12, 12, 11,
+ 10, 9, 9, 10, 10, 9,
+ 10, 9, 9, 10, 10, 9,
+ 12, 11, 11, 12, 12, 11,
+ 12, 11, 11, 12, 12, 11,
+ 10, 9, 9, 10, 10, 9,
+
+ 12, 11, 11, 12, 12, 11,
+ 10, 9, 9, 10, 10, 9,
+ 10, 9, 9, 10, 10, 9,
+ 12, 11, 11, 12, 12, 11,
+ 12, 11, 11, 12, 12, 11,
+ 10, 9, 9, 10, 10, 9,
+
+ 16, 15, 15, 16, 16, 15,
+ 14, 13, 13, 14, 14, 13,
+ 14, 13, 13, 14, 14, 13,
+ 16, 15, 15, 16, 16, 15,
+ 16, 15, 15, 16, 16, 15,
+ 14, 13, 13, 14, 14, 13,
+
+ 16, 15, 15, 16, 16, 15,
+ 14, 13, 13, 14, 14, 13,
+ 14, 13, 13, 14, 14, 13,
+ 16, 15, 15, 16, 16, 15,
+ 16, 15, 15, 16, 16, 15,
+ 14, 13, 13, 14, 14, 13,
+
+ 12, 11, 11, 12, 12, 11,
+ 10, 9, 9, 10, 10, 9,
+ 10, 9, 9, 10, 10, 9,
+ 12, 11, 11, 12, 12, 11,
+ 12, 11, 11, 12, 12, 11,
+ 10, 9, 9, 10, 10, 9,
+
+
+ 8, 7, 7, 8, 8, 7,
+ 6, 5, 5, 6, 6, 5,
+ 6, 5, 5, 6, 6, 5,
+ 8, 7, 7, 8, 8, 7,
+ 8, 7, 7, 8, 8, 7,
+ 6, 5, 5, 6, 6, 5,
+
+ 4, 3, 3, 4, 4, 3,
+ 2, 1, 1, 2, 2, 1,
+ 2, 1, 1, 2, 2, 1,
+ 4, 3, 3, 4, 4, 3,
+ 4, 3, 3, 4, 4, 3,
+ 2, 1, 1, 2, 2, 1,
+
+ 4, 3, 3, 4, 4, 3,
+ 2, 1, 1, 2, 2, 1,
+ 2, 1, 1, 2, 2, 1,
+ 4, 3, 3, 4, 4, 3,
+ 4, 3, 3, 4, 4, 3,
+ 2, 1, 1, 2, 2, 1,
+
+ 8, 7, 7, 8, 8, 7,
+ 6, 5, 5, 6, 6, 5,
+ 6, 5, 5, 6, 6, 5,
+ 8, 7, 7, 8, 8, 7,
+ 8, 7, 7, 8, 8, 7,
+ 6, 5, 5, 6, 6, 5,
+
+ 8, 7, 7, 8, 8, 7,
+ 6, 5, 5, 6, 6, 5,
+ 6, 5, 5, 6, 6, 5,
+ 8, 7, 7, 8, 8, 7,
+ 8, 7, 7, 8, 8, 7,
+ 6, 5, 5, 6, 6, 5,
+
+ 4, 3, 3, 4, 4, 3,
+ 2, 1, 1, 2, 2, 1,
+ 2, 1, 1, 2, 2, 1,
+ 4, 3, 3, 4, 4, 3,
+ 4, 3, 3, 4, 4, 3,
+ 2, 1, 1, 2, 2, 1,
+
+
+ 8, 7, 7, 8, 8, 7,
+ 6, 5, 5, 6, 6, 5,
+ 6, 5, 5, 6, 6, 5,
+ 8, 7, 7, 8, 8, 7,
+ 8, 7, 7, 8, 8, 7,
+ 6, 5, 5, 6, 6, 5,
+
+ 4, 3, 3, 4, 4, 3,
+ 2, 1, 1, 2, 2, 1,
+ 2, 1, 1, 2, 2, 1,
+ 4, 3, 3, 4, 4, 3,
+ 4, 3, 3, 4, 4, 3,
+ 2, 1, 1, 2, 2, 1,
+
+ 4, 3, 3, 4, 4, 3,
+ 2, 1, 1, 2, 2, 1,
+ 2, 1, 1, 2, 2, 1,
+ 4, 3, 3, 4, 4, 3,
+ 4, 3, 3, 4, 4, 3,
+ 2, 1, 1, 2, 2, 1,
+
+ 8, 7, 7, 8, 8, 7,
+ 6, 5, 5, 6, 6, 5,
+ 6, 5, 5, 6, 6, 5,
+ 8, 7, 7, 8, 8, 7,
+ 8, 7, 7, 8, 8, 7,
+ 6, 5, 5, 6, 6, 5,
+
+ 8, 7, 7, 8, 8, 7,
+ 6, 5, 5, 6, 6, 5,
+ 6, 5, 5, 6, 6, 5,
+ 8, 7, 7, 8, 8, 7,
+ 8, 7, 7, 8, 8, 7,
+ 6, 5, 5, 6, 6, 5,
+
+ 4, 3, 3, 4, 4, 3,
+ 2, 1, 1, 2, 2, 1,
+ 2, 1, 1, 2, 2, 1,
+ 4, 3, 3, 4, 4, 3,
+ 4, 3, 3, 4, 4, 3,
+ 2, 1, 1, 2, 2, 1,
+
+
+ 16, 15, 15, 16, 16, 15,
+ 14, 13, 13, 14, 14, 13,
+ 14, 13, 13, 14, 14, 13,
+ 16, 15, 15, 16, 16, 15,
+ 16, 15, 15, 16, 16, 15,
+ 14, 13, 13, 14, 14, 13,
+
+ 12, 11, 11, 12, 12, 11,
+ 10, 9, 9, 10, 10, 9,
+ 10, 9, 9, 10, 10, 9,
+ 12, 11, 11, 12, 12, 11,
+ 12, 11, 11, 12, 12, 11,
+ 10, 9, 9, 10, 10, 9,
+
+ 12, 11, 11, 12, 12, 11,
+ 10, 9, 9, 10, 10, 9,
+ 10, 9, 9, 10, 10, 9,
+ 12, 11, 11, 12, 12, 11,
+ 12, 11, 11, 12, 12, 11,
+ 10, 9, 9, 10, 10, 9,
+
+ 16, 15, 15, 16, 16, 15,
+ 14, 13, 13, 14, 14, 13,
+ 14, 13, 13, 14, 14, 13,
+ 16, 15, 15, 16, 16, 15,
+ 16, 15, 15, 16, 16, 15,
+ 14, 13, 13, 14, 14, 13,
+
+ 16, 15, 15, 16, 16, 15,
+ 14, 13, 13, 14, 14, 13,
+ 14, 13, 13, 14, 14, 13,
+ 16, 15, 15, 16, 16, 15,
+ 16, 15, 15, 16, 16, 15,
+ 14, 13, 13, 14, 14, 13,
+
+ 12, 11, 11, 12, 12, 11,
+ 10, 9, 9, 10, 10, 9,
+ 10, 9, 9, 10, 10, 9,
+ 12, 11, 11, 12, 12, 11,
+ 12, 11, 11, 12, 12, 11,
+ 10, 9, 9, 10, 10, 9,
+
+
+ 16, 15, 15, 16, 16, 15,
+ 14, 13, 13, 14, 14, 13,
+ 14, 13, 13, 14, 14, 13,
+ 16, 15, 15, 16, 16, 15,
+ 16, 15, 15, 16, 16, 15,
+ 14, 13, 13, 14, 14, 13,
+
+ 12, 11, 11, 12, 12, 11,
+ 10, 9, 9, 10, 10, 9,
+ 10, 9, 9, 10, 10, 9,
+ 12, 11, 11, 12, 12, 11,
+ 12, 11, 11, 12, 12, 11,
+ 10, 9, 9, 10, 10, 9,
+
+ 12, 11, 11, 12, 12, 11,
+ 10, 9, 9, 10, 10, 9,
+ 10, 9, 9, 10, 10, 9,
+ 12, 11, 11, 12, 12, 11,
+ 12, 11, 11, 12, 12, 11,
+ 10, 9, 9, 10, 10, 9,
+
+ 16, 15, 15, 16, 16, 15,
+ 14, 13, 13, 14, 14, 13,
+ 14, 13, 13, 14, 14, 13,
+ 16, 15, 15, 16, 16, 15,
+ 16, 15, 15, 16, 16, 15,
+ 14, 13, 13, 14, 14, 13,
+
+ 16, 15, 15, 16, 16, 15,
+ 14, 13, 13, 14, 14, 13,
+ 14, 13, 13, 14, 14, 13,
+ 16, 15, 15, 16, 16, 15,
+ 16, 15, 15, 16, 16, 15,
+ 14, 13, 13, 14, 14, 13,
+
+ 12, 11, 11, 12, 12, 11,
+ 10, 9, 9, 10, 10, 9,
+ 10, 9, 9, 10, 10, 9,
+ 12, 11, 11, 12, 12, 11,
+ 12, 11, 11, 12, 12, 11,
+ 10, 9, 9, 10, 10, 9,
+
+
+ 8, 7, 7, 8, 8, 7,
+ 6, 5, 5, 6, 6, 5,
+ 6, 5, 5, 6, 6, 5,
+ 8, 7, 7, 8, 8, 7,
+ 8, 7, 7, 8, 8, 7,
+ 6, 5, 5, 6, 6, 5,
+
+ 4, 3, 3, 4, 4, 3,
+ 2, 1, 1, 2, 2, 1,
+ 2, 1, 1, 2, 2, 1,
+ 4, 3, 3, 4, 4, 3,
+ 4, 3, 3, 4, 4, 3,
+ 2, 1, 1, 2, 2, 1,
+
+ 4, 3, 3, 4, 4, 3,
+ 2, 1, 1, 2, 2, 1,
+ 2, 1, 1, 2, 2, 1,
+ 4, 3, 3, 4, 4, 3,
+ 4, 3, 3, 4, 4, 3,
+ 2, 1, 1, 2, 2, 1,
+
+ 8, 7, 7, 8, 8, 7,
+ 6, 5, 5, 6, 6, 5,
+ 6, 5, 5, 6, 6, 5,
+ 8, 7, 7, 8, 8, 7,
+ 8, 7, 7, 8, 8, 7,
+ 6, 5, 5, 6, 6, 5,
+
+ 8, 7, 7, 8, 8, 7,
+ 6, 5, 5, 6, 6, 5,
+ 6, 5, 5, 6, 6, 5,
+ 8, 7, 7, 8, 8, 7,
+ 8, 7, 7, 8, 8, 7,
+ 6, 5, 5, 6, 6, 5,
+
+ 4, 3, 3, 4, 4, 3,
+ 2, 1, 1, 2, 2, 1,
+ 2, 1, 1, 2, 2, 1,
+ 4, 3, 3, 4, 4, 3,
+ 4, 3, 3, 4, 4, 3,
+ 2, 1, 1, 2, 2, 1
+ },
+ qScale, qOffset);
+
+ std::vector<std::pair<unsigned int, unsigned int>> padList;
+ padList.push_back(std::pair<unsigned int, unsigned int>(2,2));
+ padList.push_back(std::pair<unsigned int, unsigned int>(2,2));
+ padList.push_back(std::pair<unsigned int, unsigned int>(2,2));
+ padList.push_back(std::pair<unsigned int, unsigned int>(2,2));
+
+ return MirrorPad4dTestCommon<T>(workloadFactory,
+ memoryManager,
+ tensorHandleFactory,
+ inputTensorInfo,
+ outputTensorInfo,
+ inputValues,
+ expectedOutputValues,
+ padList,
+ armnn::PaddingMode::Symmetric);
+}
+
+template<armnn::DataType ArmnnType,
+ typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 4> PadReflect4dTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
+ float qScale,
+ int32_t qOffset)
+{
+ const armnn::TensorShape inputShape{ 2, 2, 2, 2 };
+ const armnn::TensorShape outputShape{ 4, 4, 4, 4 };
+
+ const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType, qScale, qOffset);
+ const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType, qScale, qOffset);
+
+ std::vector<T> inputValues = armnnUtils::QuantizedVector<T>(
+ {
+ // Batch 0, Channel 0, Height (2) x Width (2)
+ 1, 2,
+ 3, 4,
+
+ // Batch 0, Channel 1, Height (2) x Width (2)
+ 5, 6,
+ 7, 8,
+
+ // Batch 1, Channel 0, Height (2) x Width (2)
+ 9, 10,
+ 11, 12,
+
+ // Batch 1, Channel 1, Height (2) x Width (2)
+ 13, 14,
+ 15, 16,
+ },
+ qScale, qOffset);
+
+ std::vector<T> expectedOutputValues = armnnUtils::QuantizedVector<T>(
+ {
+ 16, 15, 16, 15,
+ 14, 13, 14, 13,
+ 16, 15, 16, 15,
+ 14, 13, 14, 13,
+
+ 12, 11, 12, 11,
+ 10, 9, 10, 9,
+ 12, 11, 12, 11,
+ 10, 9, 10, 9,
+
+ 16, 15, 16, 15,
+ 14, 13, 14, 13,
+ 16, 15, 16, 15,
+ 14, 13, 14, 13,
+
+ 12, 11, 12, 11,
+ 10, 9, 10, 9,
+ 12, 11, 12, 11,
+ 10, 9, 10, 9,
+
+
+ 8, 7, 8, 7,
+ 6, 5, 6, 5,
+ 8, 7, 8, 7,
+ 6, 5, 6, 5,
+
+ 4, 3, 4, 3,
+ 2, 1, 2, 1,
+ 4, 3, 4, 3,
+ 2, 1, 2, 1,
+
+ 8, 7, 8, 7,
+ 6, 5, 6, 5,
+ 8, 7, 8, 7,
+ 6, 5, 6, 5,
+
+ 4, 3, 4, 3,
+ 2, 1, 2, 1,
+ 4, 3, 4, 3,
+ 2, 1, 2, 1,
+
+
+ 16, 15, 16, 15,
+ 14, 13, 14, 13,
+ 16, 15, 16, 15,
+ 14, 13, 14, 13,
+
+ 12, 11, 12, 11,
+ 10, 9, 10, 9,
+ 12, 11, 12, 11,
+ 10, 9, 10, 9,
+
+ 16, 15, 16, 15,
+ 14, 13, 14, 13,
+ 16, 15, 16, 15,
+ 14, 13, 14, 13,
+
+ 12, 11, 12, 11,
+ 10, 9, 10, 9,
+ 12, 11, 12, 11,
+ 10, 9, 10, 9,
+
+
+ 8, 7, 8, 7,
+ 6, 5, 6, 5,
+ 8, 7, 8, 7,
+ 6, 5, 6, 5,
+
+ 4, 3, 4, 3,
+ 2, 1, 2, 1,
+ 4, 3, 4, 3,
+ 2, 1, 2, 1,
+
+ 8, 7, 8, 7,
+ 6, 5, 6, 5,
+ 8, 7, 8, 7,
+ 6, 5, 6, 5,
+
+ 4, 3, 4, 3,
+ 2, 1, 2, 1,
+ 4, 3, 4, 3,
+ 2, 1, 2, 1
+ },
+ qScale, qOffset);
+
+ std::vector<std::pair<unsigned int, unsigned int>> padList;
+ padList.push_back(std::pair<unsigned int, unsigned int>(1,1));
+ padList.push_back(std::pair<unsigned int, unsigned int>(1,1));
+ padList.push_back(std::pair<unsigned int, unsigned int>(1,1));
+ padList.push_back(std::pair<unsigned int, unsigned int>(1,1));
+
+ return MirrorPad4dTestCommon<T>(workloadFactory,
+ memoryManager,
+ tensorHandleFactory,
+ inputTensorInfo,
+ outputTensorInfo,
+ inputValues,
+ expectedOutputValues,
+ padList,
+ armnn::PaddingMode::Reflect);
+}
+
+LayerTestResult<armnn::Half, 2> PadSymmetricFloat16(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ using namespace half_float::literal;
+
+ const armnn::TensorShape inputShape{ 3, 3 };
+ const armnn::TensorShape outputShape{ 5, 7 };
+
+ const armnn::TensorInfo inputTensorInfo(inputShape, armnn::DataType::Float16);
+ const armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float16);
+
+ const std::vector<armnn::Half> inputValues =
+ {
+ 1._h, 2._h, 3._h,
+ 4._h, 5._h, 6._h,
+ 7._h, 8._h, 9._h
+ };
+
+ std::vector<armnn::Half> expectedOutputValues =
+ {
+ 2._h, 1._h, 1._h, 2._h, 3._h, 3._h, 2._h,
+ 2._h, 1._h, 1._h, 2._h, 3._h, 3._h, 2._h,
+ 5._h, 4._h, 4._h, 5._h, 6._h, 6._h, 5._h,
+ 8._h, 7._h, 7._h, 8._h, 9._h, 9._h, 8._h,
+ 8._h, 7._h, 7._h, 8._h, 9._h, 9._h, 8._h,
+ };
+
+ std::vector<std::pair<unsigned int, unsigned int>> padList;
+ padList.push_back(std::pair<unsigned int, unsigned int>(1,1));
+ padList.push_back(std::pair<unsigned int, unsigned int>(2,2));
+
+ return MirrorPad2dTestCommon<armnn::Half>(workloadFactory,
+ memoryManager,
+ tensorHandleFactory,
+ inputTensorInfo,
+ outputTensorInfo,
+ inputValues,
+ expectedOutputValues,
+ padList,
+ armnn::PaddingMode::Symmetric);
+}
+
+LayerTestResult<armnn::Half, 2> PadReflectFloat16(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ using namespace half_float::literal;
+
+ const armnn::TensorShape inputShape{ 3, 3 };
+ const armnn::TensorShape outputShape{ 7, 5 };
+
+ const armnn::TensorInfo inputTensorInfo(inputShape, armnn::DataType::Float16);
+ const armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float16);
+
+ const std::vector<armnn::Half> inputValues =
+ {
+ 1._h, 2._h, 3._h,
+ 4._h, 5._h, 6._h,
+ 7._h, 8._h, 9._h
+ };
+
+ std::vector<armnn::Half> expectedOutputValues =
+ {
+ 8._h, 7._h, 8._h, 9._h, 8._h,
+ 5._h, 4._h, 5._h, 6._h, 5._h,
+ 2._h, 1._h, 2._h, 3._h, 2._h,
+ 5._h, 4._h, 5._h, 6._h, 5._h,
+ 8._h, 7._h, 8._h, 9._h, 8._h,
+ 5._h, 4._h, 5._h, 6._h, 5._h,
+ 2._h, 1._h, 2._h, 3._h, 2._h,
+ };
+
+ std::vector<std::pair<unsigned int, unsigned int>> padList;
+ padList.push_back(std::pair<unsigned int, unsigned int>(2,2));
+ padList.push_back(std::pair<unsigned int, unsigned int>(1,1));
+
+ return MirrorPad2dTestCommon<armnn::Half>(workloadFactory,
+ memoryManager,
+ tensorHandleFactory,
+ inputTensorInfo,
+ outputTensorInfo,
+ inputValues,
+ expectedOutputValues,
+ padList,
+ armnn::PaddingMode::Reflect);
+}
+
+//
+// Implementation functions
+//
+
+LayerTestResult<float, 2> PadSymmetric2dFloat32Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ return PadSymmetric2dTest<armnn::DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0);
+}
+
+LayerTestResult<float, 2> PadReflect2dFloat32Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ return PadReflect2dTest<armnn::DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0);
+}
+
+LayerTestResult<float, 3> PadSymmetric3dFloat32Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ return PadSymmetric3dTest<armnn::DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0);
+}
+
+LayerTestResult<float, 3> PadReflect3dFloat32Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ return PadReflect3dTest<armnn::DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0);
+}
+
+LayerTestResult<uint8_t, 3> PadSymmetric3dUint8Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ return PadSymmetric3dTest<armnn::DataType::QAsymmU8>(
+ workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 128);
+}
+
+LayerTestResult<uint8_t, 3> PadReflect3dUint8Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ return PadReflect3dTest<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 128);
+}
+
+LayerTestResult<int8_t, 3> PadSymmetric3dInt8Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ return PadSymmetric3dTest<armnn::DataType::QAsymmS8>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 64);
+}
+
+LayerTestResult<int8_t, 3> PadReflect3dInt8Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ return PadReflect3dTest<armnn::DataType::QAsymmS8>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 64);
+}
+
+LayerTestResult<float, 4> PadSymmetric4dFloat32Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ return PadSymmetric4dTest<armnn::DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0);
+}
+
+LayerTestResult<float, 4> PadReflect4dFloat32Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ return PadReflect4dTest<armnn::DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0);
+}
+
+LayerTestResult<armnn::BFloat16, 4> PadSymmetric4dBFloat16Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ return PadSymmetric4dTest<armnn::DataType::BFloat16>(workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0);
+}
+
+LayerTestResult<armnn::BFloat16, 4> PadReflect4dBFloat16Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ return PadReflect4dTest<armnn::DataType::BFloat16>(workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0);
+}
+
+LayerTestResult<uint8_t, 4> PadSymmetric4dUint8Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ return PadSymmetric4dTest<armnn::DataType::QAsymmU8>(
+ workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 128);
+}
+
+LayerTestResult<uint8_t, 4> PadReflect4dUint8Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ return PadReflect4dTest<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 128);
+}
+
+LayerTestResult<int8_t, 4> PadSymmetric4dInt8Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ return PadSymmetric4dTest<armnn::DataType::QAsymmS8>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 64);
+}
+
+LayerTestResult<int8_t, 4> PadReflect4dInt8Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ return PadReflect4dTest<armnn::DataType::QAsymmS8>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 64);
+}
+
+LayerTestResult<int16_t, 4> PadSymmetric4dInt16Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ return PadSymmetric4dTest<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, tensorHandleFactory, 2.0f, 0);
+}
+
+LayerTestResult<int16_t, 4> PadReflect4dInt16Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ return PadReflect4dTest<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, tensorHandleFactory, 2.0f, 0);
+}
+
+LayerTestResult<armnn::Half, 2> PadSymmetricFloat16Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ return PadSymmetricFloat16(workloadFactory, memoryManager, tensorHandleFactory);
+}
+
+LayerTestResult<armnn::Half, 2> PadReflectFloat16Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ return PadReflectFloat16(workloadFactory, memoryManager, tensorHandleFactory);
+}
diff --git a/src/backends/backendsCommon/test/layerTests/MirrorPadTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/MirrorPadTestImpl.hpp
new file mode 100644
index 0000000000..52898b820c
--- /dev/null
+++ b/src/backends/backendsCommon/test/layerTests/MirrorPadTestImpl.hpp
@@ -0,0 +1,117 @@
+//
+// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "LayerTestResult.hpp"
+
+#include <Half.hpp>
+
+#include <ResolveType.hpp>
+
+#include <armnn/Types.hpp>
+
+#include <armnn/backends/IBackendInternal.hpp>
+#include <backendsCommon/WorkloadFactory.hpp>
+
+LayerTestResult<float, 2> PadSymmetric2dFloat32Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+LayerTestResult<float, 2> PadReflect2dFloat32Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+LayerTestResult<float, 3> PadSymmetric3dFloat32Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+LayerTestResult<float, 3> PadReflect3dFloat32Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+LayerTestResult<uint8_t, 3> PadSymmetric3dUint8Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+LayerTestResult<uint8_t, 3> PadReflect3dUint8Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+LayerTestResult<int8_t, 3> PadSymmetric3dInt8Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+LayerTestResult<int8_t, 3> PadReflect3dInt8Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+LayerTestResult<float, 4> PadSymmetric4dFloat32Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+LayerTestResult<float, 4> PadReflect4dFloat32Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+LayerTestResult<armnn::BFloat16, 4> PadSymmetric4dBFloat16Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+LayerTestResult<armnn::BFloat16, 4> PadReflect4dBFloat16Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+LayerTestResult<uint8_t, 4> PadSymmetric4dUint8Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+LayerTestResult<uint8_t, 4> PadReflect4dUint8Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+LayerTestResult<int8_t, 4> PadSymmetric4dInt8Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+LayerTestResult<int8_t, 4> PadReflect4dInt8Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+LayerTestResult<int16_t, 4> PadSymmetric4dInt16Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+LayerTestResult<int16_t, 4> PadReflect4dInt16Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+LayerTestResult<armnn::Half, 2> PadSymmetricFloat16Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+LayerTestResult<armnn::Half, 2> PadReflectFloat16Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory); \ No newline at end of file
diff --git a/src/backends/cl/workloads/ClFillWorkload.cpp b/src/backends/cl/workloads/ClFillWorkload.cpp
index 8cb2db4b25..ea42dcfc8b 100644
--- a/src/backends/cl/workloads/ClFillWorkload.cpp
+++ b/src/backends/cl/workloads/ClFillWorkload.cpp
@@ -29,7 +29,7 @@ ClFillWorkload::ClFillWorkload(const FillQueueDescriptor& descriptor,
m_Data.ValidateInputsOutputs("ClFillWorkload", 1, 1);
arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(this->m_Data.m_Outputs[0])->GetTensor();
- arm_compute::PixelValue pixelValue = GetPixelValue(output, descriptor.m_Parameters.m_Value);
+ arm_compute::PixelValue pixelValue = GetPixelValue(output.info(), descriptor.m_Parameters.m_Value);
m_Layer.configure(clCompileContext, &output, pixelValue);
}
diff --git a/src/backends/cl/workloads/ClPadWorkload.cpp b/src/backends/cl/workloads/ClPadWorkload.cpp
index 10c8907d43..46975102db 100644
--- a/src/backends/cl/workloads/ClPadWorkload.cpp
+++ b/src/backends/cl/workloads/ClPadWorkload.cpp
@@ -39,7 +39,7 @@ ClPadWorkload::ClPadWorkload(const PadQueueDescriptor& descriptor,
arm_compute::PaddingList padList = static_cast<arm_compute::PaddingList>(reversed_PadList);
- arm_compute::PixelValue pixelValue = GetPixelValue(input, descriptor.m_Parameters.m_PadValue);
+ arm_compute::PixelValue pixelValue = GetPixelValue(input.info(), descriptor.m_Parameters.m_PadValue);
m_Layer.configure(clCompileContext, &input, &output, padList, pixelValue);
}
diff --git a/src/backends/neon/workloads/NeonFillWorkload.cpp b/src/backends/neon/workloads/NeonFillWorkload.cpp
index 0a3c7f0c88..3cfa56ab54 100644
--- a/src/backends/neon/workloads/NeonFillWorkload.cpp
+++ b/src/backends/neon/workloads/NeonFillWorkload.cpp
@@ -28,7 +28,7 @@ NeonFillWorkload::NeonFillWorkload(const FillQueueDescriptor& descriptor, const
m_Data.ValidateInputsOutputs("NeonFillWorkload", 1, 1);
arm_compute::ITensor& output = static_cast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
- arm_compute::PixelValue pixelValue = GetPixelValue(output, descriptor.m_Parameters.m_Value);
+ arm_compute::PixelValue pixelValue = GetPixelValue(output.info(), descriptor.m_Parameters.m_Value);
auto layer = std::make_unique<arm_compute::NEFill>();
layer->configure(&output, pixelValue);
diff --git a/src/backends/neon/workloads/NeonPadWorkload.cpp b/src/backends/neon/workloads/NeonPadWorkload.cpp
index b378d5f843..42fc42ba5c 100644
--- a/src/backends/neon/workloads/NeonPadWorkload.cpp
+++ b/src/backends/neon/workloads/NeonPadWorkload.cpp
@@ -38,7 +38,7 @@ NeonPadWorkload::NeonPadWorkload(const PadQueueDescriptor& descriptor, const Wor
arm_compute::PaddingList padList = static_cast<arm_compute::PaddingList>(reversed_PadList);
- arm_compute::PixelValue pixelValue = GetPixelValue(input, descriptor.m_Parameters.m_PadValue);
+ arm_compute::PixelValue pixelValue = GetPixelValue(input.info(), descriptor.m_Parameters.m_PadValue);
auto layer = std::make_unique<arm_compute::NEPadLayer>();
layer->configure(&input, &output, padList, pixelValue);
diff --git a/src/backends/reference/backend.mk b/src/backends/reference/backend.mk
index f8169a6c0c..7049279557 100644
--- a/src/backends/reference/backend.mk
+++ b/src/backends/reference/backend.mk
@@ -41,6 +41,7 @@ BACKEND_SOURCES := \
workloads/Lstm.cpp \
workloads/LstmUtils.cpp \
workloads/Concatenate.cpp \
+ workloads/MirrorPad.cpp \
workloads/Pad.cpp \
workloads/Pooling2d.cpp \
workloads/PreluImpl.cpp \
diff --git a/src/backends/reference/test/RefLayerTests.cpp b/src/backends/reference/test/RefLayerTests.cpp
index cb31b37161..5993270173 100644
--- a/src/backends/reference/test/RefLayerTests.cpp
+++ b/src/backends/reference/test/RefLayerTests.cpp
@@ -1415,7 +1415,7 @@ ARMNN_AUTO_TEST_CASE_WITH_THF(LogSoftmaxFloat16_2, LogSoftmaxTest2<DataType::Flo
ARMNN_AUTO_TEST_CASE_WITH_THF(LogSoftmaxFloat16_3, LogSoftmaxTest3<DataType::Float16>)
ARMNN_AUTO_TEST_CASE_WITH_THF(LogSoftmaxFloat16_4, LogSoftmaxTest4<DataType::Float16>)
-// Pad
+// Pad - Constant
ARMNN_AUTO_TEST_CASE_WITH_THF(PadBFloat162d, PadBFloat162dTest)
ARMNN_AUTO_TEST_CASE_WITH_THF(PadBFloat162dCustomPadding, PadBFloat162dCustomPaddingTest)
ARMNN_AUTO_TEST_CASE_WITH_THF(PadBFloat163d, PadBFloat163dTest)
@@ -1445,6 +1445,31 @@ ARMNN_AUTO_TEST_CASE_WITH_THF(PadInt84d, PadInt84dTest)
ARMNN_AUTO_TEST_CASE_WITH_THF(PadQAsymmS8, PadQAsymmTestCommon<DataType::QAsymmS8>, -2.0f, 3, 0.0f)
ARMNN_AUTO_TEST_CASE_WITH_THF(PadQAsymmS8CustomPadding, PadQAsymmTestCommon<DataType::QAsymmS8>, -2.0f, 3, 2.0f)
+// Pad - Symmetric & Reflect
+ARMNN_AUTO_TEST_CASE_WITH_THF(PadSymmetric2dFloat32, PadSymmetric2dFloat32Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(PadReflect2dFloat32, PadReflect2dFloat32Test)
+
+ARMNN_AUTO_TEST_CASE_WITH_THF(PadSymmetric3dFloat32, PadSymmetric3dFloat32Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(PadReflect3dFloat32, PadReflect3dFloat32Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(PadSymmetric3dUint8, PadSymmetric3dUint8Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(PadReflect3dUint8, PadReflect3dUint8Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(PadSymmetric3dInt8, PadSymmetric3dInt8Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(PadReflect3dInt8, PadReflect3dInt8Test)
+
+ARMNN_AUTO_TEST_CASE_WITH_THF(PadSymmetric4dFloat32, PadSymmetric4dFloat32Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(PadReflect4dFloat32, PadReflect4dFloat32Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(PadSymmetric4dBFloat16, PadSymmetric4dBFloat16Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(PadReflect4dBFloat16, PadReflect4dBFloat16Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(PadSymmetric4dUint8, PadSymmetric4dUint8Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(PadReflect4dUint8, PadReflect4dUint8Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(PadSymmetric4dInt8, PadSymmetric4dInt8Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(PadReflect4dInt8, PadReflect4dInt8Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(PadSymmetric4dInt16, PadSymmetric4dInt16Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(PadReflect4dInt16, PadReflect4dInt16Test)
+
+ARMNN_AUTO_TEST_CASE_WITH_THF(PadSymmetricFloat16, PadSymmetricFloat16Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(PadReflectFloat16, PadReflectFloat16Test)
+
// Constant
ARMNN_AUTO_TEST_CASE_WITH_THF(Constant, ConstantTest)
ARMNN_AUTO_TEST_CASE_WITH_THF(ConstantUint8, ConstantUint8CustomQuantizationScaleAndOffsetTest)
diff --git a/src/backends/reference/workloads/CMakeLists.txt b/src/backends/reference/workloads/CMakeLists.txt
index 5727291be3..f212522895 100644
--- a/src/backends/reference/workloads/CMakeLists.txt
+++ b/src/backends/reference/workloads/CMakeLists.txt
@@ -52,6 +52,8 @@ list(APPEND armnnRefBackendWorkloads_sources
Concatenate.hpp
Concatenate.cpp
Minimum.hpp
+ MirrorPad.cpp
+ MirrorPad.hpp
Pad.cpp
Pad.hpp
Pooling2d.cpp
diff --git a/src/backends/reference/workloads/MirrorPad.cpp b/src/backends/reference/workloads/MirrorPad.cpp
new file mode 100644
index 0000000000..7388fed147
--- /dev/null
+++ b/src/backends/reference/workloads/MirrorPad.cpp
@@ -0,0 +1,199 @@
+//
+// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "MirrorPad.hpp"
+
+#include "BaseIterator.hpp"
+#include "Decoders.hpp"
+#include "Encoders.hpp"
+
+namespace
+{
+
+// Convert a linear index into n-dimensional coordinates.
+// E.g. index = 2 returns [0, 0, 2].
+inline std::vector<unsigned int> IndexToCoord(const armnn::TensorShape& shape, unsigned int index)
+{
+ unsigned int numOfElements = shape.GetNumElements();
+
+ ARMNN_ASSERT_MSG(index <= numOfElements, "Index has to be in [0, num_elements]");
+ ARMNN_ASSERT_MSG(numOfElements != 0, "Cannot create coordinate from empty shape");
+
+ std::vector<unsigned int> coord(shape.GetNumDimensions());
+ for(unsigned int i = 0; i < shape.GetNumDimensions(); ++i)
+ {
+ numOfElements /= shape[i];
+ coord[i] = index / numOfElements;
+ index %= numOfElements;
+ }
+
+ return coord;
+}
+
+// Returns the index of a given coordinate.
+// E.g. [0, 0, 2] returns 2.
+inline unsigned int CoordToIndex(const armnn::TensorShape& shape, const std::vector<unsigned int>& coord)
+{
+ ARMNN_ASSERT_MSG(shape.GetNumDimensions() != 0, "Cannot get index from empty shape");
+ ARMNN_ASSERT_MSG(coord.size() != 0, "Cannot get index of empty coordinate");
+
+ unsigned int index = 0;
+ unsigned int dimSize = 1;
+
+ for (unsigned int i = shape.GetNumDimensions(); i > 0; --i)
+ {
+ index += coord[i - 1] * dimSize;
+ dimSize *= shape[i - 1];
+ }
+
+ return index;
+}
+
+} // anonymous namespace
+
+namespace armnn
+{
+
+void MirrorPad(const TensorInfo& inputInfo,
+ const TensorInfo& outputInfo,
+ const ITensorHandle* inputHandle,
+ ITensorHandle* outputHandle,
+ const PadQueueDescriptor& data)
+{
+ auto padList = data.m_Parameters.m_PadList;
+ PaddingMode paddingMode = data.m_Parameters.m_PaddingMode;
+
+ TensorShape outputShape = outputInfo.GetShape();
+ TensorShape inputShape = inputInfo.GetShape();
+
+ unsigned int numOutputElements = outputInfo.GetNumElements();
+ unsigned int numInputDimensions = inputShape.GetNumDimensions();
+ assert(numInputDimensions == outputShape.GetNumDimensions());
+
+ // If padding mode is Reflect then both paddings must be no greater than inputShape(i) - 1.
+ // If padding mode is Symmetric then both paddings must be no greater than inputShape(i).
+ const unsigned int isReflect = static_cast<unsigned int>(paddingMode == PaddingMode::Reflect);
+ for(unsigned int i = 0; i < padList.size(); ++i)
+ {
+ if(padList.at(i).first > (inputShape[i] - isReflect) ||
+ padList.at(i).second > (inputShape[i] - isReflect))
+ {
+ throw armnn::InvalidArgumentException("Paddings must be less (Reflect) or "
+ "equal (Symmetric) to the dimension size.");
+ }
+ }
+
+ auto inputData = MakeDecoder<float>(inputInfo, inputHandle->Map());
+ auto outData = MakeEncoder<float>(outputInfo, outputHandle->Map());
+
+ Decoder<float>& input = *inputData;
+ Encoder<float>& output = *outData;
+
+ for(unsigned int idx = 0; idx < numOutputElements; ++idx)
+ {
+        // Get the coordinates of the current index in vector form. E.g. idx 1 = [0, 0, 0, 1]
+ const std::vector<unsigned int> coord = IndexToCoord(outputShape, idx);
+
+ std::vector<unsigned int> dimensions;
+ std::vector<unsigned int> coords;
+
+ for(unsigned int i = 0; i < numInputDimensions; ++i)
+ {
+ dimensions.emplace_back(i);
+ coords.emplace_back(coord[i]);
+ }
+
+ auto isInPadding = [&](unsigned int i)
+ {
+ return (coords[i] < padList[i].first || coords[i] > inputShape[i] + padList[i].first - 1);
+ };
+
+ auto getReflectIndex = [&](unsigned int i) -> unsigned int
+ {
+ if(isInPadding(i))
+ {
+ if(coords[i] < padList[i].first)
+ {
+ return padList[i].first - coords[i];
+ }
+ else
+ {
+ return 2 * inputShape[i] + padList[i].first - 2 - coords[i];
+ }
+ }
+ return coords[i] - padList[i].first;
+ };
+
+ auto getSymmetricIndex = [&](unsigned int i) -> unsigned int
+ {
+ if(isInPadding(i))
+ {
+ if(coords[i] < padList[i].first)
+ {
+ return padList[i].first - coords[i] - 1;
+ }
+ else
+ {
+ return 2 * inputShape[i] + padList[i].first - 1 - coords[i];
+ }
+ }
+ return coords[i] - padList[i].first;
+ };
+
+ // Location of the value in the input tensor to use in the output.
+ std::vector<unsigned int> coordOfInput;
+
+ // any_of works as a loop here to check if any of the dimensions are in the padding.
+        // If a dimension is in the padding area, then create the coordinates of the location in the
+ // input tensor to use in the output.
+ // E.g.
+ // Input tensor = [ 1, 2, 3 ], Rank = 1.
+ // Output tensor = [ 2, 1, 2, 3, 1 ] if Reflect or [ 1, 1, 2, 3, 3 ] if Symmetric with a padding of (1, 1).
+ // So it will either return [ 1 ] or [ 0 ] which is used to set the first value in the output tensor and so on.
+ if(std::any_of(dimensions.begin(), dimensions.end(), isInPadding))
+ {
+ switch(paddingMode)
+ {
+ case PaddingMode::Reflect:
+ {
+ for(unsigned int i = 0; i < numInputDimensions; ++i)
+ {
+ coordOfInput.emplace_back(getReflectIndex(i));
+ }
+ break;
+ }
+ case PaddingMode::Symmetric:
+ {
+ for(unsigned int i = 0; i < numInputDimensions; ++i)
+ {
+ coordOfInput.emplace_back(getSymmetricIndex(i));
+ }
+ break;
+ }
+ default:
+ throw InvalidArgumentException("Padding mode not supported.");
+ break;
+ }
+ }
+ else
+ {
+ for(unsigned int i = 0; i < numInputDimensions; ++i)
+ {
+ coordOfInput.emplace_back(coord[i] - padList[i].first);
+ }
+ }
+
+ // Set output value using the coordinate of the input value to use.
+ const unsigned int indexOfInput = CoordToIndex(inputShape, coordOfInput);
+
+ input[indexOfInput];
+ auto inputValue = input.Get();
+
+ output[idx];
+ output.Set(inputValue);
+ }
+}
+
+} //namespace armnn \ No newline at end of file
diff --git a/src/backends/reference/workloads/MirrorPad.hpp b/src/backends/reference/workloads/MirrorPad.hpp
new file mode 100644
index 0000000000..3deaf1d5fd
--- /dev/null
+++ b/src/backends/reference/workloads/MirrorPad.hpp
@@ -0,0 +1,22 @@
+//
+// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "armnn/Tensor.hpp"
+
+#include <backendsCommon/Workload.hpp>
+#include <backendsCommon/WorkloadData.hpp>
+
+namespace armnn
+{
+
+void MirrorPad(const TensorInfo& inputInfo,
+ const TensorInfo& outputInfo,
+ const ITensorHandle* inputHandle,
+ ITensorHandle* outputHandle,
+ const PadQueueDescriptor& data);
+
+} //namespace armnn
diff --git a/src/backends/reference/workloads/RefPadWorkload.cpp b/src/backends/reference/workloads/RefPadWorkload.cpp
index f15306d1af..fd0728c8cd 100644
--- a/src/backends/reference/workloads/RefPadWorkload.cpp
+++ b/src/backends/reference/workloads/RefPadWorkload.cpp
@@ -5,6 +5,7 @@
#include "RefPadWorkload.hpp"
+#include "MirrorPad.hpp"
#include "Pad.hpp"
#include "Profiling.hpp"
#include "RefWorkloadUtils.hpp"
@@ -29,11 +30,19 @@ void RefPadWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITe
const TensorInfo& inputInfo = GetTensorInfo(inputs[0]);
const TensorInfo& outputInfo = GetTensorInfo(outputs[0]);
- armnn::Pad(inputInfo,
- outputInfo,
- inputs[0],
- outputs[0],
- m_Data);
+ PaddingMode paddingMode = m_Data.m_Parameters.m_PaddingMode;
+ if (paddingMode == PaddingMode::Constant)
+ {
+ armnn::Pad(inputInfo, outputInfo, inputs[0], outputs[0], m_Data);
+ }
+ else if(paddingMode == PaddingMode::Reflect || paddingMode == PaddingMode::Symmetric)
+ {
+ armnn::MirrorPad(inputInfo, outputInfo, inputs[0], outputs[0], m_Data);
+ }
+ else
+ {
+ throw InvalidArgumentException("Padding mode not supported.");
+ }
}
} //namespace armnn \ No newline at end of file