author     Éanna Ó Catháin <eanna.ocathain@arm.com>            2019-01-25 10:01:40 +0000
committer  Narumol Prangnawarat <narumol.prangnawarat@arm.com>  2019-01-25 14:51:53 +0000
commit     12055747d47657a89d60748a078897f6436e6aa0 (patch)
tree       0ac15d4e321ec5e2eaf0cbc074ac95c061289798
parent     e9e7bfd09e3435d2bbb334e07c6a0a2514c80048 (diff)
download   armnn-12055747d47657a89d60748a078897f6436e6aa0.tar.gz
MLCE-84 Add Neon Pad support and unit tests
Change-Id: I0d949a9f23a61af5013efdd18572b29fae585f2a
-rw-r--r--  src/backends/neon/NeonLayerSupport.cpp           | 13
-rw-r--r--  src/backends/neon/NeonLayerSupport.hpp           |  5
-rw-r--r--  src/backends/neon/NeonWorkloadFactory.cpp        |  2
-rw-r--r--  src/backends/neon/backend.mk                     |  1
-rw-r--r--  src/backends/neon/test/NeonLayerTests.cpp        |  5
-rw-r--r--  src/backends/neon/workloads/CMakeLists.txt       |  2
-rw-r--r--  src/backends/neon/workloads/NeonPadWorkload.cpp  | 57
-rw-r--r--  src/backends/neon/workloads/NeonPadWorkload.hpp  | 30
-rw-r--r--  src/backends/neon/workloads/NeonWorkloads.hpp    |  1
9 files changed, 115 insertions(+), 1 deletion(-)
diff --git a/src/backends/neon/NeonLayerSupport.cpp b/src/backends/neon/NeonLayerSupport.cpp
index 9db7354e9e..3b0967628d 100644
--- a/src/backends/neon/NeonLayerSupport.cpp
+++ b/src/backends/neon/NeonLayerSupport.cpp
@@ -30,6 +30,7 @@
#include "workloads/NeonMultiplicationFloatWorkload.hpp"
#include "workloads/NeonNormalizationFloatWorkload.hpp"
#include "workloads/NeonFullyConnectedWorkload.hpp"
+#include "workloads/NeonPadWorkload.hpp"
#include "workloads/NeonPermuteWorkload.hpp"
#include "workloads/NeonPooling2dWorkload.hpp"
#include "workloads/NeonResizeBilinearWorkload.hpp"
@@ -356,6 +357,18 @@ bool NeonLayerSupport::IsOutputSupported(const TensorInfo& output,
&TrueFunc<>);
}
+bool NeonLayerSupport::IsPadSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const PadDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported) const
+{
+ FORWARD_WORKLOAD_VALIDATE_FUNC(NeonPadWorkloadValidate,
+ reasonIfUnsupported,
+ input,
+ output,
+ descriptor);
+}
+
bool NeonLayerSupport::IsPermuteSupported(const TensorInfo& input,
const TensorInfo& output,
const PermuteDescriptor& descriptor,
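
The IsPadSupported override added above forwards to NeonPadWorkloadValidate through FORWARD_WORKLOAD_VALIDATE_FUNC, so Pad support on the Neon backend can now be queried like any other layer. A minimal sketch of such a query follows; the shapes, the default construction of NeonLayerSupport and the include paths are illustrative assumptions, not part of this change.

#include <armnn/Descriptors.hpp>
#include <armnn/Optional.hpp>
#include <armnn/Tensor.hpp>
#include <armnn/Types.hpp>
#include <neon/NeonLayerSupport.hpp>
#include <string>

// Hypothetical helper: asks the Neon backend whether a 2x2 -> 4x4 Float32 pad is supported.
bool IsExamplePadSupportedOnNeon()
{
    const unsigned int inputShape[]  = { 2, 2 };
    const unsigned int outputShape[] = { 4, 4 };   // 2x2 input padded by 1 on every edge
    armnn::TensorInfo input(2, inputShape, armnn::DataType::Float32);
    armnn::TensorInfo output(2, outputShape, armnn::DataType::Float32);

    armnn::PadDescriptor descriptor;
    descriptor.m_PadList = { { 1, 1 }, { 1, 1 } }; // (before, after) padding per dimension

    armnn::NeonLayerSupport layerSupport;          // assumes default construction is available
    std::string reason;                            // filled in only if the query fails
    return layerSupport.IsPadSupported(input, output, descriptor,
                                       armnn::Optional<std::string&>(reason));
}
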
diff --git a/src/backends/neon/NeonLayerSupport.hpp b/src/backends/neon/NeonLayerSupport.hpp
index 631632485c..6de5c784cc 100644
--- a/src/backends/neon/NeonLayerSupport.hpp
+++ b/src/backends/neon/NeonLayerSupport.hpp
@@ -112,6 +112,11 @@ public:
bool IsOutputSupported(const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+ bool IsPadSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const PadDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
bool IsPermuteSupported(const TensorInfo& input,
const TensorInfo& output,
const PermuteDescriptor& descriptor,
diff --git a/src/backends/neon/NeonWorkloadFactory.cpp b/src/backends/neon/NeonWorkloadFactory.cpp
index 76e1dd0118..5c7eeccb26 100644
--- a/src/backends/neon/NeonWorkloadFactory.cpp
+++ b/src/backends/neon/NeonWorkloadFactory.cpp
@@ -286,7 +286,7 @@ std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateMean(const MeanQueueDescri
std::unique_ptr<IWorkload> NeonWorkloadFactory::CreatePad(const PadQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
- return MakeWorkloadHelper<NullWorkload, NullWorkload>(descriptor, info);
+ return std::make_unique<NeonPadWorkload>(descriptor, info);
}
std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateEqual(const EqualQueueDescriptor& descriptor,
diff --git a/src/backends/neon/backend.mk b/src/backends/neon/backend.mk
index 1ebeae54f9..09dca63431 100644
--- a/src/backends/neon/backend.mk
+++ b/src/backends/neon/backend.mk
@@ -31,6 +31,7 @@ BACKEND_SOURCES := \
workloads/NeonMinimumWorkload.cpp \
workloads/NeonMultiplicationFloatWorkload.cpp \
workloads/NeonNormalizationFloatWorkload.cpp \
+ workloads/NeonPadWorkload.cpp \
workloads/NeonPermuteWorkload.cpp \
workloads/NeonPooling2dWorkload.cpp \
workloads/NeonReshapeWorkload.cpp \
diff --git a/src/backends/neon/test/NeonLayerTests.cpp b/src/backends/neon/test/NeonLayerTests.cpp
index 6975374bdd..b229cd020c 100644
--- a/src/backends/neon/test/NeonLayerTests.cpp
+++ b/src/backends/neon/test/NeonLayerTests.cpp
@@ -386,6 +386,11 @@ ARMNN_AUTO_TEST_CASE(SimpleFloor, SimpleFloorTest)
ARMNN_AUTO_TEST_CASE(SimpleReshapeFloat32, SimpleReshapeFloat32Test)
ARMNN_AUTO_TEST_CASE(SimpleReshapeUint8, SimpleReshapeUint8Test)
+// Pad
+ARMNN_AUTO_TEST_CASE(PadFloat322d, PadFloat322dTest)
+ARMNN_AUTO_TEST_CASE(PadFloat323d, PadFloat323dTest)
+ARMNN_AUTO_TEST_CASE(PadFloat324d, PadFloat324dTest)
+
// Permute
ARMNN_AUTO_TEST_CASE(SimplePermuteFloat32, SimplePermuteFloat32Test)
ARMNN_AUTO_TEST_CASE(SimplePermuteUint8, SimplePermuteUint8Test)
diff --git a/src/backends/neon/workloads/CMakeLists.txt b/src/backends/neon/workloads/CMakeLists.txt
index 713418da30..1a1ed74840 100644
--- a/src/backends/neon/workloads/CMakeLists.txt
+++ b/src/backends/neon/workloads/CMakeLists.txt
@@ -40,6 +40,8 @@ list(APPEND armnnNeonBackendWorkloads_sources
NeonMultiplicationFloatWorkload.hpp
NeonNormalizationFloatWorkload.cpp
NeonNormalizationFloatWorkload.hpp
+ NeonPadWorkload.cpp
+ NeonPadWorkload.hpp
NeonPermuteWorkload.cpp
NeonPermuteWorkload.hpp
NeonPooling2dWorkload.cpp
diff --git a/src/backends/neon/workloads/NeonPadWorkload.cpp b/src/backends/neon/workloads/NeonPadWorkload.cpp
new file mode 100644
index 0000000000..60d6b8a537
--- /dev/null
+++ b/src/backends/neon/workloads/NeonPadWorkload.cpp
@@ -0,0 +1,57 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "NeonPadWorkload.hpp"
+
+#include <neon/NeonTensorHandle.hpp>
+#include <aclCommon/ArmComputeTensorUtils.hpp>
+#include <arm_compute/core/Types.h>
+#include <arm_compute/runtime/NEON/functions/NEPadLayer.h>
+
+#include "NeonWorkloadUtils.hpp"
+
+namespace armnn
+{
+using namespace armcomputetensorutils;
+
+NeonPadWorkload::NeonPadWorkload(const PadQueueDescriptor& descriptor, const WorkloadInfo& info)
+ : BaseWorkload<PadQueueDescriptor>(descriptor, info)
+{
+ m_Data.ValidateInputsOutputs("NeonPadWorkload", 1, 1);
+
+ arm_compute::ITensor& input = static_cast<INeonTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+ arm_compute::ITensor& output = static_cast<INeonTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+
+ std::vector<std::pair<unsigned int, unsigned int>> reversed_PadList(descriptor.m_Parameters.m_PadList.size());
+
+ std::reverse_copy(std::begin(descriptor.m_Parameters.m_PadList),
+ std::end(descriptor.m_Parameters.m_PadList),
+ std::begin(reversed_PadList));
+
+ arm_compute::PaddingList padList = static_cast<arm_compute::PaddingList>(reversed_PadList);
+
+ auto layer = std::make_unique<arm_compute::NEPadLayer>();
+ layer->configure(&input, &output, padList);
+ m_Layer.reset(layer.release());
+}
+
+void NeonPadWorkload::Execute() const
+{
+ ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonPadWorkload_Execute");
+ m_Layer->run();
+}
+
+arm_compute::Status NeonPadWorkloadValidate(const TensorInfo& input,
+ const TensorInfo& output,
+ const PadDescriptor& descriptor)
+{
+ const arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(input);
+ const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output);
+ arm_compute::PaddingList padList = static_cast<arm_compute::PaddingList>(descriptor.m_PadList);
+
+ return arm_compute::NEPadLayer::validate(&aclInputInfo, &aclOutputInfo, padList);
+}
+
+} // namespace armnn
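
A note on the constructor above: m_PadList holds a (before, after) pair per dimension in Arm NN's dimension order, whereas the arm_compute::PaddingList handed to NEPadLayer is indexed starting from the innermost dimension, which is presumably why the pad list is reverse-copied before the cast. A standalone sketch of that reversal, with made-up padding values:

#include <algorithm>
#include <utility>
#include <vector>

int main()
{
    // (before, after) padding per dimension, in Arm NN's dimension order.
    std::vector<std::pair<unsigned int, unsigned int>> padList = { { 0, 0 }, { 1, 1 }, { 2, 2 } };

    // Reverse-copy into a second list, as the workload constructor does,
    // so the padding ends up indexed from the innermost dimension instead.
    std::vector<std::pair<unsigned int, unsigned int>> reversedPadList(padList.size());
    std::reverse_copy(padList.begin(), padList.end(), reversedPadList.begin());

    // reversedPadList now holds { { 2, 2 }, { 1, 1 }, { 0, 0 } }.
    return 0;
}
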
diff --git a/src/backends/neon/workloads/NeonPadWorkload.hpp b/src/backends/neon/workloads/NeonPadWorkload.hpp
new file mode 100644
index 0000000000..ab0e821ae5
--- /dev/null
+++ b/src/backends/neon/workloads/NeonPadWorkload.hpp
@@ -0,0 +1,30 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <backendsCommon/WorkloadData.hpp>
+#include <backendsCommon/Workload.hpp>
+#include <arm_compute/runtime/IFunction.h>
+#include <arm_compute/core/Error.h>
+
+namespace armnn {
+
+class NeonPadWorkload : public BaseWorkload<PadQueueDescriptor>
+{
+public:
+ NeonPadWorkload(const PadQueueDescriptor& descriptor, const WorkloadInfo& info);
+
+ void Execute() const override;
+
+private:
+ std::unique_ptr<arm_compute::IFunction> m_Layer;
+};
+
+arm_compute::Status NeonPadWorkloadValidate(const TensorInfo& input,
+ const TensorInfo& output,
+ const PadDescriptor& descriptor);
+
+} //namespace armnn
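
NeonPadWorkloadValidate, declared above, is the same function IsPadSupported forwards to, and it can be called directly to check whether ACL's NEPadLayer accepts a given Pad configuration before a workload is created. A sketch under assumed NHWC shapes and include paths; the error-code check follows the pattern the backend uses when interpreting an arm_compute::Status.

#include <armnn/Descriptors.hpp>
#include <armnn/Tensor.hpp>
#include <armnn/Types.hpp>
#include <arm_compute/core/Error.h>
#include <neon/workloads/NeonPadWorkload.hpp>

// Hypothetical check: would ACL accept padding a 1x2x2x1 Float32 tensor to 1x4x4x1?
bool CanNeonPadExample()
{
    const unsigned int inputShape[]  = { 1, 2, 2, 1 };
    const unsigned int outputShape[] = { 1, 4, 4, 1 };  // H and W padded by 1 on each side
    armnn::TensorInfo input(4, inputShape, armnn::DataType::Float32);
    armnn::TensorInfo output(4, outputShape, armnn::DataType::Float32);

    armnn::PadDescriptor descriptor;
    descriptor.m_PadList = { { 0, 0 }, { 1, 1 }, { 1, 1 }, { 0, 0 } };

    const arm_compute::Status status = armnn::NeonPadWorkloadValidate(input, output, descriptor);
    return status.error_code() == arm_compute::ErrorCode::OK;
}
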
diff --git a/src/backends/neon/workloads/NeonWorkloads.hpp b/src/backends/neon/workloads/NeonWorkloads.hpp
index e034cc94dd..e4f4fcfc53 100644
--- a/src/backends/neon/workloads/NeonWorkloads.hpp
+++ b/src/backends/neon/workloads/NeonWorkloads.hpp
@@ -22,6 +22,7 @@
#include "NeonMinimumWorkload.hpp"
#include "NeonMultiplicationFloatWorkload.hpp"
#include "NeonNormalizationFloatWorkload.hpp"
+#include "NeonPadWorkload.hpp"
#include "NeonPermuteWorkload.hpp"
#include "NeonPooling2dWorkload.hpp"
#include "NeonReshapeWorkload.hpp"