Diffstat (limited to 'src/backends/neon')
-rw-r--r--  src/backends/neon/NeonLayerSupport.cpp           |  8
-rw-r--r--  src/backends/neon/NeonWorkloadFactory.cpp        |  9
-rw-r--r--  src/backends/neon/backend.mk                     |  1
-rw-r--r--  src/backends/neon/test/NeonLayerTests.cpp        |  6
-rw-r--r--  src/backends/neon/workloads/CMakeLists.txt       |  2
-rw-r--r--  src/backends/neon/workloads/NeonNegWorkload.cpp  | 43
-rw-r--r--  src/backends/neon/workloads/NeonNegWorkload.hpp  | 28
-rw-r--r--  src/backends/neon/workloads/NeonWorkloads.hpp    |  1
8 files changed, 95 insertions(+), 3 deletions(-)
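
For context, here is a minimal sketch (not part of the patch) of how the Neg support added
below can be exercised through the public Arm NN graph API when the CpuAcc (Neon) backend is
selected; the tensor shape, layer name and binding ids are illustrative assumptions.

    #include <armnn/ArmNN.hpp>

    int main()
    {
        using namespace armnn;

        // Build a trivial graph: input -> ElementwiseUnary(Neg) -> output.
        INetworkPtr network = INetwork::Create();
        TensorInfo info(TensorShape({ 2, 2 }), DataType::Float32);

        IConnectableLayer* input  = network->AddInputLayer(0);
        IConnectableLayer* neg    = network->AddElementwiseUnaryLayer(
            ElementwiseUnaryDescriptor(UnaryOperation::Neg), "neg");
        IConnectableLayer* output = network->AddOutputLayer(0);

        input->GetOutputSlot(0).Connect(neg->GetInputSlot(0));
        neg->GetOutputSlot(0).Connect(output->GetInputSlot(0));
        input->GetOutputSlot(0).SetTensorInfo(info);
        neg->GetOutputSlot(0).SetTensorInfo(info);

        // With this patch, optimizing for CpuAcc can place the Neg layer on the
        // Neon backend (NeonNegWorkload) instead of falling back to another backend.
        IRuntimePtr runtime = IRuntime::Create(IRuntime::CreationOptions());
        IOptimizedNetworkPtr optNet =
            Optimize(*network, { Compute::CpuAcc }, runtime->GetDeviceSpec());
        return optNet ? 0 : 1;
    }
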
diff --git a/src/backends/neon/NeonLayerSupport.cpp b/src/backends/neon/NeonLayerSupport.cpp
index 78776124ec..999b158f74 100644
--- a/src/backends/neon/NeonLayerSupport.cpp
+++ b/src/backends/neon/NeonLayerSupport.cpp
@@ -38,6 +38,7 @@
#include "workloads/NeonMinimumWorkload.hpp"
#include "workloads/NeonMultiplicationWorkload.hpp"
#include "workloads/NeonDivisionWorkload.hpp"
+#include "workloads/NeonNegWorkload.hpp"
#include "workloads/NeonNormalizationFloatWorkload.hpp"
#include "workloads/NeonFullyConnectedWorkload.hpp"
#include "workloads/NeonPadWorkload.hpp"
@@ -370,6 +371,13 @@ bool NeonLayerSupport::IsElementwiseUnarySupported(const TensorInfo& input,
input,
output);
}
+ else if (descriptor.m_Operation == UnaryOperation::Neg)
+ {
+ FORWARD_WORKLOAD_VALIDATE_FUNC(NeonNegWorkloadValidate,
+ reasonIfUnsupported,
+ input,
+ output);
+ }
return false;
}
diff --git a/src/backends/neon/NeonWorkloadFactory.cpp b/src/backends/neon/NeonWorkloadFactory.cpp
index cc7dca031d..bf26d82a76 100644
--- a/src/backends/neon/NeonWorkloadFactory.cpp
+++ b/src/backends/neon/NeonWorkloadFactory.cpp
@@ -218,9 +218,8 @@ std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateDivision(
return std::make_unique<NeonDivisionWorkload>(descriptor, info);
}
-std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateElementwiseUnary(const ElementwiseUnaryQueueDescriptor&
- descriptor,
- const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateElementwiseUnary(
+ const ElementwiseUnaryQueueDescriptor& descriptor, const WorkloadInfo& info) const
{
if (descriptor.m_Parameters.m_Operation == UnaryOperation::Abs)
{
@@ -238,6 +237,10 @@ std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateElementwiseUnary(const Ele
return std::make_unique<NeonRsqrtWorkload>(rsqrtQueueDescriptor, info);
}
+ else if (descriptor.m_Parameters.m_Operation == UnaryOperation::Neg)
+ {
+ return std::make_unique<NeonNegWorkload>(descriptor, info);
+ }
return MakeWorkloadHelper<NullWorkload, NullWorkload>(descriptor, info);
}
diff --git a/src/backends/neon/backend.mk b/src/backends/neon/backend.mk
index d9a5405983..3cb8bd582a 100644
--- a/src/backends/neon/backend.mk
+++ b/src/backends/neon/backend.mk
@@ -47,6 +47,7 @@ BACKEND_SOURCES := \
workloads/NeonMinimumWorkload.cpp \
workloads/NeonMultiplicationWorkload.cpp \
workloads/NeonDivisionWorkload.cpp \
+ workloads/NeonNegWorkload.cpp \
workloads/NeonNormalizationFloatWorkload.cpp \
workloads/NeonPadWorkload.cpp \
workloads/NeonPermuteWorkload.cpp \
diff --git a/src/backends/neon/test/NeonLayerTests.cpp b/src/backends/neon/test/NeonLayerTests.cpp
index d3ca675679..06b5597fcf 100644
--- a/src/backends/neon/test/NeonLayerTests.cpp
+++ b/src/backends/neon/test/NeonLayerTests.cpp
@@ -1069,6 +1069,12 @@ ARMNN_AUTO_TEST_CASE(ArgMaxChannelQAsymm8, ArgMaxChannelTest<DataType::QAsymmU8>
ARMNN_AUTO_TEST_CASE(ArgMaxHeightQAsymm8, ArgMaxHeightTest<DataType::QAsymmU8>)
ARMNN_AUTO_TEST_CASE(ArgMinWidthQAsymm8, ArgMinWidthTest<DataType::QAsymmU8>)
+// Neg
+ARMNN_AUTO_TEST_CASE(Neg2d, Neg2dTest<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE(Neg3d, Neg3dTest<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE(NegZero, NegZeroTest<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE(NegNegative, NegNegativeTest<DataType::Float32>)
+
#if defined(ARMNNREF_ENABLED)
// The ARMNN_COMPARE_REF_AUTO_TEST_CASE and the ARMNN_COMPARE_REF_FIXTURE_TEST_CASE test units are not available
diff --git a/src/backends/neon/workloads/CMakeLists.txt b/src/backends/neon/workloads/CMakeLists.txt
index a932f8b852..8160710f65 100644
--- a/src/backends/neon/workloads/CMakeLists.txt
+++ b/src/backends/neon/workloads/CMakeLists.txt
@@ -56,6 +56,8 @@ list(APPEND armnnNeonBackendWorkloads_sources
NeonMultiplicationWorkload.hpp
NeonDivisionWorkload.cpp
NeonDivisionWorkload.hpp
+ NeonNegWorkload.cpp
+ NeonNegWorkload.hpp
NeonNormalizationFloatWorkload.cpp
NeonNormalizationFloatWorkload.hpp
NeonPadWorkload.cpp
diff --git a/src/backends/neon/workloads/NeonNegWorkload.cpp b/src/backends/neon/workloads/NeonNegWorkload.cpp
new file mode 100644
index 0000000000..afe05583fd
--- /dev/null
+++ b/src/backends/neon/workloads/NeonNegWorkload.cpp
@@ -0,0 +1,43 @@
+//
+// Copyright © 2020 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "NeonNegWorkload.hpp"
+
+#include "NeonWorkloadUtils.hpp"
+
+#include <aclCommon/ArmComputeTensorHandle.hpp>
+#include <aclCommon/ArmComputeTensorUtils.hpp>
+
+#include <boost/cast.hpp>
+
+namespace armnn
+{
+
+arm_compute::Status NeonNegWorkloadValidate(const TensorInfo& input, const TensorInfo& output)
+{
+ const arm_compute::TensorInfo aclInput = armcomputetensorutils::BuildArmComputeTensorInfo(input);
+ const arm_compute::TensorInfo aclOutput = armcomputetensorutils::BuildArmComputeTensorInfo(output);
+
+ return arm_compute::NENegLayer::validate(&aclInput, &aclOutput);
+}
+
+NeonNegWorkload::NeonNegWorkload(const ElementwiseUnaryQueueDescriptor& descriptor, const WorkloadInfo& info)
+ : BaseWorkload<ElementwiseUnaryQueueDescriptor>(descriptor, info)
+{
+ m_Data.ValidateInputsOutputs("NeonNegWorkload", 1, 1);
+
+ arm_compute::ITensor& input = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+ arm_compute::ITensor& output = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+
+ m_NegLayer.configure(&input, &output);
+}
+
+void NeonNegWorkload::Execute() const
+{
+ ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonNegWorkload_Execute");
+ m_NegLayer.run();
+}
+
+} // namespace armnn
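
The new workload follows the usual Neon pattern: a free validate function that defers to ACL,
configuration in the constructor, and run() inside Execute(). Below is a hypothetical helper
(not in the patch) showing how the arm_compute::Status returned by the validate function is
typically interpreted; the helper name and the relative include path are assumptions.

    // Hypothetical helper, assumed to sit alongside the Neon backend sources so the
    // relative include resolves the same way it does inside the patch.
    #include "workloads/NeonNegWorkload.hpp"

    #include <arm_compute/core/Error.h>

    #include <iostream>

    bool IsNegSupportedOnNeon(const armnn::TensorInfo& input, const armnn::TensorInfo& output)
    {
        // NeonNegWorkloadValidate forwards to arm_compute::NENegLayer::validate, so the
        // returned Status follows ACL conventions: ErrorCode::OK means supported.
        arm_compute::Status status = armnn::NeonNegWorkloadValidate(input, output);
        if (status.error_code() != arm_compute::ErrorCode::OK)
        {
            std::cerr << "Neg not supported on Neon: " << status.error_description() << "\n";
            return false;
        }
        return true;
    }
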
diff --git a/src/backends/neon/workloads/NeonNegWorkload.hpp b/src/backends/neon/workloads/NeonNegWorkload.hpp
new file mode 100644
index 0000000000..7b2ed17a3e
--- /dev/null
+++ b/src/backends/neon/workloads/NeonNegWorkload.hpp
@@ -0,0 +1,28 @@
+//
+// Copyright © 2020 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <backendsCommon/Workload.hpp>
+
+#include <arm_compute/core/Error.h>
+#include <arm_compute/runtime/NEON/functions/NEElementwiseUnaryLayer.h>
+
+namespace armnn
+{
+
+arm_compute::Status NeonNegWorkloadValidate(const TensorInfo& input, const TensorInfo& output);
+
+class NeonNegWorkload : public BaseWorkload<ElementwiseUnaryQueueDescriptor>
+{
+public:
+ NeonNegWorkload(const ElementwiseUnaryQueueDescriptor& descriptor, const WorkloadInfo& info);
+ virtual void Execute() const override;
+
+private:
+ mutable arm_compute::NENegLayer m_NegLayer;
+};
+
+} // namespace armnn
diff --git a/src/backends/neon/workloads/NeonWorkloads.hpp b/src/backends/neon/workloads/NeonWorkloads.hpp
index 52cd76f14b..e28d120de7 100644
--- a/src/backends/neon/workloads/NeonWorkloads.hpp
+++ b/src/backends/neon/workloads/NeonWorkloads.hpp
@@ -31,6 +31,7 @@
#include "NeonConcatWorkload.hpp"
#include "NeonMinimumWorkload.hpp"
#include "NeonMultiplicationWorkload.hpp"
+#include "NeonNegWorkload.hpp"
#include "NeonNormalizationFloatWorkload.hpp"
#include "NeonPadWorkload.hpp"
#include "NeonPermuteWorkload.hpp"