author    Sadik Armagan <sadik.armagan@arm.com>  2020-03-24 09:54:36 +0000
committer Sadik Armagan <sadik.armagan@arm.com>  2020-03-24 14:28:53 +0000
commit    ac47210621a601a2b6ded78e003d136626434f50 (patch)
tree      7bb4940dd25ac2dac85c97227b82d567bd2a4d87
parent    957b844f130066f00c8adbafae36cec2ab3e9548 (diff)
IVGCVSW-3813 Add Unary Elementwise Operation 'NEG' support to the android-nn-driver
* Implemented ClNegWorkload
* Implemented NeonNegWorkload
* Enabled 'NEG' operator on CL and Neon as well as Ref
* Implemented unit tests for 'NEG' operator

Signed-off-by: Sadik Armagan <sadik.armagan@arm.com>
Change-Id: I3d7a892692716636cae6bdf8ddd238e3d1ea064f
-rw-r--r--  src/backends/backendsCommon/WorkloadData.cpp | 3
-rw-r--r--  src/backends/backendsCommon/common.mk | 1
-rw-r--r--  src/backends/backendsCommon/test/CMakeLists.txt | 2
-rw-r--r--  src/backends/backendsCommon/test/LayerTests.hpp | 1
-rw-r--r--  src/backends/backendsCommon/test/layerTests/NegTestImpl.cpp | 174
-rw-r--r--  src/backends/backendsCommon/test/layerTests/NegTestImpl.hpp | 33
-rw-r--r--  src/backends/cl/ClLayerSupport.cpp | 8
-rw-r--r--  src/backends/cl/ClWorkloadFactory.cpp | 4
-rw-r--r--  src/backends/cl/backend.mk | 1
-rw-r--r--  src/backends/cl/test/ClLayerTests.cpp | 8
-rw-r--r--  src/backends/cl/workloads/CMakeLists.txt | 2
-rw-r--r--  src/backends/cl/workloads/ClNegWorkload.cpp | 44
-rw-r--r--  src/backends/cl/workloads/ClNegWorkload.hpp | 28
-rw-r--r--  src/backends/cl/workloads/ClWorkloads.hpp | 1
-rw-r--r--  src/backends/neon/NeonLayerSupport.cpp | 8
-rw-r--r--  src/backends/neon/NeonWorkloadFactory.cpp | 9
-rw-r--r--  src/backends/neon/backend.mk | 1
-rw-r--r--  src/backends/neon/test/NeonLayerTests.cpp | 6
-rw-r--r--  src/backends/neon/workloads/CMakeLists.txt | 2
-rw-r--r--  src/backends/neon/workloads/NeonNegWorkload.cpp | 43
-rw-r--r--  src/backends/neon/workloads/NeonNegWorkload.hpp | 28
-rw-r--r--  src/backends/neon/workloads/NeonWorkloads.hpp | 1
-rw-r--r--  src/backends/reference/RefLayerSupport.cpp | 5
-rw-r--r--  src/backends/reference/test/RefLayerTests.cpp | 12
24 files changed, 419 insertions, 6 deletions
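
For orientation before the per-file diffs: a minimal sketch, not part of this commit, of how a caller could exercise the new operator through ArmNN's public graph API. The helper name BuildNegNetwork, the layer names, and the 2x2 Float32 shape are illustrative only.

#include <armnn/ArmNN.hpp>

// Illustrative one-operator graph that routes its input through Neg.
armnn::INetworkPtr BuildNegNetwork()
{
    using namespace armnn;

    INetworkPtr network = INetwork::Create();
    TensorInfo info(TensorShape({ 2, 2 }), DataType::Float32);

    IConnectableLayer* input  = network->AddInputLayer(0, "input");
    IConnectableLayer* neg    = network->AddElementwiseUnaryLayer(
        ElementwiseUnaryDescriptor(UnaryOperation::Neg), "neg");
    IConnectableLayer* output = network->AddOutputLayer(0, "output");

    input->GetOutputSlot(0).Connect(neg->GetInputSlot(0));
    neg->GetOutputSlot(0).Connect(output->GetInputSlot(0));

    input->GetOutputSlot(0).SetTensorInfo(info);
    neg->GetOutputSlot(0).SetTensorInfo(info);

    return network;
}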
diff --git a/src/backends/backendsCommon/WorkloadData.cpp b/src/backends/backendsCommon/WorkloadData.cpp
index bf26056a97..85c074a500 100644
--- a/src/backends/backendsCommon/WorkloadData.cpp
+++ b/src/backends/backendsCommon/WorkloadData.cpp
@@ -3142,7 +3142,8 @@ void ElementwiseUnaryQueueDescriptor::Validate(const WorkloadInfo& workloadInfo)
DataType::Float16,
DataType::Float32,
DataType::QAsymmU8,
- DataType::QSymmS16
+ DataType::QSymmS16,
+ DataType::Signed32
};
ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
diff --git a/src/backends/backendsCommon/common.mk b/src/backends/backendsCommon/common.mk
index ba5518010d..09783ee7ea 100644
--- a/src/backends/backendsCommon/common.mk
+++ b/src/backends/backendsCommon/common.mk
@@ -64,6 +64,7 @@ COMMON_TEST_SOURCES := \
test/layerTests/MaximumTestImpl.cpp \
test/layerTests/MinimumTestImpl.cpp \
test/layerTests/MultiplicationTestImpl.cpp \
+ test/layerTests/NegTestImpl.cpp \
test/layerTests/NormalizationTestImpl.cpp \
test/layerTests/PadTestImpl.cpp \
test/layerTests/Pooling2dTestImpl.cpp \
diff --git a/src/backends/backendsCommon/test/CMakeLists.txt b/src/backends/backendsCommon/test/CMakeLists.txt
index bfaca6c44b..bfd44ef4f8 100644
--- a/src/backends/backendsCommon/test/CMakeLists.txt
+++ b/src/backends/backendsCommon/test/CMakeLists.txt
@@ -113,6 +113,8 @@ list(APPEND armnnBackendsCommonUnitTests_sources
layerTests/MinimumTestImpl.hpp
layerTests/MultiplicationTestImpl.cpp
layerTests/MultiplicationTestImpl.hpp
+ layerTests/NegTestImpl.cpp
+ layerTests/NegTestImpl.hpp
layerTests/NormalizationTestImpl.cpp
layerTests/NormalizationTestImpl.hpp
layerTests/PadTestImpl.cpp
diff --git a/src/backends/backendsCommon/test/LayerTests.hpp b/src/backends/backendsCommon/test/LayerTests.hpp
index 600a261fe3..247ed12048 100644
--- a/src/backends/backendsCommon/test/LayerTests.hpp
+++ b/src/backends/backendsCommon/test/LayerTests.hpp
@@ -37,6 +37,7 @@
#include <backendsCommon/test/layerTests/MeanTestImpl.hpp>
#include <backendsCommon/test/layerTests/MinimumTestImpl.hpp>
#include <backendsCommon/test/layerTests/MultiplicationTestImpl.hpp>
+#include <backendsCommon/test/layerTests/NegTestImpl.hpp>
#include <backendsCommon/test/layerTests/NormalizationTestImpl.hpp>
#include <backendsCommon/test/layerTests/PadTestImpl.hpp>
#include <backendsCommon/test/layerTests/PermuteTestImpl.hpp>
diff --git a/src/backends/backendsCommon/test/layerTests/NegTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/NegTestImpl.cpp
new file mode 100644
index 0000000000..aace926dcb
--- /dev/null
+++ b/src/backends/backendsCommon/test/layerTests/NegTestImpl.cpp
@@ -0,0 +1,174 @@
+//
+// Copyright © 2020 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "NegTestImpl.hpp"
+#include "ElementwiseUnaryTestImpl.hpp"
+
+
+template<armnn::DataType ArmnnType, typename T>
+LayerTestResult<T, 2> Neg2dTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+ const unsigned int inputShape[] = { 2, 2 };
+
+ std::vector<float> inputValues
+ {
+ 1.f, 1.f,
+ 2.f, 25.f
+ };
+
+ std::vector<float> expectedOutputValues
+ {
+ -1.f, -1.f,
+ -2.f, -25.f
+ };
+
+ return ElementwiseUnaryTestHelper<2, ArmnnType>(
+ workloadFactory,
+ memoryManager,
+ armnn::UnaryOperation::Neg,
+ inputShape,
+ inputValues,
+ inputShape,
+ expectedOutputValues);
+}
+
+template<armnn::DataType ArmnnType, typename T>
+LayerTestResult<T, 3> Neg3dTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+ const unsigned int inputShape[] = { 3, 1, 2 };
+
+ std::vector<float> inputValues
+ {
+ 1.f, 0.f, 3.f,
+ 25.f, 64.f, 100.f
+ };
+
+ std::vector<float> expectedOutputValues
+ {
+ -1.f, 0.f, -3.f,
+ -25.f, -64.f, -100.f
+ };
+
+ return ElementwiseUnaryTestHelper<3, ArmnnType>(
+ workloadFactory,
+ memoryManager,
+ armnn::UnaryOperation::Neg,
+ inputShape,
+ inputValues,
+ inputShape,
+ expectedOutputValues);
+}
+
+template<armnn::DataType ArmnnType, typename T>
+LayerTestResult<T, 2> NegZeroTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+ const unsigned int inputShape[] = { 1, 2 };
+
+ std::vector<float> inputValues
+ {
+ 0.f, 0.f
+ };
+
+ std::vector<float> expectedOutputValues
+ {
+ 0.f, 0.f
+ };
+
+ return ElementwiseUnaryTestHelper<2, ArmnnType>(
+ workloadFactory,
+ memoryManager,
+ armnn::UnaryOperation::Neg,
+ inputShape,
+ inputValues,
+ inputShape,
+ expectedOutputValues);
+}
+
+template<armnn::DataType ArmnnType, typename T>
+LayerTestResult<T, 2> NegNegativeTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+ const unsigned int inputShape[] = { 1, 2 };
+
+ std::vector<float> inputValues
+ {
+ -25.f, -16.f
+ };
+
+ std::vector<float> expectedOutputValues
+ {
+ 25.f, 16.f
+ };
+
+ return ElementwiseUnaryTestHelper<2, ArmnnType>(
+ workloadFactory,
+ memoryManager,
+ armnn::UnaryOperation::Neg,
+ inputShape,
+ inputValues,
+ inputShape,
+ expectedOutputValues);
+}
+
+//
+// Explicit template specializations
+//
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 2>
+Neg2dTest<armnn::DataType::Float32>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::Float16>, 2>
+Neg2dTest<armnn::DataType::Float16>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 2>
+Neg2dTest<armnn::DataType::QAsymmU8>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 2>
+Neg2dTest<armnn::DataType::QSymmS16>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 3>
+Neg3dTest<armnn::DataType::Float32>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::Float16>, 3>
+Neg3dTest<armnn::DataType::Float16>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 3>
+Neg3dTest<armnn::DataType::QAsymmU8>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 3>
+Neg3dTest<armnn::DataType::QSymmS16>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 2>
+NegZeroTest<armnn::DataType::Float32>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 2>
+NegNegativeTest<armnn::DataType::Float32>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
\ No newline at end of file
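
A note on the explicit instantiations above: armnn::ResolveType maps each DataType tag to the C++ element type the LayerTestResult buffers hold, so a single template argument pins down both template parameters. A compile-time sketch of the mappings relied on here:

#include <ResolveType.hpp>

#include <cstdint>
#include <type_traits>

static_assert(std::is_same<armnn::ResolveType<armnn::DataType::Float32>, float>::value,
              "Float32 test data is read back as float");
static_assert(std::is_same<armnn::ResolveType<armnn::DataType::QAsymmU8>, uint8_t>::value,
              "QAsymmU8 test data is read back as uint8_t");
static_assert(std::is_same<armnn::ResolveType<armnn::DataType::QSymmS16>, int16_t>::value,
              "QSymmS16 test data is read back as int16_t");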
diff --git a/src/backends/backendsCommon/test/layerTests/NegTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/NegTestImpl.hpp
new file mode 100644
index 0000000000..6ceacdebcf
--- /dev/null
+++ b/src/backends/backendsCommon/test/layerTests/NegTestImpl.hpp
@@ -0,0 +1,33 @@
+//
+// Copyright © 2020 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "LayerTestResult.hpp"
+
+#include <ResolveType.hpp>
+
+#include <armnn/backends/IBackendInternal.hpp>
+#include <backendsCommon/WorkloadFactory.hpp>
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 2> Neg2dTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 3> Neg3dTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 2> NegZeroTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 2> NegNegativeTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
diff --git a/src/backends/cl/ClLayerSupport.cpp b/src/backends/cl/ClLayerSupport.cpp
index cdb93d7218..7f7554ab54 100644
--- a/src/backends/cl/ClLayerSupport.cpp
+++ b/src/backends/cl/ClLayerSupport.cpp
@@ -40,6 +40,7 @@
#include "workloads/ClConcatWorkload.hpp"
#include "workloads/ClMinimumWorkload.hpp"
#include "workloads/ClMultiplicationWorkload.hpp"
+#include "workloads/ClNegWorkload.hpp"
#include "workloads/ClNormalizationFloatWorkload.hpp"
#include "workloads/ClPadWorkload.hpp"
#include "workloads/ClPermuteWorkload.hpp"
@@ -415,6 +416,13 @@ bool ClLayerSupport::IsElementwiseUnarySupported(const TensorInfo& input,
input,
output);
}
+ else if (descriptor.m_Operation == UnaryOperation::Neg)
+ {
+ FORWARD_WORKLOAD_VALIDATE_FUNC(ClNegWorkloadValidate,
+ reasonIfUnsupported,
+ input,
+ output);
+ }
return false;
}
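
With this branch in place, callers can probe CL support for Neg before committing to the backend. A sketch, assuming the internal cl/ClLayerSupport.hpp header is reachable from the call site; ClSupportsNeg is a hypothetical helper, not part of this patch.

#include <armnn/Descriptors.hpp>
#include <armnn/Optional.hpp>
#include <armnn/Tensor.hpp>

#include <cl/ClLayerSupport.hpp>

#include <string>

bool ClSupportsNeg(const armnn::TensorInfo& input,
                   const armnn::TensorInfo& output,
                   std::string& reason)
{
    armnn::ClLayerSupport layerSupport;
    armnn::ElementwiseUnaryDescriptor descriptor(armnn::UnaryOperation::Neg);

    // Forwards to ClNegWorkloadValidate via FORWARD_WORKLOAD_VALIDATE_FUNC;
    // on failure 'reason' receives ACL's error description.
    return layerSupport.IsElementwiseUnarySupported(
        input, output, descriptor, armnn::Optional<std::string&>(reason));
}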
diff --git a/src/backends/cl/ClWorkloadFactory.cpp b/src/backends/cl/ClWorkloadFactory.cpp
index 4f707beebe..ead0bc36a4 100644
--- a/src/backends/cl/ClWorkloadFactory.cpp
+++ b/src/backends/cl/ClWorkloadFactory.cpp
@@ -272,6 +272,10 @@ std::unique_ptr<IWorkload> ClWorkloadFactory::CreateElementwiseUnary(const Eleme
return MakeWorkload<ClRsqrtWorkload>(rsqrtQueueDescriptor, info);
}
+ else if (descriptor.m_Parameters.m_Operation == UnaryOperation::Neg)
+ {
+ return MakeWorkload<ClNegWorkload>(descriptor, info);
+ }
return MakeWorkload<NullWorkload, NullWorkload>(descriptor, info);
}
diff --git a/src/backends/cl/backend.mk b/src/backends/cl/backend.mk
index e326add9e9..c8da9b714b 100644
--- a/src/backends/cl/backend.mk
+++ b/src/backends/cl/backend.mk
@@ -47,6 +47,7 @@ BACKEND_SOURCES := \
workloads/ClMeanWorkload.cpp \
workloads/ClMinimumWorkload.cpp \
workloads/ClMultiplicationWorkload.cpp \
+ workloads/ClNegWorkload.cpp \
workloads/ClNormalizationFloatWorkload.cpp \
workloads/ClPadWorkload.cpp \
workloads/ClPermuteWorkload.cpp \
diff --git a/src/backends/cl/test/ClLayerTests.cpp b/src/backends/cl/test/ClLayerTests.cpp
index a79f1ca1ce..df80da215e 100644
--- a/src/backends/cl/test/ClLayerTests.cpp
+++ b/src/backends/cl/test/ClLayerTests.cpp
@@ -949,6 +949,14 @@ ARMNN_AUTO_TEST_CASE(ArgMaxChannelQAsymm8, ArgMaxChannelTest<DataType::QAsymmU8>
ARMNN_AUTO_TEST_CASE(ArgMaxHeightQAsymm8, ArgMaxHeightTest<DataType::QAsymmU8>)
ARMNN_AUTO_TEST_CASE(ArgMinWidthQAsymm8, ArgMinWidthTest<DataType::QAsymmU8>)
+// Neg
+ARMNN_AUTO_TEST_CASE(Neg2d, Neg2dTest<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE(Neg3d, Neg3dTest<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE(NegZero, NegZeroTest<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE(NegNegative, NegNegativeTest<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE(Neg2dFloat16, Neg2dTest<DataType::Float16>)
+ARMNN_AUTO_TEST_CASE(Neg3dFloat16, Neg3dTest<DataType::Float16>)
+
#if defined(ARMNNREF_ENABLED)
// The ARMNN_COMPARE_REF_AUTO_TEST_CASE and the ARMNN_COMPARE_REF_FIXTURE_TEST_CASE test units are not available
diff --git a/src/backends/cl/workloads/CMakeLists.txt b/src/backends/cl/workloads/CMakeLists.txt
index 17d69b1ed5..3f964eb1a6 100644
--- a/src/backends/cl/workloads/CMakeLists.txt
+++ b/src/backends/cl/workloads/CMakeLists.txt
@@ -54,6 +54,8 @@ list(APPEND armnnClBackendWorkloads_sources
ClMinimumWorkload.hpp
ClMultiplicationWorkload.cpp
ClMultiplicationWorkload.hpp
+ ClNegWorkload.cpp
+ ClNegWorkload.hpp
ClNormalizationFloatWorkload.cpp
ClNormalizationFloatWorkload.hpp
ClPadWorkload.cpp
diff --git a/src/backends/cl/workloads/ClNegWorkload.cpp b/src/backends/cl/workloads/ClNegWorkload.cpp
new file mode 100644
index 0000000000..cc6333fff9
--- /dev/null
+++ b/src/backends/cl/workloads/ClNegWorkload.cpp
@@ -0,0 +1,44 @@
+//
+// Copyright © 2020 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "ClNegWorkload.hpp"
+
+#include "ClWorkloadUtils.hpp"
+
+#include <aclCommon/ArmComputeTensorUtils.hpp>
+
+#include <cl/ClTensorHandle.hpp>
+
+#include <boost/cast.hpp>
+
+namespace armnn
+{
+
+arm_compute::Status ClNegWorkloadValidate(const TensorInfo& input, const TensorInfo& output)
+{
+ const arm_compute::TensorInfo aclInput = armcomputetensorutils::BuildArmComputeTensorInfo(input);
+ const arm_compute::TensorInfo aclOutput = armcomputetensorutils::BuildArmComputeTensorInfo(output);
+
+ return arm_compute::CLNegLayer::validate(&aclInput, &aclOutput);
+}
+
+ClNegWorkload::ClNegWorkload(const ElementwiseUnaryQueueDescriptor& descriptor, const WorkloadInfo& info)
+ : BaseWorkload<ElementwiseUnaryQueueDescriptor>(descriptor, info)
+{
+ m_Data.ValidateInputsOutputs("ClNegWorkload", 1, 1);
+
+ arm_compute::ICLTensor& input = boost::polymorphic_downcast<ClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+ arm_compute::ICLTensor& output = boost::polymorphic_downcast<ClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+
+ m_NegLayer.configure(&input, &output);
+}
+
+void ClNegWorkload::Execute() const
+{
+ ARMNN_SCOPED_PROFILING_EVENT_CL("ClNegWorkload_Execute");
+ RunClFunction(m_NegLayer, CHECK_LOCATION());
+}
+
+} // namespace armnn
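
ClNegWorkloadValidate is the piece ClLayerSupport consults: ACL performs a dry run against the tensor infos, and only when it reports OK does the constructor ever configure a CLNegLayer. A sketch of that contract, assuming the workload header is on the include path; CanRunNegOnCl is a hypothetical helper.

#include <armnn/Tensor.hpp>

#include <cl/workloads/ClNegWorkload.hpp>

#include <string>

// Hypothetical helper: validate before any workload is constructed.
bool CanRunNegOnCl(const armnn::TensorInfo& inputInfo,
                   const armnn::TensorInfo& outputInfo,
                   std::string& reason)
{
    const arm_compute::Status status = armnn::ClNegWorkloadValidate(inputInfo, outputInfo);
    if (status.error_code() != arm_compute::ErrorCode::OK)
    {
        reason = status.error_description(); // ACL explains the rejection
        return false;
    }
    return true;
}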
diff --git a/src/backends/cl/workloads/ClNegWorkload.hpp b/src/backends/cl/workloads/ClNegWorkload.hpp
new file mode 100644
index 0000000000..9dbfa07665
--- /dev/null
+++ b/src/backends/cl/workloads/ClNegWorkload.hpp
@@ -0,0 +1,28 @@
+//
+// Copyright © 2020 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <backendsCommon/Workload.hpp>
+
+#include <arm_compute/core/Error.h>
+#include <arm_compute/runtime/CL/functions/CLElementWiseUnaryLayer.h>
+
+namespace armnn
+{
+
+arm_compute::Status ClNegWorkloadValidate(const TensorInfo& input, const TensorInfo& output);
+
+class ClNegWorkload : public BaseWorkload<ElementwiseUnaryQueueDescriptor>
+{
+public:
+ ClNegWorkload(const ElementwiseUnaryQueueDescriptor& descriptor, const WorkloadInfo& info);
+ virtual void Execute() const override;
+
+private:
+ mutable arm_compute::CLNegLayer m_NegLayer;
+};
+
+} // namespace armnn
diff --git a/src/backends/cl/workloads/ClWorkloads.hpp b/src/backends/cl/workloads/ClWorkloads.hpp
index ec193d5e3e..c7c016379e 100644
--- a/src/backends/cl/workloads/ClWorkloads.hpp
+++ b/src/backends/cl/workloads/ClWorkloads.hpp
@@ -27,6 +27,7 @@
#include "ClMeanWorkload.hpp"
#include "ClMinimumWorkload.hpp"
#include "ClMultiplicationWorkload.hpp"
+#include "ClNegWorkload.hpp"
#include "ClNormalizationFloatWorkload.hpp"
#include "ClPermuteWorkload.hpp"
#include "ClPadWorkload.hpp"
diff --git a/src/backends/neon/NeonLayerSupport.cpp b/src/backends/neon/NeonLayerSupport.cpp
index 78776124ec..999b158f74 100644
--- a/src/backends/neon/NeonLayerSupport.cpp
+++ b/src/backends/neon/NeonLayerSupport.cpp
@@ -38,6 +38,7 @@
#include "workloads/NeonMinimumWorkload.hpp"
#include "workloads/NeonMultiplicationWorkload.hpp"
#include "workloads/NeonDivisionWorkload.hpp"
+#include "workloads/NeonNegWorkload.hpp"
#include "workloads/NeonNormalizationFloatWorkload.hpp"
#include "workloads/NeonFullyConnectedWorkload.hpp"
#include "workloads/NeonPadWorkload.hpp"
@@ -370,6 +371,13 @@ bool NeonLayerSupport::IsElementwiseUnarySupported(const TensorInfo& input,
input,
output);
}
+ else if (descriptor.m_Operation == UnaryOperation::Neg)
+ {
+ FORWARD_WORKLOAD_VALIDATE_FUNC(NeonNegWorkloadValidate,
+ reasonIfUnsupported,
+ input,
+ output);
+ }
return false;
}
diff --git a/src/backends/neon/NeonWorkloadFactory.cpp b/src/backends/neon/NeonWorkloadFactory.cpp
index cc7dca031d..bf26d82a76 100644
--- a/src/backends/neon/NeonWorkloadFactory.cpp
+++ b/src/backends/neon/NeonWorkloadFactory.cpp
@@ -218,9 +218,8 @@ std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateDivision(
return std::make_unique<NeonDivisionWorkload>(descriptor, info);
}
-std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateElementwiseUnary(const ElementwiseUnaryQueueDescriptor&
- descriptor,
- const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateElementwiseUnary(
+ const ElementwiseUnaryQueueDescriptor& descriptor, const WorkloadInfo& info) const
{
if (descriptor.m_Parameters.m_Operation == UnaryOperation::Abs)
{
@@ -238,6 +237,10 @@ std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateElementwiseUnary(const Ele
return std::make_unique<NeonRsqrtWorkload>(rsqrtQueueDescriptor, info);
}
+ else if (descriptor.m_Parameters.m_Operation == UnaryOperation::Neg)
+ {
+ return std::make_unique<NeonNegWorkload>(descriptor, info);
+ }
return MakeWorkloadHelper<NullWorkload, NullWorkload>(descriptor, info);
}
diff --git a/src/backends/neon/backend.mk b/src/backends/neon/backend.mk
index d9a5405983..3cb8bd582a 100644
--- a/src/backends/neon/backend.mk
+++ b/src/backends/neon/backend.mk
@@ -47,6 +47,7 @@ BACKEND_SOURCES := \
workloads/NeonMinimumWorkload.cpp \
workloads/NeonMultiplicationWorkload.cpp \
workloads/NeonDivisionWorkload.cpp \
+ workloads/NeonNegWorkload.cpp \
workloads/NeonNormalizationFloatWorkload.cpp \
workloads/NeonPadWorkload.cpp \
workloads/NeonPermuteWorkload.cpp \
diff --git a/src/backends/neon/test/NeonLayerTests.cpp b/src/backends/neon/test/NeonLayerTests.cpp
index d3ca675679..06b5597fcf 100644
--- a/src/backends/neon/test/NeonLayerTests.cpp
+++ b/src/backends/neon/test/NeonLayerTests.cpp
@@ -1069,6 +1069,12 @@ ARMNN_AUTO_TEST_CASE(ArgMaxChannelQAsymm8, ArgMaxChannelTest<DataType::QAsymmU8>
ARMNN_AUTO_TEST_CASE(ArgMaxHeightQAsymm8, ArgMaxHeightTest<DataType::QAsymmU8>)
ARMNN_AUTO_TEST_CASE(ArgMinWidthQAsymm8, ArgMinWidthTest<DataType::QAsymmU8>)
+// Neg
+ARMNN_AUTO_TEST_CASE(Neg2d, Neg2dTest<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE(Neg3d, Neg3dTest<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE(NegZero, NegZeroTest<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE(NegNegative, NegNegativeTest<DataType::Float32>)
+
#if defined(ARMNNREF_ENABLED)
// The ARMNN_COMPARE_REF_AUTO_TEST_CASE and the ARMNN_COMPARE_REF_FIXTURE_TEST_CASE test units are not available
diff --git a/src/backends/neon/workloads/CMakeLists.txt b/src/backends/neon/workloads/CMakeLists.txt
index a932f8b852..8160710f65 100644
--- a/src/backends/neon/workloads/CMakeLists.txt
+++ b/src/backends/neon/workloads/CMakeLists.txt
@@ -56,6 +56,8 @@ list(APPEND armnnNeonBackendWorkloads_sources
NeonMultiplicationWorkload.hpp
NeonDivisionWorkload.cpp
NeonDivisionWorkload.hpp
+ NeonNegWorkload.cpp
+ NeonNegWorkload.hpp
NeonNormalizationFloatWorkload.cpp
NeonNormalizationFloatWorkload.hpp
NeonPadWorkload.cpp
diff --git a/src/backends/neon/workloads/NeonNegWorkload.cpp b/src/backends/neon/workloads/NeonNegWorkload.cpp
new file mode 100644
index 0000000000..afe05583fd
--- /dev/null
+++ b/src/backends/neon/workloads/NeonNegWorkload.cpp
@@ -0,0 +1,43 @@
+//
+// Copyright © 2020 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "NeonNegWorkload.hpp"
+
+#include "NeonWorkloadUtils.hpp"
+
+#include <aclCommon/ArmComputeTensorHandle.hpp>
+#include <aclCommon/ArmComputeTensorUtils.hpp>
+
+#include <boost/cast.hpp>
+
+namespace armnn
+{
+
+arm_compute::Status NeonNegWorkloadValidate(const TensorInfo& input, const TensorInfo& output)
+{
+ const arm_compute::TensorInfo aclInput = armcomputetensorutils::BuildArmComputeTensorInfo(input);
+ const arm_compute::TensorInfo aclOutput = armcomputetensorutils::BuildArmComputeTensorInfo(output);
+
+ return arm_compute::NENegLayer::validate(&aclInput, &aclOutput);
+}
+
+NeonNegWorkload::NeonNegWorkload(const ElementwiseUnaryQueueDescriptor& descriptor, const WorkloadInfo& info)
+ : BaseWorkload<ElementwiseUnaryQueueDescriptor>(descriptor, info)
+{
+ m_Data.ValidateInputsOutputs("NeonNegWorkload", 1, 1);
+
+ arm_compute::ITensor& input = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+ arm_compute::ITensor& output = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+
+ m_NegLayer.configure(&input, &output);
+}
+
+void NeonNegWorkload::Execute() const
+{
+ ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonNegWorkload_Execute");
+ m_NegLayer.run();
+}
+
+} // namespace armnn
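
One detail both new workloads share: IWorkload::Execute() is const, while ACL function objects mutate internal state in run(), which is why m_NegLayer is declared mutable. Reduced to its essentials (ExamplePattern is illustrative, not code from this patch):

#include <arm_compute/runtime/NEON/functions/NEElementwiseUnaryLayer.h>

class ExamplePattern
{
public:
    void Execute() const
    {
        m_Layer.run();                       // run() is non-const in ACL...
    }

private:
    mutable arm_compute::NENegLayer m_Layer; // ...so the member is mutable
};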
diff --git a/src/backends/neon/workloads/NeonNegWorkload.hpp b/src/backends/neon/workloads/NeonNegWorkload.hpp
new file mode 100644
index 0000000000..7b2ed17a3e
--- /dev/null
+++ b/src/backends/neon/workloads/NeonNegWorkload.hpp
@@ -0,0 +1,28 @@
+//
+// Copyright © 2020 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <backendsCommon/Workload.hpp>
+
+#include <arm_compute/core/Error.h>
+#include <arm_compute/runtime/NEON/functions/NEElementwiseUnaryLayer.h>
+
+namespace armnn
+{
+
+arm_compute::Status NeonNegWorkloadValidate(const TensorInfo& input, const TensorInfo& output);
+
+class NeonNegWorkload : public BaseWorkload<ElementwiseUnaryQueueDescriptor>
+{
+public:
+ NeonNegWorkload(const ElementwiseUnaryQueueDescriptor& descriptor, const WorkloadInfo& info);
+ virtual void Execute() const override;
+
+private:
+ mutable arm_compute::NENegLayer m_NegLayer;
+};
+
+} // namespace armnn
diff --git a/src/backends/neon/workloads/NeonWorkloads.hpp b/src/backends/neon/workloads/NeonWorkloads.hpp
index 52cd76f14b..e28d120de7 100644
--- a/src/backends/neon/workloads/NeonWorkloads.hpp
+++ b/src/backends/neon/workloads/NeonWorkloads.hpp
@@ -31,6 +31,7 @@
#include "NeonConcatWorkload.hpp"
#include "NeonMinimumWorkload.hpp"
#include "NeonMultiplicationWorkload.hpp"
+#include "NeonNegWorkload.hpp"
#include "NeonNormalizationFloatWorkload.hpp"
#include "NeonPadWorkload.hpp"
#include "NeonPermuteWorkload.hpp"
diff --git a/src/backends/reference/RefLayerSupport.cpp b/src/backends/reference/RefLayerSupport.cpp
index 7d3600cf09..551a7b5867 100644
--- a/src/backends/reference/RefLayerSupport.cpp
+++ b/src/backends/reference/RefLayerSupport.cpp
@@ -774,13 +774,14 @@ bool RefLayerSupport::IsElementwiseUnarySupported(const TensorInfo& input,
{
IgnoreUnused(descriptor);
- std::array<DataType, 5> supportedTypes =
+ std::array<DataType, 6> supportedTypes =
{
DataType::BFloat16,
DataType::Float32,
DataType::Float16,
DataType::QAsymmU8,
- DataType::QSymmS16
+ DataType::QSymmS16,
+ DataType::Signed32
};
bool supported = true;
diff --git a/src/backends/reference/test/RefLayerTests.cpp b/src/backends/reference/test/RefLayerTests.cpp
index f5c1c65c88..13b3ecfd4e 100644
--- a/src/backends/reference/test/RefLayerTests.cpp
+++ b/src/backends/reference/test/RefLayerTests.cpp
@@ -1737,4 +1737,16 @@ ARMNN_AUTO_TEST_CASE(StackOutput3DInputs3, StackOutput3DInputs3Float32Test)
ARMNN_AUTO_TEST_CASE(StackOutput5D, StackOutput5DFloat32Test)
ARMNN_AUTO_TEST_CASE(StackFloat16, StackFloat16Test)
+// Neg
+ARMNN_AUTO_TEST_CASE(Neg2d, Neg2dTest<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE(Neg3d, Neg3dTest<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE(NegZero, NegZeroTest<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE(NegNegative, NegNegativeTest<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE(Neg2dFloat16, Neg2dTest<DataType::Float16>)
+ARMNN_AUTO_TEST_CASE(Neg3dFloat16, Neg3dTest<DataType::Float16>)
+ARMNN_AUTO_TEST_CASE(Neg2dQuantisedAsymm8, Neg2dTest<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(Neg3dQuantisedAsymm8, Neg3dTest<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(Neg2dQuantisedSymm16, Neg2dTest<DataType::QSymmS16>)
+ARMNN_AUTO_TEST_CASE(Neg3dQuantisedSymm16, Neg3dTest<DataType::QSymmS16>)
+
BOOST_AUTO_TEST_SUITE_END()
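
The quantized Ref cases above exercise Neg in real-number space: assuming the reference elementwise path follows the usual Decoder/Encoder pattern (dequantize, apply the op in float, requantize with saturation), the expected QAsymmU8 output can be computed by hand. ExpectedQuantizedNeg is a hypothetical helper for checking a single element.

#include <algorithm>
#include <cmath>
#include <cstdint>

// Hypothetical helper mirroring the assumed reference-backend behaviour.
uint8_t ExpectedQuantizedNeg(uint8_t in, float scale, int32_t offset)
{
    const float real = scale * (static_cast<int32_t>(in) - offset);  // dequantize
    const int32_t q  = static_cast<int32_t>(std::round(-real / scale)) + offset;
    return static_cast<uint8_t>(std::min(255, std::max(0, q)));      // saturate to U8
}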