aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorJohn Mcloughlin <john.mcloughlin@arm.com>2023-05-17 15:08:36 +0100
committerTeresaARM <teresa.charlinreyes@arm.com>2023-05-18 08:20:01 +0000
commit34c1c38944b47b881febdfb9f98103dbdc949ed0 (patch)
tree9de33ff04c7c98fb917026690406ba178110ca95
parent0ec008761ab26110dcb108d544be4040a14fd403 (diff)
downloadarmnn-34c1c38944b47b881febdfb9f98103dbdc949ed0.tar.gz
IVGCVSW-7400 POW IVGCVSW-7278 SQUARED_DIFFERENCE to CpuAcc and GpuAcc
* Add POW SQUARED_DIFFERENCE and Unit tests for CpuAcc and GpuAcc Signed-off-by: John Mcloughlin <john.mcloughlin@arm.com> Signed-off-by: Teresa Charlin <teresa.charlinreyes@arm.com> Change-Id: Ifa78af2a2fda2074586d8e4d9a506b1b13fa5755
-rw-r--r--src/backends/cl/ClLayerSupport.cpp10
-rw-r--r--src/backends/cl/ClWorkloadFactory.cpp7
-rw-r--r--src/backends/cl/backend.mk3
-rw-r--r--src/backends/cl/test/ClEndToEndTests.cpp19
-rw-r--r--src/backends/cl/test/ClLayerTests.cpp42
-rw-r--r--src/backends/cl/workloads/CMakeLists.txt4
-rw-r--r--src/backends/cl/workloads/ClElementwiseBinaryWorkload.cpp94
-rw-r--r--src/backends/cl/workloads/ClElementwiseBinaryWorkload.hpp34
-rw-r--r--src/backends/cl/workloads/ClWorkloads.hpp3
-rw-r--r--src/backends/neon/NeonLayerSupport.cpp10
-rw-r--r--src/backends/neon/NeonWorkloadFactory.cpp5
-rw-r--r--src/backends/neon/backend.mk3
-rw-r--r--src/backends/neon/test/NeonEndToEndTests.cpp22
-rw-r--r--src/backends/neon/test/NeonLayerTests.cpp18
-rw-r--r--src/backends/neon/workloads/CMakeLists.txt4
-rw-r--r--src/backends/neon/workloads/NeonElementwiseBinaryWorkload.cpp92
-rw-r--r--src/backends/neon/workloads/NeonElementwiseBinaryWorkload.hpp33
-rw-r--r--src/backends/neon/workloads/NeonWorkloads.hpp3
18 files changed, 400 insertions, 6 deletions
diff --git a/src/backends/cl/ClLayerSupport.cpp b/src/backends/cl/ClLayerSupport.cpp
index 6fa4f3ce51..ff2b576f3d 100644
--- a/src/backends/cl/ClLayerSupport.cpp
+++ b/src/backends/cl/ClLayerSupport.cpp
@@ -37,6 +37,7 @@
#include "workloads/ClDepthwiseConvolutionWorkload.hpp"
#include "workloads/ClDequantizeWorkload.hpp"
#include "workloads/ClDivisionWorkload.hpp"
+#include "workloads/ClElementwiseBinaryWorkload.hpp"
#include "workloads/ClExpWorkload.hpp"
#include "workloads/ClFillWorkload.hpp"
#include "workloads/ClFloorFloatWorkload.hpp"
@@ -390,6 +391,15 @@ bool ClLayerSupport::IsLayerSupported(const LayerType& type,
infos[1],
infos[2],
nullptr);
+ case BinaryOperation::Power:
+ case BinaryOperation::SqDiff:
+ FORWARD_WORKLOAD_VALIDATE_FUNC(ClElementwiseBinaryValidate,
+ reasonIfUnsupported,
+ infos[0],
+ infos[1],
+ infos[2],
+ desc,
+ nullptr);
case BinaryOperation::Sub:
FORWARD_WORKLOAD_VALIDATE_FUNC(ClSubtractionValidate,
reasonIfUnsupported,
diff --git a/src/backends/cl/ClWorkloadFactory.cpp b/src/backends/cl/ClWorkloadFactory.cpp
index 022867710c..493080f7af 100644
--- a/src/backends/cl/ClWorkloadFactory.cpp
+++ b/src/backends/cl/ClWorkloadFactory.cpp
@@ -459,6 +459,13 @@ std::unique_ptr<IWorkload> ClWorkloadFactory::CreateWorkload(LayerType type,
info,
m_CLCompileContext);
}
+ case BinaryOperation::Power:
+ case BinaryOperation::SqDiff:
+ {
+ return std::make_unique<ClElementwiseBinaryWorkload>(*elementwiseBinaryQueueDescriptor,
+ info,
+ m_CLCompileContext);
+ }
case BinaryOperation::Sub:
{
SubtractionQueueDescriptor subtractionQueueDescriptor;
diff --git a/src/backends/cl/backend.mk b/src/backends/cl/backend.mk
index 1f97ae7cc8..03f1a9540d 100644
--- a/src/backends/cl/backend.mk
+++ b/src/backends/cl/backend.mk
@@ -1,5 +1,5 @@
#
-# Copyright © 2017 ARM Ltd and Contributors. All rights reserved.
+# Copyright © 2017,2023 ARM Ltd and Contributors. All rights reserved.
# SPDX-License-Identifier: MIT
#
@@ -46,6 +46,7 @@ BACKEND_SOURCES := \
workloads/ClDepthwiseConvolutionWorkload.cpp \
workloads/ClDequantizeWorkload.cpp \
workloads/ClDivisionWorkload.cpp \
+ workloads/ClElementwiseBinaryWorkload.cpp \
workloads/ClExpWorkload.cpp \
workloads/ClFillWorkload.cpp \
workloads/ClFloorFloatWorkload.cpp \
diff --git a/src/backends/cl/test/ClEndToEndTests.cpp b/src/backends/cl/test/ClEndToEndTests.cpp
index a6ddd97ecf..091526fd2b 100644
--- a/src/backends/cl/test/ClEndToEndTests.cpp
+++ b/src/backends/cl/test/ClEndToEndTests.cpp
@@ -13,6 +13,7 @@
#include <backendsCommon/test/ConcatEndToEndTestImpl.hpp>
#include <backendsCommon/test/DepthToSpaceEndToEndTestImpl.hpp>
#include <backendsCommon/test/DequantizeEndToEndTestImpl.hpp>
+#include <backendsCommon/test/ElementwiseBinaryEndToEndTestImpl.hpp>
#include <backendsCommon/test/ElementwiseUnaryEndToEndTestImpl.hpp>
#include <backendsCommon/test/FillEndToEndTestImpl.hpp>
#include <backendsCommon/test/InstanceNormalizationEndToEndTestImpl.hpp>
@@ -46,6 +47,7 @@ TEST_CASE("ClRsqrtEndToEndTestFloat32")
UnaryOperation::Rsqrt);
}
+// ElementwiseBinary
// Addition
TEST_CASE("ClAdditionEndToEndFloat32Test")
{
@@ -57,6 +59,23 @@ TEST_CASE("ClAdditionEndToEndUint8Test")
AdditionEndToEnd<armnn::DataType::QAsymmU8>(clDefaultBackends);
}
+// Power
+TEST_CASE("ClPowerEndToEndTestFloat32")
+{
+ ElementwiseBinarySimpleEndToEnd<armnn::DataType::Float32>(clDefaultBackends, BinaryOperation::Power);
+}
+
+// SqDiff
+TEST_CASE("ClSquaredDifferenceEndToEndTestFloat32")
+{
+ ElementwiseBinarySimpleEndToEnd<armnn::DataType::Float32>(clDefaultBackends, BinaryOperation::SqDiff);
+}
+
+TEST_CASE("ClSquaredDifferenceEndToEndTestUint8")
+{
+ ElementwiseBinarySimpleEndToEnd<armnn::DataType::QAsymmU8>(clDefaultBackends, BinaryOperation::SqDiff);
+}
+
// Batch Mat Mul
TEST_CASE("ClBatchMatMulEndToEndFloat32Test")
{
diff --git a/src/backends/cl/test/ClLayerTests.cpp b/src/backends/cl/test/ClLayerTests.cpp
index a84ecc9f9f..03a4d6fc49 100644
--- a/src/backends/cl/test/ClLayerTests.cpp
+++ b/src/backends/cl/test/ClLayerTests.cpp
@@ -913,6 +913,48 @@ ARMNN_AUTO_TEST_FIXTURE_WITH_THF(MultiplicationBroadcast1DVectorUint8,
MultiplicationBroadcast1DVectorUint8Test)
ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Multiplication5d, ClContextControlFixture, Multiplication5dTest)
+// SquaredDifference
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimpleSquaredDifference, ClContextControlFixture, SquaredDifferenceTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SquaredDiffBroadcast1Element,
+ ClContextControlFixture,
+ SquaredDiffBroadcast1ElementTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SquaredDiffBroadcast, ClContextControlFixture, SquaredDiffBroadcastTest)
+
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SquaredDifferenceFloat16, ClContextControlFixture, SquaredDifferenceFloat16Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SquaredDiffBroadcast1ElementFloat16,
+ ClContextControlFixture,
+ SquaredDiffBroadcast1ElementFloat16Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SquaredDiffBroadcastFloat16, ClContextControlFixture, SquaredDiffBroadcastFloat16Test)
+
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SquaredDifferenceUint8, ClContextControlFixture, SquaredDifferenceUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SquaredDiffBroadcastUint8, ClContextControlFixture, SquaredDiffBroadcastUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SquaredDiffBroadcast1ElementUint8,
+ ClContextControlFixture,
+ SquaredDiffBroadcast1ElementUint8Test)
+
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SquaredDifferenceInt16, ClContextControlFixture, SquaredDifferenceInt16Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SquaredDiffBroadcastInt16, ClContextControlFixture, SquaredDiffBroadcastInt16Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SquaredDiffBroadcast1ElementInt16,
+ ClContextControlFixture,
+ SquaredDiffBroadcast1ElementInt16Test)
+
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SquaredDifferenceInt32, ClContextControlFixture, SquaredDifferenceInt32Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SquaredDiffBroadcastInt32, ClContextControlFixture, SquaredDiffBroadcastInt32Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SquaredDiffBroadcast1ElementInt32,
+ ClContextControlFixture,
+ SquaredDiffBroadcast1ElementInt32Test)
+
+// Power
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimplePower, ClContextControlFixture, PowerTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(PowerBroadcast1Element, ClContextControlFixture, PowerBroadcast1ElementTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(PowerBroadcast, ClContextControlFixture, PowerBroadcastTest)
+
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(PowerFloat16, ClContextControlFixture, PowerFloat16Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(PowerBroadcast1ElementFloat16,
+ ClContextControlFixture,
+ PowerBroadcast1ElementFloat16Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(PowerBroadcastFloat16, ClContextControlFixture, PowerBroadcastFloat16Test)
+
// Batch Norm
ARMNN_AUTO_TEST_FIXTURE_WITH_THF(BatchNormFloat32, ClContextControlFixture, BatchNormFloat32Test)
ARMNN_AUTO_TEST_FIXTURE_WITH_THF(BatchNormFloat32Nhwc, ClContextControlFixture, BatchNormFloat32NhwcTest)
diff --git a/src/backends/cl/workloads/CMakeLists.txt b/src/backends/cl/workloads/CMakeLists.txt
index 8616dec078..030d71988f 100644
--- a/src/backends/cl/workloads/CMakeLists.txt
+++ b/src/backends/cl/workloads/CMakeLists.txt
@@ -1,5 +1,5 @@
#
-# Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+# Copyright © 2017, 2023 Arm Ltd and Contributors. All rights reserved.
# SPDX-License-Identifier: MIT
#
@@ -44,6 +44,8 @@ list(APPEND armnnClBackendWorkloads_sources
ClDequantizeWorkload.hpp
ClDivisionWorkload.cpp
ClDivisionWorkload.hpp
+ ClElementwiseBinaryWorkload.cpp
+ ClElementwiseBinaryWorkload.hpp
ClExpWorkload.cpp
ClExpWorkload.hpp
ClFillWorkload.cpp
diff --git a/src/backends/cl/workloads/ClElementwiseBinaryWorkload.cpp b/src/backends/cl/workloads/ClElementwiseBinaryWorkload.cpp
new file mode 100644
index 0000000000..df30feb52a
--- /dev/null
+++ b/src/backends/cl/workloads/ClElementwiseBinaryWorkload.cpp
@@ -0,0 +1,94 @@
+//
+// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "ClElementwiseBinaryWorkload.hpp"
+
+#include <cl/ClTensorHandle.hpp>
+#include <armnn/backends/TensorHandle.hpp>
+#include <aclCommon/ArmComputeTensorUtils.hpp>
+#include <aclCommon/ArmComputeUtils.hpp>
+
+#include "ClWorkloadUtils.hpp"
+
+namespace armnn
+{
+using namespace armcomputetensorutils;
+
+ClElementwiseBinaryWorkload::ClElementwiseBinaryWorkload(const ElementwiseBinaryQueueDescriptor& descriptor,
+ const WorkloadInfo& info,
+ const arm_compute::CLCompileContext& clCompileContext)
+ : ClBaseWorkload<ElementwiseBinaryQueueDescriptor>(descriptor, info)
+{
+ this->m_Data.ValidateInputsOutputs("ClElementwiseBinaryWorkload", 2, 1);
+
+ arm_compute::ICLTensor &input0 = static_cast<IClTensorHandle *>(this->m_Data.m_Inputs[0])->GetTensor();
+ arm_compute::ICLTensor &input1 = static_cast<IClTensorHandle *>(this->m_Data.m_Inputs[1])->GetTensor();
+ arm_compute::ICLTensor &output = static_cast<IClTensorHandle *>(this->m_Data.m_Outputs[0])->GetTensor();
+
+ const arm_compute::ActivationLayerInfo activationInfo = ConvertAdditionalInfoToAclActivationLayerInfo(descriptor);
+ {
+ ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClElementwiseBinaryWorkload_configure");
+
+ switch (descriptor.m_Parameters.m_Operation)
+ {
+ case armnn::BinaryOperation::Power:
+ {
+ auto powerLayer = std::make_unique<arm_compute::CLElementwisePower>();
+ powerLayer->configure(clCompileContext, &input0, &input1, &output, activationInfo);
+ m_ElementwiseBinaryLayer.reset(powerLayer.release());
+ break;
+ }
+ case armnn::BinaryOperation::SqDiff:
+ {
+ auto SqDiffLayer = std::make_unique<arm_compute::CLElementwiseSquaredDiff>();
+ SqDiffLayer->configure(clCompileContext, &input0, &input1, &output, activationInfo);
+ m_ElementwiseBinaryLayer.reset(SqDiffLayer.release());
+ break;
+ }
+ default:
+ throw InvalidArgumentException("Unknown binary operator", CHECK_LOCATION());
+ }
+ }
+}
+void ClElementwiseBinaryWorkload::Execute() const
+{
+ if (m_ElementwiseBinaryLayer)
+ {
+ ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClElementwiseBinaryWorkload_Execute", this->GetGuid());
+ m_ElementwiseBinaryLayer->run();
+ }
+}
+
+arm_compute::Status ClElementwiseBinaryValidate(const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output,
+ const ElementwiseBinaryDescriptor& descriptor,
+ const ActivationDescriptor* activationDescriptor)
+{
+ const arm_compute::TensorInfo aclInput0Info = BuildArmComputeTensorInfo(input0);
+ const arm_compute::TensorInfo aclInput1Info = BuildArmComputeTensorInfo(input1);
+ const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output);
+
+ const arm_compute::ActivationLayerInfo activationInfo = ConvertActivationDescriptorToAclActivationLayerInfo(
+ activationDescriptor);
+
+ switch (descriptor.m_Operation)
+ {
+ case armnn::BinaryOperation::Power:
+ return arm_compute::CLElementwisePower::validate(&aclInput0Info,
+ &aclInput1Info,
+ &aclOutputInfo,
+ activationInfo);
+ case armnn::BinaryOperation::SqDiff:
+ return arm_compute::CLElementwiseSquaredDiff::validate(&aclInput0Info,
+ &aclInput1Info,
+ &aclOutputInfo,
+ activationInfo);
+ default:
+ throw InvalidArgumentException("Unknown binary operator", CHECK_LOCATION());
+ }
+}
+
+} //namespace armnn
diff --git a/src/backends/cl/workloads/ClElementwiseBinaryWorkload.hpp b/src/backends/cl/workloads/ClElementwiseBinaryWorkload.hpp
new file mode 100644
index 0000000000..addd6e6085
--- /dev/null
+++ b/src/backends/cl/workloads/ClElementwiseBinaryWorkload.hpp
@@ -0,0 +1,34 @@
+//
+// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "ClBaseWorkload.hpp"
+
+#include <arm_compute/runtime/CL/functions/CLElementwiseOperations.h>
+
+namespace armnn
+{
+
+class ClElementwiseBinaryWorkload : public ClBaseWorkload<ElementwiseBinaryQueueDescriptor>
+{
+public:
+ ClElementwiseBinaryWorkload(const ElementwiseBinaryQueueDescriptor& descriptor,
+ const WorkloadInfo& info,
+ const arm_compute::CLCompileContext& clCompileContext);
+
+ void Execute() const override;
+
+private:
+ std::unique_ptr<arm_compute::IFunction> m_ElementwiseBinaryLayer;
+
+};
+
+arm_compute::Status ClElementwiseBinaryValidate(const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output,
+ const ElementwiseBinaryDescriptor& descriptor,
+ const ActivationDescriptor* activationDescriptor = nullptr);
+} //namespace armnn
diff --git a/src/backends/cl/workloads/ClWorkloads.hpp b/src/backends/cl/workloads/ClWorkloads.hpp
index 44f3798d7d..d862aab949 100644
--- a/src/backends/cl/workloads/ClWorkloads.hpp
+++ b/src/backends/cl/workloads/ClWorkloads.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017,2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -21,6 +21,7 @@
#include "ClDepthwiseConvolutionWorkload.hpp"
#include "ClDequantizeWorkload.hpp"
#include "ClDivisionWorkload.hpp"
+#include "ClElementwiseBinaryWorkload.hpp"
#include "ClExpWorkload.hpp"
#include "ClFillWorkload.hpp"
#include "ClFloorFloatWorkload.hpp"
diff --git a/src/backends/neon/NeonLayerSupport.cpp b/src/backends/neon/NeonLayerSupport.cpp
index cd4dca8edb..d097240022 100644
--- a/src/backends/neon/NeonLayerSupport.cpp
+++ b/src/backends/neon/NeonLayerSupport.cpp
@@ -39,6 +39,7 @@
#include "workloads/NeonDepthToSpaceWorkload.hpp"
#include "workloads/NeonDepthwiseConvolutionWorkload.hpp"
#include "workloads/NeonDequantizeWorkload.hpp"
+#include "workloads/NeonElementwiseBinaryWorkload.hpp"
#include "workloads/NeonExpWorkload.hpp"
#include "workloads/NeonInstanceNormalizationWorkload.hpp"
#include "workloads/NeonL2NormalizationFloatWorkload.hpp"
@@ -387,6 +388,15 @@ bool IsLayerTypeSupported(const LayerType& type,
infos[1],
infos[2],
nullptr);
+ case BinaryOperation::Power:
+ case BinaryOperation::SqDiff:
+ FORWARD_WORKLOAD_VALIDATE_FUNC(NeonElementwiseBinaryWorkloadValidate,
+ reasonIfUnsupported,
+ infos[0],
+ infos[1],
+ infos[2],
+ desc,
+ nullptr);
case BinaryOperation::Sub:
FORWARD_WORKLOAD_VALIDATE_FUNC(NeonSubtractionWorkloadValidate,
reasonIfUnsupported,
diff --git a/src/backends/neon/NeonWorkloadFactory.cpp b/src/backends/neon/NeonWorkloadFactory.cpp
index c78b58d21d..eca386701b 100644
--- a/src/backends/neon/NeonWorkloadFactory.cpp
+++ b/src/backends/neon/NeonWorkloadFactory.cpp
@@ -334,6 +334,11 @@ std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateWorkload(LayerType type,
multiplicationQueueDescriptor.m_Outputs = descriptor.m_Outputs;
return std::make_unique<NeonMultiplicationWorkload>(multiplicationQueueDescriptor, info);
}
+ case BinaryOperation::Power:
+ case BinaryOperation::SqDiff:
+ {
+ return std::make_unique<NeonElementwiseBinaryWorkload>(*elementwiseBinaryQueueDescriptor, info);
+ }
case BinaryOperation::Sub:
{
SubtractionQueueDescriptor subtractionQueueDescriptor;
diff --git a/src/backends/neon/backend.mk b/src/backends/neon/backend.mk
index bbc55547a0..e2439eecb7 100644
--- a/src/backends/neon/backend.mk
+++ b/src/backends/neon/backend.mk
@@ -1,5 +1,5 @@
#
-# Copyright © 2017 ARM Ltd and Contributors. All rights reserved.
+# Copyright © 2017,2023 ARM Ltd and Contributors. All rights reserved.
# SPDX-License-Identifier: MIT
#
@@ -42,6 +42,7 @@ BACKEND_SOURCES := \
workloads/NeonDepthwiseConvolutionWorkload.cpp \
workloads/NeonDequantizeWorkload.cpp \
workloads/NeonDetectionPostProcessWorkload.cpp \
+ workloads/NeonElementwiseBinaryWorkload.cpp \
workloads/NeonExpWorkload.cpp \
workloads/NeonFillWorkload.cpp \
workloads/NeonFloorFloatWorkload.cpp \
diff --git a/src/backends/neon/test/NeonEndToEndTests.cpp b/src/backends/neon/test/NeonEndToEndTests.cpp
index fb05cc415f..071ee415de 100644
--- a/src/backends/neon/test/NeonEndToEndTests.cpp
+++ b/src/backends/neon/test/NeonEndToEndTests.cpp
@@ -14,6 +14,7 @@
#include <backendsCommon/test/DepthToSpaceEndToEndTestImpl.hpp>
#include <backendsCommon/test/DequantizeEndToEndTestImpl.hpp>
#include <backendsCommon/test/DetectionPostProcessEndToEndTestImpl.hpp>
+#include <backendsCommon/test/ElementwiseBinaryEndToEndTestImpl.hpp>
#include <backendsCommon/test/ElementwiseUnaryEndToEndTestImpl.hpp>
#include <backendsCommon/test/FillEndToEndTestImpl.hpp>
#include <backendsCommon/test/InstanceNormalizationEndToEndTestImpl.hpp>
@@ -133,6 +134,8 @@ TEST_CASE("NeonGreaterBroadcastEndToEndUint8Test")
expectedOutput);
}
+// ElementwiseBinary
+// Add
TEST_CASE("NeonAdditionEndToEndFloat32Test")
{
AdditionEndToEnd<armnn::DataType::Float32>(neonDefaultBackends);
@@ -143,6 +146,24 @@ TEST_CASE("NeonAdditionEndToEndUint8Test")
AdditionEndToEnd<armnn::DataType::QAsymmU8>(neonDefaultBackends);
}
+// Power
+TEST_CASE("NeonPowerEndToEndTestFloat32")
+{
+ ElementwiseBinarySimpleEndToEnd<armnn::DataType::Float32>(neonDefaultBackends, BinaryOperation::Power);
+}
+
+// SqDiff
+TEST_CASE("NeonSquaredDifferenceEndToEndTestFloat32")
+{
+ ElementwiseBinarySimpleEndToEnd<armnn::DataType::Float32>(neonDefaultBackends, BinaryOperation::SqDiff);
+}
+
+TEST_CASE("NeonSquaredDifferenceEndToEndTestUint8")
+{
+ ElementwiseBinarySimpleEndToEnd<armnn::DataType::QAsymmU8>(neonDefaultBackends, BinaryOperation::SqDiff);
+}
+
+// Batch Mat Mul
TEST_CASE("NeonBatchMatMulEndToEndFloat32Test")
{
BatchMatMulEndToEnd<armnn::DataType::Float32>(neonDefaultBackends);
@@ -153,6 +174,7 @@ TEST_CASE("NeonBatchMatMulEndToEndInt8Test")
BatchMatMulEndToEnd<armnn::DataType::QAsymmS8>(neonDefaultBackends);
}
+// Concat
TEST_CASE("NeonConcatEndToEndDim0Test")
{
ConcatDim0EndToEnd<armnn::DataType::Float32>(neonDefaultBackends);
diff --git a/src/backends/neon/test/NeonLayerTests.cpp b/src/backends/neon/test/NeonLayerTests.cpp
index 715060717f..c0b995baaa 100644
--- a/src/backends/neon/test/NeonLayerTests.cpp
+++ b/src/backends/neon/test/NeonLayerTests.cpp
@@ -817,6 +817,24 @@ ARMNN_AUTO_TEST_CASE_WITH_THF(MultiplicationBroadcast1ElementUint8, Multiplicati
ARMNN_AUTO_TEST_CASE_WITH_THF(MultiplicationBroadcast1DVectorUint8, MultiplicationBroadcast1DVectorUint8Test)
ARMNN_AUTO_TEST_CASE_WITH_THF(Multiplication5d, Multiplication5dTest)
+// SquaredDifference
+ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleSquaredDifference, SquaredDifferenceTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(SquaredDiffBroadcast1Element, SquaredDiffBroadcast1ElementTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(SquaredDiffBroadcast, SquaredDiffBroadcastTest)
+
+ARMNN_AUTO_TEST_CASE_WITH_THF(SquaredDifferenceUint8, SquaredDifferenceUint8Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(SquaredDiffBroadcastUint8, SquaredDiffBroadcastUint8Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(SquaredDiffBroadcast1ElementUint8, SquaredDiffBroadcast1ElementUint8Test)
+
+ARMNN_AUTO_TEST_CASE_WITH_THF(SquaredDifferenceInt32, SquaredDifferenceInt32Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(SquaredDiffBroadcastInt32, SquaredDiffBroadcastInt32Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(SquaredDiffBroadcast1ElementInt32, SquaredDiffBroadcast1ElementInt32Test)
+
+// Power
+ARMNN_AUTO_TEST_CASE_WITH_THF(SimplePower, PowerTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(PowerBroadcast1Element, PowerBroadcast1ElementTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(PowerBroadcast, PowerBroadcastTest)
+
// Batch Norm
ARMNN_AUTO_TEST_CASE_WITH_THF(BatchNormFloat32, BatchNormFloat32Test)
ARMNN_AUTO_TEST_CASE_WITH_THF(BatchNormFloat32Nhwc, BatchNormFloat32NhwcTest)
diff --git a/src/backends/neon/workloads/CMakeLists.txt b/src/backends/neon/workloads/CMakeLists.txt
index a3eb883079..f3f12842c1 100644
--- a/src/backends/neon/workloads/CMakeLists.txt
+++ b/src/backends/neon/workloads/CMakeLists.txt
@@ -1,5 +1,5 @@
#
-# Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+# Copyright © 2017,2023 Arm Ltd and Contributors. All rights reserved.
# SPDX-License-Identifier: MIT
#
@@ -44,6 +44,8 @@ list(APPEND armnnNeonBackendWorkloads_sources
NeonDequantizeWorkload.hpp
NeonDetectionPostProcessWorkload.cpp
NeonDetectionPostProcessWorkload.hpp
+ NeonElementwiseBinaryWorkload.cpp
+ NeonElementwiseBinaryWorkload.hpp
NeonExpWorkload.cpp
NeonExpWorkload.hpp
NeonFillWorkload.cpp
diff --git a/src/backends/neon/workloads/NeonElementwiseBinaryWorkload.cpp b/src/backends/neon/workloads/NeonElementwiseBinaryWorkload.cpp
new file mode 100644
index 0000000000..839ec6c5ad
--- /dev/null
+++ b/src/backends/neon/workloads/NeonElementwiseBinaryWorkload.cpp
@@ -0,0 +1,92 @@
+//
+// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "NeonElementwiseBinaryWorkload.hpp"
+#include "NeonWorkloadUtils.hpp"
+
+#include <aclCommon/ArmComputeTensorUtils.hpp>
+#include <aclCommon/ArmComputeUtils.hpp>
+
+#include <armnn/utility/PolymorphicDowncast.hpp>
+#include <armnn/backends/TensorHandle.hpp>
+
+#include <arm_compute/runtime/NEON/functions/NEElementwiseOperations.h>
+
+namespace armnn
+{
+
+arm_compute::Status NeonElementwiseBinaryWorkloadValidate(const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output,
+ const ElementwiseBinaryDescriptor& descriptor,
+ const ActivationDescriptor* activationDescriptor)
+{
+ const arm_compute::TensorInfo aclInput0 = armcomputetensorutils::BuildArmComputeTensorInfo(input0);
+ const arm_compute::TensorInfo aclInput1 = armcomputetensorutils::BuildArmComputeTensorInfo(input1);
+ const arm_compute::TensorInfo aclOutput = armcomputetensorutils::BuildArmComputeTensorInfo(output);
+
+ const arm_compute::ActivationLayerInfo activationInfo = ConvertActivationDescriptorToAclActivationLayerInfo(
+ activationDescriptor);
+
+ switch (descriptor.m_Operation)
+ {
+ case armnn::BinaryOperation::Power:
+ return arm_compute::NEElementwisePower::validate(&aclInput0,
+ &aclInput1,
+ &aclOutput,
+ activationInfo);
+ case armnn::BinaryOperation::SqDiff:
+ return arm_compute::NEElementwiseSquaredDiff::validate(&aclInput0,
+ &aclInput1,
+ &aclOutput,
+ activationInfo);
+ default:
+ throw InvalidArgumentException("Unknown binary operator", CHECK_LOCATION());
+ }
+}
+
+
+NeonElementwiseBinaryWorkload::NeonElementwiseBinaryWorkload(const ElementwiseBinaryQueueDescriptor& descriptor,
+ const WorkloadInfo& info)
+ : NeonBaseWorkload<ElementwiseBinaryQueueDescriptor>(descriptor, info)
+{
+ m_Data.ValidateInputsOutputs("NeonElementwiseBinaryWorkload", 2, 1);
+
+ arm_compute::ITensor& input1 = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+ arm_compute::ITensor& input2 = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[1])->GetTensor();
+ arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+
+ const arm_compute::ActivationLayerInfo activationInfo = ConvertAdditionalInfoToAclActivationLayerInfo(descriptor);
+
+ ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "NeonElementwiseBinaryWorkload_configure");
+
+ switch (descriptor.m_Parameters.m_Operation)
+ {
+ case armnn::BinaryOperation::Power:
+ {
+ auto powerLayer = std::make_unique<arm_compute::NEElementwisePower>();
+ powerLayer->configure(&input1, &input2, &output, activationInfo);
+ m_ElementwiseBinaryLayer.reset(powerLayer.release());
+ break;
+ }
+ case armnn::BinaryOperation::SqDiff:
+ {
+ auto SqDiffLayer = std::make_unique<arm_compute::NEElementwiseSquaredDiff>();
+ SqDiffLayer->configure(&input1, &input2, &output, activationInfo);
+ m_ElementwiseBinaryLayer.reset(SqDiffLayer.release());
+ break;
+ }
+ default:
+ throw InvalidArgumentException("Unknown binary operator", CHECK_LOCATION());
+ }
+}
+
+void NeonElementwiseBinaryWorkload::Execute() const
+{
+ ARMNN_SCOPED_PROFILING_EVENT_NEON_GUID("NeonElementwiseBinaryWorkload_Execute", this->GetGuid());
+ m_ElementwiseBinaryLayer->run();
+}
+
+} //namespace armnn
diff --git a/src/backends/neon/workloads/NeonElementwiseBinaryWorkload.hpp b/src/backends/neon/workloads/NeonElementwiseBinaryWorkload.hpp
new file mode 100644
index 0000000000..9cdc7ae11b
--- /dev/null
+++ b/src/backends/neon/workloads/NeonElementwiseBinaryWorkload.hpp
@@ -0,0 +1,33 @@
+//
+// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "NeonBaseWorkload.hpp"
+
+#include <arm_compute/core/Error.h>
+#include <arm_compute/core/Types.h>
+#include <arm_compute/runtime/IFunction.h>
+
+namespace armnn
+{
+
+arm_compute::Status NeonElementwiseBinaryWorkloadValidate(const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output,
+ const ElementwiseBinaryDescriptor& descriptor,
+ const ActivationDescriptor* activationDescriptor = nullptr);
+
+class NeonElementwiseBinaryWorkload : public NeonBaseWorkload<ElementwiseBinaryQueueDescriptor>
+{
+public:
+ NeonElementwiseBinaryWorkload(const ElementwiseBinaryQueueDescriptor& descriptor, const WorkloadInfo& info);
+ virtual void Execute() const override;
+
+private:
+ std::unique_ptr<arm_compute::IFunction> m_ElementwiseBinaryLayer;
+};
+
+} //namespace armnn
diff --git a/src/backends/neon/workloads/NeonWorkloads.hpp b/src/backends/neon/workloads/NeonWorkloads.hpp
index 01fd2f7dba..024748690c 100644
--- a/src/backends/neon/workloads/NeonWorkloads.hpp
+++ b/src/backends/neon/workloads/NeonWorkloads.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017, 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -25,6 +25,7 @@
#include "NeonDequantizeWorkload.hpp"
#include "NeonDetectionPostProcessWorkload.hpp"
#include "NeonDivisionWorkload.hpp"
+#include "NeonElementwiseBinaryWorkload.hpp"
#include "NeonExpWorkload.hpp"
#include "NeonFillWorkload.hpp"
#include "NeonFloorFloatWorkload.hpp"