Diffstat (limited to 'src/backends')
-rw-r--r--  src/backends/backendsCommon/common.mk  2
-rw-r--r--  src/backends/backendsCommon/test/CMakeLists.txt  4
-rw-r--r--  src/backends/backendsCommon/test/ElementwiseBinaryEndToEndTestImpl.hpp  6
-rw-r--r--  src/backends/backendsCommon/test/LayerTests.hpp  4
-rw-r--r--  src/backends/backendsCommon/test/layerTests/ElementwiseTestImpl.hpp  67
-rw-r--r--  src/backends/backendsCommon/test/layerTests/PowerTestImpl.cpp  539
-rw-r--r--  src/backends/backendsCommon/test/layerTests/PowerTestImpl.hpp  88
-rw-r--r--  src/backends/backendsCommon/test/layerTests/SquaredDifferenceTestImpl.cpp  539
-rw-r--r--  src/backends/backendsCommon/test/layerTests/SquaredDifferenceTestImpl.hpp  88
-rw-r--r--  src/backends/reference/test/RefEndToEndTests.cpp  16
-rw-r--r--  src/backends/reference/test/RefLayerTests.cpp  44
-rw-r--r--  src/backends/reference/workloads/ElementwiseFunction.cpp  6
-rw-r--r--  src/backends/reference/workloads/Power.hpp  27
-rw-r--r--  src/backends/reference/workloads/RefElementwiseBinaryWorkload.cpp  14
-rw-r--r--  src/backends/reference/workloads/SquaredDifference.hpp  28
15 files changed, 1468 insertions, 4 deletions
diff --git a/src/backends/backendsCommon/common.mk b/src/backends/backendsCommon/common.mk
index 986d2530c2..c868cbe87a 100644
--- a/src/backends/backendsCommon/common.mk
+++ b/src/backends/backendsCommon/common.mk
@@ -84,6 +84,7 @@ COMMON_TEST_SOURCES := \
test/layerTests/PadTestImpl.cpp \
test/layerTests/Pooling2dTestImpl.cpp \
test/layerTests/Pooling3dTestImpl.cpp \
+ test/layerTests/PowerTestImpl.cpp \
test/layerTests/RankTestImpl.cpp \
test/layerTests/ReductionTestImpl.cpp \
test/layerTests/ReduceProdTestImpl.cpp \
@@ -92,6 +93,7 @@ COMMON_TEST_SOURCES := \
test/layerTests/ResizeTestImpl.cpp \
test/layerTests/RsqrtTestImpl.cpp \
test/layerTests/SliceTestImpl.cpp \
+ test/layerTests/SquaredDifferenceTestImpl.cpp \
test/layerTests/QuantizeTestImpl.cpp \
test/layerTests/SinTestImpl.cpp \
test/layerTests/ShapeTestImpl.cpp \
diff --git a/src/backends/backendsCommon/test/CMakeLists.txt b/src/backends/backendsCommon/test/CMakeLists.txt
index 95065dffe4..aba9c72e6c 100644
--- a/src/backends/backendsCommon/test/CMakeLists.txt
+++ b/src/backends/backendsCommon/test/CMakeLists.txt
@@ -153,6 +153,8 @@ list(APPEND armnnBackendsCommonUnitTests_sources
layerTests/Pooling2dTestImpl.hpp
layerTests/Pooling3dTestImpl.cpp
layerTests/Pooling3dTestImpl.hpp
+ layerTests/PowerTestImpl.cpp
+ layerTests/PowerTestImpl.hpp
layerTests/PreluTestImpl.hpp
layerTests/QuantizeTestImpl.cpp
layerTests/QuantizeTestImpl.hpp
@@ -186,6 +188,8 @@ list(APPEND armnnBackendsCommonUnitTests_sources
layerTests/SplitterTestImpl.hpp
layerTests/SqrtTestImpl.cpp
layerTests/SqrtTestImpl.hpp
+ layerTests/SquaredDifferenceTestImpl.cpp
+ layerTests/SquaredDifferenceTestImpl.hpp
layerTests/StackTestImpl.cpp
layerTests/StackTestImpl.hpp
layerTests/StridedSliceTestImpl.cpp
diff --git a/src/backends/backendsCommon/test/ElementwiseBinaryEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/ElementwiseBinaryEndToEndTestImpl.hpp
index 6546a6ae9e..0d47fd6056 100644
--- a/src/backends/backendsCommon/test/ElementwiseBinaryEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/ElementwiseBinaryEndToEndTestImpl.hpp
@@ -89,6 +89,12 @@ void ElementwiseBinarySimpleEndToEnd(const std::vector<BackendId>& backends,
case armnn::BinaryOperation::Sub:
expectedOutput = { -1, -3, -1, -1, 3, -7, 3, 3, -5, 1, 1, 1, 2, 2, -6, 2 };
break;
+ case armnn::BinaryOperation::SqDiff:
+ expectedOutput = { 1, 9, 1, 1, 9, 49, 9, 9, 25, 1, 1, 1, 4, 4, 36, 4 };
+ break;
+ case armnn::BinaryOperation::Power:
+ expectedOutput = { 1, 1, 1, 1, 25, 25, 25, 25, 9, 9, 9, 9, 16, 16, 16, 16 };
+ break;
default:
throw("Invalid Elementwise Binary operation");
}
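
For reference, the two new expected-output rows follow directly from the elementwise definitions being tested: SqDiff yields (a - b)^2 and Power yields a^b for each pair of elements in the test's fixed input tensors. A minimal sketch of those reference formulas, not part of the patch, assuming plain float operands:

    // Reference definitions behind the expected values above (sketch only).
    #include <cmath>

    float SqDiffRef(float a, float b) { return (a - b) * (a - b); } // (a - b)^2
    float PowerRef (float a, float b) { return std::pow(a, b); }    // a^b
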
diff --git a/src/backends/backendsCommon/test/LayerTests.hpp b/src/backends/backendsCommon/test/LayerTests.hpp
index 00bfea5452..b6ddb31419 100644
--- a/src/backends/backendsCommon/test/LayerTests.hpp
+++ b/src/backends/backendsCommon/test/LayerTests.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017, 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -51,6 +51,7 @@
#include <backendsCommon/test/layerTests/PermuteTestImpl.hpp>
#include <backendsCommon/test/layerTests/Pooling2dTestImpl.hpp>
#include <backendsCommon/test/layerTests/Pooling3dTestImpl.hpp>
+#include <backendsCommon/test/layerTests/PowerTestImpl.hpp>
#include <backendsCommon/test/layerTests/PreluTestImpl.hpp>
#include <backendsCommon/test/layerTests/QuantizeTestImpl.hpp>
#include <backendsCommon/test/layerTests/RankTestImpl.hpp>
@@ -68,6 +69,7 @@
#include <backendsCommon/test/layerTests/SpaceToDepthTestImpl.hpp>
#include <backendsCommon/test/layerTests/SplitterTestImpl.hpp>
#include <backendsCommon/test/layerTests/SqrtTestImpl.hpp>
+#include <backendsCommon/test/layerTests/SquaredDifferenceTestImpl.hpp>
#include <backendsCommon/test/layerTests/StackTestImpl.hpp>
#include <backendsCommon/test/layerTests/StridedSliceTestImpl.hpp>
#include <backendsCommon/test/layerTests/SubtractionTestImpl.hpp>
diff --git a/src/backends/backendsCommon/test/layerTests/ElementwiseTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/ElementwiseTestImpl.hpp
index da6e11fe3e..ded53cbb76 100644
--- a/src/backends/backendsCommon/test/layerTests/ElementwiseTestImpl.hpp
+++ b/src/backends/backendsCommon/test/layerTests/ElementwiseTestImpl.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -206,4 +206,67 @@ LayerTestResult<T, NumDims> ElementwiseTestHelper(
tensorHandleFactory,
quantScale,
quantOffset);
-}
\ No newline at end of file
+}
+
+// Elementwise Binary Operations
+template<std::size_t NumDims,
+ armnn::DataType ArmnnTypeInput,
+ armnn::DataType ArmnnTypeOutput,
+ typename TInput = armnn::ResolveType<ArmnnTypeInput>,
+ typename TOutput = armnn::ResolveType<ArmnnTypeOutput>>
+LayerTestResult<TOutput, NumDims> ElementwiseTestHelper(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ armnn::BinaryOperation op,
+ const unsigned int shape0[NumDims],
+ std::vector<TInput> values0,
+ const unsigned int shape1[NumDims],
+ std::vector<TInput> values1,
+ const unsigned int outShape[NumDims],
+ std::vector<TOutput> outValues,
+ const armnn::ITensorHandleFactory& tensorHandleFactory) {
+
+ armnn::TensorInfo inputTensorInfo0{NumDims, shape0, ArmnnTypeInput};
+ armnn::TensorInfo inputTensorInfo1{NumDims, shape1, ArmnnTypeInput};
+ armnn::TensorInfo outputTensorInfo{NumDims, outShape, ArmnnTypeOutput};
+
+ std::vector<TOutput> actualOutput(outputTensorInfo.GetNumElements());
+
+ bool isBoolean = false;
+ if (ArmnnTypeOutput == armnn::DataType::Boolean)
+ {
+ isBoolean = true;
+ }
+
+ std::unique_ptr<armnn::ITensorHandle> inputHandle0 = tensorHandleFactory.CreateTensorHandle(inputTensorInfo0);
+ std::unique_ptr<armnn::ITensorHandle> inputHandle1 = tensorHandleFactory.CreateTensorHandle(inputTensorInfo1);
+ std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
+
+ armnn::ElementwiseBinaryQueueDescriptor data;
+ data.m_Parameters.m_Operation = op;
+ armnn::WorkloadInfo info;
+
+ AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
+ AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
+ AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
+
+ auto workload = workloadFactory.CreateWorkload(armnn::LayerType::ElementwiseBinary, data, info);
+
+ inputHandle0->Allocate();
+ inputHandle1->Allocate();
+ outputHandle->Allocate();
+
+ CopyDataToITensorHandle(inputHandle0.get(), values0.data());
+ CopyDataToITensorHandle(inputHandle1.get(), values1.data());
+
+ workload->PostAllocationConfigure();
+ ExecuteWorkload(*workload, memoryManager);
+
+ CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
+
+ return LayerTestResult<TOutput, NumDims>(actualOutput,
+ outValues,
+ outputHandle->GetShape(),
+ outputTensorInfo.GetShape(),
+ isBoolean);
+}
diff --git a/src/backends/backendsCommon/test/layerTests/PowerTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/PowerTestImpl.cpp
new file mode 100644
index 0000000000..dd6d569270
--- /dev/null
+++ b/src/backends/backendsCommon/test/layerTests/PowerTestImpl.cpp
@@ -0,0 +1,539 @@
+//
+// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "PowerTestImpl.hpp"
+
+#include "ElementwiseTestImpl.hpp"
+
+LayerTestResult<float, 4> PowerTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ IgnoreUnused(memoryManager);
+ unsigned int shape[] = { 2, 2, 2, 2 };
+
+ std::vector<float> input0 =
+ {
+ 7.f, 3.f, 4.f, 2.f, 6.f, 4.f, 2.f, 1.f,
+ 1.f, 1.f, 0.f, 2.f, 9.f, 3.f, 5.f, 3.f
+ };
+
+ std::vector<float> input1 =
+ {
+ 2.f, 3.f, 2.f, 1.f, 2.f, 3.f, 4.f, 3.f,
+ 4.f, 5.f, 3.f, 5.f, 2.f, 3.f, 2.f, 0.f
+ };
+
+ std::vector<float> output
+ {
+ 49.f, 27.f, 16.f, 2.f, 36.f, 64.f, 16.f, 1.f,
+ 1.f, 1.f, 0.f, 32.f, 81.f, 27.f, 25.f, 1.f
+ };
+
+ return ElementwiseTestHelper<4, armnn::DataType::Float32, armnn::DataType::Float32>(
+ workloadFactory,
+ memoryManager,
+ armnn::BinaryOperation::Power,
+ shape,
+ input0,
+ shape,
+ input1,
+ shape,
+ output,
+ tensorHandleFactory);
+}
+
+LayerTestResult<float, 4> PowerBroadcast1ElementTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ IgnoreUnused(memoryManager);
+ unsigned int shape0[] = { 1, 2, 2, 2 };
+ unsigned int shape1[] = { 1, 1, 1, 1 };
+
+ std::vector<float> input0 =
+ {
+ 1.f, 2.f, 3.f, 4.f, 5.f, 0.f, 2.f, 1.f
+ };
+
+ std::vector<float> input1 = { 2.f };
+
+ std::vector<float> output =
+ {
+ 1.f, 4.f, 9.f, 16.f, 25.f, 0.f, 4.f, 1.f
+ };
+
+ return ElementwiseTestHelper<4, armnn::DataType::Float32, armnn::DataType::Float32>(
+ workloadFactory,
+ memoryManager,
+ armnn::BinaryOperation::Power,
+ shape0,
+ input0,
+ shape1,
+ input1,
+ shape0,
+ output,
+ tensorHandleFactory);
+}
+
+LayerTestResult<float, 4> PowerBroadcastTest(
+ armnn::IWorkloadFactory & workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ IgnoreUnused(memoryManager);
+ const unsigned int shape0[] = { 1, 2, 2, 3 };
+ const unsigned int shape1[] = { 1, 1, 1, 3 };
+
+ std::vector<float> input0 =
+ {
+ 1.f, 2.f, 3.f, 3.f, 4.f, 4.f,
+ 4.f, 0.f, 2.f, 3.f, 4.f, 4.f
+ };
+
+ std::vector<float> input1 = { 1.f, 3.f, 1.f };
+
+ std::vector<float> output =
+ {
+ 1.f, 8.f, 3.f, 3.f, 64.f, 4.f,
+ 4.f, 0.f, 2.f, 3.f, 64.f, 4.f
+ };
+
+ return ElementwiseTestHelper<4, armnn::DataType::Float32, armnn::DataType::Float32>(
+ workloadFactory,
+ memoryManager,
+ armnn::BinaryOperation::Power,
+ shape0,
+ input0,
+ shape1,
+ input1,
+ shape0,
+ output,
+ tensorHandleFactory);
+}
+
+LayerTestResult<armnn::Half, 4> PowerFloat16Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ IgnoreUnused(memoryManager);
+ using namespace half_float::literal;
+
+ unsigned int shape[] = { 2, 2, 2, 2 };
+
+ std::vector<armnn::Half> input0 =
+ {
+ 1._h, 5._h, 1._h, 4._h, 6._h, 1._h, 3._h, 5._h,
+ 3._h, 7._h, 6._h, 3._h, 8._h, 4._h, 4._h, 2._h
+ };
+
+ std::vector<armnn::Half> input1 =
+ {
+ 2._h, 2._h, 2._h, 2._h, 2._h, 3._h, 3._h, 2._h,
+ 1._h, 2._h, 2._h, 4._h, 2._h, 1._h, 3._h, 5._h
+ };
+
+ std::vector<armnn::Half> output
+ {
+ 1._h, 25._h, 1._h, 16._h, 36._h, 1._h, 27._h, 25._h,
+ 3._h, 49._h, 36._h, 81._h, 64._h, 4._h, 64._h, 32._h
+ };
+
+ return ElementwiseTestHelper<4, armnn::DataType::Float16, armnn::DataType::Float16>(
+ workloadFactory,
+ memoryManager,
+ armnn::BinaryOperation::Power,
+ shape,
+ input0,
+ shape,
+ input1,
+ shape,
+ output,
+ tensorHandleFactory);
+}
+
+LayerTestResult<armnn::Half, 4> PowerBroadcast1ElementFloat16Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ IgnoreUnused(memoryManager);
+ using namespace half_float::literal;
+
+ const unsigned int shape0[] = { 1, 2, 2, 3 };
+ const unsigned int shape1[] = { 1, 1, 1, 1 };
+
+ std::vector<armnn::Half> input0 =
+ {
+ 1._h, 2._h, 3._h, 4._h, 5._h, 4._h,
+ 1._h, 5._h, 4._h, 2._h, 0._h, 1._h
+ };
+
+ std::vector<armnn::Half> input1 = { 2._h };
+
+ std::vector<armnn::Half> output =
+ {
+ 1._h, 4._h, 9._h, 16._h, 25._h, 16._h,
+ 1._h, 25._h, 16._h, 4._h, 0._h, 1._h
+ };
+
+ return ElementwiseTestHelper<4, armnn::DataType::Float16, armnn::DataType::Float16>(
+ workloadFactory,
+ memoryManager,
+ armnn::BinaryOperation::Power,
+ shape0,
+ input0,
+ shape1,
+ input1,
+ shape0,
+ output,
+ tensorHandleFactory);
+}
+
+LayerTestResult<armnn::Half, 4> PowerBroadcastFloat16Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ IgnoreUnused(memoryManager);
+ using namespace half_float::literal;
+
+ const unsigned int shape0[] = { 1, 2, 2, 3 };
+ const unsigned int shape1[] = { 1, 1, 1, 3 };
+
+ std::vector<armnn::Half> input0 =
+ {
+ 4._h, 2._h, 3._h, 4._h, 1._h, 0._h,
+ 8._h, 1._h, 1._h, 1._h, 2._h, 4._h
+ };
+
+ std::vector<armnn::Half> input1 = { 1._h, 5._h, 3._h };
+
+ std::vector<armnn::Half> output =
+ {
+ 4._h, 32._h, 27._h, 4._h, 1._h, 0._h,
+ 8._h, 1._h, 1._h, 1._h, 32._h, 64._h
+ };
+
+ return ElementwiseTestHelper<4, armnn::DataType::Float16, armnn::DataType::Float16>(
+ workloadFactory,
+ memoryManager,
+ armnn::BinaryOperation::Power,
+ shape0,
+ input0,
+ shape1,
+ input1,
+ shape0,
+ output,
+ tensorHandleFactory);
+}
+
+LayerTestResult<uint8_t, 4> PowerUint8Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ IgnoreUnused(memoryManager);
+ const unsigned int shape[] = { 1, 1, 2, 2 };
+
+ std::vector<uint8_t> input0 = { 4, 2, 4, 3 };
+
+ std::vector<uint8_t> input1 = { 1, 2, 2, 2 };
+
+ std::vector<uint8_t> output = { 4, 4, 16, 9 };
+
+ return ElementwiseTestHelper<4, armnn::DataType::QAsymmU8, armnn::DataType::QAsymmU8>(
+ workloadFactory,
+ memoryManager,
+ armnn::BinaryOperation::Power,
+ shape,
+ input0,
+ shape,
+ input1,
+ shape,
+ output,
+ tensorHandleFactory);
+}
+
+LayerTestResult<uint8_t, 4> PowerBroadcast1ElementUint8Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ IgnoreUnused(memoryManager);
+ const unsigned int shape0[] = { 1, 1, 2, 2 };
+ const unsigned int shape1[] = { 1, 1, 1, 1 };
+
+ std::vector<uint8_t> input0 = { 4, 5, 1, 0 };
+
+ std::vector<uint8_t> input1 = { 2 };
+
+ std::vector<uint8_t> output = { 16, 25, 1, 0 };
+
+ return ElementwiseTestHelper<4, armnn::DataType::QAsymmU8, armnn::DataType::QAsymmU8>(
+ workloadFactory,
+ memoryManager,
+ armnn::BinaryOperation::Power,
+ shape0,
+ input0,
+ shape1,
+ input1,
+ shape0,
+ output,
+ tensorHandleFactory);
+}
+
+LayerTestResult<uint8_t, 4> PowerBroadcastUint8Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ IgnoreUnused(memoryManager);
+ const unsigned int shape0[] = { 1, 1, 2, 2 };
+ const unsigned int shape1[] = { 1, 1, 1, 2 };
+
+ std::vector<uint8_t> input0 = { 4, 1, 6, 2 };
+
+ std::vector<uint8_t> input1 = { 2, 6 };
+
+ std::vector<uint8_t> output = { 16, 1, 36, 64 };
+
+ return ElementwiseTestHelper<4, armnn::DataType::QAsymmU8, armnn::DataType::QAsymmU8>(
+ workloadFactory,
+ memoryManager,
+ armnn::BinaryOperation::Power,
+ shape0,
+ input0,
+ shape1,
+ input1,
+ shape0,
+ output,
+ tensorHandleFactory);
+}
+
+LayerTestResult<int16_t, 4> PowerInt16Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ IgnoreUnused(memoryManager);
+ unsigned int shape[] = { 2, 2, 2, 2 };
+
+ std::vector<int16_t> input0 =
+ {
+ 1, 5, 1, 4, 4, 9, 3, 7,
+ 3, 2, 9, 6, 1, 2, 1, 4
+ };
+
+ std::vector<int16_t> input1 =
+ {
+ 2, 2, 0, 3, 2, 1, 3, 2,
+ 4, 4, 2, 1, 7, 5, 4, 2
+ };
+
+ std::vector<int16_t> output
+ {
+ 1, 25, 0, 64, 16, 9, 27, 49,
+ 81, 16, 81, 6, 1, 32, 1, 16
+ };
+
+ return ElementwiseTestHelper<4, armnn::DataType::QSymmS16, armnn::DataType::QSymmS16>(
+ workloadFactory,
+ memoryManager,
+ armnn::BinaryOperation::Power,
+ shape,
+ input0,
+ shape,
+ input1,
+ shape,
+ output,
+ tensorHandleFactory);
+}
+
+LayerTestResult<int16_t, 4> PowerBroadcast1ElementInt16Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ IgnoreUnused(memoryManager);
+ const unsigned int shape0[] = { 1, 2, 2, 3 };
+ const unsigned int shape1[] = { 1, 1, 1, 1 };
+
+ std::vector<int16_t> input0 =
+ {
+ 1, 2, 3, 4, 5, 0,
+ 5, 4, 1, 4, 5, 2
+ };
+
+ std::vector<int16_t> input1 = { 2 };
+
+ std::vector<int16_t> output =
+ {
+ 1, 4, 9, 16, 25, 0,
+ 25, 16, 1, 16, 25, 4
+ };
+
+ return ElementwiseTestHelper<4, armnn::DataType::QSymmS16, armnn::DataType::QSymmS16>(
+ workloadFactory,
+ memoryManager,
+ armnn::BinaryOperation::Power,
+ shape0,
+ input0,
+ shape1,
+ input1,
+ shape0,
+ output,
+ tensorHandleFactory);
+}
+
+LayerTestResult<int16_t, 4> PowerBroadcastInt16Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ IgnoreUnused(memoryManager);
+ const unsigned int shape0[] = { 1, 2, 2, 3 };
+ const unsigned int shape1[] = { 1, 1, 1, 3 };
+
+ std::vector<int16_t> input0 =
+ {
+ 4, 2, 1, 4, 5, 3,
+ 7, 3, 4, 8, 1, 2
+ };
+
+ std::vector<int16_t> input1 = { 1, 2, 3 };
+
+ std::vector<int16_t> output =
+ {
+ 4, 4, 1, 4, 25, 27,
+ 7, 9, 64, 8, 1, 8
+ };
+
+ return ElementwiseTestHelper<4, armnn::DataType::QSymmS16, armnn::DataType::QSymmS16>(
+ workloadFactory,
+ memoryManager,
+ armnn::BinaryOperation::Power,
+ shape0,
+ input0,
+ shape1,
+ input1,
+ shape0,
+ output,
+ tensorHandleFactory);
+}
+
+LayerTestResult<int32_t, 4> PowerInt32Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ IgnoreUnused(memoryManager);
+ unsigned int shape[] = { 2, 2, 2, 2 };
+
+ std::vector<int32_t> input0 =
+ {
+ 1, 3, 4, 3, 1, 4, 2, 1,
+ 2, 1, 2, 1, 4, 3, 4, 3
+ };
+
+ std::vector<int32_t> input1 =
+ {
+ 2, 2, 2, 2, 3, 3, 4, 3,
+ 4, 4, 4, 4, 1, 3, 1, 3
+ };
+
+ std::vector<int32_t> output
+ {
+ 1, 9, 16, 9, 1, 64, 16, 1,
+ 16, 1, 16, 1, 4, 27, 4, 27
+ };
+
+ return ElementwiseTestHelper<4, armnn::DataType::Signed32, armnn::DataType::Signed32>(
+ workloadFactory,
+ memoryManager,
+ armnn::BinaryOperation::Power,
+ shape,
+ input0,
+ shape,
+ input1,
+ shape,
+ output,
+ tensorHandleFactory);
+}
+
+LayerTestResult<int32_t, 4> PowerBroadcastInt32Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ IgnoreUnused(memoryManager);
+ const unsigned int shape0[] = { 1, 2, 2, 3 };
+ const unsigned int shape1[] = { 1, 1, 1, 3 };
+
+ std::vector<int32_t> input0 =
+ {
+ 4, 4, 3, 4, 5, 0,
+ 5, 8, 1, 3, 9, 2
+ };
+
+ std::vector<int32_t> input1 = { 2, 1, 3 };
+
+ std::vector<int32_t> output =
+ {
+ 16, 4, 27, 16, 5, 0,
+ 25, 8, 1, 9, 9, 8
+ };
+
+ return ElementwiseTestHelper<4, armnn::DataType::Signed32, armnn::DataType::Signed32>(
+ workloadFactory,
+ memoryManager,
+ armnn::BinaryOperation::Power,
+ shape0,
+ input0,
+ shape1,
+ input1,
+ shape0,
+ output,
+ tensorHandleFactory);
+}
+
+LayerTestResult<int32_t, 4> PowerBroadcast1ElementInt32Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ IgnoreUnused(memoryManager);
+ const unsigned int shape0[] = { 1, 2, 2, 3 };
+ const unsigned int shape1[] = { 1, 1, 1, 1 };
+
+ std::vector<int32_t> input0 =
+ {
+ 1, 2, 3, 4, 5, 3,
+ 3, 1, 0, 2, 1, 5
+ };
+
+ std::vector<int32_t> input1 = { 2 };
+
+ std::vector<int32_t> output =
+ {
+ 1, 4, 9, 16, 25, 9,
+ 9, 1, 0, 4, 1, 25
+ };
+
+ return ElementwiseTestHelper<4, armnn::DataType::Signed32, armnn::DataType::Signed32>(
+ workloadFactory,
+ memoryManager,
+ armnn::BinaryOperation::Power,
+ shape0,
+ input0,
+ shape1,
+ input1,
+ shape0,
+ output,
+ tensorHandleFactory);
+}
diff --git a/src/backends/backendsCommon/test/layerTests/PowerTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/PowerTestImpl.hpp
new file mode 100644
index 0000000000..3707208658
--- /dev/null
+++ b/src/backends/backendsCommon/test/layerTests/PowerTestImpl.hpp
@@ -0,0 +1,88 @@
+//
+// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <armnnTestUtils/LayerTestResult.hpp>
+
+#include <Half.hpp>
+
+#include <armnn/backends/IBackendInternal.hpp>
+#include <armnn/backends/WorkloadFactory.hpp>
+
+LayerTestResult<float, 4> PowerTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+LayerTestResult<float, 4> PowerBroadcast1ElementTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+LayerTestResult<float, 4> PowerBroadcastTest(
+ armnn::IWorkloadFactory & workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+LayerTestResult<armnn::Half, 4> PowerFloat16Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+LayerTestResult<armnn::Half, 4> PowerBroadcast1ElementFloat16Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+LayerTestResult<armnn::Half, 4> PowerBroadcastFloat16Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+LayerTestResult<uint8_t, 4> PowerBroadcast1ElementUint8Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+LayerTestResult<uint8_t, 4> PowerUint8Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+LayerTestResult<uint8_t, 4> PowerBroadcastUint8Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+LayerTestResult<int16_t , 4> PowerInt16Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+LayerTestResult<int16_t, 4> PowerBroadcast1ElementInt16Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+LayerTestResult<int16_t, 4> PowerBroadcastInt16Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+LayerTestResult<int32_t, 4> PowerInt32Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+LayerTestResult<int32_t, 4> PowerBroadcastInt32Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+LayerTestResult<int32_t, 4> PowerBroadcast1ElementInt32Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
diff --git a/src/backends/backendsCommon/test/layerTests/SquaredDifferenceTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/SquaredDifferenceTestImpl.cpp
new file mode 100644
index 0000000000..8bb31ed16e
--- /dev/null
+++ b/src/backends/backendsCommon/test/layerTests/SquaredDifferenceTestImpl.cpp
@@ -0,0 +1,539 @@
+//
+// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "SquaredDifferenceTestImpl.hpp"
+
+#include "ElementwiseTestImpl.hpp"
+
+LayerTestResult<float, 4> SquaredDifferenceTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ IgnoreUnused(memoryManager);
+ unsigned int shape[] = { 2, 2, 2, 2 };
+
+ std::vector<float> input0 =
+ {
+ 7.f, 3.f, 4.f, 2.f, 6.f, 4.f, 2.f, 1.f,
+ 3.f, 1.f, 0.f, 1.f, 4.f, 3.f, 4.f, 3.f
+ };
+
+ std::vector<float> input1 =
+ {
+ 5.f, 3.f, 2.f, 5.f, 3.f, 3.f, 4.f, 3.f,
+ 4.f, 4.f, 3.f, 2.f, 5.f, 5.f, 5.f, 5.f
+ };
+
+ std::vector<float> output
+ {
+ 4.f, 0.f, 4.f, 9.f, 9.f, 1.f, 4.f, 4.f,
+ 1.f, 9.f, 9.f, 1.f, 1.f, 4.f, 1.f, 4.f
+ };
+
+ return ElementwiseTestHelper<4, armnn::DataType::Float32, armnn::DataType::Float32>(
+ workloadFactory,
+ memoryManager,
+ armnn::BinaryOperation::SqDiff,
+ shape,
+ input0,
+ shape,
+ input1,
+ shape,
+ output,
+ tensorHandleFactory);
+}
+
+LayerTestResult<float, 4> SquaredDiffBroadcast1ElementTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ IgnoreUnused(memoryManager);
+ unsigned int shape0[] = { 1, 2, 2, 2 };
+ unsigned int shape1[] = { 1, 1, 1, 1 };
+
+ std::vector<float> input0 =
+ {
+ 1.f, 2.f, 3.f, 4.f, 5.f, 0.f, 2.f, 1.f
+ };
+
+ std::vector<float> input1 = { 2.f };
+
+ std::vector<float> output =
+ {
+ 1.f, 0.f, 1.f, 4.f, 9.f, 4.f, 0.f, 1.f
+ };
+
+ return ElementwiseTestHelper<4, armnn::DataType::Float32, armnn::DataType::Float32>(
+ workloadFactory,
+ memoryManager,
+ armnn::BinaryOperation::SqDiff,
+ shape0,
+ input0,
+ shape1,
+ input1,
+ shape0,
+ output,
+ tensorHandleFactory);
+}
+
+LayerTestResult<float, 4> SquaredDiffBroadcastTest(
+ armnn::IWorkloadFactory & workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ IgnoreUnused(memoryManager);
+ const unsigned int shape0[] = { 1, 2, 2, 3 };
+ const unsigned int shape1[] = { 1, 1, 1, 3 };
+
+ std::vector<float> input0 =
+ {
+ 1.f, 2.f, 3.f, 3.f, 6.f, 4.f,
+ 4.f, 0.f, 2.f, 3.f, 4.f, 4.f
+ };
+
+ std::vector<float> input1 = { 1.f, 3.f, 1.f };
+
+ std::vector<float> output =
+ {
+ 0.f, 1.f, 4.f, 4.f, 9.f, 9.f,
+ 9.f, 9.f, 1.f, 4.f, 1.f, 9.f
+ };
+
+ return ElementwiseTestHelper<4, armnn::DataType::Float32, armnn::DataType::Float32>(
+ workloadFactory,
+ memoryManager,
+ armnn::BinaryOperation::SqDiff,
+ shape0,
+ input0,
+ shape1,
+ input1,
+ shape0,
+ output,
+ tensorHandleFactory);
+}
+
+LayerTestResult<armnn::Half, 4> SquaredDifferenceFloat16Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ IgnoreUnused(memoryManager);
+ using namespace half_float::literal;
+
+ unsigned int shape[] = { 2, 2, 2, 2 };
+
+ std::vector<armnn::Half> input0 =
+ {
+ 1._h, 5._h, 1._h, 4._h, 6._h, 1._h, 3._h, 5._h,
+ 3._h, 7._h, 6._h, 3._h, 8._h, 4._h, 4._h, 2._h
+ };
+
+ std::vector<armnn::Half> input1 =
+ {
+ 2._h, 2._h, 2._h, 2._h, 3._h, 3._h, 3._h, 3._h,
+ 4._h, 4._h, 4._h, 4._h, 5._h, 6._h, 5._h, 5._h
+ };
+
+ std::vector<armnn::Half> output
+ {
+ 1._h, 9._h, 1._h, 4._h, 9._h, 4._h, 0._h, 4._h,
+ 1._h, 9._h, 4._h, 1._h, 9._h, 4._h, 1._h, 9._h
+ };
+
+ return ElementwiseTestHelper<4, armnn::DataType::Float16, armnn::DataType::Float16>(
+ workloadFactory,
+ memoryManager,
+ armnn::BinaryOperation::SqDiff,
+ shape,
+ input0,
+ shape,
+ input1,
+ shape,
+ output,
+ tensorHandleFactory);
+}
+
+LayerTestResult<armnn::Half, 4> SquaredDiffBroadcast1ElementFloat16Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ IgnoreUnused(memoryManager);
+ using namespace half_float::literal;
+
+ const unsigned int shape0[] = { 1, 2, 2, 3 };
+ const unsigned int shape1[] = { 1, 1, 1, 1 };
+
+ std::vector<armnn::Half> input0 =
+ {
+ 1._h, 2._h, 3._h, 4._h, 5._h, 4._h,
+ 1._h, 5._h, 4._h, 2._h, 0._h, 1._h
+ };
+
+ std::vector<armnn::Half> input1 = { 2._h };
+
+ std::vector<armnn::Half> output =
+ {
+ 1._h, 0._h, 1._h, 4._h, 9._h, 4._h,
+ 1._h, 9._h, 4._h, 0._h, 4._h, 1._h
+ };
+
+ return ElementwiseTestHelper<4, armnn::DataType::Float16, armnn::DataType::Float16>(
+ workloadFactory,
+ memoryManager,
+ armnn::BinaryOperation::SqDiff,
+ shape0,
+ input0,
+ shape1,
+ input1,
+ shape0,
+ output,
+ tensorHandleFactory);
+}
+
+LayerTestResult<armnn::Half, 4> SquaredDiffBroadcastFloat16Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ IgnoreUnused(memoryManager);
+ using namespace half_float::literal;
+ const unsigned int shape0[] = { 1, 2, 2, 3 };
+ const unsigned int shape1[] = { 1, 1, 1, 3 };
+
+ std::vector<armnn::Half> input0 =
+ {
+ 4._h, 2._h, 3._h, 4._h, 5._h, 5._h,
+ 2._h, 8._h, 1._h, 1._h, 2._h, 4._h
+ };
+
+ std::vector<armnn::Half> input1 = { 1._h, 5._h, 3._h };
+
+ std::vector<armnn::Half> output =
+ {
+ 9._h, 9._h, 0._h, 9._h, 0._h, 4._h,
+ 1._h, 9._h, 4._h, 0._h, 9._h, 1._h
+ };
+
+ return ElementwiseTestHelper<4, armnn::DataType::Float16, armnn::DataType::Float16>(
+ workloadFactory,
+ memoryManager,
+ armnn::BinaryOperation::SqDiff,
+ shape0,
+ input0,
+ shape1,
+ input1,
+ shape0,
+ output,
+ tensorHandleFactory);
+}
+
+LayerTestResult<uint8_t, 4> SquaredDifferenceUint8Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ IgnoreUnused(memoryManager);
+ const unsigned int shape0[] = { 1, 1, 2, 2 };
+ const unsigned int shape1[] = { 1, 1, 2, 2 };
+
+ std::vector<uint8_t> input0 = { 4, 2, 4, 3 };
+
+ std::vector<uint8_t> input1 = { 1, 2, 2, 2 };
+
+ std::vector<uint8_t> output = { 9, 0, 4, 1 };
+
+ return ElementwiseTestHelper<4, armnn::DataType::QAsymmU8, armnn::DataType::QAsymmU8>(
+ workloadFactory,
+ memoryManager,
+ armnn::BinaryOperation::SqDiff,
+ shape0,
+ input0,
+ shape1,
+ input1,
+ shape0,
+ output,
+ tensorHandleFactory);
+}
+
+LayerTestResult<uint8_t, 4> SquaredDiffBroadcast1ElementUint8Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ IgnoreUnused(memoryManager);
+ const unsigned int shape0[] = { 1, 1, 2, 2 };
+ const unsigned int shape1[] = { 1, 1, 1, 1 };
+
+ std::vector<uint8_t> input0 = { 4, 5, 1, 0 };
+
+ std::vector<uint8_t> input1 = { 2 };
+
+ std::vector<uint8_t> output = { 4, 9, 1, 4 };
+
+ return ElementwiseTestHelper<4, armnn::DataType::QAsymmU8, armnn::DataType::QAsymmU8>(
+ workloadFactory,
+ memoryManager,
+ armnn::BinaryOperation::SqDiff,
+ shape0,
+ input0,
+ shape1,
+ input1,
+ shape0,
+ output,
+ tensorHandleFactory);
+}
+
+LayerTestResult<uint8_t, 4> SquaredDiffBroadcastUint8Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ IgnoreUnused(memoryManager);
+ const unsigned int shape0[] = { 1, 1, 2, 2 };
+ const unsigned int shape1[] = { 1, 1, 1, 2 };
+
+ std::vector<uint8_t> input0 = { 4, 12, 3, 6 };
+
+ std::vector<uint8_t> input1 = { 2, 9 };
+
+ std::vector<uint8_t> output = { 4, 9, 1, 9 };
+
+ return ElementwiseTestHelper<4, armnn::DataType::QAsymmU8, armnn::DataType::QAsymmU8>(
+ workloadFactory,
+ memoryManager,
+ armnn::BinaryOperation::SqDiff,
+ shape0,
+ input0,
+ shape1,
+ input1,
+ shape0,
+ output,
+ tensorHandleFactory);
+}
+
+LayerTestResult<int16_t, 4> SquaredDifferenceInt16Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ IgnoreUnused(memoryManager);
+ unsigned int shape[] = { 2, 2, 2, 2 };
+
+ std::vector<int16_t> input0 =
+ {
+ 1, 5, 1, 4, 6, 9, 6, 5,
+ 3, 2, 3, 6, 4, 4, 1, 4
+ };
+
+ std::vector<int16_t> input1 =
+ {
+ 2, 2, 0, 4, 3, 7, 3, 3,
+ 4, 4, 4, 9, 7, 5, 4, 5
+ };
+
+ std::vector<int16_t> output
+ {
+ 1, 9, 1, 0, 9, 4, 9, 4,
+ 1, 4, 1, 9, 9, 1, 9, 1
+ };
+
+ return ElementwiseTestHelper<4, armnn::DataType::QSymmS16, armnn::DataType::QSymmS16>(
+ workloadFactory,
+ memoryManager,
+ armnn::BinaryOperation::SqDiff,
+ shape,
+ input0,
+ shape,
+ input1,
+ shape,
+ output,
+ tensorHandleFactory);
+}
+
+LayerTestResult<int16_t, 4> SquaredDiffBroadcast1ElementInt16Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ IgnoreUnused(memoryManager);
+ const unsigned int shape0[] = { 1, 2, 2, 3 };
+ const unsigned int shape1[] = { 1, 1, 1, 1 };
+
+ std::vector<int16_t> input0 =
+ {
+ 1, 2, 3, 4, 5, 0,
+ 5, 4, 1, 4, 5, 2
+ };
+
+ std::vector<int16_t> input1 = { 2 };
+
+ std::vector<int16_t> output =
+ {
+ 1, 0, 1, 4, 9, 4,
+ 9, 4, 1, 4, 9, 0
+ };
+
+ return ElementwiseTestHelper<4, armnn::DataType::QSymmS16, armnn::DataType::QSymmS16>(
+ workloadFactory,
+ memoryManager,
+ armnn::BinaryOperation::SqDiff,
+ shape0,
+ input0,
+ shape1,
+ input1,
+ shape0,
+ output,
+ tensorHandleFactory);
+}
+
+LayerTestResult<int16_t, 4> SquaredDiffBroadcastInt16Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ IgnoreUnused(memoryManager);
+ const unsigned int shape0[] = { 1, 2, 2, 3 };
+ const unsigned int shape1[] = { 1, 1, 1, 3 };
+
+ std::vector<int16_t> input0 =
+ {
+ 4, 2, 1, 4, 5, 6,
+ 7, 3, 5, 8, 1, 5
+ };
+
+ std::vector<int16_t> input1 = { 7, 2, 3 };
+
+ std::vector<int16_t> output =
+ {
+ 9, 0, 4, 9, 9, 9,
+ 0, 1, 4, 1, 1, 4
+ };
+
+ return ElementwiseTestHelper<4, armnn::DataType::QSymmS16, armnn::DataType::QSymmS16>(
+ workloadFactory,
+ memoryManager,
+ armnn::BinaryOperation::SqDiff,
+ shape0,
+ input0,
+ shape1,
+ input1,
+ shape0,
+ output,
+ tensorHandleFactory);
+}
+
+LayerTestResult<int32_t, 4> SquaredDifferenceInt32Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ IgnoreUnused(memoryManager);
+ unsigned int shape[] = { 2, 2, 2, 2 };
+
+ std::vector<int32_t> input0 =
+ {
+ 1, 3, 4, 3, 6, 4, 2, 6,
+ 3, 1, 3, 1, 4, 3, 4, 3
+ };
+
+ std::vector<int32_t> input1 =
+ {
+ 2, 2, 2, 2, 3, 3, 4, 3,
+ 4, 4, 4, 4, 5, 5, 5, 5
+ };
+
+ std::vector<int32_t> output
+ {
+ 1, 1, 4, 1, 9, 1, 4, 9,
+ 1, 9, 1, 9, 1, 4, 1, 4
+ };
+
+ return ElementwiseTestHelper<4, armnn::DataType::Signed32, armnn::DataType::Signed32>(
+ workloadFactory,
+ memoryManager,
+ armnn::BinaryOperation::SqDiff,
+ shape,
+ input0,
+ shape,
+ input1,
+ shape,
+ output,
+ tensorHandleFactory);
+}
+
+LayerTestResult<int32_t, 4> SquaredDiffBroadcastInt32Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ IgnoreUnused(memoryManager);
+ const unsigned int shape0[] = { 1, 2, 2, 3 };
+ const unsigned int shape1[] = { 1, 1, 1, 3 };
+
+ std::vector<int32_t> input0 =
+ {
+ 4, 4, 3, 4, 5, 6,
+ 5, 8, 6, 3, 9, 5
+ };
+
+ std::vector<int32_t> input1 = { 2, 7, 3 };
+
+ std::vector<int32_t> output =
+ {
+ 4, 9, 0, 4, 4, 9,
+ 9, 1, 9, 1, 4, 4
+ };
+
+ return ElementwiseTestHelper<4, armnn::DataType::Signed32, armnn::DataType::Signed32>(
+ workloadFactory,
+ memoryManager,
+ armnn::BinaryOperation::SqDiff,
+ shape0,
+ input0,
+ shape1,
+ input1,
+ shape0,
+ output,
+ tensorHandleFactory);
+}
+
+LayerTestResult<int32_t, 4> SquaredDiffBroadcast1ElementInt32Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ IgnoreUnused(memoryManager);
+ const unsigned int shape0[] = { 1, 2, 2, 3 };
+ const unsigned int shape1[] = { 1, 1, 1, 1 };
+
+ std::vector<int32_t> input0 =
+ {
+ 1, 2, 3, 4, 5, 3,
+ 3, 1, 0, 2, 1, 5
+ };
+
+ std::vector<int32_t> input1 = { 2 };
+
+ std::vector<int32_t> output =
+ {
+ 1, 0, 1, 4, 9, 1,
+ 1, 1, 4, 0, 1, 9
+ };
+
+ return ElementwiseTestHelper<4, armnn::DataType::Signed32, armnn::DataType::Signed32>(
+ workloadFactory,
+ memoryManager,
+ armnn::BinaryOperation::SqDiff,
+ shape0,
+ input0,
+ shape1,
+ input1,
+ shape0,
+ output,
+ tensorHandleFactory);
+}
diff --git a/src/backends/backendsCommon/test/layerTests/SquaredDifferenceTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/SquaredDifferenceTestImpl.hpp
new file mode 100644
index 0000000000..1d87700d05
--- /dev/null
+++ b/src/backends/backendsCommon/test/layerTests/SquaredDifferenceTestImpl.hpp
@@ -0,0 +1,88 @@
+//
+// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <armnnTestUtils/LayerTestResult.hpp>
+
+#include <ResolveType.hpp>
+
+#include <armnn/backends/IBackendInternal.hpp>
+#include <armnn/backends/WorkloadFactory.hpp>
+
+LayerTestResult<float, 4> SquaredDifferenceTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+LayerTestResult<float, 4> SquaredDiffBroadcast1ElementTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+LayerTestResult<float, 4> SquaredDiffBroadcastTest(
+ armnn::IWorkloadFactory & workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+LayerTestResult<armnn::Half, 4> SquaredDifferenceFloat16Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+LayerTestResult<armnn::Half, 4> SquaredDiffBroadcast1ElementFloat16Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+LayerTestResult<armnn::Half, 4> SquaredDiffBroadcastFloat16Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+LayerTestResult<uint8_t, 4> SquaredDiffBroadcast1ElementUint8Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+LayerTestResult<uint8_t, 4> SquaredDifferenceUint8Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+LayerTestResult<uint8_t, 4> SquaredDiffBroadcastUint8Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+LayerTestResult<int16_t , 4> SquaredDifferenceInt16Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+LayerTestResult<int16_t, 4> SquaredDiffBroadcast1ElementInt16Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+LayerTestResult<int16_t, 4> SquaredDiffBroadcastInt16Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+LayerTestResult<int32_t, 4> SquaredDifferenceInt32Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+LayerTestResult<int32_t, 4> SquaredDiffBroadcastInt32Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+LayerTestResult<int32_t, 4> SquaredDiffBroadcast1ElementInt32Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
diff --git a/src/backends/reference/test/RefEndToEndTests.cpp b/src/backends/reference/test/RefEndToEndTests.cpp
index 95004c4dc2..4bb3f2947a 100644
--- a/src/backends/reference/test/RefEndToEndTests.cpp
+++ b/src/backends/reference/test/RefEndToEndTests.cpp
@@ -1602,6 +1602,22 @@ TEST_CASE("RefMinimumEndToEndTestUint8")
{
ElementwiseBinarySimpleEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends, BinaryOperation::Minimum);
}
+TEST_CASE("RefPowerEndToEndTestFloat32")
+{
+ ElementwiseBinarySimpleEndToEnd<armnn::DataType::Float32>(defaultBackends, BinaryOperation::Power);
+}
+TEST_CASE("RefPowerEndToEndTestUint8")
+{
+ ElementwiseBinarySimpleEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends, BinaryOperation::Power);
+}
+TEST_CASE("RefSquaredDifferenceEndToEndTestFloat32")
+{
+ ElementwiseBinarySimpleEndToEnd<armnn::DataType::Float32>(defaultBackends, BinaryOperation::SqDiff);
+}
+TEST_CASE("RefSquaredDifferenceEndToEndTestUint8")
+{
+ ElementwiseBinarySimpleEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends, BinaryOperation::SqDiff);
+}
#endif
}
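
The end-to-end cases above drive the new operations through a real graph: ElementwiseBinarySimpleEndToEnd builds a two-input network around an ElementwiseBinary layer and runs it on the reference backend. Below is a minimal sketch of how such a graph is assembled with the public builder API; ElementwiseBinaryDescriptor and AddElementwiseBinaryLayer are assumed from the existing Arm NN headers, and setting TensorInfos, Optimize() and LoadNetwork() are omitted:

    #include <armnn/Descriptors.hpp>
    #include <armnn/INetwork.hpp>

    armnn::INetworkPtr BuildPowerGraph()
    {
        using namespace armnn;
        INetworkPtr net = INetwork::Create();

        IConnectableLayer* in0 = net->AddInputLayer(0);
        IConnectableLayer* in1 = net->AddInputLayer(1);
        IConnectableLayer* op  = net->AddElementwiseBinaryLayer(
            ElementwiseBinaryDescriptor(BinaryOperation::Power), "power");
        IConnectableLayer* out = net->AddOutputLayer(0);

        in0->GetOutputSlot(0).Connect(op->GetInputSlot(0));
        in1->GetOutputSlot(0).Connect(op->GetInputSlot(1));
        op->GetOutputSlot(0).Connect(out->GetInputSlot(0));

        // Output-slot TensorInfos still need to be set before Optimize().
        return net;
    }
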
diff --git a/src/backends/reference/test/RefLayerTests.cpp b/src/backends/reference/test/RefLayerTests.cpp
index 0e228dbea9..8b89743870 100644
--- a/src/backends/reference/test/RefLayerTests.cpp
+++ b/src/backends/reference/test/RefLayerTests.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017,2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017,2022, 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -1003,6 +1003,48 @@ ARMNN_AUTO_TEST_CASE_WITH_THF(MultiplicationBroadcast1ElementInt32, Multiplicati
ARMNN_AUTO_TEST_CASE_WITH_THF(MultiplicationBroadcast1DVectorInt32, MultiplicationBroadcast1DVectorInt32Test)
ARMNN_AUTO_TEST_CASE_WITH_THF(Multiplication5d, Multiplication5dTest)
+// SquaredDifference
+ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleSquaredDifference, SquaredDifferenceTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(SquaredDiffBroadcast1Element, SquaredDiffBroadcast1ElementTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(SquaredDiffBroadcast, SquaredDiffBroadcastTest)
+
+ARMNN_AUTO_TEST_CASE_WITH_THF(SquaredDifferenceFloat16, SquaredDifferenceFloat16Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(SquaredDiffBroadcast1ElementFloat16, SquaredDiffBroadcast1ElementFloat16Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(SquaredDiffBroadcastFloat16, SquaredDiffBroadcastFloat16Test)
+
+ARMNN_AUTO_TEST_CASE_WITH_THF(SquaredDifferenceUint8, SquaredDifferenceUint8Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(SquaredDiffBroadcastUint8, SquaredDiffBroadcastUint8Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(SquaredDiffBroadcast1ElementUint8, SquaredDiffBroadcast1ElementUint8Test)
+
+ARMNN_AUTO_TEST_CASE_WITH_THF(SquaredDifferenceInt16, SquaredDifferenceInt16Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(SquaredDiffBroadcastInt16, SquaredDiffBroadcastInt16Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(SquaredDiffBroadcast1ElementInt16, SquaredDiffBroadcast1ElementInt16Test)
+
+ARMNN_AUTO_TEST_CASE_WITH_THF(SquaredDifferenceInt32, SquaredDifferenceInt32Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(SquaredDiffBroadcastInt32, SquaredDiffBroadcastInt32Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(SquaredDiffBroadcast1ElementInt32, SquaredDiffBroadcast1ElementInt32Test)
+
+// Power
+ARMNN_AUTO_TEST_CASE_WITH_THF(SimplePower, PowerTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(PowerBroadcast1Element, PowerBroadcast1ElementTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(PowerBroadcast, PowerBroadcastTest)
+
+ARMNN_AUTO_TEST_CASE_WITH_THF(PowerFloat16, PowerFloat16Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(PowerBroadcast1ElementFloat16, PowerBroadcast1ElementFloat16Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(PowerBroadcastFloat16, PowerBroadcastFloat16Test)
+
+ARMNN_AUTO_TEST_CASE_WITH_THF(PowerUint8, PowerUint8Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(PowerBroadcastUint8, PowerBroadcastUint8Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(PowerBroadcast1ElementUint8, PowerBroadcast1ElementUint8Test)
+
+ARMNN_AUTO_TEST_CASE_WITH_THF(PowerInt16, PowerInt16Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(PowerBroadcastInt16, PowerBroadcastInt16Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(PowerBroadcast1ElementInt16, PowerBroadcast1ElementInt16Test)
+
+ARMNN_AUTO_TEST_CASE_WITH_THF(PowerInt32, PowerInt32Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(PowerBroadcastInt32, PowerBroadcastInt32Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(PowerBroadcast1ElementInt32, PowerBroadcast1ElementInt32Test)
+
// Batch Mat Mul
ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul2DSimpleFloat32, BatchMatMul2DSimpleTest<DataType::Float32>);
ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul2DSimpleFloat16, BatchMatMul2DSimpleTest<DataType::Float16>);
diff --git a/src/backends/reference/workloads/ElementwiseFunction.cpp b/src/backends/reference/workloads/ElementwiseFunction.cpp
index c5b0ad1f24..4044f06ac4 100644
--- a/src/backends/reference/workloads/ElementwiseFunction.cpp
+++ b/src/backends/reference/workloads/ElementwiseFunction.cpp
@@ -14,6 +14,8 @@
#include "Rsqrt.hpp"
#include "Sin.hpp"
#include "Sqrt.hpp"
+#include "Power.hpp"
+#include "SquaredDifference.hpp"
namespace armnn
@@ -67,6 +69,8 @@ template struct armnn::ElementwiseBinaryFunction<std::multiplies<float>>;
template struct armnn::ElementwiseBinaryFunction<std::divides<float>>;
template struct armnn::ElementwiseBinaryFunction<armnn::maximum<float>>;
template struct armnn::ElementwiseBinaryFunction<armnn::minimum<float>>;
+template struct armnn::ElementwiseBinaryFunction<armnn::power<float>>;
+template struct armnn::ElementwiseBinaryFunction<armnn::squaredDifference<float>>;
template struct armnn::ElementwiseBinaryFunction<std::plus<int32_t>>;
template struct armnn::ElementwiseBinaryFunction<std::minus<int32_t>>;
@@ -74,6 +78,8 @@ template struct armnn::ElementwiseBinaryFunction<std::multiplies<int32_t>>;
template struct armnn::ElementwiseBinaryFunction<std::divides<int32_t>>;
template struct armnn::ElementwiseBinaryFunction<armnn::maximum<int32_t>>;
template struct armnn::ElementwiseBinaryFunction<armnn::minimum<int32_t>>;
+template struct armnn::ElementwiseBinaryFunction<armnn::power<int32_t>>;
+template struct armnn::ElementwiseBinaryFunction<armnn::squaredDifference<int32_t>>;
// Comparison
template struct armnn::ElementwiseBinaryFunction<std::equal_to<float>>;
diff --git a/src/backends/reference/workloads/Power.hpp b/src/backends/reference/workloads/Power.hpp
new file mode 100644
index 0000000000..744328e8bf
--- /dev/null
+++ b/src/backends/reference/workloads/Power.hpp
@@ -0,0 +1,27 @@
+//
+// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <cmath>
+
+namespace armnn
+{
+
+template<typename T>
+struct power
+{
+ typedef T result_type;
+ typedef T first_argument_type;
+
+ T
+ operator()(const T& input1, const T& input2) const
+ {
+ T power = armnn::numeric_cast<T>(std::pow(static_cast<float>(input1), static_cast<float>(input2)));
+ return power;
+ }
+};
+
+} //namespace armnn
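
Note that power<T> evaluates integer types by converting both operands to float, calling std::pow, and narrowing back through armnn::numeric_cast, so very large base/exponent pairs can lose precision or fail the narrowing check. A self-contained sketch of that behaviour; static_cast stands in for armnn::numeric_cast here, so this is an approximation rather than the patch code:

    #include <cassert>
    #include <cmath>
    #include <cstdint>

    template <typename T>
    T PowerLikeRef(T a, T b) // float round-trip, as in armnn::power<T>
    {
        return static_cast<T>(std::pow(static_cast<float>(a), static_cast<float>(b)));
    }

    int main()
    {
        assert(PowerLikeRef<int32_t>(2, 5) == 32);     // 2^5
        assert(PowerLikeRef<float>(9.f, 2.f) == 81.f); // 9^2
        return 0;
    }
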
diff --git a/src/backends/reference/workloads/RefElementwiseBinaryWorkload.cpp b/src/backends/reference/workloads/RefElementwiseBinaryWorkload.cpp
index 5dc77f8496..e71cdd4e3c 100644
--- a/src/backends/reference/workloads/RefElementwiseBinaryWorkload.cpp
+++ b/src/backends/reference/workloads/RefElementwiseBinaryWorkload.cpp
@@ -11,6 +11,8 @@
#include "RefWorkloadUtils.hpp"
#include "Maximum.hpp"
#include "Minimum.hpp"
+#include "SquaredDifference.hpp"
+#include "Power.hpp"
#include <Profiling.hpp>
@@ -44,6 +46,8 @@ void ExecuteFunction(std::vector<ITensorHandle*> inputs,
using MinimumFunction = ElementwiseBinaryFunction<armnn::minimum<DataType>>;
using MulFunction = ElementwiseBinaryFunction<std::multiplies<DataType>>;
using SubFunction = ElementwiseBinaryFunction<std::minus<DataType>>;
+ using SqDiffFunction = ElementwiseBinaryFunction<armnn::squaredDifference<DataType>>;
+ using PowerFunction = ElementwiseBinaryFunction<armnn::power<DataType>>;
switch (operation)
{
@@ -77,6 +81,16 @@ void ExecuteFunction(std::vector<ITensorHandle*> inputs,
SubFunction(inShape0, inShape1, outShape, *input0, *input1, *output);
break;
}
+ case BinaryOperation::SqDiff:
+ {
+ SqDiffFunction(inShape0, inShape1, outShape, *input0, *input1, *output);
+ break;
+ }
+ case BinaryOperation::Power:
+ {
+ PowerFunction(inShape0, inShape1, outShape, *input0, *input1, *output);
+ break;
+ }
default:
{
throw InvalidArgumentException(std::string("Unsupported binary operation ") +
diff --git a/src/backends/reference/workloads/SquaredDifference.hpp b/src/backends/reference/workloads/SquaredDifference.hpp
new file mode 100644
index 0000000000..c15b379a4d
--- /dev/null
+++ b/src/backends/reference/workloads/SquaredDifference.hpp
@@ -0,0 +1,28 @@
+//
+// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <cmath>
+
+namespace armnn
+{
+
+template<typename T>
+struct squaredDifference
+{
+ typedef T result_type;
+ typedef T first_argument_type;
+
+ T
+ operator()(const T& input1, const T& input2) const
+ {
+ float diff = std::minus<>{}(static_cast<float>(input1),static_cast<float>(input2));
+ T squaredDiff = armnn::numeric_cast<T>(std::pow(static_cast<float>(diff), 2));
+ return squaredDiff;
+ }
+};
+
+} //namespace armnn
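
Like power, the squaredDifference functor does its arithmetic in float regardless of T: it subtracts the operands, squares the difference, and narrows the result back with armnn::numeric_cast. A short equivalent sketch, again with static_cast standing in for armnn::numeric_cast:

    #include <cassert>
    #include <cstdint>

    template <typename T>
    T SquaredDifferenceRef(T a, T b) // (a - b)^2 computed in float
    {
        const float diff = static_cast<float>(a) - static_cast<float>(b);
        return static_cast<T>(diff * diff);
    }

    int main()
    {
        assert(SquaredDifferenceRef<int32_t>(1, 4) == 9);     // (1 - 4)^2
        assert(SquaredDifferenceRef<float>(7.f, 5.f) == 4.f); // (7 - 5)^2
        return 0;
    }
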