author     Tracy Narine <tracy.narine@arm.com>    2024-01-26 09:13:19 +0000
committer  Tracy Narine <tracy.narine@arm.com>    2024-01-30 12:40:20 +0000
commit     e7d278593b2858a451d246e1f9ba47720af1a532 (patch)
tree       6307202a4807c4c5b130b7fc76993edb8fa4638b
parent     fbfa49eeb14c6cb94d47e3c770b0c168e818cf79 (diff)
download   armnn-e7d278593b2858a451d246e1f9ba47720af1a532.tar.gz
IVGCVSW-7550 GpuFsa Op: Add ElementWiseBinary Operator ADD
* Adding support for Gpu Add operator
* Added tests for layer support, end to end and optimization

Signed-off-by: Tracy Narine <tracy.narine@arm.com>
Change-Id: Ie9328d269c5c0ff60a7e10133b728ac9265033af
-rw-r--r--  src/backends/backendsCommon/test/ElementwiseBinaryEndToEndTestImpl.hpp   67
-rw-r--r--  src/backends/cl/test/ClEndToEndTests.cpp                                 11
-rw-r--r--  src/backends/gpuFsa/GpuFsaBackend.cpp                                    13
-rw-r--r--  src/backends/gpuFsa/GpuFsaLayerSupport.cpp                               35
-rw-r--r--  src/backends/gpuFsa/layers/CMakeLists.txt                                 2
-rw-r--r--  src/backends/gpuFsa/layers/GpuFsaDepthwiseConvolution2d.cpp               1
-rw-r--r--  src/backends/gpuFsa/layers/GpuFsaElementwiseBinaryAdd.cpp                86
-rw-r--r--  src/backends/gpuFsa/layers/GpuFsaElementwiseBinaryAdd.hpp                26
-rw-r--r--  src/backends/gpuFsa/test/GpuFsaEndToEndTests.cpp                         13
-rw-r--r--  src/backends/gpuFsa/test/GpuFsaLayerSupportTests.cpp                     22
-rw-r--r--  src/backends/gpuFsa/test/GpuFsaOptimizedNetworkTests.cpp                 50
-rw-r--r--  src/backends/neon/test/NeonEndToEndTests.cpp                             11
-rw-r--r--  src/backends/reference/test/RefEndToEndTests.cpp                          8
13 files changed, 333 insertions, 12 deletions
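
A minimal usage sketch (not part of the patch) of the new validation entry point added by this change; it mirrors how GpuFsaLayerSupport.cpp below forwards ElementwiseBinary Add, and the Status check matches the one used in GpuFsaElementwiseBinaryAdd.cpp:

    // Sketch only: query the new GpuFsa Add validation directly.
    #include "layers/GpuFsaElementwiseBinaryAdd.hpp"

    bool IsGpuFsaAddSupported()
    {
        armnn::TensorInfo input0({ 2, 2 }, armnn::DataType::Float32);
        armnn::TensorInfo input1({ 2, 2 }, armnn::DataType::Float32);

        arm_compute::Status status = armnn::GpuFsaElementwiseBinaryAddValidate(input0, input1);
        return status.error_code() == arm_compute::ErrorCode::OK;
    }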
diff --git a/src/backends/backendsCommon/test/ElementwiseBinaryEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/ElementwiseBinaryEndToEndTestImpl.hpp
index dfc7bfc18e..5138e496e8 100644
--- a/src/backends/backendsCommon/test/ElementwiseBinaryEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/ElementwiseBinaryEndToEndTestImpl.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2023 Arm Ltd and contributors. All rights reserved.
+// Copyright © 2023-2024 Arm Ltd and contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
@@ -110,6 +110,71 @@ void ElementwiseBinarySimpleEndToEnd(const std::vector<BackendId>& backends,
EndToEndLayerTestImpl<ArmnnInType, ArmnnInType>(std::move(net), inputTensorData, expectedOutputData, backends);
}
+
+template<armnn::DataType ArmnnInType,
+ typename TInput = armnn::ResolveType<ArmnnInType>>
+void ElementwiseBinarySimple3DEndToEnd(const std::vector<BackendId>& backends,
+ BinaryOperation operation)
+{
+ using namespace armnn;
+
+ const float qScale = IsQuantizedType<TInput>() ? 0.25f : 1.0f;
+ const int32_t qOffset = IsQuantizedType<TInput>() ? 50 : 0;
+
+ const TensorShape& input1Shape = { 2, 2 };
+ const TensorShape& input2Shape = { 2, 2 };
+ const TensorShape& outputShape = { 2, 2 };
+
+ // Builds up the structure of the network
+ INetworkPtr net = CreateElementwiseBinaryNetwork<ArmnnInType>(input1Shape, input2Shape, outputShape,
+ operation, qScale, qOffset);
+
+ CHECK(net);
+
+ const std::vector<float> input1({ 1, -1, 1, 1 });
+
+ const std::vector<float> input2({ 2, 2, 2, 2 });
+ std::vector<float> expectedOutput;
+ switch (operation) {
+ case armnn::BinaryOperation::Add:
+ expectedOutput = { 3, 1, 3, 3 };
+ break;
+ case armnn::BinaryOperation::Div:
+ expectedOutput = {0.5f, -0.5f, 0.5f, 0.5f };
+ break;
+ case armnn::BinaryOperation::Maximum:
+ expectedOutput = { 2, 2, 2, 2 };
+ break;
+ case armnn::BinaryOperation::Minimum:
+ expectedOutput = { 1, -1, 1, 1 };
+ break;
+ case armnn::BinaryOperation::Mul:
+ expectedOutput = { 2, -2, 2, 2 };
+ break;
+ case armnn::BinaryOperation::Sub:
+ expectedOutput = { -1, -3, -1, -1 };
+ break;
+ case armnn::BinaryOperation::SqDiff:
+ expectedOutput = { 1, 9, 1, 1 };
+ break;
+ case armnn::BinaryOperation::Power:
+ expectedOutput = { 1, 1, 1, 1 };
+ break;
+ default:
+ throw("Invalid Elementwise Binary operation");
+ }
+ const std::vector<float> expectedOutput_const = expectedOutput;
+ // quantize data
+ std::vector<TInput> qInput1Data = armnnUtils::QuantizedVector<TInput>(input1, qScale, qOffset);
+ std::vector<TInput> qInput2Data = armnnUtils::QuantizedVector<TInput>(input2, qScale, qOffset);
+ std::vector<TInput> qExpectedOutput = armnnUtils::QuantizedVector<TInput>(expectedOutput_const, qScale, qOffset);
+
+ std::map<int, std::vector<TInput>> inputTensorData = {{ 0, qInput1Data }, { 1, qInput2Data }};
+ std::map<int, std::vector<TInput>> expectedOutputData = {{ 0, qExpectedOutput }};
+
+ EndToEndLayerTestImpl<ArmnnInType, ArmnnInType>(std::move(net), inputTensorData, expectedOutputData, backends);
+}
+
template<armnn::DataType ArmnnInType,
typename TInput = armnn::ResolveType<ArmnnInType>>
void ElementwiseBinarySimpleNoReshapeEndToEnd(const std::vector<BackendId>& backends,
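
The new ElementwiseBinarySimple3DEndToEnd helper above is instantiated per backend and data type; a minimal sketch of a caller follows (the backend id here is illustrative — the tests in this patch use clDefaultBackends, neonDefaultBackends, gpuFsaDefaultBackends and defaultBackends):

    // Sketch only: run the shared end-to-end Add test against a chosen backend list.
    std::vector<armnn::BackendId> backends = { "GpuFsa" };
    ElementwiseBinarySimple3DEndToEnd<armnn::DataType::Float32>(backends, armnn::BinaryOperation::Add);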
diff --git a/src/backends/cl/test/ClEndToEndTests.cpp b/src/backends/cl/test/ClEndToEndTests.cpp
index b83bb1b0ad..8b2d6a514d 100644
--- a/src/backends/cl/test/ClEndToEndTests.cpp
+++ b/src/backends/cl/test/ClEndToEndTests.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017-2023 Arm Ltd. All rights reserved.
+// Copyright © 2017-2024 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -64,6 +64,15 @@ TEST_CASE("ClAdditionEndToEndUint8Test")
AdditionEndToEnd<armnn::DataType::QAsymmU8>(clDefaultBackends);
}
+TEST_CASE("ClAdditionEndToEndFloat32Simple3DTest")
+{
+ ElementwiseBinarySimple3DEndToEnd<armnn::DataType::Float32>(clDefaultBackends, BinaryOperation::Add);
+}
+TEST_CASE("ClAdditionEndToEndFloat16Simple3DTest")
+{
+ ElementwiseBinarySimple3DEndToEnd<armnn::DataType::Float16>(clDefaultBackends, BinaryOperation::Add);
+}
+
// Power
TEST_CASE("ClPowerEndToEndTestFloat32")
{
diff --git a/src/backends/gpuFsa/GpuFsaBackend.cpp b/src/backends/gpuFsa/GpuFsaBackend.cpp
index e80369965b..7951b17d9d 100644
--- a/src/backends/gpuFsa/GpuFsaBackend.cpp
+++ b/src/backends/gpuFsa/GpuFsaBackend.cpp
@@ -22,6 +22,7 @@
#include "layers/GpuFsaConvolution2d.hpp"
#include "layers/GpuFsaDepthwiseConvolution2d.hpp"
+#include "layers/GpuFsaElementwiseBinaryAdd.hpp"
namespace armnn
{
@@ -294,6 +295,18 @@ OptimizationViews GpuFsaBackend::OptimizeSubgraphView(const SubgraphView& subgra
}
break;
}
+ case LayerType::ElementwiseBinary:
+ {
+ auto desc = PolymorphicDowncast<const ElementwiseBinaryDescriptor *>(&base.GetParameters());
+ if (desc->m_Operation == BinaryOperation::Add)
+ {
+ auto input0 = base.GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo();
+ auto input1 = base.GetInputSlot(1).GetConnectedOutputSlot()->GetTensorInfo();
+
+ GpuFsaElementwiseBinaryAddCreateOp(preCompiledBlobPtr, input0, input1);
+ }
+ break;
+ }
default:
// unsupported layer for GpuFsa backend
continue;
diff --git a/src/backends/gpuFsa/GpuFsaLayerSupport.cpp b/src/backends/gpuFsa/GpuFsaLayerSupport.cpp
index 18c9ac8f5b..e98275c166 100644
--- a/src/backends/gpuFsa/GpuFsaLayerSupport.cpp
+++ b/src/backends/gpuFsa/GpuFsaLayerSupport.cpp
@@ -12,6 +12,7 @@
#if defined(ARMCOMPUTEGPUFSA_ENABLED)
#include "layers/GpuFsaConvolution2d.hpp"
#include "layers/GpuFsaDepthwiseConvolution2d.hpp"
+#include "layers/GpuFsaElementwiseBinaryAdd.hpp"
#endif
#include <vector>
@@ -79,13 +80,13 @@ bool GpuFsaLayerSupport::IsLayerSupported(const LayerType& type,
"TensorInfos should be of format: {input, output, weights, biases}.");
}
- auto desc = *(PolymorphicDowncast<const Convolution2dDescriptor*>(&descriptor));
+ auto desc = PolymorphicDowncast<const Convolution2dDescriptor*>(&descriptor);
if (infos[3] == TensorInfo())
{
FORWARD_LAYER_VALIDATE_FUNC(GpuFsaConvolution2dValidate,
reasonIfUnsupported,
infos[0],
- desc,
+ *desc,
infos[2],
EmptyOptional());
}
@@ -94,7 +95,7 @@ bool GpuFsaLayerSupport::IsLayerSupported(const LayerType& type,
FORWARD_LAYER_VALIDATE_FUNC(GpuFsaConvolution2dValidate,
reasonIfUnsupported,
infos[0],
- desc,
+ *desc,
infos[2],
infos[3]);
}
@@ -107,13 +108,13 @@ bool GpuFsaLayerSupport::IsLayerSupported(const LayerType& type,
"TensorInfos should be of format: {input, output, weights, biases}.");
}
- auto desc = *(PolymorphicDowncast<const DepthwiseConvolution2dDescriptor*>(&descriptor));
+ auto desc = PolymorphicDowncast<const DepthwiseConvolution2dDescriptor *>(&descriptor);
if (infos[3] == TensorInfo())
{
FORWARD_LAYER_VALIDATE_FUNC(GpuFsaDepthwiseConvolution2dValidate,
reasonIfUnsupported,
infos[0],
- desc,
+ *desc,
infos[2],
EmptyOptional());
}
@@ -122,11 +123,33 @@ bool GpuFsaLayerSupport::IsLayerSupported(const LayerType& type,
FORWARD_LAYER_VALIDATE_FUNC(GpuFsaDepthwiseConvolution2dValidate,
reasonIfUnsupported,
infos[0],
- desc,
+ *desc,
infos[2],
infos[3]);
}
}
+ case LayerType::ElementwiseBinary:
+ {
+ if (infos.size() != 3)
+ {
+ throw InvalidArgumentException("Invalid number of ElementwiseBinary TensorInfos. "
+ "TensorInfos should be of format: {input0, input1, output0}.");
+ }
+
+ auto desc = PolymorphicDowncast<const ElementwiseBinaryDescriptor *>(&descriptor);
+ if (desc->m_Operation == BinaryOperation::Add)
+ {
+ FORWARD_LAYER_VALIDATE_FUNC(GpuFsaElementwiseBinaryAddValidate,
+ reasonIfUnsupported,
+ infos[0],
+ infos[1]);
+ }
+ else
+ {
+ throw InvalidArgumentException("Invalid ElementwiseBinary BinaryOperation operation.");
+ }
+ return false;
+ }
case LayerType::Constant:
case LayerType::Input:
case LayerType::Output:
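
A minimal sketch (not part of the patch) of how this new ElementwiseBinary branch is exercised through the public layer-support interface; the TensorInfos must follow the {input0, input1, output0} ordering the branch checks, and the call mirrors the test added in GpuFsaLayerSupportTests.cpp further down:

    // Sketch only: check GpuFsa support for an elementwise Add layer.
    armnn::GpuFsaLayerSupport supportChecker;
    armnn::ElementwiseBinaryDescriptor desc;
    desc.m_Operation = armnn::BinaryOperation::Add;

    armnn::TensorInfo info({ 2, 2 }, armnn::DataType::Float32);
    std::string reasonIfNotSupported;
    bool supported = supportChecker.IsLayerSupported(armnn::LayerType::ElementwiseBinary,
                                                     { info, info, info },   // {input0, input1, output0}
                                                     desc,
                                                     armnn::EmptyOptional(),
                                                     armnn::EmptyOptional(),
                                                     reasonIfNotSupported);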
diff --git a/src/backends/gpuFsa/layers/CMakeLists.txt b/src/backends/gpuFsa/layers/CMakeLists.txt
index c174c51640..bba795eedb 100644
--- a/src/backends/gpuFsa/layers/CMakeLists.txt
+++ b/src/backends/gpuFsa/layers/CMakeLists.txt
@@ -8,6 +8,8 @@ list(APPEND armnnGpuFsaBackendLayerValidators_sources
GpuFsaConvolution2d.hpp
GpuFsaDepthwiseConvolution2d.cpp
GpuFsaDepthwiseConvolution2d.hpp
+ GpuFsaElementwiseBinaryAdd.cpp
+ GpuFsaElementwiseBinaryAdd.hpp
)
add_library(armnnGpuFsaBackendLayerValidators OBJECT ${armnnGpuFsaBackendLayerValidators_sources})
diff --git a/src/backends/gpuFsa/layers/GpuFsaDepthwiseConvolution2d.cpp b/src/backends/gpuFsa/layers/GpuFsaDepthwiseConvolution2d.cpp
index 01a36f2a8b..dd55d4d64a 100644
--- a/src/backends/gpuFsa/layers/GpuFsaDepthwiseConvolution2d.cpp
+++ b/src/backends/gpuFsa/layers/GpuFsaDepthwiseConvolution2d.cpp
@@ -18,7 +18,6 @@
#include <arm_compute/dynamic_fusion/runtime/gpu/cl/ClWorkloadRuntime.h>
#include <arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadContext.h>
-#include <src/dynamic_fusion/sketch/gpu/GpuWorkloadContextImpl.h>
#include <arm_compute/dynamic_fusion/sketch/gpu/operators/GpuDepthwiseConv2d.h>
#include <arm_compute/dynamic_fusion/sketch/gpu/operators/GpuOutput.h>
diff --git a/src/backends/gpuFsa/layers/GpuFsaElementwiseBinaryAdd.cpp b/src/backends/gpuFsa/layers/GpuFsaElementwiseBinaryAdd.cpp
new file mode 100644
index 0000000000..bc8b37067c
--- /dev/null
+++ b/src/backends/gpuFsa/layers/GpuFsaElementwiseBinaryAdd.cpp
@@ -0,0 +1,86 @@
+//
+// Copyright © 2024 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "GpuFsaElementwiseBinaryAdd.hpp"
+
+#include <armnn/Types.hpp>
+
+#include <aclCommon/ArmComputeTensorUtils.hpp>
+
+#include <arm_compute/core/CL/CLKernelLibrary.h>
+
+#include <arm_compute/dynamic_fusion/sketch/gpu/operators/GpuAdd.h>
+#include <arm_compute/dynamic_fusion/sketch/gpu/operators/GpuOutput.h>
+#include <arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadContext.h>
+#include <arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadSketch.h>
+
+using namespace arm_compute::experimental::dynamic_fusion;
+
+namespace armnn
+{
+
+arm_compute::Status GpuFsaElementwiseBinaryAddValidate(const TensorInfo& input0,
+ const TensorInfo& input1)
+{
+ using namespace armcomputetensorutils;
+
+ // Create a new workload sketch, for validation purposes
+ auto compileCtx = arm_compute::CLKernelLibrary::get().get_compile_context();
+ auto workloadContext = GpuWorkloadContext(&compileCtx);
+ GpuWorkloadSketch sketch{ &workloadContext };
+
+ arm_compute::TensorInfo aclInput0Info = BuildArmComputeTensorInfo(input0, input0.GetNumDimensions());
+ arm_compute::TensorInfo aclInput1Info = BuildArmComputeTensorInfo(input1, input1.GetNumDimensions());
+
+ aclInput0Info.set_are_values_constant(input0.IsConstant());
+ aclInput1Info.set_are_values_constant(input1.IsConstant());
+
+ arm_compute::ITensorInfo* inputInfo0 = workloadContext.create_tensor_info(aclInput0Info);
+ arm_compute::ITensorInfo* inputInfo1 = workloadContext.create_tensor_info(aclInput1Info);
+
+ return GpuAdd::validate_op(sketch, inputInfo0, inputInfo1);
+}
+
+void GpuFsaElementwiseBinaryAddCreateOp(GpuFsaPreCompiledBlob* blob,
+ const TensorInfo& input0,
+ const TensorInfo& input1)
+{
+ using namespace armcomputetensorutils;
+
+ GpuWorkloadSketch* sketch = blob->sketch.get();
+ GpuWorkloadContext* workloadContext = blob->workloadContext.get();
+ std::vector<arm_compute::ITensorInfo*> inputTensorInfos = {};
+ std::vector<arm_compute::ITensorInfo*> outputTensorInfos = {};
+
+ arm_compute::TensorInfo aclInput0Info = BuildArmComputeTensorInfo(input0, input0.GetNumDimensions());
+ arm_compute::TensorInfo aclInput1Info = BuildArmComputeTensorInfo(input1, input1.GetNumDimensions());
+
+ aclInput0Info.set_are_values_constant(input0.IsConstant());
+ aclInput1Info.set_are_values_constant(input1.IsConstant());
+
+ inputTensorInfos.emplace_back(workloadContext->create_tensor_info(aclInput0Info));
+ inputTensorInfos.emplace_back(workloadContext->create_tensor_info(aclInput1Info));
+
+ // Validate operator, check status and update reasonIfUnsupported
+ arm_compute::Status aclStatus = GpuAdd::validate_op(*sketch, inputTensorInfos[0], inputTensorInfos[1]);
+ const bool supported = aclStatus.error_code() == arm_compute::ErrorCode::OK;
+ if (!supported)
+ {
+ throw BackendCapabilityException("\"GpuFsa\" backend failed during elementwise binary add validation");
+ }
+
+ arm_compute::ITensorInfo* addOutputInfo =
+ GpuAdd::create_op(*sketch, inputTensorInfos[0], inputTensorInfos[1]);
+
+ // Temporary fix until a fusing attempt is made for the GpuFsa backend and the Output layer workload is created.
+ outputTensorInfos.emplace_back(workloadContext->create_tensor_info());
+ GpuOutput::create_op(*sketch, addOutputInfo, outputTensorInfos[0]);
+
+ // Store the TensorInfos within the blob as unique_ptrs to be used later
+ blob->inputTensorInfos = std::make_unique<std::vector<arm_compute::ITensorInfo*>>(inputTensorInfos);
+ blob->outputTensorInfos = std::make_unique<std::vector<arm_compute::ITensorInfo*>>(outputTensorInfos);
+}
+
+} // namespace armnn
\ No newline at end of file
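
GpuFsaElementwiseBinaryAddCreateOp re-runs GpuAdd::validate_op internally and throws BackendCapabilityException if it fails, so a defensive caller can validate up front; a minimal sketch of that flow (the blob is assumed to be the preCompiledBlobPtr that GpuFsaBackend::OptimizeSubgraphView passes in, as shown earlier in this patch):

    // Sketch only: validate, then record the fused Add op into the pre-compiled blob.
    arm_compute::Status status = armnn::GpuFsaElementwiseBinaryAddValidate(input0, input1);
    if (status.error_code() == arm_compute::ErrorCode::OK)
    {
        armnn::GpuFsaElementwiseBinaryAddCreateOp(preCompiledBlobPtr, input0, input1);
    }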
diff --git a/src/backends/gpuFsa/layers/GpuFsaElementwiseBinaryAdd.hpp b/src/backends/gpuFsa/layers/GpuFsaElementwiseBinaryAdd.hpp
new file mode 100644
index 0000000000..8221f0e679
--- /dev/null
+++ b/src/backends/gpuFsa/layers/GpuFsaElementwiseBinaryAdd.hpp
@@ -0,0 +1,26 @@
+//
+// Copyright © 2024 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include <armnn/Descriptors.hpp>
+#include <armnn/Tensor.hpp>
+
+#include <arm_compute/core/Error.h>
+#include <arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadSketch.h>
+#include <gpuFsa/GpuFsaBackend.hpp>
+
+namespace armnn
+{
+
+ using namespace arm_compute::experimental::dynamic_fusion;
+
+ arm_compute::Status GpuFsaElementwiseBinaryAddValidate(const TensorInfo& input0,
+ const TensorInfo& input1);
+
+ void GpuFsaElementwiseBinaryAddCreateOp(GpuFsaPreCompiledBlob* blob,
+ const TensorInfo& input0,
+ const TensorInfo& input1);
+
+} // namespace armnn
\ No newline at end of file
diff --git a/src/backends/gpuFsa/test/GpuFsaEndToEndTests.cpp b/src/backends/gpuFsa/test/GpuFsaEndToEndTests.cpp
index 79dd9d357d..9972b24637 100644
--- a/src/backends/gpuFsa/test/GpuFsaEndToEndTests.cpp
+++ b/src/backends/gpuFsa/test/GpuFsaEndToEndTests.cpp
@@ -6,7 +6,9 @@
#include "backendsCommon/test/EndToEndTestImpl.hpp"
#include "backendsCommon/test/Convolution2dEndToEndTestImpl.hpp"
+
#include "backendsCommon/test/DepthwiseConvolution2dEndToEndTests.hpp"
+#include "backendsCommon/test/ElementwiseBinaryEndToEndTestImpl.hpp"
#include <doctest/doctest.h>
@@ -32,4 +34,15 @@ TEST_CASE("GpuFsaDepthwiseConvolution2dEndtoEndTestFloat32")
armnn::DataLayout::NHWC);
}
+// ElementwiseBinary Add
+TEST_CASE("GpuFsaElementwiseBinaryAddTestFloat32")
+{
+ ElementwiseBinarySimple3DEndToEnd<armnn::DataType::Float32>(gpuFsaDefaultBackends, BinaryOperation::Add);
+}
+
+TEST_CASE("GpuFsaElementwiseBinaryAddTestFloat16")
+{
+ ElementwiseBinarySimple3DEndToEnd<armnn::DataType::Float16>(gpuFsaDefaultBackends, BinaryOperation::Add);
+}
+
}
diff --git a/src/backends/gpuFsa/test/GpuFsaLayerSupportTests.cpp b/src/backends/gpuFsa/test/GpuFsaLayerSupportTests.cpp
index f162df0b55..49ddadea59 100644
--- a/src/backends/gpuFsa/test/GpuFsaLayerSupportTests.cpp
+++ b/src/backends/gpuFsa/test/GpuFsaLayerSupportTests.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -61,4 +61,24 @@ TEST_CASE("IsLayerSupportedGpuFsaConv2dUnsupported")
REQUIRE(reasonIfNotSupported.find("NCHW not supported by this kernel") != std::string::npos);
}
+TEST_CASE("IsLayerSupportedGpuFsaElementWiseBinaryAdd")
+{
+ TensorInfo input0Info({ 2, 2 }, DataType::Float32);
+ TensorInfo input1Info({ 2, 2 }, DataType::Float32);
+ TensorInfo outputInfo({ 2, 2 }, DataType::Float32);
+
+ ElementwiseBinaryDescriptor desc;
+ desc.m_Operation = BinaryOperation::Add;
+
+ GpuFsaLayerSupport supportChecker;
+ std::string reasonIfNotSupported;
+ auto supported = supportChecker.IsLayerSupported(LayerType::ElementwiseBinary,
+ {input0Info, input1Info, outputInfo},
+ desc,
+ EmptyOptional(),
+ EmptyOptional(),
+ reasonIfNotSupported);
+ CHECK(supported);
+}
+
}
\ No newline at end of file
diff --git a/src/backends/gpuFsa/test/GpuFsaOptimizedNetworkTests.cpp b/src/backends/gpuFsa/test/GpuFsaOptimizedNetworkTests.cpp
index 7e094cec1e..673a52a5a0 100644
--- a/src/backends/gpuFsa/test/GpuFsaOptimizedNetworkTests.cpp
+++ b/src/backends/gpuFsa/test/GpuFsaOptimizedNetworkTests.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -134,4 +134,52 @@ TEST_CASE("TwoConv2dSupportedOptimizedNetwork")
&IsLayerOfType<OutputLayer>));
}
+TEST_CASE("ElementwiseBinaryAddSupportedOptimizedNetwork")
+{
+ using namespace armnn;
+
+ const float qScale = 1.0f;
+ const int32_t qOffset = 0;
+
+ const TensorShape& input1Shape = { 2, 2, 2 };
+ const TensorShape& input2Shape = { 2, 2, 2 };
+ const TensorShape& outputShape = { 2, 2, 2 };
+
+ TensorInfo input1TensorInfo(input1Shape, DataType::Float32, qScale, qOffset, true);
+ TensorInfo input2TensorInfo(input2Shape, DataType::Float32, qScale, qOffset, true);
+ TensorInfo outputTensorInfo(outputShape, DataType::Float32, qScale, qOffset);
+
+ IRuntime::CreationOptions options;
+ IRuntimePtr runtime(IRuntime::Create(options));
+ INetworkPtr network(INetwork::Create());
+
+ IConnectableLayer* input1 = network->AddInputLayer(0, "input0");
+ IConnectableLayer* input2 = network->AddInputLayer(1, "input1");
+
+ ElementwiseBinaryDescriptor desc;
+ desc.m_Operation = BinaryOperation::Add;
+
+ IConnectableLayer* elementwiseBinaryLayer = network->AddElementwiseBinaryLayer(desc, "elementwiseBinary");
+ IConnectableLayer* output = network->AddOutputLayer(2, "output");
+
+ Connect(input1, elementwiseBinaryLayer, input1TensorInfo, 0, 0);
+ Connect(input2, elementwiseBinaryLayer, input2TensorInfo, 0, 1);
+ Connect(elementwiseBinaryLayer, output, outputTensorInfo, 0, 0);
+
+ std::vector<BackendId> backends = { "GpuFsa" };
+
+ OptimizerOptionsOpaque optimizedOptions;
+ IOptimizedNetworkPtr optNet = Optimize(*network, backends, runtime->GetDeviceSpec(), optimizedOptions);
+ CHECK(optNet);
+
+ Graph& graph = GetGraphForTesting(optNet.get());
+
+ // Check graph layer sequence to ensure that the network has been replaced with a PreCompiledLayer
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(),
+ &IsLayerOfType<InputLayer>,
+ &IsLayerOfType<InputLayer>,
+ &IsLayerOfType<PreCompiledLayer>,
+ &IsLayerOfType<OutputLayer>));
+}
+
}
\ No newline at end of file
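
A possible continuation (not part of the patch, sketch only) showing how such an optimized network would then be loaded and run; it assumes the standard armnn::IRuntime LoadNetwork/EnqueueWorkload flow used by the end-to-end tests, and reuses the binding ids from the test above (inputs 0 and 1, output 2):

    // Sketch only: load the optimized network and run one inference.
    armnn::NetworkId networkId;
    runtime->LoadNetwork(networkId, std::move(optNet));

    std::vector<float> input0Data(8, 1.0f);
    std::vector<float> input1Data(8, 2.0f);
    std::vector<float> outputData(8, 0.0f);

    armnn::InputTensors inputTensors
    {
        { 0, armnn::ConstTensor(runtime->GetInputTensorInfo(networkId, 0), input0Data.data()) },
        { 1, armnn::ConstTensor(runtime->GetInputTensorInfo(networkId, 1), input1Data.data()) }
    };
    armnn::OutputTensors outputTensors
    {
        { 2, armnn::Tensor(runtime->GetOutputTensorInfo(networkId, 2), outputData.data()) }
    };

    runtime->EnqueueWorkload(networkId, inputTensors, outputTensors);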
diff --git a/src/backends/neon/test/NeonEndToEndTests.cpp b/src/backends/neon/test/NeonEndToEndTests.cpp
index 9baded6671..58c55e5d72 100644
--- a/src/backends/neon/test/NeonEndToEndTests.cpp
+++ b/src/backends/neon/test/NeonEndToEndTests.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -151,6 +151,15 @@ TEST_CASE("NeonAdditionEndToEndUint8Test")
AdditionEndToEnd<armnn::DataType::QAsymmU8>(neonDefaultBackends);
}
+TEST_CASE("NeonAdditionEndToEndFloat32Simple3DTest")
+{
+ ElementwiseBinarySimple3DEndToEnd<armnn::DataType::Float32>(neonDefaultBackends, BinaryOperation::Add);
+}
+TEST_CASE("NeonAdditionEndToEndFloat16Simple3DTest")
+{
+ ElementwiseBinarySimple3DEndToEnd<armnn::DataType::Float16>(neonDefaultBackends, BinaryOperation::Add);
+}
+
// Power
TEST_CASE("NeonPowerEndToEndTestFloat32")
{
diff --git a/src/backends/reference/test/RefEndToEndTests.cpp b/src/backends/reference/test/RefEndToEndTests.cpp
index 199fbdfafd..185deb685d 100644
--- a/src/backends/reference/test/RefEndToEndTests.cpp
+++ b/src/backends/reference/test/RefEndToEndTests.cpp
@@ -1787,6 +1787,14 @@ TEST_CASE("RefAddEndToEndTestUint8")
{
ElementwiseBinarySimpleEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends, BinaryOperation::Add);
}
+TEST_CASE("RefAddEndToEndTestFloat32Simple3D")
+{
+ ElementwiseBinarySimple3DEndToEnd<armnn::DataType::Float32>(defaultBackends, BinaryOperation::Add);
+}
+TEST_CASE("RefAddEndToEndTestFloat16Simple3D")
+{
+ ElementwiseBinarySimple3DEndToEnd<armnn::DataType::Float16>(defaultBackends, BinaryOperation::Add);
+}
TEST_CASE("RefDivEndToEndTestFloat32")
{
ElementwiseBinarySimpleEndToEnd<armnn::DataType::Float32>(defaultBackends, BinaryOperation::Div);