author    Teresa Charlin <teresa.charlinreyes@arm.com>  2024-02-08 16:23:25 +0000
committer Teresa Charlin <teresa.charlinreyes@arm.com>  2024-02-08 16:26:13 +0000
commit    20dda37357ac0d02550f4421de6c8bfe44304f90 (patch)
tree      dedea8fbe56180d322bfa2720992caf1b3faa4c5
parent    a4b6090eea91d4c11f4319d175b5c6fbf1d2a984 (diff)
download  armnn-20dda37357ac0d02550f4421de6c8bfe44304f90.tar.gz
IVGCVSW-7570 GpuFsa Op: Add available ElementwiseBinary operators

* Refactor to generalize
* Add MUL

Signed-off-by: Teresa Charlin <teresa.charlinreyes@arm.com>
Change-Id: I2ee273d50d3a8b114b5a41abc8ee7585b15e3308
-rw-r--r--  src/backends/backendsCommon/test/ElementwiseBinaryEndToEndTestImpl.hpp    3
-rw-r--r--  src/backends/gpuFsa/GpuFsaBackend.cpp                                    20
-rw-r--r--  src/backends/gpuFsa/GpuFsaLayerSupport.cpp                               28
-rw-r--r--  src/backends/gpuFsa/layers/CMakeLists.txt                                 6
-rw-r--r--  src/backends/gpuFsa/layers/GpuFsaElementwiseBinary.cpp                  145
-rw-r--r--  src/backends/gpuFsa/layers/GpuFsaElementwiseBinary.hpp                   22
-rw-r--r--  src/backends/gpuFsa/layers/GpuFsaElementwiseBinaryAdd.cpp                79
-rw-r--r--  src/backends/gpuFsa/layers/GpuFsaElementwiseBinaryAdd.hpp                20
-rw-r--r--  src/backends/gpuFsa/layers/GpuFsaElementwiseBinarySub.cpp                79
-rw-r--r--  src/backends/gpuFsa/layers/GpuFsaElementwiseBinarySub.hpp                20
-rw-r--r--  src/backends/gpuFsa/test/GpuFsaEndToEndTests.cpp                          11
-rw-r--r--  src/backends/gpuFsa/test/GpuFsaLayerSupportTests.cpp                      35
-rw-r--r--  src/backends/gpuFsa/test/GpuFsaOptimizedNetworkTests.cpp                  63
13 files changed, 219 insertions, 312 deletions
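The refactor below replaces the per-operation GpuFsaElementwiseBinaryAdd/GpuFsaElementwiseBinarySub helpers with a single GpuFsaElementwiseBinary entry point that switches on the descriptor's m_Operation (Add, Mul, Sub). A minimal caller sketch follows, assuming a GpuFsaPreCompiledBlob and input TensorInfos obtained as in GpuFsaBackend::OptimizeSubgraphView; the wrapper function name is illustrative and not part of this change.

    #include "layers/GpuFsaElementwiseBinary.hpp"

    namespace armnn
    {
    // Hypothetical helper showing how all three binary operations now share one code path.
    void SketchGpuFsaElementwiseBinary(GpuFsaPreCompiledBlob* blob,
                                       const TensorInfo& input0,
                                       const TensorInfo& input1,
                                       BinaryOperation operation)   // Add, Mul or Sub
    {
        ElementwiseBinaryDescriptor desc;
        desc.m_Operation = operation;

        // The descriptor selects GpuAdd/GpuMul/GpuSub inside the unified functions.
        arm_compute::Status status = GpuFsaElementwiseBinaryValidate(input0, input1, desc);
        if (status.error_code() == arm_compute::ErrorCode::OK)
        {
            GpuFsaElementwiseBinaryCreateOp(blob, input0, input1, desc);
        }
    }
    } // namespace armnn
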
diff --git a/src/backends/backendsCommon/test/ElementwiseBinaryEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/ElementwiseBinaryEndToEndTestImpl.hpp
index 5138e496e8..dbc270e0c9 100644
--- a/src/backends/backendsCommon/test/ElementwiseBinaryEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/ElementwiseBinaryEndToEndTestImpl.hpp
@@ -135,7 +135,8 @@ void ElementwiseBinarySimple3DEndToEnd(const std::vector<BackendId>& backends,
const std::vector<float> input2({ 2, 2, 2, 2 });
std::vector<float> expectedOutput;
- switch (operation) {
+ switch (operation)
+ {
case armnn::BinaryOperation::Add:
expectedOutput = { 3, 1, 3, 3 };
break;
diff --git a/src/backends/gpuFsa/GpuFsaBackend.cpp b/src/backends/gpuFsa/GpuFsaBackend.cpp
index 1bfe8dd14a..de0d01973c 100644
--- a/src/backends/gpuFsa/GpuFsaBackend.cpp
+++ b/src/backends/gpuFsa/GpuFsaBackend.cpp
@@ -23,8 +23,7 @@
#include "layers/GpuFsaCast.hpp"
#include "layers/GpuFsaConvolution2d.hpp"
#include "layers/GpuFsaDepthwiseConvolution2d.hpp"
-#include "layers/GpuFsaElementwiseBinaryAdd.hpp"
-#include "layers/GpuFsaElementwiseBinarySub.hpp"
+#include "layers/GpuFsaElementwiseBinary.hpp"
#include "layers/GpuFsaPooling2d.hpp"
#include "layers/GpuFsaResize.hpp"
@@ -309,20 +308,9 @@ OptimizationViews GpuFsaBackend::OptimizeSubgraphView(const SubgraphView& subgra
case LayerType::ElementwiseBinary:
{
auto desc = PolymorphicDowncast<const ElementwiseBinaryDescriptor *>(&base.GetParameters());
- if (desc->m_Operation == BinaryOperation::Add)
- {
- auto input0 = base.GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo();
- auto input1 = base.GetInputSlot(1).GetConnectedOutputSlot()->GetTensorInfo();
-
- GpuFsaElementwiseBinaryAddCreateOp(preCompiledBlobPtr, input0, input1);
- }
- else if (desc->m_Operation == BinaryOperation::Sub)
- {
- auto input0 = base.GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo();
- auto input1 = base.GetInputSlot(1).GetConnectedOutputSlot()->GetTensorInfo();
-
- GpuFsaElementwiseBinarySubCreateOp(preCompiledBlobPtr, input0, input1);
- }
+ auto input0 = base.GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo();
+ auto input1 = base.GetInputSlot(1).GetConnectedOutputSlot()->GetTensorInfo();
+ GpuFsaElementwiseBinaryCreateOp(preCompiledBlobPtr, input0, input1, *desc);
break;
}
case (LayerType::Pooling2d):
diff --git a/src/backends/gpuFsa/GpuFsaLayerSupport.cpp b/src/backends/gpuFsa/GpuFsaLayerSupport.cpp
index 56af9c4d68..1ee80c964f 100644
--- a/src/backends/gpuFsa/GpuFsaLayerSupport.cpp
+++ b/src/backends/gpuFsa/GpuFsaLayerSupport.cpp
@@ -13,8 +13,7 @@
#include "layers/GpuFsaCast.hpp"
#include "layers/GpuFsaConvolution2d.hpp"
#include "layers/GpuFsaDepthwiseConvolution2d.hpp"
-#include "layers/GpuFsaElementwiseBinaryAdd.hpp"
-#include "layers/GpuFsaElementwiseBinarySub.hpp"
+#include "layers/GpuFsaElementwiseBinary.hpp"
#include "layers/GpuFsaPooling2d.hpp"
#include "layers/GpuFsaResize.hpp"
#endif
@@ -150,28 +149,15 @@ bool GpuFsaLayerSupport::IsLayerSupported(const LayerType& type,
if (infos.size() != 3)
{
throw InvalidArgumentException("Invalid number of ElementwiseBinary TensorInfos. "
- "TensorInfos should be of format: {input0, input1, output0}.");
+ "TensorInfos should be of format: {input0, input1, output}.");
}
auto desc = PolymorphicDowncast<const ElementwiseBinaryDescriptor*>(&descriptor);
- if (desc->m_Operation == BinaryOperation::Add)
- {
- FORWARD_LAYER_VALIDATE_FUNC(GpuFsaElementwiseBinaryAddValidate,
- reasonIfUnsupported,
- infos[0],
- infos[1]);
- }
- else if (desc->m_Operation == BinaryOperation::Sub)
- {
- FORWARD_LAYER_VALIDATE_FUNC(GpuFsaElementwiseBinarySubValidate,
- reasonIfUnsupported,
- infos[0],
- infos[1]);
- }
- else
- {
- throw InvalidArgumentException("Invalid ElementwiseBinary BinaryOperation operation.");
- }
+ FORWARD_LAYER_VALIDATE_FUNC(GpuFsaElementwiseBinaryValidate,
+ reasonIfUnsupported,
+ infos[0],
+ infos[1],
+ *desc);
}
case LayerType::Pooling2d:
{
diff --git a/src/backends/gpuFsa/layers/CMakeLists.txt b/src/backends/gpuFsa/layers/CMakeLists.txt
index 9ea36b6f2c..3fe4bdcbc6 100644
--- a/src/backends/gpuFsa/layers/CMakeLists.txt
+++ b/src/backends/gpuFsa/layers/CMakeLists.txt
@@ -10,10 +10,8 @@ list(APPEND armnnGpuFsaBackendLayers_sources
GpuFsaConvolution2d.hpp
GpuFsaDepthwiseConvolution2d.cpp
GpuFsaDepthwiseConvolution2d.hpp
- GpuFsaElementwiseBinaryAdd.cpp
- GpuFsaElementwiseBinaryAdd.hpp
- GpuFsaElementwiseBinarySub.cpp
- GpuFsaElementwiseBinarySub.hpp
+ GpuFsaElementwiseBinary.cpp
+ GpuFsaElementwiseBinary.hpp
GpuFsaPooling2d.cpp
GpuFsaPooling2d.hpp
GpuFsaResize.cpp
diff --git a/src/backends/gpuFsa/layers/GpuFsaElementwiseBinary.cpp b/src/backends/gpuFsa/layers/GpuFsaElementwiseBinary.cpp
new file mode 100644
index 0000000000..7c3760a4ef
--- /dev/null
+++ b/src/backends/gpuFsa/layers/GpuFsaElementwiseBinary.cpp
@@ -0,0 +1,145 @@
+//
+// Copyright © 2024 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "GpuFsaElementwiseBinary.hpp"
+
+#include <aclCommon/ArmComputeTensorUtils.hpp>
+
+#include <arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadContext.h>
+#include <arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadSketch.h>
+#include <arm_compute/dynamic_fusion/sketch/gpu/operators/GpuAdd.h>
+#include <arm_compute/dynamic_fusion/sketch/gpu/operators/GpuMul.h>
+#include <arm_compute/dynamic_fusion/sketch/gpu/operators/GpuSub.h>
+#include <arm_compute/dynamic_fusion/sketch/gpu/operators/GpuOutput.h>
+
+using namespace arm_compute::experimental::dynamic_fusion;
+using namespace armnn::armcomputetensorutils;
+
+namespace armnn
+{
+
+arm_compute::Status GpuFsaElementwiseBinaryValidate(const TensorInfo& input0,
+ const TensorInfo& input1,
+ const ElementwiseBinaryDescriptor& descriptor)
+{
+ // Create a new workload sketch, for validation purposes
+ auto compileCtx = arm_compute::CLKernelLibrary::get().get_compile_context();
+ auto workloadContext = GpuWorkloadContext(&compileCtx);
+ GpuWorkloadSketch sketch{ &workloadContext };
+
+ arm_compute::TensorInfo aclInput0Info = BuildArmComputeTensorInfo(input0, input0.GetNumDimensions());
+ arm_compute::TensorInfo aclInput1Info = BuildArmComputeTensorInfo(input1, input1.GetNumDimensions());
+
+ aclInput0Info.set_are_values_constant(input0.IsConstant());
+ aclInput1Info.set_are_values_constant(input1.IsConstant());
+
+ arm_compute::ITensorInfo* inputInfo0 = workloadContext.create_tensor_info(aclInput0Info);
+ arm_compute::ITensorInfo* inputInfo1 = workloadContext.create_tensor_info(aclInput1Info);
+
+ switch (descriptor.m_Operation)
+ {
+ case BinaryOperation::Add:
+ {
+ return GpuAdd::validate_op(sketch, inputInfo0, inputInfo1);
+ }
+ case BinaryOperation::Mul:
+ {
+ return GpuMul::validate_op(sketch, inputInfo0, inputInfo1);
+ }
+ case BinaryOperation::Sub:
+ {
+ return GpuSub::validate_op(sketch, inputInfo0, inputInfo1);
+ }
+ default:
+ return arm_compute::Status(arm_compute::ErrorCode::RUNTIME_ERROR,
+ std::string("Elementwise Binary operation not supported in GpuFsa: ")
+ + GetBinaryOperationAsCString(descriptor.m_Operation));
+ }
+}
+
+void GpuFsaElementwiseBinaryCreateOp(GpuFsaPreCompiledBlob* blob,
+ const TensorInfo& input0,
+ const TensorInfo& input1,
+ const ElementwiseBinaryDescriptor& descriptor)
+{
+ GpuWorkloadSketch* sketch = blob->sketch.get();
+ GpuWorkloadContext* workloadContext = blob->workloadContext.get();
+ std::vector<arm_compute::ITensorInfo*> inputTensorInfos = {};
+ std::vector<arm_compute::ITensorInfo*> outputTensorInfos = {};
+
+ arm_compute::TensorInfo aclInput0Info = BuildArmComputeTensorInfo(input0, input0.GetNumDimensions());
+ arm_compute::TensorInfo aclInput1Info = BuildArmComputeTensorInfo(input1, input1.GetNumDimensions());
+
+ aclInput0Info.set_are_values_constant(input0.IsConstant());
+ aclInput1Info.set_are_values_constant(input1.IsConstant());
+
+ inputTensorInfos.emplace_back(workloadContext->create_tensor_info(aclInput0Info));
+ inputTensorInfos.emplace_back(workloadContext->create_tensor_info(aclInput1Info));
+
+ // Validate the chosen operation and check the returned status before creating the op
+ arm_compute::Status aclStatus{};
+ switch (descriptor.m_Operation)
+ {
+ case BinaryOperation::Add:
+ {
+ aclStatus = GpuAdd::validate_op(*sketch, inputTensorInfos[0], inputTensorInfos[1]);
+ break;
+ }
+ case BinaryOperation::Mul:
+ {
+ aclStatus = GpuMul::validate_op(*sketch, inputTensorInfos[0], inputTensorInfos[1]);
+ break;
+ }
+ case BinaryOperation::Sub:
+ {
+ aclStatus = GpuSub::validate_op(*sketch, inputTensorInfos[0], inputTensorInfos[1]);
+ break;
+ }
+ default:
+ throw InvalidArgumentException(std::string("Elementwise Binary operation not supported in GpuFsa: ")
+ + GetBinaryOperationAsCString(descriptor.m_Operation));
+ }
+
+ const bool supported = aclStatus.error_code() == arm_compute::ErrorCode::OK;
+ if (!supported)
+ {
+ throw BackendCapabilityException("\"GpuFsa\" backend failed during elementwise binary operation validation");
+ }
+
+ arm_compute::ITensorInfo* elementwiseBinaryOutputInfo{};
+ switch (descriptor.m_Operation)
+ {
+ case BinaryOperation::Add:
+ {
+ elementwiseBinaryOutputInfo = GpuAdd::create_op(*sketch, inputTensorInfos[0], inputTensorInfos[1]);
+ break;
+ }
+ case BinaryOperation::Mul:
+ {
+ elementwiseBinaryOutputInfo = GpuMul::create_op(*sketch, inputTensorInfos[0], inputTensorInfos[1]);
+ break;
+ }
+ case BinaryOperation::Sub:
+ {
+ elementwiseBinaryOutputInfo = GpuSub::create_op(*sketch, inputTensorInfos[0], inputTensorInfos[1]);
+ break;
+ }
+ default:
+ throw InvalidArgumentException(std::string("Elementwise Binary operation not supported in GpuFsa: ")
+ + GetBinaryOperationAsCString(descriptor.m_Operation));
+ }
+
+ // Temporary fix until a fusing attempt is made for the GpuFsa backend and the Output layer workload is created.
+ outputTensorInfos.emplace_back(workloadContext->create_tensor_info());
+ GpuOutput::create_op(*sketch, elementwiseBinaryOutputInfo, outputTensorInfos[0]);
+
+ // Store the TensorInfos within the blob as unique_ptrs to be used later
+ blob->inputTensorInfos = std::make_unique<std::vector<arm_compute::ITensorInfo*>>(inputTensorInfos);
+ blob->outputTensorInfos = std::make_unique<std::vector<arm_compute::ITensorInfo*>>(outputTensorInfos);
+}
+
+} // namespace armnn \ No newline at end of file
diff --git a/src/backends/gpuFsa/layers/GpuFsaElementwiseBinary.hpp b/src/backends/gpuFsa/layers/GpuFsaElementwiseBinary.hpp
new file mode 100644
index 0000000000..11583f1dc7
--- /dev/null
+++ b/src/backends/gpuFsa/layers/GpuFsaElementwiseBinary.hpp
@@ -0,0 +1,22 @@
+//
+// Copyright © 2024 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include <armnn/Descriptors.hpp>
+
+#include <gpuFsa/GpuFsaBackend.hpp>
+
+namespace armnn
+{
+arm_compute::Status GpuFsaElementwiseBinaryValidate(const TensorInfo& input0,
+ const TensorInfo& input1,
+ const ElementwiseBinaryDescriptor& descriptor);
+
+void GpuFsaElementwiseBinaryCreateOp(GpuFsaPreCompiledBlob* blob,
+ const TensorInfo& input0,
+ const TensorInfo& input1,
+ const ElementwiseBinaryDescriptor& descriptor);
+
+} // namespace armnn \ No newline at end of file
diff --git a/src/backends/gpuFsa/layers/GpuFsaElementwiseBinaryAdd.cpp b/src/backends/gpuFsa/layers/GpuFsaElementwiseBinaryAdd.cpp
deleted file mode 100644
index d6404dd67e..0000000000
--- a/src/backends/gpuFsa/layers/GpuFsaElementwiseBinaryAdd.cpp
+++ /dev/null
@@ -1,79 +0,0 @@
-//
-// Copyright © 2024 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "GpuFsaElementwiseBinaryAdd.hpp"
-
-#include <aclCommon/ArmComputeTensorUtils.hpp>
-
-#include <arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadContext.h>
-#include <arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadSketch.h>
-#include <arm_compute/dynamic_fusion/sketch/gpu/operators/GpuAdd.h>
-#include <arm_compute/dynamic_fusion/sketch/gpu/operators/GpuOutput.h>
-
-using namespace arm_compute::experimental::dynamic_fusion;
-using namespace armnn::armcomputetensorutils;
-
-namespace armnn
-{
-
-arm_compute::Status GpuFsaElementwiseBinaryAddValidate(const TensorInfo& input0,
- const TensorInfo& input1)
-{
- // Create a new workload sketch, for validation purposes
- auto compileCtx = arm_compute::CLKernelLibrary::get().get_compile_context();
- auto workloadContext = GpuWorkloadContext(&compileCtx);
- GpuWorkloadSketch sketch{ &workloadContext };
-
- arm_compute::TensorInfo aclInput0Info = BuildArmComputeTensorInfo(input0, input0.GetNumDimensions());
- arm_compute::TensorInfo aclInput1Info = BuildArmComputeTensorInfo(input1, input1.GetNumDimensions());
-
- aclInput0Info.set_are_values_constant(input0.IsConstant());
- aclInput1Info.set_are_values_constant(input1.IsConstant());
-
- arm_compute::ITensorInfo* inputInfo0 = workloadContext.create_tensor_info(aclInput0Info);
- arm_compute::ITensorInfo* inputInfo1 = workloadContext.create_tensor_info(aclInput1Info);
-
- return GpuAdd::validate_op(sketch, inputInfo0, inputInfo1);
-}
-
-void GpuFsaElementwiseBinaryAddCreateOp(GpuFsaPreCompiledBlob* blob,
- const TensorInfo& input0,
- const TensorInfo& input1)
-{
- GpuWorkloadSketch* sketch = blob->sketch.get();
- GpuWorkloadContext* workloadContext = blob->workloadContext.get();
- std::vector<arm_compute::ITensorInfo*> inputTensorInfos = {};
- std::vector<arm_compute::ITensorInfo*> outputTensorInfos = {};
-
- arm_compute::TensorInfo aclInput0Info = BuildArmComputeTensorInfo(input0, input0.GetNumDimensions());
- arm_compute::TensorInfo aclInput1Info = BuildArmComputeTensorInfo(input1, input1.GetNumDimensions());
-
- aclInput0Info.set_are_values_constant(input0.IsConstant());
- aclInput1Info.set_are_values_constant(input1.IsConstant());
-
- inputTensorInfos.emplace_back(workloadContext->create_tensor_info(aclInput0Info));
- inputTensorInfos.emplace_back(workloadContext->create_tensor_info(aclInput1Info));
-
- // Validate operator, check status and update reasonIfUnsupported
- arm_compute::Status aclStatus = GpuAdd::validate_op(*sketch, inputTensorInfos[0], inputTensorInfos[1]);
- const bool supported = aclStatus.error_code() == arm_compute::ErrorCode::OK;
- if (!supported)
- {
- throw BackendCapabilityException("\"GpuFsa\" backend failed during elementwise binary add validation");
- }
-
- arm_compute::ITensorInfo* addOutputInfo =
- GpuAdd::create_op(*sketch, inputTensorInfos[0], inputTensorInfos[1]);
-
- // Temporary fix until fusing attempt is make for GpuFsa backend and Output layer workload is created.
- outputTensorInfos.emplace_back(workloadContext->create_tensor_info());
- GpuOutput::create_op(*sketch, addOutputInfo, outputTensorInfos[0]);
-
- // Store the TensorInfos within the blob as unique_ptrs to be used later
- blob->inputTensorInfos = std::make_unique<std::vector<arm_compute::ITensorInfo*>>(inputTensorInfos);
- blob->outputTensorInfos = std::make_unique<std::vector<arm_compute::ITensorInfo*>>(outputTensorInfos);
-}
-
-} // namespace armnn \ No newline at end of file
diff --git a/src/backends/gpuFsa/layers/GpuFsaElementwiseBinaryAdd.hpp b/src/backends/gpuFsa/layers/GpuFsaElementwiseBinaryAdd.hpp
deleted file mode 100644
index 1392d01257..0000000000
--- a/src/backends/gpuFsa/layers/GpuFsaElementwiseBinaryAdd.hpp
+++ /dev/null
@@ -1,20 +0,0 @@
-//
-// Copyright © 2024 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-#pragma once
-
-#include <armnn/Descriptors.hpp>
-
-#include <gpuFsa/GpuFsaBackend.hpp>
-
-namespace armnn
-{
-arm_compute::Status GpuFsaElementwiseBinaryAddValidate(const TensorInfo& input0,
- const TensorInfo& input1);
-
-void GpuFsaElementwiseBinaryAddCreateOp(GpuFsaPreCompiledBlob* blob,
- const TensorInfo& input0,
- const TensorInfo& input1);
-
-} // namespace armnn \ No newline at end of file
diff --git a/src/backends/gpuFsa/layers/GpuFsaElementwiseBinarySub.cpp b/src/backends/gpuFsa/layers/GpuFsaElementwiseBinarySub.cpp
deleted file mode 100644
index 5e0f478686..0000000000
--- a/src/backends/gpuFsa/layers/GpuFsaElementwiseBinarySub.cpp
+++ /dev/null
@@ -1,79 +0,0 @@
-//
-// Copyright © 2024 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "GpuFsaElementwiseBinarySub.hpp"
-
-#include <aclCommon/ArmComputeTensorUtils.hpp>
-
-#include <arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadContext.h>
-#include <arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadSketch.h>
-#include <arm_compute/dynamic_fusion/sketch/gpu/operators/GpuSub.h>
-#include <arm_compute/dynamic_fusion/sketch/gpu/operators/GpuOutput.h>
-
-using namespace arm_compute::experimental::dynamic_fusion;
-using namespace armnn::armcomputetensorutils;
-
-namespace armnn
-{
-
-arm_compute::Status GpuFsaElementwiseBinarySubValidate(const TensorInfo& input0,
- const TensorInfo& input1)
-{
- // Create a new workload sketch, for validation purposes
- auto compileCtx = arm_compute::CLKernelLibrary::get().get_compile_context();
- auto workloadContext = GpuWorkloadContext(&compileCtx);
- GpuWorkloadSketch sketch{ &workloadContext };
-
- arm_compute::TensorInfo aclInput0Info = BuildArmComputeTensorInfo(input0, input0.GetNumDimensions());
- arm_compute::TensorInfo aclInput1Info = BuildArmComputeTensorInfo(input1, input1.GetNumDimensions());
-
- aclInput0Info.set_are_values_constant(input0.IsConstant());
- aclInput1Info.set_are_values_constant(input1.IsConstant());
-
- arm_compute::ITensorInfo* inputInfo0 = workloadContext.create_tensor_info(aclInput0Info);
- arm_compute::ITensorInfo* inputInfo1 = workloadContext.create_tensor_info(aclInput1Info);
-
- return GpuSub::validate_op(sketch, inputInfo0, inputInfo1);
-}
-
-void GpuFsaElementwiseBinarySubCreateOp(GpuFsaPreCompiledBlob* blob,
- const TensorInfo& input0,
- const TensorInfo& input1)
-{
- GpuWorkloadSketch* sketch = blob->sketch.get();
- GpuWorkloadContext* workloadContext = blob->workloadContext.get();
- std::vector<arm_compute::ITensorInfo*> inputTensorInfos = {};
- std::vector<arm_compute::ITensorInfo*> outputTensorInfos = {};
-
- arm_compute::TensorInfo aclInput0Info = BuildArmComputeTensorInfo(input0, input0.GetNumDimensions());
- arm_compute::TensorInfo aclInput1Info = BuildArmComputeTensorInfo(input1, input1.GetNumDimensions());
-
- aclInput0Info.set_are_values_constant(input0.IsConstant());
- aclInput1Info.set_are_values_constant(input1.IsConstant());
-
- inputTensorInfos.emplace_back(workloadContext->create_tensor_info(aclInput0Info));
- inputTensorInfos.emplace_back(workloadContext->create_tensor_info(aclInput1Info));
-
- // Validate operator, check status and update reasonIfUnsupported
- arm_compute::Status aclStatus = GpuSub::validate_op(*sketch, inputTensorInfos[0], inputTensorInfos[1]);
- const bool supported = aclStatus.error_code() == arm_compute::ErrorCode::OK;
- if (!supported)
- {
- throw BackendCapabilityException("\"GpuFsa\" backend failed during elementwise binary subtract validation");
- }
-
- arm_compute::ITensorInfo* addOutputInfo =
- GpuSub::create_op(*sketch, inputTensorInfos[0], inputTensorInfos[1]);
-
- // Temporary fix until fusing attempt is make for GpuFsa backend and Output layer workload is created.
- outputTensorInfos.emplace_back(workloadContext->create_tensor_info());
- GpuOutput::create_op(*sketch, addOutputInfo, outputTensorInfos[0]);
-
- // Store the TensorInfos within the blob as unique_ptrs to be used later
- blob->inputTensorInfos = std::make_unique<std::vector<arm_compute::ITensorInfo*>>(inputTensorInfos);
- blob->outputTensorInfos = std::make_unique<std::vector<arm_compute::ITensorInfo*>>(outputTensorInfos);
-}
-
-} \ No newline at end of file
diff --git a/src/backends/gpuFsa/layers/GpuFsaElementwiseBinarySub.hpp b/src/backends/gpuFsa/layers/GpuFsaElementwiseBinarySub.hpp
deleted file mode 100644
index 4d58f313b6..0000000000
--- a/src/backends/gpuFsa/layers/GpuFsaElementwiseBinarySub.hpp
+++ /dev/null
@@ -1,20 +0,0 @@
-//
-// Copyright © 2024 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-#pragma once
-
-#include <armnn/Descriptors.hpp>
-
-#include <gpuFsa/GpuFsaBackend.hpp>
-
-namespace armnn
-{
-arm_compute::Status GpuFsaElementwiseBinarySubValidate(const TensorInfo& input0,
- const TensorInfo& input1);
-
-void GpuFsaElementwiseBinarySubCreateOp(GpuFsaPreCompiledBlob* blob,
- const TensorInfo& input0,
- const TensorInfo& input1);
-
-} // namespace armnn \ No newline at end of file
diff --git a/src/backends/gpuFsa/test/GpuFsaEndToEndTests.cpp b/src/backends/gpuFsa/test/GpuFsaEndToEndTests.cpp
index d2412bfdbc..a2708c0e53 100644
--- a/src/backends/gpuFsa/test/GpuFsaEndToEndTests.cpp
+++ b/src/backends/gpuFsa/test/GpuFsaEndToEndTests.cpp
@@ -66,6 +66,17 @@ TEST_CASE("GpuFsaElementwiseBinaryAddTestFloat16")
ElementwiseBinarySimple3DEndToEnd<armnn::DataType::Float16>(gpuFsaDefaultBackends, BinaryOperation::Add);
}
+// ElementwiseBinary Mul
+TEST_CASE("GpuFsaElementwiseBinaryMulTestFloat32")
+{
+ ElementwiseBinarySimple3DEndToEnd<armnn::DataType::Float32>(gpuFsaDefaultBackends, BinaryOperation::Mul);
+}
+
+TEST_CASE("GpuFsaElementwiseBinaryMulTestFloat16")
+{
+ ElementwiseBinarySimple3DEndToEnd<armnn::DataType::Float16>(gpuFsaDefaultBackends, BinaryOperation::Mul);
+}
+
// ElementwiseBinary Sub
TEST_CASE("GpuFsaElementwiseBinarySubTestFloat32")
{
diff --git a/src/backends/gpuFsa/test/GpuFsaLayerSupportTests.cpp b/src/backends/gpuFsa/test/GpuFsaLayerSupportTests.cpp
index dda4d1ffaf..34af1909e6 100644
--- a/src/backends/gpuFsa/test/GpuFsaLayerSupportTests.cpp
+++ b/src/backends/gpuFsa/test/GpuFsaLayerSupportTests.cpp
@@ -79,34 +79,25 @@ TEST_CASE("IsLayerSupportedGpuFsaConv2dUnsupported")
REQUIRE(reasonIfNotSupported.find("NCHW not supported by this kernel") != std::string::npos);
}
-TEST_CASE("IsLayerSupportedGpuFsaElementWiseBinaryAdd")
+TEST_CASE("IsLayerSupportedGpuFsaElementWiseBinary")
{
TensorInfo input0Info({ 2, 2 }, DataType::Float32);
TensorInfo input1Info({ 2, 2 }, DataType::Float32);
TensorInfo outputInfo({ 2, 2 }, DataType::Float32);
ElementwiseBinaryDescriptor desc;
- desc.m_Operation = BinaryOperation::Add;
-
- GpuFsaLayerSupport supportChecker;
- std::string reasonIfNotSupported;
- auto supported = supportChecker.IsLayerSupported(LayerType::ElementwiseBinary,
- {input0Info, input1Info, outputInfo},
- desc,
- EmptyOptional(),
- EmptyOptional(),
- reasonIfNotSupported);
- CHECK(supported);
-}
-
-TEST_CASE("IsLayerSupportedGpuFsaElementWiseBinarySub")
-{
- TensorInfo input0Info({ 2, 2 }, DataType::Float32);
- TensorInfo input1Info({ 2, 2 }, DataType::Float32);
- TensorInfo outputInfo({ 2, 2 }, DataType::Float32);
-
- ElementwiseBinaryDescriptor desc;
- desc.m_Operation = BinaryOperation::Sub;
+ SUBCASE("Add")
+ {
+ desc.m_Operation = BinaryOperation::Add;
+ }
+ SUBCASE("Mul")
+ {
+ desc.m_Operation = BinaryOperation::Mul;
+ }
+ SUBCASE("Sub")
+ {
+ desc.m_Operation = BinaryOperation::Sub;
+ }
GpuFsaLayerSupport supportChecker;
std::string reasonIfNotSupported;
diff --git a/src/backends/gpuFsa/test/GpuFsaOptimizedNetworkTests.cpp b/src/backends/gpuFsa/test/GpuFsaOptimizedNetworkTests.cpp
index aad3a0ff6f..6ddb942dea 100644
--- a/src/backends/gpuFsa/test/GpuFsaOptimizedNetworkTests.cpp
+++ b/src/backends/gpuFsa/test/GpuFsaOptimizedNetworkTests.cpp
@@ -173,7 +173,7 @@ TEST_CASE("TwoConv2dSupportedOptimizedNetwork")
&IsLayerOfType<OutputLayer>));
}
-TEST_CASE("ElementwiseBinaryAddSupportedOptimizedNetwork")
+TEST_CASE("ElementwiseBinarySupportedOptimizedNetwork")
{
using namespace armnn;
@@ -196,55 +196,18 @@ TEST_CASE("ElementwiseBinaryAddSupportedOptimizedNetwork")
IConnectableLayer* input2 = network->AddInputLayer(1, "input1");
ElementwiseBinaryDescriptor desc;
- desc.m_Operation = BinaryOperation::Add;
-
- IConnectableLayer* elementwiseBinaryLayer = network->AddElementwiseBinaryLayer(desc, "elementwiseBinary");
- IConnectableLayer* output = network->AddOutputLayer(2, "output");
-
- Connect(input1, elementwiseBinaryLayer, input1TensorInfo, 0, 0);
- Connect(input2, elementwiseBinaryLayer, input2TensorInfo, 0, 1);
- Connect(elementwiseBinaryLayer, output, outputTensorInfo, 0, 0);
-
- std::vector<BackendId> backends = { "GpuFsa" };
-
- OptimizerOptionsOpaque optimizedOptions;
- IOptimizedNetworkPtr optNet = Optimize(*network, backends, runtime->GetDeviceSpec(), optimizedOptions);
- CHECK(optNet);
-
- Graph& graph = GetGraphForTesting(optNet.get());
-
- // Check graph layer sequence to ensure that the network has been replaced with a PreCompiledLayer
- CHECK(CheckSequence(graph.cbegin(), graph.cend(),
- &IsLayerOfType<InputLayer>,
- &IsLayerOfType<InputLayer>,
- &IsLayerOfType<PreCompiledLayer>,
- &IsLayerOfType<OutputLayer>));
-}
-
-TEST_CASE("ElementwiseBinarySubSupportedOptimizedNetwork")
-{
- using namespace armnn;
-
- const float qScale = 1.0f;
- const int32_t qOffset = 0;
-
- const TensorShape& input1Shape = { 2, 2, 2 };
- const TensorShape& input2Shape = { 2, 2, 2 };
- const TensorShape& outputShape = { 2, 2, 2 };
-
- TensorInfo input1TensorInfo(input1Shape, DataType::Float32, qScale, qOffset, true);
- TensorInfo input2TensorInfo(input2Shape, DataType::Float32, qScale, qOffset, true);
- TensorInfo outputTensorInfo(outputShape, DataType::Float32, qScale, qOffset);
-
- IRuntime::CreationOptions options;
- IRuntimePtr runtime(IRuntime::Create(options));
- INetworkPtr network(INetwork::Create());
-
- IConnectableLayer* input1 = network->AddInputLayer(0, "input0");
- IConnectableLayer* input2 = network->AddInputLayer(1, "input1");
-
- ElementwiseBinaryDescriptor desc;
- desc.m_Operation = BinaryOperation::Sub;
+ SUBCASE("Add")
+ {
+ desc.m_Operation = BinaryOperation::Add;
+ }
+ SUBCASE("Mul")
+ {
+ desc.m_Operation = BinaryOperation::Mul;
+ }
+ SUBCASE("Sub")
+ {
+ desc.m_Operation = BinaryOperation::Sub;
+ }
IConnectableLayer* elementwiseBinaryLayer = network->AddElementwiseBinaryLayer(desc, "elementwiseBinary");
IConnectableLayer* output = network->AddOutputLayer(2, "output");