author     John Mcloughlin <john.mcloughlin@arm.com>   2024-01-31 11:00:27 +0000
committer  John Mcloughlin <john.mcloughlin@arm.com>   2024-01-31 21:24:23 +0000
commit     829e13edbe9487b5a0600688cec6312b867e2f07 (patch)
tree       2b9176173e73afc1f1cdf62cec334c7ee64e7123
parent     e4134fc28ce2a9c751fb22ff84b733d067255849 (diff)
download   armnn-829e13edbe9487b5a0600688cec6312b867e2f07.tar.gz
IVGCVSW-7568 Implement Sub ElementwiseBinary operator GpuFsa
* Added support for Gpu Sub operator
* Added unit tests

Signed-off-by: John Mcloughlin <john.mcloughlin@arm.com>
Change-Id: I1efaa485772a3716e3781566843bd50bd9bab811
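For context, the sketch below (closely mirroring the ElementwiseBinarySubSupportedOptimizedNetwork test added in this change) shows how the new Sub support is reached from the public Arm NN API: a small network containing an ElementwiseBinary(Sub) layer is optimised for the "GpuFsa" backend, which replaces the pattern with a PreCompiledLayer. The standalone main() wrapper and the umbrella ArmNN.hpp include are illustrative assumptions; the layer and optimiser calls are taken from the diff itself.

#include <armnn/ArmNN.hpp>   // umbrella header (assumed); pulls in INetwork, IRuntime, Descriptors

int main()
{
    using namespace armnn;

    IRuntime::CreationOptions options;
    IRuntimePtr runtime(IRuntime::Create(options));
    INetworkPtr network(INetwork::Create());

    // 2x2x2 Float32 tensors, matching the shapes used by the new unit tests
    TensorInfo inputInfo({ 2, 2, 2 }, DataType::Float32, 1.0f, 0, true);
    TensorInfo outputInfo({ 2, 2, 2 }, DataType::Float32);

    IConnectableLayer* input0 = network->AddInputLayer(0, "input0");
    IConnectableLayer* input1 = network->AddInputLayer(1, "input1");

    ElementwiseBinaryDescriptor desc;
    desc.m_Operation = BinaryOperation::Sub;
    IConnectableLayer* sub    = network->AddElementwiseBinaryLayer(desc, "elementwiseBinary");
    IConnectableLayer* output = network->AddOutputLayer(0, "output");

    input0->GetOutputSlot(0).Connect(sub->GetInputSlot(0));
    input1->GetOutputSlot(0).Connect(sub->GetInputSlot(1));
    sub->GetOutputSlot(0).Connect(output->GetInputSlot(0));

    input0->GetOutputSlot(0).SetTensorInfo(inputInfo);
    input1->GetOutputSlot(0).SetTensorInfo(inputInfo);
    sub->GetOutputSlot(0).SetTensorInfo(outputInfo);

    // With this change, optimisation for "GpuFsa" folds the Sub into a pre-compiled workload
    std::vector<BackendId> backends = { "GpuFsa" };
    IOptimizedNetworkPtr optNet = Optimize(*network, backends, runtime->GetDeviceSpec(),
                                           OptimizerOptionsOpaque());
    return optNet ? 0 : 1;
}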
-rw-r--r--  src/backends/cl/test/ClEndToEndTests.cpp                   |  9
-rw-r--r--  src/backends/gpuFsa/GpuFsaBackend.cpp                      |  8
-rw-r--r--  src/backends/gpuFsa/GpuFsaLayerSupport.cpp                 |  8
-rw-r--r--  src/backends/gpuFsa/layers/CMakeLists.txt                  |  2
-rw-r--r--  src/backends/gpuFsa/layers/GpuFsaElementwiseBinarySub.cpp  | 82
-rw-r--r--  src/backends/gpuFsa/layers/GpuFsaElementwiseBinarySub.hpp  | 23
-rw-r--r--  src/backends/gpuFsa/test/GpuFsaEndToEndTests.cpp           | 11
-rw-r--r--  src/backends/gpuFsa/test/GpuFsaLayerSupportTests.cpp       | 20
-rw-r--r--  src/backends/gpuFsa/test/GpuFsaOptimizedNetworkTests.cpp   | 48
-rw-r--r--  src/backends/neon/test/NeonEndToEndTests.cpp               |  9
-rw-r--r--  src/backends/reference/test/RefEndToEndTests.cpp           |  8
11 files changed, 228 insertions(+), 0 deletions(-)
diff --git a/src/backends/cl/test/ClEndToEndTests.cpp b/src/backends/cl/test/ClEndToEndTests.cpp
index 8b2d6a514d..878054f7ba 100644
--- a/src/backends/cl/test/ClEndToEndTests.cpp
+++ b/src/backends/cl/test/ClEndToEndTests.cpp
@@ -90,6 +90,15 @@ TEST_CASE("ClSquaredDifferenceEndToEndTestUint8")
ElementwiseBinarySimpleEndToEnd<armnn::DataType::QAsymmU8>(clDefaultBackends, BinaryOperation::SqDiff);
}
+TEST_CASE("ClSubtractionEndToEndFloat32Simple3DTest")
+{
+ ElementwiseBinarySimple3DEndToEnd<armnn::DataType::Float32>(clDefaultBackends, BinaryOperation::Sub);
+}
+TEST_CASE("ClSubtractionEndToEndFloat16Simple3DTest")
+{
+ ElementwiseBinarySimple3DEndToEnd<armnn::DataType::Float16>(clDefaultBackends, BinaryOperation::Sub);
+}
+
// Batch Mat Mul
TEST_CASE("ClBatchMatMulEndToEndFloat32Test")
{
diff --git a/src/backends/gpuFsa/GpuFsaBackend.cpp b/src/backends/gpuFsa/GpuFsaBackend.cpp
index 7951b17d9d..8b62aec9e6 100644
--- a/src/backends/gpuFsa/GpuFsaBackend.cpp
+++ b/src/backends/gpuFsa/GpuFsaBackend.cpp
@@ -23,6 +23,7 @@
#include "layers/GpuFsaConvolution2d.hpp"
#include "layers/GpuFsaDepthwiseConvolution2d.hpp"
#include "layers/GpuFsaElementwiseBinaryAdd.hpp"
+#include "layers/GpuFsaElementwiseBinarySub.hpp"
namespace armnn
{
@@ -305,6 +306,13 @@ OptimizationViews GpuFsaBackend::OptimizeSubgraphView(const SubgraphView& subgra
GpuFsaElementwiseBinaryAddCreateOp(preCompiledBlobPtr, input0, input1);
}
+ else if (desc->m_Operation == BinaryOperation::Sub)
+ {
+ auto input0 = base.GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo();
+ auto input1 = base.GetInputSlot(1).GetConnectedOutputSlot()->GetTensorInfo();
+
+ GpuFsaElementwiseBinarySubCreateOp(preCompiledBlobPtr, input0, input1);
+ }
break;
}
default:
diff --git a/src/backends/gpuFsa/GpuFsaLayerSupport.cpp b/src/backends/gpuFsa/GpuFsaLayerSupport.cpp
index e98275c166..2e5c7d5a53 100644
--- a/src/backends/gpuFsa/GpuFsaLayerSupport.cpp
+++ b/src/backends/gpuFsa/GpuFsaLayerSupport.cpp
@@ -13,6 +13,7 @@
#include "layers/GpuFsaConvolution2d.hpp"
#include "layers/GpuFsaDepthwiseConvolution2d.hpp"
#include "layers/GpuFsaElementwiseBinaryAdd.hpp"
+#include "layers/GpuFsaElementwiseBinarySub.hpp"
#endif
#include <vector>
@@ -144,6 +145,13 @@ bool GpuFsaLayerSupport::IsLayerSupported(const LayerType& type,
infos[0],
infos[1]);
}
+ else if (desc->m_Operation == BinaryOperation::Sub)
+ {
+ FORWARD_LAYER_VALIDATE_FUNC(GpuFsaElementwiseBinarySubValidate,
+ reasonIfUnsupported,
+ infos[0],
+ infos[1]);
+ }
else
{
throw InvalidArgumentException("Invalid ElementwiseBinary BinaryOperation operation.");
diff --git a/src/backends/gpuFsa/layers/CMakeLists.txt b/src/backends/gpuFsa/layers/CMakeLists.txt
index bba795eedb..182a32c121 100644
--- a/src/backends/gpuFsa/layers/CMakeLists.txt
+++ b/src/backends/gpuFsa/layers/CMakeLists.txt
@@ -10,6 +10,8 @@ list(APPEND armnnGpuFsaBackendLayerValidators_sources
GpuFsaDepthwiseConvolution2d.hpp
GpuFsaElementwiseBinaryAdd.cpp
GpuFsaElementwiseBinaryAdd.hpp
+ GpuFsaElementwiseBinarySub.cpp
+ GpuFsaElementwiseBinarySub.hpp
)
add_library(armnnGpuFsaBackendLayerValidators OBJECT ${armnnGpuFsaBackendLayerValidators_sources})
diff --git a/src/backends/gpuFsa/layers/GpuFsaElementwiseBinarySub.cpp b/src/backends/gpuFsa/layers/GpuFsaElementwiseBinarySub.cpp
new file mode 100644
index 0000000000..4e7eb77190
--- /dev/null
+++ b/src/backends/gpuFsa/layers/GpuFsaElementwiseBinarySub.cpp
@@ -0,0 +1,82 @@
+//
+// Copyright © 2024 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "GpuFsaElementwiseBinarySub.hpp"
+
+#include <aclCommon/ArmComputeTensorUtils.hpp>
+
+#include <arm_compute/dynamic_fusion/sketch/gpu/operators/GpuSub.h>
+#include <arm_compute/dynamic_fusion/sketch/gpu/operators/GpuOutput.h>
+#include <arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadContext.h>
+#include <arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadSketch.h>
+
+using namespace arm_compute::experimental::dynamic_fusion;
+
+namespace armnn
+{
+
+arm_compute::Status GpuFsaElementwiseBinarySubValidate(const TensorInfo& input0,
+ const TensorInfo& input1)
+{
+ using namespace armcomputetensorutils;
+
+ // Create a new workload sketch, for validation purposes
+ auto compileCtx = arm_compute::CLKernelLibrary::get().get_compile_context();
+ auto workloadContext = GpuWorkloadContext(&compileCtx);
+ GpuWorkloadSketch sketch{ &workloadContext };
+
+ arm_compute::TensorInfo aclInput0Info = BuildArmComputeTensorInfo(input0, input0.GetNumDimensions());
+ arm_compute::TensorInfo aclInput1Info = BuildArmComputeTensorInfo(input1, input1.GetNumDimensions());
+
+ aclInput0Info.set_are_values_constant(input0.IsConstant());
+ aclInput1Info.set_are_values_constant(input1.IsConstant());
+
+ arm_compute::ITensorInfo* inputInfo0 = workloadContext.create_tensor_info(aclInput0Info);
+ arm_compute::ITensorInfo* inputInfo1 = workloadContext.create_tensor_info(aclInput1Info);
+
+ return GpuSub::validate_op(sketch, inputInfo0, inputInfo1);
+}
+
+void GpuFsaElementwiseBinarySubCreateOp(GpuFsaPreCompiledBlob* blob,
+ const TensorInfo& input0,
+ const TensorInfo& input1)
+{
+ using namespace armcomputetensorutils;
+
+ GpuWorkloadSketch* sketch = blob->sketch.get();
+ GpuWorkloadContext* workloadContext = blob->workloadContext.get();
+ std::vector<arm_compute::ITensorInfo*> inputTensorInfos = {};
+ std::vector<arm_compute::ITensorInfo*> outputTensorInfos = {};
+
+ arm_compute::TensorInfo aclInput0Info = BuildArmComputeTensorInfo(input0, input0.GetNumDimensions());
+ arm_compute::TensorInfo aclInput1Info = BuildArmComputeTensorInfo(input1, input1.GetNumDimensions());
+
+ aclInput0Info.set_are_values_constant(input0.IsConstant());
+ aclInput1Info.set_are_values_constant(input1.IsConstant());
+
+ inputTensorInfos.emplace_back(workloadContext->create_tensor_info(aclInput0Info));
+ inputTensorInfos.emplace_back(workloadContext->create_tensor_info(aclInput1Info));
+
+ // Validate the operator; throw if this configuration is not supported by the GpuFsa backend
+ arm_compute::Status aclStatus = GpuSub::validate_op(*sketch, inputTensorInfos[0], inputTensorInfos[1]);
+ const bool supported = aclStatus.error_code() == arm_compute::ErrorCode::OK;
+ if (!supported)
+ {
+ throw BackendCapabilityException("\"GpuFsa\" backend failed during elementwise binary subtract validation");
+ }
+
+ arm_compute::ITensorInfo* subOutputInfo =
+ GpuSub::create_op(*sketch, inputTensorInfos[0], inputTensorInfos[1]);
+
+ // Temporary fix until a fusing attempt is made for the GpuFsa backend and an Output layer workload is created.
+ outputTensorInfos.emplace_back(workloadContext->create_tensor_info());
+ GpuOutput::create_op(*sketch, subOutputInfo, outputTensorInfos[0]);
+
+ // Store the TensorInfos within the blob as unique_ptrs to be used later
+ blob->inputTensorInfos = std::make_unique<std::vector<arm_compute::ITensorInfo*>>(inputTensorInfos);
+ blob->outputTensorInfos = std::make_unique<std::vector<arm_compute::ITensorInfo*>>(outputTensorInfos);
+}
+
+}
\ No newline at end of file
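As a rough illustration of the calling contract assumed by GpuFsaElementwiseBinarySubCreateOp above, the backend has to populate the GpuFsaPreCompiledBlob with a live workload context and sketch before handing it over (as GpuFsaBackend::OptimizeSubgraphView does). The helper name PrepareSubBlob and the smart-pointer construction below are assumptions inferred from the blob->sketch.get() / blob->workloadContext.get() accesses in this file; they are not part of this change.

#include "GpuFsaElementwiseBinarySub.hpp"

#include <arm_compute/core/CL/CLCompileContext.h>
#include <arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadContext.h>
#include <arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadSketch.h>

#include <memory>

namespace armnn
{
// Hypothetical helper: blob field ownership (shared context, unique sketch) is assumed,
// not confirmed by this diff. The CL compile context is owned by the backend and passed in
// so that it outlives the workload context that keeps a pointer to it.
void PrepareSubBlob(GpuFsaPreCompiledBlob& blob,
                    arm_compute::CLCompileContext& compileCtx,
                    const TensorInfo& input0,
                    const TensorInfo& input1)
{
    using namespace arm_compute::experimental::dynamic_fusion;

    blob.workloadContext = std::make_shared<GpuWorkloadContext>(&compileCtx);
    blob.sketch          = std::make_unique<GpuWorkloadSketch>(blob.workloadContext.get());

    // The TensorInfos come from the layer's connected output slots,
    // as in GpuFsaBackend::OptimizeSubgraphView above.
    GpuFsaElementwiseBinarySubCreateOp(&blob, input0, input1);
}
} // namespace armnn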
diff --git a/src/backends/gpuFsa/layers/GpuFsaElementwiseBinarySub.hpp b/src/backends/gpuFsa/layers/GpuFsaElementwiseBinarySub.hpp
new file mode 100644
index 0000000000..59d8189f1f
--- /dev/null
+++ b/src/backends/gpuFsa/layers/GpuFsaElementwiseBinarySub.hpp
@@ -0,0 +1,23 @@
+//
+// Copyright © 2024 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <armnn/Descriptors.hpp>
+
+#include <gpuFsa/GpuFsaBackend.hpp>
+
+namespace armnn
+{
+
+ using namespace arm_compute::experimental::dynamic_fusion;
+
+ arm_compute::Status GpuFsaElementwiseBinarySubValidate(const TensorInfo& input0,
+ const TensorInfo& input1);
+
+ void GpuFsaElementwiseBinarySubCreateOp(GpuFsaPreCompiledBlob* blob,
+ const TensorInfo& input0,
+ const TensorInfo& input1);
+}
\ No newline at end of file
diff --git a/src/backends/gpuFsa/test/GpuFsaEndToEndTests.cpp b/src/backends/gpuFsa/test/GpuFsaEndToEndTests.cpp
index 9972b24637..26c7cb8d63 100644
--- a/src/backends/gpuFsa/test/GpuFsaEndToEndTests.cpp
+++ b/src/backends/gpuFsa/test/GpuFsaEndToEndTests.cpp
@@ -45,4 +45,15 @@ TEST_CASE("GpuFsaElementwiseBinaryAddTestFloat16")
ElementwiseBinarySimple3DEndToEnd<armnn::DataType::Float16>(gpuFsaDefaultBackends, BinaryOperation::Add);
}
+// ElementwiseBinary Sub
+TEST_CASE("GpuFsaElementwiseBinarySubTestFloat32")
+{
+ ElementwiseBinarySimple3DEndToEnd<armnn::DataType::Float32>(gpuFsaDefaultBackends, BinaryOperation::Sub);
+}
+
+TEST_CASE("GpuFsaElementwiseBinarySubTestFloat16")
+{
+ ElementwiseBinarySimple3DEndToEnd<armnn::DataType::Float16>(gpuFsaDefaultBackends, BinaryOperation::Sub);
+}
+
}
diff --git a/src/backends/gpuFsa/test/GpuFsaLayerSupportTests.cpp b/src/backends/gpuFsa/test/GpuFsaLayerSupportTests.cpp
index 49ddadea59..9d4b3b9367 100644
--- a/src/backends/gpuFsa/test/GpuFsaLayerSupportTests.cpp
+++ b/src/backends/gpuFsa/test/GpuFsaLayerSupportTests.cpp
@@ -81,4 +81,24 @@ TEST_CASE("IsLayerSupportedGpuFsaElementWiseBinaryAdd")
CHECK(supported);
}
+TEST_CASE("IsLayerSupportedGpuFsaElementWiseBinarySub")
+{
+ TensorInfo input0Info({ 2, 2 }, DataType::Float32);
+ TensorInfo input1Info({ 2, 2 }, DataType::Float32);
+ TensorInfo outputInfo({ 2, 2 }, DataType::Float32);
+
+ ElementwiseBinaryDescriptor desc;
+ desc.m_Operation = BinaryOperation::Sub;
+
+ GpuFsaLayerSupport supportChecker;
+ std::string reasonIfNotSupported;
+ auto supported = supportChecker.IsLayerSupported(LayerType::ElementwiseBinary,
+ {input0Info, input1Info, outputInfo},
+ desc,
+ EmptyOptional(),
+ EmptyOptional(),
+ reasonIfNotSupported);
+ CHECK(supported);
+}
+
}
\ No newline at end of file
diff --git a/src/backends/gpuFsa/test/GpuFsaOptimizedNetworkTests.cpp b/src/backends/gpuFsa/test/GpuFsaOptimizedNetworkTests.cpp
index 673a52a5a0..4b27f3bff0 100644
--- a/src/backends/gpuFsa/test/GpuFsaOptimizedNetworkTests.cpp
+++ b/src/backends/gpuFsa/test/GpuFsaOptimizedNetworkTests.cpp
@@ -182,4 +182,52 @@ TEST_CASE("ElementwiseBinaryAddSupportedOptimizedNetwork")
&IsLayerOfType<OutputLayer>));
}
+TEST_CASE("ElementwiseBinarySubSupportedOptimizedNetwork")
+{
+ using namespace armnn;
+
+ const float qScale = 1.0f;
+ const int32_t qOffset = 0;
+
+ const TensorShape& input1Shape = { 2, 2, 2 };
+ const TensorShape& input2Shape = { 2, 2, 2 };
+ const TensorShape& outputShape = { 2, 2, 2 };
+
+ TensorInfo input1TensorInfo(input1Shape, DataType::Float32, qScale, qOffset, true);
+ TensorInfo input2TensorInfo(input2Shape, DataType::Float32, qScale, qOffset, true);
+ TensorInfo outputTensorInfo(outputShape, DataType::Float32, qScale, qOffset);
+
+ IRuntime::CreationOptions options;
+ IRuntimePtr runtime(IRuntime::Create(options));
+ INetworkPtr network(INetwork::Create());
+
+ IConnectableLayer* input1 = network->AddInputLayer(0, "input0");
+ IConnectableLayer* input2 = network->AddInputLayer(1, "input1");
+
+ ElementwiseBinaryDescriptor desc;
+ desc.m_Operation = BinaryOperation::Sub;
+
+ IConnectableLayer* elementwiseBinaryLayer = network->AddElementwiseBinaryLayer(desc, "elementwiseBinary");
+ IConnectableLayer* output = network->AddOutputLayer(2, "output");
+
+ Connect(input1, elementwiseBinaryLayer, input1TensorInfo, 0, 0);
+ Connect(input2, elementwiseBinaryLayer, input2TensorInfo, 0, 1);
+ Connect(elementwiseBinaryLayer, output, outputTensorInfo, 0, 0);
+
+ std::vector<BackendId> backends = { "GpuFsa" };
+
+ OptimizerOptionsOpaque optimizedOptions;
+ IOptimizedNetworkPtr optNet = Optimize(*network, backends, runtime->GetDeviceSpec(), optimizedOptions);
+ CHECK(optNet);
+
+ Graph& graph = GetGraphForTesting(optNet.get());
+
+ // Check graph layer sequence to ensure that the network has been replaced with a PreCompiledLayer
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(),
+ &IsLayerOfType<InputLayer>,
+ &IsLayerOfType<InputLayer>,
+ &IsLayerOfType<PreCompiledLayer>,
+ &IsLayerOfType<OutputLayer>));
+}
+
}
\ No newline at end of file
diff --git a/src/backends/neon/test/NeonEndToEndTests.cpp b/src/backends/neon/test/NeonEndToEndTests.cpp
index 58c55e5d72..37f6d3845b 100644
--- a/src/backends/neon/test/NeonEndToEndTests.cpp
+++ b/src/backends/neon/test/NeonEndToEndTests.cpp
@@ -177,6 +177,15 @@ TEST_CASE("NeonSquaredDifferenceEndToEndTestUint8")
ElementwiseBinarySimpleEndToEnd<armnn::DataType::QAsymmU8>(neonDefaultBackends, BinaryOperation::SqDiff);
}
+TEST_CASE("NeonSubtractionEndToEndFloat32Simple3DTest")
+{
+ ElementwiseBinarySimple3DEndToEnd<armnn::DataType::Float32>(neonDefaultBackends, BinaryOperation::Sub);
+}
+TEST_CASE("NeonSubtractionEndToEndFloat16Simple3DTest")
+{
+ ElementwiseBinarySimple3DEndToEnd<armnn::DataType::Float16>(neonDefaultBackends, BinaryOperation::Sub);
+}
+
// Batch Mat Mul
TEST_CASE("NeonBatchMatMulEndToEndFloat32Test")
{
diff --git a/src/backends/reference/test/RefEndToEndTests.cpp b/src/backends/reference/test/RefEndToEndTests.cpp
index 185deb685d..c09304e4d1 100644
--- a/src/backends/reference/test/RefEndToEndTests.cpp
+++ b/src/backends/reference/test/RefEndToEndTests.cpp
@@ -1819,6 +1819,14 @@ TEST_CASE("RefSubEndToEndTestUint8")
{
ElementwiseBinarySimpleEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends, BinaryOperation::Sub);
}
+TEST_CASE("RefSubEndToEndTestFloat32Simple3D")
+{
+ ElementwiseBinarySimple3DEndToEnd<armnn::DataType::Float32>(defaultBackends, BinaryOperation::Sub);
+}
+TEST_CASE("RefSubEndToEndTestFloat16Simple3D")
+{
+ ElementwiseBinarySimple3DEndToEnd<armnn::DataType::Float16>(defaultBackends, BinaryOperation::Sub);
+}
TEST_CASE("RefMaximumEndToEndTestFloat32")
{
ElementwiseBinarySimpleEndToEnd<armnn::DataType::Float32>(defaultBackends, BinaryOperation::Maximum);