From 829e13edbe9487b5a0600688cec6312b867e2f07 Mon Sep 17 00:00:00 2001
From: John Mcloughlin
Date: Wed, 31 Jan 2024 11:00:27 +0000
Subject: IVGCVSW-7568 Implement Sub ElementwiseBinary operator GpuFsa

* Added support for Gpu Sub operator
* Added unit tests

Signed-off-by: John Mcloughlin
Change-Id: I1efaa485772a3716e3781566843bd50bd9bab811
---
 src/backends/gpuFsa/layers/CMakeLists.txt        |  2 +
 .../gpuFsa/layers/GpuFsaElementwiseBinarySub.cpp | 82 ++++++++++++++++++++++
 .../gpuFsa/layers/GpuFsaElementwiseBinarySub.hpp | 23 ++++++
 3 files changed, 107 insertions(+)
 create mode 100644 src/backends/gpuFsa/layers/GpuFsaElementwiseBinarySub.cpp
 create mode 100644 src/backends/gpuFsa/layers/GpuFsaElementwiseBinarySub.hpp

diff --git a/src/backends/gpuFsa/layers/CMakeLists.txt b/src/backends/gpuFsa/layers/CMakeLists.txt
index bba795eedb..182a32c121 100644
--- a/src/backends/gpuFsa/layers/CMakeLists.txt
+++ b/src/backends/gpuFsa/layers/CMakeLists.txt
@@ -10,6 +10,8 @@ list(APPEND armnnGpuFsaBackendLayerValidators_sources
         GpuFsaDepthwiseConvolution2d.hpp
         GpuFsaElementwiseBinaryAdd.cpp
         GpuFsaElementwiseBinaryAdd.hpp
+        GpuFsaElementwiseBinarySub.cpp
+        GpuFsaElementwiseBinarySub.hpp
 )
 
 add_library(armnnGpuFsaBackendLayerValidators OBJECT ${armnnGpuFsaBackendLayerValidators_sources})
diff --git a/src/backends/gpuFsa/layers/GpuFsaElementwiseBinarySub.cpp b/src/backends/gpuFsa/layers/GpuFsaElementwiseBinarySub.cpp
new file mode 100644
index 0000000000..4e7eb77190
--- /dev/null
+++ b/src/backends/gpuFsa/layers/GpuFsaElementwiseBinarySub.cpp
@@ -0,0 +1,82 @@
+//
+// Copyright © 2024 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "GpuFsaElementwiseBinarySub.hpp"
+
+#include <aclCommon/ArmComputeTensorUtils.hpp>
+
+#include <arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadContext.h>
+#include <arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadSketch.h>
+#include <arm_compute/dynamic_fusion/sketch/gpu/operators/GpuSub.h>
+#include <arm_compute/dynamic_fusion/sketch/gpu/operators/GpuOutput.h>
+
+using namespace arm_compute::experimental::dynamic_fusion;
+
+namespace armnn
+{
+
+arm_compute::Status GpuFsaElementwiseBinarySubValidate(const TensorInfo& input0,
+                                                       const TensorInfo& input1)
+{
+    using namespace armcomputetensorutils;
+
+    // Create a new workload sketch, for validation purposes
+    auto compileCtx      = arm_compute::CLKernelLibrary::get().get_compile_context();
+    auto workloadContext = GpuWorkloadContext(&compileCtx);
+    GpuWorkloadSketch sketch{ &workloadContext };
+
+    arm_compute::TensorInfo aclInput0Info = BuildArmComputeTensorInfo(input0, input0.GetNumDimensions());
+    arm_compute::TensorInfo aclInput1Info = BuildArmComputeTensorInfo(input1, input1.GetNumDimensions());
+
+    aclInput0Info.set_are_values_constant(input0.IsConstant());
+    aclInput1Info.set_are_values_constant(input1.IsConstant());
+
+    arm_compute::ITensorInfo* inputInfo0 = workloadContext.create_tensor_info(aclInput0Info);
+    arm_compute::ITensorInfo* inputInfo1 = workloadContext.create_tensor_info(aclInput1Info);
+
+    return GpuSub::validate_op(sketch, inputInfo0, inputInfo1);
+}
+
+void GpuFsaElementwiseBinarySubCreateOp(GpuFsaPreCompiledBlob* blob,
+                                        const TensorInfo& input0,
+                                        const TensorInfo& input1)
+{
+    using namespace armcomputetensorutils;
+
+    GpuWorkloadSketch* sketch           = blob->sketch.get();
+    GpuWorkloadContext* workloadContext = blob->workloadContext.get();
+    std::vector<arm_compute::ITensorInfo*> inputTensorInfos  = {};
+    std::vector<arm_compute::ITensorInfo*> outputTensorInfos = {};
+
+    arm_compute::TensorInfo aclInput0Info = BuildArmComputeTensorInfo(input0, input0.GetNumDimensions());
+    arm_compute::TensorInfo aclInput1Info = BuildArmComputeTensorInfo(input1, input1.GetNumDimensions());
+
+    aclInput0Info.set_are_values_constant(input0.IsConstant());
+    aclInput1Info.set_are_values_constant(input1.IsConstant());
+
+    inputTensorInfos.emplace_back(workloadContext->create_tensor_info(aclInput0Info));
+    inputTensorInfos.emplace_back(workloadContext->create_tensor_info(aclInput1Info));
+
+    // Validate the operator against the blob's sketch and throw if it is unsupported
+    arm_compute::Status aclStatus = GpuSub::validate_op(*sketch, inputTensorInfos[0], inputTensorInfos[1]);
+    const bool supported = aclStatus.error_code() == arm_compute::ErrorCode::OK;
+    if (!supported)
+    {
+        throw BackendCapabilityException("\"GpuFsa\" backend failed during elementwise binary subtract validation");
+    }
+
+    arm_compute::ITensorInfo* subOutputInfo =
+        GpuSub::create_op(*sketch, inputTensorInfos[0], inputTensorInfos[1]);
+
+    // Temporary fix until a fusing attempt is made for the GpuFsa backend and the Output layer workload is created.
+    outputTensorInfos.emplace_back(workloadContext->create_tensor_info());
+    GpuOutput::create_op(*sketch, subOutputInfo, outputTensorInfos[0]);
+
+    // Store the TensorInfos within the blob as unique_ptrs to be used later
+    blob->inputTensorInfos  = std::make_unique<std::vector<arm_compute::ITensorInfo*>>(inputTensorInfos);
+    blob->outputTensorInfos = std::make_unique<std::vector<arm_compute::ITensorInfo*>>(outputTensorInfos);
+}
+
+} // namespace armnn
\ No newline at end of file
diff --git a/src/backends/gpuFsa/layers/GpuFsaElementwiseBinarySub.hpp b/src/backends/gpuFsa/layers/GpuFsaElementwiseBinarySub.hpp
new file mode 100644
index 0000000000..59d8189f1f
--- /dev/null
+++ b/src/backends/gpuFsa/layers/GpuFsaElementwiseBinarySub.hpp
@@ -0,0 +1,23 @@
+//
+// Copyright © 2024 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <armnn/Descriptors.hpp>
+
+#include <gpuFsa/GpuFsaBackend.hpp>
+
+namespace armnn
+{
+
+    using namespace arm_compute::experimental::dynamic_fusion;
+
+    arm_compute::Status GpuFsaElementwiseBinarySubValidate(const TensorInfo& input0,
+                                                           const TensorInfo& input1);
+
+    void GpuFsaElementwiseBinarySubCreateOp(GpuFsaPreCompiledBlob* blob,
+                                            const TensorInfo& input0,
+                                            const TensorInfo& input1);
+}
\ No newline at end of file
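
Usage (illustrative): the two helpers split the work deliberately. GpuFsaElementwiseBinarySubValidate spins up a throwaway GpuWorkloadContext/GpuWorkloadSketch so support queries never touch persistent state, while GpuFsaElementwiseBinarySubCreateOp records the Sub (and, until output fusing lands, a GpuOutput) into the sketch owned by a GpuFsaPreCompiledBlob. The sketch below shows how a caller might drive them. It is not part of the patch: the main() harness and the 2x2 Float32 shapes are invented for the example, and the blob member types (shared_ptr context, unique_ptr sketch) are assumptions inferred from the .get() calls above; in-tree, the GpuFsa backend itself performs this wiring.

// Hypothetical caller sketch, not part of the patch.
// Assumes GpuFsaPreCompiledBlob holds a shared_ptr workloadContext and a
// unique_ptr sketch that the caller populates before CreateOp is invoked.
#include "GpuFsaElementwiseBinarySub.hpp"

#include <armnn/Tensor.hpp>
#include <arm_compute/core/CL/CLKernelLibrary.h>

#include <memory>

using namespace arm_compute::experimental::dynamic_fusion;

int main()
{
    const armnn::TensorInfo input0({ 2, 2 }, armnn::DataType::Float32);
    const armnn::TensorInfo input1({ 2, 2 }, armnn::DataType::Float32);

    // Cheap support query: builds a throwaway sketch, leaves no persistent state behind.
    arm_compute::Status status = armnn::GpuFsaElementwiseBinarySubValidate(input0, input1);
    if (status.error_code() != arm_compute::ErrorCode::OK)
    {
        return 1; // Sub unsupported for these shapes/types on this device
    }

    // Give the blob a live context and sketch, then record the Sub (plus the
    // temporary GpuOutput) into it for later compilation by the backend.
    armnn::GpuFsaPreCompiledBlob blob;
    auto compileCtx      = arm_compute::CLKernelLibrary::get().get_compile_context();
    blob.workloadContext = std::make_shared<GpuWorkloadContext>(&compileCtx);
    blob.sketch          = std::make_unique<GpuWorkloadSketch>(blob.workloadContext.get());

    armnn::GpuFsaElementwiseBinarySubCreateOp(&blob, input0, input1);
    return 0;
}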