ArmNN 24.02
GpuFsaElementwiseBinary.cpp
1 //
2 // Copyright © 2024 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
6 #include "GpuFsaElementwiseBinary.hpp"
7 
8 #include <aclCommon/ArmComputeTensorUtils.hpp>
9 
10 #include <arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadContext.h>
11 #include <arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadSketch.h>
12 #include <arm_compute/dynamic_fusion/sketch/gpu/operators/GpuAdd.h>
13 #include <arm_compute/dynamic_fusion/sketch/gpu/operators/GpuAdd.h>
14 #include <arm_compute/dynamic_fusion/sketch/gpu/operators/GpuMul.h>
15 #include <arm_compute/dynamic_fusion/sketch/gpu/operators/GpuSub.h>
16 #include <arm_compute/dynamic_fusion/sketch/gpu/operators/GpuOutput.h>
17 
18 using namespace arm_compute::experimental::dynamic_fusion;
19 using namespace armnn::armcomputetensorutils;
20 
21 namespace armnn
22 {
23 
24 arm_compute::Status GpuFsaElementwiseBinaryValidate(const TensorInfo& input0,
25  const TensorInfo& input1,
26  const ElementwiseBinaryDescriptor& descriptor)
27 {
28  // Create a new workload sketch, for validation purposes
29  auto compileCtx = arm_compute::CLKernelLibrary::get().get_compile_context();
30  auto workloadContext = GpuWorkloadContext(&compileCtx);
31  GpuWorkloadSketch sketch{ &workloadContext };
32 
33  arm_compute::TensorInfo aclInput0Info = BuildArmComputeTensorInfo(input0, input0.GetNumDimensions());
34  arm_compute::TensorInfo aclInput1Info = BuildArmComputeTensorInfo(input1, input1.GetNumDimensions());
35 
36  aclInput0Info.set_are_values_constant(input0.IsConstant());
37  aclInput1Info.set_are_values_constant(input1.IsConstant());
38 
39  arm_compute::ITensorInfo* inputInfo0 = workloadContext.create_tensor_info(aclInput0Info);
40  arm_compute::ITensorInfo* inputInfo1 = workloadContext.create_tensor_info(aclInput1Info);
41 
42  switch (descriptor.m_Operation)
43  {
44  case BinaryOperation::Add:
45  {
46  return GpuAdd::validate_op(sketch, inputInfo0, inputInfo1);
47  }
48  case BinaryOperation::Mul:
49  {
50  return GpuMul::validate_op(sketch, inputInfo0, inputInfo1);
51  }
52  case BinaryOperation::Sub:
53  {
54  return GpuSub::validate_op(sketch, inputInfo0, inputInfo1);
55  }
56  default:
57  return arm_compute::Status(arm_compute::ErrorCode::RUNTIME_ERROR,
58  std::string("Elementwise Binary operation not supported in GpuFsa: ")
59  + GetBinaryOperationAsCString(descriptor.m_Operation));
60  }
61 }
62 
63 void GpuFsaElementwiseBinaryCreateOp(GpuFsaPreCompiledBlob* blob,
64  const TensorInfo& input0,
65  const TensorInfo& input1,
66  const ElementwiseBinaryDescriptor& descriptor)
67 {
68  GpuWorkloadSketch* sketch = blob->sketch.get();
69  GpuWorkloadContext* workloadContext = blob->workloadContext.get();
70  std::vector<arm_compute::ITensorInfo*> inputTensorInfos = {};
71  std::vector<arm_compute::ITensorInfo*> outputTensorInfos = {};
72 
73  arm_compute::TensorInfo aclInput0Info = BuildArmComputeTensorInfo(input0, input0.GetNumDimensions());
74  arm_compute::TensorInfo aclInput1Info = BuildArmComputeTensorInfo(input1, input1.GetNumDimensions());
75 
76  aclInput0Info.set_are_values_constant(input0.IsConstant());
77  aclInput1Info.set_are_values_constant(input1.IsConstant());
78 
79  inputTensorInfos.emplace_back(workloadContext->create_tensor_info(aclInput0Info));
80  inputTensorInfos.emplace_back(workloadContext->create_tensor_info(aclInput1Info));
81 
82  // Validate operator, check status and update reasonIfUnsupported
83  // Validate operator, check status and update reasonIfUnsupported
84  arm_compute::Status aclStatus{};
85  switch (descriptor.m_Operation)
86  {
87  case BinaryOperation::Add:
88  {
89  aclStatus = GpuAdd::validate_op(*sketch, inputTensorInfos[0], inputTensorInfos[1]);
90  break;
91  }
92  case BinaryOperation::Mul:
93  {
94  aclStatus = GpuMul::validate_op(*sketch, inputTensorInfos[0], inputTensorInfos[1]);
95  break;
96  }
97  case BinaryOperation::Sub:
98  {
99  aclStatus = GpuSub::validate_op(*sketch, inputTensorInfos[0], inputTensorInfos[1]);
100  break;
101  }
102  default:
103  throw InvalidArgumentException(std::string("Elementwise Binary operation not supported in GpuFsa: ")
104  + GetBinaryOperationAsCString(descriptor.m_Operation));
105  }
106 
107  const bool supported = aclStatus.error_code() == arm_compute::ErrorCode::OK;
108  if (!supported)
109  {
110  throw BackendCapabilityException("\"GpuFsa\" backend failed during elementwise binary operation validation");
111  }
112 
113  arm_compute::ITensorInfo* elementwiseBinaryOutputInfo{};
114  switch (descriptor.m_Operation)
115  {
116  case BinaryOperation::Add:
117  {
118  elementwiseBinaryOutputInfo = GpuAdd::create_op(*sketch, inputTensorInfos[0], inputTensorInfos[1]);
119  break;
120  }
121  case BinaryOperation::Mul:
122  {
123  elementwiseBinaryOutputInfo = GpuMul::create_op(*sketch, inputTensorInfos[0], inputTensorInfos[1]);
124  break;
125  }
126  case BinaryOperation::Sub:
127  {
128  elementwiseBinaryOutputInfo = GpuSub::create_op(*sketch, inputTensorInfos[0], inputTensorInfos[1]);
129  break;
130  }
131  default:
132  throw InvalidArgumentException(std::string("Elementwise Binary operation not supported in GpuFsa: ")
133  + GetBinaryOperationAsCString(descriptor.m_Operation));
134  }
135 
136  // Temporary fix until fusing attempt is made for GpuFsa backend and Output layer workload is created.
137  outputTensorInfos.emplace_back(workloadContext->create_tensor_info());
138  GpuOutput::create_op(*sketch, elementwiseBinaryOutputInfo, outputTensorInfos[0]);
139 
140  // Store the TensorInfos within the blob as unique_ptrs to be used later
141  blob->inputTensorInfos = std::make_unique<std::vector<arm_compute::ITensorInfo*>>(inputTensorInfos);
142  blob->outputTensorInfos = std::make_unique<std::vector<arm_compute::ITensorInfo*>>(outputTensorInfos);
143 }
144 
145 } // namespace armnn
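
For context, the following is a minimal usage sketch of the two entry points above, not code from the ArmNN tree: the helper name SketchElementwiseAddIntoBlob and the include paths are assumptions, and it presumes the blob's workloadContext and sketch members have already been initialised as declared in GpuFsaBackend.hpp.

// Hedged usage sketch; SketchElementwiseAddIntoBlob is a hypothetical helper and the
// include paths assume compilation alongside the GpuFsa backend sources.
#include "GpuFsaBackend.hpp"
#include "GpuFsaElementwiseBinary.hpp"

#include <armnn/Descriptors.hpp>
#include <armnn/Exceptions.hpp>
#include <armnn/Tensor.hpp>

namespace armnn
{

void SketchElementwiseAddIntoBlob(GpuFsaPreCompiledBlob* blob,
                                  const TensorInfo& input0,
                                  const TensorInfo& input1)
{
    ElementwiseBinaryDescriptor descriptor(BinaryOperation::Add);

    // Ask the dynamic fusion validator whether the operation is supported at all...
    arm_compute::Status status = GpuFsaElementwiseBinaryValidate(input0, input1, descriptor);
    if (status.error_code() != arm_compute::ErrorCode::OK)
    {
        throw BackendCapabilityException(status.error_description());
    }

    // ...then record the operator (and its GpuOutput) into the blob's workload sketch.
    GpuFsaElementwiseBinaryCreateOp(blob, input0, input1, descriptor);
}

} // namespace armnn

The validate/create split shown here mirrors the structure of the file itself: support is checked first, and only then is the operator (plus its GpuOutput) recorded into the pre-compiled blob's workload sketch.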
armnn::GetBinaryOperationAsCString
constexpr char const * GetBinaryOperationAsCString(BinaryOperation operation)
Definition: TypesUtils.hpp:76
armnn::GpuFsaElementwiseBinaryCreateOp
void GpuFsaElementwiseBinaryCreateOp(GpuFsaPreCompiledBlob *blob, const TensorInfo &input0, const TensorInfo &input1, const ElementwiseBinaryDescriptor &descriptor)
Definition: GpuFsaElementwiseBinary.cpp:63
armnn::TensorInfo
Definition: Tensor.hpp:152
GpuFsaElementwiseBinary.hpp
armnn::TensorInfo::GetNumDimensions
unsigned int GetNumDimensions() const
Definition: Tensor.hpp:197
armnn::TensorInfo::IsConstant
bool IsConstant() const
Definition: Tensor.cpp:509
armnn::GpuFsaPreCompiledBlob::inputTensorInfos
std::unique_ptr< std::vector< arm_compute::ITensorInfo * > > inputTensorInfos
Definition: GpuFsaBackend.hpp:37
armnn::ElementwiseBinaryDescriptor
A ElementwiseBinaryDescriptor for the ElementwiseBinaryLayer.
Definition: Descriptors.hpp:109
armnn::GpuFsaPreCompiledBlob::sketch
std::unique_ptr< arm_compute::experimental::dynamic_fusion::GpuWorkloadSketch > sketch
Definition: GpuFsaBackend.hpp:34
armnn::InvalidArgumentException
Definition: Exceptions.hpp:80
armnn::Status
Status
Definition: Types.hpp:42
armnn::BackendCapabilityException
Definition: Exceptions.hpp:152
armnn::ElementwiseBinaryDescriptor::m_Operation
BinaryOperation m_Operation
Specifies the elementwiseBinary operation to execute.
Definition: Descriptors.hpp:125
armnn
Copyright (c) 2021 ARM Limited and Contributors.
Definition: 01_00_quick_start.dox:6
ArmComputeTensorUtils.hpp
armnn::GpuFsaPreCompiledBlob::workloadContext
std::shared_ptr< arm_compute::experimental::dynamic_fusion::GpuWorkloadContext > workloadContext
Definition: GpuFsaBackend.hpp:35
armnn::GpuFsaElementwiseBinaryValidate
arm_compute::Status GpuFsaElementwiseBinaryValidate(const TensorInfo &input0, const TensorInfo &input1, const ElementwiseBinaryDescriptor &descriptor)
Definition: GpuFsaElementwiseBinary.cpp:24
armnn::GpuFsaPreCompiledBlob
A structure which contains all the elements needed to execute a fused workload in the GpuFsa Backend.
Definition: GpuFsaBackend.hpp:32
armnn::GpuFsaPreCompiledBlob::outputTensorInfos
std::unique_ptr< std::vector< arm_compute::ITensorInfo * > > outputTensorInfos
Definition: GpuFsaBackend.hpp:38