ArmNN 24.02
GpuFsaActivation.cpp
//
// Copyright © 2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "GpuFsaActivation.hpp"

#include <aclCommon/ArmComputeTensorUtils.hpp>

#include <arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadContext.h>
#include <arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadSketch.h>
#include <arm_compute/dynamic_fusion/sketch/gpu/operators/GpuTanh.h>
#include <arm_compute/dynamic_fusion/sketch/gpu/operators/GpuSigmoid.h>
#include <arm_compute/dynamic_fusion/sketch/gpu/operators/GpuOutput.h>

using namespace arm_compute::experimental::dynamic_fusion;
using namespace armnn::armcomputetensorutils;

namespace armnn
{

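// Asks the Arm Compute Library dynamic fusion interface whether the requested
// activation can be executed by the GpuFsa backend, using a throwaway workload
// sketch purely for validation.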
arm_compute::Status GpuFsaActivationValidate(const TensorInfo& input,
                                             const ActivationDescriptor& descriptor)
{
    // Create a new workload sketch, for validation purposes
    auto compileCtx = arm_compute::CLKernelLibrary::get().get_compile_context();
    auto workloadContext = GpuWorkloadContext(&compileCtx);
    GpuWorkloadSketch sketch{ &workloadContext };

    arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(input, input.GetNumDimensions());
    aclInputInfo.set_are_values_constant(input.IsConstant());

    arm_compute::ITensorInfo* inputInfo = workloadContext.create_tensor_info(aclInputInfo);

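    // Only TanH (restricted to a = 1, b = 1) and Sigmoid are currently mapped
    // onto dynamic fusion operators; everything else is rejected.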
    switch (descriptor.m_Function)
    {
        case ActivationFunction::TanH:
        {
            if (descriptor.m_A != 1 || descriptor.m_B != 1)
            {
                return arm_compute::Status(arm_compute::ErrorCode::RUNTIME_ERROR,
                                           "Activation function TanH only works with a=1 and b=1");
            }
            return GpuTanh::validate_op(sketch, inputInfo);
        }
        case ActivationFunction::Sigmoid:
        {
            return GpuSigmoid::validate_op(sketch, inputInfo);
        }
        default:
            return arm_compute::Status(arm_compute::ErrorCode::RUNTIME_ERROR,
                                       std::string("Activation function currently not supported in GpuFsa: ")
                                       + GetActivationFunctionAsCString(descriptor.m_Function));
    }
}

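// Validates the activation against the blob's own sketch, then records the
// operator (plus an output marker) in that sketch and stores the input/output
// ITensorInfo pointers in the pre-compiled blob for later use.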
void GpuFsaActivationCreateOp(GpuFsaPreCompiledBlob* blob,
                              const TensorInfo& input,
                              const ActivationDescriptor& descriptor)
{
    GpuWorkloadSketch* sketch = blob->sketch.get();
    GpuWorkloadContext* workloadContext = blob->workloadContext.get();
    std::vector<arm_compute::ITensorInfo*> inputTensorInfos = {};
    std::vector<arm_compute::ITensorInfo*> outputTensorInfos = {};

    arm_compute::TensorInfo aclInput0Info = BuildArmComputeTensorInfo(input, input.GetNumDimensions());

    aclInput0Info.set_are_values_constant(input.IsConstant());

    inputTensorInfos.emplace_back(workloadContext->create_tensor_info(aclInput0Info));

    // Validate the operator, check the status and throw if the activation is unsupported
    arm_compute::Status aclStatus{};
    switch (descriptor.m_Function)
    {
        case ActivationFunction::TanH:
        {
            aclStatus = GpuTanh::validate_op(*sketch, inputTensorInfos[0]);
            break;
        }
        case ActivationFunction::Sigmoid:
        {
            aclStatus = GpuSigmoid::validate_op(*sketch, inputTensorInfos[0]);
            break;
        }
        default:
            throw InvalidArgumentException(std::string("Activation function currently not supported in GpuFsa: ")
                                           + GetActivationFunctionAsCString(descriptor.m_Function));
    }

    const bool supported = aclStatus.error_code() == arm_compute::ErrorCode::OK;
    if (!supported)
    {
        throw BackendCapabilityException("\"GpuFsa\" backend failed during Activation layer validation");
    }

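    // Add the activation operator to the blob's sketch; create_op returns the
    // ITensorInfo describing the operator's intermediate output.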
    arm_compute::ITensorInfo* activationOutputInfo{};
    switch (descriptor.m_Function)
    {
        case ActivationFunction::TanH:
        {
            activationOutputInfo = GpuTanh::create_op(*sketch, inputTensorInfos[0]);
            break;
        }
        case ActivationFunction::Sigmoid:
        {
            activationOutputInfo = GpuSigmoid::create_op(*sketch, inputTensorInfos[0]);
            break;
        }
        default:
            throw InvalidArgumentException(std::string("Activation function currently not supported in GpuFsa: ")
                                           + GetActivationFunctionAsCString(descriptor.m_Function));
    }

    // Temporary fix until a fusing attempt is made for the GpuFsa backend and an Output layer workload is created.
    outputTensorInfos.emplace_back(workloadContext->create_tensor_info());
    GpuOutput::create_op(*sketch, activationOutputInfo, outputTensorInfos[0]);

    // Store the TensorInfos within the blob as unique_ptrs to be used later
    blob->inputTensorInfos = std::make_unique<std::vector<arm_compute::ITensorInfo*>>(inputTensorInfos);
    blob->outputTensorInfos = std::make_unique<std::vector<arm_compute::ITensorInfo*>>(outputTensorInfos);
}

} // namespace armnn
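
For context, the sketch below shows one way a caller, such as a layer-support check in the GpuFsa backend, might invoke GpuFsaActivationValidate. It is a minimal illustration, not code from this file: the tensor shape, data type, and the helper name IsTanhSupportedOnGpuFsa are assumptions chosen for the example.

#include "GpuFsaActivation.hpp"

#include <armnn/Descriptors.hpp>
#include <armnn/Tensor.hpp>

// Hypothetical helper: returns true if the GpuFsa backend accepts a TanH activation
// for a Float32 tensor of shape { 1, 16, 16, 8 } (shape chosen for illustration only).
bool IsTanhSupportedOnGpuFsa()
{
    armnn::TensorInfo input({ 1, 16, 16, 8 }, armnn::DataType::Float32);

    armnn::ActivationDescriptor descriptor;
    descriptor.m_Function = armnn::ActivationFunction::TanH;
    descriptor.m_A = 1.0f; // TanH is only accepted with a == 1
    descriptor.m_B = 1.0f; // and b == 1, as enforced in GpuFsaActivationValidate

    arm_compute::Status status = armnn::GpuFsaActivationValidate(input, descriptor);
    return status.error_code() == arm_compute::ErrorCode::OK;
}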