ArmNN 24.02
GpuFsaSoftmax.cpp
//
// Copyright © 2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "GpuFsaSoftmax.hpp"

#include <aclCommon/ArmComputeTensorUtils.hpp>
#include <aclCommon/ArmComputeUtils.hpp>

#include <arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadContext.h>
#include <arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadSketch.h>
#include <arm_compute/dynamic_fusion/sketch/gpu/operators/GpuSoftmax.h>
#include <arm_compute/dynamic_fusion/sketch/gpu/operators/GpuOutput.h>

#include <iostream> // needed for the std::cout diagnostic in the NDEBUG block below

using namespace arm_compute::experimental::dynamic_fusion;
using namespace armnn::armcomputetensorutils;

namespace armnn
{

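// Validation-only entry point: builds a throwaway GpuWorkloadContext and sketch,
// translates the ArmNN tensor infos and SoftmaxDescriptor into their ACL
// equivalents, and returns the status reported by GpuSoftmax::validate_op.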
arm_compute::Status GpuFsaSoftmaxValidate(const TensorInfo& input,
                                          const TensorInfo& output,
                                          const SoftmaxDescriptor& descriptor)
{
    // Create a new workload sketch, for validation purposes
    auto compileCtx = arm_compute::CLKernelLibrary::get().get_compile_context();
    auto workloadContext = GpuWorkloadContext(&compileCtx);
    GpuWorkloadSketch sketch{ &workloadContext };

    // Build and create tensor infos using the sketch
    arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(input, input.GetNumDimensions());
    arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output, output.GetNumDimensions());
    aclInputInfo.set_are_values_constant(input.IsConstant());
    aclOutputInfo.set_are_values_constant(output.IsConstant());
    arm_compute::ITensorInfo* inputInfo = workloadContext.create_tensor_info(aclInputInfo);
    arm_compute::ITensorInfo* outputInfo = workloadContext.create_tensor_info(aclOutputInfo);

    // Set Softmax attributes using descriptor
    SoftmaxAttributes softmaxAttributes{};
    softmaxAttributes.beta(descriptor.m_Beta);
    softmaxAttributes.is_log_softmax(false); // Use Softmax not LogSoftmax
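    // ComputeAclAxis maps the ArmNN axis (counted left to right) onto the ACL
    // convention (counted right to left), giving a value in the range [-rank, rank).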
    int aclAxis = ComputeAclAxis(descriptor.m_Axis, input);
    softmaxAttributes.axis(aclAxis);

    // Validate operator, check status and update reasonIfUnsupported
    arm_compute::Status aclStatus = GpuSoftmax::validate_op(sketch,
                                                            inputInfo,
                                                            outputInfo,
                                                            softmaxAttributes);

#ifndef NDEBUG
    const bool validated = aclStatus.error_code() == arm_compute::ErrorCode::OK;
    if (!validated)
    {
        std::cout << "GpuFsaSoftmaxValidate failed: " << aclStatus.error_description() << std::endl;
    }
#endif

    return aclStatus;
}

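// Records a Softmax operator into the pre-compiled blob's workload sketch. The
// ITensorInfo objects are created through the blob's GpuWorkloadContext and stored
// on the blob so they can be reused when the fused workload is later compiled and run.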
void GpuFsaSoftmaxCreateOp(GpuFsaPreCompiledBlob* blob,
                           const TensorInfo& input,
                           const TensorInfo& output,
                           const SoftmaxDescriptor& descriptor)
{
    GpuWorkloadSketch* sketch = blob->sketch.get();
    GpuWorkloadContext* workloadContext = blob->workloadContext.get();
    std::vector<arm_compute::ITensorInfo*> inputTensorInfos = {};
    std::vector<arm_compute::ITensorInfo*> outputTensorInfos = {};

    arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(input, input.GetNumDimensions());
    arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output, output.GetNumDimensions());
    aclInputInfo.set_are_values_constant(input.IsConstant());
    aclOutputInfo.set_are_values_constant(output.IsConstant());

    inputTensorInfos.emplace_back(workloadContext->create_tensor_info(aclInputInfo));
    outputTensorInfos.emplace_back(workloadContext->create_tensor_info(aclOutputInfo));

    // Set Softmax attributes using descriptor
    SoftmaxAttributes softmaxAttributes{};
    softmaxAttributes.beta(descriptor.m_Beta); // Only used for LogSoftmax else default
    softmaxAttributes.is_log_softmax(false); // Use Softmax not LogSoftmax
    int aclAxis = ComputeAclAxis(descriptor.m_Axis, input);
    softmaxAttributes.axis(aclAxis);

    // Validate operator, check status and update reasonIfUnsupported
    arm_compute::Status aclStatus = GpuSoftmax::validate_op(*sketch,
                                                            inputTensorInfos[0],
                                                            outputTensorInfos[0],
                                                            softmaxAttributes);
    const bool supported = aclStatus.error_code() == arm_compute::ErrorCode::OK;
    if (!supported)
    {
        throw BackendCapabilityException("\"GpuFsa\" backend failed during softmax validation");
    }

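    // create_op appends the Softmax to the blob's sketch; it is only reached once
    // validate_op has confirmed the configuration is supported.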
    GpuSoftmax::create_op(*sketch, inputTensorInfos[0], outputTensorInfos[0], softmaxAttributes);

    // Store the TensorInfos within the blob as unique_ptrs to be used later
    blob->inputTensorInfos = std::make_unique<std::vector<arm_compute::ITensorInfo*>>(inputTensorInfos);
    blob->outputTensorInfos = std::make_unique<std::vector<arm_compute::ITensorInfo*>>(outputTensorInfos);
}

} // namespace armnn
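
For context, the sketch below shows one way the two helpers above could be driven when the GpuFsa backend pre-compiles a Softmax layer. It is not part of GpuFsaSoftmax.cpp: PreCompileSoftmax is a hypothetical helper name, and the blob wiring simply follows the GpuFsaPreCompiledBlob members declared in GpuFsaBackend.hpp (sketch, workloadContext, inputTensorInfos, outputTensorInfos).

// Illustrative sketch only; assumes GpuFsaSoftmax.hpp pulls in GpuFsaPreCompiledBlob
// (via GpuFsaBackend.hpp) and that an OpenCL context is available at this point.
#include "GpuFsaSoftmax.hpp"

#include <armnn/Descriptors.hpp>
#include <armnn/Exceptions.hpp>
#include <armnn/Tensor.hpp>

#include <arm_compute/core/CL/CLKernelLibrary.h>

#include <memory>

using namespace arm_compute::experimental::dynamic_fusion;

armnn::GpuFsaPreCompiledBlob PreCompileSoftmax(const armnn::TensorInfo& input,
                                               const armnn::TensorInfo& output,
                                               const armnn::SoftmaxDescriptor& descriptor)
{
    // Reject unsupported configurations before building any state.
    arm_compute::Status status = armnn::GpuFsaSoftmaxValidate(input, output, descriptor);
    if (status.error_code() != arm_compute::ErrorCode::OK)
    {
        throw armnn::BackendCapabilityException("GpuFsa cannot compile this Softmax");
    }

    // Wire up a blob with its own workload context and sketch, mirroring the
    // throwaway context built inside GpuFsaSoftmaxValidate.
    armnn::GpuFsaPreCompiledBlob blob;
    auto compileCtx      = arm_compute::CLKernelLibrary::get().get_compile_context();
    blob.workloadContext = std::make_shared<GpuWorkloadContext>(&compileCtx);
    blob.sketch          = std::make_unique<GpuWorkloadSketch>(blob.workloadContext.get());

    // Record the Softmax into the blob's sketch and store its tensor infos on the blob.
    armnn::GpuFsaSoftmaxCreateOp(&blob, input, output, descriptor);
    return blob;
}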