ArmNN
 23.08
ClBatchMatMulWorkload.cpp
Go to the documentation of this file.
1 //
2 // Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
#include "ClBatchMatMulWorkload.hpp"

#include "ClWorkloadUtils.hpp"

#include <aclCommon/ArmComputeTensorUtils.hpp>
#include <aclCommon/ArmComputeUtils.hpp>

#include <armnn/utility/PolymorphicDowncast.hpp>

#include <backendsCommon/WorkloadUtils.hpp>

#include <cl/ClTensorHandle.hpp>

#include <arm_compute/function_info/MatMulInfo.h>

21 namespace armnn
22 {
23 
25  const TensorInfo& inputInfoY,
26  const TensorInfo& outputInfo,
27  const BatchMatMulDescriptor& descriptor,
28  const ActivationDescriptor* activationDescriptor)
29 {
30  if (descriptor.m_AdjointX || descriptor.m_AdjointY )
31  {
32  throw Exception("Support for adjoint not implemented.");
33  }
35  {
36  throw Exception("Only supported the MatMul in the last 2 dimensions");
37  }
38 
39  arm_compute::TensorInfo aclInputInfoX = armcomputetensorutils::BuildArmComputeTensorInfo(inputInfoX);
40  arm_compute::TensorInfo aclInputInfoY = armcomputetensorutils::BuildArmComputeTensorInfo(inputInfoY);
41  const arm_compute::TensorInfo aclOutputInfo = armcomputetensorutils::BuildArmComputeTensorInfo(outputInfo);
42 
43  // GeMM dispatches kernel handles dynamic inputs differently to static so this flag needs to be set
44  aclInputInfoX.set_are_values_constant(false);
45  aclInputInfoY.set_are_values_constant(false);
46 
47  const arm_compute::ActivationLayerInfo activationInfo = ConvertActivationDescriptorToAclActivationLayerInfo(
48  activationDescriptor);
49 
50  arm_compute::MatMulInfo matMulInfo;
51  matMulInfo.adj_lhs(descriptor.m_TransposeX);
52  matMulInfo.adj_rhs(descriptor.m_TransposeY);
53 
54  return arm_compute::CLMatMul::validate(&aclInputInfoX, &aclInputInfoY, &aclOutputInfo, matMulInfo, activationInfo);
55 }
56 
58  const WorkloadInfo& info,
59  const arm_compute::CLCompileContext& clCompileContext)
61 {
62  // Report Profiling Details
63  ARMNN_REPORT_PROFILING_WORKLOAD_DESC("ClBatchMatMulWorkload_Construct",
64  descriptor.m_Parameters,
65  info,
66  this->GetGuid());
67 
68  if (descriptor.m_Parameters.m_AdjointX || descriptor.m_Parameters.m_AdjointY )
69  {
70  throw Exception("Support for adjoint not implemented.");
71  }
74  {
75  throw Exception("Only supported the MatMul in the last 2 dimensions");
76  }
77 
78  m_Data.ValidateInputsOutputs("ClBatchMatMulWorkload", 2, 1);
79 
80  arm_compute::ICLTensor& inputX = PolymorphicDowncast<ClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
81  arm_compute::ICLTensor& inputY = PolymorphicDowncast<ClTensorHandle*>(m_Data.m_Inputs[1])->GetTensor();
82  auto outputHandle = PolymorphicDowncast<ClTensorHandle*>(m_Data.m_Outputs[0]);
83  arm_compute::ICLTensor& output = outputHandle->GetTensor();
84 
85  // GeMM dispatches kernel handles dynamic inputs differently to static so this flag needs to be set
86  inputX.info()->set_are_values_constant(false);
87  inputY.info()->set_are_values_constant(false);
88 
89  const arm_compute::ActivationLayerInfo activationInfo = ConvertAdditionalInfoToAclActivationLayerInfo(descriptor);
90 
91  arm_compute::MatMulInfo matMulInfo;
92  matMulInfo.adj_lhs(descriptor.m_Parameters.m_TransposeX);
93  matMulInfo.adj_rhs(descriptor.m_Parameters.m_TransposeY);
94 
95  arm_compute::GpuMatMulSettings settings;
96 
97  m_MatMulLayer.configure(clCompileContext, &inputX, &inputY, &output, matMulInfo, settings, activationInfo);
98 
99  // Report Profiling Details
100  WorkloadInfo detailsInfo;
101  detailsInfo.m_InputTensorInfos = info.m_InputTensorInfos;
102  detailsInfo.m_OutputTensorInfos = info.m_OutputTensorInfos;
103  ARMNN_REPORT_PROFILING_WORKLOAD_DESC("ClBatchMatMulWorkload_Construct",
104  descriptor.m_Parameters,
105  detailsInfo,
106  GetGuid());
107 }
108 
110 {
111  ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClBatchMatMulWorkload_Execute");
112  RunClFunction(m_MatMulLayer, CHECK_LOCATION());
113 }
114 } //namespace armnn
armnn::RunClFunction
void RunClFunction(arm_compute::IFunction &function, const CheckLocation &location)
Definition: ClWorkloadUtils.hpp:168
armnn::BatchMatMulQueueDescriptor
Definition: WorkloadData.hpp:748
armnn::ActivationDescriptor
An ActivationDescriptor for the ActivationLayer.
Definition: Descriptors.hpp:36
armnn::BatchMatMulDescriptor::m_TransposeX
bool m_TransposeX
Transpose the slices of each input tensor Transpose and Adjoint can not both be set to true for the s...
Definition: Descriptors.hpp:1591
armnn::BatchMatMulDescriptor::m_AdjointX
bool m_AdjointX
Adjoint the slices of each input tensor Transpose and Adjoint can not both be set to true for the sam...
Definition: Descriptors.hpp:1596
WorkloadUtils.hpp
armnn::QueueDescriptor::ValidateInputsOutputs
void ValidateInputsOutputs(const std::string &descName, unsigned int numExpectedIn, unsigned int numExpectedOut) const
Definition: WorkloadData.cpp:446
armnn::ConvertAdditionalInfoToAclActivationLayerInfo
arm_compute::ActivationLayerInfo ConvertAdditionalInfoToAclActivationLayerInfo(const QueueDescriptor &queueDescriptor)
Definition: ArmComputeUtils.hpp:103
armnn::BatchMatMulDescriptor::m_DataLayoutX
DataLayout m_DataLayoutX
Data layout of each input tensor, such as NHWC/NDHWC (leave as default for arbitrary layout)
Definition: Descriptors.hpp:1600
armnn::TensorInfo
Definition: Tensor.hpp:152
CHECK_LOCATION
#define CHECK_LOCATION()
Definition: Exceptions.hpp:203
armnn::ClBaseWorkload
Definition: ClBaseWorkload.hpp:13
armnn::BatchMatMulDescriptor::m_AdjointY
bool m_AdjointY
Definition: Descriptors.hpp:1597
armnn::ClBatchMatMulWorkload::Execute
virtual void Execute() const override
Definition: ClBatchMatMulWorkload.cpp:109
armnn::WorkloadInfo::m_OutputTensorInfos
std::vector< TensorInfo > m_OutputTensorInfos
Definition: WorkloadInfo.hpp:19
armnn::QueueDescriptorWithParameters::m_Parameters
LayerDescriptor m_Parameters
Definition: WorkloadData.hpp:66
ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID
#define ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID(label)
Creates a profiling event that uses GetGuid() and GetName() from the calling class.
Definition: ClWorkloadUtils.hpp:36
armnn::BatchMatMulDescriptor::m_TransposeY
bool m_TransposeY
Definition: Descriptors.hpp:1592
armnn::WorkloadInfo
Contains information about TensorInfos of a layer.
Definition: WorkloadInfo.hpp:16
PolymorphicDowncast.hpp
ClWorkloadUtils.hpp
armnn::BatchMatMulDescriptor::m_DataLayoutY
DataLayout m_DataLayoutY
Definition: Descriptors.hpp:1601
armnn::BatchMatMulDescriptor
A BatchMatMulDescriptor for the BatchMatMul operator.
Definition: Descriptors.hpp:1563
ArmComputeUtils.hpp
armnn::ClBatchMatMulValidate
arm_compute::Status ClBatchMatMulValidate(const TensorInfo &inputInfoX, const TensorInfo &inputInfoY, const TensorInfo &outputInfo, const BatchMatMulDescriptor &descriptor, const ActivationDescriptor *activationDescriptor)
Definition: ClBatchMatMulWorkload.cpp:24
armnn::Exception
Base class for all ArmNN exceptions so that users can filter to just those.
Definition: Exceptions.hpp:46
armnn::ConvertActivationDescriptorToAclActivationLayerInfo
arm_compute::ActivationLayerInfo ConvertActivationDescriptorToAclActivationLayerInfo(const ActivationDescriptor &actDesc)
Definition: ArmComputeUtils.hpp:85
armnn::BoostLogSeverityMapping::info
@ info
armnn::QueueDescriptor::m_Outputs
std::vector< ITensorHandle * > m_Outputs
Definition: WorkloadData.hpp:27
armnn::ClBatchMatMulWorkload::ClBatchMatMulWorkload
ClBatchMatMulWorkload(const BatchMatMulQueueDescriptor &descriptor, const WorkloadInfo &info, const arm_compute::CLCompileContext &clCompileContext)
Definition: ClBatchMatMulWorkload.cpp:57
ARMNN_REPORT_PROFILING_WORKLOAD_DESC
#define ARMNN_REPORT_PROFILING_WORKLOAD_DESC(name, desc, infos, guid)
Definition: Profiling.hpp:227
armnn::BaseWorkload< BatchMatMulQueueDescriptor >::GetGuid
arm::pipe::ProfilingGuid GetGuid() const final
Definition: Workload.hpp:67
armnn::Status
Status
Definition: Types.hpp:42
ClTensorHandle.hpp
armnn::BaseWorkload< BatchMatMulQueueDescriptor >::m_Data
BatchMatMulQueueDescriptor m_Data
Definition: Workload.hpp:89
armnn::WorkloadInfo::m_InputTensorInfos
std::vector< TensorInfo > m_InputTensorInfos
Definition: WorkloadInfo.hpp:18
ClBatchMatMulWorkload.hpp
armnn
Copyright (c) 2021 ARM Limited and Contributors.
Definition: 01_00_quick_start.dox:6
ArmComputeTensorUtils.hpp
armnn::QueueDescriptor::m_Inputs
std::vector< ITensorHandle * > m_Inputs
Definition: WorkloadData.hpp:26
armnn::DataLayout::NCHW
@ NCHW