ArmNN
 24.02
ClBatchMatMulWorkload Class Reference

#include <ClBatchMatMulWorkload.hpp>

Inheritance diagram for ClBatchMatMulWorkload:
[legend]
Collaboration diagram for ClBatchMatMulWorkload:
[legend]

Public Member Functions

 ClBatchMatMulWorkload (const BatchMatMulQueueDescriptor &descriptor, const WorkloadInfo &info, const arm_compute::CLCompileContext &clCompileContext)
 
virtual void Execute () const override
 
- Public Member Functions inherited from ClBaseWorkload< BatchMatMulQueueDescriptor >
 ClBaseWorkload (const BatchMatMulQueueDescriptor &descriptor, const WorkloadInfo &info)
 
void ReplaceInputTensorHandle (ITensorHandle *tensorHandle, unsigned int slot) override
 
void ReplaceOutputTensorHandle (ITensorHandle *tensorHandle, unsigned int slot) override
 
- Public Member Functions inherited from BaseWorkload< BatchMatMulQueueDescriptor >
 BaseWorkload (const BatchMatMulQueueDescriptor &descriptor, const WorkloadInfo &info)
 
virtual const std::string & GetName () const override
 
void ExecuteAsync (ExecutionData &executionData) override
 
void PostAllocationConfigure () override
 
const BatchMatMulQueueDescriptor & GetData () const
 
arm::pipe::ProfilingGuid GetGuid () const final
 
virtual bool SupportsTensorHandleReplacement () const override
 
- Public Member Functions inherited from IWorkload
virtual ~IWorkload ()
 
virtual arm::pipe::ProfilingGuid GetGuid () const =0
 
virtual bool SupportsTensorHandleReplacement () const =0
 
virtual const std::string & GetName () const =0
 
virtual void RegisterDebugCallback (const DebugCallbackFunction &)
 
virtual armnn::Optional< armnn::MemoryRequirements > GetMemoryRequirements ()
 

Additional Inherited Members

- Protected Member Functions inherited from ClBaseWorkload< BatchMatMulQueueDescriptor >
virtual void Reconfigure ()
 
- Protected Attributes inherited from BaseWorkload< BatchMatMulQueueDescriptor >
BatchMatMulQueueDescriptor m_Data
 
const arm::pipe::ProfilingGuid m_Guid
 
const std::string m_Name
 

Detailed Description

Definition at line 20 of file ClBatchMatMulWorkload.hpp.

Constructor & Destructor Documentation

◆ ClBatchMatMulWorkload()

ClBatchMatMulWorkload ( const BatchMatMulQueueDescriptor &  descriptor,
const WorkloadInfo &  info,
const arm_compute::CLCompileContext &  clCompileContext 
)

Definition at line 57 of file ClBatchMatMulWorkload.cpp.

60  : ClBaseWorkload<BatchMatMulQueueDescriptor>(descriptor, info)
61 {
62  // Report Profiling Details
63  ARMNN_REPORT_PROFILING_WORKLOAD_DESC("ClBatchMatMulWorkload_Construct",
64  descriptor.m_Parameters,
65  info,
66  this->GetGuid());
67 
68  if (descriptor.m_Parameters.m_AdjointX || descriptor.m_Parameters.m_AdjointY )
69  {
70  throw Exception("Support for adjoint not implemented.");
71  }
72  if (descriptor.m_Parameters.m_DataLayoutX != armnn::DataLayout::NCHW ||
73  descriptor.m_Parameters.m_DataLayoutY != armnn::DataLayout::NCHW )
74  {
75  throw Exception("Only supported the MatMul in the last 2 dimensions");
76  }
77 
78  m_Data.ValidateInputsOutputs("ClBatchMatMulWorkload", 2, 1);
79 
80  arm_compute::ICLTensor& inputX = PolymorphicDowncast<ClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
81  arm_compute::ICLTensor& inputY = PolymorphicDowncast<ClTensorHandle*>(m_Data.m_Inputs[1])->GetTensor();
82  auto outputHandle = PolymorphicDowncast<ClTensorHandle*>(m_Data.m_Outputs[0]);
83  arm_compute::ICLTensor& output = outputHandle->GetTensor();
84 
85  // GeMM dispatches kernel handles dynamic inputs differently to static so this flag needs to be set
86  inputX.info()->set_are_values_constant(false);
87  inputY.info()->set_are_values_constant(false);
88 
89  const arm_compute::ActivationLayerInfo activationInfo = ConvertAdditionalInfoToAclActivationLayerInfo(descriptor);
90 
91  arm_compute::MatMulInfo matMulInfo;
92  matMulInfo.adj_lhs(descriptor.m_Parameters.m_TransposeX);
93  matMulInfo.adj_rhs(descriptor.m_Parameters.m_TransposeY);
94 
95  arm_compute::GpuMatMulSettings settings;
96 
97  m_MatMulLayer.configure(clCompileContext, &inputX, &inputY, &output, matMulInfo, settings, activationInfo);
98 
99  // Report Profiling Details
100  WorkloadInfo detailsInfo;
101  detailsInfo.m_InputTensorInfos = info.m_InputTensorInfos;
102  detailsInfo.m_OutputTensorInfos = info.m_OutputTensorInfos;
103  ARMNN_REPORT_PROFILING_WORKLOAD_DESC("ClBatchMatMulWorkload_Construct",
104  descriptor.m_Parameters,
105  detailsInfo,
106  GetGuid());
107 }

References ARMNN_REPORT_PROFILING_WORKLOAD_DESC, armnn::ConvertAdditionalInfoToAclActivationLayerInfo(), BaseWorkload< BatchMatMulQueueDescriptor >::GetGuid(), armnn::info, BatchMatMulDescriptor::m_AdjointX, BatchMatMulDescriptor::m_AdjointY, BaseWorkload< BatchMatMulQueueDescriptor >::m_Data, BatchMatMulDescriptor::m_DataLayoutX, BatchMatMulDescriptor::m_DataLayoutY, QueueDescriptor::m_Inputs, WorkloadInfo::m_InputTensorInfos, QueueDescriptor::m_Outputs, WorkloadInfo::m_OutputTensorInfos, QueueDescriptorWithParameters< LayerDescriptor >::m_Parameters, BatchMatMulDescriptor::m_TransposeX, BatchMatMulDescriptor::m_TransposeY, armnn::NCHW, and QueueDescriptor::ValidateInputsOutputs().

Member Function Documentation

◆ Execute()

void Execute ( ) const
overridevirtual

Implements IWorkload.

Definition at line 109 of file ClBatchMatMulWorkload.cpp.

110 {
111  ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClBatchMatMulWorkload_Execute");
112  RunClFunction(m_MatMulLayer, CHECK_LOCATION());
113 }

References ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID, CHECK_LOCATION, and armnn::RunClFunction().


The documentation for this class was generated from the following files:
ClBatchMatMulWorkload.hpp
ClBatchMatMulWorkload.cpp
armnn::QueueDescriptor::ValidateInputsOutputs
void ValidateInputsOutputs(const std::string &descName, unsigned int numExpectedIn, unsigned int numExpectedOut) const
Definition: WorkloadData.cpp:446
armnn::ConvertAdditionalInfoToAclActivationLayerInfo
arm_compute::ActivationLayerInfo ConvertAdditionalInfoToAclActivationLayerInfo(const QueueDescriptor &queueDescriptor)
Definition: ArmComputeUtils.hpp:105
CHECK_LOCATION
#define CHECK_LOCATION()
Definition: Exceptions.hpp:203
ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID
#define ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID(label)
Creates a profiling event that uses GetGuid() and GetName() from the calling class.
Definition: ClWorkloadUtils.hpp:36
armnn::BoostLogSeverityMapping::info
@ info
armnn::QueueDescriptor::m_Outputs
std::vector< ITensorHandle * > m_Outputs
Definition: WorkloadData.hpp:27
ARMNN_REPORT_PROFILING_WORKLOAD_DESC
#define ARMNN_REPORT_PROFILING_WORKLOAD_DESC(name, desc, infos, guid)
Definition: Profiling.hpp:227
armnn::BaseWorkload< BatchMatMulQueueDescriptor >::GetGuid
arm::pipe::ProfilingGuid GetGuid() const final
Definition: Workload.hpp:67
armnn::BaseWorkload< BatchMatMulQueueDescriptor >::m_Data
BatchMatMulQueueDescriptor m_Data
Definition: Workload.hpp:89
armnn::RunClFunction
void RunClFunction(arm_compute::IFunction &function, const CheckLocation &location)
Definition: ClWorkloadUtils.hpp:168
armnn::QueueDescriptor::m_Inputs
std::vector< ITensorHandle * > m_Inputs
Definition: WorkloadData.hpp:26
armnn::DataLayout::NCHW
@ NCHW