ArmNN
 20.02
ClFullyConnectedWorkload Class Reference

#include <ClFullyConnectedWorkload.hpp>

Inheritance diagram for ClFullyConnectedWorkload:
BaseWorkload< armnn::FullyConnectedQueueDescriptor > IWorkload

Public Member Functions

 ClFullyConnectedWorkload (const armnn::FullyConnectedQueueDescriptor &descriptor, const armnn::WorkloadInfo &info, std::shared_ptr< arm_compute::MemoryManagerOnDemand > &memoryManager)
 
void Execute () const override
 
- Public Member Functions inherited from BaseWorkload< armnn::FullyConnectedQueueDescriptor >
 BaseWorkload (const armnn::FullyConnectedQueueDescriptor &descriptor, const WorkloadInfo &info)
 
void PostAllocationConfigure () override
 
const armnn::FullyConnectedQueueDescriptor & GetData () const
 
profiling::ProfilingGuid GetGuid () const final
 
- Public Member Functions inherited from IWorkload
virtual ~IWorkload ()
 
virtual void RegisterDebugCallback (const DebugCallbackFunction &)
 

Additional Inherited Members

- Protected Attributes inherited from BaseWorkload< armnn::FullyConnectedQueueDescriptor >
const armnn::FullyConnectedQueueDescriptor m_Data
 
const profiling::ProfilingGuid m_Guid
 

Detailed Description

Definition at line 24 of file ClFullyConnectedWorkload.hpp.

Constructor & Destructor Documentation

◆ ClFullyConnectedWorkload()

ClFullyConnectedWorkload ( const armnn::FullyConnectedQueueDescriptor &  descriptor,
const armnn::WorkloadInfo &  info,
std::shared_ptr< arm_compute::MemoryManagerOnDemand > &  memoryManager 
)

Definition at line 47 of file ClFullyConnectedWorkload.cpp.

49  : BaseWorkload<FullyConnectedQueueDescriptor>(descriptor, info)
50  , m_FullyConnectedLayer(memoryManager)
51 {
52  m_WeightsTensor = std::make_unique<arm_compute::CLTensor>();
53  BuildArmComputeTensor(*m_WeightsTensor, m_Data.m_Weight->GetTensorInfo());
54 
55  if (m_Data.m_Parameters.m_BiasEnabled)
56  {
57  m_BiasesTensor = std::make_unique<arm_compute::CLTensor>();
58  BuildArmComputeTensor(*m_BiasesTensor, m_Data.m_Bias->GetTensorInfo());
59  }
60 
61  m_Data.ValidateInputsOutputs("ClFullyConnectedWorkload", 1, 1);
62 
63  arm_compute::ICLTensor& input = static_cast<IClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
64  arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
65 
66  // Construct
67  arm_compute::FullyConnectedLayerInfo fc_info;
68  fc_info.transpose_weights = m_Data.m_Parameters.m_TransposeWeightMatrix;
69  m_FullyConnectedLayer.configure(&input, m_WeightsTensor.get(), m_BiasesTensor.get(), &output, fc_info);
70 
71  InitializeArmComputeClTensorData(*m_WeightsTensor, m_Data.m_Weight);
72 
73  if (m_BiasesTensor)
74  {
75  InitializeArmComputeClTensorData(*m_BiasesTensor, m_Data.m_Bias);
76  }
77 
78  // Force Compute Library to perform the necessary copying and reshaping, after which
79  // delete all the input tensors that will no longer be needed
80  m_FullyConnectedLayer.prepare();
81  FreeUnusedTensors();
82 }
void InitializeArmComputeClTensorData(arm_compute::CLTensor &clTensor, const ConstCpuTensorHandle *handle)
const ConstCpuTensorHandle * m_Weight
bool m_TransposeWeightMatrix
Enable/disable transpose weight matrix.
const armnn::FullyConnectedQueueDescriptor m_Data
Definition: Workload.hpp:46
void ValidateInputsOutputs(const std::string &descName, unsigned int numExpectedIn, unsigned int numExpectedOut) const
bool m_BiasEnabled
Enable/disable bias.
std::vector< ITensorHandle * > m_Outputs
std::vector< ITensorHandle * > m_Inputs
const ConstCpuTensorHandle * m_Bias
const TensorInfo & GetTensorInfo() const

Member Function Documentation

◆ Execute()

void Execute ( ) const
override virtual

Implements IWorkload.

Definition at line 84 of file ClFullyConnectedWorkload.cpp.

References ARMNN_SCOPED_PROFILING_EVENT_CL, CHECK_LOCATION, and armnn::RunClFunction().

85 {
86  ARMNN_SCOPED_PROFILING_EVENT_CL("ClFullyConnectedWorkload_Execute");
87  RunClFunction(m_FullyConnectedLayer, CHECK_LOCATION());
88 }
#define ARMNN_SCOPED_PROFILING_EVENT_CL(name)
void RunClFunction(arm_compute::IFunction &function, const CheckLocation &location)
#define CHECK_LOCATION()
Definition: Exceptions.hpp:192

The documentation for this class was generated from the following files:
ClFullyConnectedWorkload.hpp
ClFullyConnectedWorkload.cpp