ArmNN
 20.11
RefFullyConnectedWorkload Class Reference

#include <RefFullyConnectedWorkload.hpp>

Inheritance diagram for RefFullyConnectedWorkload:
BaseWorkload< FullyConnectedQueueDescriptor > IWorkload

Public Member Functions

 RefFullyConnectedWorkload (const FullyConnectedQueueDescriptor &descriptor, const WorkloadInfo &info)
 
void PostAllocationConfigure () override
 
virtual void Execute () const override
 
- Public Member Functions inherited from BaseWorkload< FullyConnectedQueueDescriptor >
 BaseWorkload (const FullyConnectedQueueDescriptor &descriptor, const WorkloadInfo &info)
 
void PostAllocationConfigure () override
 
const FullyConnectedQueueDescriptor & GetData () const
 
profiling::ProfilingGuid GetGuid () const final
 
- Public Member Functions inherited from IWorkload
virtual ~IWorkload ()
 
virtual void RegisterDebugCallback (const DebugCallbackFunction &)
 

Additional Inherited Members

- Protected Attributes inherited from BaseWorkload< FullyConnectedQueueDescriptor >
const FullyConnectedQueueDescriptor m_Data
 
const profiling::ProfilingGuid m_Guid
 

Detailed Description

Definition at line 18 of file RefFullyConnectedWorkload.hpp.

Constructor & Destructor Documentation

◆ RefFullyConnectedWorkload()

RefFullyConnectedWorkload ( const FullyConnectedQueueDescriptor & descriptor,
const WorkloadInfo & info 
)
explicit

Definition at line 15 of file RefFullyConnectedWorkload.cpp.

References TensorInfo::GetShape(), FullyConnectedQueueDescriptor::m_Bias, FullyConnectedDescriptor::m_BiasEnabled, and QueueDescriptorWithParameters< LayerDescriptor >::m_Parameters.

17  : BaseWorkload<FullyConnectedQueueDescriptor>(descriptor, info),
18  m_Weight(std::make_unique<ScopedCpuTensorHandle>(*(descriptor.m_Weight)))
19 {
20  const TensorInfo& rWeightInfo = m_Weight->GetTensorInfo();
21  m_WeightShape = rWeightInfo.GetShape();
22  m_WeightDecoder = MakeDecoder<float>(rWeightInfo, m_Weight->Map(true));
23 
24  if (descriptor.m_Parameters.m_BiasEnabled)
25  {
26  m_Bias = std::make_unique<ScopedCpuTensorHandle>(*(descriptor.m_Bias));
27  const TensorInfo& biasInfo = m_Bias->GetTensorInfo();
28  m_BiasDecoder = MakeDecoder<float>(biasInfo, m_Bias->Map(true));
29  }
30 }

Member Function Documentation

◆ Execute()

void Execute ( ) const
overridevirtual

Implements IWorkload.

Definition at line 50 of file RefFullyConnectedWorkload.cpp.

References ARMNN_SCOPED_PROFILING_EVENT, armnn::CpuRef, armnn::FullyConnected, FullyConnectedDescriptor::m_BiasEnabled, BaseWorkload< FullyConnectedQueueDescriptor >::m_Data, QueueDescriptor::m_Inputs, QueueDescriptor::m_Outputs, QueueDescriptorWithParameters< LayerDescriptor >::m_Parameters, and FullyConnectedDescriptor::m_TransposeWeightMatrix.

51 {
52  ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefFullyConnectedWorkload_Execute");
53 
54  m_InputDecoder->Reset(m_Data.m_Inputs[0]->Map());
55  m_OutputEncoder->Reset(m_Data.m_Outputs[0]->Map());
56 
57  FullyConnected(m_InputShape,
58  *m_InputDecoder,
59  m_OutputShape,
60  *m_OutputEncoder,
61  m_WeightShape,
62  *m_WeightDecoder,
63  *m_BiasDecoder,
64  m_Data.m_Parameters.m_BiasEnabled,
65  m_NumActivations,
66  m_Data.m_Parameters.m_TransposeWeightMatrix);
67 }
CPU Execution: Reference C++ kernels.
bool m_TransposeWeightMatrix
Enable/disable transpose weight matrix.
const FullyConnectedQueueDescriptor m_Data
Definition: Workload.hpp:46
#define ARMNN_SCOPED_PROFILING_EVENT(backendId, name)
Definition: Profiling.hpp:175
bool m_BiasEnabled
Enable/disable bias.
std::vector< ITensorHandle * > m_Outputs
std::vector< ITensorHandle * > m_Inputs

◆ PostAllocationConfigure()

void PostAllocationConfigure ( )
overridevirtual

Implements IWorkload.

Definition at line 32 of file RefFullyConnectedWorkload.cpp.

References ARMNN_ASSERT, TensorInfo::GetNumDimensions(), TensorInfo::GetShape(), armnn::GetTensorInfo(), BaseWorkload< FullyConnectedQueueDescriptor >::m_Data, QueueDescriptor::m_Inputs, and QueueDescriptor::m_Outputs.

33 {
34  const TensorInfo& inputInfo = GetTensorInfo(m_Data.m_Inputs[0]);
35  ARMNN_ASSERT(inputInfo.GetNumDimensions() > 1);
36  m_InputShape = inputInfo.GetShape();
37  m_InputDecoder = MakeDecoder<float>(inputInfo);
38 
39  const TensorInfo& outputInfo = GetTensorInfo(m_Data.m_Outputs[0]);
40  m_OutputShape = outputInfo.GetShape();
41  m_OutputEncoder = MakeEncoder<float>(outputInfo);
42 
43  m_NumActivations = 1; // Total number of activations in the input.
44  for (unsigned int i = 1; i < inputInfo.GetNumDimensions(); i++)
45  {
46  m_NumActivations *= inputInfo.GetShape()[i];
47  }
48 }
const FullyConnectedQueueDescriptor m_Data
Definition: Workload.hpp:46
#define ARMNN_ASSERT(COND)
Definition: Assert.hpp:14
std::vector< ITensorHandle * > m_Outputs
std::vector< ITensorHandle * > m_Inputs
const TensorInfo & GetTensorInfo(const ITensorHandle *tensorHandle)
float32 helpers

The documentation for this class was generated from the following files: