ArmNN
 22.08
EthosnRefDepthwiseConvolution2dWorkload Class Reference

#include <EthosnRefDepthwiseConvolution2dWorkload.hpp>

Inheritance diagram for EthosnRefDepthwiseConvolution2dWorkload:
BaseWorkload< DepthwiseConvolution2dQueueDescriptor > IWorkload

Public Member Functions

 EthosnRefDepthwiseConvolution2dWorkload (const DepthwiseConvolution2dQueueDescriptor &descriptor, const WorkloadInfo &info)
 
virtual void Execute () const override
 
- Public Member Functions inherited from BaseWorkload< DepthwiseConvolution2dQueueDescriptor >
 BaseWorkload (const DepthwiseConvolution2dQueueDescriptor &descriptor, const WorkloadInfo &info)
 
void ExecuteAsync (ExecutionData &executionData) override
 
void PostAllocationConfigure () override
 
const DepthwiseConvolution2dQueueDescriptor & GetData () const
 
arm::pipe::ProfilingGuid GetGuid () const final
 
virtual bool SupportsTensorHandleReplacement () const override
 
void ReplaceInputTensorHandle (ITensorHandle *tensorHandle, unsigned int slot) override
 
void ReplaceOutputTensorHandle (ITensorHandle *tensorHandle, unsigned int slot) override
 
- Public Member Functions inherited from IWorkload
virtual ~IWorkload ()
 
virtual void RegisterDebugCallback (const DebugCallbackFunction &)
 
virtual armnn::Optional< armnn::MemoryRequirements > GetMemoryRequirements ()
 

Additional Inherited Members

- Protected Attributes inherited from BaseWorkload< DepthwiseConvolution2dQueueDescriptor >
DepthwiseConvolution2dQueueDescriptor m_Data
 
const arm::pipe::ProfilingGuid m_Guid
 

Detailed Description

Definition at line 13 of file EthosnRefDepthwiseConvolution2dWorkload.hpp.

Constructor & Destructor Documentation

◆ EthosnRefDepthwiseConvolution2dWorkload()

Definition at line 17 of file EthosnRefDepthwiseConvolution2dWorkload.cpp.

References DepthwiseConvolution2dQueueDescriptor::m_Bias, DepthwiseConvolution2dDescriptor::m_BiasEnabled, QueueDescriptorWithParameters< LayerDescriptor >::m_Parameters, and DepthwiseConvolution2dQueueDescriptor::m_Weight.

19  : BaseWorkload<DepthwiseConvolution2dQueueDescriptor>(descriptor, info)
20 {
21  m_Weight = std::make_unique<ScopedCpuTensorHandle>(*(descriptor.m_Weight));
22 
23  if (descriptor.m_Parameters.m_BiasEnabled)
24  {
25  m_Bias = std::make_unique<ScopedCpuTensorHandle>(*(descriptor.m_Bias));
26  }
27 }

Member Function Documentation

◆ Execute()

void Execute ( ) const
override virtual

Implements IWorkload.

Definition at line 29 of file EthosnRefDepthwiseConvolution2dWorkload.cpp.

References ARMNN_SCOPED_PROFILING_EVENT_ETHOSN, armnn::ethosnref::CheckDataType(), TensorInfo::GetDataType(), TensorInfo::GetQuantizationOffset(), TensorInfo::GetQuantizationScale(), armnn::GetTensorInfo(), DepthwiseConvolution2dDescriptor::m_BiasEnabled, BaseWorkload< DepthwiseConvolution2dQueueDescriptor >::m_Data, QueueDescriptor::m_Inputs, QueueDescriptor::m_Outputs, QueueDescriptorWithParameters< LayerDescriptor >::m_Parameters, armnn::QAsymmU8, armnn::QSymmS16, and armnn::QSymmS8.

30 {
31  ARMNN_SCOPED_PROFILING_EVENT_ETHOSN("EthosnRefDepthwiseConvolution2dWorkload_Execute");
32  const TensorInfo& inputInfo = GetTensorInfo(m_Data.m_Inputs[0]);
33  const TensorInfo& weightsInfo = m_Weight->GetTensorInfo();
34 
35  if (CheckDataType(DataType::QSymmS16, inputInfo.GetDataType(), weightsInfo.GetDataType())) {
36  const int16_t* inputData = GetInputTensorData<int16_t>(0, m_Data);;
37  const int16_t* weightsData = m_Weight->template GetConstTensor<int16_t>();
38  const int32_t* biasData = m_Data.m_Parameters.m_BiasEnabled ? m_Bias->template GetConstTensor<int32_t>() : nullptr;
39  const TensorInfo& outputInfo = GetTensorInfo(m_Data.m_Outputs[0]);
40  const TensorInfo& filterInfo = m_Weight->GetTensorInfo();
41  EthosnRefConvImpl<armnn::DepthwiseConvolution2dQueueDescriptor, int16_t, int16_t, int32_t, int64_t>(
42  m_Data,
43  inputData, inputInfo.GetQuantizationScale(), inputInfo.GetQuantizationOffset(),
44  weightsData, weightsInfo.GetQuantizationScale(), weightsInfo.GetQuantizationOffset(),
45  biasData,
46  outputInfo.GetQuantizationScale(), outputInfo.GetQuantizationOffset(), filterInfo, true);
47  }
48  else if (CheckDataType(DataType::QSymmS8, inputInfo.GetDataType(), weightsInfo.GetDataType())) {
49  const int8_t* inputData = GetInputTensorData<int8_t>(0, m_Data);;
50  const int8_t* weightsData = m_Weight->template GetConstTensor<int8_t>();
51  const int32_t* biasData = m_Data.m_Parameters.m_BiasEnabled ? m_Bias->template GetConstTensor<int32_t>() : nullptr;
52  const TensorInfo& outputInfo = GetTensorInfo(m_Data.m_Outputs[0]);
53  const TensorInfo& filterInfo = m_Weight->GetTensorInfo();
54  EthosnRefConvImpl<armnn::DepthwiseConvolution2dQueueDescriptor, int8_t, int8_t, int32_t, int64_t>(
55  m_Data,
56  inputData, inputInfo.GetQuantizationScale(), inputInfo.GetQuantizationOffset(),
57  weightsData, weightsInfo.GetQuantizationScale(), weightsInfo.GetQuantizationOffset(),
58  biasData,
59  outputInfo.GetQuantizationScale(), outputInfo.GetQuantizationOffset(), filterInfo, true);
60  }
61  else { // QAsymmU8
62  assert(CheckDataType(DataType::QAsymmU8, inputInfo.GetDataType(), weightsInfo.GetDataType()));
63 
64  const uint8_t* inputData = GetInputTensorData<uint8_t>(0, m_Data);;
65  const uint8_t* weightsData = m_Weight->template GetConstTensor<uint8_t>();
66  const int32_t* biasData = m_Data.m_Parameters.m_BiasEnabled ? m_Bias->template GetConstTensor<int32_t>() : nullptr;
67  const TensorInfo& outputInfo = GetTensorInfo(m_Data.m_Outputs[0]);
68  const TensorInfo& filterInfo = m_Weight->GetTensorInfo();
69  EthosnRefConvImpl<armnn::DepthwiseConvolution2dQueueDescriptor, uint8_t, uint8_t, int32_t, int32_t>(
70  m_Data,
71  inputData, inputInfo.GetQuantizationScale(), inputInfo.GetQuantizationOffset(),
72  weightsData, weightsInfo.GetQuantizationScale(), weightsInfo.GetQuantizationOffset(),
73  biasData,
74  outputInfo.GetQuantizationScale(), outputInfo.GetQuantizationOffset(), filterInfo, true);
75  }
76 }
bool m_BiasEnabled
Enable/disable bias.
bool CheckDataType(DataType type, DataType inputType, DataType weightsType)
DepthwiseConvolution2dQueueDescriptor m_Data
Definition: Workload.hpp:83
#define ARMNN_SCOPED_PROFILING_EVENT_ETHOSN(name)
std::vector< ITensorHandle * > m_Outputs
std::vector< ITensorHandle * > m_Inputs
const TensorInfo & GetTensorInfo(const ITensorHandle *tensorHandle)
float32 helpers

The documentation for this class was generated from the following files: