ArmNN
 22.08
EthosnRefTransposeConvolution2dWorkload Class Reference

#include <EthosnRefTransposeConvolution2dWorkload.hpp>

Inheritance diagram for EthosnRefTransposeConvolution2dWorkload:
BaseWorkload< TransposeConvolution2dQueueDescriptor > IWorkload

Public Member Functions

 EthosnRefTransposeConvolution2dWorkload (const TransposeConvolution2dQueueDescriptor &descriptor, const WorkloadInfo &info)
 
 ~EthosnRefTransposeConvolution2dWorkload ()=default
 
void Execute () const override
 
- Public Member Functions inherited from BaseWorkload< TransposeConvolution2dQueueDescriptor >
 BaseWorkload (const TransposeConvolution2dQueueDescriptor &descriptor, const WorkloadInfo &info)
 
void ExecuteAsync (ExecutionData &executionData) override
 
void PostAllocationConfigure () override
 
const TransposeConvolution2dQueueDescriptor & GetData () const
 
arm::pipe::ProfilingGuid GetGuid () const final
 
virtual bool SupportsTensorHandleReplacement () const override
 
void ReplaceInputTensorHandle (ITensorHandle *tensorHandle, unsigned int slot) override
 
void ReplaceOutputTensorHandle (ITensorHandle *tensorHandle, unsigned int slot) override
 
- Public Member Functions inherited from IWorkload
virtual ~IWorkload ()
 
virtual void RegisterDebugCallback (const DebugCallbackFunction &)
 
virtual armnn::Optional< armnn::MemoryRequirements > GetMemoryRequirements ()
 

Additional Inherited Members

- Protected Attributes inherited from BaseWorkload< TransposeConvolution2dQueueDescriptor >
TransposeConvolution2dQueueDescriptor m_Data
 
const arm::pipe::ProfilingGuid m_Guid
 

Detailed Description

Definition at line 14 of file EthosnRefTransposeConvolution2dWorkload.hpp.

Constructor & Destructor Documentation

◆ EthosnRefTransposeConvolution2dWorkload()

Definition at line 16 of file EthosnRefTransposeConvolution2dWorkload.cpp.

References TransposeConvolution2dQueueDescriptor::m_Bias, TransposeConvolution2dDescriptor::m_BiasEnabled, QueueDescriptorWithParameters< LayerDescriptor >::m_Parameters, and TransposeConvolution2dQueueDescriptor::m_Weight.

17  :
18  BaseWorkload<TransposeConvolution2dQueueDescriptor>(descriptor, info)
19 {
20  // set up weights decoder
21  m_Weight = std::make_unique<ScopedCpuTensorHandle>(*(descriptor.m_Weight));
22 
23  // set up biases decoder
24  if (descriptor.m_Parameters.m_BiasEnabled)
25  {
26  m_Bias = std::make_unique<ScopedCpuTensorHandle>(*(descriptor.m_Bias));
27  }
28 }

◆ ~EthosnRefTransposeConvolution2dWorkload()

Member Function Documentation

◆ Execute()

void Execute ( ) const
override, virtual

Implements IWorkload.

Definition at line 30 of file EthosnRefTransposeConvolution2dWorkload.cpp.

References ARMNN_SCOPED_PROFILING_EVENT_ETHOSN, armnn::ethosnref::CheckDataType(), TensorInfo::GetDataType(), TensorInfo::GetQuantizationOffset(), TensorInfo::GetQuantizationScale(), armnn::GetTensorInfo(), TransposeConvolution2dDescriptor::m_BiasEnabled, BaseWorkload< TransposeConvolution2dQueueDescriptor >::m_Data, QueueDescriptor::m_Inputs, QueueDescriptor::m_Outputs, QueueDescriptorWithParameters< LayerDescriptor >::m_Parameters, armnn::QAsymmU8, armnn::QSymmS16, and armnn::QSymmS8.

31 {
32  ARMNN_SCOPED_PROFILING_EVENT_ETHOSN("EthosnRefTransposeConvolution2dWorkload_Execute");
33 
34  const TensorInfo& inputInfo = GetTensorInfo(m_Data.m_Inputs[0]);
35  const TensorInfo& weightsInfo = m_Weight->GetTensorInfo();
36 
37  if (CheckDataType(DataType::QSymmS16, inputInfo.GetDataType(), weightsInfo.GetDataType())) {
38  const int16_t* inputData = GetInputTensorData<int16_t>(0, m_Data);
39  const int16_t* weightsData = m_Weight->template GetConstTensor<int16_t>();
40  const int32_t* biasData = m_Data.m_Parameters.m_BiasEnabled ? m_Bias->template GetConstTensor<int32_t>() : nullptr;
41  const TensorInfo& outputInfo = GetTensorInfo(m_Data.m_Outputs[0]);
42  EthosnRefTransposeConvolutionImpl<armnn::TransposeConvolution2dQueueDescriptor, int16_t, int16_t, int32_t, int64_t>(
43  m_Data,
44  inputData, inputInfo.GetQuantizationScale(), inputInfo.GetQuantizationOffset(),
45  weightsData, weightsInfo.GetQuantizationScale(), weightsInfo.GetQuantizationOffset(),
46  biasData,
47  outputInfo.GetQuantizationScale(), outputInfo.GetQuantizationOffset(), weightsInfo);
48  }
49  else if (CheckDataType(DataType::QSymmS8, inputInfo.GetDataType(), weightsInfo.GetDataType())) {
50  const int8_t* inputData = GetInputTensorData<int8_t>(0, m_Data);
51  const int8_t* weightsData = m_Weight->template GetConstTensor<int8_t>();
52  const int32_t* biasData = m_Data.m_Parameters.m_BiasEnabled ? m_Bias->template GetConstTensor<int32_t>() : nullptr;
53  const TensorInfo& outputInfo = GetTensorInfo(m_Data.m_Outputs[0]);
54  EthosnRefTransposeConvolutionImpl<armnn::TransposeConvolution2dQueueDescriptor, int8_t, int8_t, int32_t, int32_t>(
55  m_Data,
56  inputData, inputInfo.GetQuantizationScale(), inputInfo.GetQuantizationOffset(),
57  weightsData, weightsInfo.GetQuantizationScale(), weightsInfo.GetQuantizationOffset(),
58  biasData,
59  outputInfo.GetQuantizationScale(), outputInfo.GetQuantizationOffset(), weightsInfo);
60  } else { // QAsymmU8
61  assert(CheckDataType(DataType::QAsymmU8, inputInfo.GetDataType(), weightsInfo.GetDataType()));
62 
63  const uint8_t* inputData = GetInputTensorData<uint8_t>(0, m_Data);
64  const uint8_t* weightsData = m_Weight->template GetConstTensor<uint8_t>();
65  const int32_t* biasData = m_Data.m_Parameters.m_BiasEnabled ? m_Bias->template GetConstTensor<int32_t>() : nullptr;
66  const TensorInfo& outputInfo = GetTensorInfo(m_Data.m_Outputs[0]);
67  EthosnRefTransposeConvolutionImpl<armnn::TransposeConvolution2dQueueDescriptor, uint8_t, uint8_t, int32_t, int32_t>(
68  m_Data,
69  inputData, inputInfo.GetQuantizationScale(), inputInfo.GetQuantizationOffset(),
70  weightsData, weightsInfo.GetQuantizationScale(), weightsInfo.GetQuantizationOffset(),
71  biasData,
72  outputInfo.GetQuantizationScale(), outputInfo.GetQuantizationOffset(), weightsInfo);
73  }
74 
75 }
bool m_BiasEnabled
Enable/disable bias.
bool CheckDataType(DataType type, DataType inputType, DataType weightsType)
TransposeConvolution2dQueueDescriptor m_Data
Definition: Workload.hpp:83
#define ARMNN_SCOPED_PROFILING_EVENT_ETHOSN(name)
std::vector< ITensorHandle * > m_Outputs
std::vector< ITensorHandle * > m_Inputs
const TensorInfo & GetTensorInfo(const ITensorHandle *tensorHandle)
float32 helpers

The documentation for this class was generated from the following files: