ArmNN 22.08
EthosnRefTransposeConvolution2dWorkload.cpp
//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "EthosnRefTransposeConvolution2dWorkload.hpp"

// (remaining #includes elided in the original listing)

using namespace armnn::ethosnref;

namespace armnn
{

EthosnRefTransposeConvolution2dWorkload::EthosnRefTransposeConvolution2dWorkload(
    const TransposeConvolution2dQueueDescriptor& descriptor, const WorkloadInfo& info) :
    BaseWorkload<TransposeConvolution2dQueueDescriptor>(descriptor, info)
{
    // Set up the weights decoder: keep a scoped copy of the constant weight
    // tensor for use at Execute time.
    m_Weight = std::make_unique<ScopedCpuTensorHandle>(*(descriptor.m_Weight));

    // Set up the biases decoder, if a bias is present.
    if (descriptor.m_Parameters.m_BiasEnabled)
    {
        m_Bias = std::make_unique<ScopedCpuTensorHandle>(*(descriptor.m_Bias));
    }
}

void EthosnRefTransposeConvolution2dWorkload::Execute() const
{
    ARMNN_SCOPED_PROFILING_EVENT_ETHOSN("EthosnRefTransposeConvolution2dWorkload_Execute");

    const TensorInfo& inputInfo   = GetTensorInfo(m_Data.m_Inputs[0]);
    const TensorInfo& weightsInfo = m_Weight->GetTensorInfo();

    // Dispatch on the quantized data type shared by the input and weight
    // tensors: 16-bit data accumulates in int64_t, 8-bit data in int32_t.
    if (CheckDataType(DataType::QSymmS16, inputInfo.GetDataType(), weightsInfo.GetDataType()))
    {
        const int16_t* inputData   = GetInputTensorData<int16_t>(0, m_Data);
        const int16_t* weightsData = m_Weight->template GetConstTensor<int16_t>();
        const int32_t* biasData    = m_Data.m_Parameters.m_BiasEnabled ? m_Bias->template GetConstTensor<int32_t>() : nullptr;
        const TensorInfo& outputInfo = GetTensorInfo(m_Data.m_Outputs[0]);
        EthosnRefTransposeConvolutionImpl<armnn::TransposeConvolution2dQueueDescriptor, int16_t, int16_t, int32_t, int64_t>(
            m_Data,
            inputData, inputInfo.GetQuantizationScale(), inputInfo.GetQuantizationOffset(),
            weightsData, weightsInfo.GetQuantizationScale(), weightsInfo.GetQuantizationOffset(),
            biasData,
            outputInfo.GetQuantizationScale(), outputInfo.GetQuantizationOffset(), weightsInfo);
    }
    else if (CheckDataType(DataType::QSymmS8, inputInfo.GetDataType(), weightsInfo.GetDataType()))
    {
        const int8_t* inputData   = GetInputTensorData<int8_t>(0, m_Data);
        const int8_t* weightsData = m_Weight->template GetConstTensor<int8_t>();
        const int32_t* biasData   = m_Data.m_Parameters.m_BiasEnabled ? m_Bias->template GetConstTensor<int32_t>() : nullptr;
        const TensorInfo& outputInfo = GetTensorInfo(m_Data.m_Outputs[0]);
        EthosnRefTransposeConvolutionImpl<armnn::TransposeConvolution2dQueueDescriptor, int8_t, int8_t, int32_t, int32_t>(
            m_Data,
            inputData, inputInfo.GetQuantizationScale(), inputInfo.GetQuantizationOffset(),
            weightsData, weightsInfo.GetQuantizationScale(), weightsInfo.GetQuantizationOffset(),
            biasData,
            outputInfo.GetQuantizationScale(), outputInfo.GetQuantizationOffset(), weightsInfo);
    }
    else // QAsymmU8
    {
        assert(CheckDataType(DataType::QAsymmU8, inputInfo.GetDataType(), weightsInfo.GetDataType()));

        const uint8_t* inputData   = GetInputTensorData<uint8_t>(0, m_Data);
        const uint8_t* weightsData = m_Weight->template GetConstTensor<uint8_t>();
        const int32_t* biasData    = m_Data.m_Parameters.m_BiasEnabled ? m_Bias->template GetConstTensor<int32_t>() : nullptr;
        const TensorInfo& outputInfo = GetTensorInfo(m_Data.m_Outputs[0]);
        EthosnRefTransposeConvolutionImpl<armnn::TransposeConvolution2dQueueDescriptor, uint8_t, uint8_t, int32_t, int32_t>(
            m_Data,
            inputData, inputInfo.GetQuantizationScale(), inputInfo.GetQuantizationOffset(),
            weightsData, weightsInfo.GetQuantizationScale(), weightsInfo.GetQuantizationOffset(),
            biasData,
            outputInfo.GetQuantizationScale(), outputInfo.GetQuantizationOffset(), weightsInfo);
    }
}

} // namespace armnn
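
The CheckDataType helper used for the dispatch is declared in the backend's workload utilities and is not part of this listing. A minimal sketch of what its signature implies (a hypothetical reconstruction, not the backend's actual definition):

bool CheckDataType(DataType type, DataType inputType, DataType weightsType)
{
    // Sketch only: true when both the input and the weight tensor use the
    // data type the caller is dispatching on.
    return inputType == type && weightsType == type;
}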
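
EthosnRefTransposeConvolutionImpl itself is also defined elsewhere. The parameters the workload hands it (per-tensor scales and zero-point offsets, an optional int32_t bias, and an accumulator type of int64_t for QSymmS16 or int32_t for the 8-bit types) follow the standard affine quantization scheme, real = scale * (q - offset). The following self-contained sketch shows how one output element would be accumulated and requantized under that scheme; QuantizedDotProduct is an illustrative name, not a function from the backend:

#include <algorithm>
#include <cmath>
#include <cstddef>
#include <cstdint>
#include <limits>

// Illustrative sketch only: computes one output element of a quantized
// convolution under affine quantization, real = scale * (q - offset).
template <typename InputT, typename WeightT, typename AccumulatorT, typename OutputT>
OutputT QuantizedDotProduct(const InputT* inputs, const WeightT* weights, std::size_t count,
                            int32_t bias,
                            float inputScale, int32_t inputOffset,
                            float weightScale, int32_t weightOffset,
                            float outputScale, int32_t outputOffset)
{
    // Subtract the zero-point offsets first so each product is an exact
    // integer, then accumulate in the wide type.
    AccumulatorT acc = 0;
    for (std::size_t i = 0; i < count; ++i)
    {
        acc += static_cast<AccumulatorT>(inputs[i] - inputOffset) *
               static_cast<AccumulatorT>(weights[i] - weightOffset);
    }

    // A quantized bias is conventionally stored with scale
    // inputScale * weightScale, so it adds directly onto the accumulator.
    acc += bias;

    // Requantize: back to real values via the combined scale, then into the
    // output tensor's quantized space, clamped to the output type's range.
    float real = static_cast<float>(acc) * inputScale * weightScale;
    int32_t q  = static_cast<int32_t>(std::lround(real / outputScale)) + outputOffset;
    q = std::min<int32_t>(std::numeric_limits<OutputT>::max(),
                          std::max<int32_t>(std::numeric_limits<OutputT>::min(), q));
    return static_cast<OutputT>(q);
}

A call mirroring the QAsymmU8 branch above would instantiate it as QuantizedDotProduct<uint8_t, uint8_t, int32_t, uint8_t>. The wider int64_t accumulator in the QSymmS16 branch reflects that sums of 16-bit products over a large kernel can overflow 32 bits, whereas int32_t suffices for 8-bit data.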