From 7bfd38a721360183f3392f9ab35db18a0dd7fef8 Mon Sep 17 00:00:00 2001
From: Nikhil Raj
Date: Fri, 19 Aug 2022 15:23:36 +0100
Subject: Update Doxygen for 22.08 Release

Signed-off-by: Nikhil Raj
Change-Id: I4789fe868e0492839be1482e5cee3642ed90d756
---
 ...thwise_convolution2d_workload_8cpp_source.xhtml | 141 +++++++++++++++++++++
 1 file changed, 141 insertions(+)
 create mode 100644 22.08/_ethosn_ref_depthwise_convolution2d_workload_8cpp_source.xhtml

diff --git a/22.08/_ethosn_ref_depthwise_convolution2d_workload_8cpp_source.xhtml b/22.08/_ethosn_ref_depthwise_convolution2d_workload_8cpp_source.xhtml
new file mode 100644
index 0000000000..657aedc4ef
--- /dev/null
+++ b/22.08/_ethosn_ref_depthwise_convolution2d_workload_8cpp_source.xhtml

ArmNN: src/backends/ethosnref/workloads/EthosnRefDepthwiseConvolution2dWorkload.cpp Source File
EthosnRefDepthwiseConvolution2dWorkload.cpp
//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "EthosnRefDepthwiseConvolution2dWorkload.hpp"

#include "EthosnRefConvImpl.hpp"
#include "EthosnRefWorkloadUtils.hpp" // assumed header name; provides the GetTensorInfo/GetInputTensorData helpers
#include <ResolveType.hpp>

#include <cassert>

using namespace armnn::ethosnref;
namespace armnn
{

EthosnRefDepthwiseConvolution2dWorkload::EthosnRefDepthwiseConvolution2dWorkload(
    const DepthwiseConvolution2dQueueDescriptor& descriptor, const WorkloadInfo& info)
    : BaseWorkload<DepthwiseConvolution2dQueueDescriptor>(descriptor, info)
{
    // Copy the weights (and the bias, when enabled) into tensor handles owned by the workload.
    m_Weight = std::make_unique<ScopedCpuTensorHandle>(*(descriptor.m_Weight));

    if (descriptor.m_Parameters.m_BiasEnabled)
    {
        m_Bias = std::make_unique<ScopedCpuTensorHandle>(*(descriptor.m_Bias));
    }
}

void EthosnRefDepthwiseConvolution2dWorkload::Execute() const
{
    ARMNN_SCOPED_PROFILING_EVENT_ETHOSN("EthosnRefDepthwiseConvolution2dWorkload_Execute");
    const TensorInfo& inputInfo = GetTensorInfo(m_Data.m_Inputs[0]);
    const TensorInfo& weightsInfo = m_Weight->GetTensorInfo();

    // Dispatch on the quantized data type shared by the input and weight tensors.
    if (CheckDataType(DataType::QSymmS16, inputInfo.GetDataType(), weightsInfo.GetDataType())) {
        // 16-bit symmetric quantization: accumulate in int64_t to avoid overflow.
        const int16_t* inputData = GetInputTensorData<int16_t>(0, m_Data);
        const int16_t* weightsData = m_Weight->template GetConstTensor<int16_t>();
        const int32_t* biasData =
            m_Data.m_Parameters.m_BiasEnabled ? m_Bias->template GetConstTensor<int32_t>() : nullptr;
        const TensorInfo& outputInfo = GetTensorInfo(m_Data.m_Outputs[0]);
        const TensorInfo& filterInfo = m_Weight->GetTensorInfo();
        EthosnRefConvImpl<armnn::DepthwiseConvolution2dQueueDescriptor, int16_t, int16_t, int32_t, int64_t>(
            m_Data,
            inputData, inputInfo.GetQuantizationScale(), inputInfo.GetQuantizationOffset(),
            weightsData, weightsInfo.GetQuantizationScale(), weightsInfo.GetQuantizationOffset(),
            biasData,
            outputInfo.GetQuantizationScale(), outputInfo.GetQuantizationOffset(), filterInfo, true);
    }
    else if (CheckDataType(DataType::QSymmS8, inputInfo.GetDataType(), weightsInfo.GetDataType())) {
        // 8-bit symmetric quantization: same int64_t accumulator as the 16-bit path.
        const int8_t* inputData = GetInputTensorData<int8_t>(0, m_Data);
        const int8_t* weightsData = m_Weight->template GetConstTensor<int8_t>();
        const int32_t* biasData =
            m_Data.m_Parameters.m_BiasEnabled ? m_Bias->template GetConstTensor<int32_t>() : nullptr;
        const TensorInfo& outputInfo = GetTensorInfo(m_Data.m_Outputs[0]);
        const TensorInfo& filterInfo = m_Weight->GetTensorInfo();
        EthosnRefConvImpl<armnn::DepthwiseConvolution2dQueueDescriptor, int8_t, int8_t, int32_t, int64_t>(
            m_Data,
            inputData, inputInfo.GetQuantizationScale(), inputInfo.GetQuantizationOffset(),
            weightsData, weightsInfo.GetQuantizationScale(), weightsInfo.GetQuantizationOffset(),
            biasData,
            outputInfo.GetQuantizationScale(), outputInfo.GetQuantizationOffset(), filterInfo, true);
    }
    else { // QAsymmU8
        // 8-bit asymmetric quantization: a 32-bit accumulator is sufficient here.
        assert(CheckDataType(DataType::QAsymmU8, inputInfo.GetDataType(), weightsInfo.GetDataType()));

        const uint8_t* inputData = GetInputTensorData<uint8_t>(0, m_Data);
        const uint8_t* weightsData = m_Weight->template GetConstTensor<uint8_t>();
        const int32_t* biasData =
            m_Data.m_Parameters.m_BiasEnabled ? m_Bias->template GetConstTensor<int32_t>() : nullptr;
        const TensorInfo& outputInfo = GetTensorInfo(m_Data.m_Outputs[0]);
        const TensorInfo& filterInfo = m_Weight->GetTensorInfo();
        EthosnRefConvImpl<armnn::DepthwiseConvolution2dQueueDescriptor, uint8_t, uint8_t, int32_t, int32_t>(
            m_Data,
            inputData, inputInfo.GetQuantizationScale(), inputInfo.GetQuantizationOffset(),
            weightsData, weightsInfo.GetQuantizationScale(), weightsInfo.GetQuantizationOffset(),
            biasData,
            outputInfo.GetQuantizationScale(), outputInfo.GetQuantizationOffset(), filterInfo, true);
    }
}

} // namespace armnn
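
Each branch above is guarded by CheckDataType, declared in this backend as bool CheckDataType(DataType type, DataType inputType, DataType weightsType). A minimal sketch of what such a helper could look like, assuming it only verifies that the input and weight tensors both carry the expected quantized type (the actual ethosnref implementation may differ):

    // Hypothetical sketch of the CheckDataType helper used in Execute().
    bool CheckDataType(DataType type, DataType inputType, DataType weightsType)
    {
        return inputType == type && weightsType == type;
    }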