ArmNN
 22.05
DepthwiseConvolution2dEndToEndTests.hpp File Reference
#include "EndToEndTestImpl.hpp"
#include <armnnUtils/QuantizeHelper.hpp>
#include <ResolveType.hpp>
#include <CommonTestUtils.hpp>
#include <armnnTestUtils/DataLayoutUtils.hpp>
#include <map>
#include <vector>

Go to the source code of this file.

Functions

template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType>
void DepthwiseConvolution2dEndToEnd (const std::vector< armnn::BackendId > &backends, armnn::DataLayout dataLayout)
 

Function Documentation

◆ DepthwiseConvolution2dEndToEnd()

void DepthwiseConvolution2dEndToEnd ( const std::vector< armnn::BackendId > &  backends,
armnn::DataLayout  dataLayout 
)

Definition at line 49 of file DepthwiseConvolution2dEndToEndTests.hpp.

References DepthwiseConvolution2dDescriptor::m_BiasEnabled, DepthwiseConvolution2dDescriptor::m_DataLayout, DepthwiseConvolution2dDescriptor::m_PadBottom, DepthwiseConvolution2dDescriptor::m_PadLeft, DepthwiseConvolution2dDescriptor::m_PadRight, DepthwiseConvolution2dDescriptor::m_PadTop, DepthwiseConvolution2dDescriptor::m_StrideX, DepthwiseConvolution2dDescriptor::m_StrideY, armnn::NCHW, and PermuteTensorNhwcToNchw().

51 {
52  using namespace armnn;
53  using T = ResolveType<ArmnnType>;
54  using BT = ResolveType<ArmnnBType>;
55 
56  const float qScale = IsQuantizedType<T>() ? 0.25f : 1.0f;
57  const int32_t qOffset = IsQuantizedType<T>() ? 50 : 0;
58 
59  unsigned int depthMultiplier = 2;
60 
61  unsigned int inputHeight = 8;
62  unsigned int inputWidth = 16;
63  unsigned int inputChannels = 2;
64  unsigned int inputBatchSize = 1;
65 
66  unsigned int kernelHeight = 5;
67  unsigned int kernelWidth = 3;
68 
69  unsigned int outputHeight = inputHeight - kernelHeight + 1 + 2;
70  unsigned int outputWidth = (inputWidth - kernelWidth + 1)/2;
71  unsigned int outputChannels = inputChannels * depthMultiplier;
72  unsigned int outputBatchSize = inputBatchSize;
73 
74  TensorInfo inputInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth }, ArmnnType, qScale, qOffset, true);
75  TensorInfo outputInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth }, ArmnnType, qScale, qOffset);
76  TensorInfo weightsInfo({1, kernelHeight, kernelWidth, outputChannels}, ArmnnType, qScale, qOffset, true);
77  TensorInfo biasesInfo({outputChannels}, ArmnnBType, qScale * qScale, 0, true);
78 
79  std::vector<float> inputData =
80  {
81  0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
82  0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
83  0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
84  0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
85  0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
86  0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
87  0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
88  0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
89  0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
90  0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
91  0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
92  0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
93  0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
94  0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
95  0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
96  0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f
97  };
98 
99  std::vector<float> weightsData =
100  {
101  1.0f, 1.0f, 1.0f,
102  1.0f, -1.0f, 1.0f,
103  1.0f, 1.0f, 1.0f,
104  1.0f, 1.0f, 1.0f,
105  1.0f, 1.0f, 1.0f,
106 
107  2.0f, 2.0f, 2.0f,
108  2.0f, 2.0f, 2.0f,
109  2.0f, 2.0f, 2.0f,
110  2.0f, 2.0f, 2.0f,
111  2.0f, 2.0f, 2.0f,
112 
113  0.0f, 0.0f, 0.0f,
114  0.0f, -1.0f, 0.0f,
115  0.0f, 0.0f, 0.0f,
116  0.0f, 0.0f, 0.0f,
117  0.0f, 0.0f, 0.0f,
118 
119  0.0f, 0.0f, 0.0f,
120  0.0f, 0.0f, 0.0f,
121  0.0f, 1.0f, 0.0f,
122  0.0f, 0.0f, 0.0f,
123  0.0f, 0.0f, 0.0f
124  };
125 
126  std::vector<float> biasesData = { 0.0f, 2.0f, 1.0f, -1.0f };
127 
128  std::vector<float> expectedOutputData =
129  {
130  3.0f, 3.0f, 3.0f, 3.0f, 3.0f, 3.0f, 3.0f, 3.0f, 3.0f, 3.0f, 3.0f, 3.0f, 3.0f, 3.0f,
131  5.0f, 5.0f, 5.0f, 5.0f, 5.0f, 5.0f, 5.0f, 5.5f, 5.5f, 5.5f, 5.5f, 5.5f, 5.5f, 5.5f,
132  5.5f, 5.5f, 5.5f, 5.5f, 5.5f, 5.5f, 5.5f, 5.0f, 5.0f, 5.0f, 5.0f, 5.0f, 5.0f, 5.0f,
133  2.5f, 2.5f, 2.5f, 2.5f, 2.5f, 2.5f, 2.5f, 3.5f, 3.5f, 3.5f, 3.5f, 3.5f, 3.5f, 3.5f,
134  4.5f, 4.5f, 4.5f, 4.5f, 4.5f, 4.5f, 4.5f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f,
135  6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f,
136  1.0f, 3.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 2.0f, 4.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
137  2.0f, 4.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 2.0f, 4.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
138  2.0f, 4.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 2.0f, 4.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
139  2.0f, 4.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 3.0f, 5.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
140  3.0f, 5.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 3.0f, 5.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
141  3.0f, 5.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 3.0f, 5.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f
142  };
143 
144  DepthwiseConvolution2dDescriptor descriptor;
145  descriptor.m_PadLeft = 0;
146  descriptor.m_PadRight = 0;
147  descriptor.m_PadTop = 1;
148  descriptor.m_PadBottom = 0;
149  descriptor.m_StrideX = 2;
150  descriptor.m_StrideY = 1;
151  descriptor.m_BiasEnabled = true;
152  descriptor.m_DataLayout = dataLayout;
153 
154  // Permute input and output if NCHW.
155  if (dataLayout == DataLayout::NCHW)
156  {
157  PermuteTensorNhwcToNchw(inputInfo, inputData);
158  PermuteTensorNhwcToNchw(outputInfo, expectedOutputData);
159  }
160 
161  // Quantize data
162  std::vector<T> qInputData = armnnUtils::QuantizedVector<T>(inputData, qScale, qOffset);
163  std::vector<T> qWeightsData = armnnUtils::QuantizedVector<T>(weightsData, qScale, qOffset);
164  std::vector<T> qExpectedOutputData = armnnUtils::QuantizedVector<T>(expectedOutputData, qScale, qOffset);
165 
166  std::vector<BT> qBiasesData = armnnUtils::QuantizedVector<BT>(biasesData, qScale * qScale, 0);
167 
168  ConstTensor weights(weightsInfo, qWeightsData);
169  ConstTensor biases(biasesInfo, qBiasesData);
170 
171  INetworkPtr network = CreateDepthwiseConvolution2dNetwork(descriptor,
172  inputInfo,
173  weightsInfo,
174  biasesInfo,
175  outputInfo,
176  weights,
177  biases);
178 
179  EndToEndLayerTestImpl<ArmnnType, ArmnnType>(std::move(network),
180  { { 0, qInputData } },
181  { { 0, qExpectedOutputData } },
182  backends);
183 }
void PermuteTensorNhwcToNchw(armnn::TensorInfo &tensorInfo, std::vector< T > &tensorData)
bool m_BiasEnabled
Enable/disable bias.
uint32_t m_PadBottom
Padding bottom value in the height dimension.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
uint32_t m_PadLeft
Padding left value in the width dimension.
typename ResolveTypeImpl< DT >::Type ResolveType
Definition: ResolveType.hpp:79
Copyright (c) 2021 ARM Limited and Contributors.
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
uint32_t m_PadTop
Padding top value in the height dimension.
A tensor defined by a TensorInfo (shape and data type) and an immutable backing store.
Definition: Tensor.hpp:327
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
std::unique_ptr< INetwork, void(*)(INetwork *network)> INetworkPtr
Definition: INetwork.hpp:241
A DepthwiseConvolution2dDescriptor for the DepthwiseConvolution2dLayer.
uint32_t m_PadRight
Padding right value in the width dimension.