// FullyConnectedTestImpl.cpp (ArmNN 21.08)
//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "FullyConnectedTestImpl.hpp"


#include <QuantizeHelper.hpp>

#include <backendsCommon/TensorHandle.hpp>

#include <backendsCommon/test/DataTypeUtils.hpp>
#include <backendsCommon/test/TensorCopyUtils.hpp>
#include <backendsCommon/test/WorkloadTestUtils.hpp>

#include <test/TensorHelpers.hpp>
//
// Implementation templates
//

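// Shared implementation: builds a FullyConnected workload from the supplied tensor
// infos and data, runs it through the given workload factory, and captures the
// actual output; callers fill in the expected data afterwards.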
template<typename T, typename B>
LayerTestResult<T, 2> SimpleFullyConnectedTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    armnn::TensorInfo inputTensorInfo,
    armnn::TensorInfo outputTensorInfo,
    armnn::TensorInfo weightsTensorInfo,
    armnn::TensorInfo biasesTensorInfo,
    std::vector<T>& weights,
    std::vector<B>& bias,
    std::vector<T>& input,
    bool biasEnabled,
    bool transposeWeights,
    bool constantWeights)
{
    std::unique_ptr<armnn::ITensorHandle> input0Handle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> input1Handle = tensorHandleFactory.CreateTensorHandle(weightsTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);

    std::vector<T> actualOutput(outputTensorInfo.GetNumElements());

    armnn::FullyConnectedQueueDescriptor data;
    armnn::WorkloadInfo info;
    armnn::ScopedTensorHandle weightsTensor(weightsTensorInfo);
    armnn::ScopedTensorHandle biasTensor(biasesTensorInfo);

    AllocateAndCopyDataToITensorHandle(&weightsTensor, weights.data());
    AllocateAndCopyDataToITensorHandle(&biasTensor, bias.data());

    AddInputToWorkload(data, info, inputTensorInfo, input0Handle.get());
    AddInputToWorkload(data, info, weightsTensorInfo, input1Handle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    // Need to set as layer members will be null when creating the workload because the optimization hasn't been run.
    data.m_Weight = &weightsTensor;
    data.m_Bias = &biasTensor;

    data.m_Parameters.m_BiasEnabled = biasEnabled;
    data.m_Parameters.m_TransposeWeightMatrix = transposeWeights;
    data.m_Parameters.m_ConstantWeights = constantWeights;

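    // The bias is fed to the workload as an optional third input; it is only
    // created and wired up when biasEnabled is set.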
    std::unique_ptr<armnn::ITensorHandle> input2Handle = nullptr;
    if (biasEnabled)
    {
        input2Handle = tensorHandleFactory.CreateTensorHandle(biasesTensorInfo);
        AddInputToWorkload(data, info, biasesTensorInfo, input2Handle.get());
    }

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateFullyConnected(data, info);
    LayerTestResult<T, 2> result(outputTensorInfo);

    input0Handle->Allocate();
    input1Handle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(input0Handle.get(), input.data());
    CopyDataToITensorHandle(input1Handle.get(), weights.data());
    if (biasEnabled)
    {
        input2Handle->Allocate();
        CopyDataToITensorHandle(input2Handle.get(), bias.data());
    }

    ExecuteWorkload(*workload, memoryManager);

    CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
    result.m_ActualData = actualOutput;

    return result;
}

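// End-to-end check with fixed quantization parameters on the input, weights and
// output; the expected values below bake in the resulting rounding behaviour.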
template<armnn::DataType ArmnnType, typename T>
LayerTestResult<T, 2> FullyConnectedTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    bool biasEnabled,
    bool constantWeights)
{
    constexpr static unsigned int inputWidth = 3u;
    constexpr static unsigned int inputHeight = 2u;
    constexpr static unsigned int inputChannels = 1u;

    constexpr static unsigned int inputSize = inputWidth * inputHeight * inputChannels;

    constexpr static unsigned int outputChannels = 2u;

    armnn::TensorInfo inputTensorInfo({ 1, inputChannels, inputHeight, inputWidth }, ArmnnType);
    inputTensorInfo.SetQuantizationScale(0.1f);
    inputTensorInfo.SetQuantizationOffset(63);

    armnn::TensorInfo outputTensorInfo({ 1, outputChannels }, ArmnnType);
    outputTensorInfo.SetQuantizationScale(5.f);
    outputTensorInfo.SetQuantizationOffset(biasEnabled ? -50 : 10);

    armnn::TensorInfo weightsDesc({ outputChannels, inputSize }, ArmnnType);
    weightsDesc.SetQuantizationScale(0.2f);
    weightsDesc.SetQuantizationOffset(93);

    armnn::TensorInfo biasesDesc({ outputChannels }, GetBiasTypeFromWeightsType(weightsDesc.GetDataType()).value());
    biasesDesc.SetQuantizationScale(inputTensorInfo.GetQuantizationScale() * weightsDesc.GetQuantizationScale());
    biasesDesc.SetQuantizationOffset(0);

    LayerTestResult<T, 2> result(outputTensorInfo);

    std::vector<T> input = ConvertToDataType<ArmnnType>(
        {
            -1.2f, 6.1f, -3.5f,
            18.8f, -5.5f, 2.9f
        },
        inputTensorInfo);

    std::vector<T> weights = ConvertToDataType<ArmnnType>(
        {
            -8.4f, 20.0f, -10.4f, -8, 16.4f, -11.8f,
            23.4f, 10.4f, -14.0f, -3.8f, -11.8f, 11.4f
        },
        weightsDesc);

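    // Bias uses the usual convention biasScale = inputScale * weightsScale
    // (0.1 * 0.2 = 0.02), so the raw int32 values {9250, 67500} dequantize
    // to real bias values of 185.0f and 1350.0f.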
    std::vector<int32_t> bias = {9250, 67500};

    result = SimpleFullyConnectedTestImpl<T>(workloadFactory,
                                             memoryManager,
                                             tensorHandleFactory,
                                             inputTensorInfo,
                                             outputTensorInfo,
                                             weightsDesc,
                                             biasesDesc,
                                             weights,
                                             bias,
                                             input,
                                             biasEnabled,
                                             true,
                                             constantWeights);

    if (biasEnabled)
    {
        result.m_ExpectedData = ConvertToDataType<ArmnnType>({80.f, 1460.f}, outputTensorInfo);
    }
    else
    {
        result.m_ExpectedData = ConvertToDataType<ArmnnType>({-107.04f, 110.f}, outputTensorInfo);
    }

    return result;
}

//
// ArmNN variant of the AndroidNN fully_connected_float_large test.
//
// Tests the fully connected layer with large values, optionally transposing weights.
// Note this is templated for consistency, but the nature of this test makes it unlikely to be useful in Uint8 mode.
//
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 2> FullyConnectedLargeTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    bool transposeWeights,
    float qScale = 0.0f,
    int32_t qOffset = 0)
{
    unsigned int inputWidth = 1;
    unsigned int inputHeight = 1;
    unsigned int inputChannels = 5;
    unsigned int inputNum = 1;

    unsigned int outputChannels = 1;
    unsigned int outputNum = 1;

    // Define the tensor descriptors.
    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;
    armnn::TensorInfo weightsDesc;
    armnn::TensorInfo biasesDesc;

    unsigned int inputShape[] = { inputNum, inputChannels, inputHeight, inputWidth };
    unsigned int outputShape[] = { outputNum, outputChannels };
    unsigned int weightsShape[] = { inputChannels, outputChannels };
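    // Weights default to an [inputChannels, outputChannels] layout; when
    // transposeWeights is set the shape is swapped and m_TransposeWeightMatrix
    // tells the workload to read the matrix accordingly.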
    if (transposeWeights)
    {
        std::swap(weightsShape[0], weightsShape[1]);
    }

    unsigned int biasShape[] = { outputChannels };

    inputTensorInfo = armnn::TensorInfo(4, inputShape, ArmnnType);
    outputTensorInfo = armnn::TensorInfo(2, outputShape, ArmnnType);
    weightsDesc = armnn::TensorInfo(2, weightsShape, ArmnnType);
    biasesDesc = armnn::TensorInfo(1, biasShape, ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    LayerTestResult<T, 2> result(outputTensorInfo);

    std::vector<T> input = armnnUtils::QuantizedVector<T>(
        {
            1.0f, 10.0f, 100.0f, 1000.0f, 10000.0f,
        },
        qScale, qOffset);

    std::vector<T> weights = armnnUtils::QuantizedVector<T>(
        {
            2.0f, 3.0f, 4.0f, 5.0f, 6.0f
        },
        qScale, qOffset);

    std::vector<T> biasValues({900000.f});

    result = SimpleFullyConnectedTestImpl<T>(
        workloadFactory,
        memoryManager,
        tensorHandleFactory,
        inputTensorInfo, outputTensorInfo,
        weightsDesc, biasesDesc,
        weights, biasValues, input,
        true, transposeWeights, true
    );

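    // Expected output: 1*2 + 10*3 + 100*4 + 1000*5 + 10000*6 + 900000 = 965432.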
    result.m_ExpectedData = armnnUtils::QuantizedVector<T>({ 965432.0f }, qScale, qOffset);

    return result;
}

//
// Explicit template specializations
//

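// These explicit instantiations provide the concrete symbols that backend test
// suites (e.g. the reference backend layer tests) link against for each
// supported quantized data type.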
template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 2>
FullyConnectedTest<armnn::DataType::QAsymmU8>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    bool biasEnabled,
    bool constWeights);

template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 2>
FullyConnectedTest<armnn::DataType::QSymmS16>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    bool biasEnabled,
    bool constWeights);

//
// Implementation functions
//

LayerTestResult<float, 2> FullyConnectedFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    bool biasEnabled,
    bool transposeWeights)
{
    unsigned int inputWidth = 1;
    unsigned int inputHeight = 1;
    unsigned int inputChannels = 5;
    unsigned int inputNum = 2;

    unsigned int outputChannels = 3;
    unsigned int outputNum = 2;

    // Define the tensor descriptors.
    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;
    armnn::TensorInfo weightsDesc;
    armnn::TensorInfo biasesDesc;

    unsigned int inputShape[] = { inputNum, inputChannels, inputHeight, inputWidth };
    unsigned int outputShape[] = { outputNum, outputChannels };
    unsigned int weightsShape[] = { inputChannels, outputChannels };

    if (transposeWeights)
    {
        std::swap(weightsShape[0], weightsShape[1]);
    }

    unsigned int biasShape[] = { outputChannels };

    inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(2, outputShape, armnn::DataType::Float32);
    weightsDesc = armnn::TensorInfo(2, weightsShape, armnn::DataType::Float32);
    biasesDesc = armnn::TensorInfo(1, biasShape, armnn::DataType::Float32);

    LayerTestResult<float, 2> result(outputTensorInfo);

    std::vector<float> input =
    {
        1.0f, 2.0f, 3.0f, 4.0f, 5.0f,
        5.0f, 4.0f, 3.0f, 2.0f, 1.0f
    };

    std::vector<float> weights =
    {
        .5f, 2.f, .5f,
        .5f, 2.f, 1.f,
        .5f, 2.f, 2.f,
        .5f, 2.f, 3.f,
        .5f, 2.f, 4.f
    };

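    // The matrix above is [inputChannels, outputChannels] = [5, 3]; the transposed
    // variant below holds the same values laid out as [3, 5].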
    if (transposeWeights)
    {
        weights =
        {
            .5f, .5f, .5f, .5f, .5f,
            2.f, 2.f, 2.f, 2.f, 2.f,
            .5f, 1.f, 2.f, 3.f, 4.f
        };
    }

    std::vector<float> biasValues({0.f, 0.f, 0.f});
    if (biasEnabled)
    {
        biasValues = std::vector<float>({10.f, 20.f, 30.f});
    }

    result = SimpleFullyConnectedTestImpl<float>(
        workloadFactory,
        memoryManager,
        tensorHandleFactory,
        inputTensorInfo, outputTensorInfo,
        weightsDesc, biasesDesc,
        weights, biasValues, input,
        biasEnabled, transposeWeights, true
    );

    std::vector<float> expectedOutput =
    {
        0.5f + 1.0f + 1.5f + 2.0f + 2.5f + biasValues[0],
        2.0f + 4.0f + 6.0f + 8.0f + 10.f + biasValues[1],
        0.5f + 2.0f + 6.0f + 12.f + 20.f + biasValues[2],

        2.5f + 2.0f + 1.5f + 1.0f + 0.5f + biasValues[0],
        10.0f + 8.0f + 6.0f + 4.0f + 2.f + biasValues[1],
        2.5f + 4.0f + 6.0f + 6.f + 4.f + biasValues[2]
    };
    result.m_ExpectedData = expectedOutput;

    return result;
}

LayerTestResult<float, 2> FullyConnectedLargeTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    bool transposeWeights)
{
    return FullyConnectedLargeTestCommon<armnn::DataType::Float32>(workloadFactory,
                                                                   memoryManager,
                                                                   tensorHandleFactory,
                                                                   transposeWeights);
}