ArmNN 20.02
MultiplicationTestImpl.cpp
//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "MultiplicationTestImpl.hpp"

#include "ElementwiseTestImpl.hpp"

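// Specialisation used by ElementwiseTestHelper (from ElementwiseTestImpl.hpp) to create
// the multiplication workload under test from the given factory.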
template<>
std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::MultiplicationQueueDescriptor>(
    const armnn::IWorkloadFactory& workloadFactory,
    const armnn::WorkloadInfo& info,
    const armnn::MultiplicationQueueDescriptor& descriptor)
{
    return workloadFactory.CreateMultiplication(descriptor, info);
}

LayerTestResult<float, 4> MultiplicationTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
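    // Multiplies two identically shaped [ 2, 2, 2, 2 ] tensors element by element.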
    const unsigned int width = 2u;
    const unsigned int height = 2u;
    const unsigned int channelCount = 2u;
    const unsigned int batchSize = 2u;

    unsigned int shape[] = { batchSize, channelCount, height, width };

    std::vector<float> input0 =
    {
        1, 1, 1, 1, 2, 2, 2, 2,
        3, 3, 3, 3, 4, 4, 4, 4
    };

    std::vector<float> input1 =
    {
        2, 2, 2, 2, 3, 3, 3, 3,
        4, 4, 4, 4, 5, 5, 5, 5
    };

    std::vector<float> output =
    {
        2, 2, 2, 2, 6, 6, 6, 6,
        12, 12, 12, 12, 20, 20, 20, 20
    };

    return ElementwiseTestHelper<4, armnn::MultiplicationQueueDescriptor, armnn::DataType::Float32>(
        workloadFactory,
        memoryManager,
        shape,
        input0,
        shape,
        input1,
        shape,
        output);
}

LayerTestResult<float, 5> Multiplication5dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
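    // Same element-wise multiplication, exercised on 5-dimensional tensors.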
    const unsigned int width = 3u;
    const unsigned int height = 2u;
    const unsigned int channelCount = 2u;
    const unsigned int batchSize = 2u;
    const unsigned int depth = 2u;

    unsigned int shape[] = { depth, batchSize, channelCount, height, width };

    std::vector<float> input0 =
    {
        1.80f, 0.20f, 2.30f, 1.30f, 2.10f, 1.00f,
        2.60f, 0.60f, 2.10f, 2.30f, 2.30f, 2.00f,

        2.50f, 1.00f, 2.90f, 3.10f, 1.50f, 2.40f,
        2.80f, 1.10f, 1.00f, 3.20f, 1.00f, 2.30f,


        0.30f, 2.20f, 1.00f, 0.20f, 1.60f, 1.40f,
        0.80f, 3.20f, 0.10f, 0.10f, 3.10f, 2.10f,

        1.50f, 2.40f, 1.40f, 0.70f, 2.40f, 1.40f,
        1.60f, 1.20f, 1.90f, 0.80f, 0.00f, 0.10f,
    };

    std::vector<float> input1 =
    {
        0.70f, 1.00f, 2.90f, 2.20f, 3.10f, 2.80f,
        1.80f, 2.00f, 0.50f, 2.30f, 1.20f, 2.70f,

        2.40f, 0.20f, 3.20f, 1.60f, 0.20f, 2.50f,
        2.30f, 0.70f, 2.70f, 1.80f, 2.90f, 2.70f,


        3.20f, 3.20f, 0.70f, 1.90f, 2.70f, 2.50f,
        2.40f, 0.90f, 2.30f, 1.80f, 2.50f, 2.00f,

        1.60f, 2.20f, 1.60f, 2.00f, 0.30f, 3.20f,
        0.40f, 3.00f, 2.60f, 0.30f, 0.00f, 2.50f,
    };

    std::vector<float> output =
    {
        1.26f, 0.20f, 6.67f, 2.86f, 6.51f, 2.80f,
        4.68f, 1.20f, 1.05f, 5.29f, 2.76f, 5.40f,

        6.00f, 0.20f, 9.28f, 4.96f, 0.30f, 6.00f,
        6.44f, 0.77f, 2.70f, 5.76f, 2.90f, 6.21f,


        0.96f, 7.04f, 0.70f, 0.38f, 4.32f, 3.50f,
        1.92f, 2.88f, 0.23f, 0.18f, 7.75f, 4.20f,

        2.40f, 5.28f, 2.24f, 1.40f, 0.72f, 4.48f,
        0.64f, 3.60f, 4.94f, 0.24f, 0.00f, 0.25f,
    };

    return ElementwiseTestHelper<5, armnn::MultiplicationQueueDescriptor, armnn::DataType::Float32>(
        workloadFactory,
        memoryManager,
        shape,
        input0,
        shape,
        input1,
        shape,
        output);
}

LayerTestResult<float, 4> MultiplicationBroadcast1ElementTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
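    // input1 holds a single value that is broadcast against every element of input0.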
    unsigned int shape0[] = { 1, 2, 2, 2 };
    unsigned int shape1[] = { 1, 1, 1, 1 };

    std::vector<float> input0 = { 1, 2, 3, 4, 5, 6, 7, 8 };

    std::vector<float> input1 = { 2 };

    std::vector<float> output = { 2, 4, 6, 8, 10, 12, 14, 16 };

    return ElementwiseTestHelper<4, armnn::MultiplicationQueueDescriptor, armnn::DataType::Float32>(
        workloadFactory,
        memoryManager,
        shape0,
        input0,
        shape1,
        input1,
        shape0,
        output);
}

LayerTestResult<float, 4> MultiplicationBroadcast1DVectorTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
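    // input1 is a 2-element vector broadcast along the innermost dimension of input0.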
    unsigned int shape0[] = { 1, 3, 3, 2 };
    unsigned int shape1[] = { 1, 1, 1, 2 };

    std::vector<float> input0 =
    {
        1, 2, 3, 4, 5, 6,
        7, 8, 9, 10, 11, 12,
        13, 14, 15, 16, 17, 18
    };

    std::vector<float> input1 = { 1, 2 };

    std::vector<float> output =
    {
        1, 4, 3, 8, 5, 12,
        7, 16, 9, 20, 11, 24,
        13, 28, 15, 32, 17, 36
    };

    return ElementwiseTestHelper<4, armnn::MultiplicationQueueDescriptor, armnn::DataType::Float32>(
        workloadFactory,
        memoryManager,
        shape0,
        input0,
        shape1,
        input1,
        shape0,
        output);
}

LayerTestResult<uint8_t, 4> MultiplicationUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
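    // Quantized (QAsymmU8) multiplication. Dequantized value = scale * (quantized - offset).
    // Example, first element: input0 dequantizes as 4.0f * (62 - 1) = 244 and input1 as
    // 3.0f * (126 - (-2)) = 384; their product 93696 re-quantizes to
    // 93696 / 1366.255 + (-5) ≈ 63.6, stored as 64.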
    constexpr unsigned int batchSize = 1u;
    constexpr unsigned int channels = 2u;
    constexpr unsigned int height = 2u;
    constexpr unsigned int width = 3u;

    const unsigned int shape[] = { batchSize, channels, height, width };

    // See dequantized values to the right
    std::vector<uint8_t> input0 =
    {
        62, 37, 3, 172, 13, 111,  // 244, 144, 8, 684, 48, 440,
        188, 20, 73, 31, 23, 31   // 748, 76, 288, 120, 88, 120
    };

    // See dequantized values to the right
    std::vector<uint8_t> input1 =
    {
        126, 240, 252, 183, 121, 247, // 384, 726, 762, 555, 369, 747,
        48, 115, 151, 79, 78, 97      // 150, 351, 459, 243, 240, 297
    };

    // See dequantized values to the right
    std::vector<uint8_t> output =
    {
        64, 72, 0, 255, 8, 236,  // 93696, 104544, 6096(clamped), 379620(clamped), 17712, 328680,
        77, 15, 92, 16, 10, 21,  // 112200, 26676, 132192, 29160, 21120, 35640
    };

    // Scale/offset chosen to have output values out of range
    return ElementwiseTestHelper<4, armnn::MultiplicationQueueDescriptor, armnn::DataType::QAsymmU8>(
        workloadFactory,
        memoryManager,
        shape,
        input0,
        4.0f,
        1,
        shape,
        input1,
        3.0f,
        -2,
        shape,
        output,
        1366.255f,
        -5);
}

LayerTestResult<uint8_t, 4> MultiplicationBroadcast1ElementUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const unsigned int shape0[] = { 1, 2, 2, 3 };
    const unsigned int shape1[] = { 1, 1, 1, 1 };

    std::vector<uint8_t> input0 =
    {
        1, 2, 3, 4, 5, 6,
        7, 8, 9, 10, 11, 12
    };

    std::vector<uint8_t> input1 = { 2 };

    std::vector<uint8_t> output =
    {
        2, 4, 6, 8, 10, 12,
        14, 16, 18, 20, 22, 24
    };

    return ElementwiseTestHelper<4, armnn::MultiplicationQueueDescriptor, armnn::DataType::QAsymmU8>(
        workloadFactory,
        memoryManager,
        shape0,
        input0,
        shape1,
        input1,
        shape0,
        output);
}

LayerTestResult<uint8_t, 4> MultiplicationBroadcast1DVectorUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const unsigned int shape0[] = { 1, 2, 2, 3 };
    const unsigned int shape1[] = { 1, 1, 1, 3 };

    std::vector<uint8_t> input0 =
    {
        1, 2, 3, 4, 5, 6,
        7, 8, 9, 10, 11, 12
    };

    std::vector<uint8_t> input1 = { 1, 2, 3 };

    std::vector<uint8_t> output =
    {
        1, 4, 9, 4, 10, 18,
        7, 16, 27, 10, 22, 36
    };

    return ElementwiseTestHelper<4, armnn::MultiplicationQueueDescriptor, armnn::DataType::QAsymmU8>(
        workloadFactory,
        memoryManager,
        shape0,
        input0,
        shape1,
        input1,
        shape0,
        output);
}

LayerTestResult<int16_t, 4> MultiplicationInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const unsigned int shape[] = { 1, 2, 2, 3 };

    std::vector<int16_t> input0 =
    {
        6, 7, 8, 9, 10, 11,
        12, 13, 14, 15, 16, 17
    };

    std::vector<int16_t> input1 =
    {
        1, 2, 3, 4, 5, 6,
        7, 8, 9, 10, 11, 12
    };

    std::vector<int16_t> output =
    {
        6, 14, 24, 36, 50, 66,
        84, 104, 126, 150, 176, 204
    };

    return ElementwiseTestHelper<4, armnn::MultiplicationQueueDescriptor, armnn::DataType::QSymmS16>(
        workloadFactory,
        memoryManager,
        shape,
        input0,
        shape,
        input1,
        shape,
        output);
}

LayerTestResult<int16_t, 4> MultiplicationBroadcast1ElementInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const unsigned int shape0[] = { 1, 2, 2, 3 };
    const unsigned int shape1[] = { 1, 1, 1, 1 };

    std::vector<int16_t> input0 =
    {
        1, 2, 3, 4, 5, 6,
        7, 8, 9, 10, 11, 12
    };

    std::vector<int16_t> input1 = { 2 };

    std::vector<int16_t> output =
    {
        2, 4, 6, 8, 10, 12,
        14, 16, 18, 20, 22, 24
    };

    return ElementwiseTestHelper<4, armnn::MultiplicationQueueDescriptor, armnn::DataType::QSymmS16>(
        workloadFactory,
        memoryManager,
        shape0,
        input0,
        shape1,
        input1,
        shape0,
        output);
}

LayerTestResult<int16_t, 4> MultiplicationBroadcast1DVectorInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const unsigned int shape0[] = { 1, 2, 2, 3 };
    const unsigned int shape1[] = { 1, 1, 1, 3 };

    std::vector<int16_t> input0 =
    {
        1, 2, 3, 4, 5, 6,
        7, 8, 9, 10, 11, 12
    };

    std::vector<int16_t> input1 = { 1, 2, 3 };

    std::vector<int16_t> output =
    {
        1, 4, 9, 4, 10, 18,
        7, 16, 27, 10, 22, 36
    };

    return ElementwiseTestHelper<4, armnn::MultiplicationQueueDescriptor, armnn::DataType::QSymmS16>(
        workloadFactory,
        memoryManager,
        shape0,
        input0,
        shape1,
        input1,
        shape0,
        output);
}

LayerTestResult<float, 4> CompareMultiplicationTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory)
{
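    // Runs the same random multiplication through workloadFactory and refWorkloadFactory,
    // returning both outputs so the caller can compare the backend under test against the reference.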
    IgnoreUnused(memoryManager);
    const unsigned int width = 16;
    const unsigned int height = 32;
    const unsigned int channelCount = 2;
    const unsigned int batchSize = 5;

    armnn::TensorInfo inputTensorInfo0;
    armnn::TensorInfo inputTensorInfo1;
    armnn::TensorInfo outputTensorInfo;

    constexpr unsigned int shape[] = { batchSize, channelCount, height, width };

    inputTensorInfo0 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);

    LayerTestResult<float, 4> comparisonResult(outputTensorInfo);

    auto input0 = MakeRandomTensor<float, 4>(inputTensorInfo0, 803506992);
    auto input1 = MakeRandomTensor<float, 4>(inputTensorInfo1, 54902257);

    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle0Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo0);
    std::unique_ptr<armnn::ITensorHandle> inputHandle1Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::MultiplicationQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    armnn::MultiplicationQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo0, inputHandle0Ref.get());
    SetWorkloadInput(refData, refInfo, 1, inputTensorInfo1, inputHandle1Ref.get());
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMultiplication(data, info);
    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateMultiplication(refData, refInfo);

    inputHandle0->Allocate();
    inputHandle1->Allocate();
    outputHandle->Allocate();
    inputHandle0Ref->Allocate();
    inputHandle1Ref->Allocate();
    outputHandleRef->Allocate();

    CopyDataToITensorHandle(inputHandle0.get(), input0.origin());
    CopyDataToITensorHandle(inputHandle1.get(), input1.origin());
    CopyDataToITensorHandle(inputHandle0Ref.get(), input0.origin());
    CopyDataToITensorHandle(inputHandle1Ref.get(), input1.origin());

    workload->PostAllocationConfigure();
    workload->Execute();
    workloadRef->PostAllocationConfigure();
    workloadRef->Execute();
    CopyDataFromITensorHandle(comparisonResult.output.origin(), outputHandle.get());
    CopyDataFromITensorHandle(comparisonResult.outputExpected.origin(), outputHandleRef.get());

    return comparisonResult;
}
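
// The helpers above are typically wired into each backend's unit-test suite. A minimal sketch,
// assuming the ARMNN_AUTO_TEST_CASE registration macro used by the backend layer tests
// (the test-case names below are illustrative, not taken from this file):
//
//     ARMNN_AUTO_TEST_CASE(SimpleMultiplication, MultiplicationTest)
//     ARMNN_AUTO_TEST_CASE(MultiplicationBroadcast1Element, MultiplicationBroadcast1ElementTest)
//     ARMNN_AUTO_TEST_CASE(MultiplicationBroadcast1DVector, MultiplicationBroadcast1DVectorTest)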