ArmNN
 20.11
RefCreateWorkloadTests.cpp
Go to the documentation of this file.
1 //
2 // Copyright © 2017 Arm Ltd. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
7 
12 
13 namespace
14 {
15 
16 template<typename Workload>
17 void CheckInputOutput(std::unique_ptr<Workload> workload, const TensorInfo& inputInfo, const TensorInfo& outputInfo)
18 {
19  auto queueDescriptor = workload->GetData();
20  auto inputHandle = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor.m_Inputs[0]);
21  auto outputHandle = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor.m_Outputs[0]);
22  BOOST_TEST((inputHandle->GetTensorInfo() == inputInfo));
23  BOOST_TEST((outputHandle->GetTensorInfo() == outputInfo));
24 }
25 
26 template <typename Workload>
27 void CheckInputsOutput(std::unique_ptr<Workload> workload,
28  const TensorInfo& inputInfo0,
29  const TensorInfo& inputInfo1,
30  const TensorInfo& outputInfo)
31 {
32  auto queueDescriptor = workload->GetData();
33  auto inputHandle0 = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor.m_Inputs[0]);
34  auto inputHandle1 = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor.m_Inputs[1]);
35  auto outputHandle = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor.m_Outputs[0]);
36  BOOST_TEST((inputHandle0->GetTensorInfo() == inputInfo0));
37  BOOST_TEST((inputHandle1->GetTensorInfo() == inputInfo1));
38  BOOST_TEST((outputHandle->GetTensorInfo() == outputInfo));
39 }
40 
41 armnn::RefWorkloadFactory GetFactory()
42 {
43  std::shared_ptr<RefMemoryManager> memoryManager = std::make_shared<RefMemoryManager>();
44  return RefWorkloadFactory(memoryManager);
45 }
46 
47 
48 }
49 
50 BOOST_AUTO_TEST_SUITE(CreateWorkloadRef)
51 
52 template <typename ActivationWorkloadType, armnn::DataType DataType>
53 static void RefCreateActivationWorkloadTest()
54 {
55  Graph graph;
56  RefWorkloadFactory factory = GetFactory();
57  auto workload = CreateActivationWorkloadTest<ActivationWorkloadType, DataType>(factory, graph);
58 
59  // Checks that outputs are as we expect them (see definition of CreateActivationWorkloadTest).
60  CheckInputOutput(std::move(workload),
61  TensorInfo({ 1, 1 }, DataType),
62  TensorInfo({ 1, 1 }, DataType));
63 }
64 
65 BOOST_AUTO_TEST_CASE(CreateActivationFloat32Workload)
66 {
67  RefCreateActivationWorkloadTest<RefActivationWorkload, armnn::DataType::Float32>();
68 }
69 
70 BOOST_AUTO_TEST_CASE(CreateActivationUint8Workload)
71 {
72  RefCreateActivationWorkloadTest<RefActivationWorkload, armnn::DataType::QAsymmU8>();
73 }
74 
75 template <typename WorkloadType,
76  typename DescriptorType,
77  typename LayerType,
79 static void RefCreateElementwiseWorkloadTest()
80 {
81  Graph graph;
82  RefWorkloadFactory factory = GetFactory();
83  auto workload = CreateElementwiseWorkloadTest<WorkloadType, DescriptorType, LayerType, DataType>(
84  factory, graph);
85 
86  CheckInputsOutput(std::move(workload),
87  TensorInfo({ 2, 3 }, DataType),
88  TensorInfo({ 2, 3 }, DataType),
89  TensorInfo({ 2, 3 }, DataType));
90 }
91 
92 BOOST_AUTO_TEST_CASE(CreateSubtractionWorkloadWithBlobTest)
93 {
94  Graph graph;
95  RefWorkloadFactory factory = GetFactory();
97 
98  auto workload = CreateSubtractionWithBlobWorkloadTest<RefSubtractionWorkload<>,
101  (factory, graph);
102 
103  CheckInputsOutput(std::move(workload),
104  TensorInfo({ 2, 3 }, DataType),
105  TensorInfo({ 2, 3 }, DataType),
106  TensorInfo({ 2, 3 }, DataType));
107 }
108 
109 BOOST_AUTO_TEST_CASE(CreateAdditionWorkloadWithBlobTest)
110 {
111  Graph graph;
112  RefWorkloadFactory factory = GetFactory();
114 
115  auto workload = CreateAdditionWithBlobWorkloadTest<RefAdditionWorkload<>,
117  armnn::DataType::Float32>(factory, graph);
118 
119  CheckInputsOutput(std::move(workload),
120  TensorInfo({ 2, 3 }, DataType),
121  TensorInfo({ 2, 3 }, DataType),
122  TensorInfo({ 2, 3 }, DataType));
123 }
124 
125 BOOST_AUTO_TEST_CASE(CreateMultiplicationWorkloadWithBlobTest)
126 {
127  Graph graph;
128  RefWorkloadFactory factory = GetFactory();
130 
131  auto workload = CreateMultiplicationWithBlobWorkloadTest<RefMultiplicationWorkload<>,
133  armnn::DataType::Float32>(factory, graph);
134 
135  CheckInputsOutput(std::move(workload),
136  TensorInfo({2, 3}, DataType),
137  TensorInfo({2, 3}, DataType),
138  TensorInfo({2, 3}, DataType));
139 }
140 
141 BOOST_AUTO_TEST_CASE(CreateAdditionFloatWorkload)
142 {
143  RefCreateElementwiseWorkloadTest<RefAdditionWorkload<>,
147 }
148 
149 BOOST_AUTO_TEST_CASE(CreateAdditionUint8Workload)
150 {
151  RefCreateElementwiseWorkloadTest<RefAdditionWorkload<>,
155 }
156 
157 BOOST_AUTO_TEST_CASE(CreateAdditionInt16Workload)
158 {
159  RefCreateElementwiseWorkloadTest<RefAdditionWorkload<>,
163 }
164 
165 BOOST_AUTO_TEST_CASE(CreateAdditionInt32Workload)
166 {
167  RefCreateElementwiseWorkloadTest<RefAdditionWorkload<int32_t>,
171 }
172 
173 BOOST_AUTO_TEST_CASE(CreateSubtractionFloat32Workload)
174 {
175  RefCreateElementwiseWorkloadTest<RefSubtractionWorkload<>,
179 }
180 
181 BOOST_AUTO_TEST_CASE(CreateSubtractionFloat16Workload)
182 {
183  RefCreateElementwiseWorkloadTest<RefSubtractionWorkload<>,
187 }
188 
189 BOOST_AUTO_TEST_CASE(CreateSubtractionUint8Workload)
190 {
191  RefCreateElementwiseWorkloadTest<RefSubtractionWorkload<>,
195 }
196 
197 BOOST_AUTO_TEST_CASE(CreateSubtractionInt16Workload)
198 {
199  RefCreateElementwiseWorkloadTest<RefSubtractionWorkload<>,
203 }
204 
205 BOOST_AUTO_TEST_CASE(CreateSubtractionInt32Workload)
206 {
207  RefCreateElementwiseWorkloadTest<RefSubtractionWorkload<int32_t>,
211 }
212 
213 BOOST_AUTO_TEST_CASE(CreateMultiplicationFloatWorkload)
214 {
215  RefCreateElementwiseWorkloadTest<RefMultiplicationWorkload<>,
219 }
220 
221 BOOST_AUTO_TEST_CASE(CreateMultiplicationUint8Workload)
222 {
223  RefCreateElementwiseWorkloadTest<RefMultiplicationWorkload<>,
227 }
228 
229 BOOST_AUTO_TEST_CASE(CreateMultiplicationInt16Workload)
230 {
231  RefCreateElementwiseWorkloadTest<RefMultiplicationWorkload<>,
235 }
236 
237 BOOST_AUTO_TEST_CASE(CreateMultiplicationInt32Workload)
238 {
239  RefCreateElementwiseWorkloadTest<RefMultiplicationWorkload<int32_t>,
243 }
244 
245 BOOST_AUTO_TEST_CASE(CreateDivisionFloat32Workload)
246 {
247  RefCreateElementwiseWorkloadTest<RefDivisionWorkload<>,
251 }
252 
253 BOOST_AUTO_TEST_CASE(CreateDivisionFloat16Workload)
254 {
255  RefCreateElementwiseWorkloadTest<RefDivisionWorkload<>,
259 }
260 
261 BOOST_AUTO_TEST_CASE(CreateDivisionUint8Workload)
262 {
263  RefCreateElementwiseWorkloadTest<RefDivisionWorkload<>,
267 }
268 
269 BOOST_AUTO_TEST_CASE(CreateDivisionInt16Workload)
270 {
271  RefCreateElementwiseWorkloadTest<RefDivisionWorkload<>,
275 }
276 
277 BOOST_AUTO_TEST_CASE(CreateDivisionInt32Workload)
278 {
279  RefCreateElementwiseWorkloadTest<RefDivisionWorkload<int32_t>,
283 }
284 
285 template <typename BatchNormalizationWorkloadType, armnn::DataType DataType>
286 static void RefCreateBatchNormalizationWorkloadTest(DataLayout dataLayout)
287 {
288  Graph graph;
289  RefWorkloadFactory factory = GetFactory();
290  auto workload = CreateBatchNormalizationWorkloadTest<BatchNormalizationWorkloadType, DataType>(factory,
291  graph,
292  dataLayout);
293 
294  TensorShape inputShape;
295  TensorShape outputShape;
296 
297  switch (dataLayout)
298  {
299  case DataLayout::NHWC:
300  inputShape = { 2, 4, 4, 3 };
301  outputShape = { 2, 4, 4, 3 };
302  break;
303  case DataLayout::NCHW:
304  default:
305  inputShape = { 2, 3, 4, 4 };
306  outputShape = { 2, 3, 4, 4 };
307  break;
308  }
309 
310  // Checks that outputs and inputs are as we expect them (see definition of CreateBatchNormalizationWorkloadTest).
311  CheckInputOutput(std::move(workload), TensorInfo(inputShape, DataType), TensorInfo(outputShape, DataType));
312 }
313 
314 BOOST_AUTO_TEST_CASE(CreateBatchNormalizationWithBlobFloat32Workload)
315 {
316  Graph graph;
317  RefWorkloadFactory factory = GetFactory();
318  auto dataType = armnn::DataType::Float32;
319  auto workload = CreateBatchNormalizationWorkloadTest<RefBatchNormalizationWorkload,
320  armnn::DataType::Float32>(factory, graph, DataLayout::NHWC);
321 
322  TensorShape inputShape;
323  TensorShape outputShape;
324 
325  inputShape = { 2, 4, 4, 3 };
326  outputShape = { 2, 4, 4, 3 };
327 
328  // Checks that outputs and inputs are as we expect them (see definition of CreateBatchNormalizationWorkloadTest).
329  CheckInputOutput(std::move(workload), TensorInfo(inputShape, dataType), TensorInfo(outputShape, dataType));
330 }
331 
332 BOOST_AUTO_TEST_CASE(CreateBatchNormalizationFloat32Workload)
333 {
334  RefCreateBatchNormalizationWorkloadTest<RefBatchNormalizationWorkload,armnn::DataType::Float32>
335  (DataLayout::NCHW);
336 }
337 
338 BOOST_AUTO_TEST_CASE(CreateBatchNormalizationFloat32WorkloadNhwc)
339 {
340  RefCreateBatchNormalizationWorkloadTest<RefBatchNormalizationWorkload, armnn::DataType::Float32>
341  (DataLayout::NHWC);
342 }
343 
344 BOOST_AUTO_TEST_CASE(CreateBatchNormalizationFloat16Workload)
345 {
346  RefCreateBatchNormalizationWorkloadTest<RefBatchNormalizationWorkload,armnn::DataType::Float16>
347  (DataLayout::NCHW);
348 }
349 
350 BOOST_AUTO_TEST_CASE(CreateBatchNormalizationFloat16WorkloadNhwc)
351 {
352  RefCreateBatchNormalizationWorkloadTest<RefBatchNormalizationWorkload, armnn::DataType::Float16>
353  (DataLayout::NHWC);
354 }
355 
356 BOOST_AUTO_TEST_CASE(CreateBatchNormalizationUint8Workload)
357 {
358  RefCreateBatchNormalizationWorkloadTest<RefBatchNormalizationWorkload, armnn::DataType::QAsymmU8>
359  (DataLayout::NCHW);
360 }
361 
362 BOOST_AUTO_TEST_CASE(CreateBatchNormalizationUint8WorkloadNhwc)
363 {
364  RefCreateBatchNormalizationWorkloadTest<RefBatchNormalizationWorkload, armnn::DataType::QAsymmU8>
365  (DataLayout::NHWC);
366 }
367 
368 BOOST_AUTO_TEST_CASE(CreateBatchNormalizationInt16Workload)
369 {
370  RefCreateBatchNormalizationWorkloadTest<RefBatchNormalizationWorkload, armnn::DataType::QSymmS16>
371  (DataLayout::NCHW);
372 }
373 
374 BOOST_AUTO_TEST_CASE(CreateBatchNormalizationInt16WorkloadNhwc)
375 {
376  RefCreateBatchNormalizationWorkloadTest<RefBatchNormalizationWorkload, armnn::DataType::QSymmS16>
377  (DataLayout::NHWC);
378 }
379 
380 BOOST_AUTO_TEST_CASE(CreateConvertFp16ToFp32Float32Workload)
381 {
382  Graph graph;
383  RefWorkloadFactory factory = GetFactory();
384  auto workload = CreateConvertFp16ToFp32WorkloadTest<RefConvertFp16ToFp32Workload>(factory, graph);
385 
386  // Checks that outputs and inputs are as we expect them
387  CheckInputOutput(
388  std::move(workload), TensorInfo({1, 3, 2, 3}, DataType::Float16), TensorInfo({1, 3, 2, 3}, DataType::Float32));
389 }
390 
391 BOOST_AUTO_TEST_CASE(CreateConvertFp32ToFp16Float16Workload)
392 {
393  Graph graph;
394  RefWorkloadFactory factory = GetFactory();
395  auto workload = CreateConvertFp32ToFp16WorkloadTest<RefConvertFp32ToFp16Workload>(factory, graph);
396 
397  // Checks that outputs and inputs are as we expect them
398  CheckInputOutput(
399  std::move(workload), TensorInfo({1, 3, 2, 3}, DataType::Float32), TensorInfo({1, 3, 2, 3}, DataType::Float16));
400 }
401 
402 static void RefCreateConvolution2dWorkloadTest(DataLayout dataLayout = DataLayout::NCHW)
403 {
404  Graph graph;
405  RefWorkloadFactory factory = GetFactory();
406  auto workload = CreateConvolution2dWorkloadTest<RefConvolution2dWorkload, DataType::Float32>
407  (factory, graph, dataLayout);
408 
409  TensorShape inputShape = (dataLayout == DataLayout::NCHW) ? std::initializer_list<unsigned int>({2, 3, 8, 16})
410  : std::initializer_list<unsigned int>({2, 8, 16, 3});
411  TensorShape outputShape = (dataLayout == DataLayout::NCHW) ? std::initializer_list<unsigned int>({2, 2, 2, 10})
412  : std::initializer_list<unsigned int>({2, 2, 10, 2});
413 
414  // Checks that outputs and inputs are as we expect them (see definition of CreateConvolution2dWorkloadTest).
415  CheckInputOutput(std::move(workload),
416  TensorInfo(inputShape, DataType::Float32),
417  TensorInfo(outputShape, DataType::Float32));
418 }
419 
420 BOOST_AUTO_TEST_CASE(CreateConvolution2dFloatNchwWorkload)
421 {
422  RefCreateConvolution2dWorkloadTest(DataLayout::NCHW);
423 }
424 
425 BOOST_AUTO_TEST_CASE(CreateConvolution2dFloatNhwcWorkload)
426 {
427  RefCreateConvolution2dWorkloadTest(DataLayout::NHWC);
428 }
429 
430 BOOST_AUTO_TEST_CASE(CreateConvolution2dWithBlobWorkload)
431 {
432  DataLayout dataLayout = DataLayout::NHWC;
433  Graph graph;
434  RefWorkloadFactory factory = GetFactory();
435  auto workload = CreateConvolution2dFusedActivationWithBlobWorkloadTest<RefConvolution2dWorkload, DataType::Float32>
436  (factory, graph, dataLayout);
437 
438  TensorShape inputShape = (dataLayout == DataLayout::NCHW) ? std::initializer_list<unsigned int>({2, 3, 8, 16})
439  : std::initializer_list<unsigned int>({2, 8, 16, 3});
440  TensorShape outputShape = (dataLayout == DataLayout::NCHW) ? std::initializer_list<unsigned int>({2, 2, 2, 10})
441  : std::initializer_list<unsigned int>({2, 2, 10, 2});
442 
443  // Checks that outputs and inputs are as we expect them (see definition of CreateConvolution2dWorkloadTest).
444  CheckInputOutput(std::move(workload),
445  TensorInfo(inputShape, DataType::Float32),
446  TensorInfo(outputShape, DataType::Float32));
447 }
448 
449 static void RefCreateDepthwiseConvolutionWorkloadTest(DataLayout dataLayout)
450 {
451  Graph graph;
452  RefWorkloadFactory factory = GetFactory();
453  auto workload = CreateDepthwiseConvolution2dWorkloadTest<RefDepthwiseConvolution2dWorkload, DataType::Float32>
454  (factory, graph, dataLayout);
455 
456  TensorShape inputShape = (dataLayout == DataLayout::NCHW) ? std::initializer_list<unsigned int>({ 2, 2, 5, 5 })
457  : std::initializer_list<unsigned int>({ 2, 5, 5, 2 });
458  TensorShape outputShape = (dataLayout == DataLayout::NCHW) ? std::initializer_list<unsigned int>({ 2, 2, 5, 5 })
459  : std::initializer_list<unsigned int>({ 2, 5, 5, 2 });
460 
461  // Checks that inputs/outputs are as we expect them (see definition of CreateDepthwiseConvolution2dWorkloadTest).
462  CheckInputOutput(std::move(workload),
463  TensorInfo(inputShape, DataType::Float32),
464  TensorInfo(outputShape, DataType::Float32));
465 }
466 
467 BOOST_AUTO_TEST_CASE(CreateDepthwiseConvolutionFloat32NhwcWorkload)
468 {
469  RefCreateDepthwiseConvolutionWorkloadTest(DataLayout::NHWC);
470 }
471 
472 BOOST_AUTO_TEST_CASE(RefCreateFullyConnectedWithBlobWorkloadTest)
473 {
474  Graph graph;
475  RefWorkloadFactory factory = GetFactory();
476  auto workload = CreateFullyConnectedWithBlobWorkloadTest<RefFullyConnectedWorkload,
477  armnn::DataType::Float32>(factory, graph);
478 
479  // Checks that outputs and inputs are as we expect them (see definition of CreateFullyConnectedWorkloadTest).
480  float inputsQScale = 0.0f;
481  float outputQScale = 0.0f;
482  CheckInputOutput(std::move(workload),
483  TensorInfo({ 3, 1, 4, 5 }, armnn::DataType::Float32, inputsQScale),
484  TensorInfo({ 3, 7 }, armnn::DataType::Float32, outputQScale));
485 }
486 
487 template <typename FullyConnectedWorkloadType, armnn::DataType DataType>
488 static void RefCreateFullyConnectedWorkloadTest()
489 {
490  Graph graph;
491  RefWorkloadFactory factory = GetFactory();
492  auto workload = CreateFullyConnectedWorkloadTest<FullyConnectedWorkloadType, DataType>(factory, graph);
493 
494  // Checks that outputs and inputs are as we expect them (see definition of CreateFullyConnectedWorkloadTest).
495  float inputsQScale = DataType == armnn::DataType::QAsymmU8 ? 1.0f : 0.0;
496  float outputQScale = DataType == armnn::DataType::QAsymmU8 ? 2.0f : 0.0;
497  CheckInputOutput(std::move(workload),
498  TensorInfo({ 3, 1, 4, 5 }, DataType, inputsQScale),
499  TensorInfo({ 3, 7 }, DataType, outputQScale));
500 }
501 
502 BOOST_AUTO_TEST_CASE(CreateFullyConnectedWorkloadFloat32)
503 {
504  RefCreateFullyConnectedWorkloadTest<RefFullyConnectedWorkload, armnn::DataType::Float32>();
505 }
506 
507 BOOST_AUTO_TEST_CASE(CreateFullyConnectedWorkloadQuantisedAsymm8)
508 {
509  RefCreateFullyConnectedWorkloadTest<RefFullyConnectedWorkload, armnn::DataType::QAsymmU8>();
510 }
511 
512 BOOST_AUTO_TEST_CASE(CreateFullyConnectedWorkloadQuantisedSymm16)
513 {
514  RefCreateFullyConnectedWorkloadTest<RefFullyConnectedWorkload, armnn::DataType::QSymmS16>();
515 }
516 
517 template <typename NormalizationWorkloadType, armnn::DataType DataType>
518 static void RefCreateNormalizationWorkloadTest(DataLayout dataLayout)
519 {
520  Graph graph;
521  RefWorkloadFactory factory = GetFactory();
522  auto workload = CreateNormalizationWorkloadTest<NormalizationWorkloadType, DataType>(factory, graph, dataLayout);
523 
524  TensorShape inputShape;
525  TensorShape outputShape;
526 
527  switch (dataLayout)
528  {
529  case DataLayout::NHWC:
530  inputShape = { 3, 1, 5, 5 };
531  outputShape = { 3, 1, 5, 5 };
532  break;
533  case DataLayout::NCHW:
534  default:
535  inputShape = { 3, 5, 5, 1 };
536  outputShape = { 3, 5, 5, 1 };
537  break;
538  }
539 
540  // Checks that outputs and inputs are as we expect them (see definition of CreateNormalizationWorkloadTest).
541  CheckInputOutput(std::move(workload), TensorInfo(inputShape, DataType), TensorInfo(outputShape, DataType));
542 }
543 
544 BOOST_AUTO_TEST_CASE(CreateRefNormalizationFloat32NchwWorkload)
545 {
546  RefCreateNormalizationWorkloadTest<RefNormalizationWorkload, armnn::DataType::Float32>(DataLayout::NCHW);
547 }
548 
549 BOOST_AUTO_TEST_CASE(CreateRefNormalizationFloat32NhwcWorkload)
550 {
551  RefCreateNormalizationWorkloadTest<RefNormalizationWorkload, armnn::DataType::Float32>(DataLayout::NHWC);
552 }
553 
554 BOOST_AUTO_TEST_CASE(CreateRefNormalizationUint8NchwWorkload)
555 {
556  RefCreateNormalizationWorkloadTest<RefNormalizationWorkload, armnn::DataType::QAsymmU8>(DataLayout::NCHW);
557 }
558 
559 BOOST_AUTO_TEST_CASE(CreateRefNormalizationUint8NhwcWorkload)
560 {
561  RefCreateNormalizationWorkloadTest<RefNormalizationWorkload, armnn::DataType::QAsymmU8>(DataLayout::NHWC);
562 }
563 
564 BOOST_AUTO_TEST_CASE(CreateRefNormalizationInt16NchwWorkload)
565 {
566  RefCreateNormalizationWorkloadTest<RefNormalizationWorkload, armnn::DataType::QSymmS16>(DataLayout::NCHW);
567 }
568 
569 BOOST_AUTO_TEST_CASE(CreateRefNormalizationInt16NhwcWorkload)
570 {
571  RefCreateNormalizationWorkloadTest<RefNormalizationWorkload, armnn::DataType::QSymmS16>(DataLayout::NHWC);
572 }
573 
574 template <typename Pooling2dWorkloadType, armnn::DataType DataType>
575 static void RefCreatePooling2dWorkloadTest(DataLayout dataLayout)
576 {
577  Graph graph;
578  RefWorkloadFactory factory = GetFactory();
579  auto workload = CreatePooling2dWorkloadTest<Pooling2dWorkloadType, DataType>(factory, graph, dataLayout);
580 
581  TensorShape inputShape;
582  TensorShape outputShape;
583 
584  switch (dataLayout)
585  {
586  case DataLayout::NHWC:
587  inputShape = { 3, 5, 5, 2 };
588  outputShape = { 3, 2, 4, 2 };
589  break;
590  case DataLayout::NCHW:
591  default:
592  inputShape = { 3, 2, 5, 5 };
593  outputShape = { 3, 2, 2, 4 };
594  }
595 
596  // Checks that outputs and inputs are as we expect them (see definition of CreatePooling2dWorkloadTest).
597  CheckInputOutput(std::move(workload),
598  TensorInfo(inputShape, DataType),
599  TensorInfo(outputShape, DataType));
600 }
601 
602 BOOST_AUTO_TEST_CASE(CreatePooling2dFloat32Workload)
603 {
604  RefCreatePooling2dWorkloadTest<RefPooling2dWorkload, armnn::DataType::Float32>(DataLayout::NCHW);
605 }
606 
607 BOOST_AUTO_TEST_CASE(CreatePooling2dFloat32NhwcWorkload)
608 {
609  RefCreatePooling2dWorkloadTest<RefPooling2dWorkload, armnn::DataType::Float32>(DataLayout::NHWC);
610 }
611 
612 BOOST_AUTO_TEST_CASE(CreatePooling2dUint8Workload)
613 {
614  RefCreatePooling2dWorkloadTest<RefPooling2dWorkload, armnn::DataType::QAsymmU8>(DataLayout::NCHW);
615 }
616 
617 BOOST_AUTO_TEST_CASE(CreatePooling2dUint8NhwcWorkload)
618 {
619  RefCreatePooling2dWorkloadTest<RefPooling2dWorkload, armnn::DataType::QAsymmU8>(DataLayout::NHWC);
620 }
621 
622 BOOST_AUTO_TEST_CASE(CreatePooling2dInt16Workload)
623 {
624  RefCreatePooling2dWorkloadTest<RefPooling2dWorkload, armnn::DataType::QSymmS16>(DataLayout::NCHW);
625 }
626 
627 BOOST_AUTO_TEST_CASE(CreatePooling2dInt16NhwcWorkload)
628 {
629  RefCreatePooling2dWorkloadTest<RefPooling2dWorkload, armnn::DataType::QSymmS16>(DataLayout::NHWC);
630 }
631 
632 template <typename SoftmaxWorkloadType, armnn::DataType DataType>
633 static void RefCreateSoftmaxWorkloadTest()
634 {
635  Graph graph;
636  RefWorkloadFactory factory = GetFactory();
637  auto workload = CreateSoftmaxWorkloadTest<SoftmaxWorkloadType, DataType>(factory, graph);
638 
639  // Checks that outputs and inputs are as we expect them (see definition of CreateSoftmaxWorkloadTest).
640 
641  armnn::TensorInfo tensorInfo({4, 1}, DataType);
643  {
644  tensorInfo.SetQuantizationOffset(0);
645  tensorInfo.SetQuantizationScale(1.f / 256);
646  }
648  {
649  tensorInfo.SetQuantizationOffset(-128);
650  tensorInfo.SetQuantizationScale(1.f / 256);
651  }
652  CheckInputOutput(
653  std::move(workload),
654  tensorInfo,
655  tensorInfo);
656 }
657 
658 BOOST_AUTO_TEST_CASE(CreateSoftmaxFloat32Workload)
659 {
660  RefCreateSoftmaxWorkloadTest<RefSoftmaxWorkload, armnn::DataType::Float32>();
661 }
662 
663 BOOST_AUTO_TEST_CASE(CreateSoftmaxFloat16Workload)
664 {
665  RefCreateSoftmaxWorkloadTest<RefSoftmaxWorkload, armnn::DataType::Float16>();
666 }
667 
668 BOOST_AUTO_TEST_CASE(CreateSoftmaxQuantisedAsymm8Workload)
669 {
670  RefCreateSoftmaxWorkloadTest<RefSoftmaxWorkload, armnn::DataType::QAsymmU8>();
671 }
672 
673 BOOST_AUTO_TEST_CASE(CreateSoftmaxQuantisedSymm16Workload)
674 {
675  RefCreateSoftmaxWorkloadTest<RefSoftmaxWorkload, armnn::DataType::QSymmS16>();
676 }
677 
678 template <typename SplitterWorkloadType, armnn::DataType DataType>
679 static void RefCreateSplitterWorkloadTest()
680 {
681  Graph graph;
682  RefWorkloadFactory factory = GetFactory();
683  auto workload = CreateSplitterWorkloadTest<SplitterWorkloadType, DataType>(factory, graph);
684 
685  // Checks that outputs are as we expect them (see definition of CreateSplitterWorkloadTest).
686  SplitterQueueDescriptor queueDescriptor = workload->GetData();
687  auto inputHandle = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor.m_Inputs[0]);
688  BOOST_TEST((inputHandle->GetTensorInfo() == TensorInfo({ 5, 7, 7 }, DataType)));
689 
690  auto outputHandle0 = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor.m_Outputs[0]);
691  BOOST_TEST((outputHandle0->GetTensorInfo() == TensorInfo({ 1, 7, 7 }, DataType)));
692 
693  auto outputHandle1 = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor.m_Outputs[1]);
694  BOOST_TEST((outputHandle1->GetTensorInfo() == TensorInfo({ 2, 7, 7 }, DataType)));
695 
696  auto outputHandle2 = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor.m_Outputs[2]);
697  BOOST_TEST((outputHandle2->GetTensorInfo() == TensorInfo({ 2, 7, 7 }, DataType)));
698 }
699 
700 BOOST_AUTO_TEST_CASE(CreateSplitterFloat32Workload)
701 {
702  RefCreateSplitterWorkloadTest<RefSplitterWorkload, armnn::DataType::Float32>();
703 }
704 
705 BOOST_AUTO_TEST_CASE(CreateSplitterFloat16Workload)
706 {
707  RefCreateSplitterWorkloadTest<RefSplitterWorkload, armnn::DataType::Float16>();
708 }
709 
710 BOOST_AUTO_TEST_CASE(CreateSplitterUint8Workload)
711 {
712  RefCreateSplitterWorkloadTest<RefSplitterWorkload, armnn::DataType::QAsymmU8>();
713 }
714 
715 template <typename SplitterWorkloadType, typename ConcatWorkloadType, armnn::DataType DataType>
716 static void RefCreateSplitterConcatWorkloadTest()
717 {
718  // Tests that it is possible to decide which output of the splitter layer
719  // should be lined to which input of the concat layer.
720  // We tested that is is possible to specify 0th output
721  // of the splitter to be the 1st input to the concat and the 1st output of the splitter to be 0th input
722  // of the concat.
723 
724  Graph graph;
725  RefWorkloadFactory factory = GetFactory();
726  auto workloads = CreateSplitterConcatWorkloadTest<SplitterWorkloadType, ConcatWorkloadType, DataType>
727  (factory, graph);
728 
729  auto wlSplitter = std::move(workloads.first);
730  auto wlConcat = std::move(workloads.second);
731 
732  //Checks that the index of inputs/outputs matches what we declared on InputDescriptor construction.
733  armnn::RefTensorHandle* sOut0 = dynamic_cast<armnn::RefTensorHandle*>(wlSplitter->GetData().m_Outputs[0]);
734  armnn::RefTensorHandle* sOut1 = dynamic_cast<armnn::RefTensorHandle*>(wlSplitter->GetData().m_Outputs[1]);
735  armnn::RefTensorHandle* mIn0 = dynamic_cast<armnn::RefTensorHandle*>(wlConcat->GetData().m_Inputs[0]);
736  armnn::RefTensorHandle* mIn1 = dynamic_cast<armnn::RefTensorHandle*>(wlConcat->GetData().m_Inputs[1]);
737 
738  BOOST_TEST(sOut0);
739  BOOST_TEST(sOut1);
740  BOOST_TEST(mIn0);
741  BOOST_TEST(mIn1);
742 
743  bool validDataPointers = (sOut0 == mIn1) && (sOut1 == mIn0);
744 
745  BOOST_TEST(validDataPointers);
746 }
747 
748 BOOST_AUTO_TEST_CASE(CreateSplitterConcatFloat32)
749 {
750  RefCreateSplitterConcatWorkloadTest<RefSplitterWorkload, RefConcatWorkload, DataType::Float32>();
751 }
752 
753 BOOST_AUTO_TEST_CASE(CreateSplitterConcatFloat16)
754 {
755  RefCreateSplitterConcatWorkloadTest<RefSplitterWorkload, RefConcatWorkload, DataType::Float16>();
756 }
757 
758 BOOST_AUTO_TEST_CASE(CreateSplitterConcatUint8)
759 {
760  RefCreateSplitterConcatWorkloadTest<RefSplitterWorkload, RefConcatWorkload, DataType::QAsymmU8>();
761 }
762 
763 template <typename SplitterWorkloadType, typename ActivationWorkloadType, armnn::DataType DataType>
764 static void RefCreateSingleOutputMultipleInputsTest()
765 {
766  // Tests that it is possible to assign multiple (two) different layers to each of the outputs of a splitter layer.
767  // We created a splitter with two outputs. That each of those outputs is used by two different activation layers.
768 
769  Graph graph;
770  RefWorkloadFactory factory = GetFactory();
771  std::unique_ptr<SplitterWorkloadType> wlSplitter;
772  std::unique_ptr<ActivationWorkloadType> wlActiv0_0;
773  std::unique_ptr<ActivationWorkloadType> wlActiv0_1;
774  std::unique_ptr<ActivationWorkloadType> wlActiv1_0;
775  std::unique_ptr<ActivationWorkloadType> wlActiv1_1;
776 
777  CreateSplitterMultipleInputsOneOutputWorkloadTest<SplitterWorkloadType,
778  ActivationWorkloadType, DataType>(factory, graph, wlSplitter, wlActiv0_0, wlActiv0_1, wlActiv1_0, wlActiv1_1);
779 
780  armnn::RefTensorHandle* sOut0 = dynamic_cast<armnn::RefTensorHandle*>(wlSplitter->GetData().m_Outputs[0]);
781  armnn::RefTensorHandle* sOut1 = dynamic_cast<armnn::RefTensorHandle*>(wlSplitter->GetData().m_Outputs[1]);
782  armnn::RefTensorHandle* activ0_0Im = dynamic_cast<armnn::RefTensorHandle*>(wlActiv0_0->GetData().m_Inputs[0]);
783  armnn::RefTensorHandle* activ0_1Im = dynamic_cast<armnn::RefTensorHandle*>(wlActiv0_1->GetData().m_Inputs[0]);
784  armnn::RefTensorHandle* activ1_0Im = dynamic_cast<armnn::RefTensorHandle*>(wlActiv1_0->GetData().m_Inputs[0]);
785  armnn::RefTensorHandle* activ1_1Im = dynamic_cast<armnn::RefTensorHandle*>(wlActiv1_1->GetData().m_Inputs[0]);
786 
787 
788  BOOST_TEST(sOut0);
789  BOOST_TEST(sOut1);
790  BOOST_TEST(activ0_0Im);
791  BOOST_TEST(activ0_1Im);
792  BOOST_TEST(activ1_0Im);
793  BOOST_TEST(activ1_1Im);
794 
795  bool validDataPointers = (sOut0 == activ0_0Im) && (sOut0 == activ0_1Im) &&
796  (sOut1 == activ1_0Im) && (sOut1 == activ1_1Im);
797 
798  BOOST_TEST(validDataPointers);
799 }
800 
801 BOOST_AUTO_TEST_CASE(CreateSingleOutputMultipleInputsFloat32)
802 {
803  RefCreateSingleOutputMultipleInputsTest<RefSplitterWorkload, RefActivationWorkload,
805 }
806 
807 BOOST_AUTO_TEST_CASE(CreateSingleOutputMultipleInputsUint8)
808 {
809  RefCreateSingleOutputMultipleInputsTest<RefSplitterWorkload, RefActivationWorkload,
811 }
812 
813 template <typename ResizeBilinearWorkloadType, armnn::DataType DataType>
814 static void RefCreateResizeBilinearTest(DataLayout dataLayout)
815 {
816  Graph graph;
817  RefWorkloadFactory factory = GetFactory();
818  auto workload = CreateResizeBilinearWorkloadTest<ResizeBilinearWorkloadType, DataType>(factory, graph, dataLayout);
819 
820  TensorShape inputShape;
821  TensorShape outputShape;
822 
823  switch (dataLayout)
824  {
825  case DataLayout::NHWC:
826  inputShape = { 2, 4, 4, 3 };
827  outputShape = { 2, 2, 2, 3 };
828  break;
829  case DataLayout::NCHW:
830  default:
831  inputShape = { 2, 3, 4, 4 };
832  outputShape = { 2, 3, 2, 2 };
833  }
834 
835  // Checks that outputs and inputs are as we expect them (see definition of CreateResizeBilinearWorkloadTest).
836  CheckInputOutput(std::move(workload),
837  TensorInfo(inputShape, DataType),
838  TensorInfo(outputShape, DataType));
839 }
840 
841 BOOST_AUTO_TEST_CASE(CreateResizeBilinearFloat32)
842 {
843  RefCreateResizeBilinearTest<RefResizeWorkload, armnn::DataType::Float32>(DataLayout::NCHW);
844 }
845 
846 BOOST_AUTO_TEST_CASE(CreateResizeBilinearFloat16)
847 {
848  RefCreateResizeBilinearTest<RefResizeWorkload, armnn::DataType::Float16>(DataLayout::NCHW);
849 }
850 
851 BOOST_AUTO_TEST_CASE(CreateResizeBilinearUint8)
852 {
853  RefCreateResizeBilinearTest<RefResizeWorkload, armnn::DataType::QAsymmU8>(DataLayout::NCHW);
854 }
855 
856 BOOST_AUTO_TEST_CASE(CreateResizeBilinearQuantisedAsymm16)
857 {
858  RefCreateResizeBilinearTest<RefResizeWorkload, armnn::DataType::QSymmS16>(DataLayout::NCHW);
859 }
860 
861 BOOST_AUTO_TEST_CASE(CreateResizeBilinearFloat32Nhwc)
862 {
863  RefCreateResizeBilinearTest<RefResizeWorkload, armnn::DataType::Float32>(DataLayout::NHWC);
864 }
865 
866 template <typename BatchToSpaceNdWorkloadType, armnn::DataType DataType>
867 static void RefCreateBatchToSpaceNdTest()
868 {
869  Graph graph;
870  RefWorkloadFactory factory;
871 
872  auto workload = CreateBatchToSpaceNdWorkloadTest<BatchToSpaceNdWorkloadType, DataType>(factory, graph);
873 
874  CheckInputOutput(std::move(workload),
875  TensorInfo({ 1, 1, 1, 1 }, DataType),
876  TensorInfo({ 1, 1, 1, 1 }, DataType));
877 }
878 
879 BOOST_AUTO_TEST_CASE(CreateBatchToSpaceNdFloat32)
880 {
881  RefCreateBatchToSpaceNdTest<RefBatchToSpaceNdWorkload, armnn::DataType::Float32>();
882 }
883 
884 BOOST_AUTO_TEST_CASE(CreateBatchToSpaceNdFloat16)
885 {
886  RefCreateBatchToSpaceNdTest<RefBatchToSpaceNdWorkload, armnn::DataType::Float16>();
887 }
888 
889 BOOST_AUTO_TEST_CASE(CreateBatchToSpaceNdUint8)
890 {
891  RefCreateBatchToSpaceNdTest<RefBatchToSpaceNdWorkload, armnn::DataType::QAsymmU8>();
892 }
893 
894 BOOST_AUTO_TEST_CASE(CreateBatchToSpaceNdQSymm16)
895 {
896  RefCreateBatchToSpaceNdTest<RefBatchToSpaceNdWorkload, armnn::DataType::QSymmS16>();
897 }
898 
899 template <typename L2NormalizationWorkloadType, armnn::DataType DataType>
900 static void RefCreateL2NormalizationTest(DataLayout dataLayout)
901 {
902  Graph graph;
903  RefWorkloadFactory factory = GetFactory();
904  auto workload =
905  CreateL2NormalizationWorkloadTest<L2NormalizationWorkloadType, DataType>(factory, graph, dataLayout);
906 
907  TensorShape inputShape;
908  TensorShape outputShape;
909 
910  switch (dataLayout)
911  {
912  case DataLayout::NHWC:
913  inputShape = { 5, 50, 67, 20 };
914  outputShape = { 5, 50, 67, 20 };
915  break;
916  case DataLayout::NCHW:
917  default:
918  inputShape = { 5, 20, 50, 67 };
919  outputShape = { 5, 20, 50, 67 };
920  break;
921  }
922 
923  // Checks that outputs and inputs are as we expect them (see definition of CreateL2NormalizationWorkloadTest).
924  CheckInputOutput(std::move(workload), TensorInfo(inputShape, DataType), TensorInfo(outputShape, DataType));
925 }
926 
// L2Normalization: exercise RefCreateL2NormalizationTest for each supported
// data type in both NCHW and NHWC layouts.
BOOST_AUTO_TEST_CASE(CreateL2NormalizationFloat32)
{
    RefCreateL2NormalizationTest<RefL2NormalizationWorkload, armnn::DataType::Float32>(DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreateL2NormalizationFloat32Nhwc)
{
    RefCreateL2NormalizationTest<RefL2NormalizationWorkload, armnn::DataType::Float32>(DataLayout::NHWC);
}

BOOST_AUTO_TEST_CASE(CreateL2NormalizationInt16)
{
    RefCreateL2NormalizationTest<RefL2NormalizationWorkload, armnn::DataType::QSymmS16>(DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreateL2NormalizationInt16Nhwc)
{
    RefCreateL2NormalizationTest<RefL2NormalizationWorkload, armnn::DataType::QSymmS16>(DataLayout::NHWC);
}

BOOST_AUTO_TEST_CASE(CreateL2NormalizationUint8)
{
    RefCreateL2NormalizationTest<RefL2NormalizationWorkload, armnn::DataType::QAsymmU8>(DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreateL2NormalizationUint8Nhwc)
{
    RefCreateL2NormalizationTest<RefL2NormalizationWorkload, armnn::DataType::QAsymmU8>(DataLayout::NHWC);
}
956 
957 template <typename ReshapeWorkloadType, armnn::DataType DataType>
958 static void RefCreateReshapeWorkloadTest()
959 {
960  Graph graph;
961  RefWorkloadFactory factory = GetFactory();
962  auto workload = CreateReshapeWorkloadTest<ReshapeWorkloadType, DataType>(factory, graph);
963 
964  // Checks that outputs and inputs are as we expect them (see definition of CreateReshapeWorkloadTest).
965  CheckInputOutput(
966  std::move(workload),
967  TensorInfo({ 4, 1 }, DataType),
968  TensorInfo({ 1, 4 }, DataType));
969 }
970 
// Reshape: exercise RefCreateReshapeWorkloadTest for each supported data type.
BOOST_AUTO_TEST_CASE(CreateReshapeWorkloadFloat32)
{
    RefCreateReshapeWorkloadTest<RefReshapeWorkload, armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateReshapeWorkloadQuantisedAsymm8)
{
    RefCreateReshapeWorkloadTest<RefReshapeWorkload, armnn::DataType::QAsymmU8>();
}

BOOST_AUTO_TEST_CASE(CreateReshapeWorkloadQuantisedSymm16)
{
    RefCreateReshapeWorkloadTest<RefReshapeWorkload, armnn::DataType::QSymmS16>();
}
985 
986 template <typename ConcatWorkloadType, armnn::DataType DataType>
987 static void RefCreateConcatWorkloadTest(const armnn::TensorShape& outputShape,
988  unsigned int concatAxis)
989 {
990  Graph graph;
991  RefWorkloadFactory factory = GetFactory();
992  auto workload = CreateConcatWorkloadTest<ConcatWorkloadType, DataType>(factory, graph, outputShape, concatAxis);
993 
994  CheckInputsOutput(std::move(workload),
995  TensorInfo({ 2, 3, 2, 5 }, DataType),
996  TensorInfo({ 2, 3, 2, 5 }, DataType),
997  TensorInfo(outputShape, DataType));
998 }
999 
// Concat: exercise RefCreateConcatWorkloadTest along each axis. The expected
// output shape doubles the dimension that matches the concat axis
// (two { 2, 3, 2, 5 } inputs).
BOOST_AUTO_TEST_CASE(CreateConcatDim0Float32Workload)
{
    RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::Float32>({ 4, 3, 2, 5 }, 0);
}

BOOST_AUTO_TEST_CASE(CreateConcatDim0Float16Workload)
{
    RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::Float16>({ 4, 3, 2, 5 }, 0);
}

BOOST_AUTO_TEST_CASE(CreateConcatDim0Uint8Workload)
{
    RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::QAsymmU8>({ 4, 3, 2, 5 }, 0);
}

BOOST_AUTO_TEST_CASE(CreateConcatDim0Uint16Workload)
{
    RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::QSymmS16>({ 4, 3, 2, 5 }, 0);
}

BOOST_AUTO_TEST_CASE(CreateConcatDim1Float32Workload)
{
    RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::Float32>({ 2, 6, 2, 5 }, 1);
}

BOOST_AUTO_TEST_CASE(CreateConcatDim1Uint8Workload)
{
    RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::QAsymmU8>({ 2, 6, 2, 5 }, 1);
}

BOOST_AUTO_TEST_CASE(CreateConcatDim2Float32Workload)
{
    RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::Float32>({ 2, 3, 4, 5 }, 2);
}

BOOST_AUTO_TEST_CASE(CreateConcatDim2Uint8Workload)
{
    RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::QAsymmU8>({ 2, 3, 4, 5 }, 2);
}

BOOST_AUTO_TEST_CASE(CreateConcatDim3Float32Workload)
{
    RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::Float32>({ 2, 3, 2, 10 }, 3);
}

BOOST_AUTO_TEST_CASE(CreateConcatDim3Uint8Workload)
{
    RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::QAsymmU8>({ 2, 3, 2, 10 }, 3);
}
1049 
1050 template <typename ConstantWorkloadType, armnn::DataType DataType>
1051 static void RefCreateConstantWorkloadTest(const armnn::TensorShape& outputShape)
1052 {
1053  armnn::Graph graph;
1054  RefWorkloadFactory factory = GetFactory();
1055  auto workload = CreateConstantWorkloadTest<ConstantWorkloadType, DataType>(factory, graph, outputShape);
1056 
1057  // Check output is as expected
1058  auto queueDescriptor = workload->GetData();
1059  auto outputHandle = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor.m_Outputs[0]);
1060  BOOST_TEST((outputHandle->GetTensorInfo() == TensorInfo(outputShape, DataType)));
1061 }
1062 
// Constant: exercise RefCreateConstantWorkloadTest for each supported data type.
BOOST_AUTO_TEST_CASE(CreateConstantUint8Workload)
{
    RefCreateConstantWorkloadTest<RefConstantWorkload, armnn::DataType::QAsymmU8>({ 2, 3, 2, 10 });
}

BOOST_AUTO_TEST_CASE(CreateConstantInt16Workload)
{
    RefCreateConstantWorkloadTest<RefConstantWorkload, armnn::DataType::QSymmS16>({ 2, 3, 2, 10 });
}

BOOST_AUTO_TEST_CASE(CreateConstantFloat32Workload)
{
    RefCreateConstantWorkloadTest<RefConstantWorkload, armnn::DataType::Float32>({ 2, 3, 2, 10 });
}

BOOST_AUTO_TEST_CASE(CreateConstantSigned32Workload)
{
    RefCreateConstantWorkloadTest<RefConstantWorkload, armnn::DataType::Signed32>({ 2, 3, 2, 10 });
}
1082 
1083 static void RefCreatePreluWorkloadTest(const armnn::TensorShape& inputShape,
1084  const armnn::TensorShape& alphaShape,
1085  const armnn::TensorShape& outputShape,
1086  armnn::DataType dataType)
1087 {
1088  armnn::Graph graph;
1089  RefWorkloadFactory factory;
1090  auto workload = CreatePreluWorkloadTest<RefPreluWorkload>(factory,
1091  graph,
1092  inputShape,
1093  alphaShape,
1094  outputShape,
1095  dataType);
1096 
1097  // Check output is as expected
1098  auto queueDescriptor = workload->GetData();
1099  auto outputHandle = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor.m_Outputs[0]);
1100  BOOST_TEST((outputHandle->GetTensorInfo() == TensorInfo(outputShape, dataType)));
1101 }
1102 
// PReLU: broadcastable input/alpha shapes; verify workload creation succeeds
// for each supported data type.
BOOST_AUTO_TEST_CASE(CreatePreluFloat32Workload)
{
    RefCreatePreluWorkloadTest({ 1, 4, 1, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 }, armnn::DataType::Float32);
}

BOOST_AUTO_TEST_CASE(CreatePreluFloat16Workload)
{
    RefCreatePreluWorkloadTest({ 1, 4, 1, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 }, armnn::DataType::Float16);
}

BOOST_AUTO_TEST_CASE(CreatePreluUint8Workload)
{
    RefCreatePreluWorkloadTest({ 1, 4, 1, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 }, armnn::DataType::QAsymmU8);
}

BOOST_AUTO_TEST_CASE(CreatePreluInt16Workload)
{
    RefCreatePreluWorkloadTest({ 1, 4, 1, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 }, armnn::DataType::QSymmS16);
}
1122 
1123 BOOST_AUTO_TEST_CASE(CreatePreluFloat32NoBroadcastWorkload)
1124 {
1125  BOOST_CHECK_THROW(RefCreatePreluWorkloadTest({ 1, 4, 7, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 },
1128 }
1129 
1130 BOOST_AUTO_TEST_CASE(CreatePreluFloat16NoBroadcastWorkload)
1131 {
1132  BOOST_CHECK_THROW(RefCreatePreluWorkloadTest({ 1, 4, 7, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 },
1135 }
1136 
1137 BOOST_AUTO_TEST_CASE(CreatePreluUint8NoBroadcastWorkload)
1138 {
1139  BOOST_CHECK_THROW(RefCreatePreluWorkloadTest({ 1, 4, 7, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 },
1142 }
1143 
1144 BOOST_AUTO_TEST_CASE(CreatePreluInt16NoBroadcastWorkload)
1145 {
1146  BOOST_CHECK_THROW(RefCreatePreluWorkloadTest({ 1, 4, 7, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 },
1149 }
1150 
1151 template <typename SpaceToDepthWorkloadType, armnn::DataType DataType>
1152 static void RefCreateSpaceToDepthWorkloadTest()
1153 {
1154  Graph graph;
1155  RefWorkloadFactory factory;
1156 
1157  auto workload = CreateSpaceToDepthWorkloadTest<SpaceToDepthWorkloadType, DataType>(factory, graph);
1158 
1159  CheckInputOutput(std::move(workload),
1160  TensorInfo({ 1, 2, 2, 1 }, DataType),
1161  TensorInfo({ 1, 1, 1, 4 }, DataType));
1162 }
1163 
// SpaceToDepth: exercise RefCreateSpaceToDepthWorkloadTest for each supported
// data type.
BOOST_AUTO_TEST_CASE(CreateSpaceToDepthWorkloadFloat32)
{
    RefCreateSpaceToDepthWorkloadTest<RefSpaceToDepthWorkload, armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateSpaceToDepthWorkloadFloat16)
{
    RefCreateSpaceToDepthWorkloadTest<RefSpaceToDepthWorkload, armnn::DataType::Float16>();
}

BOOST_AUTO_TEST_CASE(CreateSpaceToDepthWorkloadQASymm8)
{
    RefCreateSpaceToDepthWorkloadTest<RefSpaceToDepthWorkload, armnn::DataType::QAsymmU8>();
}

BOOST_AUTO_TEST_CASE(CreateSpaceToDepthWorkloadQSymm16)
{
    RefCreateSpaceToDepthWorkloadTest<RefSpaceToDepthWorkload, armnn::DataType::QSymmS16>();
}
1183 
1184 template <armnn::DataType DataType>
1185 static void RefCreateStackWorkloadTest(const armnn::TensorShape& inputShape,
1186  const armnn::TensorShape& outputShape,
1187  unsigned int axis,
1188  unsigned int numInputs)
1189 {
1190  armnn::Graph graph;
1191  RefWorkloadFactory factory;
1192  auto workload = CreateStackWorkloadTest<RefStackWorkload, DataType>(factory,
1193  graph,
1194  inputShape,
1195  outputShape,
1196  axis,
1197  numInputs);
1198 
1199  // Check inputs and output are as expected
1200  StackQueueDescriptor queueDescriptor = workload->GetData();
1201  for (unsigned int i = 0; i < numInputs; ++i)
1202  {
1203  auto inputHandle = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor.m_Inputs[i]);
1204  BOOST_TEST((inputHandle->GetTensorInfo() == TensorInfo(inputShape, DataType)));
1205  }
1206  auto outputHandle = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor.m_Outputs[0]);
1207  BOOST_TEST((outputHandle->GetTensorInfo() == TensorInfo(outputShape, DataType)));
1208 }
1209 
// Stack: two { 3, 4, 5 } inputs stacked along axis 2 to produce { 3, 4, 2, 5 },
// for each supported data type.
BOOST_AUTO_TEST_CASE(CreateStackFloat32Workload)
{
    RefCreateStackWorkloadTest<armnn::DataType::Float32>({ 3, 4, 5 }, { 3, 4, 2, 5 }, 2, 2);
}

BOOST_AUTO_TEST_CASE(CreateStackUint8Workload)
{
    RefCreateStackWorkloadTest<armnn::DataType::QAsymmU8>({ 3, 4, 5 }, { 3, 4, 2, 5 }, 2, 2);
}

BOOST_AUTO_TEST_CASE(CreateStackUint16Workload)
{
    RefCreateStackWorkloadTest<armnn::DataType::QSymmS16>({ 3, 4, 5 }, { 3, 4, 2, 5 }, 2, 2);
}
1224 
1225 template <typename QLstmWorkloadType>
1226 static void RefCreateQLstmWorkloadTest()
1227 {
1228  Graph graph;
1229  RefWorkloadFactory factory;
1230 
1231  auto workload = CreateQLstmWorkloadTest<QLstmWorkloadType>(factory, graph);
1232 
1233  armnn::TensorInfo inputInfo({2 , 4}, armnn::DataType::QAsymmS8, 0.0078125f, 0);
1234 
1235  armnn::TensorInfo cellStateInfo({2 , 4}, armnn::DataType::QSymmS16, 3.05176e-05f, 0);
1236 
1237  armnn::TensorInfo outputInfo({2 , 4}, armnn::DataType::QAsymmS8, 0.007f, 0);
1238 
1239  QLstmQueueDescriptor queueDescriptor = workload->GetData();
1240  auto inputHandle = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor.m_Inputs[0]);
1241  auto cellStateOutHandle = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor.m_Outputs[1]);
1242  auto outputHandle = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor.m_Outputs[2]);
1243 
1244  BOOST_TEST((inputHandle->GetTensorInfo() == inputInfo));
1245  BOOST_TEST((cellStateOutHandle->GetTensorInfo() == cellStateInfo));
1246  BOOST_TEST((outputHandle->GetTensorInfo() == outputInfo));
1247 }
1248 
// QLstm: single creation test covering the fixed quantised configuration.
BOOST_AUTO_TEST_CASE(CreateQLstmWorkload)
{
    RefCreateQLstmWorkloadTest<RefQLstmWorkload>();
}
1253 
BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
DataLayout
Definition: Types.hpp:50
DataType
Definition: Types.hpp:32
This layer represents an addition operation.
BOOST_AUTO_TEST_SUITE_END()
This layer represents a subtraction operation.
This layer represents a division operation.
BOOST_AUTO_TEST_CASE(CreateActivationFloat32Workload)
void SetQuantizationOffset(int32_t offset)
Definition: Tensor.cpp:480
This layer represents a multiplication operation.