ArmNN
 20.08
RefCreateWorkloadTests.cpp
Go to the documentation of this file.
1 //
2 // Copyright © 2017 Arm Ltd. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
7 
12 
13 namespace
14 {
15 
16 template<typename Workload>
17 void CheckInputOutput(std::unique_ptr<Workload> workload, const TensorInfo& inputInfo, const TensorInfo& outputInfo)
18 {
19  auto queueDescriptor = workload->GetData();
20  auto inputHandle = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor.m_Inputs[0]);
21  auto outputHandle = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor.m_Outputs[0]);
22  BOOST_TEST((inputHandle->GetTensorInfo() == inputInfo));
23  BOOST_TEST((outputHandle->GetTensorInfo() == outputInfo));
24 }
25 
26 template <typename Workload>
27 void CheckInputsOutput(std::unique_ptr<Workload> workload,
28  const TensorInfo& inputInfo0,
29  const TensorInfo& inputInfo1,
30  const TensorInfo& outputInfo)
31 {
32  auto queueDescriptor = workload->GetData();
33  auto inputHandle0 = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor.m_Inputs[0]);
34  auto inputHandle1 = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor.m_Inputs[1]);
35  auto outputHandle = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor.m_Outputs[0]);
36  BOOST_TEST((inputHandle0->GetTensorInfo() == inputInfo0));
37  BOOST_TEST((inputHandle1->GetTensorInfo() == inputInfo1));
38  BOOST_TEST((outputHandle->GetTensorInfo() == outputInfo));
39 }
40 
41 armnn::RefWorkloadFactory GetFactory()
42 {
43  std::shared_ptr<RefMemoryManager> memoryManager = std::make_shared<RefMemoryManager>();
44  return RefWorkloadFactory(memoryManager);
45 }
46 
47 
48 }
49 
50 BOOST_AUTO_TEST_SUITE(CreateWorkloadRef)
51 
52 template <typename ActivationWorkloadType, armnn::DataType DataType>
53 static void RefCreateActivationWorkloadTest()
54 {
55  Graph graph;
56  RefWorkloadFactory factory = GetFactory();
57  auto workload = CreateActivationWorkloadTest<ActivationWorkloadType, DataType>(factory, graph);
58 
59  // Checks that outputs are as we expect them (see definition of CreateActivationWorkloadTest).
60  CheckInputOutput(std::move(workload),
61  TensorInfo({ 1, 1 }, DataType),
62  TensorInfo({ 1, 1 }, DataType));
63 }
64 
// Activation workload creation for Float32 and QAsymmU8 element types.
BOOST_AUTO_TEST_CASE(CreateActivationFloat32Workload)
{
    RefCreateActivationWorkloadTest<RefActivationWorkload, armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateActivationUint8Workload)
{
    RefCreateActivationWorkloadTest<RefActivationWorkload, armnn::DataType::QAsymmU8>();
}
74 
75 template <typename WorkloadType,
76  typename DescriptorType,
77  typename LayerType,
79 static void RefCreateElementwiseWorkloadTest()
80 {
81  Graph graph;
82  RefWorkloadFactory factory = GetFactory();
83  auto workload = CreateElementwiseWorkloadTest<WorkloadType, DescriptorType, LayerType, DataType>(
84  factory, graph);
85 
86  CheckInputsOutput(std::move(workload),
87  TensorInfo({ 2, 3 }, DataType),
88  TensorInfo({ 2, 3 }, DataType),
89  TensorInfo({ 2, 3 }, DataType));
90 }
91 
92 BOOST_AUTO_TEST_CASE(CreateAdditionFloatWorkload)
93 {
94  RefCreateElementwiseWorkloadTest<RefAdditionWorkload<>,
98 }
99 
100 BOOST_AUTO_TEST_CASE(CreateAdditionUint8Workload)
101 {
102  RefCreateElementwiseWorkloadTest<RefAdditionWorkload<>,
106 }
107 
108 BOOST_AUTO_TEST_CASE(CreateAdditionInt16Workload)
109 {
110  RefCreateElementwiseWorkloadTest<RefAdditionWorkload<>,
114 }
115 
116 BOOST_AUTO_TEST_CASE(CreateAdditionInt32Workload)
117 {
118  RefCreateElementwiseWorkloadTest<RefAdditionWorkload<int32_t>,
122 }
123 
124 BOOST_AUTO_TEST_CASE(CreateSubtractionFloat32Workload)
125 {
126  RefCreateElementwiseWorkloadTest<RefSubtractionWorkload<>,
130 }
131 
132 BOOST_AUTO_TEST_CASE(CreateSubtractionFloat16Workload)
133 {
134  RefCreateElementwiseWorkloadTest<RefSubtractionWorkload<>,
138 }
139 
140 BOOST_AUTO_TEST_CASE(CreateSubtractionUint8Workload)
141 {
142  RefCreateElementwiseWorkloadTest<RefSubtractionWorkload<>,
146 }
147 
148 BOOST_AUTO_TEST_CASE(CreateSubtractionInt16Workload)
149 {
150  RefCreateElementwiseWorkloadTest<RefSubtractionWorkload<>,
154 }
155 
156 BOOST_AUTO_TEST_CASE(CreateSubtractionInt32Workload)
157 {
158  RefCreateElementwiseWorkloadTest<RefSubtractionWorkload<int32_t>,
162 }
163 
164 BOOST_AUTO_TEST_CASE(CreateMultiplicationFloatWorkload)
165 {
166  RefCreateElementwiseWorkloadTest<RefMultiplicationWorkload<>,
170 }
171 
172 BOOST_AUTO_TEST_CASE(CreateMultiplicationUint8Workload)
173 {
174  RefCreateElementwiseWorkloadTest<RefMultiplicationWorkload<>,
178 }
179 
180 BOOST_AUTO_TEST_CASE(CreateMultiplicationInt16Workload)
181 {
182  RefCreateElementwiseWorkloadTest<RefMultiplicationWorkload<>,
186 }
187 
188 BOOST_AUTO_TEST_CASE(CreateMultiplicationInt32Workload)
189 {
190  RefCreateElementwiseWorkloadTest<RefMultiplicationWorkload<int32_t>,
194 }
195 
196 BOOST_AUTO_TEST_CASE(CreateDivisionFloat32Workload)
197 {
198  RefCreateElementwiseWorkloadTest<RefDivisionWorkload<>,
202 }
203 
204 BOOST_AUTO_TEST_CASE(CreateDivisionFloat16Workload)
205 {
206  RefCreateElementwiseWorkloadTest<RefDivisionWorkload<>,
210 }
211 
212 BOOST_AUTO_TEST_CASE(CreateDivisionUint8Workload)
213 {
214  RefCreateElementwiseWorkloadTest<RefDivisionWorkload<>,
218 }
219 
220 BOOST_AUTO_TEST_CASE(CreateDivisionInt16Workload)
221 {
222  RefCreateElementwiseWorkloadTest<RefDivisionWorkload<>,
226 }
227 
228 BOOST_AUTO_TEST_CASE(CreateDivisionInt32Workload)
229 {
230  RefCreateElementwiseWorkloadTest<RefDivisionWorkload<int32_t>,
234 }
235 
236 template <typename BatchNormalizationWorkloadType, armnn::DataType DataType>
237 static void RefCreateBatchNormalizationWorkloadTest(DataLayout dataLayout)
238 {
239  Graph graph;
240  RefWorkloadFactory factory = GetFactory();
241  auto workload = CreateBatchNormalizationWorkloadTest<BatchNormalizationWorkloadType, DataType>(factory,
242  graph,
243  dataLayout);
244 
245  TensorShape inputShape;
246  TensorShape outputShape;
247 
248  switch (dataLayout)
249  {
250  case DataLayout::NHWC:
251  inputShape = { 2, 4, 4, 3 };
252  outputShape = { 2, 4, 4, 3 };
253  break;
254  case DataLayout::NCHW:
255  default:
256  inputShape = { 2, 3, 4, 4 };
257  outputShape = { 2, 3, 4, 4 };
258  break;
259  }
260 
261  // Checks that outputs and inputs are as we expect them (see definition of CreateBatchNormalizationWorkloadTest).
262  CheckInputOutput(std::move(workload), TensorInfo(inputShape, DataType), TensorInfo(outputShape, DataType));
263 }
264 
// Batch-normalization workload creation across element types
// (Float32 / Float16 / QAsymmU8 / QSymmS16) and both data layouts.
BOOST_AUTO_TEST_CASE(CreateBatchNormalizationFloat32Workload)
{
    RefCreateBatchNormalizationWorkloadTest<RefBatchNormalizationWorkload,armnn::DataType::Float32>
        (DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreateBatchNormalizationFloat32WorkloadNhwc)
{
    RefCreateBatchNormalizationWorkloadTest<RefBatchNormalizationWorkload, armnn::DataType::Float32>
        (DataLayout::NHWC);
}

BOOST_AUTO_TEST_CASE(CreateBatchNormalizationFloat16Workload)
{
    RefCreateBatchNormalizationWorkloadTest<RefBatchNormalizationWorkload,armnn::DataType::Float16>
        (DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreateBatchNormalizationFloat16WorkloadNhwc)
{
    RefCreateBatchNormalizationWorkloadTest<RefBatchNormalizationWorkload, armnn::DataType::Float16>
        (DataLayout::NHWC);
}

BOOST_AUTO_TEST_CASE(CreateBatchNormalizationUint8Workload)
{
    RefCreateBatchNormalizationWorkloadTest<RefBatchNormalizationWorkload, armnn::DataType::QAsymmU8>
        (DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreateBatchNormalizationUint8WorkloadNhwc)
{
    RefCreateBatchNormalizationWorkloadTest<RefBatchNormalizationWorkload, armnn::DataType::QAsymmU8>
        (DataLayout::NHWC);
}

BOOST_AUTO_TEST_CASE(CreateBatchNormalizationInt16Workload)
{
    RefCreateBatchNormalizationWorkloadTest<RefBatchNormalizationWorkload, armnn::DataType::QSymmS16>
        (DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreateBatchNormalizationInt16WorkloadNhwc)
{
    RefCreateBatchNormalizationWorkloadTest<RefBatchNormalizationWorkload, armnn::DataType::QSymmS16>
        (DataLayout::NHWC);
}
312 
// Float16 <-> Float32 conversion workloads: same { 1, 3, 2, 3 } shape on both
// sides, only the element type changes across the workload.
BOOST_AUTO_TEST_CASE(CreateConvertFp16ToFp32Float32Workload)
{
    Graph graph;
    RefWorkloadFactory factory = GetFactory();
    auto workload = CreateConvertFp16ToFp32WorkloadTest<RefConvertFp16ToFp32Workload>(factory, graph);

    // Checks that outputs and inputs are as we expect them: F16 in, F32 out.
    CheckInputOutput(
        std::move(workload), TensorInfo({1, 3, 2, 3}, DataType::Float16), TensorInfo({1, 3, 2, 3}, DataType::Float32));
}

BOOST_AUTO_TEST_CASE(CreateConvertFp32ToFp16Float16Workload)
{
    Graph graph;
    RefWorkloadFactory factory = GetFactory();
    auto workload = CreateConvertFp32ToFp16WorkloadTest<RefConvertFp32ToFp16Workload>(factory, graph);

    // Checks that outputs and inputs are as we expect them: F32 in, F16 out.
    CheckInputOutput(
        std::move(workload), TensorInfo({1, 3, 2, 3}, DataType::Float32), TensorInfo({1, 3, 2, 3}, DataType::Float16));
}
334 
335 static void RefCreateConvolution2dWorkloadTest(DataLayout dataLayout = DataLayout::NCHW)
336 {
337  Graph graph;
338  RefWorkloadFactory factory = GetFactory();
339  auto workload = CreateConvolution2dWorkloadTest<RefConvolution2dWorkload, DataType::Float32>
340  (factory, graph, dataLayout);
341 
342  TensorShape inputShape = (dataLayout == DataLayout::NCHW) ? std::initializer_list<unsigned int>({2, 3, 8, 16})
343  : std::initializer_list<unsigned int>({2, 8, 16, 3});
344  TensorShape outputShape = (dataLayout == DataLayout::NCHW) ? std::initializer_list<unsigned int>({2, 2, 2, 10})
345  : std::initializer_list<unsigned int>({2, 2, 10, 2});
346 
347  // Checks that outputs and inputs are as we expect them (see definition of CreateConvolution2dWorkloadTest).
348  CheckInputOutput(std::move(workload),
349  TensorInfo(inputShape, DataType::Float32),
350  TensorInfo(outputShape, DataType::Float32));
351 }
352 
// Float32 Convolution2d workload creation for both data layouts.
BOOST_AUTO_TEST_CASE(CreateConvolution2dFloatNchwWorkload)
{
    RefCreateConvolution2dWorkloadTest(DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreateConvolution2dFloatNhwcWorkload)
{
    RefCreateConvolution2dWorkloadTest(DataLayout::NHWC);
}
362 
363 static void RefCreateDepthwiseConvolutionWorkloadTest(DataLayout dataLayout)
364 {
365  Graph graph;
366  RefWorkloadFactory factory = GetFactory();
367  auto workload = CreateDepthwiseConvolution2dWorkloadTest<RefDepthwiseConvolution2dWorkload, DataType::Float32>
368  (factory, graph, dataLayout);
369 
370  TensorShape inputShape = (dataLayout == DataLayout::NCHW) ? std::initializer_list<unsigned int>({ 2, 2, 5, 5 })
371  : std::initializer_list<unsigned int>({ 2, 5, 5, 2 });
372  TensorShape outputShape = (dataLayout == DataLayout::NCHW) ? std::initializer_list<unsigned int>({ 2, 2, 5, 5 })
373  : std::initializer_list<unsigned int>({ 2, 5, 5, 2 });
374 
375  // Checks that inputs/outputs are as we expect them (see definition of CreateDepthwiseConvolution2dWorkloadTest).
376  CheckInputOutput(std::move(workload),
377  TensorInfo(inputShape, DataType::Float32),
378  TensorInfo(outputShape, DataType::Float32));
379 }
380 
// Float32 depthwise convolution workload creation (NHWC layout only).
BOOST_AUTO_TEST_CASE(CreateDepthwiseConvolutionFloat32NhwcWorkload)
{
    RefCreateDepthwiseConvolutionWorkloadTest(DataLayout::NHWC);
}
385 
386 template <typename FullyConnectedWorkloadType, armnn::DataType DataType>
387 static void RefCreateFullyConnectedWorkloadTest()
388 {
389  Graph graph;
390  RefWorkloadFactory factory = GetFactory();
391  auto workload = CreateFullyConnectedWorkloadTest<FullyConnectedWorkloadType, DataType>(factory, graph);
392 
393  // Checks that outputs and inputs are as we expect them (see definition of CreateFullyConnectedWorkloadTest).
394  float inputsQScale = DataType == armnn::DataType::QAsymmU8 ? 1.0f : 0.0;
395  float outputQScale = DataType == armnn::DataType::QAsymmU8 ? 2.0f : 0.0;
396  CheckInputOutput(std::move(workload),
397  TensorInfo({ 3, 1, 4, 5 }, DataType, inputsQScale),
398  TensorInfo({ 3, 7 }, DataType, outputQScale));
399 }
400 
// Fully-connected workload creation across element types.
BOOST_AUTO_TEST_CASE(CreateFullyConnectedWorkloadFloat32)
{
    RefCreateFullyConnectedWorkloadTest<RefFullyConnectedWorkload, armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateFullyConnectedWorkloadQuantisedAsymm8)
{
    RefCreateFullyConnectedWorkloadTest<RefFullyConnectedWorkload, armnn::DataType::QAsymmU8>();
}

BOOST_AUTO_TEST_CASE(CreateFullyConnectedWorkloadQuantisedSymm16)
{
    RefCreateFullyConnectedWorkloadTest<RefFullyConnectedWorkload, armnn::DataType::QSymmS16>();
}
415 
// Creates a normalization workload for the given data layout and checks its
// input/output tensor infos.
template <typename NormalizationWorkloadType, armnn::DataType DataType>
static void RefCreateNormalizationWorkloadTest(DataLayout dataLayout)
{
    Graph graph;
    RefWorkloadFactory factory = GetFactory();
    auto workload = CreateNormalizationWorkloadTest<NormalizationWorkloadType, DataType>(factory, graph, dataLayout);

    TensorShape inputShape;
    TensorShape outputShape;

    switch (dataLayout)
    {
        // NOTE(review): NHWC is given { 3, 1, 5, 5 } and NCHW { 3, 5, 5, 1 },
        // which looks swapped relative to the usual channel-placement
        // conventions — these shapes must mirror whatever
        // CreateNormalizationWorkloadTest builds; confirm against that helper.
        case DataLayout::NHWC:
            inputShape  = { 3, 1, 5, 5 };
            outputShape = { 3, 1, 5, 5 };
            break;
        case DataLayout::NCHW:
        default:
            inputShape  = { 3, 5, 5, 1 };
            outputShape = { 3, 5, 5, 1 };
            break;
    }

    // Checks that outputs and inputs are as we expect them (see definition of CreateNormalizationWorkloadTest).
    CheckInputOutput(std::move(workload), TensorInfo(inputShape, DataType), TensorInfo(outputShape, DataType));
}
442 
// Normalization workload creation across element types and both data layouts.
BOOST_AUTO_TEST_CASE(CreateRefNormalizationFloat32NchwWorkload)
{
    RefCreateNormalizationWorkloadTest<RefNormalizationWorkload, armnn::DataType::Float32>(DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreateRefNormalizationFloat32NhwcWorkload)
{
    RefCreateNormalizationWorkloadTest<RefNormalizationWorkload, armnn::DataType::Float32>(DataLayout::NHWC);
}

BOOST_AUTO_TEST_CASE(CreateRefNormalizationUint8NchwWorkload)
{
    RefCreateNormalizationWorkloadTest<RefNormalizationWorkload, armnn::DataType::QAsymmU8>(DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreateRefNormalizationUint8NhwcWorkload)
{
    RefCreateNormalizationWorkloadTest<RefNormalizationWorkload, armnn::DataType::QAsymmU8>(DataLayout::NHWC);
}

BOOST_AUTO_TEST_CASE(CreateRefNormalizationInt16NchwWorkload)
{
    RefCreateNormalizationWorkloadTest<RefNormalizationWorkload, armnn::DataType::QSymmS16>(DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreateRefNormalizationInt16NhwcWorkload)
{
    RefCreateNormalizationWorkloadTest<RefNormalizationWorkload, armnn::DataType::QSymmS16>(DataLayout::NHWC);
}
472 
473 template <typename Pooling2dWorkloadType, armnn::DataType DataType>
474 static void RefCreatePooling2dWorkloadTest(DataLayout dataLayout)
475 {
476  Graph graph;
477  RefWorkloadFactory factory = GetFactory();
478  auto workload = CreatePooling2dWorkloadTest<Pooling2dWorkloadType, DataType>(factory, graph, dataLayout);
479 
480  TensorShape inputShape;
481  TensorShape outputShape;
482 
483  switch (dataLayout)
484  {
485  case DataLayout::NHWC:
486  inputShape = { 3, 5, 5, 2 };
487  outputShape = { 3, 2, 4, 2 };
488  break;
489  case DataLayout::NCHW:
490  default:
491  inputShape = { 3, 2, 5, 5 };
492  outputShape = { 3, 2, 2, 4 };
493  }
494 
495  // Checks that outputs and inputs are as we expect them (see definition of CreatePooling2dWorkloadTest).
496  CheckInputOutput(std::move(workload),
497  TensorInfo(inputShape, DataType),
498  TensorInfo(outputShape, DataType));
499 }
500 
// Pooling2d workload creation across element types and both data layouts.
BOOST_AUTO_TEST_CASE(CreatePooling2dFloat32Workload)
{
    RefCreatePooling2dWorkloadTest<RefPooling2dWorkload, armnn::DataType::Float32>(DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreatePooling2dFloat32NhwcWorkload)
{
    RefCreatePooling2dWorkloadTest<RefPooling2dWorkload, armnn::DataType::Float32>(DataLayout::NHWC);
}

BOOST_AUTO_TEST_CASE(CreatePooling2dUint8Workload)
{
    RefCreatePooling2dWorkloadTest<RefPooling2dWorkload, armnn::DataType::QAsymmU8>(DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreatePooling2dUint8NhwcWorkload)
{
    RefCreatePooling2dWorkloadTest<RefPooling2dWorkload, armnn::DataType::QAsymmU8>(DataLayout::NHWC);
}

BOOST_AUTO_TEST_CASE(CreatePooling2dInt16Workload)
{
    RefCreatePooling2dWorkloadTest<RefPooling2dWorkload, armnn::DataType::QSymmS16>(DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreatePooling2dInt16NhwcWorkload)
{
    RefCreatePooling2dWorkloadTest<RefPooling2dWorkload, armnn::DataType::QSymmS16>(DataLayout::NHWC);
}
530 
531 template <typename SoftmaxWorkloadType, armnn::DataType DataType>
532 static void RefCreateSoftmaxWorkloadTest()
533 {
534  Graph graph;
535  RefWorkloadFactory factory = GetFactory();
536  auto workload = CreateSoftmaxWorkloadTest<SoftmaxWorkloadType, DataType>(factory, graph);
537 
538  // Checks that outputs and inputs are as we expect them (see definition of CreateSoftmaxWorkloadTest).
539 
540  armnn::TensorInfo tensorInfo({4, 1}, DataType);
542  {
543  tensorInfo.SetQuantizationOffset(0);
544  tensorInfo.SetQuantizationScale(1.f / 256);
545  }
547  {
548  tensorInfo.SetQuantizationOffset(-128);
549  tensorInfo.SetQuantizationScale(1.f / 256);
550  }
551  CheckInputOutput(
552  std::move(workload),
553  tensorInfo,
554  tensorInfo);
555 }
556 
// Softmax workload creation across element types.
BOOST_AUTO_TEST_CASE(CreateSoftmaxFloat32Workload)
{
    RefCreateSoftmaxWorkloadTest<RefSoftmaxWorkload, armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateSoftmaxFloat16Workload)
{
    RefCreateSoftmaxWorkloadTest<RefSoftmaxWorkload, armnn::DataType::Float16>();
}

BOOST_AUTO_TEST_CASE(CreateSoftmaxQuantisedAsymm8Workload)
{
    RefCreateSoftmaxWorkloadTest<RefSoftmaxWorkload, armnn::DataType::QAsymmU8>();
}

BOOST_AUTO_TEST_CASE(CreateSoftmaxQuantisedSymm16Workload)
{
    RefCreateSoftmaxWorkloadTest<RefSoftmaxWorkload, armnn::DataType::QSymmS16>();
}
576 
577 template <typename SplitterWorkloadType, armnn::DataType DataType>
578 static void RefCreateSplitterWorkloadTest()
579 {
580  Graph graph;
581  RefWorkloadFactory factory = GetFactory();
582  auto workload = CreateSplitterWorkloadTest<SplitterWorkloadType, DataType>(factory, graph);
583 
584  // Checks that outputs are as we expect them (see definition of CreateSplitterWorkloadTest).
585  SplitterQueueDescriptor queueDescriptor = workload->GetData();
586  auto inputHandle = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor.m_Inputs[0]);
587  BOOST_TEST((inputHandle->GetTensorInfo() == TensorInfo({ 5, 7, 7 }, DataType)));
588 
589  auto outputHandle0 = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor.m_Outputs[0]);
590  BOOST_TEST((outputHandle0->GetTensorInfo() == TensorInfo({ 1, 7, 7 }, DataType)));
591 
592  auto outputHandle1 = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor.m_Outputs[1]);
593  BOOST_TEST((outputHandle1->GetTensorInfo() == TensorInfo({ 2, 7, 7 }, DataType)));
594 
595  auto outputHandle2 = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor.m_Outputs[2]);
596  BOOST_TEST((outputHandle2->GetTensorInfo() == TensorInfo({ 2, 7, 7 }, DataType)));
597 }
598 
// Splitter workload creation across element types.
BOOST_AUTO_TEST_CASE(CreateSplitterFloat32Workload)
{
    RefCreateSplitterWorkloadTest<RefSplitterWorkload, armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateSplitterFloat16Workload)
{
    RefCreateSplitterWorkloadTest<RefSplitterWorkload, armnn::DataType::Float16>();
}

BOOST_AUTO_TEST_CASE(CreateSplitterUint8Workload)
{
    RefCreateSplitterWorkloadTest<RefSplitterWorkload, armnn::DataType::QAsymmU8>();
}
613 
template <typename SplitterWorkloadType, typename ConcatWorkloadType, armnn::DataType DataType>
static void RefCreateSplitterConcatWorkloadTest()
{
    // Tests that it is possible to decide which output of the splitter layer
    // should be linked to which input of the concat layer.
    // We test that it is possible to specify the 0th output of the splitter
    // to be the 1st input to the concat, and the 1st output of the splitter
    // to be the 0th input of the concat.

    Graph graph;
    RefWorkloadFactory factory = GetFactory();
    auto workloads = CreateSplitterConcatWorkloadTest<SplitterWorkloadType, ConcatWorkloadType, DataType>
        (factory, graph);

    auto wlSplitter = std::move(workloads.first);
    auto wlConcat = std::move(workloads.second);

    // Checks that the index of inputs/outputs matches what we declared on InputDescriptor construction.
    armnn::RefTensorHandle* sOut0 = dynamic_cast<armnn::RefTensorHandle*>(wlSplitter->GetData().m_Outputs[0]);
    armnn::RefTensorHandle* sOut1 = dynamic_cast<armnn::RefTensorHandle*>(wlSplitter->GetData().m_Outputs[1]);
    armnn::RefTensorHandle* mIn0 = dynamic_cast<armnn::RefTensorHandle*>(wlConcat->GetData().m_Inputs[0]);
    armnn::RefTensorHandle* mIn1 = dynamic_cast<armnn::RefTensorHandle*>(wlConcat->GetData().m_Inputs[1]);

    // The dynamic_casts double as type checks: each handle must be a RefTensorHandle.
    BOOST_TEST(sOut0);
    BOOST_TEST(sOut1);
    BOOST_TEST(mIn0);
    BOOST_TEST(mIn1);

    // Crossed wiring: splitter output 0 feeds concat input 1 and vice versa.
    bool validDataPointers = (sOut0 == mIn1) && (sOut1 == mIn0);

    BOOST_TEST(validDataPointers);
}
646 
// Splitter-to-concat wiring test across element types.
BOOST_AUTO_TEST_CASE(CreateSplitterConcatFloat32)
{
    RefCreateSplitterConcatWorkloadTest<RefSplitterWorkload, RefConcatWorkload, DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateSplitterConcatFloat16)
{
    RefCreateSplitterConcatWorkloadTest<RefSplitterWorkload, RefConcatWorkload, DataType::Float16>();
}

BOOST_AUTO_TEST_CASE(CreateSplitterConcatUint8)
{
    RefCreateSplitterConcatWorkloadTest<RefSplitterWorkload, RefConcatWorkload, DataType::QAsymmU8>();
}
661 
template <typename SplitterWorkloadType, typename ActivationWorkloadType, armnn::DataType DataType>
static void RefCreateSingleOutputMultipleInputsTest()
{
    // Tests that it is possible to assign multiple (two) different layers to each of the outputs of a splitter layer.
    // We create a splitter with two outputs; each of those outputs is used by two different activation layers.

    Graph graph;
    RefWorkloadFactory factory = GetFactory();
    std::unique_ptr<SplitterWorkloadType> wlSplitter;
    std::unique_ptr<ActivationWorkloadType> wlActiv0_0;
    std::unique_ptr<ActivationWorkloadType> wlActiv0_1;
    std::unique_ptr<ActivationWorkloadType> wlActiv1_0;
    std::unique_ptr<ActivationWorkloadType> wlActiv1_1;

    // Populates the workload pointers above (splitter plus four activations).
    CreateSplitterMultipleInputsOneOutputWorkloadTest<SplitterWorkloadType,
        ActivationWorkloadType, DataType>(factory, graph, wlSplitter, wlActiv0_0, wlActiv0_1, wlActiv1_0, wlActiv1_1);

    // The dynamic_casts double as type checks: each handle must be a RefTensorHandle.
    armnn::RefTensorHandle* sOut0 = dynamic_cast<armnn::RefTensorHandle*>(wlSplitter->GetData().m_Outputs[0]);
    armnn::RefTensorHandle* sOut1 = dynamic_cast<armnn::RefTensorHandle*>(wlSplitter->GetData().m_Outputs[1]);
    armnn::RefTensorHandle* activ0_0Im = dynamic_cast<armnn::RefTensorHandle*>(wlActiv0_0->GetData().m_Inputs[0]);
    armnn::RefTensorHandle* activ0_1Im = dynamic_cast<armnn::RefTensorHandle*>(wlActiv0_1->GetData().m_Inputs[0]);
    armnn::RefTensorHandle* activ1_0Im = dynamic_cast<armnn::RefTensorHandle*>(wlActiv1_0->GetData().m_Inputs[0]);
    armnn::RefTensorHandle* activ1_1Im = dynamic_cast<armnn::RefTensorHandle*>(wlActiv1_1->GetData().m_Inputs[0]);


    BOOST_TEST(sOut0);
    BOOST_TEST(sOut1);
    BOOST_TEST(activ0_0Im);
    BOOST_TEST(activ0_1Im);
    BOOST_TEST(activ1_0Im);
    BOOST_TEST(activ1_1Im);

    // Each splitter output must be shared by both of its activation consumers.
    bool validDataPointers = (sOut0 == activ0_0Im) && (sOut0 == activ0_1Im) &&
                             (sOut1 == activ1_0Im) && (sOut1 == activ1_1Im);

    BOOST_TEST(validDataPointers);
}
699 
700 BOOST_AUTO_TEST_CASE(CreateSingleOutputMultipleInputsFloat32)
701 {
702  RefCreateSingleOutputMultipleInputsTest<RefSplitterWorkload, RefActivationWorkload,
704 }
705 
706 BOOST_AUTO_TEST_CASE(CreateSingleOutputMultipleInputsUint8)
707 {
708  RefCreateSingleOutputMultipleInputsTest<RefSplitterWorkload, RefActivationWorkload,
710 }
711 
712 template <typename ResizeBilinearWorkloadType, armnn::DataType DataType>
713 static void RefCreateResizeBilinearTest(DataLayout dataLayout)
714 {
715  Graph graph;
716  RefWorkloadFactory factory = GetFactory();
717  auto workload = CreateResizeBilinearWorkloadTest<ResizeBilinearWorkloadType, DataType>(factory, graph, dataLayout);
718 
719  TensorShape inputShape;
720  TensorShape outputShape;
721 
722  switch (dataLayout)
723  {
724  case DataLayout::NHWC:
725  inputShape = { 2, 4, 4, 3 };
726  outputShape = { 2, 2, 2, 3 };
727  break;
728  case DataLayout::NCHW:
729  default:
730  inputShape = { 2, 3, 4, 4 };
731  outputShape = { 2, 3, 2, 2 };
732  }
733 
734  // Checks that outputs and inputs are as we expect them (see definition of CreateResizeBilinearWorkloadTest).
735  CheckInputOutput(std::move(workload),
736  TensorInfo(inputShape, DataType),
737  TensorInfo(outputShape, DataType));
738 }
739 
// Resize-bilinear workload creation across element types.
BOOST_AUTO_TEST_CASE(CreateResizeBilinearFloat32)
{
    RefCreateResizeBilinearTest<RefResizeWorkload, armnn::DataType::Float32>(DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreateResizeBilinearFloat16)
{
    RefCreateResizeBilinearTest<RefResizeWorkload, armnn::DataType::Float16>(DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreateResizeBilinearUint8)
{
    RefCreateResizeBilinearTest<RefResizeWorkload, armnn::DataType::QAsymmU8>(DataLayout::NCHW);
}

// NOTE(review): this case name says "QuantisedAsymm16" but the element type
// used is the symmetric QSymmS16 — the name looks like a misnomer.
BOOST_AUTO_TEST_CASE(CreateResizeBilinearQuantisedAsymm16)
{
    RefCreateResizeBilinearTest<RefResizeWorkload, armnn::DataType::QSymmS16>(DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreateResizeBilinearFloat32Nhwc)
{
    RefCreateResizeBilinearTest<RefResizeWorkload, armnn::DataType::Float32>(DataLayout::NHWC);
}
764 
765 template <typename BatchToSpaceNdWorkloadType, armnn::DataType DataType>
766 static void RefCreateBatchToSpaceNdTest()
767 {
768  Graph graph;
769  RefWorkloadFactory factory;
770 
771  auto workload = CreateBatchToSpaceNdWorkloadTest<BatchToSpaceNdWorkloadType, DataType>(factory, graph);
772 
773  CheckInputOutput(std::move(workload),
774  TensorInfo({ 1, 1, 1, 1 }, DataType),
775  TensorInfo({ 1, 1, 1, 1 }, DataType));
776 }
777 
// BatchToSpaceNd workload creation across element types.
BOOST_AUTO_TEST_CASE(CreateBatchToSpaceNdFloat32)
{
    RefCreateBatchToSpaceNdTest<RefBatchToSpaceNdWorkload, armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateBatchToSpaceNdFloat16)
{
    RefCreateBatchToSpaceNdTest<RefBatchToSpaceNdWorkload, armnn::DataType::Float16>();
}

BOOST_AUTO_TEST_CASE(CreateBatchToSpaceNdUint8)
{
    RefCreateBatchToSpaceNdTest<RefBatchToSpaceNdWorkload, armnn::DataType::QAsymmU8>();
}

BOOST_AUTO_TEST_CASE(CreateBatchToSpaceNdQSymm16)
{
    RefCreateBatchToSpaceNdTest<RefBatchToSpaceNdWorkload, armnn::DataType::QSymmS16>();
}
797 
798 template <typename L2NormalizationWorkloadType, armnn::DataType DataType>
799 static void RefCreateL2NormalizationTest(DataLayout dataLayout)
800 {
801  Graph graph;
802  RefWorkloadFactory factory = GetFactory();
803  auto workload =
804  CreateL2NormalizationWorkloadTest<L2NormalizationWorkloadType, DataType>(factory, graph, dataLayout);
805 
806  TensorShape inputShape;
807  TensorShape outputShape;
808 
809  switch (dataLayout)
810  {
811  case DataLayout::NHWC:
812  inputShape = { 5, 50, 67, 20 };
813  outputShape = { 5, 50, 67, 20 };
814  break;
815  case DataLayout::NCHW:
816  default:
817  inputShape = { 5, 20, 50, 67 };
818  outputShape = { 5, 20, 50, 67 };
819  break;
820  }
821 
822  // Checks that outputs and inputs are as we expect them (see definition of CreateL2NormalizationWorkloadTest).
823  CheckInputOutput(std::move(workload), TensorInfo(inputShape, DataType), TensorInfo(outputShape, DataType));
824 }
825 
// L2-normalization workload creation across element types and both layouts.
BOOST_AUTO_TEST_CASE(CreateL2NormalizationFloat32)
{
    RefCreateL2NormalizationTest<RefL2NormalizationWorkload, armnn::DataType::Float32>(DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreateL2NormalizationFloat32Nhwc)
{
    RefCreateL2NormalizationTest<RefL2NormalizationWorkload, armnn::DataType::Float32>(DataLayout::NHWC);
}

BOOST_AUTO_TEST_CASE(CreateL2NormalizationInt16)
{
    RefCreateL2NormalizationTest<RefL2NormalizationWorkload, armnn::DataType::QSymmS16>(DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreateL2NormalizationInt16Nhwc)
{
    RefCreateL2NormalizationTest<RefL2NormalizationWorkload, armnn::DataType::QSymmS16>(DataLayout::NHWC);
}

BOOST_AUTO_TEST_CASE(CreateL2NormalizationUint8)
{
    RefCreateL2NormalizationTest<RefL2NormalizationWorkload, armnn::DataType::QAsymmU8>(DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreateL2NormalizationUint8Nhwc)
{
    RefCreateL2NormalizationTest<RefL2NormalizationWorkload, armnn::DataType::QAsymmU8>(DataLayout::NHWC);
}
855 
856 template <typename ReshapeWorkloadType, armnn::DataType DataType>
857 static void RefCreateReshapeWorkloadTest()
858 {
859  Graph graph;
860  RefWorkloadFactory factory = GetFactory();
861  auto workload = CreateReshapeWorkloadTest<ReshapeWorkloadType, DataType>(factory, graph);
862 
863  // Checks that outputs and inputs are as we expect them (see definition of CreateReshapeWorkloadTest).
864  CheckInputOutput(
865  std::move(workload),
866  TensorInfo({ 4, 1 }, DataType),
867  TensorInfo({ 1, 4 }, DataType));
868 }
869 
// Reshape workload creation across element types.
BOOST_AUTO_TEST_CASE(CreateReshapeWorkloadFloat32)
{
    RefCreateReshapeWorkloadTest<RefReshapeWorkload, armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateReshapeWorkloadQuantisedAsymm8)
{
    RefCreateReshapeWorkloadTest<RefReshapeWorkload, armnn::DataType::QAsymmU8>();
}

BOOST_AUTO_TEST_CASE(CreateReshapeWorkloadQuantisedSymm16)
{
    RefCreateReshapeWorkloadTest<RefReshapeWorkload, armnn::DataType::QSymmS16>();
}
884 
885 template <typename ConcatWorkloadType, armnn::DataType DataType>
886 static void RefCreateConcatWorkloadTest(const armnn::TensorShape& outputShape,
887  unsigned int concatAxis)
888 {
889  Graph graph;
890  RefWorkloadFactory factory = GetFactory();
891  auto workload = CreateConcatWorkloadTest<ConcatWorkloadType, DataType>(factory, graph, outputShape, concatAxis);
892 
893  CheckInputsOutput(std::move(workload),
894  TensorInfo({ 2, 3, 2, 5 }, DataType),
895  TensorInfo({ 2, 3, 2, 5 }, DataType),
896  TensorInfo(outputShape, DataType));
897 }
898 
// Concat workload creation across concat axes (0-3) and element types; the
// expected output shape doubles the concatenated dimension of { 2, 3, 2, 5 }.
BOOST_AUTO_TEST_CASE(CreateConcatDim0Float32Workload)
{
    RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::Float32>({ 4, 3, 2, 5 }, 0);
}

BOOST_AUTO_TEST_CASE(CreateConcatDim0Float16Workload)
{
    RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::Float16>({ 4, 3, 2, 5 }, 0);
}

BOOST_AUTO_TEST_CASE(CreateConcatDim0Uint8Workload)
{
    RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::QAsymmU8>({ 4, 3, 2, 5 }, 0);
}

BOOST_AUTO_TEST_CASE(CreateConcatDim0Uint16Workload)
{
    RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::QSymmS16>({ 4, 3, 2, 5 }, 0);
}

BOOST_AUTO_TEST_CASE(CreateConcatDim1Float32Workload)
{
    RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::Float32>({ 2, 6, 2, 5 }, 1);
}

BOOST_AUTO_TEST_CASE(CreateConcatDim1Uint8Workload)
{
    RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::QAsymmU8>({ 2, 6, 2, 5 }, 1);
}

BOOST_AUTO_TEST_CASE(CreateConcatDim2Float32Workload)
{
    RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::Float32>({ 2, 3, 4, 5 }, 2);
}

BOOST_AUTO_TEST_CASE(CreateConcatDim2Uint8Workload)
{
    RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::QAsymmU8>({ 2, 3, 4, 5 }, 2);
}

BOOST_AUTO_TEST_CASE(CreateConcatDim3Float32Workload)
{
    RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::Float32>({ 2, 3, 2, 10 }, 3);
}

BOOST_AUTO_TEST_CASE(CreateConcatDim3Uint8Workload)
{
    RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::QAsymmU8>({ 2, 3, 2, 10 }, 3);
}
948 
949 template <typename ConstantWorkloadType, armnn::DataType DataType>
950 static void RefCreateConstantWorkloadTest(const armnn::TensorShape& outputShape)
951 {
952  armnn::Graph graph;
953  RefWorkloadFactory factory = GetFactory();
954  auto workload = CreateConstantWorkloadTest<ConstantWorkloadType, DataType>(factory, graph, outputShape);
955 
956  // Check output is as expected
957  auto queueDescriptor = workload->GetData();
958  auto outputHandle = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor.m_Outputs[0]);
959  BOOST_TEST((outputHandle->GetTensorInfo() == TensorInfo(outputShape, DataType)));
960 }
961 
962 BOOST_AUTO_TEST_CASE(CreateConstantUint8Workload)
963 {
964  RefCreateConstantWorkloadTest<RefConstantWorkload, armnn::DataType::QAsymmU8>({ 2, 3, 2, 10 });
965 }
966 
967 BOOST_AUTO_TEST_CASE(CreateConstantInt16Workload)
968 {
969  RefCreateConstantWorkloadTest<RefConstantWorkload, armnn::DataType::QSymmS16>({ 2, 3, 2, 10 });
970 }
971 
972 BOOST_AUTO_TEST_CASE(CreateConstantFloat32Workload)
973 {
974  RefCreateConstantWorkloadTest<RefConstantWorkload, armnn::DataType::Float32>({ 2, 3, 2, 10 });
975 }
976 
977 BOOST_AUTO_TEST_CASE(CreateConstantSigned32Workload)
978 {
979  RefCreateConstantWorkloadTest<RefConstantWorkload, armnn::DataType::Signed32>({ 2, 3, 2, 10 });
980 }
981 
982 static void RefCreatePreluWorkloadTest(const armnn::TensorShape& inputShape,
983  const armnn::TensorShape& alphaShape,
984  const armnn::TensorShape& outputShape,
985  armnn::DataType dataType)
986 {
987  armnn::Graph graph;
988  RefWorkloadFactory factory;
989  auto workload = CreatePreluWorkloadTest<RefPreluWorkload>(factory,
990  graph,
991  inputShape,
992  alphaShape,
993  outputShape,
994  dataType);
995 
996  // Check output is as expected
997  auto queueDescriptor = workload->GetData();
998  auto outputHandle = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor.m_Outputs[0]);
999  BOOST_TEST((outputHandle->GetTensorInfo() == TensorInfo(outputShape, dataType)));
1000 }
1001 
1002 BOOST_AUTO_TEST_CASE(CreatePreluFloat32Workload)
1003 {
1004  RefCreatePreluWorkloadTest({ 1, 4, 1, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 }, armnn::DataType::Float32);
1005 }
1006 
1007 BOOST_AUTO_TEST_CASE(CreatePreluFloat16Workload)
1008 {
1009  RefCreatePreluWorkloadTest({ 1, 4, 1, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 }, armnn::DataType::Float16);
1010 }
1011 
1012 BOOST_AUTO_TEST_CASE(CreatePreluUint8Workload)
1013 {
1014  RefCreatePreluWorkloadTest({ 1, 4, 1, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 }, armnn::DataType::QAsymmU8);
1015 }
1016 
1017 BOOST_AUTO_TEST_CASE(CreatePreluInt16Workload)
1018 {
1019  RefCreatePreluWorkloadTest({ 1, 4, 1, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 }, armnn::DataType::QSymmS16);
1020 }
1021 
1022 BOOST_AUTO_TEST_CASE(CreatePreluFloat32NoBroadcastWorkload)
1023 {
1024  BOOST_CHECK_THROW(RefCreatePreluWorkloadTest({ 1, 4, 7, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 },
1027 }
1028 
1029 BOOST_AUTO_TEST_CASE(CreatePreluFloat16NoBroadcastWorkload)
1030 {
1031  BOOST_CHECK_THROW(RefCreatePreluWorkloadTest({ 1, 4, 7, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 },
1034 }
1035 
1036 BOOST_AUTO_TEST_CASE(CreatePreluUint8NoBroadcastWorkload)
1037 {
1038  BOOST_CHECK_THROW(RefCreatePreluWorkloadTest({ 1, 4, 7, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 },
1041 }
1042 
1043 BOOST_AUTO_TEST_CASE(CreatePreluInt16NoBroadcastWorkload)
1044 {
1045  BOOST_CHECK_THROW(RefCreatePreluWorkloadTest({ 1, 4, 7, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 },
1048 }
1049 
1050 template <typename SpaceToDepthWorkloadType, armnn::DataType DataType>
1051 static void RefCreateSpaceToDepthWorkloadTest()
1052 {
1053  Graph graph;
1054  RefWorkloadFactory factory;
1055 
1056  auto workload = CreateSpaceToDepthWorkloadTest<SpaceToDepthWorkloadType, DataType>(factory, graph);
1057 
1058  CheckInputOutput(std::move(workload),
1059  TensorInfo({ 1, 2, 2, 1 }, DataType),
1060  TensorInfo({ 1, 1, 1, 4 }, DataType));
1061 }
1062 
1063 BOOST_AUTO_TEST_CASE(CreateSpaceToDepthWorkloadFloat32)
1064 {
1065  RefCreateSpaceToDepthWorkloadTest<RefSpaceToDepthWorkload, armnn::DataType::Float32>();
1066 }
1067 
1068 BOOST_AUTO_TEST_CASE(CreateSpaceToDepthWorkloadFloat16)
1069 {
1070  RefCreateSpaceToDepthWorkloadTest<RefSpaceToDepthWorkload, armnn::DataType::Float16>();
1071 }
1072 
1073 BOOST_AUTO_TEST_CASE(CreateSpaceToDepthWorkloadQASymm8)
1074 {
1075  RefCreateSpaceToDepthWorkloadTest<RefSpaceToDepthWorkload, armnn::DataType::QAsymmU8>();
1076 }
1077 
1078 BOOST_AUTO_TEST_CASE(CreateSpaceToDepthWorkloadQSymm16)
1079 {
1080  RefCreateSpaceToDepthWorkloadTest<RefSpaceToDepthWorkload, armnn::DataType::QSymmS16>();
1081 }
1082 
1083 template <armnn::DataType DataType>
1084 static void RefCreateStackWorkloadTest(const armnn::TensorShape& inputShape,
1085  const armnn::TensorShape& outputShape,
1086  unsigned int axis,
1087  unsigned int numInputs)
1088 {
1089  armnn::Graph graph;
1090  RefWorkloadFactory factory;
1091  auto workload = CreateStackWorkloadTest<RefStackWorkload, DataType>(factory,
1092  graph,
1093  inputShape,
1094  outputShape,
1095  axis,
1096  numInputs);
1097 
1098  // Check inputs and output are as expected
1099  StackQueueDescriptor queueDescriptor = workload->GetData();
1100  for (unsigned int i = 0; i < numInputs; ++i)
1101  {
1102  auto inputHandle = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor.m_Inputs[i]);
1103  BOOST_TEST((inputHandle->GetTensorInfo() == TensorInfo(inputShape, DataType)));
1104  }
1105  auto outputHandle = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor.m_Outputs[0]);
1106  BOOST_TEST((outputHandle->GetTensorInfo() == TensorInfo(outputShape, DataType)));
1107 }
1108 
1109 BOOST_AUTO_TEST_CASE(CreateStackFloat32Workload)
1110 {
1111  RefCreateStackWorkloadTest<armnn::DataType::Float32>({ 3, 4, 5 }, { 3, 4, 2, 5 }, 2, 2);
1112 }
1113 
1114 BOOST_AUTO_TEST_CASE(CreateStackUint8Workload)
1115 {
1116  RefCreateStackWorkloadTest<armnn::DataType::QAsymmU8>({ 3, 4, 5 }, { 3, 4, 2, 5 }, 2, 2);
1117 }
1118 
1119 BOOST_AUTO_TEST_CASE(CreateStackUint16Workload)
1120 {
1121  RefCreateStackWorkloadTest<armnn::DataType::QSymmS16>({ 3, 4, 5 }, { 3, 4, 2, 5 }, 2, 2);
1122 }
1123 
1124 template <typename QLstmWorkloadType>
1125 static void RefCreateQLstmWorkloadTest()
1126 {
1127  Graph graph;
1128  RefWorkloadFactory factory;
1129 
1130  auto workload = CreateQLstmWorkloadTest<QLstmWorkloadType>(factory, graph);
1131 
1132  armnn::TensorInfo inputInfo({2 , 4}, armnn::DataType::QAsymmS8, 0.0078125f, 0);
1133 
1134  armnn::TensorInfo cellStateInfo({2 , 4}, armnn::DataType::QSymmS16, 3.05176e-05f, 0);
1135 
1136  armnn::TensorInfo outputInfo({2 , 4}, armnn::DataType::QAsymmS8, 0.007f, 0);
1137 
1138  QLstmQueueDescriptor queueDescriptor = workload->GetData();
1139  auto inputHandle = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor.m_Inputs[0]);
1140  auto cellStateOutHandle = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor.m_Outputs[1]);
1141  auto outputHandle = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor.m_Outputs[2]);
1142 
1143  BOOST_TEST((inputHandle->GetTensorInfo() == inputInfo));
1144  BOOST_TEST((cellStateOutHandle->GetTensorInfo() == cellStateInfo));
1145  BOOST_TEST((outputHandle->GetTensorInfo() == outputInfo));
1146 }
1147 
1148 BOOST_AUTO_TEST_CASE(CreateQLstmWorkload)
1149 {
1150  RefCreateQLstmWorkloadTest<RefQLstmWorkload>();
1151 }
1152 
BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
DataLayout
Definition: Types.hpp:49
DataType
Definition: Types.hpp:32
This layer represents an addition operation.
BOOST_AUTO_TEST_SUITE_END()
This layer represents a subtraction operation.
This layer represents a division operation.
BOOST_AUTO_TEST_CASE(CreateActivationFloat32Workload)
void SetQuantizationOffset(int32_t offset)
Definition: Tensor.cpp:481
This layer represents a multiplication operation.