ArmNN
 20.02
RefCreateWorkloadTests.cpp
Go to the documentation of this file.
1 //
2 // Copyright © 2017 Arm Ltd. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
7 
11 
12 namespace
13 {
14 
15 template<typename Workload>
16 void CheckInputOutput(std::unique_ptr<Workload> workload, const TensorInfo& inputInfo, const TensorInfo& outputInfo)
17 {
18  auto queueDescriptor = workload->GetData();
19  auto inputHandle = boost::polymorphic_downcast<RefTensorHandle*>(queueDescriptor.m_Inputs[0]);
20  auto outputHandle = boost::polymorphic_downcast<RefTensorHandle*>(queueDescriptor.m_Outputs[0]);
21  BOOST_TEST((inputHandle->GetTensorInfo() == inputInfo));
22  BOOST_TEST((outputHandle->GetTensorInfo() == outputInfo));
23 }
24 
25 template <typename Workload>
26 void CheckInputsOutput(std::unique_ptr<Workload> workload,
27  const TensorInfo& inputInfo0,
28  const TensorInfo& inputInfo1,
29  const TensorInfo& outputInfo)
30 {
31  auto queueDescriptor = workload->GetData();
32  auto inputHandle0 = boost::polymorphic_downcast<RefTensorHandle*>(queueDescriptor.m_Inputs[0]);
33  auto inputHandle1 = boost::polymorphic_downcast<RefTensorHandle*>(queueDescriptor.m_Inputs[1]);
34  auto outputHandle = boost::polymorphic_downcast<RefTensorHandle*>(queueDescriptor.m_Outputs[0]);
35  BOOST_TEST((inputHandle0->GetTensorInfo() == inputInfo0));
36  BOOST_TEST((inputHandle1->GetTensorInfo() == inputInfo1));
37  BOOST_TEST((outputHandle->GetTensorInfo() == outputInfo));
38 }
39 
40 armnn::RefWorkloadFactory GetFactory()
41 {
42  std::shared_ptr<RefMemoryManager> memoryManager = std::make_shared<RefMemoryManager>();
43  return RefWorkloadFactory(memoryManager);
44 }
45 
46 
47 }
48 
49 BOOST_AUTO_TEST_SUITE(CreateWorkloadRef)
50 
51 template <typename ActivationWorkloadType, armnn::DataType DataType>
52 static void RefCreateActivationWorkloadTest()
53 {
54  Graph graph;
55  RefWorkloadFactory factory = GetFactory();
56  auto workload = CreateActivationWorkloadTest<ActivationWorkloadType, DataType>(factory, graph);
57 
58  // Checks that outputs are as we expect them (see definition of CreateActivationWorkloadTest).
59  CheckInputOutput(std::move(workload),
60  TensorInfo({ 1, 1 }, DataType),
61  TensorInfo({ 1, 1 }, DataType));
62 }
63 
// Activation workload creation for Float32 tensors.
BOOST_AUTO_TEST_CASE(CreateActivationFloat32Workload)
{
    RefCreateActivationWorkloadTest<RefActivationWorkload, armnn::DataType::Float32>();
}

// Activation workload creation for 8-bit asymmetric quantized tensors.
BOOST_AUTO_TEST_CASE(CreateActivationUint8Workload)
{
    RefCreateActivationWorkloadTest<RefActivationWorkload, armnn::DataType::QAsymmU8>();
}
73 
74 template <typename WorkloadType,
75  typename DescriptorType,
76  typename LayerType,
78 static void RefCreateElementwiseWorkloadTest()
79 {
80  Graph graph;
81  RefWorkloadFactory factory = GetFactory();
82  auto workload = CreateElementwiseWorkloadTest<WorkloadType, DescriptorType, LayerType, DataType>(
83  factory, graph);
84 
85  CheckInputsOutput(std::move(workload),
86  TensorInfo({ 2, 3 }, DataType),
87  TensorInfo({ 2, 3 }, DataType),
88  TensorInfo({ 2, 3 }, DataType));
89 }
90 
91 BOOST_AUTO_TEST_CASE(CreateAdditionFloatWorkload)
92 {
93  RefCreateElementwiseWorkloadTest<RefAdditionWorkload,
97 }
98 
99 BOOST_AUTO_TEST_CASE(CreateAdditionUint8Workload)
100 {
101  RefCreateElementwiseWorkloadTest<RefAdditionWorkload,
105 }
106 
107 BOOST_AUTO_TEST_CASE(CreateAdditionInt16Workload)
108 {
109  RefCreateElementwiseWorkloadTest<RefAdditionWorkload,
113 }
114 
115 BOOST_AUTO_TEST_CASE(CreateSubtractionFloat32Workload)
116 {
117  RefCreateElementwiseWorkloadTest<RefSubtractionWorkload,
121 }
122 
123 BOOST_AUTO_TEST_CASE(CreateSubtractionFloat16Workload)
124 {
125  RefCreateElementwiseWorkloadTest<RefSubtractionWorkload,
129 }
130 
131 BOOST_AUTO_TEST_CASE(CreateSubtractionUint8Workload)
132 {
133  RefCreateElementwiseWorkloadTest<RefSubtractionWorkload,
137 }
138 
139 BOOST_AUTO_TEST_CASE(CreateSubtractionInt16Workload)
140 {
141  RefCreateElementwiseWorkloadTest<RefSubtractionWorkload,
145 }
146 
147 BOOST_AUTO_TEST_CASE(CreateMultiplicationFloatWorkload)
148 {
149  RefCreateElementwiseWorkloadTest<RefMultiplicationWorkload,
153 }
154 
155 BOOST_AUTO_TEST_CASE(CreateMultiplicationUint8Workload)
156 {
157  RefCreateElementwiseWorkloadTest<RefMultiplicationWorkload,
161 }
162 
163 BOOST_AUTO_TEST_CASE(CreateMultiplicationInt16Workload)
164 {
165  RefCreateElementwiseWorkloadTest<RefMultiplicationWorkload,
169 }
170 
171 BOOST_AUTO_TEST_CASE(CreateDivisionFloat32Workload)
172 {
173  RefCreateElementwiseWorkloadTest<RefDivisionWorkload,
177 }
178 
179 BOOST_AUTO_TEST_CASE(CreateDivisionFloat16Workload)
180 {
181  RefCreateElementwiseWorkloadTest<RefDivisionWorkload,
185 }
186 
187 BOOST_AUTO_TEST_CASE(CreateDivisionUint8Workload)
188 {
189  RefCreateElementwiseWorkloadTest<RefDivisionWorkload,
193 }
194 
195 BOOST_AUTO_TEST_CASE(CreateDivisionInt16Workload)
196 {
197  RefCreateElementwiseWorkloadTest<RefDivisionWorkload,
201 }
202 
203 template <typename BatchNormalizationWorkloadType, armnn::DataType DataType>
204 static void RefCreateBatchNormalizationWorkloadTest(DataLayout dataLayout)
205 {
206  Graph graph;
207  RefWorkloadFactory factory = GetFactory();
208  auto workload = CreateBatchNormalizationWorkloadTest<BatchNormalizationWorkloadType, DataType>(factory,
209  graph,
210  dataLayout);
211 
212  TensorShape inputShape;
213  TensorShape outputShape;
214 
215  switch (dataLayout)
216  {
217  case DataLayout::NHWC:
218  inputShape = { 2, 4, 4, 3 };
219  outputShape = { 2, 4, 4, 3 };
220  break;
221  case DataLayout::NCHW:
222  default:
223  inputShape = { 2, 3, 4, 4 };
224  outputShape = { 2, 3, 4, 4 };
225  break;
226  }
227 
228  // Checks that outputs and inputs are as we expect them (see definition of CreateBatchNormalizationWorkloadTest).
229  CheckInputOutput(std::move(workload), TensorInfo(inputShape, DataType), TensorInfo(outputShape, DataType));
230 }
231 
// Batch-normalization workload creation exercised across data types
// (F32, F16, QAsymmU8, QSymmS16) and both data layouts (NCHW, NHWC).
BOOST_AUTO_TEST_CASE(CreateBatchNormalizationFloat32Workload)
{
    RefCreateBatchNormalizationWorkloadTest<RefBatchNormalizationWorkload,armnn::DataType::Float32>
            (DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreateBatchNormalizationFloat32WorkloadNhwc)
{
    RefCreateBatchNormalizationWorkloadTest<RefBatchNormalizationWorkload, armnn::DataType::Float32>
            (DataLayout::NHWC);
}

BOOST_AUTO_TEST_CASE(CreateBatchNormalizationFloat16Workload)
{
    RefCreateBatchNormalizationWorkloadTest<RefBatchNormalizationWorkload,armnn::DataType::Float16>
            (DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreateBatchNormalizationFloat16WorkloadNhwc)
{
    RefCreateBatchNormalizationWorkloadTest<RefBatchNormalizationWorkload, armnn::DataType::Float16>
            (DataLayout::NHWC);
}

BOOST_AUTO_TEST_CASE(CreateBatchNormalizationUint8Workload)
{
    RefCreateBatchNormalizationWorkloadTest<RefBatchNormalizationWorkload, armnn::DataType::QAsymmU8>
            (DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreateBatchNormalizationUint8WorkloadNhwc)
{
    RefCreateBatchNormalizationWorkloadTest<RefBatchNormalizationWorkload, armnn::DataType::QAsymmU8>
            (DataLayout::NHWC);
}

BOOST_AUTO_TEST_CASE(CreateBatchNormalizationInt16Workload)
{
    RefCreateBatchNormalizationWorkloadTest<RefBatchNormalizationWorkload, armnn::DataType::QSymmS16>
            (DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreateBatchNormalizationInt16WorkloadNhwc)
{
    RefCreateBatchNormalizationWorkloadTest<RefBatchNormalizationWorkload, armnn::DataType::QSymmS16>
            (DataLayout::NHWC);
}
279 
// Fp16 -> Fp32 conversion: input is Float16, output Float32, same shape.
BOOST_AUTO_TEST_CASE(CreateConvertFp16ToFp32Float32Workload)
{
    Graph graph;
    RefWorkloadFactory factory = GetFactory();
    auto workload = CreateConvertFp16ToFp32WorkloadTest<RefConvertFp16ToFp32Workload>(factory, graph);

    // Checks that outputs and inputs are as we expect them
    CheckInputOutput(
        std::move(workload), TensorInfo({1, 3, 2, 3}, DataType::Float16), TensorInfo({1, 3, 2, 3}, DataType::Float32));
}

// Fp32 -> Fp16 conversion: input is Float32, output Float16, same shape.
BOOST_AUTO_TEST_CASE(CreateConvertFp32ToFp16Float16Workload)
{
    Graph graph;
    RefWorkloadFactory factory = GetFactory();
    auto workload = CreateConvertFp32ToFp16WorkloadTest<RefConvertFp32ToFp16Workload>(factory, graph);

    // Checks that outputs and inputs are as we expect them
    CheckInputOutput(
        std::move(workload), TensorInfo({1, 3, 2, 3}, DataType::Float32), TensorInfo({1, 3, 2, 3}, DataType::Float16));
}
301 
302 static void RefCreateConvolution2dWorkloadTest(DataLayout dataLayout = DataLayout::NCHW)
303 {
304  Graph graph;
305  RefWorkloadFactory factory = GetFactory();
306  auto workload = CreateConvolution2dWorkloadTest<RefConvolution2dWorkload, DataType::Float32>
307  (factory, graph, dataLayout);
308 
309  TensorShape inputShape = (dataLayout == DataLayout::NCHW) ? std::initializer_list<unsigned int>({2, 3, 8, 16})
310  : std::initializer_list<unsigned int>({2, 8, 16, 3});
311  TensorShape outputShape = (dataLayout == DataLayout::NCHW) ? std::initializer_list<unsigned int>({2, 2, 2, 10})
312  : std::initializer_list<unsigned int>({2, 2, 10, 2});
313 
314  // Checks that outputs and inputs are as we expect them (see definition of CreateConvolution2dWorkloadTest).
315  CheckInputOutput(std::move(workload),
316  TensorInfo(inputShape, DataType::Float32),
317  TensorInfo(outputShape, DataType::Float32));
318 }
319 
// Float32 convolution creation for both data layouts.
BOOST_AUTO_TEST_CASE(CreateConvolution2dFloatNchwWorkload)
{
    RefCreateConvolution2dWorkloadTest(DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreateConvolution2dFloatNhwcWorkload)
{
    RefCreateConvolution2dWorkloadTest(DataLayout::NHWC);
}
329 
330 static void RefCreateDepthwiseConvolutionWorkloadTest(DataLayout dataLayout)
331 {
332  Graph graph;
333  RefWorkloadFactory factory = GetFactory();
334  auto workload = CreateDepthwiseConvolution2dWorkloadTest<RefDepthwiseConvolution2dWorkload, DataType::Float32>
335  (factory, graph, dataLayout);
336 
337  TensorShape inputShape = (dataLayout == DataLayout::NCHW) ? std::initializer_list<unsigned int>({ 2, 2, 5, 5 })
338  : std::initializer_list<unsigned int>({ 2, 5, 5, 2 });
339  TensorShape outputShape = (dataLayout == DataLayout::NCHW) ? std::initializer_list<unsigned int>({ 2, 2, 5, 5 })
340  : std::initializer_list<unsigned int>({ 2, 5, 5, 2 });
341 
342  // Checks that inputs/outputs are as we expect them (see definition of CreateDepthwiseConvolution2dWorkloadTest).
343  CheckInputOutput(std::move(workload),
344  TensorInfo(inputShape, DataType::Float32),
345  TensorInfo(outputShape, DataType::Float32));
346 }
347 
// Depthwise convolution creation, Float32, NHWC layout only.
BOOST_AUTO_TEST_CASE(CreateDepthwiseConvolutionFloat32NhwcWorkload)
{
    RefCreateDepthwiseConvolutionWorkloadTest(DataLayout::NHWC);
}
352 
353 template <typename FullyConnectedWorkloadType, armnn::DataType DataType>
354 static void RefCreateFullyConnectedWorkloadTest()
355 {
356  Graph graph;
357  RefWorkloadFactory factory = GetFactory();
358  auto workload = CreateFullyConnectedWorkloadTest<FullyConnectedWorkloadType, DataType>(factory, graph);
359 
360  // Checks that outputs and inputs are as we expect them (see definition of CreateFullyConnectedWorkloadTest).
361  float inputsQScale = DataType == armnn::DataType::QAsymmU8 ? 1.0f : 0.0;
362  float outputQScale = DataType == armnn::DataType::QAsymmU8 ? 2.0f : 0.0;
363  CheckInputOutput(std::move(workload),
364  TensorInfo({ 3, 1, 4, 5 }, DataType, inputsQScale),
365  TensorInfo({ 3, 7 }, DataType, outputQScale));
366 }
367 
// Fully-connected workload creation across supported data types.
BOOST_AUTO_TEST_CASE(CreateFullyConnectedWorkloadFloat32)
{
    RefCreateFullyConnectedWorkloadTest<RefFullyConnectedWorkload, armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateFullyConnectedWorkloadQuantisedAsymm8)
{
    RefCreateFullyConnectedWorkloadTest<RefFullyConnectedWorkload, armnn::DataType::QAsymmU8>();
}

BOOST_AUTO_TEST_CASE(CreateFullyConnectedWorkloadQuantisedSymm16)
{
    RefCreateFullyConnectedWorkloadTest<RefFullyConnectedWorkload, armnn::DataType::QSymmS16>();
}
382 
383 template <typename NormalizationWorkloadType, armnn::DataType DataType>
384 static void RefCreateNormalizationWorkloadTest(DataLayout dataLayout)
385 {
386  Graph graph;
387  RefWorkloadFactory factory = GetFactory();
388  auto workload = CreateNormalizationWorkloadTest<NormalizationWorkloadType, DataType>(factory, graph, dataLayout);
389 
390  TensorShape inputShape;
391  TensorShape outputShape;
392 
393  switch (dataLayout)
394  {
395  case DataLayout::NHWC:
396  inputShape = { 3, 1, 5, 5 };
397  outputShape = { 3, 1, 5, 5 };
398  break;
399  case DataLayout::NCHW:
400  default:
401  inputShape = { 3, 5, 5, 1 };
402  outputShape = { 3, 5, 5, 1 };
403  break;
404  }
405 
406  // Checks that outputs and inputs are as we expect them (see definition of CreateNormalizationWorkloadTest).
407  CheckInputOutput(std::move(workload), TensorInfo(inputShape, DataType), TensorInfo(outputShape, DataType));
408 }
409 
// Normalization workload creation across data types and both layouts.
BOOST_AUTO_TEST_CASE(CreateRefNormalizationFloat32NchwWorkload)
{
    RefCreateNormalizationWorkloadTest<RefNormalizationWorkload, armnn::DataType::Float32>(DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreateRefNormalizationFloat32NhwcWorkload)
{
    RefCreateNormalizationWorkloadTest<RefNormalizationWorkload, armnn::DataType::Float32>(DataLayout::NHWC);
}

BOOST_AUTO_TEST_CASE(CreateRefNormalizationUint8NchwWorkload)
{
    RefCreateNormalizationWorkloadTest<RefNormalizationWorkload, armnn::DataType::QAsymmU8>(DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreateRefNormalizationUint8NhwcWorkload)
{
    RefCreateNormalizationWorkloadTest<RefNormalizationWorkload, armnn::DataType::QAsymmU8>(DataLayout::NHWC);
}

BOOST_AUTO_TEST_CASE(CreateRefNormalizationInt16NchwWorkload)
{
    RefCreateNormalizationWorkloadTest<RefNormalizationWorkload, armnn::DataType::QSymmS16>(DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreateRefNormalizationInt16NhwcWorkload)
{
    RefCreateNormalizationWorkloadTest<RefNormalizationWorkload, armnn::DataType::QSymmS16>(DataLayout::NHWC);
}
439 
440 template <typename Pooling2dWorkloadType, armnn::DataType DataType>
441 static void RefCreatePooling2dWorkloadTest(DataLayout dataLayout)
442 {
443  Graph graph;
444  RefWorkloadFactory factory = GetFactory();
445  auto workload = CreatePooling2dWorkloadTest<Pooling2dWorkloadType, DataType>(factory, graph, dataLayout);
446 
447  TensorShape inputShape;
448  TensorShape outputShape;
449 
450  switch (dataLayout)
451  {
452  case DataLayout::NHWC:
453  inputShape = { 3, 5, 5, 2 };
454  outputShape = { 3, 2, 4, 2 };
455  break;
456  case DataLayout::NCHW:
457  default:
458  inputShape = { 3, 2, 5, 5 };
459  outputShape = { 3, 2, 2, 4 };
460  }
461 
462  // Checks that outputs and inputs are as we expect them (see definition of CreatePooling2dWorkloadTest).
463  CheckInputOutput(std::move(workload),
464  TensorInfo(inputShape, DataType),
465  TensorInfo(outputShape, DataType));
466 }
467 
// Pooling workload creation across data types and both layouts.
BOOST_AUTO_TEST_CASE(CreatePooling2dFloat32Workload)
{
    RefCreatePooling2dWorkloadTest<RefPooling2dWorkload, armnn::DataType::Float32>(DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreatePooling2dFloat32NhwcWorkload)
{
    RefCreatePooling2dWorkloadTest<RefPooling2dWorkload, armnn::DataType::Float32>(DataLayout::NHWC);
}

BOOST_AUTO_TEST_CASE(CreatePooling2dUint8Workload)
{
    RefCreatePooling2dWorkloadTest<RefPooling2dWorkload, armnn::DataType::QAsymmU8>(DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreatePooling2dUint8NhwcWorkload)
{
    RefCreatePooling2dWorkloadTest<RefPooling2dWorkload, armnn::DataType::QAsymmU8>(DataLayout::NHWC);
}

BOOST_AUTO_TEST_CASE(CreatePooling2dInt16Workload)
{
    RefCreatePooling2dWorkloadTest<RefPooling2dWorkload, armnn::DataType::QSymmS16>(DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreatePooling2dInt16NhwcWorkload)
{
    RefCreatePooling2dWorkloadTest<RefPooling2dWorkload, armnn::DataType::QSymmS16>(DataLayout::NHWC);
}
497 
498 template <typename SoftmaxWorkloadType, armnn::DataType DataType>
499 static void RefCreateSoftmaxWorkloadTest()
500 {
501  Graph graph;
502  RefWorkloadFactory factory = GetFactory();
503  auto workload = CreateSoftmaxWorkloadTest<SoftmaxWorkloadType, DataType>(factory, graph);
504 
505  // Checks that outputs and inputs are as we expect them (see definition of CreateSoftmaxWorkloadTest).
506  CheckInputOutput(
507  std::move(workload),
508  TensorInfo({4, 1}, DataType),
509  TensorInfo({4, 1}, DataType));
510 }
511 
// Softmax workload creation across supported data types.
BOOST_AUTO_TEST_CASE(CreateSoftmaxFloat32Workload)
{
    RefCreateSoftmaxWorkloadTest<RefSoftmaxWorkload, armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateSoftmaxFloat16Workload)
{
    RefCreateSoftmaxWorkloadTest<RefSoftmaxWorkload, armnn::DataType::Float16>();
}

BOOST_AUTO_TEST_CASE(CreateSoftmaxQuantisedAsymm8Workload)
{
    RefCreateSoftmaxWorkloadTest<RefSoftmaxWorkload, armnn::DataType::QAsymmU8>();
}

BOOST_AUTO_TEST_CASE(CreateSoftmaxQuantisedSymm16Workload)
{
    RefCreateSoftmaxWorkloadTest<RefSoftmaxWorkload, armnn::DataType::QSymmS16>();
}
531 
532 template <typename SplitterWorkloadType, armnn::DataType DataType>
533 static void RefCreateSplitterWorkloadTest()
534 {
535  Graph graph;
536  RefWorkloadFactory factory = GetFactory();
537  auto workload = CreateSplitterWorkloadTest<SplitterWorkloadType, DataType>(factory, graph);
538 
539  // Checks that outputs are as we expect them (see definition of CreateSplitterWorkloadTest).
540  SplitterQueueDescriptor queueDescriptor = workload->GetData();
541  auto inputHandle = boost::polymorphic_downcast<RefTensorHandle*>(queueDescriptor.m_Inputs[0]);
542  BOOST_TEST((inputHandle->GetTensorInfo() == TensorInfo({ 5, 7, 7 }, DataType)));
543 
544  auto outputHandle0 = boost::polymorphic_downcast<RefTensorHandle*>(queueDescriptor.m_Outputs[0]);
545  BOOST_TEST((outputHandle0->GetTensorInfo() == TensorInfo({ 1, 7, 7 }, DataType)));
546 
547  auto outputHandle1 = boost::polymorphic_downcast<RefTensorHandle*>(queueDescriptor.m_Outputs[1]);
548  BOOST_TEST((outputHandle1->GetTensorInfo() == TensorInfo({ 2, 7, 7 }, DataType)));
549 
550  auto outputHandle2 = boost::polymorphic_downcast<RefTensorHandle*>(queueDescriptor.m_Outputs[2]);
551  BOOST_TEST((outputHandle2->GetTensorInfo() == TensorInfo({ 2, 7, 7 }, DataType)));
552 }
553 
// Splitter workload creation across supported data types.
BOOST_AUTO_TEST_CASE(CreateSplitterFloat32Workload)
{
    RefCreateSplitterWorkloadTest<RefSplitterWorkload, armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateSplitterFloat16Workload)
{
    RefCreateSplitterWorkloadTest<RefSplitterWorkload, armnn::DataType::Float16>();
}

BOOST_AUTO_TEST_CASE(CreateSplitterUint8Workload)
{
    RefCreateSplitterWorkloadTest<RefSplitterWorkload, armnn::DataType::QAsymmU8>();
}
568 
template <typename SplitterWorkloadType, typename ConcatWorkloadType, armnn::DataType DataType>
static void RefCreateSplitterConcatWorkloadTest()
{
    // Tests that it is possible to decide which output of the splitter layer
    // should be linked to which input of the concat layer.
    // We tested that it is possible to specify 0th output
    // of the splitter to be the 1st input to the concat and the 1st output of the splitter to be 0th input
    // of the concat.

    Graph graph;
    RefWorkloadFactory factory = GetFactory();
    auto workloads = CreateSplitterConcatWorkloadTest<SplitterWorkloadType, ConcatWorkloadType, DataType>
            (factory, graph);

    auto wlSplitter = std::move(workloads.first);
    auto wlConcat = std::move(workloads.second);

    //Checks that the index of inputs/outputs matches what we declared on InputDescriptor construction.
    armnn::RefTensorHandle* sOut0 = dynamic_cast<armnn::RefTensorHandle*>(wlSplitter->GetData().m_Outputs[0]);
    armnn::RefTensorHandle* sOut1 = dynamic_cast<armnn::RefTensorHandle*>(wlSplitter->GetData().m_Outputs[1]);
    armnn::RefTensorHandle* mIn0 = dynamic_cast<armnn::RefTensorHandle*>(wlConcat->GetData().m_Inputs[0]);
    armnn::RefTensorHandle* mIn1 = dynamic_cast<armnn::RefTensorHandle*>(wlConcat->GetData().m_Inputs[1]);

    BOOST_TEST(sOut0);
    BOOST_TEST(sOut1);
    BOOST_TEST(mIn0);
    BOOST_TEST(mIn1);

    // The crossed wiring above means splitter output 0 feeds concat input 1 and vice versa.
    bool validDataPointers = (sOut0 == mIn1) && (sOut1 == mIn0);

    BOOST_TEST(validDataPointers);
}
601 
// Splitter-to-concat wiring, exercised across supported data types.
BOOST_AUTO_TEST_CASE(CreateSplitterConcatFloat32)
{
    RefCreateSplitterConcatWorkloadTest<RefSplitterWorkload, RefConcatWorkload, DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateSplitterConcatFloat16)
{
    RefCreateSplitterConcatWorkloadTest<RefSplitterWorkload, RefConcatWorkload, DataType::Float16>();
}

BOOST_AUTO_TEST_CASE(CreateSplitterConcatUint8)
{
    RefCreateSplitterConcatWorkloadTest<RefSplitterWorkload, RefConcatWorkload, DataType::QAsymmU8>();
}
616 
template <typename SplitterWorkloadType, typename ActivationWorkloadType, armnn::DataType DataType>
static void RefCreateSingleOutputMultipleInputsTest()
{
    // Tests that it is possible to assign multiple (two) different layers to each of the outputs of a splitter layer.
    // We created a splitter with two outputs. That each of those outputs is used by two different activation layers.

    Graph graph;
    RefWorkloadFactory factory = GetFactory();
    std::unique_ptr<SplitterWorkloadType> wlSplitter;
    std::unique_ptr<ActivationWorkloadType> wlActiv0_0;
    std::unique_ptr<ActivationWorkloadType> wlActiv0_1;
    std::unique_ptr<ActivationWorkloadType> wlActiv1_0;
    std::unique_ptr<ActivationWorkloadType> wlActiv1_1;

    CreateSplitterMultipleInputsOneOutputWorkloadTest<SplitterWorkloadType,
        ActivationWorkloadType, DataType>(factory, graph, wlSplitter, wlActiv0_0, wlActiv0_1, wlActiv1_0, wlActiv1_1);

    // Resolve the raw tensor handles behind each workload's data so pointer
    // identity can be compared below.
    armnn::RefTensorHandle* sOut0 = dynamic_cast<armnn::RefTensorHandle*>(wlSplitter->GetData().m_Outputs[0]);
    armnn::RefTensorHandle* sOut1 = dynamic_cast<armnn::RefTensorHandle*>(wlSplitter->GetData().m_Outputs[1]);
    armnn::RefTensorHandle* activ0_0Im = dynamic_cast<armnn::RefTensorHandle*>(wlActiv0_0->GetData().m_Inputs[0]);
    armnn::RefTensorHandle* activ0_1Im = dynamic_cast<armnn::RefTensorHandle*>(wlActiv0_1->GetData().m_Inputs[0]);
    armnn::RefTensorHandle* activ1_0Im = dynamic_cast<armnn::RefTensorHandle*>(wlActiv1_0->GetData().m_Inputs[0]);
    armnn::RefTensorHandle* activ1_1Im = dynamic_cast<armnn::RefTensorHandle*>(wlActiv1_1->GetData().m_Inputs[0]);


    BOOST_TEST(sOut0);
    BOOST_TEST(sOut1);
    BOOST_TEST(activ0_0Im);
    BOOST_TEST(activ0_1Im);
    BOOST_TEST(activ1_0Im);
    BOOST_TEST(activ1_1Im);

    // Each splitter output must be shared by both of its activation consumers.
    bool validDataPointers = (sOut0 == activ0_0Im) && (sOut0 == activ0_1Im) &&
                             (sOut1 == activ1_0Im) && (sOut1 == activ1_1Im);

    BOOST_TEST(validDataPointers);
}
654 
655 BOOST_AUTO_TEST_CASE(CreateSingleOutputMultipleInputsFloat32)
656 {
657  RefCreateSingleOutputMultipleInputsTest<RefSplitterWorkload, RefActivationWorkload,
659 }
660 
661 BOOST_AUTO_TEST_CASE(CreateSingleOutputMultipleInputsUint8)
662 {
663  RefCreateSingleOutputMultipleInputsTest<RefSplitterWorkload, RefActivationWorkload,
665 }
666 
667 template <typename ResizeBilinearWorkloadType, armnn::DataType DataType>
668 static void RefCreateResizeBilinearTest(DataLayout dataLayout)
669 {
670  Graph graph;
671  RefWorkloadFactory factory = GetFactory();
672  auto workload = CreateResizeBilinearWorkloadTest<ResizeBilinearWorkloadType, DataType>(factory, graph, dataLayout);
673 
674  TensorShape inputShape;
675  TensorShape outputShape;
676 
677  switch (dataLayout)
678  {
679  case DataLayout::NHWC:
680  inputShape = { 2, 4, 4, 3 };
681  outputShape = { 2, 2, 2, 3 };
682  break;
683  case DataLayout::NCHW:
684  default:
685  inputShape = { 2, 3, 4, 4 };
686  outputShape = { 2, 3, 2, 2 };
687  }
688 
689  // Checks that outputs and inputs are as we expect them (see definition of CreateResizeBilinearWorkloadTest).
690  CheckInputOutput(std::move(workload),
691  TensorInfo(inputShape, DataType),
692  TensorInfo(outputShape, DataType));
693 }
694 
// Resize-bilinear workload creation across data types (plus one NHWC case).
BOOST_AUTO_TEST_CASE(CreateResizeBilinearFloat32)
{
    RefCreateResizeBilinearTest<RefResizeWorkload, armnn::DataType::Float32>(DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreateResizeBilinearFloat16)
{
    RefCreateResizeBilinearTest<RefResizeWorkload, armnn::DataType::Float16>(DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreateResizeBilinearUint8)
{
    RefCreateResizeBilinearTest<RefResizeWorkload, armnn::DataType::QAsymmU8>(DataLayout::NCHW);
}

// NOTE(review): the name says "Asymm16" but the type under test is QSymmS16 —
// pre-existing naming mismatch, preserved because renaming would change the test ID.
BOOST_AUTO_TEST_CASE(CreateResizeBilinearQuantisedAsymm16)
{
    RefCreateResizeBilinearTest<RefResizeWorkload, armnn::DataType::QSymmS16>(DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreateResizeBilinearFloat32Nhwc)
{
    RefCreateResizeBilinearTest<RefResizeWorkload, armnn::DataType::Float32>(DataLayout::NHWC);
}
719 
720 template <typename BatchToSpaceNdWorkloadType, armnn::DataType DataType>
721 static void RefCreateBatchToSpaceNdTest()
722 {
723  Graph graph;
724  RefWorkloadFactory factory;
725 
726  auto workload = CreateBatchToSpaceNdWorkloadTest<BatchToSpaceNdWorkloadType, DataType>(factory, graph);
727 
728  CheckInputOutput(std::move(workload),
729  TensorInfo({ 1, 1, 1, 1 }, DataType),
730  TensorInfo({ 1, 1, 1, 1 }, DataType));
731 }
732 
// Batch-to-space-nd workload creation across supported data types.
BOOST_AUTO_TEST_CASE(CreateBatchToSpaceNdFloat32)
{
    RefCreateBatchToSpaceNdTest<RefBatchToSpaceNdWorkload, armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateBatchToSpaceNdFloat16)
{
    RefCreateBatchToSpaceNdTest<RefBatchToSpaceNdWorkload, armnn::DataType::Float16>();
}

BOOST_AUTO_TEST_CASE(CreateBatchToSpaceNdUint8)
{
    RefCreateBatchToSpaceNdTest<RefBatchToSpaceNdWorkload, armnn::DataType::QAsymmU8>();
}

BOOST_AUTO_TEST_CASE(CreateBatchToSpaceNdQSymm16)
{
    RefCreateBatchToSpaceNdTest<RefBatchToSpaceNdWorkload, armnn::DataType::QSymmS16>();
}
752 
753 template <typename L2NormalizationWorkloadType, armnn::DataType DataType>
754 static void RefCreateL2NormalizationTest(DataLayout dataLayout)
755 {
756  Graph graph;
757  RefWorkloadFactory factory = GetFactory();
758  auto workload =
759  CreateL2NormalizationWorkloadTest<L2NormalizationWorkloadType, DataType>(factory, graph, dataLayout);
760 
761  TensorShape inputShape;
762  TensorShape outputShape;
763 
764  switch (dataLayout)
765  {
766  case DataLayout::NHWC:
767  inputShape = { 5, 50, 67, 20 };
768  outputShape = { 5, 50, 67, 20 };
769  break;
770  case DataLayout::NCHW:
771  default:
772  inputShape = { 5, 20, 50, 67 };
773  outputShape = { 5, 20, 50, 67 };
774  break;
775  }
776 
777  // Checks that outputs and inputs are as we expect them (see definition of CreateL2NormalizationWorkloadTest).
778  CheckInputOutput(std::move(workload), TensorInfo(inputShape, DataType), TensorInfo(outputShape, DataType));
779 }
780 
// L2-normalization workload creation across data types and both layouts.
BOOST_AUTO_TEST_CASE(CreateL2NormalizationFloat32)
{
    RefCreateL2NormalizationTest<RefL2NormalizationWorkload, armnn::DataType::Float32>(DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreateL2NormalizationFloat32Nhwc)
{
    RefCreateL2NormalizationTest<RefL2NormalizationWorkload, armnn::DataType::Float32>(DataLayout::NHWC);
}

BOOST_AUTO_TEST_CASE(CreateL2NormalizationInt16)
{
    RefCreateL2NormalizationTest<RefL2NormalizationWorkload, armnn::DataType::QSymmS16>(DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreateL2NormalizationInt16Nhwc)
{
    RefCreateL2NormalizationTest<RefL2NormalizationWorkload, armnn::DataType::QSymmS16>(DataLayout::NHWC);
}

BOOST_AUTO_TEST_CASE(CreateL2NormalizationUint8)
{
    RefCreateL2NormalizationTest<RefL2NormalizationWorkload, armnn::DataType::QAsymmU8>(DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreateL2NormalizationUint8Nhwc)
{
    RefCreateL2NormalizationTest<RefL2NormalizationWorkload, armnn::DataType::QAsymmU8>(DataLayout::NHWC);
}
810 
811 template <typename ReshapeWorkloadType, armnn::DataType DataType>
812 static void RefCreateReshapeWorkloadTest()
813 {
814  Graph graph;
815  RefWorkloadFactory factory = GetFactory();
816  auto workload = CreateReshapeWorkloadTest<ReshapeWorkloadType, DataType>(factory, graph);
817 
818  // Checks that outputs and inputs are as we expect them (see definition of CreateReshapeWorkloadTest).
819  CheckInputOutput(
820  std::move(workload),
821  TensorInfo({ 4, 1 }, DataType),
822  TensorInfo({ 1, 4 }, DataType));
823 }
824 
// Reshape workload creation across supported data types.
BOOST_AUTO_TEST_CASE(CreateReshapeWorkloadFloat32)
{
    RefCreateReshapeWorkloadTest<RefReshapeWorkload, armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateReshapeWorkloadQuantisedAsymm8)
{
    RefCreateReshapeWorkloadTest<RefReshapeWorkload, armnn::DataType::QAsymmU8>();
}

BOOST_AUTO_TEST_CASE(CreateReshapeWorkloadQuantisedSymm16)
{
    RefCreateReshapeWorkloadTest<RefReshapeWorkload, armnn::DataType::QSymmS16>();
}
839 
840 template <typename ConcatWorkloadType, armnn::DataType DataType>
841 static void RefCreateConcatWorkloadTest(const armnn::TensorShape& outputShape,
842  unsigned int concatAxis)
843 {
844  Graph graph;
845  RefWorkloadFactory factory = GetFactory();
846  auto workload = CreateConcatWorkloadTest<ConcatWorkloadType, DataType>(factory, graph, outputShape, concatAxis);
847 
848  CheckInputsOutput(std::move(workload),
849  TensorInfo({ 2, 3, 2, 5 }, DataType),
850  TensorInfo({ 2, 3, 2, 5 }, DataType),
851  TensorInfo(outputShape, DataType));
852 }
853 
// Concat workload creation across concat axes 0-3 and several data types.
BOOST_AUTO_TEST_CASE(CreateConcatDim0Float32Workload)
{
    RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::Float32>({ 4, 3, 2, 5 }, 0);
}

BOOST_AUTO_TEST_CASE(CreateConcatDim0Float16Workload)
{
    RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::Float16>({ 4, 3, 2, 5 }, 0);
}

BOOST_AUTO_TEST_CASE(CreateConcatDim0Uint8Workload)
{
    RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::QAsymmU8>({ 4, 3, 2, 5 }, 0);
}

// NOTE(review): named "Uint16" but exercises QSymmS16 — pre-existing naming
// quirk, preserved because renaming would change the test ID.
BOOST_AUTO_TEST_CASE(CreateConcatDim0Uint16Workload)
{
    RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::QSymmS16>({ 4, 3, 2, 5 }, 0);
}

BOOST_AUTO_TEST_CASE(CreateConcatDim1Float32Workload)
{
    RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::Float32>({ 2, 6, 2, 5 }, 1);
}

BOOST_AUTO_TEST_CASE(CreateConcatDim1Uint8Workload)
{
    RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::QAsymmU8>({ 2, 6, 2, 5 }, 1);
}

BOOST_AUTO_TEST_CASE(CreateConcatDim2Float32Workload)
{
    RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::Float32>({ 2, 3, 4, 5 }, 2);
}

BOOST_AUTO_TEST_CASE(CreateConcatDim2Uint8Workload)
{
    RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::QAsymmU8>({ 2, 3, 4, 5 }, 2);
}

BOOST_AUTO_TEST_CASE(CreateConcatDim3Float32Workload)
{
    RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::Float32>({ 2, 3, 2, 10 }, 3);
}

BOOST_AUTO_TEST_CASE(CreateConcatDim3Uint8Workload)
{
    RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::QAsymmU8>({ 2, 3, 2, 10 }, 3);
}
903 
904 template <typename ConstantWorkloadType, armnn::DataType DataType>
905 static void RefCreateConstantWorkloadTest(const armnn::TensorShape& outputShape)
906 {
907  armnn::Graph graph;
908  RefWorkloadFactory factory = GetFactory();
909  auto workload = CreateConstantWorkloadTest<ConstantWorkloadType, DataType>(factory, graph, outputShape);
910 
911  // Check output is as expected
912  auto queueDescriptor = workload->GetData();
913  auto outputHandle = boost::polymorphic_downcast<RefTensorHandle*>(queueDescriptor.m_Outputs[0]);
914  BOOST_TEST((outputHandle->GetTensorInfo() == TensorInfo(outputShape, DataType)));
915 }
916 
917 BOOST_AUTO_TEST_CASE(CreateConstantUint8Workload)
918 {
919  RefCreateConstantWorkloadTest<RefConstantWorkload, armnn::DataType::QAsymmU8>({ 2, 3, 2, 10 });
920 }
921 
922 BOOST_AUTO_TEST_CASE(CreateConstantInt16Workload)
923 {
924  RefCreateConstantWorkloadTest<RefConstantWorkload, armnn::DataType::QSymmS16>({ 2, 3, 2, 10 });
925 }
926 
927 BOOST_AUTO_TEST_CASE(CreateConstantFloat32Workload)
928 {
929  RefCreateConstantWorkloadTest<RefConstantWorkload, armnn::DataType::Float32>({ 2, 3, 2, 10 });
930 }
931 
932 BOOST_AUTO_TEST_CASE(CreateConstantSigned32Workload)
933 {
934  RefCreateConstantWorkloadTest<RefConstantWorkload, armnn::DataType::Signed32>({ 2, 3, 2, 10 });
935 }
936 
937 static void RefCreatePreluWorkloadTest(const armnn::TensorShape& inputShape,
938  const armnn::TensorShape& alphaShape,
939  const armnn::TensorShape& outputShape,
940  armnn::DataType dataType)
941 {
942  armnn::Graph graph;
943  RefWorkloadFactory factory;
944  auto workload = CreatePreluWorkloadTest<RefPreluWorkload>(factory,
945  graph,
946  inputShape,
947  alphaShape,
948  outputShape,
949  dataType);
950 
951  // Check output is as expected
952  auto queueDescriptor = workload->GetData();
953  auto outputHandle = boost::polymorphic_downcast<RefTensorHandle*>(queueDescriptor.m_Outputs[0]);
954  BOOST_TEST((outputHandle->GetTensorInfo() == TensorInfo(outputShape, dataType)));
955 }
956 
957 BOOST_AUTO_TEST_CASE(CreatePreluFloat32Workload)
958 {
959  RefCreatePreluWorkloadTest({ 1, 4, 1, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 }, armnn::DataType::Float32);
960 }
961 
962 BOOST_AUTO_TEST_CASE(CreatePreluFloat16Workload)
963 {
964  RefCreatePreluWorkloadTest({ 1, 4, 1, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 }, armnn::DataType::Float16);
965 }
966 
967 BOOST_AUTO_TEST_CASE(CreatePreluUint8Workload)
968 {
969  RefCreatePreluWorkloadTest({ 1, 4, 1, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 }, armnn::DataType::QAsymmU8);
970 }
971 
972 BOOST_AUTO_TEST_CASE(CreatePreluInt16Workload)
973 {
974  RefCreatePreluWorkloadTest({ 1, 4, 1, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 }, armnn::DataType::QSymmS16);
975 }
976 
977 BOOST_AUTO_TEST_CASE(CreatePreluFloat32NoBroadcastWorkload)
978 {
979  BOOST_CHECK_THROW(RefCreatePreluWorkloadTest({ 1, 4, 7, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 },
982 }
983 
984 BOOST_AUTO_TEST_CASE(CreatePreluFloat16NoBroadcastWorkload)
985 {
986  BOOST_CHECK_THROW(RefCreatePreluWorkloadTest({ 1, 4, 7, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 },
989 }
990 
991 BOOST_AUTO_TEST_CASE(CreatePreluUint8NoBroadcastWorkload)
992 {
993  BOOST_CHECK_THROW(RefCreatePreluWorkloadTest({ 1, 4, 7, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 },
996 }
997 
998 BOOST_AUTO_TEST_CASE(CreatePreluInt16NoBroadcastWorkload)
999 {
1000  BOOST_CHECK_THROW(RefCreatePreluWorkloadTest({ 1, 4, 7, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 },
1003 }
1004 
1005 template <typename SpaceToDepthWorkloadType, armnn::DataType DataType>
1006 static void RefCreateSpaceToDepthWorkloadTest()
1007 {
1008  Graph graph;
1009  RefWorkloadFactory factory;
1010 
1011  auto workload = CreateSpaceToDepthWorkloadTest<SpaceToDepthWorkloadType, DataType>(factory, graph);
1012 
1013  CheckInputOutput(std::move(workload),
1014  TensorInfo({ 1, 2, 2, 1 }, DataType),
1015  TensorInfo({ 1, 1, 1, 4 }, DataType));
1016 }
1017 
1018 BOOST_AUTO_TEST_CASE(CreateSpaceToDepthWorkloadFloat32)
1019 {
1020  RefCreateSpaceToDepthWorkloadTest<RefSpaceToDepthWorkload, armnn::DataType::Float32>();
1021 }
1022 
1023 BOOST_AUTO_TEST_CASE(CreateSpaceToDepthWorkloadFloat16)
1024 {
1025  RefCreateSpaceToDepthWorkloadTest<RefSpaceToDepthWorkload, armnn::DataType::Float16>();
1026 }
1027 
1028 BOOST_AUTO_TEST_CASE(CreateSpaceToDepthWorkloadQASymm8)
1029 {
1030  RefCreateSpaceToDepthWorkloadTest<RefSpaceToDepthWorkload, armnn::DataType::QAsymmU8>();
1031 }
1032 
1033 BOOST_AUTO_TEST_CASE(CreateSpaceToDepthWorkloadQSymm16)
1034 {
1035  RefCreateSpaceToDepthWorkloadTest<RefSpaceToDepthWorkload, armnn::DataType::QSymmS16>();
1036 }
1037 
1038 template <armnn::DataType DataType>
1039 static void RefCreateStackWorkloadTest(const armnn::TensorShape& inputShape,
1040  const armnn::TensorShape& outputShape,
1041  unsigned int axis,
1042  unsigned int numInputs)
1043 {
1044  armnn::Graph graph;
1045  RefWorkloadFactory factory;
1046  auto workload = CreateStackWorkloadTest<RefStackWorkload, DataType>(factory,
1047  graph,
1048  inputShape,
1049  outputShape,
1050  axis,
1051  numInputs);
1052 
1053  // Check inputs and output are as expected
1054  StackQueueDescriptor queueDescriptor = workload->GetData();
1055  for (unsigned int i = 0; i < numInputs; ++i)
1056  {
1057  auto inputHandle = boost::polymorphic_downcast<RefTensorHandle*>(queueDescriptor.m_Inputs[i]);
1058  BOOST_TEST((inputHandle->GetTensorInfo() == TensorInfo(inputShape, DataType)));
1059  }
1060  auto outputHandle = boost::polymorphic_downcast<RefTensorHandle*>(queueDescriptor.m_Outputs[0]);
1061  BOOST_TEST((outputHandle->GetTensorInfo() == TensorInfo(outputShape, DataType)));
1062 }
1063 
1064 BOOST_AUTO_TEST_CASE(CreateStackFloat32Workload)
1065 {
1066  RefCreateStackWorkloadTest<armnn::DataType::Float32>({ 3, 4, 5 }, { 3, 4, 2, 5 }, 2, 2);
1067 }
1068 
1069 BOOST_AUTO_TEST_CASE(CreateStackUint8Workload)
1070 {
1071  RefCreateStackWorkloadTest<armnn::DataType::QAsymmU8>({ 3, 4, 5 }, { 3, 4, 2, 5 }, 2, 2);
1072 }
1073 
1074 BOOST_AUTO_TEST_CASE(CreateStackUint16Workload)
1075 {
1076  RefCreateStackWorkloadTest<armnn::DataType::QSymmS16>({ 3, 4, 5 }, { 3, 4, 2, 5 }, 2, 2);
1077 }
1078 
BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
RefElementwiseWorkload< std::divides< float >, DivisionQueueDescriptor, StringMapping::RefDivisionWorkload_Execute > RefDivisionWorkload
RefElementwiseWorkload< std::minus< float >, SubtractionQueueDescriptor, StringMapping::RefSubtractionWorkload_Execute > RefSubtractionWorkload
DataLayout
Definition: Types.hpp:49
RefElementwiseWorkload< std::plus< float >, AdditionQueueDescriptor, StringMapping::RefAdditionWorkload_Execute > RefAdditionWorkload
RefElementwiseWorkload< std::multiplies< float >, MultiplicationQueueDescriptor, StringMapping::RefMultiplicationWorkload_Execute > RefMultiplicationWorkload
DataType
Definition: Types.hpp:32
This layer represents an addition operation.
BOOST_AUTO_TEST_SUITE_END()
This layer represents a subtraction operation.
This layer represents a division operation.
BOOST_AUTO_TEST_CASE(CreateActivationFloat32Workload)
This layer represents a multiplication operation.