ArmNN
 20.05
RefCreateWorkloadTests.cpp
Go to the documentation of this file.
1 //
2 // Copyright © 2017 Arm Ltd. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
7 
12 
13 namespace
14 {
15 
16 template<typename Workload>
17 void CheckInputOutput(std::unique_ptr<Workload> workload, const TensorInfo& inputInfo, const TensorInfo& outputInfo)
18 {
19  auto queueDescriptor = workload->GetData();
20  auto inputHandle = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor.m_Inputs[0]);
21  auto outputHandle = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor.m_Outputs[0]);
22  BOOST_TEST((inputHandle->GetTensorInfo() == inputInfo));
23  BOOST_TEST((outputHandle->GetTensorInfo() == outputInfo));
24 }
25 
26 template <typename Workload>
27 void CheckInputsOutput(std::unique_ptr<Workload> workload,
28  const TensorInfo& inputInfo0,
29  const TensorInfo& inputInfo1,
30  const TensorInfo& outputInfo)
31 {
32  auto queueDescriptor = workload->GetData();
33  auto inputHandle0 = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor.m_Inputs[0]);
34  auto inputHandle1 = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor.m_Inputs[1]);
35  auto outputHandle = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor.m_Outputs[0]);
36  BOOST_TEST((inputHandle0->GetTensorInfo() == inputInfo0));
37  BOOST_TEST((inputHandle1->GetTensorInfo() == inputInfo1));
38  BOOST_TEST((outputHandle->GetTensorInfo() == outputInfo));
39 }
40 
41 armnn::RefWorkloadFactory GetFactory()
42 {
43  std::shared_ptr<RefMemoryManager> memoryManager = std::make_shared<RefMemoryManager>();
44  return RefWorkloadFactory(memoryManager);
45 }
46 
47 
48 }
49 
50 BOOST_AUTO_TEST_SUITE(CreateWorkloadRef)
51 
52 template <typename ActivationWorkloadType, armnn::DataType DataType>
53 static void RefCreateActivationWorkloadTest()
54 {
55  Graph graph;
56  RefWorkloadFactory factory = GetFactory();
57  auto workload = CreateActivationWorkloadTest<ActivationWorkloadType, DataType>(factory, graph);
58 
59  // Checks that outputs are as we expect them (see definition of CreateActivationWorkloadTest).
60  CheckInputOutput(std::move(workload),
61  TensorInfo({ 1, 1 }, DataType),
62  TensorInfo({ 1, 1 }, DataType));
63 }
64 
// Activation workload creation for the two supported reference data types.
BOOST_AUTO_TEST_CASE(CreateActivationFloat32Workload)
{
    RefCreateActivationWorkloadTest<RefActivationWorkload, armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateActivationUint8Workload)
{
    RefCreateActivationWorkloadTest<RefActivationWorkload, armnn::DataType::QAsymmU8>();
}
74 
75 template <typename WorkloadType,
76  typename DescriptorType,
77  typename LayerType,
79 static void RefCreateElementwiseWorkloadTest()
80 {
81  Graph graph;
82  RefWorkloadFactory factory = GetFactory();
83  auto workload = CreateElementwiseWorkloadTest<WorkloadType, DescriptorType, LayerType, DataType>(
84  factory, graph);
85 
86  CheckInputsOutput(std::move(workload),
87  TensorInfo({ 2, 3 }, DataType),
88  TensorInfo({ 2, 3 }, DataType),
89  TensorInfo({ 2, 3 }, DataType));
90 }
91 
92 BOOST_AUTO_TEST_CASE(CreateAdditionFloatWorkload)
93 {
94  RefCreateElementwiseWorkloadTest<RefAdditionWorkload,
98 }
99 
100 BOOST_AUTO_TEST_CASE(CreateAdditionUint8Workload)
101 {
102  RefCreateElementwiseWorkloadTest<RefAdditionWorkload,
106 }
107 
108 BOOST_AUTO_TEST_CASE(CreateAdditionInt16Workload)
109 {
110  RefCreateElementwiseWorkloadTest<RefAdditionWorkload,
114 }
115 
116 BOOST_AUTO_TEST_CASE(CreateSubtractionFloat32Workload)
117 {
118  RefCreateElementwiseWorkloadTest<RefSubtractionWorkload,
122 }
123 
124 BOOST_AUTO_TEST_CASE(CreateSubtractionFloat16Workload)
125 {
126  RefCreateElementwiseWorkloadTest<RefSubtractionWorkload,
130 }
131 
132 BOOST_AUTO_TEST_CASE(CreateSubtractionUint8Workload)
133 {
134  RefCreateElementwiseWorkloadTest<RefSubtractionWorkload,
138 }
139 
140 BOOST_AUTO_TEST_CASE(CreateSubtractionInt16Workload)
141 {
142  RefCreateElementwiseWorkloadTest<RefSubtractionWorkload,
146 }
147 
148 BOOST_AUTO_TEST_CASE(CreateMultiplicationFloatWorkload)
149 {
150  RefCreateElementwiseWorkloadTest<RefMultiplicationWorkload,
154 }
155 
156 BOOST_AUTO_TEST_CASE(CreateMultiplicationUint8Workload)
157 {
158  RefCreateElementwiseWorkloadTest<RefMultiplicationWorkload,
162 }
163 
164 BOOST_AUTO_TEST_CASE(CreateMultiplicationInt16Workload)
165 {
166  RefCreateElementwiseWorkloadTest<RefMultiplicationWorkload,
170 }
171 
172 BOOST_AUTO_TEST_CASE(CreateDivisionFloat32Workload)
173 {
174  RefCreateElementwiseWorkloadTest<RefDivisionWorkload,
178 }
179 
180 BOOST_AUTO_TEST_CASE(CreateDivisionFloat16Workload)
181 {
182  RefCreateElementwiseWorkloadTest<RefDivisionWorkload,
186 }
187 
188 BOOST_AUTO_TEST_CASE(CreateDivisionUint8Workload)
189 {
190  RefCreateElementwiseWorkloadTest<RefDivisionWorkload,
194 }
195 
196 BOOST_AUTO_TEST_CASE(CreateDivisionInt16Workload)
197 {
198  RefCreateElementwiseWorkloadTest<RefDivisionWorkload,
202 }
203 
204 template <typename BatchNormalizationWorkloadType, armnn::DataType DataType>
205 static void RefCreateBatchNormalizationWorkloadTest(DataLayout dataLayout)
206 {
207  Graph graph;
208  RefWorkloadFactory factory = GetFactory();
209  auto workload = CreateBatchNormalizationWorkloadTest<BatchNormalizationWorkloadType, DataType>(factory,
210  graph,
211  dataLayout);
212 
213  TensorShape inputShape;
214  TensorShape outputShape;
215 
216  switch (dataLayout)
217  {
218  case DataLayout::NHWC:
219  inputShape = { 2, 4, 4, 3 };
220  outputShape = { 2, 4, 4, 3 };
221  break;
222  case DataLayout::NCHW:
223  default:
224  inputShape = { 2, 3, 4, 4 };
225  outputShape = { 2, 3, 4, 4 };
226  break;
227  }
228 
229  // Checks that outputs and inputs are as we expect them (see definition of CreateBatchNormalizationWorkloadTest).
230  CheckInputOutput(std::move(workload), TensorInfo(inputShape, DataType), TensorInfo(outputShape, DataType));
231 }
232 
// Batch-normalization workload creation across the four supported data types
// (Float32, Float16, QAsymmU8, QSymmS16), each in NCHW and NHWC layouts.
BOOST_AUTO_TEST_CASE(CreateBatchNormalizationFloat32Workload)
{
    RefCreateBatchNormalizationWorkloadTest<RefBatchNormalizationWorkload,armnn::DataType::Float32>
        (DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreateBatchNormalizationFloat32WorkloadNhwc)
{
    RefCreateBatchNormalizationWorkloadTest<RefBatchNormalizationWorkload, armnn::DataType::Float32>
        (DataLayout::NHWC);
}

BOOST_AUTO_TEST_CASE(CreateBatchNormalizationFloat16Workload)
{
    RefCreateBatchNormalizationWorkloadTest<RefBatchNormalizationWorkload,armnn::DataType::Float16>
        (DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreateBatchNormalizationFloat16WorkloadNhwc)
{
    RefCreateBatchNormalizationWorkloadTest<RefBatchNormalizationWorkload, armnn::DataType::Float16>
        (DataLayout::NHWC);
}

BOOST_AUTO_TEST_CASE(CreateBatchNormalizationUint8Workload)
{
    RefCreateBatchNormalizationWorkloadTest<RefBatchNormalizationWorkload, armnn::DataType::QAsymmU8>
        (DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreateBatchNormalizationUint8WorkloadNhwc)
{
    RefCreateBatchNormalizationWorkloadTest<RefBatchNormalizationWorkload, armnn::DataType::QAsymmU8>
        (DataLayout::NHWC);
}

BOOST_AUTO_TEST_CASE(CreateBatchNormalizationInt16Workload)
{
    RefCreateBatchNormalizationWorkloadTest<RefBatchNormalizationWorkload, armnn::DataType::QSymmS16>
        (DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreateBatchNormalizationInt16WorkloadNhwc)
{
    RefCreateBatchNormalizationWorkloadTest<RefBatchNormalizationWorkload, armnn::DataType::QSymmS16>
        (DataLayout::NHWC);
}
280 
281 BOOST_AUTO_TEST_CASE(CreateConvertFp16ToFp32Float32Workload)
282 {
283  Graph graph;
284  RefWorkloadFactory factory = GetFactory();
285  auto workload = CreateConvertFp16ToFp32WorkloadTest<RefConvertFp16ToFp32Workload>(factory, graph);
286 
287  // Checks that outputs and inputs are as we expect them
288  CheckInputOutput(
289  std::move(workload), TensorInfo({1, 3, 2, 3}, DataType::Float16), TensorInfo({1, 3, 2, 3}, DataType::Float32));
290 }
291 
292 BOOST_AUTO_TEST_CASE(CreateConvertFp32ToFp16Float16Workload)
293 {
294  Graph graph;
295  RefWorkloadFactory factory = GetFactory();
296  auto workload = CreateConvertFp32ToFp16WorkloadTest<RefConvertFp32ToFp16Workload>(factory, graph);
297 
298  // Checks that outputs and inputs are as we expect them
299  CheckInputOutput(
300  std::move(workload), TensorInfo({1, 3, 2, 3}, DataType::Float32), TensorInfo({1, 3, 2, 3}, DataType::Float16));
301 }
302 
303 static void RefCreateConvolution2dWorkloadTest(DataLayout dataLayout = DataLayout::NCHW)
304 {
305  Graph graph;
306  RefWorkloadFactory factory = GetFactory();
307  auto workload = CreateConvolution2dWorkloadTest<RefConvolution2dWorkload, DataType::Float32>
308  (factory, graph, dataLayout);
309 
310  TensorShape inputShape = (dataLayout == DataLayout::NCHW) ? std::initializer_list<unsigned int>({2, 3, 8, 16})
311  : std::initializer_list<unsigned int>({2, 8, 16, 3});
312  TensorShape outputShape = (dataLayout == DataLayout::NCHW) ? std::initializer_list<unsigned int>({2, 2, 2, 10})
313  : std::initializer_list<unsigned int>({2, 2, 10, 2});
314 
315  // Checks that outputs and inputs are as we expect them (see definition of CreateConvolution2dWorkloadTest).
316  CheckInputOutput(std::move(workload),
317  TensorInfo(inputShape, DataType::Float32),
318  TensorInfo(outputShape, DataType::Float32));
319 }
320 
// Float32 convolution workload creation in both data layouts.
BOOST_AUTO_TEST_CASE(CreateConvolution2dFloatNchwWorkload)
{
    RefCreateConvolution2dWorkloadTest(DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreateConvolution2dFloatNhwcWorkload)
{
    RefCreateConvolution2dWorkloadTest(DataLayout::NHWC);
}
330 
331 static void RefCreateDepthwiseConvolutionWorkloadTest(DataLayout dataLayout)
332 {
333  Graph graph;
334  RefWorkloadFactory factory = GetFactory();
335  auto workload = CreateDepthwiseConvolution2dWorkloadTest<RefDepthwiseConvolution2dWorkload, DataType::Float32>
336  (factory, graph, dataLayout);
337 
338  TensorShape inputShape = (dataLayout == DataLayout::NCHW) ? std::initializer_list<unsigned int>({ 2, 2, 5, 5 })
339  : std::initializer_list<unsigned int>({ 2, 5, 5, 2 });
340  TensorShape outputShape = (dataLayout == DataLayout::NCHW) ? std::initializer_list<unsigned int>({ 2, 2, 5, 5 })
341  : std::initializer_list<unsigned int>({ 2, 5, 5, 2 });
342 
343  // Checks that inputs/outputs are as we expect them (see definition of CreateDepthwiseConvolution2dWorkloadTest).
344  CheckInputOutput(std::move(workload),
345  TensorInfo(inputShape, DataType::Float32),
346  TensorInfo(outputShape, DataType::Float32));
347 }
348 
// Depthwise convolution workload creation, NHWC layout only.
BOOST_AUTO_TEST_CASE(CreateDepthwiseConvolutionFloat32NhwcWorkload)
{
    RefCreateDepthwiseConvolutionWorkloadTest(DataLayout::NHWC);
}
353 
354 template <typename FullyConnectedWorkloadType, armnn::DataType DataType>
355 static void RefCreateFullyConnectedWorkloadTest()
356 {
357  Graph graph;
358  RefWorkloadFactory factory = GetFactory();
359  auto workload = CreateFullyConnectedWorkloadTest<FullyConnectedWorkloadType, DataType>(factory, graph);
360 
361  // Checks that outputs and inputs are as we expect them (see definition of CreateFullyConnectedWorkloadTest).
362  float inputsQScale = DataType == armnn::DataType::QAsymmU8 ? 1.0f : 0.0;
363  float outputQScale = DataType == armnn::DataType::QAsymmU8 ? 2.0f : 0.0;
364  CheckInputOutput(std::move(workload),
365  TensorInfo({ 3, 1, 4, 5 }, DataType, inputsQScale),
366  TensorInfo({ 3, 7 }, DataType, outputQScale));
367 }
368 
// Fully-connected workload creation for Float32, QAsymmU8 and QSymmS16.
BOOST_AUTO_TEST_CASE(CreateFullyConnectedWorkloadFloat32)
{
    RefCreateFullyConnectedWorkloadTest<RefFullyConnectedWorkload, armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateFullyConnectedWorkloadQuantisedAsymm8)
{
    RefCreateFullyConnectedWorkloadTest<RefFullyConnectedWorkload, armnn::DataType::QAsymmU8>();
}

BOOST_AUTO_TEST_CASE(CreateFullyConnectedWorkloadQuantisedSymm16)
{
    RefCreateFullyConnectedWorkloadTest<RefFullyConnectedWorkload, armnn::DataType::QSymmS16>();
}
383 
384 template <typename NormalizationWorkloadType, armnn::DataType DataType>
385 static void RefCreateNormalizationWorkloadTest(DataLayout dataLayout)
386 {
387  Graph graph;
388  RefWorkloadFactory factory = GetFactory();
389  auto workload = CreateNormalizationWorkloadTest<NormalizationWorkloadType, DataType>(factory, graph, dataLayout);
390 
391  TensorShape inputShape;
392  TensorShape outputShape;
393 
394  switch (dataLayout)
395  {
396  case DataLayout::NHWC:
397  inputShape = { 3, 1, 5, 5 };
398  outputShape = { 3, 1, 5, 5 };
399  break;
400  case DataLayout::NCHW:
401  default:
402  inputShape = { 3, 5, 5, 1 };
403  outputShape = { 3, 5, 5, 1 };
404  break;
405  }
406 
407  // Checks that outputs and inputs are as we expect them (see definition of CreateNormalizationWorkloadTest).
408  CheckInputOutput(std::move(workload), TensorInfo(inputShape, DataType), TensorInfo(outputShape, DataType));
409 }
410 
// Normalization workload creation for Float32, QAsymmU8 and QSymmS16,
// each in both NCHW and NHWC layouts.
BOOST_AUTO_TEST_CASE(CreateRefNormalizationFloat32NchwWorkload)
{
    RefCreateNormalizationWorkloadTest<RefNormalizationWorkload, armnn::DataType::Float32>(DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreateRefNormalizationFloat32NhwcWorkload)
{
    RefCreateNormalizationWorkloadTest<RefNormalizationWorkload, armnn::DataType::Float32>(DataLayout::NHWC);
}

BOOST_AUTO_TEST_CASE(CreateRefNormalizationUint8NchwWorkload)
{
    RefCreateNormalizationWorkloadTest<RefNormalizationWorkload, armnn::DataType::QAsymmU8>(DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreateRefNormalizationUint8NhwcWorkload)
{
    RefCreateNormalizationWorkloadTest<RefNormalizationWorkload, armnn::DataType::QAsymmU8>(DataLayout::NHWC);
}

BOOST_AUTO_TEST_CASE(CreateRefNormalizationInt16NchwWorkload)
{
    RefCreateNormalizationWorkloadTest<RefNormalizationWorkload, armnn::DataType::QSymmS16>(DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreateRefNormalizationInt16NhwcWorkload)
{
    RefCreateNormalizationWorkloadTest<RefNormalizationWorkload, armnn::DataType::QSymmS16>(DataLayout::NHWC);
}
440 
441 template <typename Pooling2dWorkloadType, armnn::DataType DataType>
442 static void RefCreatePooling2dWorkloadTest(DataLayout dataLayout)
443 {
444  Graph graph;
445  RefWorkloadFactory factory = GetFactory();
446  auto workload = CreatePooling2dWorkloadTest<Pooling2dWorkloadType, DataType>(factory, graph, dataLayout);
447 
448  TensorShape inputShape;
449  TensorShape outputShape;
450 
451  switch (dataLayout)
452  {
453  case DataLayout::NHWC:
454  inputShape = { 3, 5, 5, 2 };
455  outputShape = { 3, 2, 4, 2 };
456  break;
457  case DataLayout::NCHW:
458  default:
459  inputShape = { 3, 2, 5, 5 };
460  outputShape = { 3, 2, 2, 4 };
461  }
462 
463  // Checks that outputs and inputs are as we expect them (see definition of CreatePooling2dWorkloadTest).
464  CheckInputOutput(std::move(workload),
465  TensorInfo(inputShape, DataType),
466  TensorInfo(outputShape, DataType));
467 }
468 
// Pooling workload creation for Float32, QAsymmU8 and QSymmS16, each in
// both NCHW and NHWC layouts.
BOOST_AUTO_TEST_CASE(CreatePooling2dFloat32Workload)
{
    RefCreatePooling2dWorkloadTest<RefPooling2dWorkload, armnn::DataType::Float32>(DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreatePooling2dFloat32NhwcWorkload)
{
    RefCreatePooling2dWorkloadTest<RefPooling2dWorkload, armnn::DataType::Float32>(DataLayout::NHWC);
}

BOOST_AUTO_TEST_CASE(CreatePooling2dUint8Workload)
{
    RefCreatePooling2dWorkloadTest<RefPooling2dWorkload, armnn::DataType::QAsymmU8>(DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreatePooling2dUint8NhwcWorkload)
{
    RefCreatePooling2dWorkloadTest<RefPooling2dWorkload, armnn::DataType::QAsymmU8>(DataLayout::NHWC);
}

BOOST_AUTO_TEST_CASE(CreatePooling2dInt16Workload)
{
    RefCreatePooling2dWorkloadTest<RefPooling2dWorkload, armnn::DataType::QSymmS16>(DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreatePooling2dInt16NhwcWorkload)
{
    RefCreatePooling2dWorkloadTest<RefPooling2dWorkload, armnn::DataType::QSymmS16>(DataLayout::NHWC);
}
498 
499 template <typename SoftmaxWorkloadType, armnn::DataType DataType>
500 static void RefCreateSoftmaxWorkloadTest()
501 {
502  Graph graph;
503  RefWorkloadFactory factory = GetFactory();
504  auto workload = CreateSoftmaxWorkloadTest<SoftmaxWorkloadType, DataType>(factory, graph);
505 
506  // Checks that outputs and inputs are as we expect them (see definition of CreateSoftmaxWorkloadTest).
507 
508  armnn::TensorInfo tensorInfo({4, 1}, DataType);
510  {
511  tensorInfo.SetQuantizationOffset(0);
512  tensorInfo.SetQuantizationScale(1.f / 256);
513  }
515  {
516  tensorInfo.SetQuantizationOffset(-128);
517  tensorInfo.SetQuantizationScale(1.f / 256);
518  }
519  CheckInputOutput(
520  std::move(workload),
521  tensorInfo,
522  tensorInfo);
523 }
524 
// Softmax workload creation for Float32, Float16, QAsymmU8 and QSymmS16.
BOOST_AUTO_TEST_CASE(CreateSoftmaxFloat32Workload)
{
    RefCreateSoftmaxWorkloadTest<RefSoftmaxWorkload, armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateSoftmaxFloat16Workload)
{
    RefCreateSoftmaxWorkloadTest<RefSoftmaxWorkload, armnn::DataType::Float16>();
}

BOOST_AUTO_TEST_CASE(CreateSoftmaxQuantisedAsymm8Workload)
{
    RefCreateSoftmaxWorkloadTest<RefSoftmaxWorkload, armnn::DataType::QAsymmU8>();
}

BOOST_AUTO_TEST_CASE(CreateSoftmaxQuantisedSymm16Workload)
{
    RefCreateSoftmaxWorkloadTest<RefSoftmaxWorkload, armnn::DataType::QSymmS16>();
}
544 
545 template <typename SplitterWorkloadType, armnn::DataType DataType>
546 static void RefCreateSplitterWorkloadTest()
547 {
548  Graph graph;
549  RefWorkloadFactory factory = GetFactory();
550  auto workload = CreateSplitterWorkloadTest<SplitterWorkloadType, DataType>(factory, graph);
551 
552  // Checks that outputs are as we expect them (see definition of CreateSplitterWorkloadTest).
553  SplitterQueueDescriptor queueDescriptor = workload->GetData();
554  auto inputHandle = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor.m_Inputs[0]);
555  BOOST_TEST((inputHandle->GetTensorInfo() == TensorInfo({ 5, 7, 7 }, DataType)));
556 
557  auto outputHandle0 = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor.m_Outputs[0]);
558  BOOST_TEST((outputHandle0->GetTensorInfo() == TensorInfo({ 1, 7, 7 }, DataType)));
559 
560  auto outputHandle1 = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor.m_Outputs[1]);
561  BOOST_TEST((outputHandle1->GetTensorInfo() == TensorInfo({ 2, 7, 7 }, DataType)));
562 
563  auto outputHandle2 = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor.m_Outputs[2]);
564  BOOST_TEST((outputHandle2->GetTensorInfo() == TensorInfo({ 2, 7, 7 }, DataType)));
565 }
566 
// Splitter workload creation for Float32, Float16 and QAsymmU8.
BOOST_AUTO_TEST_CASE(CreateSplitterFloat32Workload)
{
    RefCreateSplitterWorkloadTest<RefSplitterWorkload, armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateSplitterFloat16Workload)
{
    RefCreateSplitterWorkloadTest<RefSplitterWorkload, armnn::DataType::Float16>();
}

BOOST_AUTO_TEST_CASE(CreateSplitterUint8Workload)
{
    RefCreateSplitterWorkloadTest<RefSplitterWorkload, armnn::DataType::QAsymmU8>();
}
581 
582 template <typename SplitterWorkloadType, typename ConcatWorkloadType, armnn::DataType DataType>
583 static void RefCreateSplitterConcatWorkloadTest()
584 {
585  // Tests that it is possible to decide which output of the splitter layer
586  // should be lined to which input of the concat layer.
587  // We tested that is is possible to specify 0th output
588  // of the splitter to be the 1st input to the concat and the 1st output of the splitter to be 0th input
589  // of the concat.
590 
591  Graph graph;
592  RefWorkloadFactory factory = GetFactory();
593  auto workloads = CreateSplitterConcatWorkloadTest<SplitterWorkloadType, ConcatWorkloadType, DataType>
594  (factory, graph);
595 
596  auto wlSplitter = std::move(workloads.first);
597  auto wlConcat = std::move(workloads.second);
598 
599  //Checks that the index of inputs/outputs matches what we declared on InputDescriptor construction.
600  armnn::RefTensorHandle* sOut0 = dynamic_cast<armnn::RefTensorHandle*>(wlSplitter->GetData().m_Outputs[0]);
601  armnn::RefTensorHandle* sOut1 = dynamic_cast<armnn::RefTensorHandle*>(wlSplitter->GetData().m_Outputs[1]);
602  armnn::RefTensorHandle* mIn0 = dynamic_cast<armnn::RefTensorHandle*>(wlConcat->GetData().m_Inputs[0]);
603  armnn::RefTensorHandle* mIn1 = dynamic_cast<armnn::RefTensorHandle*>(wlConcat->GetData().m_Inputs[1]);
604 
605  BOOST_TEST(sOut0);
606  BOOST_TEST(sOut1);
607  BOOST_TEST(mIn0);
608  BOOST_TEST(mIn1);
609 
610  bool validDataPointers = (sOut0 == mIn1) && (sOut1 == mIn0);
611 
612  BOOST_TEST(validDataPointers);
613 }
614 
// Splitter-to-concat wiring check for Float32, Float16 and QAsymmU8.
BOOST_AUTO_TEST_CASE(CreateSplitterConcatFloat32)
{
    RefCreateSplitterConcatWorkloadTest<RefSplitterWorkload, RefConcatWorkload, DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateSplitterConcatFloat16)
{
    RefCreateSplitterConcatWorkloadTest<RefSplitterWorkload, RefConcatWorkload, DataType::Float16>();
}

BOOST_AUTO_TEST_CASE(CreateSplitterConcatUint8)
{
    RefCreateSplitterConcatWorkloadTest<RefSplitterWorkload, RefConcatWorkload, DataType::QAsymmU8>();
}
629 
630 template <typename SplitterWorkloadType, typename ActivationWorkloadType, armnn::DataType DataType>
631 static void RefCreateSingleOutputMultipleInputsTest()
632 {
633  // Tests that it is possible to assign multiple (two) different layers to each of the outputs of a splitter layer.
634  // We created a splitter with two outputs. That each of those outputs is used by two different activation layers.
635 
636  Graph graph;
637  RefWorkloadFactory factory = GetFactory();
638  std::unique_ptr<SplitterWorkloadType> wlSplitter;
639  std::unique_ptr<ActivationWorkloadType> wlActiv0_0;
640  std::unique_ptr<ActivationWorkloadType> wlActiv0_1;
641  std::unique_ptr<ActivationWorkloadType> wlActiv1_0;
642  std::unique_ptr<ActivationWorkloadType> wlActiv1_1;
643 
644  CreateSplitterMultipleInputsOneOutputWorkloadTest<SplitterWorkloadType,
645  ActivationWorkloadType, DataType>(factory, graph, wlSplitter, wlActiv0_0, wlActiv0_1, wlActiv1_0, wlActiv1_1);
646 
647  armnn::RefTensorHandle* sOut0 = dynamic_cast<armnn::RefTensorHandle*>(wlSplitter->GetData().m_Outputs[0]);
648  armnn::RefTensorHandle* sOut1 = dynamic_cast<armnn::RefTensorHandle*>(wlSplitter->GetData().m_Outputs[1]);
649  armnn::RefTensorHandle* activ0_0Im = dynamic_cast<armnn::RefTensorHandle*>(wlActiv0_0->GetData().m_Inputs[0]);
650  armnn::RefTensorHandle* activ0_1Im = dynamic_cast<armnn::RefTensorHandle*>(wlActiv0_1->GetData().m_Inputs[0]);
651  armnn::RefTensorHandle* activ1_0Im = dynamic_cast<armnn::RefTensorHandle*>(wlActiv1_0->GetData().m_Inputs[0]);
652  armnn::RefTensorHandle* activ1_1Im = dynamic_cast<armnn::RefTensorHandle*>(wlActiv1_1->GetData().m_Inputs[0]);
653 
654 
655  BOOST_TEST(sOut0);
656  BOOST_TEST(sOut1);
657  BOOST_TEST(activ0_0Im);
658  BOOST_TEST(activ0_1Im);
659  BOOST_TEST(activ1_0Im);
660  BOOST_TEST(activ1_1Im);
661 
662  bool validDataPointers = (sOut0 == activ0_0Im) && (sOut0 == activ0_1Im) &&
663  (sOut1 == activ1_0Im) && (sOut1 == activ1_1Im);
664 
665  BOOST_TEST(validDataPointers);
666 }
667 
668 BOOST_AUTO_TEST_CASE(CreateSingleOutputMultipleInputsFloat32)
669 {
670  RefCreateSingleOutputMultipleInputsTest<RefSplitterWorkload, RefActivationWorkload,
672 }
673 
674 BOOST_AUTO_TEST_CASE(CreateSingleOutputMultipleInputsUint8)
675 {
676  RefCreateSingleOutputMultipleInputsTest<RefSplitterWorkload, RefActivationWorkload,
678 }
679 
680 template <typename ResizeBilinearWorkloadType, armnn::DataType DataType>
681 static void RefCreateResizeBilinearTest(DataLayout dataLayout)
682 {
683  Graph graph;
684  RefWorkloadFactory factory = GetFactory();
685  auto workload = CreateResizeBilinearWorkloadTest<ResizeBilinearWorkloadType, DataType>(factory, graph, dataLayout);
686 
687  TensorShape inputShape;
688  TensorShape outputShape;
689 
690  switch (dataLayout)
691  {
692  case DataLayout::NHWC:
693  inputShape = { 2, 4, 4, 3 };
694  outputShape = { 2, 2, 2, 3 };
695  break;
696  case DataLayout::NCHW:
697  default:
698  inputShape = { 2, 3, 4, 4 };
699  outputShape = { 2, 3, 2, 2 };
700  }
701 
702  // Checks that outputs and inputs are as we expect them (see definition of CreateResizeBilinearWorkloadTest).
703  CheckInputOutput(std::move(workload),
704  TensorInfo(inputShape, DataType),
705  TensorInfo(outputShape, DataType));
706 }
707 
// Resize-bilinear workload creation for Float32, Float16, QAsymmU8 and
// QSymmS16 (NCHW), plus a Float32 NHWC variant.
BOOST_AUTO_TEST_CASE(CreateResizeBilinearFloat32)
{
    RefCreateResizeBilinearTest<RefResizeWorkload, armnn::DataType::Float32>(DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreateResizeBilinearFloat16)
{
    RefCreateResizeBilinearTest<RefResizeWorkload, armnn::DataType::Float16>(DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreateResizeBilinearUint8)
{
    RefCreateResizeBilinearTest<RefResizeWorkload, armnn::DataType::QAsymmU8>(DataLayout::NCHW);
}

// Note: the test name says "Asymm16" but the data type used is QSymmS16.
BOOST_AUTO_TEST_CASE(CreateResizeBilinearQuantisedAsymm16)
{
    RefCreateResizeBilinearTest<RefResizeWorkload, armnn::DataType::QSymmS16>(DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreateResizeBilinearFloat32Nhwc)
{
    RefCreateResizeBilinearTest<RefResizeWorkload, armnn::DataType::Float32>(DataLayout::NHWC);
}
732 
733 template <typename BatchToSpaceNdWorkloadType, armnn::DataType DataType>
734 static void RefCreateBatchToSpaceNdTest()
735 {
736  Graph graph;
737  RefWorkloadFactory factory;
738 
739  auto workload = CreateBatchToSpaceNdWorkloadTest<BatchToSpaceNdWorkloadType, DataType>(factory, graph);
740 
741  CheckInputOutput(std::move(workload),
742  TensorInfo({ 1, 1, 1, 1 }, DataType),
743  TensorInfo({ 1, 1, 1, 1 }, DataType));
744 }
745 
// Batch-to-space-ND workload creation for Float32, Float16, QAsymmU8 and QSymmS16.
BOOST_AUTO_TEST_CASE(CreateBatchToSpaceNdFloat32)
{
    RefCreateBatchToSpaceNdTest<RefBatchToSpaceNdWorkload, armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateBatchToSpaceNdFloat16)
{
    RefCreateBatchToSpaceNdTest<RefBatchToSpaceNdWorkload, armnn::DataType::Float16>();
}

BOOST_AUTO_TEST_CASE(CreateBatchToSpaceNdUint8)
{
    RefCreateBatchToSpaceNdTest<RefBatchToSpaceNdWorkload, armnn::DataType::QAsymmU8>();
}

BOOST_AUTO_TEST_CASE(CreateBatchToSpaceNdQSymm16)
{
    RefCreateBatchToSpaceNdTest<RefBatchToSpaceNdWorkload, armnn::DataType::QSymmS16>();
}
765 
766 template <typename L2NormalizationWorkloadType, armnn::DataType DataType>
767 static void RefCreateL2NormalizationTest(DataLayout dataLayout)
768 {
769  Graph graph;
770  RefWorkloadFactory factory = GetFactory();
771  auto workload =
772  CreateL2NormalizationWorkloadTest<L2NormalizationWorkloadType, DataType>(factory, graph, dataLayout);
773 
774  TensorShape inputShape;
775  TensorShape outputShape;
776 
777  switch (dataLayout)
778  {
779  case DataLayout::NHWC:
780  inputShape = { 5, 50, 67, 20 };
781  outputShape = { 5, 50, 67, 20 };
782  break;
783  case DataLayout::NCHW:
784  default:
785  inputShape = { 5, 20, 50, 67 };
786  outputShape = { 5, 20, 50, 67 };
787  break;
788  }
789 
790  // Checks that outputs and inputs are as we expect them (see definition of CreateL2NormalizationWorkloadTest).
791  CheckInputOutput(std::move(workload), TensorInfo(inputShape, DataType), TensorInfo(outputShape, DataType));
792 }
793 
// L2-normalization workload creation for Float32, QSymmS16 and QAsymmU8,
// each in both NCHW and NHWC layouts.
BOOST_AUTO_TEST_CASE(CreateL2NormalizationFloat32)
{
    RefCreateL2NormalizationTest<RefL2NormalizationWorkload, armnn::DataType::Float32>(DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreateL2NormalizationFloat32Nhwc)
{
    RefCreateL2NormalizationTest<RefL2NormalizationWorkload, armnn::DataType::Float32>(DataLayout::NHWC);
}

BOOST_AUTO_TEST_CASE(CreateL2NormalizationInt16)
{
    RefCreateL2NormalizationTest<RefL2NormalizationWorkload, armnn::DataType::QSymmS16>(DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreateL2NormalizationInt16Nhwc)
{
    RefCreateL2NormalizationTest<RefL2NormalizationWorkload, armnn::DataType::QSymmS16>(DataLayout::NHWC);
}

BOOST_AUTO_TEST_CASE(CreateL2NormalizationUint8)
{
    RefCreateL2NormalizationTest<RefL2NormalizationWorkload, armnn::DataType::QAsymmU8>(DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(CreateL2NormalizationUint8Nhwc)
{
    RefCreateL2NormalizationTest<RefL2NormalizationWorkload, armnn::DataType::QAsymmU8>(DataLayout::NHWC);
}
823 
824 template <typename ReshapeWorkloadType, armnn::DataType DataType>
825 static void RefCreateReshapeWorkloadTest()
826 {
827  Graph graph;
828  RefWorkloadFactory factory = GetFactory();
829  auto workload = CreateReshapeWorkloadTest<ReshapeWorkloadType, DataType>(factory, graph);
830 
831  // Checks that outputs and inputs are as we expect them (see definition of CreateReshapeWorkloadTest).
832  CheckInputOutput(
833  std::move(workload),
834  TensorInfo({ 4, 1 }, DataType),
835  TensorInfo({ 1, 4 }, DataType));
836 }
837 
// Reshape workload creation for Float32, QAsymmU8 and QSymmS16.
BOOST_AUTO_TEST_CASE(CreateReshapeWorkloadFloat32)
{
    RefCreateReshapeWorkloadTest<RefReshapeWorkload, armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateReshapeWorkloadQuantisedAsymm8)
{
    RefCreateReshapeWorkloadTest<RefReshapeWorkload, armnn::DataType::QAsymmU8>();
}

BOOST_AUTO_TEST_CASE(CreateReshapeWorkloadQuantisedSymm16)
{
    RefCreateReshapeWorkloadTest<RefReshapeWorkload, armnn::DataType::QSymmS16>();
}
852 
853 template <typename ConcatWorkloadType, armnn::DataType DataType>
854 static void RefCreateConcatWorkloadTest(const armnn::TensorShape& outputShape,
855  unsigned int concatAxis)
856 {
857  Graph graph;
858  RefWorkloadFactory factory = GetFactory();
859  auto workload = CreateConcatWorkloadTest<ConcatWorkloadType, DataType>(factory, graph, outputShape, concatAxis);
860 
861  CheckInputsOutput(std::move(workload),
862  TensorInfo({ 2, 3, 2, 5 }, DataType),
863  TensorInfo({ 2, 3, 2, 5 }, DataType),
864  TensorInfo(outputShape, DataType));
865 }
866 
// Concat workload creation across axes 0-3, with the output shape grown
// along the concatenation axis; exercised for several data types.
BOOST_AUTO_TEST_CASE(CreateConcatDim0Float32Workload)
{
    RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::Float32>({ 4, 3, 2, 5 }, 0);
}

BOOST_AUTO_TEST_CASE(CreateConcatDim0Float16Workload)
{
    RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::Float16>({ 4, 3, 2, 5 }, 0);
}

BOOST_AUTO_TEST_CASE(CreateConcatDim0Uint8Workload)
{
    RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::QAsymmU8>({ 4, 3, 2, 5 }, 0);
}

BOOST_AUTO_TEST_CASE(CreateConcatDim0Uint16Workload)
{
    RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::QSymmS16>({ 4, 3, 2, 5 }, 0);
}

BOOST_AUTO_TEST_CASE(CreateConcatDim1Float32Workload)
{
    RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::Float32>({ 2, 6, 2, 5 }, 1);
}

BOOST_AUTO_TEST_CASE(CreateConcatDim1Uint8Workload)
{
    RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::QAsymmU8>({ 2, 6, 2, 5 }, 1);
}

BOOST_AUTO_TEST_CASE(CreateConcatDim2Float32Workload)
{
    RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::Float32>({ 2, 3, 4, 5 }, 2);
}

BOOST_AUTO_TEST_CASE(CreateConcatDim2Uint8Workload)
{
    RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::QAsymmU8>({ 2, 3, 4, 5 }, 2);
}

BOOST_AUTO_TEST_CASE(CreateConcatDim3Float32Workload)
{
    RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::Float32>({ 2, 3, 2, 10 }, 3);
}

BOOST_AUTO_TEST_CASE(CreateConcatDim3Uint8Workload)
{
    RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::QAsymmU8>({ 2, 3, 2, 10 }, 3);
}
916 
917 template <typename ConstantWorkloadType, armnn::DataType DataType>
918 static void RefCreateConstantWorkloadTest(const armnn::TensorShape& outputShape)
919 {
920  armnn::Graph graph;
921  RefWorkloadFactory factory = GetFactory();
922  auto workload = CreateConstantWorkloadTest<ConstantWorkloadType, DataType>(factory, graph, outputShape);
923 
924  // Check output is as expected
925  auto queueDescriptor = workload->GetData();
926  auto outputHandle = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor.m_Outputs[0]);
927  BOOST_TEST((outputHandle->GetTensorInfo() == TensorInfo(outputShape, DataType)));
928 }
929 
// Constant workload creation, one case per supported reference data type.
BOOST_AUTO_TEST_CASE(CreateConstantUint8Workload)
{
    RefCreateConstantWorkloadTest<RefConstantWorkload, armnn::DataType::QAsymmU8>({ 2, 3, 2, 10 });
}

BOOST_AUTO_TEST_CASE(CreateConstantInt16Workload)
{
    RefCreateConstantWorkloadTest<RefConstantWorkload, armnn::DataType::QSymmS16>({ 2, 3, 2, 10 });
}

BOOST_AUTO_TEST_CASE(CreateConstantFloat32Workload)
{
    RefCreateConstantWorkloadTest<RefConstantWorkload, armnn::DataType::Float32>({ 2, 3, 2, 10 });
}

BOOST_AUTO_TEST_CASE(CreateConstantSigned32Workload)
{
    RefCreateConstantWorkloadTest<RefConstantWorkload, armnn::DataType::Signed32>({ 2, 3, 2, 10 });
}
949 
950 static void RefCreatePreluWorkloadTest(const armnn::TensorShape& inputShape,
951  const armnn::TensorShape& alphaShape,
952  const armnn::TensorShape& outputShape,
953  armnn::DataType dataType)
954 {
955  armnn::Graph graph;
956  RefWorkloadFactory factory;
957  auto workload = CreatePreluWorkloadTest<RefPreluWorkload>(factory,
958  graph,
959  inputShape,
960  alphaShape,
961  outputShape,
962  dataType);
963 
964  // Check output is as expected
965  auto queueDescriptor = workload->GetData();
966  auto outputHandle = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor.m_Outputs[0]);
967  BOOST_TEST((outputHandle->GetTensorInfo() == TensorInfo(outputShape, dataType)));
968 }
969 
// Prelu with broadcastable shapes: input { 1, 4, 1, 2 } and alpha { 5, 4, 3, 1 }
// broadcast to output { 5, 4, 3, 2 }. One case per supported data type.
BOOST_AUTO_TEST_CASE(CreatePreluFloat32Workload)
{
    RefCreatePreluWorkloadTest({ 1, 4, 1, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 }, armnn::DataType::Float32);
}

BOOST_AUTO_TEST_CASE(CreatePreluFloat16Workload)
{
    RefCreatePreluWorkloadTest({ 1, 4, 1, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 }, armnn::DataType::Float16);
}

BOOST_AUTO_TEST_CASE(CreatePreluUint8Workload)
{
    RefCreatePreluWorkloadTest({ 1, 4, 1, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 }, armnn::DataType::QAsymmU8);
}

BOOST_AUTO_TEST_CASE(CreatePreluInt16Workload)
{
    RefCreatePreluWorkloadTest({ 1, 4, 1, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 }, armnn::DataType::QSymmS16);
}
989 
990 BOOST_AUTO_TEST_CASE(CreatePreluFloat32NoBroadcastWorkload)
991 {
992  BOOST_CHECK_THROW(RefCreatePreluWorkloadTest({ 1, 4, 7, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 },
995 }
996 
997 BOOST_AUTO_TEST_CASE(CreatePreluFloat16NoBroadcastWorkload)
998 {
999  BOOST_CHECK_THROW(RefCreatePreluWorkloadTest({ 1, 4, 7, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 },
1002 }
1003 
1004 BOOST_AUTO_TEST_CASE(CreatePreluUint8NoBroadcastWorkload)
1005 {
1006  BOOST_CHECK_THROW(RefCreatePreluWorkloadTest({ 1, 4, 7, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 },
1009 }
1010 
1011 BOOST_AUTO_TEST_CASE(CreatePreluInt16NoBroadcastWorkload)
1012 {
1013  BOOST_CHECK_THROW(RefCreatePreluWorkloadTest({ 1, 4, 7, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 },
1016 }
1017 
1018 template <typename SpaceToDepthWorkloadType, armnn::DataType DataType>
1019 static void RefCreateSpaceToDepthWorkloadTest()
1020 {
1021  Graph graph;
1022  RefWorkloadFactory factory;
1023 
1024  auto workload = CreateSpaceToDepthWorkloadTest<SpaceToDepthWorkloadType, DataType>(factory, graph);
1025 
1026  CheckInputOutput(std::move(workload),
1027  TensorInfo({ 1, 2, 2, 1 }, DataType),
1028  TensorInfo({ 1, 1, 1, 4 }, DataType));
1029 }
1030 
// SpaceToDepth workload creation, one case per supported reference data type.
BOOST_AUTO_TEST_CASE(CreateSpaceToDepthWorkloadFloat32)
{
    RefCreateSpaceToDepthWorkloadTest<RefSpaceToDepthWorkload, armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateSpaceToDepthWorkloadFloat16)
{
    RefCreateSpaceToDepthWorkloadTest<RefSpaceToDepthWorkload, armnn::DataType::Float16>();
}

BOOST_AUTO_TEST_CASE(CreateSpaceToDepthWorkloadQASymm8)
{
    RefCreateSpaceToDepthWorkloadTest<RefSpaceToDepthWorkload, armnn::DataType::QAsymmU8>();
}

BOOST_AUTO_TEST_CASE(CreateSpaceToDepthWorkloadQSymm16)
{
    RefCreateSpaceToDepthWorkloadTest<RefSpaceToDepthWorkload, armnn::DataType::QSymmS16>();
}
1050 
1051 template <armnn::DataType DataType>
1052 static void RefCreateStackWorkloadTest(const armnn::TensorShape& inputShape,
1053  const armnn::TensorShape& outputShape,
1054  unsigned int axis,
1055  unsigned int numInputs)
1056 {
1057  armnn::Graph graph;
1058  RefWorkloadFactory factory;
1059  auto workload = CreateStackWorkloadTest<RefStackWorkload, DataType>(factory,
1060  graph,
1061  inputShape,
1062  outputShape,
1063  axis,
1064  numInputs);
1065 
1066  // Check inputs and output are as expected
1067  StackQueueDescriptor queueDescriptor = workload->GetData();
1068  for (unsigned int i = 0; i < numInputs; ++i)
1069  {
1070  auto inputHandle = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor.m_Inputs[i]);
1071  BOOST_TEST((inputHandle->GetTensorInfo() == TensorInfo(inputShape, DataType)));
1072  }
1073  auto outputHandle = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor.m_Outputs[0]);
1074  BOOST_TEST((outputHandle->GetTensorInfo() == TensorInfo(outputShape, DataType)));
1075 }
1076 
// Stack two { 3, 4, 5 } inputs along axis 2 into a { 3, 4, 2, 5 } output,
// one case per supported reference data type.
BOOST_AUTO_TEST_CASE(CreateStackFloat32Workload)
{
    RefCreateStackWorkloadTest<armnn::DataType::Float32>({ 3, 4, 5 }, { 3, 4, 2, 5 }, 2, 2);
}

BOOST_AUTO_TEST_CASE(CreateStackUint8Workload)
{
    RefCreateStackWorkloadTest<armnn::DataType::QAsymmU8>({ 3, 4, 5 }, { 3, 4, 2, 5 }, 2, 2);
}

BOOST_AUTO_TEST_CASE(CreateStackUint16Workload)
{
    RefCreateStackWorkloadTest<armnn::DataType::QSymmS16>({ 3, 4, 5 }, { 3, 4, 2, 5 }, 2, 2);
}
1091 
1092 template <typename QLstmWorkloadType>
1093 static void RefCreateQLstmWorkloadTest()
1094 {
1095  Graph graph;
1096  RefWorkloadFactory factory;
1097 
1098  auto workload = CreateQLstmWorkloadTest<QLstmWorkloadType>(factory, graph);
1099 
1100  armnn::TensorInfo inputInfo({2 , 4}, armnn::DataType::QAsymmS8, 0.0078125f, 0);
1101 
1102  armnn::TensorInfo cellStateInfo({2 , 4}, armnn::DataType::QSymmS16, 3.05176e-05f, 0);
1103 
1104  armnn::TensorInfo outputInfo({2 , 4}, armnn::DataType::QAsymmS8, 0.007f, 0);
1105 
1106  QLstmQueueDescriptor queueDescriptor = workload->GetData();
1107  auto inputHandle = boost::polymorphic_downcast<RefTensorHandle*>(queueDescriptor.m_Inputs[0]);
1108  auto cellStateOutHandle = boost::polymorphic_downcast<RefTensorHandle*>(queueDescriptor.m_Outputs[1]);
1109  auto outputHandle = boost::polymorphic_downcast<RefTensorHandle*>(queueDescriptor.m_Outputs[2]);
1110 
1111  BOOST_TEST((inputHandle->GetTensorInfo() == inputInfo));
1112  BOOST_TEST((cellStateOutHandle->GetTensorInfo() == cellStateInfo));
1113  BOOST_TEST((outputHandle->GetTensorInfo() == outputInfo));
1114 }
1115 
// QLstm workload creation against the reference backend implementation.
BOOST_AUTO_TEST_CASE(CreateQLstmWorkloadTest)
{
    RefCreateQLstmWorkloadTest<RefQLstmWorkload>();
}
1120 
BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
RefElementwiseWorkload< std::divides< float >, DivisionQueueDescriptor, StringMapping::RefDivisionWorkload_Execute > RefDivisionWorkload
RefElementwiseWorkload< std::minus< float >, SubtractionQueueDescriptor, StringMapping::RefSubtractionWorkload_Execute > RefSubtractionWorkload
DataLayout
Definition: Types.hpp:49
RefElementwiseWorkload< std::plus< float >, AdditionQueueDescriptor, StringMapping::RefAdditionWorkload_Execute > RefAdditionWorkload
RefElementwiseWorkload< std::multiplies< float >, MultiplicationQueueDescriptor, StringMapping::RefMultiplicationWorkload_Execute > RefMultiplicationWorkload
DataType
Definition: Types.hpp:32
This layer represents an addition operation.
BOOST_AUTO_TEST_SUITE_END()
This layer represents a subtraction operation.
This layer represents a division operation.
BOOST_AUTO_TEST_CASE(CreateActivationFloat32Workload)
void SetQuantizationOffset(int32_t offset)
Definition: Tensor.cpp:276
This layer represents a multiplication operation.