RefEndToEndTests.cpp
//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

// (Backend end-to-end test implementation headers are included here but omitted from this listing.)

#include <doctest/doctest.h>

TEST_SUITE("RefEndToEnd")
{
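// armnn::Compute::CpuRef is the reference backend (plain C++ kernels run on the CPU), so every
// test in this suite exercises the reference implementations end to end.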
std::vector<armnn::BackendId> defaultBackends = {armnn::Compute::CpuRef};

// Abs
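// ElementwiseUnarySimpleEndToEnd is a shared helper from the end-to-end test headers; it builds a
// single-layer network for the given unary operation, runs it on the listed backends and compares
// the result against expectedOutput (its input data is defined inside the helper, not shown here).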
TEST_CASE("RefAbsEndToEndTestFloat32")
{
    std::vector<float> expectedOutput =
    {
        1.f, 1.f, 1.f, 1.f, 5.f, 5.f, 5.f, 5.f,
        3.f, 3.f, 3.f, 3.f, 4.f, 4.f, 4.f, 4.f
    };

    ElementwiseUnarySimpleEndToEnd<armnn::DataType::Float32>(defaultBackends,
                                                             UnaryOperation::Abs,
                                                             expectedOutput);
}

TEST_CASE("RefAbsEndToEndTestUint8")
{
    // Note the expected output will be implicitly quantized by the below test function
    std::vector<float> expectedOutput =
    {
        1.f, 1.f, 1.f, 1.f, 5.f, 5.f, 5.f, 5.f,
        3.f, 3.f, 3.f, 3.f, 4.f, 4.f, 4.f, 4.f
    };

    ElementwiseUnarySimpleEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends,
                                                              UnaryOperation::Abs,
                                                              expectedOutput);
}

TEST_CASE("RefAbsEndToEndTestInt16")
{
    // Note the expected output will be implicitly quantized by the below test function
    std::vector<float> expectedOutput =
    {
        1.f, 1.f, 1.f, 1.f, 5.f, 5.f, 5.f, 5.f,
        3.f, 3.f, 3.f, 3.f, 4.f, 4.f, 4.f, 4.f
    };

    ElementwiseUnarySimpleEndToEnd<armnn::DataType::QSymmS16>(defaultBackends,
                                                              UnaryOperation::Abs,
                                                              expectedOutput);
}

// Constant
TEST_CASE("ConstantUsage_Ref_Float32")
{
    CHECK(ConstantUsageFloat32Test(defaultBackends));
}

TEST_CASE("ConstantUsage_Ref_Uint8")
{
    CHECK(ConstantUsageUint8Test(defaultBackends));
}

TEST_CASE("Unsigned8")
{
    using namespace armnn;

    // Create runtime in which test will run
    IRuntime::CreationOptions options;
    IRuntimePtr runtime(IRuntime::Create(options));

    // Builds up the structure of the network.
    INetworkPtr net(INetwork::Create());

    IConnectableLayer* input = net->AddInputLayer(0, "input");
    IConnectableLayer* softmax = net->AddSoftmaxLayer(SoftmaxDescriptor(), "softmax");
    IConnectableLayer* output = net->AddOutputLayer(0, "output");

    input->GetOutputSlot(0).Connect(softmax->GetInputSlot(0));
    softmax->GetOutputSlot(0).Connect(output->GetInputSlot(0));

    // Sets the tensors in the network.
    TensorInfo inputTensorInfo(TensorShape({1, 5}), DataType::QAsymmU8);
    inputTensorInfo.SetQuantizationOffset(100);
    inputTensorInfo.SetQuantizationScale(10000.0f);
    input->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);

    TensorInfo outputTensorInfo(TensorShape({1, 5}), DataType::QAsymmU8);
    outputTensorInfo.SetQuantizationOffset(0);
    outputTensorInfo.SetQuantizationScale(1.0f/255.0f);
    softmax->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // optimize the network
    IOptimizedNetworkPtr optNet = Optimize(*net, defaultBackends, runtime->GetDeviceSpec());

    // Loads it into the runtime.
    NetworkId netId;
    auto error = runtime->LoadNetwork(netId, std::move(optNet));
    CHECK(error == Status::Success);

    // Creates structures for input & output.
    std::vector<uint8_t> inputData
    {
        1, 10, 3, 200, 5 // Some inputs - one of which is sufficiently larger than the others to saturate softmax.
    };
    std::vector<uint8_t> outputData(5);

    TensorInfo inputTensorInfo2 = runtime->GetInputTensorInfo(netId, 0);
    inputTensorInfo2.SetConstant(true);
    armnn::InputTensors inputTensors
    {
        {0, armnn::ConstTensor(inputTensorInfo2, inputData.data())}
    };
    armnn::OutputTensors outputTensors
    {
        {0, armnn::Tensor(runtime->GetOutputTensorInfo(netId, 0), outputData.data())}
    };

    // Does the inference.
    runtime->EnqueueWorkload(netId, inputTensors, outputTensors);

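    // With the input scale of 10000 and offset of 100, the quantized value 200 dequantizes to
    // (200 - 100) * 10000 = 1e6, which dwarfs the other inputs, so softmax puts essentially all
    // probability on index 3; at the output scale of 1/255 (offset 0) a probability of 1.0
    // quantizes to 255 and the rest to 0.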
    // Checks the results.
    CHECK(outputData[0] == 0);
    CHECK(outputData[1] == 0);
    CHECK(outputData[2] == 0);
    CHECK(outputData[3] == 255); // softmax has been saturated.
    CHECK(outputData[4] == 0);
}

TEST_CASE("TrivialAdd")
{
    // This test was designed to match "AddTwo" in android nn/runtime/test/TestTrivialModel.cpp.

    using namespace armnn;

    // Create runtime in which test will run
    IRuntime::CreationOptions options;
    IRuntimePtr runtime(IRuntime::Create(options));

    // Builds up the structure of the network.
    INetworkPtr net(INetwork::Create());

    IConnectableLayer* input1 = net->AddInputLayer(0);
    IConnectableLayer* input2 = net->AddInputLayer(1);
    IConnectableLayer* add = net->AddAdditionLayer();
    IConnectableLayer* output = net->AddOutputLayer(0);

    input1->GetOutputSlot(0).Connect(add->GetInputSlot(0));
    input2->GetOutputSlot(0).Connect(add->GetInputSlot(1));
    add->GetOutputSlot(0).Connect(output->GetInputSlot(0));

    // Sets the tensors in the network.
    TensorInfo tensorInfo(TensorShape({3, 4}), DataType::Float32);
    input1->GetOutputSlot(0).SetTensorInfo(tensorInfo);
    input2->GetOutputSlot(0).SetTensorInfo(tensorInfo);
    add->GetOutputSlot(0).SetTensorInfo(tensorInfo);

    // optimize the network
    IOptimizedNetworkPtr optNet = Optimize(*net, defaultBackends, runtime->GetDeviceSpec());

    // Loads it into the runtime.
    NetworkId netId;
    runtime->LoadNetwork(netId, std::move(optNet));

    // Creates structures for input & output - matching android nn test.
    std::vector<float> input1Data
    {
        1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f, 8.f, 9.f, 10.f, 11.f, 12.f
    };
    std::vector<float> input2Data
    {
        100.f, 200.f, 300.f, 400.f, 500.f, 600.f, 700.f, 800.f, 900.f, 1000.f, 1100.f, 1200.f
    };
    std::vector<float> outputData(12);

    TensorInfo inputTensorInfo = runtime->GetInputTensorInfo(netId, 0);
    inputTensorInfo.SetConstant(true);
    InputTensors inputTensors
    {
        {0, armnn::ConstTensor(inputTensorInfo, input1Data.data())},
        {1, armnn::ConstTensor(inputTensorInfo, input2Data.data())}
    };
    OutputTensors outputTensors
    {
        {0, armnn::Tensor(runtime->GetOutputTensorInfo(netId, 0), outputData.data())}
    };

    // Does the inference.
    runtime->EnqueueWorkload(netId, inputTensors, outputTensors);

    // Checks the results
    CHECK(outputData[0] == 101);
    CHECK(outputData[1] == 202);
    CHECK(outputData[2] == 303);
    CHECK(outputData[3] == 404);
    CHECK(outputData[4] == 505);
    CHECK(outputData[5] == 606);
    CHECK(outputData[6] == 707);
    CHECK(outputData[7] == 808);
    CHECK(outputData[8] == 909);
    CHECK(outputData[9] == 1010);
    CHECK(outputData[10] == 1111);
    CHECK(outputData[11] == 1212);
}

TEST_CASE("MultipleOutputs")
{
    using namespace armnn;

    // Create runtime in which test will run
    IRuntime::CreationOptions options;
    IRuntimePtr runtime(IRuntime::Create(options));

    // Builds up the structure of the network.
    INetworkPtr net(INetwork::Create());

    IConnectableLayer* input = net->AddInputLayer(0);

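    // BoundedReLu computes min(m_A, max(m_B, input)), i.e. it clamps the input to [m_B, m_A]
    // (m_B is 0 unless set), so the three activations below clamp to [-1, 1], [0, 6] and [2, 5].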
    // ReLu1
    ActivationDescriptor activation1Descriptor;
    activation1Descriptor.m_Function = ActivationFunction::BoundedReLu;
    activation1Descriptor.m_A = 1.f;
    activation1Descriptor.m_B = -1.f;
    IConnectableLayer* activation1 = net->AddActivationLayer(activation1Descriptor);

    // ReLu6
    ActivationDescriptor activation2Descriptor;
    activation2Descriptor.m_Function = ActivationFunction::BoundedReLu;
    activation2Descriptor.m_A = 6.0f;
    IConnectableLayer* activation2 = net->AddActivationLayer(activation2Descriptor);

    // BoundedReLu(min=2, max=5)
    ActivationDescriptor activation3Descriptor;
    activation3Descriptor.m_Function = ActivationFunction::BoundedReLu;
    activation3Descriptor.m_A = 5.0f;
    activation3Descriptor.m_B = 2.0f;
    IConnectableLayer* activation3 = net->AddActivationLayer(activation3Descriptor);

    IConnectableLayer* output1 = net->AddOutputLayer(0);
    IConnectableLayer* output2 = net->AddOutputLayer(1);
    IConnectableLayer* output3 = net->AddOutputLayer(2);

    input->GetOutputSlot(0).Connect(activation1->GetInputSlot(0));
    input->GetOutputSlot(0).Connect(activation2->GetInputSlot(0));
    input->GetOutputSlot(0).Connect(activation3->GetInputSlot(0));

    activation1->GetOutputSlot(0).Connect(output1->GetInputSlot(0));
    activation2->GetOutputSlot(0).Connect(output2->GetInputSlot(0));
    activation3->GetOutputSlot(0).Connect(output3->GetInputSlot(0));

    // Sets the tensors in the network.
    TensorInfo tensorInfo(TensorShape({ 10 }), DataType::Float32);
    input->GetOutputSlot(0).SetTensorInfo(tensorInfo);
    activation1->GetOutputSlot(0).SetTensorInfo(tensorInfo);
    activation2->GetOutputSlot(0).SetTensorInfo(tensorInfo);
    activation3->GetOutputSlot(0).SetTensorInfo(tensorInfo);

    // optimize the network
    IOptimizedNetworkPtr optNet = Optimize(*net, defaultBackends, runtime->GetDeviceSpec());

    // Loads it into the runtime.
    NetworkId netId;
    runtime->LoadNetwork(netId, std::move(optNet));

    // Creates structures for input & output.
    const std::vector<float> inputData{ 3.f, 5.f, 2.f, 3.f, 7.f, 0.f, -2.f, -1.f, 3.f, 3.f };

    std::vector<float> output1Data(inputData.size());
    std::vector<float> output2Data(inputData.size());
    std::vector<float> output3Data(inputData.size());

    TensorInfo inputTensorInfo = runtime->GetInputTensorInfo(netId, 0);
    inputTensorInfo.SetConstant(true);
    InputTensors inputTensors
    {
        {0, armnn::ConstTensor(inputTensorInfo, inputData.data())}
    };
    OutputTensors outputTensors
    {
        {0, armnn::Tensor(runtime->GetOutputTensorInfo(netId, 0), output1Data.data())},
        {1, armnn::Tensor(runtime->GetOutputTensorInfo(netId, 1), output2Data.data())},
        {2, armnn::Tensor(runtime->GetOutputTensorInfo(netId, 2), output3Data.data())}
    };

    // Does the inference.
    runtime->EnqueueWorkload(netId, inputTensors, outputTensors);

    // Checks the results.
    CHECK(output1Data == std::vector<float>({ 1.f, 1.f, 1.f, 1.f, 1.f, 0.f, -1.f, -1.f, 1.f, 1.f })); // ReLu1
    CHECK(output2Data == std::vector<float>({ 3.f, 5.f, 2.f, 3.f, 6.f, 0.f, 0.f, 0.f, 3.f, 3.f })); // ReLu6
    CHECK(output3Data == std::vector<float>({ 3.f, 5.f, 2.f, 3.f, 5.f, 2.f, 2.f, 2.f, 3.f, 3.f })); // [2, 5]
}

TEST_CASE("TrivialMin")
{
    using namespace armnn;

    // Create runtime in which test will run
    IRuntime::CreationOptions options;
    IRuntimePtr runtime(IRuntime::Create(options));

    // Builds up the structure of the network.
    INetworkPtr net(INetwork::Create());

    IConnectableLayer* input1 = net->AddInputLayer(0);
    IConnectableLayer* input2 = net->AddInputLayer(1);
    IConnectableLayer* min = net->AddMinimumLayer();
    IConnectableLayer* output = net->AddOutputLayer(0);

    input1->GetOutputSlot(0).Connect(min->GetInputSlot(0));
    input2->GetOutputSlot(0).Connect(min->GetInputSlot(1));
    min->GetOutputSlot(0).Connect(output->GetInputSlot(0));

    // Sets the tensors in the network.
    TensorInfo tensorInfo(TensorShape({1, 1, 1, 4}), DataType::Float32);
    input1->GetOutputSlot(0).SetTensorInfo(tensorInfo);
    input2->GetOutputSlot(0).SetTensorInfo(tensorInfo);
    min->GetOutputSlot(0).SetTensorInfo(tensorInfo);

    // optimize the network
    IOptimizedNetworkPtr optNet = Optimize(*net, defaultBackends, runtime->GetDeviceSpec());

    // Loads it into the runtime.
    NetworkId netId;
    runtime->LoadNetwork(netId, std::move(optNet));

    // Creates structures for input & output - matching android nn test.
    std::vector<float> input1Data
    {
        1.0f, 2.0f, 3.0f, 4.0f
    };
    std::vector<float> input2Data
    {
        2.0f, 1.0f, 5.0f, 2.0f
    };
    std::vector<float> outputData(4);

    TensorInfo inputTensorInfo = runtime->GetInputTensorInfo(netId, 0);
    inputTensorInfo.SetConstant(true);
    InputTensors inputTensors
    {
        {0, armnn::ConstTensor(inputTensorInfo, input1Data.data())},
        {1, armnn::ConstTensor(inputTensorInfo, input2Data.data())}
    };
    OutputTensors outputTensors
    {
        {0, armnn::Tensor(runtime->GetOutputTensorInfo(netId, 0), outputData.data())}
    };

    // Does the inference.
    runtime->EnqueueWorkload(netId, inputTensors, outputTensors);

    // Checks the results
    CHECK(outputData[0] == 1);
    CHECK(outputData[1] == 1);
    CHECK(outputData[2] == 3);
    CHECK(outputData[3] == 2);
}

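// Comparison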
TEST_CASE("RefEqualSimpleEndToEndTest")
{
    const std::vector<uint8_t> expectedOutput({ 1, 1, 1, 1, 0, 0, 0, 0,
                                                0, 0, 0, 0, 1, 1, 1, 1 });

    ComparisonSimpleEndToEnd<armnn::DataType::Float32>(defaultBackends,
                                                       ComparisonOperation::Equal,
                                                       expectedOutput);
}

TEST_CASE("RefGreaterSimpleEndToEndTest")
{
    const std::vector<uint8_t> expectedOutput({ 0, 0, 0, 0, 1, 1, 1, 1,
                                                0, 0, 0, 0, 0, 0, 0, 0 });

    ComparisonSimpleEndToEnd<armnn::DataType::Float32>(defaultBackends,
                                                       ComparisonOperation::Greater,
                                                       expectedOutput);
}

TEST_CASE("RefEqualSimpleEndToEndUint8Test")
{
    const std::vector<uint8_t> expectedOutput({ 1, 1, 1, 1, 0, 0, 0, 0,
                                                0, 0, 0, 0, 1, 1, 1, 1 });

    ComparisonSimpleEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends,
                                                        ComparisonOperation::Equal,
                                                        expectedOutput);
}

TEST_CASE("RefGreaterSimpleEndToEndUint8Test")
{
    const std::vector<uint8_t> expectedOutput({ 0, 0, 0, 0, 1, 1, 1, 1,
                                                0, 0, 0, 0, 0, 0, 0, 0 });

    ComparisonSimpleEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends,
                                                        ComparisonOperation::Greater,
                                                        expectedOutput);
}

TEST_CASE("RefEqualBroadcastEndToEndTest")
{
    const std::vector<uint8_t> expectedOutput({ 1, 0, 1, 1, 0, 0,
                                                0, 0, 0, 0, 0, 0 });

    ComparisonBroadcastEndToEnd<armnn::DataType::Float32>(defaultBackends,
                                                          ComparisonOperation::Equal,
                                                          expectedOutput);
}

TEST_CASE("RefGreaterBroadcastEndToEndTest")
{
    const std::vector<uint8_t> expectedOutput({ 0, 1, 0, 0, 0, 1,
                                                1, 1, 1, 1, 1, 1 });

    ComparisonBroadcastEndToEnd<armnn::DataType::Float32>(defaultBackends,
                                                          ComparisonOperation::Greater,
                                                          expectedOutput);
}

TEST_CASE("RefEqualBroadcastEndToEndUint8Test")
{
    const std::vector<uint8_t> expectedOutput({ 1, 0, 1, 1, 0, 0,
                                                0, 0, 0, 0, 0, 0 });

    ComparisonBroadcastEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends,
                                                           ComparisonOperation::Equal,
                                                           expectedOutput);
}

TEST_CASE("RefGreaterBroadcastEndToEndUint8Test")
{
    const std::vector<uint8_t> expectedOutput({ 0, 1, 0, 0, 0, 1,
                                                1, 1, 1, 1, 1, 1 });

    ComparisonBroadcastEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends,
                                                           ComparisonOperation::Greater,
                                                           expectedOutput);
}

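// BatchToSpaceNd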
TEST_CASE("RefBatchToSpaceNdEndToEndFloat32NHWCTest")
{
    BatchToSpaceNdEndToEnd<armnn::DataType::Float32>(defaultBackends, armnn::DataLayout::NHWC);
}

TEST_CASE("RefBatchToSpaceNdEndToEndUint8NHWCTest")
{
    BatchToSpaceNdEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends, armnn::DataLayout::NHWC);
}

TEST_CASE("RefBatchToSpaceNdEndToEndQSymm16NHWCTest")
{
    BatchToSpaceNdEndToEnd<armnn::DataType::QSymmS16>(defaultBackends, armnn::DataLayout::NHWC);
}

TEST_CASE("RefBatchToSpaceNdEndToEndFloat32NCHWTest")
{
    BatchToSpaceNdEndToEnd<armnn::DataType::Float32>(defaultBackends, armnn::DataLayout::NCHW);
}

TEST_CASE("RefBatchToSpaceNdEndToEndUint8NCHWTest")
{
    BatchToSpaceNdEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends, armnn::DataLayout::NCHW);
}

TEST_CASE("RefBatchToSpaceNdEndToEndQSymm16NCHWTest")
{
    BatchToSpaceNdEndToEnd<armnn::DataType::QSymmS16>(defaultBackends, armnn::DataLayout::NCHW);
}

TEST_CASE("RefBatchToSpaceNdEndToEndComplexFloat32NHWCTest")
{
    BatchToSpaceNdComplexEndToEnd<armnn::DataType::Float32>(defaultBackends, armnn::DataLayout::NHWC);
}

TEST_CASE("RefBatchToSpaceNdEndToEndComplexUint8NHWCTest")
{
    BatchToSpaceNdComplexEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends, armnn::DataLayout::NHWC);
}

TEST_CASE("RefBatchToSpaceNdEndToEndComplexQSymm16NHWCTest")
{
    BatchToSpaceNdComplexEndToEnd<armnn::DataType::QSymmS16>(defaultBackends, armnn::DataLayout::NHWC);
}

TEST_CASE("RefBatchToSpaceNdEndToEndComplexFloat32NCHWTest")
{
    BatchToSpaceNdComplexEndToEnd<armnn::DataType::Float32>(defaultBackends, armnn::DataLayout::NCHW);
}

TEST_CASE("RefBatchToSpaceNdEndToEndComplexUint8NCHWTest")
{
    BatchToSpaceNdComplexEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends, armnn::DataLayout::NCHW);
}

TEST_CASE("RefBatchToSpaceNdEndToEndComplexQSymm16NCHWTest")
{
    BatchToSpaceNdComplexEndToEnd<armnn::DataType::QSymmS16>(defaultBackends, armnn::DataLayout::NCHW);
}

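// ChannelShuffle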
TEST_CASE("RefChannelShuffleFloatTest")
{
    ChannelShuffleEndToEnd<armnn::DataType::Float32>(defaultBackends);
}

TEST_CASE("RefChannelShuffleUint8Test")
{
    ChannelShuffleEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}

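// Concat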
TEST_CASE("RefConcatEndToEndDim0Test")
{
    ConcatDim0EndToEnd<armnn::DataType::Float32>(defaultBackends);
}

TEST_CASE("RefConcatEndToEndDim0Uint8Test")
{
    ConcatDim0EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}

TEST_CASE("RefConcatEndToEndDim1Test")
{
    ConcatDim1EndToEnd<armnn::DataType::Float32>(defaultBackends);
}

TEST_CASE("RefConcatEndToEndDim1Uint8Test")
{
    ConcatDim1EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}

TEST_CASE("RefConcatEndToEndDim2Test")
{
    ConcatDim2EndToEnd<armnn::DataType::Float32>(defaultBackends);
}

TEST_CASE("RefConcatEndToEndDim2Uint8Test")
{
    ConcatDim2EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}

TEST_CASE("RefConcatEndToEndDim3Test")
{
    ConcatDim3EndToEnd<armnn::DataType::Float32>(defaultBackends);
}

TEST_CASE("RefConcatEndToEndDim3Uint8Test")
{
    ConcatDim3EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}

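// Convolution3d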
TEST_CASE("RefConvolution3dFloat32Test")
{
    Convolution3dEndToEnd<armnn::DataType::Float32, armnn::DataType::Float32>(defaultBackends,
                                                                              armnn::DataLayout::NDHWC);
}

TEST_CASE("RefConvolution3dNcdhwFloat32Test")
{
    Convolution3dEndToEnd<armnn::DataType::Float32, armnn::DataType::Float32>(defaultBackends,
                                                                              armnn::DataLayout::NCDHW);
}

TEST_CASE("RefConvolution3dFloat16Test")
{
    Convolution3dEndToEnd<armnn::DataType::Float16, armnn::DataType::Float16>(defaultBackends,
                                                                              armnn::DataLayout::NDHWC);
}

TEST_CASE("RefConvolution3dUint8Test")
{
    Convolution3dEndToEnd<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(defaultBackends,
                                                                                armnn::DataLayout::NDHWC);
}

TEST_CASE("RefConvolution3dInt8Test")
{
    Convolution3dEndToEnd<armnn::DataType::QAsymmS8, armnn::DataType::Signed32>(defaultBackends,
                                                                                armnn::DataLayout::NDHWC);
}

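// Elu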
TEST_CASE("RefEluEndToEndTestFloat32")
{
    EluEndToEndTest<armnn::DataType::Float32>(defaultBackends);
}

TEST_CASE("RefEluEndToEndTestFloat16")
{
    EluEndToEndTest<armnn::DataType::Float16>(defaultBackends);
}

TEST_CASE("RefEluEndToEndTestBFloat16")
{
    EluEndToEndTest<armnn::DataType::BFloat16>(defaultBackends);
}

TEST_CASE("RefEluEndToEndTestQAsymmS8")
{
    EluEndToEndTest<armnn::DataType::QAsymmS8>(defaultBackends);
}

TEST_CASE("RefEluEndToEndTestQAsymmU8")
{
    EluEndToEndTest<armnn::DataType::QAsymmU8>(defaultBackends);
}

TEST_CASE("RefEluEndToEndTestQSymmS16")
{
    EluEndToEndTest<armnn::DataType::QSymmS16>(defaultBackends);
}

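// Fill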
TEST_CASE("RefFillEndToEndTest")
{
    FillEndToEnd<armnn::DataType::Float32>(defaultBackends);
}

TEST_CASE("RefFillEndToEndTestFloat16")
{
    FillEndToEnd<armnn::DataType::Float16>(defaultBackends);
}

TEST_CASE("RefFillEndToEndTestInt32")
{
    FillEndToEnd<armnn::DataType::Signed32>(defaultBackends);
}

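// FullyConnected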
TEST_CASE("RefFullyConnectedEndToEndTestFloat32")
{
    FullyConnectedWithDynamicWeightsEndToEnd<armnn::DataType::Float32>(defaultBackends);
}

TEST_CASE("RefFullyConnectedEndToEndTestNonConstantWeightsConstantBiasesFloat32")
{
    FullyConnectedWithDynamicOrConstantInputsEndToEnd<armnn::DataType::Float32>(defaultBackends, true, true);
}

TEST_CASE("RefFullyConnectedEndToEndTestConstantWeightsNonConstantBiasesFloat32")
{
    FullyConnectedWithDynamicOrConstantInputsEndToEnd<armnn::DataType::Float32>(defaultBackends, true, false);
}

TEST_CASE("RefFullyConnectedEndToEndTestConstantWeightsTensorInfoNotSet")
{
    FullyConnectedErrorChecking<armnn::DataType::Float32>(defaultBackends, false, true, true, true, false);
}

TEST_CASE("RefFullyConnectedEndToEndTestWeightsNotConnectedExplicitCheck")
{
    FullyConnectedErrorChecking<armnn::DataType::Float32>(defaultBackends, true, true, false, true, true);
}

TEST_CASE("RefFullyConnectedEndToEndTestBiasNotConnectedExplicitCheck")
{
    FullyConnectedErrorChecking<armnn::DataType::Float32>(defaultBackends, true, true, true, false, true);
}

TEST_CASE("RefFullyConnectedEndToEndTestWeightsAndBiasNotConnected")
{
    FullyConnectedErrorChecking<armnn::DataType::Float32>(defaultBackends, false, true, false, false, true);
}

TEST_CASE("RefFullyConnectedEndToEndTestBiasDisabledConnectBias")
{
    FullyConnectedErrorChecking<armnn::DataType::Float32>(defaultBackends, true, false, false, true, true);
}

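// Gather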
TEST_CASE("RefGatherFloatTest")
{
    GatherEndToEnd<armnn::DataType::Float32>(defaultBackends);
}

TEST_CASE("RefGatherUint8Test")
{
    GatherEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}

TEST_CASE("RefGatherInt16Test")
{
    GatherEndToEnd<armnn::DataType::QSymmS16>(defaultBackends);
}

TEST_CASE("RefGatherMultiDimFloatTest")
{
    GatherMultiDimEndToEnd<armnn::DataType::Float32>(defaultBackends);
}

TEST_CASE("RefGatherMultiDimUint8Test")
{
    GatherMultiDimEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}

TEST_CASE("RefGatherMultiDimInt16Test")
{
    GatherMultiDimEndToEnd<armnn::DataType::QSymmS16>(defaultBackends);
}

// DepthToSpace
TEST_CASE("DepthToSpaceEndToEndNchwFloat32")
{
    DepthToSpaceEndToEnd<armnn::DataType::Float32>(defaultBackends, armnn::DataLayout::NCHW);
}

TEST_CASE("DepthToSpaceEndToEndNchwFloat16")
{
    DepthToSpaceEndToEnd<armnn::DataType::Float16>(defaultBackends, armnn::DataLayout::NCHW);
}

TEST_CASE("DepthToSpaceEndToEndNchwUint8")
{
    DepthToSpaceEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends, armnn::DataLayout::NCHW);
}

TEST_CASE("DepthToSpaceEndToEndNchwInt16")
{
    DepthToSpaceEndToEnd<armnn::DataType::QSymmS16>(defaultBackends, armnn::DataLayout::NCHW);
}

TEST_CASE("DepthToSpaceEndToEndNhwcFloat32")
{
    DepthToSpaceEndToEnd<armnn::DataType::Float32>(defaultBackends, armnn::DataLayout::NHWC);
}

TEST_CASE("DepthToSpaceEndToEndNhwcFloat16")
{
    DepthToSpaceEndToEnd<armnn::DataType::Float16>(defaultBackends, armnn::DataLayout::NHWC);
}

TEST_CASE("DepthToSpaceEndToEndNhwcUint8")
{
    DepthToSpaceEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends, armnn::DataLayout::NHWC);
}

TEST_CASE("DepthToSpaceEndToEndNhwcInt16")
{
    DepthToSpaceEndToEnd<armnn::DataType::QSymmS16>(defaultBackends, armnn::DataLayout::NHWC);
}

// Dequantize
TEST_CASE("DequantizeEndToEndSimpleTest")
{
    DequantizeEndToEndSimple<armnn::DataType::QAsymmU8>(defaultBackends);
}

TEST_CASE("DequantizeEndToEndOffsetTest")
{
    DequantizeEndToEndOffset<armnn::DataType::QAsymmU8>(defaultBackends);
}

TEST_CASE("DequantizeEndToEndSimpleInt16Test")
{
    DequantizeEndToEndSimple<armnn::DataType::QSymmS16>(defaultBackends);
}

TEST_CASE("DequantizeEndToEndOffsetInt16Test")
{
    DequantizeEndToEndOffset<armnn::DataType::QSymmS16>(defaultBackends);
}

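// DetectionPostProcess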
TEST_CASE("RefDetectionPostProcessRegularNmsTest")
{
    std::vector<float> boxEncodings({
        0.0f, 0.0f, 0.0f, 0.0f,
        0.0f, 1.0f, 0.0f, 0.0f,
        0.0f, -1.0f, 0.0f, 0.0f,
        0.0f, 0.0f, 0.0f, 0.0f,
        0.0f, 1.0f, 0.0f, 0.0f,
        0.0f, 0.0f, 0.0f, 0.0f
    });
    std::vector<float> scores({
        0.0f, 0.9f, 0.8f,
        0.0f, 0.75f, 0.72f,
        0.0f, 0.6f, 0.5f,
        0.0f, 0.93f, 0.95f,
        0.0f, 0.5f, 0.4f,
        0.0f, 0.3f, 0.2f
    });
    std::vector<float> anchors({
        0.5f, 0.5f, 1.0f, 1.0f,
        0.5f, 0.5f, 1.0f, 1.0f,
        0.5f, 0.5f, 1.0f, 1.0f,
        0.5f, 10.5f, 1.0f, 1.0f,
        0.5f, 10.5f, 1.0f, 1.0f,
        0.5f, 100.5f, 1.0f, 1.0f
    });
    DetectionPostProcessRegularNmsEndToEnd<armnn::DataType::Float32>(defaultBackends, boxEncodings, scores, anchors);
}

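// Helper: quantizes each float element to uint8_t using the scale and offset carried by the given
// TensorInfo, so the quantized DetectionPostProcess tests below can reuse the float reference data.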
inline void QuantizeData(uint8_t* quant, const float* dequant, const TensorInfo& info)
{
    for (size_t i = 0; i < info.GetNumElements(); i++)
    {
        quant[i] = armnn::Quantize<uint8_t>(dequant[i], info.GetQuantizationScale(), info.GetQuantizationOffset());
    }
}

TEST_CASE("RefDetectionPostProcessRegularNmsUint8Test")
{
    armnn::TensorInfo boxEncodingsInfo({ 1, 6, 4 }, armnn::DataType::Float32);
    armnn::TensorInfo scoresInfo({ 1, 6, 3 }, armnn::DataType::Float32);
    armnn::TensorInfo anchorsInfo({ 6, 4 }, armnn::DataType::Float32);

    boxEncodingsInfo.SetQuantizationScale(1.0f);
    boxEncodingsInfo.SetQuantizationOffset(1);
    scoresInfo.SetQuantizationScale(0.01f);
    scoresInfo.SetQuantizationOffset(0);
    anchorsInfo.SetQuantizationScale(0.5f);
    anchorsInfo.SetQuantizationOffset(0);

    std::vector<float> boxEncodings({
        0.0f, 0.0f, 0.0f, 0.0f,
        0.0f, 1.0f, 0.0f, 0.0f,
        0.0f, -1.0f, 0.0f, 0.0f,
        0.0f, 0.0f, 0.0f, 0.0f,
        0.0f, 1.0f, 0.0f, 0.0f,
        0.0f, 0.0f, 0.0f, 0.0f
    });
    std::vector<float> scores({
        0.0f, 0.9f, 0.8f,
        0.0f, 0.75f, 0.72f,
        0.0f, 0.6f, 0.5f,
        0.0f, 0.93f, 0.95f,
        0.0f, 0.5f, 0.4f,
        0.0f, 0.3f, 0.2f
    });
    std::vector<float> anchors({
        0.5f, 0.5f, 1.0f, 1.0f,
        0.5f, 0.5f, 1.0f, 1.0f,
        0.5f, 0.5f, 1.0f, 1.0f,
        0.5f, 10.5f, 1.0f, 1.0f,
        0.5f, 10.5f, 1.0f, 1.0f,
        0.5f, 100.5f, 1.0f, 1.0f
    });

    std::vector<uint8_t> qBoxEncodings(boxEncodings.size(), 0);
    std::vector<uint8_t> qScores(scores.size(), 0);
    std::vector<uint8_t> qAnchors(anchors.size(), 0);
    QuantizeData(qBoxEncodings.data(), boxEncodings.data(), boxEncodingsInfo);
    QuantizeData(qScores.data(), scores.data(), scoresInfo);
    QuantizeData(qAnchors.data(), anchors.data(), anchorsInfo);
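    // The trailing arguments repeat the quantization parameters set on the tensor infos above:
    // box encodings scale/offset 1.0/1, scores 0.01/0, anchors 0.5/0.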
    DetectionPostProcessRegularNmsEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends, qBoxEncodings,
                                                                      qScores, qAnchors,
                                                                      1.0f, 1, 0.01f, 0, 0.5f, 0);
}

TEST_CASE("RefDetectionPostProcessFastNmsTest")
{
    std::vector<float> boxEncodings({
        0.0f, 0.0f, 0.0f, 0.0f,
        0.0f, 1.0f, 0.0f, 0.0f,
        0.0f, -1.0f, 0.0f, 0.0f,
        0.0f, 0.0f, 0.0f, 0.0f,
        0.0f, 1.0f, 0.0f, 0.0f,
        0.0f, 0.0f, 0.0f, 0.0f
    });
    std::vector<float> scores({
        0.0f, 0.9f, 0.8f,
        0.0f, 0.75f, 0.72f,
        0.0f, 0.6f, 0.5f,
        0.0f, 0.93f, 0.95f,
        0.0f, 0.5f, 0.4f,
        0.0f, 0.3f, 0.2f
    });
    std::vector<float> anchors({
        0.5f, 0.5f, 1.0f, 1.0f,
        0.5f, 0.5f, 1.0f, 1.0f,
        0.5f, 0.5f, 1.0f, 1.0f,
        0.5f, 10.5f, 1.0f, 1.0f,
        0.5f, 10.5f, 1.0f, 1.0f,
        0.5f, 100.5f, 1.0f, 1.0f
    });
    DetectionPostProcessFastNmsEndToEnd<armnn::DataType::Float32>(defaultBackends, boxEncodings, scores, anchors);
}

TEST_CASE("RefDetectionPostProcessFastNmsUint8Test")
{
    armnn::TensorInfo boxEncodingsInfo({ 1, 6, 4 }, armnn::DataType::Float32);
    armnn::TensorInfo scoresInfo({ 1, 6, 3 }, armnn::DataType::Float32);
    armnn::TensorInfo anchorsInfo({ 6, 4 }, armnn::DataType::Float32);

    boxEncodingsInfo.SetQuantizationScale(1.0f);
    boxEncodingsInfo.SetQuantizationOffset(1);
    scoresInfo.SetQuantizationScale(0.01f);
    scoresInfo.SetQuantizationOffset(0);
    anchorsInfo.SetQuantizationScale(0.5f);
    anchorsInfo.SetQuantizationOffset(0);

    std::vector<float> boxEncodings({
        0.0f, 0.0f, 0.0f, 0.0f,
        0.0f, 1.0f, 0.0f, 0.0f,
        0.0f, -1.0f, 0.0f, 0.0f,
        0.0f, 0.0f, 0.0f, 0.0f,
        0.0f, 1.0f, 0.0f, 0.0f,
        0.0f, 0.0f, 0.0f, 0.0f
    });
    std::vector<float> scores({
        0.0f, 0.9f, 0.8f,
        0.0f, 0.75f, 0.72f,
        0.0f, 0.6f, 0.5f,
        0.0f, 0.93f, 0.95f,
        0.0f, 0.5f, 0.4f,
        0.0f, 0.3f, 0.2f
    });
    std::vector<float> anchors({
        0.5f, 0.5f, 1.0f, 1.0f,
        0.5f, 0.5f, 1.0f, 1.0f,
        0.5f, 0.5f, 1.0f, 1.0f,
        0.5f, 10.5f, 1.0f, 1.0f,
        0.5f, 10.5f, 1.0f, 1.0f,
        0.5f, 100.5f, 1.0f, 1.0f
    });

    std::vector<uint8_t> qBoxEncodings(boxEncodings.size(), 0);
    std::vector<uint8_t> qScores(scores.size(), 0);
    std::vector<uint8_t> qAnchors(anchors.size(), 0);
    QuantizeData(qBoxEncodings.data(), boxEncodings.data(), boxEncodingsInfo);
    QuantizeData(qScores.data(), scores.data(), scoresInfo);
    QuantizeData(qAnchors.data(), anchors.data(), anchorsInfo);
    DetectionPostProcessFastNmsEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends, qBoxEncodings,
                                                                   qScores, qAnchors,
                                                                   1.0f, 1, 0.01f, 0, 0.5f, 0);
}

// HardSwish
TEST_CASE("RefHardSwishEndToEndTestFloat32")
{
    HardSwishEndToEndTest<armnn::DataType::Float32>(defaultBackends);
}

TEST_CASE("RefHardSwishEndToEndTestFloat16")
{
    HardSwishEndToEndTest<armnn::DataType::Float16>(defaultBackends);
}

TEST_CASE("RefHardSwishEndToEndTestBFloat16")
{
    HardSwishEndToEndTest<armnn::DataType::BFloat16>(defaultBackends);
}

TEST_CASE("RefHardSwishEndToEndTestQAsymmS8")
{
    HardSwishEndToEndTest<armnn::DataType::QAsymmS8>(defaultBackends);
}

TEST_CASE("RefHardSwishEndToEndTestQAsymmU8")
{
    HardSwishEndToEndTest<armnn::DataType::QAsymmU8>(defaultBackends);
}

TEST_CASE("RefHardSwishEndToEndTestQSymmS16")
{
    HardSwishEndToEndTest<armnn::DataType::QSymmS16>(defaultBackends);
}

// LogSoftmax
TEST_CASE("RefLogSoftmaxEndToEndTest")
{
    LogSoftmaxEndToEndTest(defaultBackends);
}

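// Prelu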
TEST_CASE("RefPreluEndToEndTestFloat32")
{
    PreluEndToEndNegativeTest<armnn::DataType::Float32>(defaultBackends);
}

TEST_CASE("RefPreluEndToEndTestUint8")
{
    PreluEndToEndPositiveTest<armnn::DataType::QAsymmU8>(defaultBackends);
}

TEST_CASE("RefPreluEndToEndTestQSymm16")
{
    PreluEndToEndPositiveTest<armnn::DataType::QSymmS16>(defaultBackends);
}

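// SpaceToDepth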
TEST_CASE("RefSpaceToDepthNhwcEndToEndTest1")
{
    SpaceToDepthNhwcEndToEndTest1(defaultBackends);
}

TEST_CASE("RefSpaceToDepthNchwEndToEndTest1")
{
    SpaceToDepthNchwEndToEndTest1(defaultBackends);
}

TEST_CASE("RefSpaceToDepthNhwcEndToEndTest2")
{
    SpaceToDepthNhwcEndToEndTest2(defaultBackends);
}

TEST_CASE("RefSpaceToDepthNchwEndToEndTest2")
{
    SpaceToDepthNchwEndToEndTest2(defaultBackends);
}

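// Splitter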
TEST_CASE("RefSplitter1dEndToEndTest")
{
    Splitter1dEndToEnd<armnn::DataType::Float32>(defaultBackends);
}

TEST_CASE("RefSplitter1dEndToEndUint8Test")
{
    Splitter1dEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}

TEST_CASE("RefSplitter2dDim0EndToEndTest")
{
    Splitter2dDim0EndToEnd<armnn::DataType::Float32>(defaultBackends);
}

TEST_CASE("RefSplitter2dDim1EndToEndTest")
{
    Splitter2dDim1EndToEnd<armnn::DataType::Float32>(defaultBackends);
}

TEST_CASE("RefSplitter2dDim0EndToEndUint8Test")
{
    Splitter2dDim0EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}

TEST_CASE("RefSplitter2dDim1EndToEndUint8Test")
{
    Splitter2dDim1EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}

TEST_CASE("RefSplitter3dDim0EndToEndTest")
{
    Splitter3dDim0EndToEnd<armnn::DataType::Float32>(defaultBackends);
}

TEST_CASE("RefSplitter3dDim1EndToEndTest")
{
    Splitter3dDim1EndToEnd<armnn::DataType::Float32>(defaultBackends);
}

TEST_CASE("RefSplitter3dDim2EndToEndTest")
{
    Splitter3dDim2EndToEnd<armnn::DataType::Float32>(defaultBackends);
}

TEST_CASE("RefSplitter3dDim0EndToEndUint8Test")
{
    Splitter3dDim0EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}

TEST_CASE("RefSplitter3dDim1EndToEndUint8Test")
{
    Splitter3dDim1EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}

TEST_CASE("RefSplitter3dDim2EndToEndUint8Test")
{
    Splitter3dDim2EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}

TEST_CASE("RefSplitter4dDim0EndToEndTest")
{
    Splitter4dDim0EndToEnd<armnn::DataType::Float32>(defaultBackends);
}

TEST_CASE("RefSplitter4dDim1EndToEndTest")
{
    Splitter4dDim1EndToEnd<armnn::DataType::Float32>(defaultBackends);
}

TEST_CASE("RefSplitter4dDim2EndToEndTest")
{
    Splitter4dDim2EndToEnd<armnn::DataType::Float32>(defaultBackends);
}

TEST_CASE("RefSplitter4dDim3EndToEndTest")
{
    Splitter4dDim3EndToEnd<armnn::DataType::Float32>(defaultBackends);
}

TEST_CASE("RefSplitter4dDim0EndToEndUint8Test")
{
    Splitter4dDim0EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}

TEST_CASE("RefSplitter4dDim1EndToEndUint8Test")
{
    Splitter4dDim1EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}

TEST_CASE("RefSplitter4dDim2EndToEndUint8Test")
{
    Splitter4dDim2EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}

TEST_CASE("RefSplitter4dDim3EndToEndUint8Test")
{
    Splitter4dDim3EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}

// TransposeConvolution2d
TEST_CASE("RefTransposeConvolution2dEndToEndFloatNchwTest")
{
    TransposeConvolution2dEndToEnd<armnn::DataType::Float32, armnn::DataType::Float32>(
        defaultBackends, armnn::DataLayout::NCHW);
}

TEST_CASE("RefTransposeConvolution2dEndToEndUint8NchwTest")
{
    TransposeConvolution2dEndToEnd<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
        defaultBackends, armnn::DataLayout::NCHW);
}

TEST_CASE("RefTransposeConvolution2dEndToEndInt16NchwTest")
{
    TransposeConvolution2dEndToEnd<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
        defaultBackends, armnn::DataLayout::NCHW);
}

TEST_CASE("RefTransposeConvolution2dEndToEndFloatNhwcTest")
{
    TransposeConvolution2dEndToEnd<armnn::DataType::Float32, armnn::DataType::Float32>(
        defaultBackends, armnn::DataLayout::NHWC);
}

TEST_CASE("RefTransposeConvolution2dEndToEndUint8NhwcTest")
{
    TransposeConvolution2dEndToEnd<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
        defaultBackends, armnn::DataLayout::NHWC);
}

TEST_CASE("RefTransposeConvolution2dEndToEndInt16NhwcTest")
{
    TransposeConvolution2dEndToEnd<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
        defaultBackends, armnn::DataLayout::NHWC);
}

// Resize Bilinear
TEST_CASE("RefResizeBilinearEndToEndFloatNchwTest")
{
    ResizeBilinearEndToEnd<armnn::DataType::Float32>(defaultBackends, armnn::DataLayout::NCHW);
}

TEST_CASE("RefResizeBilinearEndToEndUint8NchwTest")
{
    ResizeBilinearEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends, armnn::DataLayout::NCHW);
}

TEST_CASE("RefResizeBilinearEndToEndInt16NchwTest")
{
    ResizeBilinearEndToEnd<armnn::DataType::QSymmS16>(defaultBackends, armnn::DataLayout::NCHW);
}

TEST_CASE("RefResizeBilinearEndToEndFloatNhwcTest")
{
    ResizeBilinearEndToEnd<armnn::DataType::Float32>(defaultBackends, armnn::DataLayout::NHWC);
}

TEST_CASE("RefResizeBilinearEndToEndUint8NhwcTest")
{
    ResizeBilinearEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends, armnn::DataLayout::NHWC);
}

TEST_CASE("RefResizeBilinearEndToEndInt16NhwcTest")
{
    ResizeBilinearEndToEnd<armnn::DataType::QSymmS16>(defaultBackends, armnn::DataLayout::NHWC);
}

// Resize NearestNeighbor
TEST_CASE("RefResizeNearestNeighborEndToEndFloatNchwTest")
{
    ResizeNearestNeighborEndToEnd<armnn::DataType::Float32>(defaultBackends, armnn::DataLayout::NCHW);
}

TEST_CASE("RefResizeNearestNeighborEndToEndUint8NchwTest")
{
    ResizeNearestNeighborEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends, armnn::DataLayout::NCHW);
}

TEST_CASE("RefResizeNearestNeighborEndToEndInt16NchwTest")
{
    ResizeNearestNeighborEndToEnd<armnn::DataType::QSymmS16>(defaultBackends, armnn::DataLayout::NCHW);
}

TEST_CASE("RefResizeNearestNeighborEndToEndFloatNhwcTest")
{
    ResizeNearestNeighborEndToEnd<armnn::DataType::Float32>(defaultBackends, armnn::DataLayout::NHWC);
}

TEST_CASE("RefResizeNearestNeighborEndToEndUint8NhwcTest")
{
    ResizeNearestNeighborEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends, armnn::DataLayout::NHWC);
}

TEST_CASE("RefResizeNearestNeighborEndToEndInt16NhwcTest")
{
    ResizeNearestNeighborEndToEnd<armnn::DataType::QSymmS16>(defaultBackends, armnn::DataLayout::NHWC);
}

// InstanceNormalization
TEST_CASE("RefInstanceNormalizationNhwcEndToEndTest1")
{
    InstanceNormalizationNhwcEndToEndTest1(defaultBackends);
}

TEST_CASE("RefInstanceNormalizationNchwEndToEndTest1")
{
    InstanceNormalizationNchwEndToEndTest1(defaultBackends);
}

TEST_CASE("RefInstanceNormalizationNhwcEndToEndTest2")
{
    InstanceNormalizationNhwcEndToEndTest2(defaultBackends);
}

TEST_CASE("RefInstanceNormalizationNchwEndToEndTest2")
{
    InstanceNormalizationNchwEndToEndTest2(defaultBackends);
}

// ArgMinMax
TEST_CASE("RefArgMaxSimpleTest")
{
    ArgMaxEndToEndSimple<armnn::DataType::Float32>(defaultBackends);
}

TEST_CASE("RefArgMaxSimpleUint8Test")
{
    ArgMaxEndToEndSimple<armnn::DataType::QAsymmU8>(defaultBackends);
}

TEST_CASE("RefArgMinSimpleTest")
{
    ArgMinEndToEndSimple<armnn::DataType::Float32>(defaultBackends);
}

TEST_CASE("RefArgMinSimpleUint8Test")
{
    ArgMinEndToEndSimple<armnn::DataType::QAsymmU8>(defaultBackends);
}

TEST_CASE("RefArgMaxAxis0Test")
{
    ArgMaxAxis0EndToEnd<armnn::DataType::Float32>(defaultBackends);
}

TEST_CASE("RefArgMaxAxis0Uint8Test")
{
    ArgMaxAxis0EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}

TEST_CASE("RefArgMinAxis0Test")
{
    ArgMinAxis0EndToEnd<armnn::DataType::Float32>(defaultBackends);
}

TEST_CASE("RefArgMinAxis0Uint8Test")
{
    ArgMinAxis0EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}

TEST_CASE("RefArgMaxAxis1Test")
{
    ArgMaxAxis1EndToEnd<armnn::DataType::Float32>(defaultBackends);
}

TEST_CASE("RefArgMaxAxis1Uint8Test")
{
    ArgMaxAxis1EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}

TEST_CASE("RefArgMinAxis1Test")
{
    ArgMinAxis1EndToEnd<armnn::DataType::Float32>(defaultBackends);
}

TEST_CASE("RefArgMinAxis1Uint8Test")
{
    ArgMinAxis1EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}

TEST_CASE("RefArgMaxAxis2Test")
{
    ArgMaxAxis2EndToEnd<armnn::DataType::Float32>(defaultBackends);
}

TEST_CASE("RefArgMaxAxis2Uint8Test")
{
    ArgMaxAxis2EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}

TEST_CASE("RefArgMinAxis2Test")
{
    ArgMinAxis2EndToEnd<armnn::DataType::Float32>(defaultBackends);
}

TEST_CASE("RefArgMinAxis2Uint8Test")
{
    ArgMinAxis2EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}

TEST_CASE("RefArgMaxAxis3Test")
{
    ArgMaxAxis3EndToEnd<armnn::DataType::Float32>(defaultBackends);
}

TEST_CASE("RefArgMaxAxis3Uint8Test")
{
    ArgMaxAxis3EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}

TEST_CASE("RefArgMinAxis3Test")
{
    ArgMinAxis3EndToEnd<armnn::DataType::Float32>(defaultBackends);
}

TEST_CASE("RefArgMinAxis3Uint8Test")
{
    ArgMinAxis3EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}

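// QLstm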
TEST_CASE("RefQLstmEndToEndTest")
{
    QLstmEndToEnd(defaultBackends);
}

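// Rank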
TEST_CASE("RefRankEndToEndTest")
{
    RankEndToEnd<armnn::DataType::Float32>(defaultBackends);
}

TEST_CASE("RefRankEndToEndTestFloat16")
{
    RankEndToEnd<armnn::DataType::Float16>(defaultBackends);
}

TEST_CASE("RefRankEndToEndTestInt32")
{
    RankEndToEnd<armnn::DataType::Signed32>(defaultBackends);
}

TEST_CASE("RefRankEndToEndTestQAsymmS8")
{
    RankEndToEnd<armnn::DataType::QAsymmS8>(defaultBackends);
}

TEST_CASE("RefRankEndToEndTestQSymmS16")
{
    RankEndToEnd<armnn::DataType::QSymmS16>(defaultBackends);
}

TEST_CASE("RefRankEndToEndTestQSymmS8")
{
    RankEndToEnd<armnn::DataType::QSymmS8>(defaultBackends);
}

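// Force import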
TEST_CASE("RefForceImportWithAlignedBuffersEndToEndTest")
{
    ForceImportWithAlignedBuffersEndToEndTest(defaultBackends);
}

TEST_CASE("RefForceImportWithMisalignedInputBuffersEndToEndTest")
{
    ForceImportWithMisalignedInputBuffersEndToEndTest(defaultBackends);
}

TEST_CASE("RefForceImportWithMisalignedOutputBuffersEndToEndTest")
{
    ForceImportWithMisalignedOutputBuffersEndToEndTest(defaultBackends);
}

TEST_CASE("RefForceImportWithMisalignedInputAndOutputBuffersEndToEndTest")
{
    ForceImportWithMisalignedInputAndOutputBuffersEndToEndTest(defaultBackends);
}

TEST_CASE("RefForceImportRepeatedInferencesEndToEndTest")
{
    ForceImportRepeatedInferencesEndToEndTest(defaultBackends);
}

TEST_CASE("RefForceImportRepeatedInferencesInvertedEndToEndTest")
{
    ForceImportRepeatedInferencesInvertedEndToEndTest(defaultBackends);
}

#if !defined(__ANDROID__)
// Only run these tests on non Android platforms
TEST_CASE("RefImportNonAlignedPointerTest")
{
    ImportNonAlignedInputPointerTest(defaultBackends);
}

TEST_CASE("RefExportNonAlignedPointerTest")
{
    ExportNonAlignedOutputPointerTest(defaultBackends);
}

TEST_CASE("RefImportAlignedPointerTest")
{
    ImportAlignedPointerTest(defaultBackends);
}

TEST_CASE("RefImportOnlyWorkload")
{
    ImportOnlyWorkload(defaultBackends);
}

TEST_CASE("RefExportOnlyWorkload")
{
    ExportOnlyWorkload(defaultBackends);
}

TEST_CASE("RefImportAndExportWorkload")
{
    ImportAndExportWorkload(defaultBackends);
}

TEST_CASE("RefExportOutputWithSeveralOutputSlotConnectionsTest")
{
    ExportOutputWithSeveralOutputSlotConnectionsTest(defaultBackends);
}

TEST_CASE("RefStridedSliceInvalidSliceEndToEndTest")
{
    StridedSliceInvalidSliceEndToEndTest(defaultBackends);
}

TEST_CASE("RefThreadSafeFP32StridedSlicedEndToEndTest")
{
    armnn::experimental::StridedSlicedEndToEndTest<armnn::DataType::Float32>(defaultBackends, 1);
}

TEST_CASE("RefAsyncFP32StridedSlicedMultiThreadedEndToEndTest")
{
    armnn::experimental::StridedSlicedMultiThreadedEndToEndTest<armnn::DataType::Float32>(defaultBackends);
}

TEST_CASE("RefAsyncFP32StridedSlicedScheduledMultiThreadedEndToEndTest")
{
    armnn::experimental::StridedSlicedEndToEndTest<armnn::DataType::Float32>(defaultBackends, 3);
}
#endif

}