ArmNN 20.05
NeonEndToEndTests.cpp
//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include <backendsCommon/test/EndToEndTestImpl.hpp>

#include <backendsCommon/test/ActivationEndToEndTestImpl.hpp>
#include <backendsCommon/test/ArgMinMaxEndToEndTestImpl.hpp>
#include <backendsCommon/test/ComparisonEndToEndTestImpl.hpp>
#include <backendsCommon/test/ConcatEndToEndTestImpl.hpp>
#include <backendsCommon/test/DepthToSpaceEndToEndTestImpl.hpp>
#include <backendsCommon/test/DequantizeEndToEndTestImpl.hpp>
#include <backendsCommon/test/DetectionPostProcessEndToEndTestImpl.hpp>
#include <backendsCommon/test/ElementwiseUnaryEndToEndTestImpl.hpp>
#include <backendsCommon/test/InstanceNormalizationEndToEndTestImpl.hpp>
#include <backendsCommon/test/PreluEndToEndTestImpl.hpp>
#include <backendsCommon/test/QLstmEndToEndTestImpl.hpp>
#include <backendsCommon/test/QuantizedLstmEndToEndTestImpl.hpp>
#include <backendsCommon/test/SpaceToDepthEndToEndTestImpl.hpp>
#include <backendsCommon/test/SplitterEndToEndTestImpl.hpp>
#include <backendsCommon/test/TransposeConvolution2dEndToEndTestImpl.hpp>

#include <boost/test/unit_test.hpp>

BOOST_AUTO_TEST_SUITE(NeonEndToEnd)

std::vector<armnn::BackendId> defaultBackends = {armnn::Compute::CpuAcc};

// Abs
BOOST_AUTO_TEST_CASE(NeonAbsEndToEndTestFloat32)
{
    std::vector<float> expectedOutput =
    {
        1.f, 1.f, 1.f, 1.f, 5.f, 5.f, 5.f, 5.f,
        3.f, 3.f, 3.f, 3.f, 4.f, 4.f, 4.f, 4.f
    };

    ElementwiseUnarySimpleEndToEnd<armnn::DataType::Float32>(defaultBackends,
                                                             UnaryOperation::Abs,
                                                             expectedOutput);
}

// Constant
BOOST_AUTO_TEST_CASE(ConstantUsage_Neon_Float32)
{
    BOOST_TEST(ConstantUsageFloat32Test(defaultBackends));
}

#if defined(ARMNNREF_ENABLED)

// This test unit needs the reference backend; it is not available if the reference backend is not built.

BOOST_AUTO_TEST_CASE(FallbackToCpuRef)
{
    using namespace armnn;

    // Create the runtime in which the test will run, and allow fallback to CpuRef.
    IRuntime::CreationOptions options;
    IRuntimePtr runtime(IRuntime::Create(options));

    // Build up the structure of the network.
    INetworkPtr net(INetwork::Create());

    IConnectableLayer* input = net->AddInputLayer(0);

    // This layer configuration isn't supported by CpuAcc, but we allow fallback to CpuRef, so it should pass.
    NormalizationDescriptor descriptor;
    IConnectableLayer* pooling = net->AddNormalizationLayer(descriptor);

    IConnectableLayer* output = net->AddOutputLayer(0);

    input->GetOutputSlot(0).Connect(pooling->GetInputSlot(0));
    pooling->GetOutputSlot(0).Connect(output->GetInputSlot(0));

    input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 4, 4 }, DataType::Float32));
    pooling->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 4, 4 }, DataType::Float32));

    // Optimize the network.
    std::vector<BackendId> backends = {Compute::CpuAcc, Compute::CpuRef};
    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());

    // Load it into the runtime. It should pass.
    NetworkId netId;
    BOOST_TEST(runtime->LoadNetwork(netId, std::move(optNet)) == Status::Success);
}

#endif
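
// Note: the end-to-end helpers used throughout this file build, optimise, load and then execute a network.
// As an illustrative sketch only (the inputData/outputData buffers here are hypothetical), the execution
// step that would follow LoadNetwork above looks like:
//
//     InputTensors  inputTensors  { { 0, ConstTensor(runtime->GetInputTensorInfo(netId, 0), inputData.data()) } };
//     OutputTensors outputTensors { { 0, Tensor(runtime->GetOutputTensorInfo(netId, 0), outputData.data()) } };
//     runtime->EnqueueWorkload(netId, inputTensors, outputTensors);
//
// The FallbackToCpuRef test stops at LoadNetwork because it only checks backend fallback, not results.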
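// Comparison (Greater)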
BOOST_AUTO_TEST_CASE(NeonGreaterSimpleEndToEndTest)
{
    const std::vector<uint8_t> expectedOutput({ 0, 0, 0, 0, 1, 1, 1, 1,
                                                0, 0, 0, 0, 0, 0, 0, 0 });

    ComparisonSimpleEndToEnd<armnn::DataType::Float32>(defaultBackends,
                                                       ComparisonOperation::Greater,
                                                       expectedOutput);
}

BOOST_AUTO_TEST_CASE(NeonGreaterSimpleEndToEndUint8Test)
{
    const std::vector<uint8_t> expectedOutput({ 0, 0, 0, 0, 1, 1, 1, 1,
                                                0, 0, 0, 0, 0, 0, 0, 0 });

    ComparisonSimpleEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends,
                                                        ComparisonOperation::Greater,
                                                        expectedOutput);
}

BOOST_AUTO_TEST_CASE(NeonGreaterBroadcastEndToEndTest)
{
    const std::vector<uint8_t> expectedOutput({ 0, 1, 0, 0, 0, 1,
                                                1, 1, 1, 1, 1, 1 });

    ComparisonBroadcastEndToEnd<armnn::DataType::Float32>(defaultBackends,
                                                          ComparisonOperation::Greater,
                                                          expectedOutput);
}

BOOST_AUTO_TEST_CASE(NeonGreaterBroadcastEndToEndUint8Test)
{
    const std::vector<uint8_t> expectedOutput({ 0, 1, 0, 0, 0, 1,
                                                1, 1, 1, 1, 1, 1 });

    ComparisonBroadcastEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends,
                                                           ComparisonOperation::Greater,
                                                           expectedOutput);
}

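// Concat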
BOOST_AUTO_TEST_CASE(NeonConcatEndToEndDim0Test)
{
    ConcatDim0EndToEnd<armnn::DataType::Float32>(defaultBackends);
}

BOOST_AUTO_TEST_CASE(NeonConcatEndToEndDim0Uint8Test)
{
    ConcatDim0EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}

BOOST_AUTO_TEST_CASE(NeonConcatEndToEndDim1Test)
{
    ConcatDim1EndToEnd<armnn::DataType::Float32>(defaultBackends);
}

BOOST_AUTO_TEST_CASE(NeonConcatEndToEndDim1Uint8Test)
{
    ConcatDim1EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}

BOOST_AUTO_TEST_CASE(NeonConcatEndToEndDim3Test)
{
    ConcatDim3EndToEnd<armnn::DataType::Float32>(defaultBackends);
}

BOOST_AUTO_TEST_CASE(NeonConcatEndToEndDim3Uint8Test)
{
    ConcatDim3EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}

// DepthToSpace
BOOST_AUTO_TEST_CASE(DepthToSpaceEndToEndNchwFloat32)
{
    DepthToSpaceEndToEnd<armnn::DataType::Float32>(defaultBackends, armnn::DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(DepthToSpaceEndToEndNchwFloat16)
{
    DepthToSpaceEndToEnd<armnn::DataType::Float16>(defaultBackends, armnn::DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(DepthToSpaceEndToEndNchwUint8)
{
    DepthToSpaceEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends, armnn::DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(DepthToSpaceEndToEndNchwInt16)
{
    DepthToSpaceEndToEnd<armnn::DataType::QSymmS16>(defaultBackends, armnn::DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(DepthToSpaceEndToEndNhwcFloat32)
{
    DepthToSpaceEndToEnd<armnn::DataType::Float32>(defaultBackends, armnn::DataLayout::NHWC);
}

BOOST_AUTO_TEST_CASE(DepthToSpaceEndToEndNhwcFloat16)
{
    DepthToSpaceEndToEnd<armnn::DataType::Float16>(defaultBackends, armnn::DataLayout::NHWC);
}

BOOST_AUTO_TEST_CASE(DepthToSpaceEndToEndNhwcUint8)
{
    DepthToSpaceEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends, armnn::DataLayout::NHWC);
}

BOOST_AUTO_TEST_CASE(DepthToSpaceEndToEndNhwcInt16)
{
    DepthToSpaceEndToEnd<armnn::DataType::QSymmS16>(defaultBackends, armnn::DataLayout::NHWC);
}

// Dequantize
BOOST_AUTO_TEST_CASE(DequantizeEndToEndSimpleTest)
{
    DequantizeEndToEndSimple<armnn::DataType::QAsymmU8>(defaultBackends);
}

BOOST_AUTO_TEST_CASE(DequantizeEndToEndOffsetTest)
{
    DequantizeEndToEndOffset<armnn::DataType::QAsymmU8>(defaultBackends);
}

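// Elu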
BOOST_AUTO_TEST_CASE(NeonEluEndToEndTestFloat32)
{
    EluEndToEndTest<armnn::DataType::Float32>(defaultBackends);
}

BOOST_AUTO_TEST_CASE(NeonEluEndToEndTestFloat16)
{
    EluEndToEndTest<armnn::DataType::Float16>(defaultBackends);
}

// HardSwish
BOOST_AUTO_TEST_CASE(NeonHardSwishEndToEndTestFloat32)
{
    HardSwishEndToEndTest<armnn::DataType::Float32>(defaultBackends);
}

BOOST_AUTO_TEST_CASE(NeonHardSwishEndToEndTestFloat16)
{
    HardSwishEndToEndTest<armnn::DataType::Float16>(defaultBackends);
}

BOOST_AUTO_TEST_CASE(NeonHardSwishEndToEndTestQAsymmS8)
{
    HardSwishEndToEndTest<armnn::DataType::QAsymmS8>(defaultBackends);
}

BOOST_AUTO_TEST_CASE(NeonHardSwishEndToEndTestQAsymmU8)
{
    HardSwishEndToEndTest<armnn::DataType::QAsymmU8>(defaultBackends);
}

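// Prelu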
BOOST_AUTO_TEST_CASE(NeonPreluEndToEndFloat32Test)
{
    PreluEndToEndNegativeTest<armnn::DataType::Float32>(defaultBackends);
}

BOOST_AUTO_TEST_CASE(NeonPreluEndToEndTestUint8Test)
{
    PreluEndToEndPositiveTest<armnn::DataType::QAsymmU8>(defaultBackends);
}

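// SpaceToDepth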
BOOST_AUTO_TEST_CASE(NeonSpaceToDepthNhwcEndToEndTest1)
{
    SpaceToDepthNhwcEndToEndTest1(defaultBackends);
}

BOOST_AUTO_TEST_CASE(NeonSpaceToDepthNchwEndToEndTest1)
{
    SpaceToDepthNchwEndToEndTest1(defaultBackends);
}

BOOST_AUTO_TEST_CASE(NeonSpaceToDepthNhwcEndToEndTest2)
{
    SpaceToDepthNhwcEndToEndTest2(defaultBackends);
}

BOOST_AUTO_TEST_CASE(NeonSpaceToDepthNchwEndToEndTest2)
{
    SpaceToDepthNchwEndToEndTest2(defaultBackends);
}

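// Splitter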
BOOST_AUTO_TEST_CASE(NeonSplitter1dEndToEndTest)
{
    Splitter1dEndToEnd<armnn::DataType::Float32>(defaultBackends);
}

BOOST_AUTO_TEST_CASE(NeonSplitter1dEndToEndUint8Test)
{
    Splitter1dEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}

BOOST_AUTO_TEST_CASE(NeonSplitter2dDim0EndToEndTest)
{
    Splitter2dDim0EndToEnd<armnn::DataType::Float32>(defaultBackends);
}

BOOST_AUTO_TEST_CASE(NeonSplitter2dDim1EndToEndTest)
{
    Splitter2dDim1EndToEnd<armnn::DataType::Float32>(defaultBackends);
}

BOOST_AUTO_TEST_CASE(NeonSplitter2dDim0EndToEndUint8Test)
{
    Splitter2dDim0EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}

BOOST_AUTO_TEST_CASE(NeonSplitter2dDim1EndToEndUint8Test)
{
    Splitter2dDim1EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}

BOOST_AUTO_TEST_CASE(NeonSplitter3dDim0EndToEndTest)
{
    Splitter3dDim0EndToEnd<armnn::DataType::Float32>(defaultBackends);
}

BOOST_AUTO_TEST_CASE(NeonSplitter3dDim1EndToEndTest)
{
    Splitter3dDim1EndToEnd<armnn::DataType::Float32>(defaultBackends);
}

BOOST_AUTO_TEST_CASE(NeonSplitter3dDim2EndToEndTest)
{
    Splitter3dDim2EndToEnd<armnn::DataType::Float32>(defaultBackends);
}

BOOST_AUTO_TEST_CASE(NeonSplitter3dDim0EndToEndUint8Test)
{
    Splitter3dDim0EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}

BOOST_AUTO_TEST_CASE(NeonSplitter3dDim1EndToEndUint8Test)
{
    Splitter3dDim1EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}

BOOST_AUTO_TEST_CASE(NeonSplitter3dDim2EndToEndUint8Test)
{
    Splitter3dDim2EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}

BOOST_AUTO_TEST_CASE(NeonSplitter4dDim0EndToEndTest)
{
    Splitter4dDim0EndToEnd<armnn::DataType::Float32>(defaultBackends);
}

BOOST_AUTO_TEST_CASE(NeonSplitter4dDim1EndToEndTest)
{
    Splitter4dDim1EndToEnd<armnn::DataType::Float32>(defaultBackends);
}

BOOST_AUTO_TEST_CASE(NeonSplitter4dDim2EndToEndTest)
{
    Splitter4dDim2EndToEnd<armnn::DataType::Float32>(defaultBackends);
}

BOOST_AUTO_TEST_CASE(NeonSplitter4dDim3EndToEndTest)
{
    Splitter4dDim3EndToEnd<armnn::DataType::Float32>(defaultBackends);
}

BOOST_AUTO_TEST_CASE(NeonSplitter4dDim0EndToEndUint8Test)
{
    Splitter4dDim0EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}

BOOST_AUTO_TEST_CASE(NeonSplitter4dDim1EndToEndUint8Test)
{
    Splitter4dDim1EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}

BOOST_AUTO_TEST_CASE(NeonSplitter4dDim2EndToEndUint8Test)
{
    Splitter4dDim2EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}

BOOST_AUTO_TEST_CASE(NeonSplitter4dDim3EndToEndUint8Test)
{
    Splitter4dDim3EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}

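// QuantizedLstm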
BOOST_AUTO_TEST_CASE(NeonQuantizedLstmEndToEndTest)
{
    QuantizedLstmEndToEnd(defaultBackends);
}

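// TransposeConvolution2d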
BOOST_AUTO_TEST_CASE(NeonTransposeConvolution2dEndToEndFloatNchwTest)
{
    TransposeConvolution2dEndToEnd<armnn::DataType::Float32, armnn::DataType::Float32>(
        defaultBackends, armnn::DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(NeonTransposeConvolution2dEndToEndUint8NchwTest)
{
    TransposeConvolution2dEndToEnd<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
        defaultBackends, armnn::DataLayout::NCHW);
}

BOOST_AUTO_TEST_CASE(NeonTransposeConvolution2dEndToEndFloatNhwcTest)
{
    TransposeConvolution2dEndToEnd<armnn::DataType::Float32, armnn::DataType::Float32>(
        defaultBackends, armnn::DataLayout::NHWC);
}

BOOST_AUTO_TEST_CASE(NeonTransposeConvolution2dEndToEndUint8NhwcTest)
{
    TransposeConvolution2dEndToEnd<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
        defaultBackends, armnn::DataLayout::NHWC);
}

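// Memory import/export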
BOOST_AUTO_TEST_CASE(NeonImportNonAlignedInputPointerTest)
{
    ImportNonAlignedInputPointerTest(defaultBackends);
}

BOOST_AUTO_TEST_CASE(NeonExportNonAlignedOutputPointerTest)
{
    ExportNonAlignedOutputPointerTest(defaultBackends);
}

BOOST_AUTO_TEST_CASE(NeonImportAlignedPointerTest, * boost::unit_test::disabled())
{
    ImportAlignedPointerTest(defaultBackends);
}

BOOST_AUTO_TEST_CASE(NeonImportOnlyWorkload, * boost::unit_test::disabled())
{
    ImportOnlyWorkload(defaultBackends);
}

BOOST_AUTO_TEST_CASE(NeonExportOnlyWorkload, * boost::unit_test::disabled())
{
    ExportOnlyWorkload(defaultBackends);
}

BOOST_AUTO_TEST_CASE(NeonImportAndExportWorkload, * boost::unit_test::disabled())
{
    ImportAndExportWorkload(defaultBackends);
}

BOOST_AUTO_TEST_CASE(NeonExportOutputWithSeveralOutputSlotConnectionsTest, * boost::unit_test::disabled())
{
    ExportOutputWithSeveralOutputSlotConnectionsTest(defaultBackends);
}

// InstanceNormalization
BOOST_AUTO_TEST_CASE(NeonInstanceNormalizationNchwEndToEndTest1)
{
    InstanceNormalizationNchwEndToEndTest1(defaultBackends);
}

BOOST_AUTO_TEST_CASE(NeonInstanceNormalizationNchwEndToEndTest2)
{
    InstanceNormalizationNchwEndToEndTest2(defaultBackends);
}

// ArgMinMax
BOOST_AUTO_TEST_CASE(NeonArgMaxSimpleTest)
{
    ArgMaxEndToEndSimple<armnn::DataType::Float32>(defaultBackends);
}

BOOST_AUTO_TEST_CASE(NeonArgMinSimpleTest)
{
    ArgMinEndToEndSimple<armnn::DataType::Float32>(defaultBackends);
}

BOOST_AUTO_TEST_CASE(NeonArgMaxAxis0Test)
{
    ArgMaxAxis0EndToEnd<armnn::DataType::Float32>(defaultBackends);
}

BOOST_AUTO_TEST_CASE(NeonArgMinAxis0Test)
{
    ArgMinAxis0EndToEnd<armnn::DataType::Float32>(defaultBackends);
}

BOOST_AUTO_TEST_CASE(NeonArgMaxAxis1Test)
{
    ArgMaxAxis1EndToEnd<armnn::DataType::Float32>(defaultBackends);
}

BOOST_AUTO_TEST_CASE(NeonArgMinAxis1Test)
{
    ArgMinAxis1EndToEnd<armnn::DataType::Float32>(defaultBackends);
}

BOOST_AUTO_TEST_CASE(NeonArgMaxAxis2Test)
{
    ArgMaxAxis2EndToEnd<armnn::DataType::Float32>(defaultBackends);
}

BOOST_AUTO_TEST_CASE(NeonArgMinAxis2Test)
{
    ArgMinAxis2EndToEnd<armnn::DataType::Float32>(defaultBackends);
}

BOOST_AUTO_TEST_CASE(NeonArgMaxAxis3Test)
{
    ArgMaxAxis3EndToEnd<armnn::DataType::Float32>(defaultBackends);
}

BOOST_AUTO_TEST_CASE(NeonArgMinAxis3Test)
{
    ArgMinAxis3EndToEnd<armnn::DataType::Float32>(defaultBackends);
}

BOOST_AUTO_TEST_CASE(NeonArgMaxSimpleTestQuantisedAsymm8)
{
    ArgMaxEndToEndSimple<armnn::DataType::QAsymmU8>(defaultBackends);
}

BOOST_AUTO_TEST_CASE(NeonArgMinSimpleTestQuantisedAsymm8)
{
    ArgMinEndToEndSimple<armnn::DataType::QAsymmU8>(defaultBackends);
}

BOOST_AUTO_TEST_CASE(NeonArgMaxAxis0TestQuantisedAsymm8)
{
    ArgMaxAxis0EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}

BOOST_AUTO_TEST_CASE(NeonArgMinAxis0TestQuantisedAsymm8)
{
    ArgMinAxis0EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}

BOOST_AUTO_TEST_CASE(NeonArgMaxAxis1TestQuantisedAsymm8)
{
    ArgMaxAxis1EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}

BOOST_AUTO_TEST_CASE(NeonArgMinAxis1TestQuantisedAsymm8)
{
    ArgMinAxis1EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}

BOOST_AUTO_TEST_CASE(NeonArgMaxAxis2TestQuantisedAsymm8)
{
    ArgMaxAxis2EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}

BOOST_AUTO_TEST_CASE(NeonArgMinAxis2TestQuantisedAsymm8)
{
    ArgMinAxis2EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}

BOOST_AUTO_TEST_CASE(NeonArgMaxAxis3TestQuantisedAsymm8)
{
    ArgMaxAxis3EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}

BOOST_AUTO_TEST_CASE(NeonArgMinAxis3TestQuantisedAsymm8)
{
    ArgMinAxis3EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
}

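// StridedSlice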
BOOST_AUTO_TEST_CASE(NeonStridedSliceInvalidSliceEndToEndTest)
{
    StridedSliceInvalidSliceEndToEndTest(defaultBackends);
}

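// DetectionPostProcess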
BOOST_AUTO_TEST_CASE(NeonDetectionPostProcessRegularNmsTest, * boost::unit_test::disabled())
{
    std::vector<float> boxEncodings({
        0.0f, 0.0f, 0.0f, 0.0f,
        0.0f, 1.0f, 0.0f, 0.0f,
        0.0f, -1.0f, 0.0f, 0.0f,
        0.0f, 0.0f, 0.0f, 0.0f,
        0.0f, 1.0f, 0.0f, 0.0f,
        0.0f, 0.0f, 0.0f, 0.0f
    });
    std::vector<float> scores({
        0.0f, 0.9f, 0.8f,
        0.0f, 0.75f, 0.72f,
        0.0f, 0.6f, 0.5f,
        0.0f, 0.93f, 0.95f,
        0.0f, 0.5f, 0.4f,
        0.0f, 0.3f, 0.2f
    });
    std::vector<float> anchors({
        0.5f, 0.5f, 1.0f, 1.0f,
        0.5f, 0.5f, 1.0f, 1.0f,
        0.5f, 0.5f, 1.0f, 1.0f,
        0.5f, 10.5f, 1.0f, 1.0f,
        0.5f, 10.5f, 1.0f, 1.0f,
        0.5f, 100.5f, 1.0f, 1.0f
    });
    DetectionPostProcessRegularNmsEndToEnd<armnn::DataType::Float32>(defaultBackends, boxEncodings, scores, anchors);
}

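// Helper used by the quantized DetectionPostProcess tests below: converts a float buffer to QAsymmU8 using the
// scale and zero-point stored in the given TensorInfo, i.e. q = round(x / scale) + offset, saturated to [0, 255].
// For example, with scale 0.01 and offset 0, a score of 0.9f quantizes to 90.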
inline void QuantizeData(uint8_t* quant, const float* dequant, const TensorInfo& info)
{
    for (size_t i = 0; i < info.GetNumElements(); i++)
    {
        quant[i] = armnn::Quantize<uint8_t>(dequant[i], info.GetQuantizationScale(), info.GetQuantizationOffset());
    }
}

BOOST_AUTO_TEST_CASE(NeonDetectionPostProcessRegularNmsUint8Test, * boost::unit_test::disabled())
{
    armnn::TensorInfo boxEncodingsInfo({ 1, 6, 4 }, armnn::DataType::Float32);
    armnn::TensorInfo scoresInfo({ 1, 6, 3 }, armnn::DataType::Float32);
    armnn::TensorInfo anchorsInfo({ 6, 4 }, armnn::DataType::Float32);

    boxEncodingsInfo.SetQuantizationScale(1.0f);
    boxEncodingsInfo.SetQuantizationOffset(1);
    scoresInfo.SetQuantizationScale(0.01f);
    scoresInfo.SetQuantizationOffset(0);
    anchorsInfo.SetQuantizationScale(0.5f);
    anchorsInfo.SetQuantizationOffset(0);

    std::vector<float> boxEncodings({
        0.0f, 0.0f, 0.0f, 0.0f,
        0.0f, 1.0f, 0.0f, 0.0f,
        0.0f, -1.0f, 0.0f, 0.0f,
        0.0f, 0.0f, 0.0f, 0.0f,
        0.0f, 1.0f, 0.0f, 0.0f,
        0.0f, 0.0f, 0.0f, 0.0f
    });
    std::vector<float> scores({
        0.0f, 0.9f, 0.8f,
        0.0f, 0.75f, 0.72f,
        0.0f, 0.6f, 0.5f,
        0.0f, 0.93f, 0.95f,
        0.0f, 0.5f, 0.4f,
        0.0f, 0.3f, 0.2f
    });
    std::vector<float> anchors({
        0.5f, 0.5f, 1.0f, 1.0f,
        0.5f, 0.5f, 1.0f, 1.0f,
        0.5f, 0.5f, 1.0f, 1.0f,
        0.5f, 10.5f, 1.0f, 1.0f,
        0.5f, 10.5f, 1.0f, 1.0f,
        0.5f, 100.5f, 1.0f, 1.0f
    });

    std::vector<uint8_t> qBoxEncodings(boxEncodings.size(), 0);
    std::vector<uint8_t> qScores(scores.size(), 0);
    std::vector<uint8_t> qAnchors(anchors.size(), 0);
    QuantizeData(qBoxEncodings.data(), boxEncodings.data(), boxEncodingsInfo);
    QuantizeData(qScores.data(), scores.data(), scoresInfo);
    QuantizeData(qAnchors.data(), anchors.data(), anchorsInfo);
    DetectionPostProcessRegularNmsEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends, qBoxEncodings,
                                                                      qScores, qAnchors,
                                                                      1.0f, 1, 0.01f, 0, 0.5f, 0);
}

BOOST_AUTO_TEST_CASE(NeonDetectionPostProcessFastNmsTest, * boost::unit_test::disabled())
{
    std::vector<float> boxEncodings({
        0.0f, 0.0f, 0.0f, 0.0f,
        0.0f, 1.0f, 0.0f, 0.0f,
        0.0f, -1.0f, 0.0f, 0.0f,
        0.0f, 0.0f, 0.0f, 0.0f,
        0.0f, 1.0f, 0.0f, 0.0f,
        0.0f, 0.0f, 0.0f, 0.0f
    });
    std::vector<float> scores({
        0.0f, 0.9f, 0.8f,
        0.0f, 0.75f, 0.72f,
        0.0f, 0.6f, 0.5f,
        0.0f, 0.93f, 0.95f,
        0.0f, 0.5f, 0.4f,
        0.0f, 0.3f, 0.2f
    });
    std::vector<float> anchors({
        0.5f, 0.5f, 1.0f, 1.0f,
        0.5f, 0.5f, 1.0f, 1.0f,
        0.5f, 0.5f, 1.0f, 1.0f,
        0.5f, 10.5f, 1.0f, 1.0f,
        0.5f, 10.5f, 1.0f, 1.0f,
        0.5f, 100.5f, 1.0f, 1.0f
    });
    DetectionPostProcessFastNmsEndToEnd<armnn::DataType::Float32>(defaultBackends, boxEncodings, scores, anchors);
}

BOOST_AUTO_TEST_CASE(NeonDetectionPostProcessFastNmsUint8Test, * boost::unit_test::disabled())
{
    armnn::TensorInfo boxEncodingsInfo({ 1, 6, 4 }, armnn::DataType::Float32);
    armnn::TensorInfo scoresInfo({ 1, 6, 3 }, armnn::DataType::Float32);
    armnn::TensorInfo anchorsInfo({ 6, 4 }, armnn::DataType::Float32);

    boxEncodingsInfo.SetQuantizationScale(1.0f);
    boxEncodingsInfo.SetQuantizationOffset(1);
    scoresInfo.SetQuantizationScale(0.01f);
    scoresInfo.SetQuantizationOffset(0);
    anchorsInfo.SetQuantizationScale(0.5f);
    anchorsInfo.SetQuantizationOffset(0);

    std::vector<float> boxEncodings({
        0.0f, 0.0f, 0.0f, 0.0f,
        0.0f, 1.0f, 0.0f, 0.0f,
        0.0f, -1.0f, 0.0f, 0.0f,
        0.0f, 0.0f, 0.0f, 0.0f,
        0.0f, 1.0f, 0.0f, 0.0f,
        0.0f, 0.0f, 0.0f, 0.0f
    });
    std::vector<float> scores({
        0.0f, 0.9f, 0.8f,
        0.0f, 0.75f, 0.72f,
        0.0f, 0.6f, 0.5f,
        0.0f, 0.93f, 0.95f,
        0.0f, 0.5f, 0.4f,
        0.0f, 0.3f, 0.2f
    });
    std::vector<float> anchors({
        0.5f, 0.5f, 1.0f, 1.0f,
        0.5f, 0.5f, 1.0f, 1.0f,
        0.5f, 0.5f, 1.0f, 1.0f,
        0.5f, 10.5f, 1.0f, 1.0f,
        0.5f, 10.5f, 1.0f, 1.0f,
        0.5f, 100.5f, 1.0f, 1.0f
    });

    std::vector<uint8_t> qBoxEncodings(boxEncodings.size(), 0);
    std::vector<uint8_t> qScores(scores.size(), 0);
    std::vector<uint8_t> qAnchors(anchors.size(), 0);
    QuantizeData(qBoxEncodings.data(), boxEncodings.data(), boxEncodingsInfo);
    QuantizeData(qScores.data(), scores.data(), scoresInfo);
    QuantizeData(qAnchors.data(), anchors.data(), anchorsInfo);
    DetectionPostProcessFastNmsEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends, qBoxEncodings,
                                                                   qScores, qAnchors,
                                                                   1.0f, 1, 0.01f, 0, 0.5f, 0);
}

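// QLstm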
BOOST_AUTO_TEST_CASE(NeonQLstmEndToEndTest)
{
    QLstmEndToEnd(defaultBackends);
}

BOOST_AUTO_TEST_SUITE_END()