ActivationTestImpl.cpp
//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "ActivationTestImpl.hpp"

#include <QuantizeHelper.hpp>
#include <ResolveType.hpp>

#include <armnn/utility/IgnoreUnused.hpp>
#include <armnn/utility/NumericCast.hpp>

#include <backendsCommon/test/TensorCopyUtils.hpp>
#include <backendsCommon/test/WorkloadTestUtils.hpp>

#include <test/TensorHelpers.hpp>

#include <boost/multi_array.hpp>

#include <algorithm>
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> BoundedReLuTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    float upperBound,
    float lowerBound,
    float inputScale,
    int32_t inputOffset,
    float outputScale,
    int32_t outputOffset,
    const std::vector<T>& inputData,
    const std::vector<T>& outputExpectedData,
    unsigned int inputWidth,
    unsigned int inputHeight,
    unsigned int inputChannels,
    unsigned int inputBatchSize)
{
    IgnoreUnused(memoryManager);
    unsigned int outputWidth = inputWidth;
    unsigned int outputHeight = inputHeight;
    unsigned int outputChannels = inputChannels;
    unsigned int outputBatchSize = inputBatchSize;

    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth }, ArmnnType);

    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth }, ArmnnType);

    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(inputScale);
        inputTensorInfo.SetQuantizationOffset(inputOffset);

        outputTensorInfo.SetQuantizationScale(outputScale);
        outputTensorInfo.SetQuantizationOffset(outputOffset);
    }

    LayerTestResult<T, 4> result(inputTensorInfo);

    auto input = MakeTensor<T, 4>(inputTensorInfo, inputData);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);

    // Set up bounded ReLu.
    armnn::ActivationQueueDescriptor descriptor;
    armnn::WorkloadInfo workloadInfo;
    AddInputToWorkload(descriptor, workloadInfo, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, workloadInfo, outputTensorInfo, outputHandle.get());

    descriptor.m_Parameters.m_Function = armnn::ActivationFunction::BoundedReLu;
    descriptor.m_Parameters.m_A = upperBound;
    descriptor.m_Parameters.m_B = lowerBound;

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateActivation(descriptor, workloadInfo);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputExpectedData);

    return result;
}

LayerTestResult<float, 4> BoundedReLuUpperAndLowerBoundTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    unsigned int inputWidth = 4u;
    unsigned int inputHeight = 5u;
    unsigned int inputChannels = 1u;
    unsigned int inputBatchSize = 1;

    std::vector<float> input = std::vector<float>{
        -2.0f, 0.1f, 0.5f, 1.25f,
        0.786f, 0.9875f, -1.5f, 0.384f,
        1.0001f, 3.5f, 7.5f, 0.896f,
        2.126f, 2.0f, 0.3f, 0.15f,
        0.999f, 1.2f, 0.89f, 6.1f,
    };

    // Calculated manually: the input clamped to [-1, 1].
    std::vector<float> output = std::vector<float>{
        -1.0f, 0.1f, 0.5f, 1.0f,
        0.786f, 0.9875f, -1.0f, 0.384f,
        1.0f, 1.0f, 1.0f, 0.896f,
        1.0f, 1.0f, 0.3f, 0.15f,
        0.999f, 1.0f, 0.89f, 1.0f,
    };

    return BoundedReLuTestCommon<armnn::DataType::Float32>(
        workloadFactory, memoryManager, tensorHandleFactory, 1.0f, -1.0f, 1.0f, 0, 1.0f, 0, input, output,
        inputWidth, inputHeight, inputChannels, inputBatchSize);
}

LayerTestResult<float, 4> BoundedReLuUpperBoundOnlyTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    unsigned int inputWidth = 4u;
    unsigned int inputHeight = 5u;
    unsigned int inputChannels = 1u;
    unsigned int inputBatchSize = 1;

    std::vector<float> input = std::vector<float>{
        -1.0f, 0.1f, 0.5f, 6.25f,
        0.786f, 5.9875f, -0.5f, 0.384f,
        6.0001f, 3.5f, 7.5f, 0.896f,
        2.126f, 12.0f, 0.3f, 0.15f,
        0.999f, 1.2f, 0.89f, 6.1f,
    };

    // Calculated manually: the input clamped to [0, 6].
    std::vector<float> output = std::vector<float>{
        0.0f, 0.1f, 0.5f, 6.0f,
        0.786f, 5.9875f, 0.0f, 0.384f,
        6.0f, 3.5f, 6.0f, 0.896f,
        2.126f, 6.0f, 0.3f, 0.15f,
        0.999f, 1.2f, 0.89f, 6.0f,
    };

    return BoundedReLuTestCommon<armnn::DataType::Float32>(
        workloadFactory, memoryManager, tensorHandleFactory, 6.0f, 0.0f, 1.0f, 0, 1.0f, 0, input, output,
        inputWidth, inputHeight, inputChannels, inputBatchSize);
}

LayerTestResult<uint8_t, 4> BoundedReLuUint8UpperBoundOnlyTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    unsigned int inputWidth = 3u;
    unsigned int inputHeight = 2u;
    unsigned int inputChannels = 1u;
    unsigned int inputBatchSize = 1;

    std::vector<uint8_t> input = std::vector<uint8_t>{
        51, 124, 28,
        251, 8, 92
    };

    // Calculated manually.
    std::vector<uint8_t> output = std::vector<uint8_t>{
        0, 122, 0,
        255, 0, 58
    };

    float inputScale = 12.0f / 255.0f;
    int32_t inputOffset = 63;
    float outputScale = 6.0f / 255.0f;
    int32_t outputOffset = 0;
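    // For reference: a quantized value q dequantizes to inputScale * (q - inputOffset).
    // E.g. q = 124 -> (124 - 63) * 12/255 ~= 2.87, which lies inside [0, 6] and
    // re-quantizes to 2.87 / (6/255) ~= 122, matching the expected output above.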

    return BoundedReLuTestCommon<armnn::DataType::QAsymmU8>(
        workloadFactory, memoryManager, tensorHandleFactory, 6.0f, 0.0f,
        inputScale, inputOffset, outputScale, outputOffset,
        input, output, inputWidth, inputHeight, inputChannels, inputBatchSize);
}

LayerTestResult<uint8_t, 4> BoundedReLuUint8UpperAndLowerBoundTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    unsigned int inputWidth = 3u;
    unsigned int inputHeight = 2u;
    unsigned int inputChannels = 1u;
    unsigned int inputBatchSize = 1;

    std::vector<uint8_t> input = std::vector<uint8_t>{
        51, 230, 28,
        251, 8, 92
    };

    // Calculated manually.
    std::vector<uint8_t> output = std::vector<uint8_t>{
        51, 192, 32,
        192, 32, 92
    };

    int32_t inputOffset = 112;
    float inputScale = 0.0125f;
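    // With scale 0.0125 and offset 112, the float bounds [-1, 1] map to the
    // quantized values 112 - 80 = 32 and 112 + 80 = 192, so the expected output
    // is simply the input clamped to [32, 192].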

    return BoundedReLuTestCommon<armnn::DataType::QAsymmU8>(
        workloadFactory, memoryManager, tensorHandleFactory, 1.0f, -1.0f,
        inputScale, inputOffset, inputScale, inputOffset, // Input/output scale & offset same.
        input, output, inputWidth, inputHeight, inputChannels, inputBatchSize);
}

namespace
{

struct BoundedReLuRandomInputTestTraits
{
    constexpr static unsigned int inputHeight = 31u;
    constexpr static unsigned int inputWidth = 19u;
    constexpr static unsigned int inputChannels = 4u;
    constexpr static unsigned int inputBatchSize = 2;

    constexpr static unsigned int outputHeight = inputHeight;
    constexpr static unsigned int outputWidth = inputWidth;
    constexpr static unsigned int outputChannels = inputChannels;
    constexpr static unsigned int outputBatchSize = inputBatchSize;

    static armnn::TensorInfo GetInputTensorInfo()
    {
        return armnn::TensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
                                 armnn::DataType::Float32);
    }

    static armnn::TensorInfo GetOutputTensorInfo()
    {
        return armnn::TensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
                                 armnn::DataType::Float32);
    }
};

boost::multi_array<float, 4> BoundedReLuRandomInputTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    float lowerBound,
    float upperBound,
    const armnn::ActivationDescriptor& activationDescriptor)
{
    IgnoreUnused(memoryManager);
    const armnn::TensorInfo inputTensorInfo = BoundedReLuRandomInputTestTraits::GetInputTensorInfo();
    const armnn::TensorInfo outputTensorInfo = BoundedReLuRandomInputTestTraits::GetOutputTensorInfo();

    boost::multi_array<float, 4> output(GetTensorShapeAsArray<4>(outputTensorInfo));

    // Min/max random values passed to MakeRandomTensor are purposely outside of the ReLu
    // range [lowerBound, upperBound].
    auto input = MakeRandomTensor<float, 4>(inputTensorInfo, 4605828, lowerBound - 5.0f, upperBound * 2.0f);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);

    // Set up bounded ReLu.
    armnn::ActivationQueueDescriptor descriptor;
    armnn::WorkloadInfo workloadInfo;
    AddInputToWorkload(descriptor, workloadInfo, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, workloadInfo, outputTensorInfo, outputHandle.get());
    descriptor.m_Parameters = activationDescriptor;

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateActivation(descriptor, workloadInfo);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&output[0][0][0][0], outputHandle.get());

    return output;
}

} // namespace

LayerTestResult<float, 4> CompareBoundedReLuTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    const armnn::ITensorHandleFactory& refTensorHandleFactory,
    float upperBound,
    float lowerBound)
{
    LayerTestResult<float, 4> result(BoundedReLuRandomInputTestTraits::GetOutputTensorInfo());

    armnn::ActivationDescriptor activationDescriptor;
    activationDescriptor.m_Function = armnn::ActivationFunction::BoundedReLu;
    activationDescriptor.m_A = upperBound;
    activationDescriptor.m_B = lowerBound;

    result.output = BoundedReLuRandomInputTest(
        workloadFactory, memoryManager, tensorHandleFactory, 0.0f, upperBound, activationDescriptor);
    result.outputExpected = BoundedReLuRandomInputTest(
        refWorkloadFactory, nullptr, refTensorHandleFactory, 0.0f, upperBound, activationDescriptor);

    return result;
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> ConstantLinearActivationTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    float qScale = 0.0f,
    int32_t qOffset = 0)
{
    IgnoreUnused(memoryManager);
    unsigned int inputHeight = 20;
    unsigned int inputWidth = 17;
    unsigned int inputChannels = 3;
    unsigned int batchSize = 5;

    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;

    unsigned int shape[] = {batchSize, inputChannels, inputHeight, inputWidth};

    inputTensorInfo = armnn::TensorInfo(4, shape, ArmnnType);
    outputTensorInfo = armnn::TensorInfo(4, shape, ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    LayerTestResult<T, 4> ret(outputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);

    // Do linear activation that should leave the tensor unchanged.
    armnn::ActivationQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    data.m_Parameters.m_A = 1.0f;
    data.m_Parameters.m_B = 0.0f;
    data.m_Parameters.m_Function = armnn::ActivationFunction::Linear;
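    // With A = 1 and B = 0, the linear activation y = A*x + B is the identity,
    // so the output should match the random input exactly.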

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateActivation(data, info);

    inputHandle->Allocate();
    outputHandle->Allocate();
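
    // A fixed seed keeps the random input, and therefore the test, reproducible.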
    boost::multi_array<T, 4> input = MakeRandomTensor<T, 4>(inputTensorInfo, 7123561);
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    // Ensure output equals input.
    ret.outputExpected = input;

    return ret;
}

LayerTestResult<float, 4> ConstantLinearActivationTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return ConstantLinearActivationTestCommon<armnn::DataType::Float32>(workloadFactory,
                                                                        memoryManager,
                                                                        tensorHandleFactory);
}

LayerTestResult<uint8_t, 4> ConstantLinearActivationUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return ConstantLinearActivationTestCommon<armnn::DataType::QAsymmU8>(
        workloadFactory, memoryManager, tensorHandleFactory, 4.0f, 3);
}

LayerTestResult<int16_t, 4> ConstantLinearActivationInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return ConstantLinearActivationTestCommon<armnn::DataType::QSymmS16>(
        workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
}
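
// Shared harness for the single-function activation tests below: runs one
// activation workload over a 1x1x1x16 tensor with the given function and
// parameters A/B, then compares against outputExpectedData quantized with
// outScale/outOffset.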
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleActivationTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    armnn::ActivationFunction activationFunction,
    float activationParameterA,
    float activationParameterB,
    float scale,
    int32_t offset,
    const std::vector<float>& inputData,
    float outScale,
    int32_t outOffset,
    const std::vector<float>& outputExpectedData)
{
    IgnoreUnused(memoryManager);
    constexpr static unsigned int inputWidth = 16u;
    constexpr static unsigned int inputHeight = 1u;
    constexpr static unsigned int inputChannels = 1u;
    constexpr static unsigned int inputBatchSize = 1u;

    constexpr static unsigned int outputWidth = inputWidth;
    constexpr static unsigned int outputHeight = inputHeight;
    constexpr static unsigned int outputChannels = inputChannels;
    constexpr static unsigned int outputBatchSize = inputBatchSize;

    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth }, ArmnnType);
    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth }, ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(scale);
        inputTensorInfo.SetQuantizationOffset(offset);
        outputTensorInfo.SetQuantizationScale(outScale);
        outputTensorInfo.SetQuantizationOffset(outOffset);
    }

    LayerTestResult<T, 4> result(inputTensorInfo);

    auto input = MakeTensor<T, 4>(inputTensorInfo, armnnUtils::QuantizedVector<T>(inputData, scale, offset));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);

    // Set up the activation workload.
    armnn::ActivationQueueDescriptor descriptor;
    armnn::WorkloadInfo workloadInfo;
    AddInputToWorkload(descriptor, workloadInfo, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, workloadInfo, outputTensorInfo, outputHandle.get());

    descriptor.m_Parameters.m_Function = activationFunction;
    descriptor.m_Parameters.m_A = activationParameterA;
    descriptor.m_Parameters.m_B = activationParameterB;

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateActivation(descriptor, workloadInfo);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    // Calculated manually.
    result.outputExpected =
        MakeTensor<T, 4>(outputTensorInfo, armnnUtils::QuantizedVector<T>(outputExpectedData, outScale, outOffset));

    return result;
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleSigmoidTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    float qScale,
    int32_t qOffset)
{
    std::vector<float> inputData =
    {
        -0.1f, -0.2f, -0.3f, -0.4f,
        0.1f, 0.2f, 0.3f, 0.4f,
        -1.0f, -2.0f, -3.0f, -4.0f,
        1.0f, 2.0f, 3.0f, 4.0f
    };

    // Calculate output values for input.
    auto f = [](float value)
    {
        return 1.0f / (1.0f + std::exp(-value));
    };
    std::vector<float> outputExpectedData(inputData.size());
    std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
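
    // Sigmoid outputs lie in (0, 1), so the result is re-quantized with an
    // output scale of 1/256 and zero offset to cover exactly that range.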
    return SimpleActivationTest<ArmnnType>(workloadFactory,
                                           memoryManager,
                                           tensorHandleFactory,
                                           armnn::ActivationFunction::Sigmoid,
                                           0.f,
                                           0.f,
                                           qScale,
                                           qOffset,
                                           inputData,
                                           1.f / 256.f,
                                           0,
                                           outputExpectedData);
}

LayerTestResult<float, 4> SimpleSigmoidTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return SimpleSigmoidTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager,
                                                             tensorHandleFactory, 0.0f, 0);
}

LayerTestResult<uint8_t, 4> SimpleSigmoidUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return SimpleSigmoidTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager,
                                                              tensorHandleFactory, 0.1f, 50);
}

LayerTestResult<int16_t, 4> SimpleSigmoidInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return SimpleSigmoidTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager,
                                                              tensorHandleFactory, 0.1f, 0);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> ReLuTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    float qScale,
    int32_t qOffset)
{
    std::vector<float> inputData = {
        -0.1f, -0.2f, -0.3f, -0.4f,
        0.1f, 0.2f, 0.3f, 0.4f,
        -1.0f, -2.0f, -3.0f, -4.0f,
        1.0f, 2.0f, 3.0f, 4.0f
    };

    // Calculate output values for input.
    auto f = [](float value)
    {
        return std::fmax(0.0f, value);
    };
    std::vector<float> outputExpectedData(inputData.size());
    std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);

    return SimpleActivationTest<ArmnnType>(workloadFactory,
                                           memoryManager,
                                           tensorHandleFactory,
                                           armnn::ActivationFunction::ReLu,
                                           0.f,
                                           0.f,
                                           qScale,
                                           qOffset,
                                           inputData,
                                           qScale,
                                           qOffset,
                                           outputExpectedData);
}

LayerTestResult<int16_t, 4> ReLuInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return ReLuTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
}

LayerTestResult<uint8_t, 4> ReLuUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return ReLuTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
}

LayerTestResult<float, 4> ReLuTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return ReLuTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> BoundedReLuTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    float qScale,
    int32_t qOffset)
{
    std::vector<float> inputData = {
        -0.1f, -0.2f, -0.3f, -0.4f,
        0.1f, 0.2f, 0.3f, 0.4f,
        -1.0f, -2.0f, -3.0f, -4.0f,
        1.0f, 2.0f, 3.0f, 4.0f
    };
    const float a = 1.0f;
    const float b = -1.0f;
    // Calculate output values for input: clamp to [b, a] = [-1, 1].
    auto f = [a, b](float value)
    {
        return std::min(a, std::max(b, value));
    };
    std::vector<float> outputExpectedData(inputData.size());
    std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);

    return SimpleActivationTest<ArmnnType>(workloadFactory,
                                           memoryManager,
                                           tensorHandleFactory,
                                           armnn::ActivationFunction::BoundedReLu,
                                           a,
                                           b,
                                           qScale,
                                           qOffset,
                                           inputData,
                                           qScale,
                                           qOffset,
                                           outputExpectedData);
}

LayerTestResult<int16_t, 4> BoundedReLuInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return BoundedReLuTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, tensorHandleFactory,
                                                            0.1f, 0);
}


template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SoftReLuTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    float qScale,
    int32_t qOffset)
{
    std::vector<float> inputData = {
        -0.1f, -0.2f, -0.3f, -0.4f,
        0.1f, 0.2f, 0.3f, 0.4f,
        -1.0f, -2.0f, -3.0f, -4.0f,
        1.0f, 2.0f, 3.0f, 4.0f
    };

    // Calculate output values for input.
    auto f = [](float value)
    {
        return std::log(1.0f + std::exp(value));
    };
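    // SoftReLu is the softplus function f(x) = ln(1 + e^x), a smooth
    // approximation to ReLu.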
    std::vector<float> outputExpectedData(inputData.size());
    std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);

    return SimpleActivationTest<ArmnnType>(workloadFactory,
                                           memoryManager,
                                           tensorHandleFactory,
                                           armnn::ActivationFunction::SoftReLu,
                                           0.f,
                                           0.f,
                                           qScale,
                                           qOffset,
                                           inputData,
                                           qScale,
                                           qOffset,
                                           outputExpectedData);
}

LayerTestResult<float, 4> SoftReLuTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return SoftReLuTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
}

LayerTestResult<uint8_t, 4> SoftReLuUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return SoftReLuTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager,
                                                         tensorHandleFactory, 0.0625f, 64);
}

LayerTestResult<int16_t, 4> SoftReLuInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return SoftReLuTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> LeakyReLuTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    float qScale,
    int32_t qOffset)
{
    std::vector<float> inputData = {
        -0.1f, -0.2f, -0.3f, -0.4f,
        0.1f, 0.2f, 0.3f, 0.4f,
        -1.0f, -2.0f, -3.0f, -4.0f,
        1.0f, 2.0f, 3.0f, 4.0f
    };

    const float a = 0.01f;
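    // a is the slope applied on the negative axis; it is passed to the
    // workload below as parameter A.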
    // Calculate output values for input.
    auto f = [a](float value)
    {
        return value > 0.0f ? value : (value * a);
    };
    std::vector<float> outputExpectedData(inputData.size());
    std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);

    return SimpleActivationTest<ArmnnType>(workloadFactory,
                                           memoryManager,
                                           tensorHandleFactory,
                                           armnn::ActivationFunction::LeakyReLu,
                                           a,
                                           0.f,
                                           qScale,
                                           qOffset,
                                           inputData,
                                           qScale,
                                           qOffset,
                                           outputExpectedData);
}

LayerTestResult<float, 4> LeakyReLuTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return LeakyReLuTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
}

LayerTestResult<uint8_t, 4> LeakyReLuUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return LeakyReLuTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager,
                                                          tensorHandleFactory, 0.0625f, 64);
}

LayerTestResult<int16_t, 4> LeakyReLuInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return LeakyReLuTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> AbsTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    float qScale,
    int32_t qOffset)
{
    std::vector<float> inputData = {
        -0.1f, -0.2f, -0.3f, -0.4f,
        0.1f, 0.2f, 0.3f, 0.4f,
        -1.0f, -2.0f, -3.0f, -4.0f,
        1.0f, 2.0f, 3.0f, 4.0f
    };

    // Calculate output values for input.
    auto f = [](float value)
    {
        return std::abs(value);
    };
    std::vector<float> outputExpectedData(inputData.size());
    std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);

    return SimpleActivationTest<ArmnnType>(workloadFactory,
                                           memoryManager,
                                           tensorHandleFactory,
                                           armnn::ActivationFunction::Abs,
                                           0.f,
                                           0.f,
                                           qScale,
                                           qOffset,
                                           inputData,
                                           qScale,
                                           qOffset,
                                           outputExpectedData);
}

LayerTestResult<float, 4> AbsTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return AbsTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
}

LayerTestResult<uint8_t, 4> AbsUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return AbsTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, 0.0625f, 64);
}

LayerTestResult<int16_t, 4> AbsInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return AbsTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
}
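
// Sqrt on a rank-5 tensor (1 x 2 x 3 x 4 x 5 = 120 elements), exercising an
// activation workload on a tensor with more than four dimensions.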
LayerTestResult<float, 5> SqrtNNTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    IgnoreUnused(memoryManager);
    const unsigned int inputDataSize = 120;
    std::vector<float> inputData(inputDataSize);

    for (unsigned int i = 0u; i < inputDataSize; ++i)
    {
        inputData[i] = static_cast<float>(i) / 10;
    }

    auto f = [](float value)
    {
        return std::sqrt(value);
    };
    std::vector<float> outputExpectedData(inputDataSize);
    std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);

    armnn::TensorInfo inputTensorInfo(
        { 1u, 2u, 3u, 4u, 5u }, armnn::DataType::Float32);
    armnn::TensorInfo outputTensorInfo(
        { 1u, 2u, 3u, 4u, 5u }, armnn::DataType::Float32);

    LayerTestResult<float, 5> result(inputTensorInfo);

    auto input = MakeTensor<float, 5>(inputTensorInfo, inputData);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ActivationQueueDescriptor descriptor;
    armnn::WorkloadInfo workloadInfo;
    AddInputToWorkload(descriptor, workloadInfo, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, workloadInfo, outputTensorInfo, outputHandle.get());

    descriptor.m_Parameters.m_Function = armnn::ActivationFunction::Sqrt;

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateActivation(descriptor, workloadInfo);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0][0], outputHandle.get());

    // Calculated manually.
    result.outputExpected = MakeTensor<float, 5>(outputTensorInfo, outputExpectedData);

    return result;
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SqrtTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    float qScale,
    int32_t qOffset)
{
    std::vector<float> inputData = {
        0.1f, 0.2f, 0.3f, 0.4f,
        0.1f, 0.2f, 0.3f, 0.4f,
        1.0f, 2.0f, 3.0f, 4.0f,
        1.0f, 2.0f, 3.0f, 4.0f
    };

    // Calculate output values for input.
    auto f = [](float value)
    {
        return std::sqrt(value);
    };
    std::vector<float> outputExpectedData(inputData.size());
    std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);

    return SimpleActivationTest<ArmnnType>(workloadFactory,
                                           memoryManager,
                                           tensorHandleFactory,
                                           armnn::ActivationFunction::Sqrt,
                                           0.f,
                                           0.f,
                                           qScale,
                                           qOffset,
                                           inputData,
                                           qScale,
                                           qOffset,
                                           outputExpectedData);
}

LayerTestResult<float, 4> SqrtTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return SqrtTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
}

LayerTestResult<uint8_t, 4> SqrtUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return SqrtTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, 0.0625f, 64);
}

LayerTestResult<int16_t, 4> SqrtInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return SqrtTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SquareTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    float qScale,
    int32_t qOffset)
{
    std::vector<float> inputData = {
        -0.1f, -0.2f, -0.3f, -0.4f,
        0.1f, 0.2f, 0.3f, 0.4f,
        -1.0f, -2.0f, -3.0f, -4.0f,
        1.0f, 2.0f, 3.0f, 4.0f
    };

    // Calculate output values for input.
    auto f = [](float value)
    {
        return std::pow(value, 2);
    };
    std::vector<float> outputExpectedData(inputData.size());
    std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);

    return SimpleActivationTest<ArmnnType>(workloadFactory,
                                           memoryManager,
                                           tensorHandleFactory,
                                           armnn::ActivationFunction::Square,
                                           0.f,
                                           0.f,
                                           qScale,
                                           qOffset,
                                           inputData,
                                           qScale,
                                           qOffset,
                                           outputExpectedData);
}

LayerTestResult<float, 4> SquareTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return SquareTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
}

LayerTestResult<uint8_t, 4> SquareUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return SquareTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager,
                                                       tensorHandleFactory, 0.0625f, 64);
}

LayerTestResult<int16_t, 4> SquareInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return SquareTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> TanhTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    float qScale,
    int32_t qOffset)
{
    std::vector<float> inputData = {
        -0.1f, -0.2f, -0.3f, -0.4f,
        0.1f, 0.2f, 0.3f, 0.4f,
        -1.0f, -2.0f, -3.0f, -4.0f,
        1.0f, 2.0f, 3.0f, 4.0f
    };

    const float a = 2.0f;
    const float b = 3.0f;
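    // The TanH activation computes f(x) = a * tanh(b * x): a (parameter A)
    // scales the output amplitude and b (parameter B) scales the input.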
    // Calculate output values for input.
    auto f = [a, b](float value)
    {
        return a * tanhf(b * value);
    };
    std::vector<float> outputExpectedData(inputData.size());
    std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);

    return SimpleActivationTest<ArmnnType>(workloadFactory,
                                           memoryManager,
                                           tensorHandleFactory,
                                           armnn::ActivationFunction::TanH,
                                           a,
                                           b,
                                           qScale,
                                           qOffset,
                                           inputData,
                                           qScale,
                                           qOffset,
                                           outputExpectedData);
}

LayerTestResult<float, 4> TanhTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return TanhTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
}

LayerTestResult<uint8_t, 4> TanhUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return TanhTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 64);
}

LayerTestResult<int16_t, 4> TanhInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return TanhTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
}


template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> EluTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    float qScale,
    int32_t qOffset)
{
    std::vector<float> inputData = {
        -0.1f, -0.2f, -0.3f, -0.4f,
        0.1f, 0.2f, 0.3f, 0.4f,
        -1.0f, -2.0f, -3.0f, -4.0f,
        1.0f, 2.0f, 3.0f, 4.0f
    };

    const float a = 0.01f;
    // Calculate output values for input: ELU is x for x >= 0,
    // and a * (e^x - 1) otherwise.
    auto f = [a](float value)
    {
        return (value >= 0) ? value : a * (expf(value) - 1);
    };
    std::vector<float> outputExpectedData(inputData.size());
    std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);

    return SimpleActivationTest<ArmnnType>(workloadFactory,
                                           memoryManager,
                                           tensorHandleFactory,
                                           armnn::ActivationFunction::Elu,
                                           a,
                                           0.0f,
                                           qScale,
                                           qOffset,
                                           inputData,
                                           qScale,
                                           qOffset,
                                           outputExpectedData);
}

LayerTestResult<float, 4> EluTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return EluTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
}

LayerTestResult<uint8_t, 4> EluUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return EluTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 64);
}

LayerTestResult<int16_t, 4> EluInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return EluTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
}


template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> HardSwishTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    float qScale,
    int32_t qOffset)
{
    std::vector<float> inputData = {
        -0.1f, -0.2f, -0.3f, -0.4f,
        0.1f, 0.2f, 0.3f, 0.4f,
        -1.0f, -2.0f, -3.0f, -4.0f,
        1.0f, 2.0f, 3.0f, 4.0f
    };
    // Calculate output values for input.
    auto f = [](float x)
    {
        // Break down the calculation to help with verification:
        // hard_swish(x) = x * relu6(x + 3) / 6
        // relu6(x) = min(max(x, 0), 6)
        float reLu6_step1 = std::max((x + 3), 0.0f);
        float reLu6Complete = std::min(reLu6_step1, 6.0f);
        float hardSwish_step1 = x * reLu6Complete;
        float result = hardSwish_step1 / 6;
        return result;
    };
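    // E.g. x = -1: relu6(-1 + 3) = 2, so hard_swish(-1) = -1 * 2 / 6 = -0.333...
    // For x >= 3 the function reduces to x, and for x <= -3 it is 0.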
    std::vector<float> outputExpectedData(inputData.size());
    std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);

    return SimpleActivationTest<ArmnnType>(workloadFactory,
                                           memoryManager,
                                           tensorHandleFactory,
                                           armnn::ActivationFunction::HardSwish,
                                           0.f,
                                           0.f,
                                           qScale,
                                           qOffset,
                                           inputData,
                                           qScale,
                                           qOffset,
                                           outputExpectedData);
}

LayerTestResult<float, 4> HardSwishTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return HardSwishTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
}

LayerTestResult<uint8_t, 4> HardSwishUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return HardSwishTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager,
                                                          tensorHandleFactory, 0.1f, 64);
}

LayerTestResult<int16_t, 4> HardSwishInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return HardSwishTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
}


template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> CompareActivationTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    const armnn::ITensorHandleFactory& refTensorHandleFactory,
    armnn::ActivationFunction f,
    unsigned int batchSize = 5,
    float qScale = 0.0f,
    int32_t qOffset = 0)
{
    IgnoreUnused(memoryManager);
    unsigned int width = 17;
    unsigned int height = 29;
    unsigned int channels = 2;

    float a = 0.234f;
    float b = -12.345f;

    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;

    unsigned int shape[] = {batchSize, channels, height, width};

    inputTensorInfo = armnn::TensorInfo(4, shape, ArmnnType);
    outputTensorInfo = armnn::TensorInfo(4, shape, ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }
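
    // Random input range; for quantized types the minimum is raised to zero,
    // presumably to keep the data well inside the quantized range set above.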
    float minVal = -10.f;
    if (armnn::IsQuantizedType<T>())
    {
        minVal = 0.f;
    }

    boost::multi_array<T, 4> input = MakeRandomTensor<T, 4>(inputTensorInfo, 21453, minVal, 10.f);

    LayerTestResult<T,4> ret(outputTensorInfo);
    auto boostArrayExtents = boost::extents
        [armnn::numeric_cast<boost::multi_array_types::extent_gen::index>(batchSize)]
        [armnn::numeric_cast<boost::multi_array_types::extent_gen::index>(channels)]
        [armnn::numeric_cast<boost::multi_array_types::extent_gen::index>(height)]
        [armnn::numeric_cast<boost::multi_array_types::extent_gen::index>(width)];
    ret.output.resize(boostArrayExtents);
    ret.outputExpected.resize(boostArrayExtents);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refTensorHandleFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandleRef =
        refTensorHandleFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ActivationQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    data.m_Parameters.m_A = a;
    data.m_Parameters.m_B = b;
    data.m_Parameters.m_Function = f;

    armnn::ActivationQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
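    // The descriptor is duplicated for the reference factory; its result becomes
    // outputExpected, so the backend under test is checked against the reference
    // implementation on identical input.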

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateActivation(data, info);
    ARMNN_ASSERT(workload != nullptr);
    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateActivation(refData, refInfo);
    ARMNN_ASSERT(workloadRef != nullptr);

    inputHandle->Allocate();
    outputHandle->Allocate();
    inputHandleRef->Allocate();
    outputHandleRef->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
    CopyDataToITensorHandle(inputHandleRef.get(), &input[0][0][0][0]);

    workload->Execute();
    workloadRef->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
    CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());

    return ret;
}

LayerTestResult<float, 4> CompareActivationTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    const armnn::ITensorHandleFactory& refTensorHandleFactory,
    armnn::ActivationFunction f,
    unsigned int batchSize)
{
    return CompareActivationTestImpl<armnn::DataType::Float32>(
        workloadFactory, memoryManager, refWorkloadFactory, tensorHandleFactory,
        refTensorHandleFactory, f, batchSize);
}

LayerTestResult<uint8_t, 4> CompareActivationUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    const armnn::ITensorHandleFactory& refTensorHandleFactory,
    armnn::ActivationFunction f)
{
    return CompareActivationTestImpl<armnn::DataType::QAsymmU8>(
        workloadFactory, memoryManager, refWorkloadFactory,
        tensorHandleFactory, refTensorHandleFactory, f, 5, 0.1f, 50);
}

LayerTestResult<int16_t, 4> CompareActivationInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    const armnn::ITensorHandleFactory& refTensorHandleFactory,
    armnn::ActivationFunction f)
{
    return CompareActivationTestImpl<armnn::DataType::QSymmS16>(
        workloadFactory, memoryManager, refWorkloadFactory, tensorHandleFactory,
        refTensorHandleFactory, f, 5, 0.1f, 0);
}