ActivationTestImpl.cpp (ArmNN 21.08)
//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "ActivationTestImpl.hpp"

#include <QuantizeHelper.hpp>
#include <ResolveType.hpp>

#include <armnn/utility/IgnoreUnused.hpp>
#include <backendsCommon/test/ActivationFixture.hpp>
#include <backendsCommon/test/TensorCopyUtils.hpp>
#include <backendsCommon/test/WorkloadTestUtils.hpp>

#include <reference/test/RefWorkloadFactoryHelper.hpp>

#include <test/TensorHelpers.hpp>

#include <algorithm>

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> BoundedReLuTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    float upperBound,
    float lowerBound,
    float inputScale,
    int32_t inputOffset,
    float outputScale,
    int32_t outputOffset,
    const std::vector<T>& inputData,
    const std::vector<T>& outputExpectedData,
    unsigned int inputWidth,
    unsigned int inputHeight,
    unsigned int inputChannels,
    unsigned int inputBatchSize)
{
    IgnoreUnused(memoryManager);
    unsigned int outputWidth = inputWidth;
    unsigned int outputHeight = inputHeight;
    unsigned int outputChannels = inputChannels;
    unsigned int outputBatchSize = inputBatchSize;

    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth }, ArmnnType);

    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth }, ArmnnType);

    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(inputScale);
        inputTensorInfo.SetQuantizationOffset(inputOffset);

        outputTensorInfo.SetQuantizationScale(outputScale);
        outputTensorInfo.SetQuantizationOffset(outputOffset);
    }

    std::vector<T> actualOutput(outputTensorInfo.GetNumElements());

    std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);

    // Set up bounded ReLu.
    armnn::ActivationQueueDescriptor descriptor;
    armnn::WorkloadInfo workloadInfo;
    AddInputToWorkload(descriptor, workloadInfo, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, workloadInfo, outputTensorInfo, outputHandle.get());

    descriptor.m_Parameters.m_Function = armnn::ActivationFunction::BoundedReLu;
    descriptor.m_Parameters.m_A = upperBound;
    descriptor.m_Parameters.m_B = lowerBound;

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateActivation(descriptor, workloadInfo);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), inputData.data());

    workload->Execute();

    CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());

    return LayerTestResult<T, 4>(actualOutput,
                                 outputExpectedData,
                                 outputHandle->GetShape(),
                                 outputTensorInfo.GetShape());
}

LayerTestResult<float, 4> BoundedReLuUpperAndLowerBoundTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    unsigned int inputWidth = 4u;
    unsigned int inputHeight = 5u;
    unsigned int inputChannels = 1u;
    unsigned int inputBatchSize = 1u;

    std::vector<float> input = std::vector<float>{
        -2.0f, 0.1f, 0.5f, 1.25f,
        0.786f, 0.9875f, -1.5f, 0.384f,
        1.0001f, 3.5f, 7.5f, 0.896f,
        2.126f, 2.0f, 0.3f, 0.15f,
        0.999f, 1.2f, 0.89f, 6.1f,
    };

    // Calculated manually.
    std::vector<float> output = std::vector<float>{
        -1.0f, 0.1f, 0.5f, 1.0f,
        0.786f, 0.9875f, -1.0f, 0.384f,
        1.0f, 1.0f, 1.0f, 0.896f,
        1.0f, 1.0f, 0.3f, 0.15f,
        0.999f, 1.0f, 0.89f, 1.0f,
    };

    return BoundedReLuTestCommon<armnn::DataType::Float32>(
        workloadFactory, memoryManager, tensorHandleFactory, 1.0f, -1.0f, 1.0f, 0, 1.0f, 0, input, output,
        inputWidth, inputHeight, inputChannels, inputBatchSize);
}

LayerTestResult<float, 4> BoundedReLuUpperBoundOnlyTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    unsigned int inputWidth = 4u;
    unsigned int inputHeight = 5u;
    unsigned int inputChannels = 1u;
    unsigned int inputBatchSize = 1u;

    std::vector<float> input = std::vector<float>{
        -1.0f, 0.1f, 0.5f, 6.25f,
        0.786f, 5.9875f, -0.5f, 0.384f,
        6.0001f, 3.5f, 7.5f, 0.896f,
        2.126f, 12.0f, 0.3f, 0.15f,
        0.999f, 1.2f, 0.89f, 6.1f,
    };

    // Calculated manually.
    std::vector<float> output = std::vector<float>{
        0.0f, 0.1f, 0.5f, 6.0f,
        0.786f, 5.9875f, 0.0f, 0.384f,
        6.0f, 3.5f, 6.0f, 0.896f,
        2.126f, 6.0f, 0.3f, 0.15f,
        0.999f, 1.2f, 0.89f, 6.0f,
    };

    return BoundedReLuTestCommon<armnn::DataType::Float32>(
        workloadFactory, memoryManager, tensorHandleFactory, 6.0f, 0.0f, 1.0f, 0, 1.0f, 0, input, output,
        inputWidth, inputHeight, inputChannels, inputBatchSize);
}

LayerTestResult<uint8_t, 4> BoundedReLuUint8UpperBoundOnlyTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    unsigned int inputWidth = 3u;
    unsigned int inputHeight = 2u;
    unsigned int inputChannels = 1u;
    unsigned int inputBatchSize = 1u;

    std::vector<uint8_t> input = std::vector<uint8_t>{
        51, 124, 28,
        251, 8, 92
    };

    // Calculated manually.
    std::vector<uint8_t> output = std::vector<uint8_t>{
        0, 122, 0,
        255, 0, 58
    };

    float inputScale = 12.0f / 255.0f;
    int32_t inputOffset = 63;
    float outputScale = 6.0f / 255.0f;
    int32_t outputOffset = 0;
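    // Sanity check of the quantisation above (real = scale * (q - offset)):
    // input 124 -> (124 - 63) * 12/255 ≈ 2.871, inside [0, 6], so it passes through
    // and re-quantises as 2.871 / (6/255) = 122, matching the expected output.
    // Input 51 -> (51 - 63) * 12/255 ≈ -0.565, clamped to 0; 251 -> ≈ 8.85, clamped
    // to 6, which saturates the output range at 255.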

    return BoundedReLuTestCommon<armnn::DataType::QAsymmU8>(
        workloadFactory, memoryManager, tensorHandleFactory, 6.0f, 0.0f,
        inputScale, inputOffset, outputScale, outputOffset,
        input, output, inputWidth, inputHeight, inputChannels, inputBatchSize);
}

LayerTestResult<uint8_t, 4> BoundedReLuUint8UpperAndLowerBoundTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    unsigned int inputWidth = 3u;
    unsigned int inputHeight = 2u;
    unsigned int inputChannels = 1u;
    unsigned int inputBatchSize = 1u;

    std::vector<uint8_t> input = std::vector<uint8_t>{
        51, 230, 28,
        251, 8, 92
    };

    // Calculated manually.
    std::vector<uint8_t> output = std::vector<uint8_t>{
        51, 192, 32,
        192, 32, 92
    };

    int32_t inputOffset = 112;
    float inputScale = 0.0125f;
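    // Sanity check: input 230 -> (230 - 112) * 0.0125 = 1.475, clamped to the upper
    // bound 1.0, which re-quantises as 1.0 / 0.0125 + 112 = 192. Input 28 ->
    // (28 - 112) * 0.0125 = -1.05, clamped to -1.0 -> 32. Input 51 -> -0.7625 is
    // inside [-1, 1], so it is unchanged.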

    return BoundedReLuTestCommon<armnn::DataType::QAsymmU8>(
        workloadFactory, memoryManager, tensorHandleFactory, 1.0f, -1.0f,
        inputScale, inputOffset, inputScale, inputOffset, // Input/output scale & offset same.
        input, output, inputWidth, inputHeight, inputChannels, inputBatchSize);
}

namespace
{

struct BoundedReLuRandomInputTestTraits
{
    constexpr static unsigned int inputHeight = 31u;
    constexpr static unsigned int inputWidth = 19u;
    constexpr static unsigned int inputChannels = 4u;
    constexpr static unsigned int inputBatchSize = 2u;

    constexpr static unsigned int outputHeight = inputHeight;
    constexpr static unsigned int outputWidth = inputWidth;
    constexpr static unsigned int outputChannels = inputChannels;
    constexpr static unsigned int outputBatchSize = inputBatchSize;

    static armnn::TensorInfo GetInputTensorInfo()
    {
        return armnn::TensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
                                 armnn::DataType::Float32);
    }

    static armnn::TensorInfo GetOutputTensorInfo()
    {
        return armnn::TensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
                                 armnn::DataType::Float32);
    }
};

std::vector<float> BoundedReLuRandomInputTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    float lowerBound,
    float upperBound,
    const armnn::ActivationDescriptor& activationDescriptor)
{
    IgnoreUnused(memoryManager);
    const armnn::TensorInfo inputTensorInfo = BoundedReLuRandomInputTestTraits::GetInputTensorInfo();
    const armnn::TensorInfo outputTensorInfo = BoundedReLuRandomInputTestTraits::GetOutputTensorInfo();

    // Min/max random values passed to MakeRandomTensor are purposely outside of the ReLu
    // range [lowerBound, upperBound].
    std::vector<float> input = MakeRandomTensor<float>(inputTensorInfo, 4605828, lowerBound - 5.0f, upperBound * 2.0f);
    std::vector<float> actualOutput(outputTensorInfo.GetNumElements());

    std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);

    // Set up bounded ReLu.
    armnn::ActivationQueueDescriptor descriptor;
    armnn::WorkloadInfo workloadInfo;
    AddInputToWorkload(descriptor, workloadInfo, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, workloadInfo, outputTensorInfo, outputHandle.get());
    descriptor.m_Parameters = activationDescriptor;

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateActivation(descriptor, workloadInfo);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), input.data());

    workload->Execute();

    CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());

    return actualOutput;
}

} // namespace

LayerTestResult<float, 4> CompareBoundedReLuTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    const armnn::ITensorHandleFactory& refTensorHandleFactory,
    float upperBound,
    float lowerBound)
{
    LayerTestResult<float, 4> result(BoundedReLuRandomInputTestTraits::GetOutputTensorInfo());

    armnn::ActivationDescriptor activationDescriptor;
    activationDescriptor.m_Function = armnn::ActivationFunction::BoundedReLu;
    activationDescriptor.m_A = upperBound;
    activationDescriptor.m_B = lowerBound;

    result.m_ActualData = BoundedReLuRandomInputTest(
        workloadFactory, memoryManager, tensorHandleFactory, 0.0f, upperBound, activationDescriptor);
    result.m_ExpectedData = BoundedReLuRandomInputTest(
        refWorkloadFactory, nullptr, refTensorHandleFactory, 0.0f, upperBound, activationDescriptor);

    return result;
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> ConstantLinearActivationTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    float qScale = 0.0f,
    int32_t qOffset = 0)
{
    IgnoreUnused(memoryManager);
    unsigned int inputHeight = 20;
    unsigned int inputWidth = 17;
    unsigned int inputChannels = 3;
    unsigned int batchSize = 5;

    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;

    unsigned int shape[] = {batchSize, inputChannels, inputHeight, inputWidth};

    inputTensorInfo = armnn::TensorInfo(4, shape, ArmnnType);
    outputTensorInfo = armnn::TensorInfo(4, shape, ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);

    // Do a linear activation that should leave the tensor unchanged.
    armnn::ActivationQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    data.m_Parameters.m_A = 1.0f;
    data.m_Parameters.m_B = 0.0f;
    data.m_Parameters.m_Function = armnn::ActivationFunction::Linear;

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateActivation(data, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    std::vector<T> input = MakeRandomTensor<T>(inputTensorInfo, 7123561);
    std::vector<T> actualOutput(outputTensorInfo.GetNumElements());

    CopyDataToITensorHandle(inputHandle.get(), input.data());

    workload->Execute();

    CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());

    // Use input as ExpectedData, as the tensor doesn't change.
    return LayerTestResult<T, 4>(actualOutput,
                                 input,
                                 outputHandle->GetShape(),
                                 outputTensorInfo.GetShape());
}

LayerTestResult<float, 4> ConstantLinearActivationTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return ConstantLinearActivationTestCommon<armnn::DataType::Float32>(workloadFactory,
                                                                        memoryManager,
                                                                        tensorHandleFactory);
}

LayerTestResult<uint8_t, 4> ConstantLinearActivationUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return ConstantLinearActivationTestCommon<armnn::DataType::QAsymmU8>(
        workloadFactory, memoryManager, tensorHandleFactory, 4.0f, 3);
}

LayerTestResult<int16_t, 4> ConstantLinearActivationInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return ConstantLinearActivationTestCommon<armnn::DataType::QSymmS16>(
        workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleActivationTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    armnn::ActivationFunction activationFunction,
    float activationParameterA,
    float activationParameterB,
    float scale,
    int32_t offset,
    const std::vector<float>& inputData,
    float outScale,
    int32_t outOffset,
    const std::vector<float>& outputExpectedData)
{
    IgnoreUnused(memoryManager);
    constexpr static unsigned int inputWidth = 16u;
    constexpr static unsigned int inputHeight = 1u;
    constexpr static unsigned int inputChannels = 1u;
    constexpr static unsigned int inputBatchSize = 1u;

    constexpr static unsigned int outputWidth = inputWidth;
    constexpr static unsigned int outputHeight = inputHeight;
    constexpr static unsigned int outputChannels = inputChannels;
    constexpr static unsigned int outputBatchSize = inputBatchSize;

    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth }, ArmnnType);
    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth }, ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(scale);
        inputTensorInfo.SetQuantizationOffset(offset);
        outputTensorInfo.SetQuantizationScale(outScale);
        outputTensorInfo.SetQuantizationOffset(outOffset);
    }

    std::vector<T> input = armnnUtils::QuantizedVector<T>(inputData, scale, offset);

    // Quantize the manually calculated expected output.
    std::vector<T> actualOutput(outputTensorInfo.GetNumElements());
    std::vector<T> outputExpected = armnnUtils::QuantizedVector<T>(outputExpectedData, outScale, outOffset);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);

    // Set up the activation workload.
    armnn::ActivationQueueDescriptor descriptor;
    armnn::WorkloadInfo workloadInfo;
    AddInputToWorkload(descriptor, workloadInfo, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, workloadInfo, outputTensorInfo, outputHandle.get());

    descriptor.m_Parameters.m_Function = activationFunction;
    descriptor.m_Parameters.m_A = activationParameterA;
    descriptor.m_Parameters.m_B = activationParameterB;

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateActivation(descriptor, workloadInfo);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), input.data());

    workload->Execute();

    CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());

    return LayerTestResult<T, 4>(actualOutput,
                                 outputExpected,
                                 outputHandle->GetShape(),
                                 outputTensorInfo.GetShape());
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleSigmoidTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    float qScale,
    int32_t qOffset)
{
    std::vector<float> inputData =
    {
        -0.1f, -0.2f, -0.3f, -0.4f,
        0.1f, 0.2f, 0.3f, 0.4f,
        -1.0f, -2.0f, -3.0f, -4.0f,
        1.0f, 2.0f, 3.0f, 4.0f
    };

    // Calculate output values for input: sigmoid(x) = 1 / (1 + e^-x).
    auto f = [](float value)
    {
        return 1.0f / (1.0f + std::exp(-value));
    };
    std::vector<float> outputExpected(inputData.size());
    std::transform(inputData.begin(), inputData.end(), outputExpected.begin(), f);
501 
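    // Note on the output quantization below: sigmoid outputs lie in [0, 1), so an
    // output scale of 1/256 with zero offset maps that interval onto the full
    // quantized range.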
    return SimpleActivationTest<ArmnnType>(workloadFactory,
                                           memoryManager,
                                           tensorHandleFactory,
                                           armnn::ActivationFunction::Sigmoid,
                                           0.f,
                                           0.f,
                                           qScale,
                                           qOffset,
                                           inputData,
                                           1.f / 256.f,
                                           0,
                                           outputExpected);
}

LayerTestResult<float, 4> SimpleSigmoidTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return SimpleSigmoidTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager,
                                                             tensorHandleFactory, 0.0f, 0);
}

LayerTestResult<uint8_t, 4> SimpleSigmoidUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return SimpleSigmoidTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager,
                                                              tensorHandleFactory, 0.1f, 50);
}

LayerTestResult<int16_t, 4> SimpleSigmoidInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return SimpleSigmoidTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager,
                                                              tensorHandleFactory, 0.1f, 0);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> ReLuTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    float qScale,
    int32_t qOffset)
{
    std::vector<float> inputData = {
        -0.1f, -0.2f, -0.3f, -0.4f,
        0.1f, 0.2f, 0.3f, 0.4f,
        -1.0f, -2.0f, -3.0f, -4.0f,
        1.0f, 2.0f, 3.0f, 4.0f
    };

    // Calculate output values for input: ReLu(x) = max(0, x).
    auto f = [](float value)
    {
        return std::fmax(0.0f, value);
    };
    std::vector<float> outputExpected(inputData.size());
    std::transform(inputData.begin(), inputData.end(), outputExpected.begin(), f);

    return SimpleActivationTest<ArmnnType>(workloadFactory,
                                           memoryManager,
                                           tensorHandleFactory,
                                           armnn::ActivationFunction::ReLu,
                                           0.f,
                                           0.f,
                                           qScale,
                                           qOffset,
                                           inputData,
                                           qScale,
                                           qOffset,
                                           outputExpected);
}

LayerTestResult<int16_t, 4> ReLuInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return ReLuTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
}

LayerTestResult<uint8_t, 4> ReLuUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return ReLuTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
}

LayerTestResult<float, 4> ReLuTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return ReLuTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> BoundedReLuTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    float qScale,
    int32_t qOffset)
{
    std::vector<float> inputData = {
        -0.1f, -0.2f, -0.3f, -0.4f,
        0.1f, 0.2f, 0.3f, 0.4f,
        -1.0f, -2.0f, -3.0f, -4.0f,
        1.0f, 2.0f, 3.0f, 4.0f
    };
    const float a = 1.0f;
    const float b = -1.0f;
    // Calculate output values for input: clamp each value to [b, a].
    auto f = [a, b](float value)
    {
        return std::min(a, std::max(b, value));
    };
    std::vector<float> outputExpected(inputData.size());
    std::transform(inputData.begin(), inputData.end(), outputExpected.begin(), f);

    return SimpleActivationTest<ArmnnType>(workloadFactory,
                                           memoryManager,
                                           tensorHandleFactory,
                                           armnn::ActivationFunction::BoundedReLu,
                                           a,
                                           b,
                                           qScale,
                                           qOffset,
                                           inputData,
                                           qScale,
                                           qOffset,
                                           outputExpected);
}

LayerTestResult<int16_t, 4> BoundedReLuInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return ReLuTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SoftReLuTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    float qScale,
    int32_t qOffset)
{
    std::vector<float> inputData = {
        -0.1f, -0.2f, -0.3f, -0.4f,
        0.1f, 0.2f, 0.3f, 0.4f,
        -1.0f, -2.0f, -3.0f, -4.0f,
        1.0f, 2.0f, 3.0f, 4.0f
    };

    // Calculate output values for input: SoftReLu is the softplus function ln(1 + e^x).
    auto f = [](float value)
    {
        return std::log(1.0f + std::exp(value));
    };
    std::vector<float> outputExpected(inputData.size());
    std::transform(inputData.begin(), inputData.end(), outputExpected.begin(), f);

    return SimpleActivationTest<ArmnnType>(workloadFactory,
                                           memoryManager,
                                           tensorHandleFactory,
                                           armnn::ActivationFunction::SoftReLu,
                                           0.f,
                                           0.f,
                                           qScale,
                                           qOffset,
                                           inputData,
                                           qScale,
                                           qOffset,
                                           outputExpected);
}

LayerTestResult<float, 4> SoftReLuTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return SoftReLuTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
}

LayerTestResult<uint8_t, 4> SoftReLuUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return SoftReLuTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager,
                                                         tensorHandleFactory, 0.0625f, 64);
}

LayerTestResult<int16_t, 4> SoftReLuInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return SoftReLuTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> LeakyReLuTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    float qScale,
    int32_t qOffset)
{
    std::vector<float> inputData = {
        -0.1f, -0.2f, -0.3f, -0.4f,
        0.1f, 0.2f, 0.3f, 0.4f,
        -1.0f, -2.0f, -3.0f, -4.0f,
        1.0f, 2.0f, 3.0f, 4.0f
    };

    const float a = 0.01f;
    // Calculate output values for input: LeakyReLu(x) = x if x > 0, otherwise a * x.
    auto f = [a](float value)
    {
        return value > 0.0f ? value : (value * a);
    };
    std::vector<float> outputExpected(inputData.size());
    std::transform(inputData.begin(), inputData.end(), outputExpected.begin(), f);

    return SimpleActivationTest<ArmnnType>(workloadFactory,
                                           memoryManager,
                                           tensorHandleFactory,
                                           armnn::ActivationFunction::LeakyReLu,
                                           a,
                                           0.f,
                                           qScale,
                                           qOffset,
                                           inputData,
                                           qScale,
                                           qOffset,
                                           outputExpected);
}

LayerTestResult<float, 4> LeakyReLuTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return LeakyReLuTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
}

LayerTestResult<uint8_t, 4> LeakyReLuUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return LeakyReLuTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager,
                                                          tensorHandleFactory, 0.0625f, 64);
}

LayerTestResult<int16_t, 4> LeakyReLuInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return LeakyReLuTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> AbsTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    float qScale,
    int32_t qOffset)
{
    std::vector<float> inputData = {
        -0.1f, -0.2f, -0.3f, -0.4f,
        0.1f, 0.2f, 0.3f, 0.4f,
        -1.0f, -2.0f, -3.0f, -4.0f,
        1.0f, 2.0f, 3.0f, 4.0f
    };

    // Calculate output values for input.
    auto f = [](float value)
    {
        return std::abs(value);
    };
    std::vector<float> outputExpected(inputData.size());
    std::transform(inputData.begin(), inputData.end(), outputExpected.begin(), f);

    return SimpleActivationTest<ArmnnType>(workloadFactory,
                                           memoryManager,
                                           tensorHandleFactory,
                                           armnn::ActivationFunction::Abs,
                                           0.f,
                                           0.f,
                                           qScale,
                                           qOffset,
                                           inputData,
                                           qScale,
                                           qOffset,
                                           outputExpected);
}

LayerTestResult<float, 4> AbsTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return AbsTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
}

LayerTestResult<uint8_t, 4> AbsUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return AbsTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, 0.0625f, 64);
}

LayerTestResult<int16_t, 4> AbsInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return AbsTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
}

LayerTestResult<float, 5> SqrtNNTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    IgnoreUnused(memoryManager);
    const unsigned int inputDataSize = 120u;
    std::vector<float> inputData(inputDataSize);

    for (unsigned int i = 0u; i < inputDataSize; ++i)
    {
        inputData[i] = static_cast<float>(i) / 10;
    }

    auto f = [](float value)
    {
        return std::sqrt(value);
    };
    std::vector<float> expectedOutput(inputDataSize);
    std::transform(inputData.begin(), inputData.end(), expectedOutput.begin(), f);

    armnn::TensorInfo inputTensorInfo(
        { 1u, 2u, 3u, 4u, 5u }, armnn::DataType::Float32);
    armnn::TensorInfo outputTensorInfo(
        { 1u, 2u, 3u, 4u, 5u }, armnn::DataType::Float32);

    std::vector<float> actualOutput(outputTensorInfo.GetNumElements());

    std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ActivationQueueDescriptor descriptor;
    armnn::WorkloadInfo workloadInfo;
    AddInputToWorkload(descriptor, workloadInfo, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, workloadInfo, outputTensorInfo, outputHandle.get());

    descriptor.m_Parameters.m_Function = armnn::ActivationFunction::Sqrt;

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateActivation(descriptor, workloadInfo);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), inputData.data());

    workload->Execute();

    CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());

    return LayerTestResult<float, 5>(actualOutput,
                                     expectedOutput,
                                     outputHandle->GetShape(),
                                     outputTensorInfo.GetShape());
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SqrtTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    float qScale,
    int32_t qOffset)
{
    std::vector<float> inputData = {
        0.1f, 0.2f, 0.3f, 0.4f,
        0.1f, 0.2f, 0.3f, 0.4f,
        1.0f, 2.0f, 3.0f, 4.0f,
        1.0f, 2.0f, 3.0f, 4.0f
    };

    // Calculate output values for input.
    auto f = [](float value)
    {
        return std::sqrt(value);
    };
    std::vector<float> expectedOutput(inputData.size());
    std::transform(inputData.begin(), inputData.end(), expectedOutput.begin(), f);

    return SimpleActivationTest<ArmnnType>(workloadFactory,
                                           memoryManager,
                                           tensorHandleFactory,
                                           armnn::ActivationFunction::Sqrt,
                                           0.f,
                                           0.f,
                                           qScale,
                                           qOffset,
                                           inputData,
                                           qScale,
                                           qOffset,
                                           expectedOutput);
}

LayerTestResult<float, 4> SqrtTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return SqrtTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
}

LayerTestResult<uint8_t, 4> SqrtUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return SqrtTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, 0.0625f, 64);
}

LayerTestResult<int16_t, 4> SqrtInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return SqrtTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SquareTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    float qScale,
    int32_t qOffset)
{
    std::vector<float> inputData = {
        -0.1f, -0.2f, -0.3f, -0.4f,
        0.1f, 0.2f, 0.3f, 0.4f,
        -1.0f, -2.0f, -3.0f, -4.0f,
        1.0f, 2.0f, 3.0f, 4.0f
    };

    // Calculate output values for input.
    auto f = [](float value)
    {
        return std::pow(value, 2);
    };
    std::vector<float> expectedOutput(inputData.size());
    std::transform(inputData.begin(), inputData.end(), expectedOutput.begin(), f);

    return SimpleActivationTest<ArmnnType>(workloadFactory,
                                           memoryManager,
                                           tensorHandleFactory,
                                           armnn::ActivationFunction::Square,
                                           0.f,
                                           0.f,
                                           qScale,
                                           qOffset,
                                           inputData,
                                           qScale,
                                           qOffset,
                                           expectedOutput);
}

LayerTestResult<float, 4> SquareTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return SquareTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
}

LayerTestResult<uint8_t, 4> SquareUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return SquareTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager,
                                                       tensorHandleFactory, 0.0625f, 64);
}

LayerTestResult<int16_t, 4> SquareInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return SquareTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> TanhTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    float qScale,
    int32_t qOffset)
{
    std::vector<float> inputData = {
        -0.1f, -0.2f, -0.3f, -0.4f,
        0.1f, 0.2f, 0.3f, 0.4f,
        -1.0f, -2.0f, -3.0f, -4.0f,
        1.0f, 2.0f, 3.0f, 4.0f
    };

    const float a = 2.0f;
    const float b = 3.0f;
    // Calculate output values for input: TanH(x) = a * tanh(b * x).
    auto f = [a, b](float value)
    {
        return a * tanhf(b * value);
    };
    std::vector<float> expectedOutput(inputData.size());
    std::transform(inputData.begin(), inputData.end(), expectedOutput.begin(), f);

    return SimpleActivationTest<ArmnnType>(workloadFactory,
                                           memoryManager,
                                           tensorHandleFactory,
                                           armnn::ActivationFunction::TanH,
                                           a,
                                           b,
                                           qScale,
                                           qOffset,
                                           inputData,
                                           qScale,
                                           qOffset,
                                           expectedOutput);
}

LayerTestResult<float, 4> TanhTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return TanhTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
}

LayerTestResult<uint8_t, 4> TanhUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return TanhTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 64);
}

LayerTestResult<int16_t, 4> TanhInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return TanhTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> EluTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    float qScale,
    int32_t qOffset)
{
    std::vector<float> inputData = {
        -0.1f, -0.2f, -0.3f, -0.4f,
        0.1f, 0.2f, 0.3f, 0.4f,
        -1.0f, -2.0f, -3.0f, -4.0f,
        1.0f, 2.0f, 3.0f, 4.0f
    };

    const float a = 0.01f;
    // Calculate output values for input: Elu(x) = x if x >= 0, otherwise a * (e^x - 1).
    auto f = [a](float value)
    {
        return (value >= 0) ? value : a * (expf(value) - 1);
    };
    std::vector<float> expectedOutput(inputData.size());
    std::transform(inputData.begin(), inputData.end(), expectedOutput.begin(), f);

    return SimpleActivationTest<ArmnnType>(workloadFactory,
                                           memoryManager,
                                           tensorHandleFactory,
                                           armnn::ActivationFunction::Elu,
                                           a,
                                           0.0f,
                                           qScale,
                                           qOffset,
                                           inputData,
                                           qScale,
                                           qOffset,
                                           expectedOutput);
}

LayerTestResult<float, 4> EluTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return EluTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
}

LayerTestResult<uint8_t, 4> EluUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return EluTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 64);
}

LayerTestResult<int16_t, 4> EluInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return EluTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> HardSwishTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    float qScale,
    int32_t qOffset)
{
    std::vector<float> inputData = {
        -0.1f, -0.2f, -0.3f, -0.4f,
        0.1f, 0.2f, 0.3f, 0.4f,
        -1.0f, -2.0f, -3.0f, -4.0f,
        1.0f, 2.0f, 3.0f, 4.0f
    };
    // Calculate output values for input.
    auto f = [](float x)
    {
        // Break down the calculation to help with verification:
        // hard_swish(x) = x * relu6(x + 3) / 6
        // relu6(x) = min(max(x, 0), 6)
        float reLu6_step1 = std::max((x + 3), 0.0f);
        float reLu6Complete = std::min(reLu6_step1, 6.0f);
        float hardSwish_step1 = x * reLu6Complete;
        float result = hardSwish_step1 / 6;
        return result;
    };
    std::vector<float> expectedOutput(inputData.size());
    std::transform(inputData.begin(), inputData.end(), expectedOutput.begin(), f);

    return SimpleActivationTest<ArmnnType>(workloadFactory,
                                           memoryManager,
                                           tensorHandleFactory,
                                           armnn::ActivationFunction::HardSwish,
                                           0.f,
                                           0.f,
                                           qScale,
                                           qOffset,
                                           inputData,
                                           qScale,
                                           qOffset,
                                           expectedOutput);
}

LayerTestResult<float, 4> HardSwishTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return HardSwishTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
}

LayerTestResult<uint8_t, 4> HardSwishUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return HardSwishTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager,
                                                          tensorHandleFactory, 0.1f, 64);
}

LayerTestResult<int16_t, 4> HardSwishInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return HardSwishTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> CompareActivationTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    const armnn::ITensorHandleFactory& refTensorHandleFactory,
    armnn::ActivationFunction f,
    unsigned int batchSize = 5,
    float qScale = 0.0f,
    int32_t qOffset = 0)
{
    IgnoreUnused(memoryManager);
    unsigned int width = 17;
    unsigned int height = 29;
    unsigned int channels = 2;

    float a = 0.234f;
    float b = -12.345f;

    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;

    unsigned int shape[] = {batchSize, channels, height, width};

    inputTensorInfo = armnn::TensorInfo(4, shape, ArmnnType);
    outputTensorInfo = armnn::TensorInfo(4, shape, ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    float minVal = -10.f;
    if (f == armnn::ActivationFunction::Sqrt)
    {
        minVal = 0.f;
    }

    std::vector<T> input = MakeRandomTensor<T>(inputTensorInfo, 21453, minVal, 10.f);
    std::vector<T> actualOutput(outputTensorInfo.GetNumElements());
    std::vector<T> expectedOutput(outputTensorInfo.GetNumElements());

    std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refTensorHandleFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refTensorHandleFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ActivationQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    data.m_Parameters.m_A = a;
    data.m_Parameters.m_B = b;
    data.m_Parameters.m_Function = f;

    armnn::ActivationQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateActivation(data, info);
    ARMNN_ASSERT(workload != nullptr);
    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateActivation(refData, refInfo);
    ARMNN_ASSERT(workloadRef != nullptr);

    inputHandle->Allocate();
    outputHandle->Allocate();
    inputHandleRef->Allocate();
    outputHandleRef->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), input.data());
    CopyDataToITensorHandle(inputHandleRef.get(), input.data());

    workload->Execute();
    workloadRef->Execute();

    CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
    CopyDataFromITensorHandle(expectedOutput.data(), outputHandleRef.get());

    return LayerTestResult<T, 4>(actualOutput,
                                 expectedOutput,
                                 outputHandle->GetShape(),
                                 outputTensorInfo.GetShape());
}

LayerTestResult<float, 4> CompareActivationTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    const armnn::ITensorHandleFactory& refTensorHandleFactory,
    armnn::ActivationFunction f,
    unsigned int batchSize)
{
    return CompareActivationTestImpl<armnn::DataType::Float32>(
        workloadFactory, memoryManager, refWorkloadFactory, tensorHandleFactory,
        refTensorHandleFactory, f, batchSize);
}

LayerTestResult<uint8_t, 4> CompareActivationUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    const armnn::ITensorHandleFactory& refTensorHandleFactory,
    armnn::ActivationFunction f)
{
    return CompareActivationTestImpl<armnn::DataType::QAsymmU8>(
        workloadFactory, memoryManager, refWorkloadFactory,
        tensorHandleFactory, refTensorHandleFactory, f, 5, 0.1f, 50);
}

LayerTestResult<int16_t, 4> CompareActivationInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    const armnn::ITensorHandleFactory& refTensorHandleFactory,
    armnn::ActivationFunction f)
{
    return CompareActivationTestImpl<armnn::DataType::QSymmS16>(
        workloadFactory, memoryManager, refWorkloadFactory, tensorHandleFactory,
        refTensorHandleFactory, f, 5, 0.1f, 0);
}
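
// Usage sketch (illustrative, not part of this file): backend unit-test suites
// register these helpers as test cases against a concrete workload factory. The
// macro name below reflects the ArmNN 21.08 test framework; treat the exact
// spelling as an assumption if porting elsewhere:
//
//     ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleSigmoid, SimpleSigmoidTest)
//     ARMNN_AUTO_TEST_CASE_WITH_THF(ReLu, ReLuTest)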