Arm NN 22.08 — source listing of ActivationTestImpl.cpp (extracted from the generated Doxygen page for this file).
1 //
2 // Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
#include "ActivationTestImpl.hpp"

#include <QuantizeHelper.hpp>
#include <ResolveType.hpp>

#include <armnn/utility/IgnoreUnused.hpp>

#include <armnnTestUtils/TensorCopyUtils.hpp>
#include <armnnTestUtils/WorkloadTestUtils.hpp>

#include <reference/test/RefWorkloadFactoryHelper.hpp>

#include <algorithm>
22 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
24  armnn::IWorkloadFactory& workloadFactory,
26  const armnn::ITensorHandleFactory& tensorHandleFactory,
27  float upperBound,
28  float lowerBound,
29  float inputScale,
30  int32_t inputOffset,
31  float outputScale,
32  int32_t outputOffset,
33  const std::vector<T>& inputData,
34  const std::vector<T>& outputExpectedData,
35  unsigned int inputWidth,
36  unsigned int inputHeight,
37  unsigned int inputChannels,
38  unsigned int inputBatchSize)
39 {
40  IgnoreUnused(memoryManager);
41  unsigned int outputWidth = inputWidth;
42  unsigned int outputHeight = inputHeight;
43  unsigned int outputChannels = inputChannels;
44  unsigned int outputBatchSize = inputBatchSize;
45 
46  armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth }, ArmnnType);
47 
48  armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth }, ArmnnType);
49 
50  if(armnn::IsQuantizedType<T>())
51  {
52  inputTensorInfo.SetQuantizationScale(inputScale);
53  inputTensorInfo.SetQuantizationOffset(inputOffset);
54 
55  outputTensorInfo.SetQuantizationScale(outputScale);
56  outputTensorInfo.SetQuantizationOffset(outputOffset);
57  }
58 
59  std::vector<T> actualOutput(outputTensorInfo.GetNumElements());
60 
61  std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
62  std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
63 
64  // Setup bounded ReLu.
66  armnn::WorkloadInfo workloadInfo;
67  AddInputToWorkload(descriptor, workloadInfo, inputTensorInfo, inputHandle.get());
68  AddOutputToWorkload(descriptor, workloadInfo, outputTensorInfo, outputHandle.get());
69 
71  descriptor.m_Parameters.m_A = upperBound;
72  descriptor.m_Parameters.m_B = lowerBound;
73 
74  std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Activation,
75  descriptor, workloadInfo);
76 
77  inputHandle->Allocate();
78  outputHandle->Allocate();
79 
80  CopyDataToITensorHandle(inputHandle.get(), inputData.data());
81 
82  workload->Execute();
83 
84  CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
85 
86  return LayerTestResult<T, 4>(actualOutput,
87  outputExpectedData,
88  outputHandle->GetShape(),
89  outputTensorInfo.GetShape());
90 }
91 
93  armnn::IWorkloadFactory& workloadFactory,
95  const armnn::ITensorHandleFactory& tensorHandleFactory)
96 {
97  unsigned int inputWidth = 4u;
98  unsigned int inputHeight = 5u;
99  unsigned int inputChannels = 1u;
100  unsigned int inputBatchSize = 1;
101 
102  std::vector<float> input = std::vector<float>{
103  -2.0f, 0.1f, 0.5f, 1.25f,
104  0.786f, 0.9875f, -1.5f, 0.384f,
105  1.0001f, 3.5f, 7.5f, 0.896f,
106  2.126f, 2.0f, 0.3f, 0.15f,
107  0.999f, 1.2f, 0.89f, 6.1f,
108  };
109 
110  // Calculated manually.
111  std::vector<float> output = std::vector<float>{
112  -1.0f, 0.1f, 0.5f, 1.0f,
113  0.786f, 0.9875f, -1.0f, 0.384f,
114  1.0f, 1.0f, 1.0f, 0.896f,
115  1.0f, 1.0f, 0.3f, 0.15f,
116  0.999f, 1.0f, 0.89f, 1.0f,
117  };
118 
119  return BoundedReLuTestCommon<armnn::DataType::Float32>(
120  workloadFactory, memoryManager, tensorHandleFactory, 1.0f, -1.0f, 1.0f, 0, 1.0f, 0, input, output,
121  inputWidth, inputHeight, inputChannels, inputBatchSize);
122 }
123 
125  armnn::IWorkloadFactory& workloadFactory,
127  const armnn::ITensorHandleFactory& tensorHandleFactory)
128 {
129  unsigned int inputWidth = 4u;
130  unsigned int inputHeight = 5u;
131  unsigned int inputChannels = 1u;
132  unsigned int inputBatchSize = 1;
133 
134  std::vector<float> input = std::vector<float>{
135  -1.0f, 0.1f, 0.5f, 6.25f,
136  0.786f, 5.9875f, -0.5f, 0.384f,
137  6.0001f, 3.5f, 7.5f, 0.896f,
138  2.126f, 12.0f, 0.3f, 0.15f,
139  0.999f, 1.2f, 0.89f, 6.1f,
140  };
141 
142  // Calculated manually.
143  std::vector<float> output = std::vector<float>{
144  0.0f, 0.1f, 0.5f, 6.0f,
145  0.786f, 5.9875f, 0.0f, 0.384f,
146  6.0f, 3.5f, 6.0f, 0.896f,
147  2.126f, 6.0f, 0.3f, 0.15f,
148  0.999f, 1.2f, 0.89f, 6.0f,
149  };
150 
151  return BoundedReLuTestCommon<armnn::DataType::Float32>(
152  workloadFactory, memoryManager, tensorHandleFactory, 6.0f, 0.0f, 1.0f, 0, 1.0f, 0, input, output,
153  inputWidth, inputHeight, inputChannels, inputBatchSize);
154 }
155 
157  armnn::IWorkloadFactory& workloadFactory,
159  const armnn::ITensorHandleFactory& tensorHandleFactory)
160 {
161  unsigned int inputWidth = 3u;
162  unsigned int inputHeight = 2u;
163  unsigned int inputChannels = 1u;
164  unsigned int inputBatchSize = 1;
165 
166  std::vector<uint8_t> input = std::vector<uint8_t>{
167  51, 124, 28,
168  251, 8, 92
169  };
170 
171  // Calculated manually.
172  std::vector<uint8_t> output = std::vector<uint8_t>{
173  0, 122, 0,
174  255, 0, 58
175  };
176 
177  float inputScale = 12.0f / 255.0f;
178  int32_t inputOffset = 63;
179  float outputScale = 6.0f / 255.0f;
180  int32_t outputOffset = 0;
181 
182  return BoundedReLuTestCommon<armnn::DataType::QAsymmU8>(
183  workloadFactory, memoryManager, tensorHandleFactory, 6.0f, 0.0f,
184  inputScale, inputOffset, outputScale, outputOffset,
185  input, output, inputWidth, inputHeight, inputChannels, inputBatchSize);
186 }
187 
189  armnn::IWorkloadFactory& workloadFactory,
191  const armnn::ITensorHandleFactory& tensorHandleFactory)
192 {
193  unsigned int inputWidth = 3u;
194  unsigned int inputHeight = 2u;
195  unsigned int inputChannels = 1u;
196  unsigned int inputBatchSize = 1;
197 
198  std::vector<uint8_t> input = std::vector<uint8_t>{
199  51, 230, 28,
200  251, 8, 92
201  };
202 
203  // Calculated manually.
204  std::vector<uint8_t> output = std::vector<uint8_t>{
205  51, 192, 32,
206  192, 32, 92
207  };
208 
209  int32_t inputOffset = 112;
210  float inputScale = 0.0125f;
211 
212  return BoundedReLuTestCommon<armnn::DataType::QAsymmU8>(
213  workloadFactory, memoryManager, tensorHandleFactory, 1.0f, -1.0f,
214  inputScale, inputOffset, inputScale, inputOffset, // Input/output scale & offset same.
215  input, output, inputWidth, inputHeight, inputChannels, inputBatchSize);
216 }
217 
218 namespace
219 {
220 
221 struct BoundedReLuRandomInputTestTraits
222 {
223  constexpr static unsigned int inputHeight = 31u;
224  constexpr static unsigned int inputWidth = 19u;
225  constexpr static unsigned int inputChannels = 4u;
226  constexpr static unsigned int inputBatchSize = 2;
227 
228  constexpr static unsigned int outputHeight = inputHeight;
229  constexpr static unsigned int outputWidth = inputWidth;
230  constexpr static unsigned int outputChannels = inputChannels;
231  constexpr static unsigned int outputBatchSize = inputBatchSize;
232 
233  static armnn::TensorInfo GetInputTensorInfo()
234  {
235  return armnn::TensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
237  }
238 
239  static armnn::TensorInfo GetOutputTensorInfo()
240  {
241  return armnn::TensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
243  }
244 };
245 
246 std::vector<float> BoundedReLuRandomInputTest(
247  armnn::IWorkloadFactory& workloadFactory,
249  const armnn::ITensorHandleFactory& tensorHandleFactory,
250  float lowerBound,
251  float upperBound,
252  const armnn::ActivationDescriptor& activationDescriptor)
253 {
254  IgnoreUnused(memoryManager);
255  const armnn::TensorInfo inputTensorInfo = BoundedReLuRandomInputTestTraits::GetInputTensorInfo();
256  const armnn::TensorInfo outputTensorInfo = BoundedReLuRandomInputTestTraits::GetOutputTensorInfo();
257 
258  // Min/max random values passed to MakeRandomTensor are purposely outside of the ReLu
259  // range [lowerBound, upperBound].
260  std::vector<float> input = MakeRandomTensor<float>(inputTensorInfo, 4605828, lowerBound - 5.0f, upperBound * 2.0f);
261  std::vector<float> actualOutput(outputTensorInfo.GetNumElements());
262 
263  std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
264  std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
265 
266  // Set up bounded ReLu.
268  armnn::WorkloadInfo workloadInfo;
269  AddInputToWorkload(descriptor, workloadInfo, inputTensorInfo, inputHandle.get());
270  AddOutputToWorkload(descriptor, workloadInfo, outputTensorInfo, outputHandle.get());
271  descriptor.m_Parameters = activationDescriptor;
272 
273  std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Activation,
274  descriptor, workloadInfo);
275 
276  inputHandle->Allocate();
277  outputHandle->Allocate();
278 
279  CopyDataToITensorHandle(inputHandle.get(), input.data());
280 
281  workload->Execute();
282 
283  CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
284 
285  return actualOutput;
286 }
287 
288 } // namespace
289 
291  armnn::IWorkloadFactory& workloadFactory,
293  armnn::IWorkloadFactory& refWorkloadFactory,
294  const armnn::ITensorHandleFactory& tensorHandleFactory,
295  const armnn::ITensorHandleFactory& refTensorHandleFactory,
296  float upperBound,
297  float lowerBound)
298 {
299  LayerTestResult<float, 4> result(BoundedReLuRandomInputTestTraits::GetOutputTensorInfo());
300 
301  armnn::ActivationDescriptor activationDescriptor;
302  activationDescriptor.m_Function = armnn::ActivationFunction::BoundedReLu;
303  activationDescriptor.m_A = upperBound;
304  activationDescriptor.m_B = lowerBound;
305 
306  result.m_ActualData = BoundedReLuRandomInputTest(
307  workloadFactory, memoryManager, tensorHandleFactory, 0.0f, upperBound, activationDescriptor);
308  result.m_ExpectedData = BoundedReLuRandomInputTest(
309  refWorkloadFactory, nullptr, refTensorHandleFactory, 0.0f, upperBound, activationDescriptor);
310 
311  return result;
312 }
313 
314 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
316  armnn::IWorkloadFactory& workloadFactory,
318  const armnn::ITensorHandleFactory& tensorHandleFactory,
319  float qScale = 0.0f,
320  int32_t qOffset = 0)
321 {
322  IgnoreUnused(memoryManager);
323  unsigned int inputHeight = 20;
324  unsigned int inputWidth = 17;
325  unsigned int inputChannels = 3;
326  unsigned int batchSize = 5;
327 
328  armnn::TensorInfo inputTensorInfo;
329  armnn::TensorInfo outputTensorInfo;
330 
331  unsigned int shape[] = {batchSize, inputChannels, inputHeight, inputWidth};
332 
333  inputTensorInfo = armnn::TensorInfo(4, shape, ArmnnType);
334  outputTensorInfo = armnn::TensorInfo(4, shape, ArmnnType);
335 
336  // Set quantization parameters if the requested type is a quantized type.
337  if(armnn::IsQuantizedType<T>())
338  {
339  inputTensorInfo.SetQuantizationScale(qScale);
340  inputTensorInfo.SetQuantizationOffset(qOffset);
341  outputTensorInfo.SetQuantizationScale(qScale);
342  outputTensorInfo.SetQuantizationOffset(qOffset);
343  }
344 
345  std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
346  std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
347 
348  // Do linear activation that should leave the tensor unchanged.
350  armnn::WorkloadInfo info;
351  AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
352  AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
353  data.m_Parameters.m_A = 1.0f;
354  data.m_Parameters.m_B = 0.0f;
356 
357  std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Activation,
358  data, info);
359 
360  inputHandle->Allocate();
361  outputHandle->Allocate();
362 
363  std::vector<T> input = MakeRandomTensor<T>(inputTensorInfo, 7123561);
364  std::vector<T> actualOutput(outputTensorInfo.GetNumElements());
365 
366  CopyDataToITensorHandle(inputHandle.get(), input.data());
367 
368  workload->Execute();
369 
370  CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
371 
372  // Use input as ExpectedData as tensor doesn't change.
373  return LayerTestResult<T, 4>(actualOutput,
374  input,
375  outputHandle->GetShape(),
376  outputTensorInfo.GetShape());
377 }
378 
380  armnn::IWorkloadFactory& workloadFactory,
382  const armnn::ITensorHandleFactory& tensorHandleFactory)
383 {
384  return ConstantLinearActivationTestCommon<armnn::DataType::Float32>(workloadFactory,
385  memoryManager,
386  tensorHandleFactory);
387 }
388 
390  armnn::IWorkloadFactory& workloadFactory,
392  const armnn::ITensorHandleFactory& tensorHandleFactory)
393 {
394  return ConstantLinearActivationTestCommon<armnn::DataType::QAsymmU8>(
395  workloadFactory, memoryManager, tensorHandleFactory, 4.0f, 3);
396 }
397 
399  armnn::IWorkloadFactory& workloadFactory,
401  const armnn::ITensorHandleFactory& tensorHandleFactory)
402 {
403  return ConstantLinearActivationTestCommon<armnn::DataType::QSymmS16>(
404  workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
405 }
406 
407 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
409  armnn::IWorkloadFactory& workloadFactory,
411  const armnn::ITensorHandleFactory& tensorHandleFactory,
412  armnn::ActivationFunction activationFunction,
413  float activationParameterA,
414  float activationParameterB,
415  float scale,
416  int32_t offset,
417  const std::vector<float>& inputData,
418  float outScale,
419  int32_t outOffset,
420  const std::vector<float>& outputExpectedData)
421 {
422  IgnoreUnused(memoryManager);
423  constexpr static unsigned int inputWidth = 16u;
424  constexpr static unsigned int inputHeight = 1u;
425  constexpr static unsigned int inputChannels = 1u;
426  constexpr static unsigned int inputBatchSize = 1u;
427 
428  constexpr static unsigned int outputWidth = inputWidth;
429  constexpr static unsigned int outputHeight = inputHeight;
430  constexpr static unsigned int outputChannels = inputChannels;
431  constexpr static unsigned int outputBatchSize = inputBatchSize;
432 
433  armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth }, ArmnnType);
434  armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth }, ArmnnType);
435 
436  // Set quantization parameters if the requested type is a quantized type.
437  if(armnn::IsQuantizedType<T>())
438  {
439  inputTensorInfo.SetQuantizationScale(scale);
440  inputTensorInfo.SetQuantizationOffset(offset);
441  outputTensorInfo.SetQuantizationScale(outScale);
442  outputTensorInfo.SetQuantizationOffset(outOffset);
443  }
444 
445  std::vector<T> input = armnnUtils::QuantizedVector<T>(inputData, scale, offset);
446 
447  // Calculated outputExpected manually.
448  std::vector<T> actualOutput(outputTensorInfo.GetNumElements());
449  std::vector<T> outputExpected = armnnUtils::QuantizedVector<T>(outputExpectedData, outScale, outOffset);
450 
451  std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
452  std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
453 
454  // Setup bounded ReLu.
456  armnn::WorkloadInfo workloadInfo;
457  AddInputToWorkload(descriptor, workloadInfo, inputTensorInfo, inputHandle.get());
458  AddOutputToWorkload(descriptor, workloadInfo, outputTensorInfo, outputHandle.get());
459 
460  descriptor.m_Parameters.m_Function = activationFunction;
461  descriptor.m_Parameters.m_A = activationParameterA;
462  descriptor.m_Parameters.m_B = activationParameterB;
463 
464  std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Activation,
465  descriptor, workloadInfo);
466 
467  inputHandle->Allocate();
468  outputHandle->Allocate();
469 
470  CopyDataToITensorHandle(inputHandle.get(), input.data());
471 
472  workload->Execute();
473 
474  CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
475 
476  return LayerTestResult<T, 4>(actualOutput,
477  outputExpected,
478  outputHandle->GetShape(),
479  outputTensorInfo.GetShape());
480 }
481 
482 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
484  armnn::IWorkloadFactory& workloadFactory,
486  const armnn::ITensorHandleFactory& tensorHandleFactory,
487  float qScale,
488  int32_t qOffset)
489 {
490  std::vector<float> inputData =
491  {
492  -0.1f, -0.2f, -0.3f, -0.4f,
493  0.1f, 0.2f, 0.3f, 0.4f,
494  -1.0f, -2.0f, -3.0f, -4.0f,
495  1.0f, 2.0f, 3.0f, 4.0f
496  };
497 
498  // Calculate output values for input.
499  auto f = [](float value)
500  {
501  return 1.0f / (1.0f + std::exp(-value));
502  };
503  std::vector<float> m_OutputExpected(inputData.size());
504  std::transform(inputData.begin(), inputData.end(), m_OutputExpected.begin(), f);
505 
506  return SimpleActivationTest<ArmnnType>(workloadFactory,
507  memoryManager,
508  tensorHandleFactory,
510  0.f,
511  0.f,
512  qScale,
513  qOffset,
514  inputData,
515  1.f / 256.f,
516  0,
517  m_OutputExpected);
518 }
519 
521  armnn::IWorkloadFactory& workloadFactory,
523  const armnn::ITensorHandleFactory& tensorHandleFactory)
524 {
525  return SimpleSigmoidTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager,
526  tensorHandleFactory, 0.0f, 0);
527 }
528 
530  armnn::IWorkloadFactory& workloadFactory,
532  const armnn::ITensorHandleFactory& tensorHandleFactory)
533 {
534  return SimpleSigmoidTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager,
535  tensorHandleFactory, 0.1f, 50);
536 }
537 
539  armnn::IWorkloadFactory& workloadFactory,
541  const armnn::ITensorHandleFactory& tensorHandleFactory)
542 {
543  return SimpleSigmoidTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager,
544  tensorHandleFactory, 0.1f, 0);
545 }
546 
547 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
549  armnn::IWorkloadFactory& workloadFactory,
551  const armnn::ITensorHandleFactory& tensorHandleFactory,
552  float qScale,
553  int32_t qOffset)
554 {
555  std::vector<float> inputData = {
556  -0.1f, -0.2f, -0.3f, -0.4f,
557  0.1f, 0.2f, 0.3f, 0.4f,
558  -1.0f, -2.0f, -3.0f, -4.0f,
559  1.0f, 2.0f, 3.0f, 4.0f
560  };
561 
562  // Calculate output values for input.
563  auto f = [](float value)
564  {
565  return std::fmax(0.0f, value);
566  };
567  std::vector<float> outputExpected(inputData.size());
568  std::transform(inputData.begin(), inputData.end(), outputExpected.begin(), f);
569 
570  return SimpleActivationTest<ArmnnType>(workloadFactory,
571  memoryManager,
572  tensorHandleFactory,
574  0.f,
575  0.f,
576  qScale,
577  qOffset,
578  inputData,
579  qScale,
580  qOffset,
581  outputExpected);
582 }
583 
585  armnn::IWorkloadFactory& workloadFactory,
587  const armnn::ITensorHandleFactory& tensorHandleFactory)
588 {
589  return ReLuTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
590 }
591 
592 
594  armnn::IWorkloadFactory& workloadFactory,
596  const armnn::ITensorHandleFactory& tensorHandleFactory)
597 {
598  return ReLuTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
599 }
600 
602  armnn::IWorkloadFactory& workloadFactory,
604  const armnn::ITensorHandleFactory& tensorHandleFactory)
605 {
606  return ReLuTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
607 }
608 
609 
610 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
612  armnn::IWorkloadFactory& workloadFactory,
614  const armnn::ITensorHandleFactory& tensorHandleFactory,
615  float qScale,
616  int32_t qOffset)
617 {
618  std::vector<float> inputData = {
619  -0.1f, -0.2f, -0.3f, -0.4f,
620  0.1f, 0.2f, 0.3f, 0.4f,
621  -1.0f, -2.0f, -3.0f, -4.0f,
622  1.0f, 2.0f, 3.0f, 4.0f
623  };
624  const float a = 1.0f;
625  const float b = -1.0f;
626  // Calculate output values for input.
627  auto f = [a, b](float value)
628  {
629  return std::min(a, std::max(b, value));
630  };
631  std::vector<float> outputExpected(inputData.size());
632  std::transform(inputData.begin(), inputData.end(), outputExpected.begin(), f);
633 
634  return SimpleActivationTest<ArmnnType>(workloadFactory,
635  memoryManager,
636  tensorHandleFactory,
638  a,
639  b,
640  qScale,
641  qOffset,
642  inputData,
643  qScale,
644  qOffset,
645  outputExpected);
646 }
647 
649  armnn::IWorkloadFactory& workloadFactory,
651  const armnn::ITensorHandleFactory& tensorHandleFactory)
652 {
653  return ReLuTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
654 }
655 
656 
657 
658 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
660  armnn::IWorkloadFactory& workloadFactory,
662  const armnn::ITensorHandleFactory& tensorHandleFactory,
663  float qScale,
664  int32_t qOffset)
665 {
666  std::vector<float> inputData = {
667  -0.1f, -0.2f, -0.3f, -0.4f,
668  0.1f, 0.2f, 0.3f, 0.4f,
669  -1.0f, -2.0f, -3.0f, -4.0f,
670  1.0f, 2.0f, 3.0f, 4.0f
671  };
672 
673  // Calculate output values for input.
674  auto f = [](float value)
675  {
676  return std::log(1.0f + std::exp(value));
677  };
678  std::vector<float> outputExpected(inputData.size());
679  std::transform(inputData.begin(), inputData.end(), outputExpected.begin(), f);
680 
681  return SimpleActivationTest<ArmnnType>(workloadFactory,
682  memoryManager,
683  tensorHandleFactory,
685  0.f,
686  0.f,
687  qScale,
688  qOffset,
689  inputData,
690  qScale,
691  qOffset,
692  outputExpected);
693 }
694 
696  armnn::IWorkloadFactory& workloadFactory,
698  const armnn::ITensorHandleFactory& tensorHandleFactory)
699 {
700  return SoftReLuTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
701 }
702 
704  armnn::IWorkloadFactory& workloadFactory,
706  const armnn::ITensorHandleFactory& tensorHandleFactory)
707 {
708  return SoftReLuTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager,
709  tensorHandleFactory, 0.0625f, 64);
710 }
711 
713  armnn::IWorkloadFactory& workloadFactory,
715  const armnn::ITensorHandleFactory& tensorHandleFactory)
716 {
717  return SoftReLuTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
718 }
719 
720 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
722  armnn::IWorkloadFactory& workloadFactory,
724  const armnn::ITensorHandleFactory& tensorHandleFactory,
725  float qScale,
726  int32_t qOffset)
727 {
728  std::vector<float> inputData = {
729  -0.1f, -0.2f, -0.3f, -0.4f,
730  0.1f, 0.2f, 0.3f, 0.4f,
731  -1.0f, -2.0f, -3.0f, -4.0f,
732  1.0f, 2.0f, 3.0f, 4.0f
733  };
734 
735  const float a = 0.01f;
736  // Calculate output values for input.
737  auto f = [a](float value)
738  {
739  return value > 0.0f ? value : (value * a);
740  };
741  std::vector<float> outputExpected(inputData.size());
742  std::transform(inputData.begin(), inputData.end(), outputExpected.begin(), f);
743 
744  return SimpleActivationTest<ArmnnType>(workloadFactory,
745  memoryManager,
746  tensorHandleFactory,
748  a,
749  0.f,
750  qScale,
751  qOffset,
752  inputData,
753  qScale,
754  qOffset,
755  outputExpected);
756 }
757 
759  armnn::IWorkloadFactory& workloadFactory,
761  const armnn::ITensorHandleFactory& tensorHandleFactory)
762 {
763  return LeakyReLuTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
764 }
765 
767  armnn::IWorkloadFactory& workloadFactory,
769  const armnn::ITensorHandleFactory& tensorHandleFactory)
770 {
771  return LeakyReLuTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager,
772  tensorHandleFactory, 0.0625f, 64);
773 }
774 
776  armnn::IWorkloadFactory& workloadFactory,
778  const armnn::ITensorHandleFactory& tensorHandleFactory)
779 {
780  return LeakyReLuTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
781 }
782 
783 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
785  armnn::IWorkloadFactory& workloadFactory,
787  const armnn::ITensorHandleFactory& tensorHandleFactory,
788  float qScale,
789  int32_t qOffset)
790 {
791  std::vector<float> inputData = {
792  -0.1f, -0.2f, -0.3f, -0.4f,
793  0.1f, 0.2f, 0.3f, 0.4f,
794  -1.0f, -2.0f, -3.0f, -4.0f,
795  1.0f, 2.0f, 3.0f, 4.0f
796  };
797 
798  // Calculate output values for input.
799  auto f = [](float value)
800  {
801  return std::abs(value);
802  };
803  std::vector<float> outputExpected(inputData.size());
804  std::transform(inputData.begin(), inputData.end(), outputExpected.begin(), f);
805 
806  return SimpleActivationTest<ArmnnType>(workloadFactory,
807  memoryManager,
808  tensorHandleFactory,
810  0.f,
811  0.f,
812  qScale,
813  qOffset,
814  inputData,
815  qScale,
816  qOffset,
817  outputExpected);
818 }
819 
821  armnn::IWorkloadFactory& workloadFactory,
823  const armnn::ITensorHandleFactory& tensorHandleFactory)
824 {
825  return AbsTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
826 }
827 
829  armnn::IWorkloadFactory& workloadFactory,
831  const armnn::ITensorHandleFactory& tensorHandleFactory)
832 {
833  return AbsTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, 0.0625f, 64);
834 }
835 
837  armnn::IWorkloadFactory& workloadFactory,
839  const armnn::ITensorHandleFactory& tensorHandleFactory)
840 {
841  return AbsTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
842 }
843 
845  armnn::IWorkloadFactory& workloadFactory,
847  const armnn::ITensorHandleFactory& tensorHandleFactory)
848 {
849  IgnoreUnused(memoryManager);
850  const int inputDataSize = 120;
851  std::vector<float> inputData(inputDataSize);
852 
853  for (unsigned int i = 0u; i < inputDataSize; ++i)
854  {
855  inputData[i] = static_cast<float>(i) / 10;
856  }
857 
858  auto f = [](float value)
859  {
860  return std::sqrt(value);
861  };
862  std::vector<float> expectedOutput(inputDataSize);
863  std::transform(inputData.begin(), inputData.end(), expectedOutput.begin(), f);
864 
865  armnn::TensorInfo inputTensorInfo(
866  { 1u, 2u, 3u, 4u, 5u }, armnn::DataType::Float32);
867  armnn::TensorInfo outputTensorInfo(
868  { 1u, 2u, 3u, 4u, 5u }, armnn::DataType::Float32);
869 
870  std::vector<float> actualOutput(outputTensorInfo.GetNumElements());
871 
872  std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
873  std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
874 
876  armnn::WorkloadInfo workloadInfo;
877  AddInputToWorkload(descriptor, workloadInfo, inputTensorInfo, inputHandle.get());
878  AddOutputToWorkload(descriptor, workloadInfo, outputTensorInfo, outputHandle.get());
879 
881 
882  std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Activation,
883  descriptor, workloadInfo);
884 
885  inputHandle->Allocate();
886  outputHandle->Allocate();
887 
888  CopyDataToITensorHandle(inputHandle.get(), inputData.data());
889 
890  workload->Execute();
891 
892  CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
893 
894  return LayerTestResult<float, 5>(actualOutput,
895  expectedOutput,
896  outputHandle->GetShape(),
897  outputTensorInfo.GetShape());
898 };
899 
900 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
902  armnn::IWorkloadFactory& workloadFactory,
904  const armnn::ITensorHandleFactory& tensorHandleFactory,
905  float qScale,
906  int32_t qOffset)
907 {
908  std::vector<float> inputData = {
909  0.1f, 0.2f, 0.3f, 0.4f,
910  0.1f, 0.2f, 0.3f, 0.4f,
911  1.0f, 2.0f, 3.0f, 4.0f,
912  1.0f, 2.0f, 3.0f, 4.0f
913  };
914 
915  // Calculate output values for input.
916  auto f = [](float value)
917  {
918  return std::sqrt(value);
919  };
920  std::vector<float> expectedOutput(inputData.size());
921  std::transform(inputData.begin(), inputData.end(), expectedOutput.begin(), f);
922 
923  return SimpleActivationTest<ArmnnType>(workloadFactory,
924  memoryManager,
925  tensorHandleFactory,
927  0.f,
928  0.f,
929  qScale,
930  qOffset,
931  inputData,
932  qScale,
933  qOffset,
934  expectedOutput);
935 }
936 
938  armnn::IWorkloadFactory& workloadFactory,
940  const armnn::ITensorHandleFactory& tensorHandleFactory)
941 {
942  return SqrtTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
943 }
944 
946  armnn::IWorkloadFactory& workloadFactory,
948  const armnn::ITensorHandleFactory& tensorHandleFactory)
949 {
950  return SqrtTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, 0.0625f, 64);
951 }
952 
954  armnn::IWorkloadFactory& workloadFactory,
956  const armnn::ITensorHandleFactory& tensorHandleFactory)
957 {
958  return SqrtTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
959 }
960 
961 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
963  armnn::IWorkloadFactory& workloadFactory,
965  const armnn::ITensorHandleFactory& tensorHandleFactory,
966  float qScale,
967  int32_t qOffset)
968 {
969  std::vector<float> inputData = {
970  -0.1f, -0.2f, -0.3f, -0.4f,
971  0.1f, 0.2f, 0.3f, 0.4f,
972  -1.0f, -2.0f, -3.0f, -4.0f,
973  1.0f, 2.0f, 3.0f, 4.0f
974  };
975 
976  // Calculate output values for input.
977  auto f = [](float value)
978  {
979  return std::pow(value,2);
980  };
981  std::vector<float> expectedOutput(inputData.size());
982  std::transform(inputData.begin(), inputData.end(), expectedOutput.begin(), f);
983 
984  return SimpleActivationTest<ArmnnType>(workloadFactory,
985  memoryManager,
986  tensorHandleFactory,
988  0.f,
989  0.f,
990  qScale,
991  qOffset,
992  inputData,
993  qScale,
994  qOffset,
995  expectedOutput);
996 }
997 
999  armnn::IWorkloadFactory& workloadFactory,
1001  const armnn::ITensorHandleFactory& tensorHandleFactory)
1002 {
1003  return SquareTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
1004 }
1005 
1007  armnn::IWorkloadFactory& workloadFactory,
1009  const armnn::ITensorHandleFactory& tensorHandleFactory)
1010 {
1011  return SquareTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager,
1012  tensorHandleFactory, 0.0625f, 64);
1013 }
1014 
1016  armnn::IWorkloadFactory& workloadFactory,
1018  const armnn::ITensorHandleFactory& tensorHandleFactory)
1019 {
1020  return SquareTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
1021 }
1022 
1023 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
1025  armnn::IWorkloadFactory& workloadFactory,
1027  const armnn::ITensorHandleFactory& tensorHandleFactory,
1028  float qScale,
1029  int32_t qOffset)
1030 {
1031  std::vector<float> inputData = {
1032  -0.1f, -0.2f, -0.3f, -0.4f,
1033  0.1f, 0.2f, 0.3f, 0.4f,
1034  -1.0f, -2.0f, -3.0f, -4.0f,
1035  1.0f, 2.0f, 3.0f, 4.0f
1036  };
1037 
1038  const float a = 2.0f;
1039  const float b = 3.0f;
1040  // Calculate output values for input.
1041  auto f = [a, b](float value)
1042  {
1043  return a * tanhf(b * value);
1044  };
1045  std::vector<float> expectedOutput(inputData.size());
1046  std::transform(inputData.begin(), inputData.end(), expectedOutput.begin(), f);
1047 
1048  return SimpleActivationTest<ArmnnType>(workloadFactory,
1049  memoryManager,
1050  tensorHandleFactory,
1052  a,
1053  b,
1054  qScale,
1055  qOffset,
1056  inputData,
1057  qScale,
1058  qOffset,
1059  expectedOutput);
1060 }
1061 
1063  armnn::IWorkloadFactory& workloadFactory,
1065  const armnn::ITensorHandleFactory& tensorHandleFactory)
1066 {
1067  return TanhTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
1068 }
1069 
1071  armnn::IWorkloadFactory& workloadFactory,
1073  const armnn::ITensorHandleFactory& tensorHandleFactory)
1074 {
1075  return TanhTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 64);
1076 }
1077 
1079  armnn::IWorkloadFactory& workloadFactory,
1081  const armnn::ITensorHandleFactory& tensorHandleFactory)
1082 {
1083  return TanhTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
1084 }
1085 
1086 
1087 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
1089  armnn::IWorkloadFactory& workloadFactory,
1091  const armnn::ITensorHandleFactory& tensorHandleFactory,
1092  float qScale,
1093  int32_t qOffset)
1094 {
1095  std::vector<float> inputData = {
1096  -0.1f, -0.2f, -0.3f, -0.4f,
1097  0.1f, 0.2f, 0.3f, 0.4f,
1098  -1.0f, -2.0f, -3.0f, -4.0f,
1099  1.0f, 2.0f, 3.0f, 4.0f
1100  };
1101 
1102 
1103  const float a = 0.01f;
1104  // Calculate output values for input.
1105  auto f = [a](float value)
1106  {
1107  return (value >= 0) ? value : a * (expf(value) - 1);
1108  };
1109  std::vector<float> expectedOutput(inputData.size());
1110  std::transform(inputData.begin(), inputData.end(), expectedOutput.begin(), f);
1111 
1112  return SimpleActivationTest<ArmnnType>(workloadFactory,
1113  memoryManager,
1114  tensorHandleFactory,
1116  a,
1117  0.0f,
1118  qScale,
1119  qOffset,
1120  inputData,
1121  qScale,
1122  qOffset,
1123  expectedOutput);
1124 }
1125 
1127  armnn::IWorkloadFactory& workloadFactory,
1129  const armnn::ITensorHandleFactory& tensorHandleFactory)
1130 {
1131  return EluTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
1132 }
1133 
1135  armnn::IWorkloadFactory& workloadFactory,
1137  const armnn::ITensorHandleFactory& tensorHandleFactory)
1138 {
1139  return EluTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 64);
1140 }
1141 
1143  armnn::IWorkloadFactory& workloadFactory,
1145  const armnn::ITensorHandleFactory& tensorHandleFactory)
1146 {
1147  return EluTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
1148 }
1149 
1150 
1151 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
1153  armnn::IWorkloadFactory& workloadFactory,
1155  const armnn::ITensorHandleFactory& tensorHandleFactory,
1156  float qScale,
1157  int32_t qOffset)
1158 {
1159  std::vector<float> inputData = {
1160  -0.1f, -0.2f, -0.3f, -0.4f,
1161  0.1f, 0.2f, 0.3f, 0.4f,
1162  -1.0f, -2.0f, -3.0f, -4.0f,
1163  1.0f, 2.0f, 3.0f, 4.0f
1164  };
1165  // Calculate output values for input.
1166  auto f = [](float x)
1167  {
1168  // Break down the calculation to help with verification.
1169  // hard_swish(x) = x * relu6(x+3) / 6
1170  // relu6(x) = min(max(x,0),6)
1171  float reLu6_step1 = std::max((x + 3),0.0f);
1172  float reLu6Complete = std::min(reLu6_step1, 6.0f);
1173  float hardSwish_step1 = x * reLu6Complete;
1174  float result = hardSwish_step1 / 6;
1175  return result;
1176  };
1177  std::vector<float> expectedOutput(inputData.size());
1178  std::transform(inputData.begin(), inputData.end(), expectedOutput.begin(), f);
1179 
1180  return SimpleActivationTest<ArmnnType>(workloadFactory,
1181  memoryManager,
1182  tensorHandleFactory,
1184  0.f,
1185  0.f,
1186  qScale,
1187  qOffset,
1188  inputData,
1189  qScale,
1190  qOffset,
1191  expectedOutput);
1192 }
1193 
1195  armnn::IWorkloadFactory& workloadFactory,
1197  const armnn::ITensorHandleFactory& tensorHandleFactory)
1198 {
1199  return HardSwishTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
1200 }
1201 
1203  armnn::IWorkloadFactory& workloadFactory,
1205  const armnn::ITensorHandleFactory& tensorHandleFactory)
1206 {
1207  return HardSwishTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager,
1208  tensorHandleFactory, 0.1f, 64);
1209 }
1210 
1212  armnn::IWorkloadFactory& workloadFactory,
1214  const armnn::ITensorHandleFactory& tensorHandleFactory)
1215 {
1216  return HardSwishTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
1217 }
1218 
1219 
1220 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
1222  armnn::IWorkloadFactory& workloadFactory,
1224  armnn::IWorkloadFactory& refWorkloadFactory,
1225  const armnn::ITensorHandleFactory& tensorHandleFactory,
1226  const armnn::ITensorHandleFactory& refTensorHandleFactory,
1228  unsigned int batchSize = 5,
1229  float qScale = 0.0f,
1230  int32_t qOffset = 0)
1231 {
1232  IgnoreUnused(memoryManager);
1233  unsigned int width = 17;
1234  unsigned int height = 29;
1235  unsigned int channels = 2;
1236 
1237  float a = 0.234f;
1238  float b = -12.345f;
1239 
1240  armnn::TensorInfo inputTensorInfo;
1241  armnn::TensorInfo outputTensorInfo;
1242 
1243  unsigned int shape[] = {batchSize, channels, height, width};
1244 
1245  inputTensorInfo = armnn::TensorInfo(4, shape, ArmnnType);
1246  outputTensorInfo = armnn::TensorInfo(4, shape, ArmnnType);
1247 
1248  // Set quantization parameters if the requested type is a quantized type.
1249  if(armnn::IsQuantizedType<T>())
1250  {
1251  inputTensorInfo.SetQuantizationScale(qScale);
1252  inputTensorInfo.SetQuantizationOffset(qOffset);
1253  outputTensorInfo.SetQuantizationScale(qScale);
1254  outputTensorInfo.SetQuantizationOffset(qOffset);
1255  }
1256 
1257  float minVal = -10.f;
1259  {
1260  minVal = 0.f;
1261  }
1262 
1263  std::vector<T> input = MakeRandomTensor<T>(inputTensorInfo, 21453, minVal, 10.f);
1264  std::vector<T> actualOutput(outputTensorInfo.GetNumElements());
1265  std::vector<T> expectedOutput(outputTensorInfo.GetNumElements());
1266 
1267  std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
1268  std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
1269 
1270  std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refTensorHandleFactory.CreateTensorHandle(inputTensorInfo);
1271  std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refTensorHandleFactory.CreateTensorHandle(outputTensorInfo);
1272 
1274  armnn::WorkloadInfo info;
1275  AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
1276  AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
1277  data.m_Parameters.m_A = a;
1278  data.m_Parameters.m_B = b;
1279  data.m_Parameters.m_Function = f;
1280 
1281  armnn::ActivationQueueDescriptor refData = data;
1282  armnn::WorkloadInfo refInfo = info;
1283  SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
1284  SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
1285 
1286  std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Activation,
1287  data, info);
1288  ARMNN_ASSERT(workload != nullptr);
1289  std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateWorkload(armnn::LayerType::Activation,
1290  refData, refInfo);
1291  ARMNN_ASSERT(workloadRef != nullptr);
1292 
1293  inputHandle->Allocate();
1294  outputHandle->Allocate();
1295  inputHandleRef->Allocate();
1296  outputHandleRef->Allocate();
1297 
1298  CopyDataToITensorHandle(inputHandle.get(), input.data());
1299  CopyDataToITensorHandle(inputHandleRef.get(), input.data());
1300 
1301  workload->Execute();
1302  workloadRef->Execute();
1303 
1304  CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
1305  CopyDataFromITensorHandle(expectedOutput.data(), outputHandleRef.get());
1306 
1307  return LayerTestResult<T, 4>(actualOutput,
1308  expectedOutput,
1309  outputHandle->GetShape(),
1310  outputTensorInfo.GetShape());
1311 
1312 }
1313 
1315  armnn::IWorkloadFactory& workloadFactory,
1317  armnn::IWorkloadFactory& refWorkloadFactory,
1318  const armnn::ITensorHandleFactory& tensorHandleFactory,
1319  const armnn::ITensorHandleFactory& refTensorHandleFactory,
1321  unsigned int batchSize)
1322 {
1323  return CompareActivationTestImpl<armnn::DataType::Float32>(
1324  workloadFactory, memoryManager, refWorkloadFactory, tensorHandleFactory,
1325  refTensorHandleFactory, f, batchSize);
1326 }
1327 
1329  armnn::IWorkloadFactory& workloadFactory,
1331  armnn::IWorkloadFactory& refWorkloadFactory,
1332  const armnn::ITensorHandleFactory& tensorHandleFactory,
1333  const armnn::ITensorHandleFactory& refTensorHandleFactory,
1335 {
1336  return CompareActivationTestImpl<armnn::DataType::QAsymmU8>(
1337  workloadFactory, memoryManager, refWorkloadFactory,
1338  tensorHandleFactory, refTensorHandleFactory, f, 5, 0.1f, 50);
1339 }
1340 
1342  armnn::IWorkloadFactory& workloadFactory,
1344  armnn::IWorkloadFactory& refWorkloadFactory,
1345  const armnn::ITensorHandleFactory& tensorHandleFactory,
1346  const armnn::ITensorHandleFactory& refTensorHandleFactory,
1348 {
1349  return CompareActivationTestImpl<armnn::DataType::QSymmS16>(
1350  workloadFactory, memoryManager, refWorkloadFactory, tensorHandleFactory,
1351  refTensorHandleFactory, f, 5, 0.1f, 0);
1352 }
LayerTestResult< T, 4 > SimpleSigmoidTestCommon(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, float qScale, int32_t qOffset)
LayerTestResult< float, 4 > SqrtTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< int16_t, 4 > ConstantLinearActivationInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< float, 4 > SimpleSigmoidTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< float, 4 > SoftReLuTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
const TensorShape & GetShape() const
Definition: Tensor.hpp:191
LayerTestResult< float, 4 > CompareBoundedReLuTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, armnn::IWorkloadFactory &refWorkloadFactory, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::ITensorHandleFactory &refTensorHandleFactory, float upperBound, float lowerBound)
LayerTestResult< T, 4 > TanhTestCommon(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, float qScale, int32_t qOffset)
LayerTestResult< int16_t, 4 > LeakyReLuInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< uint8_t, 4 > CompareActivationUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, armnn::IWorkloadFactory &refWorkloadFactory, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::ITensorHandleFactory &refTensorHandleFactory, armnn::ActivationFunction f)
LayerTestResult< float, 4 > ConstantLinearActivationTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< int16_t, 4 > SimpleSigmoidInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< float, 4 > ReLuTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< uint8_t, 4 > TanhUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
void IgnoreUnused(Ts &&...)
LayerTestResult< T, 4 > SoftReLuTestCommon(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, float qScale, int32_t qOffset)
LayerTestResult< uint8_t, 4 > SoftReLuUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< float, 5 > SqrtNNTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< float, 4 > TanhTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< int16_t, 4 > TanhInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< int16_t, 4 > SqrtInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< T, 4 > CompareActivationTestImpl(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, armnn::IWorkloadFactory &refWorkloadFactory, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::ITensorHandleFactory &refTensorHandleFactory, armnn::ActivationFunction f, unsigned int batchSize=5, float qScale=0.0f, int32_t qOffset=0)
LayerTestResult< float, 4 > BoundedReLuUpperAndLowerBoundTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< int16_t, 4 > BoundedReLuInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< float, 4 > SquareTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
std::shared_ptr< IMemoryManager > IMemoryManagerSharedPtr
LayerTestResult< float, 4 > BoundedReLuUpperBoundOnlyTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< T, 4 > AbsTestCommon(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, float qScale, int32_t qOffset)
LayerTestResult< uint8_t, 4 > EluUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
std::vector< T > m_ExpectedData
void CopyDataFromITensorHandle(void *mem, const armnn::ITensorHandle *tensorHandle)
void SetQuantizationScale(float scale)
Definition: Tensor.cpp:473
LayerTestResult< T, 4 > SimpleActivationTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, armnn::ActivationFunction activationFunction, float activationParameterA, float activationParameterB, float scale, int32_t offset, const std::vector< float > &inputData, float outScale, int32_t outOffset, const std::vector< float > &outputExpectedData)
LayerTestResult< float, 4 > EluTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
#define ARMNN_ASSERT(COND)
Definition: Assert.hpp:14
LayerTestResult< int16_t, 4 > CompareActivationInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, armnn::IWorkloadFactory &refWorkloadFactory, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::ITensorHandleFactory &refTensorHandleFactory, armnn::ActivationFunction f)
LayerTestResult< uint8_t, 4 > ReLuUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< uint8_t, 4 > SquareUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< uint8_t, 4 > BoundedReLuUint8UpperBoundOnlyTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
An ActivationDescriptor for the ActivationLayer.
Definition: Descriptors.hpp:36
min(a, max(b, input)) ReLu1 & ReLu6.
LayerTestResult< float, 4 > LeakyReLuTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< T, 4 > SqrtTestCommon(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, float qScale, int32_t qOffset)
LayerTestResult< int16_t, 4 > SoftReLuInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< uint8_t, 4 > BoundedReLuUint8UpperAndLowerBoundTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
float m_A
Alpha upper bound value used by the activation functions. (BoundedReLu, Linear, TanH, Elu).
Definition: Descriptors.hpp:61
std::vector< T > m_ActualData
LayerTestResult< uint8_t, 4 > SimpleSigmoidUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< T, 4 > LeakyReLuTestCommon(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, float qScale, int32_t qOffset)
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)
LayerTestResult< T, 4 > HardSwishTestCommon(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, float qScale, int32_t qOffset)
LayerTestResult< int16_t, 4 > EluInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< T, 4 > EluTestCommon(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, float qScale, int32_t qOffset)
LayerTestResult< uint8_t, 4 > SqrtUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< int16_t, 4 > SquareInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< T, 4 > ConstantLinearActivationTestCommon(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, float qScale=0.0f, int32_t qOffset=0)
LayerTestResult< int16_t, 4 > ReLuInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< uint8_t, 4 > ConstantLinearActivationUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< T, 4 > ReLuTestCommon(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, float qScale, int32_t qOffset)
Contains information about TensorInfos of a layer.
void SetQuantizationOffset(int32_t offset)
Definition: Tensor.cpp:489
LayerTestResult< float, 4 > AbsTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< T, 4 > BoundedReLuTestCommon(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, float upperBound, float lowerBound, float inputScale, int32_t inputOffset, float outputScale, int32_t outputOffset, const std::vector< T > &inputData, const std::vector< T > &outputExpectedData, unsigned int inputWidth, unsigned int inputHeight, unsigned int inputChannels, unsigned int inputBatchSize)
LayerTestResult< float, 4 > HardSwishTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< uint8_t, 4 > HardSwishUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< int16_t, 4 > AbsInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
virtual std::unique_ptr< IWorkload > CreateWorkload(LayerType type, const QueueDescriptor &descriptor, const WorkloadInfo &info) const
LayerTestResult< T, 4 > SquareTestCommon(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, float qScale, int32_t qOffset)
float m_B
Beta lower bound value used by the activation functions. (BoundedReLu, Linear, TanH).
Definition: Descriptors.hpp:63
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo) const =0
ActivationFunction m_Function
The activation function to use (Sigmoid, TanH, Linear, ReLu, BoundedReLu, SoftReLu, LeakyReLu, Abs, Sqrt, Square, Elu).
Definition: Descriptors.hpp:59
LayerTestResult< uint8_t, 4 > AbsUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< float, 4 > CompareActivationTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, armnn::IWorkloadFactory &refWorkloadFactory, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::ITensorHandleFactory &refTensorHandleFactory, armnn::ActivationFunction f, unsigned int batchSize)
unsigned int GetNumElements() const
Definition: Tensor.hpp:196
ActivationFunction
Definition: Types.hpp:86
LayerTestResult< int16_t, 4 > HardSwishInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< uint8_t, 4 > LeakyReLuUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)