ArmNN 20.08
ActivationTestImpl.cpp
//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "ActivationTestImpl.hpp"

#include <QuantizeHelper.hpp>
#include <ResolveType.hpp>

#include <armnn/utility/Assert.hpp>
#include <armnn/utility/IgnoreUnused.hpp>

#include <backendsCommon/test/TensorCopyUtils.hpp>
#include <backendsCommon/test/WorkloadTestUtils.hpp>

#include <test/TensorHelpers.hpp>

#include <boost/multi_array.hpp>

#include <algorithm>
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> BoundedReLuTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float upperBound,
    float lowerBound,
    float inputScale,
    int32_t inputOffset,
    float outputScale,
    int32_t outputOffset,
    const std::vector<T>& inputData,
    const std::vector<T>& outputExpectedData,
    unsigned int inputWidth,
    unsigned int inputHeight,
    unsigned int inputChannels,
    unsigned int inputBatchSize)
{
    IgnoreUnused(memoryManager);
    unsigned int outputWidth = inputWidth;
    unsigned int outputHeight = inputHeight;
    unsigned int outputChannels = inputChannels;
    unsigned int outputBatchSize = inputBatchSize;

    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth }, ArmnnType);

    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth }, ArmnnType);

    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(inputScale);
        inputTensorInfo.SetQuantizationOffset(inputOffset);

        outputTensorInfo.SetQuantizationScale(outputScale);
        outputTensorInfo.SetQuantizationOffset(outputOffset);
    }

    LayerTestResult<T, 4> result(inputTensorInfo);

    auto input = MakeTensor<T, 4>(inputTensorInfo, inputData);

    ARMNN_NO_DEPRECATE_WARN_BEGIN
    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
    ARMNN_NO_DEPRECATE_WARN_END

    // Set up bounded ReLu.
    armnn::ActivationQueueDescriptor descriptor;
    armnn::WorkloadInfo workloadInfo;
    AddInputToWorkload(descriptor, workloadInfo, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, workloadInfo, outputTensorInfo, outputHandle.get());

    descriptor.m_Parameters.m_Function = armnn::ActivationFunction::BoundedReLu;
    descriptor.m_Parameters.m_A = upperBound;
    descriptor.m_Parameters.m_B = lowerBound;

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateActivation(descriptor, workloadInfo);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputExpectedData);

    return result;
}

LayerTestResult<float, 4> BoundedReLuUpperAndLowerBoundTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    unsigned int inputWidth = 4u;
    unsigned int inputHeight = 5u;
    unsigned int inputChannels = 1u;
    unsigned int inputBatchSize = 1;

    std::vector<float> input = std::vector<float>{
        -2.0f, 0.1f, 0.5f, 1.25f,
        0.786f, 0.9875f, -1.5f, 0.384f,
        1.0001f, 3.5f, 7.5f, 0.896f,
        2.126f, 2.0f, 0.3f, 0.15f,
        0.999f, 1.2f, 0.89f, 6.1f,
    };

    // Calculated manually.
    std::vector<float> output = std::vector<float>{
        -1.0f, 0.1f, 0.5f, 1.0f,
        0.786f, 0.9875f, -1.0f, 0.384f,
        1.0f, 1.0f, 1.0f, 0.896f,
        1.0f, 1.0f, 0.3f, 0.15f,
        0.999f, 1.0f, 0.89f, 1.0f,
    };

    return BoundedReLuTestCommon<armnn::DataType::Float32>(
        workloadFactory, memoryManager, 1.0f, -1.0f, 1.0f, 0, 1.0f, 0, input, output,
        inputWidth, inputHeight, inputChannels, inputBatchSize);
}

LayerTestResult<float, 4> BoundedReLuUpperBoundOnlyTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    unsigned int inputWidth = 4u;
    unsigned int inputHeight = 5u;
    unsigned int inputChannels = 1u;
    unsigned int inputBatchSize = 1;

    std::vector<float> input = std::vector<float>{
        -1.0f, 0.1f, 0.5f, 6.25f,
        0.786f, 5.9875f, -0.5f, 0.384f,
        6.0001f, 3.5f, 7.5f, 0.896f,
        2.126f, 12.0f, 0.3f, 0.15f,
        0.999f, 1.2f, 0.89f, 6.1f,
    };

    // Calculated manually.
    std::vector<float> output = std::vector<float>{
        0.0f, 0.1f, 0.5f, 6.0f,
        0.786f, 5.9875f, 0.0f, 0.384f,
        6.0f, 3.5f, 6.0f, 0.896f,
        2.126f, 6.0f, 0.3f, 0.15f,
        0.999f, 1.2f, 0.89f, 6.0f,
    };

    return BoundedReLuTestCommon<armnn::DataType::Float32>(
        workloadFactory, memoryManager, 6.0f, 0.0f, 1.0f, 0, 1.0f, 0, input, output,
        inputWidth, inputHeight, inputChannels, inputBatchSize);
}

LayerTestResult<uint8_t, 4> BoundedReLuUint8UpperBoundOnlyTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    unsigned int inputWidth = 3u;
    unsigned int inputHeight = 2u;
    unsigned int inputChannels = 1u;
    unsigned int inputBatchSize = 1;

    std::vector<uint8_t> input = std::vector<uint8_t>{
        51, 124, 28,
        251, 8, 92
    };

    // Calculated manually.
    std::vector<uint8_t> output = std::vector<uint8_t>{
        0, 122, 0,
        255, 0, 58
    };

    float inputScale = 12.0f / 255.0f;
    int32_t inputOffset = 63;
    float outputScale = 6.0f / 255.0f;
    int32_t outputOffset = 0;
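    // Quantized values map to reals as: real = scale * (quantized - offset).
    // E.g. input 124 dequantizes to (124 - 63) * 12/255 ≈ 2.87, which lies inside
    // the bounds [0, 6] and re-quantizes with the output parameters to
    // 2.87 * 255/6 ≈ 122, matching the expected output above.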

    return BoundedReLuTestCommon<armnn::DataType::QAsymmU8>(
        workloadFactory, memoryManager, 6.0f, 0.0f,
        inputScale, inputOffset, outputScale, outputOffset,
        input, output, inputWidth, inputHeight, inputChannels, inputBatchSize);
}

LayerTestResult<uint8_t, 4> BoundedReLuUint8UpperAndLowerBoundTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    unsigned int inputWidth = 3u;
    unsigned int inputHeight = 2u;
    unsigned int inputChannels = 1u;
    unsigned int inputBatchSize = 1;

    std::vector<uint8_t> input = std::vector<uint8_t>{
        51, 230, 28,
        251, 8, 92
    };

    // Calculated manually.
    std::vector<uint8_t> output = std::vector<uint8_t>{
        51, 192, 32,
        192, 32, 92
    };

    int32_t inputOffset = 112;
    float inputScale = 0.0125f;
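    // E.g. input 230 dequantizes to (230 - 112) * 0.0125 = 1.475, is clamped to
    // the upper bound 1.0, and re-quantizes to 1.0 / 0.0125 + 112 = 192.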

    return BoundedReLuTestCommon<armnn::DataType::QAsymmU8>(
        workloadFactory, memoryManager, 1.0f, -1.0f,
        inputScale, inputOffset, inputScale, inputOffset, // Input/output scale & offset same.
        input, output, inputWidth, inputHeight, inputChannels, inputBatchSize);
}

namespace
{

struct BoundedReLuRandomInputTestTraits
{
    constexpr static unsigned int inputHeight = 31u;
    constexpr static unsigned int inputWidth = 19u;
    constexpr static unsigned int inputChannels = 4u;
    constexpr static unsigned int inputBatchSize = 2;

    constexpr static unsigned int outputHeight = inputHeight;
    constexpr static unsigned int outputWidth = inputWidth;
    constexpr static unsigned int outputChannels = inputChannels;
    constexpr static unsigned int outputBatchSize = inputBatchSize;

    static armnn::TensorInfo GetInputTensorInfo()
    {
        return armnn::TensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
                                 armnn::DataType::Float32);
    }

    static armnn::TensorInfo GetOutputTensorInfo()
    {
        return armnn::TensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
                                 armnn::DataType::Float32);
    }
};

boost::multi_array<float, 4> BoundedReLuRandomInputTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float lowerBound,
    float upperBound,
    const armnn::ActivationDescriptor& activationDescriptor)
{
    IgnoreUnused(memoryManager);
    const armnn::TensorInfo inputTensorInfo = BoundedReLuRandomInputTestTraits::GetInputTensorInfo();
    const armnn::TensorInfo outputTensorInfo = BoundedReLuRandomInputTestTraits::GetOutputTensorInfo();

    boost::multi_array<float, 4> output(GetTensorShapeAsArray<4>(outputTensorInfo));

    // Min/max random values passed to MakeRandomTensor are purposely outside of the ReLu
    // range [lowerBound, upperBound].
    auto input = MakeRandomTensor<float, 4>(inputTensorInfo, 4605828, lowerBound - 5.0f, upperBound * 2.0f);
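    // The fixed seed makes the generated input, and therefore the test, deterministic.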
259 
261  std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
262  std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
264 
265  // Set up bounded ReLu.
267  armnn::WorkloadInfo workloadInfo;
268  AddInputToWorkload(descriptor, workloadInfo, inputTensorInfo, inputHandle.get());
269  AddOutputToWorkload(descriptor, workloadInfo, outputTensorInfo, outputHandle.get());
270  descriptor.m_Parameters = activationDescriptor;
271 
272  std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateActivation(descriptor, workloadInfo);
273 
274  inputHandle->Allocate();
275  outputHandle->Allocate();
276 
277  CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
278 
279  workload->Execute();
280 
281  CopyDataFromITensorHandle(&output[0][0][0][0], outputHandle.get());
282 
283  return output;
284 }
285 
286 } // namespace

LayerTestResult<float, 4> CompareBoundedReLuTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory,
    float upperBound,
    float lowerBound)
{
    LayerTestResult<float, 4> result(BoundedReLuRandomInputTestTraits::GetOutputTensorInfo());

    armnn::ActivationDescriptor activationDescriptor;
    activationDescriptor.m_Function = armnn::ActivationFunction::BoundedReLu;
    activationDescriptor.m_A = upperBound;
    activationDescriptor.m_B = lowerBound;

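    // BoundedReLuRandomInputTest ignores its memory manager argument, so the
    // reference run below can safely pass nullptr for it.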
    result.output = BoundedReLuRandomInputTest(
        workloadFactory, memoryManager, 0.0f, upperBound, activationDescriptor);
    result.outputExpected = BoundedReLuRandomInputTest(
        refWorkloadFactory, nullptr, 0.0f, upperBound, activationDescriptor);

    return result;
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> ConstantLinearActivationTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale = 0.0f,
    int32_t qOffset = 0)
{
    IgnoreUnused(memoryManager);
    unsigned int inputHeight = 20;
    unsigned int inputWidth = 17;
    unsigned int inputChannels = 3;
    unsigned int batchSize = 5;

    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;

    unsigned int shape[] = {batchSize, inputChannels, inputHeight, inputWidth};

    inputTensorInfo = armnn::TensorInfo(4, shape, ArmnnType);
    outputTensorInfo = armnn::TensorInfo(4, shape, ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    LayerTestResult<T, 4> ret(outputTensorInfo);
    ARMNN_NO_DEPRECATE_WARN_BEGIN
    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
    ARMNN_NO_DEPRECATE_WARN_END

    // Do linear activation that should leave the tensor unchanged.
    armnn::ActivationQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    data.m_Parameters.m_A = 1.0f;
    data.m_Parameters.m_B = 0.0f;
    data.m_Parameters.m_Function = armnn::ActivationFunction::Linear;
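    // Linear computes a*x + b, so a = 1 and b = 0 make it the identity function.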

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateActivation(data, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    boost::multi_array<T, 4> input = MakeRandomTensor<T, 4>(inputTensorInfo, 7123561);
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    // Ensure output equals input.
    ret.outputExpected = input;

    return ret;
}

LayerTestResult<float, 4> ConstantLinearActivationTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return ConstantLinearActivationTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> ConstantLinearActivationUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return ConstantLinearActivationTestCommon<armnn::DataType::QAsymmU8>(
        workloadFactory, memoryManager, 4.0f, 3);
}

LayerTestResult<int16_t, 4> ConstantLinearActivationInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return ConstantLinearActivationTestCommon<armnn::DataType::QSymmS16>(
        workloadFactory, memoryManager, 0.1f, 0);
}

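// Shared harness: runs a single activation workload over a 1x1x1x16 tensor and
// compares the result against expected values computed on the host.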
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleActivationTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::ActivationFunction activationFunction,
    float activationParameterA,
    float activationParameterB,
    float scale,
    int32_t offset,
    const std::vector<float>& inputData,
    float outScale,
    int32_t outOffset,
    const std::vector<float>& outputExpectedData)
{
    IgnoreUnused(memoryManager);
    constexpr static unsigned int inputWidth = 16u;
    constexpr static unsigned int inputHeight = 1u;
    constexpr static unsigned int inputChannels = 1u;
    constexpr static unsigned int inputBatchSize = 1u;

    constexpr static unsigned int outputWidth = inputWidth;
    constexpr static unsigned int outputHeight = inputHeight;
    constexpr static unsigned int outputChannels = inputChannels;
    constexpr static unsigned int outputBatchSize = inputBatchSize;

    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth }, ArmnnType);
    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth }, ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(scale);
        inputTensorInfo.SetQuantizationOffset(offset);
        outputTensorInfo.SetQuantizationScale(outScale);
        outputTensorInfo.SetQuantizationOffset(outOffset);
    }

    LayerTestResult<T, 4> result(inputTensorInfo);

    auto input = MakeTensor<T, 4>(inputTensorInfo, armnnUtils::QuantizedVector<T>(inputData, scale, offset));

    ARMNN_NO_DEPRECATE_WARN_BEGIN
    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
    ARMNN_NO_DEPRECATE_WARN_END

    // Set up the activation workload.
    armnn::ActivationQueueDescriptor descriptor;
    armnn::WorkloadInfo workloadInfo;
    AddInputToWorkload(descriptor, workloadInfo, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, workloadInfo, outputTensorInfo, outputHandle.get());

    descriptor.m_Parameters.m_Function = activationFunction;
    descriptor.m_Parameters.m_A = activationParameterA;
    descriptor.m_Parameters.m_B = activationParameterB;

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateActivation(descriptor, workloadInfo);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    // Calculated manually.
    result.outputExpected =
        MakeTensor<T, 4>(outputTensorInfo, armnnUtils::QuantizedVector<T>(outputExpectedData, outScale, outOffset));

    return result;
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleSigmoidTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    std::vector<float> inputData =
    {
        -0.1f, -0.2f, -0.3f, -0.4f,
        0.1f, 0.2f, 0.3f, 0.4f,
        -1.0f, -2.0f, -3.0f, -4.0f,
        1.0f, 2.0f, 3.0f, 4.0f
    };

    // Calculate output values for input.
    auto f = [](float value)
    {
        return 1.0f / (1.0f + std::exp(-value));
    };
    std::vector<float> outputExpectedData(inputData.size());
    std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
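    // Sigmoid outputs lie in (0, 1), so an output scale of 1/256 with a zero
    // offset covers the whole output range for the quantized variants.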

    return SimpleActivationTest<ArmnnType>(workloadFactory,
                                           memoryManager,
                                           armnn::ActivationFunction::Sigmoid,
                                           0.f,
                                           0.f,
                                           qScale,
                                           qOffset,
                                           inputData,
                                           1.f / 256.f,
                                           0,
                                           outputExpectedData);
}

LayerTestResult<float, 4> SimpleSigmoidTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SimpleSigmoidTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
}

LayerTestResult<uint8_t, 4> SimpleSigmoidUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SimpleSigmoidTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, 0.1f, 50);
}

LayerTestResult<int16_t, 4> SimpleSigmoidInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SimpleSigmoidTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, 0.1f, 0);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> ReLuTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    std::vector<float> inputData = {
        -0.1f, -0.2f, -0.3f, -0.4f,
        0.1f, 0.2f, 0.3f, 0.4f,
        -1.0f, -2.0f, -3.0f, -4.0f,
        1.0f, 2.0f, 3.0f, 4.0f
    };

    // Calculate output values for input.
    auto f = [](float value)
    {
        return std::fmax(0.0f, value);
    };
    std::vector<float> outputExpectedData(inputData.size());
    std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);

    return SimpleActivationTest<ArmnnType>(workloadFactory,
                                           memoryManager,
                                           armnn::ActivationFunction::ReLu,
                                           0.f,
                                           0.f,
                                           qScale,
                                           qOffset,
                                           inputData,
                                           qScale,
                                           qOffset,
                                           outputExpectedData);
}

LayerTestResult<int16_t, 4> ReLuInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return ReLuTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, 0.1f, 0);
}

LayerTestResult<uint8_t, 4> ReLuUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return ReLuTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, 0.1f, 0);
}

LayerTestResult<float, 4> ReLuTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return ReLuTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.1f, 0);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> BoundedReLuTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    std::vector<float> inputData = {
        -0.1f, -0.2f, -0.3f, -0.4f,
        0.1f, 0.2f, 0.3f, 0.4f,
        -1.0f, -2.0f, -3.0f, -4.0f,
        1.0f, 2.0f, 3.0f, 4.0f
    };
    const float a = 1.0f;
    const float b = -1.0f;
    // Calculate output values for input.
    auto f = [a, b](float value)
    {
        return std::min(a, std::max(b, value));
    };
    std::vector<float> outputExpectedData(inputData.size());
    std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);

    return SimpleActivationTest<ArmnnType>(workloadFactory,
                                           memoryManager,
                                           armnn::ActivationFunction::BoundedReLu,
                                           a,
                                           b,
                                           qScale,
                                           qOffset,
                                           inputData,
                                           qScale,
                                           qOffset,
                                           outputExpectedData);
}

LayerTestResult<int16_t, 4> BoundedReLuInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return ReLuTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, 0.1f, 0);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SoftReLuTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    std::vector<float> inputData = {
        -0.1f, -0.2f, -0.3f, -0.4f,
        0.1f, 0.2f, 0.3f, 0.4f,
        -1.0f, -2.0f, -3.0f, -4.0f,
        1.0f, 2.0f, 3.0f, 4.0f
    };

    // Calculate output values for input.
    auto f = [](float value)
    {
        return std::log(1.0f + std::exp(value));
    };
    std::vector<float> outputExpectedData(inputData.size());
    std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);

    return SimpleActivationTest<ArmnnType>(workloadFactory,
                                           memoryManager,
                                           armnn::ActivationFunction::SoftReLu,
                                           0.f,
                                           0.f,
                                           qScale,
                                           qOffset,
                                           inputData,
                                           qScale,
                                           qOffset,
                                           outputExpectedData);
}

LayerTestResult<float, 4> SoftReLuTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SoftReLuTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.1f, 0);
}

LayerTestResult<uint8_t, 4> SoftReLuUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SoftReLuTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, 0.0625f, 64);
}

LayerTestResult<int16_t, 4> SoftReLuInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SoftReLuTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, 0.1f, 0);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> LeakyReLuTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    std::vector<float> inputData = {
        -0.1f, -0.2f, -0.3f, -0.4f,
        0.1f, 0.2f, 0.3f, 0.4f,
        -1.0f, -2.0f, -3.0f, -4.0f,
        1.0f, 2.0f, 3.0f, 4.0f
    };

    const float a = 0.01f;
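    // For LeakyReLu, m_A is the slope applied to negative inputs.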
    // Calculate output values for input.
    auto f = [a](float value)
    {
        return value > 0.0f ? value : (value * a);
    };
    std::vector<float> outputExpectedData(inputData.size());
    std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);

    return SimpleActivationTest<ArmnnType>(workloadFactory,
                                           memoryManager,
                                           armnn::ActivationFunction::LeakyReLu,
                                           a,
                                           0.f,
                                           qScale,
                                           qOffset,
                                           inputData,
                                           qScale,
                                           qOffset,
                                           outputExpectedData);
}

LayerTestResult<float, 4> LeakyReLuTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return LeakyReLuTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.1f, 0);
}

LayerTestResult<uint8_t, 4> LeakyReLuUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return LeakyReLuTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, 0.0625f, 64);
}

LayerTestResult<int16_t, 4> LeakyReLuInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return LeakyReLuTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, 0.1f, 0);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> AbsTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    std::vector<float> inputData = {
        -0.1f, -0.2f, -0.3f, -0.4f,
        0.1f, 0.2f, 0.3f, 0.4f,
        -1.0f, -2.0f, -3.0f, -4.0f,
        1.0f, 2.0f, 3.0f, 4.0f
    };

    // Calculate output values for input.
    auto f = [](float value)
    {
        return std::abs(value);
    };
    std::vector<float> outputExpectedData(inputData.size());
    std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);

    return SimpleActivationTest<ArmnnType>(workloadFactory,
                                           memoryManager,
                                           armnn::ActivationFunction::Abs,
                                           0.f,
                                           0.f,
                                           qScale,
                                           qOffset,
                                           inputData,
                                           qScale,
                                           qOffset,
                                           outputExpectedData);
}

LayerTestResult<float, 4> AbsTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return AbsTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.1f, 0);
}

LayerTestResult<uint8_t, 4> AbsUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return AbsTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, 0.0625f, 64);
}

LayerTestResult<int16_t, 4> AbsInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return AbsTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, 0.1f, 0);
}

LayerTestResult<float, 5> SqrtNNTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    IgnoreUnused(memoryManager);
    const unsigned int inputDataSize = 120;
    std::vector<float> inputData(inputDataSize);

    for (unsigned int i = 0u; i < inputDataSize; ++i)
    {
        inputData[i] = static_cast<float>(i) / 10;
    }

    auto f = [](float value)
    {
        return std::sqrt(value);
    };
    std::vector<float> outputExpectedData(inputDataSize);
    std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);

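    // Unlike the other activation tests, this one exercises a rank-5 tensor
    // (1 x 2 x 3 x 4 x 5 = 120 elements).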
    armnn::TensorInfo inputTensorInfo(
        { 1u, 2u, 3u, 4u, 5u }, armnn::DataType::Float32);
    armnn::TensorInfo outputTensorInfo(
        { 1u, 2u, 3u, 4u, 5u }, armnn::DataType::Float32);

    LayerTestResult<float, 5> result(inputTensorInfo);

    auto input = MakeTensor<float, 5>(inputTensorInfo, inputData);

    ARMNN_NO_DEPRECATE_WARN_BEGIN
    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
    ARMNN_NO_DEPRECATE_WARN_END

    armnn::ActivationQueueDescriptor descriptor;
    armnn::WorkloadInfo workloadInfo;
    AddInputToWorkload(descriptor, workloadInfo, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, workloadInfo, outputTensorInfo, outputHandle.get());

    descriptor.m_Parameters.m_Function = armnn::ActivationFunction::Sqrt;

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateActivation(descriptor, workloadInfo);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0][0], outputHandle.get());

    // Calculated manually.
    result.outputExpected = MakeTensor<float, 5>(outputTensorInfo, outputExpectedData);

    return result;
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SqrtTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    std::vector<float> inputData = {
        0.1f, 0.2f, 0.3f, 0.4f,
        0.1f, 0.2f, 0.3f, 0.4f,
        1.0f, 2.0f, 3.0f, 4.0f,
        1.0f, 2.0f, 3.0f, 4.0f
    };

    // Calculate output values for input.
    auto f = [](float value)
    {
        return std::sqrt(value);
    };
    std::vector<float> outputExpectedData(inputData.size());
    std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);

    return SimpleActivationTest<ArmnnType>(workloadFactory,
                                           memoryManager,
                                           armnn::ActivationFunction::Sqrt,
                                           0.f,
                                           0.f,
                                           qScale,
                                           qOffset,
                                           inputData,
                                           qScale,
                                           qOffset,
                                           outputExpectedData);
}

LayerTestResult<float, 4> SqrtTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SqrtTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.1f, 0);
}

LayerTestResult<uint8_t, 4> SqrtUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SqrtTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, 0.0625f, 64);
}

LayerTestResult<int16_t, 4> SqrtInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SqrtTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, 0.1f, 0);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SquareTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    std::vector<float> inputData = {
        -0.1f, -0.2f, -0.3f, -0.4f,
        0.1f, 0.2f, 0.3f, 0.4f,
        -1.0f, -2.0f, -3.0f, -4.0f,
        1.0f, 2.0f, 3.0f, 4.0f
    };

    // Calculate output values for input.
    auto f = [](float value)
    {
        return std::pow(value, 2.0f);
    };
    std::vector<float> outputExpectedData(inputData.size());
    std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);

    return SimpleActivationTest<ArmnnType>(workloadFactory,
                                           memoryManager,
                                           armnn::ActivationFunction::Square,
                                           0.f,
                                           0.f,
                                           qScale,
                                           qOffset,
                                           inputData,
                                           qScale,
                                           qOffset,
                                           outputExpectedData);
}

LayerTestResult<float, 4> SquareTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SquareTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.1f, 0);
}

LayerTestResult<uint8_t, 4> SquareUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SquareTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, 0.0625f, 64);
}

LayerTestResult<int16_t, 4> SquareInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SquareTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, 0.1f, 0);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> TanhTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    std::vector<float> inputData = {
        -0.1f, -0.2f, -0.3f, -0.4f,
        0.1f, 0.2f, 0.3f, 0.4f,
        -1.0f, -2.0f, -3.0f, -4.0f,
        1.0f, 2.0f, 3.0f, 4.0f
    };

    const float a = 2.0f;
    const float b = 3.0f;
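    // TanH computes a * tanh(b * x), with a and b passed in as m_A and m_B.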
    // Calculate output values for input.
    auto f = [a, b](float value)
    {
        return a * tanhf(b * value);
    };
    std::vector<float> outputExpectedData(inputData.size());
    std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);

    return SimpleActivationTest<ArmnnType>(workloadFactory,
                                           memoryManager,
                                           armnn::ActivationFunction::TanH,
                                           a,
                                           b,
                                           qScale,
                                           qOffset,
                                           inputData,
                                           qScale,
                                           qOffset,
                                           outputExpectedData);
}

LayerTestResult<float, 4> TanhTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return TanhTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.1f, 0);
}

LayerTestResult<uint8_t, 4> TanhUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return TanhTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, 0.1f, 64);
}

LayerTestResult<int16_t, 4> TanhInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return TanhTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, 0.1f, 0);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> EluTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    std::vector<float> inputData = {
        -0.1f, -0.2f, -0.3f, -0.4f,
        0.1f, 0.2f, 0.3f, 0.4f,
        -1.0f, -2.0f, -3.0f, -4.0f,
        1.0f, 2.0f, 3.0f, 4.0f
    };

    const float a = 0.01f;
    // Calculate output values for input.
    auto f = [a](float value)
    {
        return (value >= 0) ? value : a * (expf(value) - 1);
    };
    std::vector<float> outputExpectedData(inputData.size());
    std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);

    return SimpleActivationTest<ArmnnType>(workloadFactory,
                                           memoryManager,
                                           armnn::ActivationFunction::Elu,
                                           a,
                                           0.0f,
                                           qScale,
                                           qOffset,
                                           inputData,
                                           qScale,
                                           qOffset,
                                           outputExpectedData);
}

LayerTestResult<float, 4> EluTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return EluTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.1f, 0);
}

LayerTestResult<uint8_t, 4> EluUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return EluTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, 0.1f, 64);
}

LayerTestResult<int16_t, 4> EluInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return EluTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, 0.1f, 0);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> HardSwishTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    std::vector<float> inputData = {
        -0.1f, -0.2f, -0.3f, -0.4f,
        0.1f, 0.2f, 0.3f, 0.4f,
        -1.0f, -2.0f, -3.0f, -4.0f,
        1.0f, 2.0f, 3.0f, 4.0f
    };
    // Calculate output values for input.
    auto f = [](float x)
    {
        // Break down the calculation to help with verification.
        // hard_swish(x) = x * relu6(x+3) / 6
        // relu6(x) = min(max(x,0),6)
        float reLu6_step1 = std::max(x + 3, 0.0f);
        float reLu6Complete = std::min(reLu6_step1, 6.0f);
        float hardSwish_step1 = x * reLu6Complete;
        float result = hardSwish_step1 / 6.0f;
        return result;
    };
    std::vector<float> outputExpectedData(inputData.size());
    std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);

    return SimpleActivationTest<ArmnnType>(workloadFactory,
                                           memoryManager,
                                           armnn::ActivationFunction::HardSwish,
                                           0.f,
                                           0.f,
                                           qScale,
                                           qOffset,
                                           inputData,
                                           qScale,
                                           qOffset,
                                           outputExpectedData);
}

LayerTestResult<float, 4> HardSwishTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return HardSwishTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.1f, 0);
}

LayerTestResult<uint8_t, 4> HardSwishUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return HardSwishTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, 0.1f, 64);
}

LayerTestResult<int16_t, 4> HardSwishInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return HardSwishTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, 0.1f, 0);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> CompareActivationTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory,
    armnn::ActivationFunction f,
    unsigned int batchSize = 5,
    float qScale = 0.0f,
    int32_t qOffset = 0)
{
    IgnoreUnused(memoryManager);
    unsigned int width = 17;
    unsigned int height = 29;
    unsigned int channels = 2;

    float a = 0.234f;
    float b = -12.345f;

    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;

    unsigned int shape[] = {batchSize, channels, height, width};

    inputTensorInfo = armnn::TensorInfo(4, shape, ArmnnType);
    outputTensorInfo = armnn::TensorInfo(4, shape, ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    float minVal = -10.f;
    if (ArmnnType == armnn::DataType::QAsymmU8)
    {
        minVal = 0.f;
    }
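    // Both workloads consume the same deterministic random input, so any
    // mismatch between the two backends shows up in the comparison below.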

    boost::multi_array<T, 4> input = MakeRandomTensor<T, 4>(inputTensorInfo, 21453, minVal, 10.f);

    LayerTestResult<T,4> ret(outputTensorInfo);
    auto boostArrayExtents = boost::extents
        [boost::numeric_cast<boost::multi_array_types::extent_gen::index>(batchSize)]
        [boost::numeric_cast<boost::multi_array_types::extent_gen::index>(channels)]
        [boost::numeric_cast<boost::multi_array_types::extent_gen::index>(height)]
        [boost::numeric_cast<boost::multi_array_types::extent_gen::index>(width)];
    ret.output.resize(boostArrayExtents);
    ret.outputExpected.resize(boostArrayExtents);

    ARMNN_NO_DEPRECATE_WARN_BEGIN
    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refWorkloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);
    ARMNN_NO_DEPRECATE_WARN_END

    armnn::ActivationQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    data.m_Parameters.m_A = a;
    data.m_Parameters.m_B = b;
    data.m_Parameters.m_Function = f;

    armnn::ActivationQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateActivation(data, info);
    ARMNN_ASSERT(workload != nullptr);
    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateActivation(refData, refInfo);
    ARMNN_ASSERT(workloadRef != nullptr);

    inputHandle->Allocate();
    outputHandle->Allocate();
    inputHandleRef->Allocate();
    outputHandleRef->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
    CopyDataToITensorHandle(inputHandleRef.get(), &input[0][0][0][0]);

    workload->Execute();
    workloadRef->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
    CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());

    return ret;
}

LayerTestResult<float, 4> CompareActivationTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory,
    armnn::ActivationFunction f,
    unsigned int batchSize)
{
    return CompareActivationTestImpl<armnn::DataType::Float32>(
        workloadFactory, memoryManager, refWorkloadFactory, f, batchSize);
}

LayerTestResult<uint8_t, 4> CompareActivationUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory,
    armnn::ActivationFunction f)
{
    return CompareActivationTestImpl<armnn::DataType::QAsymmU8>(
        workloadFactory, memoryManager, refWorkloadFactory, f, 5, 0.1f, 50);
}

LayerTestResult<int16_t, 4> CompareActivationInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory,
    armnn::ActivationFunction f)
{
    return CompareActivationTestImpl<armnn::DataType::QSymmS16>(
        workloadFactory, memoryManager, refWorkloadFactory, f, 5, 0.1f, 0);
}