ArmNN 20.02 — ActivationTestImpl.cpp (Doxygen source listing; the numbers embedded in each line are the original file's line numbers).
1 //
2 // Copyright © 2017 Arm Ltd. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
#include "ActivationTestImpl.hpp"

#include <QuantizeHelper.hpp>
#include <ResolveType.hpp>

#include <backendsCommon/test/ActivationFixture.hpp>
#include <backendsCommon/test/TensorCopyUtils.hpp>
#include <backendsCommon/test/WorkloadTestUtils.hpp>

#include <test/TensorHelpers.hpp>

#include <boost/multi_array.hpp>

#include <algorithm>
22 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
24  armnn::IWorkloadFactory& workloadFactory,
26  float upperBound,
27  float lowerBound,
28  float inputScale,
29  int32_t inputOffset,
30  float outputScale,
31  int32_t outputOffset,
32  const std::vector<T>& inputData,
33  const std::vector<T>& outputExpectedData,
34  unsigned int inputWidth,
35  unsigned int inputHeight,
36  unsigned int inputChannels,
37  unsigned int inputBatchSize)
38 {
39  IgnoreUnused(memoryManager);
40  unsigned int outputWidth = inputWidth;
41  unsigned int outputHeight = inputHeight;
42  unsigned int outputChannels = inputChannels;
43  unsigned int outputBatchSize = inputBatchSize;
44 
45  armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth }, ArmnnType);
46 
47  armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth }, ArmnnType);
48 
49  if(armnn::IsQuantizedType<T>())
50  {
51  inputTensorInfo.SetQuantizationScale(inputScale);
52  inputTensorInfo.SetQuantizationOffset(inputOffset);
53 
54  outputTensorInfo.SetQuantizationScale(outputScale);
55  outputTensorInfo.SetQuantizationOffset(outputOffset);
56  }
57 
58  LayerTestResult<T, 4> result(inputTensorInfo);
59 
60  auto input = MakeTensor<T, 4>(inputTensorInfo, inputData);
61 
62  std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
63  std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
64 
65  // Setup bounded ReLu.
67  armnn::WorkloadInfo workloadInfo;
68  AddInputToWorkload(descriptor, workloadInfo, inputTensorInfo, inputHandle.get());
69  AddOutputToWorkload(descriptor, workloadInfo, outputTensorInfo, outputHandle.get());
70 
72  descriptor.m_Parameters.m_A = upperBound;
73  descriptor.m_Parameters.m_B = lowerBound;
74 
75  std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateActivation(descriptor, workloadInfo);
76 
77  inputHandle->Allocate();
78  outputHandle->Allocate();
79 
80  CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
81 
82  workload->Execute();
83 
84  CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
85 
86  result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputExpectedData);
87 
88  return result;
89 }
90 
92  armnn::IWorkloadFactory& workloadFactory,
94 {
95  unsigned int inputWidth = 4u;
96  unsigned int inputHeight = 5u;
97  unsigned int inputChannels = 1u;
98  unsigned int inputBatchSize = 1;
99 
100  std::vector<float> input = std::vector<float>{
101  -2.0f, 0.1f, 0.5f, 1.25f,
102  0.786f, 0.9875f, -1.5f, 0.384f,
103  1.0001f, 3.5f, 7.5f, 0.896f,
104  2.126f, 2.0f, 0.3f, 0.15f,
105  0.999f, 1.2f, 0.89f, 6.1f,
106  };
107 
108  // Calculated manually.
109  std::vector<float> output = std::vector<float>{
110  -1.0f, 0.1f, 0.5f, 1.0f,
111  0.786f, 0.9875f, -1.0f, 0.384f,
112  1.0f, 1.0f, 1.0f, 0.896f,
113  1.0f, 1.0f, 0.3f, 0.15f,
114  0.999f, 1.0f, 0.89f, 1.0f,
115  };
116 
117  return BoundedReLuTestCommon<armnn::DataType::Float32>(
118  workloadFactory, memoryManager, 1.0f, -1.0f, 1.0f, 0, 1.0f, 0, input, output,
119  inputWidth, inputHeight, inputChannels, inputBatchSize);
120 }
121 
123  armnn::IWorkloadFactory& workloadFactory,
125 {
126  unsigned int inputWidth = 4u;
127  unsigned int inputHeight = 5u;
128  unsigned int inputChannels = 1u;
129  unsigned int inputBatchSize = 1;
130 
131  std::vector<float> input = std::vector<float>{
132  -1.0f, 0.1f, 0.5f, 6.25f,
133  0.786f, 5.9875f, -0.5f, 0.384f,
134  6.0001f, 3.5f, 7.5f, 0.896f,
135  2.126f, 12.0f, 0.3f, 0.15f,
136  0.999f, 1.2f, 0.89f, 6.1f,
137  };
138 
139  // Calculated manually.
140  std::vector<float> output = std::vector<float>{
141  0.0f, 0.1f, 0.5f, 6.0f,
142  0.786f, 5.9875f, 0.0f, 0.384f,
143  6.0f, 3.5f, 6.0f, 0.896f,
144  2.126f, 6.0f, 0.3f, 0.15f,
145  0.999f, 1.2f, 0.89f, 6.0f,
146  };
147 
148  return BoundedReLuTestCommon<armnn::DataType::Float32>(
149  workloadFactory, memoryManager, 6.0f, 0.0f, 1.0f, 0, 1.0f, 0, input, output,
150  inputWidth, inputHeight, inputChannels, inputBatchSize);
151 }
152 
154  armnn::IWorkloadFactory& workloadFactory,
156 {
157  unsigned int inputWidth = 3u;
158  unsigned int inputHeight = 2u;
159  unsigned int inputChannels = 1u;
160  unsigned int inputBatchSize = 1;
161 
162  std::vector<uint8_t> input = std::vector<uint8_t>{
163  51, 124, 28,
164  251, 8, 92
165  };
166 
167  // Calculated manually.
168  std::vector<uint8_t> output = std::vector<uint8_t>{
169  0, 122, 0,
170  255, 0, 58
171  };
172 
173  float inputScale = 12.0f / 255.0f;
174  int32_t inputOffset = 63;
175  float outputScale = 6.0f / 255.0f;
176  int32_t outputOffset = 0;
177 
178  return BoundedReLuTestCommon<armnn::DataType::QAsymmU8>(
179  workloadFactory, memoryManager, 6.0f, 0.0f,
180  inputScale, inputOffset, outputScale, outputOffset,
181  input, output, inputWidth, inputHeight, inputChannels, inputBatchSize);
182 }
183 
185  armnn::IWorkloadFactory& workloadFactory,
187 {
188  unsigned int inputWidth = 3u;
189  unsigned int inputHeight = 2u;
190  unsigned int inputChannels = 1u;
191  unsigned int inputBatchSize = 1;
192 
193  std::vector<uint8_t> input = std::vector<uint8_t>{
194  51, 230, 28,
195  251, 8, 92
196  };
197 
198  // Calculated manually.
199  std::vector<uint8_t> output = std::vector<uint8_t>{
200  51, 192, 32,
201  192, 32, 92
202  };
203 
204  int32_t inputOffset = 112;
205  float inputScale = 0.0125f;
206 
207  return BoundedReLuTestCommon<armnn::DataType::QAsymmU8>(
208  workloadFactory, memoryManager, 1.0f, -1.0f,
209  inputScale, inputOffset, inputScale, inputOffset, // Input/output scale & offset same.
210  input, output, inputWidth, inputHeight, inputChannels, inputBatchSize);
211 }
212 
213 namespace
214 {
215 
216 struct BoundedReLuRandomInputTestTraits
217 {
218  constexpr static unsigned int inputHeight = 31u;
219  constexpr static unsigned int inputWidth = 19u;
220  constexpr static unsigned int inputChannels = 4u;
221  constexpr static unsigned int inputBatchSize = 2;
222 
223  constexpr static unsigned int outputHeight = inputHeight;
224  constexpr static unsigned int outputWidth = inputWidth;
225  constexpr static unsigned int outputChannels = inputChannels;
226  constexpr static unsigned int outputBatchSize = inputBatchSize;
227 
229  {
230  return armnn::TensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
232  }
233 
234  static armnn::TensorInfo GetOutputTensorInfo()
235  {
236  return armnn::TensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
238  }
239 };
240 
241 boost::multi_array<float, 4> BoundedReLuRandomInputTest(
242  armnn::IWorkloadFactory& workloadFactory,
244  float lowerBound,
245  float upperBound,
246  const armnn::ActivationDescriptor& activationDescriptor)
247 {
248  IgnoreUnused(memoryManager);
250  const armnn::TensorInfo outputTensorInfo = BoundedReLuRandomInputTestTraits::GetOutputTensorInfo();
251 
252  boost::multi_array<float, 4> output(GetTensorShapeAsArray<4>(outputTensorInfo));
253 
254  // Min/max random values passed to MakeRandomTensor are purposely outside of the ReLu
255  // range [lowerBound, upperBound].
256  auto input = MakeRandomTensor<float, 4>(inputTensorInfo, 4605828, lowerBound - 5.0f, upperBound * 2.0f);
257 
258  std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
259  std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
260 
261  // Set up bounded ReLu.
263  armnn::WorkloadInfo workloadInfo;
264  AddInputToWorkload(descriptor, workloadInfo, inputTensorInfo, inputHandle.get());
265  AddOutputToWorkload(descriptor, workloadInfo, outputTensorInfo, outputHandle.get());
266  descriptor.m_Parameters = activationDescriptor;
267 
268  std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateActivation(descriptor, workloadInfo);
269 
270  inputHandle->Allocate();
271  outputHandle->Allocate();
272 
273  CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
274 
275  workload->Execute();
276 
277  CopyDataFromITensorHandle(&output[0][0][0][0], outputHandle.get());
278 
279  return output;
280 }
281 
282 } // namespace
283 
285  armnn::IWorkloadFactory& workloadFactory,
287  armnn::IWorkloadFactory& refWorkloadFactory,
288  float upperBound,
289  float lowerBound)
290 {
291  LayerTestResult<float, 4> result(BoundedReLuRandomInputTestTraits::GetOutputTensorInfo());
292 
293  armnn::ActivationDescriptor activationDescriptor;
294  activationDescriptor.m_Function = armnn::ActivationFunction::BoundedReLu;
295  activationDescriptor.m_A = upperBound;
296  activationDescriptor.m_B = lowerBound;
297 
298  result.output = BoundedReLuRandomInputTest(
299  workloadFactory, memoryManager, 0.0f, upperBound, activationDescriptor);
300  result.outputExpected = BoundedReLuRandomInputTest(
301  refWorkloadFactory, nullptr, 0.0f, upperBound, activationDescriptor);
302 
303  return result;
304 }
305 
306 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
308  armnn::IWorkloadFactory& workloadFactory,
310  float qScale = 0.0f,
311  int32_t qOffset = 0)
312 {
313  IgnoreUnused(memoryManager);
314  unsigned int inputHeight = 20;
315  unsigned int inputWidth = 17;
316  unsigned int inputChannels = 3;
317  unsigned int batchSize = 5;
318 
319  armnn::TensorInfo inputTensorInfo;
320  armnn::TensorInfo outputTensorInfo;
321 
322  unsigned int shape[] = {batchSize, inputChannels, inputHeight, inputWidth};
323 
324  inputTensorInfo = armnn::TensorInfo(4, shape, ArmnnType);
325  outputTensorInfo = armnn::TensorInfo(4, shape, ArmnnType);
326 
327  // Set quantization parameters if the requested type is a quantized type.
328  if(armnn::IsQuantizedType<T>())
329  {
330  inputTensorInfo.SetQuantizationScale(qScale);
331  inputTensorInfo.SetQuantizationOffset(qOffset);
332  outputTensorInfo.SetQuantizationScale(qScale);
333  outputTensorInfo.SetQuantizationOffset(qOffset);
334  }
335 
336  LayerTestResult<T, 4> ret(outputTensorInfo);
337 
338  std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
339  std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
340 
341  // Do linear activation that should leave the tensor unchanged.
343  armnn::WorkloadInfo info;
344  AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
345  AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
346  data.m_Parameters.m_A = 1.0f;
347  data.m_Parameters.m_B = 0.0f;
349 
350  std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateActivation(data, info);
351 
352  inputHandle->Allocate();
353  outputHandle->Allocate();
354 
355  boost::multi_array<T, 4> input = MakeRandomTensor<T, 4>(inputTensorInfo, 7123561);
356  CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
357 
358  workload->Execute();
359 
360  CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
361 
362  // Ensure output equals input.
363  ret.outputExpected = input;
364 
365  return ret;
366 }
367 
369  armnn::IWorkloadFactory& workloadFactory,
371 {
372  return ConstantLinearActivationTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
373 }
374 
376  armnn::IWorkloadFactory& workloadFactory,
378 {
379  return ConstantLinearActivationTestCommon<armnn::DataType::QAsymmU8>(
380  workloadFactory, memoryManager, 4.0f, 3);
381 }
382 
384  armnn::IWorkloadFactory& workloadFactory,
386 {
387  return ConstantLinearActivationTestCommon<armnn::DataType::QSymmS16>(
388  workloadFactory, memoryManager, 0.1f, 0);
389 }
390 
391 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
393  armnn::IWorkloadFactory& workloadFactory,
395  armnn::ActivationFunction activationFunction,
396  float activationParameterA,
397  float activationParameterB,
398  float scale,
399  int32_t offset,
400  const std::vector<float>& inputData,
401  float outScale,
402  int32_t outOffset,
403  const std::vector<float>& outputExpectedData)
404 {
405  IgnoreUnused(memoryManager);
406  constexpr static unsigned int inputWidth = 16u;
407  constexpr static unsigned int inputHeight = 1u;
408  constexpr static unsigned int inputChannels = 1u;
409  constexpr static unsigned int inputBatchSize = 1u;
410 
411  constexpr static unsigned int outputWidth = inputWidth;
412  constexpr static unsigned int outputHeight = inputHeight;
413  constexpr static unsigned int outputChannels = inputChannels;
414  constexpr static unsigned int outputBatchSize = inputBatchSize;
415 
416  armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth }, ArmnnType);
417  armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth }, ArmnnType);
418 
419  // Set quantization parameters if the requested type is a quantized type.
420  if(armnn::IsQuantizedType<T>())
421  {
422  inputTensorInfo.SetQuantizationScale(scale);
423  inputTensorInfo.SetQuantizationOffset(offset);
424  outputTensorInfo.SetQuantizationScale(outScale);
425  outputTensorInfo.SetQuantizationOffset(outOffset);
426  }
427 
428  LayerTestResult<T, 4> result(inputTensorInfo);
429 
430  auto input = MakeTensor<T, 4>(inputTensorInfo, armnnUtils::QuantizedVector<T>(inputData, scale, offset));
431 
432  std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
433  std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
434 
435  // Setup bounded ReLu.
437  armnn::WorkloadInfo workloadInfo;
438  AddInputToWorkload(descriptor, workloadInfo, inputTensorInfo, inputHandle.get());
439  AddOutputToWorkload(descriptor, workloadInfo, outputTensorInfo, outputHandle.get());
440 
441  descriptor.m_Parameters.m_Function = activationFunction;
442  descriptor.m_Parameters.m_A = activationParameterA;
443  descriptor.m_Parameters.m_B = activationParameterB;
444 
445  std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateActivation(descriptor, workloadInfo);
446 
447  inputHandle->Allocate();
448  outputHandle->Allocate();
449 
450  CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
451 
452  workload->Execute();
453 
454  CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
455 
456  // Calculated manually.
457  result.outputExpected =
458  MakeTensor<T, 4>(outputTensorInfo, armnnUtils::QuantizedVector<T>(outputExpectedData, outScale, outOffset));
459 
460  return result;
461 }
462 
463 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
465  armnn::IWorkloadFactory& workloadFactory,
467  float qScale,
468  int32_t qOffset)
469 {
470  std::vector<float> inputData =
471  {
472  -0.1f, -0.2f, -0.3f, -0.4f,
473  0.1f, 0.2f, 0.3f, 0.4f,
474  -1.0f, -2.0f, -3.0f, -4.0f,
475  1.0f, 2.0f, 3.0f, 4.0f
476  };
477 
478  // Calculate output values for input.
479  auto f = [](float value)
480  {
481  return 1.0f / (1.0f + std::exp(-value));
482  };
483  std::vector<float> outputExpectedData(inputData.size());
484  std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
485 
486  return SimpleActivationTest<ArmnnType>(workloadFactory,
487  memoryManager,
489  0.f,
490  0.f,
491  qScale,
492  qOffset,
493  inputData,
494  1.f / 256.f,
495  0,
496  outputExpectedData);
497 }
498 
500  armnn::IWorkloadFactory& workloadFactory,
502 {
503  return SimpleSigmoidTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
504 }
505 
507  armnn::IWorkloadFactory& workloadFactory,
509 {
510  return SimpleSigmoidTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, 0.1f, 50);
511 }
512 
514  armnn::IWorkloadFactory& workloadFactory,
516 {
517  return SimpleSigmoidTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, 0.1f, 0);
518 }
519 
520 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
522  armnn::IWorkloadFactory& workloadFactory,
524  float qScale,
525  int32_t qOffset)
526 {
527  std::vector<float> inputData = {
528  -0.1f, -0.2f, -0.3f, -0.4f,
529  0.1f, 0.2f, 0.3f, 0.4f,
530  -1.0f, -2.0f, -3.0f, -4.0f,
531  1.0f, 2.0f, 3.0f, 4.0f
532  };
533 
534  // Calculate output values for input.
535  auto f = [](float value)
536  {
537  return std::fmax(0.0f, value);
538  };
539  std::vector<float> outputExpectedData(inputData.size());
540  std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
541 
542  return SimpleActivationTest<ArmnnType>(workloadFactory,
543  memoryManager,
545  0.f,
546  0.f,
547  qScale,
548  qOffset,
549  inputData,
550  qScale,
551  qOffset,
552  outputExpectedData);
553 }
554 
556  armnn::IWorkloadFactory& workloadFactory,
558 {
559  return ReLuTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, 0.1f, 0);
560 }
561 
562 
564  armnn::IWorkloadFactory& workloadFactory,
566 {
567  return ReLuTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, 0.1f, 0);
568 }
569 
571  armnn::IWorkloadFactory& workloadFactory,
573 {
574  return ReLuTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.1f, 0);
575 }
576 
577 
578 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
580  armnn::IWorkloadFactory& workloadFactory,
582  float qScale,
583  int32_t qOffset)
584 {
585  std::vector<float> inputData = {
586  -0.1f, -0.2f, -0.3f, -0.4f,
587  0.1f, 0.2f, 0.3f, 0.4f,
588  -1.0f, -2.0f, -3.0f, -4.0f,
589  1.0f, 2.0f, 3.0f, 4.0f
590  };
591  const float a = 1.0f;
592  const float b = -1.0f;
593  // Calculate output values for input.
594  auto f = [a, b](float value)
595  {
596  return std::min(a, std::max(b, value));
597  };
598  std::vector<float> outputExpectedData(inputData.size());
599  std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
600 
601  return SimpleActivationTest<ArmnnType>(workloadFactory,
602  memoryManager,
604  a,
605  b,
606  qScale,
607  qOffset,
608  inputData,
609  qScale,
610  qOffset,
611  outputExpectedData);
612 }
613 
615  armnn::IWorkloadFactory& workloadFactory,
617 {
618  return ReLuTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, 0.1f, 0);
619 }
620 
621 
622 
623 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
625  armnn::IWorkloadFactory& workloadFactory,
627  float qScale,
628  int32_t qOffset)
629 {
630  std::vector<float> inputData = {
631  -0.1f, -0.2f, -0.3f, -0.4f,
632  0.1f, 0.2f, 0.3f, 0.4f,
633  -1.0f, -2.0f, -3.0f, -4.0f,
634  1.0f, 2.0f, 3.0f, 4.0f
635  };
636 
637  // Calculate output values for input.
638  auto f = [](float value)
639  {
640  return std::log(1.0f + std::exp(value));
641  };
642  std::vector<float> outputExpectedData(inputData.size());
643  std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
644 
645  return SimpleActivationTest<ArmnnType>(workloadFactory,
646  memoryManager,
648  0.f,
649  0.f,
650  qScale,
651  qOffset,
652  inputData,
653  qScale,
654  qOffset,
655  outputExpectedData);
656 }
657 
659  armnn::IWorkloadFactory& workloadFactory,
661 {
662  return SoftReLuTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.1f, 0);
663 }
664 
666  armnn::IWorkloadFactory& workloadFactory,
668 {
669  return SoftReLuTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, 0.0625f, 64);
670 }
671 
673  armnn::IWorkloadFactory& workloadFactory,
675 {
676  return SoftReLuTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, 0.1f, 0);
677 }
678 
679 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
681  armnn::IWorkloadFactory& workloadFactory,
683  float qScale,
684  int32_t qOffset)
685 {
686  std::vector<float> inputData = {
687  -0.1f, -0.2f, -0.3f, -0.4f,
688  0.1f, 0.2f, 0.3f, 0.4f,
689  -1.0f, -2.0f, -3.0f, -4.0f,
690  1.0f, 2.0f, 3.0f, 4.0f
691  };
692 
693  const float a = 0.01f;
694  // Calculate output values for input.
695  auto f = [a](float value)
696  {
697  return value > 0.0f ? value : (value * a);
698  };
699  std::vector<float> outputExpectedData(inputData.size());
700  std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
701 
702  return SimpleActivationTest<ArmnnType>(workloadFactory,
703  memoryManager,
705  a,
706  0.f,
707  qScale,
708  qOffset,
709  inputData,
710  qScale,
711  qOffset,
712  outputExpectedData);
713 }
714 
716  armnn::IWorkloadFactory& workloadFactory,
718 {
719  return LeakyReLuTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.1f, 0);
720 }
721 
723  armnn::IWorkloadFactory& workloadFactory,
725 {
726  return LeakyReLuTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, 0.0625f, 64);
727 }
728 
730  armnn::IWorkloadFactory& workloadFactory,
732 {
733  return LeakyReLuTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, 0.1f, 0);
734 }
735 
736 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
738  armnn::IWorkloadFactory& workloadFactory,
740  float qScale,
741  int32_t qOffset)
742 {
743  std::vector<float> inputData = {
744  -0.1f, -0.2f, -0.3f, -0.4f,
745  0.1f, 0.2f, 0.3f, 0.4f,
746  -1.0f, -2.0f, -3.0f, -4.0f,
747  1.0f, 2.0f, 3.0f, 4.0f
748  };
749 
750  // Calculate output values for input.
751  auto f = [](float value)
752  {
753  return std::abs(value);
754  };
755  std::vector<float> outputExpectedData(inputData.size());
756  std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
757 
758  return SimpleActivationTest<ArmnnType>(workloadFactory,
759  memoryManager,
761  0.f,
762  0.f,
763  qScale,
764  qOffset,
765  inputData,
766  qScale,
767  qOffset,
768  outputExpectedData);
769 }
770 
772  armnn::IWorkloadFactory& workloadFactory,
774 {
775  return AbsTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.1f, 0);
776 }
777 
779  armnn::IWorkloadFactory& workloadFactory,
781 {
782  return AbsTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, 0.0625f, 64);
783 }
784 
786  armnn::IWorkloadFactory& workloadFactory,
788 {
789  return AbsTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, 0.1f, 0);
790 }
791 
793  armnn::IWorkloadFactory& workloadFactory,
795 {
796  IgnoreUnused(memoryManager);
797  const int inputDataSize = 120;
798  std::vector<float> inputData(inputDataSize);
799 
800  for (unsigned int i = 0u; i < inputDataSize; ++i)
801  {
802  inputData[i] = static_cast<float>(i) / 10;
803  }
804 
805  auto f = [](float value)
806  {
807  return std::sqrt(value);
808  };
809  std::vector<float> outputExpectedData(inputDataSize);
810  std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
811 
812  armnn::TensorInfo inputTensorInfo(
813  { 1u, 2u, 3u, 4u, 5u }, armnn::DataType::Float32);
814  armnn::TensorInfo outputTensorInfo(
815  { 1u, 2u, 3u, 4u, 5u }, armnn::DataType::Float32);
816 
817  LayerTestResult<float, 5> result(inputTensorInfo);
818 
819  auto input = MakeTensor<float, 5>(inputTensorInfo, inputData);
820 
821  std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
822  std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
823 
825  armnn::WorkloadInfo workloadInfo;
826  AddInputToWorkload(descriptor, workloadInfo, inputTensorInfo, inputHandle.get());
827  AddOutputToWorkload(descriptor, workloadInfo, outputTensorInfo, outputHandle.get());
828 
830 
831  std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateActivation(descriptor, workloadInfo);
832 
833  inputHandle->Allocate();
834  outputHandle->Allocate();
835 
836  CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0][0]);
837 
838  workload->Execute();
839 
840  CopyDataFromITensorHandle(&result.output[0][0][0][0][0], outputHandle.get());
841 
842  // Calculated manually.
843  result.outputExpected = MakeTensor<float, 5>(outputTensorInfo, outputExpectedData);
844 
845  return result;
846 };
847 
848 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
850  armnn::IWorkloadFactory& workloadFactory,
852  float qScale,
853  int32_t qOffset)
854 {
855  std::vector<float> inputData = {
856  0.1f, 0.2f, 0.3f, 0.4f,
857  0.1f, 0.2f, 0.3f, 0.4f,
858  1.0f, 2.0f, 3.0f, 4.0f,
859  1.0f, 2.0f, 3.0f, 4.0f
860  };
861 
862  // Calculate output values for input.
863  auto f = [](float value)
864  {
865  return std::sqrt(value);
866  };
867  std::vector<float> outputExpectedData(inputData.size());
868  std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
869 
870  return SimpleActivationTest<ArmnnType>(workloadFactory,
871  memoryManager,
873  0.f,
874  0.f,
875  qScale,
876  qOffset,
877  inputData,
878  qScale,
879  qOffset,
880  outputExpectedData);
881 }
882 
884  armnn::IWorkloadFactory& workloadFactory,
886 {
887  return SqrtTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.1f, 0);
888 }
889 
891  armnn::IWorkloadFactory& workloadFactory,
893 {
894  return SqrtTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, 0.0625f, 64);
895 }
896 
898  armnn::IWorkloadFactory& workloadFactory,
900 {
901  return SqrtTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, 0.1f, 0);
902 }
903 
904 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
906  armnn::IWorkloadFactory& workloadFactory,
908  float qScale,
909  int32_t qOffset)
910 {
911  std::vector<float> inputData = {
912  -0.1f, -0.2f, -0.3f, -0.4f,
913  0.1f, 0.2f, 0.3f, 0.4f,
914  -1.0f, -2.0f, -3.0f, -4.0f,
915  1.0f, 2.0f, 3.0f, 4.0f
916  };
917 
918  // Calculate output values for input.
919  auto f = [](float value)
920  {
921  return std::pow(value,2);
922  };
923  std::vector<float> outputExpectedData(inputData.size());
924  std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
925 
926  return SimpleActivationTest<ArmnnType>(workloadFactory,
927  memoryManager,
929  0.f,
930  0.f,
931  qScale,
932  qOffset,
933  inputData,
934  qScale,
935  qOffset,
936  outputExpectedData);
937 }
938 
940  armnn::IWorkloadFactory& workloadFactory,
942 {
943  return SquareTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.1f, 0);
944 }
945 
947  armnn::IWorkloadFactory& workloadFactory,
949 {
950  return SquareTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, 0.0625f, 64);
951 }
952 
954  armnn::IWorkloadFactory& workloadFactory,
956 {
957  return SquareTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, 0.1f, 0);
958 }
959 
960 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
962  armnn::IWorkloadFactory& workloadFactory,
964  float qScale,
965  int32_t qOffset)
966 {
967  std::vector<float> inputData = {
968  -0.1f, -0.2f, -0.3f, -0.4f,
969  0.1f, 0.2f, 0.3f, 0.4f,
970  -1.0f, -2.0f, -3.0f, -4.0f,
971  1.0f, 2.0f, 3.0f, 4.0f
972  };
973 
974  const float a = 2.0f;
975  const float b = 3.0f;
976  // Calculate output values for input.
977  auto f = [a, b](float value)
978  {
979  return a * tanhf(b * value);
980  };
981  std::vector<float> outputExpectedData(inputData.size());
982  std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
983 
984  return SimpleActivationTest<ArmnnType>(workloadFactory,
985  memoryManager,
987  a,
988  b,
989  qScale,
990  qOffset,
991  inputData,
992  qScale,
993  qOffset,
994  outputExpectedData);
995 }
996 
998  armnn::IWorkloadFactory& workloadFactory,
1000 {
1001  return TanhTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.1f, 0);
1002 }
1003 
1005  armnn::IWorkloadFactory& workloadFactory,
1007 {
1008  return TanhTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, 0.1f, 64);
1009 }
1010 
1012  armnn::IWorkloadFactory& workloadFactory,
1014 {
1015  return TanhTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, 0.1f, 0);
1016 }
1017 
1018 
1019 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
1021  armnn::IWorkloadFactory& workloadFactory,
1023  float qScale,
1024  int32_t qOffset)
1025 {
1026  std::vector<float> inputData = {
1027  -0.1f, -0.2f, -0.3f, -0.4f,
1028  0.1f, 0.2f, 0.3f, 0.4f,
1029  -1.0f, -2.0f, -3.0f, -4.0f,
1030  1.0f, 2.0f, 3.0f, 4.0f
1031  };
1032 
1033 
1034  const float a = 0.01f;
1035  // Calculate output values for input.
1036  auto f = [a](float value)
1037  {
1038  return (value >= 0) ? value : a * (expf(value) - 1);
1039  };
1040  std::vector<float> outputExpectedData(inputData.size());
1041  std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
1042 
1043  return SimpleActivationTest<ArmnnType>(workloadFactory,
1044  memoryManager,
1046  a,
1047  0.0f,
1048  qScale,
1049  qOffset,
1050  inputData,
1051  qScale,
1052  qOffset,
1053  outputExpectedData);
1054 }
1055 
1057  armnn::IWorkloadFactory& workloadFactory,
1059 {
1060  return EluTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.1f, 0);
1061 }
1062 
1064  armnn::IWorkloadFactory& workloadFactory,
1066 {
1067  return EluTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, 0.1f, 64);
1068 }
1069 
1071  armnn::IWorkloadFactory& workloadFactory,
1073 {
1074  return EluTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, 0.1f, 0);
1075 }
1076 
1077 
1078 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
1080  armnn::IWorkloadFactory& workloadFactory,
1082  float qScale,
1083  int32_t qOffset)
1084 {
1085  std::vector<float> inputData = {
1086  -0.1f, -0.2f, -0.3f, -0.4f,
1087  0.1f, 0.2f, 0.3f, 0.4f,
1088  -1.0f, -2.0f, -3.0f, -4.0f,
1089  1.0f, 2.0f, 3.0f, 4.0f
1090  };
1091  // Calculate output values for input.
1092  auto f = [](float x)
1093  {
1094  // Break down the calculation to help with verification.
1095  // hard_swish(x) = x * relu6(x+3) / 6
1096  // relu6(x) = min(max(x,0),6)
1097  float reLu6_step1 = std::max((x + 3),0.0f);
1098  float reLu6Complete = std::min(reLu6_step1, 6.0f);
1099  float hardSwish_step1 = x * reLu6Complete;
1100  float result = hardSwish_step1 / 6;
1101  return result;
1102  };
1103  std::vector<float> outputExpectedData(inputData.size());
1104  std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
1105 
1106  return SimpleActivationTest<ArmnnType>(workloadFactory,
1107  memoryManager,
1109  0.f,
1110  0.f,
1111  qScale,
1112  qOffset,
1113  inputData,
1114  qScale,
1115  qOffset,
1116  outputExpectedData);
1117 }
1118 
1120  armnn::IWorkloadFactory& workloadFactory,
1122 {
1123  return HardSwishTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.1f, 0);
1124 }
1125 
1127  armnn::IWorkloadFactory& workloadFactory,
1129 {
1130  return HardSwishTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, 0.1f, 64);
1131 }
1132 
1134  armnn::IWorkloadFactory& workloadFactory,
1136 {
1137  return HardSwishTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, 0.1f, 0);
1138 }
1139 
1140 
1141 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
1143  armnn::IWorkloadFactory& workloadFactory,
1145  armnn::IWorkloadFactory& refWorkloadFactory,
1147  unsigned int batchSize = 5,
1148  float qScale = 0.0f,
1149  int32_t qOffset = 0)
1150 {
1151  IgnoreUnused(memoryManager);
1152  unsigned int width = 17;
1153  unsigned int height = 29;
1154  unsigned int channels = 2;
1155 
1156  float a = 0.234f;
1157  float b = -12.345f;
1158 
1159  armnn::TensorInfo inputTensorInfo;
1160  armnn::TensorInfo outputTensorInfo;
1161 
1162  unsigned int shape[] = {batchSize, channels, height, width};
1163 
1164  inputTensorInfo = armnn::TensorInfo(4, shape, ArmnnType);
1165  outputTensorInfo = armnn::TensorInfo(4, shape, ArmnnType);
1166 
1167  // Set quantization parameters if the requested type is a quantized type.
1168  if(armnn::IsQuantizedType<T>())
1169  {
1170  inputTensorInfo.SetQuantizationScale(qScale);
1171  inputTensorInfo.SetQuantizationOffset(qOffset);
1172  outputTensorInfo.SetQuantizationScale(qScale);
1173  outputTensorInfo.SetQuantizationOffset(qOffset);
1174  }
1175 
1176  float minVal = -10.f;
1178  {
1179  minVal = 0.f;
1180  }
1181 
1182  boost::multi_array<T, 4> input = MakeRandomTensor<T, 4>(inputTensorInfo, 21453, minVal, 10.f);
1183 
1184 
1185  LayerTestResult<T,4> ret(outputTensorInfo);
1186  auto boostArrayExtents = boost::extents
1187  [boost::numeric_cast<boost::multi_array_types::extent_gen::index>(batchSize)]
1188  [boost::numeric_cast<boost::multi_array_types::extent_gen::index>(channels)]
1189  [boost::numeric_cast<boost::multi_array_types::extent_gen::index>(height)]
1190  [boost::numeric_cast<boost::multi_array_types::extent_gen::index>(width)];
1191  ret.output.resize(boostArrayExtents);
1192  ret.outputExpected.resize(boostArrayExtents);
1193 
1194 
1195  std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
1196  std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
1197 
1198  std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refWorkloadFactory.CreateTensorHandle(inputTensorInfo);
1199  std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);
1200 
1202  armnn::WorkloadInfo info;
1203  AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
1204  AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
1205  data.m_Parameters.m_A = a;
1206  data.m_Parameters.m_B = b;
1207  data.m_Parameters.m_Function = f;
1208 
1209  armnn::ActivationQueueDescriptor refData = data;
1210  armnn::WorkloadInfo refInfo = info;
1211  SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
1212  SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
1213 
1214  std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateActivation(data, info);
1215  BOOST_ASSERT(workload != nullptr);
1216  std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateActivation(refData, refInfo);
1217  BOOST_ASSERT(workloadRef != nullptr);
1218 
1219  inputHandle->Allocate();
1220  outputHandle->Allocate();
1221  inputHandleRef->Allocate();
1222  outputHandleRef->Allocate();
1223 
1224  CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
1225  CopyDataToITensorHandle(inputHandleRef.get(), &input[0][0][0][0]);
1226 
1227  workload->Execute();
1228  workloadRef->Execute();
1229 
1230  CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
1231  CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());
1232 
1233  return ret;
1234 }
1235 
1237  armnn::IWorkloadFactory& workloadFactory,
1239  armnn::IWorkloadFactory& refWorkloadFactory,
1241  unsigned int batchSize)
1242 {
1243  return CompareActivationTestImpl<armnn::DataType::Float32>(
1244  workloadFactory, memoryManager, refWorkloadFactory, f, batchSize);
1245 }
1246 
1248  armnn::IWorkloadFactory& workloadFactory,
1250  armnn::IWorkloadFactory& refWorkloadFactory,
1252 {
1253  return CompareActivationTestImpl<armnn::DataType::QAsymmU8>(
1254  workloadFactory, memoryManager, refWorkloadFactory, f, 5, 0.1f, 50);
1255 }
1256 
1258  armnn::IWorkloadFactory& workloadFactory,
1260  armnn::IWorkloadFactory& refWorkloadFactory,
1262 {
1263  return CompareActivationTestImpl<armnn::DataType::QSymmS16>(
1264  workloadFactory, memoryManager, refWorkloadFactory, f, 5, 0.1f, 0);
1265 }
LayerTestResult< int16_t, 4 > SqrtInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< int16_t, 4 > ReLuInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< T, 4 > SoftReLuTestCommon(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, float qScale, int32_t qOffset)
LayerTestResult< float, 4 > LeakyReLuTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< T, 4 > SquareTestCommon(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, float qScale, int32_t qOffset)
LayerTestResult< float, 4 > SquareTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< int16_t, 4 > BoundedReLuInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< float, 4 > SoftReLuTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< float, 4 > ReLuTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< uint8_t, 4 > SquareUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< uint8_t, 4 > SqrtUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< int16_t, 4 > SquareInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< T, 4 > CompareActivationTestImpl(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, armnn::IWorkloadFactory &refWorkloadFactory, armnn::ActivationFunction f, unsigned int batchSize=5, float qScale=0.0f, int32_t qOffset=0)
boost::multi_array< T, n > outputExpected
LayerTestResult< T, 4 > TanhTestCommon(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, float qScale, int32_t qOffset)
LayerTestResult< uint8_t, 4 > BoundedReLuUint8UpperAndLowerBoundTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< float, 4 > CompareBoundedReLuTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, armnn::IWorkloadFactory &refWorkloadFactory, float upperBound, float lowerBound)
LayerTestResult< T, 4 > SqrtTestCommon(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, float qScale, int32_t qOffset)
void IgnoreUnused(Ts &&...)
virtual std::unique_ptr< IWorkload > CreateActivation(const ActivationQueueDescriptor &descriptor, const WorkloadInfo &info) const
LayerTestResult< T, 4 > ReLuTestCommon(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, float qScale, int32_t qOffset)
LayerTestResult< T, 4 > LeakyReLuTestCommon(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, float qScale, int32_t qOffset)
LayerTestResult< float, 4 > EluTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< int16_t, 4 > SimpleSigmoidInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< uint8_t, 4 > SoftReLuUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< T, 4 > HardSwishTestCommon(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, float qScale, int32_t qOffset)
LayerTestResult< float, 4 > BoundedReLuUpperAndLowerBoundTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< int16_t, 4 > LeakyReLuInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< int16_t, 4 > TanhInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< uint8_t, 4 > LeakyReLuUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< int16_t, 4 > HardSwishInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< int16_t, 4 > EluInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
std::shared_ptr< IMemoryManager > IMemoryManagerSharedPtr
LayerTestResult< float, 4 > TanhTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< uint8_t, 4 > AbsUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< uint8_t, 4 > CompareActivationUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, armnn::IWorkloadFactory &refWorkloadFactory, armnn::ActivationFunction f)
LayerTestResult< T, 4 > SimpleActivationTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, armnn::ActivationFunction activationFunction, float activationParameterA, float activationParameterB, float scale, int32_t offset, const std::vector< float > &inputData, float outScale, int32_t outOffset, const std::vector< float > &outputExpectedData)
LayerTestResult< int16_t, 4 > SoftReLuInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< float, 4 > AbsTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
void SetQuantizationScale(float scale)
Definition: Tensor.cpp:259
std::enable_if_t< std::is_unsigned< Source >::value &&std::is_unsigned< Dest >::value, Dest > numeric_cast(Source source)
Definition: NumericCast.hpp:33
LayerTestResult< float, 5 > SqrtNNTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
An ActivationDescriptor for the ActivationLayer.
Definition: Descriptors.hpp:20
void CopyDataFromITensorHandle(void *memory, const armnn::ITensorHandle *tensorHandle)
min(a, max(b, input)) ReLu1 & ReLu6.
LayerTestResult< T, 4 > AbsTestCommon(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, float qScale, int32_t qOffset)
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo, const bool IsMemoryManaged=true) const =0
boost::multi_array< T, n > output
LayerTestResult< int16_t, 4 > AbsInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
float m_A
Alpha upper bound value used by the activation functions. (BoundedReLu, Linear, TanH).
Definition: Descriptors.hpp:37
LayerTestResult< uint8_t, 4 > EluUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< uint8_t, 4 > ReLuUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< float, 4 > ConstantLinearActivationTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< uint8_t, 4 > ConstantLinearActivationUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< float, 4 > CompareActivationTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, armnn::IWorkloadFactory &refWorkloadFactory, armnn::ActivationFunction f, unsigned int batchSize)
LayerTestResult< T, 4 > EluTestCommon(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, float qScale, int32_t qOffset)
LayerTestResult< int16_t, 4 > ConstantLinearActivationInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< float, 4 > SimpleSigmoidTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
Contains information about inputs and outputs to a layer.
void SetQuantizationOffset(int32_t offset)
Definition: Tensor.cpp:275
LayerTestResult< float, 4 > BoundedReLuUpperBoundOnlyTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< T, 4 > ConstantLinearActivationTestCommon(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, float qScale=0.0f, int32_t qOffset=0)
LayerTestResult< uint8_t, 4 > BoundedReLuUint8UpperBoundOnlyTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< int16_t, 4 > CompareActivationInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, armnn::IWorkloadFactory &refWorkloadFactory, armnn::ActivationFunction f)
LayerTestResult< uint8_t, 4 > HardSwishUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< float, 4 > HardSwishTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< T, 4 > BoundedReLuTestCommon(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, float upperBound, float lowerBound, float inputScale, int32_t inputOffset, float outputScale, int32_t outputOffset, const std::vector< T > &inputData, const std::vector< T > &outputExpectedData, unsigned int inputWidth, unsigned int inputHeight, unsigned int inputChannels, unsigned int inputBatchSize)
LayerTestResult< uint8_t, 4 > TanhUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< T, 4 > SimpleSigmoidTestCommon(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, float qScale, int32_t qOffset)
float m_B
Beta lower bound value used by the activation functions. (BoundedReLu, Linear, TanH).
Definition: Descriptors.hpp:39
TensorInfo GetInputTensorInfo(const Network *network)
LayerTestResult< float, 4 > SqrtTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
ActivationFunction m_Function
The activation function to use (Sigmoid, TanH, Linear, ReLu, BoundedReLu, SoftReLu, LeakyReLu, Abs, Sqrt, Square).
Definition: Descriptors.hpp:35
LayerTestResult< uint8_t, 4 > SimpleSigmoidUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
ActivationFunction
Definition: Types.hpp:55
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)