From 6940dd720ebb6b3d1df8ca203ab696daefe58189 Mon Sep 17 00:00:00 2001
From: Jim Flynn
Date: Fri, 20 Mar 2020 12:25:56 +0000
Subject: renamed Documentation folder 20.02 and added .nojekyll file

Signed-off-by: Jim Flynn
---
 20.02/_activation_test_impl_8cpp.xhtml | 2492 ++++++++++++++++++++++++++++++++
 1 file changed, 2492 insertions(+)
 create mode 100644 20.02/_activation_test_impl_8cpp.xhtml

diff --git a/20.02/_activation_test_impl_8cpp.xhtml b/20.02/_activation_test_impl_8cpp.xhtml
new file mode 100644
index 0000000000..092b8d7a49
--- /dev/null
+++ b/20.02/_activation_test_impl_8cpp.xhtml
@@ -0,0 +1,2492 @@

ArmNN: src/backends/backendsCommon/test/layerTests/ActivationTestImpl.cpp File Reference
ActivationTestImpl.cpp File Reference
#include "ActivationTestImpl.hpp"
#include <QuantizeHelper.hpp>
#include <ResolveType.hpp>
#include <backendsCommon/test/ActivationFixture.hpp>
#include <backendsCommon/test/TensorCopyUtils.hpp>
#include <backendsCommon/test/WorkloadTestUtils.hpp>
#include <test/TensorHelpers.hpp>
#include <boost/multi_array.hpp>
#include <algorithm>

Go to the source code of this file.

Functions

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult< T, 4 > BoundedReLuTestCommon (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, float upperBound, float lowerBound, float inputScale, int32_t inputOffset, float outputScale, int32_t outputOffset, const std::vector< T > &inputData, const std::vector< T > &outputExpectedData, unsigned int inputWidth, unsigned int inputHeight, unsigned int inputChannels, unsigned int inputBatchSize)
 
LayerTestResult< float, 4 > BoundedReLuUpperAndLowerBoundTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< float, 4 > BoundedReLuUpperBoundOnlyTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< uint8_t, 4 > BoundedReLuUint8UpperBoundOnlyTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< uint8_t, 4 > BoundedReLuUint8UpperAndLowerBoundTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< float, 4 > CompareBoundedReLuTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, armnn::IWorkloadFactory &refWorkloadFactory, float upperBound, float lowerBound)
 
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult< T, 4 > ConstantLinearActivationTestCommon (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, float qScale=0.0f, int32_t qOffset=0)
 
LayerTestResult< float, 4 > ConstantLinearActivationTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< uint8_t, 4 > ConstantLinearActivationUint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< int16_t, 4 > ConstantLinearActivationInt16Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult< T, 4 > SimpleActivationTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, armnn::ActivationFunction activationFunction, float activationParameterA, float activationParameterB, float scale, int32_t offset, const std::vector< float > &inputData, float outScale, int32_t outOffset, const std::vector< float > &outputExpectedData)
 
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult< T, 4 > SimpleSigmoidTestCommon (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, float qScale, int32_t qOffset)
 
LayerTestResult< float, 4 > SimpleSigmoidTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< uint8_t, 4 > SimpleSigmoidUint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< int16_t, 4 > SimpleSigmoidInt16Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult< T, 4 > ReLuTestCommon (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, float qScale, int32_t qOffset)
 
LayerTestResult< int16_t, 4 > ReLuInt16Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< uint8_t, 4 > ReLuUint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< float, 4 > ReLuTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult< T, 4 > BoundedReLuTestCommon (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, float qScale, int32_t qOffset)
 
LayerTestResult< int16_t, 4 > BoundedReLuInt16Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult< T, 4 > SoftReLuTestCommon (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, float qScale, int32_t qOffset)
 
LayerTestResult< float, 4 > SoftReLuTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< uint8_t, 4 > SoftReLuUint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< int16_t, 4 > SoftReLuInt16Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult< T, 4 > LeakyReLuTestCommon (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, float qScale, int32_t qOffset)
 
LayerTestResult< float, 4 > LeakyReLuTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< uint8_t, 4 > LeakyReLuUint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< int16_t, 4 > LeakyReLuInt16Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult< T, 4 > AbsTestCommon (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, float qScale, int32_t qOffset)
 
LayerTestResult< float, 4 > AbsTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< uint8_t, 4 > AbsUint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< int16_t, 4 > AbsInt16Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< float, 5 > SqrtNNTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult< T, 4 > SqrtTestCommon (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, float qScale, int32_t qOffset)
 
LayerTestResult< float, 4 > SqrtTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< uint8_t, 4 > SqrtUint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< int16_t, 4 > SqrtInt16Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult< T, 4 > SquareTestCommon (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, float qScale, int32_t qOffset)
 
LayerTestResult< float, 4 > SquareTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< uint8_t, 4 > SquareUint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< int16_t, 4 > SquareInt16Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult< T, 4 > TanhTestCommon (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, float qScale, int32_t qOffset)
 
LayerTestResult< float, 4 > TanhTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< uint8_t, 4 > TanhUint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< int16_t, 4 > TanhInt16Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult< T, 4 > EluTestCommon (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, float qScale, int32_t qOffset)
 
LayerTestResult< float, 4 > EluTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< uint8_t, 4 > EluUint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< int16_t, 4 > EluInt16Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult< T, 4 > HardSwishTestCommon (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, float qScale, int32_t qOffset)
 
LayerTestResult< float, 4 > HardSwishTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< uint8_t, 4 > HardSwishUint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< int16_t, 4 > HardSwishInt16Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult< T, 4 > CompareActivationTestImpl (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, armnn::IWorkloadFactory &refWorkloadFactory, armnn::ActivationFunction f, unsigned int batchSize=5, float qScale=0.0f, int32_t qOffset=0)
 
LayerTestResult< float, 4 > CompareActivationTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, armnn::IWorkloadFactory &refWorkloadFactory, armnn::ActivationFunction f, unsigned int batchSize)
 
LayerTestResult< uint8_t, 4 > CompareActivationUint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, armnn::IWorkloadFactory &refWorkloadFactory, armnn::ActivationFunction f)
 
LayerTestResult< int16_t, 4 > CompareActivationInt16Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, armnn::IWorkloadFactory &refWorkloadFactory, armnn::ActivationFunction f)
 
Function Documentation

◆ AbsInt16Test()

LayerTestResult<int16_t, 4> AbsInt16Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)

Definition at line 785 of file ActivationTestImpl.cpp.
788 {
789  return AbsTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, 0.1f, 0);
790 }
◆ AbsTest()

LayerTestResult<float, 4> AbsTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)

Definition at line 771 of file ActivationTestImpl.cpp.
774 {
775  return AbsTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.1f, 0);
776 }
◆ AbsTestCommon()

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> AbsTestCommon (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, float qScale, int32_t qOffset)

Definition at line 737 of file ActivationTestImpl.cpp.

References armnn::Abs.
742 {
743  std::vector<float> inputData = {
744  -0.1f, -0.2f, -0.3f, -0.4f,
745  0.1f, 0.2f, 0.3f, 0.4f,
746  -1.0f, -2.0f, -3.0f, -4.0f,
747  1.0f, 2.0f, 3.0f, 4.0f
748  };
749 
750  // Calculate output values for input.
751  auto f = [](float value)
752  {
753  return std::abs(value);
754  };
755  std::vector<float> outputExpectedData(inputData.size());
756  std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
757 
758  return SimpleActivationTest<ArmnnType>(workloadFactory,
759  memoryManager,
760  armnn::ActivationFunction::Abs,
761  0.f,
762  0.f,
763  qScale,
764  qOffset,
765  inputData,
766  qScale,
767  qOffset,
768  outputExpectedData);
769 }
◆ AbsUint8Test()

LayerTestResult<uint8_t, 4> AbsUint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)

Definition at line 778 of file ActivationTestImpl.cpp.
781 {
782  return AbsTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, 0.0625f, 64);
783 }
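The quantisation parameters passed by the QAsymmU8 wrappers can be checked by hand. As a rough worked example (not taken from the source): with scale 0.0625 and offset 64, the real value represented by a quantised byte q is real = 0.0625 * (q - 64), so q = 0 maps to -4.0 and q = 255 maps to 11.9375, which covers the test inputs in [-4, 4]; the expected output 4.0 quantises back to round(4.0 / 0.0625) + 64 = 128.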
◆ BoundedReLuInt16Test()

LayerTestResult<int16_t, 4> BoundedReLuInt16Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)

Definition at line 614 of file ActivationTestImpl.cpp.
617 {
618  return ReLuTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, 0.1f, 0);
619 }
◆ BoundedReLuTestCommon() [1/2]

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> BoundedReLuTestCommon (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, float upperBound, float lowerBound, float inputScale, int32_t inputOffset, float outputScale, int32_t outputOffset, const std::vector< T > &inputData, const std::vector< T > &outputExpectedData, unsigned int inputWidth, unsigned int inputHeight, unsigned int inputChannels, unsigned int inputBatchSize)

Definition at line 23 of file ActivationTestImpl.cpp.

References armnn::BoundedReLu, CopyDataFromITensorHandle(), CopyDataToITensorHandle(), IWorkloadFactory::CreateActivation(), IWorkloadFactory::CreateTensorHandle(), armnn::IgnoreUnused(), ActivationDescriptor::m_A, ActivationDescriptor::m_B, ActivationDescriptor::m_Function, QueueDescriptorWithParameters< LayerDescriptor >::m_Parameters, LayerTestResult< T, n >::output, LayerTestResult< T, n >::outputExpected, and TensorInfo::SetQuantizationScale().
38 {
39  IgnoreUnused(memoryManager);
40  unsigned int outputWidth = inputWidth;
41  unsigned int outputHeight = inputHeight;
42  unsigned int outputChannels = inputChannels;
43  unsigned int outputBatchSize = inputBatchSize;
44 
45  armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth }, ArmnnType);
46 
47  armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth }, ArmnnType);
48 
49  if(armnn::IsQuantizedType<T>())
50  {
51  inputTensorInfo.SetQuantizationScale(inputScale);
52  inputTensorInfo.SetQuantizationOffset(inputOffset);
53 
54  outputTensorInfo.SetQuantizationScale(outputScale);
55  outputTensorInfo.SetQuantizationOffset(outputOffset);
56  }
57 
58  LayerTestResult<T, 4> result(inputTensorInfo);
59 
60  auto input = MakeTensor<T, 4>(inputTensorInfo, inputData);
61 
62  std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
63  std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
64 
65  // Setup bounded ReLu.
66  armnn::ActivationQueueDescriptor descriptor;
67  armnn::WorkloadInfo workloadInfo;
68  AddInputToWorkload(descriptor, workloadInfo, inputTensorInfo, inputHandle.get());
69  AddOutputToWorkload(descriptor, workloadInfo, outputTensorInfo, outputHandle.get());
70 
71  descriptor.m_Parameters.m_Function = armnn::ActivationFunction::BoundedReLu;
72  descriptor.m_Parameters.m_A = upperBound;
73  descriptor.m_Parameters.m_B = lowerBound;
74 
75  std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateActivation(descriptor, workloadInfo);
76 
77  inputHandle->Allocate();
78  outputHandle->Allocate();
79 
80  CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
81 
82  workload->Execute();
83 
84  CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
85 
86  result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputExpectedData);
87 
88  return result;
89 }
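A minimal sketch of how a backend unit test might drive one of the wrappers built on this helper (illustrative only; it assumes the reference backend's RefWorkloadFactory and the CompareTensors helper from TensorHelpers.hpp are available to the test target):

// Hypothetical driver: run the bounded ReLU test against the reference backend.
armnn::RefWorkloadFactory workloadFactory;
LayerTestResult<float, 4> result =
    BoundedReLuUpperAndLowerBoundTest(workloadFactory, nullptr /* no memory manager */);
// LayerTestResult carries both the computed output and the manually calculated reference.
BOOST_TEST(CompareTensors(result.output, result.outputExpected));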
◆ BoundedReLuTestCommon() [2/2]

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> BoundedReLuTestCommon (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, float qScale, int32_t qOffset)

Definition at line 579 of file ActivationTestImpl.cpp.

References armnn::BoundedReLu.
584 {
585  std::vector<float> inputData = {
586  -0.1f, -0.2f, -0.3f, -0.4f,
587  0.1f, 0.2f, 0.3f, 0.4f,
588  -1.0f, -2.0f, -3.0f, -4.0f,
589  1.0f, 2.0f, 3.0f, 4.0f
590  };
591  const float a = 1.0f;
592  const float b = -1.0f;
593  // Calculate output values for input.
594  auto f = [a, b](float value)
595  {
596  return std::min(a, std::max(b, value));
597  };
598  std::vector<float> outputExpectedData(inputData.size());
599  std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
600 
601  return SimpleActivationTest<ArmnnType>(workloadFactory,
602  memoryManager,
603  armnn::ActivationFunction::BoundedReLu,
604  a,
605  b,
606  qScale,
607  qOffset,
608  inputData,
609  qScale,
610  qOffset,
611  outputExpectedData);
612 }
◆ BoundedReLuUint8UpperAndLowerBoundTest()

LayerTestResult<uint8_t, 4> BoundedReLuUint8UpperAndLowerBoundTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)

Definition at line 184 of file ActivationTestImpl.cpp.

References CopyDataFromITensorHandle(), CopyDataToITensorHandle(), IWorkloadFactory::CreateActivation(), IWorkloadFactory::CreateTensorHandle(), armnn::Float32, armnn::GetInputTensorInfo(), armnn::IgnoreUnused(), and QueueDescriptorWithParameters< LayerDescriptor >::m_Parameters.
187 {
188  unsigned int inputWidth = 3u;
189  unsigned int inputHeight = 2u;
190  unsigned int inputChannels = 1u;
191  unsigned int inputBatchSize = 1;
192 
193  std::vector<uint8_t> input = std::vector<uint8_t>{
194  51, 230, 28,
195  251, 8, 92
196  };
197 
198  // Calculated manually.
199  std::vector<uint8_t> output = std::vector<uint8_t>{
200  51, 192, 32,
201  192, 32, 92
202  };
203 
204  int32_t inputOffset = 112;
205  float inputScale = 0.0125f;
206 
207  return BoundedReLuTestCommon<armnn::DataType::QAsymmU8>(
208  workloadFactory, memoryManager, 1.0f, -1.0f,
209  inputScale, inputOffset, inputScale, inputOffset, // Input/output scale & offset same.
210  input, output, inputWidth, inputHeight, inputChannels, inputBatchSize);
211 }
◆ BoundedReLuUint8UpperBoundOnlyTest()

LayerTestResult<uint8_t, 4> BoundedReLuUint8UpperBoundOnlyTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)

Definition at line 153 of file ActivationTestImpl.cpp.
156 {
157  unsigned int inputWidth = 3u;
158  unsigned int inputHeight = 2u;
159  unsigned int inputChannels = 1u;
160  unsigned int inputBatchSize = 1;
161 
162  std::vector<uint8_t> input = std::vector<uint8_t>{
163  51, 124, 28,
164  251, 8, 92
165  };
166 
167  // Calculated manually.
168  std::vector<uint8_t> output = std::vector<uint8_t>{
169  0, 122, 0,
170  255, 0, 58
171  };
172 
173  float inputScale = 12.0f / 255.0f;
174  int32_t inputOffset = 63;
175  float outputScale = 6.0f / 255.0f;
176  int32_t outputOffset = 0;
177 
178  return BoundedReLuTestCommon<armnn::DataType::QAsymmU8>(
179  workloadFactory, memoryManager, 6.0f, 0.0f,
180  inputScale, inputOffset, outputScale, outputOffset,
181  input, output, inputWidth, inputHeight, inputChannels, inputBatchSize);
182 }
◆ BoundedReLuUpperAndLowerBoundTest()

LayerTestResult<float, 4> BoundedReLuUpperAndLowerBoundTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)

Definition at line 91 of file ActivationTestImpl.cpp.
94 {
95  unsigned int inputWidth = 4u;
96  unsigned int inputHeight = 5u;
97  unsigned int inputChannels = 1u;
98  unsigned int inputBatchSize = 1;
99 
100  std::vector<float> input = std::vector<float>{
101  -2.0f, 0.1f, 0.5f, 1.25f,
102  0.786f, 0.9875f, -1.5f, 0.384f,
103  1.0001f, 3.5f, 7.5f, 0.896f,
104  2.126f, 2.0f, 0.3f, 0.15f,
105  0.999f, 1.2f, 0.89f, 6.1f,
106  };
107 
108  // Calculated manually.
109  std::vector<float> output = std::vector<float>{
110  -1.0f, 0.1f, 0.5f, 1.0f,
111  0.786f, 0.9875f, -1.0f, 0.384f,
112  1.0f, 1.0f, 1.0f, 0.896f,
113  1.0f, 1.0f, 0.3f, 0.15f,
114  0.999f, 1.0f, 0.89f, 1.0f,
115  };
116 
117  return BoundedReLuTestCommon<armnn::DataType::Float32>(
118  workloadFactory, memoryManager, 1.0f, -1.0f, 1.0f, 0, 1.0f, 0, input, output,
119  inputWidth, inputHeight, inputChannels, inputBatchSize);
120 }
◆ BoundedReLuUpperBoundOnlyTest()

LayerTestResult<float, 4> BoundedReLuUpperBoundOnlyTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)

Definition at line 122 of file ActivationTestImpl.cpp.
125 {
126  unsigned int inputWidth = 4u;
127  unsigned int inputHeight = 5u;
128  unsigned int inputChannels = 1u;
129  unsigned int inputBatchSize = 1;
130 
131  std::vector<float> input = std::vector<float>{
132  -1.0f, 0.1f, 0.5f, 6.25f,
133  0.786f, 5.9875f, -0.5f, 0.384f,
134  6.0001f, 3.5f, 7.5f, 0.896f,
135  2.126f, 12.0f, 0.3f, 0.15f,
136  0.999f, 1.2f, 0.89f, 6.1f,
137  };
138 
139  // Calculated manually.
140  std::vector<float> output = std::vector<float>{
141  0.0f, 0.1f, 0.5f, 6.0f,
142  0.786f, 5.9875f, 0.0f, 0.384f,
143  6.0f, 3.5f, 6.0f, 0.896f,
144  2.126f, 6.0f, 0.3f, 0.15f,
145  0.999f, 1.2f, 0.89f, 6.0f,
146  };
147 
148  return BoundedReLuTestCommon<armnn::DataType::Float32>(
149  workloadFactory, memoryManager, 6.0f, 0.0f, 1.0f, 0, 1.0f, 0, input, output,
150  inputWidth, inputHeight, inputChannels, inputBatchSize);
151 }
◆ CompareActivationInt16Test()

LayerTestResult<int16_t, 4> CompareActivationInt16Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, armnn::IWorkloadFactory &refWorkloadFactory, armnn::ActivationFunction f)

Definition at line 1257 of file ActivationTestImpl.cpp.
1262 {
1263  return CompareActivationTestImpl<armnn::DataType::QSymmS16>(
1264  workloadFactory, memoryManager, refWorkloadFactory, f, 5, 0.1f, 0);
1265 }
◆ CompareActivationTest()

LayerTestResult<float, 4> CompareActivationTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, armnn::IWorkloadFactory &refWorkloadFactory, armnn::ActivationFunction f, unsigned int batchSize)

Definition at line 1236 of file ActivationTestImpl.cpp.
1242 {
1243  return CompareActivationTestImpl<armnn::DataType::Float32>(
1244  workloadFactory, memoryManager, refWorkloadFactory, f, batchSize);
1245 }
◆ CompareActivationTestImpl()

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> CompareActivationTestImpl (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, armnn::IWorkloadFactory &refWorkloadFactory, armnn::ActivationFunction f, unsigned int batchSize = 5, float qScale = 0.0f, int32_t qOffset = 0)

Definition at line 1142 of file ActivationTestImpl.cpp.

References CopyDataFromITensorHandle(), CopyDataToITensorHandle(), IWorkloadFactory::CreateActivation(), IWorkloadFactory::CreateTensorHandle(), armnn::IgnoreUnused(), ActivationDescriptor::m_A, ActivationDescriptor::m_B, ActivationDescriptor::m_Function, QueueDescriptorWithParameters< LayerDescriptor >::m_Parameters, armnn::numeric_cast(), TensorInfo::SetQuantizationOffset(), TensorInfo::SetQuantizationScale(), and armnn::Sqrt.
1150 {
1151  IgnoreUnused(memoryManager);
1152  unsigned int width = 17;
1153  unsigned int height = 29;
1154  unsigned int channels = 2;
1155 
1156  float a = 0.234f;
1157  float b = -12.345f;
1158 
1159  armnn::TensorInfo inputTensorInfo;
1160  armnn::TensorInfo outputTensorInfo;
1161 
1162  unsigned int shape[] = {batchSize, channels, height, width};
1163 
1164  inputTensorInfo = armnn::TensorInfo(4, shape, ArmnnType);
1165  outputTensorInfo = armnn::TensorInfo(4, shape, ArmnnType);
1166 
1167  // Set quantization parameters if the requested type is a quantized type.
1168  if(armnn::IsQuantizedType<T>())
1169  {
1170  inputTensorInfo.SetQuantizationScale(qScale);
1171  inputTensorInfo.SetQuantizationOffset(qOffset);
1172  outputTensorInfo.SetQuantizationScale(qScale);
1173  outputTensorInfo.SetQuantizationOffset(qOffset);
1174  }
1175 
1176  float minVal = -10.f;
1177  if (f == armnn::ActivationFunction::Sqrt)
1178  {
1179  minVal = 0.f;
1180  }
1181 
1182  boost::multi_array<T, 4> input = MakeRandomTensor<T, 4>(inputTensorInfo, 21453, minVal, 10.f);
1183 
1184 
1185  LayerTestResult<T,4> ret(outputTensorInfo);
1186  auto boostArrayExtents = boost::extents
1187  [boost::numeric_cast<boost::multi_array_types::extent_gen::index>(batchSize)]
1188  [boost::numeric_cast<boost::multi_array_types::extent_gen::index>(channels)]
1189  [boost::numeric_cast<boost::multi_array_types::extent_gen::index>(height)]
1190  [boost::numeric_cast<boost::multi_array_types::extent_gen::index>(width)];
1191  ret.output.resize(boostArrayExtents);
1192  ret.outputExpected.resize(boostArrayExtents);
1193 
1194 
1195  std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
1196  std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
1197 
1198  std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refWorkloadFactory.CreateTensorHandle(inputTensorInfo);
1199  std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);
1200 
1201  armnn::ActivationQueueDescriptor data;
1202  armnn::WorkloadInfo info;
1203  AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
1204  AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
1205  data.m_Parameters.m_A = a;
1206  data.m_Parameters.m_B = b;
1207  data.m_Parameters.m_Function = f;
1208 
1209  armnn::ActivationQueueDescriptor refData = data;
1210  armnn::WorkloadInfo refInfo = info;
1211  SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
1212  SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
1213 
1214  std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateActivation(data, info);
1215  BOOST_ASSERT(workload != nullptr);
1216  std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateActivation(refData, refInfo);
1217  BOOST_ASSERT(workloadRef != nullptr);
1218 
1219  inputHandle->Allocate();
1220  outputHandle->Allocate();
1221  inputHandleRef->Allocate();
1222  outputHandleRef->Allocate();
1223 
1224  CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
1225  CopyDataToITensorHandle(inputHandleRef.get(), &input[0][0][0][0]);
1226 
1227  workload->Execute();
1228  workloadRef->Execute();
1229 
1230  CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
1231  CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());
1232 
1233  return ret;
1234 }
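Note that the "expected" data here is not computed on the host: the same activation workload is built twice, once with workloadFactory (the backend under test) and once with refWorkloadFactory, and the two outputs are stored in ret.output and ret.outputExpected for the caller to compare. A rough usage sketch (illustrative only; backendWorkloadFactory and refWorkloadFactory are assumed to be pre-constructed factories, e.g. the backend under test and the reference CPU backend):

// Hypothetical comparison of a Sigmoid activation between two workload factories.
LayerTestResult<float, 4> result = CompareActivationTest(
    backendWorkloadFactory,               // backend under test (assumed)
    nullptr,                              // memory manager (optional)
    refWorkloadFactory,                   // reference implementation (assumed)
    armnn::ActivationFunction::Sigmoid,
    5);                                   // batch size
BOOST_TEST(CompareTensors(result.output, result.outputExpected));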
◆ CompareActivationUint8Test()

LayerTestResult<uint8_t, 4> CompareActivationUint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, armnn::IWorkloadFactory &refWorkloadFactory, armnn::ActivationFunction f)

Definition at line 1247 of file ActivationTestImpl.cpp.
1252 {
1253  return CompareActivationTestImpl<armnn::DataType::QAsymmU8>(
1254  workloadFactory, memoryManager, refWorkloadFactory, f, 5, 0.1f, 50);
1255 }
◆ CompareBoundedReLuTest()

LayerTestResult<float, 4> CompareBoundedReLuTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, armnn::IWorkloadFactory &refWorkloadFactory, float upperBound, float lowerBound)

Definition at line 284 of file ActivationTestImpl.cpp.

References armnn::BoundedReLu, ActivationDescriptor::m_A, ActivationDescriptor::m_B, ActivationDescriptor::m_Function, LayerTestResult< T, n >::output, and LayerTestResult< T, n >::outputExpected.
290 {
291  LayerTestResult<float, 4> result(BoundedReLuRandomInputTestTraits::GetOutputTensorInfo());
292 
293  armnn::ActivationDescriptor activationDescriptor;
294  activationDescriptor.m_Function = armnn::ActivationFunction::BoundedReLu;
295  activationDescriptor.m_A = upperBound;
296  activationDescriptor.m_B = lowerBound;
297 
298  result.output = BoundedReLuRandomInputTest(
299  workloadFactory, memoryManager, 0.0f, upperBound, activationDescriptor);
300  result.outputExpected = BoundedReLuRandomInputTest(
301  refWorkloadFactory, nullptr, 0.0f, upperBound, activationDescriptor);
302 
303  return result;
304 }
◆ ConstantLinearActivationInt16Test()

LayerTestResult<int16_t, 4> ConstantLinearActivationInt16Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)

Definition at line 383 of file ActivationTestImpl.cpp.
386 {
387  return ConstantLinearActivationTestCommon<armnn::DataType::QSymmS16>(
388  workloadFactory, memoryManager, 0.1f, 0);
389 }
◆ ConstantLinearActivationTest()

LayerTestResult<float, 4> ConstantLinearActivationTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)

Definition at line 368 of file ActivationTestImpl.cpp.
371 {
372  return ConstantLinearActivationTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
373 }
◆ ConstantLinearActivationTestCommon()

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> ConstantLinearActivationTestCommon (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, float qScale = 0.0f, int32_t qOffset = 0)

Definition at line 307 of file ActivationTestImpl.cpp.

References CopyDataFromITensorHandle(), CopyDataToITensorHandle(), IWorkloadFactory::CreateActivation(), IWorkloadFactory::CreateTensorHandle(), armnn::IgnoreUnused(), armnn::Linear, ActivationDescriptor::m_A, ActivationDescriptor::m_B, ActivationDescriptor::m_Function, QueueDescriptorWithParameters< LayerDescriptor >::m_Parameters, LayerTestResult< T, n >::output, LayerTestResult< T, n >::outputExpected, TensorInfo::SetQuantizationOffset(), and TensorInfo::SetQuantizationScale().
312 {
313  IgnoreUnused(memoryManager);
314  unsigned int inputHeight = 20;
315  unsigned int inputWidth = 17;
316  unsigned int inputChannels = 3;
317  unsigned int batchSize = 5;
318 
319  armnn::TensorInfo inputTensorInfo;
320  armnn::TensorInfo outputTensorInfo;
321 
322  unsigned int shape[] = {batchSize, inputChannels, inputHeight, inputWidth};
323 
324  inputTensorInfo = armnn::TensorInfo(4, shape, ArmnnType);
325  outputTensorInfo = armnn::TensorInfo(4, shape, ArmnnType);
326 
327  // Set quantization parameters if the requested type is a quantized type.
328  if(armnn::IsQuantizedType<T>())
329  {
330  inputTensorInfo.SetQuantizationScale(qScale);
331  inputTensorInfo.SetQuantizationOffset(qOffset);
332  outputTensorInfo.SetQuantizationScale(qScale);
333  outputTensorInfo.SetQuantizationOffset(qOffset);
334  }
335 
336  LayerTestResult<T, 4> ret(outputTensorInfo);
337 
338  std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
339  std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
340 
341  // Do linear activation that should leave the tensor unchanged.
342  armnn::ActivationQueueDescriptor data;
343  armnn::WorkloadInfo info;
344  AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
345  AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
346  data.m_Parameters.m_A = 1.0f;
347  data.m_Parameters.m_B = 0.0f;
348  data.m_Parameters.m_Function = armnn::ActivationFunction::Linear;
349 
350  std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateActivation(data, info);
351 
352  inputHandle->Allocate();
353  outputHandle->Allocate();
354 
355  boost::multi_array<T, 4> input = MakeRandomTensor<T, 4>(inputTensorInfo, 7123561);
356  CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
357 
358  workload->Execute();
359 
360  CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
361 
362  // Ensure output equals input.
363  ret.outputExpected = input;
364 
365  return ret;
366 }
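As a one-line check of the expected data: the Linear activation computes f(x) = m_A * x + m_B, and with m_A = 1.0 and m_B = 0.0 this reduces to f(x) = 1 * x + 0 = x, which is why the randomly generated input tensor itself is used as the expected output.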
◆ ConstantLinearActivationUint8Test()

LayerTestResult<uint8_t, 4> ConstantLinearActivationUint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)

Definition at line 375 of file ActivationTestImpl.cpp.
378 {
379  return ConstantLinearActivationTestCommon<armnn::DataType::QAsymmU8>(
380  workloadFactory, memoryManager, 4.0f, 3);
381 }
◆ EluInt16Test()

LayerTestResult<int16_t, 4> EluInt16Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)

Definition at line 1070 of file ActivationTestImpl.cpp.
1073 {
1074  return EluTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, 0.1f, 0);
1075 }
◆ EluTest()

LayerTestResult<float, 4> EluTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)

Definition at line 1056 of file ActivationTestImpl.cpp.
1059 {
1060  return EluTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.1f, 0);
1061 }
◆ EluTestCommon()

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> EluTestCommon (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, float qScale, int32_t qOffset)

Definition at line 1020 of file ActivationTestImpl.cpp.

References armnn::Elu.
1025 {
1026  std::vector<float> inputData = {
1027  -0.1f, -0.2f, -0.3f, -0.4f,
1028  0.1f, 0.2f, 0.3f, 0.4f,
1029  -1.0f, -2.0f, -3.0f, -4.0f,
1030  1.0f, 2.0f, 3.0f, 4.0f
1031  };
1032 
1033 
1034  const float a = 0.01f;
1035  // Calculate output values for input.
1036  auto f = [a](float value)
1037  {
1038  return (value >= 0) ? value : a * (expf(value) - 1);
1039  };
1040  std::vector<float> outputExpectedData(inputData.size());
1041  std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
1042 
1043  return SimpleActivationTest<ArmnnType>(workloadFactory,
1044  memoryManager,
1045  armnn::ActivationFunction::Elu,
1046  a,
1047  0.0f,
1048  qScale,
1049  qOffset,
1050  inputData,
1051  qScale,
1052  qOffset,
1053  outputExpectedData);
1054 }
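For the alpha value of 0.01 used here, ELU is f(x) = x for x >= 0 and f(x) = 0.01 * (e^x - 1) otherwise. As a quick sanity check of the lambda above: f(2.0) = 2.0, while f(-1.0) = 0.01 * (e^-1 - 1) ≈ 0.01 * (0.3679 - 1) ≈ -0.0063 (values rounded).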
◆ EluUint8Test()

LayerTestResult<uint8_t, 4> EluUint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)

Definition at line 1063 of file ActivationTestImpl.cpp.
1066 {
1067  return EluTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, 0.1f, 64);
1068 }
◆ HardSwishInt16Test()

LayerTestResult<int16_t, 4> HardSwishInt16Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)

Definition at line 1133 of file ActivationTestImpl.cpp.
1136 {
1137  return HardSwishTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, 0.1f, 0);
1138 }
◆ HardSwishTest()

LayerTestResult<float, 4> HardSwishTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)

Definition at line 1119 of file ActivationTestImpl.cpp.
1122 {
1123  return HardSwishTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.1f, 0);
1124 }
◆ HardSwishTestCommon()

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> HardSwishTestCommon (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, float qScale, int32_t qOffset)

Definition at line 1079 of file ActivationTestImpl.cpp.

References armnn::HardSwish.
1084 {
1085  std::vector<float> inputData = {
1086  -0.1f, -0.2f, -0.3f, -0.4f,
1087  0.1f, 0.2f, 0.3f, 0.4f,
1088  -1.0f, -2.0f, -3.0f, -4.0f,
1089  1.0f, 2.0f, 3.0f, 4.0f
1090  };
1091  // Calculate output values for input.
1092  auto f = [](float x)
1093  {
1094  // Break down the calculation to help with verification.
1095  // hard_swish(x) = x * relu6(x+3) / 6
1096  // relu6(x) = min(max(x,0),6)
1097  float reLu6_step1 = std::max((x + 3),0.0f);
1098  float reLu6Complete = std::min(reLu6_step1, 6.0f);
1099  float hardSwish_step1 = x * reLu6Complete;
1100  float result = hardSwish_step1 / 6;
1101  return result;
1102  };
1103  std::vector<float> outputExpectedData(inputData.size());
1104  std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
1105 
1106  return SimpleActivationTest<ArmnnType>(workloadFactory,
1107  memoryManager,
1108  armnn::ActivationFunction::HardSwish,
1109  0.f,
1110  0.f,
1111  qScale,
1112  qOffset,
1113  inputData,
1114  qScale,
1115  qOffset,
1116  outputExpectedData);
1117 }
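Working the commented formula through two of the test inputs as a sanity check: hard_swish(-0.4) = -0.4 * min(max(-0.4 + 3, 0), 6) / 6 = -0.4 * 2.6 / 6 ≈ -0.1733, and hard_swish(4.0) = 4.0 * min(7, 6) / 6 = 4.0 (values rounded).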
◆ HardSwishUint8Test()

LayerTestResult<uint8_t, 4> HardSwishUint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)

Definition at line 1126 of file ActivationTestImpl.cpp.
1129 {
1130  return HardSwishTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, 0.1f, 64);
1131 }
◆ LeakyReLuInt16Test()

LayerTestResult<int16_t, 4> LeakyReLuInt16Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)

Definition at line 729 of file ActivationTestImpl.cpp.
732 {
733  return LeakyReLuTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, 0.1f, 0);
734 }
◆ LeakyReLuTest()

LayerTestResult<float, 4> LeakyReLuTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)

Definition at line 715 of file ActivationTestImpl.cpp.
718 {
719  return LeakyReLuTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.1f, 0);
720 }
◆ LeakyReLuTestCommon()

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> LeakyReLuTestCommon (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, float qScale, int32_t qOffset)

Definition at line 680 of file ActivationTestImpl.cpp.

References armnn::LeakyReLu.
685 {
686  std::vector<float> inputData = {
687  -0.1f, -0.2f, -0.3f, -0.4f,
688  0.1f, 0.2f, 0.3f, 0.4f,
689  -1.0f, -2.0f, -3.0f, -4.0f,
690  1.0f, 2.0f, 3.0f, 4.0f
691  };
692 
693  const float a = 0.01f;
694  // Calculate output values for input.
695  auto f = [a](float value)
696  {
697  return value > 0.0f ? value : (value * a);
698  };
699  std::vector<float> outputExpectedData(inputData.size());
700  std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
701 
702  return SimpleActivationTest<ArmnnType>(workloadFactory,
703  memoryManager,
704  armnn::ActivationFunction::LeakyReLu,
705  a,
706  0.f,
707  qScale,
708  qOffset,
709  inputData,
710  qScale,
711  qOffset,
712  outputExpectedData);
713 }
◆ LeakyReLuUint8Test()

LayerTestResult<uint8_t, 4> LeakyReLuUint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)

Definition at line 722 of file ActivationTestImpl.cpp.
725 {
726  return LeakyReLuTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, 0.0625f, 64);
727 }
◆ ReLuInt16Test()

LayerTestResult<int16_t, 4> ReLuInt16Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)

Definition at line 555 of file ActivationTestImpl.cpp.
558 {
559  return ReLuTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, 0.1f, 0);
560 }
◆ ReLuTest()

LayerTestResult<float, 4> ReLuTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)

Definition at line 570 of file ActivationTestImpl.cpp.
573 {
574  return ReLuTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.1f, 0);
575 }
◆ ReLuTestCommon()

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> ReLuTestCommon (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, float qScale, int32_t qOffset)

Definition at line 521 of file ActivationTestImpl.cpp.

References armnn::ReLu.
526 {
527  std::vector<float> inputData = {
528  -0.1f, -0.2f, -0.3f, -0.4f,
529  0.1f, 0.2f, 0.3f, 0.4f,
530  -1.0f, -2.0f, -3.0f, -4.0f,
531  1.0f, 2.0f, 3.0f, 4.0f
532  };
533 
534  // Calculate output values for input.
535  auto f = [](float value)
536  {
537  return std::fmax(0.0f, value);
538  };
539  std::vector<float> outputExpectedData(inputData.size());
540  std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
541 
542  return SimpleActivationTest<ArmnnType>(workloadFactory,
543  memoryManager,
544  armnn::ActivationFunction::ReLu,
545  0.f,
546  0.f,
547  qScale,
548  qOffset,
549  inputData,
550  qScale,
551  qOffset,
552  outputExpectedData);
553 }
◆ ReLuUint8Test()

LayerTestResult<uint8_t, 4> ReLuUint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)

Definition at line 563 of file ActivationTestImpl.cpp.
566 {
567  return ReLuTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, 0.1f, 0);
568 }
◆ SimpleActivationTest()

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleActivationTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, armnn::ActivationFunction activationFunction, float activationParameterA, float activationParameterB, float scale, int32_t offset, const std::vector< float > &inputData, float outScale, int32_t outOffset, const std::vector< float > &outputExpectedData)

Definition at line 392 of file ActivationTestImpl.cpp.

References CopyDataFromITensorHandle(), CopyDataToITensorHandle(), IWorkloadFactory::CreateActivation(), IWorkloadFactory::CreateTensorHandle(), armnn::IgnoreUnused(), ActivationDescriptor::m_A, ActivationDescriptor::m_B, ActivationDescriptor::m_Function, QueueDescriptorWithParameters< LayerDescriptor >::m_Parameters, LayerTestResult< T, n >::output, LayerTestResult< T, n >::outputExpected, and TensorInfo::SetQuantizationScale().
404 {
405  IgnoreUnused(memoryManager);
406  constexpr static unsigned int inputWidth = 16u;
407  constexpr static unsigned int inputHeight = 1u;
408  constexpr static unsigned int inputChannels = 1u;
409  constexpr static unsigned int inputBatchSize = 1u;
410 
411  constexpr static unsigned int outputWidth = inputWidth;
412  constexpr static unsigned int outputHeight = inputHeight;
413  constexpr static unsigned int outputChannels = inputChannels;
414  constexpr static unsigned int outputBatchSize = inputBatchSize;
415 
416  armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth }, ArmnnType);
417  armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth }, ArmnnType);
418 
419  // Set quantization parameters if the requested type is a quantized type.
420  if(armnn::IsQuantizedType<T>())
421  {
422  inputTensorInfo.SetQuantizationScale(scale);
423  inputTensorInfo.SetQuantizationOffset(offset);
424  outputTensorInfo.SetQuantizationScale(outScale);
425  outputTensorInfo.SetQuantizationOffset(outOffset);
426  }
427 
428  LayerTestResult<T, 4> result(inputTensorInfo);
429 
430  auto input = MakeTensor<T, 4>(inputTensorInfo, armnnUtils::QuantizedVector<T>(inputData, scale, offset));
431 
432  std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
433  std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
434 
435  // Setup bounded ReLu.
436  armnn::ActivationQueueDescriptor descriptor;
437  armnn::WorkloadInfo workloadInfo;
438  AddInputToWorkload(descriptor, workloadInfo, inputTensorInfo, inputHandle.get());
439  AddOutputToWorkload(descriptor, workloadInfo, outputTensorInfo, outputHandle.get());
440 
441  descriptor.m_Parameters.m_Function = activationFunction;
442  descriptor.m_Parameters.m_A = activationParameterA;
443  descriptor.m_Parameters.m_B = activationParameterB;
444 
445  std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateActivation(descriptor, workloadInfo);
446 
447  inputHandle->Allocate();
448  outputHandle->Allocate();
449 
450  CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
451 
452  workload->Execute();
453 
454  CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
455 
456  // Calculated manually.
457  result.outputExpected =
458  MakeTensor<T, 4>(outputTensorInfo, armnnUtils::QuantizedVector<T>(outputExpectedData, outScale, outOffset));
459 
460  return result;
461 }
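All of the XxxTestCommon helpers above follow the same recipe around this function: build 16 host-side input values (the helper creates a fixed 1x1x1x16 tensor), compute the reference outputs on the host, and pass both vectors in. A condensed, illustrative sketch (not from the source) for another activation, with workloadFactory and memoryManager assumed to be in scope:

// Illustrative only: the caller later compares result.output against result.outputExpected.
std::vector<float> inputData = { -0.1f, -0.2f, -0.3f, -0.4f,
                                  0.1f,  0.2f,  0.3f,  0.4f,
                                 -1.0f, -2.0f, -3.0f, -4.0f,
                                  1.0f,  2.0f,  3.0f,  4.0f };
std::vector<float> expected(inputData.size());
std::transform(inputData.begin(), inputData.end(), expected.begin(),
               [](float x) { return std::tanh(x); }); // a = 1, b = 1 gives plain tanh

auto result = SimpleActivationTest<armnn::DataType::Float32>(
    workloadFactory, memoryManager,
    armnn::ActivationFunction::TanH,
    1.f, 1.f,   // activationParameterA / activationParameterB
    1.f, 0,     // input quantisation scale / offset (ignored for Float32)
    inputData,
    1.f, 0,     // output quantisation scale / offset
    expected);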
◆ SimpleSigmoidInt16Test()

LayerTestResult<int16_t, 4> SimpleSigmoidInt16Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)

Definition at line 513 of file ActivationTestImpl.cpp.
516 {
517  return SimpleSigmoidTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, 0.1f, 0);
518 }
◆ SimpleSigmoidTest()

LayerTestResult<float, 4> SimpleSigmoidTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)

Definition at line 499 of file ActivationTestImpl.cpp.
502 {
503  return SimpleSigmoidTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
504 }
◆ SimpleSigmoidTestCommon()

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleSigmoidTestCommon (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, float qScale, int32_t qOffset)

Definition at line 464 of file ActivationTestImpl.cpp.

References armnn::Sigmoid.
469 {
470  std::vector<float> inputData =
471  {
472  -0.1f, -0.2f, -0.3f, -0.4f,
473  0.1f, 0.2f, 0.3f, 0.4f,
474  -1.0f, -2.0f, -3.0f, -4.0f,
475  1.0f, 2.0f, 3.0f, 4.0f
476  };
477 
478  // Calculate output values for input.
479  auto f = [](float value)
480  {
481  return 1.0f / (1.0f + std::exp(-value));
482  };
483  std::vector<float> outputExpectedData(inputData.size());
484  std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
485 
486  return SimpleActivationTest<ArmnnType>(workloadFactory,
487  memoryManager,
 488  armnn::ActivationFunction::Sigmoid,
 489  0.f,
490  0.f,
491  qScale,
492  qOffset,
493  inputData,
494  1.f / 256.f,
495  0,
496  outputExpectedData);
497 }
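Unlike the soft-ReLU, sqrt, square and tanh helpers below, which reuse qScale/qOffset for the output tensor, the call above fixes the output quantization at scale 1/256 with offset 0: sigmoid outputs lie in (0, 1), so that scale spans the whole output range without an offset. A small self-contained sketch of the mapping for the 8-bit case (illustrative values, not taken from the test):

    #include <cmath>
    #include <cstdint>
    #include <iostream>

    int main()
    {
        const float   outScale  = 1.0f / 256.0f;               // output quantization used above
        const int32_t outOffset = 0;

        const float y = 1.0f / (1.0f + std::exp(-0.4f));       // reference sigmoid(0.4), ~0.599
        const auto  q = static_cast<uint8_t>(std::lround(y / outScale) + outOffset);

        std::cout << "sigmoid(0.4) quantizes to " << static_cast<int>(q) << "\n";   // ~153
        return 0;
    }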

◆ SimpleSigmoidUint8Test()

LayerTestResult<uint8_t, 4> SimpleSigmoidUint8Test (armnn::IWorkloadFactory &workloadFactory,
                                                    const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)

Definition at line 506 of file ActivationTestImpl.cpp.

509 {
510  return SimpleSigmoidTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, 0.1f, 50);
511 }

◆ SoftReLuInt16Test()

LayerTestResult<int16_t, 4> SoftReLuInt16Test (armnn::IWorkloadFactory &workloadFactory,
                                               const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)

Definition at line 672 of file ActivationTestImpl.cpp.

675 {
676  return SoftReLuTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, 0.1f, 0);
677 }

◆ SoftReLuTest()

LayerTestResult<float, 4> SoftReLuTest (armnn::IWorkloadFactory &workloadFactory,
                                        const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)

Definition at line 658 of file ActivationTestImpl.cpp.

661 {
662  return SoftReLuTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.1f, 0);
663 }

◆ SoftReLuTestCommon()

LayerTestResult<T, 4> SoftReLuTestCommon (armnn::IWorkloadFactory &workloadFactory,
                                          const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
                                          float qScale,
                                          int32_t qOffset)

Definition at line 624 of file ActivationTestImpl.cpp.

References armnn::SoftReLu.

629 {
630  std::vector<float> inputData = {
631  -0.1f, -0.2f, -0.3f, -0.4f,
632  0.1f, 0.2f, 0.3f, 0.4f,
633  -1.0f, -2.0f, -3.0f, -4.0f,
634  1.0f, 2.0f, 3.0f, 4.0f
635  };
636 
637  // Calculate output values for input.
638  auto f = [](float value)
639  {
640  return std::log(1.0f + std::exp(value));
641  };
642  std::vector<float> outputExpectedData(inputData.size());
643  std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
644 
645  return SimpleActivationTest<ArmnnType>(workloadFactory,
646  memoryManager,
 647  armnn::ActivationFunction::SoftReLu,
 648  0.f,
649  0.f,
650  qScale,
651  qOffset,
652  inputData,
653  qScale,
654  qOffset,
655  outputExpectedData);
656 }

◆ SoftReLuUint8Test()

LayerTestResult<uint8_t, 4> SoftReLuUint8Test (armnn::IWorkloadFactory &workloadFactory,
                                               const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)

Definition at line 665 of file ActivationTestImpl.cpp.

668 {
669  return SoftReLuTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, 0.0625f, 64);
670 }
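The 8-bit variant picks scale 0.0625 and offset 64, which covers both the negative inputs and the positive soft-ReLU outputs used by SoftReLuTestCommon. A short sketch of the real-value range those parameters represent, assuming the usual asymmetric dequantization real = scale * (q - offset):

    #include <iostream>

    int main()
    {
        const float scale  = 0.0625f;
        const int   offset = 64;

        // Smallest and largest real values an 8-bit quantized tensor can hold with these parameters.
        const float lo = scale * (0   - offset);   // -4.0
        const float hi = scale * (255 - offset);   // 11.9375
        std::cout << "representable range: [" << lo << ", " << hi << "]\n";
        return 0;
    }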

◆ SqrtInt16Test()

LayerTestResult<int16_t, 4> SqrtInt16Test (armnn::IWorkloadFactory &workloadFactory,
                                           const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)

Definition at line 897 of file ActivationTestImpl.cpp.

900 {
901  return SqrtTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, 0.1f, 0);
902 }

◆ SqrtNNTest()

LayerTestResult<float, 5> SqrtNNTest (armnn::IWorkloadFactory &workloadFactory,
                                      const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)

Definition at line 792 of file ActivationTestImpl.cpp.

References CopyDataFromITensorHandle(), CopyDataToITensorHandle(), IWorkloadFactory::CreateActivation(), IWorkloadFactory::CreateTensorHandle(), armnn::Float32, armnn::IgnoreUnused(), ActivationDescriptor::m_Function, QueueDescriptorWithParameters< LayerDescriptor >::m_Parameters, and armnn::Sqrt.

795 {
796  IgnoreUnused(memoryManager);
797  const int inputDataSize = 120;
798  std::vector<float> inputData(inputDataSize);
799 
800  for (unsigned int i = 0u; i < inputDataSize; ++i)
801  {
802  inputData[i] = static_cast<float>(i) / 10;
803  }
804 
805  auto f = [](float value)
806  {
807  return std::sqrt(value);
808  };
809  std::vector<float> outputExpectedData(inputDataSize);
810  std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
811 
812  armnn::TensorInfo inputTensorInfo(
813  { 1u, 2u, 3u, 4u, 5u }, armnn::DataType::Float32);
814  armnn::TensorInfo outputTensorInfo(
815  { 1u, 2u, 3u, 4u, 5u }, armnn::DataType::Float32);
816 
817  LayerTestResult<float, 5> result(inputTensorInfo);
818 
819  auto input = MakeTensor<float, 5>(inputTensorInfo, inputData);
820 
821  std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
822  std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
823 
825  armnn::WorkloadInfo workloadInfo;
826  AddInputToWorkload(descriptor, workloadInfo, inputTensorInfo, inputHandle.get());
827  AddOutputToWorkload(descriptor, workloadInfo, outputTensorInfo, outputHandle.get());
828 
 829  descriptor.m_Parameters.m_Function = armnn::ActivationFunction::Sqrt;
 830 
831  std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateActivation(descriptor, workloadInfo);
832 
833  inputHandle->Allocate();
834  outputHandle->Allocate();
835 
836  CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0][0]);
837 
838  workload->Execute();
839 
840  CopyDataFromITensorHandle(&result.output[0][0][0][0][0], outputHandle.get());
841 
842  // Calculated manually.
843  result.outputExpected = MakeTensor<float, 5>(outputTensorInfo, outputExpectedData);
844 
845  return result;
846 };
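Unlike the rank-4 helpers in this file, this test builds the activation workload by hand so it can exercise a rank-5 tensor; the 120-element input is simply the product of the dimensions 1 x 2 x 3 x 4 x 5. A trivial, illustrative check of that relationship (not part of the test):

    #include <array>
    #include <cstddef>
    #include <iostream>

    int main()
    {
        const std::array<unsigned int, 5> shape = { 1u, 2u, 3u, 4u, 5u };   // shape used by SqrtNNTest
        std::size_t elements = 1;
        for (unsigned int d : shape) { elements *= d; }
        std::cout << elements << "\n";   // 120, matching inputDataSize
        return 0;
    }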

◆ SqrtTest()

LayerTestResult<float, 4> SqrtTest (armnn::IWorkloadFactory &workloadFactory,
                                    const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)

Definition at line 883 of file ActivationTestImpl.cpp.

886 {
887  return SqrtTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.1f, 0);
888 }

◆ SqrtTestCommon()

LayerTestResult<T, 4> SqrtTestCommon (armnn::IWorkloadFactory &workloadFactory,
                                      const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
                                      float qScale,
                                      int32_t qOffset)

Definition at line 849 of file ActivationTestImpl.cpp.

References armnn::Sqrt.

854 {
855  std::vector<float> inputData = {
856  0.1f, 0.2f, 0.3f, 0.4f,
857  0.1f, 0.2f, 0.3f, 0.4f,
858  1.0f, 2.0f, 3.0f, 4.0f,
859  1.0f, 2.0f, 3.0f, 4.0f
860  };
861 
862  // Calculate output values for input.
863  auto f = [](float value)
864  {
865  return std::sqrt(value);
866  };
867  std::vector<float> outputExpectedData(inputData.size());
868  std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
869 
870  return SimpleActivationTest<ArmnnType>(workloadFactory,
871  memoryManager,
 872  armnn::ActivationFunction::Sqrt,
 873  0.f,
874  0.f,
875  qScale,
876  qOffset,
877  inputData,
878  qScale,
879  qOffset,
880  outputExpectedData);
881 }
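Note that the input data here is entirely non-negative: std::sqrt of a negative float yields NaN, which would make the expected-versus-actual comparison meaningless. A two-line illustration (not part of the test):

    #include <cmath>
    #include <iostream>

    int main()
    {
        std::cout << std::sqrt(4.0f)  << "\n";   // 2
        std::cout << std::sqrt(-1.0f) << "\n";   // nan, hence the all-positive input data above
        return 0;
    }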

◆ SqrtUint8Test()

LayerTestResult<uint8_t, 4> SqrtUint8Test (armnn::IWorkloadFactory &workloadFactory,
                                           const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)

Definition at line 890 of file ActivationTestImpl.cpp.

893 {
894  return SqrtTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, 0.0625f, 64);
895 }

◆ SquareInt16Test()

LayerTestResult<int16_t, 4> SquareInt16Test (armnn::IWorkloadFactory &workloadFactory,
                                             const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)

Definition at line 953 of file ActivationTestImpl.cpp.

956 {
957  return SquareTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, 0.1f, 0);
958 }

◆ SquareTest()

LayerTestResult<float, 4> SquareTest (armnn::IWorkloadFactory &workloadFactory,
                                      const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)

Definition at line 939 of file ActivationTestImpl.cpp.

942 {
943  return SquareTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.1f, 0);
944 }

◆ SquareTestCommon()

LayerTestResult<T, 4> SquareTestCommon (armnn::IWorkloadFactory &workloadFactory,
                                        const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
                                        float qScale,
                                        int32_t qOffset)

Definition at line 905 of file ActivationTestImpl.cpp.

References armnn::Square.

910 {
911  std::vector<float> inputData = {
912  -0.1f, -0.2f, -0.3f, -0.4f,
913  0.1f, 0.2f, 0.3f, 0.4f,
914  -1.0f, -2.0f, -3.0f, -4.0f,
915  1.0f, 2.0f, 3.0f, 4.0f
916  };
917 
918  // Calculate output values for input.
919  auto f = [](float value)
920  {
921  return std::pow(value,2);
922  };
923  std::vector<float> outputExpectedData(inputData.size());
924  std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
925 
926  return SimpleActivationTest<ArmnnType>(workloadFactory,
927  memoryManager,
 928  armnn::ActivationFunction::Square,
 929  0.f,
930  0.f,
931  qScale,
932  qOffset,
933  inputData,
934  qScale,
935  qOffset,
936  outputExpectedData);
937 }

◆ SquareUint8Test()

LayerTestResult<uint8_t, 4> SquareUint8Test (armnn::IWorkloadFactory &workloadFactory,
                                             const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)

Definition at line 946 of file ActivationTestImpl.cpp.

949 {
950  return SquareTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, 0.0625f, 64);
951 }

◆ TanhInt16Test()

LayerTestResult<int16_t, 4> TanhInt16Test (armnn::IWorkloadFactory &workloadFactory,
                                           const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)

Definition at line 1011 of file ActivationTestImpl.cpp.

1014 {
1015  return TanhTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, 0.1f, 0);
1016 }

◆ TanhTest()

LayerTestResult<float, 4> TanhTest (armnn::IWorkloadFactory &workloadFactory,
                                    const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)

Definition at line 997 of file ActivationTestImpl.cpp.

1000 {
1001  return TanhTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.1f, 0);
1002 }

◆ TanhTestCommon()

LayerTestResult<T, 4> TanhTestCommon (armnn::IWorkloadFactory &workloadFactory,
                                      const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
                                      float qScale,
                                      int32_t qOffset)

Definition at line 961 of file ActivationTestImpl.cpp.

References armnn::TanH.

966 {
967  std::vector<float> inputData = {
968  -0.1f, -0.2f, -0.3f, -0.4f,
969  0.1f, 0.2f, 0.3f, 0.4f,
970  -1.0f, -2.0f, -3.0f, -4.0f,
971  1.0f, 2.0f, 3.0f, 4.0f
972  };
973 
974  const float a = 2.0f;
975  const float b = 3.0f;
976  // Calculate output values for input.
977  auto f = [a, b](float value)
978  {
979  return a * tanhf(b * value);
980  };
981  std::vector<float> outputExpectedData(inputData.size());
982  std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
983 
984  return SimpleActivationTest<ArmnnType>(workloadFactory,
985  memoryManager,
 986  armnn::ActivationFunction::TanH,
 987  a,
988  b,
989  qScale,
990  qOffset,
991  inputData,
992  qScale,
993  qOffset,
994  outputExpectedData);
995 }
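The constants a = 2 and b = 3 are passed through as the activation parameters (the descriptor's m_A and m_B), so the reference lambda above computes f(x) = a * tanh(b * x). A quick numerical check of one entry (illustrative only):

    #include <cmath>
    #include <iostream>

    int main()
    {
        const float a = 2.0f;   // passed as activation parameter A (m_A)
        const float b = 3.0f;   // passed as activation parameter B (m_B)
        const float x = 1.0f;   // one of the test inputs
        std::cout << a * std::tanh(b * x) << "\n";   // ~1.9901, the expected output for x = 1.0
        return 0;
    }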

◆ TanhUint8Test()

LayerTestResult<uint8_t, 4> TanhUint8Test (armnn::IWorkloadFactory &workloadFactory,
                                           const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)

Definition at line 1004 of file ActivationTestImpl.cpp.

1007 {
1008  return TanhTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, 0.1f, 64);
1009 }