ArmNN 21.11
ActivationTestImpl.cpp File Reference


Functions

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult< T, 4 > BoundedReLuTestCommon (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, float upperBound, float lowerBound, float inputScale, int32_t inputOffset, float outputScale, int32_t outputOffset, const std::vector< T > &inputData, const std::vector< T > &outputExpectedData, unsigned int inputWidth, unsigned int inputHeight, unsigned int inputChannels, unsigned int inputBatchSize)
 
LayerTestResult< float, 4 > BoundedReLuUpperAndLowerBoundTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< float, 4 > BoundedReLuUpperBoundOnlyTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< uint8_t, 4 > BoundedReLuUint8UpperBoundOnlyTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< uint8_t, 4 > BoundedReLuUint8UpperAndLowerBoundTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< float, 4 > CompareBoundedReLuTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, armnn::IWorkloadFactory &refWorkloadFactory, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::ITensorHandleFactory &refTensorHandleFactory, float upperBound, float lowerBound)
 
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult< T, 4 > ConstantLinearActivationTestCommon (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, float qScale=0.0f, int32_t qOffset=0)
 
LayerTestResult< float, 4 > ConstantLinearActivationTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< uint8_t, 4 > ConstantLinearActivationUint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< int16_t, 4 > ConstantLinearActivationInt16Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult< T, 4 > SimpleActivationTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, armnn::ActivationFunction activationFunction, float activationParameterA, float activationParameterB, float scale, int32_t offset, const std::vector< float > &inputData, float outScale, int32_t outOffset, const std::vector< float > &outputExpectedData)
 
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult< T, 4 > SimpleSigmoidTestCommon (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, float qScale, int32_t qOffset)
 
LayerTestResult< float, 4 > SimpleSigmoidTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< uint8_t, 4 > SimpleSigmoidUint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< int16_t, 4 > SimpleSigmoidInt16Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult< T, 4 > ReLuTestCommon (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, float qScale, int32_t qOffset)
 
LayerTestResult< int16_t, 4 > ReLuInt16Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< uint8_t, 4 > ReLuUint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< float, 4 > ReLuTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult< T, 4 > BoundedReLuTestCommon (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, float qScale, int32_t qOffset)
 
LayerTestResult< int16_t, 4 > BoundedReLuInt16Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult< T, 4 > SoftReLuTestCommon (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, float qScale, int32_t qOffset)
 
LayerTestResult< float, 4 > SoftReLuTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< uint8_t, 4 > SoftReLuUint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< int16_t, 4 > SoftReLuInt16Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult< T, 4 > LeakyReLuTestCommon (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, float qScale, int32_t qOffset)
 
LayerTestResult< float, 4 > LeakyReLuTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< uint8_t, 4 > LeakyReLuUint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< int16_t, 4 > LeakyReLuInt16Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult< T, 4 > AbsTestCommon (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, float qScale, int32_t qOffset)
 
LayerTestResult< float, 4 > AbsTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< uint8_t, 4 > AbsUint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< int16_t, 4 > AbsInt16Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< float, 5 > SqrtNNTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult< T, 4 > SqrtTestCommon (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, float qScale, int32_t qOffset)
 
LayerTestResult< float, 4 > SqrtTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< uint8_t, 4 > SqrtUint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< int16_t, 4 > SqrtInt16Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult< T, 4 > SquareTestCommon (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, float qScale, int32_t qOffset)
 
LayerTestResult< float, 4 > SquareTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< uint8_t, 4 > SquareUint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< int16_t, 4 > SquareInt16Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult< T, 4 > TanhTestCommon (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, float qScale, int32_t qOffset)
 
LayerTestResult< float, 4 > TanhTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< uint8_t, 4 > TanhUint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< int16_t, 4 > TanhInt16Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult< T, 4 > EluTestCommon (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, float qScale, int32_t qOffset)
 
LayerTestResult< float, 4 > EluTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< uint8_t, 4 > EluUint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< int16_t, 4 > EluInt16Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult< T, 4 > HardSwishTestCommon (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, float qScale, int32_t qOffset)
 
LayerTestResult< float, 4 > HardSwishTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< uint8_t, 4 > HardSwishUint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< int16_t, 4 > HardSwishInt16Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult< T, 4 > CompareActivationTestImpl (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, armnn::IWorkloadFactory &refWorkloadFactory, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::ITensorHandleFactory &refTensorHandleFactory, armnn::ActivationFunction f, unsigned int batchSize=5, float qScale=0.0f, int32_t qOffset=0)
 
LayerTestResult< float, 4 > CompareActivationTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, armnn::IWorkloadFactory &refWorkloadFactory, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::ITensorHandleFactory &refTensorHandleFactory, armnn::ActivationFunction f, unsigned int batchSize)
 
LayerTestResult< uint8_t, 4 > CompareActivationUint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, armnn::IWorkloadFactory &refWorkloadFactory, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::ITensorHandleFactory &refTensorHandleFactory, armnn::ActivationFunction f)
 
LayerTestResult< int16_t, 4 > CompareActivationInt16Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, armnn::IWorkloadFactory &refWorkloadFactory, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::ITensorHandleFactory &refTensorHandleFactory, armnn::ActivationFunction f)
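All of the helpers above share the same calling convention: they take the workload factory, memory manager and tensor handle factory of the backend under test and return a LayerTestResult holding both the actual and the expected output data. As a rough illustration (not part of this file; how the factory objects are obtained is an assumption that depends on the backend test fixture), a caller might drive one of them like this:

// Hypothetical usage sketch: the three factory objects are assumed to be
// supplied by the backend test fixture for the backend under test.
void CheckSimpleSigmoid(armnn::IWorkloadFactory& workloadFactory,
                        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
                        const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    // Runs the Float32 sigmoid test defined in this file.
    LayerTestResult<float, 4> result =
        SimpleSigmoidTest(workloadFactory, memoryManager, tensorHandleFactory);

    // The surrounding test framework is then expected to compare
    // result.m_ActualData against result.m_ExpectedData element-wise.
}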
 

Function Documentation

◆ AbsInt16Test()

LayerTestResult<int16_t, 4> AbsInt16Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 832 of file ActivationTestImpl.cpp.

Referenced by TEST_SUITE().

836 {
837  return AbsTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
838 }

◆ AbsTest()

LayerTestResult<float, 4> AbsTest ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 816 of file ActivationTestImpl.cpp.

Referenced by TEST_SUITE().

820 {
821  return AbsTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
822 }

◆ AbsTestCommon()

LayerTestResult<T, 4> AbsTestCommon ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory,
float  qScale,
int32_t  qOffset 
)

Definition at line 780 of file ActivationTestImpl.cpp.

References armnn::Abs.

786 {
787  std::vector<float> inputData = {
788  -0.1f, -0.2f, -0.3f, -0.4f,
789  0.1f, 0.2f, 0.3f, 0.4f,
790  -1.0f, -2.0f, -3.0f, -4.0f,
791  1.0f, 2.0f, 3.0f, 4.0f
792  };
793 
794  // Calculate output values for input.
795  auto f = [](float value)
796  {
797  return std::abs(value);
798  };
799  std::vector<float> outputExpected(inputData.size());
800  std::transform(inputData.begin(), inputData.end(), outputExpected.begin(), f);
801 
802  return SimpleActivationTest<ArmnnType>(workloadFactory,
803  memoryManager,
804  tensorHandleFactory,
805  armnn::ActivationFunction::Abs,
806  0.f,
807  0.f,
808  qScale,
809  qOffset,
810  inputData,
811  qScale,
812  qOffset,
813  outputExpected);
814 }

◆ AbsUint8Test()

LayerTestResult<uint8_t, 4> AbsUint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 824 of file ActivationTestImpl.cpp.

Referenced by TEST_SUITE().

828 {
829  return AbsTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, 0.0625f, 64);
830 }

◆ BoundedReLuInt16Test()

LayerTestResult<int16_t, 4> BoundedReLuInt16Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 644 of file ActivationTestImpl.cpp.

Referenced by TEST_SUITE().

648 {
649  return ReLuTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
650 }

◆ BoundedReLuTestCommon() [1/2]

LayerTestResult<T, 4> BoundedReLuTestCommon ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory,
float  upperBound,
float  lowerBound,
float  inputScale,
int32_t  inputOffset,
float  outputScale,
int32_t  outputOffset,
const std::vector< T > &  inputData,
const std::vector< T > &  outputExpectedData,
unsigned int  inputWidth,
unsigned int  inputHeight,
unsigned int  inputChannels,
unsigned int  inputBatchSize 
)

Definition at line 23 of file ActivationTestImpl.cpp.

References armnn::BoundedReLu, CopyDataFromITensorHandle(), CopyDataToITensorHandle(), IWorkloadFactory::CreateActivation(), ITensorHandleFactory::CreateTensorHandle(), armnn::IgnoreUnused(), ActivationDescriptor::m_A, ActivationDescriptor::m_B, ActivationDescriptor::m_Function, QueueDescriptorWithParameters< LayerDescriptor >::m_Parameters, and TensorInfo::SetQuantizationScale().

39 {
40  IgnoreUnused(memoryManager);
41  unsigned int outputWidth = inputWidth;
42  unsigned int outputHeight = inputHeight;
43  unsigned int outputChannels = inputChannels;
44  unsigned int outputBatchSize = inputBatchSize;
45 
46  armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth }, ArmnnType);
47 
48  armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth }, ArmnnType);
49 
50  if(armnn::IsQuantizedType<T>())
51  {
52  inputTensorInfo.SetQuantizationScale(inputScale);
53  inputTensorInfo.SetQuantizationOffset(inputOffset);
54 
55  outputTensorInfo.SetQuantizationScale(outputScale);
56  outputTensorInfo.SetQuantizationOffset(outputOffset);
57  }
58 
59  std::vector<T> actualOutput(outputTensorInfo.GetNumElements());
60 
61  std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
62  std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
63 
64  // Setup bounded ReLu.
65  armnn::ActivationQueueDescriptor descriptor;
66  armnn::WorkloadInfo workloadInfo;
67  AddInputToWorkload(descriptor, workloadInfo, inputTensorInfo, inputHandle.get());
68  AddOutputToWorkload(descriptor, workloadInfo, outputTensorInfo, outputHandle.get());
69 
70  descriptor.m_Parameters.m_Function = armnn::ActivationFunction::BoundedReLu;
71  descriptor.m_Parameters.m_A = upperBound;
72  descriptor.m_Parameters.m_B = lowerBound;
73 
74  std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateActivation(descriptor, workloadInfo);
75 
76  inputHandle->Allocate();
77  outputHandle->Allocate();
78 
79  CopyDataToITensorHandle(inputHandle.get(), inputData.data());
80 
81  workload->Execute();
82 
83  CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
84 
85  return LayerTestResult<T, 4>(actualOutput,
86  outputExpectedData,
87  outputHandle->GetShape(),
88  outputTensorInfo.GetShape());
89 }

◆ BoundedReLuTestCommon() [2/2]

LayerTestResult<T, 4> BoundedReLuTestCommon ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory,
float  qScale,
int32_t  qOffset 
)

Definition at line 607 of file ActivationTestImpl.cpp.

References armnn::BoundedReLu.

613 {
614  std::vector<float> inputData = {
615  -0.1f, -0.2f, -0.3f, -0.4f,
616  0.1f, 0.2f, 0.3f, 0.4f,
617  -1.0f, -2.0f, -3.0f, -4.0f,
618  1.0f, 2.0f, 3.0f, 4.0f
619  };
620  const float a = 1.0f;
621  const float b = -1.0f;
622  // Calculate output values for input.
623  auto f = [a, b](float value)
624  {
625  return std::min(a, std::max(b, value));
626  };
627  std::vector<float> outputExpected(inputData.size());
628  std::transform(inputData.begin(), inputData.end(), outputExpected.begin(), f);
629 
630  return SimpleActivationTest<ArmnnType>(workloadFactory,
631  memoryManager,
632  tensorHandleFactory,
633  armnn::ActivationFunction::BoundedReLu,
634  a,
635  b,
636  qScale,
637  qOffset,
638  inputData,
639  qScale,
640  qOffset,
641  outputExpected);
642 }
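As a quick sanity check of the lambda above (derived from the constants shown, not stated in the file): with a = 1.0 and b = -1.0, an input of -2.0 clamps to min(1.0, max(-1.0, -2.0)) = -1.0, an input of 3.0 clamps to 1.0, and values already inside [-1.0, 1.0], such as 0.4, pass through unchanged.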

◆ BoundedReLuUint8UpperAndLowerBoundTest()

LayerTestResult<uint8_t, 4> BoundedReLuUint8UpperAndLowerBoundTest ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 187 of file ActivationTestImpl.cpp.

References CopyDataFromITensorHandle(), CopyDataToITensorHandle(), IWorkloadFactory::CreateActivation(), ITensorHandleFactory::CreateTensorHandle(), armnn::Float32, TensorInfo::GetNumElements(), armnn::IgnoreUnused(), and QueueDescriptorWithParameters< LayerDescriptor >::m_Parameters.

Referenced by TEST_SUITE().

191 {
192  unsigned int inputWidth = 3u;
193  unsigned int inputHeight = 2u;
194  unsigned int inputChannels = 1u;
195  unsigned int inputBatchSize = 1;
196 
197  std::vector<uint8_t> input = std::vector<uint8_t>{
198  51, 230, 28,
199  251, 8, 92
200  };
201 
202  // Calculated manually.
203  std::vector<uint8_t> output = std::vector<uint8_t>{
204  51, 192, 32,
205  192, 32, 92
206  };
207 
208  int32_t inputOffset = 112;
209  float inputScale = 0.0125f;
210 
211  return BoundedReLuTestCommon<armnn::DataType::QAsymmU8>(
212  workloadFactory, memoryManager, tensorHandleFactory, 1.0f, -1.0f,
213  inputScale, inputOffset, inputScale, inputOffset, // Input/output scale & offset same.
214  input, output, inputWidth, inputHeight, inputChannels, inputBatchSize);
215 }
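The "calculated manually" output above follows directly from the quantization parameters shown (scale 0.0125, offset 112 for both input and output): 230 dequantizes to (230 - 112) * 0.0125 = 1.475, is clamped to the upper bound 1.0 and re-quantizes to 1.0 / 0.0125 + 112 = 192, while 28 dequantizes to -1.05, is clamped to the lower bound -1.0 and re-quantizes to 32; 51 and 92 lie within the bounds and pass through unchanged.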

◆ BoundedReLuUint8UpperBoundOnlyTest()

LayerTestResult<uint8_t, 4> BoundedReLuUint8UpperBoundOnlyTest ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 155 of file ActivationTestImpl.cpp.

Referenced by TEST_SUITE().

159 {
160  unsigned int inputWidth = 3u;
161  unsigned int inputHeight = 2u;
162  unsigned int inputChannels = 1u;
163  unsigned int inputBatchSize = 1;
164 
165  std::vector<uint8_t> input = std::vector<uint8_t>{
166  51, 124, 28,
167  251, 8, 92
168  };
169 
170  // Calculated manually.
171  std::vector<uint8_t> output = std::vector<uint8_t>{
172  0, 122, 0,
173  255, 0, 58
174  };
175 
176  float inputScale = 12.0f / 255.0f;
177  int32_t inputOffset = 63;
178  float outputScale = 6.0f / 255.0f;
179  int32_t outputOffset = 0;
180 
181  return BoundedReLuTestCommon<armnn::DataType::QAsymmU8>(
182  workloadFactory, memoryManager, tensorHandleFactory, 6.0f, 0.0f,
183  inputScale, inputOffset, outputScale, outputOffset,
184  input, output, inputWidth, inputHeight, inputChannels, inputBatchSize);
185 }

◆ BoundedReLuUpperAndLowerBoundTest()

LayerTestResult<float, 4> BoundedReLuUpperAndLowerBoundTest ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 91 of file ActivationTestImpl.cpp.

Referenced by TEST_SUITE().

95 {
96  unsigned int inputWidth = 4u;
97  unsigned int inputHeight = 5u;
98  unsigned int inputChannels = 1u;
99  unsigned int inputBatchSize = 1;
100 
101  std::vector<float> input = std::vector<float>{
102  -2.0f, 0.1f, 0.5f, 1.25f,
103  0.786f, 0.9875f, -1.5f, 0.384f,
104  1.0001f, 3.5f, 7.5f, 0.896f,
105  2.126f, 2.0f, 0.3f, 0.15f,
106  0.999f, 1.2f, 0.89f, 6.1f,
107  };
108 
109  // Calculated manually.
110  std::vector<float> output = std::vector<float>{
111  -1.0f, 0.1f, 0.5f, 1.0f,
112  0.786f, 0.9875f, -1.0f, 0.384f,
113  1.0f, 1.0f, 1.0f, 0.896f,
114  1.0f, 1.0f, 0.3f, 0.15f,
115  0.999f, 1.0f, 0.89f, 1.0f,
116  };
117 
118  return BoundedReLuTestCommon<armnn::DataType::Float32>(
119  workloadFactory, memoryManager, tensorHandleFactory, 1.0f, -1.0f, 1.0f, 0, 1.0f, 0, input, output,
120  inputWidth, inputHeight, inputChannels, inputBatchSize);
121 }

◆ BoundedReLuUpperBoundOnlyTest()

LayerTestResult<float, 4> BoundedReLuUpperBoundOnlyTest ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 123 of file ActivationTestImpl.cpp.

Referenced by TEST_SUITE().

127 {
128  unsigned int inputWidth = 4u;
129  unsigned int inputHeight = 5u;
130  unsigned int inputChannels = 1u;
131  unsigned int inputBatchSize = 1;
132 
133  std::vector<float> input = std::vector<float>{
134  -1.0f, 0.1f, 0.5f, 6.25f,
135  0.786f, 5.9875f, -0.5f, 0.384f,
136  6.0001f, 3.5f, 7.5f, 0.896f,
137  2.126f, 12.0f, 0.3f, 0.15f,
138  0.999f, 1.2f, 0.89f, 6.1f,
139  };
140 
141  // Calculated manually.
142  std::vector<float> output = std::vector<float>{
143  0.0f, 0.1f, 0.5f, 6.0f,
144  0.786f, 5.9875f, 0.0f, 0.384f,
145  6.0f, 3.5f, 6.0f, 0.896f,
146  2.126f, 6.0f, 0.3f, 0.15f,
147  0.999f, 1.2f, 0.89f, 6.0f,
148  };
149 
150  return BoundedReLuTestCommon<armnn::DataType::Float32>(
151  workloadFactory, memoryManager, tensorHandleFactory, 6.0f, 0.0f, 1.0f, 0, 1.0f, 0, input, output,
152  inputWidth, inputHeight, inputChannels, inputBatchSize);
153 }

◆ CompareActivationInt16Test()

LayerTestResult<int16_t, 4> CompareActivationInt16Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
armnn::IWorkloadFactory refWorkloadFactory,
const armnn::ITensorHandleFactory tensorHandleFactory,
const armnn::ITensorHandleFactory refTensorHandleFactory,
armnn::ActivationFunction  f 
)

Definition at line 1334 of file ActivationTestImpl.cpp.

1341 {
1342  return CompareActivationTestImpl<armnn::DataType::QSymmS16>(
1343  workloadFactory, memoryManager, refWorkloadFactory, tensorHandleFactory,
1344  refTensorHandleFactory, f, 5, 0.1f, 0);
1345 }

◆ CompareActivationTest()

LayerTestResult<float, 4> CompareActivationTest ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
armnn::IWorkloadFactory refWorkloadFactory,
const armnn::ITensorHandleFactory tensorHandleFactory,
const armnn::ITensorHandleFactory refTensorHandleFactory,
armnn::ActivationFunction  f,
unsigned int  batchSize 
)

Definition at line 1307 of file ActivationTestImpl.cpp.

Referenced by TEST_SUITE().

1315 {
1316  return CompareActivationTestImpl<armnn::DataType::Float32>(
1317  workloadFactory, memoryManager, refWorkloadFactory, tensorHandleFactory,
1318  refTensorHandleFactory, f, batchSize);
1319 }

◆ CompareActivationTestImpl()

LayerTestResult<T, 4> CompareActivationTestImpl ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
armnn::IWorkloadFactory refWorkloadFactory,
const armnn::ITensorHandleFactory tensorHandleFactory,
const armnn::ITensorHandleFactory refTensorHandleFactory,
armnn::ActivationFunction  f,
unsigned int  batchSize = 5,
float  qScale = 0.0f,
int32_t  qOffset = 0 
)

Definition at line 1216 of file ActivationTestImpl.cpp.

References ARMNN_ASSERT, CopyDataFromITensorHandle(), CopyDataToITensorHandle(), IWorkloadFactory::CreateActivation(), ITensorHandleFactory::CreateTensorHandle(), TensorInfo::GetNumElements(), TensorInfo::GetShape(), armnn::IgnoreUnused(), ActivationDescriptor::m_A, ActivationDescriptor::m_B, ActivationDescriptor::m_Function, QueueDescriptorWithParameters< LayerDescriptor >::m_Parameters, TensorInfo::SetQuantizationOffset(), TensorInfo::SetQuantizationScale(), and armnn::Sqrt.

1226 {
1227  IgnoreUnused(memoryManager);
1228  unsigned int width = 17;
1229  unsigned int height = 29;
1230  unsigned int channels = 2;
1231 
1232  float a = 0.234f;
1233  float b = -12.345f;
1234 
1235  armnn::TensorInfo inputTensorInfo;
1236  armnn::TensorInfo outputTensorInfo;
1237 
1238  unsigned int shape[] = {batchSize, channels, height, width};
1239 
1240  inputTensorInfo = armnn::TensorInfo(4, shape, ArmnnType);
1241  outputTensorInfo = armnn::TensorInfo(4, shape, ArmnnType);
1242 
1243  // Set quantization parameters if the requested type is a quantized type.
1244  if(armnn::IsQuantizedType<T>())
1245  {
1246  inputTensorInfo.SetQuantizationScale(qScale);
1247  inputTensorInfo.SetQuantizationOffset(qOffset);
1248  outputTensorInfo.SetQuantizationScale(qScale);
1249  outputTensorInfo.SetQuantizationOffset(qOffset);
1250  }
1251 
1252  float minVal = -10.f;
1253  if (f == armnn::ActivationFunction::Sqrt)
1254  {
1255  minVal = 0.f;
1256  }
1257 
1258  std::vector<T> input = MakeRandomTensor<T>(inputTensorInfo, 21453, minVal, 10.f);
1259  std::vector<T> actualOutput(outputTensorInfo.GetNumElements());
1260  std::vector<T> expectedOutput(outputTensorInfo.GetNumElements());
1261 
1262  std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
1263  std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
1264 
1265  std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refTensorHandleFactory.CreateTensorHandle(inputTensorInfo);
1266  std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refTensorHandleFactory.CreateTensorHandle(outputTensorInfo);
1267 
1268  armnn::ActivationQueueDescriptor data;
1269  armnn::WorkloadInfo info;
1270  AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
1271  AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
1272  data.m_Parameters.m_A = a;
1273  data.m_Parameters.m_B = b;
1274  data.m_Parameters.m_Function = f;
1275 
1276  armnn::ActivationQueueDescriptor refData = data;
1277  armnn::WorkloadInfo refInfo = info;
1278  SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
1279  SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
1280 
1281  std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateActivation(data, info);
1282  ARMNN_ASSERT(workload != nullptr);
1283  std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateActivation(refData, refInfo);
1284  ARMNN_ASSERT(workloadRef != nullptr);
1285 
1286  inputHandle->Allocate();
1287  outputHandle->Allocate();
1288  inputHandleRef->Allocate();
1289  outputHandleRef->Allocate();
1290 
1291  CopyDataToITensorHandle(inputHandle.get(), input.data());
1292  CopyDataToITensorHandle(inputHandleRef.get(), input.data());
1293 
1294  workload->Execute();
1295  workloadRef->Execute();
1296 
1297  CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
1298  CopyDataFromITensorHandle(expectedOutput.data(), outputHandleRef.get());
1299 
1300  return LayerTestResult<T, 4>(actualOutput,
1301  expectedOutput,
1302  outputHandle->GetShape(),
1303  outputTensorInfo.GetShape());
1304 
1305 }
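CompareActivationTestImpl exercises the same ActivationQueueDescriptor and random input once through workloadFactory and once through refWorkloadFactory, so the expected data in the returned LayerTestResult comes from the reference backend rather than from a host-side formula. A minimal sketch of the element-wise check a caller could apply afterwards is given below; the helper name and tolerance are illustrative assumptions, and m_ActualData/m_ExpectedData are assumed to be std::vector<T>, as the constructors used above suggest.

#include <cmath>   // std::abs
#include <cstddef> // std::size_t

// Illustrative sketch only: element-wise comparison of a LayerTestResult.
template <typename T>
bool ResultsMatch(const LayerTestResult<T, 4>& result, float tolerance)
{
    if (result.m_ActualData.size() != result.m_ExpectedData.size())
    {
        return false;
    }
    for (std::size_t i = 0; i < result.m_ActualData.size(); ++i)
    {
        const float diff = static_cast<float>(result.m_ActualData[i]) -
                           static_cast<float>(result.m_ExpectedData[i]);
        if (std::abs(diff) > tolerance)
        {
            return false;
        }
    }
    return true;
}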

◆ CompareActivationUint8Test()

LayerTestResult<uint8_t, 4> CompareActivationUint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
armnn::IWorkloadFactory refWorkloadFactory,
const armnn::ITensorHandleFactory tensorHandleFactory,
const armnn::ITensorHandleFactory refTensorHandleFactory,
armnn::ActivationFunction  f 
)

Definition at line 1321 of file ActivationTestImpl.cpp.

Referenced by TEST_SUITE().

1328 {
1329  return CompareActivationTestImpl<armnn::DataType::QAsymmU8>(
1330  workloadFactory, memoryManager, refWorkloadFactory,
1331  tensorHandleFactory, refTensorHandleFactory, f, 5, 0.1f, 50);
1332 }

◆ CompareBoundedReLuTest()

LayerTestResult<float, 4> CompareBoundedReLuTest ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
armnn::IWorkloadFactory refWorkloadFactory,
const armnn::ITensorHandleFactory tensorHandleFactory,
const armnn::ITensorHandleFactory refTensorHandleFactory,
float  upperBound,
float  lowerBound 
)

Definition at line 288 of file ActivationTestImpl.cpp.

References armnn::BoundedReLu, ActivationDescriptor::m_A, LayerTestResult< T, n >::m_ActualData, ActivationDescriptor::m_B, LayerTestResult< T, n >::m_ExpectedData, and ActivationDescriptor::m_Function.

Referenced by TEST_SUITE().

296 {
297  LayerTestResult<float, 4> result(BoundedReLuRandomInputTestTraits::GetOutputTensorInfo());
298 
299  armnn::ActivationDescriptor activationDescriptor;
300  activationDescriptor.m_Function = armnn::ActivationFunction::BoundedReLu;
301  activationDescriptor.m_A = upperBound;
302  activationDescriptor.m_B = lowerBound;
303 
304  result.m_ActualData = BoundedReLuRandomInputTest(
305  workloadFactory, memoryManager, tensorHandleFactory, 0.0f, upperBound, activationDescriptor);
306  result.m_ExpectedData = BoundedReLuRandomInputTest(
307  refWorkloadFactory, nullptr, refTensorHandleFactory, 0.0f, upperBound, activationDescriptor);
308 
309  return result;
310 }

◆ ConstantLinearActivationInt16Test()

LayerTestResult<int16_t, 4> ConstantLinearActivationInt16Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 395 of file ActivationTestImpl.cpp.

Referenced by TEST_SUITE().

399 {
400  return ConstantLinearActivationTestCommon<armnn::DataType::QSymmS16>(
401  workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
402 }

◆ ConstantLinearActivationTest()

LayerTestResult<float, 4> ConstantLinearActivationTest ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 376 of file ActivationTestImpl.cpp.

Referenced by TEST_SUITE().

380 {
381  return ConstantLinearActivationTestCommon<armnn::DataType::Float32>(workloadFactory,
382  memoryManager,
383  tensorHandleFactory);
384 }

◆ ConstantLinearActivationTestCommon()

LayerTestResult<T, 4> ConstantLinearActivationTestCommon ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory,
float  qScale = 0.0f,
int32_t  qOffset = 0 
)

Definition at line 313 of file ActivationTestImpl.cpp.

References CopyDataFromITensorHandle(), CopyDataToITensorHandle(), IWorkloadFactory::CreateActivation(), ITensorHandleFactory::CreateTensorHandle(), TensorInfo::GetNumElements(), TensorInfo::GetShape(), armnn::IgnoreUnused(), armnn::Linear, ActivationDescriptor::m_A, ActivationDescriptor::m_B, ActivationDescriptor::m_Function, QueueDescriptorWithParameters< LayerDescriptor >::m_Parameters, TensorInfo::SetQuantizationOffset(), and TensorInfo::SetQuantizationScale().

319 {
320  IgnoreUnused(memoryManager);
321  unsigned int inputHeight = 20;
322  unsigned int inputWidth = 17;
323  unsigned int inputChannels = 3;
324  unsigned int batchSize = 5;
325 
326  armnn::TensorInfo inputTensorInfo;
327  armnn::TensorInfo outputTensorInfo;
328 
329  unsigned int shape[] = {batchSize, inputChannels, inputHeight, inputWidth};
330 
331  inputTensorInfo = armnn::TensorInfo(4, shape, ArmnnType);
332  outputTensorInfo = armnn::TensorInfo(4, shape, ArmnnType);
333 
334  // Set quantization parameters if the requested type is a quantized type.
335  if(armnn::IsQuantizedType<T>())
336  {
337  inputTensorInfo.SetQuantizationScale(qScale);
338  inputTensorInfo.SetQuantizationOffset(qOffset);
339  outputTensorInfo.SetQuantizationScale(qScale);
340  outputTensorInfo.SetQuantizationOffset(qOffset);
341  }
342 
343  std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
344  std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
345 
346  // Do linear activation that should leave the tensor unchanged.
347  armnn::ActivationQueueDescriptor data;
348  armnn::WorkloadInfo info;
349  AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
350  AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
351  data.m_Parameters.m_A = 1.0f;
352  data.m_Parameters.m_B = 0.0f;
353  data.m_Parameters.m_Function = armnn::ActivationFunction::Linear;
354 
355  std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateActivation(data, info);
356 
357  inputHandle->Allocate();
358  outputHandle->Allocate();
359 
360  std::vector<T> input = MakeRandomTensor<T>(inputTensorInfo, 7123561);
361  std::vector<T> actualOutput(outputTensorInfo.GetNumElements());
362 
363  CopyDataToITensorHandle(inputHandle.get(), input.data());
364 
365  workload->Execute();
366 
367  CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
368 
369  // Use input as ExpectedData as tensor doesn't change.
370  return LayerTestResult<T, 4>(actualOutput,
371  input,
372  outputHandle->GetShape(),
373  outputTensorInfo.GetShape());
374 }

◆ ConstantLinearActivationUint8Test()

LayerTestResult<uint8_t, 4> ConstantLinearActivationUint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 386 of file ActivationTestImpl.cpp.

Referenced by TEST_SUITE().

390 {
391  return ConstantLinearActivationTestCommon<armnn::DataType::QAsymmU8>(
392  workloadFactory, memoryManager, tensorHandleFactory, 4.0f, 3);
393 }

◆ EluInt16Test()

LayerTestResult<int16_t, 4> EluInt16Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 1137 of file ActivationTestImpl.cpp.

Referenced by TEST_SUITE().

1141 {
1142  return EluTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
1143 }

◆ EluTest()

LayerTestResult<float, 4> EluTest ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 1121 of file ActivationTestImpl.cpp.

Referenced by TEST_SUITE().

1125 {
1126  return EluTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
1127 }

◆ EluTestCommon()

LayerTestResult<T, 4> EluTestCommon ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory,
float  qScale,
int32_t  qOffset 
)

Definition at line 1083 of file ActivationTestImpl.cpp.

References armnn::Elu.

1089 {
1090  std::vector<float> inputData = {
1091  -0.1f, -0.2f, -0.3f, -0.4f,
1092  0.1f, 0.2f, 0.3f, 0.4f,
1093  -1.0f, -2.0f, -3.0f, -4.0f,
1094  1.0f, 2.0f, 3.0f, 4.0f
1095  };
1096 
1097 
1098  const float a = 0.01f;
1099  // Calculate output values for input.
1100  auto f = [a](float value)
1101  {
1102  return (value >= 0) ? value : a * (expf(value) - 1);
1103  };
1104  std::vector<float> expectedOutput(inputData.size());
1105  std::transform(inputData.begin(), inputData.end(), expectedOutput.begin(), f);
1106 
1107  return SimpleActivationTest<ArmnnType>(workloadFactory,
1108  memoryManager,
1109  tensorHandleFactory,
1110  armnn::ActivationFunction::Elu,
1111  a,
1112  0.0f,
1113  qScale,
1114  qOffset,
1115  inputData,
1116  qScale,
1117  qOffset,
1118  expectedOutput);
1119 }
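To illustrate the lambda above with the constants shown: for an input of -1.0 and a = 0.01, the expected value is 0.01 * (e^-1 - 1) ≈ -0.0063, while positive inputs such as 2.0 pass through unchanged.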

◆ EluUint8Test()

LayerTestResult<uint8_t, 4> EluUint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 1129 of file ActivationTestImpl.cpp.

Referenced by TEST_SUITE().

1133 {
1134  return EluTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 64);
1135 }

◆ HardSwishInt16Test()

LayerTestResult<int16_t, 4> HardSwishInt16Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 1206 of file ActivationTestImpl.cpp.

Referenced by TEST_SUITE().

1210 {
1211  return HardSwishTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
1212 }

◆ HardSwishTest()

LayerTestResult<float, 4> HardSwishTest ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 1189 of file ActivationTestImpl.cpp.

Referenced by TEST_SUITE().

1193 {
1194  return HardSwishTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
1195 }

◆ HardSwishTestCommon()

LayerTestResult<T, 4> HardSwishTestCommon ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory,
float  qScale,
int32_t  qOffset 
)

Definition at line 1147 of file ActivationTestImpl.cpp.

References armnn::HardSwish.

1153 {
1154  std::vector<float> inputData = {
1155  -0.1f, -0.2f, -0.3f, -0.4f,
1156  0.1f, 0.2f, 0.3f, 0.4f,
1157  -1.0f, -2.0f, -3.0f, -4.0f,
1158  1.0f, 2.0f, 3.0f, 4.0f
1159  };
1160  // Calculate output values for input.
1161  auto f = [](float x)
1162  {
1163  // Break down the calculation to help with verification.
1164  // hard_swish(x) = x * relu6(x+3) / 6
1165  // relu6(x) = min(max(x,0),6)
1166  float reLu6_step1 = std::max((x + 3),0.0f);
1167  float reLu6Complete = std::min(reLu6_step1, 6.0f);
1168  float hardSwish_step1 = x * reLu6Complete;
1169  float result = hardSwish_step1 / 6;
1170  return result;
1171  };
1172  std::vector<float> expectedOutput(inputData.size());
1173  std::transform(inputData.begin(), inputData.end(), expectedOutput.begin(), f);
1174 
1175  return SimpleActivationTest<ArmnnType>(workloadFactory,
1176  memoryManager,
1177  tensorHandleFactory,
1178  armnn::ActivationFunction::HardSwish,
1179  0.f,
1180  0.f,
1181  qScale,
1182  qOffset,
1183  inputData,
1184  qScale,
1185  qOffset,
1186  expectedOutput);
1187 }
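Working the commented formula through for two of the inputs above: for x = -1.0, relu6(-1.0 + 3) = min(max(2.0, 0), 6) = 2.0, so hard_swish(-1.0) = -1.0 * 2.0 / 6 ≈ -0.333; for x = 4.0, relu6(7.0) saturates at 6.0, giving 4.0 * 6.0 / 6 = 4.0.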

◆ HardSwishUint8Test()

LayerTestResult<uint8_t, 4> HardSwishUint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 1197 of file ActivationTestImpl.cpp.

Referenced by TEST_SUITE().

1201 {
1202  return HardSwishTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager,
1203  tensorHandleFactory, 0.1f, 64);
1204 }

◆ LeakyReLuInt16Test()

LayerTestResult<int16_t, 4> LeakyReLuInt16Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 771 of file ActivationTestImpl.cpp.

Referenced by TEST_SUITE().

775 {
776  return LeakyReLuTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
777 }

◆ LeakyReLuTest()

LayerTestResult<float, 4> LeakyReLuTest ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 754 of file ActivationTestImpl.cpp.

Referenced by TEST_SUITE().

758 {
759  return LeakyReLuTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
760 }

◆ LeakyReLuTestCommon()

LayerTestResult<T, 4> LeakyReLuTestCommon ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory,
float  qScale,
int32_t  qOffset 
)

Definition at line 717 of file ActivationTestImpl.cpp.

References armnn::LeakyReLu.

723 {
724  std::vector<float> inputData = {
725  -0.1f, -0.2f, -0.3f, -0.4f,
726  0.1f, 0.2f, 0.3f, 0.4f,
727  -1.0f, -2.0f, -3.0f, -4.0f,
728  1.0f, 2.0f, 3.0f, 4.0f
729  };
730 
731  const float a = 0.01f;
732  // Calculate output values for input.
733  auto f = [a](float value)
734  {
735  return value > 0.0f ? value : (value * a);
736  };
737  std::vector<float> outputExpected(inputData.size());
738  std::transform(inputData.begin(), inputData.end(), outputExpected.begin(), f);
739 
740  return SimpleActivationTest<ArmnnType>(workloadFactory,
741  memoryManager,
742  tensorHandleFactory,
743  armnn::ActivationFunction::LeakyReLu,
744  a,
745  0.f,
746  qScale,
747  qOffset,
748  inputData,
749  qScale,
750  qOffset,
751  outputExpected);
752 }

◆ LeakyReLuUint8Test()

LayerTestResult<uint8_t, 4> LeakyReLuUint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 762 of file ActivationTestImpl.cpp.

Referenced by TEST_SUITE().

766 {
767  return LeakyReLuTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager,
768  tensorHandleFactory, 0.0625f, 64);
769 }

◆ ReLuInt16Test()

LayerTestResult<int16_t, 4> ReLuInt16Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 580 of file ActivationTestImpl.cpp.

Referenced by TEST_SUITE().

584 {
585  return ReLuTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
586 }

◆ ReLuTest()

LayerTestResult<float, 4> ReLuTest ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 597 of file ActivationTestImpl.cpp.

Referenced by TEST_SUITE().

601 {
602  return ReLuTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
603 }

◆ ReLuTestCommon()

LayerTestResult<T, 4> ReLuTestCommon ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory,
float  qScale,
int32_t  qOffset 
)

Definition at line 544 of file ActivationTestImpl.cpp.

References armnn::ReLu.

550 {
551  std::vector<float> inputData = {
552  -0.1f, -0.2f, -0.3f, -0.4f,
553  0.1f, 0.2f, 0.3f, 0.4f,
554  -1.0f, -2.0f, -3.0f, -4.0f,
555  1.0f, 2.0f, 3.0f, 4.0f
556  };
557 
558  // Calculate output values for input.
559  auto f = [](float value)
560  {
561  return std::fmax(0.0f, value);
562  };
563  std::vector<float> outputExpected(inputData.size());
564  std::transform(inputData.begin(), inputData.end(), outputExpected.begin(), f);
565 
566  return SimpleActivationTest<ArmnnType>(workloadFactory,
567  memoryManager,
568  tensorHandleFactory,
569  armnn::ActivationFunction::ReLu,
570  0.f,
571  0.f,
572  qScale,
573  qOffset,
574  inputData,
575  qScale,
576  qOffset,
577  outputExpected);
578 }

◆ ReLuUint8Test()

LayerTestResult<uint8_t, 4> ReLuUint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 589 of file ActivationTestImpl.cpp.

Referenced by TEST_SUITE().

593 {
594  return ReLuTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
595 }

◆ SimpleActivationTest()

LayerTestResult<T, 4> SimpleActivationTest ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory,
armnn::ActivationFunction  activationFunction,
float  activationParameterA,
float  activationParameterB,
float  scale,
int32_t  offset,
const std::vector< float > &  inputData,
float  outScale,
int32_t  outOffset,
const std::vector< float > &  outputExpectedData 
)

Definition at line 405 of file ActivationTestImpl.cpp.

References CopyDataFromITensorHandle(), CopyDataToITensorHandle(), IWorkloadFactory::CreateActivation(), ITensorHandleFactory::CreateTensorHandle(), armnn::IgnoreUnused(), ActivationDescriptor::m_A, ActivationDescriptor::m_B, ActivationDescriptor::m_Function, QueueDescriptorWithParameters< LayerDescriptor >::m_Parameters, and TensorInfo::SetQuantizationScale().

418 {
419  IgnoreUnused(memoryManager);
420  constexpr static unsigned int inputWidth = 16u;
421  constexpr static unsigned int inputHeight = 1u;
422  constexpr static unsigned int inputChannels = 1u;
423  constexpr static unsigned int inputBatchSize = 1u;
424 
425  constexpr static unsigned int outputWidth = inputWidth;
426  constexpr static unsigned int outputHeight = inputHeight;
427  constexpr static unsigned int outputChannels = inputChannels;
428  constexpr static unsigned int outputBatchSize = inputBatchSize;
429 
430  armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth }, ArmnnType);
431  armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth }, ArmnnType);
432 
433  // Set quantization parameters if the requested type is a quantized type.
434  if(armnn::IsQuantizedType<T>())
435  {
436  inputTensorInfo.SetQuantizationScale(scale);
437  inputTensorInfo.SetQuantizationOffset(offset);
438  outputTensorInfo.SetQuantizationScale(outScale);
439  outputTensorInfo.SetQuantizationOffset(outOffset);
440  }
441 
442  std::vector<T> input = armnnUtils::QuantizedVector<T>(inputData, scale, offset);
443 
444  // Calculated outputExpected manually.
445  std::vector<T> actualOutput(outputTensorInfo.GetNumElements());
446  std::vector<T> outputExpected = armnnUtils::QuantizedVector<T>(outputExpectedData, outScale, outOffset);
447 
448  std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
449  std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
450 
451  // Setup bounded ReLu.
452  armnn::ActivationQueueDescriptor descriptor;
453  armnn::WorkloadInfo workloadInfo;
454  AddInputToWorkload(descriptor, workloadInfo, inputTensorInfo, inputHandle.get());
455  AddOutputToWorkload(descriptor, workloadInfo, outputTensorInfo, outputHandle.get());
456 
457  descriptor.m_Parameters.m_Function = activationFunction;
458  descriptor.m_Parameters.m_A = activationParameterA;
459  descriptor.m_Parameters.m_B = activationParameterB;
460 
461  std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateActivation(descriptor, workloadInfo);
462 
463  inputHandle->Allocate();
464  outputHandle->Allocate();
465 
466  CopyDataToITensorHandle(inputHandle.get(), input.data());
467 
468  workload->Execute();
469 
470  CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
471 
472  return LayerTestResult<T, 4>(actualOutput,
473  outputExpected,
474  outputHandle->GetShape(),
475  outputTensorInfo.GetShape());
476 }
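
As a usage illustration (not part of ActivationTestImpl.cpp), a new activation case can be driven through this helper in the same style as the *TestCommon functions below. The LeakyReLuSketchTestCommon name, the 0.01f slope, and the reuse of qScale/qOffset for the output quantization are assumptions for the sketch; the 16 input values match the 1x1x1x16 shape the helper hard-codes, and the sketch assumes the same headers as the existing helpers in this file.

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> LeakyReLuSketchTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    float qScale,
    int32_t qOffset)
{
    const float alpha = 0.01f;   // hypothetical slope for the negative half-axis

    std::vector<float> inputData = {
        -0.1f, -0.2f, -0.3f, -0.4f,
         0.1f,  0.2f,  0.3f,  0.4f,
        -1.0f, -2.0f, -3.0f, -4.0f,
         1.0f,  2.0f,  3.0f,  4.0f
    };

    // Reference LeakyReLu: f(x) = x for x > 0, alpha * x otherwise.
    auto f = [alpha](float value) { return value > 0.0f ? value : alpha * value; };
    std::vector<float> outputExpected(inputData.size());
    std::transform(inputData.begin(), inputData.end(), outputExpected.begin(), f);

    return SimpleActivationTest<ArmnnType>(workloadFactory,
                                           memoryManager,
                                           tensorHandleFactory,
                                           armnn::ActivationFunction::LeakyReLu,
                                           alpha,   // m_A carries the slope for LeakyReLu
                                           0.f,     // m_B unused here
                                           qScale,
                                           qOffset,
                                           inputData,
                                           qScale,
                                           qOffset,
                                           outputExpected);
}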

◆ SimpleSigmoidInt16Test()

LayerTestResult<int16_t, 4> SimpleSigmoidInt16Test ( armnn::IWorkloadFactory &  workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr &  memoryManager,
const armnn::ITensorHandleFactory &  tensorHandleFactory 
)

Definition at line 534 of file ActivationTestImpl.cpp.

Referenced by TEST_SUITE().

538 {
539  return SimpleSigmoidTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager,
540  tensorHandleFactory, 0.1f, 0);
541 }

◆ SimpleSigmoidTest()

LayerTestResult<float, 4> SimpleSigmoidTest ( armnn::IWorkloadFactory &  workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr &  memoryManager,
const armnn::ITensorHandleFactory &  tensorHandleFactory 
)

Definition at line 516 of file ActivationTestImpl.cpp.

Referenced by TEST_SUITE().

520 {
521  return SimpleSigmoidTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager,
522  tensorHandleFactory, 0.0f, 0);
523 }

◆ SimpleSigmoidTestCommon()

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleSigmoidTestCommon ( armnn::IWorkloadFactory &  workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr &  memoryManager,
const armnn::ITensorHandleFactory &  tensorHandleFactory,
float  qScale,
int32_t  qOffset 
)

Definition at line 479 of file ActivationTestImpl.cpp.

References armnn::Sigmoid.

485 {
486  std::vector<float> inputData =
487  {
488  -0.1f, -0.2f, -0.3f, -0.4f,
489  0.1f, 0.2f, 0.3f, 0.4f,
490  -1.0f, -2.0f, -3.0f, -4.0f,
491  1.0f, 2.0f, 3.0f, 4.0f
492  };
493 
494  // Calculate output values for input.
495  auto f = [](float value)
496  {
497  return 1.0f / (1.0f + std::exp(-value));
498  };
499  std::vector<float> m_OutputExpected(inputData.size());
500  std::transform(inputData.begin(), inputData.end(), m_OutputExpected.begin(), f);
501 
502  return SimpleActivationTest<ArmnnType>(workloadFactory,
503  memoryManager,
504  tensorHandleFactory,
505  armnn::ActivationFunction::Sigmoid,
506  0.f,
507  0.f,
508  qScale,
509  qOffset,
510  inputData,
511  1.f / 256.f,
512  0,
513  m_OutputExpected);
514 }
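
The fixed output quantization used above (scale 1/256, offset 0) fits the sigmoid's (0, 1) output range. A standalone sketch of that mapping in plain C++ (the QuantizeSigmoidOutput helper name is illustrative and not part of ArmNN):

#include <cmath>
#include <cstdint>

// Quantize a sigmoid output with scale 1/256 and offset 0, as the test expects.
uint8_t QuantizeSigmoidOutput(float x)
{
    const float y = 1.0f / (1.0f + std::exp(-x));   // sigmoid, as in the lambda above
    const float outScale = 1.0f / 256.0f;
    const int32_t outOffset = 0;
    return static_cast<uint8_t>(std::round(y / outScale) + outOffset);
}
// e.g. QuantizeSigmoidOutput(0.0f) == 128 and QuantizeSigmoidOutput(4.0f) is roughly 251,
// both well inside the QAsymmU8 range [0, 255].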

◆ SimpleSigmoidUint8Test()

LayerTestResult<uint8_t, 4> SimpleSigmoidUint8Test ( armnn::IWorkloadFactory &  workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr &  memoryManager,
const armnn::ITensorHandleFactory &  tensorHandleFactory 
)

Definition at line 525 of file ActivationTestImpl.cpp.

Referenced by TEST_SUITE().

529 {
530  return SimpleSigmoidTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager,
531  tensorHandleFactory, 0.1f, 50);
532 }

◆ SoftReLuInt16Test()

LayerTestResult<int16_t, 4> SoftReLuInt16Test ( armnn::IWorkloadFactory &  workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr &  memoryManager,
const armnn::ITensorHandleFactory &  tensorHandleFactory 
)

Definition at line 708 of file ActivationTestImpl.cpp.

Referenced by TEST_SUITE().

712 {
713  return SoftReLuTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
714 }

◆ SoftReLuTest()

LayerTestResult<float, 4> SoftReLuTest ( armnn::IWorkloadFactory &  workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr &  memoryManager,
const armnn::ITensorHandleFactory &  tensorHandleFactory 
)

Definition at line 691 of file ActivationTestImpl.cpp.

Referenced by TEST_SUITE().

695 {
696  return SoftReLuTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
697 }

◆ SoftReLuTestCommon()

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SoftReLuTestCommon ( armnn::IWorkloadFactory &  workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr &  memoryManager,
const armnn::ITensorHandleFactory &  tensorHandleFactory,
float  qScale,
int32_t  qOffset 
)

Definition at line 655 of file ActivationTestImpl.cpp.

References armnn::SoftReLu.

661 {
662  std::vector<float> inputData = {
663  -0.1f, -0.2f, -0.3f, -0.4f,
664  0.1f, 0.2f, 0.3f, 0.4f,
665  -1.0f, -2.0f, -3.0f, -4.0f,
666  1.0f, 2.0f, 3.0f, 4.0f
667  };
668 
669  // Calculate output values for input.
670  auto f = [](float value)
671  {
672  return std::log(1.0f + std::exp(value));
673  };
674  std::vector<float> outputExpected(inputData.size());
675  std::transform(inputData.begin(), inputData.end(), outputExpected.begin(), f);
676 
677  return SimpleActivationTest<ArmnnType>(workloadFactory,
678  memoryManager,
679  tensorHandleFactory,
680  armnn::ActivationFunction::SoftReLu,
681  0.f,
682  0.f,
683  qScale,
684  qOffset,
685  inputData,
686  qScale,
687  qOffset,
688  outputExpected);
689 }
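
For orientation, a few hand-computed values of the softplus lambda above (rounded, illustrative only):

// log(1.0f + exp(x)) for some of the test inputs:
//   x = -4.0f  ->  ~0.018f
//   x =  0.4f  ->  ~0.913f
//   x =  4.0f  ->  ~4.018f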

◆ SoftReLuUint8Test()

LayerTestResult<uint8_t, 4> SoftReLuUint8Test ( armnn::IWorkloadFactory &  workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr &  memoryManager,
const armnn::ITensorHandleFactory &  tensorHandleFactory 
)

Definition at line 699 of file ActivationTestImpl.cpp.

Referenced by TEST_SUITE().

703 {
704  return SoftReLuTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager,
705  tensorHandleFactory, 0.0625f, 64);
706 }
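
The 0.0625 scale and 64 offset give the QAsymmU8 tensor a representable range that covers both the negative test inputs and the softplus outputs; a quick check of that range (plain arithmetic, not from the test code):

// Representable real-value range for QAsymmU8 with scale 0.0625f and offset 64:
//   lowest  = (0   - 64) * 0.0625f = -4.0f
//   highest = (255 - 64) * 0.0625f = 11.9375f
// which comfortably contains the inputs in [-4, 4] and outputs in (0, ~4.02).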

◆ SqrtInt16Test()

LayerTestResult<int16_t, 4> SqrtInt16Test ( armnn::IWorkloadFactory &  workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr &  memoryManager,
const armnn::ITensorHandleFactory &  tensorHandleFactory 
)

Definition at line 948 of file ActivationTestImpl.cpp.

Referenced by TEST_SUITE().

952 {
953  return SqrtTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
954 }

◆ SqrtNNTest()

LayerTestResult<float, 5> SqrtNNTest ( armnn::IWorkloadFactory &  workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr &  memoryManager,
const armnn::ITensorHandleFactory &  tensorHandleFactory 
)

Definition at line 840 of file ActivationTestImpl.cpp.

References CopyDataFromITensorHandle(), CopyDataToITensorHandle(), IWorkloadFactory::CreateActivation(), ITensorHandleFactory::CreateTensorHandle(), armnn::Float32, armnn::IgnoreUnused(), ActivationDescriptor::m_Function, QueueDescriptorWithParameters< LayerDescriptor >::m_Parameters, and armnn::Sqrt.

Referenced by TEST_SUITE().

844 {
845  IgnoreUnused(memoryManager);
846  const int inputDataSize = 120;
847  std::vector<float> inputData(inputDataSize);
848 
849  for (unsigned int i = 0u; i < inputDataSize; ++i)
850  {
851  inputData[i] = static_cast<float>(i) / 10;
852  }
853 
854  auto f = [](float value)
855  {
856  return std::sqrt(value);
857  };
858  std::vector<float> expectedOutput(inputDataSize);
859  std::transform(inputData.begin(), inputData.end(), expectedOutput.begin(), f);
860 
861  armnn::TensorInfo inputTensorInfo(
862  { 1u, 2u, 3u, 4u, 5u }, armnn::DataType::Float32);
863  armnn::TensorInfo outputTensorInfo(
864  { 1u, 2u, 3u, 4u, 5u }, armnn::DataType::Float32);
865 
866  std::vector<float> actualOutput(outputTensorInfo.GetNumElements());
867 
868  std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
869  std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
870 
871  armnn::ActivationQueueDescriptor descriptor;
872  armnn::WorkloadInfo workloadInfo;
873  AddInputToWorkload(descriptor, workloadInfo, inputTensorInfo, inputHandle.get());
874  AddOutputToWorkload(descriptor, workloadInfo, outputTensorInfo, outputHandle.get());
875 
876  descriptor.m_Parameters.m_Function = armnn::ActivationFunction::Sqrt;
877 
878  std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateActivation(descriptor, workloadInfo);
879 
880  inputHandle->Allocate();
881  outputHandle->Allocate();
882 
883  CopyDataToITensorHandle(inputHandle.get(), inputData.data());
884 
885  workload->Execute();
886 
887  CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
888 
889  return LayerTestResult<float, 5>(actualOutput,
890  expectedOutput,
891  outputHandle->GetShape(),
892  outputTensorInfo.GetShape());
893 };
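
The 120-element input matches the 5-D shape exactly; a trivial check of that relationship (illustrative only, not in the source):

static_assert(1u * 2u * 3u * 4u * 5u == 120u,
              "the 1x2x3x4x5 test shape holds exactly 120 elements");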

◆ SqrtTest()

LayerTestResult<float, 4> SqrtTest ( armnn::IWorkloadFactory &  workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr &  memoryManager,
const armnn::ITensorHandleFactory &  tensorHandleFactory 
)

Definition at line 932 of file ActivationTestImpl.cpp.

Referenced by TEST_SUITE().

936 {
937  return SqrtTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
938 }

◆ SqrtTestCommon()

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SqrtTestCommon ( armnn::IWorkloadFactory &  workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr &  memoryManager,
const armnn::ITensorHandleFactory &  tensorHandleFactory,
float  qScale,
int32_t  qOffset 
)

Definition at line 896 of file ActivationTestImpl.cpp.

References armnn::Sqrt.

902 {
903  std::vector<float> inputData = {
904  0.1f, 0.2f, 0.3f, 0.4f,
905  0.1f, 0.2f, 0.3f, 0.4f,
906  1.0f, 2.0f, 3.0f, 4.0f,
907  1.0f, 2.0f, 3.0f, 4.0f
908  };
909 
910  // Calculate output values for input.
911  auto f = [](float value)
912  {
913  return std::sqrt(value);
914  };
915  std::vector<float> expectedOutput(inputData.size());
916  std::transform(inputData.begin(), inputData.end(), expectedOutput.begin(), f);
917 
918  return SimpleActivationTest<ArmnnType>(workloadFactory,
919  memoryManager,
920  tensorHandleFactory,
921  armnn::ActivationFunction::Sqrt,
922  0.f,
923  0.f,
924  qScale,
925  qOffset,
926  inputData,
927  qScale,
928  qOffset,
929  expectedOutput);
930 }
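
All inputs above are non-negative, so the std::sqrt reference stays well defined; a few of the resulting expected values (rounded, illustrative only):

//   sqrt(0.1f) ~ 0.3162f
//   sqrt(0.4f) ~ 0.6325f
//   sqrt(4.0f) =  2.0f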

◆ SqrtUint8Test()

LayerTestResult<uint8_t, 4> SqrtUint8Test ( armnn::IWorkloadFactory &  workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr &  memoryManager,
const armnn::ITensorHandleFactory &  tensorHandleFactory 
)

Definition at line 940 of file ActivationTestImpl.cpp.

Referenced by TEST_SUITE().

944 {
945  return SqrtTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, 0.0625f, 64);
946 }

◆ SquareInt16Test()

LayerTestResult<int16_t, 4> SquareInt16Test ( armnn::IWorkloadFactory &  workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr &  memoryManager,
const armnn::ITensorHandleFactory &  tensorHandleFactory 
)

Definition at line 1010 of file ActivationTestImpl.cpp.

Referenced by TEST_SUITE().

1014 {
1015  return SquareTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
1016 }

◆ SquareTest()

LayerTestResult<float, 4> SquareTest ( armnn::IWorkloadFactory &  workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr &  memoryManager,
const armnn::ITensorHandleFactory &  tensorHandleFactory 
)

Definition at line 993 of file ActivationTestImpl.cpp.

Referenced by TEST_SUITE().

997 {
998  return SquareTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
999 }

◆ SquareTestCommon()

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SquareTestCommon ( armnn::IWorkloadFactory &  workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr &  memoryManager,
const armnn::ITensorHandleFactory &  tensorHandleFactory,
float  qScale,
int32_t  qOffset 
)

Definition at line 957 of file ActivationTestImpl.cpp.

References armnn::Square.

963 {
964  std::vector<float> inputData = {
965  -0.1f, -0.2f, -0.3f, -0.4f,
966  0.1f, 0.2f, 0.3f, 0.4f,
967  -1.0f, -2.0f, -3.0f, -4.0f,
968  1.0f, 2.0f, 3.0f, 4.0f
969  };
970 
971  // Calculate output values for input.
972  auto f = [](float value)
973  {
974  return std::pow(value,2);
975  };
976  std::vector<float> expectedOutput(inputData.size());
977  std::transform(inputData.begin(), inputData.end(), expectedOutput.begin(), f);
978 
979  return SimpleActivationTest<ArmnnType>(workloadFactory,
980  memoryManager,
981  tensorHandleFactory,
982  armnn::ActivationFunction::Square,
983  0.f,
984  0.f,
985  qScale,
986  qOffset,
987  inputData,
988  qScale,
989  qOffset,
990  expectedOutput);
991 }
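
An equivalent reference lambda without std::pow, should anyone prefer it (a sketch only, not the code in the file):

auto square = [](float value) { return value * value; };
// e.g. square(-0.3f) ~ 0.09f, square(4.0f) == 16.0f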

◆ SquareUint8Test()

LayerTestResult<uint8_t, 4> SquareUint8Test ( armnn::IWorkloadFactory &  workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr &  memoryManager,
const armnn::ITensorHandleFactory &  tensorHandleFactory 
)

Definition at line 1001 of file ActivationTestImpl.cpp.

Referenced by TEST_SUITE().

1005 {
1006  return SquareTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager,
1007  tensorHandleFactory, 0.0625f, 64);
1008 }

◆ TanhInt16Test()

LayerTestResult<int16_t, 4> TanhInt16Test ( armnn::IWorkloadFactory &  workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr &  memoryManager,
const armnn::ITensorHandleFactory &  tensorHandleFactory 
)

Definition at line 1073 of file ActivationTestImpl.cpp.

Referenced by TEST_SUITE().

1077 {
1078  return TanhTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
1079 }

◆ TanhTest()

LayerTestResult<float, 4> TanhTest ( armnn::IWorkloadFactory &  workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr &  memoryManager,
const armnn::ITensorHandleFactory &  tensorHandleFactory 
)

Definition at line 1057 of file ActivationTestImpl.cpp.

Referenced by TEST_SUITE().

1061 {
1062  return TanhTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
1063 }

◆ TanhTestCommon()

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> TanhTestCommon ( armnn::IWorkloadFactory &  workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr &  memoryManager,
const armnn::ITensorHandleFactory &  tensorHandleFactory,
float  qScale,
int32_t  qOffset 
)

Definition at line 1019 of file ActivationTestImpl.cpp.

References armnn::TanH.

1025 {
1026  std::vector<float> inputData = {
1027  -0.1f, -0.2f, -0.3f, -0.4f,
1028  0.1f, 0.2f, 0.3f, 0.4f,
1029  -1.0f, -2.0f, -3.0f, -4.0f,
1030  1.0f, 2.0f, 3.0f, 4.0f
1031  };
1032 
1033  const float a = 2.0f;
1034  const float b = 3.0f;
1035  // Calculate output values for input.
1036  auto f = [a, b](float value)
1037  {
1038  return a * tanhf(b * value);
1039  };
1040  std::vector<float> expectedOutput(inputData.size());
1041  std::transform(inputData.begin(), inputData.end(), expectedOutput.begin(), f);
1042 
1043  return SimpleActivationTest<ArmnnType>(workloadFactory,
1044  memoryManager,
1045  tensorHandleFactory,
1046  armnn::ActivationFunction::TanH,
1047  a,
1048  b,
1049  qScale,
1050  qOffset,
1051  inputData,
1052  qScale,
1053  qOffset,
1054  expectedOutput);
1055 }
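
With a = 2 and b = 3, the reference lambda computes 2 * tanh(3 * x); a few hand-computed values (rounded, illustrative only):

//   f( 0.1f)  ->  ~0.583f    // 2 * tanh(0.3)
//   f(-1.0f)  ->  ~-1.990f   // 2 * tanh(-3.0)
//   f( 4.0f)  ->  ~2.000f    // tanh saturates near 1 for 3 * 4 = 12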

◆ TanhUint8Test()

LayerTestResult<uint8_t, 4> TanhUint8Test ( armnn::IWorkloadFactory &  workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr &  memoryManager,
const armnn::ITensorHandleFactory &  tensorHandleFactory 
)

Definition at line 1065 of file ActivationTestImpl.cpp.

Referenced by TEST_SUITE().

1069 {
1070  return TanhTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 64);
1071 }