ArmNN
 21.02
ActivationTestImpl.cpp File Reference

Go to the source code of this file.

Functions

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult< T, 4 > BoundedReLuTestCommon (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, float upperBound, float lowerBound, float inputScale, int32_t inputOffset, float outputScale, int32_t outputOffset, const std::vector< T > &inputData, const std::vector< T > &outputExpectedData, unsigned int inputWidth, unsigned int inputHeight, unsigned int inputChannels, unsigned int inputBatchSize)
 
LayerTestResult< float, 4 > BoundedReLuUpperAndLowerBoundTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< float, 4 > BoundedReLuUpperBoundOnlyTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< uint8_t, 4 > BoundedReLuUint8UpperBoundOnlyTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< uint8_t, 4 > BoundedReLuUint8UpperAndLowerBoundTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< float, 4 > CompareBoundedReLuTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, armnn::IWorkloadFactory &refWorkloadFactory, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::ITensorHandleFactory &refTensorHandleFactory, float upperBound, float lowerBound)
 
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult< T, 4 > ConstantLinearActivationTestCommon (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, float qScale=0.0f, int32_t qOffset=0)
 
LayerTestResult< float, 4 > ConstantLinearActivationTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< uint8_t, 4 > ConstantLinearActivationUint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< int16_t, 4 > ConstantLinearActivationInt16Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult< T, 4 > SimpleActivationTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, armnn::ActivationFunction activationFunction, float activationParameterA, float activationParameterB, float scale, int32_t offset, const std::vector< float > &inputData, float outScale, int32_t outOffset, const std::vector< float > &outputExpectedData)
 
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult< T, 4 > SimpleSigmoidTestCommon (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, float qScale, int32_t qOffset)
 
LayerTestResult< float, 4 > SimpleSigmoidTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< uint8_t, 4 > SimpleSigmoidUint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< int16_t, 4 > SimpleSigmoidInt16Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult< T, 4 > ReLuTestCommon (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, float qScale, int32_t qOffset)
 
LayerTestResult< int16_t, 4 > ReLuInt16Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< uint8_t, 4 > ReLuUint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< float, 4 > ReLuTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult< T, 4 > BoundedReLuTestCommon (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, float qScale, int32_t qOffset)
 
LayerTestResult< int16_t, 4 > BoundedReLuInt16Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult< T, 4 > SoftReLuTestCommon (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, float qScale, int32_t qOffset)
 
LayerTestResult< float, 4 > SoftReLuTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< uint8_t, 4 > SoftReLuUint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< int16_t, 4 > SoftReLuInt16Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult< T, 4 > LeakyReLuTestCommon (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, float qScale, int32_t qOffset)
 
LayerTestResult< float, 4 > LeakyReLuTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< uint8_t, 4 > LeakyReLuUint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< int16_t, 4 > LeakyReLuInt16Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult< T, 4 > AbsTestCommon (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, float qScale, int32_t qOffset)
 
LayerTestResult< float, 4 > AbsTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< uint8_t, 4 > AbsUint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< int16_t, 4 > AbsInt16Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< float, 5 > SqrtNNTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult< T, 4 > SqrtTestCommon (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, float qScale, int32_t qOffset)
 
LayerTestResult< float, 4 > SqrtTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< uint8_t, 4 > SqrtUint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< int16_t, 4 > SqrtInt16Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult< T, 4 > SquareTestCommon (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, float qScale, int32_t qOffset)
 
LayerTestResult< float, 4 > SquareTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< uint8_t, 4 > SquareUint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< int16_t, 4 > SquareInt16Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult< T, 4 > TanhTestCommon (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, float qScale, int32_t qOffset)
 
LayerTestResult< float, 4 > TanhTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< uint8_t, 4 > TanhUint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< int16_t, 4 > TanhInt16Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult< T, 4 > EluTestCommon (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, float qScale, int32_t qOffset)
 
LayerTestResult< float, 4 > EluTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< uint8_t, 4 > EluUint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< int16_t, 4 > EluInt16Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult< T, 4 > HardSwishTestCommon (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, float qScale, int32_t qOffset)
 
LayerTestResult< float, 4 > HardSwishTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< uint8_t, 4 > HardSwishUint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< int16_t, 4 > HardSwishInt16Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult< T, 4 > CompareActivationTestImpl (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, armnn::IWorkloadFactory &refWorkloadFactory, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::ITensorHandleFactory &refTensorHandleFactory, armnn::ActivationFunction f, unsigned int batchSize=5, float qScale=0.0f, int32_t qOffset=0)
 
LayerTestResult< float, 4 > CompareActivationTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, armnn::IWorkloadFactory &refWorkloadFactory, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::ITensorHandleFactory &refTensorHandleFactory, armnn::ActivationFunction f, unsigned int batchSize)
 
LayerTestResult< uint8_t, 4 > CompareActivationUint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, armnn::IWorkloadFactory &refWorkloadFactory, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::ITensorHandleFactory &refTensorHandleFactory, armnn::ActivationFunction f)
 
LayerTestResult< int16_t, 4 > CompareActivationInt16Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, armnn::IWorkloadFactory &refWorkloadFactory, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::ITensorHandleFactory &refTensorHandleFactory, armnn::ActivationFunction f)
 

Function Documentation

◆ AbsInt16Test()

LayerTestResult<int16_t, 4> AbsInt16Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 833 of file ActivationTestImpl.cpp.

837 {
838  return AbsTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
839 }

◆ AbsTest()

LayerTestResult<float, 4> AbsTest ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 817 of file ActivationTestImpl.cpp.

821 {
822  return AbsTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
823 }

◆ AbsTestCommon()

LayerTestResult<T, 4> AbsTestCommon ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory,
float  qScale,
int32_t  qOffset 
)

Definition at line 781 of file ActivationTestImpl.cpp.

References armnn::Abs.

787 {
788  std::vector<float> inputData = {
789  -0.1f, -0.2f, -0.3f, -0.4f,
790  0.1f, 0.2f, 0.3f, 0.4f,
791  -1.0f, -2.0f, -3.0f, -4.0f,
792  1.0f, 2.0f, 3.0f, 4.0f
793  };
794 
795  // Calculate output values for input.
796  auto f = [](float value)
797  {
798  return std::abs(value);
799  };
800  std::vector<float> outputExpectedData(inputData.size());
801  std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
802 
803  return SimpleActivationTest<ArmnnType>(workloadFactory,
804  memoryManager,
805  tensorHandleFactory,
806  armnn::ActivationFunction::Abs,
807  0.f,
808  0.f,
809  qScale,
810  qOffset,
811  inputData,
812  qScale,
813  qOffset,
814  outputExpectedData);
815 }

◆ AbsUint8Test()

LayerTestResult<uint8_t, 4> AbsUint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 825 of file ActivationTestImpl.cpp.

829 {
830  return AbsTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, 0.0625f, 64);
831 }

◆ BoundedReLuInt16Test()

LayerTestResult<int16_t, 4> BoundedReLuInt16Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 645 of file ActivationTestImpl.cpp.

649 {
650  return ReLuTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
651 }

◆ BoundedReLuTestCommon() [1/2]

LayerTestResult<T, 4> BoundedReLuTestCommon ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory,
float  upperBound,
float  lowerBound,
float  inputScale,
int32_t  inputOffset,
float  outputScale,
int32_t  outputOffset,
const std::vector< T > &  inputData,
const std::vector< T > &  outputExpectedData,
unsigned int  inputWidth,
unsigned int  inputHeight,
unsigned int  inputChannels,
unsigned int  inputBatchSize 
)

Definition at line 25 of file ActivationTestImpl.cpp.

References armnn::BoundedReLu, CopyDataFromITensorHandle(), CopyDataToITensorHandle(), IWorkloadFactory::CreateActivation(), ITensorHandleFactory::CreateTensorHandle(), armnn::IgnoreUnused(), ActivationDescriptor::m_A, ActivationDescriptor::m_B, ActivationDescriptor::m_Function, QueueDescriptorWithParameters< LayerDescriptor >::m_Parameters, LayerTestResult< T, n >::output, LayerTestResult< T, n >::outputExpected, and TensorInfo::SetQuantizationScale().

41 {
42  IgnoreUnused(memoryManager);
43  unsigned int outputWidth = inputWidth;
44  unsigned int outputHeight = inputHeight;
45  unsigned int outputChannels = inputChannels;
46  unsigned int outputBatchSize = inputBatchSize;
47 
48  armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth }, ArmnnType);
49 
50  armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth }, ArmnnType);
51 
52  if(armnn::IsQuantizedType<T>())
53  {
54  inputTensorInfo.SetQuantizationScale(inputScale);
55  inputTensorInfo.SetQuantizationOffset(inputOffset);
56 
57  outputTensorInfo.SetQuantizationScale(outputScale);
58  outputTensorInfo.SetQuantizationOffset(outputOffset);
59  }
60 
61  LayerTestResult<T, 4> result(inputTensorInfo);
62 
63  auto input = MakeTensor<T, 4>(inputTensorInfo, inputData);
64 
65  std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
66  std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
67 
68  // Setup bounded ReLu.
69  armnn::ActivationQueueDescriptor descriptor;
70  armnn::WorkloadInfo workloadInfo;
71  AddInputToWorkload(descriptor, workloadInfo, inputTensorInfo, inputHandle.get());
72  AddOutputToWorkload(descriptor, workloadInfo, outputTensorInfo, outputHandle.get());
73 
74  descriptor.m_Parameters.m_Function = armnn::ActivationFunction::BoundedReLu;
75  descriptor.m_Parameters.m_A = upperBound;
76  descriptor.m_Parameters.m_B = lowerBound;
77 
78  std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateActivation(descriptor, workloadInfo);
79 
80  inputHandle->Allocate();
81  outputHandle->Allocate();
82 
83  CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
84 
85  workload->Execute();
86 
87  CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
88 
89  result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputExpectedData);
90 
91  return result;
92 }
void IgnoreUnused(Ts &&...)
virtual std::unique_ptr< IWorkload > CreateActivation(const ActivationQueueDescriptor &descriptor, const WorkloadInfo &info) const
void SetQuantizationScale(float scale)
Definition: Tensor.cpp:464
void CopyDataFromITensorHandle(void *memory, const armnn::ITensorHandle *tensorHandle)
min(a, max(b, input)) ReLu1 & ReLu6.
float m_A
Alpha upper bound value used by the activation functions. (BoundedReLu, Linear, TanH, Elu).
Definition: Descriptors.hpp:50
Contains information about inputs and outputs to a layer.
float m_B
Beta lower bound value used by the activation functions. (BoundedReLu, Linear, TanH).
Definition: Descriptors.hpp:52
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo) const =0
ActivationFunction m_Function
The activation function to use (Sigmoid, TanH, Linear, ReLu, BoundedReLu, SoftReLu, LeakyReLu, Abs, Sqrt, Square, Elu).
Definition: Descriptors.hpp:48
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)

◆ BoundedReLuTestCommon() [2/2]

LayerTestResult<T, 4> BoundedReLuTestCommon ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory,
float  qScale,
int32_t  qOffset 
)

Definition at line 608 of file ActivationTestImpl.cpp.

References armnn::BoundedReLu.

614 {
615  std::vector<float> inputData = {
616  -0.1f, -0.2f, -0.3f, -0.4f,
617  0.1f, 0.2f, 0.3f, 0.4f,
618  -1.0f, -2.0f, -3.0f, -4.0f,
619  1.0f, 2.0f, 3.0f, 4.0f
620  };
621  const float a = 1.0f;
622  const float b = -1.0f;
623  // Calculate output values for input.
624  auto f = [a, b](float value)
625  {
626  return std::min(a, std::max(b, value));
627  };
628  std::vector<float> outputExpectedData(inputData.size());
629  std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
630 
631  return SimpleActivationTest<ArmnnType>(workloadFactory,
632  memoryManager,
633  tensorHandleFactory,
634  armnn::ActivationFunction::BoundedReLu,
635  a,
636  b,
637  qScale,
638  qOffset,
639  inputData,
640  qScale,
641  qOffset,
642  outputExpectedData);
643 }
min(a, max(b, input)) ReLu1 & ReLu6.

◆ BoundedReLuUint8UpperAndLowerBoundTest()

LayerTestResult<uint8_t, 4> BoundedReLuUint8UpperAndLowerBoundTest ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 190 of file ActivationTestImpl.cpp.

References CopyDataFromITensorHandle(), CopyDataToITensorHandle(), IWorkloadFactory::CreateActivation(), ITensorHandleFactory::CreateTensorHandle(), armnn::Float32, armnn::GetInputTensorInfo(), armnn::IgnoreUnused(), and QueueDescriptorWithParameters< LayerDescriptor >::m_Parameters.

194 {
195  unsigned int inputWidth = 3u;
196  unsigned int inputHeight = 2u;
197  unsigned int inputChannels = 1u;
198  unsigned int inputBatchSize = 1;
199 
200  std::vector<uint8_t> input = std::vector<uint8_t>{
201  51, 230, 28,
202  251, 8, 92
203  };
204 
205  // Calculated manually.
206  std::vector<uint8_t> output = std::vector<uint8_t>{
207  51, 192, 32,
208  192, 32, 92
209  };
210 
211  int32_t inputOffset = 112;
212  float inputScale = 0.0125f;
213 
214  return BoundedReLuTestCommon<armnn::DataType::QAsymmU8>(
215  workloadFactory, memoryManager, tensorHandleFactory, 1.0f, -1.0f,
216  inputScale, inputOffset, inputScale, inputOffset, // Input/output scale & offset same.
217  input, output, inputWidth, inputHeight, inputChannels, inputBatchSize);
218 }

◆ BoundedReLuUint8UpperBoundOnlyTest()

LayerTestResult<uint8_t, 4> BoundedReLuUint8UpperBoundOnlyTest ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 158 of file ActivationTestImpl.cpp.

162 {
163  unsigned int inputWidth = 3u;
164  unsigned int inputHeight = 2u;
165  unsigned int inputChannels = 1u;
166  unsigned int inputBatchSize = 1;
167 
168  std::vector<uint8_t> input = std::vector<uint8_t>{
169  51, 124, 28,
170  251, 8, 92
171  };
172 
173  // Calculated manually.
174  std::vector<uint8_t> output = std::vector<uint8_t>{
175  0, 122, 0,
176  255, 0, 58
177  };
178 
179  float inputScale = 12.0f / 255.0f;
180  int32_t inputOffset = 63;
181  float outputScale = 6.0f / 255.0f;
182  int32_t outputOffset = 0;
183 
184  return BoundedReLuTestCommon<armnn::DataType::QAsymmU8>(
185  workloadFactory, memoryManager, tensorHandleFactory, 6.0f, 0.0f,
186  inputScale, inputOffset, outputScale, outputOffset,
187  input, output, inputWidth, inputHeight, inputChannels, inputBatchSize);
188 }

◆ BoundedReLuUpperAndLowerBoundTest()

LayerTestResult<float, 4> BoundedReLuUpperAndLowerBoundTest ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 94 of file ActivationTestImpl.cpp.

98 {
99  unsigned int inputWidth = 4u;
100  unsigned int inputHeight = 5u;
101  unsigned int inputChannels = 1u;
102  unsigned int inputBatchSize = 1;
103 
104  std::vector<float> input = std::vector<float>{
105  -2.0f, 0.1f, 0.5f, 1.25f,
106  0.786f, 0.9875f, -1.5f, 0.384f,
107  1.0001f, 3.5f, 7.5f, 0.896f,
108  2.126f, 2.0f, 0.3f, 0.15f,
109  0.999f, 1.2f, 0.89f, 6.1f,
110  };
111 
112  // Calculated manually.
113  std::vector<float> output = std::vector<float>{
114  -1.0f, 0.1f, 0.5f, 1.0f,
115  0.786f, 0.9875f, -1.0f, 0.384f,
116  1.0f, 1.0f, 1.0f, 0.896f,
117  1.0f, 1.0f, 0.3f, 0.15f,
118  0.999f, 1.0f, 0.89f, 1.0f,
119  };
120 
121  return BoundedReLuTestCommon<armnn::DataType::Float32>(
122  workloadFactory, memoryManager, tensorHandleFactory, 1.0f, -1.0f, 1.0f, 0, 1.0f, 0, input, output,
123  inputWidth, inputHeight, inputChannels, inputBatchSize);
124 }

◆ BoundedReLuUpperBoundOnlyTest()

LayerTestResult<float, 4> BoundedReLuUpperBoundOnlyTest ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 126 of file ActivationTestImpl.cpp.

130 {
131  unsigned int inputWidth = 4u;
132  unsigned int inputHeight = 5u;
133  unsigned int inputChannels = 1u;
134  unsigned int inputBatchSize = 1;
135 
136  std::vector<float> input = std::vector<float>{
137  -1.0f, 0.1f, 0.5f, 6.25f,
138  0.786f, 5.9875f, -0.5f, 0.384f,
139  6.0001f, 3.5f, 7.5f, 0.896f,
140  2.126f, 12.0f, 0.3f, 0.15f,
141  0.999f, 1.2f, 0.89f, 6.1f,
142  };
143 
144  // Calculated manually.
145  std::vector<float> output = std::vector<float>{
146  0.0f, 0.1f, 0.5f, 6.0f,
147  0.786f, 5.9875f, 0.0f, 0.384f,
148  6.0f, 3.5f, 6.0f, 0.896f,
149  2.126f, 6.0f, 0.3f, 0.15f,
150  0.999f, 1.2f, 0.89f, 6.0f,
151  };
152 
153  return BoundedReLuTestCommon<armnn::DataType::Float32>(
154  workloadFactory, memoryManager, tensorHandleFactory, 6.0f, 0.0f, 1.0f, 0, 1.0f, 0, input, output,
155  inputWidth, inputHeight, inputChannels, inputBatchSize);
156 }

◆ CompareActivationInt16Test()

LayerTestResult<int16_t,4> CompareActivationInt16Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
armnn::IWorkloadFactory refWorkloadFactory,
const armnn::ITensorHandleFactory tensorHandleFactory,
const armnn::ITensorHandleFactory refTensorHandleFactory,
armnn::ActivationFunction  f 
)

Definition at line 1341 of file ActivationTestImpl.cpp.

1348 {
1349  return CompareActivationTestImpl<armnn::DataType::QSymmS16>(
1350  workloadFactory, memoryManager, refWorkloadFactory, tensorHandleFactory,
1351  refTensorHandleFactory, f, 5, 0.1f, 0);
1352 }

◆ CompareActivationTest()

LayerTestResult<float,4> CompareActivationTest ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
armnn::IWorkloadFactory refWorkloadFactory,
const armnn::ITensorHandleFactory tensorHandleFactory,
const armnn::ITensorHandleFactory refTensorHandleFactory,
armnn::ActivationFunction  f,
unsigned int  batchSize 
)

Definition at line 1314 of file ActivationTestImpl.cpp.

1322 {
1323  return CompareActivationTestImpl<armnn::DataType::Float32>(
1324  workloadFactory, memoryManager, refWorkloadFactory, tensorHandleFactory,
1325  refTensorHandleFactory, f, batchSize);
1326 }

◆ CompareActivationTestImpl()

LayerTestResult<T,4> CompareActivationTestImpl ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
armnn::IWorkloadFactory refWorkloadFactory,
const armnn::ITensorHandleFactory tensorHandleFactory,
const armnn::ITensorHandleFactory refTensorHandleFactory,
armnn::ActivationFunction  f,
unsigned int  batchSize = 5,
float  qScale = 0.0f,
int32_t  qOffset = 0 
)

Definition at line 1219 of file ActivationTestImpl.cpp.

References ARMNN_ASSERT, CopyDataFromITensorHandle(), CopyDataToITensorHandle(), IWorkloadFactory::CreateActivation(), ITensorHandleFactory::CreateTensorHandle(), armnn::IgnoreUnused(), ActivationDescriptor::m_A, ActivationDescriptor::m_B, ActivationDescriptor::m_Function, QueueDescriptorWithParameters< LayerDescriptor >::m_Parameters, armnn::numeric_cast(), TensorInfo::SetQuantizationOffset(), TensorInfo::SetQuantizationScale(), and armnn::Sqrt.

1229 {
1230  IgnoreUnused(memoryManager);
1231  unsigned int width = 17;
1232  unsigned int height = 29;
1233  unsigned int channels = 2;
1234 
1235  float a = 0.234f;
1236  float b = -12.345f;
1237 
1238  armnn::TensorInfo inputTensorInfo;
1239  armnn::TensorInfo outputTensorInfo;
1240 
1241  unsigned int shape[] = {batchSize, channels, height, width};
1242 
1243  inputTensorInfo = armnn::TensorInfo(4, shape, ArmnnType);
1244  outputTensorInfo = armnn::TensorInfo(4, shape, ArmnnType);
1245 
1246  // Set quantization parameters if the requested type is a quantized type.
1247  if(armnn::IsQuantizedType<T>())
1248  {
1249  inputTensorInfo.SetQuantizationScale(qScale);
1250  inputTensorInfo.SetQuantizationOffset(qOffset);
1251  outputTensorInfo.SetQuantizationScale(qScale);
1252  outputTensorInfo.SetQuantizationOffset(qOffset);
1253  }
1254 
1255  float minVal = -10.f;
1256  if (f == armnn::ActivationFunction::Sqrt)
1257  {
1258  minVal = 0.f;
1259  }
1260 
1261  boost::multi_array<T, 4> input = MakeRandomTensor<T, 4>(inputTensorInfo, 21453, minVal, 10.f);
1262 
1263 
1264  LayerTestResult<T,4> ret(outputTensorInfo);
1265  auto boostArrayExtents = boost::extents
1266  [armnn::numeric_cast<boost::multi_array_types::extent_gen::index>(batchSize)]
1267  [armnn::numeric_cast<boost::multi_array_types::extent_gen::index>(channels)]
1268  [armnn::numeric_cast<boost::multi_array_types::extent_gen::index>(height)]
1269  [armnn::numeric_cast<boost::multi_array_types::extent_gen::index>(width)];
1270  ret.output.resize(boostArrayExtents);
1271  ret.outputExpected.resize(boostArrayExtents);
1272 
1273  std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
1274  std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
1275 
1276  std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refTensorHandleFactory.CreateTensorHandle(inputTensorInfo);
1277  std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refTensorHandleFactory.CreateTensorHandle(outputTensorInfo);
1278 
1279  armnn::ActivationQueueDescriptor data;
1280  armnn::WorkloadInfo info;
1281  AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
1282  AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
1283  data.m_Parameters.m_A = a;
1284  data.m_Parameters.m_B = b;
1285  data.m_Parameters.m_Function = f;
1286 
1287  armnn::ActivationQueueDescriptor refData = data;
1288  armnn::WorkloadInfo refInfo = info;
1289  SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
1290  SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
1291 
1292  std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateActivation(data, info);
1293  ARMNN_ASSERT(workload != nullptr);
1294  std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateActivation(refData, refInfo);
1295  ARMNN_ASSERT(workloadRef != nullptr);
1296 
1297  inputHandle->Allocate();
1298  outputHandle->Allocate();
1299  inputHandleRef->Allocate();
1300  outputHandleRef->Allocate();
1301 
1302  CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
1303  CopyDataToITensorHandle(inputHandleRef.get(), &input[0][0][0][0]);
1304 
1305  workload->Execute();
1306  workloadRef->Execute();
1307 
1308  CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
1309  CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());
1310 
1311  return ret;
1312 }
void IgnoreUnused(Ts &&...)
virtual std::unique_ptr< IWorkload > CreateActivation(const ActivationQueueDescriptor &descriptor, const WorkloadInfo &info) const
void SetQuantizationScale(float scale)
Definition: Tensor.cpp:464
#define ARMNN_ASSERT(COND)
Definition: Assert.hpp:14
void CopyDataFromITensorHandle(void *memory, const armnn::ITensorHandle *tensorHandle)
float m_A
Alpha upper bound value used by the activation functions. (BoundedReLu, Linear, TanH, Elu).
Definition: Descriptors.hpp:50
std::enable_if_t< std::is_unsigned< Source >::value &&std::is_unsigned< Dest >::value, Dest > numeric_cast(Source source)
Definition: NumericCast.hpp:35
Contains information about inputs and outputs to a layer.
void SetQuantizationOffset(int32_t offset)
Definition: Tensor.cpp:480
float m_B
Beta lower bound value used by the activation functions. (BoundedReLu, Linear, TanH).
Definition: Descriptors.hpp:52
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo) const =0
ActivationFunction m_Function
The activation function to use (Sigmoid, TanH, Linear, ReLu, BoundedReLu, SoftReLu, LeakyReLu, Abs, Sqrt, Square, Elu).
Definition: Descriptors.hpp:48
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)

◆ CompareActivationUint8Test()

LayerTestResult<uint8_t,4> CompareActivationUint8Test ( armnn::IWorkloadFactory &workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
armnn::IWorkloadFactory &refWorkloadFactory,
const armnn::ITensorHandleFactory &tensorHandleFactory,
const armnn::ITensorHandleFactory &refTensorHandleFactory,
armnn::ActivationFunction  f 
)

Definition at line 1328 of file ActivationTestImpl.cpp.

1335 {
1336  return CompareActivationTestImpl<armnn::DataType::QAsymmU8>(
1337  workloadFactory, memoryManager, refWorkloadFactory,
1338  tensorHandleFactory, refTensorHandleFactory, f, 5, 0.1f, 50);
1339 }

◆ CompareBoundedReLuTest()

LayerTestResult<float, 4> CompareBoundedReLuTest ( armnn::IWorkloadFactory &workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
armnn::IWorkloadFactory &refWorkloadFactory,
const armnn::ITensorHandleFactory &tensorHandleFactory,
const armnn::ITensorHandleFactory &refTensorHandleFactory,
float  upperBound,
float  lowerBound 
)

Definition at line 292 of file ActivationTestImpl.cpp.

References armnn::BoundedReLu, ActivationDescriptor::m_A, ActivationDescriptor::m_B, ActivationDescriptor::m_Function, LayerTestResult< T, n >::output, and LayerTestResult< T, n >::outputExpected.

300 {
301  LayerTestResult<float, 4> result(BoundedReLuRandomInputTestTraits::GetOutputTensorInfo());
302 
303  armnn::ActivationDescriptor activationDescriptor;
304  activationDescriptor.m_Function = armnn::ActivationFunction::BoundedReLu;
305  activationDescriptor.m_A = upperBound;
306  activationDescriptor.m_B = lowerBound;
307 
308  result.output = BoundedReLuRandomInputTest(
309  workloadFactory, memoryManager, tensorHandleFactory, 0.0f, upperBound, activationDescriptor);
310  result.outputExpected = BoundedReLuRandomInputTest(
311  refWorkloadFactory, nullptr, refTensorHandleFactory, 0.0f, upperBound, activationDescriptor);
312 
313  return result;
314 }
An ActivationDescriptor for the ActivationLayer.
Definition: Descriptors.hpp:25
min(a, max(b, input)) ReLu1 & ReLu6.
float m_A
Alpha upper bound value used by the activation functions. (BoundedReLu, Linear, TanH, Elu).
Definition: Descriptors.hpp:50
float m_B
Beta lower bound value used by the activation functions. (BoundedReLu, Linear, TanH).
Definition: Descriptors.hpp:52
ActivationFunction m_Function
The activation function to use (Sigmoid, TanH, Linear, ReLu, BoundedReLu, SoftReLu, LeakyReLu, Abs, Sqrt, Square, Elu).
Definition: Descriptors.hpp:48

◆ ConstantLinearActivationInt16Test()

LayerTestResult<int16_t, 4> ConstantLinearActivationInt16Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 397 of file ActivationTestImpl.cpp.

401 {
402  return ConstantLinearActivationTestCommon<armnn::DataType::QSymmS16>(
403  workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
404 }

◆ ConstantLinearActivationTest()

LayerTestResult<float, 4> ConstantLinearActivationTest ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 378 of file ActivationTestImpl.cpp.

382 {
383  return ConstantLinearActivationTestCommon<armnn::DataType::Float32>(workloadFactory,
384  memoryManager,
385  tensorHandleFactory);
386 }

◆ ConstantLinearActivationTestCommon()

LayerTestResult<T,4> ConstantLinearActivationTestCommon ( armnn::IWorkloadFactory &workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
const armnn::ITensorHandleFactory &tensorHandleFactory,
float  qScale = 0.0f,
int32_t  qOffset = 0 
)

Definition at line 317 of file ActivationTestImpl.cpp.

References CopyDataFromITensorHandle(), CopyDataToITensorHandle(), IWorkloadFactory::CreateActivation(), ITensorHandleFactory::CreateTensorHandle(), armnn::IgnoreUnused(), armnn::Linear, ActivationDescriptor::m_A, ActivationDescriptor::m_B, ActivationDescriptor::m_Function, QueueDescriptorWithParameters< LayerDescriptor >::m_Parameters, LayerTestResult< T, n >::output, LayerTestResult< T, n >::outputExpected, TensorInfo::SetQuantizationOffset(), and TensorInfo::SetQuantizationScale().

323 {
324  IgnoreUnused(memoryManager);
325  unsigned int inputHeight = 20;
326  unsigned int inputWidth = 17;
327  unsigned int inputChannels = 3;
328  unsigned int batchSize = 5;
329 
330  armnn::TensorInfo inputTensorInfo;
331  armnn::TensorInfo outputTensorInfo;
332 
333  unsigned int shape[] = {batchSize, inputChannels, inputHeight, inputWidth};
334 
335  inputTensorInfo = armnn::TensorInfo(4, shape, ArmnnType);
336  outputTensorInfo = armnn::TensorInfo(4, shape, ArmnnType);
337 
338  // Set quantization parameters if the requested type is a quantized type.
339  if(armnn::IsQuantizedType<T>())
340  {
341  inputTensorInfo.SetQuantizationScale(qScale);
342  inputTensorInfo.SetQuantizationOffset(qOffset);
343  outputTensorInfo.SetQuantizationScale(qScale);
344  outputTensorInfo.SetQuantizationOffset(qOffset);
345  }
346 
347  LayerTestResult<T, 4> ret(outputTensorInfo);
348  std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
349  std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
350 
351  // Do linear activation that should leave the tensor unchanged.
354  AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
355  AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
356  data.m_Parameters.m_A = 1.0f;
357  data.m_Parameters.m_B = 0.0f;
359 
360  std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateActivation(data, info);
361 
362  inputHandle->Allocate();
363  outputHandle->Allocate();
364 
365  boost::multi_array<T, 4> input = MakeRandomTensor<T, 4>(inputTensorInfo, 7123561);
366  CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
367 
368  workload->Execute();
369 
370  CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
371 
372  // Ensure output equals input.
373  ret.outputExpected = input;
374 
375  return ret;
376 }
void IgnoreUnused(Ts &&...)
virtual std::unique_ptr< IWorkload > CreateActivation(const ActivationQueueDescriptor &descriptor, const WorkloadInfo &info) const
void SetQuantizationScale(float scale)
Definition: Tensor.cpp:464
void CopyDataFromITensorHandle(void *memory, const armnn::ITensorHandle *tensorHandle)
float m_A
Alpha upper bound value used by the activation functions. (BoundedReLu, Linear, TanH, Elu).
Definition: Descriptors.hpp:50
Contains information about inputs and outputs to a layer.
void SetQuantizationOffset(int32_t offset)
Definition: Tensor.cpp:480
float m_B
Beta lower bound value used by the activation functions. (BoundedReLu, Linear, TanH).
Definition: Descriptors.hpp:52
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo) const =0
ActivationFunction m_Function
The activation function to use (Sigmoid, TanH, Linear, ReLu, BoundedReLu, SoftReLu, LeakyReLu, Abs, Sqrt, Square, Elu).
Definition: Descriptors.hpp:48
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)

◆ ConstantLinearActivationUint8Test()

LayerTestResult<uint8_t, 4> ConstantLinearActivationUint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 388 of file ActivationTestImpl.cpp.

392 {
393  return ConstantLinearActivationTestCommon<armnn::DataType::QAsymmU8>(
394  workloadFactory, memoryManager, tensorHandleFactory, 4.0f, 3);
395 }

◆ EluInt16Test()

LayerTestResult<int16_t, 4> EluInt16Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 1140 of file ActivationTestImpl.cpp.

1144 {
1145  return EluTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
1146 }

◆ EluTest()

LayerTestResult<float, 4> EluTest ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 1124 of file ActivationTestImpl.cpp.

1128 {
1129  return EluTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
1130 }

◆ EluTestCommon()

LayerTestResult<T, 4> EluTestCommon ( armnn::IWorkloadFactory &workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
const armnn::ITensorHandleFactory &tensorHandleFactory,
float  qScale,
int32_t  qOffset 
)

Definition at line 1086 of file ActivationTestImpl.cpp.

References armnn::Elu.

1092 {
1093  std::vector<float> inputData = {
1094  -0.1f, -0.2f, -0.3f, -0.4f,
1095  0.1f, 0.2f, 0.3f, 0.4f,
1096  -1.0f, -2.0f, -3.0f, -4.0f,
1097  1.0f, 2.0f, 3.0f, 4.0f
1098  };
1099 
1100 
1101  const float a = 0.01f;
1102  // Calculate output values for input.
1103  auto f = [a](float value)
1104  {
1105  return (value >= 0) ? value : a * (expf(value) - 1);
1106  };
1107  std::vector<float> outputExpectedData(inputData.size());
1108  std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
1109 
1110  return SimpleActivationTest<ArmnnType>(workloadFactory,
1111  memoryManager,
1112  tensorHandleFactory,
1114  a,
1115  0.0f,
1116  qScale,
1117  qOffset,
1118  inputData,
1119  qScale,
1120  qOffset,
1121  outputExpectedData);
1122 }

◆ EluUint8Test()

LayerTestResult<uint8_t, 4> EluUint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 1132 of file ActivationTestImpl.cpp.

1136 {
1137  return EluTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 64);
1138 }

◆ HardSwishInt16Test()

LayerTestResult<int16_t, 4> HardSwishInt16Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 1209 of file ActivationTestImpl.cpp.

1213 {
1214  return HardSwishTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
1215 }

◆ HardSwishTest()

LayerTestResult<float, 4> HardSwishTest ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 1192 of file ActivationTestImpl.cpp.

1196 {
1197  return HardSwishTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
1198 }

◆ HardSwishTestCommon()

LayerTestResult<T, 4> HardSwishTestCommon ( armnn::IWorkloadFactory &workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
const armnn::ITensorHandleFactory &tensorHandleFactory,
float  qScale,
int32_t  qOffset 
)

Definition at line 1150 of file ActivationTestImpl.cpp.

References armnn::HardSwish.

1156 {
1157  std::vector<float> inputData = {
1158  -0.1f, -0.2f, -0.3f, -0.4f,
1159  0.1f, 0.2f, 0.3f, 0.4f,
1160  -1.0f, -2.0f, -3.0f, -4.0f,
1161  1.0f, 2.0f, 3.0f, 4.0f
1162  };
1163  // Calculate output values for input.
1164  auto f = [](float x)
1165  {
1166  // Break down the calculation to help with verification.
1167  // hard_swish(x) = x * relu6(x+3) / 6
1168  // relu6(x) = min(max(x,0),6)
1169  float reLu6_step1 = std::max((x + 3),0.0f);
1170  float reLu6Complete = std::min(reLu6_step1, 6.0f);
1171  float hardSwish_step1 = x * reLu6Complete;
1172  float result = hardSwish_step1 / 6;
1173  return result;
1174  };
1175  std::vector<float> outputExpectedData(inputData.size());
1176  std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
1177 
1178  return SimpleActivationTest<ArmnnType>(workloadFactory,
1179  memoryManager,
1180  tensorHandleFactory,
1182  0.f,
1183  0.f,
1184  qScale,
1185  qOffset,
1186  inputData,
1187  qScale,
1188  qOffset,
1189  outputExpectedData);
1190 }

◆ HardSwishUint8Test()

LayerTestResult<uint8_t, 4> HardSwishUint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 1200 of file ActivationTestImpl.cpp.

1204 {
1205  return HardSwishTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager,
1206  tensorHandleFactory, 0.1f, 64);
1207 }

◆ LeakyReLuInt16Test()

LayerTestResult<int16_t, 4> LeakyReLuInt16Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 772 of file ActivationTestImpl.cpp.

776 {
777  return LeakyReLuTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
778 }

◆ LeakyReLuTest()

LayerTestResult<float, 4> LeakyReLuTest ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 755 of file ActivationTestImpl.cpp.

759 {
760  return LeakyReLuTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
761 }

◆ LeakyReLuTestCommon()

LayerTestResult<T, 4> LeakyReLuTestCommon ( armnn::IWorkloadFactory &workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
const armnn::ITensorHandleFactory &tensorHandleFactory,
float  qScale,
int32_t  qOffset 
)

Definition at line 718 of file ActivationTestImpl.cpp.

References armnn::LeakyReLu.

724 {
725  std::vector<float> inputData = {
726  -0.1f, -0.2f, -0.3f, -0.4f,
727  0.1f, 0.2f, 0.3f, 0.4f,
728  -1.0f, -2.0f, -3.0f, -4.0f,
729  1.0f, 2.0f, 3.0f, 4.0f
730  };
731 
732  const float a = 0.01f;
733  // Calculate output values for input.
734  auto f = [a](float value)
735  {
736  return value > 0.0f ? value : (value * a);
737  };
738  std::vector<float> outputExpectedData(inputData.size());
739  std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
740 
741  return SimpleActivationTest<ArmnnType>(workloadFactory,
742  memoryManager,
743  tensorHandleFactory,
745  a,
746  0.f,
747  qScale,
748  qOffset,
749  inputData,
750  qScale,
751  qOffset,
752  outputExpectedData);
753 }

◆ LeakyReLuUint8Test()

LayerTestResult<uint8_t, 4> LeakyReLuUint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 763 of file ActivationTestImpl.cpp.

767 {
768  return LeakyReLuTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager,
769  tensorHandleFactory, 0.0625f, 64);
770 }

◆ ReLuInt16Test()

LayerTestResult<int16_t, 4> ReLuInt16Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 581 of file ActivationTestImpl.cpp.

585 {
586  return ReLuTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
587 }

◆ ReLuTest()

LayerTestResult<float, 4> ReLuTest ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 598 of file ActivationTestImpl.cpp.

602 {
603  return ReLuTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
604 }

◆ ReLuTestCommon()

LayerTestResult<T, 4> ReLuTestCommon ( armnn::IWorkloadFactory &workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
const armnn::ITensorHandleFactory &tensorHandleFactory,
float  qScale,
int32_t  qOffset 
)

Definition at line 545 of file ActivationTestImpl.cpp.

References armnn::ReLu.

551 {
552  std::vector<float> inputData = {
553  -0.1f, -0.2f, -0.3f, -0.4f,
554  0.1f, 0.2f, 0.3f, 0.4f,
555  -1.0f, -2.0f, -3.0f, -4.0f,
556  1.0f, 2.0f, 3.0f, 4.0f
557  };
558 
559  // Calculate output values for input.
560  auto f = [](float value)
561  {
562  return std::fmax(0.0f, value);
563  };
564  std::vector<float> outputExpectedData(inputData.size());
565  std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
566 
567  return SimpleActivationTest<ArmnnType>(workloadFactory,
568  memoryManager,
569  tensorHandleFactory,
571  0.f,
572  0.f,
573  qScale,
574  qOffset,
575  inputData,
576  qScale,
577  qOffset,
578  outputExpectedData);
579 }

◆ ReLuUint8Test()

LayerTestResult<uint8_t, 4> ReLuUint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 590 of file ActivationTestImpl.cpp.

594 {
595  return ReLuTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
596 }

◆ SimpleActivationTest()

LayerTestResult<T, 4> SimpleActivationTest ( armnn::IWorkloadFactory &workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
const armnn::ITensorHandleFactory &tensorHandleFactory,
armnn::ActivationFunction  activationFunction,
float  activationParameterA,
float  activationParameterB,
float  scale,
int32_t  offset,
const std::vector< float > &  inputData,
float  outScale,
int32_t  outOffset,
const std::vector< float > &  outputExpectedData 
)

Definition at line 407 of file ActivationTestImpl.cpp.

References CopyDataFromITensorHandle(), CopyDataToITensorHandle(), IWorkloadFactory::CreateActivation(), ITensorHandleFactory::CreateTensorHandle(), armnn::IgnoreUnused(), ActivationDescriptor::m_A, ActivationDescriptor::m_B, ActivationDescriptor::m_Function, QueueDescriptorWithParameters< LayerDescriptor >::m_Parameters, LayerTestResult< T, n >::output, LayerTestResult< T, n >::outputExpected, and TensorInfo::SetQuantizationScale().

420 {
421  IgnoreUnused(memoryManager);
422  constexpr static unsigned int inputWidth = 16u;
423  constexpr static unsigned int inputHeight = 1u;
424  constexpr static unsigned int inputChannels = 1u;
425  constexpr static unsigned int inputBatchSize = 1u;
426 
427  constexpr static unsigned int outputWidth = inputWidth;
428  constexpr static unsigned int outputHeight = inputHeight;
429  constexpr static unsigned int outputChannels = inputChannels;
430  constexpr static unsigned int outputBatchSize = inputBatchSize;
431 
432  armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth }, ArmnnType);
433  armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth }, ArmnnType);
434 
435  // Set quantization parameters if the requested type is a quantized type.
436  if(armnn::IsQuantizedType<T>())
437  {
438  inputTensorInfo.SetQuantizationScale(scale);
439  inputTensorInfo.SetQuantizationOffset(offset);
440  outputTensorInfo.SetQuantizationScale(outScale);
441  outputTensorInfo.SetQuantizationOffset(outOffset);
442  }
443 
444  LayerTestResult<T, 4> result(inputTensorInfo);
445 
446  auto input = MakeTensor<T, 4>(inputTensorInfo, armnnUtils::QuantizedVector<T>(inputData, scale, offset));
447 
448  std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
449  std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
450 
451  // Setup bounded ReLu.
453  armnn::WorkloadInfo workloadInfo;
454  AddInputToWorkload(descriptor, workloadInfo, inputTensorInfo, inputHandle.get());
455  AddOutputToWorkload(descriptor, workloadInfo, outputTensorInfo, outputHandle.get());
456 
457  descriptor.m_Parameters.m_Function = activationFunction;
458  descriptor.m_Parameters.m_A = activationParameterA;
459  descriptor.m_Parameters.m_B = activationParameterB;
460 
461  std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateActivation(descriptor, workloadInfo);
462 
463  inputHandle->Allocate();
464  outputHandle->Allocate();
465 
466  CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
467 
468  workload->Execute();
469 
470  CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
471 
472  // Calculated manually.
473  result.outputExpected =
474  MakeTensor<T, 4>(outputTensorInfo, armnnUtils::QuantizedVector<T>(outputExpectedData, outScale, outOffset));
475 
476  return result;
477 }
void IgnoreUnused(Ts &&...)
virtual std::unique_ptr< IWorkload > CreateActivation(const ActivationQueueDescriptor &descriptor, const WorkloadInfo &info) const
void SetQuantizationScale(float scale)
Definition: Tensor.cpp:464
void CopyDataFromITensorHandle(void *memory, const armnn::ITensorHandle *tensorHandle)
float m_A
Alpha upper bound value used by the activation functions. (BoundedReLu, Linear, TanH, Elu).
Definition: Descriptors.hpp:50
Contains information about inputs and outputs to a layer.
float m_B
Beta lower bound value used by the activation functions. (BoundedReLu, Linear, TanH).
Definition: Descriptors.hpp:52
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo) const =0
ActivationFunction m_Function
The activation function to use (Sigmoid, TanH, Linear, ReLu, BoundedReLu, SoftReLu, LeakyReLu, Abs, Sqrt, Square, Elu).
Definition: Descriptors.hpp:48
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)

◆ SimpleSigmoidInt16Test()

LayerTestResult<int16_t, 4> SimpleSigmoidInt16Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 535 of file ActivationTestImpl.cpp.

539 {
540  return SimpleSigmoidTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager,
541  tensorHandleFactory, 0.1f, 0);
542 }

◆ SimpleSigmoidTest()

LayerTestResult<float, 4> SimpleSigmoidTest ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 517 of file ActivationTestImpl.cpp.

521 {
522  return SimpleSigmoidTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager,
523  tensorHandleFactory, 0.0f, 0);
524 }

◆ SimpleSigmoidTestCommon()

LayerTestResult<T, 4> SimpleSigmoidTestCommon ( armnn::IWorkloadFactory &workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
const armnn::ITensorHandleFactory &tensorHandleFactory,
float  qScale,
int32_t  qOffset 
)

Definition at line 480 of file ActivationTestImpl.cpp.

References armnn::Sigmoid.

486 {
487  std::vector<float> inputData =
488  {
489  -0.1f, -0.2f, -0.3f, -0.4f,
490  0.1f, 0.2f, 0.3f, 0.4f,
491  -1.0f, -2.0f, -3.0f, -4.0f,
492  1.0f, 2.0f, 3.0f, 4.0f
493  };
494 
495  // Calculate output values for input.
496  auto f = [](float value)
497  {
498  return 1.0f / (1.0f + std::exp(-value));
499  };
500  std::vector<float> outputExpectedData(inputData.size());
501  std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
502 
503  return SimpleActivationTest<ArmnnType>(workloadFactory,
504  memoryManager,
505  tensorHandleFactory,
507  0.f,
508  0.f,
509  qScale,
510  qOffset,
511  inputData,
512  1.f / 256.f,
513  0,
514  outputExpectedData);
515 }

◆ SimpleSigmoidUint8Test()

LayerTestResult<uint8_t, 4> SimpleSigmoidUint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 526 of file ActivationTestImpl.cpp.

530 {
531  return SimpleSigmoidTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager,
532  tensorHandleFactory, 0.1f, 50);
533 }

◆ SoftReLuInt16Test()

LayerTestResult<int16_t, 4> SoftReLuInt16Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 709 of file ActivationTestImpl.cpp.

713 {
714  return SoftReLuTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
715 }

◆ SoftReLuTest()

LayerTestResult<float, 4> SoftReLuTest ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 692 of file ActivationTestImpl.cpp.

696 {
697  return SoftReLuTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
698 }

◆ SoftReLuTestCommon()

LayerTestResult<T, 4> SoftReLuTestCommon ( armnn::IWorkloadFactory &workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
const armnn::ITensorHandleFactory &tensorHandleFactory,
float  qScale,
int32_t  qOffset 
)

Definition at line 656 of file ActivationTestImpl.cpp.

References armnn::SoftReLu.

662 {
663  std::vector<float> inputData = {
664  -0.1f, -0.2f, -0.3f, -0.4f,
665  0.1f, 0.2f, 0.3f, 0.4f,
666  -1.0f, -2.0f, -3.0f, -4.0f,
667  1.0f, 2.0f, 3.0f, 4.0f
668  };
669 
670  // Calculate output values for input.
671  auto f = [](float value)
672  {
673  return std::log(1.0f + std::exp(value));
674  };
675  std::vector<float> outputExpectedData(inputData.size());
676  std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
677 
678  return SimpleActivationTest<ArmnnType>(workloadFactory,
679  memoryManager,
680  tensorHandleFactory,
682  0.f,
683  0.f,
684  qScale,
685  qOffset,
686  inputData,
687  qScale,
688  qOffset,
689  outputExpectedData);
690 }

◆ SoftReLuUint8Test()

LayerTestResult<uint8_t, 4> SoftReLuUint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 700 of file ActivationTestImpl.cpp.

704 {
705  return SoftReLuTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager,
706  tensorHandleFactory, 0.0625f, 64);
707 }

◆ SqrtInt16Test()

LayerTestResult<int16_t, 4> SqrtInt16Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 951 of file ActivationTestImpl.cpp.

955 {
956  return SqrtTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
957 }

◆ SqrtNNTest()

LayerTestResult<float, 5> SqrtNNTest ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 841 of file ActivationTestImpl.cpp.

References CopyDataFromITensorHandle(), CopyDataToITensorHandle(), IWorkloadFactory::CreateActivation(), ITensorHandleFactory::CreateTensorHandle(), armnn::Float32, armnn::IgnoreUnused(), ActivationDescriptor::m_Function, QueueDescriptorWithParameters< LayerDescriptor >::m_Parameters, and armnn::Sqrt.

845 {
846  IgnoreUnused(memoryManager);
847  const int inputDataSize = 120;
848  std::vector<float> inputData(inputDataSize);
849 
850  for (unsigned int i = 0u; i < inputDataSize; ++i)
851  {
852  inputData[i] = static_cast<float>(i) / 10;
853  }
854 
855  auto f = [](float value)
856  {
857  return std::sqrt(value);
858  };
859  std::vector<float> outputExpectedData(inputDataSize);
860  std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
861 
862  armnn::TensorInfo inputTensorInfo(
863  { 1u, 2u, 3u, 4u, 5u }, armnn::DataType::Float32);
864  armnn::TensorInfo outputTensorInfo(
865  { 1u, 2u, 3u, 4u, 5u }, armnn::DataType::Float32);
866 
867  LayerTestResult<float, 5> result(inputTensorInfo);
868 
869  auto input = MakeTensor<float, 5>(inputTensorInfo, inputData);
870 
871  std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
872  std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
873 
875  armnn::WorkloadInfo workloadInfo;
876  AddInputToWorkload(descriptor, workloadInfo, inputTensorInfo, inputHandle.get());
877  AddOutputToWorkload(descriptor, workloadInfo, outputTensorInfo, outputHandle.get());
878 
880 
881  std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateActivation(descriptor, workloadInfo);
882 
883  inputHandle->Allocate();
884  outputHandle->Allocate();
885 
886  CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0][0]);
887 
888  workload->Execute();
889 
890  CopyDataFromITensorHandle(&result.output[0][0][0][0][0], outputHandle.get());
891 
892  // Calculated manually.
893  result.outputExpected = MakeTensor<float, 5>(outputTensorInfo, outputExpectedData);
894 
895  return result;
896 };
void IgnoreUnused(Ts &&...)
virtual std::unique_ptr< IWorkload > CreateActivation(const ActivationQueueDescriptor &descriptor, const WorkloadInfo &info) const
void CopyDataFromITensorHandle(void *memory, const armnn::ITensorHandle *tensorHandle)
Contains information about inputs and outputs to a layer.
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo) const =0
ActivationFunction m_Function
The activation function to use (Sigmoid, TanH, Linear, ReLu, BoundedReLu, SoftReLu, LeakyReLu, Abs, Sqrt, Square, Elu).
Definition: Descriptors.hpp:48
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)

◆ SqrtTest()

LayerTestResult<float, 4> SqrtTest ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 935 of file ActivationTestImpl.cpp.

939 {
940  return SqrtTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
941 }

◆ SqrtTestCommon()

LayerTestResult<T, 4> SqrtTestCommon ( armnn::IWorkloadFactory &workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
const armnn::ITensorHandleFactory &tensorHandleFactory,
float  qScale,
int32_t  qOffset 
)

Definition at line 899 of file ActivationTestImpl.cpp.

References armnn::Sqrt.

905 {
906  std::vector<float> inputData = {
907  0.1f, 0.2f, 0.3f, 0.4f,
908  0.1f, 0.2f, 0.3f, 0.4f,
909  1.0f, 2.0f, 3.0f, 4.0f,
910  1.0f, 2.0f, 3.0f, 4.0f
911  };
912 
913  // Calculate output values for input.
914  auto f = [](float value)
915  {
916  return std::sqrt(value);
917  };
918  std::vector<float> outputExpectedData(inputData.size());
919  std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
920 
921  return SimpleActivationTest<ArmnnType>(workloadFactory,
922  memoryManager,
 923  tensorHandleFactory,
 924  armnn::ActivationFunction::Sqrt,
 925  0.f,
926  0.f,
927  qScale,
928  qOffset,
929  inputData,
930  qScale,
931  qOffset,
932  outputExpectedData);
933 }

◆ SqrtUint8Test()

LayerTestResult<uint8_t, 4> SqrtUint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 943 of file ActivationTestImpl.cpp.

947 {
948  return SqrtTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, 0.0625f, 64);
949 }

◆ SquareInt16Test()

LayerTestResult<int16_t, 4> SquareInt16Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 1013 of file ActivationTestImpl.cpp.

1017 {
1018  return SquareTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
1019 }

◆ SquareTest()

LayerTestResult<float, 4> SquareTest ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 996 of file ActivationTestImpl.cpp.

1000 {
1001  return SquareTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
1002 }

◆ SquareTestCommon()

LayerTestResult<T, 4> SquareTestCommon ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory,
float  qScale,
int32_t  qOffset 
)

Definition at line 960 of file ActivationTestImpl.cpp.

References armnn::Square.

966 {
967  std::vector<float> inputData = {
968  -0.1f, -0.2f, -0.3f, -0.4f,
969  0.1f, 0.2f, 0.3f, 0.4f,
970  -1.0f, -2.0f, -3.0f, -4.0f,
971  1.0f, 2.0f, 3.0f, 4.0f
972  };
973 
974  // Calculate output values for input.
975  auto f = [](float value)
976  {
977  return std::pow(value,2);
978  };
979  std::vector<float> outputExpectedData(inputData.size());
980  std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
981 
982  return SimpleActivationTest<ArmnnType>(workloadFactory,
983  memoryManager,
 984  tensorHandleFactory,
 985  armnn::ActivationFunction::Square,
 986  0.f,
987  0.f,
988  qScale,
989  qOffset,
990  inputData,
991  qScale,
992  qOffset,
993  outputExpectedData);
994 }

◆ SquareUint8Test()

LayerTestResult<uint8_t, 4> SquareUint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 1004 of file ActivationTestImpl.cpp.

1008 {
1009  return SquareTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager,
1010  tensorHandleFactory, 0.0625f, 64);
1011 }

◆ TanhInt16Test()

LayerTestResult<int16_t, 4> TanhInt16Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 1076 of file ActivationTestImpl.cpp.

1080 {
1081  return TanhTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
1082 }

◆ TanhTest()

LayerTestResult<float, 4> TanhTest ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 1060 of file ActivationTestImpl.cpp.

1064 {
1065  return TanhTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
1066 }

◆ TanhTestCommon()

LayerTestResult<T, 4> TanhTestCommon ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory,
float  qScale,
int32_t  qOffset 
)

Definition at line 1022 of file ActivationTestImpl.cpp.

References armnn::TanH.

1028 {
1029  std::vector<float> inputData = {
1030  -0.1f, -0.2f, -0.3f, -0.4f,
1031  0.1f, 0.2f, 0.3f, 0.4f,
1032  -1.0f, -2.0f, -3.0f, -4.0f,
1033  1.0f, 2.0f, 3.0f, 4.0f
1034  };
1035 
1036  const float a = 2.0f;
1037  const float b = 3.0f;
1038  // Calculate output values for input.
1039  auto f = [a, b](float value)
1040  {
1041  return a * tanhf(b * value);
1042  };
1043  std::vector<float> outputExpectedData(inputData.size());
1044  std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
1045 
1046  return SimpleActivationTest<ArmnnType>(workloadFactory,
1047  memoryManager,
 1048  tensorHandleFactory,
 1049  armnn::ActivationFunction::TanH,
 1050  a,
1051  b,
1052  qScale,
1053  qOffset,
1054  inputData,
1055  qScale,
1056  qOffset,
1057  outputExpectedData);
1058 }

◆ TanhUint8Test()

LayerTestResult<uint8_t, 4> TanhUint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 1068 of file ActivationTestImpl.cpp.

1072 {
1073  return TanhTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 64);
1074 }