22 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
33 const std::vector<T>& inputData,
34 const std::vector<T>& outputExpectedData,
35 unsigned int inputWidth,
36 unsigned int inputHeight,
37 unsigned int inputChannels,
38 unsigned int inputBatchSize)
41 unsigned int outputWidth = inputWidth;
42 unsigned int outputHeight = inputHeight;
43 unsigned int outputChannels = inputChannels;
44 unsigned int outputBatchSize = inputBatchSize;
46 armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth }, ArmnnType);
48 armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth }, ArmnnType);
50 if(armnn::IsQuantizedType<T>())
53 inputTensorInfo.SetQuantizationOffset(inputOffset);
55 outputTensorInfo.SetQuantizationScale(outputScale);
56 outputTensorInfo.SetQuantizationOffset(outputOffset);
59 std::vector<T> actualOutput(outputTensorInfo.GetNumElements());
61 std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.
CreateTensorHandle(inputTensorInfo);
62 std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.
CreateTensorHandle(outputTensorInfo);
67 AddInputToWorkload(descriptor, workloadInfo, inputTensorInfo, inputHandle.get());
68 AddOutputToWorkload(descriptor, workloadInfo, outputTensorInfo, outputHandle.get());
74 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.
CreateActivation(descriptor, workloadInfo);
76 inputHandle->Allocate();
77 outputHandle->Allocate();
87 outputHandle->GetShape(),
88 outputTensorInfo.GetShape());
96 unsigned int inputWidth = 4u;
97 unsigned int inputHeight = 5u;
98 unsigned int inputChannels = 1u;
99 unsigned int inputBatchSize = 1;
101 std::vector<float> input = std::vector<float>{
102 -2.0f, 0.1f, 0.5f, 1.25f,
103 0.786f, 0.9875f, -1.5f, 0.384f,
104 1.0001f, 3.5f, 7.5f, 0.896f,
105 2.126f, 2.0f, 0.3f, 0.15f,
106 0.999f, 1.2f, 0.89f, 6.1f,
110 std::vector<float> output = std::vector<float>{
111 -1.0f, 0.1f, 0.5f, 1.0f,
112 0.786f, 0.9875f, -1.0f, 0.384f,
113 1.0f, 1.0f, 1.0f, 0.896f,
114 1.0f, 1.0f, 0.3f, 0.15f,
115 0.999f, 1.0f, 0.89f, 1.0f,
118 return BoundedReLuTestCommon<armnn::DataType::Float32>(
119 workloadFactory, memoryManager, tensorHandleFactory, 1.0f, -1.0f, 1.0f, 0, 1.0f, 0, input, output,
120 inputWidth, inputHeight, inputChannels, inputBatchSize);
128 unsigned int inputWidth = 4u;
129 unsigned int inputHeight = 5u;
130 unsigned int inputChannels = 1u;
131 unsigned int inputBatchSize = 1;
133 std::vector<float> input = std::vector<float>{
134 -1.0f, 0.1f, 0.5f, 6.25f,
135 0.786f, 5.9875f, -0.5f, 0.384f,
136 6.0001f, 3.5f, 7.5f, 0.896f,
137 2.126f, 12.0f, 0.3f, 0.15f,
138 0.999f, 1.2f, 0.89f, 6.1f,
142 std::vector<float> output = std::vector<float>{
143 0.0f, 0.1f, 0.5f, 6.0f,
144 0.786f, 5.9875f, 0.0f, 0.384f,
145 6.0f, 3.5f, 6.0f, 0.896f,
146 2.126f, 6.0f, 0.3f, 0.15f,
147 0.999f, 1.2f, 0.89f, 6.0f,
150 return BoundedReLuTestCommon<armnn::DataType::Float32>(
151 workloadFactory, memoryManager, tensorHandleFactory, 6.0f, 0.0f, 1.0f, 0, 1.0f, 0, input, output,
152 inputWidth, inputHeight, inputChannels, inputBatchSize);
160 unsigned int inputWidth = 3u;
161 unsigned int inputHeight = 2u;
162 unsigned int inputChannels = 1u;
163 unsigned int inputBatchSize = 1;
165 std::vector<uint8_t> input = std::vector<uint8_t>{
171 std::vector<uint8_t> output = std::vector<uint8_t>{
176 float inputScale = 12.0f / 255.0f;
177 int32_t inputOffset = 63;
178 float outputScale = 6.0f / 255.0f;
179 int32_t outputOffset = 0;
181 return BoundedReLuTestCommon<armnn::DataType::QAsymmU8>(
182 workloadFactory, memoryManager, tensorHandleFactory, 6.0f, 0.0f,
183 inputScale, inputOffset, outputScale, outputOffset,
184 input, output, inputWidth, inputHeight, inputChannels, inputBatchSize);
192 unsigned int inputWidth = 3u;
193 unsigned int inputHeight = 2u;
194 unsigned int inputChannels = 1u;
195 unsigned int inputBatchSize = 1;
197 std::vector<uint8_t> input = std::vector<uint8_t>{
203 std::vector<uint8_t> output = std::vector<uint8_t>{
208 int32_t inputOffset = 112;
209 float inputScale = 0.0125f;
211 return BoundedReLuTestCommon<armnn::DataType::QAsymmU8>(
212 workloadFactory, memoryManager, tensorHandleFactory, 1.0f, -1.0f,
213 inputScale, inputOffset, inputScale, inputOffset,
214 input, output, inputWidth, inputHeight, inputChannels, inputBatchSize);
220 struct BoundedReLuRandomInputTestTraits
222 constexpr
static unsigned int inputHeight = 31u;
223 constexpr
static unsigned int inputWidth = 19u;
224 constexpr
static unsigned int inputChannels = 4u;
225 constexpr
static unsigned int inputBatchSize = 2;
227 constexpr
static unsigned int outputHeight = inputHeight;
228 constexpr
static unsigned int outputWidth = inputWidth;
229 constexpr
static unsigned int outputChannels = inputChannels;
230 constexpr
static unsigned int outputBatchSize = inputBatchSize;
234 return armnn::TensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
240 return armnn::TensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
245 std::vector<float> BoundedReLuRandomInputTest(
254 const armnn::TensorInfo inputTensorInfo = BoundedReLuRandomInputTestTraits::GetInputTensorInfo();
255 const armnn::TensorInfo outputTensorInfo = BoundedReLuRandomInputTestTraits::GetOutputTensorInfo();
259 std::vector<float> input = MakeRandomTensor<float>(inputTensorInfo, 4605828, lowerBound - 5.0f, upperBound * 2.0f);
260 std::vector<float> actualOutput(outputTensorInfo.
GetNumElements());
262 std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.
CreateTensorHandle(inputTensorInfo);
263 std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.
CreateTensorHandle(outputTensorInfo);
268 AddInputToWorkload(descriptor, workloadInfo, inputTensorInfo, inputHandle.get());
269 AddOutputToWorkload(descriptor, workloadInfo, outputTensorInfo, outputHandle.get());
272 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.
CreateActivation(descriptor, workloadInfo);
274 inputHandle->Allocate();
275 outputHandle->Allocate();
301 activationDescriptor.
m_A = upperBound;
302 activationDescriptor.
m_B = lowerBound;
305 workloadFactory, memoryManager, tensorHandleFactory, 0.0f, upperBound, activationDescriptor);
307 refWorkloadFactory,
nullptr, refTensorHandleFactory, 0.0f, upperBound, activationDescriptor);
312 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
321 unsigned int inputHeight = 20;
322 unsigned int inputWidth = 17;
323 unsigned int inputChannels = 3;
324 unsigned int batchSize = 5;
329 unsigned int shape[] = {batchSize, inputChannels, inputHeight, inputWidth};
335 if(armnn::IsQuantizedType<T>())
343 std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.
CreateTensorHandle(inputTensorInfo);
344 std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.
CreateTensorHandle(outputTensorInfo);
349 AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
350 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
355 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.
CreateActivation(data, info);
357 inputHandle->Allocate();
358 outputHandle->Allocate();
360 std::vector<T> input = MakeRandomTensor<T>(inputTensorInfo, 7123561);
372 outputHandle->GetShape(),
381 return ConstantLinearActivationTestCommon<armnn::DataType::Float32>(workloadFactory,
383 tensorHandleFactory);
391 return ConstantLinearActivationTestCommon<armnn::DataType::QAsymmU8>(
392 workloadFactory, memoryManager, tensorHandleFactory, 4.0f, 3);
400 return ConstantLinearActivationTestCommon<armnn::DataType::QSymmS16>(
401 workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
404 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
410 float activationParameterA,
411 float activationParameterB,
414 const std::vector<float>& inputData,
417 const std::vector<float>& outputExpectedData)
420 constexpr
static unsigned int inputWidth = 16u;
421 constexpr
static unsigned int inputHeight = 1u;
422 constexpr
static unsigned int inputChannels = 1u;
423 constexpr
static unsigned int inputBatchSize = 1u;
425 constexpr
static unsigned int outputWidth = inputWidth;
426 constexpr
static unsigned int outputHeight = inputHeight;
427 constexpr
static unsigned int outputChannels = inputChannels;
428 constexpr
static unsigned int outputBatchSize = inputBatchSize;
430 armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth }, ArmnnType);
431 armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth }, ArmnnType);
434 if(armnn::IsQuantizedType<T>())
437 inputTensorInfo.SetQuantizationOffset(offset);
438 outputTensorInfo.SetQuantizationScale(outScale);
439 outputTensorInfo.SetQuantizationOffset(outOffset);
442 std::vector<T> input = armnnUtils::QuantizedVector<T>(inputData, scale, offset);
445 std::vector<T> actualOutput(outputTensorInfo.GetNumElements());
446 std::vector<T> outputExpected = armnnUtils::QuantizedVector<T>(outputExpectedData, outScale, outOffset);
448 std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.
CreateTensorHandle(inputTensorInfo);
449 std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.
CreateTensorHandle(outputTensorInfo);
454 AddInputToWorkload(descriptor, workloadInfo, inputTensorInfo, inputHandle.get());
455 AddOutputToWorkload(descriptor, workloadInfo, outputTensorInfo, outputHandle.get());
461 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.
CreateActivation(descriptor, workloadInfo);
463 inputHandle->Allocate();
464 outputHandle->Allocate();
474 outputHandle->GetShape(),
475 outputTensorInfo.GetShape());
478 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
486 std::vector<float> inputData =
488 -0.1f, -0.2f, -0.3f, -0.4f,
489 0.1f, 0.2f, 0.3f, 0.4f,
490 -1.0f, -2.0f, -3.0f, -4.0f,
491 1.0f, 2.0f, 3.0f, 4.0f
495 auto f = [](
float value)
497 return 1.0f / (1.0f + std::exp(-value));
499 std::vector<float> m_OutputExpected(inputData.size());
500 std::transform(inputData.begin(), inputData.end(), m_OutputExpected.begin(), f);
502 return SimpleActivationTest<ArmnnType>(workloadFactory,
521 return SimpleSigmoidTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager,
522 tensorHandleFactory, 0.0f, 0);
530 return SimpleSigmoidTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager,
531 tensorHandleFactory, 0.1f, 50);
539 return SimpleSigmoidTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager,
540 tensorHandleFactory, 0.1f, 0);
543 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
551 std::vector<float> inputData = {
552 -0.1f, -0.2f, -0.3f, -0.4f,
553 0.1f, 0.2f, 0.3f, 0.4f,
554 -1.0f, -2.0f, -3.0f, -4.0f,
555 1.0f, 2.0f, 3.0f, 4.0f
559 auto f = [](
float value)
561 return std::fmax(0.0f, value);
563 std::vector<float> outputExpected(inputData.size());
564 std::transform(inputData.begin(), inputData.end(), outputExpected.begin(), f);
566 return SimpleActivationTest<ArmnnType>(workloadFactory,
585 return ReLuTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
594 return ReLuTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
602 return ReLuTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
606 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
614 std::vector<float> inputData = {
615 -0.1f, -0.2f, -0.3f, -0.4f,
616 0.1f, 0.2f, 0.3f, 0.4f,
617 -1.0f, -2.0f, -3.0f, -4.0f,
618 1.0f, 2.0f, 3.0f, 4.0f
620 const float a = 1.0f;
621 const float b = -1.0f;
623 auto f = [a, b](
float value)
625 return std::min(a, std::max(b, value));
627 std::vector<float> outputExpected(inputData.size());
628 std::transform(inputData.begin(), inputData.end(), outputExpected.begin(), f);
630 return SimpleActivationTest<ArmnnType>(workloadFactory,
649 return ReLuTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
654 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
662 std::vector<float> inputData = {
663 -0.1f, -0.2f, -0.3f, -0.4f,
664 0.1f, 0.2f, 0.3f, 0.4f,
665 -1.0f, -2.0f, -3.0f, -4.0f,
666 1.0f, 2.0f, 3.0f, 4.0f
670 auto f = [](
float value)
672 return std::log(1.0f + std::exp(value));
674 std::vector<float> outputExpected(inputData.size());
675 std::transform(inputData.begin(), inputData.end(), outputExpected.begin(), f);
677 return SimpleActivationTest<ArmnnType>(workloadFactory,
696 return SoftReLuTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
704 return SoftReLuTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager,
705 tensorHandleFactory, 0.0625f, 64);
713 return SoftReLuTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
716 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
724 std::vector<float> inputData = {
725 -0.1f, -0.2f, -0.3f, -0.4f,
726 0.1f, 0.2f, 0.3f, 0.4f,
727 -1.0f, -2.0f, -3.0f, -4.0f,
728 1.0f, 2.0f, 3.0f, 4.0f
731 const float a = 0.01f;
733 auto f = [a](
float value)
735 return value > 0.0f ? value : (value * a);
737 std::vector<float> outputExpected(inputData.size());
738 std::transform(inputData.begin(), inputData.end(), outputExpected.begin(), f);
740 return SimpleActivationTest<ArmnnType>(workloadFactory,
759 return LeakyReLuTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
767 return LeakyReLuTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager,
768 tensorHandleFactory, 0.0625f, 64);
776 return LeakyReLuTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
779 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
787 std::vector<float> inputData = {
788 -0.1f, -0.2f, -0.3f, -0.4f,
789 0.1f, 0.2f, 0.3f, 0.4f,
790 -1.0f, -2.0f, -3.0f, -4.0f,
791 1.0f, 2.0f, 3.0f, 4.0f
795 auto f = [](
float value)
797 return std::abs(value);
799 std::vector<float> outputExpected(inputData.size());
800 std::transform(inputData.begin(), inputData.end(), outputExpected.begin(), f);
802 return SimpleActivationTest<ArmnnType>(workloadFactory,
821 return AbsTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
829 return AbsTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, 0.0625f, 64);
837 return AbsTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
846 const int inputDataSize = 120;
847 std::vector<float> inputData(inputDataSize);
849 for (
unsigned int i = 0u; i < inputDataSize; ++i)
851 inputData[i] =
static_cast<float>(i) / 10;
854 auto f = [](
float value)
856 return std::sqrt(value);
858 std::vector<float> expectedOutput(inputDataSize);
859 std::transform(inputData.begin(), inputData.end(), expectedOutput.begin(), f);
866 std::vector<float> actualOutput(outputTensorInfo.GetNumElements());
868 std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.
CreateTensorHandle(inputTensorInfo);
869 std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.
CreateTensorHandle(outputTensorInfo);
873 AddInputToWorkload(descriptor, workloadInfo, inputTensorInfo, inputHandle.get());
874 AddOutputToWorkload(descriptor, workloadInfo, outputTensorInfo, outputHandle.get());
878 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.
CreateActivation(descriptor, workloadInfo);
880 inputHandle->Allocate();
881 outputHandle->Allocate();
891 outputHandle->GetShape(),
892 outputTensorInfo.GetShape());
895 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
903 std::vector<float> inputData = {
904 0.1f, 0.2f, 0.3f, 0.4f,
905 0.1f, 0.2f, 0.3f, 0.4f,
906 1.0f, 2.0f, 3.0f, 4.0f,
907 1.0f, 2.0f, 3.0f, 4.0f
911 auto f = [](
float value)
913 return std::sqrt(value);
915 std::vector<float> expectedOutput(inputData.size());
916 std::transform(inputData.begin(), inputData.end(), expectedOutput.begin(), f);
918 return SimpleActivationTest<ArmnnType>(workloadFactory,
937 return SqrtTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
945 return SqrtTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, 0.0625f, 64);
953 return SqrtTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
956 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
964 std::vector<float> inputData = {
965 -0.1f, -0.2f, -0.3f, -0.4f,
966 0.1f, 0.2f, 0.3f, 0.4f,
967 -1.0f, -2.0f, -3.0f, -4.0f,
968 1.0f, 2.0f, 3.0f, 4.0f
972 auto f = [](
float value)
974 return std::pow(value,2);
976 std::vector<float> expectedOutput(inputData.size());
977 std::transform(inputData.begin(), inputData.end(), expectedOutput.begin(), f);
979 return SimpleActivationTest<ArmnnType>(workloadFactory,
998 return SquareTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
1006 return SquareTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager,
1007 tensorHandleFactory, 0.0625f, 64);
1015 return SquareTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
1018 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
1026 std::vector<float> inputData = {
1027 -0.1f, -0.2f, -0.3f, -0.4f,
1028 0.1f, 0.2f, 0.3f, 0.4f,
1029 -1.0f, -2.0f, -3.0f, -4.0f,
1030 1.0f, 2.0f, 3.0f, 4.0f
1033 const float a = 2.0f;
1034 const float b = 3.0f;
1036 auto f = [a, b](
float value)
1038 return a * tanhf(b * value);
1040 std::vector<float> expectedOutput(inputData.size());
1041 std::transform(inputData.begin(), inputData.end(), expectedOutput.begin(), f);
1043 return SimpleActivationTest<ArmnnType>(workloadFactory,
1045 tensorHandleFactory,
1062 return TanhTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
1070 return TanhTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 64);
1078 return TanhTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
1082 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
1090 std::vector<float> inputData = {
1091 -0.1f, -0.2f, -0.3f, -0.4f,
1092 0.1f, 0.2f, 0.3f, 0.4f,
1093 -1.0f, -2.0f, -3.0f, -4.0f,
1094 1.0f, 2.0f, 3.0f, 4.0f
1098 const float a = 0.01f;
1100 auto f = [a](
float value)
1102 return (value >= 0) ? value : a * (expf(value) - 1);
1104 std::vector<float> expectedOutput(inputData.size());
1105 std::transform(inputData.begin(), inputData.end(), expectedOutput.begin(), f);
1107 return SimpleActivationTest<ArmnnType>(workloadFactory,
1109 tensorHandleFactory,
1126 return EluTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
1134 return EluTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 64);
1142 return EluTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
1146 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
1154 std::vector<float> inputData = {
1155 -0.1f, -0.2f, -0.3f, -0.4f,
1156 0.1f, 0.2f, 0.3f, 0.4f,
1157 -1.0f, -2.0f, -3.0f, -4.0f,
1158 1.0f, 2.0f, 3.0f, 4.0f
1161 auto f = [](
float x)
1166 float reLu6_step1 = std::max((x + 3),0.0f);
1167 float reLu6Complete = std::min(reLu6_step1, 6.0f);
1168 float hardSwish_step1 = x * reLu6Complete;
1169 float result = hardSwish_step1 / 6;
1172 std::vector<float> expectedOutput(inputData.size());
1173 std::transform(inputData.begin(), inputData.end(), expectedOutput.begin(), f);
1175 return SimpleActivationTest<ArmnnType>(workloadFactory,
1177 tensorHandleFactory,
1194 return HardSwishTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
1202 return HardSwishTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager,
1203 tensorHandleFactory, 0.1f, 64);
1211 return HardSwishTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
1215 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
1223 unsigned int batchSize = 5,
1224 float qScale = 0.0f,
1225 int32_t qOffset = 0)
1228 unsigned int width = 17;
1229 unsigned int height = 29;
1230 unsigned int channels = 2;
1238 unsigned int shape[] = {batchSize, channels, height, width};
1244 if(armnn::IsQuantizedType<T>())
1252 float minVal = -10.f;
1258 std::vector<T> input = MakeRandomTensor<T>(inputTensorInfo, 21453, minVal, 10.f);
1260 std::vector<T> expectedOutput(outputTensorInfo.
GetNumElements());
1262 std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.
CreateTensorHandle(inputTensorInfo);
1263 std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.
CreateTensorHandle(outputTensorInfo);
1265 std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refTensorHandleFactory.
CreateTensorHandle(inputTensorInfo);
1266 std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refTensorHandleFactory.
CreateTensorHandle(outputTensorInfo);
1270 AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
1271 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
1278 SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
1279 SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
1281 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.
CreateActivation(data, info);
1283 std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.
CreateActivation(refData, refInfo);
1286 inputHandle->Allocate();
1287 outputHandle->Allocate();
1288 inputHandleRef->Allocate();
1289 outputHandleRef->Allocate();
1294 workload->Execute();
1295 workloadRef->Execute();
1302 outputHandle->GetShape(),
1314 unsigned int batchSize)
1316 return CompareActivationTestImpl<armnn::DataType::Float32>(
1317 workloadFactory, memoryManager, refWorkloadFactory, tensorHandleFactory,
1318 refTensorHandleFactory, f, batchSize);
1329 return CompareActivationTestImpl<armnn::DataType::QAsymmU8>(
1330 workloadFactory, memoryManager, refWorkloadFactory,
1331 tensorHandleFactory, refTensorHandleFactory, f, 5, 0.1f, 50);
1342 return CompareActivationTestImpl<armnn::DataType::QSymmS16>(
1343 workloadFactory, memoryManager, refWorkloadFactory, tensorHandleFactory,
1344 refTensorHandleFactory, f, 5, 0.1f, 0);
LayerTestResult< T, 4 > SimpleSigmoidTestCommon(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, float qScale, int32_t qOffset)
LayerTestResult< float, 4 > SqrtTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< int16_t, 4 > ConstantLinearActivationInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< float, 4 > SimpleSigmoidTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< float, 4 > SoftReLuTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
const TensorShape & GetShape() const
LayerTestResult< float, 4 > CompareBoundedReLuTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, armnn::IWorkloadFactory &refWorkloadFactory, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::ITensorHandleFactory &refTensorHandleFactory, float upperBound, float lowerBound)
LayerTestResult< T, 4 > TanhTestCommon(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, float qScale, int32_t qOffset)
LayerTestResult< int16_t, 4 > LeakyReLuInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< uint8_t, 4 > CompareActivationUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, armnn::IWorkloadFactory &refWorkloadFactory, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::ITensorHandleFactory &refTensorHandleFactory, armnn::ActivationFunction f)
LayerTestResult< float, 4 > ConstantLinearActivationTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< int16_t, 4 > SimpleSigmoidInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< float, 4 > ReLuTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< uint8_t, 4 > TanhUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
void IgnoreUnused(Ts &&...)
LayerTestResult< T, 4 > SoftReLuTestCommon(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, float qScale, int32_t qOffset)
virtual std::unique_ptr< IWorkload > CreateActivation(const ActivationQueueDescriptor &descriptor, const WorkloadInfo &info) const
LayerDescriptor m_Parameters
LayerTestResult< uint8_t, 4 > SoftReLuUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< float, 5 > SqrtNNTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< float, 4 > TanhTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< int16_t, 4 > TanhInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< int16_t, 4 > SqrtInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< T, 4 > CompareActivationTestImpl(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, armnn::IWorkloadFactory &refWorkloadFactory, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::ITensorHandleFactory &refTensorHandleFactory, armnn::ActivationFunction f, unsigned int batchSize=5, float qScale=0.0f, int32_t qOffset=0)
LayerTestResult< float, 4 > BoundedReLuUpperAndLowerBoundTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< int16_t, 4 > BoundedReLuInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< float, 4 > SquareTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
std::shared_ptr< IMemoryManager > IMemoryManagerSharedPtr
LayerTestResult< float, 4 > BoundedReLuUpperBoundOnlyTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< T, 4 > AbsTestCommon(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, float qScale, int32_t qOffset)
LayerTestResult< uint8_t, 4 > EluUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
std::vector< T > m_ExpectedData
void SetQuantizationScale(float scale)
LayerTestResult< T, 4 > SimpleActivationTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, armnn::ActivationFunction activationFunction, float activationParameterA, float activationParameterB, float scale, int32_t offset, const std::vector< float > &inputData, float outScale, int32_t outOffset, const std::vector< float > &outputExpectedData)
LayerTestResult< float, 4 > EluTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
#define ARMNN_ASSERT(COND)
LayerTestResult< int16_t, 4 > CompareActivationInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, armnn::IWorkloadFactory &refWorkloadFactory, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::ITensorHandleFactory &refTensorHandleFactory, armnn::ActivationFunction f)
LayerTestResult< uint8_t, 4 > ReLuUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< uint8_t, 4 > SquareUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< uint8_t, 4 > BoundedReLuUint8UpperBoundOnlyTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
An ActivationDescriptor for the ActivationLayer.
void CopyDataFromITensorHandle(void *memory, const armnn::ITensorHandle *tensorHandle)
min(a, max(b, input)) ReLu1 & ReLu6.
LayerTestResult< float, 4 > LeakyReLuTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< T, 4 > SqrtTestCommon(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, float qScale, int32_t qOffset)
LayerTestResult< int16_t, 4 > SoftReLuInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< uint8_t, 4 > BoundedReLuUint8UpperAndLowerBoundTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
float m_A
Alpha upper bound value used by the activation functions (BoundedReLu, Linear, TanH, Elu).
std::vector< T > m_ActualData
LayerTestResult< uint8_t, 4 > SimpleSigmoidUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< T, 4 > LeakyReLuTestCommon(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, float qScale, int32_t qOffset)
LayerTestResult< T, 4 > HardSwishTestCommon(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, float qScale, int32_t qOffset)
LayerTestResult< int16_t, 4 > EluInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< T, 4 > EluTestCommon(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, float qScale, int32_t qOffset)
LayerTestResult< uint8_t, 4 > SqrtUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< int16_t, 4 > SquareInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< T, 4 > ConstantLinearActivationTestCommon(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, float qScale=0.0f, int32_t qOffset=0)
LayerTestResult< int16_t, 4 > ReLuInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< uint8_t, 4 > ConstantLinearActivationUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< T, 4 > ReLuTestCommon(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, float qScale, int32_t qOffset)
Contains information about TensorInfos of a layer.
void SetQuantizationOffset(int32_t offset)
LayerTestResult< float, 4 > AbsTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< T, 4 > BoundedReLuTestCommon(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, float upperBound, float lowerBound, float inputScale, int32_t inputOffset, float outputScale, int32_t outputOffset, const std::vector< T > &inputData, const std::vector< T > &outputExpectedData, unsigned int inputWidth, unsigned int inputHeight, unsigned int inputChannels, unsigned int inputBatchSize)
LayerTestResult< float, 4 > HardSwishTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< uint8_t, 4 > HardSwishUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< int16_t, 4 > AbsInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< T, 4 > SquareTestCommon(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, float qScale, int32_t qOffset)
float m_B
Beta lower bound value used by the activation functions (BoundedReLu, Linear, TanH).
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo) const =0
ActivationFunction m_Function
The activation function to use (Sigmoid, TanH, Linear, ReLu, BoundedReLu, SoftReLu, LeakyReLu, Abs, Sqrt, Square, Elu).
LayerTestResult< uint8_t, 4 > AbsUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< float, 4 > CompareActivationTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, armnn::IWorkloadFactory &refWorkloadFactory, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::ITensorHandleFactory &refTensorHandleFactory, armnn::ActivationFunction f, unsigned int batchSize)
unsigned int GetNumElements() const
LayerTestResult< int16_t, 4 > HardSwishInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)
LayerTestResult< uint8_t, 4 > LeakyReLuUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)