// NOTE(review): this listing is an extraction of a doxygen source page — the
// leading integers are the original file's own line numbers and there are
// gaps between fragments. Do not treat this text as compilable C++.
//
// Fragment of BoundedReLuTestCommon<ArmnnType, T> (full signature in the
// symbol index at the bottom of this listing): builds NCHW input/output
// tensor infos of identical shape, optionally applies quantization
// parameters, creates an Activation workload and records the expected output.
18 #include <boost/multi_array.hpp> 22 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
// Tail of the parameter list; upperBound/lowerBound and the scale/offset
// parameters are in the elided lines 23-31.
32 const std::vector<T>& inputData,
33 const std::vector<T>& outputExpectedData,
34 unsigned int inputWidth,
35 unsigned int inputHeight,
36 unsigned int inputChannels,
37 unsigned int inputBatchSize)
// Bounded ReLU is element-wise, so output dims mirror the input dims.
40 unsigned int outputWidth = inputWidth;
41 unsigned int outputHeight = inputHeight;
42 unsigned int outputChannels = inputChannels;
43 unsigned int outputBatchSize = inputBatchSize;
45 armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth }, ArmnnType);
47 armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth }, ArmnnType);
// Quantization parameters only apply to quantized element types.
49 if(armnn::IsQuantizedType<T>())
52 inputTensorInfo.SetQuantizationOffset(inputOffset);
54 outputTensorInfo.SetQuantizationScale(outputScale);
55 outputTensorInfo.SetQuantizationOffset(outputOffset);
60 auto input = MakeTensor<T, 4>(inputTensorInfo, inputData);
62 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.
CreateTensorHandle(inputTensorInfo);
63 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.
CreateTensorHandle(outputTensorInfo);
68 AddInputToWorkload(descriptor, workloadInfo, inputTensorInfo, inputHandle.get());
69 AddOutputToWorkload(descriptor, workloadInfo, outputTensorInfo, outputHandle.get());
75 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.
CreateActivation(descriptor, workloadInfo);
77 inputHandle->Allocate();
78 outputHandle->Allocate();
// Expected reference output stored for the LayerTestResult comparison.
86 result.
outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputExpectedData);
// BoundedReLuUpperAndLowerBoundTest (float): ReLu1 — clamp to [-1, 1].
95 unsigned int inputWidth = 4u;
96 unsigned int inputHeight = 5u;
97 unsigned int inputChannels = 1u;
98 unsigned int inputBatchSize = 1;
100 std::vector<float> input = std::vector<float>{
101 -2.0f, 0.1f, 0.5f, 1.25f,
102 0.786f, 0.9875f, -1.5f, 0.384f,
103 1.0001f, 3.5f, 7.5f, 0.896f,
104 2.126f, 2.0f, 0.3f, 0.15f,
105 0.999f, 1.2f, 0.89f, 6.1f,
// Expected: every element clamped into [-1.0, 1.0].
109 std::vector<float> output = std::vector<float>{
110 -1.0f, 0.1f, 0.5f, 1.0f,
111 0.786f, 0.9875f, -1.0f, 0.384f,
112 1.0f, 1.0f, 1.0f, 0.896f,
113 1.0f, 1.0f, 0.3f, 0.15f,
114 0.999f, 1.0f, 0.89f, 1.0f,
// upperBound=1, lowerBound=-1; unit scale / zero offset (float path).
117 return BoundedReLuTestCommon<armnn::DataType::Float32>(
118 workloadFactory, memoryManager, 1.0f, -1.0f, 1.0f, 0, 1.0f, 0, input, output,
119 inputWidth, inputHeight, inputChannels, inputBatchSize);
// BoundedReLuUpperBoundOnlyTest (float): ReLu6 — clamp to [0, 6].
126 unsigned int inputWidth = 4u;
127 unsigned int inputHeight = 5u;
128 unsigned int inputChannels = 1u;
129 unsigned int inputBatchSize = 1;
131 std::vector<float> input = std::vector<float>{
132 -1.0f, 0.1f, 0.5f, 6.25f,
133 0.786f, 5.9875f, -0.5f, 0.384f,
134 6.0001f, 3.5f, 7.5f, 0.896f,
135 2.126f, 12.0f, 0.3f, 0.15f,
136 0.999f, 1.2f, 0.89f, 6.1f,
// Expected: negatives -> 0, values above 6 -> 6.
140 std::vector<float> output = std::vector<float>{
141 0.0f, 0.1f, 0.5f, 6.0f,
142 0.786f, 5.9875f, 0.0f, 0.384f,
143 6.0f, 3.5f, 6.0f, 0.896f,
144 2.126f, 6.0f, 0.3f, 0.15f,
145 0.999f, 1.2f, 0.89f, 6.0f,
148 return BoundedReLuTestCommon<armnn::DataType::Float32>(
149 workloadFactory, memoryManager, 6.0f, 0.0f, 1.0f, 0, 1.0f, 0, input, output,
150 inputWidth, inputHeight, inputChannels, inputBatchSize);
// BoundedReLuUint8UpperBoundOnlyTest: quantized ReLu6. The raw uint8
// input/output values are in lines elided from this extract (163-172).
157 unsigned int inputWidth = 3u;
158 unsigned int inputHeight = 2u;
159 unsigned int inputChannels = 1u;
160 unsigned int inputBatchSize = 1;
162 std::vector<uint8_t> input = std::vector<uint8_t>{
168 std::vector<uint8_t> output = std::vector<uint8_t>{
// Input quantization spans 12 real units at zero-point 63; output spans
// the clamped [0, 6] range at zero-point 0.
173 float inputScale = 12.0f / 255.0f;
174 int32_t inputOffset = 63;
175 float outputScale = 6.0f / 255.0f;
176 int32_t outputOffset = 0;
178 return BoundedReLuTestCommon<armnn::DataType::QAsymmU8>(
179 workloadFactory, memoryManager, 6.0f, 0.0f,
180 inputScale, inputOffset, outputScale, outputOffset,
181 input, output, inputWidth, inputHeight, inputChannels, inputBatchSize);
// BoundedReLuUint8UpperAndLowerBoundTest: quantized ReLu1; the input
// quantization is reused for the output (same scale/offset passed twice).
188 unsigned int inputWidth = 3u;
189 unsigned int inputHeight = 2u;
190 unsigned int inputChannels = 1u;
191 unsigned int inputBatchSize = 1;
193 std::vector<uint8_t> input = std::vector<uint8_t>{
199 std::vector<uint8_t> output = std::vector<uint8_t>{
204 int32_t inputOffset = 112;
205 float inputScale = 0.0125f;
207 return BoundedReLuTestCommon<armnn::DataType::QAsymmU8>(
208 workloadFactory, memoryManager, 1.0f, -1.0f,
209 inputScale, inputOffset, inputScale, inputOffset,
210 input, output, inputWidth, inputHeight, inputChannels, inputBatchSize);
// Compile-time shape constants shared by the random-input bounded-ReLU test.
216 struct BoundedReLuRandomInputTestTraits
218 constexpr
static unsigned int inputHeight = 31u;
219 constexpr
static unsigned int inputWidth = 19u;
220 constexpr
static unsigned int inputChannels = 4u;
221 constexpr
static unsigned int inputBatchSize = 2;
// Element-wise op: output shape mirrors input shape.
223 constexpr
static unsigned int outputHeight = inputHeight;
224 constexpr
static unsigned int outputWidth = inputWidth;
225 constexpr
static unsigned int outputChannels = inputChannels;
226 constexpr
static unsigned int outputBatchSize = inputBatchSize;
230 return armnn::TensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
236 return armnn::TensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
// Runs an activation workload over pseudo-random input (fixed seed 4605828
// keeps it deterministic; the range is deliberately wider than
// [lowerBound, upperBound] so clamping actually occurs).
241 boost::multi_array<float, 4> BoundedReLuRandomInputTest(
250 const armnn::TensorInfo outputTensorInfo = BoundedReLuRandomInputTestTraits::GetOutputTensorInfo();
252 boost::multi_array<float, 4> output(GetTensorShapeAsArray<4>(outputTensorInfo));
256 auto input = MakeRandomTensor<float, 4>(inputTensorInfo, 4605828, lowerBound - 5.0f, upperBound * 2.0f);
258 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.
CreateTensorHandle(inputTensorInfo);
259 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.
CreateTensorHandle(outputTensorInfo);
264 AddInputToWorkload(descriptor, workloadInfo, inputTensorInfo, inputHandle.get());
265 AddOutputToWorkload(descriptor, workloadInfo, outputTensorInfo, outputHandle.get());
268 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.
CreateActivation(descriptor, workloadInfo);
270 inputHandle->Allocate();
271 outputHandle->Allocate();
// CompareBoundedReLuTest fragment: m_A/m_B carry the upper/lower clamp
// bounds for BoundedReLu (see ActivationDescriptor in the index below).
295 activationDescriptor.
m_A = upperBound;
296 activationDescriptor.
m_B = lowerBound;
298 result.
output = BoundedReLuRandomInputTest(
299 workloadFactory, memoryManager, 0.0f, upperBound, activationDescriptor);
// Reference run: same activation through the ref factory, no memory manager.
301 refWorkloadFactory,
nullptr, 0.0f, upperBound, activationDescriptor);
// ConstantLinearActivationTestCommon<ArmnnType, T> fragment: runs a Linear
// activation over deterministic random input (descriptor setup is in the
// elided lines).
306 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
314 unsigned int inputHeight = 20;
315 unsigned int inputWidth = 17;
316 unsigned int inputChannels = 3;
317 unsigned int batchSize = 5;
322 unsigned int shape[] = {batchSize, inputChannels, inputHeight, inputWidth};
328 if(armnn::IsQuantizedType<T>())
338 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.
CreateTensorHandle(inputTensorInfo);
339 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.
CreateTensorHandle(outputTensorInfo);
344 AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
345 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
350 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.
CreateActivation(data, info);
352 inputHandle->Allocate();
353 outputHandle->Allocate();
// Fixed seed keeps the random input reproducible across runs.
355 boost::multi_array<T, 4> input = MakeRandomTensor<T, 4>(inputTensorInfo, 7123561);
// Per-type entry points; the float path uses the default (no) quantization.
372 return ConstantLinearActivationTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
379 return ConstantLinearActivationTestCommon<armnn::DataType::QAsymmU8>(
380 workloadFactory, memoryManager, 4.0f, 3);
387 return ConstantLinearActivationTestCommon<armnn::DataType::QSymmS16>(
388 workloadFactory, memoryManager, 0.1f, 0);
// SimpleActivationTest<ArmnnType, T> fragment: shared driver for the
// element-wise activation tests below. Quantizes float input/expected data
// with separate in/out scale-offset pairs and runs one activation workload
// over a fixed 1x1x1x16 tensor.
391 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
396 float activationParameterA,
397 float activationParameterB,
400 const std::vector<float>& inputData,
403 const std::vector<float>& outputExpectedData)
406 constexpr
static unsigned int inputWidth = 16u;
407 constexpr
static unsigned int inputHeight = 1u;
408 constexpr
static unsigned int inputChannels = 1u;
409 constexpr
static unsigned int inputBatchSize = 1u;
// Element-wise op: output dims mirror input dims.
411 constexpr
static unsigned int outputWidth = inputWidth;
412 constexpr
static unsigned int outputHeight = inputHeight;
413 constexpr
static unsigned int outputChannels = inputChannels;
414 constexpr
static unsigned int outputBatchSize = inputBatchSize;
416 armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth }, ArmnnType);
417 armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth }, ArmnnType);
// Input and output may use different quantization (outScale/outOffset).
420 if(armnn::IsQuantizedType<T>())
423 inputTensorInfo.SetQuantizationOffset(offset);
424 outputTensorInfo.SetQuantizationScale(outScale);
425 outputTensorInfo.SetQuantizationOffset(outOffset);
430 auto input = MakeTensor<T, 4>(inputTensorInfo, armnnUtils::QuantizedVector<T>(inputData, scale, offset));
432 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.
CreateTensorHandle(inputTensorInfo);
433 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.
CreateTensorHandle(outputTensorInfo);
438 AddInputToWorkload(descriptor, workloadInfo, inputTensorInfo, inputHandle.get());
439 AddOutputToWorkload(descriptor, workloadInfo, outputTensorInfo, outputHandle.get());
445 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.
CreateActivation(descriptor, workloadInfo);
447 inputHandle->Allocate();
448 outputHandle->Allocate();
// Expected output is quantized with the *output* quantization parameters.
458 MakeTensor<T, 4>(outputTensorInfo, armnnUtils::QuantizedVector<T>(outputExpectedData, outScale, outOffset));
// SimpleSigmoidTestCommon: expected values computed on the host with the
// logistic function, then checked against the backend's Sigmoid output.
463 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
470 std::vector<float> inputData =
472 -0.1f, -0.2f, -0.3f, -0.4f,
473 0.1f, 0.2f, 0.3f, 0.4f,
474 -1.0f, -2.0f, -3.0f, -4.0f,
475 1.0f, 2.0f, 3.0f, 4.0f
// Reference implementation: sigmoid(x) = 1 / (1 + e^-x).
479 auto f = [](
float value)
481 return 1.0f / (1.0f + std::exp(-value));
483 std::vector<float> outputExpectedData(inputData.size());
484 std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
486 return SimpleActivationTest<ArmnnType>(workloadFactory,
// Per-type entry points; quantized variants pick a scale/offset that maps
// the sigmoid's (0, 1) range into the integer domain.
503 return SimpleSigmoidTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
510 return SimpleSigmoidTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, 0.1f, 50);
517 return SimpleSigmoidTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, 0.1f, 0);
// ReLuTestCommon: reference is max(0, x).
520 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
527 std::vector<float> inputData = {
528 -0.1f, -0.2f, -0.3f, -0.4f,
529 0.1f, 0.2f, 0.3f, 0.4f,
530 -1.0f, -2.0f, -3.0f, -4.0f,
531 1.0f, 2.0f, 3.0f, 4.0f
535 auto f = [](
float value)
537 return std::fmax(0.0f, value);
539 std::vector<float> outputExpectedData(inputData.size());
540 std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
542 return SimpleActivationTest<ArmnnType>(workloadFactory,
// ReLuInt16Test / ReLuUint8Test / ReLuTest wrapper bodies.
559 return ReLuTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, 0.1f, 0);
567 return ReLuTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, 0.1f, 0);
574 return ReLuTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.1f, 0);
// Bounded-ReLU common for the Int16 path: reference is min(a, max(b, x))
// with a = 1 (upper) and b = -1 (lower), i.e. ReLu1.
578 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
585 std::vector<float> inputData = {
586 -0.1f, -0.2f, -0.3f, -0.4f,
587 0.1f, 0.2f, 0.3f, 0.4f,
588 -1.0f, -2.0f, -3.0f, -4.0f,
589 1.0f, 2.0f, 3.0f, 4.0f
591 const float a = 1.0f;
592 const float b = -1.0f;
594 auto f = [a, b](
float value)
596 return std::min(a, std::max(b, value));
598 std::vector<float> outputExpectedData(inputData.size());
599 std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
601 return SimpleActivationTest<ArmnnType>(workloadFactory,
// NOTE(review): per the symbol index the next line is the body of
// BoundedReLuInt16Test, yet it delegates to ReLuTestCommon rather than to
// the bounded-ReLU common defined just above (whose a/b bounds then go
// unused). Looks like a copy/paste bug — confirm against the upstream file
// before changing, since the intervening lines (602-617) are elided here.
618 return ReLuTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, 0.1f, 0);
// SoftReLuTestCommon: reference is softplus, ln(1 + e^x).
623 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
630 std::vector<float> inputData = {
631 -0.1f, -0.2f, -0.3f, -0.4f,
632 0.1f, 0.2f, 0.3f, 0.4f,
633 -1.0f, -2.0f, -3.0f, -4.0f,
634 1.0f, 2.0f, 3.0f, 4.0f
638 auto f = [](
float value)
640 return std::log(1.0f + std::exp(value));
642 std::vector<float> outputExpectedData(inputData.size());
643 std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
645 return SimpleActivationTest<ArmnnType>(workloadFactory,
662 return SoftReLuTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.1f, 0);
669 return SoftReLuTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, 0.0625f, 64);
676 return SoftReLuTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, 0.1f, 0);
// LeakyReLuTestCommon: reference is x for x > 0, else a*x (slope a = 0.01).
679 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
686 std::vector<float> inputData = {
687 -0.1f, -0.2f, -0.3f, -0.4f,
688 0.1f, 0.2f, 0.3f, 0.4f,
689 -1.0f, -2.0f, -3.0f, -4.0f,
690 1.0f, 2.0f, 3.0f, 4.0f
693 const float a = 0.01f;
695 auto f = [a](
float value)
697 return value > 0.0f ? value : (value * a);
699 std::vector<float> outputExpectedData(inputData.size());
700 std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
702 return SimpleActivationTest<ArmnnType>(workloadFactory,
719 return LeakyReLuTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.1f, 0);
726 return LeakyReLuTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, 0.0625f, 64);
733 return LeakyReLuTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, 0.1f, 0);
// AbsTestCommon: reference is |x|.
736 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
743 std::vector<float> inputData = {
744 -0.1f, -0.2f, -0.3f, -0.4f,
745 0.1f, 0.2f, 0.3f, 0.4f,
746 -1.0f, -2.0f, -3.0f, -4.0f,
747 1.0f, 2.0f, 3.0f, 4.0f
751 auto f = [](
float value)
753 return std::abs(value);
755 std::vector<float> outputExpectedData(inputData.size());
756 std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
758 return SimpleActivationTest<ArmnnType>(workloadFactory,
775 return AbsTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.1f, 0);
782 return AbsTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, 0.0625f, 64);
789 return AbsTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, 0.1f, 0);
// SqrtNNTest fragment: exercises Sqrt on a rank-5 tensor. Inputs are i/10
// for i in [0, 120), i.e. non-negative, so sqrt is always defined.
797 const int inputDataSize = 120;
798 std::vector<float> inputData(inputDataSize);
// NOTE(review): unsigned loop index compared against a signed bound —
// harmless here (both small and positive) but it triggers -Wsign-compare.
800 for (
unsigned int i = 0u; i < inputDataSize; ++i)
802 inputData[i] =
static_cast<float>(i) / 10;
805 auto f = [](
float value)
807 return std::sqrt(value);
809 std::vector<float> outputExpectedData(inputDataSize);
810 std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
819 auto input = MakeTensor<float, 5>(inputTensorInfo, inputData);
821 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.
CreateTensorHandle(inputTensorInfo);
822 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.
CreateTensorHandle(outputTensorInfo);
826 AddInputToWorkload(descriptor, workloadInfo, inputTensorInfo, inputHandle.get());
827 AddOutputToWorkload(descriptor, workloadInfo, outputTensorInfo, outputHandle.get());
831 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.
CreateActivation(descriptor, workloadInfo);
833 inputHandle->Allocate();
834 outputHandle->Allocate();
// Host-computed sqrt values form the reference for the comparison.
843 result.outputExpected = MakeTensor<float, 5>(outputTensorInfo, outputExpectedData);
// SqrtTestCommon: reference is sqrt(x); all inputs are non-negative.
848 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
855 std::vector<float> inputData = {
856 0.1f, 0.2f, 0.3f, 0.4f,
857 0.1f, 0.2f, 0.3f, 0.4f,
858 1.0f, 2.0f, 3.0f, 4.0f,
859 1.0f, 2.0f, 3.0f, 4.0f
863 auto f = [](
float value)
865 return std::sqrt(value);
867 std::vector<float> outputExpectedData(inputData.size());
868 std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
870 return SimpleActivationTest<ArmnnType>(workloadFactory,
887 return SqrtTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.1f, 0);
894 return SqrtTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, 0.0625f, 64);
901 return SqrtTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, 0.1f, 0);
// SquareTestCommon: reference is x^2. (std::pow(x, 2) is acceptable for a
// one-off host-side reference, though x*x would be cheaper.)
904 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
911 std::vector<float> inputData = {
912 -0.1f, -0.2f, -0.3f, -0.4f,
913 0.1f, 0.2f, 0.3f, 0.4f,
914 -1.0f, -2.0f, -3.0f, -4.0f,
915 1.0f, 2.0f, 3.0f, 4.0f
919 auto f = [](
float value)
921 return std::pow(value,2);
923 std::vector<float> outputExpectedData(inputData.size());
924 std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
926 return SimpleActivationTest<ArmnnType>(workloadFactory,
943 return SquareTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.1f, 0);
950 return SquareTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, 0.0625f, 64);
957 return SquareTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, 0.1f, 0);
// TanhTestCommon: scaled tanh — reference is a * tanh(b * x), a=2, b=3.
960 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
967 std::vector<float> inputData = {
968 -0.1f, -0.2f, -0.3f, -0.4f,
969 0.1f, 0.2f, 0.3f, 0.4f,
970 -1.0f, -2.0f, -3.0f, -4.0f,
971 1.0f, 2.0f, 3.0f, 4.0f
974 const float a = 2.0f;
975 const float b = 3.0f;
977 auto f = [a, b](
float value)
979 return a * tanhf(b * value);
981 std::vector<float> outputExpectedData(inputData.size());
982 std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
984 return SimpleActivationTest<ArmnnType>(workloadFactory,
1001 return TanhTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.1f, 0);
1008 return TanhTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, 0.1f, 64);
1015 return TanhTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, 0.1f, 0);
// EluTestCommon: reference is x for x >= 0, else a*(e^x - 1), with a = 0.01.
1019 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
1026 std::vector<float> inputData = {
1027 -0.1f, -0.2f, -0.3f, -0.4f,
1028 0.1f, 0.2f, 0.3f, 0.4f,
1029 -1.0f, -2.0f, -3.0f, -4.0f,
1030 1.0f, 2.0f, 3.0f, 4.0f
1034 const float a = 0.01f;
1036 auto f = [a](
float value)
1038 return (value >= 0) ? value : a * (expf(value) - 1);
1040 std::vector<float> outputExpectedData(inputData.size());
1041 std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
1043 return SimpleActivationTest<ArmnnType>(workloadFactory,
1053 outputExpectedData);
1060 return EluTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.1f, 0);
1067 return EluTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, 0.1f, 64);
1074 return EluTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, 0.1f, 0);
// HardSwishTestCommon: reference is x * ReLU6(x + 3) / 6.
1078 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
1085 std::vector<float> inputData = {
1086 -0.1f, -0.2f, -0.3f, -0.4f,
1087 0.1f, 0.2f, 0.3f, 0.4f,
1088 -1.0f, -2.0f, -3.0f, -4.0f,
1089 1.0f, 2.0f, 3.0f, 4.0f
1092 auto f = [](
float x)
// Hard-Swish expanded step by step: clamp (x + 3) to [0, 6], multiply by
// x, then divide by 6.
1097 float reLu6_step1 = std::max((x + 3),0.0f);
1098 float reLu6Complete = std::min(reLu6_step1, 6.0f);
1099 float hardSwish_step1 = x * reLu6Complete;
1100 float result = hardSwish_step1 / 6;
1103 std::vector<float> outputExpectedData(inputData.size());
1104 std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
1106 return SimpleActivationTest<ArmnnType>(workloadFactory,
1116 outputExpectedData);
1123 return HardSwishTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.1f, 0);
1130 return HardSwishTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, 0.1f, 64);
1137 return HardSwishTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, 0.1f, 0);
// CompareActivationTestImpl<ArmnnType, T> fragment: runs the same activation
// workload on the backend under test AND on a reference factory, over
// identical deterministic random input, so the two outputs can be compared.
1141 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
// NOTE(review): qScale defaults to 0.0f — a zero quantization scale would be
// invalid if actually applied; presumably the float path never reads it.
// Confirm in the elided quantization-setup lines (1163-1175).
1147 unsigned int batchSize = 5,
1148 float qScale = 0.0f,
1149 int32_t qOffset = 0)
1152 unsigned int width = 17;
1153 unsigned int height = 29;
1154 unsigned int channels = 2;
1162 unsigned int shape[] = {batchSize, channels, height, width};
1168 if(armnn::IsQuantizedType<T>())
1176 float minVal = -10.f;
// Fixed seed 21453: deterministic random input in [minVal, 10].
1182 boost::multi_array<T, 4> input = MakeRandomTensor<T, 4>(inputTensorInfo, 21453, minVal, 10.f);
1186 auto boostArrayExtents = boost::extents
1188 [boost::numeric_cast<boost::multi_array_types::extent_gen::index>(channels)]
1190 [boost::numeric_cast<boost::multi_array_types::extent_gen::index>(width)];
1191 ret.output.resize(boostArrayExtents);
1192 ret.outputExpected.resize(boostArrayExtents);
// One handle pair per factory: device-under-test and reference.
1195 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.
CreateTensorHandle(inputTensorInfo);
1196 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.
CreateTensorHandle(outputTensorInfo);
1198 std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refWorkloadFactory.
CreateTensorHandle(inputTensorInfo);
1199 std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.
CreateTensorHandle(outputTensorInfo);
1203 AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
1204 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
1211 SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
1212 SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
1214 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.
CreateActivation(data, info);
// CreateActivation may return null if the backend rejects the descriptor.
1215 BOOST_ASSERT(workload !=
nullptr);
1216 std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.
CreateActivation(refData, refInfo);
1217 BOOST_ASSERT(workloadRef !=
nullptr);
1219 inputHandle->Allocate();
1220 outputHandle->Allocate();
1221 inputHandleRef->Allocate();
1222 outputHandleRef->Allocate();
// Both workloads execute over the same input; results compared by caller.
1227 workload->Execute();
1228 workloadRef->Execute();
// Per-type entry points (Float32 / QAsymmU8 / QSymmS16).
1241 unsigned int batchSize)
1243 return CompareActivationTestImpl<armnn::DataType::Float32>(
1244 workloadFactory, memoryManager, refWorkloadFactory, f, batchSize);
1253 return CompareActivationTestImpl<armnn::DataType::QAsymmU8>(
1254 workloadFactory, memoryManager, refWorkloadFactory, f, 5, 0.1f, 50);
1263 return CompareActivationTestImpl<armnn::DataType::QSymmS16>(
1264 workloadFactory, memoryManager, refWorkloadFactory, f, 5, 0.1f, 0);
LayerTestResult< int16_t, 4 > SqrtInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< int16_t, 4 > ReLuInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< T, 4 > SoftReLuTestCommon(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, float qScale, int32_t qOffset)
LayerTestResult< float, 4 > LeakyReLuTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< T, 4 > SquareTestCommon(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, float qScale, int32_t qOffset)
LayerTestResult< float, 4 > SquareTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< int16_t, 4 > BoundedReLuInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< float, 4 > SoftReLuTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< float, 4 > ReLuTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< uint8_t, 4 > SquareUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< uint8_t, 4 > SqrtUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< int16_t, 4 > SquareInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< T, 4 > CompareActivationTestImpl(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, armnn::IWorkloadFactory &refWorkloadFactory, armnn::ActivationFunction f, unsigned int batchSize=5, float qScale=0.0f, int32_t qOffset=0)
boost::multi_array< T, n > outputExpected
LayerTestResult< T, 4 > TanhTestCommon(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, float qScale, int32_t qOffset)
LayerTestResult< uint8_t, 4 > BoundedReLuUint8UpperAndLowerBoundTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< float, 4 > CompareBoundedReLuTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, armnn::IWorkloadFactory &refWorkloadFactory, float upperBound, float lowerBound)
LayerTestResult< T, 4 > SqrtTestCommon(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, float qScale, int32_t qOffset)
void IgnoreUnused(Ts &&...)
virtual std::unique_ptr< IWorkload > CreateActivation(const ActivationQueueDescriptor &descriptor, const WorkloadInfo &info) const
LayerTestResult< T, 4 > ReLuTestCommon(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, float qScale, int32_t qOffset)
LayerTestResult< T, 4 > LeakyReLuTestCommon(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, float qScale, int32_t qOffset)
LayerTestResult< float, 4 > EluTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerDescriptor m_Parameters
LayerTestResult< int16_t, 4 > SimpleSigmoidInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< uint8_t, 4 > SoftReLuUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< T, 4 > HardSwishTestCommon(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, float qScale, int32_t qOffset)
LayerTestResult< float, 4 > BoundedReLuUpperAndLowerBoundTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< int16_t, 4 > LeakyReLuInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< int16_t, 4 > TanhInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< uint8_t, 4 > LeakyReLuUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< int16_t, 4 > HardSwishInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< int16_t, 4 > EluInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
std::shared_ptr< IMemoryManager > IMemoryManagerSharedPtr
LayerTestResult< float, 4 > TanhTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< uint8_t, 4 > AbsUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< uint8_t, 4 > CompareActivationUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, armnn::IWorkloadFactory &refWorkloadFactory, armnn::ActivationFunction f)
LayerTestResult< T, 4 > SimpleActivationTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, armnn::ActivationFunction activationFunction, float activationParameterA, float activationParameterB, float scale, int32_t offset, const std::vector< float > &inputData, float outScale, int32_t outOffset, const std::vector< float > &outputExpectedData)
LayerTestResult< int16_t, 4 > SoftReLuInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< float, 4 > AbsTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
void SetQuantizationScale(float scale)
std::enable_if_t< std::is_unsigned< Source >::value &&std::is_unsigned< Dest >::value, Dest > numeric_cast(Source source)
LayerTestResult< float, 5 > SqrtNNTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
An ActivationDescriptor for the ActivationLayer.
void CopyDataFromITensorHandle(void *memory, const armnn::ITensorHandle *tensorHandle)
min(a, max(b, input)) ReLu1 & ReLu6.
LayerTestResult< T, 4 > AbsTestCommon(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, float qScale, int32_t qOffset)
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo, const bool IsMemoryManaged=true) const =0
boost::multi_array< T, n > output
LayerTestResult< int16_t, 4 > AbsInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
float m_A
Alpha upper bound value used by the activation functions. (BoundedReLu, Linear, TanH).
LayerTestResult< uint8_t, 4 > EluUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< uint8_t, 4 > ReLuUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< float, 4 > ConstantLinearActivationTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< uint8_t, 4 > ConstantLinearActivationUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< float, 4 > CompareActivationTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, armnn::IWorkloadFactory &refWorkloadFactory, armnn::ActivationFunction f, unsigned int batchSize)
LayerTestResult< T, 4 > EluTestCommon(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, float qScale, int32_t qOffset)
LayerTestResult< int16_t, 4 > ConstantLinearActivationInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< float, 4 > SimpleSigmoidTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
Contains information about inputs and outputs to a layer.
void SetQuantizationOffset(int32_t offset)
LayerTestResult< float, 4 > BoundedReLuUpperBoundOnlyTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< T, 4 > ConstantLinearActivationTestCommon(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, float qScale=0.0f, int32_t qOffset=0)
LayerTestResult< uint8_t, 4 > BoundedReLuUint8UpperBoundOnlyTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< int16_t, 4 > CompareActivationInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, armnn::IWorkloadFactory &refWorkloadFactory, armnn::ActivationFunction f)
LayerTestResult< uint8_t, 4 > HardSwishUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< float, 4 > HardSwishTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< T, 4 > BoundedReLuTestCommon(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, float upperBound, float lowerBound, float inputScale, int32_t inputOffset, float outputScale, int32_t outputOffset, const std::vector< T > &inputData, const std::vector< T > &outputExpectedData, unsigned int inputWidth, unsigned int inputHeight, unsigned int inputChannels, unsigned int inputBatchSize)
LayerTestResult< uint8_t, 4 > TanhUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< T, 4 > SimpleSigmoidTestCommon(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, float qScale, int32_t qOffset)
float m_B
Beta lower bound value used by the activation functions. (BoundedReLu, Linear, TanH).
TensorInfo GetInputTensorInfo(const Network *network)
LayerTestResult< float, 4 > SqrtTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
ActivationFunction m_Function
The activation function to use (Sigmoid, TanH, Linear, ReLu, BoundedReLu, SoftReLu, LeakyReLu, Abs, Sqrt, Square).
LayerTestResult< uint8_t, 4 > SimpleSigmoidUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)