20 #include <boost/multi_array.hpp> 24 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
35 const std::vector<T>& inputData,
36 const std::vector<T>& outputExpectedData,
37 unsigned int inputWidth,
38 unsigned int inputHeight,
39 unsigned int inputChannels,
40 unsigned int inputBatchSize)
43 unsigned int outputWidth = inputWidth;
44 unsigned int outputHeight = inputHeight;
45 unsigned int outputChannels = inputChannels;
46 unsigned int outputBatchSize = inputBatchSize;
48 armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth }, ArmnnType);
50 armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth }, ArmnnType);
52 if(armnn::IsQuantizedType<T>())
55 inputTensorInfo.SetQuantizationOffset(inputOffset);
57 outputTensorInfo.SetQuantizationScale(outputScale);
58 outputTensorInfo.SetQuantizationOffset(outputOffset);
63 auto input = MakeTensor<T, 4>(inputTensorInfo, inputData);
65 std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.
CreateTensorHandle(inputTensorInfo);
66 std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.
CreateTensorHandle(outputTensorInfo);
71 AddInputToWorkload(descriptor, workloadInfo, inputTensorInfo, inputHandle.get());
72 AddOutputToWorkload(descriptor, workloadInfo, outputTensorInfo, outputHandle.get());
78 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.
CreateActivation(descriptor, workloadInfo);
80 inputHandle->Allocate();
81 outputHandle->Allocate();
89 result.
outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputExpectedData);
99 unsigned int inputWidth = 4u;
100 unsigned int inputHeight = 5u;
101 unsigned int inputChannels = 1u;
102 unsigned int inputBatchSize = 1;
104 std::vector<float> input = std::vector<float>{
105 -2.0f, 0.1f, 0.5f, 1.25f,
106 0.786f, 0.9875f, -1.5f, 0.384f,
107 1.0001f, 3.5f, 7.5f, 0.896f,
108 2.126f, 2.0f, 0.3f, 0.15f,
109 0.999f, 1.2f, 0.89f, 6.1f,
113 std::vector<float> output = std::vector<float>{
114 -1.0f, 0.1f, 0.5f, 1.0f,
115 0.786f, 0.9875f, -1.0f, 0.384f,
116 1.0f, 1.0f, 1.0f, 0.896f,
117 1.0f, 1.0f, 0.3f, 0.15f,
118 0.999f, 1.0f, 0.89f, 1.0f,
121 return BoundedReLuTestCommon<armnn::DataType::Float32>(
122 workloadFactory, memoryManager, tensorHandleFactory, 1.0f, -1.0f, 1.0f, 0, 1.0f, 0, input, output,
123 inputWidth, inputHeight, inputChannels, inputBatchSize);
131 unsigned int inputWidth = 4u;
132 unsigned int inputHeight = 5u;
133 unsigned int inputChannels = 1u;
134 unsigned int inputBatchSize = 1;
136 std::vector<float> input = std::vector<float>{
137 -1.0f, 0.1f, 0.5f, 6.25f,
138 0.786f, 5.9875f, -0.5f, 0.384f,
139 6.0001f, 3.5f, 7.5f, 0.896f,
140 2.126f, 12.0f, 0.3f, 0.15f,
141 0.999f, 1.2f, 0.89f, 6.1f,
145 std::vector<float> output = std::vector<float>{
146 0.0f, 0.1f, 0.5f, 6.0f,
147 0.786f, 5.9875f, 0.0f, 0.384f,
148 6.0f, 3.5f, 6.0f, 0.896f,
149 2.126f, 6.0f, 0.3f, 0.15f,
150 0.999f, 1.2f, 0.89f, 6.0f,
153 return BoundedReLuTestCommon<armnn::DataType::Float32>(
154 workloadFactory, memoryManager, tensorHandleFactory, 6.0f, 0.0f, 1.0f, 0, 1.0f, 0, input, output,
155 inputWidth, inputHeight, inputChannels, inputBatchSize);
163 unsigned int inputWidth = 3u;
164 unsigned int inputHeight = 2u;
165 unsigned int inputChannels = 1u;
166 unsigned int inputBatchSize = 1;
168 std::vector<uint8_t> input = std::vector<uint8_t>{
174 std::vector<uint8_t> output = std::vector<uint8_t>{
179 float inputScale = 12.0f / 255.0f;
180 int32_t inputOffset = 63;
181 float outputScale = 6.0f / 255.0f;
182 int32_t outputOffset = 0;
184 return BoundedReLuTestCommon<armnn::DataType::QAsymmU8>(
185 workloadFactory, memoryManager, tensorHandleFactory, 6.0f, 0.0f,
186 inputScale, inputOffset, outputScale, outputOffset,
187 input, output, inputWidth, inputHeight, inputChannels, inputBatchSize);
195 unsigned int inputWidth = 3u;
196 unsigned int inputHeight = 2u;
197 unsigned int inputChannels = 1u;
198 unsigned int inputBatchSize = 1;
200 std::vector<uint8_t> input = std::vector<uint8_t>{
206 std::vector<uint8_t> output = std::vector<uint8_t>{
211 int32_t inputOffset = 112;
212 float inputScale = 0.0125f;
214 return BoundedReLuTestCommon<armnn::DataType::QAsymmU8>(
215 workloadFactory, memoryManager, tensorHandleFactory, 1.0f, -1.0f,
216 inputScale, inputOffset, inputScale, inputOffset,
217 input, output, inputWidth, inputHeight, inputChannels, inputBatchSize);
223 struct BoundedReLuRandomInputTestTraits
225 constexpr
static unsigned int inputHeight = 31u;
226 constexpr
static unsigned int inputWidth = 19u;
227 constexpr
static unsigned int inputChannels = 4u;
228 constexpr
static unsigned int inputBatchSize = 2;
230 constexpr
static unsigned int outputHeight = inputHeight;
231 constexpr
static unsigned int outputWidth = inputWidth;
232 constexpr
static unsigned int outputChannels = inputChannels;
233 constexpr
static unsigned int outputBatchSize = inputBatchSize;
237 return armnn::TensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
243 return armnn::TensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
248 boost::multi_array<float, 4> BoundedReLuRandomInputTest(
258 const armnn::TensorInfo outputTensorInfo = BoundedReLuRandomInputTestTraits::GetOutputTensorInfo();
260 boost::multi_array<float, 4> output(GetTensorShapeAsArray<4>(outputTensorInfo));
264 auto input = MakeRandomTensor<float, 4>(inputTensorInfo, 4605828, lowerBound - 5.0f, upperBound * 2.0f);
266 std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.
CreateTensorHandle(inputTensorInfo);
267 std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.
CreateTensorHandle(outputTensorInfo);
272 AddInputToWorkload(descriptor, workloadInfo, inputTensorInfo, inputHandle.get());
273 AddOutputToWorkload(descriptor, workloadInfo, outputTensorInfo, outputHandle.get());
276 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.
CreateActivation(descriptor, workloadInfo);
278 inputHandle->Allocate();
279 outputHandle->Allocate();
305 activationDescriptor.
m_A = upperBound;
306 activationDescriptor.
m_B = lowerBound;
308 result.
output = BoundedReLuRandomInputTest(
309 workloadFactory, memoryManager, tensorHandleFactory, 0.0f, upperBound, activationDescriptor);
311 refWorkloadFactory,
nullptr, refTensorHandleFactory, 0.0f, upperBound, activationDescriptor);
316 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
325 unsigned int inputHeight = 20;
326 unsigned int inputWidth = 17;
327 unsigned int inputChannels = 3;
328 unsigned int batchSize = 5;
333 unsigned int shape[] = {batchSize, inputChannels, inputHeight, inputWidth};
339 if(armnn::IsQuantizedType<T>())
348 std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.
CreateTensorHandle(inputTensorInfo);
349 std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.
CreateTensorHandle(outputTensorInfo);
354 AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
355 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
360 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.
CreateActivation(data, info);
362 inputHandle->Allocate();
363 outputHandle->Allocate();
365 boost::multi_array<T, 4> input = MakeRandomTensor<T, 4>(inputTensorInfo, 7123561);
383 return ConstantLinearActivationTestCommon<armnn::DataType::Float32>(workloadFactory,
385 tensorHandleFactory);
393 return ConstantLinearActivationTestCommon<armnn::DataType::QAsymmU8>(
394 workloadFactory, memoryManager, tensorHandleFactory, 4.0f, 3);
402 return ConstantLinearActivationTestCommon<armnn::DataType::QSymmS16>(
403 workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
406 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
412 float activationParameterA,
413 float activationParameterB,
416 const std::vector<float>& inputData,
419 const std::vector<float>& outputExpectedData)
422 constexpr
static unsigned int inputWidth = 16u;
423 constexpr
static unsigned int inputHeight = 1u;
424 constexpr
static unsigned int inputChannels = 1u;
425 constexpr
static unsigned int inputBatchSize = 1u;
427 constexpr
static unsigned int outputWidth = inputWidth;
428 constexpr
static unsigned int outputHeight = inputHeight;
429 constexpr
static unsigned int outputChannels = inputChannels;
430 constexpr
static unsigned int outputBatchSize = inputBatchSize;
432 armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth }, ArmnnType);
433 armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth }, ArmnnType);
436 if(armnn::IsQuantizedType<T>())
439 inputTensorInfo.SetQuantizationOffset(offset);
440 outputTensorInfo.SetQuantizationScale(outScale);
441 outputTensorInfo.SetQuantizationOffset(outOffset);
446 auto input = MakeTensor<T, 4>(inputTensorInfo, armnnUtils::QuantizedVector<T>(inputData, scale, offset));
448 std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.
CreateTensorHandle(inputTensorInfo);
449 std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.
CreateTensorHandle(outputTensorInfo);
454 AddInputToWorkload(descriptor, workloadInfo, inputTensorInfo, inputHandle.get());
455 AddOutputToWorkload(descriptor, workloadInfo, outputTensorInfo, outputHandle.get());
461 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.
CreateActivation(descriptor, workloadInfo);
463 inputHandle->Allocate();
464 outputHandle->Allocate();
474 MakeTensor<T, 4>(outputTensorInfo, armnnUtils::QuantizedVector<T>(outputExpectedData, outScale, outOffset));
479 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
487 std::vector<float> inputData =
489 -0.1f, -0.2f, -0.3f, -0.4f,
490 0.1f, 0.2f, 0.3f, 0.4f,
491 -1.0f, -2.0f, -3.0f, -4.0f,
492 1.0f, 2.0f, 3.0f, 4.0f
496 auto f = [](
float value)
498 return 1.0f / (1.0f + std::exp(-value));
500 std::vector<float> outputExpectedData(inputData.size());
501 std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
503 return SimpleActivationTest<ArmnnType>(workloadFactory,
522 return SimpleSigmoidTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager,
523 tensorHandleFactory, 0.0f, 0);
531 return SimpleSigmoidTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager,
532 tensorHandleFactory, 0.1f, 50);
540 return SimpleSigmoidTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager,
541 tensorHandleFactory, 0.1f, 0);
544 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
552 std::vector<float> inputData = {
553 -0.1f, -0.2f, -0.3f, -0.4f,
554 0.1f, 0.2f, 0.3f, 0.4f,
555 -1.0f, -2.0f, -3.0f, -4.0f,
556 1.0f, 2.0f, 3.0f, 4.0f
560 auto f = [](
float value)
562 return std::fmax(0.0f, value);
564 std::vector<float> outputExpectedData(inputData.size());
565 std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
567 return SimpleActivationTest<ArmnnType>(workloadFactory,
586 return ReLuTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
595 return ReLuTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
603 return ReLuTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
607 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
615 std::vector<float> inputData = {
616 -0.1f, -0.2f, -0.3f, -0.4f,
617 0.1f, 0.2f, 0.3f, 0.4f,
618 -1.0f, -2.0f, -3.0f, -4.0f,
619 1.0f, 2.0f, 3.0f, 4.0f
621 const float a = 1.0f;
622 const float b = -1.0f;
624 auto f = [a, b](
float value)
626 return std::min(a, std::max(b, value));
628 std::vector<float> outputExpectedData(inputData.size());
629 std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
631 return SimpleActivationTest<ArmnnType>(workloadFactory,
650 return ReLuTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
655 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
663 std::vector<float> inputData = {
664 -0.1f, -0.2f, -0.3f, -0.4f,
665 0.1f, 0.2f, 0.3f, 0.4f,
666 -1.0f, -2.0f, -3.0f, -4.0f,
667 1.0f, 2.0f, 3.0f, 4.0f
671 auto f = [](
float value)
673 return std::log(1.0f + std::exp(value));
675 std::vector<float> outputExpectedData(inputData.size());
676 std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
678 return SimpleActivationTest<ArmnnType>(workloadFactory,
697 return SoftReLuTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
705 return SoftReLuTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager,
706 tensorHandleFactory, 0.0625f, 64);
714 return SoftReLuTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
717 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
725 std::vector<float> inputData = {
726 -0.1f, -0.2f, -0.3f, -0.4f,
727 0.1f, 0.2f, 0.3f, 0.4f,
728 -1.0f, -2.0f, -3.0f, -4.0f,
729 1.0f, 2.0f, 3.0f, 4.0f
732 const float a = 0.01f;
734 auto f = [a](
float value)
736 return value > 0.0f ? value : (value * a);
738 std::vector<float> outputExpectedData(inputData.size());
739 std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
741 return SimpleActivationTest<ArmnnType>(workloadFactory,
760 return LeakyReLuTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
768 return LeakyReLuTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager,
769 tensorHandleFactory, 0.0625f, 64);
777 return LeakyReLuTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
780 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
788 std::vector<float> inputData = {
789 -0.1f, -0.2f, -0.3f, -0.4f,
790 0.1f, 0.2f, 0.3f, 0.4f,
791 -1.0f, -2.0f, -3.0f, -4.0f,
792 1.0f, 2.0f, 3.0f, 4.0f
796 auto f = [](
float value)
798 return std::abs(value);
800 std::vector<float> outputExpectedData(inputData.size());
801 std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
803 return SimpleActivationTest<ArmnnType>(workloadFactory,
822 return AbsTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
830 return AbsTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, 0.0625f, 64);
838 return AbsTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
847 const int inputDataSize = 120;
848 std::vector<float> inputData(inputDataSize);
850 for (
unsigned int i = 0u; i < inputDataSize; ++i)
852 inputData[i] =
static_cast<float>(i) / 10;
855 auto f = [](
float value)
857 return std::sqrt(value);
859 std::vector<float> outputExpectedData(inputDataSize);
860 std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
869 auto input = MakeTensor<float, 5>(inputTensorInfo, inputData);
871 std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.
CreateTensorHandle(inputTensorInfo);
872 std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.
CreateTensorHandle(outputTensorInfo);
876 AddInputToWorkload(descriptor, workloadInfo, inputTensorInfo, inputHandle.get());
877 AddOutputToWorkload(descriptor, workloadInfo, outputTensorInfo, outputHandle.get());
881 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.
CreateActivation(descriptor, workloadInfo);
883 inputHandle->Allocate();
884 outputHandle->Allocate();
893 result.outputExpected = MakeTensor<float, 5>(outputTensorInfo, outputExpectedData);
898 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
906 std::vector<float> inputData = {
907 0.1f, 0.2f, 0.3f, 0.4f,
908 0.1f, 0.2f, 0.3f, 0.4f,
909 1.0f, 2.0f, 3.0f, 4.0f,
910 1.0f, 2.0f, 3.0f, 4.0f
914 auto f = [](
float value)
916 return std::sqrt(value);
918 std::vector<float> outputExpectedData(inputData.size());
919 std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
921 return SimpleActivationTest<ArmnnType>(workloadFactory,
940 return SqrtTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
948 return SqrtTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, 0.0625f, 64);
956 return SqrtTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
959 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
967 std::vector<float> inputData = {
968 -0.1f, -0.2f, -0.3f, -0.4f,
969 0.1f, 0.2f, 0.3f, 0.4f,
970 -1.0f, -2.0f, -3.0f, -4.0f,
971 1.0f, 2.0f, 3.0f, 4.0f
975 auto f = [](
float value)
977 return std::pow(value,2);
979 std::vector<float> outputExpectedData(inputData.size());
980 std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
982 return SimpleActivationTest<ArmnnType>(workloadFactory,
1001 return SquareTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
1009 return SquareTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager,
1010 tensorHandleFactory, 0.0625f, 64);
1018 return SquareTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
1021 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
1029 std::vector<float> inputData = {
1030 -0.1f, -0.2f, -0.3f, -0.4f,
1031 0.1f, 0.2f, 0.3f, 0.4f,
1032 -1.0f, -2.0f, -3.0f, -4.0f,
1033 1.0f, 2.0f, 3.0f, 4.0f
1036 const float a = 2.0f;
1037 const float b = 3.0f;
1039 auto f = [a, b](
float value)
1041 return a * tanhf(b * value);
1043 std::vector<float> outputExpectedData(inputData.size());
1044 std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
1046 return SimpleActivationTest<ArmnnType>(workloadFactory,
1048 tensorHandleFactory,
1057 outputExpectedData);
1065 return TanhTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
1073 return TanhTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 64);
1081 return TanhTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
1085 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
1093 std::vector<float> inputData = {
1094 -0.1f, -0.2f, -0.3f, -0.4f,
1095 0.1f, 0.2f, 0.3f, 0.4f,
1096 -1.0f, -2.0f, -3.0f, -4.0f,
1097 1.0f, 2.0f, 3.0f, 4.0f
1101 const float a = 0.01f;
1103 auto f = [a](
float value)
1105 return (value >= 0) ? value : a * (expf(value) - 1);
1107 std::vector<float> outputExpectedData(inputData.size());
1108 std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
1110 return SimpleActivationTest<ArmnnType>(workloadFactory,
1112 tensorHandleFactory,
1121 outputExpectedData);
1129 return EluTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
1137 return EluTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 64);
1145 return EluTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
1149 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
1157 std::vector<float> inputData = {
1158 -0.1f, -0.2f, -0.3f, -0.4f,
1159 0.1f, 0.2f, 0.3f, 0.4f,
1160 -1.0f, -2.0f, -3.0f, -4.0f,
1161 1.0f, 2.0f, 3.0f, 4.0f
1164 auto f = [](
float x)
1169 float reLu6_step1 = std::max((x + 3),0.0f);
1170 float reLu6Complete = std::min(reLu6_step1, 6.0f);
1171 float hardSwish_step1 = x * reLu6Complete;
1172 float result = hardSwish_step1 / 6;
1175 std::vector<float> outputExpectedData(inputData.size());
1176 std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
1178 return SimpleActivationTest<ArmnnType>(workloadFactory,
1180 tensorHandleFactory,
1189 outputExpectedData);
1197 return HardSwishTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
1205 return HardSwishTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager,
1206 tensorHandleFactory, 0.1f, 64);
1214 return HardSwishTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, tensorHandleFactory, 0.1f, 0);
1218 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
1226 unsigned int batchSize = 5,
1227 float qScale = 0.0f,
1228 int32_t qOffset = 0)
1231 unsigned int width = 17;
1232 unsigned int height = 29;
1233 unsigned int channels = 2;
1241 unsigned int shape[] = {batchSize, channels, height, width};
1247 if(armnn::IsQuantizedType<T>())
1255 float minVal = -10.f;
1261 boost::multi_array<T, 4> input = MakeRandomTensor<T, 4>(inputTensorInfo, 21453, minVal, 10.f);
1265 auto boostArrayExtents = boost::extents
1267 [armnn::numeric_cast<boost::multi_array_types::extent_gen::index>(channels)]
1269 [armnn::numeric_cast<boost::multi_array_types::extent_gen::index>(width)];
1270 ret.output.resize(boostArrayExtents);
1271 ret.outputExpected.resize(boostArrayExtents);
1273 std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.
CreateTensorHandle(inputTensorInfo);
1274 std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.
CreateTensorHandle(outputTensorInfo);
1276 std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refTensorHandleFactory.
CreateTensorHandle(inputTensorInfo);
1277 std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refTensorHandleFactory.
CreateTensorHandle(outputTensorInfo);
1281 AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
1282 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
1289 SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
1290 SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
1292 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.
CreateActivation(data, info);
1294 std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.
CreateActivation(refData, refInfo);
1297 inputHandle->Allocate();
1298 outputHandle->Allocate();
1299 inputHandleRef->Allocate();
1300 outputHandleRef->Allocate();
1305 workload->Execute();
1306 workloadRef->Execute();
1321 unsigned int batchSize)
1323 return CompareActivationTestImpl<armnn::DataType::Float32>(
1324 workloadFactory, memoryManager, refWorkloadFactory, tensorHandleFactory,
1325 refTensorHandleFactory, f, batchSize);
1336 return CompareActivationTestImpl<armnn::DataType::QAsymmU8>(
1337 workloadFactory, memoryManager, refWorkloadFactory,
1338 tensorHandleFactory, refTensorHandleFactory, f, 5, 0.1f, 50);
1349 return CompareActivationTestImpl<armnn::DataType::QSymmS16>(
1350 workloadFactory, memoryManager, refWorkloadFactory, tensorHandleFactory,
1351 refTensorHandleFactory, f, 5, 0.1f, 0);
LayerTestResult< T, 4 > SimpleSigmoidTestCommon(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, float qScale, int32_t qOffset)
LayerTestResult< uint8_t, 4 > CompareActivationUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, armnn::IWorkloadFactory &refWorkloadFactory, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::ITensorHandleFactory &refTensorHandleFactory, armnn::ActivationFunction f)
LayerTestResult< T, 4 > ConstantLinearActivationTestCommon(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, float qScale=0.0f, int32_t qOffset=0)
LayerTestResult< float, 4 > SqrtTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< int16_t, 4 > ConstantLinearActivationInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< float, 4 > SimpleSigmoidTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< float, 4 > SoftReLuTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< T, 4 > CompareActivationTestImpl(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, armnn::IWorkloadFactory &refWorkloadFactory, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::ITensorHandleFactory &refTensorHandleFactory, armnn::ActivationFunction f, unsigned int batchSize=5, float qScale=0.0f, int32_t qOffset=0)
LayerTestResult< float, 4 > CompareBoundedReLuTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, armnn::IWorkloadFactory &refWorkloadFactory, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::ITensorHandleFactory &refTensorHandleFactory, float upperBound, float lowerBound)
LayerTestResult< T, 4 > TanhTestCommon(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, float qScale, int32_t qOffset)
LayerTestResult< int16_t, 4 > LeakyReLuInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< int16_t, 4 > CompareActivationInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, armnn::IWorkloadFactory &refWorkloadFactory, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::ITensorHandleFactory &refTensorHandleFactory, armnn::ActivationFunction f)
boost::multi_array< T, n > outputExpected
LayerTestResult< float, 4 > CompareActivationTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, armnn::IWorkloadFactory &refWorkloadFactory, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::ITensorHandleFactory &refTensorHandleFactory, armnn::ActivationFunction f, unsigned int batchSize)
LayerTestResult< float, 4 > ConstantLinearActivationTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< int16_t, 4 > SimpleSigmoidInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< float, 4 > ReLuTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< uint8_t, 4 > TanhUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
void IgnoreUnused(Ts &&...)
LayerTestResult< T, 4 > SoftReLuTestCommon(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, float qScale, int32_t qOffset)
virtual std::unique_ptr< IWorkload > CreateActivation(const ActivationQueueDescriptor &descriptor, const WorkloadInfo &info) const
LayerDescriptor m_Parameters
LayerTestResult< uint8_t, 4 > SoftReLuUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< float, 5 > SqrtNNTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< float, 4 > TanhTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< int16_t, 4 > TanhInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< int16_t, 4 > SqrtInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< float, 4 > BoundedReLuUpperAndLowerBoundTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< int16_t, 4 > BoundedReLuInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< float, 4 > SquareTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
std::shared_ptr< IMemoryManager > IMemoryManagerSharedPtr
LayerTestResult< float, 4 > BoundedReLuUpperBoundOnlyTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< T, 4 > AbsTestCommon(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, float qScale, int32_t qOffset)
LayerTestResult< uint8_t, 4 > EluUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
void SetQuantizationScale(float scale)
LayerTestResult< T, 4 > SimpleActivationTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, armnn::ActivationFunction activationFunction, float activationParameterA, float activationParameterB, float scale, int32_t offset, const std::vector< float > &inputData, float outScale, int32_t outOffset, const std::vector< float > &outputExpectedData)
LayerTestResult< float, 4 > EluTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
#define ARMNN_ASSERT(COND)
LayerTestResult< uint8_t, 4 > ReLuUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< uint8_t, 4 > SquareUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< uint8_t, 4 > BoundedReLuUint8UpperBoundOnlyTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
An ActivationDescriptor for the ActivationLayer.
void CopyDataFromITensorHandle(void *memory, const armnn::ITensorHandle *tensorHandle)
Computes min(a, max(b, input)); used to implement ReLu1 and ReLu6.
LayerTestResult< float, 4 > LeakyReLuTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< T, 4 > SqrtTestCommon(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, float qScale, int32_t qOffset)
LayerTestResult< int16_t, 4 > SoftReLuInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
boost::multi_array< T, n > output
LayerTestResult< uint8_t, 4 > BoundedReLuUint8UpperAndLowerBoundTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
float m_A
Alpha upper-bound value used by the activation functions (BoundedReLu, Linear, TanH, Elu).
LayerTestResult< uint8_t, 4 > SimpleSigmoidUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< T, 4 > LeakyReLuTestCommon(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, float qScale, int32_t qOffset)
LayerTestResult< T, 4 > HardSwishTestCommon(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, float qScale, int32_t qOffset)
LayerTestResult< int16_t, 4 > EluInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< T, 4 > EluTestCommon(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, float qScale, int32_t qOffset)
LayerTestResult< uint8_t, 4 > SqrtUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< int16_t, 4 > SquareInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< int16_t, 4 > ReLuInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< uint8_t, 4 > ConstantLinearActivationUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
std::enable_if_t< std::is_unsigned< Source >::value &&std::is_unsigned< Dest >::value, Dest > numeric_cast(Source source)
LayerTestResult< T, 4 > ReLuTestCommon(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, float qScale, int32_t qOffset)
Contains information about inputs and outputs to a layer.
void SetQuantizationOffset(int32_t offset)
LayerTestResult< float, 4 > AbsTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< T, 4 > BoundedReLuTestCommon(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, float upperBound, float lowerBound, float inputScale, int32_t inputOffset, float outputScale, int32_t outputOffset, const std::vector< T > &inputData, const std::vector< T > &outputExpectedData, unsigned int inputWidth, unsigned int inputHeight, unsigned int inputChannels, unsigned int inputBatchSize)
LayerTestResult< float, 4 > HardSwishTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< uint8_t, 4 > HardSwishUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< int16_t, 4 > AbsInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< T, 4 > SquareTestCommon(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, float qScale, int32_t qOffset)
float m_B
Beta lower-bound value used by the activation functions (BoundedReLu, Linear, TanH).
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo) const =0
TensorInfo GetInputTensorInfo(const Network *network)
ActivationFunction m_Function
The activation function to use (Sigmoid, TanH, Linear, ReLu, BoundedReLu, SoftReLu, LeakyReLu, Abs, Sqrt, Square, Elu).
LayerTestResult< uint8_t, 4 > AbsUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< int16_t, 4 > HardSwishInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)
LayerTestResult< uint8_t, 4 > LeakyReLuUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)