32 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
40 const std::vector<T>& input,
41 const std::vector<T>& outputExpected,
59 unsigned int outputChannels =
armnn::numeric_cast<
unsigned int>(outputShape[channelsIndex]);
63 inputBatchSize, inputChannels, inputHeight, inputWidth, dataLayout, ArmnnType);
66 outputBatchSize, outputChannels, outputHeight, outputWidth, dataLayout, ArmnnType);
69 if(armnn::IsQuantizedType<T>())
71 inputTensorInfo.SetQuantizationScale(qScale);
72 inputTensorInfo.SetQuantizationOffset(qOffset);
80 std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.
CreateTensorHandle(inputTensorInfo);
81 std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.
CreateTensorHandle(outputTensorInfo);
88 AddInputToWorkload(queueDescriptor, workloadInfo, inputTensorInfo, inputHandle.get());
89 AddOutputToWorkload(queueDescriptor, workloadInfo, outputTensorInfo, outputHandle.get());
93 const size_t reasonIfUnsupportedMaxLen = 255;
94 char reasonIfUnsupported[reasonIfUnsupportedMaxLen+1];
97 reasonIfUnsupported, reasonIfUnsupportedMaxLen);
98 if (!result.m_Supported)
103 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.
CreatePooling2d(queueDescriptor, workloadInfo);
105 inputHandle->Allocate();
106 outputHandle->Allocate();
114 result.m_ActualData = actualOutput;
115 result.m_ExpectedData = outputExpected;
129 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
150 unsigned int inputWidth = 8;
151 unsigned int inputHeight = 13;
152 unsigned int outputWidth =
155 unsigned int outputHeight =
158 unsigned int channels = 2;
159 unsigned int batchSize = 2;
161 armnn::TensorInfo inputTensorInfo({ batchSize, channels, inputHeight, inputWidth }, ArmnnType);
162 armnn::TensorInfo outputTensorInfo({ batchSize, channels, outputHeight, outputWidth }, ArmnnType);
165 if(armnn::IsQuantizedType<T>())
168 inputTensorInfo.SetQuantizationOffset(qOffset);
173 std::vector<float> singleChannelData({
174 0.0f, 4.0f, 8.0f, 1.0f, 6.0f, 4.0f, 5.0f, 8.0f,
175 1.0f, 1.0f, 6.0f, 0.0f, 3.0f, 7.0f, 4.0f, 7.0f,
176 8.0f, 5.0f, 0.0f, 0.0f, 8.0f, 3.0f, 4.0f, 3.0f,
177 8.0f, 2.0f, 5.0f, 4.0f, 1.0f, 9.0f, 2.0f, 0.0f,
178 5.0f, 4.0f, 5.0f, 0.0f, 0.0f, 0.0f, 7.0f, 2.0f,
179 1.0f, 2.0f, 6.0f, 2.0f, 7.0f, 9.0f, 5.0f, 2.0f,
180 9.0f, 7.0f, 3.0f, 1.0f, 3.0f, 4.0f, 8.0f, 3.0f,
181 1.0f, 0.0f, 0.0f, 5.0f, 5.0f, 4.0f, 2.0f, 0.0f,
182 6.0f, 4.0f, 3.0f, 6.0f, 9.0f, 5.0f, 5.0f, 6.0f,
183 8.0f, 7.0f, 9.0f, 6.0f, 1.0f, 4.0f, 1.0f, 9.0f,
184 7.0f, 1.0f, 9.0f, 2.0f, 9.0f, 9.0f, 8.0f, 1.0f,
185 4.0f, 4.0f, 5.0f, 9.0f, 2.0f, 6.0f, 6.0f, 4.0f,
186 3.0f, 5.0f, 4.0f, 0.0f, 1.0f, 5.0f, 9.0f, 7.0f,
190 std::vector<float> inputData;
191 auto negator = [](
float f) {
return -f; };
194 inputData.insert(inputData.end(), singleChannelData.begin(), singleChannelData.end());
195 std::transform(singleChannelData.begin(), singleChannelData.end(), std::back_inserter(inputData), negator);
198 inputData.insert(inputData.end(), singleChannelData.begin(), singleChannelData.end());
199 std::transform(singleChannelData.begin(), singleChannelData.end(), std::back_inserter(inputData), negator);
201 auto input = QuantizedVector<T>(inputData, qScale, qOffset);
204 std::vector<T> outputExpected;
207 outputExpected = QuantizedVector<T>(
229 outputExpected = QuantizedVector<T>(
231 0.0f, 8.0f, 8.0f, 8.0f, 8.0f, 8.0f,
232 0.0f, 9.0f, 7.0f, 9.0f, 9.0f, 3.0f,
233 0.0f, 8.0f, 9.0f, 9.0f, 9.0f, 9.0f,
235 0.0f, 0.0f, 0.0f, 0.0f,-3.0f,-3.0f,
236 0.0f,-1.0f, 0.0f, 0.0f, 0.0f,-2.0f,
237 0.0f,-1.0f,-1.0f,-1.0f,-1.0f,-1.0f,
239 0.0f, 8.0f, 8.0f, 8.0f, 8.0f, 8.0f,
240 0.0f, 9.0f, 7.0f, 9.0f, 9.0f, 3.0f,
241 0.0f, 8.0f, 9.0f, 9.0f, 9.0f, 9.0f,
243 0.0f, 0.0f, 0.0f, 0.0f,-3.0f,-3.0f,
244 0.0f,-1.0f, 0.0f, 0.0f, 0.0f,-2.0f,
245 0.0f,-1.0f,-1.0f,-1.0f,-1.0f,-1.0f
250 return SimplePooling2dTestImpl<ArmnnType>(
251 workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset,
252 input, outputExpected, inputTensorInfo.GetShape(), outputTensorInfo.
GetShape());
255 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
275 if(armnn::IsQuantizedType<T>())
283 std::vector<T> inputData(
285 1.0f, 2.0f, 5.0f, 6.0f,
286 3.0f, 4.0f, 7.0f, 8.0f,
287 9.0f, 10.0f, 13.0f, 14.0f,
288 11.0f, 12.0f, 15.0f, 16.0f,
290 17.0f, 18.0f, 21.0f, 22.0f,
291 19.0f, 20.0f, 23.0f, 24.0f,
292 25.0f, 26.0f, 29.0f, 30.0f,
293 27.0f, 28.0f, 31.0f, 32.0f,
297 std::vector<T> outputData(
310 std::vector<T> tmp(inputData.size());
314 std::vector<T> tmp1(outputData.size());
319 return SimplePooling2dTestImpl<ArmnnType>(
320 workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset,
321 inputData, outputData, inputTensorInfo.
GetShape(), outputTensorInfo.
GetShape());
324 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
344 if(armnn::IsQuantizedType<T>())
352 std::vector<T> inputData(
354 2.0f, 2.0f, 6.0f, 6.0f,
355 4.0f, 4.0f, 8.0f, 8.0f,
356 10.0f, 12.0f, 14.0f, 16.0f,
357 10.0f, 12.0f, 16.0f, 14.0f,
359 18.0f, 20.0f, 24.0f, 22.0f,
360 20.0f, 18.0f, 22.0f, 24.0f,
361 26.0f, 28.0f, 0.0f, 0.0f,
362 26.0f, 28.0f, 0.0f, 0.0f,
366 std::vector<T> outputData(
379 std::vector<T> tmp(inputData.size());
383 std::vector<T> tmp1(outputData.size());
388 return SimplePooling2dTestImpl<ArmnnType>(
389 workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset,
390 inputData, outputData, inputTensorInfo.
GetShape(), outputTensorInfo.
GetShape());
393 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
415 if(armnn::IsQuantizedType<T>())
423 std::vector<T> input;
430 std::vector<T> outputExpected;
434 outputExpected.push_back(1);
437 return SimplePooling2dTestImpl<ArmnnType>(
438 workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset,
439 input, outputExpected, inputTensorInfo.
GetShape(), outputTensorInfo.
GetShape());
442 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
461 std::vector<T> inputData(
463 1.0f, 7.0f, 5.0f, 5.0f,
464 1.0f, 7.0f, 5.0f, 5.0f,
465 3.0f, 3.0f, 1.0f, 1.0f,
466 3.0f, 3.0f, 1.0f, 1.0f,
468 1.0f, 7.0f, 0.0f, 0.0f,
469 1.0f, 7.0f, 2.0f, 0.0f,
470 0.0f, 2.0f, 1.0f, 1.0f,
471 0.0f, 0.0f, 1.0f, 1.0f,
475 std::vector<T> outputData(
488 std::vector<T> tmp(inputData.size());
492 std::vector<T> tmp1(outputData.size());
497 return SimplePooling2dTestImpl<ArmnnType>(
498 workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset,
499 inputData, outputData, inputTensorInfo.
GetShape(), outputTensorInfo.
GetShape());
502 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
517 auto input = QuantizedVector<T>(
519 2.0f, 1.0f, 5.0f, 2.0f,
520 1.0f, 2.0f, 2.0f, 1.0f,
521 5.0f, 4.0f, 1.0f, 5.0f,
522 2.0f, 1.0f, 5.0f, 2.0f,
527 auto outputExpected = QuantizedVector<T>(
534 return SimplePooling2dTestImpl<ArmnnType>(
535 workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset,
536 input, outputExpected, inputTensorInfo.
GetShape(), outputTensorInfo.
GetShape());
539 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
554 auto input = QuantizedVector<T>(
556 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
557 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
558 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
559 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
560 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
561 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
562 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
563 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
564 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
569 auto outputExpected = QuantizedVector<T>(
577 return SimplePooling2dTestImpl<ArmnnType>(
578 workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset,
579 input, outputExpected, inputTensorInfo.
GetShape(), outputTensorInfo.
GetShape());
582 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
597 auto input = QuantizedVector<T>(
599 2.0f, 1.0f, 5.0f, 0.0f, 2.0f, 1.0f, 5.0f,
600 1.0f, 2.0f, 2.0f, 0.0f, 1.0f, 2.0f, 2.0f,
601 5.0f, 4.0f, 1.0f, 0.0f, 5.0f, 4.0f, 1.0f,
602 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
603 2.0f, 1.0f, 5.0f, 0.0f, 2.0f, 1.0f, 5.0f,
604 1.0f, 2.0f, 2.0f, 0.0f, 1.0f, 2.0f, 2.0f,
605 5.0f, 4.0f, 1.0f, 0.0f, 5.0f, 4.0f, 1.0f,
610 auto outputExpected = QuantizedVector<T>(
617 return SimplePooling2dTestImpl<ArmnnType>(
618 workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset,
619 input, outputExpected, inputTensorInfo.
GetShape(), outputTensorInfo.
GetShape());
622 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
637 auto input = QuantizedVector<T>(
639 1.0f, 0.0f, 2.0f, 0.0f, 3.0f, 0.0f, 4.0f,
640 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
641 0.0f, 5.0f, 0.0f, 6.0f, 0.0f, 7.0f, 0.0f,
642 8.0f, 0.0f, 9.0f, 0.0f, 10.0f, 0.0f, 5.0f,
643 0.0f, 5.0f, 0.0f, 2.0f, 0.0f, 1.0f, 1.0f,
644 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
645 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
650 auto outputExpected = QuantizedVector<T>(
656 return SimplePooling2dTestImpl<ArmnnType>(
657 workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset,
658 input, outputExpected, inputTensorInfo.
GetShape(), outputTensorInfo.
GetShape());
661 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
676 auto input = QuantizedVector<T>(
678 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
679 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
680 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
681 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
682 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
683 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
684 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
685 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
686 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
691 auto outputExpected = QuantizedVector<T>(
697 return SimplePooling2dTestImpl<ArmnnType>(
698 workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset,
699 input, outputExpected, inputTensorInfo.
GetShape(), outputTensorInfo.
GetShape());
702 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
715 descriptor.m_PoolWidth = 2;
716 descriptor.m_PoolHeight = 3;
717 descriptor.m_StrideX = 2;
718 descriptor.m_StrideY = 1;
719 descriptor.m_PadLeft = 2;
720 descriptor.m_PadRight = 0;
721 descriptor.m_PadTop = 1;
722 descriptor.m_PadBottom = 2;
727 auto input = QuantizedVector<T>(
734 auto outputExpected = QuantizedVector<T>(
736 0.0f, 3.0f, 0.0f, 3.0f,
740 return SimplePooling2dTestImpl<ArmnnType>(
741 workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset,
742 input, outputExpected, inputTensorInfo.
GetShape(), outputTensorInfo.
GetShape());
745 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
757 const unsigned int inputWidth = 16;
758 const unsigned int inputHeight = 32;
759 const unsigned int channelCount = 2;
760 const unsigned int batchSize = 5;
762 const unsigned int poolSize = 3;
763 const unsigned int strideX = 2;
764 const unsigned int strideY = 4;
765 const unsigned int padX = 0;
766 const unsigned int padY = 0;
768 const unsigned int outputWidth = (inputWidth + 2 * padX + strideX - poolSize) / strideX;
769 const unsigned int outputHeight = (inputHeight + 2 * padY + strideY - poolSize) / strideY;
774 unsigned int inputShape[] = { batchSize, channelCount, inputHeight, inputWidth };
775 unsigned int outputShape[] = { batchSize, channelCount, outputHeight, outputWidth };
781 if(armnn::IsQuantizedType<T>())
789 std::vector<T> input = MakeRandomTensor<T>(inputTensorInfo, 81715);
795 std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.
CreateTensorHandle(inputTensorInfo);
796 std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.
CreateTensorHandle(outputTensorInfo);
800 AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
801 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
813 std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refTensorHandleFactory.
CreateTensorHandle(outputTensorInfo);
814 std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refTensorHandleFactory.
CreateTensorHandle(inputTensorInfo);
818 const size_t reasonIfUnsupportedMaxLen = 255;
819 char reasonIfUnsupported[reasonIfUnsupportedMaxLen+1];
822 reasonIfUnsupported, reasonIfUnsupportedMaxLen);
823 if (!comparisonResult.m_Supported)
825 return comparisonResult;
830 SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
831 SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
833 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.
CreatePooling2d(data, info);
834 std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.
CreatePooling2d(refData, refInfo);
836 outputHandleRef->Allocate();
837 inputHandleRef->Allocate();
838 inputHandle->Allocate();
839 outputHandle->Allocate();
845 workloadRef->Execute();
850 comparisonResult.m_ActualData = actualOutput;
851 comparisonResult.m_ExpectedData = expectedOutput;
853 return comparisonResult;
865 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
885 unsigned int inputWidth = 4;
887 unsigned int inputHeight = 4;
889 unsigned int outputWidth =
892 unsigned int outputHeight =
895 unsigned int channels = 1;
896 unsigned int batchSize = 1;
898 std::vector<float> inputData = {
899 510.0f, 222.0f, 780.0f, 654.0f,
900 141.0f, 276.0f, 15.0f, 546.0f,
901 303.0f, 618.0f, 582.0f, 339.0f,
902 438.0f, 564.0f, 573.0f, 402.0f
906 std::vector<float> expectedOutputDataWithPadding = {
907 0.0f, 510.0f, 780.0f, 654.0f, 0.0f,
908 0.0f, 438.0f, 618.0f, 402.0f, 0.0f
911 std::vector<float> expectedOutputDataNoPadding = {
916 armnn::TensorInfo inputTensorInfo({ batchSize, channels, inputHeight, inputWidth }, ArmnnType);
919 armnn::TensorInfo outputTensorInfo({ batchSize, channels, outputHeight, outputWidth }, ArmnnType);
922 if(armnn::IsQuantizedType<T>())
925 inputTensorInfo.SetQuantizationOffset(qOffset);
930 auto input = QuantizedVector<T>(inputData, qScale, qOffset);
932 auto outputExpected =
933 forceNoPadding ? QuantizedVector<T>(expectedOutputDataNoPadding, qScale, qOffset) :
934 QuantizedVector<T>(expectedOutputDataWithPadding, qScale, qOffset);
936 return SimplePooling2dTestImpl<ArmnnType>(
937 workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset,
938 input, outputExpected, inputTensorInfo.GetShape(), outputTensorInfo.
GetShape());
950 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
965 descriptor.
m_PadLeft = (forceNoPadding) ? 0 : 1;
972 unsigned int inputWidth = 3;
973 unsigned int inputHeight = 2;
974 unsigned int outputWidth =
977 unsigned int outputHeight =
980 unsigned int channels = 1;
981 unsigned int batchSize = 1;
983 std::vector<float> inputData = {
988 std::vector<float> expectedOutputDataWithPadding = {
992 std::vector<float> expectedOutputDataNoPadding = {
996 armnn::TensorInfo inputTensorInfo({ batchSize, channels, inputHeight, inputWidth }, ArmnnType);
999 armnn::TensorInfo outputTensorInfo({ batchSize, channels, outputHeight, outputWidth }, ArmnnType);
1002 if(armnn::IsQuantizedType<T>())
1005 inputTensorInfo.SetQuantizationOffset(qOffset);
1010 auto input = QuantizedVector<T>(inputData, qScale, qOffset);
1012 auto outputExpected =
1013 forceNoPadding ? QuantizedVector<T>(expectedOutputDataNoPadding, qScale, qOffset) :
1014 QuantizedVector<T>(expectedOutputDataWithPadding, qScale, qOffset);
1016 return SimplePooling2dTestImpl<ArmnnType>(
1017 workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset,
1018 input, outputExpected, inputTensorInfo.GetShape(), outputTensorInfo.
GetShape());
1022 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
1027 float qScale = 1.0f,
1028 int32_t qOffset = 0)
1044 if(armnn::IsQuantizedType<T>())
1047 inputTensorInfo.SetQuantizationOffset(qOffset);
1052 auto input = QuantizedVector<T>(
1054 -1.0f, -2.0f, 3.0f, 4.0f,
1055 -1.0f, -2.0f, 3.0f, 4.0f,
1056 1.0f, 2.0f, -3.0f, -4.0f,
1057 1.0f, 2.0f, -3.0f, -4.0f,
1061 auto outputExpected = QuantizedVector<T>(
1069 return SimplePooling2dTestImpl<ArmnnType>(
1070 workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset,
1071 input, outputExpected, inputTensorInfo.GetShape(), outputTensorInfo.
GetShape());
1074 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
1079 float qScale = 1.0f,
1080 int32_t qOffset = 0)
1096 if(armnn::IsQuantizedType<T>())
1099 inputTensorInfo.SetQuantizationOffset(qOffset);
1104 auto input = QuantizedVector<T>(
1106 -1.0f, -2.0f, 3.0f, 4.0f,
1107 -1.0f, -2.0f, 3.0f, 4.0f,
1108 1.0f, 2.0f, -3.0f, -4.0f,
1109 1.0f, 2.0f, -3.0f, -4.0f,
1113 auto outputExpected = QuantizedVector<T>(
1115 -1.0f, 3.0f, 4.0f, 4.0f,
1116 2.0f, 3.0f, 4.0f, 4.0f,
1117 2.0f, 3.0f, 4.0f, 4.0f,
1118 2.0f, 2.0f, 2.0f, -3.0f,
1122 return SimplePooling2dTestImpl<ArmnnType>(
1123 workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset,
1124 input, outputExpected, inputTensorInfo.GetShape(), outputTensorInfo.
GetShape());
1127 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
1132 float qScale = 1.0f,
1133 int32_t qOffset = 0)
1149 if(armnn::IsQuantizedType<T>())
1152 inputTensorInfo.SetQuantizationOffset(qOffset);
1157 auto input = QuantizedVector<T>(
1159 12.0f, 20.0f, 32.0f, 40.0f,
1160 12.0f, 20.0f, 32.0f, 40.0f,
1161 12.0f, 20.0f, 32.0f, 40.0f,
1162 12.0f, 20.0f, 32.0f, 40.0f,
1166 auto outputExpected = QuantizedVector<T>(
1174 return SimplePooling2dTestImpl<ArmnnType>(
1175 workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset,
1176 input, outputExpected, inputTensorInfo.GetShape(), outputTensorInfo.
GetShape());
1179 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
1184 float qScale = 1.0f,
1185 int32_t qOffset = 0)
1202 if(armnn::IsQuantizedType<T>())
1205 inputTensorInfo.SetQuantizationOffset(qOffset);
1210 auto input = QuantizedVector<T>(
1212 1.0f, 2.0f, 3.0f, 4.0f,
1213 1.0f, 2.0f, 3.0f, 4.0f,
1214 1.0f, 2.0f, 3.0f, 4.0f,
1215 1.0f, 2.0f, 3.0f, 4.0f,
1219 auto outputExpected = QuantizedVector<T>(
1226 return SimplePooling2dTestImpl<ArmnnType>(
1227 workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset,
1228 input, outputExpected, inputTensorInfo.GetShape(), outputTensorInfo.
GetShape());
1231 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
1236 float qScale = 1.0f,
1237 int32_t qOffset = 0)
1253 if(armnn::IsQuantizedType<T>())
1256 inputTensorInfo.SetQuantizationOffset(qOffset);
1261 auto input = QuantizedVector<T>(
1263 9.0f, 27.0f, 18.0f, 36.0f,
1264 18.0f, 9.0f, 18.0f, 9.0f,
1265 27.0f, 18.0f, 9.0f, 27.0f,
1266 9.0f, 27.0f, 9.0f, 18.0f,
1270 auto outputExpected = QuantizedVector<T>(
1272 7.0f, 11.0f, 13.0f, 9.0f,
1273 12.0f, 17.0f, 19.0f, 13.0f,
1274 12.0f, 16.0f, 16.0f, 10.0f,
1275 9.0f, 11.0f, 12.0f, 7.0f,
1279 return SimplePooling2dTestImpl<ArmnnType>(
1280 workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset,
1281 input, outputExpected, inputTensorInfo.GetShape(), outputTensorInfo.
GetShape());
1284 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
1289 float qScale = 1.0f,
1290 int32_t qOffset = 0)
1306 if(armnn::IsQuantizedType<T>())
1309 inputTensorInfo.SetQuantizationOffset(qOffset);
1314 auto input = QuantizedVector<T>(
1316 2.0f, 4.0f, 8.0f, 16.0f,
1317 4.0f, 2.0f, 2.0f, 4.0f,
1318 8.0f, 2.0f, 4.0f, 2.0f,
1319 16.0f, 2.0f, 2.0f, 8.0f,
1323 auto outputExpected = QuantizedVector<T>(
1325 1.0f, 4.4721f, 8.0f,
1326 4.4721f, 2.6457f, 2.236f,
1327 8.0f, 1.4142f, 4.0f,
1331 return SimplePooling2dTestImpl<ArmnnType>(
1332 workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset,
1333 input, outputExpected, inputTensorInfo.GetShape(), outputTensorInfo.
GetShape());
1336 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
1341 float qScale = 1.0f,
1342 int32_t qOffset = 0)
1358 if(armnn::IsQuantizedType<T>())
1361 inputTensorInfo.SetQuantizationOffset(qOffset);
1366 auto input = QuantizedVector<T>(
1368 1.0f, 2.0f, 3.0f, 4.0f,
1369 1.0f, 2.0f, 3.0f, 4.0f,
1370 1.0f, 2.0f, 3.0f, 4.0f,
1371 1.0f, 2.0f, 3.0f, 4.0f,
1375 auto outputExpected = QuantizedVector<T>(
1377 1.0540f, 1.7638f, 2.5385f, 2.3570f,
1378 1.2909f, 2.1602f, 3.1091f, 2.8867f,
1379 1.2909f, 2.1602f, 3.1091f, 2.8867f,
1380 1.0540f, 1.7638f, 2.5385f, 2.3570f,
1384 return SimplePooling2dTestImpl<ArmnnType>(
1385 workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset,
1386 input, outputExpected, inputTensorInfo.GetShape(), outputTensorInfo.
GetShape());
1395 bool forceNoPadding)
1397 return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::Float32>(
1398 workloadFactory, memoryManager, tensorHandleFactory, forceNoPadding);
1405 bool forceNoPadding)
1407 return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::QAsymmU8>(
1408 workloadFactory, memoryManager, tensorHandleFactory, forceNoPadding, 3.0f, -5);
1415 bool forceNoPadding)
1417 return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::QSymmS16>(
1418 workloadFactory, memoryManager, tensorHandleFactory, forceNoPadding);
1425 bool forceNoPadding)
1427 return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::Float32>(
1428 workloadFactory, memoryManager, tensorHandleFactory, forceNoPadding);
1435 bool forceNoPadding)
1437 return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::QAsymmU8>(
1438 workloadFactory, memoryManager, tensorHandleFactory, forceNoPadding, 0.1f, 128);
1445 bool forceNoPadding)
1447 return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::QSymmS16>(
1448 workloadFactory, memoryManager, tensorHandleFactory, forceNoPadding);
1457 return SimpleMaxPooling2dTestCommon<armnn::DataType::Float32>(
1458 workloadFactory, memoryManager, tensorHandleFactory, dataLayout);
1467 return SimpleMaxPooling2dTestCommon<armnn::DataType::QAsymmU8>(
1468 workloadFactory, memoryManager, tensorHandleFactory, dataLayout);
1477 return SimpleMaxPooling2dTestCommon<armnn::DataType::QSymmS16>(
1478 workloadFactory, memoryManager, tensorHandleFactory, dataLayout);
1485 return IgnorePaddingSimpleMaxPooling2dTestCommon<armnn::DataType::Float32>(
1486 workloadFactory, memoryManager, tensorHandleFactory);
1494 return IgnorePaddingSimpleMaxPooling2dTestCommon<armnn::DataType::QAsymmU8>(
1495 workloadFactory, memoryManager, tensorHandleFactory, 1.0f, -5);
1503 return IgnorePaddingSimpleMaxPooling2dTestCommon<armnn::DataType::QSymmS16>(
1504 workloadFactory, memoryManager, tensorHandleFactory);
1512 return IgnorePaddingMaxPooling2dSize3TestCommon<armnn::DataType::Float32>(
1513 workloadFactory, memoryManager, tensorHandleFactory);
1521 return IgnorePaddingMaxPooling2dSize3TestCommon<armnn::DataType::QAsymmU8>(
1522 workloadFactory, memoryManager, tensorHandleFactory, 1.0f, -5);
1530 return IgnorePaddingMaxPooling2dSize3TestCommon<armnn::DataType::QSymmS16>(
1531 workloadFactory, memoryManager, tensorHandleFactory);
1540 return SimpleAveragePooling2dTestCommon<armnn::DataType::Float32>(
1541 workloadFactory, memoryManager, tensorHandleFactory, dataLayout);
1550 return SimpleAveragePooling2dTestCommon<armnn::DataType::QAsymmU8>(
1551 workloadFactory, memoryManager, tensorHandleFactory, dataLayout, 0.5, -1);
1560 return SimpleAveragePooling2dTestCommon<armnn::DataType::QSymmS16>(
1561 workloadFactory, memoryManager, tensorHandleFactory, dataLayout);
1568 bool forceNoPadding)
1570 return IgnorePaddingAveragePooling2dSize3x2Stride2x2TestCommon<armnn::DataType::Float32>(
1571 workloadFactory, memoryManager, tensorHandleFactory, forceNoPadding);
1579 return LargeTensorsAveragePooling2dTestCommon<armnn::DataType::Float32>(
1580 workloadFactory, memoryManager, tensorHandleFactory);
1588 return LargeTensorsAveragePooling2dTestCommon<armnn::DataType::QAsymmU8>(
1589 workloadFactory, memoryManager, tensorHandleFactory, 0.5, -1);
1597 return LargeTensorsAveragePooling2dTestCommon<armnn::DataType::QSymmS16>(
1598 workloadFactory, memoryManager, tensorHandleFactory);
1605 return IgnorePaddingSimpleAveragePooling2dTestCommon<armnn::DataType::Float32>(
1606 workloadFactory, memoryManager, tensorHandleFactory);
1614 return IgnorePaddingSimpleAveragePooling2dTestCommon<armnn::DataType::QAsymmU8>(
1615 workloadFactory, memoryManager, tensorHandleFactory);
1623 return IgnorePaddingSimpleAveragePooling2dTestCommon<armnn::DataType::QSymmS16>(
1624 workloadFactory, memoryManager, tensorHandleFactory);
1632 return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<armnn::DataType::Float32>(
1633 workloadFactory, memoryManager, tensorHandleFactory);
1641 return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<armnn::DataType::QAsymmU8>(
1642 workloadFactory, memoryManager, tensorHandleFactory);
1650 return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<armnn::DataType::QSymmS16>(
1651 workloadFactory, memoryManager, tensorHandleFactory);
1659 return IgnorePaddingAveragePooling2dSize3TestCommon<armnn::DataType::Float32>(
1660 workloadFactory, memoryManager, tensorHandleFactory);
1668 return IgnorePaddingAveragePooling2dSize3TestCommon<armnn::DataType::QAsymmU8>(
1669 workloadFactory, memoryManager, tensorHandleFactory);
1677 return IgnorePaddingAveragePooling2dSize3TestCommon<armnn::DataType::QSymmS16>(
1678 workloadFactory, memoryManager, tensorHandleFactory);
1687 return SimpleL2Pooling2dTestCommon<armnn::DataType::Float32>(
1688 workloadFactory, memoryManager, tensorHandleFactory, dataLayout);
1697 return SimpleL2Pooling2dTestCommon<armnn::DataType::QAsymmU8>(
1698 workloadFactory, memoryManager, tensorHandleFactory, dataLayout);
1707 return SimpleL2Pooling2dTestCommon<armnn::DataType::QSymmS16>(
1708 workloadFactory, memoryManager, tensorHandleFactory, dataLayout);
1716 return L2Pooling2dSize3Stride1TestCommon<armnn::DataType::Float32>(
1717 workloadFactory, memoryManager, tensorHandleFactory);
1725 return L2Pooling2dSize3Stride1TestCommon<armnn::DataType::QAsymmU8>(
1726 workloadFactory, memoryManager, tensorHandleFactory);
1734 return L2Pooling2dSize3Stride1TestCommon<armnn::DataType::QSymmS16>(
1735 workloadFactory, memoryManager, tensorHandleFactory);
1743 return L2Pooling2dSize3Stride3TestCommon<armnn::DataType::Float32>(
1744 workloadFactory, memoryManager, tensorHandleFactory);
1752 return L2Pooling2dSize3Stride3TestCommon<armnn::DataType::QAsymmU8>(
1753 workloadFactory, memoryManager, tensorHandleFactory);
1761 return L2Pooling2dSize3Stride3TestCommon<armnn::DataType::QSymmS16>(
1762 workloadFactory, memoryManager, tensorHandleFactory);
1769 return L2Pooling2dSize3Stride4TestCommon<armnn::DataType::Float32>(
1770 workloadFactory, memoryManager, tensorHandleFactory);
1778 return L2Pooling2dSize3Stride4TestCommon<armnn::DataType::QAsymmU8>(
1779 workloadFactory, memoryManager, tensorHandleFactory);
1787 return L2Pooling2dSize3Stride4TestCommon<armnn::DataType::QSymmS16>(
1788 workloadFactory, memoryManager, tensorHandleFactory);
1796 return L2Pooling2dSize7TestCommon<armnn::DataType::Float32>(
1797 workloadFactory, memoryManager, tensorHandleFactory);
1805 return L2Pooling2dSize7TestCommon<armnn::DataType::QAsymmU8>(
1806 workloadFactory, memoryManager, tensorHandleFactory);
1814 return L2Pooling2dSize7TestCommon<armnn::DataType::QSymmS16>(
1815 workloadFactory, memoryManager, tensorHandleFactory);
1823 return L2Pooling2dSize9TestCommon<armnn::DataType::Float32>(
1824 workloadFactory, memoryManager, tensorHandleFactory);
1832 return L2Pooling2dSize9TestCommon<armnn::DataType::QAsymmU8>(
1833 workloadFactory, memoryManager, tensorHandleFactory);
1841 return L2Pooling2dSize9TestCommon<armnn::DataType::QSymmS16>(
1842 workloadFactory, memoryManager, tensorHandleFactory);
1849 return IgnorePaddingSimpleL2Pooling2dTestCommon<armnn::DataType::Float32>(
1850 workloadFactory, memoryManager, tensorHandleFactory);
1858 return IgnorePaddingSimpleL2Pooling2dTestCommon<armnn::DataType::QAsymmU8>(
1859 workloadFactory, memoryManager, tensorHandleFactory);
1867 return IgnorePaddingSimpleL2Pooling2dTestCommon<armnn::DataType::QSymmS16>(
1868 workloadFactory, memoryManager, tensorHandleFactory);
1876 return IgnorePaddingL2Pooling2dSize3TestCommon<armnn::DataType::Float32>(
1877 workloadFactory, memoryManager, tensorHandleFactory);
1885 return IgnorePaddingL2Pooling2dSize3TestCommon<armnn::DataType::QAsymmU8>(
1886 workloadFactory, memoryManager, tensorHandleFactory);
1894 return IgnorePaddingL2Pooling2dSize3TestCommon<armnn::DataType::QSymmS16>(
1895 workloadFactory, memoryManager, tensorHandleFactory);
1903 return AsymmetricNonSquarePooling2dTestCommon<armnn::DataType::Float32>(
1904 workloadFactory, memoryManager, tensorHandleFactory);
1912 return AsymmetricNonSquarePooling2dTestCommon<armnn::DataType::QAsymmU8>(
1913 workloadFactory, memoryManager, tensorHandleFactory);
1921 return AsymmetricNonSquarePooling2dTestCommon<armnn::DataType::QSymmS16>(
1922 workloadFactory, memoryManager, tensorHandleFactory);
1933 return ComparePooling2dTestCommon<armnn::DataType::Float32>(
1934 workloadFactory, memoryManager, refWorkloadFactory, tensorHandleFactory, refTensorHandleFactory, poolingType);
1945 return ComparePooling2dTestCommon<armnn::DataType::QAsymmU8>(
1946 workloadFactory, memoryManager, refWorkloadFactory, tensorHandleFactory, refTensorHandleFactory,
1947 poolingType, 0.1f, 128);
1958 return ComparePooling2dTestCommon<armnn::DataType::QSymmS16>(
1959 workloadFactory, memoryManager, refWorkloadFactory, tensorHandleFactory, refTensorHandleFactory, poolingType);
unsigned int GetNumElements() const
Function that calculates the number of tensor elements by multiplying all dimension sizes which are specified...
LayerTestResult< uint8_t, 4 > L2Pooling2dSize3Stride1Uint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
virtual const BackendId & GetBackendId() const =0
LayerTestResult< int16_t, 4 > SimpleL2Pooling2dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout dataLayout)
LayerTestResult< uint8_t, 4 > SimpleMaxPooling2dSize2x2Stride2x2Uint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool forceNoPadding)
uint32_t m_PadBottom
Padding bottom value in the height dimension.
unsigned int GetWidthIndex() const
LayerTestResult< int16_t, 4 > IgnorePaddingAveragePooling2dSize3Int16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
bool IsPooling2dSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const Pooling2dDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
LayerTestResult< float, 4 > IgnorePaddingAveragePooling2dSize3Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
const TensorShape & GetShape() const
uint32_t m_PadLeft
Padding left value in the width dimension.
LayerTestResult< int16_t, 4 > LargeTensorsAveragePooling2dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< int16_t, 4 > SimpleMaxPooling2dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout dataLayout)
LayerTestResult< int16_t, 4 > IgnorePaddingSimpleMaxPooling2dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< uint8_t, 4 > SimpleMaxPooling2dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout dataLayout)
LayerTestResult< float, 4 > IgnorePaddingSimpleL2Pooling2dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
uint32_t m_PoolWidth
Pooling width value.
LayerTestResult< uint8_t, 4 > IgnorePaddingL2Pooling2dSize3Uint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
The padding fields don't count and are ignored.
PaddingMethod m_PaddingMethod
The padding method to be used. (Exclude, IgnoreValue).
LayerTestResult< float, 4 > IgnorePaddingL2Pooling2dSize3Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< float, 4 > SimpleMaxPooling2dSize2x2Stride2x2Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool forceNoPadding)
uint32_t m_PadTop
Padding top value in the height dimension.
LayerTestResult< int16_t, 4 > SimpleMaxPooling2dSize3x3Stride2x4Int16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool forceNoPadding)
LayerTestResult< uint8_t, 4 > IgnorePaddingSimpleAveragePooling2dNoPaddingUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< float, 4 > SimpleMaxPooling2dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout dataLayout)
void IgnoreUnused(Ts &&...)
LayerTestResult< float, 4 > L2Pooling2dSize9Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerDescriptor m_Parameters
LayerTestResult< int16_t, 4 > ComparePooling2dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, armnn::IWorkloadFactory &refWorkloadFactory, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::ITensorHandleFactory &refTensorHandleFactory, armnn::PoolingAlgorithm poolingType)
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
LayerTestResult< int16_t, 4 > L2Pooling2dSize3Stride4Int16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< int16_t, 4 > AsymmetricNonSquarePooling2dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
unsigned int GetHeightIndex() const
LayerTestResult< float, 4 > L2Pooling2dSize3Stride1Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< uint8_t, 4 > SimpleL2Pooling2dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout dataLayout)
LayerTestResult< uint8_t, 4 > ComparePooling2dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, armnn::IWorkloadFactory &refWorkloadFactory, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::ITensorHandleFactory &refTensorHandleFactory, armnn::PoolingAlgorithm poolingType)
uint32_t m_PoolHeight
Pooling height value.
void Permute(const armnn::TensorShape &dstShape, const armnn::PermutationVector &mappings, const void *src, void *dst, size_t dataTypeSize)
LayerTestResult< float, 4 > SimpleL2Pooling2dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout dataLayout)
LayerTestResult< int16_t, 4 > L2Pooling2dSize3Stride3Int16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< float, 4 > L2Pooling2dSize3Stride3Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< uint8_t, 4 > L2Pooling2dSize3Stride4Uint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< float, 4 > IgnorePaddingAveragePooling2dSize3x2Stride2x2Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool forceNoPadding)
LayerTestResult< float, 4 > LargeTensorsAveragePooling2dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< uint8_t, 4 > SimpleAveragePooling2dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout dataLayout)
uint32_t m_PadRight
Padding right value in the width dimension.
LayerTestResult< float, 4 > SimpleAveragePooling2dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout dataLayout)
std::shared_ptr< IMemoryManager > IMemoryManagerSharedPtr
Provides access to the appropriate indexes for Channels, Height and Width based on DataLayout...
LayerTestResult< uint8_t, 4 > IgnorePaddingSimpleAveragePooling2dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< float, 4 > L2Pooling2dSize7Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< uint8_t, 4 > L2Pooling2dSize9Uint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
void SetQuantizationScale(float scale)
LayerTestResult< uint8_t, 4 > IgnorePaddingAveragePooling2dSize3Uint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< float, 4 > L2Pooling2dSize3Stride4Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< float, 4 > ComparePooling2dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, armnn::IWorkloadFactory &refWorkloadFactory, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::ITensorHandleFactory &refTensorHandleFactory, armnn::PoolingAlgorithm poolingType)
void CopyDataFromITensorHandle(void *memory, const armnn::ITensorHandle *tensorHandle)
LayerTestResult< int16_t, 4 > L2Pooling2dSize7Int16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< float, 4 > SimpleMaxPooling2dSize3x3Stride2x4Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool forceNoPadding)
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
virtual std::unique_ptr< IWorkload > CreatePooling2d(const Pooling2dQueueDescriptor &descriptor, const WorkloadInfo &info) const
LayerTestResult< uint8_t, 4 > AsymmetricNonSquarePooling2dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< float, 4 > AsymmetricNonSquarePooling2dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< int16_t, 4 > IgnorePaddingSimpleAveragePooling2dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< uint8_t, 4 > IgnorePaddingSimpleMaxPooling2dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
PoolingAlgorithm m_PoolType
The pooling algorithm to use (Max, Average, L2).
LayerTestResult< int16_t, 4 > IgnorePaddingSimpleAveragePooling2dNoPaddingInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
The padding fields count, but are ignored.
LayerTestResult< uint8_t, 4 > IgnorePaddingMaxPooling2dSize3Uint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< float, 4 > IgnorePaddingMaxPooling2dSize3Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
OutputShapeRounding m_OutputShapeRounding
The rounding method for the output shape. (Floor, Ceiling).
LayerTestResult< int16_t, 4 > L2Pooling2dSize9Int16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
std::enable_if_t< std::is_unsigned< Source >::value &&std::is_unsigned< Dest >::value, Dest > numeric_cast(Source source)
armnn::TensorInfo GetTensorInfo(unsigned int numberOfBatches, unsigned int numberOfChannels, unsigned int height, unsigned int width, const armnn::DataLayout dataLayout, const armnn::DataType dataType)
LayerTestResult< uint8_t, 4 > LargeTensorsAveragePooling2dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
Contains information about TensorInfos of a layer.
LayerTestResult< uint8_t, 4 > L2Pooling2dSize3Stride3Uint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
void SetQuantizationOffset(int32_t offset)
LayerTestResult< uint8_t, 4 > IgnorePaddingSimpleL2Pooling2dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< int16_t, 4 > SimpleAveragePooling2dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout dataLayout)
LayerTestResult< float, 4 > IgnorePaddingSimpleAveragePooling2dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< int16_t, 4 > IgnorePaddingMaxPooling2dSize3Int16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< float, 4 > IgnorePaddingSimpleAveragePooling2dNoPaddingTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
A Pooling2dDescriptor for the Pooling2dLayer.
LayerTestResult< float, 4 > IgnorePaddingSimpleMaxPooling2dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< int16_t, 4 > L2Pooling2dSize3Stride1Int16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< int16_t, 4 > IgnorePaddingL2Pooling2dSize3Int16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< uint8_t, 4 > SimpleMaxPooling2dSize3x3Stride2x4Uint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool forceNoPadding)
LayerTestResult< int16_t, 4 > IgnorePaddingSimpleL2Pooling2dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< int16_t, 4 > SimpleMaxPooling2dSize2x2Stride2x2Int16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool forceNoPadding)
unsigned int GetChannelsIndex() const
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo) const =0
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
unsigned int GetNumElements() const
LayerTestResult< uint8_t, 4 > L2Pooling2dSize7Uint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)