24 #include <boost/numeric/conversion/cast.hpp> 31 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
38 const boost::multi_array<T, 4>& input,
39 const boost::multi_array<T, 4>& outputExpected)
41 boost::ignore_unused(memoryManager);
48 unsigned int inputHeight = boost::numeric_cast<
unsigned int>(input.shape()[heightIndex]);
49 unsigned int inputWidth = boost::numeric_cast<
unsigned int>(input.shape()[widthIndex]);
50 unsigned int inputChannels = boost::numeric_cast<
unsigned int>(input.shape()[channelsIndex]);
51 unsigned int inputBatchSize = boost::numeric_cast<
unsigned int>(input.shape()[0]);
53 unsigned int outputHeight = boost::numeric_cast<
unsigned int>(outputExpected.shape()[heightIndex]);
54 unsigned int outputWidth = boost::numeric_cast<
unsigned int>(outputExpected.shape()[widthIndex]);
55 unsigned int outputChannels = boost::numeric_cast<
unsigned int>(outputExpected.shape()[channelsIndex]);
56 unsigned int outputBatchSize = boost::numeric_cast<
unsigned int>(outputExpected.shape()[0]);
59 inputBatchSize, inputChannels, inputHeight, inputWidth, dataLayout, ArmnnType);
62 outputBatchSize, outputChannels, outputHeight, outputWidth, dataLayout, ArmnnType);
65 if(armnn::IsQuantizedType<T>())
67 inputTensorInfo.SetQuantizationScale(qScale);
68 inputTensorInfo.SetQuantizationOffset(qOffset);
75 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.
CreateTensorHandle(inputTensorInfo);
76 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.
CreateTensorHandle(outputTensorInfo);
83 AddInputToWorkload(queueDescriptor, workloadInfo, inputTensorInfo, inputHandle.get());
84 AddOutputToWorkload(queueDescriptor, workloadInfo, outputTensorInfo, outputHandle.get());
88 const size_t reasonIfUnsupportedMaxLen = 255;
89 char reasonIfUnsupported[reasonIfUnsupportedMaxLen+1];
92 reasonIfUnsupported, reasonIfUnsupportedMaxLen);
93 if (!result.supported)
98 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.
CreatePooling2d(queueDescriptor, workloadInfo);
100 inputHandle->Allocate();
101 outputHandle->Allocate();
109 result.outputExpected = outputExpected;
123 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
143 unsigned int inputWidth = 8;
144 unsigned int inputHeight = 13;
145 unsigned int outputWidth =
148 unsigned int outputHeight =
151 unsigned int channels = 2;
152 unsigned int batchSize = 2;
154 armnn::TensorInfo inputTensorInfo({ batchSize, channels, inputHeight, inputWidth }, ArmnnType);
155 armnn::TensorInfo outputTensorInfo({ batchSize, channels, outputHeight, outputWidth }, ArmnnType);
158 if(armnn::IsQuantizedType<T>())
161 inputTensorInfo.SetQuantizationOffset(qOffset);
166 std::vector<float> singleChannelData({
167 0.0f, 4.0f, 8.0f, 1.0f, 6.0f, 4.0f, 5.0f, 8.0f,
168 1.0f, 1.0f, 6.0f, 0.0f, 3.0f, 7.0f, 4.0f, 7.0f,
169 8.0f, 5.0f, 0.0f, 0.0f, 8.0f, 3.0f, 4.0f, 3.0f,
170 8.0f, 2.0f, 5.0f, 4.0f, 1.0f, 9.0f, 2.0f, 0.0f,
171 5.0f, 4.0f, 5.0f, 0.0f, 0.0f, 0.0f, 7.0f, 2.0f,
172 1.0f, 2.0f, 6.0f, 2.0f, 7.0f, 9.0f, 5.0f, 2.0f,
173 9.0f, 7.0f, 3.0f, 1.0f, 3.0f, 4.0f, 8.0f, 3.0f,
174 1.0f, 0.0f, 0.0f, 5.0f, 5.0f, 4.0f, 2.0f, 0.0f,
175 6.0f, 4.0f, 3.0f, 6.0f, 9.0f, 5.0f, 5.0f, 6.0f,
176 8.0f, 7.0f, 9.0f, 6.0f, 1.0f, 4.0f, 1.0f, 9.0f,
177 7.0f, 1.0f, 9.0f, 2.0f, 9.0f, 9.0f, 8.0f, 1.0f,
178 4.0f, 4.0f, 5.0f, 9.0f, 2.0f, 6.0f, 6.0f, 4.0f,
179 3.0f, 5.0f, 4.0f, 0.0f, 1.0f, 5.0f, 9.0f, 7.0f,
183 std::vector<float> inputData;
184 auto negator = [](
float f) {
return -f; };
187 inputData.insert(inputData.end(), singleChannelData.begin(), singleChannelData.end());
188 std::transform(singleChannelData.begin(), singleChannelData.end(), std::back_inserter(inputData), negator);
191 inputData.insert(inputData.end(), singleChannelData.begin(), singleChannelData.end());
192 std::transform(singleChannelData.begin(), singleChannelData.end(), std::back_inserter(inputData), negator);
194 auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(inputData, qScale, qOffset));
197 auto shape(GetTensorShapeAsArray<4>(outputTensorInfo));
198 boost::multi_array<T, 4> outputExpected(shape);
201 outputExpected = MakeTensor<T, 4>(outputTensorInfo,
223 outputExpected = MakeTensor<T, 4>(outputTensorInfo,
225 0.0f, 8.0f, 8.0f, 8.0f, 8.0f, 8.0f,
226 0.0f, 9.0f, 7.0f, 9.0f, 9.0f, 3.0f,
227 0.0f, 8.0f, 9.0f, 9.0f, 9.0f, 9.0f,
229 0.0f, 0.0f, 0.0f, 0.0f,-3.0f,-3.0f,
230 0.0f,-1.0f, 0.0f, 0.0f, 0.0f,-2.0f,
231 0.0f,-1.0f,-1.0f,-1.0f,-1.0f,-1.0f,
233 0.0f, 8.0f, 8.0f, 8.0f, 8.0f, 8.0f,
234 0.0f, 9.0f, 7.0f, 9.0f, 9.0f, 3.0f,
235 0.0f, 8.0f, 9.0f, 9.0f, 9.0f, 9.0f,
237 0.0f, 0.0f, 0.0f, 0.0f,-3.0f,-3.0f,
238 0.0f,-1.0f, 0.0f, 0.0f, 0.0f,-2.0f,
239 0.0f,-1.0f,-1.0f,-1.0f,-1.0f,-1.0f
244 return SimplePooling2dTestImpl<ArmnnType>(
245 workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
248 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
267 if(armnn::IsQuantizedType<T>())
275 std::vector<T> inputData(
277 1.0f, 2.0f, 5.0f, 6.0f,
278 3.0f, 4.0f, 7.0f, 8.0f,
279 9.0f, 10.0f, 13.0f, 14.0f,
280 11.0f, 12.0f, 15.0f, 16.0f,
282 17.0f, 18.0f, 21.0f, 22.0f,
283 19.0f, 20.0f, 23.0f, 24.0f,
284 25.0f, 26.0f, 29.0f, 30.0f,
285 27.0f, 28.0f, 31.0f, 32.0f,
289 std::vector<T> outputData(
302 std::vector<T> tmp(inputData.size());
306 std::vector<T> tmp1(outputData.size());
311 auto input = MakeTensor<T, 4>(inputTensorInfo, inputData);
313 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputData);
315 return SimplePooling2dTestImpl<ArmnnType>(
316 workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
319 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
338 if(armnn::IsQuantizedType<T>())
346 std::vector<T> inputData(
348 2.0f, 2.0f, 6.0f, 6.0f,
349 4.0f, 4.0f, 8.0f, 8.0f,
350 10.0f, 12.0f, 14.0f, 16.0f,
351 10.0f, 12.0f, 16.0f, 14.0f,
353 18.0f, 20.0f, 24.0f, 22.0f,
354 20.0f, 18.0f, 22.0f, 24.0f,
355 26.0f, 28.0f, 0.0f, 0.0f,
356 26.0f, 28.0f, 0.0f, 0.0f,
360 std::vector<T> outputData(
373 std::vector<T> tmp(inputData.size());
377 std::vector<T> tmp1(outputData.size());
382 auto input = MakeTensor<T, 4>(inputTensorInfo, inputData);
384 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputData);
386 return SimplePooling2dTestImpl<ArmnnType>(
387 workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
390 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
411 if(armnn::IsQuantizedType<T>())
419 std::vector<T> inputVec;
423 inputVec.push_back(1);
426 auto input = MakeTensor<T, 4>(inputTensorInfo, inputVec);
428 std::vector<T> outputVec;
432 outputVec.push_back(1);
435 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputVec);
437 return SimplePooling2dTestImpl<ArmnnType>(
438 workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
441 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
459 std::vector<T> inputData(
461 1.0f, 7.0f, 5.0f, 5.0f,
462 1.0f, 7.0f, 5.0f, 5.0f,
463 3.0f, 3.0f, 1.0f, 1.0f,
464 3.0f, 3.0f, 1.0f, 1.0f,
466 1.0f, 7.0f, 0.0f, 0.0f,
467 1.0f, 7.0f, 2.0f, 0.0f,
468 0.0f, 2.0f, 1.0f, 1.0f,
469 0.0f, 0.0f, 1.0f, 1.0f,
473 std::vector<T> outputData(
486 std::vector<T> tmp(inputData.size());
490 std::vector<T> tmp1(outputData.size());
495 auto input = MakeTensor<T, 4>(inputTensorInfo, inputData);
497 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputData);
499 return SimplePooling2dTestImpl<ArmnnType>(
500 workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
503 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
517 auto input = MakeTensor<T, 4>(inputTensorInfo,
519 2.0f, 1.0f, 5.0f, 2.0f,
520 1.0f, 2.0f, 2.0f, 1.0f,
521 5.0f, 4.0f, 1.0f, 5.0f,
522 2.0f, 1.0f, 5.0f, 2.0f,
527 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
534 return SimplePooling2dTestImpl<ArmnnType>(
535 workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
538 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
552 auto input = MakeTensor<T, 4>(inputTensorInfo,
554 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
555 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
556 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
557 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
558 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
559 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
560 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
561 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
562 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
567 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
575 return SimplePooling2dTestImpl<ArmnnType>(
576 workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
579 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
593 auto input = MakeTensor<T, 4>(inputTensorInfo,
595 2.0f, 1.0f, 5.0f, 0.0f, 2.0f, 1.0f, 5.0f,
596 1.0f, 2.0f, 2.0f, 0.0f, 1.0f, 2.0f, 2.0f,
597 5.0f, 4.0f, 1.0f, 0.0f, 5.0f, 4.0f, 1.0f,
598 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
599 2.0f, 1.0f, 5.0f, 0.0f, 2.0f, 1.0f, 5.0f,
600 1.0f, 2.0f, 2.0f, 0.0f, 1.0f, 2.0f, 2.0f,
601 5.0f, 4.0f, 1.0f, 0.0f, 5.0f, 4.0f, 1.0f,
606 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
613 return SimplePooling2dTestImpl<ArmnnType>(
614 workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
617 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
631 auto input = MakeTensor<T, 4>(inputTensorInfo,
633 1.0f, 0.0f, 2.0f, 0.0f, 3.0f, 0.0f, 4.0f,
634 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
635 0.0f, 5.0f, 0.0f, 6.0f, 0.0f, 7.0f, 0.0f,
636 8.0f, 0.0f, 9.0f, 0.0f, 10.0f, 0.0f, 5.0f,
637 0.0f, 5.0f, 0.0f, 2.0f, 0.0f, 1.0f, 1.0f,
638 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
639 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
644 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
650 return SimplePooling2dTestImpl<ArmnnType>(
651 workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
654 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
668 auto input = MakeTensor<T, 4>(inputTensorInfo,
670 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
671 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
672 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
673 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
674 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
675 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
676 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
677 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
678 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
683 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
689 return SimplePooling2dTestImpl<ArmnnType>(
690 workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
693 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
705 descriptor.m_PoolWidth = 2;
706 descriptor.m_PoolHeight = 3;
707 descriptor.m_StrideX = 2;
708 descriptor.m_StrideY = 1;
709 descriptor.m_PadLeft = 2;
710 descriptor.m_PadRight = 0;
711 descriptor.m_PadTop = 1;
712 descriptor.m_PadBottom = 2;
717 auto input = MakeTensor<T, 4>(inputTensorInfo,
724 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
726 0.0f, 3.0f, 0.0f, 3.0f,
730 return SimplePooling2dTestImpl<ArmnnType>(
731 workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
734 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
743 boost::ignore_unused(memoryManager);
744 const unsigned int inputWidth = 16;
745 const unsigned int inputHeight = 32;
746 const unsigned int channelCount = 2;
747 const unsigned int batchSize = 5;
749 const unsigned int poolSize = 3;
750 const unsigned int strideX = 2;
751 const unsigned int strideY = 4;
752 const unsigned int padX = 0;
753 const unsigned int padY = 0;
755 const unsigned int outputWidth = (inputWidth + 2 * padX + strideX - poolSize) / strideX;
756 const unsigned int outputHeight = (inputHeight + 2 * padY + strideY - poolSize) / strideY;
761 unsigned int inputShape[] = { batchSize, channelCount, inputHeight, inputWidth };
762 unsigned int outputShape[] = { batchSize, channelCount, outputHeight, outputWidth };
768 if(armnn::IsQuantizedType<T>())
776 boost::multi_array<T, 4> input = MakeRandomTensor<T, 4>(inputTensorInfo, 81715);
780 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.
CreateTensorHandle(inputTensorInfo);
781 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.
CreateTensorHandle(outputTensorInfo);
785 AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
786 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
798 std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.
CreateTensorHandle(outputTensorInfo);
799 std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refWorkloadFactory.
CreateTensorHandle(inputTensorInfo);
803 const size_t reasonIfUnsupportedMaxLen = 255;
804 char reasonIfUnsupported[reasonIfUnsupportedMaxLen+1];
807 reasonIfUnsupported, reasonIfUnsupportedMaxLen);
808 if (!comparisonResult.supported)
810 return comparisonResult;
815 SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
816 SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
818 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.
CreatePooling2d(data, info);
819 std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.
CreatePooling2d(refData, refInfo);
821 outputHandleRef->Allocate();
822 inputHandleRef->Allocate();
823 inputHandle->Allocate();
824 outputHandle->Allocate();
830 workloadRef->Execute();
835 return comparisonResult;
847 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
866 unsigned int inputWidth = 4;
868 unsigned int inputHeight = 4;
870 unsigned int outputWidth =
873 unsigned int outputHeight =
876 unsigned int channels = 1;
877 unsigned int batchSize = 1;
879 std::vector<float> inputData = {
880 510.0f, 222.0f, 780.0f, 654.0f,
881 141.0f, 276.0f, 15.0f, 546.0f,
882 303.0f, 618.0f, 582.0f, 339.0f,
883 438.0f, 564.0f, 573.0f, 402.0f
887 std::vector<float> expectedOutputDataWithPadding = {
888 0.0f, 510.0f, 780.0f, 654.0f, 0.0f,
889 0.0f, 438.0f, 618.0f, 402.0f, 0.0f
892 std::vector<float> expectedOutputDataNoPadding = {
897 armnn::TensorInfo inputTensorInfo({ batchSize, channels, inputHeight, inputWidth }, ArmnnType);
900 armnn::TensorInfo outputTensorInfo({ batchSize, channels, outputHeight, outputWidth }, ArmnnType);
903 if(armnn::IsQuantizedType<T>())
906 inputTensorInfo.SetQuantizationOffset(qOffset);
911 auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(inputData, qScale, qOffset));
913 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
914 forceNoPadding ? QuantizedVector<T>(expectedOutputDataNoPadding, qScale, qOffset) :
915 QuantizedVector<T>(expectedOutputDataWithPadding, qScale, qOffset));
917 return SimplePooling2dTestImpl<ArmnnType>(
918 workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
930 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
944 descriptor.
m_PadLeft = (forceNoPadding) ? 0 : 1;
951 unsigned int inputWidth = 3;
952 unsigned int inputHeight = 2;
953 unsigned int outputWidth =
956 unsigned int outputHeight =
959 unsigned int channels = 1;
960 unsigned int batchSize = 1;
962 std::vector<float> inputData = {
967 std::vector<float> expectedOutputDataWithPadding = {
971 std::vector<float> expectedOutputDataNoPadding = {
975 armnn::TensorInfo inputTensorInfo({ batchSize, channels, inputHeight, inputWidth }, ArmnnType);
978 armnn::TensorInfo outputTensorInfo({ batchSize, channels, outputHeight, outputWidth }, ArmnnType);
981 if(armnn::IsQuantizedType<T>())
984 inputTensorInfo.SetQuantizationOffset(qOffset);
989 auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(inputData, qScale, qOffset));
991 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
992 forceNoPadding ? QuantizedVector<T>(expectedOutputDataNoPadding, qScale, qOffset) :
993 QuantizedVector<T>(expectedOutputDataWithPadding, qScale, qOffset));
995 return SimplePooling2dTestImpl<ArmnnType>(
996 workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
1000 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
1004 float qScale = 1.0f,
1005 int32_t qOffset = 0)
1021 if(armnn::IsQuantizedType<T>())
1024 inputTensorInfo.SetQuantizationOffset(qOffset);
1029 auto input = MakeTensor<T, 4>(inputTensorInfo,
1030 QuantizedVector<T>({
1031 -1.0f, -2.0f, 3.0f, 4.0f,
1032 -1.0f, -2.0f, 3.0f, 4.0f,
1033 1.0f, 2.0f, -3.0f, -4.0f,
1034 1.0f, 2.0f, -3.0f, -4.0f,
1038 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
1039 QuantizedVector<T>({
1046 return SimplePooling2dTestImpl<ArmnnType>(
1047 workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
1050 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
1054 float qScale = 1.0f,
1055 int32_t qOffset = 0)
1071 if(armnn::IsQuantizedType<T>())
1074 inputTensorInfo.SetQuantizationOffset(qOffset);
1079 auto input = MakeTensor<T, 4>(inputTensorInfo,
1080 QuantizedVector<T>({
1081 -1.0f, -2.0f, 3.0f, 4.0f,
1082 -1.0f, -2.0f, 3.0f, 4.0f,
1083 1.0f, 2.0f, -3.0f, -4.0f,
1084 1.0f, 2.0f, -3.0f, -4.0f,
1088 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
1089 QuantizedVector<T>({
1090 -1.0f, 3.0f, 4.0f, 4.0f,
1091 2.0f, 3.0f, 4.0f, 4.0f,
1092 2.0f, 3.0f, 4.0f, 4.0f,
1093 2.0f, 2.0f, 2.0f, -3.0f,
1097 return SimplePooling2dTestImpl<ArmnnType>(
1098 workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
1101 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
1105 float qScale = 1.0f,
1106 int32_t qOffset = 0)
1122 if(armnn::IsQuantizedType<T>())
1125 inputTensorInfo.SetQuantizationOffset(qOffset);
1130 auto input = MakeTensor<T, 4>(inputTensorInfo,
1131 QuantizedVector<T>({
1132 12.0f, 20.0f, 32.0f, 40.0f,
1133 12.0f, 20.0f, 32.0f, 40.0f,
1134 12.0f, 20.0f, 32.0f, 40.0f,
1135 12.0f, 20.0f, 32.0f, 40.0f,
1139 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
1140 QuantizedVector<T>({
1147 return SimplePooling2dTestImpl<ArmnnType>(
1148 workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
1151 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
1155 float qScale = 1.0f,
1156 int32_t qOffset = 0)
1173 if(armnn::IsQuantizedType<T>())
1176 inputTensorInfo.SetQuantizationOffset(qOffset);
1181 auto input = MakeTensor<T, 4>(inputTensorInfo,
1182 QuantizedVector<T>({
1183 1.0f, 2.0f, 3.0f, 4.0f,
1184 1.0f, 2.0f, 3.0f, 4.0f,
1185 1.0f, 2.0f, 3.0f, 4.0f,
1186 1.0f, 2.0f, 3.0f, 4.0f,
1190 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
1191 QuantizedVector<T>({
1197 return SimplePooling2dTestImpl<ArmnnType>(
1198 workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
1201 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
1205 float qScale = 1.0f,
1206 int32_t qOffset = 0)
1222 if(armnn::IsQuantizedType<T>())
1225 inputTensorInfo.SetQuantizationOffset(qOffset);
1230 auto input = MakeTensor<T, 4>(inputTensorInfo,
1231 QuantizedVector<T>({
1232 9.0f, 27.0f, 18.0f, 36.0f,
1233 18.0f, 9.0f, 18.0f, 9.0f,
1234 27.0f, 18.0f, 9.0f, 27.0f,
1235 9.0f, 27.0f, 9.0f, 18.0f,
1239 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
1240 QuantizedVector<T>({
1241 7.0f, 11.0f, 13.0f, 9.0f,
1242 12.0f, 17.0f, 19.0f, 13.0f,
1243 12.0f, 16.0f, 16.0f, 10.0f,
1244 9.0f, 11.0f, 12.0f, 7.0f,
1248 return SimplePooling2dTestImpl<ArmnnType>(
1249 workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
1252 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
1256 float qScale = 1.0f,
1257 int32_t qOffset = 0)
1273 if(armnn::IsQuantizedType<T>())
1276 inputTensorInfo.SetQuantizationOffset(qOffset);
1281 auto input = MakeTensor<T, 4>(inputTensorInfo,
1282 QuantizedVector<T>({
1283 2.0f, 4.0f, 8.0f, 16.0f,
1284 4.0f, 2.0f, 2.0f, 4.0f,
1285 8.0f, 2.0f, 4.0f, 2.0f,
1286 16.0f, 2.0f, 2.0f, 8.0f,
1290 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
1291 QuantizedVector<T>({
1292 1.0f, 4.4721f, 8.0f,
1293 4.4721f, 2.6457f, 2.236f,
1294 8.0f, 1.4142f, 4.0f,
1298 return SimplePooling2dTestImpl<ArmnnType>(
1299 workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
1302 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
1306 float qScale = 1.0f,
1307 int32_t qOffset = 0)
1323 if(armnn::IsQuantizedType<T>())
1326 inputTensorInfo.SetQuantizationOffset(qOffset);
1331 auto input = MakeTensor<T, 4>(inputTensorInfo,
1332 QuantizedVector<T>({
1333 1.0f, 2.0f, 3.0f, 4.0f,
1334 1.0f, 2.0f, 3.0f, 4.0f,
1335 1.0f, 2.0f, 3.0f, 4.0f,
1336 1.0f, 2.0f, 3.0f, 4.0f,
1340 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
1341 QuantizedVector<T>({
1342 1.0540f, 1.7638f, 2.5385f, 2.3570f,
1343 1.2909f, 2.1602f, 3.1091f, 2.8867f,
1344 1.2909f, 2.1602f, 3.1091f, 2.8867f,
1345 1.0540f, 1.7638f, 2.5385f, 2.3570f,
1349 return SimplePooling2dTestImpl<ArmnnType>(
1350 workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
1358 bool forceNoPadding)
1360 return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::Float32>(
1361 workloadFactory, memoryManager, forceNoPadding);
1367 bool forceNoPadding)
1369 return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::QAsymmU8>(
1370 workloadFactory, memoryManager, forceNoPadding, 3.0f, -5);
1376 bool forceNoPadding)
1378 return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::QSymmS16>(
1379 workloadFactory, memoryManager, forceNoPadding);
1385 bool forceNoPadding)
1387 return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::Float32>(
1388 workloadFactory, memoryManager, forceNoPadding);
1394 bool forceNoPadding)
1396 return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::QAsymmU8>(
1397 workloadFactory, memoryManager, forceNoPadding, 0.1f, 128);
1403 bool forceNoPadding)
1405 return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::QSymmS16>(
1406 workloadFactory, memoryManager, forceNoPadding);
1414 return SimpleMaxPooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, dataLayout);
1422 return SimpleMaxPooling2dTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, dataLayout);
1430 return SimpleMaxPooling2dTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, dataLayout);
1436 return IgnorePaddingSimpleMaxPooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
1443 return IgnorePaddingSimpleMaxPooling2dTestCommon<armnn::DataType::QAsymmU8>(
1444 workloadFactory, memoryManager, 1.0f, -5);
1451 return IgnorePaddingSimpleMaxPooling2dTestCommon<armnn::DataType::QSymmS16>(
1452 workloadFactory, memoryManager);
1459 return IgnorePaddingMaxPooling2dSize3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
1466 return IgnorePaddingMaxPooling2dSize3TestCommon<armnn::DataType::QAsymmU8>(
1467 workloadFactory, memoryManager, 1.0f, -5);
1474 return IgnorePaddingMaxPooling2dSize3TestCommon<armnn::DataType::QSymmS16>(
1475 workloadFactory, memoryManager);
1483 return SimpleAveragePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, dataLayout);
1491 return SimpleAveragePooling2dTestCommon<armnn::DataType::QAsymmU8>(
1492 workloadFactory, memoryManager, dataLayout, 0.5, -1);
1500 return SimpleAveragePooling2dTestCommon<armnn::DataType::QSymmS16>(
1501 workloadFactory, memoryManager, dataLayout);
1507 bool forceNoPadding)
1509 return IgnorePaddingAveragePooling2dSize3x2Stride2x2TestCommon<armnn::DataType::Float32>(
1510 workloadFactory, memoryManager, forceNoPadding);
1517 return LargeTensorsAveragePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
1524 return LargeTensorsAveragePooling2dTestCommon<armnn::DataType::QAsymmU8>(
1525 workloadFactory, memoryManager, 0.5, -1);
1532 return LargeTensorsAveragePooling2dTestCommon<armnn::DataType::QSymmS16>(
1533 workloadFactory, memoryManager);
1539 return IgnorePaddingSimpleAveragePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
1546 return IgnorePaddingSimpleAveragePooling2dTestCommon<armnn::DataType::QAsymmU8>(
1547 workloadFactory, memoryManager);
1554 return IgnorePaddingSimpleAveragePooling2dTestCommon<armnn::DataType::QSymmS16>(
1555 workloadFactory, memoryManager);
1562 return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<armnn::DataType::Float32>(
1563 workloadFactory, memoryManager);
1570 return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<armnn::DataType::QAsymmU8>(
1571 workloadFactory, memoryManager);
1578 return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<armnn::DataType::QSymmS16>(
1579 workloadFactory, memoryManager);
1586 return IgnorePaddingAveragePooling2dSize3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
1593 return IgnorePaddingAveragePooling2dSize3TestCommon<armnn::DataType::QAsymmU8>(
1594 workloadFactory, memoryManager);
1601 return IgnorePaddingAveragePooling2dSize3TestCommon<armnn::DataType::QSymmS16>(
1602 workloadFactory, memoryManager);
1610 return SimpleL2Pooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, dataLayout);
1618 return SimpleL2Pooling2dTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, dataLayout);
1626 return SimpleL2Pooling2dTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, dataLayout);
1633 return L2Pooling2dSize3Stride1TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
1640 return L2Pooling2dSize3Stride1TestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
1647 return L2Pooling2dSize3Stride1TestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
1654 return L2Pooling2dSize3Stride3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
1661 return L2Pooling2dSize3Stride3TestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
1668 return L2Pooling2dSize3Stride3TestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
1674 return L2Pooling2dSize3Stride4TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
1681 return L2Pooling2dSize3Stride4TestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
1688 return L2Pooling2dSize3Stride4TestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
1695 return L2Pooling2dSize7TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
1702 return L2Pooling2dSize7TestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
1709 return L2Pooling2dSize7TestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
1716 return L2Pooling2dSize9TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
1723 return L2Pooling2dSize9TestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
1730 return L2Pooling2dSize9TestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
1736 return IgnorePaddingSimpleL2Pooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
1743 return IgnorePaddingSimpleL2Pooling2dTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
1750 return IgnorePaddingSimpleL2Pooling2dTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
1757 return IgnorePaddingL2Pooling2dSize3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
1764 return IgnorePaddingL2Pooling2dSize3TestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
1771 return IgnorePaddingL2Pooling2dSize3TestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
1778 return AsymmetricNonSquarePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
1785 return AsymmetricNonSquarePooling2dTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
1792 return AsymmetricNonSquarePooling2dTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
1801 return ComparePooling2dTestCommon<armnn::DataType::Float32>(
1802 workloadFactory, memoryManager, refWorkloadFactory, poolingType);
1811 return ComparePooling2dTestCommon<armnn::DataType::QAsymmU8>(
1812 workloadFactory, memoryManager, refWorkloadFactory, poolingType, 0.1f, 128);
1821 return ComparePooling2dTestCommon<armnn::DataType::QSymmS16>(
1822 workloadFactory, memoryManager, refWorkloadFactory, poolingType);
LayerTestResult< float, 4 > IgnorePaddingSimpleAveragePooling2dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< float, 4 > ComparePooling2dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, armnn::IWorkloadFactory &refWorkloadFactory, armnn::PoolingAlgorithm poolingType)
LayerTestResult< float, 4 > IgnorePaddingSimpleL2Pooling2dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< uint8_t, 4 > IgnorePaddingL2Pooling2dSize3Uint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< uint8_t, 4 > ComparePooling2dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, armnn::IWorkloadFactory &refWorkloadFactory, armnn::PoolingAlgorithm poolingType)
void Permute(const armnn::TensorShape &dstShape, const armnn::PermutationVector &mappings, const void *src, void *dst, size_t dataTypeSize)
LayerTestResult< float, 4 > IgnorePaddingSimpleAveragePooling2dNoPaddingTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< uint8_t, 4 > IgnorePaddingSimpleMaxPooling2dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)
LayerTestResult< float, 4 > L2Pooling2dSize3Stride4Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< int16_t, 4 > LargeTensorsAveragePooling2dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< uint8_t, 4 > IgnorePaddingAveragePooling2dSize3Uint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
armnn::TensorInfo GetTensorInfo(unsigned int numberOfBatches, unsigned int numberOfChannels, unsigned int height, unsigned int width, const armnn::DataLayout dataLayout, const armnn::DataType dataType)
uint32_t m_PoolHeight
Pooling height value.
LayerTestResult< uint8_t, 4 > AsymmetricNonSquarePooling2dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
unsigned int GetHeightIndex() const
LayerDescriptor m_Parameters
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo, const bool IsMemoryManaged=true) const =0
uint32_t m_PadTop
Padding top value in the height dimension.
LayerTestResult< float, 4 > IgnorePaddingAveragePooling2dSize3Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< int16_t, 4 > SimpleMaxPooling2dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::DataLayout dataLayout)
uint32_t m_PadRight
Padding right value in the width dimension.
void CopyDataFromITensorHandle(void *memory, const armnn::ITensorHandle *tensorHandle)
LayerTestResult< uint8_t, 4 > L2Pooling2dSize7Uint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< uint8_t, 4 > L2Pooling2dSize3Stride4Uint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
std::shared_ptr< IMemoryManager > IMemoryManagerSharedPtr
LayerTestResult< int16_t, 4 > IgnorePaddingSimpleMaxPooling2dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< int16_t, 4 > SimpleMaxPooling2dSize2x2Stride2x2Int16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool forceNoPadding)
virtual const BackendId & GetBackendId() const =0
LayerTestResult< float, 4 > IgnorePaddingL2Pooling2dSize3Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< float, 4 > IgnorePaddingSimpleMaxPooling2dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< int16_t, 4 > ComparePooling2dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, armnn::IWorkloadFactory &refWorkloadFactory, armnn::PoolingAlgorithm poolingType)
LayerTestResult< uint8_t, 4 > SimpleAveragePooling2dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::DataLayout dataLayout)
LayerTestResult< float, 4 > IgnorePaddingMaxPooling2dSize3Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< uint8_t, 4 > IgnorePaddingMaxPooling2dSize3Uint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
unsigned int GetWidthIndex() const
uint32_t m_PoolWidth
Pooling width value.
LayerTestResult< int16_t, 4 > IgnorePaddingL2Pooling2dSize3Int16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
The padding fields count, but are ignored.
LayerTestResult< int16_t, 4 > IgnorePaddingMaxPooling2dSize3Int16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< uint8_t, 4 > IgnorePaddingSimpleL2Pooling2dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< float, 4 > IgnorePaddingAveragePooling2dSize3x2Stride2x2Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool forceNoPadding)
LayerTestResult< float, 4 > SimpleAveragePooling2dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::DataLayout dataLayout)
LayerTestResult< int16_t, 4 > IgnorePaddingSimpleAveragePooling2dNoPaddingInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< float, 4 > LargeTensorsAveragePooling2dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
The padding fields don't count and are ignored.
LayerTestResult< float, 4 > SimpleMaxPooling2dSize3x3Stride2x4Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool forceNoPadding)
LayerTestResult< uint8_t, 4 > SimpleMaxPooling2dSize2x2Stride2x2Uint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool forceNoPadding)
LayerTestResult< uint8_t, 4 > SimpleMaxPooling2dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::DataLayout dataLayout)
void SetQuantizationScale(float scale)
uint32_t m_PadLeft
Padding left value in the width dimension.
unsigned int GetNumElements() const
LayerTestResult< int16_t, 4 > IgnorePaddingSimpleL2Pooling2dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< int16_t, 4 > SimpleL2Pooling2dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::DataLayout dataLayout)
LayerTestResult< int16_t, 4 > AsymmetricNonSquarePooling2dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
virtual std::unique_ptr< IWorkload > CreatePooling2d(const Pooling2dQueueDescriptor &descriptor, const WorkloadInfo &info) const
LayerTestResult< int16_t, 4 > SimpleAveragePooling2dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::DataLayout dataLayout)
PaddingMethod m_PaddingMethod
The padding method to be used. (Exclude, IgnoreValue).
unsigned int GetChannelsIndex() const
LayerTestResult< int16_t, 4 > IgnorePaddingAveragePooling2dSize3Int16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
OutputShapeRounding m_OutputShapeRounding
The rounding method for the output shape. (Floor, Ceiling).
LayerTestResult< float, 4 > L2Pooling2dSize9Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
PoolingAlgorithm m_PoolType
The pooling algorithm to use (Max, Average, L2).
LayerTestResult< uint8_t, 4 > L2Pooling2dSize3Stride3Uint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< int16_t, 4 > IgnorePaddingSimpleAveragePooling2dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< float, 4 > L2Pooling2dSize3Stride1Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< uint8_t, 4 > SimpleL2Pooling2dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::DataLayout dataLayout)
bool IsPooling2dSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const Pooling2dDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
LayerTestResult< uint8_t, 4 > IgnorePaddingSimpleAveragePooling2dNoPaddingUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< float, 4 > SimpleL2Pooling2dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::DataLayout dataLayout)
LayerTestResult< int16_t, 4 > L2Pooling2dSize9Int16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
A Pooling2dDescriptor for the Pooling2dLayer.
LayerTestResult< uint8_t, 4 > IgnorePaddingSimpleAveragePooling2dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< int16_t, 4 > L2Pooling2dSize3Stride1Int16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
LayerTestResult< uint8_t, 4 > L2Pooling2dSize3Stride1Uint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< uint8_t, 4 > L2Pooling2dSize9Uint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
void SetQuantizationOffset(int32_t offset)
LayerTestResult< int16_t, 4 > SimpleMaxPooling2dSize3x3Stride2x4Int16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool forceNoPadding)
LayerTestResult< int16_t, 4 > L2Pooling2dSize3Stride3Int16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< uint8_t, 4 > SimpleMaxPooling2dSize3x3Stride2x4Uint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool forceNoPadding)
LayerTestResult< uint8_t, 4 > LargeTensorsAveragePooling2dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
uint32_t m_PadBottom
Padding bottom value in the height dimension.
LayerTestResult< float, 4 > SimpleMaxPooling2dSize2x2Stride2x2Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool forceNoPadding)
const TensorShape & GetShape() const
LayerTestResult< int16_t, 4 > L2Pooling2dSize3Stride4Int16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< int16_t, 4 > L2Pooling2dSize7Int16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
LayerTestResult< float, 4 > AsymmetricNonSquarePooling2dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< float, 4 > SimpleMaxPooling2dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::DataLayout dataLayout)
LayerTestResult< float, 4 > L2Pooling2dSize3Stride3Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< float, 4 > L2Pooling2dSize7Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)