// NOTE(review): this file is a garbled extract — the original file's own
// line numbers are fused into the code text below and many lines are
// missing. All code tokens are preserved verbatim; only comments are added.
//
// Fragment of SimplePooling2dTestImpl<ArmnnType, T>: the shared driver used
// by every pooling test below. From the visible lines it derives tensor
// dimensions from the boost::multi_array shapes, builds input/output
// TensorInfos, checks backend support, creates and runs a Pooling2d
// workload, and records the expected output on the result.
26 #include <boost/numeric/conversion/cast.hpp> 33 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
40 const boost::multi_array<T, 4>& input,
41 const boost::multi_array<T, 4>& outputExpected)
// Convert multi_array extents (size_t) to unsigned int with a checked cast.
// channelsIndex/heightIndex/widthIndex are presumably derived from
// dataLayout (NCHW vs NHWC) on lines not visible here — TODO confirm.
52 unsigned int inputChannels =
boost::numeric_cast<
unsigned int>(input.shape()[channelsIndex]);
55 unsigned int outputHeight =
boost::numeric_cast<
unsigned int>(outputExpected.shape()[heightIndex]);
56 unsigned int outputWidth =
boost::numeric_cast<
unsigned int>(outputExpected.shape()[widthIndex]);
57 unsigned int outputChannels =
boost::numeric_cast<
unsigned int>(outputExpected.shape()[channelsIndex]);
// Dimension 0 is the batch dimension in both layouts.
58 unsigned int outputBatchSize =
boost::numeric_cast<
unsigned int>(outputExpected.shape()[0]);
// TensorInfo construction arguments (the constructor call itself is on a
// line not shown in this extract).
61 inputBatchSize, inputChannels, inputHeight, inputWidth, dataLayout, ArmnnType);
64 outputBatchSize, outputChannels, outputHeight, outputWidth, dataLayout, ArmnnType);
// For quantized data types, propagate the test's scale/offset onto the
// tensor infos (the matching output-info calls are in the missing lines).
67 if(armnn::IsQuantizedType<T>())
69 inputTensorInfo.SetQuantizationScale(qScale);
70 inputTensorInfo.SetQuantizationOffset(qOffset);
77 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.
CreateTensorHandle(inputTensorInfo);
78 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.
CreateTensorHandle(outputTensorInfo);
// Wire the tensor infos/handles into the Pooling2d queue descriptor.
85 AddInputToWorkload(queueDescriptor, workloadInfo, inputTensorInfo, inputHandle.get());
86 AddOutputToWorkload(queueDescriptor, workloadInfo, outputTensorInfo, outputHandle.get());
// Ask the backend whether this pooling configuration is supported; bail out
// early (with result.supported = false, presumably) if it is not.
90 const size_t reasonIfUnsupportedMaxLen = 255;
91 char reasonIfUnsupported[reasonIfUnsupportedMaxLen+1];
94 reasonIfUnsupported, reasonIfUnsupportedMaxLen);
95 if (!result.supported)
100 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.
CreatePooling2d(queueDescriptor, workloadInfo);
102 inputHandle->Allocate();
103 outputHandle->Allocate();
// The expected reference data is stored on the LayerTestResult so the test
// harness can compare it against the actual output.
111 result.outputExpected = outputExpected;
// Fragment of (presumably) SimpleMaxPooling2dSize3x3Stride2x4TestCommon —
// NOTE(review): name inferred from the wrapper calls later in this file;
// the function signature and descriptor setup are in lines not shown.
// Builds a 2-batch, 2-channel, 13x8 input where the second channel of each
// batch is the element-wise negation of the first, then runs it through
// SimplePooling2dTestImpl against hand-computed expected outputs.
125 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
145 unsigned int inputWidth = 8;
146 unsigned int inputHeight = 13;
147 unsigned int outputWidth =
150 unsigned int outputHeight =
153 unsigned int channels = 2;
154 unsigned int batchSize = 2;
156 armnn::TensorInfo inputTensorInfo({ batchSize, channels, inputHeight, inputWidth }, ArmnnType);
157 armnn::TensorInfo outputTensorInfo({ batchSize, channels, outputHeight, outputWidth }, ArmnnType);
160 if(armnn::IsQuantizedType<T>())
163 inputTensorInfo.SetQuantizationOffset(qOffset);
// 13x8 base channel data, duplicated (and negated) below.
168 std::vector<float> singleChannelData({
169 0.0f, 4.0f, 8.0f, 1.0f, 6.0f, 4.0f, 5.0f, 8.0f,
170 1.0f, 1.0f, 6.0f, 0.0f, 3.0f, 7.0f, 4.0f, 7.0f,
171 8.0f, 5.0f, 0.0f, 0.0f, 8.0f, 3.0f, 4.0f, 3.0f,
172 8.0f, 2.0f, 5.0f, 4.0f, 1.0f, 9.0f, 2.0f, 0.0f,
173 5.0f, 4.0f, 5.0f, 0.0f, 0.0f, 0.0f, 7.0f, 2.0f,
174 1.0f, 2.0f, 6.0f, 2.0f, 7.0f, 9.0f, 5.0f, 2.0f,
175 9.0f, 7.0f, 3.0f, 1.0f, 3.0f, 4.0f, 8.0f, 3.0f,
176 1.0f, 0.0f, 0.0f, 5.0f, 5.0f, 4.0f, 2.0f, 0.0f,
177 6.0f, 4.0f, 3.0f, 6.0f, 9.0f, 5.0f, 5.0f, 6.0f,
178 8.0f, 7.0f, 9.0f, 6.0f, 1.0f, 4.0f, 1.0f, 9.0f,
179 7.0f, 1.0f, 9.0f, 2.0f, 9.0f, 9.0f, 8.0f, 1.0f,
180 4.0f, 4.0f, 5.0f, 9.0f, 2.0f, 6.0f, 6.0f, 4.0f,
181 3.0f, 5.0f, 4.0f, 0.0f, 1.0f, 5.0f, 9.0f, 7.0f,
// Assemble the full NCHW input: for each of the two batches, channel 0 is
// singleChannelData and channel 1 is its negation.
185 std::vector<float> inputData;
186 auto negator = [](
float f) {
return -f; };
189 inputData.insert(inputData.end(), singleChannelData.begin(), singleChannelData.end());
190 std::transform(singleChannelData.begin(), singleChannelData.end(), std::back_inserter(inputData), negator);
193 inputData.insert(inputData.end(), singleChannelData.begin(), singleChannelData.end());
194 std::transform(singleChannelData.begin(), singleChannelData.end(), std::back_inserter(inputData), negator);
196 auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(inputData, qScale, qOffset));
199 auto shape(GetTensorShapeAsArray<4>(outputTensorInfo));
200 boost::multi_array<T, 4> outputExpected(shape);
// Two expected tensors are assigned on different paths — presumably the
// padding vs. forceNoPadding branches; the selecting condition is in the
// missing lines. TODO confirm against the full source.
203 outputExpected = MakeTensor<T, 4>(outputTensorInfo,
225 outputExpected = MakeTensor<T, 4>(outputTensorInfo,
227 0.0f, 8.0f, 8.0f, 8.0f, 8.0f, 8.0f,
228 0.0f, 9.0f, 7.0f, 9.0f, 9.0f, 3.0f,
229 0.0f, 8.0f, 9.0f, 9.0f, 9.0f, 9.0f,
231 0.0f, 0.0f, 0.0f, 0.0f,-3.0f,-3.0f,
232 0.0f,-1.0f, 0.0f, 0.0f, 0.0f,-2.0f,
233 0.0f,-1.0f,-1.0f,-1.0f,-1.0f,-1.0f,
235 0.0f, 8.0f, 8.0f, 8.0f, 8.0f, 8.0f,
236 0.0f, 9.0f, 7.0f, 9.0f, 9.0f, 3.0f,
237 0.0f, 8.0f, 9.0f, 9.0f, 9.0f, 9.0f,
239 0.0f, 0.0f, 0.0f, 0.0f,-3.0f,-3.0f,
240 0.0f,-1.0f, 0.0f, 0.0f, 0.0f,-2.0f,
241 0.0f,-1.0f,-1.0f,-1.0f,-1.0f,-1.0f
// Delegate execution and comparison to the shared driver.
246 return SimplePooling2dTestImpl<ArmnnType>(
247 workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
// Fragment of (presumably) SimpleMaxPooling2dTestCommon — NOTE(review):
// name inferred from the wrapper calls later in this file; the signature,
// descriptor setup, and tensor-info construction are in missing lines.
// Input is 1..32 laid out in 2x2 blocks; the tmp/tmp1 vectors suggest a
// data-layout permutation (NCHW -> NHWC) on lines not shown — TODO confirm.
250 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
269 if(armnn::IsQuantizedType<T>())
277 std::vector<T> inputData(
279 1.0f, 2.0f, 5.0f, 6.0f,
280 3.0f, 4.0f, 7.0f, 8.0f,
281 9.0f, 10.0f, 13.0f, 14.0f,
282 11.0f, 12.0f, 15.0f, 16.0f,
284 17.0f, 18.0f, 21.0f, 22.0f,
285 19.0f, 20.0f, 23.0f, 24.0f,
286 25.0f, 26.0f, 29.0f, 30.0f,
287 27.0f, 28.0f, 31.0f, 32.0f,
291 std::vector<T> outputData(
// Scratch buffers, presumably for permuting input/output when the test
// runs in NHWC layout.
304 std::vector<T> tmp(inputData.size());
308 std::vector<T> tmp1(outputData.size());
313 auto input = MakeTensor<T, 4>(inputTensorInfo, inputData);
315 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputData);
317 return SimplePooling2dTestImpl<ArmnnType>(
318 workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
// Fragment of (presumably) SimpleAveragePooling2dTestCommon — NOTE(review):
// name inferred from the wrapper calls later in this file. Same structure
// as the max-pooling variant above: fixed input data, layout scratch
// buffers, then delegation to SimplePooling2dTestImpl.
321 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
340 if(armnn::IsQuantizedType<T>())
348 std::vector<T> inputData(
350 2.0f, 2.0f, 6.0f, 6.0f,
351 4.0f, 4.0f, 8.0f, 8.0f,
352 10.0f, 12.0f, 14.0f, 16.0f,
353 10.0f, 12.0f, 16.0f, 14.0f,
355 18.0f, 20.0f, 24.0f, 22.0f,
356 20.0f, 18.0f, 22.0f, 24.0f,
357 26.0f, 28.0f, 0.0f, 0.0f,
358 26.0f, 28.0f, 0.0f, 0.0f,
362 std::vector<T> outputData(
// Scratch buffers, presumably for NHWC permutation — TODO confirm.
375 std::vector<T> tmp(inputData.size());
379 std::vector<T> tmp1(outputData.size());
384 auto input = MakeTensor<T, 4>(inputTensorInfo, inputData);
386 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputData);
388 return SimplePooling2dTestImpl<ArmnnType>(
389 workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
// Fragment of (presumably) LargeTensorsAveragePooling2dTestCommon —
// NOTE(review): name inferred from the wrapper calls later in this file.
// Fills large input/output vectors with the constant 1 (the surrounding
// loops are in missing lines): averaging a tensor of ones must yield ones,
// which makes the expected output trivial regardless of tensor size.
392 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
413 if(armnn::IsQuantizedType<T>())
421 std::vector<T> inputVec;
425 inputVec.push_back(1);
428 auto input = MakeTensor<T, 4>(inputTensorInfo, inputVec);
430 std::vector<T> outputVec;
434 outputVec.push_back(1);
437 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputVec);
439 return SimplePooling2dTestImpl<ArmnnType>(
440 workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
// Fragment of (presumably) SimpleL2Pooling2dTestCommon — NOTE(review):
// name inferred from the wrapper calls later in this file. L2 pooling
// returns sqrt(mean(x^2)) per window; the expected outputData values are
// in missing lines.
443 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
461 std::vector<T> inputData(
463 1.0f, 7.0f, 5.0f, 5.0f,
464 1.0f, 7.0f, 5.0f, 5.0f,
465 3.0f, 3.0f, 1.0f, 1.0f,
466 3.0f, 3.0f, 1.0f, 1.0f,
468 1.0f, 7.0f, 0.0f, 0.0f,
469 1.0f, 7.0f, 2.0f, 0.0f,
470 0.0f, 2.0f, 1.0f, 1.0f,
471 0.0f, 0.0f, 1.0f, 1.0f,
475 std::vector<T> outputData(
// Scratch buffers, presumably for NHWC permutation — TODO confirm.
488 std::vector<T> tmp(inputData.size());
492 std::vector<T> tmp1(outputData.size());
497 auto input = MakeTensor<T, 4>(inputTensorInfo, inputData);
499 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputData);
501 return SimplePooling2dTestImpl<ArmnnType>(
502 workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
// Fragment of (presumably) L2Pooling2dSize3Stride1TestCommon —
// NOTE(review): name inferred from the wrapper calls later in this file.
// 4x4 input; descriptor setup and expected values are in missing lines.
505 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
519 auto input = MakeTensor<T, 4>(inputTensorInfo,
521 2.0f, 1.0f, 5.0f, 2.0f,
522 1.0f, 2.0f, 2.0f, 1.0f,
523 5.0f, 4.0f, 1.0f, 5.0f,
524 2.0f, 1.0f, 5.0f, 2.0f,
529 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
536 return SimplePooling2dTestImpl<ArmnnType>(
537 workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
// Fragment of (presumably) L2Pooling2dSize3Stride3TestCommon —
// NOTE(review): name inferred from the wrapper calls later in this file.
// 9x9 input built from a repeating 3x3 pattern so each non-overlapping
// 3x3 window sees identical data.
540 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
554 auto input = MakeTensor<T, 4>(inputTensorInfo,
556 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
557 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
558 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
559 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
560 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
561 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
562 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
563 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
564 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
569 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
577 return SimplePooling2dTestImpl<ArmnnType>(
578 workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
// Fragment of (presumably) L2Pooling2dSize3Stride4TestCommon —
// NOTE(review): name inferred from the wrapper calls later in this file.
// 7x7 input where a zero row/column separates the two 3x3 regions that
// stride 4 selects.
581 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
595 auto input = MakeTensor<T, 4>(inputTensorInfo,
597 2.0f, 1.0f, 5.0f, 0.0f, 2.0f, 1.0f, 5.0f,
598 1.0f, 2.0f, 2.0f, 0.0f, 1.0f, 2.0f, 2.0f,
599 5.0f, 4.0f, 1.0f, 0.0f, 5.0f, 4.0f, 1.0f,
600 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
601 2.0f, 1.0f, 5.0f, 0.0f, 2.0f, 1.0f, 5.0f,
602 1.0f, 2.0f, 2.0f, 0.0f, 1.0f, 2.0f, 2.0f,
603 5.0f, 4.0f, 1.0f, 0.0f, 5.0f, 4.0f, 1.0f,
608 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
615 return SimplePooling2dTestImpl<ArmnnType>(
616 workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
// Fragment of (presumably) L2Pooling2dSize7TestCommon — NOTE(review):
// name inferred from the wrapper calls later in this file. A single 7x7
// window covering a sparse input; expected value is in missing lines.
619 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
633 auto input = MakeTensor<T, 4>(inputTensorInfo,
635 1.0f, 0.0f, 2.0f, 0.0f, 3.0f, 0.0f, 4.0f,
636 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
637 0.0f, 5.0f, 0.0f, 6.0f, 0.0f, 7.0f, 0.0f,
638 8.0f, 0.0f, 9.0f, 0.0f, 10.0f, 0.0f, 5.0f,
639 0.0f, 5.0f, 0.0f, 2.0f, 0.0f, 1.0f, 1.0f,
640 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
641 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
646 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
652 return SimplePooling2dTestImpl<ArmnnType>(
653 workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
// Fragment of (presumably) L2Pooling2dSize9TestCommon — NOTE(review):
// name inferred from the wrapper calls later in this file. A single 9x9
// window over the same repeating pattern as the size-3/stride-3 test.
656 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
670 auto input = MakeTensor<T, 4>(inputTensorInfo,
672 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
673 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
674 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
675 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
676 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
677 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
678 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
679 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
680 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
685 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
691 return SimplePooling2dTestImpl<ArmnnType>(
692 workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
// Fragment of (presumably) AsymmetricNonSquarePooling2dTestCommon —
// NOTE(review): name inferred from the wrapper calls later in this file.
// Exercises a deliberately asymmetric configuration: non-square 2x3 pool,
// unequal strides (2,1), and asymmetric padding (left 2 / right 0,
// top 1 / bottom 2).
695 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
707 descriptor.m_PoolWidth = 2;
708 descriptor.m_PoolHeight = 3;
709 descriptor.m_StrideX = 2;
710 descriptor.m_StrideY = 1;
711 descriptor.m_PadLeft = 2;
712 descriptor.m_PadRight = 0;
713 descriptor.m_PadTop = 1;
714 descriptor.m_PadBottom = 2;
719 auto input = MakeTensor<T, 4>(inputTensorInfo,
726 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
728 0.0f, 3.0f, 0.0f, 3.0f,
732 return SimplePooling2dTestImpl<ArmnnType>(
733 workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
// Fragment of (presumably) ComparePooling2dTestCommon — NOTE(review):
// name inferred from the wrapper calls later in this file. Unlike the
// other tests it has no hand-computed expected data: it runs the same
// pooling workload on the factory under test AND on a reference workload
// factory over a random input, and (in missing lines) compares the two
// outputs.
736 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
746 const unsigned int inputWidth = 16;
747 const unsigned int inputHeight = 32;
748 const unsigned int channelCount = 2;
749 const unsigned int batchSize = 5;
751 const unsigned int poolSize = 3;
752 const unsigned int strideX = 2;
753 const unsigned int strideY = 4;
754 const unsigned int padX = 0;
755 const unsigned int padY = 0;
// Standard pooling output-size formula (floor division via unsigned /).
757 const unsigned int outputWidth = (inputWidth + 2 * padX + strideX - poolSize) / strideX;
758 const unsigned int outputHeight = (inputHeight + 2 * padY + strideY - poolSize) / strideY;
763 unsigned int inputShape[] = { batchSize, channelCount, inputHeight, inputWidth };
764 unsigned int outputShape[] = { batchSize, channelCount, outputHeight, outputWidth };
770 if(armnn::IsQuantizedType<T>())
// Deterministic pseudo-random input (fixed seed 81715) so runs compare
// identical data.
778 boost::multi_array<T, 4> input = MakeRandomTensor<T, 4>(inputTensorInfo, 81715);
782 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.
CreateTensorHandle(inputTensorInfo);
783 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.
CreateTensorHandle(outputTensorInfo);
787 AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
788 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
// Second set of handles for the reference backend.
800 std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.
CreateTensorHandle(outputTensorInfo);
801 std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refWorkloadFactory.
CreateTensorHandle(inputTensorInfo);
// Support check mirrors SimplePooling2dTestImpl; bail out if unsupported.
805 const size_t reasonIfUnsupportedMaxLen = 255;
806 char reasonIfUnsupported[reasonIfUnsupportedMaxLen+1];
809 reasonIfUnsupported, reasonIfUnsupportedMaxLen);
810 if (!comparisonResult.supported)
812 return comparisonResult;
817 SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
818 SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
820 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.
CreatePooling2d(data, info);
821 std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.
CreatePooling2d(refData, refInfo);
823 outputHandleRef->Allocate();
824 inputHandleRef->Allocate();
825 inputHandle->Allocate();
826 outputHandle->Allocate();
832 workloadRef->Execute();
837 return comparisonResult;
// Fragment of (presumably) SimpleMaxPooling2dSize2x2Stride2x2TestCommon —
// NOTE(review): name inferred from the wrapper calls later in this file.
// 4x4 single-channel input with two precomputed expected outputs; the
// forceNoPadding flag (visible in the ternary at the end) selects between
// the padded and unpadded expectation.
849 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
868 unsigned int inputWidth = 4;
870 unsigned int inputHeight = 4;
872 unsigned int outputWidth =
875 unsigned int outputHeight =
878 unsigned int channels = 1;
879 unsigned int batchSize = 1;
881 std::vector<float> inputData = {
882 510.0f, 222.0f, 780.0f, 654.0f,
883 141.0f, 276.0f, 15.0f, 546.0f,
884 303.0f, 618.0f, 582.0f, 339.0f,
885 438.0f, 564.0f, 573.0f, 402.0f
// Expected output when padding is applied (output is 2x5 here).
889 std::vector<float> expectedOutputDataWithPadding = {
890 0.0f, 510.0f, 780.0f, 654.0f, 0.0f,
891 0.0f, 438.0f, 618.0f, 402.0f, 0.0f
// Expected output for the no-padding path (values in missing lines).
894 std::vector<float> expectedOutputDataNoPadding = {
899 armnn::TensorInfo inputTensorInfo({ batchSize, channels, inputHeight, inputWidth }, ArmnnType);
902 armnn::TensorInfo outputTensorInfo({ batchSize, channels, outputHeight, outputWidth }, ArmnnType);
905 if(armnn::IsQuantizedType<T>())
908 inputTensorInfo.SetQuantizationOffset(qOffset);
913 auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(inputData, qScale, qOffset));
915 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
916 forceNoPadding ? QuantizedVector<T>(expectedOutputDataNoPadding, qScale, qOffset) :
917 QuantizedVector<T>(expectedOutputDataWithPadding, qScale, qOffset));
919 return SimplePooling2dTestImpl<ArmnnType>(
920 workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
// Fragment of (presumably) IgnorePaddingAveragePooling2dSize3x2Stride2x2-
// TestCommon — NOTE(review): name inferred from the wrapper calls later in
// this file. Pad-left is 1 unless forceNoPadding is set; like the previous
// test, the flag selects between two expected-output vectors.
932 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
946 descriptor.
m_PadLeft = (forceNoPadding) ? 0 : 1;
953 unsigned int inputWidth = 3;
954 unsigned int inputHeight = 2;
955 unsigned int outputWidth =
958 unsigned int outputHeight =
961 unsigned int channels = 1;
962 unsigned int batchSize = 1;
// Input values and both expected-output vectors are in missing lines.
964 std::vector<float> inputData = {
969 std::vector<float> expectedOutputDataWithPadding = {
973 std::vector<float> expectedOutputDataNoPadding = {
977 armnn::TensorInfo inputTensorInfo({ batchSize, channels, inputHeight, inputWidth }, ArmnnType);
980 armnn::TensorInfo outputTensorInfo({ batchSize, channels, outputHeight, outputWidth }, ArmnnType);
983 if(armnn::IsQuantizedType<T>())
986 inputTensorInfo.SetQuantizationOffset(qOffset);
991 auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(inputData, qScale, qOffset));
993 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
994 forceNoPadding ? QuantizedVector<T>(expectedOutputDataNoPadding, qScale, qOffset) :
995 QuantizedVector<T>(expectedOutputDataWithPadding, qScale, qOffset));
997 return SimplePooling2dTestImpl<ArmnnType>(
998 workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
// Fragment of (presumably) IgnorePaddingSimpleMaxPooling2dTestCommon —
// NOTE(review): name inferred from the wrapper calls later in this file;
// descriptor setup and expected output values are in missing lines.
1002 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
1006 float qScale = 1.0f,
1007 int32_t qOffset = 0)
1023 if(armnn::IsQuantizedType<T>())
1026 inputTensorInfo.SetQuantizationOffset(qOffset);
1031 auto input = MakeTensor<T, 4>(inputTensorInfo,
1032 QuantizedVector<T>({
1033 -1.0f, -2.0f, 3.0f, 4.0f,
1034 -1.0f, -2.0f, 3.0f, 4.0f,
1035 1.0f, 2.0f, -3.0f, -4.0f,
1036 1.0f, 2.0f, -3.0f, -4.0f,
1040 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
1041 QuantizedVector<T>({
1048 return SimplePooling2dTestImpl<ArmnnType>(
1049 workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
// Fragment of (presumably) IgnorePaddingMaxPooling2dSize3TestCommon —
// NOTE(review): name inferred from the wrapper calls later in this file.
// Same 4x4 signed input as the previous test; the 4x4 expected output
// shows max-pooling with padding contributing zeros at the borders.
1052 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
1056 float qScale = 1.0f,
1057 int32_t qOffset = 0)
1073 if(armnn::IsQuantizedType<T>())
1076 inputTensorInfo.SetQuantizationOffset(qOffset);
1081 auto input = MakeTensor<T, 4>(inputTensorInfo,
1082 QuantizedVector<T>({
1083 -1.0f, -2.0f, 3.0f, 4.0f,
1084 -1.0f, -2.0f, 3.0f, 4.0f,
1085 1.0f, 2.0f, -3.0f, -4.0f,
1086 1.0f, 2.0f, -3.0f, -4.0f,
1090 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
1091 QuantizedVector<T>({
1092 -1.0f, 3.0f, 4.0f, 4.0f,
1093 2.0f, 3.0f, 4.0f, 4.0f,
1094 2.0f, 3.0f, 4.0f, 4.0f,
1095 2.0f, 2.0f, 2.0f, -3.0f,
1099 return SimplePooling2dTestImpl<ArmnnType>(
1100 workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
// Fragment of (presumably) IgnorePaddingSimpleAveragePooling2dTestCommon —
// NOTE(review): name inferred from the wrapper calls later in this file;
// expected output values are in missing lines.
1103 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
1107 float qScale = 1.0f,
1108 int32_t qOffset = 0)
1124 if(armnn::IsQuantizedType<T>())
1127 inputTensorInfo.SetQuantizationOffset(qOffset);
1132 auto input = MakeTensor<T, 4>(inputTensorInfo,
1133 QuantizedVector<T>({
1134 12.0f, 20.0f, 32.0f, 40.0f,
1135 12.0f, 20.0f, 32.0f, 40.0f,
1136 12.0f, 20.0f, 32.0f, 40.0f,
1137 12.0f, 20.0f, 32.0f, 40.0f,
1141 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
1142 QuantizedVector<T>({
1149 return SimplePooling2dTestImpl<ArmnnType>(
1150 workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
// Fragment of (presumably) IgnorePaddingSimpleAveragePooling2dNoPadding-
// TestCommon — NOTE(review): name inferred from the wrapper calls later in
// this file; expected output values are in missing lines.
1153 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
1157 float qScale = 1.0f,
1158 int32_t qOffset = 0)
1175 if(armnn::IsQuantizedType<T>())
1178 inputTensorInfo.SetQuantizationOffset(qOffset);
1183 auto input = MakeTensor<T, 4>(inputTensorInfo,
1184 QuantizedVector<T>({
1185 1.0f, 2.0f, 3.0f, 4.0f,
1186 1.0f, 2.0f, 3.0f, 4.0f,
1187 1.0f, 2.0f, 3.0f, 4.0f,
1188 1.0f, 2.0f, 3.0f, 4.0f,
1192 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
1193 QuantizedVector<T>({
1199 return SimplePooling2dTestImpl<ArmnnType>(
1200 workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
// Fragment of (presumably) IgnorePaddingAveragePooling2dSize3TestCommon —
// NOTE(review): name inferred from the wrapper calls later in this file.
// 4x4 input and a full 4x4 expected output (averages include the zero
// padding in the divisor, judging by the fractional-free values).
1203 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
1207 float qScale = 1.0f,
1208 int32_t qOffset = 0)
1224 if(armnn::IsQuantizedType<T>())
1227 inputTensorInfo.SetQuantizationOffset(qOffset);
1232 auto input = MakeTensor<T, 4>(inputTensorInfo,
1233 QuantizedVector<T>({
1234 9.0f, 27.0f, 18.0f, 36.0f,
1235 18.0f, 9.0f, 18.0f, 9.0f,
1236 27.0f, 18.0f, 9.0f, 27.0f,
1237 9.0f, 27.0f, 9.0f, 18.0f,
1241 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
1242 QuantizedVector<T>({
1243 7.0f, 11.0f, 13.0f, 9.0f,
1244 12.0f, 17.0f, 19.0f, 13.0f,
1245 12.0f, 16.0f, 16.0f, 10.0f,
1246 9.0f, 11.0f, 12.0f, 7.0f,
1250 return SimplePooling2dTestImpl<ArmnnType>(
1251 workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
// Fragment of (presumably) IgnorePaddingSimpleL2Pooling2dTestCommon —
// NOTE(review): name inferred from the wrapper calls later in this file.
// Expected values are truncated square roots (e.g. 4.4721 ~ sqrt(20)),
// consistent with L2 pooling: sqrt(mean(x^2)) per window.
1254 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
1258 float qScale = 1.0f,
1259 int32_t qOffset = 0)
1275 if(armnn::IsQuantizedType<T>())
1278 inputTensorInfo.SetQuantizationOffset(qOffset);
1283 auto input = MakeTensor<T, 4>(inputTensorInfo,
1284 QuantizedVector<T>({
1285 2.0f, 4.0f, 8.0f, 16.0f,
1286 4.0f, 2.0f, 2.0f, 4.0f,
1287 8.0f, 2.0f, 4.0f, 2.0f,
1288 16.0f, 2.0f, 2.0f, 8.0f,
1292 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
1293 QuantizedVector<T>({
1294 1.0f, 4.4721f, 8.0f,
1295 4.4721f, 2.6457f, 2.236f,
1296 8.0f, 1.4142f, 4.0f,
1300 return SimplePooling2dTestImpl<ArmnnType>(
1301 workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
// Fragment of (presumably) IgnorePaddingL2Pooling2dSize3TestCommon —
// NOTE(review): name inferred from the wrapper calls later in this file.
// 4x4 input of repeated rows 1..4; the 4x4 expected output contains
// rounded L2 means over each 3x3 window (zeros from padding included).
1304 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
1308 float qScale = 1.0f,
1309 int32_t qOffset = 0)
1325 if(armnn::IsQuantizedType<T>())
1328 inputTensorInfo.SetQuantizationOffset(qOffset);
1333 auto input = MakeTensor<T, 4>(inputTensorInfo,
1334 QuantizedVector<T>({
1335 1.0f, 2.0f, 3.0f, 4.0f,
1336 1.0f, 2.0f, 3.0f, 4.0f,
1337 1.0f, 2.0f, 3.0f, 4.0f,
1338 1.0f, 2.0f, 3.0f, 4.0f,
1342 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
1343 QuantizedVector<T>({
1344 1.0540f, 1.7638f, 2.5385f, 2.3570f,
1345 1.2909f, 2.1602f, 3.1091f, 2.8867f,
1346 1.2909f, 2.1602f, 3.1091f, 2.8867f,
1347 1.0540f, 1.7638f, 2.5385f, 2.3570f,
1351 return SimplePooling2dTestImpl<ArmnnType>(
1352 workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
1360 bool forceNoPadding)
1362 return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::Float32>(
1363 workloadFactory, memoryManager, forceNoPadding);
1369 bool forceNoPadding)
1371 return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::QAsymmU8>(
1372 workloadFactory, memoryManager, forceNoPadding, 3.0f, -5);
1378 bool forceNoPadding)
1380 return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::QSymmS16>(
1381 workloadFactory, memoryManager, forceNoPadding);
1387 bool forceNoPadding)
1389 return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::Float32>(
1390 workloadFactory, memoryManager, forceNoPadding);
1396 bool forceNoPadding)
1398 return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::QAsymmU8>(
1399 workloadFactory, memoryManager, forceNoPadding, 0.1f, 128);
1405 bool forceNoPadding)
1407 return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::QSymmS16>(
1408 workloadFactory, memoryManager, forceNoPadding);
1416 return SimpleMaxPooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, dataLayout);
1424 return SimpleMaxPooling2dTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, dataLayout);
1432 return SimpleMaxPooling2dTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, dataLayout);
1438 return IgnorePaddingSimpleMaxPooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
1445 return IgnorePaddingSimpleMaxPooling2dTestCommon<armnn::DataType::QAsymmU8>(
1446 workloadFactory, memoryManager, 1.0f, -5);
1453 return IgnorePaddingSimpleMaxPooling2dTestCommon<armnn::DataType::QSymmS16>(
1454 workloadFactory, memoryManager);
1461 return IgnorePaddingMaxPooling2dSize3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
1468 return IgnorePaddingMaxPooling2dSize3TestCommon<armnn::DataType::QAsymmU8>(
1469 workloadFactory, memoryManager, 1.0f, -5);
1476 return IgnorePaddingMaxPooling2dSize3TestCommon<armnn::DataType::QSymmS16>(
1477 workloadFactory, memoryManager);
1485 return SimpleAveragePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, dataLayout);
1493 return SimpleAveragePooling2dTestCommon<armnn::DataType::QAsymmU8>(
1494 workloadFactory, memoryManager, dataLayout, 0.5, -1);
1502 return SimpleAveragePooling2dTestCommon<armnn::DataType::QSymmS16>(
1503 workloadFactory, memoryManager, dataLayout);
1509 bool forceNoPadding)
1511 return IgnorePaddingAveragePooling2dSize3x2Stride2x2TestCommon<armnn::DataType::Float32>(
1512 workloadFactory, memoryManager, forceNoPadding);
1519 return LargeTensorsAveragePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
1526 return LargeTensorsAveragePooling2dTestCommon<armnn::DataType::QAsymmU8>(
1527 workloadFactory, memoryManager, 0.5, -1);
1534 return LargeTensorsAveragePooling2dTestCommon<armnn::DataType::QSymmS16>(
1535 workloadFactory, memoryManager);
1541 return IgnorePaddingSimpleAveragePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
1548 return IgnorePaddingSimpleAveragePooling2dTestCommon<armnn::DataType::QAsymmU8>(
1549 workloadFactory, memoryManager);
1556 return IgnorePaddingSimpleAveragePooling2dTestCommon<armnn::DataType::QSymmS16>(
1557 workloadFactory, memoryManager);
1564 return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<armnn::DataType::Float32>(
1565 workloadFactory, memoryManager);
1572 return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<armnn::DataType::QAsymmU8>(
1573 workloadFactory, memoryManager);
1580 return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<armnn::DataType::QSymmS16>(
1581 workloadFactory, memoryManager);
1588 return IgnorePaddingAveragePooling2dSize3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
1595 return IgnorePaddingAveragePooling2dSize3TestCommon<armnn::DataType::QAsymmU8>(
1596 workloadFactory, memoryManager);
1603 return IgnorePaddingAveragePooling2dSize3TestCommon<armnn::DataType::QSymmS16>(
1604 workloadFactory, memoryManager);
1612 return SimpleL2Pooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, dataLayout);
1620 return SimpleL2Pooling2dTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, dataLayout);
1628 return SimpleL2Pooling2dTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, dataLayout);
1635 return L2Pooling2dSize3Stride1TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
1642 return L2Pooling2dSize3Stride1TestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
1649 return L2Pooling2dSize3Stride1TestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
1656 return L2Pooling2dSize3Stride3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
1663 return L2Pooling2dSize3Stride3TestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
1670 return L2Pooling2dSize3Stride3TestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
1676 return L2Pooling2dSize3Stride4TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
1683 return L2Pooling2dSize3Stride4TestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
1690 return L2Pooling2dSize3Stride4TestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
1697 return L2Pooling2dSize7TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
1704 return L2Pooling2dSize7TestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
1711 return L2Pooling2dSize7TestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
1718 return L2Pooling2dSize9TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
1725 return L2Pooling2dSize9TestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
1732 return L2Pooling2dSize9TestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
1738 return IgnorePaddingSimpleL2Pooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
1745 return IgnorePaddingSimpleL2Pooling2dTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
1752 return IgnorePaddingSimpleL2Pooling2dTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
1759 return IgnorePaddingL2Pooling2dSize3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
1766 return IgnorePaddingL2Pooling2dSize3TestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
1773 return IgnorePaddingL2Pooling2dSize3TestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
1780 return AsymmetricNonSquarePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
1787 return AsymmetricNonSquarePooling2dTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
1794 return AsymmetricNonSquarePooling2dTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
1803 return ComparePooling2dTestCommon<armnn::DataType::Float32>(
1804 workloadFactory, memoryManager, refWorkloadFactory, poolingType);
1813 return ComparePooling2dTestCommon<armnn::DataType::QAsymmU8>(
1814 workloadFactory, memoryManager, refWorkloadFactory, poolingType, 0.1f, 128);
1823 return ComparePooling2dTestCommon<armnn::DataType::QSymmS16>(
1824 workloadFactory, memoryManager, refWorkloadFactory, poolingType);
LayerTestResult< float, 4 > L2Pooling2dSize3Stride3Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
unsigned int GetNumElements() const
LayerTestResult< int16_t, 4 > IgnorePaddingL2Pooling2dSize3Int16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< uint8_t, 4 > AsymmetricNonSquarePooling2dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
virtual const BackendId & GetBackendId() const =0
LayerTestResult< float, 4 > IgnorePaddingAveragePooling2dSize3Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
uint32_t m_PadBottom
Padding bottom value in the height dimension.
unsigned int GetWidthIndex() const
LayerTestResult< float, 4 > ComparePooling2dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, armnn::IWorkloadFactory &refWorkloadFactory, armnn::PoolingAlgorithm poolingType)
bool IsPooling2dSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const Pooling2dDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
const TensorShape & GetShape() const
uint32_t m_PadLeft
Padding left value in the width dimension.
LayerTestResult< float, 4 > IgnorePaddingMaxPooling2dSize3Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< int16_t, 4 > L2Pooling2dSize7Int16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< float, 4 > SimpleL2Pooling2dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::DataLayout dataLayout)
LayerTestResult< int16_t, 4 > ComparePooling2dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, armnn::IWorkloadFactory &refWorkloadFactory, armnn::PoolingAlgorithm poolingType)
LayerTestResult< uint8_t, 4 > IgnorePaddingMaxPooling2dSize3Uint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< float, 4 > IgnorePaddingSimpleMaxPooling2dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
uint32_t m_PoolWidth
Pooling width value.
LayerTestResult< float, 4 > LargeTensorsAveragePooling2dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< float, 4 > IgnorePaddingSimpleL2Pooling2dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< float, 4 > SimpleAveragePooling2dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::DataLayout dataLayout)
LayerTestResult< int16_t, 4 > IgnorePaddingSimpleMaxPooling2dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< int16_t, 4 > IgnorePaddingAveragePooling2dSize3Int16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< int16_t, 4 > IgnorePaddingSimpleAveragePooling2dNoPaddingInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< uint8_t, 4 > ComparePooling2dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, armnn::IWorkloadFactory &refWorkloadFactory, armnn::PoolingAlgorithm poolingType)
The padding fields don't count and are ignored.
PaddingMethod m_PaddingMethod
The padding method to be used. (Exclude, IgnoreValue).
LayerTestResult< uint8_t, 4 > SimpleMaxPooling2dSize3x3Stride2x4Uint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool forceNoPadding)
LayerTestResult< uint8_t, 4 > LargeTensorsAveragePooling2dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< float, 4 > IgnorePaddingL2Pooling2dSize3Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< int16_t, 4 > SimpleAveragePooling2dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::DataLayout dataLayout)
uint32_t m_PadTop
Padding top value in the height dimension.
LayerTestResult< int16_t, 4 > IgnorePaddingSimpleL2Pooling2dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< uint8_t, 4 > L2Pooling2dSize7Uint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< uint8_t, 4 > IgnorePaddingSimpleL2Pooling2dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< uint8_t, 4 > SimpleAveragePooling2dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::DataLayout dataLayout)
void IgnoreUnused(Ts &&...)
LayerDescriptor m_Parameters
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
LayerTestResult< float, 4 > L2Pooling2dSize3Stride1Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< float, 4 > L2Pooling2dSize3Stride4Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
unsigned int GetHeightIndex() const
LayerTestResult< uint8_t, 4 > L2Pooling2dSize3Stride1Uint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< uint8_t, 4 > L2Pooling2dSize3Stride4Uint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< int16_t, 4 > IgnorePaddingMaxPooling2dSize3Int16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< int16_t, 4 > SimpleMaxPooling2dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::DataLayout dataLayout)
LayerTestResult< uint8_t, 4 > SimpleMaxPooling2dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::DataLayout dataLayout)
uint32_t m_PoolHeight
Pooling height value.
void Permute(const armnn::TensorShape &dstShape, const armnn::PermutationVector &mappings, const void *src, void *dst, size_t dataTypeSize)
LayerTestResult< int16_t, 4 > L2Pooling2dSize3Stride3Int16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
uint32_t m_PadRight
Padding right value in the width dimension.
std::shared_ptr< IMemoryManager > IMemoryManagerSharedPtr
Provides access to the appropriate indexes for Channels, Height and Width based on DataLayout...
LayerTestResult< float, 4 > SimpleMaxPooling2dSize3x3Stride2x4Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool forceNoPadding)
LayerTestResult< uint8_t, 4 > IgnorePaddingL2Pooling2dSize3Uint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< uint8_t, 4 > IgnorePaddingSimpleAveragePooling2dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< int16_t, 4 > L2Pooling2dSize9Int16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
void SetQuantizationScale(float scale)
std::enable_if_t< std::is_unsigned< Source >::value &&std::is_unsigned< Dest >::value, Dest > numeric_cast(Source source)
LayerTestResult< float, 4 > IgnorePaddingSimpleAveragePooling2dNoPaddingTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< int16_t, 4 > AsymmetricNonSquarePooling2dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
void CopyDataFromITensorHandle(void *memory, const armnn::ITensorHandle *tensorHandle)
LayerTestResult< int16_t, 4 > L2Pooling2dSize3Stride1Int16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< uint8_t, 4 > IgnorePaddingAveragePooling2dSize3Uint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo, const bool IsMemoryManaged=true) const =0
LayerTestResult< float, 4 > IgnorePaddingAveragePooling2dSize3x2Stride2x2Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool forceNoPadding)
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
virtual std::unique_ptr< IWorkload > CreatePooling2d(const Pooling2dQueueDescriptor &descriptor, const WorkloadInfo &info) const
LayerTestResult< float, 4 > IgnorePaddingSimpleAveragePooling2dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< uint8_t, 4 > SimpleL2Pooling2dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::DataLayout dataLayout)
LayerTestResult< float, 4 > SimpleMaxPooling2dSize2x2Stride2x2Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool forceNoPadding)
LayerTestResult< int16_t, 4 > SimpleMaxPooling2dSize2x2Stride2x2Int16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool forceNoPadding)
PoolingAlgorithm m_PoolType
The pooling algorithm to use (Max, Average, L2).
LayerTestResult< uint8_t, 4 > IgnorePaddingSimpleAveragePooling2dNoPaddingUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
The padding fields count, but are ignored.
LayerTestResult< int16_t, 4 > L2Pooling2dSize3Stride4Int16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< uint8_t, 4 > IgnorePaddingSimpleMaxPooling2dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< float, 4 > L2Pooling2dSize7Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
OutputShapeRounding m_OutputShapeRounding
The rounding method for the output shape. (Floor, Ceiling).
LayerTestResult< int16_t, 4 > SimpleMaxPooling2dSize3x3Stride2x4Int16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool forceNoPadding)
armnn::TensorInfo GetTensorInfo(unsigned int numberOfBatches, unsigned int numberOfChannels, unsigned int height, unsigned int width, const armnn::DataLayout dataLayout, const armnn::DataType dataType)
LayerTestResult< float, 4 > SimpleMaxPooling2dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::DataLayout dataLayout)
LayerTestResult< float, 4 > AsymmetricNonSquarePooling2dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< uint8_t, 4 > L2Pooling2dSize3Stride3Uint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
Contains information about inputs and outputs to a layer.
void SetQuantizationOffset(int32_t offset)
LayerTestResult< float, 4 > L2Pooling2dSize9Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< int16_t, 4 > SimpleL2Pooling2dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::DataLayout dataLayout)
A Pooling2dDescriptor for the Pooling2dLayer.
LayerTestResult< int16_t, 4 > LargeTensorsAveragePooling2dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< uint8_t, 4 > L2Pooling2dSize9Uint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
unsigned int GetChannelsIndex() const
LayerTestResult< int16_t, 4 > IgnorePaddingSimpleAveragePooling2dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< uint8_t, 4 > SimpleMaxPooling2dSize2x2Stride2x2Uint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool forceNoPadding)
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)