32 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
40 const boost::multi_array<T, 4>& input,
41 const boost::multi_array<T, 4>& outputExpected)
52 unsigned int inputChannels =
armnn::numeric_cast<
unsigned int>(input.shape()[channelsIndex]);
55 unsigned int outputHeight =
armnn::numeric_cast<
unsigned int>(outputExpected.shape()[heightIndex]);
56 unsigned int outputWidth =
armnn::numeric_cast<
unsigned int>(outputExpected.shape()[widthIndex]);
57 unsigned int outputChannels =
armnn::numeric_cast<
unsigned int>(outputExpected.shape()[channelsIndex]);
58 unsigned int outputBatchSize =
armnn::numeric_cast<
unsigned int>(outputExpected.shape()[0]);
61 inputBatchSize, inputChannels, inputHeight, inputWidth, dataLayout, ArmnnType);
64 outputBatchSize, outputChannels, outputHeight, outputWidth, dataLayout, ArmnnType);
67 if(armnn::IsQuantizedType<T>())
69 inputTensorInfo.SetQuantizationScale(qScale);
70 inputTensorInfo.SetQuantizationOffset(qOffset);
77 std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.
CreateTensorHandle(inputTensorInfo);
78 std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.
CreateTensorHandle(outputTensorInfo);
85 AddInputToWorkload(queueDescriptor, workloadInfo, inputTensorInfo, inputHandle.get());
86 AddOutputToWorkload(queueDescriptor, workloadInfo, outputTensorInfo, outputHandle.get());
90 const size_t reasonIfUnsupportedMaxLen = 255;
91 char reasonIfUnsupported[reasonIfUnsupportedMaxLen+1];
94 reasonIfUnsupported, reasonIfUnsupportedMaxLen);
95 if (!result.supported)
100 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.
CreatePooling2d(queueDescriptor, workloadInfo);
102 inputHandle->Allocate();
103 outputHandle->Allocate();
111 result.outputExpected = outputExpected;
125 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
146 unsigned int inputWidth = 8;
147 unsigned int inputHeight = 13;
148 unsigned int outputWidth =
151 unsigned int outputHeight =
154 unsigned int channels = 2;
155 unsigned int batchSize = 2;
157 armnn::TensorInfo inputTensorInfo({ batchSize, channels, inputHeight, inputWidth }, ArmnnType);
158 armnn::TensorInfo outputTensorInfo({ batchSize, channels, outputHeight, outputWidth }, ArmnnType);
161 if(armnn::IsQuantizedType<T>())
164 inputTensorInfo.SetQuantizationOffset(qOffset);
169 std::vector<float> singleChannelData({
170 0.0f, 4.0f, 8.0f, 1.0f, 6.0f, 4.0f, 5.0f, 8.0f,
171 1.0f, 1.0f, 6.0f, 0.0f, 3.0f, 7.0f, 4.0f, 7.0f,
172 8.0f, 5.0f, 0.0f, 0.0f, 8.0f, 3.0f, 4.0f, 3.0f,
173 8.0f, 2.0f, 5.0f, 4.0f, 1.0f, 9.0f, 2.0f, 0.0f,
174 5.0f, 4.0f, 5.0f, 0.0f, 0.0f, 0.0f, 7.0f, 2.0f,
175 1.0f, 2.0f, 6.0f, 2.0f, 7.0f, 9.0f, 5.0f, 2.0f,
176 9.0f, 7.0f, 3.0f, 1.0f, 3.0f, 4.0f, 8.0f, 3.0f,
177 1.0f, 0.0f, 0.0f, 5.0f, 5.0f, 4.0f, 2.0f, 0.0f,
178 6.0f, 4.0f, 3.0f, 6.0f, 9.0f, 5.0f, 5.0f, 6.0f,
179 8.0f, 7.0f, 9.0f, 6.0f, 1.0f, 4.0f, 1.0f, 9.0f,
180 7.0f, 1.0f, 9.0f, 2.0f, 9.0f, 9.0f, 8.0f, 1.0f,
181 4.0f, 4.0f, 5.0f, 9.0f, 2.0f, 6.0f, 6.0f, 4.0f,
182 3.0f, 5.0f, 4.0f, 0.0f, 1.0f, 5.0f, 9.0f, 7.0f,
186 std::vector<float> inputData;
187 auto negator = [](
float f) {
return -f; };
190 inputData.insert(inputData.end(), singleChannelData.begin(), singleChannelData.end());
191 std::transform(singleChannelData.begin(), singleChannelData.end(), std::back_inserter(inputData), negator);
194 inputData.insert(inputData.end(), singleChannelData.begin(), singleChannelData.end());
195 std::transform(singleChannelData.begin(), singleChannelData.end(), std::back_inserter(inputData), negator);
197 auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(inputData, qScale, qOffset));
200 auto shape(GetTensorShapeAsArray<4>(outputTensorInfo));
201 boost::multi_array<T, 4> outputExpected(shape);
204 outputExpected = MakeTensor<T, 4>(outputTensorInfo,
226 outputExpected = MakeTensor<T, 4>(outputTensorInfo,
228 0.0f, 8.0f, 8.0f, 8.0f, 8.0f, 8.0f,
229 0.0f, 9.0f, 7.0f, 9.0f, 9.0f, 3.0f,
230 0.0f, 8.0f, 9.0f, 9.0f, 9.0f, 9.0f,
232 0.0f, 0.0f, 0.0f, 0.0f,-3.0f,-3.0f,
233 0.0f,-1.0f, 0.0f, 0.0f, 0.0f,-2.0f,
234 0.0f,-1.0f,-1.0f,-1.0f,-1.0f,-1.0f,
236 0.0f, 8.0f, 8.0f, 8.0f, 8.0f, 8.0f,
237 0.0f, 9.0f, 7.0f, 9.0f, 9.0f, 3.0f,
238 0.0f, 8.0f, 9.0f, 9.0f, 9.0f, 9.0f,
240 0.0f, 0.0f, 0.0f, 0.0f,-3.0f,-3.0f,
241 0.0f,-1.0f, 0.0f, 0.0f, 0.0f,-2.0f,
242 0.0f,-1.0f,-1.0f,-1.0f,-1.0f,-1.0f
247 return SimplePooling2dTestImpl<ArmnnType>(
248 workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected);
251 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
271 if(armnn::IsQuantizedType<T>())
279 std::vector<T> inputData(
281 1.0f, 2.0f, 5.0f, 6.0f,
282 3.0f, 4.0f, 7.0f, 8.0f,
283 9.0f, 10.0f, 13.0f, 14.0f,
284 11.0f, 12.0f, 15.0f, 16.0f,
286 17.0f, 18.0f, 21.0f, 22.0f,
287 19.0f, 20.0f, 23.0f, 24.0f,
288 25.0f, 26.0f, 29.0f, 30.0f,
289 27.0f, 28.0f, 31.0f, 32.0f,
293 std::vector<T> outputData(
306 std::vector<T> tmp(inputData.size());
310 std::vector<T> tmp1(outputData.size());
315 auto input = MakeTensor<T, 4>(inputTensorInfo, inputData);
317 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputData);
319 return SimplePooling2dTestImpl<ArmnnType>(
320 workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected);
323 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
343 if(armnn::IsQuantizedType<T>())
351 std::vector<T> inputData(
353 2.0f, 2.0f, 6.0f, 6.0f,
354 4.0f, 4.0f, 8.0f, 8.0f,
355 10.0f, 12.0f, 14.0f, 16.0f,
356 10.0f, 12.0f, 16.0f, 14.0f,
358 18.0f, 20.0f, 24.0f, 22.0f,
359 20.0f, 18.0f, 22.0f, 24.0f,
360 26.0f, 28.0f, 0.0f, 0.0f,
361 26.0f, 28.0f, 0.0f, 0.0f,
365 std::vector<T> outputData(
378 std::vector<T> tmp(inputData.size());
382 std::vector<T> tmp1(outputData.size());
387 auto input = MakeTensor<T, 4>(inputTensorInfo, inputData);
389 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputData);
391 return SimplePooling2dTestImpl<ArmnnType>(
392 workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected);
395 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
417 if(armnn::IsQuantizedType<T>())
425 std::vector<T> inputVec;
429 inputVec.push_back(1);
432 auto input = MakeTensor<T, 4>(inputTensorInfo, inputVec);
434 std::vector<T> outputVec;
438 outputVec.push_back(1);
441 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputVec);
443 return SimplePooling2dTestImpl<ArmnnType>(
444 workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected);
447 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
466 std::vector<T> inputData(
468 1.0f, 7.0f, 5.0f, 5.0f,
469 1.0f, 7.0f, 5.0f, 5.0f,
470 3.0f, 3.0f, 1.0f, 1.0f,
471 3.0f, 3.0f, 1.0f, 1.0f,
473 1.0f, 7.0f, 0.0f, 0.0f,
474 1.0f, 7.0f, 2.0f, 0.0f,
475 0.0f, 2.0f, 1.0f, 1.0f,
476 0.0f, 0.0f, 1.0f, 1.0f,
480 std::vector<T> outputData(
493 std::vector<T> tmp(inputData.size());
497 std::vector<T> tmp1(outputData.size());
502 auto input = MakeTensor<T, 4>(inputTensorInfo, inputData);
504 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputData);
506 return SimplePooling2dTestImpl<ArmnnType>(
507 workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected);
510 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
525 auto input = MakeTensor<T, 4>(inputTensorInfo,
527 2.0f, 1.0f, 5.0f, 2.0f,
528 1.0f, 2.0f, 2.0f, 1.0f,
529 5.0f, 4.0f, 1.0f, 5.0f,
530 2.0f, 1.0f, 5.0f, 2.0f,
535 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
542 return SimplePooling2dTestImpl<ArmnnType>(
543 workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected);
546 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
561 auto input = MakeTensor<T, 4>(inputTensorInfo,
563 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
564 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
565 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
566 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
567 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
568 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
569 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
570 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
571 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
576 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
584 return SimplePooling2dTestImpl<ArmnnType>(
585 workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected);
588 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
603 auto input = MakeTensor<T, 4>(inputTensorInfo,
605 2.0f, 1.0f, 5.0f, 0.0f, 2.0f, 1.0f, 5.0f,
606 1.0f, 2.0f, 2.0f, 0.0f, 1.0f, 2.0f, 2.0f,
607 5.0f, 4.0f, 1.0f, 0.0f, 5.0f, 4.0f, 1.0f,
608 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
609 2.0f, 1.0f, 5.0f, 0.0f, 2.0f, 1.0f, 5.0f,
610 1.0f, 2.0f, 2.0f, 0.0f, 1.0f, 2.0f, 2.0f,
611 5.0f, 4.0f, 1.0f, 0.0f, 5.0f, 4.0f, 1.0f,
616 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
623 return SimplePooling2dTestImpl<ArmnnType>(
624 workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected);
627 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
642 auto input = MakeTensor<T, 4>(inputTensorInfo,
644 1.0f, 0.0f, 2.0f, 0.0f, 3.0f, 0.0f, 4.0f,
645 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
646 0.0f, 5.0f, 0.0f, 6.0f, 0.0f, 7.0f, 0.0f,
647 8.0f, 0.0f, 9.0f, 0.0f, 10.0f, 0.0f, 5.0f,
648 0.0f, 5.0f, 0.0f, 2.0f, 0.0f, 1.0f, 1.0f,
649 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
650 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
655 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
661 return SimplePooling2dTestImpl<ArmnnType>(
662 workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected);
665 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
680 auto input = MakeTensor<T, 4>(inputTensorInfo,
682 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
683 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
684 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
685 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
686 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
687 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
688 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
689 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
690 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
695 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
701 return SimplePooling2dTestImpl<ArmnnType>(
702 workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected);
705 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
718 descriptor.m_PoolWidth = 2;
719 descriptor.m_PoolHeight = 3;
720 descriptor.m_StrideX = 2;
721 descriptor.m_StrideY = 1;
722 descriptor.m_PadLeft = 2;
723 descriptor.m_PadRight = 0;
724 descriptor.m_PadTop = 1;
725 descriptor.m_PadBottom = 2;
730 auto input = MakeTensor<T, 4>(inputTensorInfo,
737 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
739 0.0f, 3.0f, 0.0f, 3.0f,
743 return SimplePooling2dTestImpl<ArmnnType>(
744 workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected);
747 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
759 const unsigned int inputWidth = 16;
760 const unsigned int inputHeight = 32;
761 const unsigned int channelCount = 2;
762 const unsigned int batchSize = 5;
764 const unsigned int poolSize = 3;
765 const unsigned int strideX = 2;
766 const unsigned int strideY = 4;
767 const unsigned int padX = 0;
768 const unsigned int padY = 0;
770 const unsigned int outputWidth = (inputWidth + 2 * padX + strideX - poolSize) / strideX;
771 const unsigned int outputHeight = (inputHeight + 2 * padY + strideY - poolSize) / strideY;
776 unsigned int inputShape[] = { batchSize, channelCount, inputHeight, inputWidth };
777 unsigned int outputShape[] = { batchSize, channelCount, outputHeight, outputWidth };
783 if(armnn::IsQuantizedType<T>())
791 boost::multi_array<T, 4> input = MakeRandomTensor<T, 4>(inputTensorInfo, 81715);
795 std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.
CreateTensorHandle(inputTensorInfo);
796 std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.
CreateTensorHandle(outputTensorInfo);
800 AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
801 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
813 std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refTensorHandleFactory.
CreateTensorHandle(outputTensorInfo);
814 std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refTensorHandleFactory.
CreateTensorHandle(inputTensorInfo);
818 const size_t reasonIfUnsupportedMaxLen = 255;
819 char reasonIfUnsupported[reasonIfUnsupportedMaxLen+1];
822 reasonIfUnsupported, reasonIfUnsupportedMaxLen);
823 if (!comparisonResult.supported)
825 return comparisonResult;
830 SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
831 SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
833 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.
CreatePooling2d(data, info);
834 std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.
CreatePooling2d(refData, refInfo);
836 outputHandleRef->Allocate();
837 inputHandleRef->Allocate();
838 inputHandle->Allocate();
839 outputHandle->Allocate();
845 workloadRef->Execute();
850 return comparisonResult;
862 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
882 unsigned int inputWidth = 4;
884 unsigned int inputHeight = 4;
886 unsigned int outputWidth =
889 unsigned int outputHeight =
892 unsigned int channels = 1;
893 unsigned int batchSize = 1;
895 std::vector<float> inputData = {
896 510.0f, 222.0f, 780.0f, 654.0f,
897 141.0f, 276.0f, 15.0f, 546.0f,
898 303.0f, 618.0f, 582.0f, 339.0f,
899 438.0f, 564.0f, 573.0f, 402.0f
903 std::vector<float> expectedOutputDataWithPadding = {
904 0.0f, 510.0f, 780.0f, 654.0f, 0.0f,
905 0.0f, 438.0f, 618.0f, 402.0f, 0.0f
908 std::vector<float> expectedOutputDataNoPadding = {
913 armnn::TensorInfo inputTensorInfo({ batchSize, channels, inputHeight, inputWidth }, ArmnnType);
916 armnn::TensorInfo outputTensorInfo({ batchSize, channels, outputHeight, outputWidth }, ArmnnType);
919 if(armnn::IsQuantizedType<T>())
922 inputTensorInfo.SetQuantizationOffset(qOffset);
927 auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(inputData, qScale, qOffset));
929 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
930 forceNoPadding ? QuantizedVector<T>(expectedOutputDataNoPadding, qScale, qOffset) :
931 QuantizedVector<T>(expectedOutputDataWithPadding, qScale, qOffset));
933 return SimplePooling2dTestImpl<ArmnnType>(
934 workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected);
946 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
961 descriptor.
m_PadLeft = (forceNoPadding) ? 0 : 1;
968 unsigned int inputWidth = 3;
969 unsigned int inputHeight = 2;
970 unsigned int outputWidth =
973 unsigned int outputHeight =
976 unsigned int channels = 1;
977 unsigned int batchSize = 1;
979 std::vector<float> inputData = {
984 std::vector<float> expectedOutputDataWithPadding = {
988 std::vector<float> expectedOutputDataNoPadding = {
992 armnn::TensorInfo inputTensorInfo({ batchSize, channels, inputHeight, inputWidth }, ArmnnType);
995 armnn::TensorInfo outputTensorInfo({ batchSize, channels, outputHeight, outputWidth }, ArmnnType);
998 if(armnn::IsQuantizedType<T>())
1001 inputTensorInfo.SetQuantizationOffset(qOffset);
1006 auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(inputData, qScale, qOffset));
1008 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
1009 forceNoPadding ? QuantizedVector<T>(expectedOutputDataNoPadding, qScale, qOffset) :
1010 QuantizedVector<T>(expectedOutputDataWithPadding, qScale, qOffset));
1012 return SimplePooling2dTestImpl<ArmnnType>(
1013 workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected);
1017 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
1022 float qScale = 1.0f,
1023 int32_t qOffset = 0)
1039 if(armnn::IsQuantizedType<T>())
1042 inputTensorInfo.SetQuantizationOffset(qOffset);
1047 auto input = MakeTensor<T, 4>(inputTensorInfo,
1048 QuantizedVector<T>({
1049 -1.0f, -2.0f, 3.0f, 4.0f,
1050 -1.0f, -2.0f, 3.0f, 4.0f,
1051 1.0f, 2.0f, -3.0f, -4.0f,
1052 1.0f, 2.0f, -3.0f, -4.0f,
1056 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
1057 QuantizedVector<T>({
1064 return SimplePooling2dTestImpl<ArmnnType>(
1065 workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected);
1068 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
1073 float qScale = 1.0f,
1074 int32_t qOffset = 0)
1090 if(armnn::IsQuantizedType<T>())
1093 inputTensorInfo.SetQuantizationOffset(qOffset);
1098 auto input = MakeTensor<T, 4>(inputTensorInfo,
1099 QuantizedVector<T>({
1100 -1.0f, -2.0f, 3.0f, 4.0f,
1101 -1.0f, -2.0f, 3.0f, 4.0f,
1102 1.0f, 2.0f, -3.0f, -4.0f,
1103 1.0f, 2.0f, -3.0f, -4.0f,
1107 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
1108 QuantizedVector<T>({
1109 -1.0f, 3.0f, 4.0f, 4.0f,
1110 2.0f, 3.0f, 4.0f, 4.0f,
1111 2.0f, 3.0f, 4.0f, 4.0f,
1112 2.0f, 2.0f, 2.0f, -3.0f,
1116 return SimplePooling2dTestImpl<ArmnnType>(
1117 workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected);
1120 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
1125 float qScale = 1.0f,
1126 int32_t qOffset = 0)
1142 if(armnn::IsQuantizedType<T>())
1145 inputTensorInfo.SetQuantizationOffset(qOffset);
1150 auto input = MakeTensor<T, 4>(inputTensorInfo,
1151 QuantizedVector<T>({
1152 12.0f, 20.0f, 32.0f, 40.0f,
1153 12.0f, 20.0f, 32.0f, 40.0f,
1154 12.0f, 20.0f, 32.0f, 40.0f,
1155 12.0f, 20.0f, 32.0f, 40.0f,
1159 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
1160 QuantizedVector<T>({
1167 return SimplePooling2dTestImpl<ArmnnType>(
1168 workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected);
1171 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
1176 float qScale = 1.0f,
1177 int32_t qOffset = 0)
1194 if(armnn::IsQuantizedType<T>())
1197 inputTensorInfo.SetQuantizationOffset(qOffset);
1202 auto input = MakeTensor<T, 4>(inputTensorInfo,
1203 QuantizedVector<T>({
1204 1.0f, 2.0f, 3.0f, 4.0f,
1205 1.0f, 2.0f, 3.0f, 4.0f,
1206 1.0f, 2.0f, 3.0f, 4.0f,
1207 1.0f, 2.0f, 3.0f, 4.0f,
1211 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
1212 QuantizedVector<T>({
1218 return SimplePooling2dTestImpl<ArmnnType>(
1219 workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected);
1222 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
1227 float qScale = 1.0f,
1228 int32_t qOffset = 0)
1244 if(armnn::IsQuantizedType<T>())
1247 inputTensorInfo.SetQuantizationOffset(qOffset);
1252 auto input = MakeTensor<T, 4>(inputTensorInfo,
1253 QuantizedVector<T>({
1254 9.0f, 27.0f, 18.0f, 36.0f,
1255 18.0f, 9.0f, 18.0f, 9.0f,
1256 27.0f, 18.0f, 9.0f, 27.0f,
1257 9.0f, 27.0f, 9.0f, 18.0f,
1261 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
1262 QuantizedVector<T>({
1263 7.0f, 11.0f, 13.0f, 9.0f,
1264 12.0f, 17.0f, 19.0f, 13.0f,
1265 12.0f, 16.0f, 16.0f, 10.0f,
1266 9.0f, 11.0f, 12.0f, 7.0f,
1270 return SimplePooling2dTestImpl<ArmnnType>(
1271 workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected);
1274 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
1279 float qScale = 1.0f,
1280 int32_t qOffset = 0)
1296 if(armnn::IsQuantizedType<T>())
1299 inputTensorInfo.SetQuantizationOffset(qOffset);
1304 auto input = MakeTensor<T, 4>(inputTensorInfo,
1305 QuantizedVector<T>({
1306 2.0f, 4.0f, 8.0f, 16.0f,
1307 4.0f, 2.0f, 2.0f, 4.0f,
1308 8.0f, 2.0f, 4.0f, 2.0f,
1309 16.0f, 2.0f, 2.0f, 8.0f,
1313 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
1314 QuantizedVector<T>({
1315 1.0f, 4.4721f, 8.0f,
1316 4.4721f, 2.6457f, 2.236f,
1317 8.0f, 1.4142f, 4.0f,
1321 return SimplePooling2dTestImpl<ArmnnType>(
1322 workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected);
1325 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
1330 float qScale = 1.0f,
1331 int32_t qOffset = 0)
1347 if(armnn::IsQuantizedType<T>())
1350 inputTensorInfo.SetQuantizationOffset(qOffset);
1355 auto input = MakeTensor<T, 4>(inputTensorInfo,
1356 QuantizedVector<T>({
1357 1.0f, 2.0f, 3.0f, 4.0f,
1358 1.0f, 2.0f, 3.0f, 4.0f,
1359 1.0f, 2.0f, 3.0f, 4.0f,
1360 1.0f, 2.0f, 3.0f, 4.0f,
1364 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
1365 QuantizedVector<T>({
1366 1.0540f, 1.7638f, 2.5385f, 2.3570f,
1367 1.2909f, 2.1602f, 3.1091f, 2.8867f,
1368 1.2909f, 2.1602f, 3.1091f, 2.8867f,
1369 1.0540f, 1.7638f, 2.5385f, 2.3570f,
1373 return SimplePooling2dTestImpl<ArmnnType>(
1374 workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected);
1383 bool forceNoPadding)
1385 return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::Float32>(
1386 workloadFactory, memoryManager, tensorHandleFactory, forceNoPadding);
1393 bool forceNoPadding)
1395 return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::QAsymmU8>(
1396 workloadFactory, memoryManager, tensorHandleFactory, forceNoPadding, 3.0f, -5);
1403 bool forceNoPadding)
1405 return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::QSymmS16>(
1406 workloadFactory, memoryManager, tensorHandleFactory, forceNoPadding);
1413 bool forceNoPadding)
1415 return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::Float32>(
1416 workloadFactory, memoryManager, tensorHandleFactory, forceNoPadding);
1423 bool forceNoPadding)
1425 return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::QAsymmU8>(
1426 workloadFactory, memoryManager, tensorHandleFactory, forceNoPadding, 0.1f, 128);
1433 bool forceNoPadding)
1435 return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::QSymmS16>(
1436 workloadFactory, memoryManager, tensorHandleFactory, forceNoPadding);
1445 return SimpleMaxPooling2dTestCommon<armnn::DataType::Float32>(
1446 workloadFactory, memoryManager, tensorHandleFactory, dataLayout);
1455 return SimpleMaxPooling2dTestCommon<armnn::DataType::QAsymmU8>(
1456 workloadFactory, memoryManager, tensorHandleFactory, dataLayout);
1465 return SimpleMaxPooling2dTestCommon<armnn::DataType::QSymmS16>(
1466 workloadFactory, memoryManager, tensorHandleFactory, dataLayout);
1473 return IgnorePaddingSimpleMaxPooling2dTestCommon<armnn::DataType::Float32>(
1474 workloadFactory, memoryManager, tensorHandleFactory);
1482 return IgnorePaddingSimpleMaxPooling2dTestCommon<armnn::DataType::QAsymmU8>(
1483 workloadFactory, memoryManager, tensorHandleFactory, 1.0f, -5);
1491 return IgnorePaddingSimpleMaxPooling2dTestCommon<armnn::DataType::QSymmS16>(
1492 workloadFactory, memoryManager, tensorHandleFactory);
1500 return IgnorePaddingMaxPooling2dSize3TestCommon<armnn::DataType::Float32>(
1501 workloadFactory, memoryManager, tensorHandleFactory);
1509 return IgnorePaddingMaxPooling2dSize3TestCommon<armnn::DataType::QAsymmU8>(
1510 workloadFactory, memoryManager, tensorHandleFactory, 1.0f, -5);
1518 return IgnorePaddingMaxPooling2dSize3TestCommon<armnn::DataType::QSymmS16>(
1519 workloadFactory, memoryManager, tensorHandleFactory);
1528 return SimpleAveragePooling2dTestCommon<armnn::DataType::Float32>(
1529 workloadFactory, memoryManager, tensorHandleFactory, dataLayout);
1538 return SimpleAveragePooling2dTestCommon<armnn::DataType::QAsymmU8>(
1539 workloadFactory, memoryManager, tensorHandleFactory, dataLayout, 0.5, -1);
1548 return SimpleAveragePooling2dTestCommon<armnn::DataType::QSymmS16>(
1549 workloadFactory, memoryManager, tensorHandleFactory, dataLayout);
1556 bool forceNoPadding)
1558 return IgnorePaddingAveragePooling2dSize3x2Stride2x2TestCommon<armnn::DataType::Float32>(
1559 workloadFactory, memoryManager, tensorHandleFactory, forceNoPadding);
1567 return LargeTensorsAveragePooling2dTestCommon<armnn::DataType::Float32>(
1568 workloadFactory, memoryManager, tensorHandleFactory);
1576 return LargeTensorsAveragePooling2dTestCommon<armnn::DataType::QAsymmU8>(
1577 workloadFactory, memoryManager, tensorHandleFactory, 0.5, -1);
1585 return LargeTensorsAveragePooling2dTestCommon<armnn::DataType::QSymmS16>(
1586 workloadFactory, memoryManager, tensorHandleFactory);
1593 return IgnorePaddingSimpleAveragePooling2dTestCommon<armnn::DataType::Float32>(
1594 workloadFactory, memoryManager, tensorHandleFactory);
1602 return IgnorePaddingSimpleAveragePooling2dTestCommon<armnn::DataType::QAsymmU8>(
1603 workloadFactory, memoryManager, tensorHandleFactory);
1611 return IgnorePaddingSimpleAveragePooling2dTestCommon<armnn::DataType::QSymmS16>(
1612 workloadFactory, memoryManager, tensorHandleFactory);
1620 return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<armnn::DataType::Float32>(
1621 workloadFactory, memoryManager, tensorHandleFactory);
1629 return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<armnn::DataType::QAsymmU8>(
1630 workloadFactory, memoryManager, tensorHandleFactory);
1638 return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<armnn::DataType::QSymmS16>(
1639 workloadFactory, memoryManager, tensorHandleFactory);
1647 return IgnorePaddingAveragePooling2dSize3TestCommon<armnn::DataType::Float32>(
1648 workloadFactory, memoryManager, tensorHandleFactory);
1656 return IgnorePaddingAveragePooling2dSize3TestCommon<armnn::DataType::QAsymmU8>(
1657 workloadFactory, memoryManager, tensorHandleFactory);
1665 return IgnorePaddingAveragePooling2dSize3TestCommon<armnn::DataType::QSymmS16>(
1666 workloadFactory, memoryManager, tensorHandleFactory);
1675 return SimpleL2Pooling2dTestCommon<armnn::DataType::Float32>(
1676 workloadFactory, memoryManager, tensorHandleFactory, dataLayout);
1685 return SimpleL2Pooling2dTestCommon<armnn::DataType::QAsymmU8>(
1686 workloadFactory, memoryManager, tensorHandleFactory, dataLayout);
1695 return SimpleL2Pooling2dTestCommon<armnn::DataType::QSymmS16>(
1696 workloadFactory, memoryManager, tensorHandleFactory, dataLayout);
1704 return L2Pooling2dSize3Stride1TestCommon<armnn::DataType::Float32>(
1705 workloadFactory, memoryManager, tensorHandleFactory);
1713 return L2Pooling2dSize3Stride1TestCommon<armnn::DataType::QAsymmU8>(
1714 workloadFactory, memoryManager, tensorHandleFactory);
1722 return L2Pooling2dSize3Stride1TestCommon<armnn::DataType::QSymmS16>(
1723 workloadFactory, memoryManager, tensorHandleFactory);
1731 return L2Pooling2dSize3Stride3TestCommon<armnn::DataType::Float32>(
1732 workloadFactory, memoryManager, tensorHandleFactory);
1740 return L2Pooling2dSize3Stride3TestCommon<armnn::DataType::QAsymmU8>(
1741 workloadFactory, memoryManager, tensorHandleFactory);
1749 return L2Pooling2dSize3Stride3TestCommon<armnn::DataType::QSymmS16>(
1750 workloadFactory, memoryManager, tensorHandleFactory);
1757 return L2Pooling2dSize3Stride4TestCommon<armnn::DataType::Float32>(
1758 workloadFactory, memoryManager, tensorHandleFactory);
1766 return L2Pooling2dSize3Stride4TestCommon<armnn::DataType::QAsymmU8>(
1767 workloadFactory, memoryManager, tensorHandleFactory);
1775 return L2Pooling2dSize3Stride4TestCommon<armnn::DataType::QSymmS16>(
1776 workloadFactory, memoryManager, tensorHandleFactory);
1784 return L2Pooling2dSize7TestCommon<armnn::DataType::Float32>(
1785 workloadFactory, memoryManager, tensorHandleFactory);
1793 return L2Pooling2dSize7TestCommon<armnn::DataType::QAsymmU8>(
1794 workloadFactory, memoryManager, tensorHandleFactory);
1802 return L2Pooling2dSize7TestCommon<armnn::DataType::QSymmS16>(
1803 workloadFactory, memoryManager, tensorHandleFactory);
1811 return L2Pooling2dSize9TestCommon<armnn::DataType::Float32>(
1812 workloadFactory, memoryManager, tensorHandleFactory);
1820 return L2Pooling2dSize9TestCommon<armnn::DataType::QAsymmU8>(
1821 workloadFactory, memoryManager, tensorHandleFactory);
1829 return L2Pooling2dSize9TestCommon<armnn::DataType::QSymmS16>(
1830 workloadFactory, memoryManager, tensorHandleFactory);
1837 return IgnorePaddingSimpleL2Pooling2dTestCommon<armnn::DataType::Float32>(
1838 workloadFactory, memoryManager, tensorHandleFactory);
1846 return IgnorePaddingSimpleL2Pooling2dTestCommon<armnn::DataType::QAsymmU8>(
1847 workloadFactory, memoryManager, tensorHandleFactory);
1855 return IgnorePaddingSimpleL2Pooling2dTestCommon<armnn::DataType::QSymmS16>(
1856 workloadFactory, memoryManager, tensorHandleFactory);
1864 return IgnorePaddingL2Pooling2dSize3TestCommon<armnn::DataType::Float32>(
1865 workloadFactory, memoryManager, tensorHandleFactory);
1873 return IgnorePaddingL2Pooling2dSize3TestCommon<armnn::DataType::QAsymmU8>(
1874 workloadFactory, memoryManager, tensorHandleFactory);
1882 return IgnorePaddingL2Pooling2dSize3TestCommon<armnn::DataType::QSymmS16>(
1883 workloadFactory, memoryManager, tensorHandleFactory);
1891 return AsymmetricNonSquarePooling2dTestCommon<armnn::DataType::Float32>(
1892 workloadFactory, memoryManager, tensorHandleFactory);
1900 return AsymmetricNonSquarePooling2dTestCommon<armnn::DataType::QAsymmU8>(
1901 workloadFactory, memoryManager, tensorHandleFactory);
1909 return AsymmetricNonSquarePooling2dTestCommon<armnn::DataType::QSymmS16>(
1910 workloadFactory, memoryManager, tensorHandleFactory);
1921 return ComparePooling2dTestCommon<armnn::DataType::Float32>(
1922 workloadFactory, memoryManager, refWorkloadFactory, tensorHandleFactory, refTensorHandleFactory, poolingType);
1933 return ComparePooling2dTestCommon<armnn::DataType::QAsymmU8>(
1934 workloadFactory, memoryManager, refWorkloadFactory, tensorHandleFactory, refTensorHandleFactory,
1935 poolingType, 0.1f, 128);
1946 return ComparePooling2dTestCommon<armnn::DataType::QSymmS16>(
1947 workloadFactory, memoryManager, refWorkloadFactory, tensorHandleFactory, refTensorHandleFactory, poolingType);
unsigned int GetNumElements() const
Function that calculates the tensor elements by multiplying all dimension sizes which are specified...
LayerTestResult< uint8_t, 4 > L2Pooling2dSize3Stride1Uint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
virtual const BackendId & GetBackendId() const =0
LayerTestResult< int16_t, 4 > SimpleL2Pooling2dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout dataLayout)
LayerTestResult< uint8_t, 4 > SimpleMaxPooling2dSize2x2Stride2x2Uint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool forceNoPadding)
uint32_t m_PadBottom
Padding bottom value in the height dimension.
unsigned int GetWidthIndex() const
LayerTestResult< int16_t, 4 > IgnorePaddingAveragePooling2dSize3Int16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
bool IsPooling2dSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const Pooling2dDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
LayerTestResult< float, 4 > IgnorePaddingAveragePooling2dSize3Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
const TensorShape & GetShape() const
uint32_t m_PadLeft
Padding left value in the width dimension.
LayerTestResult< int16_t, 4 > LargeTensorsAveragePooling2dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< int16_t, 4 > SimpleMaxPooling2dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout dataLayout)
LayerTestResult< int16_t, 4 > IgnorePaddingSimpleMaxPooling2dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< uint8_t, 4 > SimpleMaxPooling2dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout dataLayout)
LayerTestResult< float, 4 > IgnorePaddingSimpleL2Pooling2dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
uint32_t m_PoolWidth
Pooling width value.
LayerTestResult< uint8_t, 4 > IgnorePaddingL2Pooling2dSize3Uint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
The padding fields don't count and are ignored.
PaddingMethod m_PaddingMethod
The padding method to be used. (Exclude, IgnoreValue).
LayerTestResult< float, 4 > IgnorePaddingL2Pooling2dSize3Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< float, 4 > SimpleMaxPooling2dSize2x2Stride2x2Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool forceNoPadding)
uint32_t m_PadTop
Padding top value in the height dimension.
LayerTestResult< int16_t, 4 > SimpleMaxPooling2dSize3x3Stride2x4Int16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool forceNoPadding)
LayerTestResult< uint8_t, 4 > IgnorePaddingSimpleAveragePooling2dNoPaddingUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< float, 4 > SimpleMaxPooling2dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout dataLayout)
void IgnoreUnused(Ts &&...)
LayerTestResult< float, 4 > L2Pooling2dSize9Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerDescriptor m_Parameters
LayerTestResult< int16_t, 4 > ComparePooling2dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, armnn::IWorkloadFactory &refWorkloadFactory, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::ITensorHandleFactory &refTensorHandleFactory, armnn::PoolingAlgorithm poolingType)
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
LayerTestResult< int16_t, 4 > L2Pooling2dSize3Stride4Int16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< int16_t, 4 > AsymmetricNonSquarePooling2dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
unsigned int GetHeightIndex() const
LayerTestResult< float, 4 > L2Pooling2dSize3Stride1Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< uint8_t, 4 > SimpleL2Pooling2dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout dataLayout)
LayerTestResult< uint8_t, 4 > ComparePooling2dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, armnn::IWorkloadFactory &refWorkloadFactory, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::ITensorHandleFactory &refTensorHandleFactory, armnn::PoolingAlgorithm poolingType)
uint32_t m_PoolHeight
Pooling height value.
void Permute(const armnn::TensorShape &dstShape, const armnn::PermutationVector &mappings, const void *src, void *dst, size_t dataTypeSize)
LayerTestResult< float, 4 > SimpleL2Pooling2dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout dataLayout)
LayerTestResult< int16_t, 4 > L2Pooling2dSize3Stride3Int16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< float, 4 > L2Pooling2dSize3Stride3Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< uint8_t, 4 > L2Pooling2dSize3Stride4Uint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< float, 4 > IgnorePaddingAveragePooling2dSize3x2Stride2x2Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool forceNoPadding)
LayerTestResult< float, 4 > LargeTensorsAveragePooling2dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< uint8_t, 4 > SimpleAveragePooling2dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout dataLayout)
uint32_t m_PadRight
Padding right value in the width dimension.
LayerTestResult< float, 4 > SimpleAveragePooling2dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout dataLayout)
std::shared_ptr< IMemoryManager > IMemoryManagerSharedPtr
Provides access to the appropriate indexes for Channels, Height and Width based on DataLayout...
LayerTestResult< uint8_t, 4 > IgnorePaddingSimpleAveragePooling2dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< float, 4 > L2Pooling2dSize7Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< uint8_t, 4 > L2Pooling2dSize9Uint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
void SetQuantizationScale(float scale)
LayerTestResult< uint8_t, 4 > IgnorePaddingAveragePooling2dSize3Uint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< float, 4 > L2Pooling2dSize3Stride4Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< float, 4 > ComparePooling2dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, armnn::IWorkloadFactory &refWorkloadFactory, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::ITensorHandleFactory &refTensorHandleFactory, armnn::PoolingAlgorithm poolingType)
void CopyDataFromITensorHandle(void *memory, const armnn::ITensorHandle *tensorHandle)
LayerTestResult< int16_t, 4 > L2Pooling2dSize7Int16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< float, 4 > SimpleMaxPooling2dSize3x3Stride2x4Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool forceNoPadding)
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
virtual std::unique_ptr< IWorkload > CreatePooling2d(const Pooling2dQueueDescriptor &descriptor, const WorkloadInfo &info) const
LayerTestResult< uint8_t, 4 > AsymmetricNonSquarePooling2dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< float, 4 > AsymmetricNonSquarePooling2dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< int16_t, 4 > IgnorePaddingSimpleAveragePooling2dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< uint8_t, 4 > IgnorePaddingSimpleMaxPooling2dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
PoolingAlgorithm m_PoolType
The pooling algorithm to use (Max, Average, L2).
LayerTestResult< int16_t, 4 > IgnorePaddingSimpleAveragePooling2dNoPaddingInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
The padding fields count, but are ignored.
LayerTestResult< uint8_t, 4 > IgnorePaddingMaxPooling2dSize3Uint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< float, 4 > IgnorePaddingMaxPooling2dSize3Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
OutputShapeRounding m_OutputShapeRounding
The rounding method for the output shape. (Floor, Ceiling).
LayerTestResult< int16_t, 4 > L2Pooling2dSize9Int16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
std::enable_if_t< std::is_unsigned< Source >::value &&std::is_unsigned< Dest >::value, Dest > numeric_cast(Source source)
armnn::TensorInfo GetTensorInfo(unsigned int numberOfBatches, unsigned int numberOfChannels, unsigned int height, unsigned int width, const armnn::DataLayout dataLayout, const armnn::DataType dataType)
LayerTestResult< uint8_t, 4 > LargeTensorsAveragePooling2dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
Contains information about inputs and outputs to a layer.
LayerTestResult< uint8_t, 4 > L2Pooling2dSize3Stride3Uint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
void SetQuantizationOffset(int32_t offset)
LayerTestResult< uint8_t, 4 > IgnorePaddingSimpleL2Pooling2dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< int16_t, 4 > SimpleAveragePooling2dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout dataLayout)
LayerTestResult< float, 4 > IgnorePaddingSimpleAveragePooling2dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< int16_t, 4 > IgnorePaddingMaxPooling2dSize3Int16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< float, 4 > IgnorePaddingSimpleAveragePooling2dNoPaddingTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
A Pooling2dDescriptor for the Pooling2dLayer.
LayerTestResult< float, 4 > IgnorePaddingSimpleMaxPooling2dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< int16_t, 4 > L2Pooling2dSize3Stride1Int16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< int16_t, 4 > IgnorePaddingL2Pooling2dSize3Int16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< uint8_t, 4 > SimpleMaxPooling2dSize3x3Stride2x4Uint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool forceNoPadding)
LayerTestResult< int16_t, 4 > IgnorePaddingSimpleL2Pooling2dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< int16_t, 4 > SimpleMaxPooling2dSize2x2Stride2x2Int16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool forceNoPadding)
unsigned int GetChannelsIndex() const
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo) const =0
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
LayerTestResult< uint8_t, 4 > L2Pooling2dSize7Uint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)