26 #include <boost/numeric/conversion/cast.hpp> 33 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
40 const boost::multi_array<T, 4>& input,
41 const boost::multi_array<T, 4>& outputExpected)
52 unsigned int inputChannels =
boost::numeric_cast<
unsigned int>(input.shape()[channelsIndex]);
55 unsigned int outputHeight =
boost::numeric_cast<
unsigned int>(outputExpected.shape()[heightIndex]);
56 unsigned int outputWidth =
boost::numeric_cast<
unsigned int>(outputExpected.shape()[widthIndex]);
57 unsigned int outputChannels =
boost::numeric_cast<
unsigned int>(outputExpected.shape()[channelsIndex]);
58 unsigned int outputBatchSize =
boost::numeric_cast<
unsigned int>(outputExpected.shape()[0]);
61 inputBatchSize, inputChannels, inputHeight, inputWidth, dataLayout, ArmnnType);
64 outputBatchSize, outputChannels, outputHeight, outputWidth, dataLayout, ArmnnType);
67 if(armnn::IsQuantizedType<T>())
69 inputTensorInfo.SetQuantizationScale(qScale);
70 inputTensorInfo.SetQuantizationOffset(qOffset);
78 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.
CreateTensorHandle(inputTensorInfo);
79 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.
CreateTensorHandle(outputTensorInfo);
87 AddInputToWorkload(queueDescriptor, workloadInfo, inputTensorInfo, inputHandle.get());
88 AddOutputToWorkload(queueDescriptor, workloadInfo, outputTensorInfo, outputHandle.get());
92 const size_t reasonIfUnsupportedMaxLen = 255;
93 char reasonIfUnsupported[reasonIfUnsupportedMaxLen+1];
95 queueDescriptor.m_Parameters,
96 reasonIfUnsupported, reasonIfUnsupportedMaxLen);
97 if (!result.supported)
102 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.
CreatePooling2d(queueDescriptor, workloadInfo);
104 inputHandle->Allocate();
105 outputHandle->Allocate();
113 result.outputExpected = outputExpected;
127 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
147 unsigned int inputWidth = 8;
148 unsigned int inputHeight = 13;
149 unsigned int outputWidth =
152 unsigned int outputHeight =
155 unsigned int channels = 2;
156 unsigned int batchSize = 2;
158 armnn::TensorInfo inputTensorInfo({ batchSize, channels, inputHeight, inputWidth }, ArmnnType);
159 armnn::TensorInfo outputTensorInfo({ batchSize, channels, outputHeight, outputWidth }, ArmnnType);
162 if(armnn::IsQuantizedType<T>())
165 inputTensorInfo.SetQuantizationOffset(qOffset);
170 std::vector<float> singleChannelData({
171 0.0f, 4.0f, 8.0f, 1.0f, 6.0f, 4.0f, 5.0f, 8.0f,
172 1.0f, 1.0f, 6.0f, 0.0f, 3.0f, 7.0f, 4.0f, 7.0f,
173 8.0f, 5.0f, 0.0f, 0.0f, 8.0f, 3.0f, 4.0f, 3.0f,
174 8.0f, 2.0f, 5.0f, 4.0f, 1.0f, 9.0f, 2.0f, 0.0f,
175 5.0f, 4.0f, 5.0f, 0.0f, 0.0f, 0.0f, 7.0f, 2.0f,
176 1.0f, 2.0f, 6.0f, 2.0f, 7.0f, 9.0f, 5.0f, 2.0f,
177 9.0f, 7.0f, 3.0f, 1.0f, 3.0f, 4.0f, 8.0f, 3.0f,
178 1.0f, 0.0f, 0.0f, 5.0f, 5.0f, 4.0f, 2.0f, 0.0f,
179 6.0f, 4.0f, 3.0f, 6.0f, 9.0f, 5.0f, 5.0f, 6.0f,
180 8.0f, 7.0f, 9.0f, 6.0f, 1.0f, 4.0f, 1.0f, 9.0f,
181 7.0f, 1.0f, 9.0f, 2.0f, 9.0f, 9.0f, 8.0f, 1.0f,
182 4.0f, 4.0f, 5.0f, 9.0f, 2.0f, 6.0f, 6.0f, 4.0f,
183 3.0f, 5.0f, 4.0f, 0.0f, 1.0f, 5.0f, 9.0f, 7.0f,
187 std::vector<float> inputData;
188 auto negator = [](
float f) {
return -f; };
191 inputData.insert(inputData.end(), singleChannelData.begin(), singleChannelData.end());
192 std::transform(singleChannelData.begin(), singleChannelData.end(), std::back_inserter(inputData), negator);
195 inputData.insert(inputData.end(), singleChannelData.begin(), singleChannelData.end());
196 std::transform(singleChannelData.begin(), singleChannelData.end(), std::back_inserter(inputData), negator);
198 auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(inputData, qScale, qOffset));
201 auto shape(GetTensorShapeAsArray<4>(outputTensorInfo));
202 boost::multi_array<T, 4> outputExpected(shape);
205 outputExpected = MakeTensor<T, 4>(outputTensorInfo,
227 outputExpected = MakeTensor<T, 4>(outputTensorInfo,
229 0.0f, 8.0f, 8.0f, 8.0f, 8.0f, 8.0f,
230 0.0f, 9.0f, 7.0f, 9.0f, 9.0f, 3.0f,
231 0.0f, 8.0f, 9.0f, 9.0f, 9.0f, 9.0f,
233 0.0f, 0.0f, 0.0f, 0.0f,-3.0f,-3.0f,
234 0.0f,-1.0f, 0.0f, 0.0f, 0.0f,-2.0f,
235 0.0f,-1.0f,-1.0f,-1.0f,-1.0f,-1.0f,
237 0.0f, 8.0f, 8.0f, 8.0f, 8.0f, 8.0f,
238 0.0f, 9.0f, 7.0f, 9.0f, 9.0f, 3.0f,
239 0.0f, 8.0f, 9.0f, 9.0f, 9.0f, 9.0f,
241 0.0f, 0.0f, 0.0f, 0.0f,-3.0f,-3.0f,
242 0.0f,-1.0f, 0.0f, 0.0f, 0.0f,-2.0f,
243 0.0f,-1.0f,-1.0f,-1.0f,-1.0f,-1.0f
248 return SimplePooling2dTestImpl<ArmnnType>(
249 workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
252 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
271 if(armnn::IsQuantizedType<T>())
279 std::vector<T> inputData(
281 1.0f, 2.0f, 5.0f, 6.0f,
282 3.0f, 4.0f, 7.0f, 8.0f,
283 9.0f, 10.0f, 13.0f, 14.0f,
284 11.0f, 12.0f, 15.0f, 16.0f,
286 17.0f, 18.0f, 21.0f, 22.0f,
287 19.0f, 20.0f, 23.0f, 24.0f,
288 25.0f, 26.0f, 29.0f, 30.0f,
289 27.0f, 28.0f, 31.0f, 32.0f,
293 std::vector<T> outputData(
306 std::vector<T> tmp(inputData.size());
310 std::vector<T> tmp1(outputData.size());
315 auto input = MakeTensor<T, 4>(inputTensorInfo, inputData);
317 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputData);
319 return SimplePooling2dTestImpl<ArmnnType>(
320 workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
323 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
342 if(armnn::IsQuantizedType<T>())
350 std::vector<T> inputData(
352 2.0f, 2.0f, 6.0f, 6.0f,
353 4.0f, 4.0f, 8.0f, 8.0f,
354 10.0f, 12.0f, 14.0f, 16.0f,
355 10.0f, 12.0f, 16.0f, 14.0f,
357 18.0f, 20.0f, 24.0f, 22.0f,
358 20.0f, 18.0f, 22.0f, 24.0f,
359 26.0f, 28.0f, 0.0f, 0.0f,
360 26.0f, 28.0f, 0.0f, 0.0f,
364 std::vector<T> outputData(
377 std::vector<T> tmp(inputData.size());
381 std::vector<T> tmp1(outputData.size());
386 auto input = MakeTensor<T, 4>(inputTensorInfo, inputData);
388 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputData);
390 return SimplePooling2dTestImpl<ArmnnType>(
391 workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
394 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
415 if(armnn::IsQuantizedType<T>())
423 std::vector<T> inputVec;
427 inputVec.push_back(1);
430 auto input = MakeTensor<T, 4>(inputTensorInfo, inputVec);
432 std::vector<T> outputVec;
436 outputVec.push_back(1);
439 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputVec);
441 return SimplePooling2dTestImpl<ArmnnType>(
442 workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
445 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
463 std::vector<T> inputData(
465 1.0f, 7.0f, 5.0f, 5.0f,
466 1.0f, 7.0f, 5.0f, 5.0f,
467 3.0f, 3.0f, 1.0f, 1.0f,
468 3.0f, 3.0f, 1.0f, 1.0f,
470 1.0f, 7.0f, 0.0f, 0.0f,
471 1.0f, 7.0f, 2.0f, 0.0f,
472 0.0f, 2.0f, 1.0f, 1.0f,
473 0.0f, 0.0f, 1.0f, 1.0f,
477 std::vector<T> outputData(
490 std::vector<T> tmp(inputData.size());
494 std::vector<T> tmp1(outputData.size());
499 auto input = MakeTensor<T, 4>(inputTensorInfo, inputData);
501 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputData);
503 return SimplePooling2dTestImpl<ArmnnType>(
504 workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
507 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
521 auto input = MakeTensor<T, 4>(inputTensorInfo,
523 2.0f, 1.0f, 5.0f, 2.0f,
524 1.0f, 2.0f, 2.0f, 1.0f,
525 5.0f, 4.0f, 1.0f, 5.0f,
526 2.0f, 1.0f, 5.0f, 2.0f,
531 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
538 return SimplePooling2dTestImpl<ArmnnType>(
539 workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
542 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
556 auto input = MakeTensor<T, 4>(inputTensorInfo,
558 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
559 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
560 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
561 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
562 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
563 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
564 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
565 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
566 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
571 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
579 return SimplePooling2dTestImpl<ArmnnType>(
580 workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
583 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
597 auto input = MakeTensor<T, 4>(inputTensorInfo,
599 2.0f, 1.0f, 5.0f, 0.0f, 2.0f, 1.0f, 5.0f,
600 1.0f, 2.0f, 2.0f, 0.0f, 1.0f, 2.0f, 2.0f,
601 5.0f, 4.0f, 1.0f, 0.0f, 5.0f, 4.0f, 1.0f,
602 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
603 2.0f, 1.0f, 5.0f, 0.0f, 2.0f, 1.0f, 5.0f,
604 1.0f, 2.0f, 2.0f, 0.0f, 1.0f, 2.0f, 2.0f,
605 5.0f, 4.0f, 1.0f, 0.0f, 5.0f, 4.0f, 1.0f,
610 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
617 return SimplePooling2dTestImpl<ArmnnType>(
618 workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
621 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
635 auto input = MakeTensor<T, 4>(inputTensorInfo,
637 1.0f, 0.0f, 2.0f, 0.0f, 3.0f, 0.0f, 4.0f,
638 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
639 0.0f, 5.0f, 0.0f, 6.0f, 0.0f, 7.0f, 0.0f,
640 8.0f, 0.0f, 9.0f, 0.0f, 10.0f, 0.0f, 5.0f,
641 0.0f, 5.0f, 0.0f, 2.0f, 0.0f, 1.0f, 1.0f,
642 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
643 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
648 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
654 return SimplePooling2dTestImpl<ArmnnType>(
655 workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
658 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
672 auto input = MakeTensor<T, 4>(inputTensorInfo,
674 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
675 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
676 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
677 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
678 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
679 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
680 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
681 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
682 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
687 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
693 return SimplePooling2dTestImpl<ArmnnType>(
694 workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
697 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
709 descriptor.m_PoolWidth = 2;
710 descriptor.m_PoolHeight = 3;
711 descriptor.m_StrideX = 2;
712 descriptor.m_StrideY = 1;
713 descriptor.m_PadLeft = 2;
714 descriptor.m_PadRight = 0;
715 descriptor.m_PadTop = 1;
716 descriptor.m_PadBottom = 2;
721 auto input = MakeTensor<T, 4>(inputTensorInfo,
728 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
730 0.0f, 3.0f, 0.0f, 3.0f,
734 return SimplePooling2dTestImpl<ArmnnType>(
735 workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
738 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
748 const unsigned int inputWidth = 16;
749 const unsigned int inputHeight = 32;
750 const unsigned int channelCount = 2;
751 const unsigned int batchSize = 5;
753 const unsigned int poolSize = 3;
754 const unsigned int strideX = 2;
755 const unsigned int strideY = 4;
756 const unsigned int padX = 0;
757 const unsigned int padY = 0;
759 const unsigned int outputWidth = (inputWidth + 2 * padX + strideX - poolSize) / strideX;
760 const unsigned int outputHeight = (inputHeight + 2 * padY + strideY - poolSize) / strideY;
765 unsigned int inputShape[] = { batchSize, channelCount, inputHeight, inputWidth };
766 unsigned int outputShape[] = { batchSize, channelCount, outputHeight, outputWidth };
772 if(armnn::IsQuantizedType<T>())
780 boost::multi_array<T, 4> input = MakeRandomTensor<T, 4>(inputTensorInfo, 81715);
785 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.
CreateTensorHandle(inputTensorInfo);
786 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.
CreateTensorHandle(outputTensorInfo);
791 AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
792 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
793 data.m_Parameters.m_PoolType = poolingType;
794 data.m_Parameters.m_PoolWidth = poolSize;
795 data.m_Parameters.m_PoolHeight = poolSize;
796 data.m_Parameters.m_StrideX = strideX;
797 data.m_Parameters.m_StrideY = strideY;
798 data.m_Parameters.m_PadLeft = padX;
799 data.m_Parameters.m_PadRight = padX;
800 data.m_Parameters.m_PadTop = padY;
801 data.m_Parameters.m_PadBottom = padY;
805 std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.
CreateTensorHandle(outputTensorInfo);
806 std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refWorkloadFactory.
CreateTensorHandle(inputTensorInfo);
811 const size_t reasonIfUnsupportedMaxLen = 255;
812 char reasonIfUnsupported[reasonIfUnsupportedMaxLen+1];
815 reasonIfUnsupported, reasonIfUnsupportedMaxLen);
816 if (!comparisonResult.supported)
818 return comparisonResult;
823 SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
824 SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
826 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.
CreatePooling2d(data, info);
827 std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.
CreatePooling2d(refData, refInfo);
829 outputHandleRef->Allocate();
830 inputHandleRef->Allocate();
831 inputHandle->Allocate();
832 outputHandle->Allocate();
838 workloadRef->Execute();
843 return comparisonResult;
855 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
874 unsigned int inputWidth = 4;
876 unsigned int inputHeight = 4;
878 unsigned int outputWidth =
881 unsigned int outputHeight =
884 unsigned int channels = 1;
885 unsigned int batchSize = 1;
887 std::vector<float> inputData = {
888 510.0f, 222.0f, 780.0f, 654.0f,
889 141.0f, 276.0f, 15.0f, 546.0f,
890 303.0f, 618.0f, 582.0f, 339.0f,
891 438.0f, 564.0f, 573.0f, 402.0f
895 std::vector<float> expectedOutputDataWithPadding = {
896 0.0f, 510.0f, 780.0f, 654.0f, 0.0f,
897 0.0f, 438.0f, 618.0f, 402.0f, 0.0f
900 std::vector<float> expectedOutputDataNoPadding = {
905 armnn::TensorInfo inputTensorInfo({ batchSize, channels, inputHeight, inputWidth }, ArmnnType);
908 armnn::TensorInfo outputTensorInfo({ batchSize, channels, outputHeight, outputWidth }, ArmnnType);
911 if(armnn::IsQuantizedType<T>())
914 inputTensorInfo.SetQuantizationOffset(qOffset);
919 auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(inputData, qScale, qOffset));
921 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
922 forceNoPadding ? QuantizedVector<T>(expectedOutputDataNoPadding, qScale, qOffset) :
923 QuantizedVector<T>(expectedOutputDataWithPadding, qScale, qOffset));
925 return SimplePooling2dTestImpl<ArmnnType>(
926 workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
938 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
952 descriptor.
m_PadLeft = (forceNoPadding) ? 0 : 1;
959 unsigned int inputWidth = 3;
960 unsigned int inputHeight = 2;
961 unsigned int outputWidth =
964 unsigned int outputHeight =
967 unsigned int channels = 1;
968 unsigned int batchSize = 1;
970 std::vector<float> inputData = {
975 std::vector<float> expectedOutputDataWithPadding = {
979 std::vector<float> expectedOutputDataNoPadding = {
983 armnn::TensorInfo inputTensorInfo({ batchSize, channels, inputHeight, inputWidth }, ArmnnType);
986 armnn::TensorInfo outputTensorInfo({ batchSize, channels, outputHeight, outputWidth }, ArmnnType);
989 if(armnn::IsQuantizedType<T>())
992 inputTensorInfo.SetQuantizationOffset(qOffset);
997 auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(inputData, qScale, qOffset));
999 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
1000 forceNoPadding ? QuantizedVector<T>(expectedOutputDataNoPadding, qScale, qOffset) :
1001 QuantizedVector<T>(expectedOutputDataWithPadding, qScale, qOffset));
1003 return SimplePooling2dTestImpl<ArmnnType>(
1004 workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
1008 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
1012 float qScale = 1.0f,
1013 int32_t qOffset = 0)
1029 if(armnn::IsQuantizedType<T>())
1032 inputTensorInfo.SetQuantizationOffset(qOffset);
1037 auto input = MakeTensor<T, 4>(inputTensorInfo,
1038 QuantizedVector<T>({
1039 -1.0f, -2.0f, 3.0f, 4.0f,
1040 -1.0f, -2.0f, 3.0f, 4.0f,
1041 1.0f, 2.0f, -3.0f, -4.0f,
1042 1.0f, 2.0f, -3.0f, -4.0f,
1046 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
1047 QuantizedVector<T>({
1054 return SimplePooling2dTestImpl<ArmnnType>(
1055 workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
1058 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
1062 float qScale = 1.0f,
1063 int32_t qOffset = 0)
1079 if(armnn::IsQuantizedType<T>())
1082 inputTensorInfo.SetQuantizationOffset(qOffset);
1087 auto input = MakeTensor<T, 4>(inputTensorInfo,
1088 QuantizedVector<T>({
1089 -1.0f, -2.0f, 3.0f, 4.0f,
1090 -1.0f, -2.0f, 3.0f, 4.0f,
1091 1.0f, 2.0f, -3.0f, -4.0f,
1092 1.0f, 2.0f, -3.0f, -4.0f,
1096 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
1097 QuantizedVector<T>({
1098 -1.0f, 3.0f, 4.0f, 4.0f,
1099 2.0f, 3.0f, 4.0f, 4.0f,
1100 2.0f, 3.0f, 4.0f, 4.0f,
1101 2.0f, 2.0f, 2.0f, -3.0f,
1105 return SimplePooling2dTestImpl<ArmnnType>(
1106 workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
1109 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
1113 float qScale = 1.0f,
1114 int32_t qOffset = 0)
1130 if(armnn::IsQuantizedType<T>())
1133 inputTensorInfo.SetQuantizationOffset(qOffset);
1138 auto input = MakeTensor<T, 4>(inputTensorInfo,
1139 QuantizedVector<T>({
1140 12.0f, 20.0f, 32.0f, 40.0f,
1141 12.0f, 20.0f, 32.0f, 40.0f,
1142 12.0f, 20.0f, 32.0f, 40.0f,
1143 12.0f, 20.0f, 32.0f, 40.0f,
1147 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
1148 QuantizedVector<T>({
1155 return SimplePooling2dTestImpl<ArmnnType>(
1156 workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
1159 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
1163 float qScale = 1.0f,
1164 int32_t qOffset = 0)
1181 if(armnn::IsQuantizedType<T>())
1184 inputTensorInfo.SetQuantizationOffset(qOffset);
1189 auto input = MakeTensor<T, 4>(inputTensorInfo,
1190 QuantizedVector<T>({
1191 1.0f, 2.0f, 3.0f, 4.0f,
1192 1.0f, 2.0f, 3.0f, 4.0f,
1193 1.0f, 2.0f, 3.0f, 4.0f,
1194 1.0f, 2.0f, 3.0f, 4.0f,
1198 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
1199 QuantizedVector<T>({
1205 return SimplePooling2dTestImpl<ArmnnType>(
1206 workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
1209 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
1213 float qScale = 1.0f,
1214 int32_t qOffset = 0)
1230 if(armnn::IsQuantizedType<T>())
1233 inputTensorInfo.SetQuantizationOffset(qOffset);
1238 auto input = MakeTensor<T, 4>(inputTensorInfo,
1239 QuantizedVector<T>({
1240 9.0f, 27.0f, 18.0f, 36.0f,
1241 18.0f, 9.0f, 18.0f, 9.0f,
1242 27.0f, 18.0f, 9.0f, 27.0f,
1243 9.0f, 27.0f, 9.0f, 18.0f,
1247 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
1248 QuantizedVector<T>({
1249 7.0f, 11.0f, 13.0f, 9.0f,
1250 12.0f, 17.0f, 19.0f, 13.0f,
1251 12.0f, 16.0f, 16.0f, 10.0f,
1252 9.0f, 11.0f, 12.0f, 7.0f,
1256 return SimplePooling2dTestImpl<ArmnnType>(
1257 workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
1260 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
1264 float qScale = 1.0f,
1265 int32_t qOffset = 0)
1281 if(armnn::IsQuantizedType<T>())
1284 inputTensorInfo.SetQuantizationOffset(qOffset);
1289 auto input = MakeTensor<T, 4>(inputTensorInfo,
1290 QuantizedVector<T>({
1291 2.0f, 4.0f, 8.0f, 16.0f,
1292 4.0f, 2.0f, 2.0f, 4.0f,
1293 8.0f, 2.0f, 4.0f, 2.0f,
1294 16.0f, 2.0f, 2.0f, 8.0f,
1298 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
1299 QuantizedVector<T>({
1300 1.0f, 4.4721f, 8.0f,
1301 4.4721f, 2.6457f, 2.236f,
1302 8.0f, 1.4142f, 4.0f,
1306 return SimplePooling2dTestImpl<ArmnnType>(
1307 workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
1310 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
1314 float qScale = 1.0f,
1315 int32_t qOffset = 0)
1331 if(armnn::IsQuantizedType<T>())
1334 inputTensorInfo.SetQuantizationOffset(qOffset);
1339 auto input = MakeTensor<T, 4>(inputTensorInfo,
1340 QuantizedVector<T>({
1341 1.0f, 2.0f, 3.0f, 4.0f,
1342 1.0f, 2.0f, 3.0f, 4.0f,
1343 1.0f, 2.0f, 3.0f, 4.0f,
1344 1.0f, 2.0f, 3.0f, 4.0f,
1348 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
1349 QuantizedVector<T>({
1350 1.0540f, 1.7638f, 2.5385f, 2.3570f,
1351 1.2909f, 2.1602f, 3.1091f, 2.8867f,
1352 1.2909f, 2.1602f, 3.1091f, 2.8867f,
1353 1.0540f, 1.7638f, 2.5385f, 2.3570f,
1357 return SimplePooling2dTestImpl<ArmnnType>(
1358 workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
1366 bool forceNoPadding)
1368 return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::Float32>(
1369 workloadFactory, memoryManager, forceNoPadding);
1375 bool forceNoPadding)
1377 return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::QAsymmU8>(
1378 workloadFactory, memoryManager, forceNoPadding, 3.0f, -5);
1384 bool forceNoPadding)
1386 return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::QSymmS16>(
1387 workloadFactory, memoryManager, forceNoPadding);
1393 bool forceNoPadding)
1395 return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::Float32>(
1396 workloadFactory, memoryManager, forceNoPadding);
1402 bool forceNoPadding)
1404 return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::QAsymmU8>(
1405 workloadFactory, memoryManager, forceNoPadding, 0.1f, 128);
1411 bool forceNoPadding)
1413 return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::QSymmS16>(
1414 workloadFactory, memoryManager, forceNoPadding);
1422 return SimpleMaxPooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, dataLayout);
1430 return SimpleMaxPooling2dTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, dataLayout);
1438 return SimpleMaxPooling2dTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, dataLayout);
1444 return IgnorePaddingSimpleMaxPooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
1451 return IgnorePaddingSimpleMaxPooling2dTestCommon<armnn::DataType::QAsymmU8>(
1452 workloadFactory, memoryManager, 1.0f, -5);
1459 return IgnorePaddingSimpleMaxPooling2dTestCommon<armnn::DataType::QSymmS16>(
1460 workloadFactory, memoryManager);
1467 return IgnorePaddingMaxPooling2dSize3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
1474 return IgnorePaddingMaxPooling2dSize3TestCommon<armnn::DataType::QAsymmU8>(
1475 workloadFactory, memoryManager, 1.0f, -5);
1482 return IgnorePaddingMaxPooling2dSize3TestCommon<armnn::DataType::QSymmS16>(
1483 workloadFactory, memoryManager);
1491 return SimpleAveragePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, dataLayout);
1499 return SimpleAveragePooling2dTestCommon<armnn::DataType::QAsymmU8>(
1500 workloadFactory, memoryManager, dataLayout, 0.5, -1);
1508 return SimpleAveragePooling2dTestCommon<armnn::DataType::QSymmS16>(
1509 workloadFactory, memoryManager, dataLayout);
1515 bool forceNoPadding)
1517 return IgnorePaddingAveragePooling2dSize3x2Stride2x2TestCommon<armnn::DataType::Float32>(
1518 workloadFactory, memoryManager, forceNoPadding);
1525 return LargeTensorsAveragePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
1532 return LargeTensorsAveragePooling2dTestCommon<armnn::DataType::QAsymmU8>(
1533 workloadFactory, memoryManager, 0.5, -1);
1540 return LargeTensorsAveragePooling2dTestCommon<armnn::DataType::QSymmS16>(
1541 workloadFactory, memoryManager);
1547 return IgnorePaddingSimpleAveragePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
1554 return IgnorePaddingSimpleAveragePooling2dTestCommon<armnn::DataType::QAsymmU8>(
1555 workloadFactory, memoryManager);
1562 return IgnorePaddingSimpleAveragePooling2dTestCommon<armnn::DataType::QSymmS16>(
1563 workloadFactory, memoryManager);
1570 return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<armnn::DataType::Float32>(
1571 workloadFactory, memoryManager);
1578 return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<armnn::DataType::QAsymmU8>(
1579 workloadFactory, memoryManager);
1586 return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<armnn::DataType::QSymmS16>(
1587 workloadFactory, memoryManager);
1594 return IgnorePaddingAveragePooling2dSize3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
1601 return IgnorePaddingAveragePooling2dSize3TestCommon<armnn::DataType::QAsymmU8>(
1602 workloadFactory, memoryManager);
1609 return IgnorePaddingAveragePooling2dSize3TestCommon<armnn::DataType::QSymmS16>(
1610 workloadFactory, memoryManager);
1618 return SimpleL2Pooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, dataLayout);
1626 return SimpleL2Pooling2dTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, dataLayout);
1634 return SimpleL2Pooling2dTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, dataLayout);
1641 return L2Pooling2dSize3Stride1TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
1648 return L2Pooling2dSize3Stride1TestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
1655 return L2Pooling2dSize3Stride1TestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
1662 return L2Pooling2dSize3Stride3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
1669 return L2Pooling2dSize3Stride3TestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
1676 return L2Pooling2dSize3Stride3TestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
1682 return L2Pooling2dSize3Stride4TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
1689 return L2Pooling2dSize3Stride4TestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
1696 return L2Pooling2dSize3Stride4TestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
1703 return L2Pooling2dSize7TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
1710 return L2Pooling2dSize7TestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
1717 return L2Pooling2dSize7TestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
1724 return L2Pooling2dSize9TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
1731 return L2Pooling2dSize9TestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
1738 return L2Pooling2dSize9TestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
1744 return IgnorePaddingSimpleL2Pooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
1751 return IgnorePaddingSimpleL2Pooling2dTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
1758 return IgnorePaddingSimpleL2Pooling2dTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
1765 return IgnorePaddingL2Pooling2dSize3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
1772 return IgnorePaddingL2Pooling2dSize3TestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
1779 return IgnorePaddingL2Pooling2dSize3TestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
1786 return AsymmetricNonSquarePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
1793 return AsymmetricNonSquarePooling2dTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
1800 return AsymmetricNonSquarePooling2dTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
1809 return ComparePooling2dTestCommon<armnn::DataType::Float32>(
1810 workloadFactory, memoryManager, refWorkloadFactory, poolingType);
1819 return ComparePooling2dTestCommon<armnn::DataType::QAsymmU8>(
1820 workloadFactory, memoryManager, refWorkloadFactory, poolingType, 0.1f, 128);
1829 return ComparePooling2dTestCommon<armnn::DataType::QSymmS16>(
1830 workloadFactory, memoryManager, refWorkloadFactory, poolingType);
LayerTestResult< float, 4 > L2Pooling2dSize3Stride3Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
unsigned int GetNumElements() const
Function that calculates the tensor elements by multiplying all dimension size which are Specified...
LayerTestResult< int16_t, 4 > IgnorePaddingL2Pooling2dSize3Int16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< uint8_t, 4 > AsymmetricNonSquarePooling2dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
virtual const BackendId & GetBackendId() const =0
LayerTestResult< float, 4 > IgnorePaddingAveragePooling2dSize3Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
uint32_t m_PadBottom
Padding bottom value in the height dimension.
unsigned int GetWidthIndex() const
LayerTestResult< float, 4 > ComparePooling2dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, armnn::IWorkloadFactory &refWorkloadFactory, armnn::PoolingAlgorithm poolingType)
bool IsPooling2dSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const Pooling2dDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
const TensorShape & GetShape() const
uint32_t m_PadLeft
Padding left value in the width dimension.
LayerTestResult< float, 4 > IgnorePaddingMaxPooling2dSize3Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< int16_t, 4 > L2Pooling2dSize7Int16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
#define ARMNN_NO_DEPRECATE_WARN_BEGIN
LayerTestResult< float, 4 > SimpleL2Pooling2dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::DataLayout dataLayout)
LayerTestResult< int16_t, 4 > ComparePooling2dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, armnn::IWorkloadFactory &refWorkloadFactory, armnn::PoolingAlgorithm poolingType)
LayerTestResult< uint8_t, 4 > IgnorePaddingMaxPooling2dSize3Uint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< float, 4 > IgnorePaddingSimpleMaxPooling2dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
uint32_t m_PoolWidth
Pooling width value.
LayerTestResult< float, 4 > LargeTensorsAveragePooling2dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< float, 4 > IgnorePaddingSimpleL2Pooling2dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< float, 4 > SimpleAveragePooling2dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::DataLayout dataLayout)
LayerTestResult< int16_t, 4 > IgnorePaddingSimpleMaxPooling2dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< int16_t, 4 > IgnorePaddingAveragePooling2dSize3Int16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< int16_t, 4 > IgnorePaddingSimpleAveragePooling2dNoPaddingInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< uint8_t, 4 > ComparePooling2dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, armnn::IWorkloadFactory &refWorkloadFactory, armnn::PoolingAlgorithm poolingType)
The padding fields don't count and are ignored.
PaddingMethod m_PaddingMethod
The padding method to be used. (Exclude, IgnoreValue).
LayerTestResult< uint8_t, 4 > SimpleMaxPooling2dSize3x3Stride2x4Uint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool forceNoPadding)
LayerTestResult< uint8_t, 4 > LargeTensorsAveragePooling2dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< float, 4 > IgnorePaddingL2Pooling2dSize3Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< int16_t, 4 > SimpleAveragePooling2dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::DataLayout dataLayout)
uint32_t m_PadTop
Padding top value in the height dimension.
LayerTestResult< int16_t, 4 > IgnorePaddingSimpleL2Pooling2dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< uint8_t, 4 > L2Pooling2dSize7Uint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< uint8_t, 4 > IgnorePaddingSimpleL2Pooling2dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< uint8_t, 4 > SimpleAveragePooling2dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::DataLayout dataLayout)
void IgnoreUnused(Ts &&...)
LayerDescriptor m_Parameters
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
LayerTestResult< float, 4 > L2Pooling2dSize3Stride1Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< float, 4 > L2Pooling2dSize3Stride4Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
unsigned int GetHeightIndex() const
LayerTestResult< uint8_t, 4 > L2Pooling2dSize3Stride1Uint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< uint8_t, 4 > L2Pooling2dSize3Stride4Uint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< int16_t, 4 > IgnorePaddingMaxPooling2dSize3Int16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< int16_t, 4 > SimpleMaxPooling2dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::DataLayout dataLayout)
LayerTestResult< uint8_t, 4 > SimpleMaxPooling2dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::DataLayout dataLayout)
uint32_t m_PoolHeight
Pooling height value.
void Permute(const armnn::TensorShape &dstShape, const armnn::PermutationVector &mappings, const void *src, void *dst, size_t dataTypeSize)
LayerTestResult< int16_t, 4 > L2Pooling2dSize3Stride3Int16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
uint32_t m_PadRight
Padding right value in the width dimension.
#define ARMNN_NO_DEPRECATE_WARN_END
std::shared_ptr< IMemoryManager > IMemoryManagerSharedPtr
Provides access to the appropriate indexes for Channels, Height and Width based on DataLayout...
LayerTestResult< float, 4 > SimpleMaxPooling2dSize3x3Stride2x4Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool forceNoPadding)
LayerTestResult< uint8_t, 4 > IgnorePaddingL2Pooling2dSize3Uint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< uint8_t, 4 > IgnorePaddingSimpleAveragePooling2dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< int16_t, 4 > L2Pooling2dSize9Int16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
void SetQuantizationScale(float scale)
std::enable_if_t< std::is_unsigned< Source >::value && std::is_unsigned< Dest >::value, Dest > numeric_cast(Source source)
LayerTestResult< float, 4 > IgnorePaddingSimpleAveragePooling2dNoPaddingTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< int16_t, 4 > AsymmetricNonSquarePooling2dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
void CopyDataFromITensorHandle(void *memory, const armnn::ITensorHandle *tensorHandle)
LayerTestResult< int16_t, 4 > L2Pooling2dSize3Stride1Int16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< uint8_t, 4 > IgnorePaddingAveragePooling2dSize3Uint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo, const bool IsMemoryManaged=true) const =0
LayerTestResult< float, 4 > IgnorePaddingAveragePooling2dSize3x2Stride2x2Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool forceNoPadding)
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
virtual std::unique_ptr< IWorkload > CreatePooling2d(const Pooling2dQueueDescriptor &descriptor, const WorkloadInfo &info) const
LayerTestResult< float, 4 > IgnorePaddingSimpleAveragePooling2dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< uint8_t, 4 > SimpleL2Pooling2dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::DataLayout dataLayout)
LayerTestResult< float, 4 > SimpleMaxPooling2dSize2x2Stride2x2Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool forceNoPadding)
LayerTestResult< int16_t, 4 > SimpleMaxPooling2dSize2x2Stride2x2Int16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool forceNoPadding)
PoolingAlgorithm m_PoolType
The pooling algorithm to use (Max, Average, L2).
LayerTestResult< uint8_t, 4 > IgnorePaddingSimpleAveragePooling2dNoPaddingUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
The padding fields count, but are ignored.
LayerTestResult< int16_t, 4 > L2Pooling2dSize3Stride4Int16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< uint8_t, 4 > IgnorePaddingSimpleMaxPooling2dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< float, 4 > L2Pooling2dSize7Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
OutputShapeRounding m_OutputShapeRounding
The rounding method for the output shape. (Floor, Ceiling).
LayerTestResult< int16_t, 4 > SimpleMaxPooling2dSize3x3Stride2x4Int16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool forceNoPadding)
armnn::TensorInfo GetTensorInfo(unsigned int numberOfBatches, unsigned int numberOfChannels, unsigned int height, unsigned int width, const armnn::DataLayout dataLayout, const armnn::DataType dataType)
LayerTestResult< float, 4 > SimpleMaxPooling2dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::DataLayout dataLayout)
LayerTestResult< float, 4 > AsymmetricNonSquarePooling2dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< uint8_t, 4 > L2Pooling2dSize3Stride3Uint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
Contains information about inputs and outputs to a layer.
void SetQuantizationOffset(int32_t offset)
LayerTestResult< float, 4 > L2Pooling2dSize9Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< int16_t, 4 > SimpleL2Pooling2dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::DataLayout dataLayout)
A Pooling2dDescriptor for the Pooling2dLayer.
LayerTestResult< int16_t, 4 > LargeTensorsAveragePooling2dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< uint8_t, 4 > L2Pooling2dSize9Uint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
unsigned int GetChannelsIndex() const
LayerTestResult< int16_t, 4 > IgnorePaddingSimpleAveragePooling2dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< uint8_t, 4 > SimpleMaxPooling2dSize2x2Stride2x2Uint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool forceNoPadding)
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)