31 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
39 const std::vector<T>& input,
40 const std::vector<T>& outputExpected,
58 unsigned int outputChannels =
armnn::numeric_cast<
unsigned int>(outputShape[channelsIndex]);
62 inputBatchSize, inputChannels, inputHeight, inputWidth, dataLayout, ArmnnType);
65 outputBatchSize, outputChannels, outputHeight, outputWidth, dataLayout, ArmnnType);
68 if(armnn::IsQuantizedType<T>())
70 inputTensorInfo.SetQuantizationScale(qScale);
71 inputTensorInfo.SetQuantizationOffset(qOffset);
79 std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.
CreateTensorHandle(inputTensorInfo);
80 std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.
CreateTensorHandle(outputTensorInfo);
87 AddInputToWorkload(queueDescriptor, workloadInfo, inputTensorInfo, inputHandle.get());
88 AddOutputToWorkload(queueDescriptor, workloadInfo, outputTensorInfo, outputHandle.get());
94 result.m_Supported = handle.IsPooling2dSupported(inputTensorInfo,
97 if (!result.m_Supported)
106 inputHandle->Allocate();
107 outputHandle->Allocate();
115 result.m_ActualData = actualOutput;
116 result.m_ExpectedData = outputExpected;
130 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
151 unsigned int inputWidth = 8;
152 unsigned int inputHeight = 13;
153 unsigned int outputWidth =
156 unsigned int outputHeight =
159 unsigned int channels = 2;
160 unsigned int batchSize = 2;
162 armnn::TensorInfo inputTensorInfo({ batchSize, channels, inputHeight, inputWidth }, ArmnnType);
163 armnn::TensorInfo outputTensorInfo({ batchSize, channels, outputHeight, outputWidth }, ArmnnType);
166 if(armnn::IsQuantizedType<T>())
169 inputTensorInfo.SetQuantizationOffset(qOffset);
174 std::vector<float> singleChannelData({
175 0.0f, 4.0f, 8.0f, 1.0f, 6.0f, 4.0f, 5.0f, 8.0f,
176 1.0f, 1.0f, 6.0f, 0.0f, 3.0f, 7.0f, 4.0f, 7.0f,
177 8.0f, 5.0f, 0.0f, 0.0f, 8.0f, 3.0f, 4.0f, 3.0f,
178 8.0f, 2.0f, 5.0f, 4.0f, 1.0f, 9.0f, 2.0f, 0.0f,
179 5.0f, 4.0f, 5.0f, 0.0f, 0.0f, 0.0f, 7.0f, 2.0f,
180 1.0f, 2.0f, 6.0f, 2.0f, 7.0f, 9.0f, 5.0f, 2.0f,
181 9.0f, 7.0f, 3.0f, 1.0f, 3.0f, 4.0f, 8.0f, 3.0f,
182 1.0f, 0.0f, 0.0f, 5.0f, 5.0f, 4.0f, 2.0f, 0.0f,
183 6.0f, 4.0f, 3.0f, 6.0f, 9.0f, 5.0f, 5.0f, 6.0f,
184 8.0f, 7.0f, 9.0f, 6.0f, 1.0f, 4.0f, 1.0f, 9.0f,
185 7.0f, 1.0f, 9.0f, 2.0f, 9.0f, 9.0f, 8.0f, 1.0f,
186 4.0f, 4.0f, 5.0f, 9.0f, 2.0f, 6.0f, 6.0f, 4.0f,
187 3.0f, 5.0f, 4.0f, 0.0f, 1.0f, 5.0f, 9.0f, 7.0f,
191 std::vector<float> inputData;
192 auto negator = [](
float f) {
return -f; };
195 inputData.insert(inputData.end(), singleChannelData.begin(), singleChannelData.end());
196 std::transform(singleChannelData.begin(), singleChannelData.end(), std::back_inserter(inputData), negator);
199 inputData.insert(inputData.end(), singleChannelData.begin(), singleChannelData.end());
200 std::transform(singleChannelData.begin(), singleChannelData.end(), std::back_inserter(inputData), negator);
202 auto input = QuantizedVector<T>(inputData, qScale, qOffset);
205 std::vector<T> outputExpected;
208 outputExpected = QuantizedVector<T>(
230 outputExpected = QuantizedVector<T>(
232 0.0f, 8.0f, 8.0f, 8.0f, 8.0f, 8.0f,
233 0.0f, 9.0f, 7.0f, 9.0f, 9.0f, 3.0f,
234 0.0f, 8.0f, 9.0f, 9.0f, 9.0f, 9.0f,
236 0.0f, 0.0f, 0.0f, 0.0f,-3.0f,-3.0f,
237 0.0f,-1.0f, 0.0f, 0.0f, 0.0f,-2.0f,
238 0.0f,-1.0f,-1.0f,-1.0f,-1.0f,-1.0f,
240 0.0f, 8.0f, 8.0f, 8.0f, 8.0f, 8.0f,
241 0.0f, 9.0f, 7.0f, 9.0f, 9.0f, 3.0f,
242 0.0f, 8.0f, 9.0f, 9.0f, 9.0f, 9.0f,
244 0.0f, 0.0f, 0.0f, 0.0f,-3.0f,-3.0f,
245 0.0f,-1.0f, 0.0f, 0.0f, 0.0f,-2.0f,
246 0.0f,-1.0f,-1.0f,-1.0f,-1.0f,-1.0f
251 return SimplePooling2dTestImpl<ArmnnType>(
252 workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset,
253 input, outputExpected, inputTensorInfo.GetShape(), outputTensorInfo.
GetShape());
256 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
276 if(armnn::IsQuantizedType<T>())
284 std::vector<T> inputData(
286 1.0f, 2.0f, 5.0f, 6.0f,
287 3.0f, 4.0f, 7.0f, 8.0f,
288 9.0f, 10.0f, 13.0f, 14.0f,
289 11.0f, 12.0f, 15.0f, 16.0f,
291 17.0f, 18.0f, 21.0f, 22.0f,
292 19.0f, 20.0f, 23.0f, 24.0f,
293 25.0f, 26.0f, 29.0f, 30.0f,
294 27.0f, 28.0f, 31.0f, 32.0f,
298 std::vector<T> outputData(
311 std::vector<T> tmp(inputData.size());
315 std::vector<T> tmp1(outputData.size());
320 return SimplePooling2dTestImpl<ArmnnType>(
321 workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset,
322 inputData, outputData, inputTensorInfo.
GetShape(), outputTensorInfo.
GetShape());
325 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
345 if(armnn::IsQuantizedType<T>())
353 std::vector<T> inputData(
355 2.0f, 2.0f, 6.0f, 6.0f,
356 4.0f, 4.0f, 8.0f, 8.0f,
357 10.0f, 12.0f, 14.0f, 16.0f,
358 10.0f, 12.0f, 16.0f, 14.0f,
360 18.0f, 20.0f, 24.0f, 22.0f,
361 20.0f, 18.0f, 22.0f, 24.0f,
362 26.0f, 28.0f, 0.0f, 0.0f,
363 26.0f, 28.0f, 0.0f, 0.0f,
367 std::vector<T> outputData(
380 std::vector<T> tmp(inputData.size());
384 std::vector<T> tmp1(outputData.size());
389 return SimplePooling2dTestImpl<ArmnnType>(
390 workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset,
391 inputData, outputData, inputTensorInfo.
GetShape(), outputTensorInfo.
GetShape());
394 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
416 if(armnn::IsQuantizedType<T>())
424 std::vector<T> input;
431 std::vector<T> outputExpected;
435 outputExpected.push_back(1);
438 return SimplePooling2dTestImpl<ArmnnType>(
439 workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset,
440 input, outputExpected, inputTensorInfo.
GetShape(), outputTensorInfo.
GetShape());
443 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
462 std::vector<T> inputData(
464 1.0f, 7.0f, 5.0f, 5.0f,
465 1.0f, 7.0f, 5.0f, 5.0f,
466 3.0f, 3.0f, 1.0f, 1.0f,
467 3.0f, 3.0f, 1.0f, 1.0f,
469 1.0f, 7.0f, 0.0f, 0.0f,
470 1.0f, 7.0f, 2.0f, 0.0f,
471 0.0f, 2.0f, 1.0f, 1.0f,
472 0.0f, 0.0f, 1.0f, 1.0f,
476 std::vector<T> outputData(
489 std::vector<T> tmp(inputData.size());
493 std::vector<T> tmp1(outputData.size());
498 return SimplePooling2dTestImpl<ArmnnType>(
499 workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset,
500 inputData, outputData, inputTensorInfo.
GetShape(), outputTensorInfo.
GetShape());
503 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
518 auto input = QuantizedVector<T>(
520 2.0f, 1.0f, 5.0f, 2.0f,
521 1.0f, 2.0f, 2.0f, 1.0f,
522 5.0f, 4.0f, 1.0f, 5.0f,
523 2.0f, 1.0f, 5.0f, 2.0f,
528 auto outputExpected = QuantizedVector<T>(
535 return SimplePooling2dTestImpl<ArmnnType>(
536 workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset,
537 input, outputExpected, inputTensorInfo.
GetShape(), outputTensorInfo.
GetShape());
540 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
555 auto input = QuantizedVector<T>(
557 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
558 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
559 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
560 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
561 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
562 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
563 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
564 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
565 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
570 auto outputExpected = QuantizedVector<T>(
578 return SimplePooling2dTestImpl<ArmnnType>(
579 workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset,
580 input, outputExpected, inputTensorInfo.
GetShape(), outputTensorInfo.
GetShape());
583 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
598 auto input = QuantizedVector<T>(
600 2.0f, 1.0f, 5.0f, 0.0f, 2.0f, 1.0f, 5.0f,
601 1.0f, 2.0f, 2.0f, 0.0f, 1.0f, 2.0f, 2.0f,
602 5.0f, 4.0f, 1.0f, 0.0f, 5.0f, 4.0f, 1.0f,
603 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
604 2.0f, 1.0f, 5.0f, 0.0f, 2.0f, 1.0f, 5.0f,
605 1.0f, 2.0f, 2.0f, 0.0f, 1.0f, 2.0f, 2.0f,
606 5.0f, 4.0f, 1.0f, 0.0f, 5.0f, 4.0f, 1.0f,
611 auto outputExpected = QuantizedVector<T>(
618 return SimplePooling2dTestImpl<ArmnnType>(
619 workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset,
620 input, outputExpected, inputTensorInfo.
GetShape(), outputTensorInfo.
GetShape());
623 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
638 auto input = QuantizedVector<T>(
640 1.0f, 0.0f, 2.0f, 0.0f, 3.0f, 0.0f, 4.0f,
641 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
642 0.0f, 5.0f, 0.0f, 6.0f, 0.0f, 7.0f, 0.0f,
643 8.0f, 0.0f, 9.0f, 0.0f, 10.0f, 0.0f, 5.0f,
644 0.0f, 5.0f, 0.0f, 2.0f, 0.0f, 1.0f, 1.0f,
645 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
646 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
651 auto outputExpected = QuantizedVector<T>(
657 return SimplePooling2dTestImpl<ArmnnType>(
658 workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset,
659 input, outputExpected, inputTensorInfo.
GetShape(), outputTensorInfo.
GetShape());
662 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
677 auto input = QuantizedVector<T>(
679 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
680 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
681 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
682 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
683 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
684 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
685 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
686 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
687 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
692 auto outputExpected = QuantizedVector<T>(
698 return SimplePooling2dTestImpl<ArmnnType>(
699 workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset,
700 input, outputExpected, inputTensorInfo.
GetShape(), outputTensorInfo.
GetShape());
703 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
716 descriptor.m_PoolWidth = 2;
717 descriptor.m_PoolHeight = 3;
718 descriptor.m_StrideX = 2;
719 descriptor.m_StrideY = 1;
720 descriptor.m_PadLeft = 2;
721 descriptor.m_PadRight = 0;
722 descriptor.m_PadTop = 1;
723 descriptor.m_PadBottom = 2;
728 auto input = QuantizedVector<T>(
735 auto outputExpected = QuantizedVector<T>(
737 0.0f, 3.0f, 0.0f, 3.0f,
741 return SimplePooling2dTestImpl<ArmnnType>(
742 workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset,
743 input, outputExpected, inputTensorInfo.
GetShape(), outputTensorInfo.
GetShape());
746 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
758 const unsigned int inputWidth = 16;
759 const unsigned int inputHeight = 32;
760 const unsigned int channelCount = 2;
761 const unsigned int batchSize = 5;
763 const unsigned int poolSize = 3;
764 const unsigned int strideX = 2;
765 const unsigned int strideY = 4;
766 const unsigned int padX = 0;
767 const unsigned int padY = 0;
769 const unsigned int outputWidth = (inputWidth + 2 * padX + strideX - poolSize) / strideX;
770 const unsigned int outputHeight = (inputHeight + 2 * padY + strideY - poolSize) / strideY;
775 unsigned int inputShape[] = { batchSize, channelCount, inputHeight, inputWidth };
776 unsigned int outputShape[] = { batchSize, channelCount, outputHeight, outputWidth };
782 if(armnn::IsQuantizedType<T>())
790 std::vector<T> input = MakeRandomTensor<T>(inputTensorInfo, 81715);
796 std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.
CreateTensorHandle(inputTensorInfo);
797 std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.
CreateTensorHandle(outputTensorInfo);
801 AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
802 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
814 std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refTensorHandleFactory.
CreateTensorHandle(outputTensorInfo);
815 std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refTensorHandleFactory.
CreateTensorHandle(inputTensorInfo);
821 comparisonResult.m_Supported = handle.IsPooling2dSupported(inputTensorInfo,
824 if (!comparisonResult.m_Supported)
826 return comparisonResult;
831 SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
832 SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
834 std::unique_ptr<armnn::IWorkload> workload
836 std::unique_ptr<armnn::IWorkload> workloadRef
839 outputHandleRef->Allocate();
840 inputHandleRef->Allocate();
841 inputHandle->Allocate();
842 outputHandle->Allocate();
848 workloadRef->Execute();
853 comparisonResult.m_ActualData = actualOutput;
854 comparisonResult.m_ExpectedData = expectedOutput;
856 return comparisonResult;
868 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
888 unsigned int inputWidth = 4;
890 unsigned int inputHeight = 4;
892 unsigned int outputWidth =
895 unsigned int outputHeight =
898 unsigned int channels = 1;
899 unsigned int batchSize = 1;
901 std::vector<float> inputData = {
902 510.0f, 222.0f, 780.0f, 654.0f,
903 141.0f, 276.0f, 15.0f, 546.0f,
904 303.0f, 618.0f, 582.0f, 339.0f,
905 438.0f, 564.0f, 573.0f, 402.0f
909 std::vector<float> expectedOutputDataWithPadding = {
910 0.0f, 510.0f, 780.0f, 654.0f, 0.0f,
911 0.0f, 438.0f, 618.0f, 402.0f, 0.0f
914 std::vector<float> expectedOutputDataNoPadding = {
919 armnn::TensorInfo inputTensorInfo({ batchSize, channels, inputHeight, inputWidth }, ArmnnType);
922 armnn::TensorInfo outputTensorInfo({ batchSize, channels, outputHeight, outputWidth }, ArmnnType);
925 if(armnn::IsQuantizedType<T>())
928 inputTensorInfo.SetQuantizationOffset(qOffset);
933 auto input = QuantizedVector<T>(inputData, qScale, qOffset);
935 auto outputExpected =
936 forceNoPadding ? QuantizedVector<T>(expectedOutputDataNoPadding, qScale, qOffset) :
937 QuantizedVector<T>(expectedOutputDataWithPadding, qScale, qOffset);
939 return SimplePooling2dTestImpl<ArmnnType>(
940 workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset,
941 input, outputExpected, inputTensorInfo.GetShape(), outputTensorInfo.
GetShape());
953 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
968 descriptor.
m_PadLeft = (forceNoPadding) ? 0 : 1;
975 unsigned int inputWidth = 3;
976 unsigned int inputHeight = 2;
977 unsigned int outputWidth =
980 unsigned int outputHeight =
983 unsigned int channels = 1;
984 unsigned int batchSize = 1;
986 std::vector<float> inputData = {
991 std::vector<float> expectedOutputDataWithPadding = {
995 std::vector<float> expectedOutputDataNoPadding = {
999 armnn::TensorInfo inputTensorInfo({ batchSize, channels, inputHeight, inputWidth }, ArmnnType);
1002 armnn::TensorInfo outputTensorInfo({ batchSize, channels, outputHeight, outputWidth }, ArmnnType);
1005 if(armnn::IsQuantizedType<T>())
1008 inputTensorInfo.SetQuantizationOffset(qOffset);
1013 auto input = QuantizedVector<T>(inputData, qScale, qOffset);
1015 auto outputExpected =
1016 forceNoPadding ? QuantizedVector<T>(expectedOutputDataNoPadding, qScale, qOffset) :
1017 QuantizedVector<T>(expectedOutputDataWithPadding, qScale, qOffset);
1019 return SimplePooling2dTestImpl<ArmnnType>(
1020 workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset,
1021 input, outputExpected, inputTensorInfo.GetShape(), outputTensorInfo.
GetShape());
1025 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
1030 float qScale = 1.0f,
1031 int32_t qOffset = 0)
1047 if(armnn::IsQuantizedType<T>())
1050 inputTensorInfo.SetQuantizationOffset(qOffset);
1055 auto input = QuantizedVector<T>(
1057 -1.0f, -2.0f, 3.0f, 4.0f,
1058 -1.0f, -2.0f, 3.0f, 4.0f,
1059 1.0f, 2.0f, -3.0f, -4.0f,
1060 1.0f, 2.0f, -3.0f, -4.0f,
1064 auto outputExpected = QuantizedVector<T>(
1072 return SimplePooling2dTestImpl<ArmnnType>(
1073 workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset,
1074 input, outputExpected, inputTensorInfo.GetShape(), outputTensorInfo.
GetShape());
1077 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
1082 float qScale = 1.0f,
1083 int32_t qOffset = 0)
1099 if(armnn::IsQuantizedType<T>())
1102 inputTensorInfo.SetQuantizationOffset(qOffset);
1107 auto input = QuantizedVector<T>(
1109 -1.0f, -2.0f, 3.0f, 4.0f,
1110 -1.0f, -2.0f, 3.0f, 4.0f,
1111 1.0f, 2.0f, -3.0f, -4.0f,
1112 1.0f, 2.0f, -3.0f, -4.0f,
1116 auto outputExpected = QuantizedVector<T>(
1118 -1.0f, 3.0f, 4.0f, 4.0f,
1119 2.0f, 3.0f, 4.0f, 4.0f,
1120 2.0f, 3.0f, 4.0f, 4.0f,
1121 2.0f, 2.0f, 2.0f, -3.0f,
1125 return SimplePooling2dTestImpl<ArmnnType>(
1126 workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset,
1127 input, outputExpected, inputTensorInfo.GetShape(), outputTensorInfo.
GetShape());
1130 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
1135 float qScale = 1.0f,
1136 int32_t qOffset = 0)
1152 if(armnn::IsQuantizedType<T>())
1155 inputTensorInfo.SetQuantizationOffset(qOffset);
1160 auto input = QuantizedVector<T>(
1162 12.0f, 20.0f, 32.0f, 40.0f,
1163 12.0f, 20.0f, 32.0f, 40.0f,
1164 12.0f, 20.0f, 32.0f, 40.0f,
1165 12.0f, 20.0f, 32.0f, 40.0f,
1169 auto outputExpected = QuantizedVector<T>(
1177 return SimplePooling2dTestImpl<ArmnnType>(
1178 workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset,
1179 input, outputExpected, inputTensorInfo.GetShape(), outputTensorInfo.
GetShape());
1182 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
1187 float qScale = 1.0f,
1188 int32_t qOffset = 0)
1205 if(armnn::IsQuantizedType<T>())
1208 inputTensorInfo.SetQuantizationOffset(qOffset);
1213 auto input = QuantizedVector<T>(
1215 1.0f, 2.0f, 3.0f, 4.0f,
1216 1.0f, 2.0f, 3.0f, 4.0f,
1217 1.0f, 2.0f, 3.0f, 4.0f,
1218 1.0f, 2.0f, 3.0f, 4.0f,
1222 auto outputExpected = QuantizedVector<T>(
1229 return SimplePooling2dTestImpl<ArmnnType>(
1230 workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset,
1231 input, outputExpected, inputTensorInfo.GetShape(), outputTensorInfo.
GetShape());
1234 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
1239 float qScale = 1.0f,
1240 int32_t qOffset = 0)
1256 if(armnn::IsQuantizedType<T>())
1259 inputTensorInfo.SetQuantizationOffset(qOffset);
1264 auto input = QuantizedVector<T>(
1266 9.0f, 27.0f, 18.0f, 36.0f,
1267 18.0f, 9.0f, 18.0f, 9.0f,
1268 27.0f, 18.0f, 9.0f, 27.0f,
1269 9.0f, 27.0f, 9.0f, 18.0f,
1273 auto outputExpected = QuantizedVector<T>(
1275 7.0f, 11.0f, 13.0f, 9.0f,
1276 12.0f, 17.0f, 19.0f, 13.0f,
1277 12.0f, 16.0f, 16.0f, 10.0f,
1278 9.0f, 11.0f, 12.0f, 7.0f,
1282 return SimplePooling2dTestImpl<ArmnnType>(
1283 workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset,
1284 input, outputExpected, inputTensorInfo.GetShape(), outputTensorInfo.
GetShape());
1287 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
1292 float qScale = 1.0f,
1293 int32_t qOffset = 0)
1309 if(armnn::IsQuantizedType<T>())
1312 inputTensorInfo.SetQuantizationOffset(qOffset);
1317 auto input = QuantizedVector<T>(
1319 2.0f, 4.0f, 8.0f, 16.0f,
1320 4.0f, 2.0f, 2.0f, 4.0f,
1321 8.0f, 2.0f, 4.0f, 2.0f,
1322 16.0f, 2.0f, 2.0f, 8.0f,
1326 auto outputExpected = QuantizedVector<T>(
1328 1.0f, 4.4721f, 8.0f,
1329 4.4721f, 2.6457f, 2.236f,
1330 8.0f, 1.4142f, 4.0f,
1334 return SimplePooling2dTestImpl<ArmnnType>(
1335 workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset,
1336 input, outputExpected, inputTensorInfo.GetShape(), outputTensorInfo.
GetShape());
1339 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
1344 float qScale = 1.0f,
1345 int32_t qOffset = 0)
1361 if(armnn::IsQuantizedType<T>())
1364 inputTensorInfo.SetQuantizationOffset(qOffset);
1369 auto input = QuantizedVector<T>(
1371 1.0f, 2.0f, 3.0f, 4.0f,
1372 1.0f, 2.0f, 3.0f, 4.0f,
1373 1.0f, 2.0f, 3.0f, 4.0f,
1374 1.0f, 2.0f, 3.0f, 4.0f,
1378 auto outputExpected = QuantizedVector<T>(
1380 1.0540f, 1.7638f, 2.5385f, 2.3570f,
1381 1.2909f, 2.1602f, 3.1091f, 2.8867f,
1382 1.2909f, 2.1602f, 3.1091f, 2.8867f,
1383 1.0540f, 1.7638f, 2.5385f, 2.3570f,
1387 return SimplePooling2dTestImpl<ArmnnType>(
1388 workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset,
1389 input, outputExpected, inputTensorInfo.GetShape(), outputTensorInfo.
GetShape());
1398 bool forceNoPadding)
1400 return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::Float32>(
1401 workloadFactory, memoryManager, tensorHandleFactory, forceNoPadding);
1408 bool forceNoPadding)
1410 return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::QAsymmU8>(
1411 workloadFactory, memoryManager, tensorHandleFactory, forceNoPadding, 3.0f, -5);
1418 bool forceNoPadding)
1420 return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::QSymmS16>(
1421 workloadFactory, memoryManager, tensorHandleFactory, forceNoPadding);
1428 bool forceNoPadding)
1430 return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::Float32>(
1431 workloadFactory, memoryManager, tensorHandleFactory, forceNoPadding);
1438 bool forceNoPadding)
1440 return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::QAsymmU8>(
1441 workloadFactory, memoryManager, tensorHandleFactory, forceNoPadding, 0.1f, 128);
1448 bool forceNoPadding)
1450 return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::QSymmS16>(
1451 workloadFactory, memoryManager, tensorHandleFactory, forceNoPadding);
1460 return SimpleMaxPooling2dTestCommon<armnn::DataType::Float32>(
1461 workloadFactory, memoryManager, tensorHandleFactory, dataLayout);
1470 return SimpleMaxPooling2dTestCommon<armnn::DataType::QAsymmU8>(
1471 workloadFactory, memoryManager, tensorHandleFactory, dataLayout);
1480 return SimpleMaxPooling2dTestCommon<armnn::DataType::QSymmS16>(
1481 workloadFactory, memoryManager, tensorHandleFactory, dataLayout);
1488 return IgnorePaddingSimpleMaxPooling2dTestCommon<armnn::DataType::Float32>(
1489 workloadFactory, memoryManager, tensorHandleFactory);
1497 return IgnorePaddingSimpleMaxPooling2dTestCommon<armnn::DataType::QAsymmU8>(
1498 workloadFactory, memoryManager, tensorHandleFactory, 1.0f, -5);
1506 return IgnorePaddingSimpleMaxPooling2dTestCommon<armnn::DataType::QSymmS16>(
1507 workloadFactory, memoryManager, tensorHandleFactory);
1515 return IgnorePaddingMaxPooling2dSize3TestCommon<armnn::DataType::Float32>(
1516 workloadFactory, memoryManager, tensorHandleFactory);
1524 return IgnorePaddingMaxPooling2dSize3TestCommon<armnn::DataType::QAsymmU8>(
1525 workloadFactory, memoryManager, tensorHandleFactory, 1.0f, -5);
1533 return IgnorePaddingMaxPooling2dSize3TestCommon<armnn::DataType::QSymmS16>(
1534 workloadFactory, memoryManager, tensorHandleFactory);
1543 return SimpleAveragePooling2dTestCommon<armnn::DataType::Float32>(
1544 workloadFactory, memoryManager, tensorHandleFactory, dataLayout);
1553 return SimpleAveragePooling2dTestCommon<armnn::DataType::QAsymmU8>(
1554 workloadFactory, memoryManager, tensorHandleFactory, dataLayout, 0.5, -1);
1563 return SimpleAveragePooling2dTestCommon<armnn::DataType::QSymmS16>(
1564 workloadFactory, memoryManager, tensorHandleFactory, dataLayout);
1571 bool forceNoPadding)
1573 return IgnorePaddingAveragePooling2dSize3x2Stride2x2TestCommon<armnn::DataType::Float32>(
1574 workloadFactory, memoryManager, tensorHandleFactory, forceNoPadding);
1582 return LargeTensorsAveragePooling2dTestCommon<armnn::DataType::Float32>(
1583 workloadFactory, memoryManager, tensorHandleFactory);
1591 return LargeTensorsAveragePooling2dTestCommon<armnn::DataType::QAsymmU8>(
1592 workloadFactory, memoryManager, tensorHandleFactory, 0.5, -1);
1600 return LargeTensorsAveragePooling2dTestCommon<armnn::DataType::QSymmS16>(
1601 workloadFactory, memoryManager, tensorHandleFactory);
1608 return IgnorePaddingSimpleAveragePooling2dTestCommon<armnn::DataType::Float32>(
1609 workloadFactory, memoryManager, tensorHandleFactory);
1617 return IgnorePaddingSimpleAveragePooling2dTestCommon<armnn::DataType::QAsymmU8>(
1618 workloadFactory, memoryManager, tensorHandleFactory);
1626 return IgnorePaddingSimpleAveragePooling2dTestCommon<armnn::DataType::QSymmS16>(
1627 workloadFactory, memoryManager, tensorHandleFactory);
1635 return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<armnn::DataType::Float32>(
1636 workloadFactory, memoryManager, tensorHandleFactory);
1644 return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<armnn::DataType::QAsymmU8>(
1645 workloadFactory, memoryManager, tensorHandleFactory);
1653 return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<armnn::DataType::QSymmS16>(
1654 workloadFactory, memoryManager, tensorHandleFactory);
1662 return IgnorePaddingAveragePooling2dSize3TestCommon<armnn::DataType::Float32>(
1663 workloadFactory, memoryManager, tensorHandleFactory);
1671 return IgnorePaddingAveragePooling2dSize3TestCommon<armnn::DataType::QAsymmU8>(
1672 workloadFactory, memoryManager, tensorHandleFactory);
1680 return IgnorePaddingAveragePooling2dSize3TestCommon<armnn::DataType::QSymmS16>(
1681 workloadFactory, memoryManager, tensorHandleFactory);
1690 return SimpleL2Pooling2dTestCommon<armnn::DataType::Float32>(
1691 workloadFactory, memoryManager, tensorHandleFactory, dataLayout);
1700 return SimpleL2Pooling2dTestCommon<armnn::DataType::QAsymmU8>(
1701 workloadFactory, memoryManager, tensorHandleFactory, dataLayout);
1710 return SimpleL2Pooling2dTestCommon<armnn::DataType::QSymmS16>(
1711 workloadFactory, memoryManager, tensorHandleFactory, dataLayout);
1719 return L2Pooling2dSize3Stride1TestCommon<armnn::DataType::Float32>(
1720 workloadFactory, memoryManager, tensorHandleFactory);
1728 return L2Pooling2dSize3Stride1TestCommon<armnn::DataType::QAsymmU8>(
1729 workloadFactory, memoryManager, tensorHandleFactory);
1737 return L2Pooling2dSize3Stride1TestCommon<armnn::DataType::QSymmS16>(
1738 workloadFactory, memoryManager, tensorHandleFactory);
1746 return L2Pooling2dSize3Stride3TestCommon<armnn::DataType::Float32>(
1747 workloadFactory, memoryManager, tensorHandleFactory);
1755 return L2Pooling2dSize3Stride3TestCommon<armnn::DataType::QAsymmU8>(
1756 workloadFactory, memoryManager, tensorHandleFactory);
1764 return L2Pooling2dSize3Stride3TestCommon<armnn::DataType::QSymmS16>(
1765 workloadFactory, memoryManager, tensorHandleFactory);
1772 return L2Pooling2dSize3Stride4TestCommon<armnn::DataType::Float32>(
1773 workloadFactory, memoryManager, tensorHandleFactory);
1781 return L2Pooling2dSize3Stride4TestCommon<armnn::DataType::QAsymmU8>(
1782 workloadFactory, memoryManager, tensorHandleFactory);
1790 return L2Pooling2dSize3Stride4TestCommon<armnn::DataType::QSymmS16>(
1791 workloadFactory, memoryManager, tensorHandleFactory);
1799 return L2Pooling2dSize7TestCommon<armnn::DataType::Float32>(
1800 workloadFactory, memoryManager, tensorHandleFactory);
1808 return L2Pooling2dSize7TestCommon<armnn::DataType::QAsymmU8>(
1809 workloadFactory, memoryManager, tensorHandleFactory);
1817 return L2Pooling2dSize7TestCommon<armnn::DataType::QSymmS16>(
1818 workloadFactory, memoryManager, tensorHandleFactory);
1826 return L2Pooling2dSize9TestCommon<armnn::DataType::Float32>(
1827 workloadFactory, memoryManager, tensorHandleFactory);
1835 return L2Pooling2dSize9TestCommon<armnn::DataType::QAsymmU8>(
1836 workloadFactory, memoryManager, tensorHandleFactory);
1844 return L2Pooling2dSize9TestCommon<armnn::DataType::QSymmS16>(
1845 workloadFactory, memoryManager, tensorHandleFactory);
1852 return IgnorePaddingSimpleL2Pooling2dTestCommon<armnn::DataType::Float32>(
1853 workloadFactory, memoryManager, tensorHandleFactory);
1861 return IgnorePaddingSimpleL2Pooling2dTestCommon<armnn::DataType::QAsymmU8>(
1862 workloadFactory, memoryManager, tensorHandleFactory);
1870 return IgnorePaddingSimpleL2Pooling2dTestCommon<armnn::DataType::QSymmS16>(
1871 workloadFactory, memoryManager, tensorHandleFactory);
1879 return IgnorePaddingL2Pooling2dSize3TestCommon<armnn::DataType::Float32>(
1880 workloadFactory, memoryManager, tensorHandleFactory);
1888 return IgnorePaddingL2Pooling2dSize3TestCommon<armnn::DataType::QAsymmU8>(
1889 workloadFactory, memoryManager, tensorHandleFactory);
1897 return IgnorePaddingL2Pooling2dSize3TestCommon<armnn::DataType::QSymmS16>(
1898 workloadFactory, memoryManager, tensorHandleFactory);
1906 return AsymmetricNonSquarePooling2dTestCommon<armnn::DataType::Float32>(
1907 workloadFactory, memoryManager, tensorHandleFactory);
1915 return AsymmetricNonSquarePooling2dTestCommon<armnn::DataType::QAsymmU8>(
1916 workloadFactory, memoryManager, tensorHandleFactory);
1924 return AsymmetricNonSquarePooling2dTestCommon<armnn::DataType::QSymmS16>(
1925 workloadFactory, memoryManager, tensorHandleFactory);
1936 return ComparePooling2dTestCommon<armnn::DataType::Float32>(
1937 workloadFactory, memoryManager, refWorkloadFactory, tensorHandleFactory, refTensorHandleFactory, poolingType);
1948 return ComparePooling2dTestCommon<armnn::DataType::QAsymmU8>(
1949 workloadFactory, memoryManager, refWorkloadFactory, tensorHandleFactory, refTensorHandleFactory,
1950 poolingType, 0.1f, 128);
1961 return ComparePooling2dTestCommon<armnn::DataType::QSymmS16>(
1962 workloadFactory, memoryManager, refWorkloadFactory, tensorHandleFactory, refTensorHandleFactory, poolingType);
unsigned int GetNumElements() const
Function that calculates the number of tensor elements by multiplying all specified dimension sizes...
LayerTestResult< uint8_t, 4 > L2Pooling2dSize3Stride1Uint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
virtual const BackendId & GetBackendId() const =0
LayerTestResult< int16_t, 4 > SimpleL2Pooling2dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout dataLayout)
LayerTestResult< uint8_t, 4 > SimpleMaxPooling2dSize2x2Stride2x2Uint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool forceNoPadding)
uint32_t m_PadBottom
Padding bottom value in the height dimension.
unsigned int GetWidthIndex() const
LayerTestResult< int16_t, 4 > IgnorePaddingAveragePooling2dSize3Int16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< float, 4 > IgnorePaddingAveragePooling2dSize3Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
const TensorShape & GetShape() const
uint32_t m_PadLeft
Padding left value in the width dimension.
LayerTestResult< int16_t, 4 > LargeTensorsAveragePooling2dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< int16_t, 4 > SimpleMaxPooling2dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout dataLayout)
LayerTestResult< int16_t, 4 > IgnorePaddingSimpleMaxPooling2dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< uint8_t, 4 > SimpleMaxPooling2dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout dataLayout)
LayerTestResult< float, 4 > IgnorePaddingSimpleL2Pooling2dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
uint32_t m_PoolWidth
Pooling width value.
LayerTestResult< uint8_t, 4 > IgnorePaddingL2Pooling2dSize3Uint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
The padding fields don't count and are ignored.
PaddingMethod m_PaddingMethod
The padding method to be used. (Exclude, IgnoreValue).
LayerTestResult< float, 4 > IgnorePaddingL2Pooling2dSize3Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< float, 4 > SimpleMaxPooling2dSize2x2Stride2x2Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool forceNoPadding)
uint32_t m_PadTop
Padding top value in the height dimension.
LayerTestResult< int16_t, 4 > SimpleMaxPooling2dSize3x3Stride2x4Int16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool forceNoPadding)
LayerTestResult< uint8_t, 4 > IgnorePaddingSimpleAveragePooling2dNoPaddingUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< float, 4 > SimpleMaxPooling2dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout dataLayout)
void IgnoreUnused(Ts &&...)
LayerTestResult< float, 4 > L2Pooling2dSize9Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerDescriptor m_Parameters
LayerTestResult< int16_t, 4 > ComparePooling2dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, armnn::IWorkloadFactory &refWorkloadFactory, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::ITensorHandleFactory &refTensorHandleFactory, armnn::PoolingAlgorithm poolingType)
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
LayerTestResult< int16_t, 4 > L2Pooling2dSize3Stride4Int16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< int16_t, 4 > AsymmetricNonSquarePooling2dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
unsigned int GetHeightIndex() const
LayerTestResult< float, 4 > L2Pooling2dSize3Stride1Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< uint8_t, 4 > SimpleL2Pooling2dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout dataLayout)
LayerTestResult< uint8_t, 4 > ComparePooling2dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, armnn::IWorkloadFactory &refWorkloadFactory, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::ITensorHandleFactory &refTensorHandleFactory, armnn::PoolingAlgorithm poolingType)
uint32_t m_PoolHeight
Pooling height value.
void Permute(const armnn::TensorShape &dstShape, const armnn::PermutationVector &mappings, const void *src, void *dst, size_t dataTypeSize)
LayerTestResult< float, 4 > SimpleL2Pooling2dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout dataLayout)
LayerTestResult< int16_t, 4 > L2Pooling2dSize3Stride3Int16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< float, 4 > L2Pooling2dSize3Stride3Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< uint8_t, 4 > L2Pooling2dSize3Stride4Uint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< float, 4 > IgnorePaddingAveragePooling2dSize3x2Stride2x2Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool forceNoPadding)
LayerTestResult< float, 4 > LargeTensorsAveragePooling2dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< uint8_t, 4 > SimpleAveragePooling2dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout dataLayout)
uint32_t m_PadRight
Padding right value in the width dimension.
LayerTestResult< float, 4 > SimpleAveragePooling2dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout dataLayout)
std::shared_ptr< IMemoryManager > IMemoryManagerSharedPtr
Provides access to the appropriate indexes for Channels, Height and Width based on DataLayout...
LayerTestResult< uint8_t, 4 > IgnorePaddingSimpleAveragePooling2dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< float, 4 > L2Pooling2dSize7Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
void CopyDataFromITensorHandle(void *mem, const armnn::ITensorHandle *tensorHandle)
LayerTestResult< uint8_t, 4 > L2Pooling2dSize9Uint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
void SetQuantizationScale(float scale)
LayerTestResult< uint8_t, 4 > IgnorePaddingAveragePooling2dSize3Uint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< float, 4 > L2Pooling2dSize3Stride4Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< float, 4 > ComparePooling2dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, armnn::IWorkloadFactory &refWorkloadFactory, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::ITensorHandleFactory &refTensorHandleFactory, armnn::PoolingAlgorithm poolingType)
LayerTestResult< int16_t, 4 > L2Pooling2dSize7Int16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< float, 4 > SimpleMaxPooling2dSize3x3Stride2x4Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool forceNoPadding)
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
LayerTestResult< uint8_t, 4 > AsymmetricNonSquarePooling2dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< float, 4 > AsymmetricNonSquarePooling2dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< int16_t, 4 > IgnorePaddingSimpleAveragePooling2dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< uint8_t, 4 > IgnorePaddingSimpleMaxPooling2dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
PoolingAlgorithm m_PoolType
The pooling algorithm to use (Max, Average, L2).
LayerTestResult< int16_t, 4 > IgnorePaddingSimpleAveragePooling2dNoPaddingInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
The padding fields count, but are ignored.
LayerTestResult< uint8_t, 4 > IgnorePaddingMaxPooling2dSize3Uint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)
LayerTestResult< float, 4 > IgnorePaddingMaxPooling2dSize3Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
OutputShapeRounding m_OutputShapeRounding
The rounding method for the output shape. (Floor, Ceiling).
LayerTestResult< int16_t, 4 > L2Pooling2dSize9Int16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
std::enable_if_t< std::is_unsigned< Source >::value &&std::is_unsigned< Dest >::value, Dest > numeric_cast(Source source)
armnn::TensorInfo GetTensorInfo(unsigned int numberOfBatches, unsigned int numberOfChannels, unsigned int height, unsigned int width, const armnn::DataLayout dataLayout, const armnn::DataType dataType)
LayerTestResult< uint8_t, 4 > LargeTensorsAveragePooling2dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
Contains information about TensorInfos of a layer.
LayerTestResult< uint8_t, 4 > L2Pooling2dSize3Stride3Uint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
void SetQuantizationOffset(int32_t offset)
LayerTestResult< uint8_t, 4 > IgnorePaddingSimpleL2Pooling2dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< int16_t, 4 > SimpleAveragePooling2dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout dataLayout)
LayerTestResult< float, 4 > IgnorePaddingSimpleAveragePooling2dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< int16_t, 4 > IgnorePaddingMaxPooling2dSize3Int16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< float, 4 > IgnorePaddingSimpleAveragePooling2dNoPaddingTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
A Pooling2dDescriptor for the Pooling2dLayer.
LayerTestResult< float, 4 > IgnorePaddingSimpleMaxPooling2dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerSupportHandle GetILayerSupportByBackendId(const armnn::BackendId &backend)
Convenience function to retrieve the ILayerSupportHandle for a backend.
LayerTestResult< int16_t, 4 > L2Pooling2dSize3Stride1Int16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< int16_t, 4 > IgnorePaddingL2Pooling2dSize3Int16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< uint8_t, 4 > SimpleMaxPooling2dSize3x3Stride2x4Uint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool forceNoPadding)
LayerTestResult< int16_t, 4 > IgnorePaddingSimpleL2Pooling2dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
virtual std::unique_ptr< IWorkload > CreateWorkload(LayerType type, const QueueDescriptor &descriptor, const WorkloadInfo &info) const
LayerTestResult< int16_t, 4 > SimpleMaxPooling2dSize2x2Stride2x2Int16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool forceNoPadding)
unsigned int GetChannelsIndex() const
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo) const =0
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
unsigned int GetNumElements() const
LayerTestResult< uint8_t, 4 > L2Pooling2dSize7Uint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)