author    Narumol Prangnawarat <narumol.prangnawarat@arm.com>    2020-11-11 11:33:03 +0000
committer Narumol Prangnawarat <narumol.prangnawarat@arm.com>    2020-11-11 19:20:08 +0000
commit    8081536d24291794b4e189e6d5532d913a4525cb (patch)
tree      7b30bfc8f3f619377f0e1fa4f0f81943e62386ba
parent    3dda41daba5210ebd842115471ecadeb2fb3ae3b (diff)
IVGCVSW-5387 TfLiteDelegate: Implement the Pooling operators
* Add support for AveragePool2d and L2Pool2d operators
* Unit tests

Signed-off-by: Narumol Prangnawarat <narumol.prangnawarat@arm.com>
Change-Id: Ida3c2e80120bce2991035f143e9eb5b9480b0e4b
delegate/src/Pooling.hpp            |    6
delegate/src/test/Pooling2dTest.cpp | 1076
2 files changed, 1025 insertions, 57 deletions
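
The Pooling.hpp hunk below extends the operator-to-algorithm switch in VisitPoolingOperator. For reference, a minimal sketch of the mapping as it stands after this change; the surrounding validation and the default/error path sit outside the hunk and are assumed here.

// Sketch only: TfLite pooling builtins mapped to armnn::PoolingAlgorithm
// after this patch; handling of other operator codes is assumed, not shown.
armnn::PoolingAlgorithm poolingAlgorithm;
switch(tfLitePoolingOperatorCode)
{
    case kTfLiteBuiltinAveragePool2d:
        poolingAlgorithm = armnn::PoolingAlgorithm::Average;
        break;
    case kTfLiteBuiltinL2Pool2d:
        poolingAlgorithm = armnn::PoolingAlgorithm::L2;
        break;
    case kTfLiteBuiltinMaxPool2d:
        poolingAlgorithm = armnn::PoolingAlgorithm::Max;
        break;
    default:
        return kTfLiteError; // assumed: unsupported pooling codes are rejected
}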
diff --git a/delegate/src/Pooling.hpp b/delegate/src/Pooling.hpp
index 28e26f6504..b3a2af8900 100644
--- a/delegate/src/Pooling.hpp
+++ b/delegate/src/Pooling.hpp
@@ -72,6 +72,12 @@ TfLiteStatus VisitPoolingOperator(DelegateData& delegateData,
armnn::PoolingAlgorithm poolingAlgorithm;
switch(tfLitePoolingOperatorCode)
{
+ case kTfLiteBuiltinAveragePool2d:
+ poolingAlgorithm = armnn::PoolingAlgorithm::Average;
+ break;
+ case kTfLiteBuiltinL2Pool2d:
+ poolingAlgorithm = armnn::PoolingAlgorithm::L2;
+ break;
case kTfLiteBuiltinMaxPool2d:
poolingAlgorithm = armnn::PoolingAlgorithm::Max;
break;
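
The new unit tests in Pooling2dTest.cpp all delegate to the existing Pooling2dTest<T> helper. Judging from the calls added below, its trailing parameters are padding, stride, filter size, fused activation and quantisation info, with the activation and quantisation arguments defaulted when omitted. A minimal usage sketch follows; the argument roles are inferred from the calls in this diff, not from the helper's definition.

// Sketch only: argument roles inferred from the calls in the diff below;
// Pooling2dTest<T> itself is defined earlier in Pooling2dTest.cpp (not shown).
std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
std::vector<int32_t> inputShape  { 1, 3, 4, 1 };
std::vector<int32_t> outputShape { 1, 1, 2, 1 };

std::vector<float> inputValues = { -5.0f, 8.0f, -10.0f, 7.0f,
                                    8.0f, 12.0f, -15.0f, 2.0f,
                                    3.0f, -4.0f, -1.0f, -11.0f };
std::vector<float> expectedOutputValues = { 8.616844f, 9.721111f };

Pooling2dTest<float>(tflite::BuiltinOperator_L2_POOL_2D,  // TfLite builtin operator
                     ::tflite::TensorType_FLOAT32,        // tensor data type
                     backends,
                     inputShape,
                     outputShape,
                     inputValues,
                     expectedOutputValues,
                     ::tflite::Padding_VALID,             // padding scheme
                     2, 2,                                // stride (inferred from the output shapes)
                     2, 2);                               // filter size (inferred from the output shapes)
// Quantised variants additionally pass an activation, scale and zero point,
// e.g. tflite::ActivationFunctionType_NONE, 2.5f, 1 in the int8 tests.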
diff --git a/delegate/src/test/Pooling2dTest.cpp b/delegate/src/test/Pooling2dTest.cpp
index 3671b0b172..bc7bf384aa 100644
--- a/delegate/src/test/Pooling2dTest.cpp
+++ b/delegate/src/test/Pooling2dTest.cpp
@@ -241,121 +241,1083 @@ void MaxPool2dInt8Relu6Test(std::vector<armnn::BackendId>& backends)
1);
}
-TEST_SUITE("Pooling2dTest")
+void MaxPool2dUint8PaddingSameTest(std::vector<armnn::BackendId>& backends)
{
+ // Set input data
+ std::vector<int32_t> inputShape { 1, 3, 4, 1 };
+ std::vector<int32_t> outputShape { 1, 2, 2, 1 };
-TEST_CASE ("MaxPooling2d_FP32_PaddingValid_GpuAcc_Test")
+ std::vector<uint8_t> inputValues = { 5, 8, 10, 7,
+ 8, 12, 15, 2,
+ 3, 4, 1, 11 };
+
+ std::vector<uint8_t> expectedOutputValues = { 12, 15, 4, 11 };
+
+ Pooling2dTest<uint8_t>(tflite::BuiltinOperator_MAX_POOL_2D,
+ ::tflite::TensorType_UINT8,
+ backends,
+ inputShape,
+ outputShape,
+ inputValues,
+ expectedOutputValues,
+ ::tflite::Padding_SAME,
+ 2,
+ 2,
+ 2,
+ 2,
+ tflite::ActivationFunctionType_NONE,
+ 2.5f,
+ 1);
+}
+
+void MaxPool2dUint8ReluTest(std::vector<armnn::BackendId>& backends)
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc,
- armnn::Compute::CpuRef };
- MaxPool2dFP32PaddingValidTest(backends);
+ // Set input data
+ std::vector<int32_t> inputShape { 1, 3, 4, 1 };
+ std::vector<int32_t> outputShape { 1, 2, 3, 1 };
+
+ std::vector<uint8_t> inputValues = { 12, 8, 10, 15,
+ 8, 5, 7, 2,
+ 3, 4, 1, 11 };
+
+ std::vector<uint8_t> expectedOutputValues = { 12, 10, 15, 8, 7, 11 };
+
+ Pooling2dTest<uint8_t>(tflite::BuiltinOperator_MAX_POOL_2D,
+ ::tflite::TensorType_UINT8,
+ backends,
+ inputShape,
+ outputShape,
+ inputValues,
+ expectedOutputValues,
+ ::tflite::Padding_VALID,
+ 1,
+ 1,
+ 2,
+ 2,
+ ::tflite::ActivationFunctionType_RELU,
+ 2.0f,
+ 1);
}
-TEST_CASE ("MaxPooling2d_FP32_PaddingValid_CpuAcc_Test")
+void MaxPool2dInt16PaddingSameTest(std::vector<armnn::BackendId>& backends)
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc,
- armnn::Compute::CpuRef };
- MaxPool2dFP32PaddingValidTest(backends);
+ // Set input data
+ std::vector<int32_t> inputShape { 1, 3, 4, 1 };
+ std::vector<int32_t> outputShape { 1, 2, 2, 1 };
+
+ std::vector<int16_t> inputValues = { -5, 8, -10, 7,
+ 8, 12, -15, 2,
+ 3, -4, -1, -11 };
+
+ std::vector<int16_t> expectedOutputValues = { 12, 7, 3, -1 };
+
+ Pooling2dTest<int16_t>(tflite::BuiltinOperator_MAX_POOL_2D,
+ ::tflite::TensorType_INT16,
+ backends,
+ inputShape,
+ outputShape,
+ inputValues,
+ expectedOutputValues,
+ ::tflite::Padding_SAME,
+ 2,
+ 2,
+ 2,
+ 2,
+ tflite::ActivationFunctionType_NONE,
+ 2.5f,
+ 0);
}
-TEST_CASE ("MaxPooling2d_Int8_PaddingValid_GpuAcc_Test")
+void MaxPool2dInt16ReluTest(std::vector<armnn::BackendId>& backends)
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc,
- armnn::Compute::CpuRef };
- MaxPool2dInt8PaddingValidTest(backends);
+ // Set input data
+ std::vector<int32_t> inputShape { 1, 3, 4, 1 };
+ std::vector<int32_t> outputShape { 1, 2, 3, 1 };
+
+ std::vector<int16_t> inputValues = { -5, -8, -10, 7,
+ -8, -12, -15, 2,
+ 3, -4, -1, -11 };
+
+ std::vector<int16_t> expectedOutputValues = { 0, 0, 7, 3, 0, 2 };
+
+ Pooling2dTest<int16_t>(tflite::BuiltinOperator_MAX_POOL_2D,
+ ::tflite::TensorType_INT16,
+ backends,
+ inputShape,
+ outputShape,
+ inputValues,
+ expectedOutputValues,
+ ::tflite::Padding_VALID,
+ 1,
+ 1,
+ 2,
+ 2,
+ ::tflite::ActivationFunctionType_RELU,
+ 2.0f,
+ 0);
}
-TEST_CASE ("MaxPooling2d_Int8_PaddingValid_CpuAcc_Test")
+void AveragePool2dFP32PaddingValidTest(std::vector<armnn::BackendId>& backends)
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc,
- armnn::Compute::CpuRef };
- MaxPool2dInt8PaddingValidTest(backends);
+ // Set input data
+ std::vector<int32_t> inputShape { 1, 3, 4, 1 };
+ std::vector<int32_t> outputShape { 1, 1, 2, 1 };
+
+ std::vector<float> inputValues = { -5.0f, 8.0f, -10.0f, 7.0f,
+ 8.0f, 12.0f, -15.0f, 2.0f,
+ 3.0f, -4.0f, -1.0f, -11.0f };
+
+ std::vector<float> expectedOutputValues = { 5.75f, -4.0f };
+
+ Pooling2dTest<float>(tflite::BuiltinOperator_AVERAGE_POOL_2D,
+ ::tflite::TensorType_FLOAT32,
+ backends,
+ inputShape,
+ outputShape,
+ inputValues,
+ expectedOutputValues,
+ ::tflite::Padding_VALID,
+ 2,
+ 2,
+ 2,
+ 2);
}
-TEST_CASE ("MaxPooling2d_FP32_PaddingSame_GpuAcc_Test")
+void AveragePool2dInt8PaddingValidTest(std::vector<armnn::BackendId>& backends)
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc,
- armnn::Compute::CpuRef };
- MaxPool2dFP32PaddingSameTest(backends);
+ // Set input data
+ std::vector<int32_t> inputShape { 1, 3, 4, 1 };
+ std::vector<int32_t> outputShape { 1, 1, 2, 1 };
+
+ std::vector<int8_t > inputValues = { -5, 8, -10, 7,
+ 8, 12, -15, 2,
+ 3, -4, -1, -11 };
+
+ std::vector<int8_t> expectedOutputValues = { 6, -4 };
+
+ Pooling2dTest<int8_t>(tflite::BuiltinOperator_AVERAGE_POOL_2D,
+ ::tflite::TensorType_INT8,
+ backends,
+ inputShape,
+ outputShape,
+ inputValues,
+ expectedOutputValues,
+ ::tflite::Padding_VALID,
+ 2,
+ 2,
+ 2,
+ 2,
+ tflite::ActivationFunctionType_NONE,
+ 2.5f,
+ 1);
}
-TEST_CASE ("MaxPooling2d_FP32_PaddingSame_CpuAcc_Test")
+void AveragePool2dFP32PaddingSameTest(std::vector<armnn::BackendId>& backends)
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc,
- armnn::Compute::CpuRef };
- MaxPool2dFP32PaddingSameTest(backends);
+ // Set input data
+ std::vector<int32_t> inputShape { 1, 3, 4, 1 };
+ std::vector<int32_t> outputShape { 1, 2, 2, 1 };
+
+ std::vector<float> inputValues = { -5.0f, 8.0f, -10.0f, 7.0f,
+ 8.0f, 12.0f, -15.0f, 2.0f,
+ 3.0f, -4.0f, -1.0f, -11.0f };
+
+ std::vector<float> expectedOutputValues = { 5.75f, -4.0f, -0.5f, -6.0f };
+
+ Pooling2dTest<float>(tflite::BuiltinOperator_AVERAGE_POOL_2D,
+ ::tflite::TensorType_FLOAT32,
+ backends,
+ inputShape,
+ outputShape,
+ inputValues,
+ expectedOutputValues,
+ ::tflite::Padding_SAME,
+ 2,
+ 2,
+ 2,
+ 2);
}
-TEST_CASE ("MaxPooling2d_Int8_PaddingSame_GpuAcc_Test")
+void AveragePool2dInt8PaddingSameTest(std::vector<armnn::BackendId>& backends)
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc,
- armnn::Compute::CpuRef };
- MaxPool2dInt8PaddingSameTest(backends);
+ // Set input data
+ std::vector<int32_t> inputShape { 1, 3, 4, 1 };
+ std::vector<int32_t> outputShape { 1, 2, 2, 1 };
+
+ std::vector<int8_t > inputValues = { -5, 8, -10, 7,
+ 8, 12, -15, 2,
+ 3, -4, -1, -11 };
+
+ std::vector<int8_t> expectedOutputValues = { 6, -4, -1, -6 };
+
+ Pooling2dTest<int8_t>(tflite::BuiltinOperator_AVERAGE_POOL_2D,
+ ::tflite::TensorType_INT8,
+ backends,
+ inputShape,
+ outputShape,
+ inputValues,
+ expectedOutputValues,
+ ::tflite::Padding_SAME,
+ 2,
+ 2,
+ 2,
+ 2,
+ tflite::ActivationFunctionType_NONE,
+ 2.5f,
+ 1);
}
-TEST_CASE ("MaxPooling2d_Int8_PaddingSame_CpuAcc_Test")
+void AveragePool2dFP32ReluTest(std::vector<armnn::BackendId>& backends)
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc,
- armnn::Compute::CpuRef };
- MaxPool2dInt8PaddingSameTest(backends);
+ // Set input data
+ std::vector<int32_t> inputShape { 1, 3, 4, 1 };
+ std::vector<int32_t> outputShape { 1, 2, 3, 1 };
+
+ std::vector<float> inputValues = { -5.0f, 8.0f, -10.0f, 7.0f,
+ -8.0f, 12.0f, -15.0f, 2.0f,
+ 3.0f, -4.0f, -1.0f, 11.0f };
+
+ std::vector<float> expectedOutputValues = { 1.75f, 0.0f, 0.0f, 0.75f, 0.0f, 0.0f };
+
+ Pooling2dTest<float>(tflite::BuiltinOperator_AVERAGE_POOL_2D,
+ ::tflite::TensorType_FLOAT32,
+ backends,
+ inputShape,
+ outputShape,
+ inputValues,
+ expectedOutputValues,
+ ::tflite::Padding_VALID,
+ 1,
+ 1,
+ 2,
+ 2,
+ ::tflite::ActivationFunctionType_RELU);
}
-TEST_CASE ("MaxPooling2d_FP32_Relu_GpuAcc_Test")
+void AveragePool2dInt8ReluTest(std::vector<armnn::BackendId>& backends)
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc,
- armnn::Compute::CpuRef };
- MaxPool2dFP32ReluTest(backends);
+ // Set input data
+ std::vector<int32_t> inputShape { 1, 3, 4, 1 };
+ std::vector<int32_t> outputShape { 1, 2, 3, 1 };
+
+ std::vector<int8_t> inputValues = { -5, 8, -10, 7,
+ -8, 12, -15, 2,
+ 3, -4, -1, 11 };
+
+ std::vector<int8_t> expectedOutputValues = { 2, 1, 1, 1, 1, 1 };
+
+ Pooling2dTest<int8_t>(tflite::BuiltinOperator_AVERAGE_POOL_2D,
+ ::tflite::TensorType_INT8,
+ backends,
+ inputShape,
+ outputShape,
+ inputValues,
+ expectedOutputValues,
+ ::tflite::Padding_VALID,
+ 1,
+ 1,
+ 2,
+ 2,
+ ::tflite::ActivationFunctionType_RELU,
+ 2.5f,
+ 1);
}
-TEST_CASE ("MaxPooling2d_FP32_Relu_CpuAcc_Test")
+void AveragePool2dFP32Relu6Test(std::vector<armnn::BackendId>& backends)
+{
+ // Set input data
+ std::vector<int32_t> inputShape { 1, 3, 4, 1 };
+ std::vector<int32_t> outputShape { 1, 2, 2, 1 };
+
+ std::vector<float> inputValues = { -5.0f, 8.0f, -10.0f, 7.0f,
+ -8.0f, 12.0f, -15.0f, 2.0f,
+ 3.0f, -4.0f, -1.0f, 11.0f };
+
+ std::vector<float> expectedOutputValues = { 0.0f, 0.0f, 3.0f, 0.0f };
+
+ Pooling2dTest<float>(tflite::BuiltinOperator_AVERAGE_POOL_2D,
+ ::tflite::TensorType_FLOAT32,
+ backends,
+ inputShape,
+ outputShape,
+ inputValues,
+ expectedOutputValues,
+ ::tflite::Padding_SAME,
+ 2,
+ 2,
+ 1,
+ 1,
+ ::tflite::ActivationFunctionType_RELU6);
+}
+
+void AveragePool2dInt8Relu6Test(std::vector<armnn::BackendId>& backends)
+{
+ // Set input data
+ std::vector<int32_t> inputShape { 1, 3, 4, 1 };
+ std::vector<int32_t> outputShape { 1, 2, 2, 1 };
+
+ std::vector<int8_t> inputValues = { -5, 8, -10, 7,
+ -8, 12, -15, 2,
+ 3, -4, -1, 11 };
+
+ std::vector<int8_t> expectedOutputValues = { 1, 1, 3, 1 };
+
+ Pooling2dTest<int8_t>(tflite::BuiltinOperator_AVERAGE_POOL_2D,
+ ::tflite::TensorType_INT8,
+ backends,
+ inputShape,
+ outputShape,
+ inputValues,
+ expectedOutputValues,
+ ::tflite::Padding_SAME,
+ 2,
+ 2,
+ 1,
+ 1,
+ ::tflite::ActivationFunctionType_RELU6,
+ 2.5f,
+ 1);
+}
+
+void AveragePool2dUint8PaddingSameTest(std::vector<armnn::BackendId>& backends)
+{
+ // Set input data
+ std::vector<int32_t> inputShape { 1, 3, 4, 1 };
+ std::vector<int32_t> outputShape { 1, 2, 2, 1 };
+
+ std::vector<uint8_t> inputValues = { 5, 8, 10, 7,
+ 8, 12, 15, 2,
+ 3, 4, 1, 11 };
+
+ std::vector<uint8_t> expectedOutputValues = { 8, 9, 4, 6 };
+
+ Pooling2dTest<uint8_t>(tflite::BuiltinOperator_AVERAGE_POOL_2D,
+ ::tflite::TensorType_UINT8,
+ backends,
+ inputShape,
+ outputShape,
+ inputValues,
+ expectedOutputValues,
+ ::tflite::Padding_SAME,
+ 2,
+ 2,
+ 2,
+ 2,
+ tflite::ActivationFunctionType_NONE,
+ 2.5f,
+ 1);
+}
+
+void AveragePool2dUint8ReluTest(std::vector<armnn::BackendId>& backends)
+{
+ // Set input data
+ std::vector<int32_t> inputShape { 1, 3, 4, 1 };
+ std::vector<int32_t> outputShape { 1, 2, 3, 1 };
+
+ std::vector<uint8_t> inputValues = { 12, 8, 10, 15,
+ 8, 5, 7, 2,
+ 3, 4, 1, 11 };
+
+ std::vector<uint8_t> expectedOutputValues = { 8, 8, 9, 5, 4, 5 };
+
+ Pooling2dTest<uint8_t>(tflite::BuiltinOperator_AVERAGE_POOL_2D,
+ ::tflite::TensorType_UINT8,
+ backends,
+ inputShape,
+ outputShape,
+ inputValues,
+ expectedOutputValues,
+ ::tflite::Padding_VALID,
+ 1,
+ 1,
+ 2,
+ 2,
+ ::tflite::ActivationFunctionType_RELU,
+ 2.0f,
+ 1);
+}
+
+void AveragePool2dInt16PaddingSameTest(std::vector<armnn::BackendId>& backends)
+{
+ // Set input data
+ std::vector<int32_t> inputShape { 1, 3, 4, 1 };
+ std::vector<int32_t> outputShape { 1, 2, 2, 1 };
+
+ std::vector<int16_t > inputValues = { -5, 8, -10, 7,
+ 8, 12, -15, 2,
+ 3, -4, -1, -11 };
+
+ std::vector<int16_t> expectedOutputValues = { 6, -4, -1, -6 };
+
+ Pooling2dTest<int16_t>(tflite::BuiltinOperator_AVERAGE_POOL_2D,
+ ::tflite::TensorType_INT16,
+ backends,
+ inputShape,
+ outputShape,
+ inputValues,
+ expectedOutputValues,
+ ::tflite::Padding_SAME,
+ 2,
+ 2,
+ 2,
+ 2,
+ tflite::ActivationFunctionType_NONE,
+ 2.5f,
+ 0);
+}
+
+void AveragePool2dInt16ReluTest(std::vector<armnn::BackendId>& backends)
+{
+ // Set input data
+ std::vector<int32_t> inputShape { 1, 3, 4, 1 };
+ std::vector<int32_t> outputShape { 1, 2, 3, 1 };
+
+ std::vector<int16_t> inputValues = { -5, 8, -10, 7,
+ -8, 12, -15, 2,
+ 3, -4, -1, 11 };
+
+ std::vector<int16_t> expectedOutputValues = { 2, 0, 0, 1, 0, 0 };
+
+ Pooling2dTest<int16_t>(tflite::BuiltinOperator_AVERAGE_POOL_2D,
+ ::tflite::TensorType_INT16,
+ backends,
+ inputShape,
+ outputShape,
+ inputValues,
+ expectedOutputValues,
+ ::tflite::Padding_VALID,
+ 1,
+ 1,
+ 2,
+ 2,
+ ::tflite::ActivationFunctionType_RELU,
+ 2.5f,
+ 0);
+}
+
+void L2Pool2dFP32PaddingValidTest(std::vector<armnn::BackendId>& backends)
+{
+ // Set input data
+ std::vector<int32_t> inputShape { 1, 3, 4, 1 };
+ std::vector<int32_t> outputShape { 1, 1, 2, 1 };
+
+ std::vector<float> inputValues = { -5.0f, 8.0f, -10.0f, 7.0f,
+ 8.0f, 12.0f, -15.0f, 2.0f,
+ 3.0f, -4.0f, -1.0f, -11.0f };
+
+ std::vector<float> expectedOutputValues = { 8.616844f, 9.721111f };
+
+ Pooling2dTest<float>(tflite::BuiltinOperator_L2_POOL_2D,
+ ::tflite::TensorType_FLOAT32,
+ backends,
+ inputShape,
+ outputShape,
+ inputValues,
+ expectedOutputValues,
+ ::tflite::Padding_VALID,
+ 2,
+ 2,
+ 2,
+ 2);
+}
+
+void L2Pool2dFP32PaddingSameTest(std::vector<armnn::BackendId>& backends)
+{
+ // Set input data
+ std::vector<int32_t> inputShape { 1, 3, 4, 1 };
+ std::vector<int32_t> outputShape { 1, 2, 2, 1 };
+
+ std::vector<float> inputValues = { -5.0f, 8.0f, -10.0f, 7.0f,
+ 8.0f, 12.0f, -15.0f, 2.0f,
+ 3.0f, -4.0f, -1.0f, -11.0f };
+
+ std::vector<float> expectedOutputValues = { 8.616844f, 9.721111f, 3.535534f, 7.81025f };
+
+ Pooling2dTest<float>(tflite::BuiltinOperator_L2_POOL_2D,
+ ::tflite::TensorType_FLOAT32,
+ backends,
+ inputShape,
+ outputShape,
+ inputValues,
+ expectedOutputValues,
+ ::tflite::Padding_SAME,
+ 2,
+ 2,
+ 2,
+ 2);
+}
+
+void L2Pool2dFP32ReluTest(std::vector<armnn::BackendId>& backends)
+{
+ // Set input data
+ std::vector<int32_t> inputShape { 1, 3, 4, 1 };
+ std::vector<int32_t> outputShape { 1, 2, 3, 1 };
+
+ std::vector<float> inputValues = { -5.0f, 8.0f, -10.0f, 7.0f,
+ -8.0f, 12.0f, -15.0f, 2.0f,
+ 3.0f, -4.0f, -1.0f, 11.0f };
+
+ std::vector<float> expectedOutputValues = { 8.616844f, 11.543396f, 9.721111f, 7.632169f, 9.8234415f, 9.367497f };
+
+ Pooling2dTest<float>(tflite::BuiltinOperator_L2_POOL_2D,
+ ::tflite::TensorType_FLOAT32,
+ backends,
+ inputShape,
+ outputShape,
+ inputValues,
+ expectedOutputValues,
+ ::tflite::Padding_VALID,
+ 1,
+ 1,
+ 2,
+ 2,
+ ::tflite::ActivationFunctionType_RELU);
+}
+
+void L2Pool2dFP32Relu6Test(std::vector<armnn::BackendId>& backends)
+{
+ // Set input data
+ std::vector<int32_t> inputShape { 1, 3, 4, 1 };
+ std::vector<int32_t> outputShape { 1, 2, 2, 1 };
+
+ std::vector<float> inputValues = { -5.0f, 8.0f, -10.0f, 7.0f,
+ -8.0f, 12.0f, -15.0f, 2.0f,
+ 3.0f, -4.0f, -1.0f, 11.0f };
+
+ std::vector<float> expectedOutputValues = { 5.0f, 6.0f, 3.0f, 1.0f };
+
+ Pooling2dTest<float>(tflite::BuiltinOperator_L2_POOL_2D,
+ ::tflite::TensorType_FLOAT32,
+ backends,
+ inputShape,
+ outputShape,
+ inputValues,
+ expectedOutputValues,
+ ::tflite::Padding_SAME,
+ 2,
+ 2,
+ 1,
+ 1,
+ ::tflite::ActivationFunctionType_RELU6);
+}
+
+TEST_SUITE("Pooling2d_GpuAccTests")
+{
+
+TEST_CASE ("MaxPooling2d_FP32_PaddingValid_GpuAcc_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+ MaxPool2dFP32PaddingValidTest(backends);
+}
+
+TEST_CASE ("MaxPooling2d_Int8_PaddingValid_GpuAcc_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+ MaxPool2dInt8PaddingValidTest(backends);
+}
+
+TEST_CASE ("MaxPooling2d_FP32_PaddingSame_GpuAcc_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+ MaxPool2dFP32PaddingSameTest(backends);
+}
+
+TEST_CASE ("MaxPooling2d_Int8_PaddingSame_GpuAcc_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc,
- armnn::Compute::CpuRef };
+ std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+ MaxPool2dInt8PaddingSameTest(backends);
+}
+
+TEST_CASE ("MaxPooling2d_FP32_Relu_GpuAcc_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
MaxPool2dFP32ReluTest(backends);
}
TEST_CASE ("MaxPooling2d_Int8_Relu_GpuAcc_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc,
- armnn::Compute::CpuRef };
+ std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
MaxPool2dInt8ReluTest(backends);
}
+TEST_CASE ("MaxPooling2d_FP32_Relu6_GpuAcc_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+ MaxPool2dFP32Relu6Test(backends);
+}
+
+TEST_CASE ("MaxPooling2d_Int8_Relu6_GpuAcc_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+ MaxPool2dInt8Relu6Test(backends);
+}
+
+TEST_CASE ("MaxPooling2d_Uint8_PaddingSame_GpuAcc_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+ MaxPool2dUint8PaddingSameTest(backends);
+}
+
+TEST_CASE ("MaxPooling2d_Uint8_Relu_GpuAcc_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+ MaxPool2dUint8ReluTest(backends);
+}
+
+TEST_CASE ("MaxPooling2d_Int16_PaddingSame_GpuAcc_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+ MaxPool2dInt16PaddingSameTest(backends);
+}
+
+TEST_CASE ("MaxPooling2d_Int16_Relu_GpuAcc_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+ MaxPool2dInt16ReluTest(backends);
+}
+
+TEST_CASE ("AveragePooling2d_FP32_PaddingValid_GpuAcc_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+ AveragePool2dFP32PaddingValidTest(backends);
+}
+
+TEST_CASE ("AveragePooling2d_Int8_PaddingValid_GpuAcc_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+ AveragePool2dInt8PaddingValidTest(backends);
+}
+
+TEST_CASE ("AveragePooling2d_FP32_PaddingSame_GpuAcc_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+ AveragePool2dFP32PaddingSameTest(backends);
+}
+
+TEST_CASE ("AveragePooling2d_Int8_PaddingSame_GpuAcc_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+ AveragePool2dInt8PaddingSameTest(backends);
+}
+
+TEST_CASE ("AveragePooling2d_FP32_Relu_GpuAcc_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+ AveragePool2dFP32ReluTest(backends);
+}
+
+TEST_CASE ("AveragePooling2d_FP32_Relu6_GpuAcc_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+ AveragePool2dFP32Relu6Test(backends);
+}
+
+TEST_CASE ("AveragePooling2d_Int8_Relu_GpuAcc_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+ AveragePool2dInt8ReluTest(backends);
+}
+
+TEST_CASE ("AveragePooling2d_Int8_Relu6_GpuAcc_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+ AveragePool2dInt8Relu6Test(backends);
+}
+
+TEST_CASE ("AveragePooling2d_Uint8_PaddingSame_GpuAcc_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+ AveragePool2dUint8PaddingSameTest(backends);
+}
+
+TEST_CASE ("AveragePooling2d_Uint8_Relu_GpuAcc_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+ AveragePool2dUint8ReluTest(backends);
+}
+
+TEST_CASE ("AveragePooling2d_Int16_PaddingSame_GpuAcc_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+ AveragePool2dInt16PaddingSameTest(backends);
+}
+
+TEST_CASE ("AveragePooling2d_Int16_Relu_GpuAcc_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+ AveragePool2dInt16ReluTest(backends);
+}
+
+TEST_CASE ("L2Pooling2d_FP32_PaddingValid_GpuAcc_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+ L2Pool2dFP32PaddingValidTest(backends);
+}
+
+TEST_CASE ("L2Pooling2d_FP32_PaddingSame_GpuAcc_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+ L2Pool2dFP32PaddingSameTest(backends);
+}
+
+TEST_CASE ("L2Pooling2d_FP32_Relu_GpuAcc_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+ L2Pool2dFP32ReluTest(backends);
+}
+
+TEST_CASE ("L2Pooling2d_FP32_Relu6_GpuAcc_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+ L2Pool2dFP32Relu6Test(backends);
+}
+
+} // TEST_SUITE("Pooling2d_GpuAccTests")
+
+TEST_SUITE("Pooling2d_CpuAccTests")
+{
+
+TEST_CASE ("MaxPooling2d_FP32_PaddingValid_CpuAcc_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+ MaxPool2dFP32PaddingValidTest(backends);
+}
+
+TEST_CASE ("MaxPooling2d_Int8_PaddingValid_CpuAcc_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+ MaxPool2dInt8PaddingValidTest(backends);
+}
+
+TEST_CASE ("MaxPooling2d_FP32_PaddingSame_CpuAcc_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+ MaxPool2dFP32PaddingSameTest(backends);
+}
+
+TEST_CASE ("MaxPooling2d_Int8_PaddingSame_CpuAcc_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+ MaxPool2dInt8PaddingSameTest(backends);
+}
+
+TEST_CASE ("MaxPooling2d_FP32_Relu_CpuAcc_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+ MaxPool2dFP32ReluTest(backends);
+}
+
TEST_CASE ("MaxPooling2d_Int8_Relu_CpuAcc_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc,
- armnn::Compute::CpuRef };
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
MaxPool2dInt8ReluTest(backends);
}
-TEST_CASE ("MaxPooling2d_FP32_Relu6_GpuAcc_Test")
+TEST_CASE ("MaxPooling2d_FP32_Relu6_CpuAcc_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc,
- armnn::Compute::CpuRef };
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
MaxPool2dFP32Relu6Test(backends);
}
-TEST_CASE ("MaxPooling2d_FP32_Relu6_CpuAcc_Test")
+TEST_CASE ("MaxPooling2d_Int8_Relu6_CpuAcc_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+ MaxPool2dInt8Relu6Test(backends);
+}
+
+TEST_CASE ("MaxPooling2d_Uint8_PaddingSame_CpuAcc_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+ MaxPool2dUint8PaddingSameTest(backends);
+}
+
+TEST_CASE ("MaxPooling2d_Uint8_Relu_CpuAcc_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+ MaxPool2dUint8ReluTest(backends);
+}
+
+TEST_CASE ("MaxPooling2d_Int16_PaddingSame_CpuAcc_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+ MaxPool2dInt16PaddingSameTest(backends);
+}
+
+TEST_CASE ("MaxPooling2d_Int16_Relu_CpuAcc_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+ MaxPool2dInt16ReluTest(backends);
+}
+
+TEST_CASE ("AveragePooling2d_FP32_PaddingValid_CpuAcc_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+ AveragePool2dFP32PaddingValidTest(backends);
+}
+
+TEST_CASE ("AveragePooling2d_Int8_PaddingValid_CpuAcc_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+ AveragePool2dInt8PaddingValidTest(backends);
+}
+
+TEST_CASE ("AveragePooling2d_FP32_PaddingSame_CpuAcc_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+ AveragePool2dFP32PaddingSameTest(backends);
+}
+
+TEST_CASE ("AveragePooling2d_Int8_PaddingSame_CpuAcc_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+ AveragePool2dInt8PaddingSameTest(backends);
+}
+
+TEST_CASE ("AveragePooling2d_FP32_Relu_CpuAcc_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+ AveragePool2dFP32ReluTest(backends);
+}
+
+TEST_CASE ("AveragePooling2d_FP32_Relu6_CpuAcc_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+ AveragePool2dFP32Relu6Test(backends);
+}
+
+TEST_CASE ("AveragePooling2d_Int8_Relu_CpuAcc_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+ AveragePool2dInt8ReluTest(backends);
+}
+
+TEST_CASE ("AveragePooling2d_Int8_Relu6_CpuAcc_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+ AveragePool2dInt8Relu6Test(backends);
+}
+
+TEST_CASE ("AveragePooling2d_Uint8_PaddingSame_CpuAcc_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+ AveragePool2dUint8PaddingSameTest(backends);
+}
+
+TEST_CASE ("AveragePooling2d_Uint8_Relu_CpuAcc_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+ AveragePool2dUint8ReluTest(backends);
+}
+
+TEST_CASE ("AveragePooling2d_Int16_PaddingSame_CpuAcc_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+ AveragePool2dInt16PaddingSameTest(backends);
+}
+
+TEST_CASE ("AveragePooling2d_Int16_Relu_CpuAcc_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+ AveragePool2dInt16ReluTest(backends);
+}
+
+TEST_CASE ("L2Pooling2d_FP32_PaddingValid_CpuAcc_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+ L2Pool2dFP32PaddingValidTest(backends);
+}
+
+TEST_CASE ("L2Pooling2d_FP32_PaddingSame_CpuAcc_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+ L2Pool2dFP32PaddingSameTest(backends);
+}
+
+TEST_CASE ("L2Pooling2d_FP32_Relu_CpuAcc_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+ L2Pool2dFP32ReluTest(backends);
+}
+
+TEST_CASE ("L2Pooling2d_FP32_Relu6_CpuAcc_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+ L2Pool2dFP32Relu6Test(backends);
+}
+
+} // TEST_SUITE("Pooling2d_CpuAccTests")
+
+TEST_SUITE("Pooling2d_CpuRefTests")
+{
+
+TEST_CASE ("MaxPooling2d_FP32_PaddingValid_CpuRef_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+ MaxPool2dFP32PaddingValidTest(backends);
+}
+
+TEST_CASE ("MaxPooling2d_Int8_PaddingValid_CpuRef_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+ MaxPool2dInt8PaddingValidTest(backends);
+}
+
+TEST_CASE ("MaxPooling2d_FP32_PaddingSame_CpuRef_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+ MaxPool2dFP32PaddingSameTest(backends);
+}
+
+TEST_CASE ("MaxPooling2d_Int8_PaddingSame_CpuRef_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+ MaxPool2dInt8PaddingSameTest(backends);
+}
+
+TEST_CASE ("MaxPooling2d_FP32_Relu_CpuRef_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+ MaxPool2dFP32ReluTest(backends);
+}
+
+TEST_CASE ("MaxPooling2d_Int8_Relu_CpuRef_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc,
- armnn::Compute::CpuRef };
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+ MaxPool2dInt8ReluTest(backends);
+}
+
+TEST_CASE ("MaxPooling2d_FP32_Relu6_CpuRef_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
MaxPool2dFP32Relu6Test(backends);
}
-TEST_CASE ("MaxPooling2d_Int8_Relu6_GpuAcc_Test")
+TEST_CASE ("MaxPooling2d_Int8_Relu6_CpuRef_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc,
- armnn::Compute::CpuRef };
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
MaxPool2dInt8Relu6Test(backends);
}
-TEST_CASE ("MaxPooling2d_Int8_Relu6_CpuAcc_Test")
+TEST_CASE ("MaxPooling2d_Uint8_PaddingSame_CpuRef_Test")
{
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc,
- armnn::Compute::CpuRef };
- MaxPool2dInt8Relu6Test(backends);
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+ MaxPool2dUint8PaddingSameTest(backends);
+}
+
+TEST_CASE ("MaxPooling2d_Uint8_Relu_CpuRef_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+ MaxPool2dUint8ReluTest(backends);
+}
+
+TEST_CASE ("MaxPooling2d_Int16_PaddingSame_CpuRef_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+ MaxPool2dInt16PaddingSameTest(backends);
+}
+
+TEST_CASE ("MaxPooling2d_Int16_Relu_CpuRef_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+ MaxPool2dInt16ReluTest(backends);
+}
+
+TEST_CASE ("AveragePooling2d_FP32_PaddingValid_CpuRef_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+ AveragePool2dFP32PaddingValidTest(backends);
+}
+
+TEST_CASE ("AveragePooling2d_Int8_PaddingValid_CpuRef_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+ AveragePool2dInt8PaddingValidTest(backends);
+}
+
+TEST_CASE ("AveragePooling2d_FP32_PaddingSame_CpuRef_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+ AveragePool2dFP32PaddingSameTest(backends);
+}
+
+TEST_CASE ("AveragePooling2d_Int8_PaddingSame_CpuRef_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+ AveragePool2dInt8PaddingSameTest(backends);
+}
+
+TEST_CASE ("AveragePooling2d_FP32_Relu_CpuRef_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+ AveragePool2dFP32ReluTest(backends);
+}
+
+TEST_CASE ("AveragePooling2d_FP32_Relu6_CpuRef_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+ AveragePool2dFP32Relu6Test(backends);
}
+TEST_CASE ("AveragePooling2d_Int8_Relu_CpuRef_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+ AveragePool2dInt8ReluTest(backends);
+}
+
+TEST_CASE ("AveragePooling2d_Int8_Relu6_CpuRef_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+ AveragePool2dInt8Relu6Test(backends);
+}
+
+TEST_CASE ("AveragePooling2d_Uint8_PaddingSame_CpuRef_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+ AveragePool2dUint8PaddingSameTest(backends);
+}
+
+TEST_CASE ("AveragePooling2d_Uint8_Relu_CpuRef_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+ AveragePool2dUint8ReluTest(backends);
}
+TEST_CASE ("AveragePooling2d_Int16_PaddingSame_CpuRef_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+ AveragePool2dInt16PaddingSameTest(backends);
+}
+
+TEST_CASE ("AveragePooling2d_Int16_Relu_CpuRef_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+ AveragePool2dInt16ReluTest(backends);
+}
+
+TEST_CASE ("L2Pooling2d_FP32_PaddingValid_CpuRef_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+ L2Pool2dFP32PaddingValidTest(backends);
+}
+
+TEST_CASE ("L2Pooling2d_FP32_PaddingSame_CpuRef_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+ L2Pool2dFP32PaddingSameTest(backends);
+}
+
+TEST_CASE ("L2Pooling2d_FP32_Relu_CpuRef_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+ L2Pool2dFP32ReluTest(backends);
+}
+
+TEST_CASE ("L2Pooling2d_FP32_Relu6_CpuRef_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+ L2Pool2dFP32Relu6Test(backends);
+}
+
+} // TEST_SUITE("Pooling2d_CpuRefTests")
+
 } // namespace armnnDelegate
\ No newline at end of file