//
// Copyright © 2022-2024 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include <backendsCommon/test/EndToEndTestImpl.hpp>
#include <CommonTestUtils.hpp>
#include <ResolveType.hpp>
#include <Half.hpp>
#include <doctest/doctest.h>

namespace
{

using namespace armnn;

template<armnn::DataType DataType>
armnn::INetworkPtr CreatePooling2dNetwork(const armnn::TensorShape& inputShape,
                                          const armnn::TensorShape& outputShape,
                                          PaddingMethod padMethod = PaddingMethod::Exclude,
                                          PoolingAlgorithm poolAlg = PoolingAlgorithm::Max,
                                          const float qScale = 1.0f,
                                          const int32_t qOffset = 0)
{
    INetworkPtr network(INetwork::Create());

    TensorInfo inputTensorInfo(inputShape, DataType, qScale, qOffset, true);
    TensorInfo outputTensorInfo(outputShape, DataType, qScale, qOffset, true);

    // 3x3 pooling window with stride 1 and 1-element padding on every edge,
    // so the output keeps the same spatial dimensions as the input.
    Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = poolAlg;
    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
    descriptor.m_StrideX = descriptor.m_StrideY = 1;
    descriptor.m_PadLeft = 1;
    descriptor.m_PadRight = 1;
    descriptor.m_PadTop = 1;
    descriptor.m_PadBottom = 1;
    descriptor.m_PaddingMethod = padMethod;
    descriptor.m_DataLayout = DataLayout::NHWC;

    IConnectableLayer* pool = network->AddPooling2dLayer(descriptor, "pool");
    IConnectableLayer* input = network->AddInputLayer(0, "input");
    IConnectableLayer* output = network->AddOutputLayer(0, "output");

    Connect(input, pool, inputTensorInfo, 0, 0);
    Connect(pool, output, outputTensorInfo, 0, 0);

    return network;
}

template<armnn::DataType DataType, typename T = armnn::ResolveType<DataType>>
void MaxPool2dEndToEnd(const std::vector<BackendId>& backends,
                       PaddingMethod padMethod = PaddingMethod::Exclude)
{
    const TensorShape& inputShape  = { 1, 3, 3, 1 };
    const TensorShape& outputShape = { 1, 3, 3, 1 };

    INetworkPtr network = CreatePooling2dNetwork<DataType>(inputShape, outputShape, padMethod);
    CHECK(network);

    std::vector<T> inputData{ 1, 2, 3,
                              4, 5, 6,
                              7, 8, 9 };
    std::vector<T> expectedOutput{ 5, 6, 6,
                                   8, 9, 9,
                                   8, 9, 9 };

    std::map<int, std::vector<T>> inputTensorData    = { { 0, inputData } };
    std::map<int, std::vector<T>> expectedOutputData = { { 0, expectedOutput } };

    EndToEndLayerTestImpl<DataType, DataType>(std::move(network), inputTensorData, expectedOutputData, backends);
}

template<armnn::DataType DataType>
void MaxPool2dEndToEndFloat16(const std::vector<BackendId>& backends,
                              PaddingMethod padMethod = PaddingMethod::Exclude)
{
    using namespace half_float::literal;
    using Half = half_float::half;

    const TensorShape& inputShape  = { 1, 3, 3, 1 };
    const TensorShape& outputShape = { 1, 3, 3, 1 };

    INetworkPtr network = CreatePooling2dNetwork<DataType>(inputShape, outputShape, padMethod);
    CHECK(network);

    std::vector<Half> inputData{ 1._h, 2._h, 3._h,
                                 4._h, 5._h, 6._h,
                                 7._h, 8._h, 9._h };
    std::vector<Half> expectedOutput{ 5._h, 6._h, 6._h,
                                      8._h, 9._h, 9._h,
                                      8._h, 9._h, 9._h };

    std::map<int, std::vector<Half>> inputTensorData    = { { 0, inputData } };
    std::map<int, std::vector<Half>> expectedOutputData = { { 0, expectedOutput } };

    EndToEndLayerTestImpl<DataType, DataType>(std::move(network), inputTensorData, expectedOutputData, backends);
}

template<armnn::DataType DataType, typename T = armnn::ResolveType<DataType>>
void AvgPool2dEndToEnd(const std::vector<BackendId>& backends,
                       PaddingMethod padMethod = PaddingMethod::Exclude)
{
    const TensorShape& inputShape  = { 1, 3, 3, 1 };
    const TensorShape& outputShape = { 1, 3, 3, 1 };

    INetworkPtr network = CreatePooling2dNetwork<DataType>(
        inputShape, outputShape, padMethod, PoolingAlgorithm::Average);
    CHECK(network);

    std::vector<T> inputData{ 1, 2, 3,
                              4, 5, 6,
                              7, 8, 9 };
    std::vector<T> expectedOutput;
    if (padMethod == PaddingMethod::Exclude)
    {
        // Exclude: padded elements do not contribute to the divisor,
        // e.g. the top-left window averages (1 + 2 + 4 + 5) / 4 = 3.
        expectedOutput = { 3.f , 3.5f, 4.f ,
                           4.5f, 5.f , 5.5f,
                           6.f , 6.5f, 7.f };
    }
    else
    {
        // IgnoreValue: padded zeros count towards the divisor of 9,
        // e.g. the top-left window averages (1 + 2 + 4 + 5) / 9 = 1.33333.
        expectedOutput = { 1.33333f, 2.33333f, 1.77778f,
                           3.f     , 5.f     , 3.66667f,
                           2.66667f, 4.33333f, 3.11111f };
    }

    std::map<int, std::vector<T>> inputTensorData    = { { 0, inputData } };
    std::map<int, std::vector<T>> expectedOutputData = { { 0, expectedOutput } };

    EndToEndLayerTestImpl<DataType, DataType>(std::move(network),
                                              inputTensorData,
                                              expectedOutputData,
                                              backends,
                                              0.00001f);
}

template<armnn::DataType DataType>
void AvgPool2dEndToEndFloat16(const std::vector<BackendId>& backends,
                              PaddingMethod padMethod = PaddingMethod::Exclude)
{
    using namespace half_float::literal;
    using Half = half_float::half;

    const TensorShape& inputShape  = { 1, 3, 3, 1 };
    const TensorShape& outputShape = { 1, 3, 3, 1 };

    INetworkPtr network = CreatePooling2dNetwork<DataType>(
        inputShape, outputShape, padMethod, PoolingAlgorithm::Average);
    CHECK(network);

    std::vector<Half> inputData{ 1._h, 2._h, 3._h,
                                 4._h, 5._h, 6._h,
                                 7._h, 8._h, 9._h };
    std::vector<Half> expectedOutput;
    if (padMethod == PaddingMethod::Exclude)
    {
        expectedOutput = { 3._h , 3.5_h, 4._h ,
                           4.5_h, 5._h , 5.5_h,
                           6._h , 6.5_h, 7._h };
    }
    else
    {
        expectedOutput = { 1.33333_h, 2.33333_h, 1.77778_h,
                           3._h     , 5._h     , 3.66667_h,
                           2.66667_h, 4.33333_h, 3.11111_h };
    }

    std::map<int, std::vector<Half>> inputTensorData    = { { 0, inputData } };
    std::map<int, std::vector<Half>> expectedOutputData = { { 0, expectedOutput } };

    EndToEndLayerTestImpl<DataType, DataType>(std::move(network),
                                              inputTensorData,
                                              expectedOutputData,
                                              backends,
                                              0.00001f);
}

template<armnn::DataType DataType>
armnn::INetworkPtr CreateTwoLayerPooling2dNetwork(const armnn::TensorShape& inputShape,
                                                  const armnn::TensorShape& outputShape,
                                                  PaddingMethod padMethod = PaddingMethod::Exclude,
                                                  PoolingAlgorithm poolAlg = PoolingAlgorithm::Max,
                                                  const float qScale = 1.0f,
                                                  const int32_t qOffset = 0)
{
    INetworkPtr network(INetwork::Create());

    TensorInfo inputTensorInfo(inputShape, DataType, qScale, qOffset, true);
    TensorInfo outputTensorInfo(outputShape, DataType, qScale, qOffset, true);

    Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = poolAlg;
    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
    descriptor.m_StrideX = descriptor.m_StrideY = 1;
    descriptor.m_PadLeft = 1;
    descriptor.m_PadRight = 1;
    descriptor.m_PadTop = 1;
    descriptor.m_PadBottom = 1;
    descriptor.m_PaddingMethod = padMethod;
    descriptor.m_DataLayout = DataLayout::NHWC;

    IConnectableLayer* input  = network->AddInputLayer(0, "input");
    IConnectableLayer* pool1  = network->AddPooling2dLayer(descriptor, "pool_1");
    IConnectableLayer* pool2  = network->AddPooling2dLayer(descriptor, "pool_2");
    IConnectableLayer* output = network->AddOutputLayer(0, "output");

    Connect(input, pool1, inputTensorInfo, 0, 0);
    Connect(pool1, pool2, inputTensorInfo, 0, 0);
    Connect(pool2, output, outputTensorInfo, 0, 0);

    return network;
}

template<armnn::DataType DataType>
armnn::INetworkPtr CreateThreeLayerPooling2dNetwork(const armnn::TensorShape& inputShape,
                                                    const armnn::TensorShape& outputShape,
                                                    PaddingMethod padMethod = PaddingMethod::Exclude,
                                                    PoolingAlgorithm poolAlg = PoolingAlgorithm::Max,
                                                    const float qScale = 1.0f,
                                                    const int32_t qOffset = 0)
{
    INetworkPtr network(INetwork::Create());

    TensorInfo inputTensorInfo(inputShape, DataType, qScale, qOffset, true);
    TensorInfo outputTensorInfo(outputShape, DataType, qScale, qOffset, true);

    Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = poolAlg;
    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
    descriptor.m_StrideX = descriptor.m_StrideY = 1;
    descriptor.m_PadLeft = 1;
    descriptor.m_PadRight = 1;
    descriptor.m_PadTop = 1;
    descriptor.m_PadBottom = 1;
    descriptor.m_PaddingMethod = padMethod;
    descriptor.m_DataLayout = DataLayout::NHWC;

    IConnectableLayer* input  = network->AddInputLayer(0, "input");
    IConnectableLayer* pool1  = network->AddPooling2dLayer(descriptor, "pool_1");
    IConnectableLayer* pool2  = network->AddPooling2dLayer(descriptor, "pool_2");
    IConnectableLayer* pool3  = network->AddPooling2dLayer(descriptor, "pool_3");
    IConnectableLayer* output = network->AddOutputLayer(0, "output");

    Connect(input, pool1, inputTensorInfo, 0, 0);
    Connect(pool1, pool2, inputTensorInfo, 0, 0);
    Connect(pool2, pool3, inputTensorInfo, 0, 0);
    Connect(pool3, output, outputTensorInfo, 0, 0);

    return network;
}

template<armnn::DataType DataType, typename T = armnn::ResolveType<DataType>>
void MaxPool2dTwoLayerEndToEnd(const std::vector<BackendId>& backends,
                               PaddingMethod padMethod = PaddingMethod::Exclude)
{
    const TensorShape& inputShape  = { 1, 3, 3, 1 };
    const TensorShape& outputShape = { 1, 3, 3, 1 };

    INetworkPtr network = CreateTwoLayerPooling2dNetwork<DataType>(inputShape, outputShape, padMethod);
    CHECK(network);

    std::vector<T> inputData{ 1, 2, 3,
                              4, 5, 6,
                              7, 8, 9 };
    // Two successive same-size 3x3 max pools saturate every position to the global maximum.
    std::vector<T> expectedOutput{ 9, 9, 9,
                                   9, 9, 9,
                                   9, 9, 9 };

    std::map<int, std::vector<T>> inputTensorData    = { { 0, inputData } };
    std::map<int, std::vector<T>> expectedOutputData = { { 0, expectedOutput } };

    EndToEndLayerTestImpl<DataType, DataType>(std::move(network), inputTensorData, expectedOutputData, backends);
}

template<armnn::DataType DataType, typename T = armnn::ResolveType<DataType>>
void MaxPool2dThreeLayerEndToEnd(const std::vector<BackendId>& backends,
                                 PaddingMethod padMethod = PaddingMethod::Exclude)
{
    const TensorShape& inputShape  = { 1, 3, 3, 1 };
    const TensorShape& outputShape = { 1, 3, 3, 1 };

    INetworkPtr network = CreateThreeLayerPooling2dNetwork<DataType>(inputShape, outputShape, padMethod);
    CHECK(network);

    std::vector<T> inputData{ 1, 2, 3,
                              4, 5, 6,
                              7, 8, 9 };
    std::vector<T> expectedOutput{ 9, 9, 9,
                                   9, 9, 9,
                                   9, 9, 9 };

    std::map<int, std::vector<T>> inputTensorData    = { { 0, inputData } };
    std::map<int, std::vector<T>> expectedOutputData = { { 0, expectedOutput } };

    EndToEndLayerTestImpl<DataType, DataType>(std::move(network), inputTensorData, expectedOutputData, backends);
}

} // anonymous namespace
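
// Usage sketch: these helpers are intended to be driven from a backend's end-to-end test
// suite. The doctest wiring below is illustrative only; the include path, suite name, test
// names and backend id are assumptions, not something defined by this header.
//
//     #include <backendsCommon/test/Pooling2dEndToEndTestImpl.hpp>
//
//     TEST_SUITE("RefEndToEnd")
//     {
//         std::vector<armnn::BackendId> defaultBackends = { armnn::Compute::CpuRef };
//
//         TEST_CASE("RefMaxPool2DEndToEndTest")
//         {
//             MaxPool2dEndToEnd<armnn::DataType::Float32>(defaultBackends);
//         }
//
//         TEST_CASE("RefAvgPool2DIgnoreValueEndToEndTest")
//         {
//             AvgPool2dEndToEnd<armnn::DataType::Float32>(defaultBackends,
//                                                         armnn::PaddingMethod::IgnoreValue);
//         }
//     }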