author    | Cathal Corbett <cathal.corbett@arm.com> | 2022-07-22 16:03:36 +0100
committer | Nikhil Raj <nikhil.raj@arm.com> | 2022-08-05 15:50:57 +0100
commit    | 3883b2776cec33f16f0ea9a2d795de2b7c766df7 (patch)
tree      | 6842e15904037d73426d814d5751945b3d9c2376 /src/armnn/test/optimizations
parent    | 9d63fee68081b65bd72de3a70da76c2696c6c6ed (diff)
download  | armnn-3883b2776cec33f16f0ea9a2d795de2b7c766df7.tar.gz
GitHub #667: Neon fold padding into average pool 2D quantization bug fix.
* Originated from a GitHub issue: https://github.com/ARM-software/armnn/issues/667
* Initially, Arm NN supports the pool 2D operation because there is no padding
on the pool2d layer itself. The Neon failure occurs when a pad layer is followed by
an average pool 2D layer, because the folding optimization moves that padding into
the pooling layer.
* Here we prevent the folding optimization from happening for the above special case
and instead add it as a backend-specific optimization (see the sketch below the sign-off).
Signed-off-by: Cathal Corbett <cathal.corbett@arm.com>
Change-Id: Ia0fd90c3a6b4b9d29c81106f154617d2e893e26b
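The guard at the heart of this change is conceptually small: a Pad layer must not be folded into an average Pooling2d layer when the tensors are quantized, and that decision is instead left to the individual backends. The snippet below is a minimal sketch of such a check; the function name SkipPadFoldingIntoPooling2d and the use of TensorInfo::IsQuantized() as the quantization test are illustrative assumptions, not code from this patch.

// Illustrative sketch only - not the code introduced by this commit.
#include <armnn/Descriptors.hpp>
#include <armnn/Tensor.hpp>

// Decide whether the generic optimizer should skip folding a preceding Pad
// layer into this pooling layer. Quantized average pooling is the case that
// produced wrong results on Neon, so it is left for the backends to handle.
bool SkipPadFoldingIntoPooling2d(const armnn::Pooling2dDescriptor& poolDesc,
                                 const armnn::TensorInfo& poolOutputInfo)
{
    const bool isAveragePool = (poolDesc.m_PoolType == armnn::PoolingAlgorithm::Average);
    const bool isQuantized   = poolOutputInfo.IsQuantized(); // e.g. QAsymmU8 in the tests below
    return isAveragePool && isQuantized;
}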
Diffstat (limited to 'src/armnn/test/optimizations')
-rw-r--r-- | src/armnn/test/optimizations/FoldPadIntoQuantizedAveragePooling2DTests.cpp | 114
-rw-r--r-- | src/armnn/test/optimizations/FoldPadTests.cpp | 64
2 files changed, 177 insertions(+), 1 deletion(-)
diff --git a/src/armnn/test/optimizations/FoldPadIntoQuantizedAveragePooling2DTests.cpp b/src/armnn/test/optimizations/FoldPadIntoQuantizedAveragePooling2DTests.cpp
new file mode 100644
index 0000000000..32627c62f7
--- /dev/null
+++ b/src/armnn/test/optimizations/FoldPadIntoQuantizedAveragePooling2DTests.cpp
@@ -0,0 +1,114 @@
+//
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include <GraphUtils.hpp>
+#include <TestUtils.hpp>
+
+#include <armnn/INetwork.hpp>
+
+#include <doctest/doctest.h>
+
+using namespace armnn;
+
+namespace
+{
+#if defined(ARMNNREF_ENABLED)||defined(ARMCOMPUTECL_ENABLED)
+void FoldPadIntoQuantizedAvgPoolTest(Compute backendId)
+{
+    // Create a network
+    INetworkPtr network = INetwork::Create();
+
+    const unsigned int inputShape[] = {1, 2, 2, 3};
+    const unsigned int paddedShape[] = {1, 4, 4, 3};
+    const unsigned int outputShape[] = {1, 2, 2, 3};
+
+    TensorInfo inputInfo(4, inputShape, DataType::QAsymmU8, 1.0f, 0.0f);
+    TensorInfo paddedInfo(4, paddedShape, DataType::QAsymmU8, 1.0f, 0.0f);
+    TensorInfo outputInfo(4, outputShape, DataType::QAsymmU8, 1.0f, 0.0f);
+
+    IConnectableLayer* input = network->AddInputLayer(0, "input");
+    input->GetOutputSlot(0).SetTensorInfo(inputInfo);
+
+    PadDescriptor padDescriptor({{0, 0},
+                                 {1, 1},
+                                 {1, 1},
+                                 {0, 0}});
+
+    IConnectableLayer* padLayer = network->AddPadLayer(padDescriptor, "pad");
+    padLayer->GetOutputSlot(0).SetTensorInfo(paddedInfo);
+
+    Pooling2dDescriptor pooling2dDescriptor;
+    pooling2dDescriptor.m_PoolType = PoolingAlgorithm::Average;
+    pooling2dDescriptor.m_PoolWidth = 3;
+    pooling2dDescriptor.m_PoolHeight = 3;
+    pooling2dDescriptor.m_StrideX = 1;
+    pooling2dDescriptor.m_StrideY = 1;
+    pooling2dDescriptor.m_DataLayout = DataLayout::NHWC;
+
+    IConnectableLayer* pool2dLayer = network->AddPooling2dLayer(pooling2dDescriptor, "pool2d");
+    pool2dLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);
+
+    IConnectableLayer* output = network->AddOutputLayer(0, "output");
+
+    // Connect up layers - input -> pad -> pool2d -> output
+    input->GetOutputSlot(0).Connect(padLayer->GetInputSlot(0));
+    padLayer->GetOutputSlot(0).Connect(pool2dLayer->GetInputSlot(0));
+    pool2dLayer->GetOutputSlot(0).Connect(output->GetInputSlot(0));
+
+    // Create ArmNN runtime
+    IRuntimePtr run = IRuntime::Create(IRuntime::CreationOptions());
+
+    // Optimise ArmNN network
+    IOptimizedNetworkPtr optNet = Optimize(*network, {backendId}, run->GetDeviceSpec());
+
+    auto checkPadFoldedIntoPool2d = [&](const Layer* const layer) {
+        if (!IsLayerOfType<Pooling2dLayer>(layer) || (layer->GetNameStr() != "folded-pad-into-pool2d"))
+        {
+            return false;
+        }
+
+        const auto pool2dLayer = static_cast<const Pooling2dLayer*>(layer);
+        const Pooling2dDescriptor pool2dLayerParams = pool2dLayer->GetParameters();
+
+        Pooling2dDescriptor pool2dLayerParamsNoPad = pool2dLayerParams;
+        pool2dLayerParamsNoPad.m_PadLeft = 0;
+        pool2dLayerParamsNoPad.m_PadRight = 0;
+        pool2dLayerParamsNoPad.m_PadTop = 0;
+        pool2dLayerParamsNoPad.m_PadBottom = 0;
+        // If we fold then PaddingMethod will be set to Ignore. The original will be Exclude.
+        pool2dLayerParamsNoPad.m_PaddingMethod = PaddingMethod::Exclude;
+
+        return (pool2dLayerParamsNoPad == pooling2dDescriptor) && (pool2dLayerParams.m_PadLeft == 1) &&
+               (pool2dLayerParams.m_PadRight == 1) && (pool2dLayerParams.m_PadTop == 1) &&
+               (pool2dLayerParams.m_PadBottom == 1) && (pool2dLayerParams.m_PaddingMethod == PaddingMethod::IgnoreValue);
+    };
+
+    Graph& graph = GetGraphForTesting(optNet.get());
+    CHECK(CheckSequence(graph.cbegin(), graph.cend(),
+                        &IsLayerOfType<InputLayer>,
+                        checkPadFoldedIntoPool2d,
+                        &IsLayerOfType<OutputLayer>));
+}
+#endif
+}
+
+
+TEST_SUITE("Optimizer_FoldPadIntoQuantizedAvgPoolCpuRef")
+{
+TEST_CASE("FoldPadIntoQuantizedAvgPoolCpuRefTest")
+{
+    FoldPadIntoQuantizedAvgPoolTest(Compute::CpuRef);
+}
+}
+
+#if defined(ARMCOMPUTECL_ENABLED)
+TEST_SUITE("Optimizer_FoldPadIntoQuantizedAvgPoolGpuAcc")
+{
+TEST_CASE("FoldPadIntoQuantizedAvgPoolGpuAccTest")
+{
+    FoldPadIntoQuantizedAvgPoolTest(Compute::GpuAcc);
+}
+}
+#endif
diff --git a/src/armnn/test/optimizations/FoldPadTests.cpp b/src/armnn/test/optimizations/FoldPadTests.cpp
index 4d7defcabe..b2672ea584 100644
--- a/src/armnn/test/optimizations/FoldPadTests.cpp
+++ b/src/armnn/test/optimizations/FoldPadTests.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -474,6 +474,68 @@ TEST_CASE("FoldPadLayerIntoPooling2dLayer_MaxPoolingLayerWithLargePadValueShould
                             &IsLayerOfType<OutputLayer>));
 }
 
+TEST_CASE("FoldPadLayerIntoPooling2dLayer_QuantizedAveragePoolingShouldNotBeFolded")
+{
+    Graph graph;
+    const unsigned int inputShape[] = {1, 2, 2, 3};
+    const unsigned int paddedShape[] = {1, 4, 4, 3};
+    const unsigned int outputShape[] = {1, 2, 2, 3};
+
+    TensorInfo inputInfo(4, inputShape, DataType::QAsymmU8);
+    TensorInfo paddedInfo(4, paddedShape, DataType::QAsymmU8);
+    TensorInfo outputInfo(4, outputShape, DataType::QAsymmU8);
+
+    Layer* input = graph.AddLayer<InputLayer>(0, "input");
+    input->GetOutputSlot().SetTensorInfo(inputInfo);
+
+    PadDescriptor padDescriptor({{0, 0},
+                                 {1, 1},
+                                 {1, 1},
+                                 {0, 0}});
+
+    PadLayer* padLayer = graph.AddLayer<PadLayer>(padDescriptor, "pad");
+    padLayer->GetOutputSlot().SetTensorInfo(paddedInfo);
+
+    Pooling2dDescriptor pooling2dDescriptor;
+    pooling2dDescriptor.m_PoolType = PoolingAlgorithm::Average;
+    pooling2dDescriptor.m_PoolWidth = 3;
+    pooling2dDescriptor.m_PoolHeight = 3;
+    pooling2dDescriptor.m_StrideX = 1;
+    pooling2dDescriptor.m_StrideY = 1;
+    pooling2dDescriptor.m_DataLayout = DataLayout::NHWC;
+
+    Pooling2dLayer* pool2dLayer = graph.AddLayer<Pooling2dLayer>(pooling2dDescriptor, "pool2d");
+    pool2dLayer->GetOutputSlot().SetTensorInfo(outputInfo);
+
+    Layer* output = graph.AddLayer<OutputLayer>(0, "output");
+
+    // Connect up layers - input -> pad -> pool2d -> output
+    input->GetOutputSlot().Connect(padLayer->GetInputSlot(0));
+    padLayer->GetOutputSlot().Connect(pool2dLayer->GetInputSlot(0));
+    pool2dLayer->GetOutputSlot().Connect(output->GetInputSlot(0));
+
+    auto checkSimplePool2d = [&](const Layer* const layer) {
+        const auto pool2dLayer = static_cast<const Pooling2dLayer*>(layer);
+        return IsLayerOfType<Pooling2dLayer>(layer) && (layer->GetNameStr() == "pool2d") &&
+               (pool2dLayer->GetParameters() == pooling2dDescriptor);
+    };
+
+    CHECK(CheckSequence(graph.cbegin(), graph.cend(),
+                        &IsLayerOfType<InputLayer>,
+                        &IsLayerOfType<PadLayer>,
+                        checkSimplePool2d,
+                        &IsLayerOfType<OutputLayer>));
+
+    armnn::Optimizer::Pass(graph, MakeOptimizations(FoldPadIntoPooling2d()));
+
+    // The optimization should not have modified the graph.
+    CHECK(CheckSequence(graph.cbegin(), graph.cend(),
+                        &IsLayerOfType<InputLayer>,
+                        &IsLayerOfType<PadLayer>,
+                        checkSimplePool2d,
+                        &IsLayerOfType<OutputLayer>));
+}
+
 #if defined(ARMNNREF_ENABLED)
 TEST_CASE("FoldPadLayerIntoPooling2dLayer_ExecuteInferenceWithAndWithoutOptimization")
 {
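For completeness, the behaviour this fix guarantees on the Neon backend itself can be pictured with a companion check: after optimizing the same Pad -> AveragePool2d QAsymmU8 network for Compute::CpuAcc, the Pad layer should survive rather than being folded into the pool. The following sketch shows what such a test could look like if it sat next to the cases above; the suite and test names, the ARMCOMPUTENEON_ENABLED guard, and the exact optimized layer sequence are assumptions, as no CpuAcc test is part of this patch. It assumes the same includes, using-directive, and helpers (GetGraphForTesting, CheckSequence, IsLayerOfType) as FoldPadIntoQuantizedAveragePooling2DTests.cpp.

// Hypothetical sketch only - not part of this patch. Assumes the same
// includes and helpers as FoldPadIntoQuantizedAveragePooling2DTests.cpp.
#if defined(ARMCOMPUTENEON_ENABLED)
TEST_SUITE("Optimizer_FoldPadIntoQuantizedAvgPoolCpuAcc")
{
TEST_CASE("QuantizedAvgPoolPadNotFoldedCpuAccTest")
{
    INetworkPtr network = INetwork::Create();

    const unsigned int inputShape[]  = {1, 2, 2, 3};
    const unsigned int paddedShape[] = {1, 4, 4, 3};
    const unsigned int outputShape[] = {1, 2, 2, 3};

    TensorInfo inputInfo(4, inputShape, DataType::QAsymmU8, 1.0f, 0);
    TensorInfo paddedInfo(4, paddedShape, DataType::QAsymmU8, 1.0f, 0);
    TensorInfo outputInfo(4, outputShape, DataType::QAsymmU8, 1.0f, 0);

    IConnectableLayer* input = network->AddInputLayer(0, "input");
    input->GetOutputSlot(0).SetTensorInfo(inputInfo);

    PadDescriptor padDescriptor({{0, 0}, {1, 1}, {1, 1}, {0, 0}});
    IConnectableLayer* padLayer = network->AddPadLayer(padDescriptor, "pad");
    padLayer->GetOutputSlot(0).SetTensorInfo(paddedInfo);

    Pooling2dDescriptor pooling2dDescriptor;
    pooling2dDescriptor.m_PoolType   = PoolingAlgorithm::Average;
    pooling2dDescriptor.m_PoolWidth  = 3;
    pooling2dDescriptor.m_PoolHeight = 3;
    pooling2dDescriptor.m_StrideX    = 1;
    pooling2dDescriptor.m_StrideY    = 1;
    pooling2dDescriptor.m_DataLayout = DataLayout::NHWC;
    IConnectableLayer* pool2dLayer = network->AddPooling2dLayer(pooling2dDescriptor, "pool2d");
    pool2dLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);

    IConnectableLayer* output = network->AddOutputLayer(0, "output");

    // Connect up layers - input -> pad -> pool2d -> output
    input->GetOutputSlot(0).Connect(padLayer->GetInputSlot(0));
    padLayer->GetOutputSlot(0).Connect(pool2dLayer->GetInputSlot(0));
    pool2dLayer->GetOutputSlot(0).Connect(output->GetInputSlot(0));

    IRuntimePtr run = IRuntime::Create(IRuntime::CreationOptions());
    IOptimizedNetworkPtr optNet = Optimize(*network, {Compute::CpuAcc}, run->GetDeviceSpec());

    // Assumed expectation: on CpuAcc the Pad layer is NOT folded into the
    // quantized average pool, so it remains in the optimized graph.
    Graph& graph = GetGraphForTesting(optNet.get());
    CHECK(CheckSequence(graph.cbegin(), graph.cend(),
                        &IsLayerOfType<InputLayer>,
                        &IsLayerOfType<PadLayer>,
                        &IsLayerOfType<Pooling2dLayer>,
                        &IsLayerOfType<OutputLayer>));
}
}
#endif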