diff options
author | Cathal Corbett <cathal.corbett@arm.com> | 2022-07-22 16:03:36 +0100 |
---|---|---|
committer | Nikhil Raj <nikhil.raj@arm.com> | 2022-08-05 15:50:57 +0100 |
commit | 3883b2776cec33f16f0ea9a2d795de2b7c766df7 (patch) | |
tree | 6842e15904037d73426d814d5751945b3d9c2376 /src/armnn/optimizations/FoldPadIntoLayer2d.hpp | |
parent | 9d63fee68081b65bd72de3a70da76c2696c6c6ed (diff) | |
download | armnn-3883b2776cec33f16f0ea9a2d795de2b7c766df7.tar.gz |
GitHub #667: Neon fold padding into average pool 2D quantization bug fix.
* Originated from a GitHub issue: https://github.com/ARM-software/armnn/issues/667
* Previously, Arm NN supported this pool 2D operation only because the pool2d
itself carried no padding. A Neon failure occurs when a Pad layer is followed by
an average pool 2D, because the folding optimization moves the padding into the pool.
* Here we prevent the folding optimization from happening for the above special case
and add it in as a backend specific optimization.
Signed-off-by: Cathal Corbett <cathal.corbett@arm.com>
Change-Id: Ia0fd90c3a6b4b9d29c81106f154617d2e893e26b
Diffstat (limited to 'src/armnn/optimizations/FoldPadIntoLayer2d.hpp')
-rw-r--r-- | src/armnn/optimizations/FoldPadIntoLayer2d.hpp | 43 |
1 file changed, 29 insertions, 14 deletions
diff --git a/src/armnn/optimizations/FoldPadIntoLayer2d.hpp b/src/armnn/optimizations/FoldPadIntoLayer2d.hpp index eb6bc90afd..4c4bd80d41 100644 --- a/src/armnn/optimizations/FoldPadIntoLayer2d.hpp +++ b/src/armnn/optimizations/FoldPadIntoLayer2d.hpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd. All rights reserved. +// Copyright © 2022 Arm Ltd and Contributors. All rights reserved. // SPDX-License-Identifier: MIT // @@ -73,6 +73,17 @@ inline bool IsNeutralElement( : tensorValue == GetZeroElement(tensorInfo); } +inline bool IsPooling2dPadded(const Pooling2dDescriptor& poolDescriptor) +{ + const auto poolingPadValues = std::make_tuple(poolDescriptor.m_PadLeft, poolDescriptor.m_PadRight, + poolDescriptor.m_PadTop, poolDescriptor.m_PadBottom); + if (poolingPadValues != std::make_tuple(0U, 0U, 0U, 0U)) + { + return true; + } + return false; +} + template <typename Descriptor> bool TryFoldPadIntoLayer2d( const PadDescriptor& padDescriptor, Descriptor& layerDescriptor, const TensorInfo& tensorInfo) @@ -101,25 +112,29 @@ bool TryFoldPadIntoLayer2d( return true; } -inline bool TryFoldPadIntoLayer2d( - const PadDescriptor& padDescriptor, Pooling2dDescriptor& poolDescriptor, const TensorInfo& tensorInfo) +inline bool TryFoldPadIntoLayer2d(const PadDescriptor& padDescriptor, + Pooling2dDescriptor& poolDescriptor, + const TensorInfo& tensorInfo, + bool isBackendOptimization = false) { - const auto poolingPadValues = std::make_tuple(poolDescriptor.m_PadLeft, poolDescriptor.m_PadRight, - poolDescriptor.m_PadTop, poolDescriptor.m_PadBottom); - bool poolHasPadding = false; - if (poolingPadValues != std::make_tuple(0U, 0U, 0U, 0U)) + // Cannot fold Average or L2 pooling if padding exists and the padding method is Exclude. 
+ if (poolDescriptor.m_PoolType != PoolingAlgorithm::Max && + IsPooling2dPadded(poolDescriptor) && + poolDescriptor.m_PaddingMethod == PaddingMethod::Exclude) { - poolHasPadding = true; + return false; } - // We cannot fold Average or L2 pooling if there's is already padding and that padding method is Exclude. - if (poolDescriptor.m_PoolType != PoolingAlgorithm::Max) // PoolingAlgorithm::Average or PoolingAlgorithm::L2 + // Cannot fold Average pooling if data type is quantized and layout is NHWC in Neon backend. + // Therefore, this specific case will become a backend specific optimization. + if (!isBackendOptimization && + tensorInfo.IsQuantized() && + poolDescriptor.m_PoolType == PoolingAlgorithm::Average && + poolDescriptor.m_DataLayout == DataLayout::NHWC) { - if ((poolHasPadding) && (poolDescriptor.m_PaddingMethod == PaddingMethod::Exclude)) - { - return false; - } + return false; } + poolDescriptor.m_PaddingMethod = PaddingMethod::IgnoreValue; return TryFoldPadIntoLayer2d<Pooling2dDescriptor>(padDescriptor, poolDescriptor, tensorInfo); |