diff options
author:    Cathal Corbett <cathal.corbett@arm.com>  2022-07-22 16:03:36 +0100
committer: Nikhil Raj <nikhil.raj@arm.com>  2022-08-05 15:50:57 +0100
commit:    3883b2776cec33f16f0ea9a2d795de2b7c766df7 (patch)
tree:      6842e15904037d73426d814d5751945b3d9c2376 /src/backends/aclCommon
parent:    9d63fee68081b65bd72de3a70da76c2696c6c6ed (diff)
download:  armnn-3883b2776cec33f16f0ea9a2d795de2b7c766df7.tar.gz
GitHub #667: Neon fold padding into average pool 2D quantization bug fix.
* Originated from a GitHub issue: https://github.com/ARM-software/armnn/issues/667
* Initially, Arm NN supported the pool 2D operation because there was no
padding on the pool2d. A Neon failure occurs when padding is followed by
average pool 2D, due to the folding optimization.
* Here we prevent the folding optimization from happening for the above special case
and add it in as a backend specific optimization.
Signed-off-by: Cathal Corbett <cathal.corbett@arm.com>
Change-Id: Ia0fd90c3a6b4b9d29c81106f154617d2e893e26b
Diffstat (limited to 'src/backends/aclCommon')
-rw-r--r--  src/backends/aclCommon/ArmComputeSubgraphUtils.hpp  | 46 +-
1 file changed, 2 insertions(+), 44 deletions(-)
diff --git a/src/backends/aclCommon/ArmComputeSubgraphUtils.hpp b/src/backends/aclCommon/ArmComputeSubgraphUtils.hpp
index a26442cb86..766bf2d2cc 100644
--- a/src/backends/aclCommon/ArmComputeSubgraphUtils.hpp
+++ b/src/backends/aclCommon/ArmComputeSubgraphUtils.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
@@ -9,6 +9,7 @@
 #include <armnn/utility/Assert.hpp>
 
 #include <aclCommon/ArmComputeUtils.hpp>
+#include <backendsCommon/SubgraphUtils.hpp>
 
 namespace armnn
 {
@@ -20,36 +21,6 @@ namespace
 // this helper only works if all layers where the inputs connect to are not selected
 //
 
-SubgraphView::IInputSlots CreateIInputsFrom(const std::vector<armnn::IConnectableLayer*>& layers)
-{
-    SubgraphView::IInputSlots result;
-    for (auto&& layer : layers)
-    {
-        for (unsigned int i = 0 ; i < layer->GetNumInputSlots(); ++i)
-        {
-            result.push_back(&(layer->GetInputSlot(i)));
-        }
-    }
-    return result;
-}
-
-//
-// this helper only works if all layers where the outputs connect to are not selected
-//
-
-SubgraphView::IOutputSlots CreateIOutputsFrom(const std::vector<armnn::IConnectableLayer*>& layers)
-{
-    SubgraphView::IOutputSlots result;
-    for (auto &&layer: layers)
-    {
-        for (unsigned int i = 0; i < layer->GetNumOutputSlots(); ++i)
-        {
-            result.push_back(&(layer->GetOutputSlot(i)));
-        }
-    }
-    return result;
-}
-
 bool checkDataTypeInputandOutput(const Layer& layer)
 {
     auto inputInfo = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
@@ -79,19 +50,6 @@ bool checkDataTypeInputandOutput(const Layer& layer)
 
 } // namespace
 
-inline void ReportUntouchedLayers(OptimizationViews& optimizationViews, std::map<LayerGuid, Layer*> untouched)
-{
-    std::vector<Layer*> untouchedVector;
-    for (const auto& pair : untouched)
-    {
-        Layer* layer = pair.second;
-        SubgraphView subgraphView({layer},
-                                  CreateIInputsFrom({layer}),
-                                  CreateIOutputsFrom({layer}));
-        optimizationViews.AddUntouchedSubgraph(std::move(subgraphView));
-    }
-}
-
 template<typename LayerType>
 LayerType* FuseLayer(OptimizationViews& optimizationViews,
                      LayerType* baseLayer,