author     Matthew Sloyan <matthew.sloyan@arm.com>   2021-05-03 12:22:03 +0100
committer  Matthew Sloyan <matthew.sloyan@arm.com>   2021-05-06 17:58:26 +0000
commit     d905decd256558bbee165e636ce4242ac3b9c917
tree       86f51622399553d1741b66ff232a429de8fc43f8
parent     1f58f03d82c482626b1b4673b6c0e25da4338fb5
MLCE-418 Reduce layer does not support multiple axes
* Added backend specific optimization to chain new Reduce layers for each axis to simulate the behaviour of a layer with multiple axes.
* Added function to calculate reduced output shape.
* Added unit tests.

Signed-off-by: Matthew Sloyan <matthew.sloyan@arm.com>
Change-Id: I180b0b111b7bcf3d0c283f1db0b82d5f17757682
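As an aside on the "calculate reduced output shape" point above: the sketch below is a minimal, standalone illustration, not the armnn implementation. The name CalculateReducedShape and the use of std::vector<unsigned int> in place of armnn::TensorShape are assumptions made purely for the example; it shows how a reduced output shape can be derived from an input shape, a set of axes, and a keep-dims flag.

#include <algorithm>
#include <iostream>
#include <vector>

// Given an input shape and the axes to reduce, return the reduced output
// shape. With keepDims the reduced dimensions are retained as size 1,
// otherwise they are dropped.
std::vector<unsigned int> CalculateReducedShape(const std::vector<unsigned int>& inputShape,
                                                const std::vector<unsigned int>& axes,
                                                bool keepDims)
{
    std::vector<unsigned int> outputShape;
    for (unsigned int i = 0; i < static_cast<unsigned int>(inputShape.size()); ++i)
    {
        const bool reduced = std::find(axes.begin(), axes.end(), i) != axes.end();
        if (!reduced)
        {
            outputShape.push_back(inputShape[i]);
        }
        else if (keepDims)
        {
            outputShape.push_back(1);
        }
    }
    return outputShape;
}

int main()
{
    const std::vector<unsigned int> input = {2, 3, 4, 5};
    const std::vector<unsigned int> axes  = {1, 2};

    for (unsigned int d : CalculateReducedShape(input, axes, /*keepDims=*/true))
    {
        std::cout << d << ' ';   // 2 1 1 5
    }
    std::cout << '\n';
    for (unsigned int d : CalculateReducedShape(input, axes, /*keepDims=*/false))
    {
        std::cout << d << ' ';   // 2 5
    }
    std::cout << '\n';
    return 0;
}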
Diffstat (limited to 'src/backends/neon/NeonBackend.cpp')
-rw-r--r--   src/backends/neon/NeonBackend.cpp   24
1 file changed, 23 insertions(+), 1 deletion(-)
diff --git a/src/backends/neon/NeonBackend.cpp b/src/backends/neon/NeonBackend.cpp
index a1299fb458..6d5eab0ddf 100644
--- a/src/backends/neon/NeonBackend.cpp
+++ b/src/backends/neon/NeonBackend.cpp
@@ -29,6 +29,7 @@
#include "workloads/NeonDivisionWorkload.hpp"
#include "workloads/NeonFullyConnectedWorkload.hpp"
#include "workloads/NeonMultiplicationWorkload.hpp"
+#include "workloads/NeonReduceWorkload.hpp"
#include "workloads/NeonSubtractionWorkload.hpp"
#include <Optimizer.hpp>
@@ -164,7 +165,8 @@ OptimizationViews NeonBackend::OptimizeSubgraphView(const SubgraphView& subgraph
if ((base.GetType() == LayerType::DepthwiseConvolution2d || base.GetType() == LayerType::Convolution2d
|| base.GetType() == LayerType::BatchNormalization || base.GetType() == LayerType::FullyConnected
|| base.GetType() == LayerType::Addition || base.GetType() == LayerType::Multiplication
- || base.GetType() == LayerType::Subtraction || base.GetType() == LayerType::Division)
+ || base.GetType() == LayerType::Subtraction || base.GetType() == LayerType::Division
+ || base.GetType() == LayerType::Reduce)
&& (base.GetAdditionalInformation<ActivationDescriptor>() == nullptr))
{
for (auto output = base.BeginOutputSlots(); output != base.EndOutputSlots(); ++output)
@@ -389,6 +391,26 @@ OptimizationViews NeonBackend::OptimizeSubgraphView(const SubgraphView& subgraph
}
}
}
+
+ // Separate check for Reduce as we aren't fusing with activation layer
+ if (base.GetType() == LayerType::Reduce)
+ {
+ ReduceLayer* baseLayer = PolymorphicDowncast<ReduceLayer*>(&base);
+
+ // Get params from base layer
+ ReduceDescriptor reduceDescriptor = baseLayer->GetParameters();
+
+ arm_compute::Status status = NeonReduceWorkloadValidate(
+ baseLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(),
+ baseLayer->GetOutputSlot(0).GetTensorInfo(),
+ reduceDescriptor);
+
+ if (status)
+ {
+ ChainReduceLayers<ReduceLayer>(optimizationViews, baseLayer, reduceDescriptor);
+ untouched.erase(baseLayer->GetGuid());
+ }
+ }
}
}
}
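To illustrate the chaining idea behind the hunk above: the standalone sketch below is an assumption-laden toy, not armnn's ChainReduceLayers. It shows how reducing over multiple axes can be simulated by a sequence of single-axis reduces, where each intermediate step keeps its reduced dimension as size 1 so the remaining axis indices stay valid, and the size-1 dimensions are squeezed at the end when keepDims is false.

#include <algorithm>
#include <iostream>
#include <vector>

int main()
{
    // Reduce over axes {1, 2} of a {2, 3, 4, 5} tensor, simulated as a chain
    // of single-axis reduces.
    std::vector<unsigned int> shape = {2, 3, 4, 5};
    std::vector<unsigned int> axes  = {1, 2};
    bool keepDims = false;

    for (unsigned int axis : axes)
    {
        shape[axis] = 1; // single-axis reduce, keeping the dimension as size 1
        std::cout << "after reducing axis " << axis << ": { ";
        for (unsigned int d : shape) { std::cout << d << ' '; }
        std::cout << "}\n";
    }

    if (!keepDims)
    {
        // Squeeze the reduced dimensions, highest axis first so earlier
        // indices are not shifted by the erase.
        std::vector<unsigned int> sortedAxes(axes);
        std::sort(sortedAxes.rbegin(), sortedAxes.rend());
        for (unsigned int axis : sortedAxes)
        {
            shape.erase(shape.begin() + axis);
        }
    }

    std::cout << "final shape: { ";
    for (unsigned int d : shape) { std::cout << d << ' '; }
    std::cout << "}\n"; // { 2 5 }
    return 0;
}

In the patch itself, NeonReduceWorkloadValidate gates the substitution, and the original layer is erased from the untouched set once the chained replacement is registered in optimizationViews.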