Diffstat (limited to 'src/backends/aclCommon/ArmComputeSubgraphUtils.hpp')
-rw-r--r--  src/backends/aclCommon/ArmComputeSubgraphUtils.hpp  |  84
 1 file changed, 84 insertions(+), 0 deletions(-)
diff --git a/src/backends/aclCommon/ArmComputeSubgraphUtils.hpp b/src/backends/aclCommon/ArmComputeSubgraphUtils.hpp
index a0fca46330..521c17cd62 100644
--- a/src/backends/aclCommon/ArmComputeSubgraphUtils.hpp
+++ b/src/backends/aclCommon/ArmComputeSubgraphUtils.hpp
@@ -6,6 +6,9 @@
#pragma once
#include <armnn/backends/OptimizationViews.hpp>
+#include <armnn/utility/Assert.hpp>
+
+#include <aclCommon/ArmComputeUtils.hpp>
namespace armnn
{
@@ -147,4 +150,85 @@ LayerType* FuseLayerWithWeightsAndBiases(OptimizationViews& optimizationViews,
return replacementLayer;
}
+//
+// If the reduce layer has multiple axes, add a new reduce layer for each axis to simulate the
+// same behaviour, as currently only a single reduction axis is supported.
+//
+template<typename LayerType>
+std::vector<Layer*> ChainReduceLayers(OptimizationViews& optimizationViews,
+                                      LayerType* baseLayer,
+                                      ReduceDescriptor& desc)
+{
+    // Vector of new chained layers, used for substitution.
+    std::vector<Layer*> layers;
+
+    // Vector of accumulated axes so each layer's shape is computed correctly.
+    std::vector<uint32_t> axes;
+    unsigned int recalculatedAxis = 0;
+
+    for (unsigned int i = 0; i != desc.m_vAxis.size(); ++i)
+    {
+        // Get TensorInfo from the base layer and reduce its shape using the accumulated axes.
+        TensorInfo layerInfo = baseLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo();
+
+        axes.emplace_back(desc.m_vAxis[i]);
+
+        const TensorInfo& reducedTensorInfo = ComputeReductionTensorShape(layerInfo,
+                                                                          axes,
+                                                                          desc.m_KeepDims);
+
+        // Create a vector holding the single axis to be assigned to the descriptor.
+        // If keepDims is not set, earlier reductions have removed dimensions, so the axis
+        // must be shifted down accordingly for the new reduce layer to be correct.
+        std::vector<uint32_t> singleAxis(1, desc.m_vAxis[i] - recalculatedAxis);
+
+        // Create a descriptor and assign the single axis.
+        ReduceDescriptor newReduceDescriptor = baseLayer->GetParameters();
+        newReduceDescriptor.m_vAxis.assign(singleAxis.begin(), singleAxis.end());
+
+        // Add the new layer to the graph.
+        std::string layerName = "reduce_layer_" + std::to_string(i);
+        Layer* replacementLayer = optimizationViews.GetGraph().AddLayer<LayerType>(newReduceDescriptor,
+                                                                                   layerName.c_str());
+        // Connect the previous layer with the new layer.
+        // The first and last layers will be connected when the subgraph is replaced.
+        if (!layers.empty())
+        {
+            layers[i - 1]->GetOutputSlot(0).Connect(replacementLayer->GetInputSlot(0));
+        }
+
+        // Set the updated TensorInfo on the new layer.
+        replacementLayer->GetOutputSlot(0).SetTensorInfo(reducedTensorInfo);
+
+        if (!desc.m_KeepDims)
+        {
+            recalculatedAxis++;
+        }
+
+        layers.emplace_back(replacementLayer);
+    }
+
+    // Check that the TensorInfo of the last layer equals the inferred output of the original layer.
+    ARMNN_ASSERT(baseLayer->GetOutputSlot(0).GetTensorInfo() == layers.back()->GetOutputSlot().GetTensorInfo());
+
+    return layers;
+}
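As an illustration of the chaining (a hedged sketch, not part of the patch; the input shape and
descriptor values below are assumed for the example): reducing a 1x3x2x4 tensor over axes { 1, 3 }
with keepDims disabled is decomposed into two single-axis layers, and once axis 1 has been reduced
away the original axis 3 is shifted down by one before being assigned to the second layer.

    // Hypothetical example: a two-axis reduction decomposed by ChainReduceLayers.
    ReduceDescriptor multiAxisDesc;                 // assumed input shape: 1x3x2x4
    multiAxisDesc.m_vAxis    = { 1, 3 };
    multiAxisDesc.m_KeepDims = false;

    // ChainReduceLayers<ReduceLayer>(optimizationViews, baseLayer, multiAxisDesc) would add:
    //   reduce_layer_0: m_vAxis = { 1 } -> output shape 1x2x4
    //   reduce_layer_1: m_vAxis = { 2 } -> output shape 1x2   (axis 3 shifted down by one,
    //                                                          since axis 1 no longer exists)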
+
+//
+// Substitute baseLayer with new subgraph
+//
+template<typename LayerType>
+void ReplaceLayers(OptimizationViews& optimizationViews,
+                   LayerType* baseLayer,
+                   std::vector<Layer*>& layers)
+{
+    std::list<Layer*> replacementLayers(layers.begin(), layers.end());
+
+    SubgraphView substitutionSubgraph(baseLayer);
+    SubgraphView replacementSubgraph(CreateInputsFrom({replacementLayers.front()}),
+                                     CreateOutputsFrom({replacementLayers.back()}),
+                                     std::move(replacementLayers));
+
+    optimizationViews.AddSubstitution({substitutionSubgraph, replacementSubgraph});
+}
+
} // namespace armnn
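For context, a minimal usage sketch of how a backend's OptimizeSubgraphView hook could combine the
two helpers. This caller is an assumption for illustration and is not contained in this patch:
`base` is taken to be a Layer& obtained while iterating the subgraph, and `optimizationViews` the
OptimizationViews being built.

    // Hypothetical caller (not in this patch): decompose a multi-axis ReduceLayer and
    // register the substitution of the original layer with the chained single-axis layers.
    if (base.GetType() == LayerType::Reduce)
    {
        ReduceLayer* baseLayer = PolymorphicDowncast<ReduceLayer*>(&base);
        ReduceDescriptor reduceDescriptor = baseLayer->GetParameters();

        if (reduceDescriptor.m_vAxis.size() > 1)
        {
            // Build one single-axis reduce layer per axis in the original descriptor.
            std::vector<Layer*> layers = ChainReduceLayers<ReduceLayer>(optimizationViews,
                                                                        baseLayer,
                                                                        reduceDescriptor);

            // Substitute the original multi-axis layer with the new chain.
            ReplaceLayers<ReduceLayer>(optimizationViews, baseLayer, layers);
        }
    }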