aboutsummaryrefslogtreecommitdiff
path: root/src/armnn
diff options
context:
space:
mode:
authorIdriss Chaouch <idriss.chaouch@arm.com>2023-08-28 14:28:31 +0100
committerIdriss Chaouch <idriss.chaouch@arm.com>2023-08-31 11:26:28 +0100
commit98e383eadf4e670d057ad725c7fe7924fea8e36b (patch)
tree35acac15aa69ab405887289cb9674d388f06f96b /src/armnn
parent2be039bce38a4fa436e8310dfe14ebfff20d57bd (diff)
downloadarmnn-98e383eadf4e670d057ad725c7fe7924fea8e36b.tar.gz
IVGCVSW-7525 Add broadcast_to operator
Signed-off-by: Idriss Chaouch <idriss.chaouch@arm.com>
Signed-off-by: Narumol Prangnawarat <narumol.prangnawarat@arm.com>
Change-Id: I94ec5f9120b2d736fdf98d00ec5137a4efd739b8
Diffstat (limited to 'src/armnn')
-rw-r--r--src/armnn/BackendHelper.cpp16
-rw-r--r--src/armnn/LayersFwd.hpp2
-rw-r--r--src/armnn/Network.cpp16
-rw-r--r--src/armnn/Network.hpp3
-rw-r--r--src/armnn/layers/BroadcastToLayer.cpp57
-rw-r--r--src/armnn/layers/BroadcastToLayer.hpp48
-rw-r--r--src/armnn/optimizations/All.hpp5
-rw-r--r--src/armnn/optimizations/DeleteBroadcastTo.hpp37
-rw-r--r--src/armnn/test/optimizations/BroadcastToTests.cpp151
9 files changed, 332 insertions, 3 deletions
diff --git a/src/armnn/BackendHelper.cpp b/src/armnn/BackendHelper.cpp
index fc7a2fab83..56938d021e 100644
--- a/src/armnn/BackendHelper.cpp
+++ b/src/armnn/BackendHelper.cpp
@@ -246,6 +246,22 @@ bool LayerSupportHandle::IsBatchToSpaceNdSupported(const TensorInfo& input,
reasonIfUnsupported);
}
+
+bool LayerSupportHandle::IsBroadcastToSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const armnn::BroadcastToDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported)
+{
+ TensorInfos infos{input, output};
+
+ return m_LayerSupport->IsLayerSupported(LayerType::BroadcastTo,
+ infos,
+ descriptor,
+ EmptyOptional(),
+ EmptyOptional(),
+ reasonIfUnsupported.value());
+}
+
bool LayerSupportHandle::IsCastSupported(const TensorInfo& input,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported)
diff --git a/src/armnn/LayersFwd.hpp b/src/armnn/LayersFwd.hpp
index f83b710134..325bfc3875 100644
--- a/src/armnn/LayersFwd.hpp
+++ b/src/armnn/LayersFwd.hpp
@@ -12,6 +12,7 @@
#include "layers/BatchMatMulLayer.hpp"
#include "layers/BatchNormalizationLayer.hpp"
#include "layers/BatchToSpaceNdLayer.hpp"
+#include "layers/BroadcastToLayer.hpp"
#include "layers/CastLayer.hpp"
#include "layers/ChannelShuffleLayer.hpp"
#include "layers/ComparisonLayer.hpp"
@@ -116,6 +117,7 @@ DECLARE_LAYER(ArgMinMax)
DECLARE_LAYER(BatchMatMul)
DECLARE_LAYER(BatchNormalization)
DECLARE_LAYER(BatchToSpaceNd)
+DECLARE_LAYER(BroadcastTo)
DECLARE_LAYER(Cast)
DECLARE_LAYER(ChannelShuffle)
DECLARE_LAYER(Comparison)
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index 7f4ef6b1b6..d2b14cd045 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -657,6 +657,12 @@ IConnectableLayer* INetwork::AddTileLayer(const TileDescriptor &descriptor,
return pNetworkImpl->AddTileLayer(descriptor, name);
}
+IConnectableLayer* INetwork::AddBroadcastToLayer(const BroadcastToDescriptor& descriptor,
+ const char* name)
+{
+ return pNetworkImpl->AddBroadcastToLayer(descriptor, name);
+}
+
void INetwork::ExecuteStrategy(IStrategy& strategy) const
{
return pNetworkImpl->ExecuteStrategy(strategy);
@@ -1929,8 +1935,10 @@ IOptimizedNetworkPtr Optimize(const Graph& inGraph,
optGraph.InferTensorInfos();
}
- // Perform AddBroadcastReshapeLayer optimisation
+ // Perform BroadcastToOptimizationLayer and then AddBroadcastReshapeLayer optimisation
using namespace optimizations;
+ Optimizer::Pass(optGraph, MakeOptimizations(BroadcastToOptimizationLayer()));
+
Optimizer::Pass(optGraph, MakeOptimizations(AddBroadcastReshapeLayer()));
if(options.GetShapeInferenceMethod() == ShapeInferenceMethod::ValidateOnly)
@@ -1961,6 +1969,7 @@ IOptimizedNetworkPtr Optimize(const Graph& inGraph,
FoldPadIntoConvolution2d(),
FoldPadIntoDepthwiseConvolution2d(),
FoldPadIntoPooling2d(),
+ BroadcastToOptimizationLayer(),
PermuteAndBatchToSpaceAsDepthToSpace(),
TransposeAndBatchToSpaceAsDepthToSpace(),
FuseBatchNormIntoConvolution2DFloat32(),
@@ -3045,6 +3054,11 @@ IConnectableLayer* NetworkImpl::AddPrecompiledLayer(const PreCompiledDescriptor&
return layer;
}
+IConnectableLayer* NetworkImpl::AddBroadcastToLayer(const BroadcastToDescriptor &desc, const char *name)
+{
+ return m_Graph->AddLayer<BroadcastToLayer>(desc, name);
+}
+
void NetworkImpl::ExecuteStrategy(IStrategy& strategy) const
{
for (auto layer : GetGraph())
diff --git a/src/armnn/Network.hpp b/src/armnn/Network.hpp
index 5a3570d825..6ffdfb37a8 100644
--- a/src/armnn/Network.hpp
+++ b/src/armnn/Network.hpp
@@ -63,6 +63,9 @@ public:
IConnectableLayer* AddBatchToSpaceNdLayer(const BatchToSpaceNdDescriptor& batchToSpaceNdDescriptor,
const char* name = nullptr);
+ IConnectableLayer* AddBroadcastToLayer(const BroadcastToDescriptor& descriptor,
+ const char* name = nullptr);
+
IConnectableLayer* AddCastLayer(const char* name = nullptr);
IConnectableLayer* AddChannelShuffleLayer(const ChannelShuffleDescriptor& channelShuffleDescriptor,
diff --git a/src/armnn/layers/BroadcastToLayer.cpp b/src/armnn/layers/BroadcastToLayer.cpp
new file mode 100644
index 0000000000..252aa46de0
--- /dev/null
+++ b/src/armnn/layers/BroadcastToLayer.cpp
@@ -0,0 +1,57 @@
+//
+// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "BroadcastToLayer.hpp"
+
+#include "LayerCloneBase.hpp"
+
+#include <armnn/TypesUtils.hpp>
+#include <armnn/backends/WorkloadData.hpp>
+#include <armnn/backends/WorkloadFactory.hpp>
+
+namespace armnn
+{
+
+BroadcastToLayer::BroadcastToLayer(const BroadcastToDescriptor& param, const char* name)
+ : LayerWithParameters(1, 1, LayerType::BroadcastTo, param, name)
+{}
+
+std::unique_ptr<IWorkload> BroadcastToLayer::CreateWorkload(const IWorkloadFactory& factory) const
+{
+ BroadcastToQueueDescriptor descriptor;
+ SetAdditionalInfo(descriptor);
+
+ return factory.CreateWorkload(LayerType::BroadcastTo, descriptor, PrepInfoAndDesc(descriptor));
+}
+
+BroadcastToLayer* BroadcastToLayer::Clone(armnn::Graph& graph) const
+{
+ return CloneBase<BroadcastToLayer>(graph, m_Param, GetName());
+}
+
+std::vector<TensorShape> BroadcastToLayer::InferOutputShapes(const std::vector<TensorShape>&) const
+{
+ return std::vector<TensorShape>({ m_Param.m_BroadcastToShape });
+}
+
+void BroadcastToLayer::ValidateTensorShapesFromInputs()
+{
+ VerifyLayerConnections(1, CHECK_LOCATION());
+
+ const TensorShape &outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
+
+ VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
+
+ auto inferredShapes = outputShape;
+
+ ValidateAndCopyShape(outputShape, inferredShapes, m_ShapeInferenceMethod, "BroadcastToLayer");
+}
+
+void BroadcastToLayer::ExecuteStrategy(IStrategy& strategy) const
+{
+ strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
+}
+
+} //namespace armnn
diff --git a/src/armnn/layers/BroadcastToLayer.hpp b/src/armnn/layers/BroadcastToLayer.hpp
new file mode 100644
index 0000000000..5da27a7a66
--- /dev/null
+++ b/src/armnn/layers/BroadcastToLayer.hpp
@@ -0,0 +1,48 @@
+//
+// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "LayerWithParameters.hpp"
+
+namespace armnn
+{
+
+ class BroadcastToLayer : public LayerWithParameters<BroadcastToDescriptor>
+ {
+ public:
+ /// Makes a workload for the BroadcastTo type.
+ /// @param [in] factory The workload factory which will create the workload.
+ /// @return A pointer to the created workload, or nullptr if not created.
+ virtual std::unique_ptr<IWorkload> CreateWorkload(const IWorkloadFactory& factory) const override;
+
+ /// Creates a dynamically-allocated copy of this layer.
+ /// @param [in] graph The graph into which this layer is being cloned.
+ BroadcastToLayer* Clone(Graph& graph) const override;
+
+ /// Infers the output shapes from given input shapes and layer properties.
+ /// @param [in] inputShapes The input shapes layer has.
+ /// @return A vector to the inferred output shape.
+ std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
+
+        /// Check if the input tensor shape(s)
+        /// will lead to a valid configuration of @ref BroadcastToLayer.
+ void ValidateTensorShapesFromInputs() override;
+
+ /// Execute Strategy on BroadcastTo layer
+ /// @param [in] strategy The input strategy for the layer
+ void ExecuteStrategy(IStrategy& strategy) const override;
+
+ protected:
+ /// Constructor to create a BroadcastToLayer.
+ /// @param [in] param Parameters for the layer.
+ /// @param [in] name Optional name for the layer.
+ BroadcastToLayer(const BroadcastToDescriptor& param, const char* name);
+
+ /// Default destructor.
+ ~BroadcastToLayer() = default;
+ };
+
+} // namespace armnn \ No newline at end of file
diff --git a/src/armnn/optimizations/All.hpp b/src/armnn/optimizations/All.hpp
index 0e67516193..abf4cde442 100644
--- a/src/armnn/optimizations/All.hpp
+++ b/src/armnn/optimizations/All.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022, 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
@@ -10,6 +10,7 @@
#include "ConvertConstDequantisationLayersToConstLayers.hpp"
#include "ConvertConstPermuteLayersToConstLayers.hpp"
#include "ConvertFp32NetworkToFp16.hpp"
+#include "DeleteBroadcastTo.hpp"
#include "FoldPadIntoLayer2d.hpp"
#include "FuseBatchNorm.hpp"
#include "MovePermuteUp.hpp"
@@ -21,4 +22,4 @@
#include "PermuteAndBatchToSpaceAsDepthToSpace.hpp"
#include "PermuteDepthwiseConv2dWeights.hpp"
#include "SquashEqualSiblings.hpp"
-#include "TransposeAsReshape.hpp" \ No newline at end of file
+#include "TransposeAsReshape.hpp"
diff --git a/src/armnn/optimizations/DeleteBroadcastTo.hpp b/src/armnn/optimizations/DeleteBroadcastTo.hpp
new file mode 100644
index 0000000000..9ea20907df
--- /dev/null
+++ b/src/armnn/optimizations/DeleteBroadcastTo.hpp
@@ -0,0 +1,37 @@
+//
+// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include "Optimization.hpp"
+
+namespace armnn
+{
+namespace optimizations
+{
+class DeleteBroadcastToImpl
+{
+public:
+    /// Run for every BroadcastToLayer. Remove it if it is before an ElementwiseBinary layer.
+    /// Since ElementwiseBinary uses a broadcast loop, using a BroadcastTo layer is
+    /// not necessary so it will be deleted.
+ void Run(Graph&, BroadcastToLayer& layer) const
+ {
+ if(layer.GetType() == LayerType::BroadcastTo)
+ {
+ Layer& next = layer.GetOutputSlot(0).GetConnection(0)->GetOwningLayer();
+ if (next.GetType() == LayerType::ElementwiseBinary)
+ {
+ Layer& connectedLayer = layer.GetInputSlots()[0].GetConnectedOutputSlot()->GetOwningLayer();
+ layer.GetOutputSlot().MoveAllConnections(connectedLayer.GetOutputSlot());
+ }
+ }
+ }
+protected:
+ DeleteBroadcastToImpl() = default;
+ ~DeleteBroadcastToImpl() = default;
+};
+using BroadcastToOptimizationLayer = OptimizeForType<BroadcastToLayer, DeleteBroadcastToImpl>;
+}
+} \ No newline at end of file
diff --git a/src/armnn/test/optimizations/BroadcastToTests.cpp b/src/armnn/test/optimizations/BroadcastToTests.cpp
new file mode 100644
index 0000000000..69f2bb8860
--- /dev/null
+++ b/src/armnn/test/optimizations/BroadcastToTests.cpp
@@ -0,0 +1,151 @@
+//
+// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "LayersFwd.hpp"
+
+#include <Network.hpp>
+#include <ResolveType.hpp>
+#include <armnn/INetwork.hpp>
+#include <TestUtils.hpp>
+#include <Optimizer.hpp>
+
+#include <doctest/doctest.h>
+
+TEST_SUITE("Optimizer")
+{
+ using namespace armnn;
+ using namespace armnn::optimizations;
+
+ TEST_CASE("DeleteBroadcastToAfterMulLayer")
+ {
+ Graph graph;
+ const unsigned int inputShape[] = {1, 3};
+ const unsigned int outputShape[] = {4, 3};
+
+ //rank of input is 1 and of output is 2
+ TensorInfo inputInfo(1, inputShape, DataType::Float32);
+ TensorInfo floorInfo(1, inputShape, DataType::Float32);
+ TensorInfo outputInfo(2, outputShape, DataType::Float32);
+
+ Layer* input = graph.AddLayer<InputLayer>(0, "input");
+ input->GetOutputSlot().SetTensorInfo(inputInfo);
+
+ FloorLayer* floorLayer = graph.AddLayer<FloorLayer>("floor");
+ floorLayer->GetOutputSlot().SetTensorInfo(inputInfo);
+
+ BroadcastToDescriptor broadcastToDescriptor({4, 3});
+ BroadcastToLayer* broadcastToLayer = graph.AddLayer<BroadcastToLayer>(broadcastToDescriptor, "broadcast_to");
+ broadcastToLayer->GetOutputSlot().SetTensorInfo(floorInfo);
+
+ ElementwiseBinaryDescriptor elementwiseBinaryDescriptor(BinaryOperation::Mul);
+ ElementwiseBinaryLayer* elementwiseBinaryLayer =
+ graph.AddLayer<ElementwiseBinaryLayer>(elementwiseBinaryDescriptor, "multiplication");
+ elementwiseBinaryLayer->GetOutputSlot().SetTensorInfo(outputInfo);
+
+ Layer* output = graph.AddLayer<OutputLayer>(0, "output");
+
+ // Connect up layers - input -> floor -> broadcast_to -> multiplication -> output
+ input->GetOutputSlot().Connect(floorLayer->GetInputSlot(0));
+ floorLayer->GetOutputSlot().Connect(broadcastToLayer->GetInputSlot(0));
+ broadcastToLayer->GetOutputSlot().Connect(elementwiseBinaryLayer->GetInputSlot(0));
+ elementwiseBinaryLayer->GetOutputSlot().Connect(output->GetInputSlot(0));
+
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(),
+ &IsLayerOfType<InputLayer>,
+ &IsLayerOfType<FloorLayer>,
+ &IsLayerOfType<BroadcastToLayer>,
+ &IsLayerOfType<ElementwiseBinaryLayer>,
+ &IsLayerOfType<OutputLayer>));
+
+ Optimizer::Pass(graph, MakeOptimizations(BroadcastToOptimizationLayer()));
+
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(),
+ &IsLayerOfType<InputLayer>,
+ &IsLayerOfType<FloorLayer>,
+ &IsLayerOfType<ElementwiseBinaryLayer>,
+ &IsLayerOfType<OutputLayer>));
+ }
+
+ TEST_CASE("DeleteBroadcastToNullptr")
+ {
+ Graph graph;
+ const unsigned int inputShape[] = {1, 3};
+ const unsigned int outputShape[] = {4, 3};
+
+ //rank of input is 1 and of output is 2
+ TensorInfo inputInfo(1, inputShape, DataType::Float32);
+ TensorInfo outputInfo(2, outputShape, DataType::Float32);
+
+ Layer* input = graph.AddLayer<InputLayer>(0, "input");
+ input->GetOutputSlot().SetTensorInfo(inputInfo);
+
+ ElementwiseBinaryDescriptor elementwiseBinaryDescriptor(BinaryOperation::Mul);
+ ElementwiseBinaryLayer* elementwiseBinaryLayer =
+ graph.AddLayer<ElementwiseBinaryLayer>(elementwiseBinaryDescriptor, "multiplication");
+ elementwiseBinaryLayer->GetOutputSlot().SetTensorInfo(outputInfo);
+
+ Layer* output = graph.AddLayer<OutputLayer>(0, "output");
+
+ // Connect up layers - input -> broadcast_to -> multiplication -> output
+ input->GetOutputSlot().Connect(elementwiseBinaryLayer->GetInputSlot(0));
+ elementwiseBinaryLayer->GetOutputSlot().Connect(output->GetInputSlot(0));
+
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(),
+ &IsLayerOfType<InputLayer>,
+ &IsLayerOfType<ElementwiseBinaryLayer>,
+ &IsLayerOfType<OutputLayer>));
+
+ Optimizer::Pass(graph, MakeOptimizations(BroadcastToOptimizationLayer()));
+
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(),
+ &IsLayerOfType<InputLayer>,
+ &IsLayerOfType<ElementwiseBinaryLayer>,
+ &IsLayerOfType<OutputLayer>));
+ }
+
+ TEST_CASE("DeleteBroadcastToNotElementWise")
+ {
+ Graph graph;
+ const unsigned int inputShape[] = {1, 3};
+ const unsigned int outputShape[] = {4, 3};
+
+ //rank of input is 1 and of output is 2
+ TensorInfo inputInfo(1, inputShape, DataType::Float32);
+ TensorInfo broadcastToInfo(2, outputShape, DataType::Float32);
+ TensorInfo outputInfo(2, outputShape, DataType::Float32);
+
+ Layer* input = graph.AddLayer<InputLayer>(0, "input");
+ input->GetOutputSlot().SetTensorInfo(inputInfo);
+
+ BroadcastToDescriptor broadcastToDescriptor({4, 3});
+ BroadcastToLayer* broadcastToLayer = graph.AddLayer<BroadcastToLayer>(broadcastToDescriptor, "broadcast_to");
+ broadcastToLayer->GetOutputSlot().SetTensorInfo(broadcastToInfo);
+
+ TileDescriptor tileDescriptor({2, 3});
+ TileLayer* tileLayer = graph.AddLayer<TileLayer>(tileDescriptor, "tile");
+ tileLayer->GetOutputSlot().SetTensorInfo(outputInfo);
+
+ Layer* output = graph.AddLayer<OutputLayer>(0, "output");
+
+ // Connect up layers - input -> broadcast_to -> tile -> output
+ input->GetOutputSlot().Connect(broadcastToLayer->GetInputSlot(0));
+ broadcastToLayer->GetOutputSlot().Connect(tileLayer->GetInputSlot(0));
+ tileLayer->GetOutputSlot().Connect(output->GetInputSlot(0));
+
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(),
+ &IsLayerOfType<InputLayer>,
+ &IsLayerOfType<BroadcastToLayer>,
+ &IsLayerOfType<TileLayer>,
+ &IsLayerOfType<OutputLayer>));
+
+ Optimizer::Pass(graph, MakeOptimizations(BroadcastToOptimizationLayer()));
+
+ CHECK(CheckSequence(graph.cbegin(), graph.cend(),
+ &IsLayerOfType<InputLayer>,
+ &IsLayerOfType<BroadcastToLayer>,
+ &IsLayerOfType<TileLayer>,
+ &IsLayerOfType<OutputLayer>));
+ }
+}