author     Teresa Charlin <teresa.charlinreyes@arm.com>  2023-08-17 18:44:58 +0100
committer  Teresa Charlin <teresa.charlinreyes@arm.com>  2023-08-28 12:37:25 +0100
commit     9145e38edf49fa4862008c163c34590141eecb14 (patch)
tree       64706ef579f548b804d5b674b33f6b239c638d0f /src/armnn
parent     e40cc8359b02a7786908294300c45b672cf6b0e4 (diff)
IVGCVSW-7505 Create FusedLayer and NeonFusedWorkload for AddMulAdd Neon kernel
Signed-off-by: Teresa Charlin <teresa.charlinreyes@arm.com>
Change-Id: Ic778d35b001474b44fb1e433a6fe276e4ec9f565
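For orientation, a minimal sketch of how the public API added below might be exercised to put a fused AddMulAdd layer into a network. The FusedKernelType::AddMulAdd value, the 4-input/2-output slot counts, and the layer name are illustrative assumptions, not taken from this patch:

#include <armnn/Descriptors.hpp>
#include <armnn/INetwork.hpp>

void BuildFusedNetwork()
{
    using namespace armnn;

    INetworkPtr network = INetwork::Create();

    // Assumed descriptor contents for an AddMulAdd fusion:
    // 4 inputs (two add operands, mul weights, add bias) and 2 outputs.
    FusedDescriptor fusedDescriptor;
    fusedDescriptor.m_NumInputSlots   = 4;
    fusedDescriptor.m_NumOutputSlots  = 2;
    fusedDescriptor.m_FusedKernelType = FusedKernelType::AddMulAdd; // assumed enum value

    IConnectableLayer* fusedLayer = network->AddFusedLayer(fusedDescriptor, "fused-add-mul-add");
    (void)fusedLayer; // input/output layers would be connected to its slots here
}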
Diffstat (limited to 'src/armnn')
-rw-r--r--  src/armnn/BackendHelper.cpp             24
-rw-r--r--  src/armnn/LayersFwd.hpp                  2
-rw-r--r--  src/armnn/Network.cpp                   12
-rw-r--r--  src/armnn/Network.hpp                    3
-rw-r--r--  src/armnn/SerializeLayerParameters.cpp   9
-rw-r--r--  src/armnn/SerializeLayerParameters.hpp   5
-rw-r--r--  src/armnn/layers/FusedLayer.cpp         48
-rw-r--r--  src/armnn/layers/FusedLayer.hpp         38
8 files changed, 141 insertions, 0 deletions
diff --git a/src/armnn/BackendHelper.cpp b/src/armnn/BackendHelper.cpp
index f025193006..fc7a2fab83 100644
--- a/src/armnn/BackendHelper.cpp
+++ b/src/armnn/BackendHelper.cpp
@@ -748,6 +748,30 @@ bool LayerSupportHandle::IsFullyConnectedSupported(const TensorInfo& input,
reasonIfUnsupported);
}
+bool LayerSupportHandle::IsFusedSupported(const std::vector<std::reference_wrapper<TensorInfo>>& inputs,
+ const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
+ const FusedDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported)
+{
+ TensorInfos infos;
+ infos.reserve(inputs.size() + outputs.size());
+ for (TensorInfo inInfo : inputs)
+ {
+ infos.emplace_back(inInfo);
+ }
+ for (TensorInfo outInfo : outputs)
+ {
+ infos.emplace_back(outInfo);
+ }
+
+ return m_LayerSupport->IsLayerSupported(LayerType::Fused,
+ infos,
+ descriptor,
+ EmptyOptional(),
+ EmptyOptional(),
+ reasonIfUnsupported);
+}
+
bool LayerSupportHandle::IsGatherSupported(const TensorInfo& input0,
const TensorInfo& input1,
const TensorInfo& output,
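As a rough illustration (not part of this change), the new capability check might be queried along these lines; the backend id, tensor shapes, data type, and descriptor contents are placeholders:

#include <armnn/BackendHelper.hpp>
#include <armnn/Tensor.hpp>

#include <functional>
#include <vector>

void CheckFusedSupport()
{
    using namespace armnn;

    TensorInfo input0(TensorShape({1, 8}), DataType::Float32);
    TensorInfo input1(TensorShape({1, 8}), DataType::Float32);
    TensorInfo output0(TensorShape({1, 8}), DataType::Float32);

    std::vector<std::reference_wrapper<TensorInfo>> inputs  = { input0, input1 };
    std::vector<std::reference_wrapper<TensorInfo>> outputs = { output0 };

    FusedDescriptor descriptor; // slot counts and kernel type filled in as required

    // Ask the CpuAcc (Neon) backend whether the fused layer is supported.
    LayerSupportHandle handle = GetILayerSupportByBackendId("CpuAcc");
    bool supported = handle.IsFusedSupported(inputs, outputs, descriptor, EmptyOptional());
    (void)supported;
}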
diff --git a/src/armnn/LayersFwd.hpp b/src/armnn/LayersFwd.hpp
index 743b8d7205..f83b710134 100644
--- a/src/armnn/LayersFwd.hpp
+++ b/src/armnn/LayersFwd.hpp
@@ -33,6 +33,7 @@
#include "layers/FillLayer.hpp"
#include "layers/FloorLayer.hpp"
#include "layers/FullyConnectedLayer.hpp"
+#include "layers/FusedLayer.hpp"
#include "layers/GatherLayer.hpp"
#include "layers/GatherNdLayer.hpp"
#include "layers/InputLayer.hpp"
@@ -136,6 +137,7 @@ DECLARE_LAYER(FakeQuantization)
DECLARE_LAYER(Fill)
DECLARE_LAYER(Floor)
DECLARE_LAYER(FullyConnected)
+DECLARE_LAYER(Fused)
DECLARE_LAYER(Gather)
DECLARE_LAYER(GatherNd)
DECLARE_LAYER(Input)
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index 41111476da..7f4ef6b1b6 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -335,6 +335,12 @@ IConnectableLayer* INetwork::AddFullyConnectedLayer(const FullyConnectedDescript
return pNetworkImpl->AddFullyConnectedLayer(fullyConnectedDescriptor, name);
}
+IConnectableLayer* INetwork::AddFusedLayer(const FusedDescriptor& fusedDescriptor,
+ const char* name)
+{
+ return pNetworkImpl->AddFusedLayer(fusedDescriptor, name);
+}
+
IConnectableLayer* INetwork::AddPermuteLayer(const PermuteDescriptor& permuteDescriptor,
const char* name)
{
@@ -2195,6 +2201,12 @@ IConnectableLayer* NetworkImpl::AddFullyConnectedLayer(const FullyConnectedDescr
return m_Graph->AddLayer<FullyConnectedLayer>(fullyConnectedDescriptor, name);
}
+IConnectableLayer* NetworkImpl::AddFusedLayer(const FusedDescriptor& fusedDescriptor,
+ const char* name)
+{
+ return m_Graph->AddLayer<FusedLayer>(fusedDescriptor, name);
+}
+
IConnectableLayer* NetworkImpl::AddConcatLayer(const ConcatDescriptor& concatDescriptor,
const char* name)
{
diff --git a/src/armnn/Network.hpp b/src/armnn/Network.hpp
index 34549248bc..5a3570d825 100644
--- a/src/armnn/Network.hpp
+++ b/src/armnn/Network.hpp
@@ -113,6 +113,9 @@ public:
IConnectableLayer* AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor,
const char* name = nullptr);
+ IConnectableLayer* AddFusedLayer(const FusedDescriptor& fusedDescriptor,
+ const char* name = nullptr);
+
IConnectableLayer* AddGatherLayer(const GatherDescriptor& gatherDescriptor,
const char* name = nullptr);
diff --git a/src/armnn/SerializeLayerParameters.cpp b/src/armnn/SerializeLayerParameters.cpp
index d65a7d55fa..cc59e1fad3 100644
--- a/src/armnn/SerializeLayerParameters.cpp
+++ b/src/armnn/SerializeLayerParameters.cpp
@@ -325,6 +325,15 @@ void StringifyLayerParameters<PreCompiledDescriptor>::Serialize(ParameterStringi
fn("NumOutputSlots", std::to_string(desc.m_NumOutputSlots));
}
+void StringifyLayerParameters<FusedDescriptor>::Serialize(ParameterStringifyFunction& fn,
+ const FusedDescriptor& desc)
+{
+ fn("NumInputSlots", std::to_string(desc.m_NumInputSlots));
+ fn("NumOutputSlots", std::to_string(desc.m_NumOutputSlots));
+ fn("FusedKernelType", GetFusedTypeAsCString(desc.m_FusedKernelType));
+
+}
+
void StringifyLayerParameters<Pooling2dDescriptor>::Serialize(ParameterStringifyFunction& fn,
const Pooling2dDescriptor& desc)
{
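A small sketch of how the new specialisation might be driven, assuming ParameterStringifyFunction is the usual name/value callback used by the other specialisations in this file (this would live inside src/armnn, e.g. in a unit test, since the header is internal):

#include "SerializeLayerParameters.hpp"
#include <armnn/Descriptors.hpp>

#include <iostream>
#include <string>

void PrintFusedParameters()
{
    using namespace armnn;

    FusedDescriptor desc;
    desc.m_NumInputSlots  = 4; // illustrative values
    desc.m_NumOutputSlots = 2;

    // Callback that receives each (name, value) pair produced by Serialize.
    ParameterStringifyFunction print = [](const std::string& name, const std::string& value)
    {
        std::cout << name << ": " << value << "\n";
    };

    StringifyLayerParameters<FusedDescriptor>::Serialize(print, desc);
}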
diff --git a/src/armnn/SerializeLayerParameters.hpp b/src/armnn/SerializeLayerParameters.hpp
index 5b0378eab7..34a2986534 100644
--- a/src/armnn/SerializeLayerParameters.hpp
+++ b/src/armnn/SerializeLayerParameters.hpp
@@ -149,6 +149,11 @@ template <> struct StringifyLayerParameters<PreCompiledDescriptor>
static void Serialize(ParameterStringifyFunction& fn, const PreCompiledDescriptor& desc);
};
+template <> struct StringifyLayerParameters<FusedDescriptor>
+{
+ static void Serialize(ParameterStringifyFunction& fn, const FusedDescriptor& desc);
+};
+
template <> struct StringifyLayerParameters<ReduceDescriptor>
{
static void Serialize(ParameterStringifyFunction& fn, const ReduceDescriptor& desc);
diff --git a/src/armnn/layers/FusedLayer.cpp b/src/armnn/layers/FusedLayer.cpp
new file mode 100644
index 0000000000..37b1835450
--- /dev/null
+++ b/src/armnn/layers/FusedLayer.cpp
@@ -0,0 +1,48 @@
+//
+// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "FusedLayer.hpp"
+#include "LayerCloneBase.hpp"
+
+#include <armnn/backends/Workload.hpp>
+#include <armnn/TypesUtils.hpp>
+
+namespace armnn
+{
+
+FusedLayer::FusedLayer(const FusedDescriptor& param, const char* name)
+ : LayerWithParameters(param.m_NumInputSlots, param.m_NumOutputSlots, LayerType::Fused, param, name)
+{}
+
+FusedLayer::~FusedLayer()
+{}
+
+FusedLayer* FusedLayer::Clone(Graph& graph) const
+{
+ FusedLayer* clonedLayer = CloneBase<FusedLayer>(graph, m_Param, GetName());
+ clonedLayer->m_AdditionalInfoObject = const_cast<FusedLayer*>(this)->m_AdditionalInfoObject;
+ return clonedLayer;
+}
+
+std::unique_ptr<IWorkload> FusedLayer::CreateWorkload(const armnn::IWorkloadFactory& factory) const
+{
+ FusedQueueDescriptor descriptor;
+ SetAdditionalInfo(descriptor);
+
+ return factory.CreateWorkload(LayerType::Fused, descriptor, PrepInfoAndDesc(descriptor));
+}
+
+void FusedLayer::ValidateTensorShapesFromInputs()
+{
+ // NOTE: since the FusedLayer is an internal layer created from a valid SubgraphView,
+ // we do not need to validate its input shapes
+}
+
+void FusedLayer::ExecuteStrategy(IStrategy& strategy) const
+{
+ strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
+}
+
+} // namespace armnn
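For context, the layer is created on a Graph in the same way NetworkImpl::AddFusedLayer does above; a minimal sketch of adding and cloning it from code inside src/armnn (descriptor values are assumptions):

#include "Graph.hpp"
#include "layers/FusedLayer.hpp"

void AddAndCloneFusedLayer()
{
    using namespace armnn;

    FusedDescriptor descriptor;
    descriptor.m_NumInputSlots  = 4; // illustrative values
    descriptor.m_NumOutputSlots = 2;

    Graph graph;
    FusedLayer* fusedLayer = graph.AddLayer<FusedLayer>(descriptor, "fused");

    // Clone copies the parameters and the additional-info object into another graph.
    Graph otherGraph;
    FusedLayer* clonedLayer = fusedLayer->Clone(otherGraph);
    (void)clonedLayer;
}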
diff --git a/src/armnn/layers/FusedLayer.hpp b/src/armnn/layers/FusedLayer.hpp
new file mode 100644
index 0000000000..e26a379707
--- /dev/null
+++ b/src/armnn/layers/FusedLayer.hpp
@@ -0,0 +1,38 @@
+//
+// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "LayerWithParameters.hpp"
+#include <armnn/backends/WorkloadFactory.hpp>
+
+#include <armnn/Descriptors.hpp>
+
+#include <memory>
+#include <functional>
+
+namespace armnn
+{
+
+class FusedLayer : public LayerWithParameters<FusedDescriptor>
+{
+public:
+ FusedLayer(const FusedDescriptor& param, const char* name);
+ ~FusedLayer();
+
+ virtual std::unique_ptr<IWorkload> CreateWorkload(const IWorkloadFactory& factory) const override;
+
+ FusedLayer* Clone(Graph &graph) const override;
+
+ void ValidateTensorShapesFromInputs() override;
+
+ void ExecuteStrategy(IStrategy& strategy) const override;
+
+private:
+ FusedLayer(const FusedLayer& other) = delete;
+ FusedLayer& operator=(const FusedLayer& other) = delete;
+};
+
+} // namespace armnn