Diffstat (limited to 'src')
-rw-r--r--  src/armnn/InternalTypes.hpp                                    |  1
-rw-r--r--  src/armnn/LayersFwd.hpp                                        |  2
-rw-r--r--  src/armnn/Network.cpp                                          |  7
-rw-r--r--  src/armnn/Network.hpp                                          |  3
-rw-r--r--  src/armnn/layers/StandInLayer.cpp                              | 47
-rw-r--r--  src/armnn/layers/StandInLayer.hpp                              | 56
-rw-r--r--  src/armnnSerializer/Serializer.cpp                             |  7
-rw-r--r--  src/armnnSerializer/Serializer.hpp                             |  4
-rw-r--r--  src/backends/backendsCommon/LayerSupportBase.cpp               | 16
-rw-r--r--  src/backends/backendsCommon/LayerSupportBase.hpp               |  5
-rw-r--r--  src/backends/backendsCommon/WorkloadFactory.cpp                | 41
-rw-r--r--  src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp  | 21
12 files changed, 210 insertions(+), 0 deletions(-)
diff --git a/src/armnn/InternalTypes.hpp b/src/armnn/InternalTypes.hpp
index d7f932f699..36e7280e96 100644
--- a/src/armnn/InternalTypes.hpp
+++ b/src/armnn/InternalTypes.hpp
@@ -66,6 +66,7 @@ enum class LayerType
SpaceToDepth,
Splitter,
Stack,
+ StandIn,
StridedSlice,
Subtraction,
Switch,
diff --git a/src/armnn/LayersFwd.hpp b/src/armnn/LayersFwd.hpp
index 6c30749904..13bf900dca 100644
--- a/src/armnn/LayersFwd.hpp
+++ b/src/armnn/LayersFwd.hpp
@@ -58,6 +58,7 @@
#include "layers/SpaceToDepthLayer.hpp"
#include "layers/SplitterLayer.hpp"
#include "layers/StackLayer.hpp"
+#include "layers/StandInLayer.hpp"
#include "layers/StridedSliceLayer.hpp"
#include "layers/SubtractionLayer.hpp"
#include "layers/SwitchLayer.hpp"
@@ -142,6 +143,7 @@ DECLARE_LAYER(SpaceToBatchNd)
DECLARE_LAYER(SpaceToDepth)
DECLARE_LAYER(Splitter)
DECLARE_LAYER(Stack)
+DECLARE_LAYER(StandIn)
DECLARE_LAYER(StridedSlice)
DECLARE_LAYER(Subtraction)
DECLARE_LAYER(Switch)
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index 857f6b3959..1339a6ef6b 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -1503,6 +1503,13 @@ IConnectableLayer* Network::AddStackLayer(const StackDescriptor& stackDescriptor
return m_Graph->AddLayer<StackLayer>(stackDescriptor, name);
}
+
+IConnectableLayer* Network::AddStandInLayer(const StandInDescriptor& desc,
+ const char* name)
+{
+ return m_Graph->AddLayer<StandInLayer>(desc, name);
+}
+
IConnectableLayer* Network::AddQuantizedLstmLayer(const QuantizedLstmInputParams& params,
const char* name)
{
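The hunk above wires the public entry point through to the graph. A minimal usage sketch, assuming the matching pure virtual AddStandInLayer is declared on INetwork (that header lives under include/armnn, outside this 'src'-limited diffstat) and that StandInDescriptor exposes the m_NumInputs field read by the layer constructor further down; the helper name and input count are illustrative:

#include <armnn/INetwork.hpp>

// Hypothetical helper: inserts a stand-in for an unknown two-input operation.
armnn::IConnectableLayer* AddUnknownOp(armnn::INetwork& network)
{
    armnn::StandInDescriptor descriptor;
    descriptor.m_NumInputs = 2; // assumption: the descriptor carries the input count
    return network.AddStandInLayer(descriptor, "custom-op-stand-in");
}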
diff --git a/src/armnn/Network.hpp b/src/armnn/Network.hpp
index c1d99a9010..49cf4dac09 100644
--- a/src/armnn/Network.hpp
+++ b/src/armnn/Network.hpp
@@ -222,6 +222,9 @@ public:
IConnectableLayer* AddStackLayer(const StackDescriptor& stackDescriptor,
const char* name = nullptr) override;
+ IConnectableLayer* AddStandInLayer(const StandInDescriptor& descriptor,
+ const char* name = nullptr) override;
+
IConnectableLayer* AddQuantizedLstmLayer(const QuantizedLstmInputParams& params,
const char* name = nullptr) override;
diff --git a/src/armnn/layers/StandInLayer.cpp b/src/armnn/layers/StandInLayer.cpp
new file mode 100644
index 0000000000..fdc905fea2
--- /dev/null
+++ b/src/armnn/layers/StandInLayer.cpp
@@ -0,0 +1,47 @@
+//
+// Copyright © 2019 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "StandInLayer.hpp"
+#include "LayerCloneBase.hpp"
+
+namespace armnn
+{
+
+StandInLayer::StandInLayer(const StandInDescriptor& param, const char* name)
+ : LayerWithParameters(param.m_NumInputs, 1, LayerType::StandIn, param, name)
+{
+}
+
+std::unique_ptr<IWorkload> StandInLayer::CreateWorkload(const Graph& graph, const IWorkloadFactory& factory) const
+{
+ // This method throws if it is ever called. Any backend that claims to support
+ // the StandInLayer type is expected to substitute it with a PrecompiledLayer during
+ // graph optimization; IWorkloadFactory has no interface to create a StandInWorkload.
+ throw Exception("Stand in layer does not support creating workloads");
+}
+
+StandInLayer* StandInLayer::Clone(Graph& graph) const
+{
+ return CloneBase<StandInLayer>(graph, m_Param, GetName());
+}
+
+std::vector<TensorShape> StandInLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
+{
+ throw Exception("Stand in layer does not support infering output shapes");
+}
+
+void StandInLayer::ValidateTensorShapesFromInputs()
+{
+ // Cannot validate this layer since no implementation details can be known by the framework
+ // so do nothing here.
+}
+
+void StandInLayer::Accept(ILayerVisitor& visitor) const
+{
+ visitor.VisitStandInLayer(this, GetParameters(), GetName());
+}
+} //namespace armnn
+
+
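The CreateWorkload comment above fixes the contract: a stand-in is never executed, and a backend that claims StandIn support must substitute a PrecompiledLayer during graph optimization. A sketch of what an unsubstituted stand-in does at workload-creation time, using the internal Graph API the way the src/armnn tests do (the descriptor field matches the param.m_NumInputs read by the constructor; the function name is illustrative):

#include "Graph.hpp"
#include "layers/StandInLayer.hpp"

void UnsubstitutedStandInSketch()
{
    armnn::Graph graph;
    armnn::StandInDescriptor descriptor;
    descriptor.m_NumInputs = 1;
    armnn::StandInLayer* standIn =
        graph.AddLayer<armnn::StandInLayer>(descriptor, "stand-in");
    // If no backend has replaced the layer by execution time, creating its
    // workload throws armnn::Exception, per the implementation above:
    // standIn->CreateWorkload(graph, workloadFactory); // throws
}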
diff --git a/src/armnn/layers/StandInLayer.hpp b/src/armnn/layers/StandInLayer.hpp
new file mode 100644
index 0000000000..9fe1773a27
--- /dev/null
+++ b/src/armnn/layers/StandInLayer.hpp
@@ -0,0 +1,56 @@
+//
+// Copyright © 2019 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "LayerWithParameters.hpp"
+
+namespace armnn
+{
+
+/// This layer represents an unknown operation in the input graph.
+class StandInLayer : public LayerWithParameters<StandInDescriptor>
+{
+public:
+ /// Empty implementation that explicitly does NOT create a workload. Throws Exception if called.
+ /// @param [in] graph The graph where this layer can be found.
+ /// @param [in] factory The workload factory which will create the workload.
+ /// @return Does not return anything. Throws Exception if called.
+ virtual std::unique_ptr<IWorkload>CreateWorkload(const Graph& graph,
+ const IWorkloadFactory& factory) const override;
+
+ /// Creates a dynamically-allocated copy of this layer.
+ /// @param [in] graph The graph into which this layer is being cloned.
+ StandInLayer* Clone(Graph& graph) const override;
+
+ /// Checks the input tensor shape(s).
+ /// Does nothing, since no properties of this layer can be validated.
+ void ValidateTensorShapesFromInputs() override;
+
+ /// Empty implementation that throws an Exception if called; the output shapes of a
+ /// stand-in layer cannot be inferred from its input shapes and layer properties.
+ /// @param [in] inputShapes The input shapes layer has.
+ /// @return Does not return anything. Throws Exception if called.
+ std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
+
+ /// Accepts a visitor object and calls VisitStandInLayer() method.
+ /// @param visitor The visitor on which to call VisitStandInLayer() method.
+ void Accept(ILayerVisitor& visitor) const override;
+
+protected:
+ /// Constructor to create a StandInLayer.
+ /// @param [in] param StandInDescriptor to configure the stand-in operation.
+ /// @param [in] name Optional name for the layer.
+ StandInLayer(const StandInDescriptor& param, const char* name);
+
+ /// Default destructor
+ ~StandInLayer() = default;
+};
+
+} //namespace armnn
+
+
+
+
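Because Accept routes through VisitStandInLayer (its signature appears in the Serializer hunk below), callers can detect stand-ins without reaching into internal layer types. A hedged sketch, assuming the include/armnn side of this change gives LayerVisitorBase a matching no-op default and that INetwork::Accept is available in this tree:

#include <armnn/LayerVisitorBase.hpp>

// Sketch: counts the StandIn layers seen while visiting a network.
struct StandInCounter : armnn::LayerVisitorBase<armnn::VisitorNoThrowPolicy>
{
    size_t m_Count = 0;

    void VisitStandInLayer(const armnn::IConnectableLayer* /*layer*/,
                           const armnn::StandInDescriptor& /*descriptor*/,
                           const char* /*name*/) override
    {
        ++m_Count;
    }
};

// Usage: StandInCounter counter; network->Accept(counter);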
diff --git a/src/armnnSerializer/Serializer.cpp b/src/armnnSerializer/Serializer.cpp
index 6122351812..d147d47f41 100644
--- a/src/armnnSerializer/Serializer.cpp
+++ b/src/armnnSerializer/Serializer.cpp
@@ -1090,6 +1090,13 @@ void SerializerVisitor::VisitStackLayer(const armnn::IConnectableLayer* layer,
CreateAnyLayer(stackLayer.o, serializer::Layer::Layer_StackLayer);
}
+void SerializerVisitor::VisitStandInLayer(const armnn::IConnectableLayer* layer,
+ const armnn::StandInDescriptor& standInDescriptor,
+ const char* name)
+{
+ // TODO: IVGCVSW-4010 Implement serialization
+}
+
void SerializerVisitor::VisitStridedSliceLayer(const armnn::IConnectableLayer* layer,
const armnn::StridedSliceDescriptor& stridedSliceDescriptor,
const char* name)
diff --git a/src/armnnSerializer/Serializer.hpp b/src/armnnSerializer/Serializer.hpp
index 79dc17ba01..1fd507af80 100644
--- a/src/armnnSerializer/Serializer.hpp
+++ b/src/armnnSerializer/Serializer.hpp
@@ -235,6 +235,10 @@ public:
const armnn::ViewsDescriptor& viewsDescriptor,
const char* name = nullptr) override;
+ void VisitStandInLayer(const armnn::IConnectableLayer* layer,
+ const armnn::StandInDescriptor& standInDescriptor,
+ const char* name = nullptr) override;
+
void VisitStackLayer(const armnn::IConnectableLayer* layer,
const armnn::StackDescriptor& stackDescriptor,
const char* name = nullptr) override;
diff --git a/src/backends/backendsCommon/LayerSupportBase.cpp b/src/backends/backendsCommon/LayerSupportBase.cpp
index 358106e5e9..9ffad7b8e2 100644
--- a/src/backends/backendsCommon/LayerSupportBase.cpp
+++ b/src/backends/backendsCommon/LayerSupportBase.cpp
@@ -502,6 +502,22 @@ bool LayerSupportBase::IsStackSupported(const std::vector<const TensorInfo*>& in
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
+bool LayerSupportBase::IsStandInSupported(const std::vector<const TensorInfo*>& inputs,
+ const std::vector<const TensorInfo*>& outputs,
+ const StandInDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported) const
+{
+ if (reasonIfUnsupported)
+ {
+ std::stringstream message;
+ message << "StandIn layer is not executable via backends";
+
+ reasonIfUnsupported.value() = message.str();
+ }
+
+ return false;
+}
+
bool LayerSupportBase::IsStridedSliceSupported(const TensorInfo& input,
const TensorInfo& output,
const StridedSliceDescriptor& descriptor,
diff --git a/src/backends/backendsCommon/LayerSupportBase.hpp b/src/backends/backendsCommon/LayerSupportBase.hpp
index d4c37c1a91..e99cb67614 100644
--- a/src/backends/backendsCommon/LayerSupportBase.hpp
+++ b/src/backends/backendsCommon/LayerSupportBase.hpp
@@ -312,6 +312,11 @@ public:
const StackDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+ bool IsStandInSupported(const std::vector<const TensorInfo*>& inputs,
+ const std::vector<const TensorInfo*>& outputs,
+ const StandInDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
bool IsStridedSliceSupported(const TensorInfo& input,
const TensorInfo& output,
const StridedSliceDescriptor& descriptor,
diff --git a/src/backends/backendsCommon/WorkloadFactory.cpp b/src/backends/backendsCommon/WorkloadFactory.cpp
index 30dfa023f9..34e4cbe579 100644
--- a/src/backends/backendsCommon/WorkloadFactory.cpp
+++ b/src/backends/backendsCommon/WorkloadFactory.cpp
@@ -902,6 +902,47 @@ bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId,
break;
}
+ case LayerType::StandIn:
+ {
+ auto cLayer = boost::polymorphic_downcast<const StandInLayer*>(&layer);
+
+ // Get vector of all inputs.
+ auto getTensorInfoIn = [&dataType](const InputSlot& slot)
+ {
+ return OverrideDataType(slot.GetConnectedOutputSlot()->GetTensorInfo(), dataType);
+ };
+ auto getTensorInfoOut = [&dataType](const OutputSlot& slot)
+ {
+ return OverrideDataType(slot.GetTensorInfo(), dataType);
+ };
+ auto beginI = boost::make_transform_iterator(layer.GetInputSlots().begin(), getTensorInfoIn);
+ auto endI = boost::make_transform_iterator(layer.GetInputSlots().end(), getTensorInfoIn);
+ std::vector<TensorInfo> inputs(beginI, endI);
+
+ auto beginO = boost::make_transform_iterator(layer.GetOutputSlots().begin(), getTensorInfoOut);
+ auto endO = boost::make_transform_iterator(layer.GetOutputSlots().end(), getTensorInfoOut);
+ std::vector<TensorInfo> outputs(beginO, endO);
+
+
+ auto getTensorInfoPtr = [](const TensorInfo& info)
+ {
+ return &info;
+ };
+ auto beginPtrI = boost::make_transform_iterator(inputs.begin(), getTensorInfoPtr);
+ auto endPtrI = boost::make_transform_iterator(inputs.end(), getTensorInfoPtr);
+ std::vector<const TensorInfo*> inputPtrs(beginPtrI, endPtrI);
+
+ auto beginPtrO = boost::make_transform_iterator(outputs.begin(), getTensorInfoPtr);
+ auto endPtrO = boost::make_transform_iterator(outputs.end(), getTensorInfoPtr);
+ std::vector<const TensorInfo*> outputPtrs(beginPtrO, endPtrO);
+
+
+ result = layerSupportObject->IsStandInSupported(inputPtrs,
+ outputPtrs,
+ cLayer->GetParameters(),
+ reason);
+ break;
+ }
case LayerType::StridedSlice:
{
auto cLayer = boost::polymorphic_downcast<const StridedSliceLayer*>(&layer);
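The transform-iterator block in the StandIn case above does two things: it materialises a TensorInfo per input and output slot with the data type overridden, then builds parallel vectors of pointers into those copies for IsStandInSupported. A plain-loop rendering of the same computation, shown only for clarity; OverrideDataType, dataType and layer are the names already in scope in the hunk:

std::vector<armnn::TensorInfo> inputs;
for (const armnn::InputSlot& slot : layer.GetInputSlots())
{
    inputs.push_back(
        OverrideDataType(slot.GetConnectedOutputSlot()->GetTensorInfo(), dataType));
}

std::vector<const armnn::TensorInfo*> inputPtrs;
inputPtrs.reserve(inputs.size());
for (const armnn::TensorInfo& info : inputs)
{
    inputPtrs.push_back(&info); // valid: `inputs` outlives the support query
}
// outputs/outputPtrs are built the same way from layer.GetOutputSlots().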
diff --git a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
index 9bddae9759..c52d6a9511 100644
--- a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
+++ b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
@@ -380,6 +380,25 @@ struct LayerTypePolicy<armnn::LayerType::name, DataType> \
// Use this version for layers whose constructor takes 2 parameters(descriptor and name).
#define DECLARE_LAYER_POLICY_2_PARAM(name) DECLARE_LAYER_POLICY_CUSTOM_PARAM(name, armnn::name##Descriptor)
+
+#define DECLARE_LAYER_POLICY_EXCEPTION(name, descType) \
+template<armnn::DataType DataType> \
+struct LayerTypePolicy<armnn::LayerType::name, DataType> \
+{ \
+ using Type = armnn::name##Layer; \
+ using Desc = descType; \
+ constexpr static const char* NameStr = #name; \
+ \
+ static std::unique_ptr<armnn::IWorkload> MakeDummyWorkload(armnn::IWorkloadFactory *factory, \
+ unsigned int nIn, unsigned int nOut) \
+ { \
+ return std::unique_ptr<armnn::IWorkload>(); \
+ } \
+};
+
+#define DECLARE_LAYER_POLICY_EXCEPTION_1_PARAM(name) DECLARE_LAYER_POLICY_EXCEPTION(name, void)
+#define DECLARE_LAYER_POLICY_EXCEPTION_2_PARAM(name) DECLARE_LAYER_POLICY_EXCEPTION(name, armnn::name##Descriptor)
+
// Layer policy template.
template<armnn::LayerType Type, armnn::DataType DataType>
struct LayerTypePolicy;
@@ -489,6 +508,8 @@ DECLARE_LAYER_POLICY_2_PARAM(Splitter)
DECLARE_LAYER_POLICY_2_PARAM(Stack)
+DECLARE_LAYER_POLICY_EXCEPTION_2_PARAM(StandIn)
+
DECLARE_LAYER_POLICY_2_PARAM(StridedSlice)
DECLARE_LAYER_POLICY_1_PARAM(Subtraction)