aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorDerek Lamberti <derek.lamberti@arm.com>2019-10-21 10:46:16 +0100
committerMatteo Martincigh <matteo.martincigh@arm.com>2019-10-21 12:59:23 +0000
commit013c390c2d9829fede2d8b1d59c3f2a497730462 (patch)
tree7918e1e4e3445a741695772864d200b3979dd1f8
parente80ebd101b516751a798aa1b1d669e9117a32266 (diff)
downloadarmnn-013c390c2d9829fede2d8b1d59c3f2a497730462.tar.gz
IVGCVSW-4009 StandInLayer frontend API
Change-Id: I058c57b554769799c6775813215070ef47790e3d Signed-off-by: Derek Lamberti <derek.lamberti@arm.com>
-rw-r--r--Android.mk1
-rw-r--r--CMakeLists.txt2
-rw-r--r--include/armnn/Descriptors.hpp16
-rw-r--r--include/armnn/DescriptorsFwd.hpp1
-rw-r--r--include/armnn/ILayerSupport.hpp6
-rw-r--r--include/armnn/ILayerVisitor.hpp8
-rw-r--r--include/armnn/INetwork.hpp10
-rw-r--r--include/armnn/LayerVisitorBase.hpp4
-rw-r--r--src/armnn/InternalTypes.hpp1
-rw-r--r--src/armnn/LayersFwd.hpp2
-rw-r--r--src/armnn/Network.cpp7
-rw-r--r--src/armnn/Network.hpp3
-rw-r--r--src/armnn/layers/StandInLayer.cpp47
-rw-r--r--src/armnn/layers/StandInLayer.hpp56
-rw-r--r--src/armnnSerializer/Serializer.cpp7
-rw-r--r--src/armnnSerializer/Serializer.hpp4
-rw-r--r--src/backends/backendsCommon/LayerSupportBase.cpp16
-rw-r--r--src/backends/backendsCommon/LayerSupportBase.hpp5
-rw-r--r--src/backends/backendsCommon/WorkloadFactory.cpp41
-rw-r--r--src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp21
20 files changed, 258 insertions, 0 deletions
diff --git a/Android.mk b/Android.mk
index 02941230f6..29ca0830c4 100644
--- a/Android.mk
+++ b/Android.mk
@@ -165,6 +165,7 @@ LOCAL_SRC_FILES := \
src/armnn/layers/SpaceToDepthLayer.cpp \
src/armnn/layers/SplitterLayer.cpp \
src/armnn/layers/StackLayer.cpp \
+ src/armnn/layers/StandInLayer.cpp \
src/armnn/layers/StridedSliceLayer.cpp \
src/armnn/layers/SubtractionLayer.cpp \
src/armnn/layers/SwitchLayer.cpp \
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 626478a0e9..ed683da0bc 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -344,6 +344,8 @@ list(APPEND armnn_sources
src/armnn/layers/SplitterLayer.cpp
src/armnn/layers/StackLayer.hpp
src/armnn/layers/StackLayer.cpp
+ src/armnn/layers/StandInLayer.cpp
+ src/armnn/layers/StandInLayer.hpp
src/armnn/layers/StridedSliceLayer.cpp
src/armnn/layers/StridedSliceLayer.hpp
src/armnn/layers/SubtractionLayer.cpp
diff --git a/include/armnn/Descriptors.hpp b/include/armnn/Descriptors.hpp
index 10d8ab7a08..425c5261df 100644
--- a/include/armnn/Descriptors.hpp
+++ b/include/armnn/Descriptors.hpp
@@ -952,6 +952,22 @@ struct StackDescriptor
TensorShape m_InputShape;
};
+/// A StandInDescriptor for the StandIn layer.
+struct StandInDescriptor
+{
+    // No stray semicolon after the body: "{};" is an extra empty declaration
+    // that warns under -Wextra-semi / pedantic builds.
+    StandInDescriptor() {}
+
+    /// @param numInputs  - Number of input tensors the stand-in operation consumes.
+    /// @param numOutputs - Number of output tensors the stand-in operation produces.
+    StandInDescriptor(uint32_t numInputs, uint32_t numOutputs)
+        : m_NumInputs(numInputs)
+        , m_NumOutputs(numOutputs)
+    {}
+
+    /// Number of input tensors
+    uint32_t m_NumInputs = 0;
+    /// Number of output tensors
+    uint32_t m_NumOutputs = 0;
+};
+
/// A StridedSliceDescriptor for the StridedSliceLayer.
struct StridedSliceDescriptor
{
diff --git a/include/armnn/DescriptorsFwd.hpp b/include/armnn/DescriptorsFwd.hpp
index a978f7739a..cfdef8a030 100644
--- a/include/armnn/DescriptorsFwd.hpp
+++ b/include/armnn/DescriptorsFwd.hpp
@@ -36,6 +36,7 @@ struct SpaceToBatchNdDescriptor;
struct SpaceToDepthDescriptor;
struct SliceDescriptor;
struct StackDescriptor;
+struct StandInDescriptor;
struct StridedSliceDescriptor;
struct TransposeConvolution2dDescriptor;
struct ViewsDescriptor;
diff --git a/include/armnn/ILayerSupport.hpp b/include/armnn/ILayerSupport.hpp
index 87197eed5a..54f4a2883b 100644
--- a/include/armnn/ILayerSupport.hpp
+++ b/include/armnn/ILayerSupport.hpp
@@ -328,6 +328,12 @@ public:
const StackDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
+ virtual bool IsStandInSupported(const std::vector<const TensorInfo*>& inputs,
+ const std::vector<const TensorInfo*>& outputs,
+ const StandInDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
+
+
virtual bool IsStridedSliceSupported(const TensorInfo& input,
const TensorInfo& output,
const StridedSliceDescriptor& descriptor,
diff --git a/include/armnn/ILayerVisitor.hpp b/include/armnn/ILayerVisitor.hpp
index 80931ebcc0..9669b3a7cb 100644
--- a/include/armnn/ILayerVisitor.hpp
+++ b/include/armnn/ILayerVisitor.hpp
@@ -443,6 +443,14 @@ public:
const StackDescriptor& stackDescriptor,
const char* name = nullptr) = 0;
+ /// Function a StandInLayer should call back to when its Accept(ILayerVisitor&) function is invoked.
+ /// @param layer - pointer to the layer which is calling back to this visit function.
+ /// @param standInDescriptor - Parameters for the stand-in layer.
+ /// @param name - Optional name for the layer.
+ virtual void VisitStandInLayer(const IConnectableLayer* layer,
+ const StandInDescriptor& standInDescriptor,
+ const char* name = nullptr) = 0;
+
/// Function a strided slice layer should call back to when its Accept(ILayerVisitor&) function is invoked.
/// @param layer - pointer to the layer which is calling back to this visit function.
/// @param stridedSliceDescriptor - Parameters for the strided slice operation.
diff --git a/include/armnn/INetwork.hpp b/include/armnn/INetwork.hpp
index b3fab82cb4..10414923e8 100644
--- a/include/armnn/INetwork.hpp
+++ b/include/armnn/INetwork.hpp
@@ -507,6 +507,16 @@ public:
virtual IConnectableLayer* AddStackLayer(const StackDescriptor& descriptor,
const char* name = nullptr) = 0;
+
+ /// Add a stand-in layer for a type unknown to the Arm NN framework.
+ /// Note: Due to the nature of this layer, no validation can be performed by the framework.
+ /// Furthermore, any model containing this layer cannot make use of dynamic tensors since the
+ /// tensor sizes cannot be inferred.
+ /// @param descriptor - Descriptor for the StandIn layer.
+ /// @return - Interface for configuring the layer.
+ virtual IConnectableLayer* AddStandInLayer(const StandInDescriptor& descriptor,
+ const char* name = nullptr) = 0;
+
/// Add a QuantizedLstm layer to the network
/// @param params - The weights and biases for the Quantized LSTM cell
/// @param name - Optional name for the layer
diff --git a/include/armnn/LayerVisitorBase.hpp b/include/armnn/LayerVisitorBase.hpp
index 5226fa2f66..388fc6f922 100644
--- a/include/armnn/LayerVisitorBase.hpp
+++ b/include/armnn/LayerVisitorBase.hpp
@@ -222,6 +222,10 @@ public:
const StackDescriptor&,
const char*) override { DefaultPolicy::Apply(__func__); }
+ void VisitStandInLayer(const IConnectableLayer*,
+ const StandInDescriptor&,
+ const char*) override { DefaultPolicy::Apply(__func__); }
+
void VisitStridedSliceLayer(const IConnectableLayer*,
const StridedSliceDescriptor&,
const char*) override { DefaultPolicy::Apply(__func__); }
diff --git a/src/armnn/InternalTypes.hpp b/src/armnn/InternalTypes.hpp
index d7f932f699..36e7280e96 100644
--- a/src/armnn/InternalTypes.hpp
+++ b/src/armnn/InternalTypes.hpp
@@ -66,6 +66,7 @@ enum class LayerType
SpaceToDepth,
Splitter,
Stack,
+ StandIn,
StridedSlice,
Subtraction,
Switch,
diff --git a/src/armnn/LayersFwd.hpp b/src/armnn/LayersFwd.hpp
index 6c30749904..13bf900dca 100644
--- a/src/armnn/LayersFwd.hpp
+++ b/src/armnn/LayersFwd.hpp
@@ -58,6 +58,7 @@
#include "layers/SpaceToDepthLayer.hpp"
#include "layers/SplitterLayer.hpp"
#include "layers/StackLayer.hpp"
+#include "layers/StandInLayer.hpp"
#include "layers/StridedSliceLayer.hpp"
#include "layers/SubtractionLayer.hpp"
#include "layers/SwitchLayer.hpp"
@@ -142,6 +143,7 @@ DECLARE_LAYER(SpaceToBatchNd)
DECLARE_LAYER(SpaceToDepth)
DECLARE_LAYER(Splitter)
DECLARE_LAYER(Stack)
+DECLARE_LAYER(StandIn)
DECLARE_LAYER(StridedSlice)
DECLARE_LAYER(Subtraction)
DECLARE_LAYER(Switch)
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index 857f6b3959..1339a6ef6b 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -1503,6 +1503,13 @@ IConnectableLayer* Network::AddStackLayer(const StackDescriptor& stackDescriptor
return m_Graph->AddLayer<StackLayer>(stackDescriptor, name);
}
+
+IConnectableLayer* Network::AddStandInLayer(const StandInDescriptor& desc,
+ const char* name)
+{
+ return m_Graph->AddLayer<StandInLayer>(desc, name);
+}
+
IConnectableLayer* Network::AddQuantizedLstmLayer(const QuantizedLstmInputParams& params,
const char* name)
{
diff --git a/src/armnn/Network.hpp b/src/armnn/Network.hpp
index c1d99a9010..49cf4dac09 100644
--- a/src/armnn/Network.hpp
+++ b/src/armnn/Network.hpp
@@ -222,6 +222,9 @@ public:
IConnectableLayer* AddStackLayer(const StackDescriptor& stackDescriptor,
const char* name = nullptr) override;
+ IConnectableLayer* AddStandInLayer(const StandInDescriptor& descriptor,
+ const char* name = nullptr) override;
+
IConnectableLayer* AddQuantizedLstmLayer(const QuantizedLstmInputParams& params,
const char* name = nullptr) override;
diff --git a/src/armnn/layers/StandInLayer.cpp b/src/armnn/layers/StandInLayer.cpp
new file mode 100644
index 0000000000..fdc905fea2
--- /dev/null
+++ b/src/armnn/layers/StandInLayer.cpp
@@ -0,0 +1,47 @@
+//
+// Copyright © 2019 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "StandInLayer.hpp"
+#include "LayerCloneBase.hpp"
+
+namespace armnn
+{
+
+StandInLayer::StandInLayer(const StandInDescriptor& param, const char* name)
+    // Use the descriptor's output count instead of hard-coding 1: the whole point of
+    // StandInDescriptor::m_NumOutputs is that an unknown operation may produce any
+    // number of outputs, and a stand-in for a multi-output op would otherwise be
+    // constructed with the wrong number of output slots.
+    : LayerWithParameters(param.m_NumInputs, param.m_NumOutputs, LayerType::StandIn, param, name)
+{
+}
+
+std::unique_ptr<IWorkload> StandInLayer::CreateWorkload(const Graph& graph, const IWorkloadFactory& factory) const
+{
+ // This throws in the event that it's called. We would expect that any backend that
+ // "claims" to support the StandInLayer type would actually substitute it with a PrecompiledLayer
+ // during graph optimization. There is no interface on the IWorkloadFactory to create a StandInWorkload.
+ throw Exception("Stand in layer does not support creating workloads");
+}
+
+StandInLayer* StandInLayer::Clone(Graph& graph) const
+{
+ return CloneBase<StandInLayer>(graph, m_Param, GetName());
+}
+
+std::vector<TensorShape> StandInLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
+{
+ throw Exception("Stand in layer does not support infering output shapes");
+}
+
+void StandInLayer::ValidateTensorShapesFromInputs()
+{
+ // Cannot validate this layer since no implementation details can be known by the framework
+ // so do nothing here.
+}
+
+void StandInLayer::Accept(ILayerVisitor& visitor) const
+{
+ visitor.VisitStandInLayer(this, GetParameters(), GetName());
+}
+} //namespace armnn
+
+
diff --git a/src/armnn/layers/StandInLayer.hpp b/src/armnn/layers/StandInLayer.hpp
new file mode 100644
index 0000000000..9fe1773a27
--- /dev/null
+++ b/src/armnn/layers/StandInLayer.hpp
@@ -0,0 +1,56 @@
+//
+// Copyright © 2019 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "LayerWithParameters.hpp"
+
+namespace armnn
+{
+
+/// This layer represents an unknown operation in the input graph.
+class StandInLayer : public LayerWithParameters<StandInDescriptor>
+{
+public:
+ /// Empty implementation explicitly does NOT create a workload. Throws Exception if called.
+ /// @param [in] graph The graph where this layer can be found.
+ /// @param [in] factory The workload factory which will create the workload.
+ /// @return Does not return anything. Throws Exception if called.
+ virtual std::unique_ptr<IWorkload>CreateWorkload(const Graph& graph,
+ const IWorkloadFactory& factory) const override;
+
+ /// Creates a dynamically-allocated copy of this layer.
+ /// @param [in] graph The graph into which this layer is being cloned.
+ StandInLayer* Clone(Graph& graph) const override;
+
+ /// Validates the input tensor shape(s) - a no-op for this layer.
+ /// Does nothing, since no properties of this layer can be validated by the framework.
+ void ValidateTensorShapesFromInputs() override;
+
+ /// Empty implementation that throws Exception if called.
+ /// otherwise infers the output shapes from given input shapes and layer properties.
+ /// @param [in] inputShapes The input shapes layer has.
+ /// @return Does not return anything. Throws Exception if called.
+ std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
+
+ /// Accepts a visitor object and calls VisitStandInLayer() method.
+ /// @param visitor The visitor on which to call VisitStandInLayer() method.
+ void Accept(ILayerVisitor& visitor) const override;
+
+protected:
+ /// Constructor to create a StandInLayer.
+ /// @param [in] param StandInDescriptor to configure the stand-in operation.
+ /// @param [in] name Optional name for the layer.
+ StandInLayer(const StandInDescriptor& param, const char* name);
+
+ /// Default destructor
+ ~StandInLayer() = default;
+};
+
+} //namespace armnn
+
+
+
+
diff --git a/src/armnnSerializer/Serializer.cpp b/src/armnnSerializer/Serializer.cpp
index 6122351812..d147d47f41 100644
--- a/src/armnnSerializer/Serializer.cpp
+++ b/src/armnnSerializer/Serializer.cpp
@@ -1090,6 +1090,13 @@ void SerializerVisitor::VisitStackLayer(const armnn::IConnectableLayer* layer,
CreateAnyLayer(stackLayer.o, serializer::Layer::Layer_StackLayer);
}
+void SerializerVisitor::VisitStandInLayer(const armnn::IConnectableLayer *layer,
+ const armnn::StandInDescriptor& standInDescriptor,
+ const char *name)
+{
+ // TODO: IVGCVSW-4010 Implement serialization
+}
+
void SerializerVisitor::VisitStridedSliceLayer(const armnn::IConnectableLayer* layer,
const armnn::StridedSliceDescriptor& stridedSliceDescriptor,
const char* name)
diff --git a/src/armnnSerializer/Serializer.hpp b/src/armnnSerializer/Serializer.hpp
index 79dc17ba01..1fd507af80 100644
--- a/src/armnnSerializer/Serializer.hpp
+++ b/src/armnnSerializer/Serializer.hpp
@@ -235,6 +235,10 @@ public:
const armnn::ViewsDescriptor& viewsDescriptor,
const char* name = nullptr) override;
+ void VisitStandInLayer(const armnn::IConnectableLayer* layer,
+ const armnn::StandInDescriptor& standInDescriptor,
+ const char* name = nullptr) override;
+
void VisitStackLayer(const armnn::IConnectableLayer* layer,
const armnn::StackDescriptor& stackDescriptor,
const char* name = nullptr) override;
diff --git a/src/backends/backendsCommon/LayerSupportBase.cpp b/src/backends/backendsCommon/LayerSupportBase.cpp
index 358106e5e9..9ffad7b8e2 100644
--- a/src/backends/backendsCommon/LayerSupportBase.cpp
+++ b/src/backends/backendsCommon/LayerSupportBase.cpp
@@ -502,6 +502,22 @@ bool LayerSupportBase::IsStackSupported(const std::vector<const TensorInfo*>& in
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
+bool LayerSupportBase::IsStandInSupported(const std::vector<const TensorInfo*>& inputs,
+ const std::vector<const TensorInfo*>& outputs,
+ const StandInDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported) const
+{
+ if (reasonIfUnsupported)
+ {
+ std::stringstream message;
+ message << "StandIn layer is not executable via backends";
+
+ reasonIfUnsupported.value() = message.str();
+ }
+
+ return false;
+}
+
bool LayerSupportBase::IsStridedSliceSupported(const TensorInfo& input,
const TensorInfo& output,
const StridedSliceDescriptor& descriptor,
diff --git a/src/backends/backendsCommon/LayerSupportBase.hpp b/src/backends/backendsCommon/LayerSupportBase.hpp
index d4c37c1a91..e99cb67614 100644
--- a/src/backends/backendsCommon/LayerSupportBase.hpp
+++ b/src/backends/backendsCommon/LayerSupportBase.hpp
@@ -312,6 +312,11 @@ public:
const StackDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+ bool IsStandInSupported(const std::vector<const TensorInfo*>& inputs,
+ const std::vector<const TensorInfo*>& outputs,
+ const StandInDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
bool IsStridedSliceSupported(const TensorInfo& input,
const TensorInfo& output,
const StridedSliceDescriptor& descriptor,
diff --git a/src/backends/backendsCommon/WorkloadFactory.cpp b/src/backends/backendsCommon/WorkloadFactory.cpp
index 30dfa023f9..34e4cbe579 100644
--- a/src/backends/backendsCommon/WorkloadFactory.cpp
+++ b/src/backends/backendsCommon/WorkloadFactory.cpp
@@ -902,6 +902,47 @@ bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId,
break;
}
+ case LayerType::StandIn:
+ {
+ auto cLayer = boost::polymorphic_downcast<const StandInLayer*>(&layer);
+
+ // Get vector of all inputs.
+ auto getTensorInfoIn = [&dataType](const InputSlot& slot)
+ {
+ return OverrideDataType(slot.GetConnectedOutputSlot()->GetTensorInfo(), dataType);
+ };
+ auto getTensorInfoOut = [&dataType](const OutputSlot& slot)
+ {
+ return OverrideDataType(slot.GetTensorInfo(), dataType);
+ };
+ auto beginI = boost::make_transform_iterator(layer.GetInputSlots().begin(), getTensorInfoIn);
+ auto endI = boost::make_transform_iterator(layer.GetInputSlots().end(), getTensorInfoIn);
+ std::vector<TensorInfo> inputs(beginI, endI);
+
+ auto beginO = boost::make_transform_iterator(layer.GetOutputSlots().begin(), getTensorInfoOut);
+ auto endO = boost::make_transform_iterator(layer.GetOutputSlots().end(), getTensorInfoOut);
+ std::vector<TensorInfo> outputs(beginO, endO);
+
+
+ auto getTensorInfoPtr = [](const TensorInfo& info)
+ {
+ return &info;
+ };
+ auto beginPtrI = boost::make_transform_iterator(inputs.begin(), getTensorInfoPtr);
+ auto endPtrI = boost::make_transform_iterator(inputs.end(), getTensorInfoPtr);
+ std::vector<const TensorInfo*> inputPtrs(beginPtrI, endPtrI);
+
+ auto beginPtrO = boost::make_transform_iterator(outputs.begin(), getTensorInfoPtr);
+ auto endPtrO = boost::make_transform_iterator(outputs.end(), getTensorInfoPtr);
+ std::vector<const TensorInfo*> outputPtrs(beginPtrO, endPtrO);
+
+
+ result = layerSupportObject->IsStandInSupported(inputPtrs,
+ outputPtrs,
+ cLayer->GetParameters(),
+ reason);
+ break;
+ }
case LayerType::StridedSlice:
{
auto cLayer = boost::polymorphic_downcast<const StridedSliceLayer*>(&layer);
diff --git a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
index 9bddae9759..c52d6a9511 100644
--- a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
+++ b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
@@ -380,6 +380,25 @@ struct LayerTypePolicy<armnn::LayerType::name, DataType> \
// Use this version for layers whose constructor takes 2 parameters(descriptor and name).
#define DECLARE_LAYER_POLICY_2_PARAM(name) DECLARE_LAYER_POLICY_CUSTOM_PARAM(name, armnn::name##Descriptor)
+
+#define DECLARE_LAYER_POLICY_EXCEPTION(name, descType) \
+template<armnn::DataType DataType> \
+struct LayerTypePolicy<armnn::LayerType::name, DataType> \
+{ \
+ using Type = armnn::name##Layer; \
+ using Desc = descType; \
+ constexpr static const char* NameStr = #name; \
+ \
+ static std::unique_ptr<armnn::IWorkload> MakeDummyWorkload(armnn::IWorkloadFactory *factory, \
+ unsigned int nIn, unsigned int nOut) \
+ { \
+ return std::unique_ptr<armnn::IWorkload>(); \
+ } \
+};
+
+#define DECLARE_LAYER_POLICY_EXCEPTION_1_PARAM(name) DECLARE_LAYER_POLICY_EXCEPTION(name, void)
+#define DECLARE_LAYER_POLICY_EXCEPTION_2_PARAM(name) DECLARE_LAYER_POLICY_EXCEPTION(name, armnn::name##Descriptor)
+
// Layer policy template.
template<armnn::LayerType Type, armnn::DataType DataType>
struct LayerTypePolicy;
@@ -489,6 +508,8 @@ DECLARE_LAYER_POLICY_2_PARAM(Splitter)
DECLARE_LAYER_POLICY_2_PARAM(Stack)
+DECLARE_LAYER_POLICY_EXCEPTION_2_PARAM(StandIn)
+
DECLARE_LAYER_POLICY_2_PARAM(StridedSlice)
DECLARE_LAYER_POLICY_1_PARAM(Subtraction)