path: root/src/armnn/layers/StandInLayer.cpp
Diffstat (limited to 'src/armnn/layers/StandInLayer.cpp')
-rw-r--r--  src/armnn/layers/StandInLayer.cpp  47
1 files changed, 47 insertions, 0 deletions
diff --git a/src/armnn/layers/StandInLayer.cpp b/src/armnn/layers/StandInLayer.cpp
new file mode 100644
index 0000000000..fdc905fea2
--- /dev/null
+++ b/src/armnn/layers/StandInLayer.cpp
@@ -0,0 +1,47 @@
+//
+// Copyright © 2019 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "StandInLayer.hpp"
+#include "LayerCloneBase.hpp"
+
+namespace armnn
+{
+
+StandInLayer::StandInLayer(const StandInDescriptor& param, const char* name)
+    : LayerWithParameters(param.m_NumInputs, 1, LayerType::StandIn, param, name)
+{
+}
+
+std::unique_ptr<IWorkload> StandInLayer::CreateWorkload(const Graph& graph, const IWorkloadFactory& factory) const
+{
+    // This throws in the event that it's called. We would expect that any backend that
+    // "claims" to support the StandInLayer type would actually substitute it with a PrecompiledLayer
+    // during graph optimization. There is no interface on the IWorkloadFactory to create a StandInWorkload.
+    throw Exception("Stand in layer does not support creating workloads");
+}
+
+StandInLayer* StandInLayer::Clone(Graph& graph) const
+{
+    return CloneBase<StandInLayer>(graph, m_Param, GetName());
+}
+
+std::vector<TensorShape> StandInLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
+{
+    throw Exception("Stand in layer does not support inferring output shapes");
+}
+
+void StandInLayer::ValidateTensorShapesFromInputs()
+{
+    // Cannot validate this layer since no implementation details can be known by the framework,
+    // so do nothing here.
+}
+
+void StandInLayer::Accept(ILayerVisitor& visitor) const
+{
+    visitor.VisitStandInLayer(this, GetParameters(), GetName());
+}
+} // namespace armnn
+
+
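
For context, here is a minimal sketch (not part of this commit) of how a StandIn layer might be wired into a graph through the public INetwork API. AddStandInLayer and the StandInDescriptor member m_NumOutputs are assumed to exist in the ArmNN headers of this era (armnn/INetwork.hpp, armnn/Descriptors.hpp); the layer names and tensor shapes are arbitrary. Both tensor infos are set by hand because StandInLayer::InferOutputShapes above deliberately throws rather than propagating shapes.

#include <armnn/ArmNN.hpp>

int main()
{
    using namespace armnn;

    INetworkPtr network = INetwork::Create();

    // Describe a stand-in for an unknown, backend-specific operation
    // with one input and one output.
    StandInDescriptor descriptor;
    descriptor.m_NumInputs  = 1;
    descriptor.m_NumOutputs = 1; // assumed member; this commit hardcodes a single output

    IConnectableLayer* input   = network->AddInputLayer(0, "input");
    IConnectableLayer* standIn = network->AddStandInLayer(descriptor, "custom-op"); // assumed API
    IConnectableLayer* output  = network->AddOutputLayer(0, "output");

    input->GetOutputSlot(0).Connect(standIn->GetInputSlot(0));
    standIn->GetOutputSlot(0).Connect(output->GetInputSlot(0));

    // Shapes must be set explicitly on both slots: InferOutputShapes throws
    // instead of deducing anything through the stand-in.
    input->GetOutputSlot(0).SetTensorInfo(TensorInfo(TensorShape({ 1, 16 }), DataType::Float32));
    standIn->GetOutputSlot(0).SetTensorInfo(TensorInfo(TensorShape({ 1, 16 }), DataType::Float32));

    return 0;
}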
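Similarly, a minimal sketch of a visitor that reports StandIn layers, matching the VisitStandInLayer call made in StandInLayer::Accept above. LayerVisitorBase<VisitorNoThrowPolicy> is assumed to be available in armnn/LayerVisitorBase.hpp of this era; it supplies no-op defaults, so only the StandIn case needs overriding.

#include <armnn/ArmNN.hpp>
#include <armnn/LayerVisitorBase.hpp>
#include <iostream>

class StandInReporter : public armnn::LayerVisitorBase<armnn::VisitorNoThrowPolicy>
{
public:
    // Signature mirrors the call visitor.VisitStandInLayer(this, GetParameters(), GetName()).
    void VisitStandInLayer(const armnn::IConnectableLayer* layer,
                           const armnn::StandInDescriptor& descriptor,
                           const char* name) override
    {
        std::cout << "StandIn layer '" << (name ? name : "<unnamed>") << "' with "
                  << descriptor.m_NumInputs << " input(s) and "
                  << layer->GetNumOutputSlots() << " output slot(s)\n";
    }
};

// Usage (assuming INetwork::Accept is available, as in this era of ArmNN):
//   StandInReporter reporter;
//   network->Accept(reporter);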