38 class IWorkloadFactory;
46 : m_OwningLayer(owner)
47 , m_Connection(nullptr)
48 , m_SlotIndex(slotIndex)
64 if (m_Connection !=
nullptr && source !=
nullptr)
67 "but the latter already has a connection");
69 m_Connection = source;
83 const unsigned int m_SlotIndex;
90 : m_OwningLayer(owner)
91 , m_OutputHandler(outputHandler)
108 catch (
const std::exception& e)
113 std::cerr <<
"WARNING: An error has occurred when disconnecting all output slots: "
114 << e.what() << std::endl;
122 LayerGuid GetOwningLayerGuid()
const override;
133 bool ValidateTensorShape(
const TensorShape& shape)
const;
136 void DisconnectAll();
139 void MoveAllConnections(
OutputSlot& destination);
147 void SetTensorInfo(
const TensorInfo& tensorInfo)
override;
149 bool IsTensorInfoSet()
const override;
153 return Connect(*PolymorphicDowncast<InputSlot*>(&destination));
158 return Disconnect(*PolymorphicDowncast<InputSlot*>(&slot));
161 unsigned int CalculateIndexOnOwner()
const override;
168 void SetEdgeStrategy(
unsigned int connectionIndex,
EdgeStrategy strategy);
169 EdgeStrategy GetEdgeStrategyForConnection(
unsigned int connectionIdx)
const;
172 void ValidateConnectionIndex(
unsigned int index)
const;
174 Layer& m_OwningLayer;
176 std::vector<InputSlot*> m_Connections;
179 std::vector<EdgeStrategy> m_EdgeStrategies;
186 if (m_Connection !=
nullptr)
191 m_Connection->Disconnect(*
this);
193 catch (
const std::exception& e)
198 std::cerr <<
"WARNING: An error has occurred when disconnecting an input slot: "
199 << e.what() << std::endl;
219 Layer(
unsigned int numInputSlots,
unsigned int numOutputSlots,
LayerType type,
const char* name);
220 Layer(
unsigned int numInputSlots,
unsigned int numOutputSlots,
LayerType type,
DataLayout layout,
const char* name);
222 void ExecuteStrategy(
IStrategy& strategy)
const override;
232 return m_OutputHandlers[i];
237 return const_cast<OutputHandler&
>(
const_cast<const Layer*
>(
this)->GetOutputHandler(i));
242 const std::vector<InputSlot>&
GetInputSlots()
const {
return m_InputSlots; }
247 std::vector<InputSlot>::iterator
EndInputSlots() {
return m_InputSlots.end(); }
251 std::vector<OutputSlot>::iterator
EndOutputSlots() {
return m_OutputSlots.end(); }
256 unsigned int numConnections = 0;
258 for (
auto&& output : GetOutputSlots())
260 numConnections += output.GetNumConnections();
263 return (GetNumOutputSlots() > 0) && (numConnections == 0);
267 void ResetPriority()
const;
283 const bool IsMemoryManaged =
true);
289 void VerifyLayerConnections(
unsigned int expectedConnections,
const CheckLocation& location)
const;
291 virtual void ValidateTensorShapesFromInputs() = 0;
293 std::vector<TensorShape> InferOutputShapes(
const std::vector<TensorShape>& inputShapes)
const override;
300 virtual void ReleaseConstantData();
302 template<
typename Op>
305 for (
auto constant : GetConstantTensorsByRef())
316 const char*
GetName()
const override {
return m_LayerName.c_str(); }
318 unsigned int GetNumInputSlots()
const override {
return static_cast<unsigned int>(m_InputSlots.size()); }
319 unsigned int GetNumOutputSlots()
const override {
return static_cast<unsigned int>(m_OutputSlots.size()); }
333 virtual void Reparent(
Graph& dest, std::list<Layer*>::const_iterator iterator) = 0;
337 m_BackendHint = backend;
343 m_ShapeInferenceMethod = shapeInferenceMethod;
349 return std::static_pointer_cast<T>(m_AdditionalInfoObject);
354 m_AdditionalInfoObject = additionalInfo;
362 virtual ~
Layer() =
default;
364 template <
typename QueueDescriptor>
368 CollectWorkloadInputs(dataCollector);
371 template <
typename QueueDescriptor>
375 CollectWorkloadOutputs(dataCollector);
378 void ValidateAndCopyShape(
const TensorShape& outputShape,
381 const std::string& layerName,
382 const unsigned int outputSlotIndex = 0);
387 template <
typename QueueDescriptor>
391 CollectQueueDescriptorInputs(descriptor, info);
392 CollectQueueDescriptorOutputs(descriptor, info);
396 template <
typename LayerType,
typename ... Params>
418 const std::string m_LayerName;
420 std::vector<InputSlot> m_InputSlots;
421 std::vector<OutputSlot> m_OutputSlots;
429 mutable bool m_Visiting =
false;
433 std::list<std::string> m_RelatedLayerNames;
446 unsigned int numOutputSlots,
450 :
Layer(numInputSlots, numOutputSlots, type, name)
std::vector< InputSlot >::iterator EndInputSlots()
void CollectQueueDescriptorInputs(QueueDescriptor &descriptor, WorkloadInfo &info) const
const std::vector< InputSlot * > & GetConnections() const
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
unsigned int GetNumInputSlots() const override
Returns the number of connectable input slots.
std::unique_ptr< armnn::IWorkload > CreateWorkload(const armnn::IWorkloadFactory &workloadFactory, const armnn::WorkloadInfo &info, const DescriptorType &descriptor)
Optional< BackendId > GetBackendHint() const
void AddRelatedLayerName(const std::string layerName)
void BackendSelectionHint(Optional< BackendId > backend) final
Provide a hint for the optimizer as to which backend to prefer for this layer.
void SetShapeInferenceMethod(ShapeInferenceMethod shapeInferenceMethod)
const std::vector< EdgeStrategy > & GetEdgeStrategies() const
LayerBindingId GetBindingId() const
void OperateOnConstantTensors(Op op)
void ExecuteStrategy(IStrategy &strategy) const override
Apply a visitor to this layer.
OutputSlot & GetOutputSlot(unsigned int index=0) override
Get the output slot handle by slot index.
Layer & GetOwningLayer() const
virtual void ExecuteStrategy(const armnn::IConnectableLayer *layer, const armnn::BaseDescriptor &descriptor, const std::vector< armnn::ConstTensor > &constants, const char *name, const armnn::LayerBindingId id=0)=0
ShapeInferenceMethod GetShapeInferenceMethod() const
unsigned int LayerPriority
void Disconnect(IInputSlot &slot) override
int Connect(IInputSlot &destination) override
Copyright (c) 2021 ARM Limited and Contributors.
void SetBackendId(const BackendId &id)
const std::vector< InputSlot > & GetInputSlots() const
bool IsOutputUnconnected()
unsigned int GetNumOutputSlots() const override
Returns the number of connectable output slots.
int LayerBindingId
Type of identifiers for bindable layers (inputs, outputs).
Base class for all descriptors.
std::vector< InputSlot >::iterator BeginInputSlots()
std::shared_ptr< void > AdditionalInfoObjectPtr
unsigned int GetNumConnections() const override
const InputSlot & GetInputSlot(unsigned int index) const override
Get a const input slot handle by slot index.
std::vector< TensorInfo > m_InputTensorInfos
bool operator==(const armnn::DataLayout &dataLayout, const DataLayoutIndexed &indexed)
Equality methods.
Null Descriptor used as a return value from the IConnectableLayer GetParameters method by layers which do not have a descriptor.
std::vector< std::reference_wrapper< std::shared_ptr< ConstTensorHandle > >> ConstantTensors
void SetGuid(LayerGuid guid)
AdditionalInfoObjectPtr m_AdditionalInfoObject
WorkloadInfo PrepInfoAndDesc(QueueDescriptor &descriptor) const
Helper function to reduce duplication in *LayerCreateWorkload.
An output connection slot for a layer.
const std::string & GetNameStr() const
LayerType GetType() const override
Returns the armnn::LayerType of this layer.
std::vector< TensorInfo > m_OutputTensorInfos
InputSlot & GetInputSlot(unsigned int index) override
Get the input slot handle by slot index.
const BackendId & GetBackendId() const
std::vector< OutputHandler > m_OutputHandlers
const std::vector< OutputSlot > & GetOutputSlots() const
BindableLayer(unsigned int numInputSlots, unsigned int numOutputSlots, LayerType type, const char *name, LayerBindingId id)
void SetAdditionalInfoForObject(const AdditionalInfoObjectPtr &additionalInfo)
const std::list< std::string > & GetRelatedLayerNames()
std::vector< OutputSlot >::iterator BeginOutputSlots()
std::vector< ITensorHandle * > m_Outputs
void CollectQueueDescriptorOutputs(QueueDescriptor &descriptor, WorkloadInfo &info) const
const OutputHandler & GetOutputHandler(unsigned int i=0) const
virtual const BaseDescriptor & GetParameters() const override
If the layer has a descriptor return it.
profiling::ProfilingGuid LayerGuid
Define LayerGuid type.
std::vector< OutputSlot >::iterator EndOutputSlots()
std::enable_if_t< std::is_unsigned< Source >::value &&std::is_unsigned< Dest >::value, Dest > numeric_cast(Source source)
const OutputSlot & GetOutputSlot(unsigned int index=0) const override
Get the const output slot handle by slot index.
OutputHandler & GetOutputHandler()
OutputSlot(Layer &owner, OutputHandler &outputHandler)
const OutputHandler & GetOutputHandler() const
Contains information about TensorInfos of a layer.
const char * GetName() const override
Returns the name of the layer.
void Connect(armnn::IConnectableLayer *from, armnn::IConnectableLayer *to, const armnn::TensorInfo &tensorInfo, unsigned int fromIndex, unsigned int toIndex)
std::vector< ITensorHandle * > m_Inputs
std::function< void(const std::string &name, const std::string &value)> ParameterStringifyFunction
const TensorInfo & GetTensorInfo(const ITensorHandle *tensorHandle)
float32 helpers
ShapeInferenceMethod
The ShapeInferenceMethod modify how the output shapes are treated.
ShapeInferenceMethod m_ShapeInferenceMethod
OutputHandler & GetOutputHandler(unsigned int i=0)
std::shared_ptr< T > GetAdditionalInformation() const
LayerType
When adding a new layer, adapt also the LastLayer enum value in the enum class LayerType below.
LayerGuid GetGuid() const final
Returns the unique id of the layer.