38 class IWorkloadFactory;
46 : m_OwningLayer(owner)
47 , m_Connection(nullptr)
48 , m_SlotIndex(slotIndex)
64 if (m_Connection !=
nullptr && source !=
nullptr)
67 "but the latter already has a connection");
69 m_Connection = source;
83 const unsigned int m_SlotIndex;
90 : m_OwningLayer(owner)
91 , m_OutputHandler(outputHandler)
108 catch (
const std::exception& e)
113 std::cerr <<
"WARNING: An error has occurred when disconnecting all output slots: " 114 << e.what() << std::endl;
122 LayerGuid GetOwningLayerGuid()
const override;
133 bool ValidateTensorShape(
const TensorShape& shape)
const;
136 void DisconnectAll();
139 void MoveAllConnections(
OutputSlot& destination);
147 void SetTensorInfo(
const TensorInfo& tensorInfo)
override;
149 bool IsTensorInfoSet()
const override;
153 return Connect(*PolymorphicDowncast<InputSlot*>(&destination));
158 return Disconnect(*PolymorphicDowncast<InputSlot*>(&slot));
161 unsigned int CalculateIndexOnOwner()
const override;
168 void SetEdgeStrategy(
unsigned int connectionIndex,
EdgeStrategy strategy);
169 EdgeStrategy GetEdgeStrategyForConnection(
unsigned int connectionIdx)
const;
172 void ValidateConnectionIndex(
unsigned int index)
const;
174 Layer& m_OwningLayer;
176 std::vector<InputSlot*> m_Connections;
179 std::vector<EdgeStrategy> m_EdgeStrategies;
186 if (m_Connection !=
nullptr)
191 m_Connection->Disconnect(*
this);
193 catch (
const std::exception& e)
198 std::cerr <<
"WARNING: An error has occurred when disconnecting an input slot: " 199 << e.what() << std::endl;
219 Layer(
unsigned int numInputSlots,
unsigned int numOutputSlots,
LayerType type,
const char* name);
220 Layer(
unsigned int numInputSlots,
unsigned int numOutputSlots,
LayerType type,
DataLayout layout,
const char* name);
222 void ExecuteStrategy(
IStrategy& strategy)
const override;
232 return m_OutputHandlers[i];
237 return const_cast<OutputHandler&
>(
const_cast<const Layer*
>(
this)->GetOutputHandler(i));
243 const std::vector<InputSlot>&
GetInputSlots()
const {
return m_InputSlots; }
248 std::vector<InputSlot>::iterator
EndInputSlots() {
return m_InputSlots.end(); }
252 std::vector<OutputSlot>::iterator
EndOutputSlots() {
return m_OutputSlots.end(); }
257 unsigned int numConnections = 0;
259 for (
auto&& output : GetOutputSlots())
261 numConnections += output.GetNumConnections();
264 return (GetNumOutputSlots() > 0) && (numConnections == 0);
268 void ResetPriority()
const;
284 const bool IsMemoryManaged =
true);
290 void VerifyLayerConnections(
unsigned int expectedConnections,
const CheckLocation& location)
const;
292 virtual void ValidateTensorShapesFromInputs() = 0;
294 std::vector<TensorShape> InferOutputShapes(
const std::vector<TensorShape>& inputShapes)
const override;
301 virtual void ReleaseConstantData();
303 template<
typename Op>
306 for (
auto constant : GetConstantTensorsByRef())
317 const char*
GetName()
const override {
return m_LayerName.c_str(); }
319 unsigned int GetNumInputSlots()
const override {
return static_cast<unsigned int>(m_InputSlots.size()); }
320 unsigned int GetNumOutputSlots()
const override {
return static_cast<unsigned int>(m_OutputSlots.size()); }
334 virtual void Reparent(
Graph& dest, std::list<Layer*>::const_iterator iterator) = 0;
338 m_BackendHint = backend;
344 m_ShapeInferenceMethod = shapeInferenceMethod;
349 m_AllowExpandedDims = allowExpandedDims;
355 return std::static_pointer_cast<T>(m_AdditionalInfoObject);
360 m_AdditionalInfoObject = additionalInfo;
368 virtual ~
Layer() =
default;
370 template <
typename QueueDescriptor>
374 CollectWorkloadInputs(dataCollector);
377 template <
typename QueueDescriptor>
381 CollectWorkloadOutputs(dataCollector);
384 void ValidateAndCopyShape(
const TensorShape& outputShape,
387 const std::string& layerName,
388 const unsigned int outputSlotIndex = 0);
393 template <
typename QueueDescriptor>
397 CollectQueueDescriptorInputs(descriptor, info);
398 CollectQueueDescriptorOutputs(descriptor, info);
402 template <
typename LayerType,
typename ... Params>
424 const std::string m_LayerName;
426 std::vector<InputSlot> m_InputSlots;
427 std::vector<OutputSlot> m_OutputSlots;
435 mutable bool m_Visiting =
false;
437 bool m_AllowExpandedDims =
false;
441 std::list<std::string> m_RelatedLayerNames;
454 unsigned int numOutputSlots,
458 :
Layer(numInputSlots, numOutputSlots, type, name)
std::vector< InputSlot >::iterator EndInputSlots()
void CollectQueueDescriptorInputs(QueueDescriptor &descriptor, WorkloadInfo &info) const
const std::vector< InputSlot * > & GetConnections() const
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
unsigned int GetNumInputSlots() const override
Returns the number of connectable input slots.
std::unique_ptr< armnn::IWorkload > CreateWorkload(const armnn::IWorkloadFactory &workloadFactory, const armnn::WorkloadInfo &info, const DescriptorType &descriptor)
Optional< BackendId > GetBackendHint() const
void AddRelatedLayerName(const std::string layerName)
void BackendSelectionHint(Optional< BackendId > backend) final
Provide a hint for the optimizer as to which backend to prefer for this layer.
void SetAllowExpandedDims(bool allowExpandedDims)
void SetShapeInferenceMethod(ShapeInferenceMethod shapeInferenceMethod)
const std::vector< EdgeStrategy > & GetEdgeStrategies() const
LayerBindingId GetBindingId() const
void OperateOnConstantTensors(Op op)
void ExecuteStrategy(IStrategy &strategy) const override
Apply a visitor to this layer.
OutputSlot & GetOutputSlot(unsigned int index=0) override
Get the output slot handle by slot index.
Layer & GetOwningLayer() const
virtual void ExecuteStrategy(const armnn::IConnectableLayer *layer, const armnn::BaseDescriptor &descriptor, const std::vector< armnn::ConstTensor > &constants, const char *name, const armnn::LayerBindingId id=0)=0
ShapeInferenceMethod GetShapeInferenceMethod() const
unsigned int LayerPriority
void Disconnect(IInputSlot &slot) override
int Connect(IInputSlot &destination) override
Copyright (c) 2021 ARM Limited and Contributors.
void SetBackendId(const BackendId &id)
const std::vector< InputSlot > & GetInputSlots() const
bool IsOutputUnconnected()
unsigned int GetNumOutputSlots() const override
Returns the number of connectable output slots.
int LayerBindingId
Type of identifiers for bindable layers (inputs, outputs).
Base class for all descriptors.
std::vector< InputSlot >::iterator BeginInputSlots()
std::shared_ptr< void > AdditionalInfoObjectPtr
unsigned int GetNumConnections() const override
const InputSlot & GetInputSlot(unsigned int index) const override
Get a const input slot handle by slot index.
std::vector< TensorInfo > m_InputTensorInfos
bool operator==(const armnn::DataLayout &dataLayout, const DataLayoutIndexed &indexed)
Equality methods.
Null Descriptor used as a return value from the IConnectableLayer GetParameters method by layers which do not have a descriptor.
std::vector< std::reference_wrapper< std::shared_ptr< ConstTensorHandle > >> ConstantTensors
void SetGuid(LayerGuid guid)
AdditionalInfoObjectPtr m_AdditionalInfoObject
WorkloadInfo PrepInfoAndDesc(QueueDescriptor &descriptor) const
Helper function to reduce duplication in *LayerCreateWorkload.
An output connection slot for a layer.
const std::string & GetNameStr() const
LayerType GetType() const override
Returns the armnn::LayerType of this layer.
std::vector< TensorInfo > m_OutputTensorInfos
InputSlot & GetInputSlot(unsigned int index) override
Get the input slot handle by slot index.
const BackendId & GetBackendId() const
std::vector< OutputHandler > m_OutputHandlers
const std::vector< OutputSlot > & GetOutputSlots() const
BindableLayer(unsigned int numInputSlots, unsigned int numOutputSlots, LayerType type, const char *name, LayerBindingId id)
void SetAdditionalInfoForObject(const AdditionalInfoObjectPtr &additionalInfo)
const std::list< std::string > & GetRelatedLayerNames()
arm::pipe::ProfilingGuid LayerGuid
Define LayerGuid type.
std::vector< OutputSlot >::iterator BeginOutputSlots()
std::vector< ITensorHandle * > m_Outputs
void CollectQueueDescriptorOutputs(QueueDescriptor &descriptor, WorkloadInfo &info) const
const OutputHandler & GetOutputHandler(unsigned int i=0) const
virtual const BaseDescriptor & GetParameters() const override
If the layer has a descriptor return it.
bool GetAllowExpandedDims() const
std::vector< OutputSlot >::iterator EndOutputSlots()
std::enable_if_t< std::is_unsigned< Source >::value &&std::is_unsigned< Dest >::value, Dest > numeric_cast(Source source)
const OutputSlot & GetOutputSlot(unsigned int index=0) const override
Get the const output slot handle by slot index.
OutputHandler & GetOutputHandler()
OutputSlot(Layer &owner, OutputHandler &outputHandler)
const OutputHandler & GetOutputHandler() const
Contains information about TensorInfos of a layer.
const char * GetName() const override
Returns the name of the layer.
void Connect(armnn::IConnectableLayer *from, armnn::IConnectableLayer *to, const armnn::TensorInfo &tensorInfo, unsigned int fromIndex, unsigned int toIndex)
std::vector< ITensorHandle * > m_Inputs
std::function< void(const std::string &name, const std::string &value)> ParameterStringifyFunction
const TensorInfo & GetTensorInfo(const ITensorHandle *tensorHandle)
float32 helpers
ShapeInferenceMethod
The ShapeInferenceMethod modify how the output shapes are treated.
ShapeInferenceMethod m_ShapeInferenceMethod
OutputHandler & GetOutputHandler(unsigned int i=0)
std::shared_ptr< T > GetAdditionalInformation() const
LayerType
When adding a new layer, adapt also the LastLayer enum value in the enum class LayerType below...
LayerGuid GetGuid() const final
Returns the unique id of the layer.