37 class IWorkloadFactory;
45 : m_OwningLayer(owner)
46 , m_Connection(nullptr)
47 , m_SlotIndex(slotIndex)
61 if (m_Connection !=
nullptr && source !=
nullptr)
64 "but the latter already has a connection");
66 m_Connection = source;
80 const unsigned int m_SlotIndex;
87 : m_OwningLayer(owner)
88 , m_OutputHandler(outputHandler)
105 catch (
const std::exception& e)
110 std::cerr <<
"WARNING: An error has occurred when disconnecting all output slots: " << e.what() << std::endl;
117 LayerGuid GetOwningLayerGuid()
const override;
128 bool ValidateTensorShape(
const TensorShape& shape)
const;
131 void DisconnectAll();
134 void MoveAllConnections(
OutputSlot& destination);
142 void SetTensorInfo(
const TensorInfo& tensorInfo)
override;
144 bool IsTensorInfoSet()
const override;
148 return Connect(*PolymorphicDowncast<InputSlot*>(&destination));
153 return Disconnect(*PolymorphicDowncast<InputSlot*>(&slot));
156 unsigned int CalculateIndexOnOwner()
const override;
163 void SetEdgeStrategy(
unsigned int connectionIndex,
EdgeStrategy strategy);
164 EdgeStrategy GetEdgeStrategyForConnection(
unsigned int connectionIdx)
const;
167 void ValidateConnectionIndex(
unsigned int index)
const;
169 Layer& m_OwningLayer;
171 std::vector<InputSlot*> m_Connections;
174 std::vector<EdgeStrategy> m_EdgeStrategies;
181 if (m_Connection !=
nullptr)
186 m_Connection->Disconnect(*
this);
188 catch (
const std::exception& e)
193 std::cerr <<
"WARNING: An error has occurred when disconnecting an input slot: " << e.what() << std::endl;
214 Layer(
unsigned int numInputSlots,
unsigned int numOutputSlots,
LayerType type,
const char* name);
215 Layer(
unsigned int numInputSlots,
unsigned int numOutputSlots,
LayerType type,
DataLayout layout,
const char* name);
217 void ExecuteStrategy(
IStrategy& strategy)
const override;
227 return m_OutputHandlers[i];
232 return const_cast<OutputHandler&
>(
const_cast<const Layer*
>(
this)->GetOutputHandler(i));
237 const std::vector<InputSlot>&
GetInputSlots()
const {
return m_InputSlots; }
242 std::vector<InputSlot>::iterator
EndInputSlots() {
return m_InputSlots.end(); }
246 std::vector<OutputSlot>::iterator
EndOutputSlots() {
return m_OutputSlots.end(); }
251 unsigned int numConnections = 0;
253 for (
auto&& output : GetOutputSlots())
255 numConnections += output.GetNumConnections();
258 return (GetNumOutputSlots() > 0) && (numConnections == 0);
262 void ResetPriority()
const;
278 const bool IsMemoryManaged =
true);
284 void VerifyLayerConnections(
unsigned int expectedConnections,
const CheckLocation& location)
const;
286 virtual void ValidateTensorShapesFromInputs() = 0;
288 std::vector<TensorShape> InferOutputShapes(
const std::vector<TensorShape>& inputShapes)
const override;
295 virtual void ReleaseConstantData();
297 template<
typename Op>
300 for (
auto constant : GetConstantTensorsByRef())
311 const char*
GetName()
const override {
return m_LayerName.c_str(); }
313 unsigned int GetNumInputSlots()
const override {
return static_cast<unsigned int>(m_InputSlots.size()); }
314 unsigned int GetNumOutputSlots()
const override {
return static_cast<unsigned int>(m_OutputSlots.size()); }
328 virtual void Reparent(
Graph& dest, std::list<Layer*>::const_iterator iterator) = 0;
332 m_BackendHint = backend;
338 m_ShapeInferenceMethod = shapeInferenceMethod;
344 return std::static_pointer_cast<T>(m_AdditionalInfoObject);
349 m_AdditionalInfoObject = additionalInfo;
355 virtual ~
Layer() =
default;
357 template <
typename QueueDescriptor>
361 CollectWorkloadInputs(dataCollector);
364 template <
typename QueueDescriptor>
368 CollectWorkloadOutputs(dataCollector);
371 void ValidateAndCopyShape(
const TensorShape& outputShape,
374 const std::string& layerName,
375 const unsigned int outputSlotIndex = 0);
380 template <
typename QueueDescriptor>
384 CollectQueueDescriptorInputs(descriptor, info);
385 CollectQueueDescriptorOutputs(descriptor, info);
389 template <
typename LayerType,
typename ... Params>
393 using ConstantTensors = std::vector<std::reference_wrapper<std::shared_ptr<ConstTensorHandle>>>;
411 const std::string m_LayerName;
413 std::vector<InputSlot> m_InputSlots;
414 std::vector<OutputSlot> m_OutputSlots;
422 mutable bool m_Visiting =
false;
426 std::list<std::string> m_RelatedLayerNames;
435 unsigned int numOutputSlots,
439 :
Layer(numInputSlots, numOutputSlots, type, name)
std::vector< InputSlot >::iterator EndInputSlots()
void CollectQueueDescriptorInputs(QueueDescriptor &descriptor, WorkloadInfo &info) const
const std::vector< InputSlot * > & GetConnections() const
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
unsigned int GetNumInputSlots() const override
Returns the number of connectable input slots.
std::unique_ptr< armnn::IWorkload > CreateWorkload(const armnn::IWorkloadFactory &workloadFactory, const armnn::WorkloadInfo &info, const DescriptorType &descriptor)
Optional< BackendId > GetBackendHint() const
void AddRelatedLayerName(const std::string layerName)
void BackendSelectionHint(Optional< BackendId > backend) final
Provide a hint for the optimizer as to which backend to prefer for this layer.
void SetShapeInferenceMethod(ShapeInferenceMethod shapeInferenceMethod)
const std::vector< EdgeStrategy > & GetEdgeStrategies() const
LayerBindingId GetBindingId() const
void OperateOnConstantTensors(Op op)
void ExecuteStrategy(IStrategy &strategy) const override
Apply a visitor to this layer.
OutputSlot & GetOutputSlot(unsigned int index=0) override
Get the output slot handle by slot index.
Layer & GetOwningLayer() const
virtual void ExecuteStrategy(const armnn::IConnectableLayer *layer, const armnn::BaseDescriptor &descriptor, const std::vector< armnn::ConstTensor > &constants, const char *name, const armnn::LayerBindingId id=0)=0
ShapeInferenceMethod GetShapeInferenceMethod() const
unsigned int LayerPriority
void Disconnect(IInputSlot &slot) override
int Connect(IInputSlot &destination) override
Copyright (c) 2021 ARM Limited and Contributors.
void SetBackendId(const BackendId &id)
const std::vector< InputSlot > & GetInputSlots() const
bool IsOutputUnconnected()
unsigned int GetNumOutputSlots() const override
Returns the number of connectable output slots.
int LayerBindingId
Type of identifiers for bindable layers (inputs, outputs).
Base class for all descriptors.
std::vector< InputSlot >::iterator BeginInputSlots()
std::shared_ptr< void > AdditionalInfoObjectPtr
unsigned int GetNumConnections() const override
const InputSlot & GetInputSlot(unsigned int index) const override
Get a const input slot handle by slot index.
std::vector< TensorInfo > m_InputTensorInfos
bool operator==(const armnn::DataLayout &dataLayout, const DataLayoutIndexed &indexed)
Equality methods.
std::vector< std::reference_wrapper< std::shared_ptr< ConstTensorHandle > >> ConstantTensors
void SetGuid(LayerGuid guid)
AdditionalInfoObjectPtr m_AdditionalInfoObject
WorkloadInfo PrepInfoAndDesc(QueueDescriptor &descriptor) const
Helper function to reduce duplication in *LayerCreateWorkload.
An output connection slot for a layer.
const std::string & GetNameStr() const
LayerType GetType() const override
Returns the armnn::LayerType of this layer.
std::vector< TensorInfo > m_OutputTensorInfos
InputSlot & GetInputSlot(unsigned int index) override
Get the input slot handle by slot index.
const BackendId & GetBackendId() const
std::vector< OutputHandler > m_OutputHandlers
const std::vector< OutputSlot > & GetOutputSlots() const
BindableLayer(unsigned int numInputSlots, unsigned int numOutputSlots, LayerType type, const char *name, LayerBindingId id)
void SetAdditionalInfoForObject(const AdditionalInfoObjectPtr &additionalInfo)
const std::list< std::string > & GetRelatedLayerNames()
std::vector< OutputSlot >::iterator BeginOutputSlots()
std::vector< ITensorHandle * > m_Outputs
void CollectQueueDescriptorOutputs(QueueDescriptor &descriptor, WorkloadInfo &info) const
const OutputHandler & GetOutputHandler(unsigned int i=0) const
std::vector< OutputSlot >::iterator EndOutputSlots()
std::enable_if_t< std::is_unsigned< Source >::value && std::is_unsigned< Dest >::value, Dest > numeric_cast(Source source)
const OutputSlot & GetOutputSlot(unsigned int index=0) const override
Get the const output slot handle by slot index.
OutputHandler & GetOutputHandler()
OutputSlot(Layer &owner, OutputHandler &outputHandler)
const OutputHandler & GetOutputHandler() const
Contains information about inputs and outputs to a layer.
const char * GetName() const override
Returns the name of the layer.
void Connect(armnn::IConnectableLayer *from, armnn::IConnectableLayer *to, const armnn::TensorInfo &tensorInfo, unsigned int fromIndex, unsigned int toIndex)
std::vector< ITensorHandle * > m_Inputs
std::function< void(const std::string &name, const std::string &value)> ParameterStringifyFunction
const TensorInfo & GetTensorInfo(const ITensorHandle *tensorHandle)
float32 helpers
ShapeInferenceMethod
The ShapeInferenceMethod modify how the output shapes are treated.
ShapeInferenceMethod m_ShapeInferenceMethod
OutputHandler & GetOutputHandler(unsigned int i=0)
std::shared_ptr< T > GetAdditionalInformation() const
LayerType
When adding a new layer, adapt also the LastLayer enum value in the enum class LayerType below...
LayerGuid GetGuid() const final
Returns the unique id of the layer.