38 class IWorkloadFactory;
46 : m_OwningLayer(owner)
47 , m_Connection(nullptr)
48 , m_SlotIndex(slotIndex)
65 if (m_Connection !=
nullptr && source !=
nullptr)
68 "but the latter already has a connection");
70 m_Connection = source;
84 const unsigned int m_SlotIndex;
91 : m_OwningLayer(owner)
92 , m_OutputHandler(outputHandler)
109 catch (
const std::exception& e)
114 std::cerr <<
"WARNING: An error has occurred when disconnecting all output slots: " 115 << e.what() << std::endl;
124 LayerGuid GetOwningLayerGuid()
const override;
135 bool ValidateTensorShape(
const TensorShape& shape)
const;
138 void DisconnectAll();
141 void MoveAllConnections(
OutputSlot& destination);
149 void SetTensorInfo(
const TensorInfo& tensorInfo)
override;
151 bool IsTensorInfoSet()
const override;
155 return Connect(*PolymorphicDowncast<InputSlot*>(&destination));
160 return Disconnect(*PolymorphicDowncast<InputSlot*>(&slot));
163 unsigned int CalculateIndexOnOwner()
const override;
170 void SetEdgeStrategy(
unsigned int connectionIndex,
EdgeStrategy strategy);
171 EdgeStrategy GetEdgeStrategyForConnection(
unsigned int connectionIdx)
const;
174 void ValidateConnectionIndex(
unsigned int index)
const;
176 Layer& m_OwningLayer;
178 std::vector<InputSlot*> m_Connections;
181 std::vector<EdgeStrategy> m_EdgeStrategies;
188 if (m_Connection !=
nullptr)
193 m_Connection->Disconnect(*
this);
195 catch (
const std::exception& e)
200 std::cerr <<
"WARNING: An error has occurred when disconnecting an input slot: " 201 << e.what() << std::endl;
221 Layer(
unsigned int numInputSlots,
unsigned int numOutputSlots,
LayerType type,
const char* name);
222 Layer(
unsigned int numInputSlots,
unsigned int numOutputSlots,
LayerType type,
DataLayout layout,
const char* name);
224 void ExecuteStrategy(
IStrategy& strategy)
const override;
234 return m_OutputHandlers[i];
239 return const_cast<OutputHandler&
>(
const_cast<const Layer*
>(
this)->GetOutputHandler(i));
245 const std::vector<InputSlot>&
GetInputSlots()
const {
return m_InputSlots; }
250 std::vector<InputSlot>::iterator
EndInputSlots() {
return m_InputSlots.end(); }
254 std::vector<OutputSlot>::iterator
EndOutputSlots() {
return m_OutputSlots.end(); }
259 unsigned int numConnections = 0;
261 for (
auto&& output : GetOutputSlots())
263 numConnections += output.GetNumConnections();
266 return (GetNumOutputSlots() > 0) && (numConnections == 0);
270 void ResetPriority()
const;
286 const bool IsMemoryManaged =
true);
292 void VerifyLayerConnections(
unsigned int expectedConnections,
const CheckLocation& location)
const;
294 virtual void ValidateTensorShapesFromInputs() = 0;
296 std::vector<TensorShape> InferOutputShapes(
const std::vector<TensorShape>& inputShapes)
const override;
303 virtual void ReleaseConstantData();
305 template<
typename Op>
308 for (
auto constant : GetConstantTensorsByRef())
319 const char*
GetName()
const override {
return m_LayerName.c_str(); }
321 unsigned int GetNumInputSlots()
const override {
return static_cast<unsigned int>(m_InputSlots.size()); }
322 unsigned int GetNumOutputSlots()
const override {
return static_cast<unsigned int>(m_OutputSlots.size()); }
336 virtual void Reparent(
Graph& dest, std::list<Layer*>::const_iterator iterator) = 0;
340 m_BackendHint = backend;
346 m_ShapeInferenceMethod = shapeInferenceMethod;
351 m_AllowExpandedDims = allowExpandedDims;
357 return std::static_pointer_cast<T>(m_AdditionalInfoObject);
362 m_AdditionalInfoObject = additionalInfo;
370 virtual ~
Layer() =
default;
372 template <
typename QueueDescriptor>
376 CollectWorkloadInputs(dataCollector);
379 template <
typename QueueDescriptor>
383 CollectWorkloadOutputs(dataCollector);
386 void ValidateAndCopyShape(
const TensorShape& outputShape,
389 const std::string& layerName,
390 const unsigned int outputSlotIndex = 0);
395 template <
typename QueueDescriptor>
399 CollectQueueDescriptorInputs(descriptor, info);
400 CollectQueueDescriptorOutputs(descriptor, info);
404 template <
typename LayerType,
typename ... Params>
426 const std::string m_LayerName;
428 std::vector<InputSlot> m_InputSlots;
429 std::vector<OutputSlot> m_OutputSlots;
437 mutable bool m_Visiting =
false;
439 bool m_AllowExpandedDims =
false;
443 std::list<std::string> m_RelatedLayerNames;
456 unsigned int numOutputSlots,
460 :
Layer(numInputSlots, numOutputSlots, type, name)
std::vector< InputSlot >::iterator EndInputSlots()
void CollectQueueDescriptorInputs(QueueDescriptor &descriptor, WorkloadInfo &info) const
const std::vector< InputSlot * > & GetConnections() const
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
unsigned int GetNumInputSlots() const override
Returns the number of connectable input slots.
std::unique_ptr< armnn::IWorkload > CreateWorkload(const armnn::IWorkloadFactory &workloadFactory, const armnn::WorkloadInfo &info, const DescriptorType &descriptor)
Optional< BackendId > GetBackendHint() const
void AddRelatedLayerName(const std::string layerName)
void BackendSelectionHint(Optional< BackendId > backend) final
Provide a hint for the optimizer as to which backend to prefer for this layer.
void SetAllowExpandedDims(bool allowExpandedDims)
void SetShapeInferenceMethod(ShapeInferenceMethod shapeInferenceMethod)
const std::vector< EdgeStrategy > & GetEdgeStrategies() const
LayerBindingId GetBindingId() const
void OperateOnConstantTensors(Op op)
void ExecuteStrategy(IStrategy &strategy) const override
Apply a visitor to this layer.
OutputSlot & GetOutputSlot(unsigned int index=0) override
Get the output slot handle by slot index.
Layer & GetOwningLayer() const
ShapeInferenceMethod GetShapeInferenceMethod() const
unsigned int LayerPriority
void Disconnect(IInputSlot &slot) override
int Connect(IInputSlot &destination) override
Copyright (c) 2021 ARM Limited and Contributors.
void SetBackendId(const BackendId &id)
const std::vector< InputSlot > & GetInputSlots() const
bool IsOutputUnconnected()
unsigned int GetNumOutputSlots() const override
Returns the number of connectable output slots.
int LayerBindingId
Type of identifiers for bindable layers (inputs, outputs).
Base class for all descriptors.
std::vector< InputSlot >::iterator BeginInputSlots()
std::shared_ptr< void > AdditionalInfoObjectPtr
unsigned int GetNumConnections() const override
const InputSlot & GetInputSlot(unsigned int index) const override
Get a const input slot handle by slot index.
std::vector< TensorInfo > m_InputTensorInfos
bool operator==(const armnn::DataLayout &dataLayout, const DataLayoutIndexed &indexed)
Equality methods.
Null Descriptor used as a return value from the IConnectableLayer GetParameters method by layers which do not have a descriptor.
std::vector< std::reference_wrapper< std::shared_ptr< ConstTensorHandle > >> ConstantTensors
void SetGuid(LayerGuid guid)
AdditionalInfoObjectPtr m_AdditionalInfoObject
WorkloadInfo PrepInfoAndDesc(QueueDescriptor &descriptor) const
Helper function to reduce duplication in *LayerCreateWorkload.
An output connection slot for a layer.
const std::string & GetNameStr() const
LayerType GetType() const override
Returns the armnn::LayerType of this layer.
std::vector< TensorInfo > m_OutputTensorInfos
virtual void ExecuteStrategy(const IConnectableLayer *layer, const armnn::BaseDescriptor &descriptor, const std::vector< armnn::ConstTensor > &constants, const char *name, const armnn::LayerBindingId id=0)=0
InputSlot & GetInputSlot(unsigned int index) override
Get the input slot handle by slot index.
const BackendId & GetBackendId() const
std::vector< OutputHandler > m_OutputHandlers
const std::vector< OutputSlot > & GetOutputSlots() const
BindableLayer(unsigned int numInputSlots, unsigned int numOutputSlots, LayerType type, const char *name, LayerBindingId id)
void SetAdditionalInfoForObject(const AdditionalInfoObjectPtr &additionalInfo)
const std::list< std::string > & GetRelatedLayerNames()
arm::pipe::ProfilingGuid LayerGuid
Define LayerGuid type.
std::vector< OutputSlot >::iterator BeginOutputSlots()
std::vector< ITensorHandle * > m_Outputs
void CollectQueueDescriptorOutputs(QueueDescriptor &descriptor, WorkloadInfo &info) const
const OutputHandler & GetOutputHandler(unsigned int i=0) const
virtual const BaseDescriptor & GetParameters() const override
If the layer has a descriptor return it.
bool GetAllowExpandedDims() const
std::vector< OutputSlot >::iterator EndOutputSlots()
std::enable_if_t< std::is_unsigned< Source >::value && std::is_unsigned< Dest >::value, Dest > numeric_cast(Source source)
const OutputSlot & GetOutputSlot(unsigned int index=0) const override
Get the const output slot handle by slot index.
OutputHandler & GetOutputHandler()
OutputSlot(Layer &owner, OutputHandler &outputHandler)
const OutputHandler & GetOutputHandler() const
Contains information about TensorInfos of a layer.
const char * GetName() const override
Returns the name of the layer.
void Connect(armnn::IConnectableLayer *from, armnn::IConnectableLayer *to, const armnn::TensorInfo &tensorInfo, unsigned int fromIndex, unsigned int toIndex)
std::vector< ITensorHandle * > m_Inputs
std::function< void(const std::string &name, const std::string &value)> ParameterStringifyFunction
const TensorInfo & GetTensorInfo(const ITensorHandle *tensorHandle)
float32 helpers
ShapeInferenceMethod
The ShapeInferenceMethod modify how the output shapes are treated.
ShapeInferenceMethod m_ShapeInferenceMethod
OutputHandler & GetOutputHandler(unsigned int i=0)
std::shared_ptr< T > GetAdditionalInformation() const
LayerType
When adding a new layer, adapt also the LastLayer enum value in the enum class LayerType below...
LayerGuid GetGuid() const final
Returns the unique id of the layer.