13 #include <fmt/format.h> 21 NullDescriptor Layer::m_NullDescriptor;
29 if (prevSlot !=
nullptr)
32 prevSlot->Disconnect(*
this);
40 const TensorInfo& tensorInfo = prevSlot->GetTensorInfo();
51 ValidateConnectionIndex(index);
52 return m_Connections[index];
57 ValidateConnectionIndex(index);
58 return m_Connections[index];
63 GetOutputHandler().SetTensorInfo(tensorInfo);
68 return GetOutputHandler().GetTensorInfo();
77 return GetOutputHandler().IsTensorInfoSet();
82 ARMNN_ASSERT_MSG(IsTensorInfoSet(),
"TensorInfo must be set in order to validate the shape.");
83 return shape == m_OutputHandler.GetTensorInfo().GetShape();
89 m_Connections.push_back(&destination);
97 auto it = std::find(m_Connections.begin(), m_Connections.end(), &slot);
99 if (it == m_Connections.end())
104 auto idx = std::distance(m_Connections.begin(), it);
105 m_Connections.erase(std::remove(m_Connections.begin(), m_Connections.end(), &slot), m_Connections.end());
107 m_EdgeStrategies.erase(m_EdgeStrategies.begin() + idx);
112 while (GetNumConnections() > 0)
115 Disconnect(connection);
121 while (GetNumConnections() > 0)
124 "Cannot move connections once memory strategies have be established.");
127 Disconnect(connection);
128 destination.
Connect(connection);
154 for (
unsigned int i = 0; i < GetNumConnections(); i++)
161 void OutputSlot::ValidateConnectionIndex(
unsigned int index)
const 163 if (armnn::numeric_cast<std::size_t>(index) >= m_Connections.size())
176 m_TensorHandleFactoryId = id;
181 return m_TensorHandleFactoryId;
186 m_EdgeStrategies[connectionIndex] = strategy;
191 return m_EdgeStrategies[connectionIdx];
195 unsigned int numOutputSlots,
199 : m_OutputHandlers(numOutputSlots)
201 , m_LayerName(name ? name :
"")
208 m_InputSlots.reserve(numInputSlots);
209 for (
unsigned int i = 0; i < numInputSlots; ++i)
211 m_InputSlots.emplace_back(*
this, i);
214 m_OutputSlots.reserve(numOutputSlots);
215 for (
unsigned int i = 0; i < numOutputSlots; ++i)
222 unsigned int numOutputSlots,
235 const OutputHandler& outputHandler = inputSlot.GetConnectedOutputSlot()->GetOutputHandler();
236 dataCollector.
Push(outputHandler.GetData(), outputHandler.GetTensorInfo());
244 outputHandler.CollectWorkloadOutputs(dataCollector);
255 const bool IsMemoryManaged)
271 handleFactory = registry.
GetFactory(factoryId);
304 constexpr
LayerPriority inputPrio = std::numeric_limits<LayerPriority>::lowest();
305 constexpr
LayerPriority outputPrio = std::numeric_limits<LayerPriority>::max();
309 m_Priority = inputPrio;
313 m_Priority = outputPrio;
315 else if (m_Priority == 0)
324 const OutputSlot *outputSlot = slot.GetConnectedOutputSlot();
341 if (parentPrio >= outputPrio)
346 m_Priority = parentPrio + 1U;
356 for (
unsigned int i=0; i<expectedConnections; ++i)
361 fmt::format(
"Input connection #{0} must be connected " 362 "for {1} layer {2} {3}",
384 fmt::format(
"Default implementation for InferOutputShapes can only be used for " 385 "layers with the same number of input and output slots. This doesn't " 386 "hold for {0} layer {1} (#inputs={2} #outputs={3}) {4}",
399 const std::string& layerName,
400 const unsigned int outputSlotIndex)
404 ConditionalThrowIfNotEqual<LayerValidationException>(
405 layerName +
": TensorShape set on OutputSlot[0] does not match the inferred shape.",
417 std::stringstream ss;
418 ss << layerName <<
": TensorShape set on OutputSlot[" << outputSlotIndex <<
419 "] does not match the inferred shape at dimension index [";
420 ss << i <<
"] " << outputShape <<
" != " << inferredShape;
430 info.GetQuantizationScale(),
431 info.GetQuantizationOffset());
440 ConditionalThrow<LayerValidationException>(
442 "Dimensionality can not be NotSpecified while using ShapeInferenceMethod::ValidateOnly");
444 ConditionalThrow<LayerValidationException>(
446 "Unspecified dimension while using ShapeInferenceMethod::ValidateOnly");
452 std::string guid = std::to_string(m_Guid);
454 std::string backendId = std::string(m_BackendId);
455 if (!(guid.compare(
"") == 0) && !guid.empty())
459 if(!(m_LayerName.compare(
"") == 0) && !m_LayerName.empty())
461 fn(
"LayerName",m_LayerName);
463 if(!(layerType.compare(
"") == 0) && !layerType.empty())
465 fn(
"LayerType",layerType);
467 if(!(backendId.compare(
"") == 0) && !backendId.empty())
469 fn(
"BackendID",backendId);
471 std::shared_ptr<ActivationDescriptor>
472 activationDescPtr = GetAdditionalInformation<ActivationDescriptor>();
474 if (activationDescPtr)
488 return m_OwningLayer;
493 return m_OwningLayer;
virtual void ReleaseConstantData()
bool ValidateTensorShape(const TensorShape &shape) const
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
void SetEdgeStrategy(unsigned int connectionIndex, EdgeStrategy strategy)
unsigned int GetNumInputSlots() const override
Returns the number of connectable input slots.
std::string AsString() const
std::vector< TensorShape > InferOutputShapes(const std::vector< TensorShape > &inputShapes) const override
Infer the shape of the output(s) based on the provided input shape(s)
bool AreAllDimensionsSpecified() const
Checks if there is at least one dimension not specified.
LayerGuid GetOwningLayerGuid() const override
void OperateOnConstantTensors(Op op)
Dimensionality GetDimensionality() const
Function that returns the tensor type.
Layer & GetOwningLayer() const
int Connect(InputSlot &destination)
virtual void ExecuteStrategy(const armnn::IConnectableLayer *layer, const armnn::BaseDescriptor &descriptor, const std::vector< armnn::ConstTensor > &constants, const char *name, const armnn::LayerBindingId id=0)=0
unsigned int LayerPriority
EdgeStrategy GetEdgeStrategyForConnection(unsigned int connectionIdx) const
void VerifyShapeInferenceType(const TensorShape &outputShape, ShapeInferenceMethod shapeInferenceMethod)
Copyright (c) 2021 ARM Limited and Contributors.
void IgnoreUnused(Ts &&...)
const std::vector< InputSlot > & GetInputSlots() const
unsigned int GetNumOutputSlots() const override
Returns the number of connectable output slots.
bool GetDimensionSpecificity(unsigned int i) const
Gets information about if the dimension size has been specified or not.
void ValidateAndCopyShape(const TensorShape &outputShape, const TensorShape &inferredShape, const ShapeInferenceMethod shapeInferenceMethod, const std::string &layerName, const unsigned int outputSlotIndex=0)
void Disconnect(InputSlot &slot)
const IConnectableLayer & GetOwningIConnectableLayer() const override
Base class for all descriptors.
void CreateTensorHandles(const IWorkloadFactory &factory, const bool IsMemoryManaged=true)
Creates tensor handles used by the intermediate tensors.
void VerifyLayerConnections(unsigned int expectedConnections, const CheckLocation &location) const
unsigned int GetNumConnections() const override
const InputSlot & GetInputSlot(unsigned int index) const override
Get a const input slot handle by slot index.
void ExecuteStrategy(IStrategy &strategy) const override
Apply a visitor to this layer.
void ResetPriority() const
#define ARMNN_ASSERT_MSG(COND, MSG)
AdditionalInfoObjectPtr m_AdditionalInfoObject
DataType GetDataType() const
void Push(ITensorHandle *handle, const TensorInfo &info)
Validate all output shapes.
const std::string & GetNameStr() const
LayerType GetType() const override
Returns the armnn::LayerType of this layer.
LayerPriority GetPriority() const
Layer(unsigned int numInputSlots, unsigned int numOutputSlots, LayerType type, const char *name)
#define ARMNN_ASSERT(COND)
virtual void ValidateTensorShapesFromInputs()=0
std::vector< OutputHandler > m_OutputHandlers
void * m_AdditionalInfoObject
void SetTensorInfo(const TensorInfo &tensorInfo)
Sets the TensorInfo used by this output handler.
void SetAdditionalInfo(QueueDescriptor &descriptor) const
void SetTensorHandleFactory(const ITensorHandleFactory::FactoryId &id)
EmptyOptional is used to initialize the Optional class in case we want to have a default value for an Optional.
bool operator==(const OutputSlot &other) const
const OutputHandler & GetOutputHandler(unsigned int i=0) const
unsigned int GetNumDimensions() const
Function that returns the tensor rank.
profiling::ProfilingGuid LayerGuid
Define LayerGuid type.
ITensorHandleFactory * GetFactory(ITensorHandleFactory::FactoryId id) const
Find a TensorHandleFactory by Id. Returns nullptr if not found.
void SetTensorInfo(const TensorInfo &tensorInfo) override
virtual void SerializeLayerParameters(ParameterStringifyFunction &fn) const
Helper to serialize the layer parameters to string.
std::enable_if_t< std::is_unsigned< Source >::value &&std::is_unsigned< Dest >::value, Dest > numeric_cast(Source source)
DataType GetDataType() const
const OutputSlot & GetOutputSlot(unsigned int index=0) const override
Get the const output slot handle by slot index.
Infer missing output shapes and validate all output shapes.
virtual const TensorInfo & GetTensorInfo() const =0
const OutputHandler & GetOutputHandler() const
const char * GetName() const override
Returns the name of the layer.
ITensorHandleFactory::FactoryId GetTensorHandleFactoryId() const
bool IsTensorInfoSet() const override
std::function< void(const std::string &name, const std::string &value)> ParameterStringifyFunction
static void Serialize(ParameterStringifyFunction &, const LayerParameter &)
const TensorInfo & GetTensorInfo(const ITensorHandle *tensorHandle)
float32 helpers
virtual void CreateTensorHandles(const TensorHandleFactoryRegistry &registry, const IWorkloadFactory &factory, const bool IsMemoryManaged=true)
const TensorInfo & GetTensorInfo() const override
ShapeInferenceMethod
The ShapeInferenceMethod modifies how the output shapes are treated.
const char * GetLayerTypeAsCString(LayerType type)
static const FactoryId LegacyFactoryId
void MoveAllConnections(OutputSlot &destination)
Moves all connections to another OutputSlot.
const InputSlot * GetConnection(unsigned int index) const override
LayerType
When adding a new layer, adapt also the LastLayer enum value in the enum class LayerType below...
LayerGuid GetGuid() const final
Returns the unique id of the layer.
unsigned int CalculateIndexOnOwner() const override