#include <fmt/format.h>

// InputSlot::Insert: re-wire an existing connection through the newly inserted layer.
if (prevSlot != nullptr)
{
    // Disconnect the parent from this slot, then pick up its TensorInfo for the inserted layer.
    prevSlot->Disconnect(*this);
    const TensorInfo& tensorInfo = prevSlot->GetTensorInfo();
}
// OutputSlot::GetConnection (const overload): bounds-check, then return the stored slot.
ValidateConnectionIndex(index);
return m_Connections[index];

// OutputSlot::GetConnection (non-const overload)
ValidateConnectionIndex(index);
return m_Connections[index];
// OutputSlot::SetTensorInfo / GetTensorInfo / IsTensorInfoSet all delegate to the OutputHandler.
GetOutputHandler().SetTensorInfo(tensorInfo);
return GetOutputHandler().GetTensorInfo();
return GetOutputHandler().IsTensorInfoSet();

// OutputSlot::ValidateTensorShape
ARMNN_ASSERT_MSG(IsTensorInfoSet(), "TensorInfo must be set in order to validate the shape.");
return shape == m_OutputHandler.GetTensorInfo().GetShape();
// OutputSlot::Connect registers the destination input slot.
m_Connections.push_back(&destination);

// OutputSlot::Disconnect: erase the slot and its matching edge strategy, keeping both vectors in sync.
auto it = std::find(m_Connections.begin(), m_Connections.end(), &slot);
if (it == m_Connections.end())
{
    return;
}
auto idx = std::distance(m_Connections.begin(), it);
m_Connections.erase(std::remove(m_Connections.begin(), m_Connections.end(), &slot), m_Connections.end());
m_EdgeStrategies.erase(m_EdgeStrategies.begin() + idx);
// OutputSlot::DisconnectAll
while (GetNumConnections() > 0)
{
    Disconnect(connection);
}

// OutputSlot::MoveAllConnections: re-home every remaining connection onto the destination slot.
while (GetNumConnections() > 0)
{
    ARMNN_ASSERT_MSG(m_EdgeStrategies[0] == EdgeStrategy::Undefined,
                     "Cannot move connections once memory strategies have been established.");
    Disconnect(connection);
    destination.Connect(connection);
}
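For context, the Connect/Disconnect plumbing above is what runs underneath the public graph-building API. Below is a minimal sketch using the public INetwork interface; the layer names, the ReLU activation and the {1, 8} Float32 shape are illustrative assumptions, not taken from the listing.

#include <armnn/ArmNN.hpp>

int main()
{
    using namespace armnn;

    INetworkPtr network = INetwork::Create();
    IConnectableLayer* input  = network->AddInputLayer(0, "input");

    ActivationDescriptor reluDesc;
    reluDesc.m_Function = ActivationFunction::ReLu;
    IConnectableLayer* relu   = network->AddActivationLayer(reluDesc, "relu");
    IConnectableLayer* output = network->AddOutputLayer(0, "output");

    // Each Connect call is ultimately recorded by OutputSlot::Connect as shown above.
    input->GetOutputSlot(0).Connect(relu->GetInputSlot(0));
    relu->GetOutputSlot(0).Connect(output->GetInputSlot(0));

    // Setting the TensorInfo on the producing slot is what later shape validation relies on.
    TensorInfo info(TensorShape({1, 8}), DataType::Float32);
    input->GetOutputSlot(0).SetTensorInfo(info);
    relu->GetOutputSlot(0).SetTensorInfo(info);

    return 0;
}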
// OutputSlot::operator== compares each connection in turn.
for (unsigned int i = 0; i < GetNumConnections(); i++)

// OutputSlot::ValidateConnectionIndex throws if the index is out of range.
void OutputSlot::ValidateConnectionIndex(unsigned int index) const
{
    if (armnn::numeric_cast<std::size_t>(index) >= m_Connections.size())
}
// OutputSlot tensor-handle factory and edge-strategy accessors:
m_TensorHandleFactoryId = id;                      // SetTensorHandleFactory
return m_TensorHandleFactoryId;                    // GetTensorHandleFactoryId
m_EdgeStrategies[connectionIndex] = strategy;      // SetEdgeStrategy
return m_EdgeStrategies[connectionIdx];            // GetEdgeStrategyForConnection
// Layer constructor: create the requested numbers of input and output slots.
Layer::Layer(unsigned int numInputSlots,
             unsigned int numOutputSlots,
             LayerType type,
             DataLayout layout,
             const char* name)
    : m_OutputHandlers(numOutputSlots)
    , m_LayerName(name ? name : "")
{
    IgnoreUnused(layout);

    m_InputSlots.reserve(numInputSlots);
    for (unsigned int i = 0; i < numInputSlots; ++i)
    {
        m_InputSlots.emplace_back(*this, i);
    }

    m_OutputSlots.reserve(numOutputSlots);
    for (unsigned int i = 0; i < numOutputSlots; ++i)
    {
        m_OutputSlots.emplace_back(*this, m_OutputHandlers[i]);
    }
}

// A second overload taking (numInputSlots, numOutputSlots, type, name) delegates to the constructor above.
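The slot counts passed in here are what the public API later reports. A small sketch, assuming an ArmNN build that still provides AddAdditionLayer; the layer choice is just an example.

#include <armnn/INetwork.hpp>
#include <iostream>

int main()
{
    armnn::INetworkPtr network = armnn::INetwork::Create();
    armnn::IConnectableLayer* add = network->AddAdditionLayer("add");

    // An addition layer is constructed with two input slots and one output slot.
    std::cout << add->GetNumInputSlots() << " input slot(s), "
              << add->GetNumOutputSlots() << " output slot(s)" << std::endl;
    return 0;
}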
// Layer::CollectWorkloadInputs: push each connected output handler's data and TensorInfo.
const OutputHandler& outputHandler = inputSlot.GetConnectedOutputSlot()->GetOutputHandler();
dataCollector.Push(outputHandler.GetData(), outputHandler.GetTensorInfo());

// Layer::CollectWorkloadOutputs
outputHandler.CollectWorkloadOutputs(dataCollector);

// Layer::CreateTensorHandles(..., const bool IsMemoryManaged)

// Layer::ReleaseConstantData: drop each constant tensor handle once it is no longer needed.
handle.reset(nullptr);
// Layer::GetPriority: input layers get the lowest priority, output layers the highest;
// every other layer gets one more than the highest priority among its parents.
constexpr LayerPriority inputPrio  = std::numeric_limits<LayerPriority>::lowest();
constexpr LayerPriority outputPrio = std::numeric_limits<LayerPriority>::max();

m_Priority = inputPrio;       // LayerType::Input
m_Priority = outputPrio;      // LayerType::Output
else if (m_Priority == 0)     // priority not yet computed
const OutputSlot* outputSlot = slot.GetConnectedOutputSlot();
if (parentPrio >= outputPrio)
m_Priority = parentPrio + 1U;
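Read together, these fragments implement a longest-path style priority. A minimal standalone sketch of the same recursion follows, using a hypothetical Node type rather than the ArmNN classes; the real implementation additionally guards against cycles.

#include <algorithm>
#include <limits>
#include <vector>

using Priority = unsigned int;

struct Node
{
    bool isInput = false;
    bool isOutput = false;
    std::vector<const Node*> parents;   // nodes feeding this one
    mutable Priority priority = 0;      // 0 means "not yet computed"
};

Priority GetPriority(const Node& node)
{
    constexpr Priority inputPrio  = std::numeric_limits<Priority>::lowest();
    constexpr Priority outputPrio = std::numeric_limits<Priority>::max();

    if (node.isInput)       { node.priority = inputPrio;  }
    else if (node.isOutput) { node.priority = outputPrio; }
    else if (node.priority == 0)
    {
        Priority parentPrio = inputPrio;
        for (const Node* parent : node.parents)
        {
            parentPrio = std::max(parentPrio, GetPriority(*parent));
        }
        node.priority = parentPrio + 1U;   // one step deeper than the deepest parent
    }
    return node.priority;
}

Ordering nodes by this priority places inputs first and outputs last, which matches the intent visible in the fragments above.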
// Layer::VerifyLayerConnections: every expected input slot must be connected.
for (unsigned int i = 0; i < expectedConnections; ++i)
    if (GetInputSlot(i).GetConnection() == nullptr)
        throw LayerValidationException(
            fmt::format("Input connection #{0} must be connected "
                        "for {1} layer {2} {3}",
                        i, GetLayerTypeAsCString(GetType()), GetNameStr(), location.AsString()));
// Layer::InferOutputShapes (default implementation) requires #inputs == #outputs, otherwise it throws:
fmt::format("Default implementation for InferOutputShapes can only be used for "
            "layers with the same number of input and output slots. This doesn't "
            "hold for {0} layer {1} (#inputs={2} #outputs={3}) {4}",
// Layer::ValidateAndCopyShape(..., const std::string& layerName, const unsigned int outputSlotIndex)

// Under ShapeInferenceMethod::ValidateOnly the declared and inferred shapes simply have to match:
ConditionalThrowIfNotEqual<LayerValidationException>(
    layerName + ": TensorShape set on OutputSlot[0] does not match the inferred shape.",
    outputShape,
    inferredShape);

// Otherwise any per-dimension mismatch is reported:
std::stringstream ss;
ss << layerName << ": TensorShape set on OutputSlot[" << outputSlotIndex
   << "] does not match the inferred shape at dimension index [";
ss << i << "] " << outputShape << " != " << inferredShape;

// ...and the inferred shape is adopted, keeping the existing quantization parameters:
    info.GetQuantizationScale(),
    info.GetQuantizationOffset());
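The final two lines above preserve the quantization parameters when the inferred shape is adopted. A small sketch of that step, assuming armnn/Tensor.hpp; the helper name is made up for illustration.

#include <armnn/Tensor.hpp>

// Adopt the inferred shape but keep the existing data type and quantization parameters.
armnn::TensorInfo CopyShapeKeepQuantization(const armnn::TensorInfo& info,
                                            const armnn::TensorShape& inferredShape)
{
    return armnn::TensorInfo(inferredShape,
                             info.GetDataType(),
                             info.GetQuantizationScale(),
                             info.GetQuantizationOffset());
}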
// Layer::VerifyShapeInferenceType: ValidateOnly requires a fully specified output shape.
ConditionalThrow<LayerValidationException>(outputShape.GetDimensionality() != Dimensionality::NotSpecified,
    "Dimensionality can not be NotSpecified while using ShapeInferenceMethod::ValidateOnly");
ConditionalThrow<LayerValidationException>(outputShape.AreAllDimensionsSpecified(),
    "Unspecified dimension while using ShapeInferenceMethod::ValidateOnly");
// Layer::SerializeLayerParameters: report each non-empty attribute through the callback.
std::string guid = std::to_string(m_Guid);
std::string layerType = GetLayerTypeAsCString(m_Type);
std::string backendId = std::string(m_BackendId);
if (!(guid.compare("") == 0) && !guid.empty())
{
    fn("Guid", guid);
}
if (!(m_LayerName.compare("") == 0) && !m_LayerName.empty())
{
    fn("LayerName", m_LayerName);
}
if (!(layerType.compare("") == 0) && !layerType.empty())
{
    fn("LayerType", layerType);
}
if (!(backendId.compare("") == 0) && !backendId.empty())
{
    fn("BackendID", backendId);
}

// If an ActivationDescriptor was attached as additional information, serialize that too.
std::shared_ptr<ActivationDescriptor> activationDescPtr = GetAdditionalInformation<ActivationDescriptor>();
if (activationDescPtr)
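The fn callback used above has the ParameterStringifyFunction signature listed further down: a std::function taking a parameter name and value. A minimal sketch of such a callback; printing to std::cout and the parameter values are illustrative only.

#include <functional>
#include <iostream>
#include <string>

using ParameterStringifyFunction =
    std::function<void(const std::string& name, const std::string& value)>;

int main()
{
    // The shape of callback SerializeLayerParameters expects: it is invoked once per parameter.
    ParameterStringifyFunction printParam = [](const std::string& name, const std::string& value)
    {
        std::cout << name << ": " << value << "\n";
    };

    printParam("LayerName", "conv1");   // hypothetical values for illustration
    printParam("BackendID", "CpuAcc");
    return 0;
}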
Symbols referenced in the listing above:

virtual void ReleaseConstantData()
bool ValidateTensorShape(const TensorShape &shape) const
void SetEdgeStrategy(unsigned int connectionIndex, EdgeStrategy strategy)
unsigned int GetNumInputSlots() const override    // Returns the number of connectable input slots.
std::string AsString() const
std::vector< TensorShape > InferOutputShapes(const std::vector< TensorShape > &inputShapes) const override    // Infer the shape of the output(s) based on the provided input shape(s).
bool AreAllDimensionsSpecified() const    // Checks if there is at least one dimension not specified.
LayerGuid GetOwningLayerGuid() const override
void OperateOnConstantTensors(Op op)
Dimensionality GetDimensionality() const    // Function that returns the tensor dimensionality.
Layer & GetOwningLayer() const
int Connect(InputSlot &destination)
virtual void ExecuteStrategy(const armnn::IConnectableLayer *layer, const armnn::BaseDescriptor &descriptor, const std::vector< armnn::ConstTensor > &constants, const char *name, const armnn::LayerBindingId id=0)=0
unsigned int LayerPriority
EdgeStrategy GetEdgeStrategyForConnection(unsigned int connectionIdx) const
void VerifyShapeInferenceType(const TensorShape &outputShape, ShapeInferenceMethod shapeInferenceMethod)
void IgnoreUnused(Ts &&...)
const std::vector< InputSlot > & GetInputSlots() const
unsigned int GetNumOutputSlots() const override    // Returns the number of connectable output slots.
bool GetDimensionSpecificity(unsigned int i) const    // Gets information about whether the dimension size has been specified or not.
void ValidateAndCopyShape(const TensorShape &outputShape, const TensorShape &inferredShape, const ShapeInferenceMethod shapeInferenceMethod, const std::string &layerName, const unsigned int outputSlotIndex=0)
void Disconnect(InputSlot &slot)
BaseDescriptor    // Base class for all descriptors.
void CreateTensorHandles(const IWorkloadFactory &factory, const bool IsMemoryManaged=true)    // Creates tensor handles used by the intermediate tensors.
void VerifyLayerConnections(unsigned int expectedConnections, const CheckLocation &location) const
unsigned int GetNumConnections() const override
const InputSlot & GetInputSlot(unsigned int index) const override    // Get a const input slot handle by slot index.
void ExecuteStrategy(IStrategy &strategy) const override    // Apply a visitor to this layer.
void ResetPriority() const
#define ARMNN_ASSERT_MSG(COND, MSG)
AdditionalInfoObjectPtr m_AdditionalInfoObject
DataType GetDataType() const
void Push(ITensorHandle *handle, const TensorInfo &info)
ShapeInferenceMethod::ValidateOnly    // Validate all output shapes.
const std::string & GetNameStr() const
LayerType GetType() const override    // Returns the armnn::LayerType of this layer.
LayerPriority GetPriority() const
Layer(unsigned int numInputSlots, unsigned int numOutputSlots, LayerType type, const char *name)
#define ARMNN_ASSERT(COND)
virtual void ValidateTensorShapesFromInputs()=0
std::vector< OutputHandler > m_OutputHandlers
void * m_AdditionalInfoObject
void SetTensorInfo(const TensorInfo &tensorInfo)    // Sets the TensorInfo used by this output handler.
void SetAdditionalInfo(QueueDescriptor &descriptor) const
void SetTensorHandleFactory(const ITensorHandleFactory::FactoryId &id)
EmptyOptional    // EmptyOptional is used to initialize the Optional class in case we want to have a default value for an Optional.
bool operator==(const OutputSlot &other) const
const OutputHandler & GetOutputHandler(unsigned int i=0) const
unsigned int GetNumDimensions() const    // Function that returns the tensor rank.
ITensorHandleFactory * GetFactory(ITensorHandleFactory::FactoryId id) const    // Find a TensorHandleFactory by Id; returns nullptr if not found.
void SetTensorInfo(const TensorInfo &tensorInfo) override
virtual void SerializeLayerParameters(ParameterStringifyFunction &fn) const    // Helper to serialize the layer parameters to string.
std::enable_if_t< std::is_unsigned< Source >::value && std::is_unsigned< Dest >::value, Dest > numeric_cast(Source source)
DataType GetDataType() const
const OutputSlot & GetOutputSlot(unsigned int index=0) const override    // Get the const output slot handle by slot index.
ShapeInferenceMethod::InferAndValidate    // Infer missing output shapes and validate all output shapes.
virtual const TensorInfo & GetTensorInfo() const =0
const OutputHandler & GetOutputHandler() const
const char * GetName() const override    // Returns the name of the layer.
ITensorHandleFactory::FactoryId GetTensorHandleFactoryId() const
bool IsTensorInfoSet() const override
std::function< void(const std::string &name, const std::string &value)> ParameterStringifyFunction
static void Serialize(ParameterStringifyFunction &, const LayerParameter &)
const TensorInfo & GetTensorInfo(const ITensorHandle *tensorHandle)
virtual void CreateTensorHandles(const TensorHandleFactoryRegistry &registry, const IWorkloadFactory &factory, const bool IsMemoryManaged=true)
const TensorInfo & GetTensorInfo() const override
ShapeInferenceMethod    // The ShapeInferenceMethod modifies how the output shapes are treated.
const char * GetLayerTypeAsCString(LayerType type)
static const FactoryId LegacyFactoryId
void MoveAllConnections(OutputSlot &destination)    // Moves all connections to another OutputSlot.
const InputSlot * GetConnection(unsigned int index) const override
LayerType    // When adding a new layer, adapt also the LastLayer enum value in the enum class LayerType.
LayerGuid GetGuid() const final    // Returns the unique id of the layer.
unsigned int CalculateIndexOnOwner() const override