16 #include <client/include/IProfilingService.hpp> 18 #include <fmt/format.h> 26 NullDescriptor Layer::m_NullDescriptor;
54 if (prevSlot !=
nullptr)
57 prevSlot->Disconnect(*
this);
66 const TensorInfo& tensorInfo = prevSlot->GetTensorInfo();
77 ValidateConnectionIndex(index);
78 return m_Connections[index];
83 ValidateConnectionIndex(index);
84 return m_Connections[index];
89 GetOutputHandler().SetTensorInfo(tensorInfo);
94 return GetOutputHandler().GetTensorInfo();
103 return GetOutputHandler().IsTensorInfoSet();
108 ARMNN_ASSERT_MSG(IsTensorInfoSet(),
"TensorInfo must be set in order to validate the shape.");
109 return shape == m_OutputHandler.GetTensorInfo().GetShape();
115 m_Connections.push_back(&destination);
123 auto it = std::find(m_Connections.begin(), m_Connections.end(), &slot);
125 if (it == m_Connections.end())
130 auto idx = std::distance(m_Connections.begin(), it);
131 m_Connections.erase(std::remove(m_Connections.begin(), m_Connections.end(), &slot), m_Connections.end());
133 m_EdgeStrategies.erase(m_EdgeStrategies.begin() + idx);
138 while (GetNumConnections() > 0)
141 Disconnect(connection);
147 while (GetNumConnections() > 0)
150 "Cannot move connections once memory strategies have be established.");
153 Disconnect(connection);
154 destination.
Connect(connection);
180 for (
unsigned int i = 0; i < GetNumConnections(); i++)
187 void OutputSlot::ValidateConnectionIndex(
unsigned int index)
const 189 if (armnn::numeric_cast<std::size_t>(index) >= m_Connections.size())
202 m_TensorHandleFactoryId = id;
207 return m_TensorHandleFactoryId;
212 m_EdgeStrategies[connectionIndex] = strategy;
217 return m_EdgeStrategies[connectionIdx];
221 unsigned int numOutputSlots,
225 : m_OutputHandlers(numOutputSlots)
227 , m_LayerName(name ? name :
"")
231 , m_Guid(
arm::pipe::IProfilingService::GetNextGuid())
234 m_InputSlots.reserve(numInputSlots);
235 for (
unsigned int i = 0; i < numInputSlots; ++i)
237 m_InputSlots.emplace_back(*
this, i);
240 m_OutputSlots.reserve(numOutputSlots);
241 for (
unsigned int i = 0; i < numOutputSlots; ++i)
248 unsigned int numOutputSlots,
261 const OutputHandler& outputHandler = inputSlot.GetConnectedOutputSlot()->GetOutputHandler();
262 dataCollector.
Push(outputHandler.GetData(), outputHandler.GetTensorInfo());
270 outputHandler.CollectWorkloadOutputs(dataCollector);
281 const bool IsMemoryManaged)
297 handleFactory = registry.
GetFactory(factoryId);
330 constexpr
LayerPriority inputPrio = std::numeric_limits<LayerPriority>::lowest();
331 constexpr
LayerPriority outputPrio = std::numeric_limits<LayerPriority>::max();
335 m_Priority = inputPrio;
339 m_Priority = outputPrio;
341 else if (m_Priority == 0)
350 const OutputSlot *outputSlot = slot.GetConnectedOutputSlot();
367 if (parentPrio >= outputPrio)
372 m_Priority = parentPrio + 1U;
382 for (
unsigned int i=0; i<expectedConnections; ++i)
387 fmt::format(
"Input connection #{0} must be connected " 388 "for {1} layer {2} {3}",
410 fmt::format(
"Default implementation for InferOutputShapes can only be used for " 411 "layers with the same number of input and output slots. This doesn't " 412 "hold for {0} layer {1} (#inputs={2} #outputs={3}) {4}",
425 const std::string& layerName,
426 const unsigned int outputSlotIndex)
430 if (m_AllowExpandedDims)
435 if (outputDims.size() != inferredDims.size())
437 std::stringstream ss;
438 ss << layerName <<
": TensorShape set on OutputSlot[" << outputSlotIndex <<
439 "] does not match the inferred shape. ";
440 ss << outputShape <<
" != " << inferredShape;
443 for (
unsigned int i = 0; i < outputDims.size(); ++i)
445 if (outputDims[i] != inferredDims[i])
447 std::stringstream ss;
448 ss << layerName <<
": TensorShape set on OutputSlot[" << outputSlotIndex <<
449 "] does not match the inferred shape at dimension index [";
450 ss << i <<
"] " << outputShape <<
" != " << inferredShape;
458 ConditionalThrowIfNotEqual<LayerValidationException>(
459 layerName +
": TensorShape set on OutputSlot[0] does not match the inferred shape.",
472 std::stringstream ss;
473 ss << layerName <<
": TensorShape set on OutputSlot[" << outputSlotIndex <<
474 "] does not match the inferred shape at dimension index [";
475 ss << i <<
"] " << outputShape <<
" != " << inferredShape;
485 info.GetQuantizationScale(),
486 info.GetQuantizationOffset());
495 ConditionalThrow<LayerValidationException>(
497 "Dimensionality can not be NotSpecified while using ShapeInferenceMethod::ValidateOnly");
499 ConditionalThrow<LayerValidationException>(
501 "Unspecified dimension while using ShapeInferenceMethod::ValidateOnly");
507 std::string guid = std::to_string(m_Guid);
509 std::string backendId = std::string(m_BackendId);
510 if (!(guid.compare(
"") == 0) && !guid.empty())
514 if(!(m_LayerName.compare(
"") == 0) && !m_LayerName.empty())
516 fn(
"LayerName",m_LayerName);
518 if(!(layerType.compare(
"") == 0) && !layerType.empty())
520 fn(
"LayerType",layerType);
522 if(!(backendId.compare(
"") == 0) && !backendId.empty())
524 fn(
"BackendID",backendId);
526 std::shared_ptr<ActivationDescriptor>
527 activationDescPtr = GetAdditionalInformation<ActivationDescriptor>();
529 if (activationDescPtr)
543 return m_OwningLayer;
548 return m_OwningLayer;
void AssertNumberOfInputSlots(Layer &layer)
virtual void ReleaseConstantData()
bool ValidateTensorShape(const TensorShape &shape) const
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
void SetEdgeStrategy(unsigned int connectionIndex, EdgeStrategy strategy)
unsigned int GetNumInputSlots() const override
Returns the number of connectable input slots.
std::string AsString() const
std::vector< TensorShape > InferOutputShapes(const std::vector< TensorShape > &inputShapes) const override
Infer the shape of the output(s) based on the provided input shape(s)
bool AreAllDimensionsSpecified() const
Checks that every dimension of the shape is specified (returns false if at least one dimension is unspecified).
std::vector< unsigned int > SqueezeDims(const armnn::TensorShape &tensorShape)
LayerGuid GetOwningLayerGuid() const override
void OperateOnConstantTensors(Op op)
Dimensionality GetDimensionality() const
Function that returns the dimensionality of the tensor shape.
Layer & GetOwningLayer() const
int Connect(InputSlot &destination)
virtual void ExecuteStrategy(const armnn::IConnectableLayer *layer, const armnn::BaseDescriptor &descriptor, const std::vector< armnn::ConstTensor > &constants, const char *name, const armnn::LayerBindingId id=0)=0
unsigned int LayerPriority
EdgeStrategy GetEdgeStrategyForConnection(unsigned int connectionIdx) const
void VerifyShapeInferenceType(const TensorShape &outputShape, ShapeInferenceMethod shapeInferenceMethod)
Copyright (c) 2021 ARM Limited and Contributors.
void IgnoreUnused(Ts &&...)
const std::vector< InputSlot > & GetInputSlots() const
unsigned int GetNumOutputSlots() const override
Returns the number of connectable output slots.
bool GetDimensionSpecificity(unsigned int i) const
Indicates whether the size of the given dimension has been specified.
void ValidateAndCopyShape(const TensorShape &outputShape, const TensorShape &inferredShape, const ShapeInferenceMethod shapeInferenceMethod, const std::string &layerName, const unsigned int outputSlotIndex=0)
void Disconnect(InputSlot &slot)
const IConnectableLayer & GetOwningIConnectableLayer() const override
Base class for all descriptors.
void CreateTensorHandles(const IWorkloadFactory &factory, const bool IsMemoryManaged=true)
Creates tensor handles used by the intermediate tensors.
void VerifyLayerConnections(unsigned int expectedConnections, const CheckLocation &location) const
unsigned int GetNumConnections() const override
const InputSlot & GetInputSlot(unsigned int index) const override
Get a const input slot handle by slot index.
void ExecuteStrategy(IStrategy &strategy) const override
Apply a visitor to this layer.
void ResetPriority() const
#define ARMNN_ASSERT_MSG(COND, MSG)
AdditionalInfoObjectPtr m_AdditionalInfoObject
DataType GetDataType() const
void Push(ITensorHandle *handle, const TensorInfo &info)
Validate all output shapes.
const std::string & GetNameStr() const
LayerType GetType() const override
Returns the armnn::LayerType of this layer.
LayerPriority GetPriority() const
Layer(unsigned int numInputSlots, unsigned int numOutputSlots, LayerType type, const char *name)
#define ARMNN_ASSERT(COND)
virtual void ValidateTensorShapesFromInputs()=0
std::vector< OutputHandler > m_OutputHandlers
void * m_AdditionalInfoObject
void SetTensorInfo(const TensorInfo &tensorInfo)
Sets the TensorInfo used by this output handler.
void SetAdditionalInfo(QueueDescriptor &descriptor) const
void SetTensorHandleFactory(const ITensorHandleFactory::FactoryId &id)
arm::pipe::ProfilingGuid LayerGuid
Define LayerGuid type.
EmptyOptional is used to initialize the Optional class in case we want to have a default value for an Optional.
bool operator==(const OutputSlot &other) const
const OutputHandler & GetOutputHandler(unsigned int i=0) const
unsigned int GetNumDimensions() const
Function that returns the tensor rank.
ITensorHandleFactory * GetFactory(ITensorHandleFactory::FactoryId id) const
Find a TensorHandleFactory by Id. Returns nullptr if not found.
void SetTensorInfo(const TensorInfo &tensorInfo) override
virtual void SerializeLayerParameters(ParameterStringifyFunction &fn) const
Helper to serialize the layer parameters to string.
std::enable_if_t< std::is_unsigned< Source >::value &&std::is_unsigned< Dest >::value, Dest > numeric_cast(Source source)
DataType GetDataType() const
const OutputSlot & GetOutputSlot(unsigned int index=0) const override
Get the const output slot handle by slot index.
Infer missing output shapes and validate all output shapes.
virtual const TensorInfo & GetTensorInfo() const =0
const OutputHandler & GetOutputHandler() const
const char * GetName() const override
Returns the name of the layer.
ITensorHandleFactory::FactoryId GetTensorHandleFactoryId() const
bool IsTensorInfoSet() const override
std::function< void(const std::string &name, const std::string &value)> ParameterStringifyFunction
static void Serialize(ParameterStringifyFunction &, const LayerParameter &)
const TensorInfo & GetTensorInfo(const ITensorHandle *tensorHandle)
float32 helpers
virtual void CreateTensorHandles(const TensorHandleFactoryRegistry ®istry, const IWorkloadFactory &factory, const bool IsMemoryManaged=true)
const TensorInfo & GetTensorInfo() const override
ShapeInferenceMethod
The ShapeInferenceMethod modify how the output shapes are treated.
const char * GetLayerTypeAsCString(LayerType type)
static const FactoryId LegacyFactoryId
void MoveAllConnections(OutputSlot &destination)
Moves all connections to another OutputSlot.
const InputSlot * GetConnection(unsigned int index) const override
LayerType
When adding a new layer, adapt also the LastLayer enum value in the enum class LayerType below...
LayerGuid GetGuid() const final
Returns the unique id of the layer.
unsigned int CalculateIndexOnOwner() const override