ArmNN 21.08
Layer Class Reference [abstract]

#include <Layer.hpp>

Inheritance diagram for Layer:

Layer derives from IConnectableLayer. It is the direct base of AbsLayer, BindableLayer, CastLayer, ConstantLayer, ConvertBf16ToFp32Layer, ConvertFp16ToFp32Layer, ConvertFp32ToBf16Layer, ConvertFp32ToFp16Layer, DebugLayer, DequantizeLayer, ElementwiseBaseLayer, FloorLayer, LayerWithParameters< Parameters >, MapLayer, MemCopyLayer, MemImportLayer, MergeLayer, PreluLayer, QuantizedLstmLayer, QuantizeLayer, RankLayer, RsqrtLayer, ShapeLayer, SwitchLayer and UnmapLayer. LayerWithParameters< Parameters > is in turn instantiated for each layer descriptor: ActivationDescriptor, ArgMinMaxDescriptor, BatchNormalizationDescriptor, BatchToSpaceNdDescriptor, ComparisonDescriptor, Convolution2dDescriptor, DepthToSpaceDescriptor, DepthwiseConvolution2dDescriptor, DetectionPostProcessDescriptor, ElementwiseUnaryDescriptor, FakeQuantizationDescriptor, FillDescriptor, FullyConnectedDescriptor, GatherDescriptor, InstanceNormalizationDescriptor, L2NormalizationDescriptor, LogicalBinaryDescriptor, LogSoftmaxDescriptor, LstmDescriptor, MeanDescriptor, NormalizationDescriptor, OriginsDescriptor, PadDescriptor, PermuteDescriptor, Pooling2dDescriptor, PreCompiledDescriptor, QLstmDescriptor, ReduceDescriptor, ReshapeDescriptor, ResizeDescriptor, SliceDescriptor, SoftmaxDescriptor, SpaceToBatchNdDescriptor, SpaceToDepthDescriptor, StackDescriptor, StandInDescriptor, StridedSliceDescriptor, TransposeConvolution2dDescriptor, TransposeDescriptor and ViewsDescriptor.

Public Member Functions

 Layer (unsigned int numInputSlots, unsigned int numOutputSlots, LayerType type, const char *name)
 
 Layer (unsigned int numInputSlots, unsigned int numOutputSlots, LayerType type, DataLayout layout, const char *name)
 
void ExecuteStrategy (IStrategy &strategy) const override
 Apply a visitor to this layer. More...
 
const std::string & GetNameStr () const
 
const OutputHandler & GetOutputHandler (unsigned int i=0) const
 
OutputHandler & GetOutputHandler (unsigned int i=0)
 
ShapeInferenceMethod GetShapeInferenceMethod () const
 
const std::vector< InputSlot > & GetInputSlots () const
 
const std::vector< OutputSlot > & GetOutputSlots () const
 
std::vector< InputSlot >::iterator BeginInputSlots ()
 
std::vector< InputSlot >::iterator EndInputSlots ()
 
std::vector< OutputSlot >::iterator BeginOutputSlots ()
 
std::vector< OutputSlot >::iterator EndOutputSlots ()
 
bool IsOutputUnconnected ()
 
void ResetPriority () const
 
LayerPriority GetPriority () const
 
LayerType GetType () const override
 Returns the armnn::LayerType of this layer. More...
 
DataType GetDataType () const
 
const BackendId & GetBackendId () const
 
void SetBackendId (const BackendId &id)
 
virtual std::unique_ptr< IWorkload > CreateWorkload (const IWorkloadFactory &factory) const =0
 
virtual void CreateTensorHandles (const TensorHandleFactoryRegistry &registry, const IWorkloadFactory &factory, const bool IsMemoryManaged=true)
 
virtual Layer * Clone (Graph &graph) const =0
 Creates a dynamically-allocated copy of this layer. More...
 
void VerifyLayerConnections (unsigned int expectedConnections, const CheckLocation &location) const
 
virtual void ValidateTensorShapesFromInputs ()=0
 
std::vector< TensorShape > InferOutputShapes (const std::vector< TensorShape > &inputShapes) const override
 Infer the shape of the output(s) based on the provided input shape(s) More...
 
virtual void SerializeLayerParameters (ParameterStringifyFunction &fn) const
 Helper to serialize the layer parameters to string. More...
 
virtual void ReleaseConstantData ()
 
template<typename Op >
void OperateOnConstantTensors (Op op)
 
const char * GetName () const override
 Returns the name of the layer. More...
 
unsigned int GetNumInputSlots () const override
 Returns the number of connectable input slots. More...
 
unsigned int GetNumOutputSlots () const override
 Returns the number of connectable output slots. More...
 
const InputSlot & GetInputSlot (unsigned int index) const override
 Get a const input slot handle by slot index. More...
 
InputSlot & GetInputSlot (unsigned int index) override
 Get the input slot handle by slot index. More...
 
const OutputSlot & GetOutputSlot (unsigned int index=0) const override
 Get the const output slot handle by slot index. More...
 
OutputSlot & GetOutputSlot (unsigned int index=0) override
 Get the output slot handle by slot index. More...
 
void SetGuid (LayerGuid guid)
 
LayerGuid GetGuid () const final
 Returns the unique id of the layer. More...
 
void AddRelatedLayerName (const std::string layerName)
 
const std::list< std::string > & GetRelatedLayerNames ()
 
virtual void Reparent (Graph &dest, std::list< Layer *>::const_iterator iterator)=0
 
void BackendSelectionHint (Optional< BackendId > backend) final
 Provide a hint for the optimizer as to which backend to prefer for this layer. More...
 
Optional< BackendId > GetBackendHint () const
 
void SetShapeInferenceMethod (ShapeInferenceMethod shapeInferenceMethod)
 
template<typename T >
std::shared_ptr< T > GetAdditionalInformation () const
 
void SetAdditionalInfoForObject (const AdditionalInfoObjectPtr &additionalInfo)
 
- Public Member Functions inherited from IConnectableLayer
virtual void Accept (ILayerVisitor &visitor) const =0
 Apply a visitor to this layer. More...
 

Protected Types

using ConstantTensors = std::vector< std::reference_wrapper< std::shared_ptr< ConstTensorHandle > >>
 

Protected Member Functions

virtual ~Layer ()=default
 
template<typename QueueDescriptor >
void CollectQueueDescriptorInputs (QueueDescriptor &descriptor, WorkloadInfo &info) const
 
template<typename QueueDescriptor >
void CollectQueueDescriptorOutputs (QueueDescriptor &descriptor, WorkloadInfo &info) const
 
void ValidateAndCopyShape (const TensorShape &outputShape, const TensorShape &inferredShape, const ShapeInferenceMethod shapeInferenceMethod, const std::string &layerName, const unsigned int outputSlotIndex=0)
 
void VerifyShapeInferenceType (const TensorShape &outputShape, ShapeInferenceMethod shapeInferenceMethod)
 
template<typename QueueDescriptor >
WorkloadInfo PrepInfoAndDesc (QueueDescriptor &descriptor) const
 Helper function to reduce duplication in *LayerCreateWorkload. More...
 
template<typename LayerType , typename ... Params>
LayerType * CloneBase (Graph &graph, Params &&... params) const
 
virtual ConstantTensors GetConstantTensorsByRef ()
 
void SetAdditionalInfo (QueueDescriptor &descriptor) const
 
- Protected Member Functions inherited from IConnectableLayer
 ~IConnectableLayer ()
 Objects are not deletable via the handle. More...
 

Protected Attributes

AdditionalInfoObjectPtr m_AdditionalInfoObject
 
std::vector< OutputHandler > m_OutputHandlers
 
ShapeInferenceMethod m_ShapeInferenceMethod
 

Friends

class Graph
 

Detailed Description

Definition at line 210 of file Layer.hpp.

Member Typedef Documentation

◆ ConstantTensors

using ConstantTensors = std::vector<std::reference_wrapper<std::shared_ptr<ConstTensorHandle> >>
protected

Definition at line 393 of file Layer.hpp.

Constructor & Destructor Documentation

◆ Layer() [1/2]

Layer ( unsigned int  numInputSlots,
unsigned int  numOutputSlots,
LayerType  type,
const char *  name 
)
Parameters
name - Optional name for the layer (may be nullptr).

Definition at line 218 of file Layer.cpp.

References ARMNN_ASSERT, Layer::GetInputSlots(), Layer::m_OutputHandlers, and WorkloadDataCollector::Push().

: Layer(numInputSlots, numOutputSlots, type, DataLayout::NCHW, name)
{
}

◆ Layer() [2/2]

Layer ( unsigned int  numInputSlots,
unsigned int  numOutputSlots,
LayerType  type,
DataLayout  layout,
const char *  name 
)

Definition at line 191 of file Layer.cpp.

References armnn::IgnoreUnused(), and Layer::m_OutputHandlers.

: m_OutputHandlers(numOutputSlots)
, m_ShapeInferenceMethod(ShapeInferenceMethod::ValidateOnly)
, m_LayerName(name ? name : "")
, m_Type(type)
, m_BackendId()
, m_BackendHint(EmptyOptional())
, m_Guid(profiling::ProfilingService::GetNextGuid())
{
    IgnoreUnused(layout);
    m_InputSlots.reserve(numInputSlots);
    for (unsigned int i = 0; i < numInputSlots; ++i)
    {
        m_InputSlots.emplace_back(*this, i);
    }

    m_OutputSlots.reserve(numOutputSlots);
    for (unsigned int i = 0; i < numOutputSlots; ++i)
    {
        m_OutputSlots.emplace_back(*this, m_OutputHandlers[i]);
    }
}

◆ ~Layer()

virtual ~Layer ( )
protected virtual default

Member Function Documentation

◆ AddRelatedLayerName()

void AddRelatedLayerName ( const std::string  layerName)
inline

Definition at line 324 of file Layer.hpp.

{ m_RelatedLayerNames.emplace_back(layerName); }

◆ BackendSelectionHint()

void BackendSelectionHint ( Optional< BackendId > backend )
inline final virtual

Provide a hint for the optimizer as to which backend to prefer for this layer.

Implements IConnectableLayer.

Definition at line 330 of file Layer.hpp.

Referenced by TEST_SUITE().

{
    m_BackendHint = backend;
}
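
As a hedged illustration (an assumption for this page, not code from the ArmNN sources), the hint can be set on any IConnectableLayer before the network is optimized; passing EmptyOptional() clears a previous hint. The "GpuAcc" backend id and helper names are assumed for the example:

#include <armnn/INetwork.hpp>
#include <armnn/Optional.hpp>
#include <armnn/BackendId.hpp>

// Sketch: prefer the GpuAcc backend for one layer, leave the rest to the optimizer.
void PreferGpu(armnn::IConnectableLayer* layer)
{
    layer->BackendSelectionHint(armnn::Optional<armnn::BackendId>(armnn::BackendId("GpuAcc")));
}

// Sketch: remove a previously set preference.
void ClearBackendHint(armnn::IConnectableLayer* layer)
{
    layer->BackendSelectionHint(armnn::EmptyOptional()); // no preference
}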

◆ BeginInputSlots()

◆ BeginOutputSlots()

◆ Clone()

◆ CloneBase()

LayerType * CloneBase ( Graph & graph,
Params &&...  params 
) const
protected

Definition at line 14 of file LayerCloneBase.hpp.

References Graph::AddLayer(), Layer::GetBackendHint(), Layer::GetBackendId(), Layer::GetGuid(), and Layer::m_ShapeInferenceMethod.

{
    LayerType* const layer = graph.AddLayer<LayerType>(std::forward<Params>(params)...);

    layer->BackendSelectionHint(GetBackendHint());
    layer->SetBackendId(GetBackendId());
    layer->SetGuid(GetGuid());
    layer->SetShapeInferenceMethod(m_ShapeInferenceMethod);

    return layer;
}
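
A sketch of the typical Clone() override in a derived layer (modelled on the ArmNN layer sources; the ActivationLayer specifics are assumed here):

// CloneBase adds a new ActivationLayer to 'graph' through Graph::AddLayer,
// forwarding the constructor arguments, then copies the backend hint,
// backend id, guid and shape inference method from this layer.
ActivationLayer* ActivationLayer::Clone(Graph& graph) const
{
    return CloneBase<ActivationLayer>(graph, m_Param, GetName());
}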

◆ CollectQueueDescriptorInputs()

void CollectQueueDescriptorInputs ( QueueDescriptor & descriptor,
WorkloadInfo & info 
) const
inline protected

Definition at line 358 of file Layer.hpp.

References QueueDescriptor::m_Inputs, and WorkloadInfo::m_InputTensorInfos.

{
    WorkloadDataCollector dataCollector(descriptor.m_Inputs, info.m_InputTensorInfos);
    CollectWorkloadInputs(dataCollector);
}

◆ CollectQueueDescriptorOutputs()

void CollectQueueDescriptorOutputs ( QueueDescriptor & descriptor,
WorkloadInfo & info 
) const
inline protected

Definition at line 365 of file Layer.hpp.

References QueueDescriptor::m_Outputs, and WorkloadInfo::m_OutputTensorInfos.

{
    WorkloadDataCollector dataCollector(descriptor.m_Outputs, info.m_OutputTensorInfos);
    CollectWorkloadOutputs(dataCollector);
}

◆ CreateTensorHandles()

void CreateTensorHandles ( const TensorHandleFactoryRegistry & registry,
const IWorkloadFactory & workloadFactory, 
const bool  IsMemoryManaged = true 
)
virtual

Reimplemented in ConcatLayer, OutputLayer, and SplitterLayer.

Definition at line 250 of file Layer.cpp.

References ARMNN_ASSERT, OutputHandler::CreateTensorHandles(), TensorHandleFactoryRegistry::GetFactory(), Layer::GetNumOutputSlots(), Layer::GetOutputHandler(), Layer::GetOutputSlot(), OutputSlot::GetTensorHandleFactoryId(), and ITensorHandleFactory::LegacyFactoryId.

{
    for (unsigned int idx=0; idx < GetNumOutputSlots(); idx++)
    {
        OutputSlot& slot = GetOutputSlot(idx);
        ITensorHandleFactory::FactoryId factoryId = slot.GetTensorHandleFactoryId();

        OutputHandler& handler = GetOutputHandler(idx);
        if (factoryId == ITensorHandleFactory::LegacyFactoryId)
        {
            handler.CreateTensorHandles(workloadFactory, IsMemoryManaged);
        }
        else
        {
            ITensorHandleFactory* handleFactory;
            handleFactory = registry.GetFactory(factoryId);
            ARMNN_ASSERT(handleFactory);
            handler.CreateTensorHandles(*handleFactory, IsMemoryManaged);
        }
    }
}

◆ CreateWorkload()

◆ EndInputSlots()

◆ EndOutputSlots()

std::vector<OutputSlot>::iterator EndOutputSlots ( )
inline

◆ ExecuteStrategy()

void ExecuteStrategy ( IStrategy & strategy ) const
override virtual

Apply a visitor to this layer.

Implements IConnectableLayer.

Reimplemented in BindableLayer, QLstmLayer, QuantizedLstmLayer, LayerWithParameters< Parameters >, LayerWithParameters< FakeQuantizationDescriptor >, LayerWithParameters< SoftmaxDescriptor >, LayerWithParameters< ReduceDescriptor >, LayerWithParameters< LogSoftmaxDescriptor >, LayerWithParameters< PreCompiledDescriptor >, LayerWithParameters< LstmDescriptor >, LayerWithParameters< BatchToSpaceNdDescriptor >, LayerWithParameters< PermuteDescriptor >, LayerWithParameters< SpaceToBatchNdDescriptor >, LayerWithParameters< DepthToSpaceDescriptor >, LayerWithParameters< ReshapeDescriptor >, LayerWithParameters< ElementwiseUnaryDescriptor >, LayerWithParameters< GatherDescriptor >, LayerWithParameters< SpaceToDepthDescriptor >, LayerWithParameters< OriginsDescriptor >, LayerWithParameters< ViewsDescriptor >, LayerWithParameters< Pooling2dDescriptor >, LayerWithParameters< Convolution2dDescriptor >, LayerWithParameters< ActivationDescriptor >, LayerWithParameters< StandInDescriptor >, LayerWithParameters< TransposeDescriptor >, LayerWithParameters< StackDescriptor >, LayerWithParameters< MeanDescriptor >, LayerWithParameters< ComparisonDescriptor >, LayerWithParameters< InstanceNormalizationDescriptor >, LayerWithParameters< TransposeConvolution2dDescriptor >, LayerWithParameters< BatchNormalizationDescriptor >, LayerWithParameters< ArgMinMaxDescriptor >, LayerWithParameters< LogicalBinaryDescriptor >, LayerWithParameters< DetectionPostProcessDescriptor >, LayerWithParameters< PadDescriptor >, LayerWithParameters< L2NormalizationDescriptor >, LayerWithParameters< FillDescriptor >, LayerWithParameters< SliceDescriptor >, LayerWithParameters< DepthwiseConvolution2dDescriptor >, LayerWithParameters< QLstmDescriptor >, LayerWithParameters< NormalizationDescriptor >, LayerWithParameters< FullyConnectedDescriptor >, LayerWithParameters< StridedSliceDescriptor >, LayerWithParameters< ResizeDescriptor >, LstmLayer, UnidirectionalSequenceLstmLayer, FullyConnectedLayer, Convolution2dLayer, DepthwiseConvolution2dLayer, TransposeConvolution2dLayer, BatchNormalizationLayer, ConstantLayer, DetectionPostProcessLayer, ShapeLayer, PreCompiledLayer, FakeQuantizationLayer, MemCopyLayer, MemImportLayer, ElementwiseBaseLayer, and RankLayer.

Definition at line 478 of file Layer.cpp.

References IStrategy::ExecuteStrategy(), and Layer::GetName().

{
    strategy.ExecuteStrategy(this, BaseDescriptor(), {}, GetName());
}

◆ GetAdditionalInformation()

std::shared_ptr<T> GetAdditionalInformation ( ) const
inline

Definition at line 342 of file Layer.hpp.

Referenced by NeonBackend::OptimizeSubgraphView(), and ClBackend::OptimizeSubgraphView().

{
    return std::static_pointer_cast<T>(m_AdditionalInfoObject);
}
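
As a hedged sketch: a fused-activation descriptor attached to a layer as additional information can be queried like this, mirroring the use in SerializeLayerParameters below (the helper function itself is an assumption):

#include <Layer.hpp>
#include <armnn/Descriptors.hpp>
#include <memory>

// Sketch: check whether an ActivationDescriptor has been attached to the layer.
bool HasFusedActivation(const armnn::Layer& layer)
{
    std::shared_ptr<armnn::ActivationDescriptor> activation =
        layer.GetAdditionalInformation<armnn::ActivationDescriptor>();
    return activation != nullptr;
}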

◆ GetBackendHint()

Optional<BackendId> GetBackendHint ( ) const
inline

Definition at line 334 of file Layer.hpp.

Referenced by Layer::CloneBase().

{ return m_BackendHint; }

◆ GetBackendId()

◆ GetConstantTensorsByRef()

virtual ConstantTensors GetConstantTensorsByRef ( )
inline protected virtual

Reimplemented in QLstmLayer, QuantizedLstmLayer, Convolution2dLayer, LstmLayer, UnidirectionalSequenceLstmLayer, DepthwiseConvolution2dLayer, FullyConnectedLayer, TransposeConvolution2dLayer, BatchNormalizationLayer, ConstantLayer, and DetectionPostProcessLayer.

Definition at line 394 of file Layer.hpp.

{ return ConstantTensors(); };

◆ GetDataType()

DataType GetDataType ( ) const

Definition at line 284 of file Layer.cpp.

References InputSlot::GetConnection(), TensorInfo::GetDataType(), Layer::GetInputSlot(), Layer::GetNumInputSlots(), Layer::GetOutputSlot(), IOutputSlot::GetTensorInfo(), and OutputSlot::GetTensorInfo().

Referenced by ConvertFp32NetworkToFp16Impl::Run(), FuseBatchNorm< ConvLayer, ArmnnType, T >::Run(), ConvertFp32NetworkToBf16Impl::Run(), IsFloat32Layer::Test(), IsFloat16Layer::Test(), IsBFloat16Layer::Test(), and TEST_SUITE().

{
    if (GetNumInputSlots() > 0) // Ignore the input layer.
    {
        return GetInputSlot(0).GetConnection()->GetTensorInfo().GetDataType();
    }
    return GetOutputSlot(0).GetTensorInfo().GetDataType();
}
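
A minimal sketch in the spirit of the IsFloat32Layer predicate listed above (the function name here is an assumption, not an ArmNN helper):

#include <Layer.hpp>

// Sketch: true if the layer operates on 32-bit floats.
bool IsFloat32(const armnn::Layer& layer)
{
    return layer.GetDataType() == armnn::DataType::Float32;
}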

◆ GetGuid()

◆ GetInputSlot() [1/2]

const InputSlot& GetInputSlot ( unsigned int  index) const
inline override virtual

Get a const input slot handle by slot index.

Implements IConnectableLayer.

Definition at line 316 of file Layer.hpp.

Referenced by armnn::ChainReduceLayers(), ConcatLayer::CreateWorkload(), DebugLayer::CreateWorkload(), armnn::optimizations::pad_fold::FoldPadIntoLayer2dImpl(), Layer::GetDataType(), Graph::Graph(), InputSlot::Insert(), armnn::InsertConvertBf16ToFp32LayersBefore(), armnn::InsertConvertFp16ToFp32LayersBefore(), armnn::InsertConvertFp32ToBf16LayersAfter(), armnn::InsertConvertFp32ToBf16LayersBefore(), armnn::InsertConvertFp32ToFp16LayersAfter(), armnn::InsertDebugLayerAfter(), NeonBackend::OptimizeSubgraphView(), ClBackend::OptimizeSubgraphView(), TransposeAsReshapeImpl::Run(), PermuteAsReshapeImpl::Run(), OptimizeConsecutiveReshapesImpl::Run(), OptimizeInverseConversionsImpl::Run(), PermuteAndBatchToSpaceAsDepthToSpaceImpl< PermuteType >::Run(), MovePermuteUpImpl::Run(), MoveTransposeUpImpl::Run(), OptimizeInversePermutesImpl< PermuteType >::Run(), AddBroadcastReshapeLayerImpl::Run(), FuseBatchNorm< ConvLayer, ArmnnType, T >::Run(), DepthwiseConvolution2dLayer::SerializeLayerParameters(), Convolution2dLayer::SerializeLayerParameters(), TEST_SUITE(), ElementwiseBaseLayer::ValidateTensorShapesFromInputs(), QuantizeLayer::ValidateTensorShapesFromInputs(), ActivationLayer::ValidateTensorShapesFromInputs(), ConvertFp32ToFp16Layer::ValidateTensorShapesFromInputs(), ReduceLayer::ValidateTensorShapesFromInputs(), FillLayer::ValidateTensorShapesFromInputs(), MemCopyLayer::ValidateTensorShapesFromInputs(), BatchToSpaceNdLayer::ValidateTensorShapesFromInputs(), CastLayer::ValidateTensorShapesFromInputs(), L2NormalizationLayer::ValidateTensorShapesFromInputs(), DebugLayer::ValidateTensorShapesFromInputs(), InstanceNormalizationLayer::ValidateTensorShapesFromInputs(), MemImportLayer::ValidateTensorShapesFromInputs(), ConvertFp32ToBf16Layer::ValidateTensorShapesFromInputs(), AbsLayer::ValidateTensorShapesFromInputs(), NormalizationLayer::ValidateTensorShapesFromInputs(), Pooling2dLayer::ValidateTensorShapesFromInputs(), SoftmaxLayer::ValidateTensorShapesFromInputs(), DequantizeLayer::ValidateTensorShapesFromInputs(), StackLayer::ValidateTensorShapesFromInputs(), MergeLayer::ValidateTensorShapesFromInputs(), ResizeLayer::ValidateTensorShapesFromInputs(), RsqrtLayer::ValidateTensorShapesFromInputs(), ShapeLayer::ValidateTensorShapesFromInputs(), ConvertBf16ToFp32Layer::ValidateTensorShapesFromInputs(), SliceLayer::ValidateTensorShapesFromInputs(), FakeQuantizationLayer::ValidateTensorShapesFromInputs(), ConvertFp16ToFp32Layer::ValidateTensorShapesFromInputs(), FloorLayer::ValidateTensorShapesFromInputs(), SwitchLayer::ValidateTensorShapesFromInputs(), TransposeLayer::ValidateTensorShapesFromInputs(), GatherLayer::ValidateTensorShapesFromInputs(), MeanLayer::ValidateTensorShapesFromInputs(), LogSoftmaxLayer::ValidateTensorShapesFromInputs(), PadLayer::ValidateTensorShapesFromInputs(), ReshapeLayer::ValidateTensorShapesFromInputs(), PermuteLayer::ValidateTensorShapesFromInputs(), StridedSliceLayer::ValidateTensorShapesFromInputs(), ArgMinMaxLayer::ValidateTensorShapesFromInputs(), ElementwiseUnaryLayer::ValidateTensorShapesFromInputs(), PreluLayer::ValidateTensorShapesFromInputs(), SpaceToDepthLayer::ValidateTensorShapesFromInputs(), LogicalBinaryLayer::ValidateTensorShapesFromInputs(), DepthToSpaceLayer::ValidateTensorShapesFromInputs(), TransposeConvolution2dLayer::ValidateTensorShapesFromInputs(), ComparisonLayer::ValidateTensorShapesFromInputs(), DepthwiseConvolution2dLayer::ValidateTensorShapesFromInputs(), SpaceToBatchNdLayer::ValidateTensorShapesFromInputs(), 
Convolution2dLayer::ValidateTensorShapesFromInputs(), FullyConnectedLayer::ValidateTensorShapesFromInputs(), ConcatLayer::ValidateTensorShapesFromInputs(), LstmLayer::ValidateTensorShapesFromInputs(), UnidirectionalSequenceLstmLayer::ValidateTensorShapesFromInputs(), BatchNormalizationLayer::ValidateTensorShapesFromInputs(), OutputLayer::ValidateTensorShapesFromInputs(), QuantizedLstmLayer::ValidateTensorShapesFromInputs(), QLstmLayer::ValidateTensorShapesFromInputs(), and Layer::VerifyLayerConnections().

{ return m_InputSlots.at(index); }

◆ GetInputSlot() [2/2]

InputSlot& GetInputSlot ( unsigned int  index)
inline override virtual

Get the input slot handle by slot index.

Implements IConnectableLayer.

Definition at line 317 of file Layer.hpp.

{ return m_InputSlots.at(index); }

◆ GetInputSlots()

◆ GetName()

const char* GetName ( ) const
inline override virtual

Returns the name of the layer.

Implements IConnectableLayer.

Definition at line 311 of file Layer.hpp.

Referenced by RankLayer::Accept(), AdditionLayer::Accept(), QuantizeLayer::Accept(), DivisionLayer::Accept(), MaximumLayer::Accept(), SubtractionLayer::Accept(), MinimumLayer::Accept(), MultiplicationLayer::Accept(), ActivationLayer::Accept(), FillLayer::Accept(), ReduceLayer::Accept(), SwitchLayer::Accept(), DequantizeLayer::Accept(), NormalizationLayer::Accept(), AbsLayer::Accept(), FloorLayer::Accept(), InputLayer::Accept(), RsqrtLayer::Accept(), InstanceNormalizationLayer::Accept(), SoftmaxLayer::Accept(), L2NormalizationLayer::Accept(), LogSoftmaxLayer::Accept(), MeanLayer::Accept(), GatherLayer::Accept(), MergeLayer::Accept(), StackLayer::Accept(), StridedSliceLayer::Accept(), ArgMinMaxLayer::Accept(), DetectionPostProcessLayer::Accept(), ElementwiseUnaryLayer::Accept(), Pooling2dLayer::Accept(), ResizeLayer::Accept(), BatchToSpaceNdLayer::Accept(), SliceLayer::Accept(), LogicalBinaryLayer::Accept(), DepthToSpaceLayer::Accept(), ComparisonLayer::Accept(), PadLayer::Accept(), PreluLayer::Accept(), SpaceToBatchNdLayer::Accept(), SpaceToDepthLayer::Accept(), StandInLayer::Accept(), ConstantLayer::Accept(), BatchNormalizationLayer::Accept(), TransposeConvolution2dLayer::Accept(), OutputLayer::Accept(), DepthwiseConvolution2dLayer::Accept(), Convolution2dLayer::Accept(), SplitterLayer::Accept(), FullyConnectedLayer::Accept(), LstmLayer::Accept(), ConcatLayer::Accept(), ReshapeLayer::Accept(), TransposeLayer::Accept(), PermuteLayer::Accept(), QuantizedLstmLayer::Accept(), QLstmLayer::Accept(), Graph::AddCompatibilityLayers(), RankLayer::Clone(), QuantizeLayer::Clone(), ActivationLayer::Clone(), FillLayer::Clone(), ConvertFp32ToFp16Layer::Clone(), StackLayer::Clone(), CastLayer::Clone(), StridedSliceLayer::Clone(), SwitchLayer::Clone(), MapLayer::Clone(), UnmapLayer::Clone(), DequantizeLayer::Clone(), MemCopyLayer::Clone(), MemImportLayer::Clone(), MergeLayer::Clone(), NormalizationLayer::Clone(), Pooling2dLayer::Clone(), AbsLayer::Clone(), FakeQuantizationLayer::Clone(), ConvertBf16ToFp32Layer::Clone(), ConvertFp16ToFp32Layer::Clone(), FloorLayer::Clone(), ConvertFp32ToBf16Layer::Clone(), ReduceLayer::Clone(), SliceLayer::Clone(), ResizeLayer::Clone(), RsqrtLayer::Clone(), InputLayer::Clone(), ShapeLayer::Clone(), BatchToSpaceNdLayer::Clone(), AdditionLayer::Clone(), SoftmaxLayer::Clone(), InstanceNormalizationLayer::Clone(), L2NormalizationLayer::Clone(), DebugLayer::Clone(), LogicalBinaryLayer::Clone(), DepthToSpaceLayer::Clone(), StandInLayer::Clone(), GatherLayer::Clone(), LogSoftmaxLayer::Clone(), MaximumLayer::Clone(), ArgMinMaxLayer::Clone(), MeanLayer::Clone(), MinimumLayer::Clone(), MultiplicationLayer::Clone(), ComparisonLayer::Clone(), DivisionLayer::Clone(), PadLayer::Clone(), SubtractionLayer::Clone(), ElementwiseUnaryLayer::Clone(), PreluLayer::Clone(), TransposeLayer::Clone(), SpaceToBatchNdLayer::Clone(), SpaceToDepthLayer::Clone(), ConstantLayer::Clone(), PermuteLayer::Clone(), ReshapeLayer::Clone(), DetectionPostProcessLayer::Clone(), PreCompiledLayer::Clone(), TransposeConvolution2dLayer::Clone(), DepthwiseConvolution2dLayer::Clone(), Convolution2dLayer::Clone(), SplitterLayer::Clone(), FullyConnectedLayer::Clone(), UnidirectionalSequenceLstmLayer::Clone(), ConcatLayer::Clone(), LstmLayer::Clone(), BatchNormalizationLayer::Clone(), OutputLayer::Clone(), QuantizedLstmLayer::Clone(), QLstmLayer::Clone(), RankLayer::ExecuteStrategy(), ElementwiseBaseLayer::ExecuteStrategy(), ShapeLayer::ExecuteStrategy(), DetectionPostProcessLayer::ExecuteStrategy(), 
BatchNormalizationLayer::ExecuteStrategy(), ConstantLayer::ExecuteStrategy(), TransposeConvolution2dLayer::ExecuteStrategy(), DepthwiseConvolution2dLayer::ExecuteStrategy(), Convolution2dLayer::ExecuteStrategy(), FullyConnectedLayer::ExecuteStrategy(), LstmLayer::ExecuteStrategy(), UnidirectionalSequenceLstmLayer::ExecuteStrategy(), LayerWithParameters< ResizeDescriptor >::ExecuteStrategy(), QuantizedLstmLayer::ExecuteStrategy(), QLstmLayer::ExecuteStrategy(), Layer::ExecuteStrategy(), armnn::optimizations::pad_fold::FoldPadIntoLayer2dImpl(), ElementwiseBaseLayer::InferOutputShapes(), armnn::InsertConvertBf16ToFp32LayersBefore(), armnn::InsertConvertFp16ToFp32LayersBefore(), armnn::InsertConvertFp32ToBf16LayersAfter(), armnn::InsertConvertFp32ToBf16LayersBefore(), armnn::InsertConvertFp32ToFp16LayersAfter(), NeonBackend::OptimizeSubgraphView(), ClBackend::OptimizeSubgraphView(), PermuteAsReshapeImpl::Run(), TransposeAsReshapeImpl::Run(), OptimizeConsecutiveReshapesImpl::Run(), PermuteAndBatchToSpaceAsDepthToSpaceImpl< PermuteType >::Run(), FuseBatchNorm< ConvLayer, ArmnnType, T >::Run(), TEST_SUITE(), and ErasedLayerNamesObservable::Update().

{ return m_LayerName.c_str(); }

◆ GetNameStr()

◆ GetNumInputSlots()

◆ GetNumOutputSlots()

◆ GetOutputHandler() [1/2]

◆ GetOutputHandler() [2/2]

OutputHandler& GetOutputHandler ( unsigned int  i = 0)
inline

Definition at line 230 of file Layer.hpp.

{
    return const_cast<OutputHandler&>(const_cast<const Layer*>(this)->GetOutputHandler(i));
}

◆ GetOutputSlot() [1/2]

const OutputSlot& GetOutputSlot ( unsigned int  index = 0) const
inline override virtual

Get the const output slot handle by slot index.

Implements IConnectableLayer.

Examples:
CustomMemoryAllocatorSample.cpp.

Definition at line 318 of file Layer.hpp.

Referenced by Graph::AddCompatibilityLayers(), NetworkImpl::AddFullyConnectedLayer(), armnn::AttemptBackendAssignment(), OutputSlot::CalculateIndexOnOwner(), armnn::ChainReduceLayers(), armnn::CheckScaleSetOnQuantizedType(), SplitterLayer::CreateTensorHandles(), ConcatLayer::CreateTensorHandles(), Layer::CreateTensorHandles(), CreateTestNetwork(), ConcatLayer::CreateWorkload(), SplitterLayer::CreateWorkload(), armnn::optimizations::pad_fold::FoldPadIntoLayer2dImpl(), Layer::GetDataType(), InputSlot::Insert(), armnn::InsertConvertBf16ToFp32LayersBefore(), armnn::InsertConvertFp16ToFp32LayersBefore(), armnn::InsertConvertFp32ToBf16LayersAfter(), armnn::InsertConvertFp32ToBf16LayersBefore(), armnn::InsertConvertFp32ToFp16LayersAfter(), armnn::InsertDebugLayerAfter(), IsConnected(), PermuteAsReshapeImpl::Run(), TransposeAsReshapeImpl::Run(), OptimizeConsecutiveReshapesImpl::Run(), OptimizeInverseConversionsImpl::Run(), PermuteAndBatchToSpaceAsDepthToSpaceImpl< PermuteType >::Run(), OptimizeInversePermutesImpl< PermuteType >::Run(), MovePermuteUpImpl::Run(), MoveTransposeUpImpl::Run(), FuseBatchNorm< ConvLayer, ArmnnType, T >::Run(), AddBroadcastReshapeLayerImpl::Run(), armnn::SelectTensorHandleStrategy(), TEST_SUITE(), Layer::ValidateAndCopyShape(), ElementwiseBaseLayer::ValidateTensorShapesFromInputs(), RankLayer::ValidateTensorShapesFromInputs(), QuantizeLayer::ValidateTensorShapesFromInputs(), ActivationLayer::ValidateTensorShapesFromInputs(), ConvertFp32ToFp16Layer::ValidateTensorShapesFromInputs(), ReduceLayer::ValidateTensorShapesFromInputs(), FillLayer::ValidateTensorShapesFromInputs(), ShapeLayer::ValidateTensorShapesFromInputs(), DebugLayer::ValidateTensorShapesFromInputs(), FloorLayer::ValidateTensorShapesFromInputs(), StackLayer::ValidateTensorShapesFromInputs(), SwitchLayer::ValidateTensorShapesFromInputs(), InstanceNormalizationLayer::ValidateTensorShapesFromInputs(), ConvertBf16ToFp32Layer::ValidateTensorShapesFromInputs(), ConvertFp16ToFp32Layer::ValidateTensorShapesFromInputs(), MemCopyLayer::ValidateTensorShapesFromInputs(), L2NormalizationLayer::ValidateTensorShapesFromInputs(), MemImportLayer::ValidateTensorShapesFromInputs(), BatchToSpaceNdLayer::ValidateTensorShapesFromInputs(), Pooling2dLayer::ValidateTensorShapesFromInputs(), SoftmaxLayer::ValidateTensorShapesFromInputs(), CastLayer::ValidateTensorShapesFromInputs(), ConvertFp32ToBf16Layer::ValidateTensorShapesFromInputs(), DequantizeLayer::ValidateTensorShapesFromInputs(), ResizeLayer::ValidateTensorShapesFromInputs(), RsqrtLayer::ValidateTensorShapesFromInputs(), MergeLayer::ValidateTensorShapesFromInputs(), AbsLayer::ValidateTensorShapesFromInputs(), SliceLayer::ValidateTensorShapesFromInputs(), FakeQuantizationLayer::ValidateTensorShapesFromInputs(), NormalizationLayer::ValidateTensorShapesFromInputs(), TransposeLayer::ValidateTensorShapesFromInputs(), GatherLayer::ValidateTensorShapesFromInputs(), LogSoftmaxLayer::ValidateTensorShapesFromInputs(), MeanLayer::ValidateTensorShapesFromInputs(), PadLayer::ValidateTensorShapesFromInputs(), PermuteLayer::ValidateTensorShapesFromInputs(), ConstantLayer::ValidateTensorShapesFromInputs(), ReshapeLayer::ValidateTensorShapesFromInputs(), DetectionPostProcessLayer::ValidateTensorShapesFromInputs(), ArgMinMaxLayer::ValidateTensorShapesFromInputs(), ElementwiseUnaryLayer::ValidateTensorShapesFromInputs(), StridedSliceLayer::ValidateTensorShapesFromInputs(), LogicalBinaryLayer::ValidateTensorShapesFromInputs(), PreluLayer::ValidateTensorShapesFromInputs(), 
DepthwiseConvolution2dLayer::ValidateTensorShapesFromInputs(), SpaceToBatchNdLayer::ValidateTensorShapesFromInputs(), TransposeConvolution2dLayer::ValidateTensorShapesFromInputs(), DepthToSpaceLayer::ValidateTensorShapesFromInputs(), SpaceToDepthLayer::ValidateTensorShapesFromInputs(), ComparisonLayer::ValidateTensorShapesFromInputs(), Convolution2dLayer::ValidateTensorShapesFromInputs(), SplitterLayer::ValidateTensorShapesFromInputs(), FullyConnectedLayer::ValidateTensorShapesFromInputs(), UnidirectionalSequenceLstmLayer::ValidateTensorShapesFromInputs(), LstmLayer::ValidateTensorShapesFromInputs(), ConcatLayer::ValidateTensorShapesFromInputs(), BatchNormalizationLayer::ValidateTensorShapesFromInputs(), QuantizedLstmLayer::ValidateTensorShapesFromInputs(), and QLstmLayer::ValidateTensorShapesFromInputs().

{ return m_OutputSlots.at(index); }

◆ GetOutputSlot() [2/2]

OutputSlot& GetOutputSlot ( unsigned int  index = 0)
inline override virtual

Get the output slot handle by slot index.

Implements IConnectableLayer.

Definition at line 319 of file Layer.hpp.

{ return m_OutputSlots.at(index); }

◆ GetOutputSlots()

const std::vector<OutputSlot>& GetOutputSlots ( ) const
inline

Definition at line 238 of file Layer.hpp.

Referenced by Graph::AddCompatibilityLayers(), LoadedNetwork::CreateWorkingMemHandle(), armnn::ForEachLayerOutput(), and Graph::Print().

{ return m_OutputSlots; }

◆ GetPriority()

LayerPriority GetPriority ( ) const

Definition at line 299 of file Layer.cpp.

References Layer::GetInputSlots(), OutputSlot::GetOwningLayer(), Layer::GetPriority(), Layer::GetType(), armnn::Input, and armnn::Output.

Referenced by Layer::GetPriority(), and SquashEqualSiblingsImpl< Comparable >::Run().

{
    constexpr LayerPriority inputPrio = std::numeric_limits<LayerPriority>::lowest();
    constexpr LayerPriority outputPrio = std::numeric_limits<LayerPriority>::max();

    if (GetType() == LayerType::Input)
    {
        m_Priority = inputPrio;
    }
    else if (GetType() == LayerType::Output)
    {
        m_Priority = outputPrio;
    }
    else if (m_Priority == 0)
    {
        if (m_Visiting)
        {
            throw GraphValidationException("Graph has circular dependencies: cannot walk");
        }

        auto maxPrio = [](const LayerPriority prio, const InputSlot& slot) -> LayerPriority
        {
            const OutputSlot *outputSlot = slot.GetConnectedOutputSlot();
            if (outputSlot)
            {
                const Layer& input = outputSlot->GetOwningLayer();
                return std::max(prio, input.GetPriority());
            }
            else
            {
                // unconnected input slot
                return prio;
            }
        };

        m_Visiting = true;
        LayerPriority parentPrio = std::accumulate(GetInputSlots().cbegin(), GetInputSlots().cend(), 0U, maxPrio);
        m_Visiting = false;

        if (parentPrio >= outputPrio)
        {
            throw GraphValidationException("Graph has too many edges");
        }

        m_Priority = parentPrio + 1U;
    }

    return m_Priority;
}
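
As a hedged sketch (an assumption, not the Graph implementation): input layers receive the lowest priority and priorities grow towards the outputs, so sorting ascending by priority yields a topological order in which every layer follows the layers feeding it.

#include <Layer.hpp>
#include <algorithm>
#include <vector>

// Sketch: order a set of layers so producers come before consumers.
void SortTopologically(std::vector<armnn::Layer*>& layers)
{
    for (armnn::Layer* layer : layers)
    {
        layer->ResetPriority(); // clear any cached priorities before recomputing
    }
    std::sort(layers.begin(), layers.end(),
              [](const armnn::Layer* a, const armnn::Layer* b)
              {
                  return a->GetPriority() < b->GetPriority();
              });
}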

◆ GetRelatedLayerNames()

const std::list<std::string>& GetRelatedLayerNames ( )
inline

Definition at line 326 of file Layer.hpp.

Referenced by ErasedLayerNamesObservable::Update().

{ return m_RelatedLayerNames; }

◆ GetShapeInferenceMethod()

ShapeInferenceMethod GetShapeInferenceMethod ( ) const
inline

Definition at line 235 of file Layer.hpp.

{ return m_ShapeInferenceMethod; };

◆ GetType()

LayerType GetType ( ) const
inline override virtual

Returns the armnn::LayerType of this layer.

Implements IConnectableLayer.

Definition at line 265 of file Layer.hpp.

Referenced by armnn::ApplyBackendOptimizations(), armnn::AttemptBackendAssignment(), armnn::CalculateEdgeStrategy(), armnn::CalculateSlotOption(), armnn::CalculateSlotOptionForInput(), armnn::CheckScaleSetOnQuantizedType(), LoadedNetwork::CreateWorkingMemHandle(), ConcatLayer::CreateWorkload(), SplitterLayer::CreateWorkload(), LoadedNetwork::EnqueueWorkload(), LoadedNetwork::FreeWorkingMemory(), Layer::GetPriority(), ElementwiseBaseLayer::InferOutputShapes(), Layer::InferOutputShapes(), ReshapeLayer::IsEqual(), TransposeLayer::IsEqual(), PermuteLayer::IsEqual(), TransposeLayer::IsInverse(), PermuteLayer::IsInverse(), IsLayerOfType(), NeonBackend::OptimizeSubgraphView(), ClBackend::OptimizeSubgraphView(), MockBackend::OptimizeSubgraphView(), armnn::ReturnWithError(), ConvertFp32NetworkToFp16Impl::Run(), OptimizeConsecutiveReshapesImpl::Run(), AddDebugImpl::Run(), PermuteAndBatchToSpaceAsDepthToSpaceImpl< PermuteType >::Run(), OptimizeInverseConversionsImpl::Run(), RedirectMembersToConstantInputsImpl::Run(), MoveTransposeUpImpl::Run(), MovePermuteUpImpl::Run(), AddBroadcastReshapeLayerImpl::Run(), FuseBatchNorm< ConvLayer, ArmnnType, T >::Run(), OptimizeForTypeImpl< BaseType, OptimizeForConnectionImpl< BaseType, ChildType, Wrapped > >::Run(), ConvertFp32NetworkToBf16Impl::Run(), armnn::SelectTensorHandleStrategy(), TEST_SUITE(), ElementwiseBaseLayer::ValidateTensorShapesFromInputs(), ElementwiseUnaryLayer::ValidateTensorShapesFromInputs(), and Layer::VerifyLayerConnections().

{ return m_Type; }

◆ InferOutputShapes()

std::vector< TensorShape > InferOutputShapes ( const std::vector< TensorShape > &  inputShapes) const
override virtual

Infer the shape of the output(s) based on the provided input shape(s)

Implements IConnectableLayer.

Reimplemented in QLstmLayer, QuantizedLstmLayer, ConcatLayer, LstmLayer, UnidirectionalSequenceLstmLayer, FullyConnectedLayer, SplitterLayer, Convolution2dLayer, DepthwiseConvolution2dLayer, TransposeConvolution2dLayer, ConstantLayer, PermuteLayer, ReshapeLayer, PadLayer, StandInLayer, BatchToSpaceNdLayer, Pooling2dLayer, ResizeLayer, ShapeLayer, SliceLayer, StackLayer, TransposeLayer, MergeLayer, ComparisonLayer, DepthToSpaceLayer, LogicalBinaryLayer, PreluLayer, SpaceToBatchNdLayer, SpaceToDepthLayer, ArgMinMaxLayer, ElementwiseUnaryLayer, StridedSliceLayer, and ElementwiseBaseLayer.

Definition at line 368 of file Layer.cpp.

References ARMNN_ASSERT, CHECK_LOCATION, armnn::GetLayerTypeAsCString(), Layer::GetNameStr(), Layer::GetNumInputSlots(), Layer::GetNumOutputSlots(), and Layer::GetType().

Referenced by QuantizeLayer::ValidateTensorShapesFromInputs(), ActivationLayer::ValidateTensorShapesFromInputs(), ConvertFp32ToFp16Layer::ValidateTensorShapesFromInputs(), FillLayer::ValidateTensorShapesFromInputs(), DequantizeLayer::ValidateTensorShapesFromInputs(), NormalizationLayer::ValidateTensorShapesFromInputs(), MemImportLayer::ValidateTensorShapesFromInputs(), MemCopyLayer::ValidateTensorShapesFromInputs(), SoftmaxLayer::ValidateTensorShapesFromInputs(), L2NormalizationLayer::ValidateTensorShapesFromInputs(), InstanceNormalizationLayer::ValidateTensorShapesFromInputs(), FloorLayer::ValidateTensorShapesFromInputs(), AbsLayer::ValidateTensorShapesFromInputs(), FakeQuantizationLayer::ValidateTensorShapesFromInputs(), ConvertBf16ToFp32Layer::ValidateTensorShapesFromInputs(), CastLayer::ValidateTensorShapesFromInputs(), DebugLayer::ValidateTensorShapesFromInputs(), ConvertFp32ToBf16Layer::ValidateTensorShapesFromInputs(), ConvertFp16ToFp32Layer::ValidateTensorShapesFromInputs(), SwitchLayer::ValidateTensorShapesFromInputs(), RsqrtLayer::ValidateTensorShapesFromInputs(), LogSoftmaxLayer::ValidateTensorShapesFromInputs(), and BatchNormalizationLayer::ValidateTensorShapesFromInputs().

{
    ARMNN_ASSERT(GetNumInputSlots() != 0);
    ARMNN_ASSERT(GetNumOutputSlots() != 0);

    // By default we return what we got, meaning the output shape(s) are the same as the input(s).
    // This only works if the number of inputs and outputs are the same. Since we are in the Layer
    // base class, this means the implementation needs to be overridden in the specific layers for
    // the other cases. So the missing implementation justifies the UnimplementedException.

    if (GetNumInputSlots() != GetNumOutputSlots())
    {
        throw UnimplementedException(
            fmt::format("Default implementation for InferOutputShapes can only be used for "
                        "layers with the same number of input and output slots. This doesn't "
                        "hold for {0} layer {1} (#inputs={2} #outputs={3}) {4}",
                        GetLayerTypeAsCString(GetType()),
                        GetNameStr(),
                        GetNumInputSlots(),
                        GetNumOutputSlots(),
                        CHECK_LOCATION().AsString()));
    }
    return inputShapes;
}
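
A hedged sketch of the call pattern used by the ValidateTensorShapesFromInputs overrides listed above (the wrapper function is an assumption; input slot 0 must be connected):

#include <Layer.hpp>
#include <armnn/Tensor.hpp>
#include <vector>

// Sketch: infer the output shape of a one-input layer from its connected input.
std::vector<armnn::TensorShape> InferFromFirstInput(const armnn::Layer& layer)
{
    const armnn::TensorShape inputShape =
        layer.GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape();

    // The default implementation simply echoes the input shape back; layers that
    // change the shape override InferOutputShapes.
    return layer.InferOutputShapes({ inputShape });
}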

◆ IsOutputUnconnected()

bool IsOutputUnconnected ( )
inline

Definition at line 249 of file Layer.hpp.

Referenced by OptimizeForConnectionImpl< BaseType, ChildType, Wrapped >::Run(), and OptimizeForExclusiveConnectionImpl< BaseType, ChildType, Wrapped >::Run().

{
    unsigned int numConnections = 0;

    for (auto&& output : GetOutputSlots())
    {
        numConnections += output.GetNumConnections();
    }

    return (GetNumOutputSlots() > 0) && (numConnections == 0);
}

◆ OperateOnConstantTensors()

void OperateOnConstantTensors ( Op  op)
inline

Definition at line 298 of file Layer.hpp.

Referenced by Layer::ReleaseConstantData(), and ConvertConstants< Converter, Predicate >::Run().

{
    for (auto constant : GetConstantTensorsByRef())
    {
        if (constant.get())
        {
            op(constant);
        }
    }
};

◆ PrepInfoAndDesc()

WorkloadInfo PrepInfoAndDesc ( QueueDescriptor & descriptor ) const
inline protected

Helper function to reduce duplication in *LayerCreateWorkload.

Definition at line 381 of file Layer.hpp.

References armnn::info.

Referenced by ConvertFp32ToFp16Layer::CreateWorkload(), RankLayer::CreateWorkload(), UnmapLayer::CreateWorkload(), AdditionLayer::CreateWorkload(), ShapeLayer::CreateWorkload(), RsqrtLayer::CreateWorkload(), QuantizeLayer::CreateWorkload(), MemCopyLayer::CreateWorkload(), MemImportLayer::CreateWorkload(), DebugLayer::CreateWorkload(), AbsLayer::CreateWorkload(), DequantizeLayer::CreateWorkload(), ConvertBf16ToFp32Layer::CreateWorkload(), CastLayer::CreateWorkload(), ConvertFp16ToFp32Layer::CreateWorkload(), MapLayer::CreateWorkload(), FloorLayer::CreateWorkload(), SwitchLayer::CreateWorkload(), ConvertFp32ToBf16Layer::CreateWorkload(), PreluLayer::CreateWorkload(), SubtractionLayer::CreateWorkload(), DivisionLayer::CreateWorkload(), MinimumLayer::CreateWorkload(), MaximumLayer::CreateWorkload(), MultiplicationLayer::CreateWorkload(), ConstantLayer::CreateWorkload(), QuantizedLstmLayer::CreateWorkload(), and LayerWithParameters< ResizeDescriptor >::PrepInfoAndDesc().

{
    WorkloadInfo info;
    CollectQueueDescriptorInputs(descriptor, info);
    CollectQueueDescriptorOutputs(descriptor, info);
    return info;
}
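
A hedged sketch of the usual *Layer::CreateWorkload pattern, modelled on the CreateWorkload overrides referenced above (the FloorLayer/CreateFloor pairing is an assumption for illustration):

std::unique_ptr<armnn::IWorkload> FloorLayer::CreateWorkload(
    const armnn::IWorkloadFactory& factory) const
{
    armnn::FloorQueueDescriptor descriptor;
    SetAdditionalInfo(descriptor); // attach any additional-info object to the descriptor

    // PrepInfoAndDesc fills descriptor.m_Inputs/m_Outputs from the layer's slots
    // and returns the matching WorkloadInfo.
    return factory.CreateFloor(descriptor, PrepInfoAndDesc(descriptor));
}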

◆ ReleaseConstantData()

void ReleaseConstantData ( )
virtual

Reimplemented in ConstantLayer.

Definition at line 275 of file Layer.cpp.

References Layer::OperateOnConstantTensors().

Referenced by TEST_SUITE().

{
    // Now free up the static data.
    OperateOnConstantTensors([](std::shared_ptr<ConstTensorHandle>& handle)
    {
        handle.reset();
    });
}

◆ Reparent()

virtual void Reparent ( Graph & dest,
std::list< Layer *>::const_iterator  iterator 
)
pure virtual

◆ ResetPriority()

void ResetPriority ( ) const

Definition at line 293 of file Layer.cpp.

{
    m_Priority = 0;
    m_Visiting = false;
}

◆ SerializeLayerParameters()

void SerializeLayerParameters ( ParameterStringifyFunction & fn ) const
virtual

Helper to serialize the layer parameters to string.

(currently used in DotSerializer and company).

Reimplemented in Convolution2dLayer, DepthwiseConvolution2dLayer, LayerWithParameters< Parameters >, LayerWithParameters< FakeQuantizationDescriptor >, LayerWithParameters< SoftmaxDescriptor >, LayerWithParameters< ReduceDescriptor >, LayerWithParameters< LogSoftmaxDescriptor >, LayerWithParameters< PreCompiledDescriptor >, LayerWithParameters< LstmDescriptor >, LayerWithParameters< BatchToSpaceNdDescriptor >, LayerWithParameters< PermuteDescriptor >, LayerWithParameters< SpaceToBatchNdDescriptor >, LayerWithParameters< DepthToSpaceDescriptor >, LayerWithParameters< ReshapeDescriptor >, LayerWithParameters< ElementwiseUnaryDescriptor >, LayerWithParameters< GatherDescriptor >, LayerWithParameters< SpaceToDepthDescriptor >, LayerWithParameters< OriginsDescriptor >, LayerWithParameters< ViewsDescriptor >, LayerWithParameters< Pooling2dDescriptor >, LayerWithParameters< Convolution2dDescriptor >, LayerWithParameters< ActivationDescriptor >, LayerWithParameters< StandInDescriptor >, LayerWithParameters< TransposeDescriptor >, LayerWithParameters< StackDescriptor >, LayerWithParameters< MeanDescriptor >, LayerWithParameters< ComparisonDescriptor >, LayerWithParameters< InstanceNormalizationDescriptor >, LayerWithParameters< TransposeConvolution2dDescriptor >, LayerWithParameters< BatchNormalizationDescriptor >, LayerWithParameters< ArgMinMaxDescriptor >, LayerWithParameters< LogicalBinaryDescriptor >, LayerWithParameters< DetectionPostProcessDescriptor >, LayerWithParameters< PadDescriptor >, LayerWithParameters< L2NormalizationDescriptor >, LayerWithParameters< FillDescriptor >, LayerWithParameters< SliceDescriptor >, LayerWithParameters< DepthwiseConvolution2dDescriptor >, LayerWithParameters< QLstmDescriptor >, LayerWithParameters< NormalizationDescriptor >, LayerWithParameters< FullyConnectedDescriptor >, LayerWithParameters< StridedSliceDescriptor >, and LayerWithParameters< ResizeDescriptor >.

Definition at line 447 of file Layer.cpp.

References armnn::GetLayerTypeAsCString(), and StringifyLayerParameters< LayerParameter >::Serialize().

Referenced by LayerWithParameters< ResizeDescriptor >::SerializeLayerParameters().

{
    std::string guid = std::to_string(m_Guid);
    std::string layerType = GetLayerTypeAsCString(m_Type);
    std::string backendId = std::string(m_BackendId);
    if (!(guid.compare("") == 0) && !guid.empty())
    {
        fn("Guid", guid);
    }
    if (!(m_LayerName.compare("") == 0) && !m_LayerName.empty())
    {
        fn("LayerName", m_LayerName);
    }
    if (!(layerType.compare("") == 0) && !layerType.empty())
    {
        fn("LayerType", layerType);
    }
    if (!(backendId.compare("") == 0) && !backendId.empty())
    {
        fn("BackendID", backendId);
    }
    std::shared_ptr<ActivationDescriptor>
        activationDescPtr = GetAdditionalInformation<ActivationDescriptor>();

    if (activationDescPtr)
    {
        StringifyLayerParameters<ActivationDescriptor>::Serialize(fn, *activationDescPtr.get());
    }
}
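
A hedged sketch of dumping a layer's parameters as "name=value" lines. It assumes ParameterStringifyFunction is the std::function-style callback taking a name and a value string, as the implementation above suggests; the callback is passed by non-const reference, hence the named variable.

#include <Layer.hpp>
#include <iostream>
#include <string>

// Sketch: print every serialized parameter of 'layer' to stdout.
void PrintLayerParameters(const armnn::Layer& layer)
{
    armnn::ParameterStringifyFunction print =
        [](const std::string& name, const std::string& value)
        {
            std::cout << name << "=" << value << "\n";
        };
    layer.SerializeLayerParameters(print);
}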

◆ SetAdditionalInfo()

void SetAdditionalInfo ( QueueDescriptor & descriptor ) const
protected

Definition at line 245 of file Layer.cpp.

References QueueDescriptor::m_AdditionalInfoObject, and Layer::m_AdditionalInfoObject.

Referenced by ActivationLayer::CreateWorkload(), FillLayer::CreateWorkload(), RankLayer::CreateWorkload(), ConvertFp32ToFp16Layer::CreateWorkload(), ConcatLayer::CreateWorkload(), DebugLayer::CreateWorkload(), UnmapLayer::CreateWorkload(), StridedSliceLayer::CreateWorkload(), MemCopyLayer::CreateWorkload(), SplitterLayer::CreateWorkload(), MemImportLayer::CreateWorkload(), DequantizeLayer::CreateWorkload(), SoftmaxLayer::CreateWorkload(), SliceLayer::CreateWorkload(), NormalizationLayer::CreateWorkload(), RsqrtLayer::CreateWorkload(), Pooling2dLayer::CreateWorkload(), AbsLayer::CreateWorkload(), ConvertBf16ToFp32Layer::CreateWorkload(), QuantizeLayer::CreateWorkload(), FakeQuantizationLayer::CreateWorkload(), BatchToSpaceNdLayer::CreateWorkload(), ConvertFp16ToFp32Layer::CreateWorkload(), FloorLayer::CreateWorkload(), ReduceLayer::CreateWorkload(), ResizeLayer::CreateWorkload(), ConvertFp32ToBf16Layer::CreateWorkload(), ShapeLayer::CreateWorkload(), InstanceNormalizationLayer::CreateWorkload(), AdditionLayer::CreateWorkload(), CastLayer::CreateWorkload(), L2NormalizationLayer::CreateWorkload(), StackLayer::CreateWorkload(), MapLayer::CreateWorkload(), SwitchLayer::CreateWorkload(), TransposeLayer::CreateWorkload(), SpaceToBatchNdLayer::CreateWorkload(), ComparisonLayer::CreateWorkload(), MeanLayer::CreateWorkload(), SubtractionLayer::CreateWorkload(), SpaceToDepthLayer::CreateWorkload(), MinimumLayer::CreateWorkload(), LogSoftmaxLayer::CreateWorkload(), MaximumLayer::CreateWorkload(), DivisionLayer::CreateWorkload(), PreluLayer::CreateWorkload(), GatherLayer::CreateWorkload(), PadLayer::CreateWorkload(), ArgMinMaxLayer::CreateWorkload(), MultiplicationLayer::CreateWorkload(), DepthToSpaceLayer::CreateWorkload(), ReshapeLayer::CreateWorkload(), PermuteLayer::CreateWorkload(), ConstantLayer::CreateWorkload(), DetectionPostProcessLayer::CreateWorkload(), TransposeConvolution2dLayer::CreateWorkload(), DepthwiseConvolution2dLayer::CreateWorkload(), Convolution2dLayer::CreateWorkload(), PreCompiledLayer::CreateWorkload(), FullyConnectedLayer::CreateWorkload(), LstmLayer::CreateWorkload(), UnidirectionalSequenceLstmLayer::CreateWorkload(), BatchNormalizationLayer::CreateWorkload(), QuantizedLstmLayer::CreateWorkload(), and QLstmLayer::CreateWorkload().

{
    descriptor.m_AdditionalInfoObject = m_AdditionalInfoObject.get();
}

◆ SetAdditionalInfoForObject()

void SetAdditionalInfoForObject ( const AdditionalInfoObjectPtr & additionalInfo )
inline

Definition at line 347 of file Layer.hpp.

{
    m_AdditionalInfoObject = additionalInfo;
}

◆ SetBackendId()

void SetBackendId ( const BackendId & id )
inline

Definition at line 270 of file Layer.hpp.

References CreateWorkload().

Referenced by armnn::AttemptBackendAssignment(), armnn::InsertDebugLayerAfter(), MockBackend::OptimizeSubgraphView(), and TEST_SUITE().

{ m_BackendId = id; }

◆ SetGuid()

void SetGuid ( LayerGuid  guid)
inline

Definition at line 321 of file Layer.hpp.

{ m_Guid = guid; }

◆ SetShapeInferenceMethod()

void SetShapeInferenceMethod ( ShapeInferenceMethod  shapeInferenceMethod)
inline

Definition at line 336 of file Layer.hpp.

{
    m_ShapeInferenceMethod = shapeInferenceMethod;
}

◆ ValidateAndCopyShape()

void ValidateAndCopyShape ( const TensorShape & outputShape,
const TensorShape & inferredShape, 
const ShapeInferenceMethod  shapeInferenceMethod,
const std::string &  layerName,
const unsigned int  outputSlotIndex = 0 
)
protected

Definition at line 393 of file Layer.cpp.

References TensorShape::GetDimensionality(), TensorShape::GetDimensionSpecificity(), TensorShape::GetNumDimensions(), Layer::GetOutputSlot(), OutputSlot::GetTensorInfo(), armnn::info, OutputSlot::SetTensorInfo(), armnn::Specified, and armnn::ValidateOnly.

Referenced by ElementwiseBaseLayer::ValidateTensorShapesFromInputs(), RankLayer::ValidateTensorShapesFromInputs(), QuantizeLayer::ValidateTensorShapesFromInputs(), ActivationLayer::ValidateTensorShapesFromInputs(), ConvertFp32ToFp16Layer::ValidateTensorShapesFromInputs(), ReduceLayer::ValidateTensorShapesFromInputs(), DequantizeLayer::ValidateTensorShapesFromInputs(), MergeLayer::ValidateTensorShapesFromInputs(), NormalizationLayer::ValidateTensorShapesFromInputs(), ConvertBf16ToFp32Layer::ValidateTensorShapesFromInputs(), ConvertFp16ToFp32Layer::ValidateTensorShapesFromInputs(), SliceLayer::ValidateTensorShapesFromInputs(), Pooling2dLayer::ValidateTensorShapesFromInputs(), RsqrtLayer::ValidateTensorShapesFromInputs(), AbsLayer::ValidateTensorShapesFromInputs(), FakeQuantizationLayer::ValidateTensorShapesFromInputs(), ConvertFp32ToBf16Layer::ValidateTensorShapesFromInputs(), FloorLayer::ValidateTensorShapesFromInputs(), BatchToSpaceNdLayer::ValidateTensorShapesFromInputs(), ResizeLayer::ValidateTensorShapesFromInputs(), ShapeLayer::ValidateTensorShapesFromInputs(), CastLayer::ValidateTensorShapesFromInputs(), DebugLayer::ValidateTensorShapesFromInputs(), InstanceNormalizationLayer::ValidateTensorShapesFromInputs(), SoftmaxLayer::ValidateTensorShapesFromInputs(), L2NormalizationLayer::ValidateTensorShapesFromInputs(), StackLayer::ValidateTensorShapesFromInputs(), SwitchLayer::ValidateTensorShapesFromInputs(), MemCopyLayer::ValidateTensorShapesFromInputs(), MemImportLayer::ValidateTensorShapesFromInputs(), PadLayer::ValidateTensorShapesFromInputs(), GatherLayer::ValidateTensorShapesFromInputs(), LogSoftmaxLayer::ValidateTensorShapesFromInputs(), TransposeLayer::ValidateTensorShapesFromInputs(), MeanLayer::ValidateTensorShapesFromInputs(), PermuteLayer::ValidateTensorShapesFromInputs(), ReshapeLayer::ValidateTensorShapesFromInputs(), StridedSliceLayer::ValidateTensorShapesFromInputs(), DetectionPostProcessLayer::ValidateTensorShapesFromInputs(), ElementwiseUnaryLayer::ValidateTensorShapesFromInputs(), ArgMinMaxLayer::ValidateTensorShapesFromInputs(), TransposeConvolution2dLayer::ValidateTensorShapesFromInputs(), SpaceToDepthLayer::ValidateTensorShapesFromInputs(), SpaceToBatchNdLayer::ValidateTensorShapesFromInputs(), ComparisonLayer::ValidateTensorShapesFromInputs(), PreluLayer::ValidateTensorShapesFromInputs(), DepthToSpaceLayer::ValidateTensorShapesFromInputs(), LogicalBinaryLayer::ValidateTensorShapesFromInputs(), DepthwiseConvolution2dLayer::ValidateTensorShapesFromInputs(), Convolution2dLayer::ValidateTensorShapesFromInputs(), SplitterLayer::ValidateTensorShapesFromInputs(), FullyConnectedLayer::ValidateTensorShapesFromInputs(), LstmLayer::ValidateTensorShapesFromInputs(), UnidirectionalSequenceLstmLayer::ValidateTensorShapesFromInputs(), BatchNormalizationLayer::ValidateTensorShapesFromInputs(), QuantizedLstmLayer::ValidateTensorShapesFromInputs(), and QLstmLayer::ValidateTensorShapesFromInputs().

{
    if (shapeInferenceMethod == ShapeInferenceMethod::ValidateOnly)
    {
        ConditionalThrowIfNotEqual<LayerValidationException>(
            layerName + ": TensorShape set on OutputSlot[0] does not match the inferred shape.",
            outputShape,
            inferredShape);
        return;
    }

    if (outputShape.GetDimensionality() == Dimensionality::Specified)
    {
        for (unsigned int i = 0; i < outputShape.GetNumDimensions(); ++i)
        {
            if (outputShape.GetDimensionSpecificity(i) && outputShape[i] != inferredShape[i])
            {
                std::stringstream ss;
                ss << layerName << ": TensorShape set on OutputSlot[" << outputSlotIndex <<
                      "] does not match the inferred shape at dimension index [";
                ss << i << "] " << outputShape << " != " << inferredShape;
                throw LayerValidationException(ss.str());
            }
        }
    }

    TensorInfo info = GetOutputSlot(outputSlotIndex).GetTensorInfo();

    armnn::TensorInfo inferredTensorInfo(inferredShape,
                                         info.GetDataType(),
                                         info.GetQuantizationScale(),
                                         info.GetQuantizationOffset());

    GetOutputSlot(outputSlotIndex).SetTensorInfo(inferredTensorInfo);
}
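
For context, a minimal sketch of how a concrete layer might feed an inferred shape into this helper; the inferredShape variable and the "MyLayer" name string are illustrative assumptions, not taken from the ArmNN sources.

    // With ShapeInferenceMethod::ValidateOnly the call only checks that the
    // user-set output shape equals inferredShape and returns; with any other
    // method it also copies inferredShape into the output slot's TensorInfo,
    // preserving the existing data type and quantization parameters.
    const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
    ValidateAndCopyShape(outputShape, inferredShape, m_ShapeInferenceMethod, "MyLayer");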

◆ ValidateTensorShapesFromInputs()

virtual void ValidateTensorShapesFromInputs ( )
pure virtual

Validate all output shapes.

◆ VerifyLayerConnections()

void VerifyLayerConnections ( unsigned int expectedConnections, const CheckLocation & location ) const

Definition at line 349 of file Layer.cpp.

References ARMNN_ASSERT, CheckLocation::AsString(), Layer::GetInputSlot(), armnn::GetLayerTypeAsCString(), Layer::GetNameStr(), Layer::GetNumInputSlots(), and Layer::GetType().

Referenced by ElementwiseBaseLayer::ValidateTensorShapesFromInputs(), RankLayer::ValidateTensorShapesFromInputs(), QuantizeLayer::ValidateTensorShapesFromInputs(), ActivationLayer::ValidateTensorShapesFromInputs(), FillLayer::ValidateTensorShapesFromInputs(), ReduceLayer::ValidateTensorShapesFromInputs(), ConvertFp32ToFp16Layer::ValidateTensorShapesFromInputs(), AbsLayer::ValidateTensorShapesFromInputs(), DequantizeLayer::ValidateTensorShapesFromInputs(), MemCopyLayer::ValidateTensorShapesFromInputs(), MemImportLayer::ValidateTensorShapesFromInputs(), MergeLayer::ValidateTensorShapesFromInputs(), NormalizationLayer::ValidateTensorShapesFromInputs(), RsqrtLayer::ValidateTensorShapesFromInputs(), ResizeLayer::ValidateTensorShapesFromInputs(), ConvertBf16ToFp32Layer::ValidateTensorShapesFromInputs(), Pooling2dLayer::ValidateTensorShapesFromInputs(), FakeQuantizationLayer::ValidateTensorShapesFromInputs(), ConvertFp16ToFp32Layer::ValidateTensorShapesFromInputs(), BatchToSpaceNdLayer::ValidateTensorShapesFromInputs(), ConvertFp32ToBf16Layer::ValidateTensorShapesFromInputs(), FloorLayer::ValidateTensorShapesFromInputs(), CastLayer::ValidateTensorShapesFromInputs(), InstanceNormalizationLayer::ValidateTensorShapesFromInputs(), ShapeLayer::ValidateTensorShapesFromInputs(), SliceLayer::ValidateTensorShapesFromInputs(), SoftmaxLayer::ValidateTensorShapesFromInputs(), L2NormalizationLayer::ValidateTensorShapesFromInputs(), StackLayer::ValidateTensorShapesFromInputs(), SwitchLayer::ValidateTensorShapesFromInputs(), DebugLayer::ValidateTensorShapesFromInputs(), UnmapLayer::ValidateTensorShapesFromInputs(), MapLayer::ValidateTensorShapesFromInputs(), GatherLayer::ValidateTensorShapesFromInputs(), PadLayer::ValidateTensorShapesFromInputs(), TransposeLayer::ValidateTensorShapesFromInputs(), LogSoftmaxLayer::ValidateTensorShapesFromInputs(), MeanLayer::ValidateTensorShapesFromInputs(), ReshapeLayer::ValidateTensorShapesFromInputs(), PermuteLayer::ValidateTensorShapesFromInputs(), StridedSliceLayer::ValidateTensorShapesFromInputs(), ArgMinMaxLayer::ValidateTensorShapesFromInputs(), DetectionPostProcessLayer::ValidateTensorShapesFromInputs(), ElementwiseUnaryLayer::ValidateTensorShapesFromInputs(), DepthwiseConvolution2dLayer::ValidateTensorShapesFromInputs(), SpaceToBatchNdLayer::ValidateTensorShapesFromInputs(), TransposeConvolution2dLayer::ValidateTensorShapesFromInputs(), SpaceToDepthLayer::ValidateTensorShapesFromInputs(), ComparisonLayer::ValidateTensorShapesFromInputs(), LogicalBinaryLayer::ValidateTensorShapesFromInputs(), PreluLayer::ValidateTensorShapesFromInputs(), DepthToSpaceLayer::ValidateTensorShapesFromInputs(), Convolution2dLayer::ValidateTensorShapesFromInputs(), ConcatLayer::ValidateTensorShapesFromInputs(), LstmLayer::ValidateTensorShapesFromInputs(), UnidirectionalSequenceLstmLayer::ValidateTensorShapesFromInputs(), BatchNormalizationLayer::ValidateTensorShapesFromInputs(), QuantizedLstmLayer::ValidateTensorShapesFromInputs(), and QLstmLayer::ValidateTensorShapesFromInputs().

{
    ARMNN_ASSERT(GetNumInputSlots() == expectedConnections);

    for (unsigned int i=0; i<expectedConnections; ++i)
    {
        if (GetInputSlot(i).GetConnection() == nullptr)
        {
            throw LayerValidationException(
                fmt::format("Input connection #{0} must be connected "
                            "for {1} layer {2} {3}",
                            i,
                            GetLayerTypeAsCString(GetType()),
                            GetNameStr(),
                            location.AsString()));
        }
    }
}
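
As an illustration, a minimal call-site sketch; the connection count of 2 and the use of the CHECK_LOCATION() macro are assumptions for the example rather than details documented on this page.

    // A two-input layer checking that both inputs are connected before shape
    // validation. CHECK_LOCATION() captures the caller's source location,
    // which ends up in the LayerValidationException message if a slot is
    // left unconnected.
    VerifyLayerConnections(2, CHECK_LOCATION());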

◆ VerifyShapeInferenceType()

void VerifyShapeInferenceType ( const TensorShape & outputShape, ShapeInferenceMethod shapeInferenceMethod )
protected

Definition at line 433 of file Layer.cpp.

References TensorShape::AreAllDimensionsSpecified(), TensorShape::GetDimensionality(), armnn::NotSpecified, and armnn::ValidateOnly.

Referenced by ElementwiseBaseLayer::ValidateTensorShapesFromInputs(), RankLayer::ValidateTensorShapesFromInputs(), QuantizeLayer::ValidateTensorShapesFromInputs(), ActivationLayer::ValidateTensorShapesFromInputs(), FillLayer::ValidateTensorShapesFromInputs(), ReduceLayer::ValidateTensorShapesFromInputs(), ConvertFp32ToFp16Layer::ValidateTensorShapesFromInputs(), AbsLayer::ValidateTensorShapesFromInputs(), DequantizeLayer::ValidateTensorShapesFromInputs(), MemCopyLayer::ValidateTensorShapesFromInputs(), MemImportLayer::ValidateTensorShapesFromInputs(), MergeLayer::ValidateTensorShapesFromInputs(), NormalizationLayer::ValidateTensorShapesFromInputs(), RsqrtLayer::ValidateTensorShapesFromInputs(), ResizeLayer::ValidateTensorShapesFromInputs(), ConvertBf16ToFp32Layer::ValidateTensorShapesFromInputs(), Pooling2dLayer::ValidateTensorShapesFromInputs(), FakeQuantizationLayer::ValidateTensorShapesFromInputs(), ConvertFp16ToFp32Layer::ValidateTensorShapesFromInputs(), BatchToSpaceNdLayer::ValidateTensorShapesFromInputs(), ConvertFp32ToBf16Layer::ValidateTensorShapesFromInputs(), FloorLayer::ValidateTensorShapesFromInputs(), StackLayer::ValidateTensorShapesFromInputs(), CastLayer::ValidateTensorShapesFromInputs(), ShapeLayer::ValidateTensorShapesFromInputs(), SliceLayer::ValidateTensorShapesFromInputs(), SoftmaxLayer::ValidateTensorShapesFromInputs(), InstanceNormalizationLayer::ValidateTensorShapesFromInputs(), L2NormalizationLayer::ValidateTensorShapesFromInputs(), DebugLayer::ValidateTensorShapesFromInputs(), SwitchLayer::ValidateTensorShapesFromInputs(), PadLayer::ValidateTensorShapesFromInputs(), GatherLayer::ValidateTensorShapesFromInputs(), LogSoftmaxLayer::ValidateTensorShapesFromInputs(), TransposeLayer::ValidateTensorShapesFromInputs(), MeanLayer::ValidateTensorShapesFromInputs(), PermuteLayer::ValidateTensorShapesFromInputs(), ReshapeLayer::ValidateTensorShapesFromInputs(), ArgMinMaxLayer::ValidateTensorShapesFromInputs(), StridedSliceLayer::ValidateTensorShapesFromInputs(), ElementwiseUnaryLayer::ValidateTensorShapesFromInputs(), DetectionPostProcessLayer::ValidateTensorShapesFromInputs(), SpaceToBatchNdLayer::ValidateTensorShapesFromInputs(), PreluLayer::ValidateTensorShapesFromInputs(), LogicalBinaryLayer::ValidateTensorShapesFromInputs(), TransposeConvolution2dLayer::ValidateTensorShapesFromInputs(), DepthToSpaceLayer::ValidateTensorShapesFromInputs(), ComparisonLayer::ValidateTensorShapesFromInputs(), DepthwiseConvolution2dLayer::ValidateTensorShapesFromInputs(), SpaceToDepthLayer::ValidateTensorShapesFromInputs(), Convolution2dLayer::ValidateTensorShapesFromInputs(), FullyConnectedLayer::ValidateTensorShapesFromInputs(), SplitterLayer::ValidateTensorShapesFromInputs(), LstmLayer::ValidateTensorShapesFromInputs(), ConcatLayer::ValidateTensorShapesFromInputs(), UnidirectionalSequenceLstmLayer::ValidateTensorShapesFromInputs(), BatchNormalizationLayer::ValidateTensorShapesFromInputs(), QuantizedLstmLayer::ValidateTensorShapesFromInputs(), and QLstmLayer::ValidateTensorShapesFromInputs().

{
    if (shapeInferenceMethod == ShapeInferenceMethod::ValidateOnly)
    {
        ConditionalThrow<LayerValidationException>(
            outputShape.GetDimensionality() != Dimensionality::NotSpecified,
            "Dimensionality can not be NotSpecified while using ShapeInferenceMethod::ValidateOnly");

        ConditionalThrow<LayerValidationException>(
            outputShape.AreAllDimensionsSpecified(),
            "Unspecified dimension while using ShapeInferenceMethod::ValidateOnly");
    }
}
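
Taken together, these helpers are what a concrete layer's ValidateTensorShapesFromInputs() override typically strings together. The following is a hedged sketch of that pattern for a hypothetical single-input layer; MyCustomLayer does not exist in ArmNN, and the exact sequence varies between the layers listed in the "Referenced by" sections above.

    // Hypothetical single-input layer wiring the validation helpers together.
    void MyCustomLayer::ValidateTensorShapesFromInputs()
    {
        // 1. Every expected input slot must have a connection.
        VerifyLayerConnections(1, CHECK_LOCATION());

        // 2. The user-set output shape must be usable for the chosen method.
        const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
        VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);

        // 3. Infer the output shape from the connected input's shape.
        auto inferredShapes = InferOutputShapes(
            { GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
        ARMNN_ASSERT(inferredShapes.size() == 1);

        // 4. Validate against the set shape and, unless ValidateOnly is used,
        //    copy the inferred shape into the output slot's TensorInfo.
        ValidateAndCopyShape(outputShape, inferredShapes[0],
                             m_ShapeInferenceMethod, "MyCustomLayer");
    }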

Friends And Related Function Documentation

◆ Graph

friend class Graph
friend

Definition at line 354 of file Layer.hpp.

Member Data Documentation

◆ m_AdditionalInfoObject

AdditionalInfoObjectPtr m_AdditionalInfoObject
protected

Definition at line 394 of file Layer.hpp.

Referenced by Layer::SetAdditionalInfo().

◆ m_OutputHandlers

std::vector<OutputHandler> m_OutputHandlers
protected

◆ m_ShapeInferenceMethod

ShapeInferenceMethod m_ShapeInferenceMethod
protected

Definition at line 408 of file Layer.hpp.

Referenced by Layer::CloneBase(), ElementwiseBaseLayer::InferOutputShapes(), ElementwiseBaseLayer::ValidateTensorShapesFromInputs(), RankLayer::ValidateTensorShapesFromInputs(), QuantizeLayer::ValidateTensorShapesFromInputs(), ActivationLayer::ValidateTensorShapesFromInputs(), FillLayer::ValidateTensorShapesFromInputs(), ReduceLayer::ValidateTensorShapesFromInputs(), ConvertFp32ToFp16Layer::ValidateTensorShapesFromInputs(), AbsLayer::ValidateTensorShapesFromInputs(), DequantizeLayer::ValidateTensorShapesFromInputs(), SliceLayer::ValidateTensorShapesFromInputs(), MemCopyLayer::ValidateTensorShapesFromInputs(), MemImportLayer::ValidateTensorShapesFromInputs(), ConvertBf16ToFp32Layer::ValidateTensorShapesFromInputs(), MergeLayer::ValidateTensorShapesFromInputs(), ConvertFp32ToBf16Layer::ValidateTensorShapesFromInputs(), NormalizationLayer::ValidateTensorShapesFromInputs(), BatchToSpaceNdLayer::ValidateTensorShapesFromInputs(), ConvertFp16ToFp32Layer::ValidateTensorShapesFromInputs(), Pooling2dLayer::ValidateTensorShapesFromInputs(), FakeQuantizationLayer::ValidateTensorShapesFromInputs(), SwitchLayer::ValidateTensorShapesFromInputs(), CastLayer::ValidateTensorShapesFromInputs(), FloorLayer::ValidateTensorShapesFromInputs(), ResizeLayer::ValidateTensorShapesFromInputs(), RsqrtLayer::ValidateTensorShapesFromInputs(), ShapeLayer::ValidateTensorShapesFromInputs(), InstanceNormalizationLayer::ValidateTensorShapesFromInputs(), SoftmaxLayer::ValidateTensorShapesFromInputs(), DebugLayer::ValidateTensorShapesFromInputs(), StackLayer::ValidateTensorShapesFromInputs(), L2NormalizationLayer::ValidateTensorShapesFromInputs(), MeanLayer::ValidateTensorShapesFromInputs(), PadLayer::ValidateTensorShapesFromInputs(), GatherLayer::ValidateTensorShapesFromInputs(), LogSoftmaxLayer::ValidateTensorShapesFromInputs(), TransposeLayer::ValidateTensorShapesFromInputs(), ReshapeLayer::ValidateTensorShapesFromInputs(), PermuteLayer::ValidateTensorShapesFromInputs(), DetectionPostProcessLayer::ValidateTensorShapesFromInputs(), ElementwiseUnaryLayer::ValidateTensorShapesFromInputs(), ArgMinMaxLayer::ValidateTensorShapesFromInputs(), StridedSliceLayer::ValidateTensorShapesFromInputs(), SpaceToBatchNdLayer::ValidateTensorShapesFromInputs(), SpaceToDepthLayer::ValidateTensorShapesFromInputs(), PreluLayer::ValidateTensorShapesFromInputs(), LogicalBinaryLayer::ValidateTensorShapesFromInputs(), TransposeConvolution2dLayer::ValidateTensorShapesFromInputs(), DepthwiseConvolution2dLayer::ValidateTensorShapesFromInputs(), ComparisonLayer::ValidateTensorShapesFromInputs(), DepthToSpaceLayer::ValidateTensorShapesFromInputs(), Convolution2dLayer::ValidateTensorShapesFromInputs(), FullyConnectedLayer::ValidateTensorShapesFromInputs(), SplitterLayer::ValidateTensorShapesFromInputs(), ConcatLayer::ValidateTensorShapesFromInputs(), UnidirectionalSequenceLstmLayer::ValidateTensorShapesFromInputs(), LstmLayer::ValidateTensorShapesFromInputs(), BatchNormalizationLayer::ValidateTensorShapesFromInputs(), QuantizedLstmLayer::ValidateTensorShapesFromInputs(), and QLstmLayer::ValidateTensorShapesFromInputs().


The documentation for this class was generated from the following files: