ArmNN 24.02
Layer Class Reference [abstract]

#include <Layer.hpp>

Inheritance diagram for Layer:
Collaboration diagram for Layer:

Public Member Functions

 Layer (unsigned int numInputSlots, unsigned int numOutputSlots, LayerType type, const char *name)
 
 Layer (unsigned int numInputSlots, unsigned int numOutputSlots, LayerType type, DataLayout layout, const char *name)
 
void ExecuteStrategy (IStrategy &strategy) const override
 Apply a visitor to this layer. More...
 
const std::string & GetNameStr () const
 
const OutputHandler & GetOutputHandler (unsigned int i=0) const
 
OutputHandler & GetOutputHandler (unsigned int i=0)
 
ShapeInferenceMethod GetShapeInferenceMethod () const
 
bool GetAllowExpandedDims () const
 
const std::vector< InputSlot > & GetInputSlots () const
 
const std::vector< OutputSlot > & GetOutputSlots () const
 
std::vector< InputSlot >::iterator BeginInputSlots ()
 
std::vector< InputSlot >::iterator EndInputSlots ()
 
std::vector< OutputSlot >::iterator BeginOutputSlots ()
 
std::vector< OutputSlot >::iterator EndOutputSlots ()
 
bool IsOutputUnconnected ()
 
void ResetPriority () const
 
LayerPriority GetPriority () const
 
LayerType GetType () const override
 Returns the armnn::LayerType of this layer. More...
 
DataType GetDataType () const
 
const BackendId & GetBackendId () const
 
void SetBackendId (const BackendId &id) override
 Set the backend of the IConnectableLayer. More...
 
virtual std::unique_ptr< IWorkload > CreateWorkload (const IWorkloadFactory &factory) const =0
 
virtual void CreateTensorHandles (const TensorHandleFactoryRegistry &registry, const IWorkloadFactory &factory, const bool IsMemoryManaged=true)
 
virtual Layer * Clone (Graph &graph) const =0
 Creates a dynamically-allocated copy of this layer. More...
 
void VerifyLayerConnections (unsigned int expectedConnections, const CheckLocation &location) const
 
virtual void ValidateTensorShapesFromInputs ()=0
 
std::vector< TensorShape > InferOutputShapes (const std::vector< TensorShape > &inputShapes) const override
 Infer the shape of the output(s) based on the provided input shape(s) More...
 
virtual void SerializeLayerParameters (ParameterStringifyFunction &fn) const
 Helper to serialize the layer parameters to string. More...
 
virtual void ReleaseConstantData ()
 
template<typename Op >
void OperateOnConstantTensors (Op op)
 
const char * GetName () const override
 Returns the name of the layer. More...
 
unsigned int GetNumInputSlots () const override
 Returns the number of connectable input slots. More...
 
unsigned int GetNumOutputSlots () const override
 Returns the number of connectable output slots. More...
 
const InputSlot & GetInputSlot (unsigned int index) const override
 Get a const input slot handle by slot index. More...
 
InputSlot & GetInputSlot (unsigned int index) override
 Get the input slot handle by slot index. More...
 
const OutputSlot & GetOutputSlot (unsigned int index=0) const override
 Get the const output slot handle by slot index. More...
 
OutputSlot & GetOutputSlot (unsigned int index=0) override
 Get the output slot handle by slot index. More...
 
void SetGuid (LayerGuid guid)
 
LayerGuid GetGuid () const final
 Returns the unique id of the layer. More...
 
void AddRelatedLayerName (const std::string layerName)
 
const std::list< std::string > & GetRelatedLayerNames ()
 
virtual void Reparent (Graph &dest, std::list< Layer * >::const_iterator iterator)=0
 
void BackendSelectionHint (Optional< BackendId > backend) final
 Provide a hint for the optimizer as to which backend to prefer for this layer. More...
 
Optional< BackendId > GetBackendHint () const
 
void SetShapeInferenceMethod (ShapeInferenceMethod shapeInferenceMethod)
 
void SetAllowExpandedDims (bool allowExpandedDims)
 
template<typename T >
std::shared_ptr< T > GetAdditionalInformation () const
 
void SetAdditionalInfoForObject (const AdditionalInfoObjectPtr &additionalInfo)
 
virtual const BaseDescriptor & GetParameters () const override
 If the layer has a descriptor return it. More...
 

Protected Member Functions

virtual ~Layer ()=default
 
template<typename QueueDescriptor >
void CollectQueueDescriptorInputs (QueueDescriptor &descriptor, WorkloadInfo &info) const
 
template<typename QueueDescriptor >
void CollectQueueDescriptorOutputs (QueueDescriptor &descriptor, WorkloadInfo &info) const
 
void ValidateAndCopyShape (const TensorShape &outputShape, const TensorShape &inferredShape, const ShapeInferenceMethod shapeInferenceMethod, const std::string &layerName, const unsigned int outputSlotIndex=0)
 
void VerifyShapeInferenceType (const TensorShape &outputShape, ShapeInferenceMethod shapeInferenceMethod)
 
template<typename QueueDescriptor >
WorkloadInfo PrepInfoAndDesc (QueueDescriptor &descriptor) const
 Helper function to reduce duplication in *Layer::CreateWorkload. More...
 
template<typename LayerType , typename ... Params>
LayerType * CloneBase (Graph &graph, Params &&... params) const
 
virtual ConstantTensors GetConstantTensorsByRef () override final
 
virtual ImmutableConstantTensors GetConstantTensorsByRef () const override
 
void SetAdditionalInfo (QueueDescriptor &descriptor) const
 
- Protected Member Functions inherited from IConnectableLayer
 ~IConnectableLayer ()
 Objects are not deletable via the handle. More...
 

Protected Attributes

AdditionalInfoObjectPtr m_AdditionalInfoObject
 
std::vector< OutputHandler > m_OutputHandlers
 
ShapeInferenceMethod m_ShapeInferenceMethod
 

Friends

class Graph
 

Additional Inherited Members

- Public Types inherited from IConnectableLayer
using ConstantTensors = std::vector< std::reference_wrapper< std::shared_ptr< ConstTensorHandle > >>
 
using ImmutableConstantTensors = std::vector< std::reference_wrapper< const std::shared_ptr< ConstTensorHandle > >>
 

Detailed Description

Definition at line 230 of file Layer.hpp.

Constructor & Destructor Documentation

◆ Layer() [1/2]

Layer ( unsigned int  numInputSlots,
unsigned int  numOutputSlots,
LayerType  type,
const char *  name 
)
Parameters
name - Optional name for the layer (may be nullptr).

Definition at line 247 of file Layer.cpp.

251 : Layer(numInputSlots, numOutputSlots, type, DataLayout::NCHW, name)
252 {
253 }

References armnn::NCHW.

◆ Layer() [2/2]

Layer ( unsigned int  numInputSlots,
unsigned int  numOutputSlots,
LayerType  type,
DataLayout  layout,
const char *  name 
)

Definition at line 220 of file Layer.cpp.

225 : m_OutputHandlers(numOutputSlots)
226 , m_ShapeInferenceMethod(ShapeInferenceMethod::ValidateOnly)
227 , m_LayerName(name ? name : "")
228 , m_Type(type)
229 , m_BackendId()
230 , m_BackendHint(EmptyOptional())
231 , m_Guid(arm::pipe::IProfilingService::GetNextGuid())
232 {
233  IgnoreUnused(layout);
234  m_InputSlots.reserve(numInputSlots);
235  for (unsigned int i = 0; i < numInputSlots; ++i)
236  {
237  m_InputSlots.emplace_back(*this, i);
238  }
239 
240  m_OutputSlots.reserve(numOutputSlots);
241  for (unsigned int i = 0; i < numOutputSlots; ++i)
242  {
243  m_OutputSlots.emplace_back(*this, m_OutputHandlers[i]);
244  }
245 }

References armnn::IgnoreUnused(), Layer::m_OutputHandlers, and armnn::ValidateOnly.

◆ ~Layer()

virtual ~Layer ( )
protected virtual default

Member Function Documentation

◆ AddRelatedLayerName()

void AddRelatedLayerName ( const std::string  layerName)
inline

Definition at line 345 of file Layer.hpp.

345 { m_RelatedLayerNames.emplace_back(layerName); }

◆ BackendSelectionHint()

void BackendSelectionHint ( Optional< BackendId >  backend)
inline final virtual

Provide a hint for the optimizer as to which backend to prefer for this layer.

By providing a BackendSelectionHint there is no guarantee the input backend supports that layer. If IsLayerSupported() returns false with the backend hint, we default to calling IsLayerSupported() on the BackendPreferences vector. Use SetBackendId() if we can guarantee a backend supports that layer (IsLayerSupported returns true for a specific backend).

Implements IConnectableLayer.

Definition at line 351 of file Layer.hpp.

352  {
353  m_BackendHint = backend;
354  }
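
A minimal usage sketch contrasting the two mechanisms; the free-function names are illustrative and not part of ArmNN:

#include <Layer.hpp>
#include <armnn/BackendId.hpp>
#include <armnn/Optional.hpp>

// Prefer GpuAcc for this layer, but let the optimizer fall back to the
// BackendPreferences vector if GpuAcc turns out not to support it.
void PreferGpuAcc(armnn::Layer& layer)
{
    layer.BackendSelectionHint(armnn::Optional<armnn::BackendId>(armnn::BackendId("GpuAcc")));
}

// Pin the layer to CpuRef; only appropriate when IsLayerSupported() is known
// to return true for CpuRef.
void PinToCpuRef(armnn::Layer& layer)
{
    layer.SetBackendId(armnn::BackendId("CpuRef"));
}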

◆ BeginInputSlots()

std::vector<InputSlot>::iterator BeginInputSlots ( )
inline

◆ BeginOutputSlots()

◆ Clone()

virtual Layer* Clone ( Graph &  graph) const
pure virtual

Creates a dynamically-allocated copy of this layer.

Parameters
graph- The Graph into which this Layer is being cloned.

Implemented in QLstmLayer, QuantizedLstmLayer, OutputLayer, BatchNormalizationLayer, ConcatLayer, LstmLayer, UnidirectionalSequenceLstmLayer, SplitterLayer, TransposeConvolution2dLayer, DetectionPostProcessLayer, PreCompiledLayer, Convolution2dLayer, Convolution3dLayer, DepthwiseConvolution2dLayer, FullyConnectedLayer, FusedLayer, ConstantLayer, ReshapeLayer, ArgMinMaxLayer, ComparisonLayer, DepthToSpaceLayer, DivisionLayer, ElementwiseBinaryLayer, ElementwiseUnaryLayer, GatherLayer, LogicalBinaryLayer, LogSoftmaxLayer, MaximumLayer, MeanLayer, MinimumLayer, MultiplicationLayer, PadLayer, PermuteLayer, PreluLayer, ReduceLayer, ReverseV2Layer, SpaceToBatchNdLayer, SpaceToDepthLayer, StandInLayer, SubtractionLayer, TransposeLayer, AbsLayer, AdditionLayer, BatchMatMulLayer, BatchToSpaceNdLayer, CastLayer, ConvertFp16ToFp32Layer, DebugLayer, DequantizeLayer, FakeQuantizationLayer, FloorLayer, GatherNdLayer, InputLayer, InstanceNormalizationLayer, L2NormalizationLayer, MapLayer, MemCopyLayer, MemImportLayer, MergeLayer, NormalizationLayer, Pooling2dLayer, Pooling3dLayer, ResizeLayer, RsqrtLayer, ShapeLayer, SliceLayer, SoftmaxLayer, StackLayer, StridedSliceLayer, SwitchLayer, TileLayer, UnmapLayer, ActivationLayer, BroadcastToLayer, ConvertFp32ToFp16Layer, FillLayer, RankLayer, QuantizeLayer, and ChannelShuffleLayer.

Referenced by SubgraphView::GetWorkingCopy(), and Graph::Graph().

◆ CloneBase()

LayerType * CloneBase ( Graph &  graph,
Params &&...  params 
) const
protected

Definition at line 14 of file LayerCloneBase.hpp.

15 {
16  LayerType* const layer = graph.AddLayer<LayerType>(std::forward<Params>(params)...);
17 
18  layer->BackendSelectionHint(GetBackendHint());
19  layer->SetBackendId(GetBackendId());
20  layer->SetGuid(GetGuid());
21  layer->SetShapeInferenceMethod(m_ShapeInferenceMethod);
22  layer->SetAllowExpandedDims(m_AllowExpandedDims);
23 
24  return layer;
25 }

References Graph::AddLayer(), Layer::GetBackendHint(), Layer::GetBackendId(), Layer::GetGuid(), and Layer::m_ShapeInferenceMethod.
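
For illustration, a concrete layer's Clone() override typically forwards its own constructor arguments to CloneBase(); a sketch modelled on ActivationLayer (a LayerWithParameters subclass, so m_Param holds its descriptor):

ActivationLayer* ActivationLayer::Clone(Graph& graph) const
{
    // Re-create this layer in the destination graph; CloneBase() also copies the
    // backend id/hint, guid, shape-inference method and expanded-dims setting.
    return CloneBase<ActivationLayer>(graph, m_Param, GetName());
}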

◆ CollectQueueDescriptorInputs()

void CollectQueueDescriptorInputs ( QueueDescriptor &  descriptor,
WorkloadInfo &  info 
) const
inline protected

Definition at line 386 of file Layer.hpp.

387  {
388  WorkloadDataCollector dataCollector(descriptor.m_Inputs, info.m_InputTensorInfos);
389  CollectWorkloadInputs(dataCollector);
390  }

References armnn::info, and QueueDescriptor::m_Inputs.

Referenced by Layer::PrepInfoAndDesc().

◆ CollectQueueDescriptorOutputs()

void CollectQueueDescriptorOutputs ( QueueDescriptor &  descriptor,
WorkloadInfo &  info 
) const
inline protected

Definition at line 393 of file Layer.hpp.

394  {
395  WorkloadDataCollector dataCollector(descriptor.m_Outputs, info.m_OutputTensorInfos);
396  CollectWorkloadOutputs(dataCollector);
397  }

References armnn::info, and QueueDescriptor::m_Outputs.

Referenced by Layer::PrepInfoAndDesc().

◆ CreateTensorHandles()

void CreateTensorHandles ( const TensorHandleFactoryRegistry &  registry,
const IWorkloadFactory &  factory,
const bool  IsMemoryManaged = true 
)
virtual

Reimplemented in ConcatLayer, OutputLayer, and SplitterLayer.

Definition at line 292 of file Layer.cpp.

295 {
296  for (unsigned int idx=0; idx < GetNumOutputSlots(); idx++)
297  {
298 
299  OutputSlot& slot = GetOutputSlot(idx);
300  ITensorHandleFactory::FactoryId factoryId = slot.GetTensorHandleFactoryId();
301 
302  OutputHandler& handler = GetOutputHandler(idx);
303  if (factoryId == ITensorHandleFactory::LegacyFactoryId)
304  {
305  handler.CreateTensorHandles(workloadFactory, IsMemoryManaged);
306  }
307  else
308  {
309  ITensorHandleFactory* handleFactory;
310  handleFactory = registry.GetFactory(factoryId);
311  ARMNN_ASSERT(handleFactory);
312  handler.CreateTensorHandles(*handleFactory, IsMemoryManaged);
313  }
314  }
315 }

References ARMNN_ASSERT, OutputHandler::CreateTensorHandles(), TensorHandleFactoryRegistry::GetFactory(), Layer::GetNumOutputSlots(), Layer::GetOutputHandler(), Layer::GetOutputSlot(), OutputSlot::GetTensorHandleFactoryId(), and ITensorHandleFactory::LegacyFactoryId.

◆ CreateWorkload()

◆ EndInputSlots()

std::vector<InputSlot>::iterator EndInputSlots ( )
inline

◆ EndOutputSlots()

◆ ExecuteStrategy()

void ExecuteStrategy ( IStrategy &  strategy) const
override virtual

Apply a visitor to this layer.

Implements IConnectableLayer.

Reimplemented in BindableLayer, QLstmLayer, QuantizedLstmLayer, PermuteLayer, TransposeLayer, LayerWithParameters< Parameters >, LayerWithParameters< SoftmaxDescriptor >, LayerWithParameters< FakeQuantizationDescriptor >, LayerWithParameters< ReduceDescriptor >, LayerWithParameters< LstmDescriptor >, LayerWithParameters< ChannelShuffleDescriptor >, LayerWithParameters< LogSoftmaxDescriptor >, LayerWithParameters< PreCompiledDescriptor >, LayerWithParameters< SpaceToBatchNdDescriptor >, LayerWithParameters< BatchToSpaceNdDescriptor >, LayerWithParameters< PermuteDescriptor >, LayerWithParameters< GatherDescriptor >, LayerWithParameters< ElementwiseUnaryDescriptor >, LayerWithParameters< DepthToSpaceDescriptor >, LayerWithParameters< OriginsDescriptor >, LayerWithParameters< SpaceToDepthDescriptor >, LayerWithParameters< ReshapeDescriptor >, LayerWithParameters< ViewsDescriptor >, LayerWithParameters< Pooling2dDescriptor >, LayerWithParameters< Convolution2dDescriptor >, LayerWithParameters< ActivationDescriptor >, LayerWithParameters< StandInDescriptor >, LayerWithParameters< MeanDescriptor >, LayerWithParameters< StackDescriptor >, LayerWithParameters< TransposeDescriptor >, LayerWithParameters< InstanceNormalizationDescriptor >, LayerWithParameters< ComparisonDescriptor >, LayerWithParameters< TransposeConvolution2dDescriptor >, LayerWithParameters< BroadcastToDescriptor >, LayerWithParameters< BatchNormalizationDescriptor >, LayerWithParameters< FusedDescriptor >, LayerWithParameters< BatchMatMulDescriptor >, LayerWithParameters< TileDescriptor >, LayerWithParameters< ArgMinMaxDescriptor >, LayerWithParameters< LogicalBinaryDescriptor >, LayerWithParameters< DetectionPostProcessDescriptor >, LayerWithParameters< PadDescriptor >, LayerWithParameters< L2NormalizationDescriptor >, LayerWithParameters< Convolution3dDescriptor >, LayerWithParameters< SliceDescriptor >, LayerWithParameters< QLstmDescriptor >, LayerWithParameters< FillDescriptor >, LayerWithParameters< DepthwiseConvolution2dDescriptor >, LayerWithParameters< NormalizationDescriptor >, LayerWithParameters< FullyConnectedDescriptor >, LayerWithParameters< ResizeDescriptor >, LayerWithParameters< Pooling3dDescriptor >, LayerWithParameters< ElementwiseBinaryDescriptor >, LayerWithParameters< StridedSliceDescriptor >, ReshapeLayer, UnidirectionalSequenceLstmLayer, ConcatLayer, LstmLayer, SplitterLayer, TransposeConvolution2dLayer, DetectionPostProcessLayer, OutputLayer, BatchNormalizationLayer, ConstantLayer, Convolution2dLayer, Convolution3dLayer, DepthwiseConvolution2dLayer, FullyConnectedLayer, ComparisonLayer, DepthToSpaceLayer, LogicalBinaryLayer, MeanLayer, PadLayer, PreluLayer, SpaceToBatchNdLayer, SpaceToDepthLayer, StandInLayer, ArgMinMaxLayer, BatchToSpaceNdLayer, ElementwiseUnaryLayer, GatherLayer, Pooling2dLayer, Pooling3dLayer, ReduceLayer, ResizeLayer, ReverseV2Layer, ShapeLayer, SliceLayer, StackLayer, StridedSliceLayer, BroadcastToLayer, ElementwiseBinaryLayer, MergeLayer, PreCompiledLayer, RankLayer, FakeQuantizationLayer, LogSoftmaxLayer, AbsLayer, ConvertFp16ToFp32Layer, DebugLayer, DequantizeLayer, FloorLayer, FusedLayer, InputLayer, InstanceNormalizationLayer, L2NormalizationLayer, MapLayer, MemCopyLayer, MemImportLayer, NormalizationLayer, RsqrtLayer, SoftmaxLayer, SwitchLayer, UnmapLayer, ConvertFp32ToFp16Layer, ElementwiseBaseLayer, FillLayer, ActivationLayer, DivisionLayer, MaximumLayer, MinimumLayer, MultiplicationLayer, SubtractionLayer, AdditionLayer, and QuantizeLayer.

Definition at line 549 of file Layer.cpp.

550 {
551  strategy.ExecuteStrategy(this, BaseDescriptor(), {}, GetName());
552 }

References IStrategy::ExecuteStrategy(), and Layer::GetName().

Referenced by armnn::IsNCHW().
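
A hedged sketch of a visitor: only the pure-virtual ExecuteStrategy overload targeted by the call above needs overriding. The parameter list is assumed to match IStrategy.hpp; the class name is illustrative.

#include <armnn/IStrategy.hpp>
#include <string>
#include <vector>

// Records the name of every visited layer.
class LayerNameCollector : public armnn::IStrategy
{
public:
    void ExecuteStrategy(const armnn::IConnectableLayer* layer,
                         const armnn::BaseDescriptor& /*descriptor*/,
                         const std::vector<armnn::ConstTensor>& /*constants*/,
                         const char* name,
                         const armnn::LayerBindingId /*id*/) override
    {
        m_Names.emplace_back(name != nullptr ? name : layer->GetName());
    }

    std::vector<std::string> m_Names;
};

// Usage: for each layer in a graph, call layer.ExecuteStrategy(collector);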

◆ GetAdditionalInformation()

std::shared_ptr<T> GetAdditionalInformation ( ) const
inline

Definition at line 368 of file Layer.hpp.

369  {
370  return std::static_pointer_cast<T>(m_AdditionalInfoObject);
371  }

References Layer::m_AdditionalInfoObject.

Referenced by NeonBackend::OptimizeSubgraphView(), and ClBackend::OptimizeSubgraphView().
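
A small sketch mirroring the use inside SerializeLayerParameters(): retrieve a fused ActivationDescriptor if a backend optimization attached one, or an empty pointer otherwise (helper name illustrative):

#include <Layer.hpp>
#include <armnn/Descriptors.hpp>
#include <memory>

std::shared_ptr<armnn::ActivationDescriptor> GetFusedActivation(const armnn::Layer& layer)
{
    // static_pointer_cast of a null m_AdditionalInfoObject simply yields nullptr.
    return layer.GetAdditionalInformation<armnn::ActivationDescriptor>();
}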

◆ GetAllowExpandedDims()

bool GetAllowExpandedDims ( ) const
inline

Definition at line 256 of file Layer.hpp.

256 { return m_AllowExpandedDims; };

Referenced by LayerWithParameters< StridedSliceDescriptor >::PrepInfoAndDesc().

◆ GetBackendHint()

Optional<BackendId> GetBackendHint ( ) const
inline

Definition at line 355 of file Layer.hpp.

355 { return m_BackendHint; }

Referenced by NetworkImpl::AddPrecompiledLayer(), and Layer::CloneBase().

◆ GetBackendId()

◆ GetConstantTensorsByRef() [1/2]

◆ GetConstantTensorsByRef() [2/2]

Layer::ConstantTensors GetConstantTensorsByRef ( )
final override protected virtual

Implements IConnectableLayer.

Definition at line 554 of file Layer.cpp.

555 {
556  const Layer *constThis = const_cast<const Layer*>(this);
557  ConstantTensors res;
558 
559  ImmutableConstantTensors immutableData = constThis->GetConstantTensorsByRef();
560  for (auto i : immutableData)
561  {
562  res.push_back(const_cast<std::shared_ptr<ConstTensorHandle>&>(i.get()));
563  }
564  return res;
565 }

References Layer::GetConstantTensorsByRef().

Referenced by Layer::GetConstantTensorsByRef(), and Layer::OperateOnConstantTensors().

◆ GetDataType()

◆ GetGuid()

◆ GetInputSlot() [1/2]

const InputSlot& GetInputSlot ( unsigned int  index) const
inline override virtual

Get a const input slot handle by slot index.

Implements IConnectableLayer.

Examples
AsyncExecutionSample.cpp, and SimpleSample.cpp.

Definition at line 337 of file Layer.hpp.

337 { return m_InputSlots.at(index); }

Referenced by Graph::AddCompatibilityLayers(), armnn::ApplyBackendOptimizations(), armnn::ChainReduceLayers(), armnn::ConnectedToLayerType(), armnn::ConnectedToLayerWithNCHW(), ConvertActivationToTosaOperator(), ConvertAvgPool2DIgnoreValueToTosaOperator(), ConvertConcatToTosaOperator(), ConvertConv2dToTosaOperator(), ConvertElementwiseBinaryToTosaOperator(), ConvertElementwiseUnaryOperator(), ConvertPooling2DToTosaOperator(), ConvertQuantizeToTosaOperator(), ConvertReshapeToTosaOperator(), ConvertResizeToTosaOperator(), ConvertSliceToTosaOperator(), ConvertTransposeConv2dToTosaOperator(), ConvertTransposeToTosaOperator(), DebugLayer::CreateWorkload(), armnn::optimizations::pad_fold::FoldPadIntoLayer2dImpl(), LayerWithParameters< StridedSliceDescriptor >::GetConnectedConstantAsInputTensors(), Layer::GetDataType(), armnn::GetLayerInOutDatatype(), Graph::Graph(), InputSlot::Insert(), armnn::InsertConvertFp16ToFp32LayersBefore(), armnn::InsertConvertFp32ToFp16LayersAfter(), armnn::InsertDebugLayerAfter(), RefBackend::OptimizeSubgraphView(), NeonBackend::OptimizeSubgraphView(), ClBackend::OptimizeSubgraphView(), GpuFsaBackend::OptimizeSubgraphView(), armnn::RemoveReshapeLayer(), PermuteAsReshapeImpl::Run(), TransposeAsReshapeImpl::Run(), OptimizeConsecutiveReshapesImpl::Run(), PermuteAndBatchToSpaceAsDepthToSpaceImpl< PermuteType >::Run(), OptimizeInverseConversionsImpl::Run(), MovePermuteUpImpl::Run(), MoveTransposeUpImpl::Run(), OptimizeInversePermutesImpl< PermuteType >::Run(), AddBroadcastReshapeLayerImpl::Run(), FuseBatchNorm< ConvLayer, ArmnnType, T >::Run(), Convolution2dLayer::SerializeLayerParameters(), Convolution3dLayer::SerializeLayerParameters(), DepthwiseConvolution2dLayer::SerializeLayerParameters(), ElementwiseBaseLayer::ValidateTensorShapesFromInputs(), QuantizeLayer::ValidateTensorShapesFromInputs(), ChannelShuffleLayer::ValidateTensorShapesFromInputs(), ActivationLayer::ValidateTensorShapesFromInputs(), ConvertFp32ToFp16Layer::ValidateTensorShapesFromInputs(), FillLayer::ValidateTensorShapesFromInputs(), StackLayer::ValidateTensorShapesFromInputs(), SliceLayer::ValidateTensorShapesFromInputs(), SoftmaxLayer::ValidateTensorShapesFromInputs(), FakeQuantizationLayer::ValidateTensorShapesFromInputs(), ConvertFp16ToFp32Layer::ValidateTensorShapesFromInputs(), FloorLayer::ValidateTensorShapesFromInputs(), SwitchLayer::ValidateTensorShapesFromInputs(), InstanceNormalizationLayer::ValidateTensorShapesFromInputs(), TransposeLayer::ValidateTensorShapesFromInputs(), DebugLayer::ValidateTensorShapesFromInputs(), DequantizeLayer::ValidateTensorShapesFromInputs(), MemCopyLayer::ValidateTensorShapesFromInputs(), L2NormalizationLayer::ValidateTensorShapesFromInputs(), MemImportLayer::ValidateTensorShapesFromInputs(), MergeLayer::ValidateTensorShapesFromInputs(), BatchToSpaceNdLayer::ValidateTensorShapesFromInputs(), NormalizationLayer::ValidateTensorShapesFromInputs(), CastLayer::ValidateTensorShapesFromInputs(), Pooling2dLayer::ValidateTensorShapesFromInputs(), Pooling3dLayer::ValidateTensorShapesFromInputs(), AbsLayer::ValidateTensorShapesFromInputs(), PermuteLayer::ValidateTensorShapesFromInputs(), ResizeLayer::ValidateTensorShapesFromInputs(), RsqrtLayer::ValidateTensorShapesFromInputs(), ShapeLayer::ValidateTensorShapesFromInputs(), PadLayer::ValidateTensorShapesFromInputs(), MeanLayer::ValidateTensorShapesFromInputs(), LogSoftmaxLayer::ValidateTensorShapesFromInputs(), ReshapeLayer::ValidateTensorShapesFromInputs(), FullyConnectedLayer::ValidateTensorShapesFromInputs(), 
DepthwiseConvolution2dLayer::ValidateTensorShapesFromInputs(), Convolution2dLayer::ValidateTensorShapesFromInputs(), Convolution3dLayer::ValidateTensorShapesFromInputs(), GatherNdLayer::ValidateTensorShapesFromInputs(), TileLayer::ValidateTensorShapesFromInputs(), BatchMatMulLayer::ValidateTensorShapesFromInputs(), ElementwiseBinaryLayer::ValidateTensorShapesFromInputs(), ReduceLayer::ValidateTensorShapesFromInputs(), ElementwiseUnaryLayer::ValidateTensorShapesFromInputs(), ReverseV2Layer::ValidateTensorShapesFromInputs(), DetectionPostProcessLayer::ValidateTensorShapesFromInputs(), GatherLayer::ValidateTensorShapesFromInputs(), StridedSliceLayer::ValidateTensorShapesFromInputs(), ArgMinMaxLayer::ValidateTensorShapesFromInputs(), SpaceToBatchNdLayer::ValidateTensorShapesFromInputs(), PreluLayer::ValidateTensorShapesFromInputs(), ComparisonLayer::ValidateTensorShapesFromInputs(), LogicalBinaryLayer::ValidateTensorShapesFromInputs(), TransposeConvolution2dLayer::ValidateTensorShapesFromInputs(), DepthToSpaceLayer::ValidateTensorShapesFromInputs(), SpaceToDepthLayer::ValidateTensorShapesFromInputs(), UnidirectionalSequenceLstmLayer::ValidateTensorShapesFromInputs(), LstmLayer::ValidateTensorShapesFromInputs(), ConcatLayer::ValidateTensorShapesFromInputs(), BatchNormalizationLayer::ValidateTensorShapesFromInputs(), OutputLayer::ValidateTensorShapesFromInputs(), QuantizedLstmLayer::ValidateTensorShapesFromInputs(), QLstmLayer::ValidateTensorShapesFromInputs(), and Layer::VerifyLayerConnections().

◆ GetInputSlot() [2/2]

InputSlot& GetInputSlot ( unsigned int  index)
inline override virtual

Get the input slot handle by slot index.

Implements IConnectableLayer.

Definition at line 338 of file Layer.hpp.

338 { return m_InputSlots.at(index); }

◆ GetInputSlots()

◆ GetName()

const char* GetName ( ) const
inline override virtual

Returns the name of the layer.

Implements IConnectableLayer.

Definition at line 332 of file Layer.hpp.

332 { return m_LayerName.c_str(); }

Referenced by Graph::AddCompatibilityLayers(), ChannelShuffleLayer::Clone(), QuantizeLayer::Clone(), BroadcastToLayer::Clone(), ActivationLayer::Clone(), FillLayer::Clone(), ConvertFp32ToFp16Layer::Clone(), RankLayer::Clone(), AbsLayer::Clone(), AdditionLayer::Clone(), InputLayer::Clone(), SoftmaxLayer::Clone(), InstanceNormalizationLayer::Clone(), DebugLayer::Clone(), L2NormalizationLayer::Clone(), StackLayer::Clone(), StridedSliceLayer::Clone(), CastLayer::Clone(), SwitchLayer::Clone(), TileLayer::Clone(), MapLayer::Clone(), UnmapLayer::Clone(), DequantizeLayer::Clone(), MemCopyLayer::Clone(), MemImportLayer::Clone(), MergeLayer::Clone(), NormalizationLayer::Clone(), Pooling2dLayer::Clone(), Pooling3dLayer::Clone(), ConvertFp16ToFp32Layer::Clone(), FakeQuantizationLayer::Clone(), BatchMatMulLayer::Clone(), FloorLayer::Clone(), BatchToSpaceNdLayer::Clone(), ResizeLayer::Clone(), GatherNdLayer::Clone(), RsqrtLayer::Clone(), ShapeLayer::Clone(), SliceLayer::Clone(), SpaceToBatchNdLayer::Clone(), SpaceToDepthLayer::Clone(), StandInLayer::Clone(), LogicalBinaryLayer::Clone(), SubtractionLayer::Clone(), LogSoftmaxLayer::Clone(), DepthToSpaceLayer::Clone(), TransposeLayer::Clone(), MaximumLayer::Clone(), MeanLayer::Clone(), ArgMinMaxLayer::Clone(), ComparisonLayer::Clone(), MinimumLayer::Clone(), DivisionLayer::Clone(), MultiplicationLayer::Clone(), PadLayer::Clone(), ElementwiseBinaryLayer::Clone(), PermuteLayer::Clone(), ElementwiseUnaryLayer::Clone(), PreluLayer::Clone(), ReduceLayer::Clone(), ReverseV2Layer::Clone(), GatherLayer::Clone(), ConstantLayer::Clone(), ReshapeLayer::Clone(), Convolution3dLayer::Clone(), DepthwiseConvolution2dLayer::Clone(), FullyConnectedLayer::Clone(), FusedLayer::Clone(), Convolution2dLayer::Clone(), DetectionPostProcessLayer::Clone(), PreCompiledLayer::Clone(), TransposeConvolution2dLayer::Clone(), SplitterLayer::Clone(), LstmLayer::Clone(), UnidirectionalSequenceLstmLayer::Clone(), ConcatLayer::Clone(), BatchNormalizationLayer::Clone(), OutputLayer::Clone(), QuantizedLstmLayer::Clone(), QLstmLayer::Clone(), QuantizeLayer::ExecuteStrategy(), AdditionLayer::ExecuteStrategy(), SubtractionLayer::ExecuteStrategy(), MaximumLayer::ExecuteStrategy(), MinimumLayer::ExecuteStrategy(), MultiplicationLayer::ExecuteStrategy(), DivisionLayer::ExecuteStrategy(), ActivationLayer::ExecuteStrategy(), ElementwiseBaseLayer::ExecuteStrategy(), FillLayer::ExecuteStrategy(), ConvertFp32ToFp16Layer::ExecuteStrategy(), SoftmaxLayer::ExecuteStrategy(), InputLayer::ExecuteStrategy(), InstanceNormalizationLayer::ExecuteStrategy(), L2NormalizationLayer::ExecuteStrategy(), SwitchLayer::ExecuteStrategy(), DebugLayer::ExecuteStrategy(), MemCopyLayer::ExecuteStrategy(), DequantizeLayer::ExecuteStrategy(), NormalizationLayer::ExecuteStrategy(), FloorLayer::ExecuteStrategy(), ConvertFp16ToFp32Layer::ExecuteStrategy(), FusedLayer::ExecuteStrategy(), RsqrtLayer::ExecuteStrategy(), LogSoftmaxLayer::ExecuteStrategy(), BroadcastToLayer::ExecuteStrategy(), MergeLayer::ExecuteStrategy(), RankLayer::ExecuteStrategy(), ElementwiseBinaryLayer::ExecuteStrategy(), PreCompiledLayer::ExecuteStrategy(), SliceLayer::ExecuteStrategy(), ResizeLayer::ExecuteStrategy(), ShapeLayer::ExecuteStrategy(), StackLayer::ExecuteStrategy(), GatherLayer::ExecuteStrategy(), ReverseV2Layer::ExecuteStrategy(), BatchToSpaceNdLayer::ExecuteStrategy(), StridedSliceLayer::ExecuteStrategy(), ArgMinMaxLayer::ExecuteStrategy(), Pooling2dLayer::ExecuteStrategy(), Pooling3dLayer::ExecuteStrategy(), 
ElementwiseUnaryLayer::ExecuteStrategy(), ReduceLayer::ExecuteStrategy(), SpaceToBatchNdLayer::ExecuteStrategy(), DepthToSpaceLayer::ExecuteStrategy(), MeanLayer::ExecuteStrategy(), SpaceToDepthLayer::ExecuteStrategy(), LogicalBinaryLayer::ExecuteStrategy(), StandInLayer::ExecuteStrategy(), PadLayer::ExecuteStrategy(), PreluLayer::ExecuteStrategy(), ComparisonLayer::ExecuteStrategy(), FullyConnectedLayer::ExecuteStrategy(), Convolution3dLayer::ExecuteStrategy(), Convolution2dLayer::ExecuteStrategy(), DepthwiseConvolution2dLayer::ExecuteStrategy(), BatchNormalizationLayer::ExecuteStrategy(), ConstantLayer::ExecuteStrategy(), DetectionPostProcessLayer::ExecuteStrategy(), OutputLayer::ExecuteStrategy(), TransposeConvolution2dLayer::ExecuteStrategy(), SplitterLayer::ExecuteStrategy(), ConcatLayer::ExecuteStrategy(), LstmLayer::ExecuteStrategy(), UnidirectionalSequenceLstmLayer::ExecuteStrategy(), ReshapeLayer::ExecuteStrategy(), LayerWithParameters< StridedSliceDescriptor >::ExecuteStrategy(), TransposeLayer::ExecuteStrategy(), PermuteLayer::ExecuteStrategy(), QuantizedLstmLayer::ExecuteStrategy(), QLstmLayer::ExecuteStrategy(), Layer::ExecuteStrategy(), BindableLayer::ExecuteStrategy(), armnn::optimizations::pad_fold::FoldPadIntoLayer2dImpl(), armnn::InsertConvertFp16ToFp32LayersBefore(), armnn::InsertConvertFp32ToFp16LayersAfter(), armnn::IsLayerOptimizable(), NeonBackend::OptimizeSubgraphView(), ClBackend::OptimizeSubgraphView(), Layer::PrepInfoAndDesc(), TransposeAsReshapeImpl::Run(), PermuteAsReshapeImpl::Run(), OptimizeConsecutiveReshapesImpl::Run(), FuseBatchNorm< ConvLayer, ArmnnType, T >::Run(), and ErasedLayerNamesObservable::Update().

◆ GetNameStr()

◆ GetNumInputSlots()

◆ GetNumOutputSlots()

◆ GetOutputHandler() [1/2]

OutputHandler& GetOutputHandler ( unsigned int  i = 0)
inline

Definition at line 250 of file Layer.hpp.

251  {
252  return const_cast<OutputHandler&>(const_cast<const Layer*>(this)->GetOutputHandler(i));
253  }

References Layer::GetOutputHandler().

◆ GetOutputHandler() [2/2]

◆ GetOutputSlot() [1/2]

const OutputSlot& GetOutputSlot ( unsigned int  index = 0) const
inline override virtual

Get the const output slot handle by slot index.

Implements IConnectableLayer.

Examples
AsyncExecutionSample.cpp, and SimpleSample.cpp.

Definition at line 339 of file Layer.hpp.

339 { return m_OutputSlots.at(index); }

Referenced by Graph::AddCompatibilityLayers(), armnn::ApplyBackendOptimizations(), armnn::AttemptBackendAssignment(), OutputSlot::CalculateIndexOnOwner(), armnn::ChainReduceLayers(), armnn::CheckScaleSetOnQuantizedType(), armnn::ConnectedToLayerType(), armnn::ConnectedToLayerWithNCHW(), SplitterLayer::CreateTensorHandles(), ConcatLayer::CreateTensorHandles(), Layer::CreateTensorHandles(), armnn::optimizations::pad_fold::FoldPadIntoLayer2dImpl(), GenerateUniqueOutputName(), Layer::GetDataType(), armnn::GetLayerInOutDatatype(), InputSlot::Insert(), armnn::InsertConvertFp16ToFp32LayersBefore(), armnn::InsertConvertFp32ToFp16LayersAfter(), armnn::InsertDebugLayerAfter(), IsConnected(), RefBackend::OptimizeSubgraphView(), ClBackend::OptimizeSubgraphView(), GpuFsaBackend::OptimizeSubgraphView(), armnn::RemoveReshapeLayer(), PermuteAsReshapeImpl::Run(), TransposeAsReshapeImpl::Run(), OptimizeConsecutiveReshapesImpl::Run(), DeleteBroadcastToImpl::Run(), OptimizeInverseConversionsImpl::Run(), PermuteAndBatchToSpaceAsDepthToSpaceImpl< PermuteType >::Run(), MoveTransposeUpImpl::Run(), MovePermuteUpImpl::Run(), AddBroadcastReshapeLayerImpl::Run(), FuseBatchNorm< ConvLayer, ArmnnType, T >::Run(), armnn::SelectTensorHandleStrategy(), Layer::ValidateAndCopyShape(), ElementwiseBaseLayer::ValidateTensorShapesFromInputs(), QuantizeLayer::ValidateTensorShapesFromInputs(), ActivationLayer::ValidateTensorShapesFromInputs(), ChannelShuffleLayer::ValidateTensorShapesFromInputs(), FillLayer::ValidateTensorShapesFromInputs(), ConvertFp32ToFp16Layer::ValidateTensorShapesFromInputs(), RankLayer::ValidateTensorShapesFromInputs(), SliceLayer::ValidateTensorShapesFromInputs(), SoftmaxLayer::ValidateTensorShapesFromInputs(), FakeQuantizationLayer::ValidateTensorShapesFromInputs(), Pooling3dLayer::ValidateTensorShapesFromInputs(), FloorLayer::ValidateTensorShapesFromInputs(), SwitchLayer::ValidateTensorShapesFromInputs(), TransposeLayer::ValidateTensorShapesFromInputs(), InstanceNormalizationLayer::ValidateTensorShapesFromInputs(), L2NormalizationLayer::ValidateTensorShapesFromInputs(), Pooling2dLayer::ValidateTensorShapesFromInputs(), MemImportLayer::ValidateTensorShapesFromInputs(), ConvertFp16ToFp32Layer::ValidateTensorShapesFromInputs(), MemCopyLayer::ValidateTensorShapesFromInputs(), MergeLayer::ValidateTensorShapesFromInputs(), NormalizationLayer::ValidateTensorShapesFromInputs(), BatchToSpaceNdLayer::ValidateTensorShapesFromInputs(), DebugLayer::ValidateTensorShapesFromInputs(), PermuteLayer::ValidateTensorShapesFromInputs(), DequantizeLayer::ValidateTensorShapesFromInputs(), StackLayer::ValidateTensorShapesFromInputs(), CastLayer::ValidateTensorShapesFromInputs(), ResizeLayer::ValidateTensorShapesFromInputs(), AbsLayer::ValidateTensorShapesFromInputs(), RsqrtLayer::ValidateTensorShapesFromInputs(), ShapeLayer::ValidateTensorShapesFromInputs(), PadLayer::ValidateTensorShapesFromInputs(), MeanLayer::ValidateTensorShapesFromInputs(), LogSoftmaxLayer::ValidateTensorShapesFromInputs(), ReshapeLayer::ValidateTensorShapesFromInputs(), ConstantLayer::ValidateTensorShapesFromInputs(), BroadcastToLayer::ValidateTensorShapesFromInputs(), FullyConnectedLayer::ValidateTensorShapesFromInputs(), Convolution2dLayer::ValidateTensorShapesFromInputs(), Convolution3dLayer::ValidateTensorShapesFromInputs(), DepthwiseConvolution2dLayer::ValidateTensorShapesFromInputs(), GatherNdLayer::ValidateTensorShapesFromInputs(), ElementwiseBinaryLayer::ValidateTensorShapesFromInputs(), TileLayer::ValidateTensorShapesFromInputs(), 
BatchMatMulLayer::ValidateTensorShapesFromInputs(), ReverseV2Layer::ValidateTensorShapesFromInputs(), ReduceLayer::ValidateTensorShapesFromInputs(), GatherLayer::ValidateTensorShapesFromInputs(), ElementwiseUnaryLayer::ValidateTensorShapesFromInputs(), ArgMinMaxLayer::ValidateTensorShapesFromInputs(), StridedSliceLayer::ValidateTensorShapesFromInputs(), DetectionPostProcessLayer::ValidateTensorShapesFromInputs(), LogicalBinaryLayer::ValidateTensorShapesFromInputs(), DepthToSpaceLayer::ValidateTensorShapesFromInputs(), ComparisonLayer::ValidateTensorShapesFromInputs(), SpaceToDepthLayer::ValidateTensorShapesFromInputs(), PreluLayer::ValidateTensorShapesFromInputs(), TransposeConvolution2dLayer::ValidateTensorShapesFromInputs(), SpaceToBatchNdLayer::ValidateTensorShapesFromInputs(), SplitterLayer::ValidateTensorShapesFromInputs(), LstmLayer::ValidateTensorShapesFromInputs(), ConcatLayer::ValidateTensorShapesFromInputs(), UnidirectionalSequenceLstmLayer::ValidateTensorShapesFromInputs(), BatchNormalizationLayer::ValidateTensorShapesFromInputs(), QuantizedLstmLayer::ValidateTensorShapesFromInputs(), and QLstmLayer::ValidateTensorShapesFromInputs().

◆ GetOutputSlot() [2/2]

OutputSlot& GetOutputSlot ( unsigned int  index = 0)
inline override virtual

Get the output slot handle by slot index.

Implements IConnectableLayer.

Definition at line 340 of file Layer.hpp.

340 { return m_OutputSlots.at(index); }

◆ GetOutputSlots()

const std::vector<OutputSlot>& GetOutputSlots ( ) const
inline

◆ GetParameters()

◆ GetPriority()

LayerPriority GetPriority ( ) const

Definition at line 341 of file Layer.cpp.

342 {
343  constexpr LayerPriority inputPrio = std::numeric_limits<LayerPriority>::lowest();
344  constexpr LayerPriority outputPrio = std::numeric_limits<LayerPriority>::max();
345 
346  if (GetType() == LayerType::Input)
347  {
348  m_Priority = inputPrio;
349  }
350  else if (GetType() == LayerType::Output)
351  {
352  m_Priority = outputPrio;
353  }
354  else if (m_Priority == 0)
355  {
356  if (m_Visiting)
357  {
358  throw GraphValidationException("Graph has circular dependencies: cannot walk");
359  }
360 
361  auto maxPrio = [](const LayerPriority prio, const InputSlot& slot) -> LayerPriority
362  {
363  const OutputSlot *outputSlot = slot.GetConnectedOutputSlot();
364  if (outputSlot)
365  {
366  const Layer& input = outputSlot->GetOwningLayer();
367  return std::max(prio, input.GetPriority());
368  }
369  else
370  {
371  // unconnected input slot
372  return prio;
373  }
374  };
375 
376  m_Visiting = true;
377  LayerPriority parentPrio = std::accumulate(GetInputSlots().cbegin(), GetInputSlots().cend(), 0U, maxPrio);
378  m_Visiting = false;
379 
380  if (parentPrio >= outputPrio)
381  {
382  throw GraphValidationException("Graph has too many edges");
383  }
384 
385  m_Priority = parentPrio + 1U;
386  }
387 
388  return m_Priority;
389 }

References Layer::GetInputSlots(), OutputSlot::GetOwningLayer(), Layer::GetPriority(), Layer::GetType(), armnn::Input, and armnn::Output.

Referenced by Layer::GetPriority(), and SquashEqualSiblingsImpl< Comparable >::Run().
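
As a sketch of how the priorities can be consumed, layers can be sorted so that every producer precedes its consumers. The helper name is illustrative, and ResetPriority() is assumed to have been called on all layers after any graph modification:

#include <Layer.hpp>
#include <algorithm>
#include <vector>

void SortTopologically(std::vector<armnn::Layer*>& layers)
{
    // Input layers report the lowest priority, every other layer reports one more
    // than the highest-priority layer feeding it, and Output layers report the
    // maximum, so an ascending sort yields a valid topological order.
    std::sort(layers.begin(), layers.end(),
              [](const armnn::Layer* a, const armnn::Layer* b)
              { return a->GetPriority() < b->GetPriority(); });
}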

◆ GetRelatedLayerNames()

const std::list<std::string>& GetRelatedLayerNames ( )
inline

Definition at line 347 of file Layer.hpp.

347 { return m_RelatedLayerNames; }

Referenced by ErasedLayerNamesObservable::Update().

◆ GetShapeInferenceMethod()

ShapeInferenceMethod GetShapeInferenceMethod ( ) const
inline

Definition at line 255 of file Layer.hpp.

255 { return m_ShapeInferenceMethod; };

References Layer::m_ShapeInferenceMethod.

◆ GetType()

LayerType GetType ( ) const
inline override virtual

Returns the armnn::LayerType of this layer.

Implements IConnectableLayer.

Definition at line 286 of file Layer.hpp.

286 { return m_Type; }

Referenced by armnn::ApplyBackendOptimizations(), armnn::AssertNumberOfInputSlots(), armnn::AttemptBackendAssignment(), armnn::BuildAddMulAddTensorInfoLists(), armnn::CalculateEdgeStrategy(), armnn::CalculateSlotOption(), armnn::CalculateSlotOptionForInput(), armnn::CheckScaleSetOnQuantizedType(), armnn::ConnectedToLayerType(), GenerateUniqueName(), GenerateUniqueOutputName(), Layer::GetPriority(), GetTosaMappingFromLayer(), LoadedNetwork::ImportInputs(), LoadedNetwork::ImportOutputs(), Layer::InferOutputShapes(), ReshapeLayer::IsEqual(), TransposeLayer::IsEqual(), PermuteLayer::IsEqual(), TransposeLayer::IsInverse(), PermuteLayer::IsInverse(), IsLayerOfType(), armnn::IsLayerSupported(), armnn::IsSequenceLayerType(), LayerNameAndTypeCheck::operator()(), TosaRefBackend::OptimizeSubgraphView(), RefBackend::OptimizeSubgraphView(), NeonBackend::OptimizeSubgraphView(), ClBackend::OptimizeSubgraphView(), GpuFsaBackend::OptimizeSubgraphView(), armnn::RemoveReshapeLayer(), armnn::ReturnWithError(), ConvertFp32NetworkToFp16Impl::Run(), OptimizeConsecutiveReshapesImpl::Run(), AddDebugImpl::Run(), DeleteBroadcastToImpl::Run(), ConvertConstPermuteLayersToConstLayers::Run(), OptimizeInverseConversionsImpl::Run(), PermuteAndBatchToSpaceAsDepthToSpaceImpl< PermuteType >::Run(), ConvertConstDequantisationLayersToConstLayersImpl::Run(), PermuteDepthwiseConv2dWeightsImpl::Run(), AddBroadcastReshapeLayerImpl::Run(), FuseBatchNorm< ConvLayer, ArmnnType, T >::Run(), AddDebugToFileImpl::Run(), OptimizeForTypeImpl< BaseType, OptimizeForConnectionImpl< BaseType, ChildType, Wrapped > >::Run(), armnn::SelectTensorHandleStrategy(), ElementwiseBaseLayer::ValidateTensorShapesFromInputs(), ElementwiseBinaryLayer::ValidateTensorShapesFromInputs(), ElementwiseUnaryLayer::ValidateTensorShapesFromInputs(), and Layer::VerifyLayerConnections().

◆ InferOutputShapes()

std::vector< TensorShape > InferOutputShapes ( const std::vector< TensorShape > &  inputShapes) const
override virtual

Infer the shape of the output(s) based on the provided input shape(s)

Implements IConnectableLayer.

Reimplemented in QLstmLayer, QuantizedLstmLayer, ConcatLayer, LstmLayer, UnidirectionalSequenceLstmLayer, SplitterLayer, DetectionPostProcessLayer, TransposeConvolution2dLayer, Convolution2dLayer, Convolution3dLayer, DepthwiseConvolution2dLayer, FullyConnectedLayer, ConstantLayer, ReshapeLayer, MeanLayer, PadLayer, StandInLayer, BatchToSpaceNdLayer, PermuteLayer, Pooling2dLayer, Pooling3dLayer, ResizeLayer, ShapeLayer, SliceLayer, StackLayer, MergeLayer, RankLayer, TransposeLayer, ComparisonLayer, DepthToSpaceLayer, LogicalBinaryLayer, PreluLayer, ReduceLayer, ReverseV2Layer, SpaceToBatchNdLayer, SpaceToDepthLayer, ArgMinMaxLayer, ElementwiseBinaryLayer, ElementwiseUnaryLayer, GatherLayer, StridedSliceLayer, BatchMatMulLayer, GatherNdLayer, TileLayer, BroadcastToLayer, and ElementwiseBaseLayer.

Definition at line 410 of file Layer.cpp.

411 {
412  ARMNN_ASSERT(GetNumInputSlots() != 0);
413  ARMNN_ASSERT(GetNumOutputSlots() != 0);
414 
415  // By default we return what we got, meaning the output shape(s) are the same as the input(s).
416  // This only works if the number of inputs and outputs are the same. Since we are in the Layer
417  // base class, this means the implementation needs to be overridden in the specific layers for
418  // the other cases. So the missing implementation justifies the UnimplementedException.
419 
420  if (GetNumInputSlots() != GetNumOutputSlots())
421  {
422  throw UnimplementedException(
423  fmt::format("Default implementation for InferOutputShapes can only be used for "
424  "layers with the same number of input and output slots. This doesn't "
425  "hold for {0} layer {1} (#inputs={2} #outputs={3}) {4}",
426  GetLayerTypeAsCString(GetType()),
427  GetNameStr(),
428  GetNumInputSlots(),
429  GetNumOutputSlots(),
430  CHECK_LOCATION().AsString()));
431  }
432  return inputShapes;
433 }

References ARMNN_ASSERT, CHECK_LOCATION, armnn::GetLayerTypeAsCString(), Layer::GetNameStr(), Layer::GetNumInputSlots(), Layer::GetNumOutputSlots(), and Layer::GetType().

Referenced by QuantizeLayer::ValidateTensorShapesFromInputs(), ActivationLayer::ValidateTensorShapesFromInputs(), ChannelShuffleLayer::ValidateTensorShapesFromInputs(), FillLayer::ValidateTensorShapesFromInputs(), ConvertFp32ToFp16Layer::ValidateTensorShapesFromInputs(), NormalizationLayer::ValidateTensorShapesFromInputs(), MemImportLayer::ValidateTensorShapesFromInputs(), MemCopyLayer::ValidateTensorShapesFromInputs(), RsqrtLayer::ValidateTensorShapesFromInputs(), L2NormalizationLayer::ValidateTensorShapesFromInputs(), InstanceNormalizationLayer::ValidateTensorShapesFromInputs(), SwitchLayer::ValidateTensorShapesFromInputs(), AbsLayer::ValidateTensorShapesFromInputs(), FakeQuantizationLayer::ValidateTensorShapesFromInputs(), DebugLayer::ValidateTensorShapesFromInputs(), CastLayer::ValidateTensorShapesFromInputs(), ConvertFp16ToFp32Layer::ValidateTensorShapesFromInputs(), FloorLayer::ValidateTensorShapesFromInputs(), SoftmaxLayer::ValidateTensorShapesFromInputs(), DequantizeLayer::ValidateTensorShapesFromInputs(), LogSoftmaxLayer::ValidateTensorShapesFromInputs(), and BatchNormalizationLayer::ValidateTensorShapesFromInputs().
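
A brief usage sketch of the default behaviour (helper name illustrative): for a layer with matching input and output slot counts the input shapes are echoed back unchanged; otherwise the layer must override InferOutputShapes() or the call throws UnimplementedException:

#include <Layer.hpp>
#include <armnn/Tensor.hpp>
#include <vector>

std::vector<armnn::TensorShape> InferDefaultShapes(const armnn::Layer& layer,
                                                   const armnn::TensorShape& inputShape)
{
    // For e.g. an activation layer this simply returns { inputShape }.
    return layer.InferOutputShapes({ inputShape });
}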

◆ IsOutputUnconnected()

bool IsOutputUnconnected ( )
inline

Definition at line 270 of file Layer.hpp.

271  {
272  unsigned int numConnections = 0;
273 
274  for (auto&& output : GetOutputSlots())
275  {
276  numConnections += output.GetNumConnections();
277  }
278 
279  return (GetNumOutputSlots() > 0) && (numConnections == 0);
280  }

References Layer::GetNumOutputSlots(), and Layer::GetOutputSlots().

Referenced by OptimizeForConnectionImpl< BaseType, ChildType, Wrapped >::Run(), and OptimizeForExclusiveConnectionImpl< BaseType, ChildType, Wrapped >::Run().

◆ OperateOnConstantTensors()

void OperateOnConstantTensors ( Op  op)
inline

Definition at line 319 of file Layer.hpp.

320  {
321  for (auto constant : GetConstantTensorsByRef())
322  {
323  if (constant.get())
324  {
325  op(constant);
326  }
327  }
328  };

References Layer::GetConstantTensorsByRef().

Referenced by Layer::ReleaseConstantData(), and ConvertConstants< Converter, Predicate >::Run().

◆ PrepInfoAndDesc()
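
The member list describes PrepInfoAndDesc() as a helper to reduce duplication in *Layer::CreateWorkload. A hedged sketch of that pattern, modelled on SoftmaxLayer (code sits inside namespace armnn; the exact body of each layer's CreateWorkload may differ):

std::unique_ptr<IWorkload> SoftmaxLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
    SoftmaxQueueDescriptor descriptor;
    SetAdditionalInfo(descriptor);   // forward any additional-info object to the workload

    // PrepInfoAndDesc() fills descriptor.m_Inputs / descriptor.m_Outputs via the
    // CollectQueueDescriptor* helpers above and returns the matching WorkloadInfo.
    return factory.CreateWorkload(LayerType::Softmax, descriptor, PrepInfoAndDesc(descriptor));
}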

◆ ReleaseConstantData()

void ReleaseConstantData ( )
virtual

Reimplemented in Convolution2dLayer, DepthwiseConvolution2dLayer, FullyConnectedLayer, and ConstantLayer.

Definition at line 317 of file Layer.cpp.

318 {
319  // Now free up the static data.
320  OperateOnConstantTensors([](std::shared_ptr<ConstTensorHandle>& handle)
321  {
322  handle.reset();
323  });
324 }

References Layer::OperateOnConstantTensors().

◆ Reparent()

virtual void Reparent ( Graph &  dest,
std::list< Layer * >::const_iterator  iterator 
)
pure virtual

Referenced by Graph::operator=().

◆ ResetPriority()

void ResetPriority ( ) const

Definition at line 335 of file Layer.cpp.

336 {
337  m_Priority = 0;
338  m_Visiting = false;
339 }

◆ SerializeLayerParameters()

void SerializeLayerParameters ( ParameterStringifyFunction &  fn) const
virtual

Helper to serialize the layer parameters to string.

(currently used in DotSerializer and company).

Reimplemented in ConstantLayer, Convolution2dLayer, Convolution3dLayer, DepthwiseConvolution2dLayer, LayerWithParameters< Parameters >, LayerWithParameters< SoftmaxDescriptor >, LayerWithParameters< FakeQuantizationDescriptor >, LayerWithParameters< ReduceDescriptor >, LayerWithParameters< LstmDescriptor >, LayerWithParameters< ChannelShuffleDescriptor >, LayerWithParameters< LogSoftmaxDescriptor >, LayerWithParameters< PreCompiledDescriptor >, LayerWithParameters< SpaceToBatchNdDescriptor >, LayerWithParameters< BatchToSpaceNdDescriptor >, LayerWithParameters< PermuteDescriptor >, LayerWithParameters< GatherDescriptor >, LayerWithParameters< ElementwiseUnaryDescriptor >, LayerWithParameters< DepthToSpaceDescriptor >, LayerWithParameters< OriginsDescriptor >, LayerWithParameters< SpaceToDepthDescriptor >, LayerWithParameters< ReshapeDescriptor >, LayerWithParameters< ViewsDescriptor >, LayerWithParameters< Pooling2dDescriptor >, LayerWithParameters< Convolution2dDescriptor >, LayerWithParameters< ActivationDescriptor >, LayerWithParameters< StandInDescriptor >, LayerWithParameters< MeanDescriptor >, LayerWithParameters< StackDescriptor >, LayerWithParameters< TransposeDescriptor >, LayerWithParameters< InstanceNormalizationDescriptor >, LayerWithParameters< ComparisonDescriptor >, LayerWithParameters< TransposeConvolution2dDescriptor >, LayerWithParameters< BroadcastToDescriptor >, LayerWithParameters< BatchNormalizationDescriptor >, LayerWithParameters< FusedDescriptor >, LayerWithParameters< BatchMatMulDescriptor >, LayerWithParameters< TileDescriptor >, LayerWithParameters< ArgMinMaxDescriptor >, LayerWithParameters< LogicalBinaryDescriptor >, LayerWithParameters< DetectionPostProcessDescriptor >, LayerWithParameters< PadDescriptor >, LayerWithParameters< L2NormalizationDescriptor >, LayerWithParameters< Convolution3dDescriptor >, LayerWithParameters< SliceDescriptor >, LayerWithParameters< QLstmDescriptor >, LayerWithParameters< FillDescriptor >, LayerWithParameters< DepthwiseConvolution2dDescriptor >, LayerWithParameters< NormalizationDescriptor >, LayerWithParameters< FullyConnectedDescriptor >, LayerWithParameters< ResizeDescriptor >, LayerWithParameters< Pooling3dDescriptor >, LayerWithParameters< ElementwiseBinaryDescriptor >, and LayerWithParameters< StridedSliceDescriptor >.

Definition at line 518 of file Layer.cpp.

519 {
520  std::string guid = std::to_string(m_Guid);
521  std::string layerType = GetLayerTypeAsCString(m_Type);
522  std::string backendId = std::string(m_BackendId);
523  if (!(guid.compare("") == 0) && !guid.empty())
524  {
525  fn("Guid", guid);
526  }
527  if(!(m_LayerName.compare("") == 0) && !m_LayerName.empty())
528  {
529  fn("LayerName",m_LayerName);
530  }
531  if(!(layerType.compare("") == 0) && !layerType.empty())
532  {
533  fn("LayerType",layerType);
534  }
535  if(!(backendId.compare("") == 0) && !backendId.empty())
536  {
537  fn("BackendID",backendId);
538  }
539  std::shared_ptr<ActivationDescriptor>
540  activationDescPtr = GetAdditionalInformation<ActivationDescriptor>();
541 
542  if (activationDescPtr)
543  {
544  StringifyLayerParameters<ActivationDescriptor>::Serialize(fn, *activationDescPtr.get());
545  }
546 }

References armnn::GetLayerTypeAsCString(), and StringifyLayerParameters< LayerParameter >::Serialize().

Referenced by LayerWithParameters< StridedSliceDescriptor >::SerializeLayerParameters(), and ConstantLayer::SerializeLayerParameters().
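
A short usage sketch, assuming ParameterStringifyFunction is the std::function<void(const std::string&, const std::string&)> alias declared in SerializeLayerParameters.hpp (helper name illustrative):

#include <Layer.hpp>
#include <iostream>
#include <string>

void PrintLayerParameters(const armnn::Layer& layer)
{
    // The callback receives one (name, value) pair per serialized parameter,
    // e.g. "Guid", "LayerName", "LayerType" and "BackendID" as emitted above.
    armnn::ParameterStringifyFunction printer =
        [](const std::string& name, const std::string& value)
        { std::cout << name << ": " << value << "\n"; };

    layer.SerializeLayerParameters(printer);
}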

◆ SetAdditionalInfo()

void SetAdditionalInfo ( QueueDescriptor &  descriptor) const
protected

Definition at line 287 of file Layer.cpp.

288 {
289  descriptor.m_AdditionalInfoObject = m_AdditionalInfoObject.get();
290 }

References QueueDescriptor::m_AdditionalInfoObject, and Layer::m_AdditionalInfoObject.

Referenced by ActivationLayer::CreateWorkload(), BroadcastToLayer::CreateWorkload(), ConvertFp32ToFp16Layer::CreateWorkload(), FillLayer::CreateWorkload(), QuantizeLayer::CreateWorkload(), RankLayer::CreateWorkload(), UnmapLayer::CreateWorkload(), AbsLayer::CreateWorkload(), InstanceNormalizationLayer::CreateWorkload(), L2NormalizationLayer::CreateWorkload(), SplitterLayer::CreateWorkload(), StackLayer::CreateWorkload(), StridedSliceLayer::CreateWorkload(), SoftmaxLayer::CreateWorkload(), SwitchLayer::CreateWorkload(), ShapeLayer::CreateWorkload(), TileLayer::CreateWorkload(), CastLayer::CreateWorkload(), DebugLayer::CreateWorkload(), MapLayer::CreateWorkload(), MemCopyLayer::CreateWorkload(), BatchMatMulLayer::CreateWorkload(), DequantizeLayer::CreateWorkload(), MemImportLayer::CreateWorkload(), NormalizationLayer::CreateWorkload(), Pooling2dLayer::CreateWorkload(), Pooling3dLayer::CreateWorkload(), FakeQuantizationLayer::CreateWorkload(), ConcatLayer::CreateWorkload(), FloorLayer::CreateWorkload(), AdditionLayer::CreateWorkload(), ConvertFp16ToFp32Layer::CreateWorkload(), ResizeLayer::CreateWorkload(), RsqrtLayer::CreateWorkload(), BatchToSpaceNdLayer::CreateWorkload(), SliceLayer::CreateWorkload(), GatherNdLayer::CreateWorkload(), SpaceToBatchNdLayer::CreateWorkload(), SubtractionLayer::CreateWorkload(), ArgMinMaxLayer::CreateWorkload(), SpaceToDepthLayer::CreateWorkload(), TransposeLayer::CreateWorkload(), GatherLayer::CreateWorkload(), MaximumLayer::CreateWorkload(), ReverseV2Layer::CreateWorkload(), ReduceLayer::CreateWorkload(), MinimumLayer::CreateWorkload(), LogSoftmaxLayer::CreateWorkload(), MultiplicationLayer::CreateWorkload(), PadLayer::CreateWorkload(), PermuteLayer::CreateWorkload(), DivisionLayer::CreateWorkload(), MeanLayer::CreateWorkload(), PreluLayer::CreateWorkload(), ElementwiseBinaryLayer::CreateWorkload(), ComparisonLayer::CreateWorkload(), DepthToSpaceLayer::CreateWorkload(), ReshapeLayer::CreateWorkload(), ConstantLayer::CreateWorkload(), ChannelShuffleLayer::CreateWorkload(), FullyConnectedLayer::CreateWorkload(), Convolution2dLayer::CreateWorkload(), Convolution3dLayer::CreateWorkload(), DepthwiseConvolution2dLayer::CreateWorkload(), FusedLayer::CreateWorkload(), DetectionPostProcessLayer::CreateWorkload(), TransposeConvolution2dLayer::CreateWorkload(), PreCompiledLayer::CreateWorkload(), LstmLayer::CreateWorkload(), UnidirectionalSequenceLstmLayer::CreateWorkload(), BatchNormalizationLayer::CreateWorkload(), QuantizedLstmLayer::CreateWorkload(), and QLstmLayer::CreateWorkload().

◆ SetAdditionalInfoForObject()

void SetAdditionalInfoForObject ( const AdditionalInfoObjectPtr &  additionalInfo)
inline

Definition at line 373 of file Layer.hpp.

374  {
375  m_AdditionalInfoObject = additionalInfo;
376  }

References Layer::m_AdditionalInfoObject.

Referenced by NeonBackend::OptimizeSubgraphView().

◆ SetAllowExpandedDims()

void SetAllowExpandedDims ( bool  allowExpandedDims)
inline

Definition at line 362 of file Layer.hpp.

363  {
364  m_AllowExpandedDims = allowExpandedDims;
365  }

◆ SetBackendId()

void SetBackendId ( const BackendId &  id)
inline override virtual

Set the backend of the IConnectableLayer.

By using SetBackendId() we guarantee that the input backend supports that layer (IsLayerSupported returns true for a specific backend). If there is no guarantee the input backend supports that layer use BackendSelectionHint().

Implements IConnectableLayer.

Definition at line 291 of file Layer.hpp.

291 { m_BackendId = id; }

Referenced by Graph::AddCompatibilityLayers(), NetworkImpl::AddPrecompiledLayer(), armnn::AttemptBackendAssignment(), and armnn::InsertDebugLayerAfter().

◆ SetGuid()

void SetGuid ( LayerGuid  guid)
inline

Definition at line 342 of file Layer.hpp.

342 { m_Guid = guid; }

◆ SetShapeInferenceMethod()

void SetShapeInferenceMethod ( ShapeInferenceMethod  shapeInferenceMethod)
inline

Definition at line 357 of file Layer.hpp.

358  {
359  m_ShapeInferenceMethod = shapeInferenceMethod;
360  }

References Layer::m_ShapeInferenceMethod.

◆ ValidateAndCopyShape()

void ValidateAndCopyShape ( const TensorShape &  outputShape,
const TensorShape &  inferredShape,
const ShapeInferenceMethod  shapeInferenceMethod,
const std::string &  layerName,
const unsigned int  outputSlotIndex = 0 
)
protected

Definition at line 435 of file Layer.cpp.

440 {
441  if (shapeInferenceMethod == ShapeInferenceMethod::ValidateOnly)
442  {
443  if (m_AllowExpandedDims)
444  {
445  std::vector<unsigned int> outputDims = armnnUtils::SqueezeDims(outputShape);
446  std::vector<unsigned int> inferredDims = armnnUtils::SqueezeDims(inferredShape);
447 
448  if (outputDims.size() != inferredDims.size())
449  {
450  std::stringstream ss;
451  ss << layerName << ": TensorShape set on OutputSlot[" << outputSlotIndex <<
452  "] does not match the inferred shape. ";
453  ss << outputShape << " != " << inferredShape;
454  throw LayerValidationException(ss.str());
455  }
456  for (unsigned int i = 0; i < outputDims.size(); ++i)
457  {
458  if (outputDims[i] != inferredDims[i])
459  {
460  std::stringstream ss;
461  ss << layerName << ": TensorShape set on OutputSlot[" << outputSlotIndex <<
462  "] does not match the inferred shape at dimension index [";
463  ss << i << "] " << outputShape << " != " << inferredShape;
464  throw LayerValidationException(ss.str());
465  }
466  }
467  return;
468  }
469  else
470  {
471  ConditionalThrowIfNotEqual<LayerValidationException>(
472  layerName + ": TensorShape set on OutputSlot[0] does not match the inferred shape.",
473  outputShape,
474  inferredShape);
475  return;
476  }
477  }
478 
479  if (outputShape.GetDimensionality() == Dimensionality::Specified)
480  {
481  for (unsigned int i = 0; i < outputShape.GetNumDimensions(); ++i)
482  {
483  if (outputShape.GetDimensionSpecificity(i) && outputShape[i] != inferredShape[i])
484  {
485  std::stringstream ss;
486  ss << layerName << ": TensorShape set on OutputSlot[" << outputSlotIndex <<
487  "] does not match the inferred shape at dimension index [";
488  ss << i << "] " << outputShape << " != " << inferredShape;
489  throw LayerValidationException(ss.str());
490  }
491  }
492  }
493 
494  TensorInfo info = GetOutputSlot(outputSlotIndex).GetTensorInfo();
495 
496  armnn::TensorInfo inferredTensorInfo(inferredShape,
497  info.GetDataType(),
498  info.GetQuantizationScale(),
499  info.GetQuantizationOffset());
500 
501  GetOutputSlot(outputSlotIndex).SetTensorInfo(inferredTensorInfo);
502 }

References TensorShape::GetDimensionality(), TensorShape::GetDimensionSpecificity(), TensorShape::GetNumDimensions(), Layer::GetOutputSlot(), OutputSlot::GetTensorInfo(), armnn::info, OutputSlot::SetTensorInfo(), armnn::Specified, armnnUtils::SqueezeDims(), and armnn::ValidateOnly.

Referenced by ElementwiseBaseLayer::ValidateTensorShapesFromInputs(), QuantizeLayer::ValidateTensorShapesFromInputs(), ActivationLayer::ValidateTensorShapesFromInputs(), ChannelShuffleLayer::ValidateTensorShapesFromInputs(), RankLayer::ValidateTensorShapesFromInputs(), ConvertFp32ToFp16Layer::ValidateTensorShapesFromInputs(), AbsLayer::ValidateTensorShapesFromInputs(), DebugLayer::ValidateTensorShapesFromInputs(), TransposeLayer::ValidateTensorShapesFromInputs(), MemCopyLayer::ValidateTensorShapesFromInputs(), CastLayer::ValidateTensorShapesFromInputs(), SoftmaxLayer::ValidateTensorShapesFromInputs(), MemImportLayer::ValidateTensorShapesFromInputs(), ShapeLayer::ValidateTensorShapesFromInputs(), MergeLayer::ValidateTensorShapesFromInputs(), DequantizeLayer::ValidateTensorShapesFromInputs(), NormalizationLayer::ValidateTensorShapesFromInputs(), PermuteLayer::ValidateTensorShapesFromInputs(), Pooling2dLayer::ValidateTensorShapesFromInputs(), Pooling3dLayer::ValidateTensorShapesFromInputs(), BatchToSpaceNdLayer::ValidateTensorShapesFromInputs(), FakeQuantizationLayer::ValidateTensorShapesFromInputs(), FloorLayer::ValidateTensorShapesFromInputs(), ResizeLayer::ValidateTensorShapesFromInputs(), ConvertFp16ToFp32Layer::ValidateTensorShapesFromInputs(), RsqrtLayer::ValidateTensorShapesFromInputs(), SliceLayer::ValidateTensorShapesFromInputs(), InstanceNormalizationLayer::ValidateTensorShapesFromInputs(), StackLayer::ValidateTensorShapesFromInputs(), L2NormalizationLayer::ValidateTensorShapesFromInputs(), SwitchLayer::ValidateTensorShapesFromInputs(), LogSoftmaxLayer::ValidateTensorShapesFromInputs(), MeanLayer::ValidateTensorShapesFromInputs(), PadLayer::ValidateTensorShapesFromInputs(), ReshapeLayer::ValidateTensorShapesFromInputs(), Convolution2dLayer::ValidateTensorShapesFromInputs(), DepthwiseConvolution2dLayer::ValidateTensorShapesFromInputs(), FullyConnectedLayer::ValidateTensorShapesFromInputs(), Convolution3dLayer::ValidateTensorShapesFromInputs(), BroadcastToLayer::ValidateTensorShapesFromInputs(), GatherNdLayer::ValidateTensorShapesFromInputs(), BatchMatMulLayer::ValidateTensorShapesFromInputs(), ElementwiseBinaryLayer::ValidateTensorShapesFromInputs(), TileLayer::ValidateTensorShapesFromInputs(), GatherLayer::ValidateTensorShapesFromInputs(), ArgMinMaxLayer::ValidateTensorShapesFromInputs(), ReverseV2Layer::ValidateTensorShapesFromInputs(), StridedSliceLayer::ValidateTensorShapesFromInputs(), ReduceLayer::ValidateTensorShapesFromInputs(), DetectionPostProcessLayer::ValidateTensorShapesFromInputs(), ElementwiseUnaryLayer::ValidateTensorShapesFromInputs(), DepthToSpaceLayer::ValidateTensorShapesFromInputs(), TransposeConvolution2dLayer::ValidateTensorShapesFromInputs(), LogicalBinaryLayer::ValidateTensorShapesFromInputs(), SpaceToBatchNdLayer::ValidateTensorShapesFromInputs(), SpaceToDepthLayer::ValidateTensorShapesFromInputs(), ComparisonLayer::ValidateTensorShapesFromInputs(), PreluLayer::ValidateTensorShapesFromInputs(), SplitterLayer::ValidateTensorShapesFromInputs(), LstmLayer::ValidateTensorShapesFromInputs(), ConcatLayer::ValidateTensorShapesFromInputs(), UnidirectionalSequenceLstmLayer::ValidateTensorShapesFromInputs(), BatchNormalizationLayer::ValidateTensorShapesFromInputs(), QuantizedLstmLayer::ValidateTensorShapesFromInputs(), and QLstmLayer::ValidateTensorShapesFromInputs().
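
For context, a hedged sketch of how a typical one-input/one-output layer combines the shape helpers inside its ValidateTensorShapesFromInputs() override; MyLayer is a hypothetical subclass of armnn::Layer:

void MyLayer::ValidateTensorShapesFromInputs()
{
    // Require exactly one connected input.
    VerifyLayerConnections(1, CHECK_LOCATION());

    const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();

    // Check that the shape set on the output slot is usable for the chosen method.
    VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);

    auto inferredShapes = InferOutputShapes(
        { GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });

    // Either validate the user-set shape or copy the inferred one onto OutputSlot[0].
    ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "MyLayer");
}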

◆ ValidateTensorShapesFromInputs()

virtual void ValidateTensorShapesFromInputs ( )
pure virtual

◆ VerifyLayerConnections()

void VerifyLayerConnections ( unsigned int  expectedConnections,
const CheckLocation &  location 
) const

Definition at line 391 of file Layer.cpp.

392 {
393  ARMNN_ASSERT(GetNumInputSlots() == expectedConnections);
394 
395  for (unsigned int i=0; i<expectedConnections; ++i)
396  {
397  if (GetInputSlot(i).GetConnection() == nullptr)
398  {
399  throw LayerValidationException(
400  fmt::format("Input connection #{0} must be connected "
401  "for {1} layer {2} {3}",
 402  i,
 403  GetLayerTypeAsCString(GetType()),
 404  GetNameStr(),
405  location.AsString()));
406  }
407  }
408 }

References ARMNN_ASSERT, CheckLocation::AsString(), Layer::GetInputSlot(), armnn::GetLayerTypeAsCString(), Layer::GetNameStr(), Layer::GetNumInputSlots(), and Layer::GetType().

Referenced by ElementwiseBaseLayer::ValidateTensorShapesFromInputs(), QuantizeLayer::ValidateTensorShapesFromInputs(), ActivationLayer::ValidateTensorShapesFromInputs(), ChannelShuffleLayer::ValidateTensorShapesFromInputs(), ConvertFp32ToFp16Layer::ValidateTensorShapesFromInputs(), FillLayer::ValidateTensorShapesFromInputs(), RankLayer::ValidateTensorShapesFromInputs(), AbsLayer::ValidateTensorShapesFromInputs(), DebugLayer::ValidateTensorShapesFromInputs(), TransposeLayer::ValidateTensorShapesFromInputs(), UnmapLayer::ValidateTensorShapesFromInputs(), MapLayer::ValidateTensorShapesFromInputs(), SliceLayer::ValidateTensorShapesFromInputs(), CastLayer::ValidateTensorShapesFromInputs(), RsqrtLayer::ValidateTensorShapesFromInputs(), MemCopyLayer::ValidateTensorShapesFromInputs(), MemImportLayer::ValidateTensorShapesFromInputs(), DequantizeLayer::ValidateTensorShapesFromInputs(), MergeLayer::ValidateTensorShapesFromInputs(), NormalizationLayer::ValidateTensorShapesFromInputs(), PermuteLayer::ValidateTensorShapesFromInputs(), Pooling2dLayer::ValidateTensorShapesFromInputs(), Pooling3dLayer::ValidateTensorShapesFromInputs(), BatchToSpaceNdLayer::ValidateTensorShapesFromInputs(), FakeQuantizationLayer::ValidateTensorShapesFromInputs(), ResizeLayer::ValidateTensorShapesFromInputs(), ConvertFp16ToFp32Layer::ValidateTensorShapesFromInputs(), FloorLayer::ValidateTensorShapesFromInputs(), ShapeLayer::ValidateTensorShapesFromInputs(), InstanceNormalizationLayer::ValidateTensorShapesFromInputs(), SoftmaxLayer::ValidateTensorShapesFromInputs(), StackLayer::ValidateTensorShapesFromInputs(), L2NormalizationLayer::ValidateTensorShapesFromInputs(), SwitchLayer::ValidateTensorShapesFromInputs(), LogSoftmaxLayer::ValidateTensorShapesFromInputs(), MeanLayer::ValidateTensorShapesFromInputs(), PadLayer::ValidateTensorShapesFromInputs(), ReshapeLayer::ValidateTensorShapesFromInputs(), Convolution2dLayer::ValidateTensorShapesFromInputs(), Convolution3dLayer::ValidateTensorShapesFromInputs(), DepthwiseConvolution2dLayer::ValidateTensorShapesFromInputs(), BroadcastToLayer::ValidateTensorShapesFromInputs(), GatherNdLayer::ValidateTensorShapesFromInputs(), BatchMatMulLayer::ValidateTensorShapesFromInputs(), TileLayer::ValidateTensorShapesFromInputs(), ElementwiseBinaryLayer::ValidateTensorShapesFromInputs(), StridedSliceLayer::ValidateTensorShapesFromInputs(), ReduceLayer::ValidateTensorShapesFromInputs(), GatherLayer::ValidateTensorShapesFromInputs(), DetectionPostProcessLayer::ValidateTensorShapesFromInputs(), ElementwiseUnaryLayer::ValidateTensorShapesFromInputs(), ArgMinMaxLayer::ValidateTensorShapesFromInputs(), ReverseV2Layer::ValidateTensorShapesFromInputs(), PreluLayer::ValidateTensorShapesFromInputs(), LogicalBinaryLayer::ValidateTensorShapesFromInputs(), TransposeConvolution2dLayer::ValidateTensorShapesFromInputs(), SpaceToDepthLayer::ValidateTensorShapesFromInputs(), DepthToSpaceLayer::ValidateTensorShapesFromInputs(), ComparisonLayer::ValidateTensorShapesFromInputs(), SpaceToBatchNdLayer::ValidateTensorShapesFromInputs(), LstmLayer::ValidateTensorShapesFromInputs(), UnidirectionalSequenceLstmLayer::ValidateTensorShapesFromInputs(), ConcatLayer::ValidateTensorShapesFromInputs(), BatchNormalizationLayer::ValidateTensorShapesFromInputs(), QuantizedLstmLayer::ValidateTensorShapesFromInputs(), and QLstmLayer::ValidateTensorShapesFromInputs().
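
As the list above suggests, derived layers typically call this helper at the start of their ValidateTensorShapesFromInputs() override. The following is a minimal sketch of that pattern; MyCustomLayer is a hypothetical layer used only for illustration and is not part of ArmNN. A fuller sketch of the whole validation flow appears under VerifyShapeInferenceType() below.

// Hypothetical single-input layer; only the relevant override is sketched.
void MyCustomLayer::ValidateTensorShapesFromInputs()
{
    // Asserts that the layer has exactly one input slot and throws
    // LayerValidationException (naming the layer type, the layer name and the
    // call site) if that slot has no connection.
    VerifyLayerConnections(1, CHECK_LOCATION());

    // ... shape inference and validation follow here ...
}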

◆ VerifyShapeInferenceType()

void VerifyShapeInferenceType ( const TensorShape &  outputShape,
ShapeInferenceMethod  shapeInferenceMethod 
)
protected

Definition at line 504 of file Layer.cpp.

505 {
506  if (shapeInferenceMethod == ShapeInferenceMethod::ValidateOnly)
507  {
508  ConditionalThrow<LayerValidationException>(
509  outputShape.GetDimensionality() != Dimensionality::NotSpecified,
510  "Dimensionality can not be NotSpecified while using ShapeInferenceMethod::ValidateOnly");
511 
512  ConditionalThrow<LayerValidationException>(
513  outputShape.AreAllDimensionsSpecified(),
514  "Unspecified dimension while using ShapeInferenceMethod::ValidateOnly");
515  }
516 }

References TensorShape::AreAllDimensionsSpecified(), TensorShape::GetDimensionality(), armnn::NotSpecified, and armnn::ValidateOnly.

Referenced by ElementwiseBaseLayer::ValidateTensorShapesFromInputs(), QuantizeLayer::ValidateTensorShapesFromInputs(), ActivationLayer::ValidateTensorShapesFromInputs(), ChannelShuffleLayer::ValidateTensorShapesFromInputs(), ConvertFp32ToFp16Layer::ValidateTensorShapesFromInputs(), FillLayer::ValidateTensorShapesFromInputs(), RankLayer::ValidateTensorShapesFromInputs(), AbsLayer::ValidateTensorShapesFromInputs(), DebugLayer::ValidateTensorShapesFromInputs(), TransposeLayer::ValidateTensorShapesFromInputs(), CastLayer::ValidateTensorShapesFromInputs(), MemCopyLayer::ValidateTensorShapesFromInputs(), SliceLayer::ValidateTensorShapesFromInputs(), MemImportLayer::ValidateTensorShapesFromInputs(), RsqrtLayer::ValidateTensorShapesFromInputs(), DequantizeLayer::ValidateTensorShapesFromInputs(), MergeLayer::ValidateTensorShapesFromInputs(), NormalizationLayer::ValidateTensorShapesFromInputs(), PermuteLayer::ValidateTensorShapesFromInputs(), Pooling2dLayer::ValidateTensorShapesFromInputs(), Pooling3dLayer::ValidateTensorShapesFromInputs(), BatchToSpaceNdLayer::ValidateTensorShapesFromInputs(), FakeQuantizationLayer::ValidateTensorShapesFromInputs(), ResizeLayer::ValidateTensorShapesFromInputs(), ConvertFp16ToFp32Layer::ValidateTensorShapesFromInputs(), FloorLayer::ValidateTensorShapesFromInputs(), ShapeLayer::ValidateTensorShapesFromInputs(), InstanceNormalizationLayer::ValidateTensorShapesFromInputs(), SoftmaxLayer::ValidateTensorShapesFromInputs(), L2NormalizationLayer::ValidateTensorShapesFromInputs(), StackLayer::ValidateTensorShapesFromInputs(), SwitchLayer::ValidateTensorShapesFromInputs(), LogSoftmaxLayer::ValidateTensorShapesFromInputs(), MeanLayer::ValidateTensorShapesFromInputs(), PadLayer::ValidateTensorShapesFromInputs(), ReshapeLayer::ValidateTensorShapesFromInputs(), Convolution3dLayer::ValidateTensorShapesFromInputs(), BroadcastToLayer::ValidateTensorShapesFromInputs(), DepthwiseConvolution2dLayer::ValidateTensorShapesFromInputs(), FullyConnectedLayer::ValidateTensorShapesFromInputs(), Convolution2dLayer::ValidateTensorShapesFromInputs(), GatherNdLayer::ValidateTensorShapesFromInputs(), BatchMatMulLayer::ValidateTensorShapesFromInputs(), ElementwiseBinaryLayer::ValidateTensorShapesFromInputs(), TileLayer::ValidateTensorShapesFromInputs(), ReverseV2Layer::ValidateTensorShapesFromInputs(), ArgMinMaxLayer::ValidateTensorShapesFromInputs(), ReduceLayer::ValidateTensorShapesFromInputs(), StridedSliceLayer::ValidateTensorShapesFromInputs(), ElementwiseUnaryLayer::ValidateTensorShapesFromInputs(), DetectionPostProcessLayer::ValidateTensorShapesFromInputs(), GatherLayer::ValidateTensorShapesFromInputs(), TransposeConvolution2dLayer::ValidateTensorShapesFromInputs(), DepthToSpaceLayer::ValidateTensorShapesFromInputs(), LogicalBinaryLayer::ValidateTensorShapesFromInputs(), SpaceToBatchNdLayer::ValidateTensorShapesFromInputs(), PreluLayer::ValidateTensorShapesFromInputs(), SpaceToDepthLayer::ValidateTensorShapesFromInputs(), ComparisonLayer::ValidateTensorShapesFromInputs(), SplitterLayer::ValidateTensorShapesFromInputs(), LstmLayer::ValidateTensorShapesFromInputs(), UnidirectionalSequenceLstmLayer::ValidateTensorShapesFromInputs(), ConcatLayer::ValidateTensorShapesFromInputs(), BatchNormalizationLayer::ValidateTensorShapesFromInputs(), QuantizedLstmLayer::ValidateTensorShapesFromInputs(), and QLstmLayer::ValidateTensorShapesFromInputs().
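
In those overrides the helper is normally used together with InferOutputShapes() and ValidateAndCopyShape(), as in the representative sketch below. MyCustomLayer is again a hypothetical one-input layer used only for illustration; the exact sequence varies per layer.

// Representative shape-validation flow for a hypothetical one-input layer.
void MyCustomLayer::ValidateTensorShapesFromInputs()
{
    VerifyLayerConnections(1, CHECK_LOCATION());

    const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();

    // Under ShapeInferenceMethod::ValidateOnly this throws if the output shape
    // is not fully specified; under InferAndValidate it does nothing.
    VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);

    std::vector<TensorShape> inferredShapes =
        InferOutputShapes({ GetInputSlot(0).GetTensorInfo().GetShape() });

    ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "MyCustomLayer");
}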

Friends And Related Function Documentation

◆ Graph

friend class Graph
friend

Definition at line 382 of file Layer.hpp.

Member Data Documentation

◆ m_AdditionalInfoObject

AdditionalInfoObjectPtr m_AdditionalInfoObject
protected

Definition at line 427 of file Layer.hpp.

◆ m_OutputHandlers

std::vector<OutputHandler> m_OutputHandlers
protected

Definition at line 440 of file Layer.hpp.

Referenced by Layer::GetOutputHandler(), and Layer::Layer().

◆ m_ShapeInferenceMethod

ShapeInferenceMethod m_ShapeInferenceMethod
protected

Definition at line 441 of file Layer.hpp.

Referenced by Layer::CloneBase(), Layer::GetShapeInferenceMethod(), Layer::SetShapeInferenceMethod(), ElementwiseBaseLayer::ValidateTensorShapesFromInputs(), QuantizeLayer::ValidateTensorShapesFromInputs(), ActivationLayer::ValidateTensorShapesFromInputs(), ChannelShuffleLayer::ValidateTensorShapesFromInputs(), ConvertFp32ToFp16Layer::ValidateTensorShapesFromInputs(), FillLayer::ValidateTensorShapesFromInputs(), RankLayer::ValidateTensorShapesFromInputs(), InstanceNormalizationLayer::ValidateTensorShapesFromInputs(), SoftmaxLayer::ValidateTensorShapesFromInputs(), SwitchLayer::ValidateTensorShapesFromInputs(), ShapeLayer::ValidateTensorShapesFromInputs(), L2NormalizationLayer::ValidateTensorShapesFromInputs(), BatchToSpaceNdLayer::ValidateTensorShapesFromInputs(), TransposeLayer::ValidateTensorShapesFromInputs(), DebugLayer::ValidateTensorShapesFromInputs(), MemCopyLayer::ValidateTensorShapesFromInputs(), DequantizeLayer::ValidateTensorShapesFromInputs(), MemImportLayer::ValidateTensorShapesFromInputs(), MergeLayer::ValidateTensorShapesFromInputs(), NormalizationLayer::ValidateTensorShapesFromInputs(), PermuteLayer::ValidateTensorShapesFromInputs(), Pooling2dLayer::ValidateTensorShapesFromInputs(), CastLayer::ValidateTensorShapesFromInputs(), Pooling3dLayer::ValidateTensorShapesFromInputs(), FakeQuantizationLayer::ValidateTensorShapesFromInputs(), ResizeLayer::ValidateTensorShapesFromInputs(), AbsLayer::ValidateTensorShapesFromInputs(), FloorLayer::ValidateTensorShapesFromInputs(), RsqrtLayer::ValidateTensorShapesFromInputs(), SliceLayer::ValidateTensorShapesFromInputs(), ConvertFp16ToFp32Layer::ValidateTensorShapesFromInputs(), StackLayer::ValidateTensorShapesFromInputs(), LogSoftmaxLayer::ValidateTensorShapesFromInputs(), MeanLayer::ValidateTensorShapesFromInputs(), PadLayer::ValidateTensorShapesFromInputs(), ReshapeLayer::ValidateTensorShapesFromInputs(), Convolution2dLayer::ValidateTensorShapesFromInputs(), FullyConnectedLayer::ValidateTensorShapesFromInputs(), BroadcastToLayer::ValidateTensorShapesFromInputs(), DepthwiseConvolution2dLayer::ValidateTensorShapesFromInputs(), Convolution3dLayer::ValidateTensorShapesFromInputs(), GatherNdLayer::ValidateTensorShapesFromInputs(), TileLayer::ValidateTensorShapesFromInputs(), ElementwiseBinaryLayer::ValidateTensorShapesFromInputs(), BatchMatMulLayer::ValidateTensorShapesFromInputs(), StridedSliceLayer::ValidateTensorShapesFromInputs(), ReverseV2Layer::ValidateTensorShapesFromInputs(), ArgMinMaxLayer::ValidateTensorShapesFromInputs(), ElementwiseUnaryLayer::ValidateTensorShapesFromInputs(), DetectionPostProcessLayer::ValidateTensorShapesFromInputs(), ReduceLayer::ValidateTensorShapesFromInputs(), GatherLayer::ValidateTensorShapesFromInputs(), TransposeConvolution2dLayer::ValidateTensorShapesFromInputs(), LogicalBinaryLayer::ValidateTensorShapesFromInputs(), SpaceToDepthLayer::ValidateTensorShapesFromInputs(), SpaceToBatchNdLayer::ValidateTensorShapesFromInputs(), PreluLayer::ValidateTensorShapesFromInputs(), ComparisonLayer::ValidateTensorShapesFromInputs(), DepthToSpaceLayer::ValidateTensorShapesFromInputs(), SplitterLayer::ValidateTensorShapesFromInputs(), ConcatLayer::ValidateTensorShapesFromInputs(), LstmLayer::ValidateTensorShapesFromInputs(), UnidirectionalSequenceLstmLayer::ValidateTensorShapesFromInputs(), BatchNormalizationLayer::ValidateTensorShapesFromInputs(), QuantizedLstmLayer::ValidateTensorShapesFromInputs(), and QLstmLayer::ValidateTensorShapesFromInputs().
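
For reference, this member is normally updated through Layer::SetShapeInferenceMethod(), listed above. A minimal, illustrative use is shown below; layer is assumed to be a valid armnn::Layer* (for example obtained while iterating a Graph).

// Illustrative only: switch a layer between the two ShapeInferenceMethod modes.
layer->SetShapeInferenceMethod(armnn::ShapeInferenceMethod::ValidateOnly);      // validate output shapes already set by the user
layer->SetShapeInferenceMethod(armnn::ShapeInferenceMethod::InferAndValidate);  // infer unspecified dimensions, then validate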


The documentation for this class was generated from the following files:
Layer.hpp
Layer.cpp