ArmNN 20.08
Layer Class Reference [abstract]

#include <Layer.hpp>

Inheritance diagram for Layer:
Layer derives from IConnectableLayer and is the base of: AbsLayer, BindableLayer, ConstantLayer, ConvertBf16ToFp32Layer, ConvertFp16ToFp32Layer, ConvertFp32ToBf16Layer, ConvertFp32ToFp16Layer, DebugLayer, DequantizeLayer, ElementwiseBaseLayer, FloorLayer, MemCopyLayer, MemImportLayer, MergeLayer, PreluLayer, QuantizedLstmLayer, QuantizeLayer, RankLayer, RsqrtLayer, SwitchLayer, and LayerWithParameters< Parameters > (instantiated for ActivationDescriptor, ArgMinMaxDescriptor, BatchNormalizationDescriptor, BatchToSpaceNdDescriptor, ComparisonDescriptor, Convolution2dDescriptor, DepthToSpaceDescriptor, DepthwiseConvolution2dDescriptor, DetectionPostProcessDescriptor, ElementwiseUnaryDescriptor, FakeQuantizationDescriptor, FillDescriptor, FullyConnectedDescriptor, GatherDescriptor, InstanceNormalizationDescriptor, L2NormalizationDescriptor, LogSoftmaxDescriptor, LstmDescriptor, MeanDescriptor, NormalizationDescriptor, OriginsDescriptor, PadDescriptor, PermuteDescriptor, Pooling2dDescriptor, PreCompiledDescriptor, QLstmDescriptor, ReshapeDescriptor, ResizeDescriptor, SliceDescriptor, SoftmaxDescriptor, SpaceToBatchNdDescriptor, SpaceToDepthDescriptor, StackDescriptor, StandInDescriptor, StridedSliceDescriptor, TransposeConvolution2dDescriptor, TransposeDescriptor, and ViewsDescriptor).

Public Member Functions

 Layer (unsigned int numInputSlots, unsigned int numOutputSlots, LayerType type, const char *name)
 
 Layer (unsigned int numInputSlots, unsigned int numOutputSlots, LayerType type, DataLayout layout, const char *name)
 
const std::string & GetNameStr () const
 
const OutputHandler & GetOutputHandler (unsigned int i=0) const
 
OutputHandler & GetOutputHandler (unsigned int i=0)
 
ShapeInferenceMethod GetShapeInferenceMethod () const
 
const std::vector< InputSlot > & GetInputSlots () const
 
const std::vector< OutputSlot > & GetOutputSlots () const
 
std::vector< InputSlot >::iterator BeginInputSlots ()
 
std::vector< InputSlot >::iterator EndInputSlots ()
 
std::vector< OutputSlot >::iterator BeginOutputSlots ()
 
std::vector< OutputSlot >::iterator EndOutputSlots ()
 
bool IsOutputUnconnected ()
 
void ResetPriority () const
 
LayerPriority GetPriority () const
 
LayerType GetType () const
 
DataType GetDataType () const
 
const BackendId & GetBackendId () const
 
void SetBackendId (const BackendId &id)
 
virtual std::unique_ptr< IWorkload > CreateWorkload (const IWorkloadFactory &factory) const =0
 
virtual void CreateTensorHandles (const TensorHandleFactoryRegistry &registry, const IWorkloadFactory &factory, const bool IsMemoryManaged=true)
 
virtual Layer * Clone (Graph &graph) const =0
 Creates a dynamically-allocated copy of this layer. More...
 
void VerifyLayerConnections (unsigned int expectedConnections, const CheckLocation &location) const
 
virtual void ValidateTensorShapesFromInputs ()=0
 
std::vector< TensorShape > InferOutputShapes (const std::vector< TensorShape > &inputShapes) const override
 Infer the shape of the output(s) based on the provided input shape(s) More...
 
virtual void SerializeLayerParameters (ParameterStringifyFunction &fn) const
 Helper to serialize the layer parameters to string. More...
 
virtual void ReleaseConstantData ()
 
template<typename Op >
void OperateOnConstantTensors (Op op)
 
const char * GetName () const override
 Returns the name of the layer. More...
 
unsigned int GetNumInputSlots () const override
 Returns the number of connectable input slots. More...
 
unsigned int GetNumOutputSlots () const override
 Returns the number of connectable output slots. More...
 
const InputSlot & GetInputSlot (unsigned int index) const override
 Get a const input slot handle by slot index. More...
 
InputSlot & GetInputSlot (unsigned int index) override
 Get the input slot handle by slot index. More...
 
const OutputSlot & GetOutputSlot (unsigned int index=0) const override
 Get the const output slot handle by slot index. More...
 
OutputSlot & GetOutputSlot (unsigned int index=0) override
 Get the output slot handle by slot index. More...
 
void SetGuid (LayerGuid guid)
 
LayerGuid GetGuid () const final
 Returns the unique id of the layer. More...
 
void AddRelatedLayerName (const std::string layerName)
 
const std::list< std::string > & GetRelatedLayerNames ()
 
virtual void Reparent (Graph &dest, std::list< Layer *>::const_iterator iterator)=0
 
void BackendSelectionHint (Optional< BackendId > backend) final
 Provide a hint for the optimizer as to which backend to prefer for this layer. More...
 
Optional< BackendId > GetBackendHint () const
 
void SetShapeInferenceMethod (ShapeInferenceMethod shapeInferenceMethod)
 
- Public Member Functions inherited from IConnectableLayer
virtual void Accept (ILayerVisitor &visitor) const =0
 Apply a visitor to this layer. More...
 

Protected Types

using ConstantTensors = std::vector< std::reference_wrapper< std::unique_ptr< ScopedCpuTensorHandle > >>
 

Protected Member Functions

virtual ~Layer ()=default
 
template<typename QueueDescriptor >
void CollectQueueDescriptorInputs (QueueDescriptor &descriptor, WorkloadInfo &info) const
 
template<typename QueueDescriptor >
void CollectQueueDescriptorOutputs (QueueDescriptor &descriptor, WorkloadInfo &info) const
 
void ValidateAndCopyShape (const TensorShape &outputShape, const TensorShape &inferredShape, const ShapeInferenceMethod shapeInferenceMethod, const std::string &layerName, const unsigned int outputSlotIndex=0)
 
void VerifyShapeInferenceType (const TensorShape &outputShape, ShapeInferenceMethod shapeInferenceMethod)
 
template<typename QueueDescriptor >
WorkloadInfo PrepInfoAndDesc (QueueDescriptor &descriptor) const
 Helper function to reduce duplication in *LayerCreateWorkload. More...
 
template<typename LayerType , typename ... Params>
LayerType * CloneBase (Graph &graph, Params &&... params) const
 
virtual ConstantTensors GetConstantTensorsByRef ()
 
- Protected Member Functions inherited from IConnectableLayer
 ~IConnectableLayer ()
 Objects are not deletable via the handle. More...
 

Protected Attributes

std::vector< OutputHandler > m_OutputHandlers
 
ShapeInferenceMethod m_ShapeInferenceMethod
 

Friends

class Graph
 

Detailed Description

Definition at line 209 of file Layer.hpp.
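
Layer is the abstract base of every layer in a Graph and provides the slot, naming, guid and backend plumbing behind IConnectableLayer. As a usage illustration, the sketch below walks a layer's connectivity using only the accessors documented on this page. It assumes the internal Layer.hpp header is available and that the Layer reference was obtained from a Graph; the output format is purely illustrative.

#include <Layer.hpp>
#include <iostream>

// Print how one layer is wired up, using only public Layer accessors.
void PrintConnectivity(const armnn::Layer& layer)
{
    std::cout << layer.GetName() << " ("
              << armnn::GetLayerTypeAsCString(layer.GetType()) << ")\n";

    for (unsigned int i = 0; i < layer.GetNumInputSlots(); ++i)
    {
        const armnn::OutputSlot* source = layer.GetInputSlot(i).GetConnectedOutputSlot();
        std::cout << "  input " << i << ": "
                  << (source ? source->GetOwningLayer().GetName() : "<unconnected>") << "\n";
    }

    for (unsigned int i = 0; i < layer.GetNumOutputSlots(); ++i)
    {
        std::cout << "  output " << i << ": "
                  << layer.GetOutputSlot(i).GetNumConnections() << " connection(s)\n";
    }
}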

Member Typedef Documentation

◆ ConstantTensors

using ConstantTensors = std::vector<std::reference_wrapper<std::unique_ptr<ScopedCpuTensorHandle> >>
protected

Definition at line 378 of file Layer.hpp.

Constructor & Destructor Documentation

◆ Layer() [1/2]

Layer ( unsigned int  numInputSlots,
unsigned int  numOutputSlots,
LayerType  type,
const char *  name 
)
Parameters
name - Optional name for the layer (may be nullptr).

Definition at line 219 of file Layer.cpp.

References ARMNN_ASSERT, Layer::GetInputSlots(), Layer::m_OutputHandlers, and WorkloadDataCollector::Push().

223 : Layer(numInputSlots, numOutputSlots, type, DataLayout::NCHW, name)
224 {
225 }
Layer(unsigned int numInputSlots, unsigned int numOutputSlots, LayerType type, const char *name)
Definition: Layer.cpp:219
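
Concrete layers invoke this constructor from their own, fixing the slot counts and the LayerType. A sketch modelled on the simple one-input layers such as AbsLayer (listed among the derived classes above); the exact body may differ slightly between versions.

// One input slot, one output slot, layer type Abs, optional user-supplied name.
AbsLayer::AbsLayer(const char* name)
    : Layer(1, 1, LayerType::Abs, name)
{
}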

◆ Layer() [2/2]

Layer ( unsigned int  numInputSlots,
unsigned int  numOutputSlots,
LayerType  type,
DataLayout  layout,
const char *  name 
)

Definition at line 192 of file Layer.cpp.

References armnn::IgnoreUnused(), and Layer::m_OutputHandlers.

197 : m_OutputHandlers(numOutputSlots)
198 , m_ShapeInferenceMethod(ShapeInferenceMethod::ValidateOnly)
199 , m_LayerName(name ? name : "")
200 , m_Type(type)
201 , m_BackendId()
202 , m_BackendHint(EmptyOptional())
203 , m_Guid(profiling::ProfilingService::GetNextGuid())
204 {
205  IgnoreUnused(layout);
206  m_InputSlots.reserve(numInputSlots);
207  for (unsigned int i = 0; i < numInputSlots; ++i)
208  {
209  m_InputSlots.emplace_back(*this, i);
210  }
211 
212  m_OutputSlots.reserve(numOutputSlots);
213  for (unsigned int i = 0; i < numOutputSlots; ++i)
214  {
215  m_OutputSlots.emplace_back(*this, m_OutputHandlers[i]);
216  }
217 }
void IgnoreUnused(Ts &&...)
Validate all output shapes.
std::vector< OutputHandler > m_OutputHandlers
Definition: Layer.hpp:386
static ProfilingDynamicGuid GetNextGuid()
ShapeInferenceMethod m_ShapeInferenceMethod
Definition: Layer.hpp:387

◆ ~Layer()

virtual ~Layer ( )
protected virtual default

Member Function Documentation

◆ AddRelatedLayerName()

void AddRelatedLayerName ( const std::string  layerName)
inline

Definition at line 320 of file Layer.hpp.

320 { m_RelatedLayerNames.emplace_back(layerName); }

◆ BackendSelectionHint()

void BackendSelectionHint ( Optional< BackendId >  backend)
inline final virtual

Provide a hint for the optimizer as to which backend to prefer for this layer.

Implements IConnectableLayer.

Definition at line 326 of file Layer.hpp.

Referenced by BOOST_AUTO_TEST_CASE().

327  {
328  m_BackendHint = backend;
329  }
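
As a usage sketch, a caller building a network can set the hint through the IConnectableLayer interface. The "GpuAcc" id is only an example, and the hint is only honoured if that backend is registered when armnn::Optimize() runs; pass EmptyOptional() to clear the hint again.

#include <armnn/INetwork.hpp>
#include <armnn/BackendId.hpp>
#include <armnn/Optional.hpp>

// Ask the optimizer to prefer GpuAcc for a single layer of a network under construction.
void PreferGpuAcc(armnn::IConnectableLayer* layer)
{
    layer->BackendSelectionHint(armnn::Optional<armnn::BackendId>(armnn::BackendId("GpuAcc")));
}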

◆ BeginInputSlots()

◆ BeginOutputSlots()

std::vector<OutputSlot>::iterator BeginOutputSlots ( )
inline

◆ Clone()

◆ CloneBase()

LayerType * CloneBase ( Graph &  graph,
Params &&...  params 
) const
protected

Definition at line 14 of file LayerCloneBase.hpp.

References Graph::AddLayer(), Layer::GetBackendId(), Layer::GetGuid(), and Layer::m_ShapeInferenceMethod.

15 {
16  LayerType* const layer = graph.AddLayer<LayerType>(std::forward<Params>(params)...);
17 
18  layer->SetBackendId(GetBackendId());
19  layer->SetGuid(GetGuid());
20  layer->SetShapeInferenceMethod(m_ShapeInferenceMethod);
21 
22  return layer;
23 }
const BackendId & GetBackendId() const
Definition: Layer.hpp:265
ShapeInferenceMethod m_ShapeInferenceMethod
Definition: Layer.hpp:387
LayerGuid GetGuid() const final
Returns the unique id of the layer.
Definition: Layer.hpp:318
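
Derived layers implement Clone by forwarding to this helper with their own constructor arguments, which copies the backend id, guid and shape inference method onto the new layer. For example, AbsLayer::Clone (listed in the references above) is simply:

AbsLayer* AbsLayer::Clone(Graph& graph) const
{
    // Add a fresh AbsLayer to the destination graph and copy the base-layer state across.
    return CloneBase<AbsLayer>(graph, GetName());
}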

◆ CollectQueueDescriptorInputs()

void CollectQueueDescriptorInputs ( QueueDescriptor &  descriptor,
WorkloadInfo &  info 
) const
inline protected

Definition at line 343 of file Layer.hpp.

References QueueDescriptor::m_Inputs, and WorkloadInfo::m_InputTensorInfos.

344  {
345  WorkloadDataCollector dataCollector(descriptor.m_Inputs, info.m_InputTensorInfos);
346  CollectWorkloadInputs(dataCollector);
347  }

◆ CollectQueueDescriptorOutputs()

void CollectQueueDescriptorOutputs ( QueueDescriptor &  descriptor,
WorkloadInfo &  info 
) const
inline protected

Definition at line 350 of file Layer.hpp.

References QueueDescriptor::m_Outputs, and WorkloadInfo::m_OutputTensorInfos.

351  {
352  WorkloadDataCollector dataCollector(descriptor.m_Outputs, info.m_OutputTensorInfos);
353  CollectWorkloadOutputs(dataCollector);
354  }

◆ CreateTensorHandles()

void CreateTensorHandles ( const TensorHandleFactoryRegistry &  registry,
const IWorkloadFactory &  factory,
const bool  IsMemoryManaged = true 
)
virtual

Reimplemented in SplitterLayer, ConcatLayer, and OutputLayer.

Definition at line 246 of file Layer.cpp.

References ARMNN_ASSERT, OutputHandler::CreateTensorHandles(), TensorHandleFactoryRegistry::GetFactory(), Layer::GetNumOutputSlots(), Layer::GetOutputHandler(), Layer::GetOutputSlot(), OutputSlot::GetTensorHandleFactoryId(), and ITensorHandleFactory::LegacyFactoryId.

249 {
250  for (unsigned int idx=0; idx < GetNumOutputSlots(); idx++)
251  {
252 
253  OutputSlot& slot = GetOutputSlot(idx);
254  ITensorHandleFactory::FactoryId factoryId = slot.GetTensorHandleFactoryId();
255 
256  OutputHandler& handler = GetOutputHandler(idx);
257  if (factoryId == ITensorHandleFactory::LegacyFactoryId)
258  {
259  handler.CreateTensorHandles(workloadFactory, IsMemoryManaged);
260  }
261  else
262  {
263  ITensorHandleFactory* handleFactory = registry.GetFactory(factoryId);
264  ARMNN_ASSERT(handleFactory);
265  handler.CreateTensorHandles(*handleFactory, IsMemoryManaged);
266  }
267  }
268 }
unsigned int GetNumOutputSlots() const override
Returns the number of connectable output slots.
Definition: Layer.hpp:310
#define ARMNN_ASSERT(COND)
Definition: Assert.hpp:14
const OutputHandler & GetOutputHandler(unsigned int i=0) const
Definition: Layer.hpp:221
const OutputSlot & GetOutputSlot(unsigned int index=0) const override
Get the const output slot handle by slot index.
Definition: Layer.hpp:314
static const FactoryId LegacyFactoryId

◆ CreateWorkload()

◆ EndInputSlots()

◆ EndOutputSlots()

std::vector<OutputSlot>::iterator EndOutputSlots ( )
inline

Definition at line 242 of file Layer.hpp.

Referenced by armnn::InsertDebugLayerAfter(), ConvertFp32NetworkToFp16Impl::Run(), and SplitterLayer::ValidateTensorShapesFromInputs().

242 { return m_OutputSlots.end(); }

◆ GetBackendHint()

Optional<BackendId> GetBackendHint ( ) const
inline

Definition at line 330 of file Layer.hpp.

330 { return m_BackendHint; }

◆ GetBackendId()

◆ GetConstantTensorsByRef()

virtual ConstantTensors GetConstantTensorsByRef ( )
inline protected virtual

Reimplemented in QLstmLayer, LstmLayer, QuantizedLstmLayer, Convolution2dLayer, DepthwiseConvolution2dLayer, FullyConnectedLayer, TransposeConvolution2dLayer, BatchNormalizationLayer, ConstantLayer, and DetectionPostProcessLayer.

Definition at line 379 of file Layer.hpp.

379 {return ConstantTensors(); };
std::vector< std::reference_wrapper< std::unique_ptr< ScopedCpuTensorHandle > >> ConstantTensors
Definition: Layer.hpp:378
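
Layers that own constant tensors override this to expose them to OperateOnConstantTensors and ReleaseConstantData. A sketch in the spirit of the convolution layers, assuming m_Weight and m_Bias are std::unique_ptr<ScopedCpuTensorHandle> members of the derived layer:

Layer::ConstantTensors Convolution2dLayer::GetConstantTensorsByRef()
{
    // Hand back references to the layer's constant tensor handles so that the
    // generic constant-tensor machinery can convert or release them.
    return { m_Weight, m_Bias };
}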

◆ GetDataType()

DataType GetDataType ( ) const

Definition at line 279 of file Layer.cpp.

References InputSlot::GetConnection(), TensorInfo::GetDataType(), Layer::GetInputSlot(), Layer::GetNumInputSlots(), Layer::GetOutputSlot(), IOutputSlot::GetTensorInfo(), and OutputSlot::GetTensorInfo().

Referenced by BOOST_AUTO_TEST_CASE(), ConvertFp32NetworkToFp16Impl::Run(), ConvertFp32NetworkToBf16Impl::Run(), IsFloat32Layer::Test(), IsFloat16Layer::Test(), and IsBFloat16Layer::Test().

280 {
281  if (GetNumInputSlots() > 0) // Ignore the input layer.
282  {
283  return GetInputSlot(0).GetConnection()->GetTensorInfo().GetDataType();
284  }
285  return GetOutputSlot(0).GetTensorInfo().GetDataType();
286 }
unsigned int GetNumInputSlots() const override
Returns the number of connectable input slots.
Definition: Layer.hpp:309
const IOutputSlot * GetConnection() const override
Definition: Layer.hpp:199
const InputSlot & GetInputSlot(unsigned int index) const override
Get a const input slot handle by slot index.
Definition: Layer.hpp:312
DataType GetDataType() const
Definition: Tensor.hpp:194
const OutputSlot & GetOutputSlot(unsigned int index=0) const override
Get the const output slot handle by slot index.
Definition: Layer.hpp:314
virtual const TensorInfo & GetTensorInfo() const =0
const TensorInfo & GetTensorInfo() const override
Definition: Layer.cpp:63

◆ GetGuid()

LayerGuid GetGuid ( ) const
inline final virtual

Returns the unique id of the layer.

Implements IConnectableLayer.

Definition at line 318 of file Layer.hpp.

Referenced by Layer::CloneBase(), DebugLayer::CreateWorkload(), OutputSlot::GetOwningLayerGuid(), QuantizerVisitor::QuantizerVisitor(), and Graph::SerializeToDot().

318 { return m_Guid; }

◆ GetInputSlot() [1/2]

const InputSlot& GetInputSlot ( unsigned int  index) const
inline override virtual

Get a const input slot handle by slot index.

Implements IConnectableLayer.

Definition at line 312 of file Layer.hpp.

Referenced by BOOST_AUTO_TEST_CASE(), BOOST_FIXTURE_TEST_CASE(), CreateConvolution2dGraph(), CreateDepthwiseConvolution2dGraph(), CreateGatherGraph(), CreatePooling2dGraph(), CreateResizeBilinearGraph(), ConcatLayer::CreateWorkload(), DebugLayer::CreateWorkload(), Layer::GetDataType(), Graph::Graph(), InputSlot::Insert(), armnn::InsertConvertBf16ToFp32LayersBefore(), armnn::InsertConvertFp16ToFp32LayersBefore(), armnn::InsertConvertFp32ToBf16LayersAfter(), armnn::InsertConvertFp32ToBf16LayersBefore(), armnn::InsertConvertFp32ToFp16LayersAfter(), armnn::InsertDebugLayerAfter(), IWorkloadFactory::IsLayerSupported(), TransposeAsReshapeImpl::Run(), PermuteAsReshapeImpl::Run(), OptimizeConsecutiveReshapesImpl::Run(), FoldPadIntoConvolution2dImpl::Run(), OptimizeInverseConversionsImpl::Run(), PermuteAndBatchToSpaceAsDepthToSpaceImpl< PermuteType >::Run(), MoveTransposeUpImpl::Run(), OptimizeInversePermutesImpl< PermuteType >::Run(), MovePermuteUpImpl::Run(), DepthwiseConvolution2dLayer::SerializeLayerParameters(), Convolution2dLayer::SerializeLayerParameters(), ElementwiseBaseLayer::ValidateTensorShapesFromInputs(), QuantizeLayer::ValidateTensorShapesFromInputs(), ActivationLayer::ValidateTensorShapesFromInputs(), ConvertFp32ToFp16Layer::ValidateTensorShapesFromInputs(), FillLayer::ValidateTensorShapesFromInputs(), RsqrtLayer::ValidateTensorShapesFromInputs(), InstanceNormalizationLayer::ValidateTensorShapesFromInputs(), MemCopyLayer::ValidateTensorShapesFromInputs(), L2NormalizationLayer::ValidateTensorShapesFromInputs(), MemImportLayer::ValidateTensorShapesFromInputs(), MergeLayer::ValidateTensorShapesFromInputs(), SwitchLayer::ValidateTensorShapesFromInputs(), DebugLayer::ValidateTensorShapesFromInputs(), NormalizationLayer::ValidateTensorShapesFromInputs(), AbsLayer::ValidateTensorShapesFromInputs(), Pooling2dLayer::ValidateTensorShapesFromInputs(), SoftmaxLayer::ValidateTensorShapesFromInputs(), DequantizeLayer::ValidateTensorShapesFromInputs(), ResizeLayer::ValidateTensorShapesFromInputs(), ConvertBf16ToFp32Layer::ValidateTensorShapesFromInputs(), SliceLayer::ValidateTensorShapesFromInputs(), FakeQuantizationLayer::ValidateTensorShapesFromInputs(), ConvertFp16ToFp32Layer::ValidateTensorShapesFromInputs(), StackLayer::ValidateTensorShapesFromInputs(), FloorLayer::ValidateTensorShapesFromInputs(), BatchToSpaceNdLayer::ValidateTensorShapesFromInputs(), ConvertFp32ToBf16Layer::ValidateTensorShapesFromInputs(), TransposeLayer::ValidateTensorShapesFromInputs(), MeanLayer::ValidateTensorShapesFromInputs(), LogSoftmaxLayer::ValidateTensorShapesFromInputs(), PadLayer::ValidateTensorShapesFromInputs(), GatherLayer::ValidateTensorShapesFromInputs(), PermuteLayer::ValidateTensorShapesFromInputs(), ReshapeLayer::ValidateTensorShapesFromInputs(), StridedSliceLayer::ValidateTensorShapesFromInputs(), ArgMinMaxLayer::ValidateTensorShapesFromInputs(), ElementwiseUnaryLayer::ValidateTensorShapesFromInputs(), PreluLayer::ValidateTensorShapesFromInputs(), FullyConnectedLayer::ValidateTensorShapesFromInputs(), TransposeConvolution2dLayer::ValidateTensorShapesFromInputs(), ComparisonLayer::ValidateTensorShapesFromInputs(), DepthToSpaceLayer::ValidateTensorShapesFromInputs(), DepthwiseConvolution2dLayer::ValidateTensorShapesFromInputs(), SpaceToBatchNdLayer::ValidateTensorShapesFromInputs(), SpaceToDepthLayer::ValidateTensorShapesFromInputs(), Convolution2dLayer::ValidateTensorShapesFromInputs(), ConcatLayer::ValidateTensorShapesFromInputs(), BatchNormalizationLayer::ValidateTensorShapesFromInputs(), 
OutputLayer::ValidateTensorShapesFromInputs(), QuantizedLstmLayer::ValidateTensorShapesFromInputs(), LstmLayer::ValidateTensorShapesFromInputs(), QLstmLayer::ValidateTensorShapesFromInputs(), and Layer::VerifyLayerConnections().

312 { return m_InputSlots.at(index); }

◆ GetInputSlot() [2/2]

InputSlot& GetInputSlot ( unsigned int  index)
inline override virtual

Get the input slot handle by slot index.

Implements IConnectableLayer.

Definition at line 313 of file Layer.hpp.

313 { return m_InputSlots.at(index); }

◆ GetInputSlots()

const std::vector<InputSlot>& GetInputSlots ( ) const
inline

◆ GetName()

const char* GetName ( ) const
inline override virtual

Returns the name of the layer.

Implements IConnectableLayer.

Definition at line 307 of file Layer.hpp.

Referenced by RankLayer::Accept(), AdditionLayer::Accept(), QuantizeLayer::Accept(), MinimumLayer::Accept(), MultiplicationLayer::Accept(), DivisionLayer::Accept(), MaximumLayer::Accept(), SubtractionLayer::Accept(), ActivationLayer::Accept(), FillLayer::Accept(), AbsLayer::Accept(), NormalizationLayer::Accept(), DequantizeLayer::Accept(), FloorLayer::Accept(), InputLayer::Accept(), InstanceNormalizationLayer::Accept(), RsqrtLayer::Accept(), SoftmaxLayer::Accept(), L2NormalizationLayer::Accept(), SwitchLayer::Accept(), GatherLayer::Accept(), LogSoftmaxLayer::Accept(), MeanLayer::Accept(), MergeLayer::Accept(), ArgMinMaxLayer::Accept(), DetectionPostProcessLayer::Accept(), ElementwiseUnaryLayer::Accept(), Pooling2dLayer::Accept(), ResizeLayer::Accept(), SliceLayer::Accept(), BatchToSpaceNdLayer::Accept(), StackLayer::Accept(), StridedSliceLayer::Accept(), PadLayer::Accept(), PreluLayer::Accept(), SpaceToBatchNdLayer::Accept(), SpaceToDepthLayer::Accept(), ComparisonLayer::Accept(), DepthToSpaceLayer::Accept(), StandInLayer::Accept(), ConstantLayer::Accept(), BatchNormalizationLayer::Accept(), OutputLayer::Accept(), TransposeConvolution2dLayer::Accept(), FullyConnectedLayer::Accept(), DepthwiseConvolution2dLayer::Accept(), Convolution2dLayer::Accept(), ConcatLayer::Accept(), SplitterLayer::Accept(), ReshapeLayer::Accept(), TransposeLayer::Accept(), PermuteLayer::Accept(), QuantizedLstmLayer::Accept(), LstmLayer::Accept(), QLstmLayer::Accept(), Graph::AddCompatibilityLayers(), BOOST_AUTO_TEST_CASE(), armnnTfParser::CalcPadding(), RankLayer::Clone(), QuantizeLayer::Clone(), FillLayer::Clone(), ActivationLayer::Clone(), ConvertFp32ToFp16Layer::Clone(), MergeLayer::Clone(), NormalizationLayer::Clone(), DebugLayer::Clone(), AbsLayer::Clone(), Pooling2dLayer::Clone(), ConvertBf16ToFp32Layer::Clone(), FloorLayer::Clone(), StackLayer::Clone(), MemImportLayer::Clone(), MemCopyLayer::Clone(), ConvertFp32ToBf16Layer::Clone(), ResizeLayer::Clone(), FakeQuantizationLayer::Clone(), L2NormalizationLayer::Clone(), SliceLayer::Clone(), InputLayer::Clone(), InstanceNormalizationLayer::Clone(), BatchToSpaceNdLayer::Clone(), SoftmaxLayer::Clone(), AdditionLayer::Clone(), StridedSliceLayer::Clone(), ConvertFp16ToFp32Layer::Clone(), SwitchLayer::Clone(), RsqrtLayer::Clone(), DequantizeLayer::Clone(), MinimumLayer::Clone(), MultiplicationLayer::Clone(), ArgMinMaxLayer::Clone(), ComparisonLayer::Clone(), LogSoftmaxLayer::Clone(), StandInLayer::Clone(), DivisionLayer::Clone(), DepthToSpaceLayer::Clone(), PreluLayer::Clone(), PadLayer::Clone(), ElementwiseUnaryLayer::Clone(), TransposeLayer::Clone(), MeanLayer::Clone(), GatherLayer::Clone(), MaximumLayer::Clone(), SubtractionLayer::Clone(), SpaceToDepthLayer::Clone(), SpaceToBatchNdLayer::Clone(), ConstantLayer::Clone(), ReshapeLayer::Clone(), PermuteLayer::Clone(), DetectionPostProcessLayer::Clone(), PreCompiledLayer::Clone(), TransposeConvolution2dLayer::Clone(), FullyConnectedLayer::Clone(), DepthwiseConvolution2dLayer::Clone(), Convolution2dLayer::Clone(), ConcatLayer::Clone(), SplitterLayer::Clone(), BatchNormalizationLayer::Clone(), OutputLayer::Clone(), QuantizedLstmLayer::Clone(), LstmLayer::Clone(), QLstmLayer::Clone(), armnn::InsertConvertBf16ToFp32LayersBefore(), armnn::InsertConvertFp16ToFp32LayersBefore(), armnn::InsertConvertFp32ToBf16LayersAfter(), armnn::InsertConvertFp32ToBf16LayersBefore(), armnn::InsertConvertFp32ToFp16LayersAfter(), PermuteAsReshapeImpl::Run(), TransposeAsReshapeImpl::Run(), OptimizeConsecutiveReshapesImpl::Run(), 
PermuteAndBatchToSpaceAsDepthToSpaceImpl< PermuteType >::Run(), FoldPadIntoConvolution2dImpl::Run(), and ErasedLayerNamesObservable::Update().

307 { return m_LayerName.c_str(); }

◆ GetNameStr()

◆ GetNumInputSlots()

◆ GetNumOutputSlots()

◆ GetOutputHandler() [1/2]

◆ GetOutputHandler() [2/2]

OutputHandler& GetOutputHandler ( unsigned int  i = 0)
inline

Definition at line 226 of file Layer.hpp.

227  {
228  return const_cast<OutputHandler&>(const_cast<const Layer*>(this)->GetOutputHandler(i));
229  }
Layer(unsigned int numInputSlots, unsigned int numOutputSlots, LayerType type, const char *name)
Definition: Layer.cpp:219
const OutputHandler & GetOutputHandler(unsigned int i=0) const
Definition: Layer.hpp:221

◆ GetOutputSlot() [1/2]

const OutputSlot& GetOutputSlot ( unsigned int  index = 0) const
inline override virtual

Get the const output slot handle by slot index.

Implements IConnectableLayer.

Definition at line 314 of file Layer.hpp.

Referenced by Graph::AddCompatibilityLayers(), BOOST_AUTO_TEST_CASE(), BOOST_FIXTURE_TEST_CASE(), armnnTfParser::CalcPadding(), OutputSlot::CalculateIndexOnOwner(), armnn::CheckScaleSetOnQuantizedType(), CreateConvolution2dGraph(), CreateDepthwiseConvolution2dGraph(), CreateGatherGraph(), CreatePooling2dGraph(), CreateResizeBilinearGraph(), ConcatLayer::CreateTensorHandles(), SplitterLayer::CreateTensorHandles(), Layer::CreateTensorHandles(), ConcatLayer::CreateWorkload(), SplitterLayer::CreateWorkload(), Layer::GetDataType(), InputSlot::Insert(), armnn::InsertConvertBf16ToFp32LayersBefore(), armnn::InsertConvertFp16ToFp32LayersBefore(), armnn::InsertConvertFp32ToBf16LayersAfter(), armnn::InsertConvertFp32ToBf16LayersBefore(), armnn::InsertConvertFp32ToFp16LayersAfter(), armnn::InsertDebugLayerAfter(), IsConnected(), IWorkloadFactory::IsLayerSupported(), PermuteAsReshapeImpl::Run(), TransposeAsReshapeImpl::Run(), OptimizeConsecutiveReshapesImpl::Run(), PermuteAndBatchToSpaceAsDepthToSpaceImpl< PermuteType >::Run(), FoldPadIntoConvolution2dImpl::Run(), OptimizeInverseConversionsImpl::Run(), MovePermuteUpImpl::Run(), MoveTransposeUpImpl::Run(), OptimizeInversePermutesImpl< PermuteType >::Run(), armnn::SelectTensorHandleStrategy(), Layer::ValidateAndCopyShape(), ElementwiseBaseLayer::ValidateTensorShapesFromInputs(), RankLayer::ValidateTensorShapesFromInputs(), QuantizeLayer::ValidateTensorShapesFromInputs(), ActivationLayer::ValidateTensorShapesFromInputs(), ConvertFp32ToFp16Layer::ValidateTensorShapesFromInputs(), FillLayer::ValidateTensorShapesFromInputs(), FakeQuantizationLayer::ValidateTensorShapesFromInputs(), SwitchLayer::ValidateTensorShapesFromInputs(), InstanceNormalizationLayer::ValidateTensorShapesFromInputs(), L2NormalizationLayer::ValidateTensorShapesFromInputs(), ConvertFp32ToBf16Layer::ValidateTensorShapesFromInputs(), ConvertFp16ToFp32Layer::ValidateTensorShapesFromInputs(), RsqrtLayer::ValidateTensorShapesFromInputs(), ConvertBf16ToFp32Layer::ValidateTensorShapesFromInputs(), NormalizationLayer::ValidateTensorShapesFromInputs(), Pooling2dLayer::ValidateTensorShapesFromInputs(), BatchToSpaceNdLayer::ValidateTensorShapesFromInputs(), SoftmaxLayer::ValidateTensorShapesFromInputs(), DequantizeLayer::ValidateTensorShapesFromInputs(), AbsLayer::ValidateTensorShapesFromInputs(), ResizeLayer::ValidateTensorShapesFromInputs(), MemImportLayer::ValidateTensorShapesFromInputs(), SliceLayer::ValidateTensorShapesFromInputs(), MergeLayer::ValidateTensorShapesFromInputs(), DebugLayer::ValidateTensorShapesFromInputs(), MemCopyLayer::ValidateTensorShapesFromInputs(), StackLayer::ValidateTensorShapesFromInputs(), FloorLayer::ValidateTensorShapesFromInputs(), TransposeLayer::ValidateTensorShapesFromInputs(), LogSoftmaxLayer::ValidateTensorShapesFromInputs(), GatherLayer::ValidateTensorShapesFromInputs(), MeanLayer::ValidateTensorShapesFromInputs(), PadLayer::ValidateTensorShapesFromInputs(), ConstantLayer::ValidateTensorShapesFromInputs(), PermuteLayer::ValidateTensorShapesFromInputs(), ReshapeLayer::ValidateTensorShapesFromInputs(), DetectionPostProcessLayer::ValidateTensorShapesFromInputs(), StridedSliceLayer::ValidateTensorShapesFromInputs(), ArgMinMaxLayer::ValidateTensorShapesFromInputs(), ElementwiseUnaryLayer::ValidateTensorShapesFromInputs(), DepthwiseConvolution2dLayer::ValidateTensorShapesFromInputs(), FullyConnectedLayer::ValidateTensorShapesFromInputs(), PreluLayer::ValidateTensorShapesFromInputs(), TransposeConvolution2dLayer::ValidateTensorShapesFromInputs(), 
SpaceToDepthLayer::ValidateTensorShapesFromInputs(), DepthToSpaceLayer::ValidateTensorShapesFromInputs(), SpaceToBatchNdLayer::ValidateTensorShapesFromInputs(), ComparisonLayer::ValidateTensorShapesFromInputs(), Convolution2dLayer::ValidateTensorShapesFromInputs(), ConcatLayer::ValidateTensorShapesFromInputs(), SplitterLayer::ValidateTensorShapesFromInputs(), BatchNormalizationLayer::ValidateTensorShapesFromInputs(), QuantizedLstmLayer::ValidateTensorShapesFromInputs(), LstmLayer::ValidateTensorShapesFromInputs(), and QLstmLayer::ValidateTensorShapesFromInputs().

314 { return m_OutputSlots.at(index); }

◆ GetOutputSlot() [2/2]

OutputSlot& GetOutputSlot ( unsigned int  index = 0)
inline override virtual

Get the output slot handle by slot index.

Implements IConnectableLayer.

Definition at line 315 of file Layer.hpp.

315 { return m_OutputSlots.at(index); }

◆ GetOutputSlots()

const std::vector<OutputSlot>& GetOutputSlots ( ) const
inline

Definition at line 234 of file Layer.hpp.

Referenced by Graph::AddCompatibilityLayers(), armnn::ForEachLayerOutput(), and IWorkloadFactory::IsLayerSupported().

234 { return m_OutputSlots; }

◆ GetPriority()

LayerPriority GetPriority ( ) const

Definition at line 294 of file Layer.cpp.

References Layer::GetInputSlots(), OutputSlot::GetOwningLayer(), Layer::GetPriority(), Layer::GetType(), armnn::Input, and armnn::Output.

Referenced by Layer::GetPriority(), and SquashEqualSiblingsImpl< Comparable >::Run().

295 {
296  constexpr LayerPriority inputPrio = std::numeric_limits<LayerPriority>::lowest();
297  constexpr LayerPriority outputPrio = std::numeric_limits<LayerPriority>::max();
298 
299  if (GetType() == LayerType::Input)
300  {
301  m_Priority = inputPrio;
302  }
303  else if (GetType() == LayerType::Output)
304  {
305  m_Priority = outputPrio;
306  }
307  else if (m_Priority == 0)
308  {
309  if (m_Visiting)
310  {
311  throw GraphValidationException("Graph has circular dependencies: cannot walk");
312  }
313 
314  auto maxPrio = [](const LayerPriority prio, const InputSlot& slot) -> LayerPriority
315  {
316  const OutputSlot *outputSlot = slot.GetConnectedOutputSlot();
317  if (outputSlot)
318  {
319  const Layer& input = outputSlot->GetOwningLayer();
320  return std::max(prio, input.GetPriority());
321  }
322  else
323  {
324  // unconnected input slot
325  return prio;
326  }
327  };
328 
329  m_Visiting = true;
330  LayerPriority parentPrio = std::accumulate(GetInputSlots().cbegin(), GetInputSlots().cend(), 0U, maxPrio);
331  m_Visiting = false;
332 
333  if (parentPrio >= outputPrio)
334  {
335  throw GraphValidationException("Graph has too many edges");
336  }
337 
338  m_Priority = parentPrio + 1U;
339  }
340 
341  return m_Priority;
342 }
unsigned int LayerPriority
Definition: Layer.hpp:207
const std::vector< InputSlot > & GetInputSlots() const
Definition: Layer.hpp:233
Layer(unsigned int numInputSlots, unsigned int numOutputSlots, LayerType type, const char *name)
Definition: Layer.cpp:219
LayerType GetType() const
Definition: Layer.hpp:261
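
The priority grows with the distance from the input layers (inputs lowest, outputs highest, every other layer one more than its highest-priority parent), so sorting by ascending priority yields a valid execution order. A sketch, assuming a vector of Layer pointers collected from a Graph:

#include <Layer.hpp>
#include <algorithm>
#include <vector>

// Order layers so that every layer comes after all of its inputs.
// ResetPriority() is called first so stale cached priorities are recomputed.
void SortByPriority(std::vector<armnn::Layer*>& layers)
{
    for (armnn::Layer* layer : layers)
    {
        layer->ResetPriority();
    }
    std::sort(layers.begin(), layers.end(),
              [](const armnn::Layer* a, const armnn::Layer* b)
              {
                  return a->GetPriority() < b->GetPriority();
              });
}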

◆ GetRelatedLayerNames()

const std::list<std::string>& GetRelatedLayerNames ( )
inline

Definition at line 322 of file Layer.hpp.

Referenced by ErasedLayerNamesObservable::Update().

322 { return m_RelatedLayerNames; }

◆ GetShapeInferenceMethod()

ShapeInferenceMethod GetShapeInferenceMethod ( ) const
inline

Definition at line 231 of file Layer.hpp.

231 { return m_ShapeInferenceMethod; };
ShapeInferenceMethod m_ShapeInferenceMethod
Definition: Layer.hpp:387

◆ GetType()

◆ InferOutputShapes()

std::vector< TensorShape > InferOutputShapes ( const std::vector< TensorShape > &  inputShapes) const
override virtual

Infer the shape of the output(s) based on the provided input shape(s)

Implements IConnectableLayer.

Reimplemented in QLstmLayer, LstmLayer, QuantizedLstmLayer, SplitterLayer, ConcatLayer, Convolution2dLayer, DepthwiseConvolution2dLayer, FullyConnectedLayer, TransposeConvolution2dLayer, ConstantLayer, PermuteLayer, ReshapeLayer, PadLayer, StandInLayer, BatchToSpaceNdLayer, Pooling2dLayer, ResizeLayer, SliceLayer, StackLayer, TransposeLayer, MergeLayer, ComparisonLayer, DepthToSpaceLayer, PreluLayer, SpaceToBatchNdLayer, SpaceToDepthLayer, ArgMinMaxLayer, ElementwiseUnaryLayer, StridedSliceLayer, and ElementwiseBaseLayer.

Definition at line 365 of file Layer.cpp.

References ARMNN_ASSERT, CHECK_LOCATION, armnn::GetLayerTypeAsCString(), Layer::GetNameStr(), Layer::GetNumInputSlots(), Layer::GetNumOutputSlots(), and Layer::GetType().

Referenced by QuantizeLayer::ValidateTensorShapesFromInputs(), ActivationLayer::ValidateTensorShapesFromInputs(), FillLayer::ValidateTensorShapesFromInputs(), ConvertFp32ToFp16Layer::ValidateTensorShapesFromInputs(), DequantizeLayer::ValidateTensorShapesFromInputs(), NormalizationLayer::ValidateTensorShapesFromInputs(), MemImportLayer::ValidateTensorShapesFromInputs(), MemCopyLayer::ValidateTensorShapesFromInputs(), SoftmaxLayer::ValidateTensorShapesFromInputs(), L2NormalizationLayer::ValidateTensorShapesFromInputs(), InstanceNormalizationLayer::ValidateTensorShapesFromInputs(), SwitchLayer::ValidateTensorShapesFromInputs(), ConvertFp16ToFp32Layer::ValidateTensorShapesFromInputs(), FakeQuantizationLayer::ValidateTensorShapesFromInputs(), DebugLayer::ValidateTensorShapesFromInputs(), ConvertBf16ToFp32Layer::ValidateTensorShapesFromInputs(), ConvertFp32ToBf16Layer::ValidateTensorShapesFromInputs(), FloorLayer::ValidateTensorShapesFromInputs(), AbsLayer::ValidateTensorShapesFromInputs(), RsqrtLayer::ValidateTensorShapesFromInputs(), LogSoftmaxLayer::ValidateTensorShapesFromInputs(), and BatchNormalizationLayer::ValidateTensorShapesFromInputs().

366 {
367  ARMNN_ASSERT(GetNumInputSlots() != 0);
368  ARMNN_ASSERT(GetNumOutputSlots() != 0);
369 
370  // By default we return what we got, meaning the output shape(s) are the same as the input(s).
371  // This only works if the number of inputs and outputs are the same. Since we are in the Layer
372  // base class, this means the implementation needs to be overridden in the specific layers for
373  // the other cases. So the missing implementation justifies the UnimplementedException.
374 
375  if (GetNumInputSlots() != GetNumOutputSlots())
376  {
377  throw UnimplementedException(
378  boost::str(
379  boost::format(
380  "Default implementation for InferOutputShapes can only be used for "
381  "layers with the same number of input and output slots. This doesn't "
382  "hold for %1% layer %2% (#inputs=%3% #outputs=%4%) %5%")
383  % GetLayerTypeAsCString(this->GetType())
384  % GetNameStr()
385  % GetNumInputSlots()
386  % GetNumOutputSlots()
387  % CHECK_LOCATION().AsString()));
388  }
389  return inputShapes;
390 }
unsigned int GetNumInputSlots() const override
Returns the number of connectable input slots.
Definition: Layer.hpp:309
unsigned int GetNumOutputSlots() const override
Returns the number of connectable output slots.
Definition: Layer.hpp:310
char const * GetLayerTypeAsCString(LayerType type)
const std::string & GetNameStr() const
Definition: Layer.hpp:216
#define ARMNN_ASSERT(COND)
Definition: Assert.hpp:14
#define CHECK_LOCATION()
Definition: Exceptions.hpp:197
LayerType GetType() const
Definition: Layer.hpp:261
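
In other words, the default is a pure pass-through and is only valid when the input and output slot counts match; layers that change the shape must override it. A sketch of what that means for a one-input, one-output layer (the helper name is illustrative):

#include <armnn/Tensor.hpp>
#include <vector>

// For a 1-in/1-out layer the base-class default echoes the input shape back unchanged.
std::vector<armnn::TensorShape> InferForUnaryLayer(const armnn::Layer& layer,
                                                   const armnn::TensorShape& inputShape)
{
    return layer.InferOutputShapes({ inputShape });   // returns { inputShape } for the default
}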

◆ IsOutputUnconnected()

bool IsOutputUnconnected ( )
inline

Definition at line 245 of file Layer.hpp.

Referenced by OptimizeForConnectionImpl< BaseType, ChildType, Wrapped >::Run().

246  {
247  unsigned int numConnections = 0;
248 
249  for (auto&& output : GetOutputSlots())
250  {
251  numConnections += output.GetNumConnections();
252  }
253 
254  return (GetNumOutputSlots() > 0) && (numConnections == 0);
255  }
unsigned int GetNumOutputSlots() const override
Returns the number of connectable output slots.
Definition: Layer.hpp:310
const std::vector< OutputSlot > & GetOutputSlots() const
Definition: Layer.hpp:234

◆ OperateOnConstantTensors()

void OperateOnConstantTensors ( Op  op)
inline

Definition at line 294 of file Layer.hpp.

Referenced by Layer::ReleaseConstantData(), and ConvertConstants< Converter, Predicate >::Run().

295  {
296  for (auto constant : GetConstantTensorsByRef())
297  {
298  if (constant.get())
299  {
300  op(constant);
301  }
302  }
303  };
virtual ConstantTensors GetConstantTensorsByRef()
Definition: Layer.hpp:379
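
The callable receives each non-empty std::unique_ptr<ScopedCpuTensorHandle> by reference, exactly as ReleaseConstantData() below uses it. A sketch that counts a layer's constant tensors; the CpuTensorHandle.hpp include path is an assumption and may differ between versions.

#include <backendsCommon/CpuTensorHandle.hpp>   // for armnn::ScopedCpuTensorHandle (assumed path)
#include <memory>

// Count the constant tensors a layer currently owns; the helper only visits non-empty handles.
unsigned int CountConstants(armnn::Layer& layer)
{
    unsigned int count = 0;
    layer.OperateOnConstantTensors([&count](std::unique_ptr<armnn::ScopedCpuTensorHandle>& /*handle*/)
    {
        ++count;
    });
    return count;
}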

◆ PrepInfoAndDesc()

WorkloadInfo PrepInfoAndDesc ( QueueDescriptor &  descriptor) const
inline protected

Helper function to reduce duplication in *LayerCreateWorkload.

Definition at line 366 of file Layer.hpp.

References armnn::info.

Referenced by ConvertFp32ToFp16Layer::CreateWorkload(), RankLayer::CreateWorkload(), ConvertBf16ToFp32Layer::CreateWorkload(), RsqrtLayer::CreateWorkload(), QuantizeLayer::CreateWorkload(), DebugLayer::CreateWorkload(), DequantizeLayer::CreateWorkload(), AdditionLayer::CreateWorkload(), ConvertFp16ToFp32Layer::CreateWorkload(), FloorLayer::CreateWorkload(), MemImportLayer::CreateWorkload(), MemCopyLayer::CreateWorkload(), AbsLayer::CreateWorkload(), SwitchLayer::CreateWorkload(), ConvertFp32ToBf16Layer::CreateWorkload(), DivisionLayer::CreateWorkload(), MinimumLayer::CreateWorkload(), SubtractionLayer::CreateWorkload(), MultiplicationLayer::CreateWorkload(), MaximumLayer::CreateWorkload(), PreluLayer::CreateWorkload(), ConstantLayer::CreateWorkload(), QuantizedLstmLayer::CreateWorkload(), and LayerWithParameters< ResizeDescriptor >::PrepInfoAndDesc().

367  {
368  WorkloadInfo info;
369  CollectQueueDescriptorInputs(descriptor, info);
370  CollectQueueDescriptorOutputs(descriptor, info);
371  return info;
372  }
void CollectQueueDescriptorInputs(QueueDescriptor &descriptor, WorkloadInfo &info) const
Definition: Layer.hpp:343
void CollectQueueDescriptorOutputs(QueueDescriptor &descriptor, WorkloadInfo &info) const
Definition: Layer.hpp:350
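
A typical leaf-layer CreateWorkload fills its queue descriptor and lets PrepInfoAndDesc gather the input and output tensor infos before handing both to the workload factory. A sketch modelled on FloorLayer::CreateWorkload (listed in the references above); the descriptor type and factory method differ per layer.

std::unique_ptr<IWorkload> FloorLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
    FloorQueueDescriptor descriptor;
    // PrepInfoAndDesc fills descriptor.m_Inputs/m_Outputs and returns the matching WorkloadInfo.
    return factory.CreateFloor(descriptor, PrepInfoAndDesc(descriptor));
}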

◆ ReleaseConstantData()

void ReleaseConstantData ( )
virtual

Reimplemented in ConstantLayer.

Definition at line 270 of file Layer.cpp.

References Layer::OperateOnConstantTensors().

Referenced by BOOST_AUTO_TEST_CASE().

271 {
272  // Now free up the static data.
273  OperateOnConstantTensors([](std::unique_ptr<ScopedCpuTensorHandle>& handle)
274  {
275  handle.reset(nullptr);
276  });
277 }
void OperateOnConstantTensors(Op op)
Definition: Layer.hpp:294

◆ Reparent()

virtual void Reparent ( Graph &  dest,
std::list< Layer *>::const_iterator  iterator 
)
pure virtual

◆ ResetPriority()

void ResetPriority ( ) const

Definition at line 288 of file Layer.cpp.

289 {
290  m_Priority = 0;
291  m_Visiting = false;
292 }

◆ SerializeLayerParameters()

void SerializeLayerParameters ( ParameterStringifyFunction &  fn) const
virtual

Helper to serialize the layer parameters to string.

(currently used in DotSerializer and company).

Reimplemented in Convolution2dLayer, DepthwiseConvolution2dLayer, LayerWithParameters< Parameters >, LayerWithParameters< FakeQuantizationDescriptor >, LayerWithParameters< SoftmaxDescriptor >, LayerWithParameters< LstmDescriptor >, LayerWithParameters< LogSoftmaxDescriptor >, LayerWithParameters< PreCompiledDescriptor >, LayerWithParameters< BatchToSpaceNdDescriptor >, LayerWithParameters< PermuteDescriptor >, LayerWithParameters< SpaceToBatchNdDescriptor >, LayerWithParameters< DepthToSpaceDescriptor >, LayerWithParameters< ReshapeDescriptor >, LayerWithParameters< ElementwiseUnaryDescriptor >, LayerWithParameters< GatherDescriptor >, LayerWithParameters< SpaceToDepthDescriptor >, LayerWithParameters< OriginsDescriptor >, LayerWithParameters< ViewsDescriptor >, LayerWithParameters< Pooling2dDescriptor >, LayerWithParameters< Convolution2dDescriptor >, LayerWithParameters< ActivationDescriptor >, LayerWithParameters< StandInDescriptor >, LayerWithParameters< TransposeDescriptor >, LayerWithParameters< StackDescriptor >, LayerWithParameters< MeanDescriptor >, LayerWithParameters< ComparisonDescriptor >, LayerWithParameters< InstanceNormalizationDescriptor >, LayerWithParameters< TransposeConvolution2dDescriptor >, LayerWithParameters< BatchNormalizationDescriptor >, LayerWithParameters< ArgMinMaxDescriptor >, LayerWithParameters< DetectionPostProcessDescriptor >, LayerWithParameters< PadDescriptor >, LayerWithParameters< L2NormalizationDescriptor >, LayerWithParameters< FillDescriptor >, LayerWithParameters< SliceDescriptor >, LayerWithParameters< DepthwiseConvolution2dDescriptor >, LayerWithParameters< QLstmDescriptor >, LayerWithParameters< NormalizationDescriptor >, LayerWithParameters< FullyConnectedDescriptor >, LayerWithParameters< StridedSliceDescriptor >, and LayerWithParameters< ResizeDescriptor >.

Definition at line 446 of file Layer.cpp.

References armnn::GetLayerTypeAsCString().

Referenced by LayerWithParameters< ResizeDescriptor >::SerializeLayerParameters().

447 {
448  std::string layerType = GetLayerTypeAsCString(m_Type);
449  std::string backendId = std::string(m_BackendId);
450  if(!(m_LayerName.compare("") == 0) && !m_LayerName.empty())
451  {
452  fn("LayerName",m_LayerName);
453  }
454  if(!(layerType.compare("") == 0) && !layerType.empty())
455  {
456  fn("LayerType",layerType);
457  }
458  if(!(backendId.compare("") == 0) && !backendId.empty())
459  {
460  fn("BackendID",backendId);
461  }
462 }
char const * GetLayerTypeAsCString(LayerType type)
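
ParameterStringifyFunction is assumed here to be the std::function<void(const std::string& name, const std::string& value)> alias declared in SerializeLayerParameters.hpp, so dumping a layer's parameters is a matter of passing a lambda. A minimal sketch:

#include <Layer.hpp>
#include <iostream>
#include <string>

// Print the layer's name, type, backend id and any descriptor fields as "name=value" lines.
void DumpParameters(const armnn::Layer& layer)
{
    armnn::ParameterStringifyFunction printer =
        [](const std::string& name, const std::string& value)
        {
            std::cout << name << "=" << value << "\n";
        };
    layer.SerializeLayerParameters(printer);
}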

◆ SetBackendId()

void SetBackendId ( const BackendId &  id)
inline

◆ SetGuid()

void SetGuid ( LayerGuid  guid)
inline

Definition at line 317 of file Layer.hpp.

317 { m_Guid = guid; }

◆ SetShapeInferenceMethod()

void SetShapeInferenceMethod ( ShapeInferenceMethod  shapeInferenceMethod)
inline

Definition at line 332 of file Layer.hpp.

333  {
334  m_ShapeInferenceMethod = shapeInferenceMethod;
335  }
ShapeInferenceMethod m_ShapeInferenceMethod
Definition: Layer.hpp:387

◆ ValidateAndCopyShape()

void ValidateAndCopyShape ( const TensorShape &  outputShape,
const TensorShape &  inferredShape,
const ShapeInferenceMethod  shapeInferenceMethod,
const std::string &  layerName,
const unsigned int  outputSlotIndex = 0 
)
protected

Definition at line 392 of file Layer.cpp.

References TensorShape::GetDimensionality(), TensorShape::GetDimensionSpecificity(), TensorShape::GetNumDimensions(), Layer::GetOutputSlot(), OutputSlot::GetTensorInfo(), armnn::info, OutputSlot::SetTensorInfo(), armnn::Specified, and armnn::ValidateOnly.

Referenced by ElementwiseBaseLayer::ValidateTensorShapesFromInputs(), RankLayer::ValidateTensorShapesFromInputs(), QuantizeLayer::ValidateTensorShapesFromInputs(), ActivationLayer::ValidateTensorShapesFromInputs(), ConvertFp32ToFp16Layer::ValidateTensorShapesFromInputs(), ConvertFp16ToFp32Layer::ValidateTensorShapesFromInputs(), Pooling2dLayer::ValidateTensorShapesFromInputs(), FakeQuantizationLayer::ValidateTensorShapesFromInputs(), ConvertFp32ToBf16Layer::ValidateTensorShapesFromInputs(), AbsLayer::ValidateTensorShapesFromInputs(), SliceLayer::ValidateTensorShapesFromInputs(), BatchToSpaceNdLayer::ValidateTensorShapesFromInputs(), FloorLayer::ValidateTensorShapesFromInputs(), ResizeLayer::ValidateTensorShapesFromInputs(), RsqrtLayer::ValidateTensorShapesFromInputs(), InstanceNormalizationLayer::ValidateTensorShapesFromInputs(), SoftmaxLayer::ValidateTensorShapesFromInputs(), ConvertBf16ToFp32Layer::ValidateTensorShapesFromInputs(), DebugLayer::ValidateTensorShapesFromInputs(), L2NormalizationLayer::ValidateTensorShapesFromInputs(), StackLayer::ValidateTensorShapesFromInputs(), SwitchLayer::ValidateTensorShapesFromInputs(), NormalizationLayer::ValidateTensorShapesFromInputs(), DequantizeLayer::ValidateTensorShapesFromInputs(), MemCopyLayer::ValidateTensorShapesFromInputs(), MemImportLayer::ValidateTensorShapesFromInputs(), MergeLayer::ValidateTensorShapesFromInputs(), PadLayer::ValidateTensorShapesFromInputs(), LogSoftmaxLayer::ValidateTensorShapesFromInputs(), GatherLayer::ValidateTensorShapesFromInputs(), MeanLayer::ValidateTensorShapesFromInputs(), TransposeLayer::ValidateTensorShapesFromInputs(), PermuteLayer::ValidateTensorShapesFromInputs(), ReshapeLayer::ValidateTensorShapesFromInputs(), StridedSliceLayer::ValidateTensorShapesFromInputs(), DetectionPostProcessLayer::ValidateTensorShapesFromInputs(), ElementwiseUnaryLayer::ValidateTensorShapesFromInputs(), ArgMinMaxLayer::ValidateTensorShapesFromInputs(), SpaceToBatchNdLayer::ValidateTensorShapesFromInputs(), ComparisonLayer::ValidateTensorShapesFromInputs(), FullyConnectedLayer::ValidateTensorShapesFromInputs(), PreluLayer::ValidateTensorShapesFromInputs(), TransposeConvolution2dLayer::ValidateTensorShapesFromInputs(), SpaceToDepthLayer::ValidateTensorShapesFromInputs(), DepthToSpaceLayer::ValidateTensorShapesFromInputs(), DepthwiseConvolution2dLayer::ValidateTensorShapesFromInputs(), Convolution2dLayer::ValidateTensorShapesFromInputs(), SplitterLayer::ValidateTensorShapesFromInputs(), BatchNormalizationLayer::ValidateTensorShapesFromInputs(), QuantizedLstmLayer::ValidateTensorShapesFromInputs(), LstmLayer::ValidateTensorShapesFromInputs(), and QLstmLayer::ValidateTensorShapesFromInputs().

397 {
398  if (shapeInferenceMethod == ShapeInferenceMethod::ValidateOnly)
399  {
400  ConditionalThrowIfNotEqual<LayerValidationException>(
401  layerName + ": TensorShape set on OutputSlot[0] does not match the inferred shape.",
402  outputShape,
403  inferredShape);
404  return;
405  }
406 
407  if (outputShape.GetDimensionality() == Dimensionality::Specified)
408  {
409  for (unsigned int i = 0; i < outputShape.GetNumDimensions(); ++i)
410  {
411  if (outputShape.GetDimensionSpecificity(i) && outputShape[i] != inferredShape[i])
412  {
413  std::stringstream ss;
414  ss << layerName << ": TensorShape set on OutputSlot[" << outputSlotIndex <<
415  "] does not match the inferred shape at dimension index [";
416  ss << i << "] " << outputShape << " != " << inferredShape;
417  throw LayerValidationException(ss.str());
418  }
419  }
420  }
421 
422  TensorInfo info = GetOutputSlot(outputSlotIndex).GetTensorInfo();
423 
424  armnn::TensorInfo inferredTensorInfo(inferredShape,
425  info.GetDataType(),
426  info.GetQuantizationScale(),
427  info.GetQuantizationOffset());
428 
429  GetOutputSlot(outputSlotIndex).SetTensorInfo(inferredTensorInfo);
430 }
Validate all output shapes.
void SetTensorInfo(const TensorInfo &tensorInfo) override
Definition: Layer.cpp:58
const OutputSlot & GetOutputSlot(unsigned int index=0) const override
Get the const output slot handle by slot index.
Definition: Layer.hpp:314
const TensorInfo & GetTensorInfo() const override
Definition: Layer.cpp:63
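
Together with VerifyLayerConnections, VerifyShapeInferenceType and InferOutputShapes, this forms the usual body of a layer's ValidateTensorShapesFromInputs. A sketch following the single-input layers such as ActivationLayer; the layer name string passed in is illustrative.

void ActivationLayer::ValidateTensorShapesFromInputs()
{
    // One input connection is expected for this layer type.
    VerifyLayerConnections(1, CHECK_LOCATION());

    const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();

    // Reject unspecified dimensions when only validating.
    VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);

    auto inferredShapes = InferOutputShapes(
        { GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });

    ARMNN_ASSERT(inferredShapes.size() == 1);

    // Either validate the shape already set on the output slot or copy the inferred one onto it.
    ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "ActivationLayer");
}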

◆ ValidateTensorShapesFromInputs()

◆ VerifyLayerConnections()

void VerifyLayerConnections ( unsigned int  expectedConnections,
const CheckLocation &  location 
) const

Definition at line 344 of file Layer.cpp.

References ARMNN_ASSERT, CheckLocation::AsString(), Layer::GetInputSlot(), armnn::GetLayerTypeAsCString(), Layer::GetNameStr(), Layer::GetNumInputSlots(), and Layer::GetType().

Referenced by ElementwiseBaseLayer::ValidateTensorShapesFromInputs(), RankLayer::ValidateTensorShapesFromInputs(), QuantizeLayer::ValidateTensorShapesFromInputs(), ActivationLayer::ValidateTensorShapesFromInputs(), ConvertFp32ToFp16Layer::ValidateTensorShapesFromInputs(), FillLayer::ValidateTensorShapesFromInputs(), DequantizeLayer::ValidateTensorShapesFromInputs(), NormalizationLayer::ValidateTensorShapesFromInputs(), SoftmaxLayer::ValidateTensorShapesFromInputs(), SliceLayer::ValidateTensorShapesFromInputs(), RsqrtLayer::ValidateTensorShapesFromInputs(), ResizeLayer::ValidateTensorShapesFromInputs(), Pooling2dLayer::ValidateTensorShapesFromInputs(), ConvertFp16ToFp32Layer::ValidateTensorShapesFromInputs(), ConvertBf16ToFp32Layer::ValidateTensorShapesFromInputs(), AbsLayer::ValidateTensorShapesFromInputs(), FakeQuantizationLayer::ValidateTensorShapesFromInputs(), ConvertFp32ToBf16Layer::ValidateTensorShapesFromInputs(), BatchToSpaceNdLayer::ValidateTensorShapesFromInputs(), FloorLayer::ValidateTensorShapesFromInputs(), DebugLayer::ValidateTensorShapesFromInputs(), InstanceNormalizationLayer::ValidateTensorShapesFromInputs(), MergeLayer::ValidateTensorShapesFromInputs(), L2NormalizationLayer::ValidateTensorShapesFromInputs(), StackLayer::ValidateTensorShapesFromInputs(), SwitchLayer::ValidateTensorShapesFromInputs(), MemCopyLayer::ValidateTensorShapesFromInputs(), MemImportLayer::ValidateTensorShapesFromInputs(), PadLayer::ValidateTensorShapesFromInputs(), GatherLayer::ValidateTensorShapesFromInputs(), LogSoftmaxLayer::ValidateTensorShapesFromInputs(), TransposeLayer::ValidateTensorShapesFromInputs(), MeanLayer::ValidateTensorShapesFromInputs(), PermuteLayer::ValidateTensorShapesFromInputs(), ReshapeLayer::ValidateTensorShapesFromInputs(), ElementwiseUnaryLayer::ValidateTensorShapesFromInputs(), ArgMinMaxLayer::ValidateTensorShapesFromInputs(), DetectionPostProcessLayer::ValidateTensorShapesFromInputs(), StridedSliceLayer::ValidateTensorShapesFromInputs(), ComparisonLayer::ValidateTensorShapesFromInputs(), TransposeConvolution2dLayer::ValidateTensorShapesFromInputs(), SpaceToDepthLayer::ValidateTensorShapesFromInputs(), SpaceToBatchNdLayer::ValidateTensorShapesFromInputs(), PreluLayer::ValidateTensorShapesFromInputs(), DepthwiseConvolution2dLayer::ValidateTensorShapesFromInputs(), DepthToSpaceLayer::ValidateTensorShapesFromInputs(), Convolution2dLayer::ValidateTensorShapesFromInputs(), ConcatLayer::ValidateTensorShapesFromInputs(), BatchNormalizationLayer::ValidateTensorShapesFromInputs(), QuantizedLstmLayer::ValidateTensorShapesFromInputs(), LstmLayer::ValidateTensorShapesFromInputs(), and QLstmLayer::ValidateTensorShapesFromInputs().

345 {
346  ARMNN_ASSERT(GetNumInputSlots() == expectedConnections);
347 
348  for (unsigned int i=0; i<expectedConnections; ++i)
349  {
350  if (GetInputSlot(i).GetConnection() == nullptr)
351  {
352  throw LayerValidationException(
353  boost::str(
354  boost::format(
355  "Input connection #%1% must be connected "
356  "for %2% layer %3% %4%")
357  % i
358  % GetLayerTypeAsCString(this->GetType())
359  % GetNameStr()
360  % location.AsString()));
361  }
362  }
363 }
unsigned int GetNumInputSlots() const override
Returns the number of connectable input slots.
Definition: Layer.hpp:309
const InputSlot & GetInputSlot(unsigned int index) const override
Get a const input slot handle by slot index.
Definition: Layer.hpp:312
char const * GetLayerTypeAsCString(LayerType type)
const std::string & GetNameStr() const
Definition: Layer.hpp:216
#define ARMNN_ASSERT(COND)
Definition: Assert.hpp:14
LayerType GetType() const
Definition: Layer.hpp:261

◆ VerifyShapeInferenceType()

void VerifyShapeInferenceType ( const TensorShape &  outputShape,
ShapeInferenceMethod  shapeInferenceMethod 
)
protected

Definition at line 432 of file Layer.cpp.

References TensorShape::AreAllDimensionsSpecified(), TensorShape::GetDimensionality(), armnn::NotSpecified, and armnn::ValidateOnly.

Referenced by ElementwiseBaseLayer::ValidateTensorShapesFromInputs(), RankLayer::ValidateTensorShapesFromInputs(), QuantizeLayer::ValidateTensorShapesFromInputs(), ActivationLayer::ValidateTensorShapesFromInputs(), ConvertFp32ToFp16Layer::ValidateTensorShapesFromInputs(), FillLayer::ValidateTensorShapesFromInputs(), DequantizeLayer::ValidateTensorShapesFromInputs(), MergeLayer::ValidateTensorShapesFromInputs(), NormalizationLayer::ValidateTensorShapesFromInputs(), ConvertBf16ToFp32Layer::ValidateTensorShapesFromInputs(), SliceLayer::ValidateTensorShapesFromInputs(), RsqrtLayer::ValidateTensorShapesFromInputs(), ResizeLayer::ValidateTensorShapesFromInputs(), ConvertFp16ToFp32Layer::ValidateTensorShapesFromInputs(), Pooling2dLayer::ValidateTensorShapesFromInputs(), FakeQuantizationLayer::ValidateTensorShapesFromInputs(), AbsLayer::ValidateTensorShapesFromInputs(), ConvertFp32ToBf16Layer::ValidateTensorShapesFromInputs(), BatchToSpaceNdLayer::ValidateTensorShapesFromInputs(), FloorLayer::ValidateTensorShapesFromInputs(), SoftmaxLayer::ValidateTensorShapesFromInputs(), DebugLayer::ValidateTensorShapesFromInputs(), InstanceNormalizationLayer::ValidateTensorShapesFromInputs(), StackLayer::ValidateTensorShapesFromInputs(), L2NormalizationLayer::ValidateTensorShapesFromInputs(), SwitchLayer::ValidateTensorShapesFromInputs(), MemCopyLayer::ValidateTensorShapesFromInputs(), MemImportLayer::ValidateTensorShapesFromInputs(), PadLayer::ValidateTensorShapesFromInputs(), TransposeLayer::ValidateTensorShapesFromInputs(), MeanLayer::ValidateTensorShapesFromInputs(), GatherLayer::ValidateTensorShapesFromInputs(), LogSoftmaxLayer::ValidateTensorShapesFromInputs(), ReshapeLayer::ValidateTensorShapesFromInputs(), PermuteLayer::ValidateTensorShapesFromInputs(), ArgMinMaxLayer::ValidateTensorShapesFromInputs(), DetectionPostProcessLayer::ValidateTensorShapesFromInputs(), ElementwiseUnaryLayer::ValidateTensorShapesFromInputs(), StridedSliceLayer::ValidateTensorShapesFromInputs(), FullyConnectedLayer::ValidateTensorShapesFromInputs(), DepthwiseConvolution2dLayer::ValidateTensorShapesFromInputs(), ComparisonLayer::ValidateTensorShapesFromInputs(), SpaceToBatchNdLayer::ValidateTensorShapesFromInputs(), TransposeConvolution2dLayer::ValidateTensorShapesFromInputs(), PreluLayer::ValidateTensorShapesFromInputs(), SpaceToDepthLayer::ValidateTensorShapesFromInputs(), DepthToSpaceLayer::ValidateTensorShapesFromInputs(), Convolution2dLayer::ValidateTensorShapesFromInputs(), ConcatLayer::ValidateTensorShapesFromInputs(), SplitterLayer::ValidateTensorShapesFromInputs(), BatchNormalizationLayer::ValidateTensorShapesFromInputs(), QuantizedLstmLayer::ValidateTensorShapesFromInputs(), LstmLayer::ValidateTensorShapesFromInputs(), and QLstmLayer::ValidateTensorShapesFromInputs().

433 {
434  if (shapeInferenceMethod == ShapeInferenceMethod::ValidateOnly)
435  {
436  ConditionalThrow<LayerValidationException>(
437  outputShape.GetDimensionality() != Dimensionality::NotSpecified,
438  "Dimensionality can not be NotSpecified while using ShapeInferenceMethod::ValidateOnly");
439 
440  ConditionalThrow<LayerValidationException>(
441  outputShape.AreAllDimensionsSpecified(),
442  "Unspecified dimension while using ShapeInferenceMethod::ValidateOnly");
443  }
444 }
Validate all output shapes.

Friends And Related Function Documentation

◆ Graph

friend class Graph
friend

Definition at line 339 of file Layer.hpp.

Member Data Documentation

◆ m_OutputHandlers

std::vector<OutputHandler> m_OutputHandlers
protected

◆ m_ShapeInferenceMethod

ShapeInferenceMethod m_ShapeInferenceMethod
protected

Definition at line 387 of file Layer.hpp.

Referenced by Layer::CloneBase(), ElementwiseBaseLayer::ValidateTensorShapesFromInputs(), RankLayer::ValidateTensorShapesFromInputs(), QuantizeLayer::ValidateTensorShapesFromInputs(), ActivationLayer::ValidateTensorShapesFromInputs(), ConvertFp32ToFp16Layer::ValidateTensorShapesFromInputs(), FillLayer::ValidateTensorShapesFromInputs(), DequantizeLayer::ValidateTensorShapesFromInputs(), MemImportLayer::ValidateTensorShapesFromInputs(), SoftmaxLayer::ValidateTensorShapesFromInputs(), MergeLayer::ValidateTensorShapesFromInputs(), RsqrtLayer::ValidateTensorShapesFromInputs(), ResizeLayer::ValidateTensorShapesFromInputs(), NormalizationLayer::ValidateTensorShapesFromInputs(), ConvertBf16ToFp32Layer::ValidateTensorShapesFromInputs(), ConvertFp16ToFp32Layer::ValidateTensorShapesFromInputs(), Pooling2dLayer::ValidateTensorShapesFromInputs(), AbsLayer::ValidateTensorShapesFromInputs(), FakeQuantizationLayer::ValidateTensorShapesFromInputs(), ConvertFp32ToBf16Layer::ValidateTensorShapesFromInputs(), BatchToSpaceNdLayer::ValidateTensorShapesFromInputs(), FloorLayer::ValidateTensorShapesFromInputs(), SliceLayer::ValidateTensorShapesFromInputs(), DebugLayer::ValidateTensorShapesFromInputs(), StackLayer::ValidateTensorShapesFromInputs(), InstanceNormalizationLayer::ValidateTensorShapesFromInputs(), SwitchLayer::ValidateTensorShapesFromInputs(), L2NormalizationLayer::ValidateTensorShapesFromInputs(), MemCopyLayer::ValidateTensorShapesFromInputs(), PadLayer::ValidateTensorShapesFromInputs(), TransposeLayer::ValidateTensorShapesFromInputs(), GatherLayer::ValidateTensorShapesFromInputs(), MeanLayer::ValidateTensorShapesFromInputs(), LogSoftmaxLayer::ValidateTensorShapesFromInputs(), ReshapeLayer::ValidateTensorShapesFromInputs(), PermuteLayer::ValidateTensorShapesFromInputs(), StridedSliceLayer::ValidateTensorShapesFromInputs(), DetectionPostProcessLayer::ValidateTensorShapesFromInputs(), ElementwiseUnaryLayer::ValidateTensorShapesFromInputs(), ArgMinMaxLayer::ValidateTensorShapesFromInputs(), ComparisonLayer::ValidateTensorShapesFromInputs(), PreluLayer::ValidateTensorShapesFromInputs(), SpaceToDepthLayer::ValidateTensorShapesFromInputs(), TransposeConvolution2dLayer::ValidateTensorShapesFromInputs(), SpaceToBatchNdLayer::ValidateTensorShapesFromInputs(), FullyConnectedLayer::ValidateTensorShapesFromInputs(), DepthwiseConvolution2dLayer::ValidateTensorShapesFromInputs(), DepthToSpaceLayer::ValidateTensorShapesFromInputs(), Convolution2dLayer::ValidateTensorShapesFromInputs(), ConcatLayer::ValidateTensorShapesFromInputs(), SplitterLayer::ValidateTensorShapesFromInputs(), BatchNormalizationLayer::ValidateTensorShapesFromInputs(), QuantizedLstmLayer::ValidateTensorShapesFromInputs(), LstmLayer::ValidateTensorShapesFromInputs(), and QLstmLayer::ValidateTensorShapesFromInputs().


The documentation for this class was generated from the following files:
Layer.hpp
Layer.cpp