45 std::vector<unsigned int> dimensionSizes(inputNumDimensions + 1, 0);
46 for (unsigned int i = 0; i < axis; ++i)
48 dimensionSizes[i] = inputShape[i];
53 for (unsigned int i = axis + 1; i < inputNumDimensions + 1; ++i)
55 dimensionSizes[i] = inputShape[i-1];
60 return std::vector<TensorShape>({ targetShape });
66 ConditionalThrowIfNotEqual<LayerValidationException>(
67 "StackLayer: Num Input Slots must match Num Inputs.",
78 std::vector<TensorShape> inputShapes;
86 "] does not match defined input shape");
88 inputShapes.push_back(inputShape);
uint32_t m_Axis
0-based axis along which to stack the input tensors.
void ValidateTensorShapesFromInputs() override
Check if the input tensor shape(s) will lead to a valid configuration of StackLayer.
StackDescriptor m_Param
The parameters for the layer (not including tensor-valued weights etc.).
const StackDescriptor & GetParameters() const
unsigned int GetNumInputSlots() const override
Returns the number of connectable input slots.
const TensorShape & GetShape() const
std::vector< TensorShape > InferOutputShapes(const std::vector< TensorShape > &inputShapes) const override
By default returns inputShapes if the number of inputs are equal to number of outputs, otherwise infers the output shapes from given input shapes and layer properties.
void Stack(const StackQueueDescriptor &data, std::vector< std::unique_ptr< Decoder< float >>> &inputs, Encoder< float > &output, const TensorInfo &inputInfo, const TensorInfo &outputInfo)
TensorShape m_InputShape
Required shape of all input tensors.
virtual std::unique_ptr< IWorkload > CreateStack(const StackQueueDescriptor &descriptor, const WorkloadInfo &info) const
void VerifyShapeInferenceType(const TensorShape &outputShape, ShapeInferenceMethod shapeInferenceMethod)
Copyright (c) 2021 ARM Limited and Contributors.
void IgnoreUnused(Ts &&...)
void Accept(ILayerVisitor &visitor) const override
Apply a visitor to this layer.
virtual void VisitStackLayer(const IConnectableLayer *layer, const StackDescriptor &stackDescriptor, const char *name=nullptr)=0
Function a stack layer should call back to when its Accept(ILayerVisitor&) function is invoked...
void ValidateAndCopyShape(const TensorShape &outputShape, const TensorShape &inferredShape, const ShapeInferenceMethod shapeInferenceMethod, const std::string &layerName, const unsigned int outputSlotIndex=0)
A StackDescriptor for the StackLayer.
void VerifyLayerConnections(unsigned int expectedConnections, const CheckLocation &location) const
StackLayer(const StackDescriptor &param, const char *name)
Constructor to create a StackLayer.
const InputSlot & GetInputSlot(unsigned int index) const override
Get a const input slot handle by slot index.
StackLayer * Clone(Graph &graph) const override
Creates a dynamically-allocated copy of this layer.
This layer represents a stack operation.
virtual std::unique_ptr< IWorkload > CreateWorkload(const IWorkloadFactory &factory) const override
Makes a workload for the Stack type.
#define ARMNN_ASSERT(COND)
uint32_t m_NumInputs
Number of input tensors.
void SetAdditionalInfo(QueueDescriptor &descriptor) const
unsigned int GetNumDimensions() const
Function that returns the tensor rank.
WorkloadInfo PrepInfoAndDesc(QueueDescriptor &descriptor) const
Helper function to reduce duplication in *LayerCreateWorkload.
const OutputSlot & GetOutputSlot(unsigned int index=0) const override
Get the const output slot handle by slot index.
virtual const TensorInfo & GetTensorInfo() const =0
const char * GetName() const override
Returns the name of the layer.
const TensorInfo & GetTensorInfo() const override
ShapeInferenceMethod m_ShapeInferenceMethod
LayerType
When adding a new layer, adapt also the LastLayer enum value in the enum class LayerType below...