33 auto layer = CloneBase<PreluLayer>(graph,
GetName());
35 return std::move(layer);
54 unsigned int outputDimensions = std::max(inputShapeDimensions, alphaShapeDimensions);
60 unsigned int outputShapeIndex = outputDimensions - 1;
63 while (inputShapeIndex >= 0 && alphaShapeIndex >= 0)
65 unsigned int inputDimension = inputShape[
armnn::numeric_cast<
unsigned int>(inputShapeIndex)];
66 unsigned int alphaDimension = alphaShape[
armnn::numeric_cast<
unsigned int>(alphaShapeIndex)];
69 ARMNN_ASSERT_MSG(inputDimension == alphaDimension || inputDimension == 1 || alphaDimension == 1,
70 "PreluLayer: Dimensions should either match or one should be of size 1");
72 outputShape[outputShapeIndex] = std::max(inputDimension, alphaDimension);
80 while (inputShapeIndex >= 0)
82 outputShape[outputShapeIndex] = inputShape[
armnn::numeric_cast<
unsigned int>(inputShapeIndex)];
89 while (alphaShapeIndex >= 0)
91 outputShape[outputShapeIndex] = alphaShape[
armnn::numeric_cast<
unsigned int>(alphaShapeIndex)];
97 return { outputShape };
122 visitor.VisitPreluLayer(
this,
GetName());
virtual std::unique_ptr< IWorkload > CreateWorkload(const IWorkloadFactory &factory) const override
Makes a workload for the PReLU type.
ARMNN_NO_DEPRECATE_WARN_BEGIN void Accept(ILayerVisitor &visitor) const override
const TensorShape & GetShape() const
PreluLayer * Clone(Graph &graph) const override
Creates a dynamically-allocated copy of this layer.
#define ARMNN_NO_DEPRECATE_WARN_BEGIN
void VerifyShapeInferenceType(const TensorShape &outputShape, ShapeInferenceMethod shapeInferenceMethod)
Copyright (c) 2021 ARM Limited and Contributors.
PreluLayer(const char *name)
Constructor to create a PreluLayer.
void ValidateAndCopyShape(const TensorShape &outputShape, const TensorShape &inferredShape, const ShapeInferenceMethod shapeInferenceMethod, const std::string &layerName, const unsigned int outputSlotIndex=0)
void VerifyLayerConnections(unsigned int expectedConnections, const CheckLocation &location) const
const InputSlot & GetInputSlot(unsigned int index) const override
Get a const input slot handle by slot index.
#define ARMNN_NO_DEPRECATE_WARN_END
#define ARMNN_ASSERT_MSG(COND, MSG)
std::vector< TensorShape > InferOutputShapes(const std::vector< TensorShape > &inputShapes) const override
By default, returns inputShapes if the number of inputs is equal to the number of outputs; otherwise, infers the output shapes from the given input shapes and layer properties.
WorkloadInfo PrepInfoAndDesc(QueueDescriptor &descriptor) const
Helper function to reduce duplication in *LayerCreateWorkload.
#define ARMNN_ASSERT(COND)
void SetAdditionalInfo(QueueDescriptor &descriptor) const
unsigned int GetNumDimensions() const
Function that returns the tensor rank.
std::enable_if_t< std::is_unsigned< Source >::value &&std::is_unsigned< Dest >::value, Dest > numeric_cast(Source source)
const OutputSlot & GetOutputSlot(unsigned int index=0) const override
Get the const output slot handle by slot index.
virtual const TensorInfo & GetTensorInfo() const =0
const char * GetName() const override
Returns the name of the layer.
void ValidateTensorShapesFromInputs() override
Check if the input tensor shape(s) will lead to a valid configuration of PreluLayer.
const TensorInfo & GetTensorInfo() const override
ShapeInferenceMethod m_ShapeInferenceMethod
LayerType
When adding a new layer, also adapt the LastLayer enum value in the enum class LayerType below...
virtual std::unique_ptr< IWorkload > CreatePrelu(const PreluQueueDescriptor &descriptor, const WorkloadInfo &info) const