From Clone() (source line number removed):

    return CloneBase<ConvertFp16ToFp32Layer>(graph, GetName());

From Accept() — this layer is generated during optimization, so it must not occur in a user-supplied graph:

    throw armnn::Exception("ConvertFp16ToFp32Layer should never appear in an input graph");
const TensorShape & GetShape() const
std::vector< TensorShape > InferOutputShapes(const std::vector< TensorShape > &inputShapes) const override
Infer the shape of the output(s) based on the provided input shape(s).
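Because the layer changes only the element type, the inferred output shape matches the input shape. A minimal sketch, assuming ArmNN's internal Graph.hpp and layer headers are available (the layer's constructor is protected, so creation goes through Graph::AddLayer):

    #include "Graph.hpp"                          // internal ArmNN header (assumption)
    #include "layers/ConvertFp16ToFp32Layer.hpp"  // internal ArmNN header (assumption)

    armnn::Graph graph;
    auto* layer = graph.AddLayer<armnn::ConvertFp16ToFp32Layer>("fp16_to_fp32");

    armnn::TensorShape inputShape({ 1, 3, 224, 224 });
    std::vector<armnn::TensorShape> out = layer->InferOutputShapes({ inputShape });
    // out[0] == inputShape: only the data type changes, never the shape.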
#define ARMNN_NO_DEPRECATE_WARN_BEGIN
This layer converts tensors from the Float16 data type to Float32.
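This layer never appears in a user-built graph (see the Accept() snippet above); the optimizer inserts it when mixed-precision execution is requested. A sketch using the public API, assuming an already-constructed INetworkPtr named `network` (hypothetical variable):

    // Ask the optimizer to run eligible layers in FP16; it then inserts
    // ConvertFp32ToFp16 / ConvertFp16ToFp32 layers at the precision boundaries.
    armnn::IRuntime::CreationOptions runtimeOptions;
    armnn::IRuntimePtr runtime = armnn::IRuntime::Create(runtimeOptions);

    armnn::OptimizerOptions optimizerOptions;
    optimizerOptions.m_ReduceFp32ToFp16 = true;

    armnn::IOptimizedNetworkPtr optNet =
        armnn::Optimize(*network, { armnn::Compute::GpuAcc },
                        runtime->GetDeviceSpec(), optimizerOptions);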
ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor &visitor) const override
ARMNN_NO_DEPRECATE_WARN_END
void VerifyShapeInferenceType(const TensorShape &outputShape, ShapeInferenceMethod shapeInferenceMethod)
Copyright (c) 2021 ARM Limited and Contributors.
void IgnoreUnused(Ts &&...)
void ValidateAndCopyShape(const TensorShape &outputShape, const TensorShape &inferredShape, const ShapeInferenceMethod shapeInferenceMethod, const std::string &layerName, const unsigned int outputSlotIndex=0)
ConvertFp16ToFp32Layer(const char *name)
Constructor to create a ConvertFp16ToFp32Layer.
void VerifyLayerConnections(unsigned int expectedConnections, const CheckLocation &location) const
const InputSlot & GetInputSlot(unsigned int index) const override
Get a const input slot handle by slot index.
#define ARMNN_NO_DEPRECATE_WARN_END
WorkloadInfo PrepInfoAndDesc(QueueDescriptor &descriptor) const
Helper function to reduce duplication in the various *Layer::CreateWorkload implementations.
ConvertFp16ToFp32Layer * Clone(Graph &graph) const override
Creates a dynamically-allocated copy of this layer.
#define ARMNN_ASSERT(COND)
void SetAdditionalInfo(QueueDescriptor &descriptor) const
virtual std::unique_ptr< IWorkload > CreateWorkload(const IWorkloadFactory &factory) const override
Makes a workload for the ConvertFp16ToFp32 type.
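Taken together with SetAdditionalInfo and PrepInfoAndDesc above, a sketch of the common pattern such a CreateWorkload override follows (a sketch of the shared idiom across ArmNN layers, not copied from the source):

    // Inside namespace armnn, in the layer's .cpp file.
    std::unique_ptr<IWorkload>
    ConvertFp16ToFp32Layer::CreateWorkload(const IWorkloadFactory& factory) const
    {
        ConvertFp16ToFp32QueueDescriptor descriptor;
        SetAdditionalInfo(descriptor);   // attach any additional layer info
        // PrepInfoAndDesc builds the WorkloadInfo from the layer's slots,
        // which is the duplication the helper exists to remove.
        return factory.CreateConvertFp16ToFp32(descriptor, PrepInfoAndDesc(descriptor));
    }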
void ValidateTensorShapesFromInputs() override
Check if the input tensor shape(s) will lead to a valid configuration of ConvertFp16ToFp32Layer.
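The helpers listed on this page (VerifyLayerConnections, VerifyShapeInferenceType, InferOutputShapes, ARMNN_ASSERT, ValidateAndCopyShape, m_ShapeInferenceMethod) combine into the usual validation pattern; a sketch of how such a check typically proceeds:

    // Inside namespace armnn. A sketch of the standard validation flow.
    void ConvertFp16ToFp32Layer::ValidateTensorShapesFromInputs()
    {
        // A unary conversion layer expects exactly one input connection.
        VerifyLayerConnections(1, CHECK_LOCATION());

        const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
        VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);

        auto inferredShapes = InferOutputShapes(
            { GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });

        ARMNN_ASSERT(inferredShapes.size() == 1);

        ValidateAndCopyShape(outputShape, inferredShapes[0],
                             m_ShapeInferenceMethod, "ConvertFp16ToFp32Layer");
    }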
Base class for all ArmNN exceptions, so that users can filter for just those.
const OutputSlot & GetOutputSlot(unsigned int index=0) const override
Get the const output slot handle by slot index.
virtual const TensorInfo & GetTensorInfo() const = 0
const char * GetName() const override
Returns the name of the layer.
const TensorInfo & GetTensorInfo() const override
virtual std::unique_ptr< IWorkload > CreateConvertFp16ToFp32(const ConvertFp16ToFp32QueueDescriptor &descriptor, const WorkloadInfo &info) const
ShapeInferenceMethod m_ShapeInferenceMethod
LayerType
When adding a new layer, also adapt the LastLayer enum value in the enum class LayerType below...
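A hypothetical illustration of that convention (the enumerator names here are invented for the example, not ArmNN's actual list):

    enum class LayerType
    {
        Activation,               // first enumerator
        Addition,
        ConvertFp16ToFp32,
        // ... a newly added layer kind is inserted here ...
        Subtraction,
        LastLayer = Subtraction   // must be kept in sync with the final enumerator
    };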