blob: d2c006655cf4c0d13b63307a565faecfbcd308dc (
plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
|
//
// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
#include <Layer.hpp>
namespace armnn
{
/// This layer converts data type BFloat16 to Float32.
/// This layer converts data type BFloat16 to Float32.
class ConvertBf16ToFp32Layer : public Layer
{
public:
/// Makes a workload for the ConvertBf16ToFp32 type.
/// @param [in] factory The workload factory which will create the workload.
/// @return A pointer to the created workload, or nullptr if not created.
virtual std::unique_ptr<IWorkload> CreateWorkload(const IWorkloadFactory& factory) const override;
/// Creates a dynamically-allocated copy of this layer.
/// @param [in] graph The graph into which this layer is being cloned.
ConvertBf16ToFp32Layer* Clone(Graph& graph) const override;
/// Check if the input tensor shape(s) will lead to a valid configuration
/// of @ref ConvertBf16ToFp32Layer. Takes no parameters: the shapes are read
/// from the layer's connected inputs. (A previous comment documented a
/// shapeInferenceMethod parameter that does not exist on this override.)
void ValidateTensorShapesFromInputs() override;
/// Applies a visitor to this layer. The surrounding macros suppress
/// deprecation warnings because ILayerVisitor itself is deprecated.
ARMNN_NO_DEPRECATE_WARN_BEGIN
void Accept(ILayerVisitor& visitor) const override;
ARMNN_NO_DEPRECATE_WARN_END
protected:
/// Constructor to create a ConvertBf16ToFp32Layer.
/// @param [in] name Optional name for the layer.
ConvertBf16ToFp32Layer(const char* name);
/// Default destructor; protected so the layer is not deleted directly
/// through this type by external code.
~ConvertBf16ToFp32Layer() = default;
};
} // namespace
|