From 7ddbbae7ad3e0000d8e6a76458cac68254dc8048 Mon Sep 17 00:00:00 2001
From: Narumol Prangnawarat <narumol.prangnawarat@arm.com>
Date: Fri, 13 Mar 2020 10:26:05 +0000
Subject: IVGCVSW-4515 Add ConvertBf16ToFp32Layer and Ref workload support

Signed-off-by: Narumol Prangnawarat <narumol.prangnawarat@arm.com>
Change-Id: Ida6d7e1d2c9abe0618f8b711bab9d62c011090d6
---
 src/armnn/InternalTypes.cpp                 |  1 +
 src/armnn/InternalTypes.hpp                 |  1 +
 src/armnn/LayersFwd.hpp                     |  2 ++
 src/armnn/layers/ConvertBf16ToFp32Layer.cpp | 55 +++++++++++++++++++++++++++++
 src/armnn/layers/ConvertBf16ToFp32Layer.hpp | 42 ++++++++++++++++++++++
 5 files changed, 101 insertions(+)
 create mode 100644 src/armnn/layers/ConvertBf16ToFp32Layer.cpp
 create mode 100644 src/armnn/layers/ConvertBf16ToFp32Layer.hpp

(limited to 'src/armnn')

diff --git a/src/armnn/InternalTypes.cpp b/src/armnn/InternalTypes.cpp
index c032e44cd3..3f3eed56e7 100644
--- a/src/armnn/InternalTypes.cpp
+++ b/src/armnn/InternalTypes.cpp
@@ -22,6 +22,7 @@ char const* GetLayerTypeAsCString(LayerType type)
         case LayerType::Comparison: return "Comparison";
         case LayerType::Concat: return "Concat";
         case LayerType::Constant: return "Constant";
+        case LayerType::ConvertBf16ToFp32: return "ConvertBf16ToFp32";
         case LayerType::ConvertFp16ToFp32: return "ConvertFp16ToFp32";
         case LayerType::ConvertFp32ToFp16: return "ConvertFp32ToFp16";
         case LayerType::Convolution2d: return "Convolution2d";
diff --git a/src/armnn/InternalTypes.hpp b/src/armnn/InternalTypes.hpp
index 351f12c510..9330122246 100644
--- a/src/armnn/InternalTypes.hpp
+++ b/src/armnn/InternalTypes.hpp
@@ -22,6 +22,7 @@ enum class LayerType
     Comparison,
     Concat,
     Constant,
+    ConvertBf16ToFp32,
     ConvertFp16ToFp32,
     ConvertFp32ToFp16,
     Convolution2d,
diff --git a/src/armnn/LayersFwd.hpp b/src/armnn/LayersFwd.hpp
index f3ce7e61fa..3dde908fc3 100644
--- a/src/armnn/LayersFwd.hpp
+++ b/src/armnn/LayersFwd.hpp
@@ -14,6 +14,7 @@
 #include "layers/ComparisonLayer.hpp"
 #include "layers/ConcatLayer.hpp"
 #include "layers/ConstantLayer.hpp"
+#include "layers/ConvertBf16ToFp32Layer.hpp"
 #include "layers/ConvertFp16ToFp32Layer.hpp"
 #include "layers/ConvertFp32ToFp16Layer.hpp"
 #include "layers/Convolution2dLayer.hpp"
@@ -99,6 +100,7 @@ DECLARE_LAYER(BatchToSpaceNd)
 DECLARE_LAYER(Comparison)
 DECLARE_LAYER(Concat)
 DECLARE_LAYER(Constant)
+DECLARE_LAYER(ConvertBf16ToFp32)
 DECLARE_LAYER(ConvertFp16ToFp32)
 DECLARE_LAYER(ConvertFp32ToFp16)
 DECLARE_LAYER(Convolution2d)
diff --git a/src/armnn/layers/ConvertBf16ToFp32Layer.cpp b/src/armnn/layers/ConvertBf16ToFp32Layer.cpp
new file mode 100644
index 0000000000..147aa8f46a
--- /dev/null
+++ b/src/armnn/layers/ConvertBf16ToFp32Layer.cpp
@@ -0,0 +1,55 @@
+//
+// Copyright © 2020 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "ConvertBf16ToFp32Layer.hpp"
+#include "LayerCloneBase.hpp"
+
+#include <armnn/TypesUtils.hpp>
+
+#include <backendsCommon/WorkloadData.hpp>
+#include <backendsCommon/WorkloadFactory.hpp>
+
+namespace armnn
+{
+
+ConvertBf16ToFp32Layer::ConvertBf16ToFp32Layer(const char* name)
+    : Layer(1, 1, LayerType::ConvertBf16ToFp32, name)
+{
+}
+
+std::unique_ptr<IWorkload> ConvertBf16ToFp32Layer::CreateWorkload(const IWorkloadFactory& factory) const
+{
+    ConvertBf16ToFp32QueueDescriptor descriptor;
+    return factory.CreateConvertBf16ToFp32(descriptor, PrepInfoAndDesc(descriptor));
+}
+
+ConvertBf16ToFp32Layer* ConvertBf16ToFp32Layer::Clone(Graph& graph) const
+{
+    return CloneBase<ConvertBf16ToFp32Layer>(graph, GetName());
+}
+
+void ConvertBf16ToFp32Layer::ValidateTensorShapesFromInputs()
+{
+    VerifyLayerConnections(1, CHECK_LOCATION());
+
+    auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
+
+    BOOST_ASSERT(inferredShapes.size() == 1);
+
+    ConditionalThrowIfNotEqual<LayerValidationException>(
+        "ConvertBf16ToFp32Layer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
+        GetOutputSlot(0).GetTensorInfo().GetShape(),
+        inferredShapes[0]);
+}
+
+void ConvertBf16ToFp32Layer::Accept(ILayerVisitor& visitor) const
+{
+    // these conversion layers are only inserted by the
+    // optimizer and so will never be in an input graph.
+    IgnoreUnused(visitor);
+    throw armnn::Exception("ConvertBf16ToFp32Layer should never appear in an input graph");
+}
+
+} // namespace armnn
diff --git a/src/armnn/layers/ConvertBf16ToFp32Layer.hpp b/src/armnn/layers/ConvertBf16ToFp32Layer.hpp
new file mode 100644
index 0000000000..2a79a1cb65
--- /dev/null
+++ b/src/armnn/layers/ConvertBf16ToFp32Layer.hpp
@@ -0,0 +1,42 @@
+//
+// Copyright © 2020 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <Layer.hpp>
+
+namespace armnn
+{
+
+/// This layer converts data type BFloat16 to Float32.
+class ConvertBf16ToFp32Layer : public Layer
+{
+public:
+    /// Makes a workload for the ConvertBf16ToFp32 type.
+    /// @param [in] graph The graph where this layer can be found.
+    /// @param [in] factory The workload factory which will create the workload.
+    /// @return A pointer to the created workload, or nullptr if not created.
+    virtual std::unique_ptr<IWorkload> CreateWorkload(const IWorkloadFactory& factory) const override;
+
+    /// Creates a dynamically-allocated copy of this layer.
+    /// @param [in] graph The graph into which this layer is being cloned.
+    ConvertBf16ToFp32Layer* Clone(Graph& graph) const override;
+
+    /// Check if the input tensor shape(s)
+    /// will lead to a valid configuration of @ref ConvertBf16ToFp32Layer.
+    void ValidateTensorShapesFromInputs() override;
+
+    void Accept(ILayerVisitor& visitor) const override;
+
+protected:
+    /// Constructor to create a ConvertBf16ToFp32Layer.
+    /// @param [in] name Optional name for the layer.
+    ConvertBf16ToFp32Layer(const char* name);
+
+    /// Default destructor
+    ~ConvertBf16ToFp32Layer() = default;
+};
+
+} // namespace
--
cgit v1.2.1