author     Narumol Prangnawarat <narumol.prangnawarat@arm.com>    2020-03-13 10:26:05 +0000
committer  Jim Flynn <jim.flynn@arm.com>    2020-03-17 20:56:46 +0000
commit     7ddbbae7ad3e0000d8e6a76458cac68254dc8048 (patch)
tree       43f6240df090b084528034358982e8f09706ef95 /src/armnn
parent     f4a953f75b751452ae9303abc8565d310c55bfff (diff)
IVGCVSW-4515 Add ConvertBf16ToFp32Layer and Ref workload support
Signed-off-by: Narumol Prangnawarat <narumol.prangnawarat@arm.com>
Change-Id: Ida6d7e1d2c9abe0618f8b711bab9d62c011090d6
Diffstat (limited to 'src/armnn')
-rw-r--r--  src/armnn/InternalTypes.cpp                    1
-rw-r--r--  src/armnn/InternalTypes.hpp                    1
-rw-r--r--  src/armnn/LayersFwd.hpp                        2
-rw-r--r--  src/armnn/layers/ConvertBf16ToFp32Layer.cpp   55
-rw-r--r--  src/armnn/layers/ConvertBf16ToFp32Layer.hpp   42
5 files changed, 101 insertions, 0 deletions
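Background on the conversion this layer represents: BFloat16 keeps the sign bit, the full 8-bit exponent, and the top 7 mantissa bits of an IEEE-754 single-precision value, so widening to Float32 is a 16-bit left shift of the raw bits followed by a bit-exact reinterpretation. The snippet below is only a minimal, self-contained sketch of that per-element operation; it is not part of this patch (the Ref workload that performs it lives outside the src/armnn diffstat shown here), and the helper name is illustrative.

    #include <cstdint>
    #include <cstring>

    // Widen one BFloat16 value (raw 16-bit pattern) to Float32.
    // bf16 is the high half of the equivalent fp32 bit pattern, so the
    // conversion is exact: shift left by 16 and reinterpret the bits.
    float ConvertBf16ToFp32(uint16_t bf16Bits)
    {
        uint32_t fp32Bits = static_cast<uint32_t>(bf16Bits) << 16;
        float result;
        std::memcpy(&result, &fp32Bits, sizeof(result)); // bit-exact reinterpretation
        return result;
    }

A reference-backend workload would typically apply this element-wise over the input tensor; the layer added below only describes the operation in the graph and defers the actual work to the backend's workload factory.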
diff --git a/src/armnn/InternalTypes.cpp b/src/armnn/InternalTypes.cpp
index c032e44cd3..3f3eed56e7 100644
--- a/src/armnn/InternalTypes.cpp
+++ b/src/armnn/InternalTypes.cpp
@@ -22,6 +22,7 @@ char const* GetLayerTypeAsCString(LayerType type)
case LayerType::Comparison: return "Comparison";
case LayerType::Concat: return "Concat";
case LayerType::Constant: return "Constant";
+ case LayerType::ConvertBf16ToFp32: return "ConvertBf16ToFp32";
case LayerType::ConvertFp16ToFp32: return "ConvertFp16ToFp32";
case LayerType::ConvertFp32ToFp16: return "ConvertFp32ToFp16";
case LayerType::Convolution2d: return "Convolution2d";
diff --git a/src/armnn/InternalTypes.hpp b/src/armnn/InternalTypes.hpp
index 351f12c510..9330122246 100644
--- a/src/armnn/InternalTypes.hpp
+++ b/src/armnn/InternalTypes.hpp
@@ -22,6 +22,7 @@ enum class LayerType
Comparison,
Concat,
Constant,
+ ConvertBf16ToFp32,
ConvertFp16ToFp32,
ConvertFp32ToFp16,
Convolution2d,
diff --git a/src/armnn/LayersFwd.hpp b/src/armnn/LayersFwd.hpp
index f3ce7e61fa..3dde908fc3 100644
--- a/src/armnn/LayersFwd.hpp
+++ b/src/armnn/LayersFwd.hpp
@@ -14,6 +14,7 @@
#include "layers/ComparisonLayer.hpp"
#include "layers/ConcatLayer.hpp"
#include "layers/ConstantLayer.hpp"
+#include "layers/ConvertBf16ToFp32Layer.hpp"
#include "layers/ConvertFp16ToFp32Layer.hpp"
#include "layers/ConvertFp32ToFp16Layer.hpp"
#include "layers/Convolution2dLayer.hpp"
@@ -99,6 +100,7 @@ DECLARE_LAYER(BatchToSpaceNd)
DECLARE_LAYER(Comparison)
DECLARE_LAYER(Concat)
DECLARE_LAYER(Constant)
+DECLARE_LAYER(ConvertBf16ToFp32)
DECLARE_LAYER(ConvertFp16ToFp32)
DECLARE_LAYER(ConvertFp32ToFp16)
DECLARE_LAYER(Convolution2d)
diff --git a/src/armnn/layers/ConvertBf16ToFp32Layer.cpp b/src/armnn/layers/ConvertBf16ToFp32Layer.cpp
new file mode 100644
index 0000000000..147aa8f46a
--- /dev/null
+++ b/src/armnn/layers/ConvertBf16ToFp32Layer.cpp
@@ -0,0 +1,55 @@
+//
+// Copyright © 2020 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "ConvertBf16ToFp32Layer.hpp"
+#include "LayerCloneBase.hpp"
+
+#include <armnn/TypesUtils.hpp>
+
+#include <backendsCommon/WorkloadData.hpp>
+#include <backendsCommon/WorkloadFactory.hpp>
+
+namespace armnn
+{
+
+ConvertBf16ToFp32Layer::ConvertBf16ToFp32Layer(const char* name)
+ : Layer(1, 1, LayerType::ConvertBf16ToFp32, name)
+{
+}
+
+std::unique_ptr<IWorkload> ConvertBf16ToFp32Layer::CreateWorkload(const IWorkloadFactory& factory) const
+{
+ ConvertBf16ToFp32QueueDescriptor descriptor;
+ return factory.CreateConvertBf16ToFp32(descriptor, PrepInfoAndDesc(descriptor));
+}
+
+ConvertBf16ToFp32Layer* ConvertBf16ToFp32Layer::Clone(Graph& graph) const
+{
+ return CloneBase<ConvertBf16ToFp32Layer>(graph, GetName());
+}
+
+void ConvertBf16ToFp32Layer::ValidateTensorShapesFromInputs()
+{
+ VerifyLayerConnections(1, CHECK_LOCATION());
+
+ auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
+
+ BOOST_ASSERT(inferredShapes.size() == 1);
+
+ ConditionalThrowIfNotEqual<LayerValidationException>(
+ "ConvertBf16ToFp32Layer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
+ GetOutputSlot(0).GetTensorInfo().GetShape(),
+ inferredShapes[0]);
+}
+
+void ConvertBf16ToFp32Layer::Accept(ILayerVisitor& visitor) const
+{
+ // these conversion layers are only inserted by the
+ // optimizer and so will never be in an input graph.
+ IgnoreUnused(visitor);
+ throw armnn::Exception("ConvertBf16ToFp32Layer should never appear in an input graph");
+}
+
+} // namespace armnn
diff --git a/src/armnn/layers/ConvertBf16ToFp32Layer.hpp b/src/armnn/layers/ConvertBf16ToFp32Layer.hpp
new file mode 100644
index 0000000000..2a79a1cb65
--- /dev/null
+++ b/src/armnn/layers/ConvertBf16ToFp32Layer.hpp
@@ -0,0 +1,42 @@
+//
+// Copyright © 2020 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <Layer.hpp>
+
+namespace armnn
+{
+
+/// This layer converts data type BFloat16 to Float32.
+class ConvertBf16ToFp32Layer : public Layer
+{
+public:
+ /// Makes a workload for the ConvertBf16ToFp32 type.
+ /// @param [in] graph The graph where this layer can be found.
+ /// @param [in] factory The workload factory which will create the workload.
+ /// @return A pointer to the created workload, or nullptr if not created.
+ virtual std::unique_ptr<IWorkload> CreateWorkload(const IWorkloadFactory& factory) const override;
+
+ /// Creates a dynamically-allocated copy of this layer.
+ /// @param [in] graph The graph into which this layer is being cloned.
+ ConvertBf16ToFp32Layer* Clone(Graph& graph) const override;
+
+ /// Check if the input tensor shape(s)
+ /// will lead to a valid configuration of @ref ConvertBf16ToFp32Layer.
+ void ValidateTensorShapesFromInputs() override;
+
+ void Accept(ILayerVisitor& visitor) const override;
+
+protected:
+ /// Constructor to create a ConvertBf16ToFp32Layer.
+ /// @param [in] name Optional name for the layer.
+ ConvertBf16ToFp32Layer(const char* name);
+
+ /// Default destructor
+ ~ConvertBf16ToFp32Layer() = default;
+};
+
+} // namespace