aboutsummaryrefslogtreecommitdiff
path: root/src/armnn/layers/ConvertFp32ToFp16Layer.cpp
diff options
context:
space:
mode:
authortelsoa01 <telmo.soares@arm.com>2018-08-31 09:22:23 +0100
committertelsoa01 <telmo.soares@arm.com>2018-08-31 09:22:23 +0100
commitc577f2c6a3b4ddb6ba87a882723c53a248afbeba (patch)
treebd7d4c148df27f8be6649d313efb24f536b7cf34 /src/armnn/layers/ConvertFp32ToFp16Layer.cpp
parent4c7098bfeab1ffe1cdc77f6c15548d3e73274746 (diff)
downloadarmnn-c577f2c6a3b4ddb6ba87a882723c53a248afbeba.tar.gz
Release 18.08
Diffstat (limited to 'src/armnn/layers/ConvertFp32ToFp16Layer.cpp')
-rw-r--r--src/armnn/layers/ConvertFp32ToFp16Layer.cpp47
1 file changed, 47 insertions, 0 deletions
diff --git a/src/armnn/layers/ConvertFp32ToFp16Layer.cpp b/src/armnn/layers/ConvertFp32ToFp16Layer.cpp
new file mode 100644
index 0000000000..70d6b668f8
--- /dev/null
+++ b/src/armnn/layers/ConvertFp32ToFp16Layer.cpp
@@ -0,0 +1,47 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+#include "ConvertFp32ToFp16Layer.hpp"
+
+#include "LayerCloneBase.hpp"
+
+#include <armnn/TypesUtils.hpp>
+#include <backends/WorkloadData.hpp>
+#include <backends/WorkloadFactory.hpp>
+
+namespace armnn
+{
+
/// Constructs a layer that converts FP32 tensors to FP16.
/// @param name Optional layer name (may be nullptr); forwarded to the Layer base.
ConvertFp32ToFp16Layer::ConvertFp32ToFp16Layer(const char* name)
    // Exactly one input slot and one output slot; the layer type tag lets
    // the graph machinery identify this conversion layer generically.
    : Layer(1, 1, LayerType::ConvertFp32ToFp16, name)
{
}
+
+std::unique_ptr<IWorkload> ConvertFp32ToFp16Layer::CreateWorkload(const Graph& graph,
+ const IWorkloadFactory& factory) const
+{
+ ConvertFp32ToFp16QueueDescriptor descriptor;
+ return factory.CreateConvertFp32ToFp16(descriptor, PrepInfoAndDesc(descriptor, graph));
+}
+
+ConvertFp32ToFp16Layer* ConvertFp32ToFp16Layer::Clone(Graph& graph) const
+{
+ return CloneBase<ConvertFp32ToFp16Layer>(graph, GetName());
+}
+
/// Validates that the shape set on the output slot matches the shape
/// inferred from the connected input, throwing on mismatch.
/// @throws LayerValidationException if the output shape disagrees with
///         the inferred shape.
void ConvertFp32ToFp16Layer::ValidateTensorShapesFromInputs()
{
    // Ensure the single input slot actually has a connection before we
    // dereference it below; CHECK_LOCATION() records file/line for errors.
    VerifyLayerConnections(1, CHECK_LOCATION());

    // A pure type conversion does not alter the tensor shape, so the
    // inferred output shape is the input's shape passed through
    // InferOutputShapes (presumably the identity here — behavior lives in
    // the base/override, not visible in this file).
    auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });

    // One input slot in, so exactly one inferred shape is expected out.
    BOOST_ASSERT(inferredShapes.size() == 1);

    ConditionalThrowIfNotEqual<LayerValidationException>(
        "ConvertFp32ToFp16Layer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
        GetOutputSlot(0).GetTensorInfo().GetShape(),
        inferredShapes[0]);
}
+
+} // namespace armnn