diff options
Diffstat (limited to 'src/armnn/layers/ConvertFp16ToFp32Layer.cpp')
-rw-r--r-- | src/armnn/layers/ConvertFp16ToFp32Layer.cpp | 48 |
1 file changed, 48 insertions, 0 deletions
diff --git a/src/armnn/layers/ConvertFp16ToFp32Layer.cpp b/src/armnn/layers/ConvertFp16ToFp32Layer.cpp new file mode 100644 index 0000000000..80d981c267 --- /dev/null +++ b/src/armnn/layers/ConvertFp16ToFp32Layer.cpp @@ -0,0 +1,48 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// See LICENSE file in the project root for full license information. +// + +#include "ConvertFp16ToFp32Layer.hpp" +#include "LayerCloneBase.hpp" + +#include <armnn/TypesUtils.hpp> + +#include <backends/WorkloadData.hpp> +#include <backends/WorkloadFactory.hpp> + +namespace armnn +{ + +ConvertFp16ToFp32Layer::ConvertFp16ToFp32Layer(const char* name) + : Layer(1, 1, LayerType::ConvertFp16ToFp32, name) +{ +} + +std::unique_ptr<IWorkload> ConvertFp16ToFp32Layer::CreateWorkload(const Graph& graph, + const IWorkloadFactory& factory) const +{ + ConvertFp16ToFp32QueueDescriptor descriptor; + return factory.CreateConvertFp16ToFp32(descriptor, PrepInfoAndDesc(descriptor, graph)); +} + +ConvertFp16ToFp32Layer* ConvertFp16ToFp32Layer::Clone(Graph& graph) const +{ + return CloneBase<ConvertFp16ToFp32Layer>(graph, GetName()); +} + +void ConvertFp16ToFp32Layer::ValidateTensorShapesFromInputs() +{ + VerifyLayerConnections(1, CHECK_LOCATION()); + + auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() }); + + BOOST_ASSERT(inferredShapes.size() == 1); + + ConditionalThrowIfNotEqual<LayerValidationException>( + "ConvertFp16ToFp32Layer: TensorShape set on OutputSlot[0] does not match the inferred shape.", + GetOutputSlot(0).GetTensorInfo().GetShape(), + inferredShapes[0]); +} + +} // namespace armnn |