diff options
author | Narumol Prangnawarat <narumol.prangnawarat@arm.com> | 2020-03-13 10:26:05 +0000 |
---|---|---|
committer | Jim Flynn <jim.flynn@arm.com> | 2020-03-17 20:56:46 +0000 |
commit | 7ddbbae7ad3e0000d8e6a76458cac68254dc8048 (patch) | |
tree | 43f6240df090b084528034358982e8f09706ef95 /src/armnn/layers/ConvertBf16ToFp32Layer.cpp | |
parent | f4a953f75b751452ae9303abc8565d310c55bfff (diff) | |
download | armnn-7ddbbae7ad3e0000d8e6a76458cac68254dc8048.tar.gz |
IVGCVSW-4515 Add ConvertBf16ToFp32Layer and Ref workload support
Signed-off-by: Narumol Prangnawarat <narumol.prangnawarat@arm.com>
Change-Id: Ida6d7e1d2c9abe0618f8b711bab9d62c011090d6
Diffstat (limited to 'src/armnn/layers/ConvertBf16ToFp32Layer.cpp')
-rw-r--r-- | src/armnn/layers/ConvertBf16ToFp32Layer.cpp | 55 |
1 file changed, 55 insertions, 0 deletions
//
// Copyright © 2020 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "ConvertBf16ToFp32Layer.hpp"
#include "LayerCloneBase.hpp"

#include <armnn/TypesUtils.hpp>

#include <backendsCommon/WorkloadData.hpp>
#include <backendsCommon/WorkloadFactory.hpp>

namespace armnn
{

/// Layer that converts a BFloat16 tensor to Float32.
/// Constructed with exactly one input slot and one output slot
/// (the 1, 1 arguments to the Layer base-class constructor).
/// @param name Optional layer name (may be null), forwarded to the base Layer.
ConvertBf16ToFp32Layer::ConvertBf16ToFp32Layer(const char* name)
    : Layer(1, 1, LayerType::ConvertBf16ToFp32, name)
{
}

/// Creates the backend workload that performs the Bf16->Fp32 conversion.
/// @param factory Backend workload factory used to build the workload.
/// @return A ConvertBf16ToFp32 workload; the descriptor carries no extra
///         parameters beyond the queue-descriptor info filled in by
///         PrepInfoAndDesc (input/output tensor infos).
std::unique_ptr<IWorkload> ConvertBf16ToFp32Layer::CreateWorkload(const IWorkloadFactory& factory) const
{
    ConvertBf16ToFp32QueueDescriptor descriptor;
    return factory.CreateConvertBf16ToFp32(descriptor, PrepInfoAndDesc(descriptor));
}

/// Creates a copy of this layer (name included) inside the given graph.
/// @param graph Destination graph that will own the clone.
ConvertBf16ToFp32Layer* ConvertBf16ToFp32Layer::Clone(Graph& graph) const
{
    return CloneBase<ConvertBf16ToFp32Layer>(graph, GetName());
}

/// Checks that the shape set on OutputSlot[0] matches the shape inferred
/// from the connected input.
/// @throws LayerValidationException if the shapes disagree.
void ConvertBf16ToFp32Layer::ValidateTensorShapesFromInputs()
{
    // Ensure the single input slot actually has a connection before
    // dereferencing it below.
    VerifyLayerConnections(1, CHECK_LOCATION());

    // A Bf16->Fp32 conversion is element-wise, so the output shape is
    // inferred directly from the input shape.
    auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });

    BOOST_ASSERT(inferredShapes.size() == 1);

    ConditionalThrowIfNotEqual<LayerValidationException>(
        "ConvertBf16ToFp32Layer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
        GetOutputSlot(0).GetTensorInfo().GetShape(),
        inferredShapes[0]);
}

/// Visitor entry point. Deliberately unsupported: see comment below.
/// @throws armnn::Exception always.
void ConvertBf16ToFp32Layer::Accept(ILayerVisitor& visitor) const
{
    // these conversion layers are only inserted by the
    // optimizer and so will never be in an input graph.
    IgnoreUnused(visitor);
    throw armnn::Exception("ConvertBf16ToFp32Layer should never appear in an input graph");
}

} // namespace armnn