diff options
author | Narumol Prangnawarat <narumol.prangnawarat@arm.com> | 2020-03-16 16:36:10 +0000 |
---|---|---|
committer | Narumol Prangnawarat <narumol.prangnawarat@arm.com> | 2020-03-19 15:41:12 +0000 |
commit | ea54a01f6bd30f013cbe88ae1751985bc86b6af5 (patch) | |
tree | 7edb7d659ea4210c1256beb5edf57601b317c82d /src/armnn/layers/ConvertFp32ToBf16Layer.cpp | |
parent | 25334cf3d53fe7fff98776b44a199ca341f62f1a (diff) | |
download | armnn-ea54a01f6bd30f013cbe88ae1751985bc86b6af5.tar.gz |
IVGCVSW-4516 Add ConvertFp32ToBf16Layer and Ref workload support
Signed-off-by: Narumol Prangnawarat <narumol.prangnawarat@arm.com>
Change-Id: I9099a4f840fb747336f77d20a0868b64e801a310
Diffstat (limited to 'src/armnn/layers/ConvertFp32ToBf16Layer.cpp')
-rw-r--r-- | src/armnn/layers/ConvertFp32ToBf16Layer.cpp | 55 |
1 file changed, 55 insertions, 0 deletions
diff --git a/src/armnn/layers/ConvertFp32ToBf16Layer.cpp b/src/armnn/layers/ConvertFp32ToBf16Layer.cpp new file mode 100644 index 0000000000..936acf61ab --- /dev/null +++ b/src/armnn/layers/ConvertFp32ToBf16Layer.cpp @@ -0,0 +1,55 @@ +// +// Copyright © 2020 Arm Ltd. All rights reserved. +// SPDX-License-Identifier: MIT +// + +#include "ConvertFp32ToBf16Layer.hpp" +#include "LayerCloneBase.hpp" + +#include <armnn/TypesUtils.hpp> + +#include <backendsCommon/WorkloadData.hpp> +#include <backendsCommon/WorkloadFactory.hpp> + +namespace armnn +{ + +ConvertFp32ToBf16Layer::ConvertFp32ToBf16Layer(const char* name) + : Layer(1, 1, LayerType::ConvertFp32ToBf16, name) +{ +} + +std::unique_ptr<IWorkload> ConvertFp32ToBf16Layer::CreateWorkload(const IWorkloadFactory& factory) const +{ + ConvertFp32ToBf16QueueDescriptor descriptor; + return factory.CreateConvertFp32ToBf16(descriptor, PrepInfoAndDesc(descriptor)); +} + +ConvertFp32ToBf16Layer* ConvertFp32ToBf16Layer::Clone(Graph& graph) const +{ + return CloneBase<ConvertFp32ToBf16Layer>(graph, GetName()); +} + +void ConvertFp32ToBf16Layer::ValidateTensorShapesFromInputs() +{ + VerifyLayerConnections(1, CHECK_LOCATION()); + + auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() }); + + BOOST_ASSERT(inferredShapes.size() == 1); + + ConditionalThrowIfNotEqual<LayerValidationException>( + "ConvertFp32ToBf16Layer: TensorShape set on OutputSlot[0] does not match the inferred shape.", + GetOutputSlot(0).GetTensorInfo().GetShape(), + inferredShapes[0]); +} + +void ConvertFp32ToBf16Layer::Accept(ILayerVisitor& visitor) const +{ + // these conversion layers are only inserted by the + // optimizer and so will never be in an input graph. + IgnoreUnused(visitor); + throw armnn::Exception("ConvertFp32ToBf16Layer should never appear in an input graph"); +} + +} // namespace armnn |