From 31441595009182c985dacbedc70c41ee6664d070 Mon Sep 17 00:00:00 2001
From: Ryan OShea
Date: Mon, 7 Nov 2022 16:20:48 +0000
Subject: IVGCVSW-7214 Disable BF16-Turbo-Mode and remove conversion layers

 - Remove Bf16ToFp32 Conversion Layer
 - Remove Fp32ToBf16 Conversion Layer
 - Remove Bf16 Conversion tests
 * Throw exception if m_ReduceFp32ToBf16 optimizer option is set to true
 * Provide comments to enable fast math in order to use bf16
 * Update docs to inform users to enable fast math for bf16

 Execute Network Changes
 * Require bf16_turbo_mode to also have fast_math_enabled set to true
 - Remove setting m_ReduceFp32ToBf16 optimizer option

Signed-off-by: Ryan OShea
Change-Id: Ibaa6da9d29c96a1ce32ff5196b0847fde9f04a1c
---
 src/armnn/layers/ConvertFp32ToBf16Layer.cpp | 56 -----------------------------
 1 file changed, 56 deletions(-)
 delete mode 100644 src/armnn/layers/ConvertFp32ToBf16Layer.cpp

diff --git a/src/armnn/layers/ConvertFp32ToBf16Layer.cpp b/src/armnn/layers/ConvertFp32ToBf16Layer.cpp
deleted file mode 100644
index 7c98eea239..0000000000
--- a/src/armnn/layers/ConvertFp32ToBf16Layer.cpp
+++ /dev/null
@@ -1,56 +0,0 @@
-//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "ConvertFp32ToBf16Layer.hpp"
-#include "LayerCloneBase.hpp"
-
-#include <armnn/TypesUtils.hpp>
-
-#include <armnn/backends/WorkloadData.hpp>
-#include <armnn/backends/WorkloadFactory.hpp>
-
-namespace armnn
-{
-
-ConvertFp32ToBf16Layer::ConvertFp32ToBf16Layer(const char* name)
-    : Layer(1, 1, LayerType::ConvertFp32ToBf16, name)
-{
-}
-
-std::unique_ptr<IWorkload> ConvertFp32ToBf16Layer::CreateWorkload(const IWorkloadFactory& factory) const
-{
-    ConvertFp32ToBf16QueueDescriptor descriptor;
-    SetAdditionalInfo(descriptor);
-
-    return factory.CreateWorkload(LayerType::ConvertFp32ToBf16, descriptor, PrepInfoAndDesc(descriptor));
-}
-
-ConvertFp32ToBf16Layer* ConvertFp32ToBf16Layer::Clone(Graph& graph) const
-{
-    return CloneBase<ConvertFp32ToBf16Layer>(graph, GetName());
-}
-
-void ConvertFp32ToBf16Layer::ValidateTensorShapesFromInputs()
-{
-
-    VerifyLayerConnections(1, CHECK_LOCATION());
-
-    const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
-
-    VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
-
-    auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
-
-    ARMNN_ASSERT(inferredShapes.size() == 1);
-
-    ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "LayerName");
-}
-
-void ConvertFp32ToBf16Layer::ExecuteStrategy(IStrategy& strategy) const
-{
-    strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
-}
-
-} // namespace armnn
--
cgit v1.2.1
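
Note (not part of the patch): after this change, BF16 execution is no longer requested through the m_ReduceFp32ToBf16 optimizer option, which now throws if set to true; instead, the backend's fast-math option is enabled and the backend is free to pick BF16 kernels where the hardware supports them. The sketch below shows one way a caller might enable fast math when optimizing a network. It assumes the public ArmNN C++ API of this period (armnn/ArmNN.hpp, BackendOptions, OptimizerOptions, Optimize); the helper name OptimizeWithFastMath and the surrounding setup are illustrative only.

// Illustrative sketch only, not part of this commit: request fast math on the
// CpuAcc backend so it may use BF16 arithmetic, instead of setting the
// m_ReduceFp32ToBf16 optimizer option (which this commit makes throw).
#include <armnn/ArmNN.hpp>

#include <vector>

armnn::IOptimizedNetworkPtr OptimizeWithFastMath(const armnn::INetwork& network,
                                                 armnn::IRuntime& runtime)
{
    // Ask the Neon (CpuAcc) backend to enable fast math. With fast math on,
    // the backend may select BF16 kernels where the CPU supports them.
    armnn::BackendOptions cpuAccOptions("CpuAcc", { { "FastMathEnabled", true } });

    armnn::OptimizerOptions options;
    options.m_ModelOptions.push_back(cpuAccOptions);
    // m_ReduceFp32ToBf16 is deliberately left unset; after this commit,
    // requesting it causes Optimize to throw an exception.

    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
    return armnn::Optimize(network, backends, runtime.GetDeviceSpec(), options);
}

On the ExecuteNetwork side, the commit requires the bf16_turbo_mode option to be combined with fast_math_enabled; the exact command-line flag spellings depend on the ExecuteNetwork version and are not reproduced here.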