From 31441595009182c985dacbedc70c41ee6664d070 Mon Sep 17 00:00:00 2001 From: Ryan OShea Date: Mon, 7 Nov 2022 16:20:48 +0000 Subject: IVGCVSW-7214 Disable BF16-Turbo-Mode and remove conversion layers - Remove Bf16ToFp32 Conversion Layer - Remove Fp32ToBf16 Conversion Layer - Remove B16 Conversion tests * Throw exception if m_ReduceFp32ToBf16 optimizer option is set to true * Provide comments to enable fast math in order to use bf16 * Update docs to inform users to enable fast math for bf16 Execute Network Changes * Require bf16_turbo_mode to also have fast_math_enabled set to true - Remove setting m_ReduceFp32ToBf16 optimizer option Signed-off-by: Ryan OShea Change-Id: Ibaa6da9d29c96a1ce32ff5196b0847fde9f04a1c --- src/armnn/optimizations/ConvertConstants.hpp | 54 ---------------------------- 1 file changed, 54 deletions(-) (limited to 'src/armnn/optimizations/ConvertConstants.hpp') diff --git a/src/armnn/optimizations/ConvertConstants.hpp b/src/armnn/optimizations/ConvertConstants.hpp index 54c14e5c89..7b2f1fd291 100644 --- a/src/armnn/optimizations/ConvertConstants.hpp +++ b/src/armnn/optimizations/ConvertConstants.hpp @@ -11,7 +11,6 @@ #include #include -#include #include namespace armnn @@ -19,27 +18,6 @@ namespace armnn namespace optimizations { -struct BFloat16ToFloat32 -{ - static void Func(std::shared_ptr& handle) - { - const TensorInfo& info = handle->GetTensorInfo(); - - if (info.GetDataType() == DataType::BFloat16) - { - std::vector newValues(info.GetNumElements()); - - armnnUtils::FloatingPointConverter::ConvertBFloat16ToFloat32(handle->GetConstTensor(), - info.GetNumElements(), - newValues.data()); - - TensorInfo newInfo(info.GetShape(), DataType::Float32, 0.0f, 0, true); - ConstTensor newInput(newInfo, newValues); - handle.reset(new ScopedTensorHandle(newInput)); - } - } -}; - struct Float16ToFloat32 { static void Func(std::shared_ptr& handle) @@ -61,27 +39,6 @@ struct Float16ToFloat32 }; -struct Float32ToBFloat16 -{ - static void 
Func(std::shared_ptr& handle) - { - const TensorInfo& info = handle->GetTensorInfo(); - - if (info.GetDataType() == DataType::Float32) - { - std::vector newValues(info.GetNumElements()); - - armnnUtils::FloatingPointConverter::ConvertFloat32ToBFloat16(handle->GetConstTensor(), - info.GetNumElements(), - newValues.data()); - - TensorInfo newInfo(info.GetShape(), DataType::BFloat16, 0.0f, 0, true); - ConstTensor newInput(newInfo, newValues); - handle.reset(new ScopedTensorHandle(newInput)); - } - } -}; - struct Float32ToFloat16 { static void Func(std::shared_ptr& handle) @@ -138,17 +95,6 @@ struct IsFloat16Layer } }; -struct IsBFloat16Layer -{ - static bool Test(const Layer& layer) - { - return layer.GetDataType() == DataType::BFloat16; - } -}; - -using ConvertConstantsBFloatToFloat = ConvertConstants; -using ConvertConstantsFloatToBFloat = ConvertConstants; - using ConvertConstantsHalfToFloat = ConvertConstants; using ConvertConstantsFloatToHalf = ConvertConstants; -- cgit v1.2.1