From 31441595009182c985dacbedc70c41ee6664d070 Mon Sep 17 00:00:00 2001 From: Ryan OShea Date: Mon, 7 Nov 2022 16:20:48 +0000 Subject: IVGCVSW-7214 Disable BF16-Turbo-Mode and remove conversion layers - Remove Bf16ToFp32 Conversion Layer - Remove Fp32ToBf16 Conversion Layer - Remove Bf16 Conversion tests * Throw exception if m_ReduceFp32ToBf16 optimizer option is set to true * Provide comments to enable fast math in order to use bf16 * Update docs to inform users to enable fast math for bf16 Execute Network Changes * Require bf16_turbo_mode to also have fast_math_enabled set to true - Remove setting m_ReduceFp32ToBf16 optimizer option Signed-off-by: Ryan OShea Change-Id: Ibaa6da9d29c96a1ce32ff5196b0847fde9f04a1c --- src/backends/backendsCommon/LayerSupportBase.cpp | 15 --------------- 1 file changed, 15 deletions(-) (limited to 'src/backends/backendsCommon/LayerSupportBase.cpp') diff --git a/src/backends/backendsCommon/LayerSupportBase.cpp b/src/backends/backendsCommon/LayerSupportBase.cpp index 001037908d..26137f51b0 100644 --- a/src/backends/backendsCommon/LayerSupportBase.cpp +++ b/src/backends/backendsCommon/LayerSupportBase.cpp @@ -164,13 +164,6 @@ bool LayerSupportBase::IsConstantSupported(const TensorInfo&, // output return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported); } -bool LayerSupportBase::IsConvertBf16ToFp32Supported(const TensorInfo&, // input - const TensorInfo&, // output - Optional reasonIfUnsupported) const -{ - return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported); -} - bool LayerSupportBase::IsConvertFp16ToFp32Supported(const TensorInfo&, // input const TensorInfo&, // output Optional reasonIfUnsupported) const @@ -178,14 +171,6 @@ bool LayerSupportBase::IsConvertFp16ToFp32Supported(const TensorInfo&, // input return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported); } -bool LayerSupportBase::IsConvertFp32ToBf16Supported(const TensorInfo&, // input - const TensorInfo&, // output 
- Optional reasonIfUnsupported) const -{ - return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported); -} - - bool LayerSupportBase::IsConvertFp32ToFp16Supported(const TensorInfo&, // input const TensorInfo&, // output Optional reasonIfUnsupported) const -- cgit v1.2.1