From 31441595009182c985dacbedc70c41ee6664d070 Mon Sep 17 00:00:00 2001
From: Ryan OShea
Date: Mon, 7 Nov 2022 16:20:48 +0000
Subject: IVGCVSW-7214 Disable BF16-Turbo-Mode and remove conversion layers

 - Remove Bf16ToFp32 Conversion Layer
 - Remove Fp32ToBf16 Conversion Layer
 - Remove Bf16 Conversion tests
 * Throw exception if m_ReduceFp32ToBf16 optimizer option is set to true
 * Provide comments to enable fast math in order to use bf16
 * Update docs to inform users to enable fast math for bf16

Execute Network Changes
 * Require bf16_turbo_mode to also have fast_math_enabled set to true
 - Remove setting m_ReduceFp32ToBf16 optimizer option

Signed-off-by: Ryan OShea
Change-Id: Ibaa6da9d29c96a1ce32ff5196b0847fde9f04a1c
---
 src/backends/backendsCommon/WorkloadData.cpp | 46 ----------------------------
 1 file changed, 46 deletions(-)

diff --git a/src/backends/backendsCommon/WorkloadData.cpp b/src/backends/backendsCommon/WorkloadData.cpp
index 753fe06edb..62dfc6a38b 100644
--- a/src/backends/backendsCommon/WorkloadData.cpp
+++ b/src/backends/backendsCommon/WorkloadData.cpp
@@ -2222,52 +2222,6 @@ void LstmQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
     }
 }
 
-void ConvertBf16ToFp32QueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
-{
-    const std::string descriptorName{"ConvertBf16ToFp32QueueDescriptor"};
-
-    ValidateNumInputs(workloadInfo, descriptorName, 1);
-    ValidateNumOutputs(workloadInfo, descriptorName, 1);
-
-    const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
-    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
-
-    if (inputTensorInfo.GetDataType() != DataType::BFloat16)
-    {
-        throw InvalidArgumentException(descriptorName + ": Input tensor type must be BFloat16.");
-    }
-
-    if (outputTensorInfo.GetDataType() != DataType::Float32)
-    {
-        throw InvalidArgumentException(descriptorName + ": Output tensor type must be Float32.");
-    }
-
-    ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
-}
-
-void ConvertFp32ToBf16QueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
-{
-    const std::string descriptorName{"ConvertFp32ToBf16QueueDescriptor"};
-
-    ValidateNumInputs(workloadInfo, descriptorName, 1);
-    ValidateNumOutputs(workloadInfo, descriptorName, 1);
-
-    const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
-    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
-
-    if (inputTensorInfo.GetDataType() != DataType::Float32)
-    {
-        throw InvalidArgumentException(descriptorName + ": Input tensor type must be Float32.");
-    }
-
-    if (outputTensorInfo.GetDataType() != DataType::BFloat16)
-    {
-        throw InvalidArgumentException(descriptorName + ": Output tensor type must be BFloat16.");
-    }
-
-    ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
-}
-
 void ConvertFp32ToFp16QueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
 {
     const std::string descriptorName{"ConvertFp32ToFp16QueueDescriptor"};
--
cgit v1.2.1
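
With the dedicated BF16 conversion layers removed, BF16 execution is reached through the
backend's fast-math option rather than the m_ReduceFp32ToBf16 optimizer flag (which now
throws). A minimal sketch of how a caller might request this at optimization time, assuming
the armnn::OptimizerOptions / armnn::BackendOptions API of this release and that the CpuAcc
backend recognises the "FastMathEnabled" option name:

    // Sketch: enable fast math on CpuAcc so the backend may use BF16 kernels.
    // Assumption: "FastMathEnabled" is the backend option consulted by CpuAcc,
    // and this armnn::Optimize overload accepts OptimizerOptions.
    #include <armnn/ArmNN.hpp>

    armnn::IOptimizedNetworkPtr OptimizeWithFastMath(armnn::INetwork& network,
                                                     armnn::IRuntime& runtime)
    {
        armnn::OptimizerOptions options;

        // m_ReduceFp32ToBf16 is no longer the opt-in path; per this patch,
        // setting it to true raises an exception. BF16 is instead opted into
        // via the backend's fast-math mode.
        armnn::BackendOptions cpuAcc("CpuAcc", {{"FastMathEnabled", true}});
        options.m_ModelOptions.push_back(cpuAcc);

        return armnn::Optimize(network,
                               {"CpuAcc"},
                               runtime.GetDeviceSpec(),
                               options);
    }

On the ExecuteNetwork side, the patch makes bf16_turbo_mode require fast math as well, so a
run would pass both options together (flag spellings assumed from the option names in the
commit message), e.g. --bf16-turbo-mode alongside --enable-fast-math.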