From 31441595009182c985dacbedc70c41ee6664d070 Mon Sep 17 00:00:00 2001
From: Ryan OShea
Date: Mon, 7 Nov 2022 16:20:48 +0000
Subject: IVGCVSW-7214 Disable BF16-Turbo-Mode and remove conversion layers

 - Remove Bf16ToFp32 Conversion Layer
 - Remove Fp32ToBf16 Conversion Layer
 - Remove Bf16 Conversion tests
 * Throw exception if m_ReduceFp32ToBf16 optimizer option is set to true
 * Provide comments to enable fast math in order to use bf16
 * Update docs to inform users to enable fast math for bf16

 Execute Network Changes
 * Require bf16_turbo_mode to also have fast_math_enabled set to true
 - Remove setting m_ReduceFp32ToBf16 optimizer option

Signed-off-by: Ryan OShea
Change-Id: Ibaa6da9d29c96a1ce32ff5196b0847fde9f04a1c
---
 .../workloads/RefConvertBf16ToFp32Workload.cpp | 39 ----------------------
 1 file changed, 39 deletions(-)
 delete mode 100644 src/backends/reference/workloads/RefConvertBf16ToFp32Workload.cpp

diff --git a/src/backends/reference/workloads/RefConvertBf16ToFp32Workload.cpp b/src/backends/reference/workloads/RefConvertBf16ToFp32Workload.cpp
deleted file mode 100644
index 2fe2eafb9b..0000000000
--- a/src/backends/reference/workloads/RefConvertBf16ToFp32Workload.cpp
+++ /dev/null
@@ -1,39 +0,0 @@
-//
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "RefConvertBf16ToFp32Workload.hpp"
-#include "RefWorkloadUtils.hpp"
-
-#include <armnnUtils/FloatingPointConverter.hpp>
-
-#include <BFloat16.hpp>
-
-namespace armnn
-{
-
-void RefConvertBf16ToFp32Workload::Execute() const
-{
-    Execute(m_Data.m_Inputs, m_Data.m_Outputs);
-}
-
-void RefConvertBf16ToFp32Workload::ExecuteAsync(ExecutionData& executionData)
-{
-    WorkingMemDescriptor* workingMemDescriptor = static_cast<WorkingMemDescriptor*>(executionData.m_Data);
-    Execute(workingMemDescriptor->m_Inputs, workingMemDescriptor->m_Outputs);
-}
-
-void RefConvertBf16ToFp32Workload::Execute(std::vector<ITensorHandle*> inputs,
-                                           std::vector<ITensorHandle*> outputs) const
-{
-    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefConvertBf16ToFp32Workload_Execute");
-
-    const BFloat16* const input = reinterpret_cast<const BFloat16*>(inputs[0]->Map());
-    float* const output = reinterpret_cast<float*>(outputs[0]->Map());
-
-    unsigned int numElements = GetTensorInfo(inputs[0]).GetNumElements();
-    armnnUtils::FloatingPointConverter::ConvertBFloat16ToFloat32(input, numElements, output);
-}
-
-} //namespace armnn
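Note on usage after this patch: BF16 execution is no longer requested through the
m_ReduceFp32ToBf16 optimizer option (setting it now throws an exception); instead,
fast math must be enabled on the backend, which allows it to use BF16 kernels
internally. The following is a minimal sketch of that workflow against the public
Arm NN API, not part of this patch: "CpuAcc" and the "FastMathEnabled" backend
option are existing Arm NN names, while the `network` (armnn::INetworkPtr) and
`runtime` (armnn::IRuntimePtr) variables are assumed to have been created
beforehand.

    #include <armnn/ArmNN.hpp>
    #include <armnn/BackendOptions.hpp>

    // Ask the CpuAcc (Neon) backend to enable fast math. With fast math on,
    // the backend may compute FP32 layers in BFloat16 internally, replacing
    // the removed Fp32ToBf16/Bf16ToFp32 conversion layers.
    armnn::BackendOptions cpuAccOptions("CpuAcc",
    {
        { "FastMathEnabled", true }
    });

    armnn::OptimizerOptions optimizerOptions;
    optimizerOptions.m_ModelOptions.push_back(cpuAccOptions);

    // Optimize the already-built network for the CpuAcc backend.
    armnn::IOptimizedNetworkPtr optimizedNet =
        armnn::Optimize(*network,
                        { armnn::Compute::CpuAcc },
                        runtime->GetDeviceSpec(),
                        optimizerOptions);

Correspondingly, ExecuteNetwork now rejects bf16_turbo_mode unless
fast_math_enabled is also set to true.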