author     Ryan OShea <ryan.oshea3@arm.com>     2022-11-07 16:20:48 +0000
committer  ryan.oshea3 <ryan.oshea3@arm.com>    2022-11-16 15:22:50 +0000
commit     31441595009182c985dacbedc70c41ee6664d070 (patch)
tree       248a85295aeff4022c9b395fc97748b0a0aa6b35 /src/backends/backendsCommon/WorkloadData.cpp
parent     bd18eab07a8f30492de1e462b1815189014cb8d5 (diff)
download   armnn-31441595009182c985dacbedc70c41ee6664d070.tar.gz
IVGCVSW-7214 Disable BF16-Turbo-Mode and remove conversion layers
- Remove Bf16ToFp32 Conversion Layer
- Remove Fp32ToBf16 Conversion Layer
- Remove Bf16 Conversion tests
- Throw exception if the m_ReduceFp32ToBf16 optimizer option is set to true
- Provide comments on enabling fast math in order to use bf16
- Update docs to inform users to enable fast math for bf16

Execute Network changes:
- Require bf16_turbo_mode to also have fast_math_enabled set to true
- Remove setting of the m_ReduceFp32ToBf16 optimizer option

Signed-off-by: Ryan OShea <ryan.oshea3@arm.com>
Change-Id: Ibaa6da9d29c96a1ce32ff5196b0847fde9f04a1c
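As context for the change, here is a minimal sketch of how BF16 is opted into after this patch: rather than setting the removed m_ReduceFp32ToBf16 flag (which now throws), fast math is requested as a backend option. The "FastMathEnabled" name is Arm NN's documented backend option; the surrounding network/runtime setup is assumed and abbreviated.

#include <armnn/ArmNN.hpp>

// Sketch: request fast math from the CpuAcc backend, which allows it to use
// BF16 kernels internally. This replaces the removed BF16 turbo mode.
armnn::OptimizerOptions optimizerOptions;
armnn::BackendOptions cpuAccOptions("CpuAcc", {{"FastMathEnabled", true}});
optimizerOptions.m_ModelOptions.push_back(cpuAccOptions);

// 'network' and 'runtime' are assumed to have been created beforehand.
armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(
    *network, {armnn::Compute::CpuAcc}, runtime->GetDeviceSpec(), optimizerOptions);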
Diffstat (limited to 'src/backends/backendsCommon/WorkloadData.cpp')
-rw-r--r--  src/backends/backendsCommon/WorkloadData.cpp  |  46
1 file changed, 0 insertions(+), 46 deletions(-)
diff --git a/src/backends/backendsCommon/WorkloadData.cpp b/src/backends/backendsCommon/WorkloadData.cpp
index 753fe06edb..62dfc6a38b 100644
--- a/src/backends/backendsCommon/WorkloadData.cpp
+++ b/src/backends/backendsCommon/WorkloadData.cpp
@@ -2222,52 +2222,6 @@ void LstmQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
    }
}
-void ConvertBf16ToFp32QueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
-{
-    const std::string descriptorName{"ConvertBf16ToFp32QueueDescriptor"};
-
-    ValidateNumInputs(workloadInfo, descriptorName, 1);
-    ValidateNumOutputs(workloadInfo, descriptorName, 1);
-
-    const TensorInfo& inputTensorInfo  = workloadInfo.m_InputTensorInfos[0];
-    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
-
-    if (inputTensorInfo.GetDataType() != DataType::BFloat16)
-    {
-        throw InvalidArgumentException(descriptorName + ": Input tensor type must be BFloat16.");
-    }
-
-    if (outputTensorInfo.GetDataType() != DataType::Float32)
-    {
-        throw InvalidArgumentException(descriptorName + ": Output tensor type must be Float32.");
-    }
-
-    ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
-}
-
-void ConvertFp32ToBf16QueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
-{
-    const std::string descriptorName{"ConvertFp32ToBf16QueueDescriptor"};
-
-    ValidateNumInputs(workloadInfo, descriptorName, 1);
-    ValidateNumOutputs(workloadInfo, descriptorName, 1);
-
-    const TensorInfo& inputTensorInfo  = workloadInfo.m_InputTensorInfos[0];
-    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
-
-    if (inputTensorInfo.GetDataType() != DataType::Float32)
-    {
-        throw InvalidArgumentException(descriptorName + ": Input tensor type must be Float32.");
-    }
-
-    if (outputTensorInfo.GetDataType() != DataType::BFloat16)
-    {
-        throw InvalidArgumentException(descriptorName + ": Output tensor type must be BFloat16.");
-    }
-
-    ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
-}
-
void ConvertFp32ToFp16QueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
const std::string descriptorName{"ConvertFp32ToFp16QueueDescriptor"};
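The diff context cuts off here. For comparison, the surviving FP32-to-FP16 validator follows the same shape as the two deleted BF16 validators; below is a sketch of its full body, assumed by mirroring the deleted code above rather than quoted from the source.

void ConvertFp32ToFp16QueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
    const std::string descriptorName{"ConvertFp32ToFp16QueueDescriptor"};

    ValidateNumInputs(workloadInfo, descriptorName, 1);
    ValidateNumOutputs(workloadInfo, descriptorName, 1);

    const TensorInfo& inputTensorInfo  = workloadInfo.m_InputTensorInfos[0];
    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];

    // Mirror of the deleted BF16 checks, with Float16 as the target type (assumed).
    if (inputTensorInfo.GetDataType() != DataType::Float32)
    {
        throw InvalidArgumentException(descriptorName + ": Input tensor type must be Float32.");
    }

    if (outputTensorInfo.GetDataType() != DataType::Float16)
    {
        throw InvalidArgumentException(descriptorName + ": Output tensor type must be Float16.");
    }

    ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
}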