aboutsummaryrefslogtreecommitdiff
path: root/src/backends/backendsCommon/WorkloadFactory.cpp
diff options
context:
space:
mode:
authorRyan OShea <ryan.oshea3@arm.com>2022-11-07 16:20:48 +0000
committerryan.oshea3 <ryan.oshea3@arm.com>2022-11-16 15:22:50 +0000
commit31441595009182c985dacbedc70c41ee6664d070 (patch)
tree248a85295aeff4022c9b395fc97748b0a0aa6b35 /src/backends/backendsCommon/WorkloadFactory.cpp
parentbd18eab07a8f30492de1e462b1815189014cb8d5 (diff)
downloadarmnn-31441595009182c985dacbedc70c41ee6664d070.tar.gz
IVGCVSW-7214 Disable BF16-Turbo-Mode and remove conversion layers
- Remove Bf16ToFp32 Conversion Layer - Remove Fp32ToBf16 Conversion Layer - Remove Bf16 Conversion tests * Throw exception if m_ReduceFp32ToBf16 optimizer option is set to true * Provide comments to enable fast math in order to use bf16 * Update docs to inform users to enable fast math for bf16 Execute Network Changes * Require bf16_turbo_mode to also have fast_math_enabled set to true - Remove setting m_ReduceFp32ToBf16 optimizer option Signed-off-by: Ryan OShea <ryan.oshea3@arm.com> Change-Id: Ibaa6da9d29c96a1ce32ff5196b0847fde9f04a1c
Diffstat (limited to 'src/backends/backendsCommon/WorkloadFactory.cpp')
-rw-r--r--src/backends/backendsCommon/WorkloadFactory.cpp38
1 file changed, 0 insertions, 38 deletions
diff --git a/src/backends/backendsCommon/WorkloadFactory.cpp b/src/backends/backendsCommon/WorkloadFactory.cpp
index 665ab3f86c..1283f67660 100644
--- a/src/backends/backendsCommon/WorkloadFactory.cpp
+++ b/src/backends/backendsCommon/WorkloadFactory.cpp
@@ -227,13 +227,6 @@ bool IWorkloadFactory::IsLayerConfigurationSupported(const BackendId& backendId,
result = layerSupportObject.IsConstantSupported(OverrideDataType(output, dataType), reason);
break;
}
- case LayerType::ConvertBf16ToFp32:
- {
- const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
- const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
- result = layerSupportObject.IsConvertBf16ToFp32Supported(input, output, reason);
- break;
- }
case LayerType::ConvertFp16ToFp32:
{
const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
@@ -241,13 +234,6 @@ bool IWorkloadFactory::IsLayerConfigurationSupported(const BackendId& backendId,
result = layerSupportObject.IsConvertFp16ToFp32Supported(input, output, reason);
break;
}
- case LayerType::ConvertFp32ToBf16:
- {
- const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
- const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
- result = layerSupportObject.IsConvertFp32ToBf16Supported(input, output, reason);
- break;
- }
case LayerType::ConvertFp32ToFp16:
{
const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
@@ -1630,24 +1616,12 @@ std::unique_ptr<IWorkload> IWorkloadFactory::CreateWorkload(LayerType type,
auto constantQueueDescriptor = PolymorphicDowncast<const ConstantQueueDescriptor*>(&descriptor);
return CreateConstant(*constantQueueDescriptor, info);
}
- case LayerType::ConvertBf16ToFp32 :
- {
- auto convertBf16ToFp32QueueDescriptor
- = PolymorphicDowncast<const ConvertBf16ToFp32QueueDescriptor*>(&descriptor);
- return CreateConvertBf16ToFp32(*convertBf16ToFp32QueueDescriptor, info);
- }
case LayerType::ConvertFp16ToFp32:
{
auto convertFp16ToFp32QueueDescriptor
= PolymorphicDowncast<const ConvertFp16ToFp32QueueDescriptor*>(&descriptor);
return CreateConvertFp16ToFp32(*convertFp16ToFp32QueueDescriptor, info);
}
- case LayerType::ConvertFp32ToBf16:
- {
- auto convertFp32ToBf16QueueDescriptor
- = PolymorphicDowncast<const ConvertFp32ToBf16QueueDescriptor*>(&descriptor);
- return CreateConvertFp32ToBf16(*convertFp32ToBf16QueueDescriptor, info);
- }
case LayerType::ConvertFp32ToFp16:
{
auto convertFp32ToFp16QueueDescriptor
@@ -1992,24 +1966,12 @@ std::unique_ptr<IWorkload> IWorkloadFactory::CreateConstant(const ConstantQueueD
return std::unique_ptr<IWorkload>();
}
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateConvertBf16ToFp32(const ConvertBf16ToFp32QueueDescriptor& /*desc*/,
- const WorkloadInfo& /*info*/) const
-{
- return std::unique_ptr<IWorkload>();
-}
-
std::unique_ptr<IWorkload> IWorkloadFactory::CreateConvertFp16ToFp32(const ConvertFp16ToFp32QueueDescriptor& /*desc*/,
const WorkloadInfo& /*info*/) const
{
return std::unique_ptr<IWorkload>();
}
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateConvertFp32ToBf16(const ConvertFp32ToBf16QueueDescriptor& /*desc*/,
- const WorkloadInfo& /*info*/) const
-{
- return std::unique_ptr<IWorkload>();
-}
-
std::unique_ptr<IWorkload> IWorkloadFactory::CreateConvertFp32ToFp16(const ConvertFp32ToFp16QueueDescriptor& /*desc*/,
const WorkloadInfo& /*info*/) const
{