From 31441595009182c985dacbedc70c41ee6664d070 Mon Sep 17 00:00:00 2001
From: Ryan OShea
Date: Mon, 7 Nov 2022 16:20:48 +0000
Subject: IVGCVSW-7214 Disable BF16-Turbo-Mode and remove conversion layers

 - Remove Bf16ToFp32 Conversion Layer
 - Remove Fp32ToBf16 Conversion Layer
 - Remove Bf16 Conversion tests
 * Throw exception if m_ReduceFp32ToBf16 optimizer option is set to true
 * Provide comments to enable fast math in order to use bf16
 * Update docs to inform users to enable fast math for bf16

 Execute Network Changes
 * Require bf16_turbo_mode to also have fast_math_enabled set to true
 - Remove setting m_ReduceFp32ToBf16 optimizer option

Signed-off-by: Ryan OShea
Change-Id: Ibaa6da9d29c96a1ce32ff5196b0847fde9f04a1c
---
 src/backends/reference/RefLayerSupport.cpp         | 119 +--------------------
 src/backends/reference/RefLayerSupport.hpp         |   7 --
 src/backends/reference/RefWorkloadFactory.cpp      |  26 -----
 src/backends/reference/RefWorkloadFactory.hpp      |  10 --
 src/backends/reference/backend.mk                  |   2 -
 src/backends/reference/test/RefEndToEndTests.cpp   |  10 --
 .../reference/test/RefLayerSupportTests.cpp        |  74 +------------
 src/backends/reference/test/RefLayerTests.cpp      |  94 ----------------
 src/backends/reference/workloads/BaseIterator.hpp  |  60 -----------
 src/backends/reference/workloads/CMakeLists.txt    |   4 -
 src/backends/reference/workloads/Decoders.hpp      |   4 -
 src/backends/reference/workloads/Encoders.hpp      |   4 -
 .../workloads/RefConvertBf16ToFp32Workload.cpp     |  39 -------
 .../workloads/RefConvertBf16ToFp32Workload.hpp     |  24 -----
 .../workloads/RefConvertFp32ToBf16Workload.cpp     |  39 -------
 .../workloads/RefConvertFp32ToBf16Workload.hpp     |  24 -----
 src/backends/reference/workloads/RefWorkloads.hpp  |   2 -
 17 files changed, 8 insertions(+), 534 deletions(-)
 delete mode 100644 src/backends/reference/workloads/RefConvertBf16ToFp32Workload.cpp
 delete mode 100644 src/backends/reference/workloads/RefConvertBf16ToFp32Workload.hpp
 delete mode 100644 src/backends/reference/workloads/RefConvertFp32ToBf16Workload.cpp
 delete mode 100644 src/backends/reference/workloads/RefConvertFp32ToBf16Workload.hpp

(limited to 'src/backends/reference')

diff --git a/src/backends/reference/RefLayerSupport.cpp b/src/backends/reference/RefLayerSupport.cpp
index 40909019ba..669c91d628 100644
--- a/src/backends/reference/RefLayerSupport.cpp
+++ b/src/backends/reference/RefLayerSupport.cpp
@@ -120,12 +120,8 @@ bool RefLayerSupport::IsLayerSupported(const LayerType& type,
         }
         case LayerType::Constant:
             return IsConstantSupported(infos[0], reasonIfUnsupported);
-        case LayerType::ConvertBf16ToFp32:
-            return IsConvertBf16ToFp32Supported(infos[0], infos[1], reasonIfUnsupported);
         case LayerType::ConvertFp16ToFp32:
             return IsConvertFp16ToFp32Supported(infos[0], infos[1], reasonIfUnsupported);
-        case LayerType::ConvertFp32ToBf16:
-            return IsConvertFp32ToBf16Supported(infos[0], infos[1], reasonIfUnsupported);
         case LayerType::ConvertFp32ToFp16:
             return IsConvertFp32ToFp16Supported(infos[0], infos[1], reasonIfUnsupported);
         case LayerType::Convolution2d:
@@ -518,7 +514,6 @@ bool RefLayerSupport::IsActivationSupported(const TensorInfo& input,
     // Define supported types.
std::array supportedTypes = { - DataType::BFloat16, DataType::Float32, DataType::Float16, DataType::QAsymmS8, @@ -585,7 +580,6 @@ bool RefLayerSupport::IsAdditionSupported(const TensorInfo& input0, bool supported = true; std::array supportedTypes = { - DataType::BFloat16, DataType::Float32, DataType::Float16, DataType::QAsymmS8, @@ -623,7 +617,6 @@ bool RefLayerSupport::IsArgMinMaxSupported(const armnn::TensorInfo &input, const std::array supportedInputTypes = { - DataType::BFloat16, DataType::Float16, DataType::Float32, DataType::QAsymmS8, @@ -658,7 +651,6 @@ bool RefLayerSupport::IsBatchMatMulSupported(const TensorInfo& inputX, std::array supportedTypes = { - DataType::BFloat16, DataType::Float16, DataType::Float32, DataType::QAsymmS8, @@ -707,7 +699,6 @@ bool RefLayerSupport::IsBatchNormalizationSupported(const TensorInfo& input, std::array supportedTypes = { - DataType::BFloat16, DataType::Float32, DataType::Float16, DataType::QAsymmS8, @@ -757,7 +748,6 @@ bool RefLayerSupport::IsBatchToSpaceNdSupported(const TensorInfo& input, // Define supported types. std::array supportedTypes = { - DataType::BFloat16, DataType::Float32, DataType::Float16, DataType::QAsymmS8, @@ -797,7 +787,6 @@ bool RefLayerSupport::IsCastSupported(const TensorInfo& input, { std::array supportedInputTypes = { - DataType::BFloat16, DataType::Float32, DataType::Float16, DataType::QSymmS8, @@ -832,7 +821,6 @@ bool RefLayerSupport::IsChannelShuffleSupported(const TensorInfo& input, // Define supported output and inputs types. std::array supportedTypes = { - DataType::BFloat16, DataType::Float32, DataType::Float16, DataType::QAsymmS8, @@ -864,7 +852,6 @@ bool RefLayerSupport::IsComparisonSupported(const TensorInfo& input0, std::array supportedInputTypes = { DataType::Boolean, - DataType::BFloat16, DataType::Float32, DataType::Float16, DataType::QAsymmS8, @@ -896,7 +883,6 @@ bool RefLayerSupport::IsConcatSupported(const std::vector inp bool supported = true; std::array supportedTypes = { - DataType::BFloat16, DataType::Float32, DataType::Float16, DataType::QAsymmS8, @@ -925,7 +911,6 @@ bool RefLayerSupport::IsConstantSupported(const TensorInfo& output, { std::array supportedTypes = { - DataType::BFloat16, DataType::Float16, DataType::Float32, DataType::QAsymmS8, @@ -939,21 +924,6 @@ bool RefLayerSupport::IsConstantSupported(const TensorInfo& output, "Reference constant: output is not a supported type."); } -bool RefLayerSupport::IsConvertBf16ToFp32Supported(const TensorInfo& input, - const TensorInfo& output, - Optional reasonIfUnsupported) const -{ - bool supported = true; - - supported &= CheckSupportRule(TypeIs(input, DataType::BFloat16), reasonIfUnsupported, - "Reference for ConvertBf16ToFp32 layer: input type not supported"); - - supported &= CheckSupportRule(TypeIs(output, DataType::Float32), reasonIfUnsupported, - "Reference for ConvertBf16ToFp32 layer: output type not supported"); - - return supported; -} - bool RefLayerSupport::IsConvertFp16ToFp32Supported(const TensorInfo& input, const TensorInfo& output, Optional reasonIfUnsupported) const @@ -974,21 +944,6 @@ bool RefLayerSupport::IsConvertFp16ToFp32Supported(const TensorInfo& input, &FalseFuncU8<>)); } -bool RefLayerSupport::IsConvertFp32ToBf16Supported(const TensorInfo& input, - const TensorInfo& output, - Optional reasonIfUnsupported) const -{ - bool supported = true; - - supported &= CheckSupportRule(TypeIs(input, DataType::Float32), reasonIfUnsupported, - "Reference for ConvertFp32ToBf16 layer: input type not supported"); - - supported &= 
CheckSupportRule(TypeIs(output, DataType::BFloat16), reasonIfUnsupported, - "Reference for ConvertFp32ToBf16 layer: output type not supported"); - - return supported; -} - bool RefLayerSupport::IsConvertFp32ToFp16Supported(const TensorInfo& input, const TensorInfo& output, Optional reasonIfUnsupported) const @@ -1021,7 +976,6 @@ bool RefLayerSupport::IsConvolution2dSupported(const TensorInfo& input, // Define supported types. std::array supportedTypes = { - DataType::BFloat16, DataType::Float32, DataType::Float16, DataType::QAsymmS8, @@ -1036,20 +990,9 @@ bool RefLayerSupport::IsConvolution2dSupported(const TensorInfo& input, supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported, "Reference Convolution2d: output is not a supported type."); - // For Convolution2d, we allow to have BFloat16 input with Float32 output for optimization. - if (input.GetDataType() == DataType::BFloat16) - { - if (output.GetDataType() != DataType::BFloat16 && output.GetDataType() != DataType::Float32) - { - reasonIfUnsupported.value() += "Output tensor type must be BFloat16 or Float32 for BFloat16 input.\n"; - supported = false; - } - } - else - { - supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported, - "Reference Convolution2d: input and output types mismatched."); - } + supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported, + "Reference Convolution2d: input and output types mismatched."); + const DataType inputType = input.GetDataType(); if (IsQuantized8BitType(inputType)) @@ -1077,7 +1020,6 @@ bool RefLayerSupport::IsConvolution2dSupported(const TensorInfo& input, { std::array biasesSupportedTypes = { - DataType::BFloat16, DataType::Float32, DataType::Float16, DataType::Signed32 @@ -1103,7 +1045,6 @@ bool RefLayerSupport::IsConvolution3dSupported(const TensorInfo& input, // Define supported types. std::array supportedTypes = { - DataType::BFloat16, DataType::Float32, DataType::Float16, DataType::QAsymmS8, @@ -1147,7 +1088,6 @@ bool RefLayerSupport::IsConvolution3dSupported(const TensorInfo& input, { std::array biasesSupportedTypes = { - DataType::BFloat16, DataType::Float32, DataType::Float16, DataType::Signed32 @@ -1201,7 +1141,6 @@ bool RefLayerSupport::IsDepthToSpaceSupported(const TensorInfo& input, std::array supportedTypes = { - DataType::BFloat16, DataType::Float32, DataType::Float16, DataType::QAsymmS8, @@ -1234,7 +1173,6 @@ bool RefLayerSupport::IsDepthwiseConvolutionSupported(const TensorInfo& input, // Define supported types. 
std::array supportedTypes = { - DataType::BFloat16, DataType::Float32, DataType::Float16, DataType::QAsymmS8, @@ -1279,7 +1217,6 @@ bool RefLayerSupport::IsDepthwiseConvolutionSupported(const TensorInfo& input, { std::array biasesSupportedTypes = { - DataType::BFloat16, DataType::Float32, DataType::Float16, DataType::Signed32 @@ -1313,7 +1250,6 @@ bool RefLayerSupport::IsDequantizeSupported(const TensorInfo& input, "Reference for Dequantize layer: per-axis quantized input not supported."); std::array supportedOutputTypes = { - DataType::BFloat16, DataType::Float32, DataType::Float16 }; @@ -1344,7 +1280,6 @@ bool RefLayerSupport::IsDetectionPostProcessSupported(const TensorInfo& boxEncod std::array supportedInputTypes = { - DataType::BFloat16, DataType::Float32, DataType::Float16, DataType::QAsymmS8, @@ -1379,7 +1314,6 @@ bool RefLayerSupport::IsDivisionSupported(const TensorInfo& input0, bool supported = true; std::array supportedTypes = { - DataType::BFloat16, DataType::Float32, DataType::Float16, DataType::QAsymmS8, @@ -1418,7 +1352,6 @@ bool RefLayerSupport::IsElementwiseUnarySupported(const TensorInfo& input, std::array supportedTypes = { - DataType::BFloat16, DataType::Float32, DataType::Float16, DataType::QAsymmS8, @@ -1513,7 +1446,6 @@ bool RefLayerSupport::IsFloorSupported(const TensorInfo& input, std::array supportedTypes = { - DataType::BFloat16, DataType::Float32, DataType::Float16 }; @@ -1539,7 +1471,6 @@ bool RefLayerSupport::IsFullyConnectedSupported(const TensorInfo& input, // Define supported types. std::array supportedTypes = { - DataType::BFloat16, DataType::Float32, DataType::Float16, DataType::QAsymmS8, @@ -1556,20 +1487,8 @@ bool RefLayerSupport::IsFullyConnectedSupported(const TensorInfo& input, supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported, "Reference Fully Connected: weights type not supported."); - // For FullyConnected, we allow to have BFloat16 input with Float32 output for optimization. 
- if (input.GetDataType() == DataType::BFloat16) - { - if (output.GetDataType() != DataType::BFloat16 && output.GetDataType() != DataType::Float32) - { - reasonIfUnsupported.value() += "Output tensor type must be BFloat16 or Float32 for BFloat16 input.\n"; - supported = false; - } - } - else - { - supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported, - "Reference Fully Connected: input and output types mismatched."); - } + supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported, + "Reference Fully Connected: input and output types mismatched."); supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported, "Reference Fully Connected: weights is not a supported type."); @@ -1583,7 +1502,6 @@ bool RefLayerSupport::IsFullyConnectedSupported(const TensorInfo& input, std::array supportedBiasTypes = { - DataType::BFloat16, DataType::Float32, DataType::Float16, DataType::Signed32, @@ -1615,7 +1533,6 @@ bool RefLayerSupport::IsGatherNdSupported(const armnn::TensorInfo& input0, bool supported = true; std::array supportedTypes = { - DataType::BFloat16, DataType::Float32, DataType::Float16, DataType::QAsymmS8, @@ -1648,7 +1565,6 @@ bool RefLayerSupport::IsGatherSupported(const armnn::TensorInfo& input0, bool supported = true; std::array supportedTypes = { - DataType::BFloat16, DataType::Float32, DataType::Float16, DataType::QAsymmS8, @@ -1692,7 +1608,6 @@ bool RefLayerSupport::IsInstanceNormalizationSupported(const TensorInfo& input, // Define supported types std::array supportedTypes = { - DataType::BFloat16, DataType::Float32, DataType::Float16 }; @@ -1724,7 +1639,6 @@ bool RefLayerSupport::IsL2NormalizationSupported(const TensorInfo& input, // Define supported types std::array supportedTypes = { - DataType::BFloat16, DataType::Float32, DataType::Float16, DataType::QAsymmS8, @@ -1784,7 +1698,6 @@ bool RefLayerSupport::IsLogSoftmaxSupported(const TensorInfo& input, std::array supportedTypes = { - DataType::BFloat16, DataType::Float32, DataType::Float16 }; @@ -1819,7 +1732,6 @@ bool RefLayerSupport::IsLstmSupported(const TensorInfo& input, bool supported = true; std::array supportedTypes = { - DataType::BFloat16, DataType::Float32, DataType::QSymmS16 }; @@ -1922,7 +1834,6 @@ bool RefLayerSupport::IsMaximumSupported(const TensorInfo& input0, bool supported = true; std::array supportedTypes = { - DataType::BFloat16, DataType::Float32, DataType::Float16, DataType::QAsymmS8, @@ -1963,7 +1874,6 @@ bool RefLayerSupport::IsMeanSupported(const TensorInfo& input, std::array supportedTypes = { - DataType::BFloat16, DataType::Float32, DataType::Float16, DataType::QAsymmS8, @@ -2052,7 +1962,6 @@ bool RefLayerSupport::IsMinimumSupported(const TensorInfo& input0, bool supported = true; std::array supportedTypes = { - DataType::BFloat16, DataType::Float32, DataType::Float16, DataType::QAsymmS8, @@ -2090,7 +1999,6 @@ bool RefLayerSupport::IsMultiplicationSupported(const TensorInfo& input0, bool supported = true; std::array supportedTypes = { - DataType::BFloat16, DataType::Float32, DataType::Float16, DataType::QAsymmS8, @@ -2130,7 +2038,6 @@ bool RefLayerSupport::IsNormalizationSupported(const TensorInfo& input, // Define supported types std::array supportedTypes = { - DataType::BFloat16, DataType::Float16, DataType::Float32, DataType::QAsymmS8, @@ -2170,7 +2077,6 @@ bool RefLayerSupport::IsPadSupported(const TensorInfo& input, // Define supported output and inputs types. 
std::array supportedTypes = { - DataType::BFloat16, DataType::Float32, DataType::Float16, DataType::QAsymmS8, @@ -2232,7 +2138,6 @@ bool RefLayerSupport::IsPooling2dSupported(const TensorInfo& input, // Define supported output and inputs types. std::array supportedTypes = { - DataType::BFloat16, DataType::Float32, DataType::Float16, DataType::QAsymmS8, @@ -2263,7 +2168,6 @@ bool RefLayerSupport::IsPooling3dSupported(const TensorInfo& input, // Define supported output and inputs types. std::array supportedTypes = { - DataType::BFloat16, DataType::Float32, DataType::Float16, DataType::QAsymmS8, @@ -2316,7 +2220,6 @@ bool RefLayerSupport::IsQuantizeSupported(const TensorInfo& input, // Define supported input types. std::array supportedInputTypes = { - DataType::BFloat16, DataType::Float32, DataType::Float16, DataType::QAsymmS8, @@ -2368,7 +2271,6 @@ bool RefLayerSupport::IsReduceSupported(const TensorInfo& input, bool supported = true; std::array supportedTypes = { - DataType::BFloat16, DataType::Float32, DataType::Float16, DataType::QAsymmS8, @@ -2470,7 +2372,6 @@ bool RefLayerSupport::IsSliceSupported(const TensorInfo& input, std::array supportedTypes = { - DataType::BFloat16, DataType::Float32, DataType::QAsymmS8, DataType::QAsymmU8, @@ -2498,7 +2399,6 @@ bool RefLayerSupport::IsSoftmaxSupported(const TensorInfo& input, bool supported = true; std::array supportedTypes = { - DataType::BFloat16, DataType::Float32, DataType::Float16, DataType::QSymmS8, @@ -2528,7 +2428,6 @@ bool RefLayerSupport::IsSpaceToBatchNdSupported(const TensorInfo& input, bool supported = true; std::array supportedTypes = { - DataType::BFloat16, DataType::Float32, DataType::Float16, DataType::QAsymmS8, @@ -2559,7 +2458,6 @@ bool RefLayerSupport::IsSpaceToDepthSupported(const TensorInfo& input, std::array supportedTypes = { - DataType::BFloat16, DataType::Float32, DataType::Float16, DataType::QAsymmS8, @@ -2588,7 +2486,6 @@ bool RefLayerSupport::IsSplitterSupported(const TensorInfo& input, bool supported = true; std::array supportedTypes = { - DataType::BFloat16, DataType::Float32, DataType::Float16, DataType::QAsymmS8, @@ -2620,7 +2517,6 @@ bool RefLayerSupport::IsStackSupported(const std::vector& inp bool supported = true; std::array supportedTypes = { - DataType::BFloat16, DataType::Float32, DataType::Float16, DataType::QAsymmS8, @@ -2654,7 +2550,6 @@ bool RefLayerSupport::IsStridedSliceSupported(const TensorInfo& input, std::array supportedTypes = { - DataType::BFloat16, DataType::Float32, DataType::QAsymmS8, DataType::QAsymmU8, @@ -2681,7 +2576,6 @@ bool RefLayerSupport::IsSubtractionSupported(const TensorInfo& input0, bool supported = true; std::array supportedTypes = { - DataType::BFloat16, DataType::Float32, DataType::Float16, DataType::QAsymmS8, @@ -2720,7 +2614,6 @@ bool RefLayerSupport::IsPreluSupported(const TensorInfo& input, std::array supportedTypes { - DataType::BFloat16, DataType::Float32, DataType::Float16, DataType::QAsymmS8, @@ -2758,7 +2651,6 @@ bool RefLayerSupport::IsTransposeConvolution2dSupported(const TensorInfo& input, std::array supportedTypes = { - DataType::BFloat16, DataType::Float32, DataType::Float16, DataType::QAsymmS8, @@ -2804,7 +2696,6 @@ bool RefLayerSupport::IsTransposeConvolution2dSupported(const TensorInfo& input, { std::array biasesSupportedTypes = { - DataType::BFloat16, DataType::Float32, DataType::Float16, DataType::Signed32 diff --git a/src/backends/reference/RefLayerSupport.hpp b/src/backends/reference/RefLayerSupport.hpp index b64244db24..f0e9e35978 100644 --- 
a/src/backends/reference/RefLayerSupport.hpp +++ b/src/backends/reference/RefLayerSupport.hpp @@ -77,17 +77,10 @@ public: bool IsConstantSupported(const TensorInfo& output, Optional reasonIfUnsupported = EmptyOptional()) const override; - bool IsConvertBf16ToFp32Supported(const TensorInfo& input, - const TensorInfo& output, - Optional reasonIfUnsupported = EmptyOptional()) const override; - bool IsConvertFp16ToFp32Supported(const TensorInfo& input, const TensorInfo& output, Optional reasonIfUnsupported = EmptyOptional()) const override; - bool IsConvertFp32ToBf16Supported(const TensorInfo& input, - const TensorInfo& output, - Optional reasonIfUnsupported = EmptyOptional()) const override; bool IsConvertFp32ToFp16Supported(const TensorInfo& input, const TensorInfo& output, diff --git a/src/backends/reference/RefWorkloadFactory.cpp b/src/backends/reference/RefWorkloadFactory.cpp index 093d0d5e20..69f75cae8a 100644 --- a/src/backends/reference/RefWorkloadFactory.cpp +++ b/src/backends/reference/RefWorkloadFactory.cpp @@ -212,24 +212,12 @@ std::unique_ptr RefWorkloadFactory::CreateWorkload(LayerType type, auto constantQueueDescriptor = PolymorphicDowncast(&descriptor); return std::make_unique(*constantQueueDescriptor, info); } - case LayerType::ConvertBf16ToFp32 : - { - auto convertBf16ToFp32QueueDescriptor - = PolymorphicDowncast(&descriptor); - return std::make_unique(*convertBf16ToFp32QueueDescriptor, info); - } case LayerType::ConvertFp16ToFp32: { auto convertFp16ToFp32QueueDescriptor = PolymorphicDowncast(&descriptor); return std::make_unique(*convertFp16ToFp32QueueDescriptor, info); } - case LayerType::ConvertFp32ToBf16: - { - auto convertFp32ToBf16QueueDescriptor - = PolymorphicDowncast(&descriptor); - return std::make_unique(*convertFp32ToBf16QueueDescriptor, info); - } case LayerType::ConvertFp32ToFp16: { auto convertFp32ToFp16QueueDescriptor @@ -724,13 +712,6 @@ std::unique_ptr RefWorkloadFactory::CreateConstant(const ConstantQueu return std::make_unique(descriptor, info); } -std::unique_ptr RefWorkloadFactory::CreateConvertBf16ToFp32( - const ConvertBf16ToFp32QueueDescriptor& descriptor, - const WorkloadInfo& info) const -{ - return std::make_unique(descriptor, info); -} - std::unique_ptr RefWorkloadFactory::CreateConvertFp16ToFp32( const ConvertFp16ToFp32QueueDescriptor& descriptor, const WorkloadInfo& info) const @@ -738,13 +719,6 @@ std::unique_ptr RefWorkloadFactory::CreateConvertFp16ToFp32( return std::make_unique(descriptor, info); } -std::unique_ptr RefWorkloadFactory::CreateConvertFp32ToBf16( - const ConvertFp32ToBf16QueueDescriptor& descriptor, - const WorkloadInfo& info) const -{ - return std::make_unique(descriptor, info); -} - std::unique_ptr RefWorkloadFactory::CreateConvertFp32ToFp16( const ConvertFp32ToFp16QueueDescriptor& descriptor, const WorkloadInfo& info) const diff --git a/src/backends/reference/RefWorkloadFactory.hpp b/src/backends/reference/RefWorkloadFactory.hpp index 53d0806ffe..22dc35a8c8 100644 --- a/src/backends/reference/RefWorkloadFactory.hpp +++ b/src/backends/reference/RefWorkloadFactory.hpp @@ -120,21 +120,11 @@ public: std::unique_ptr CreateConstant(const ConstantQueueDescriptor& descriptor, const WorkloadInfo& info) const override; - ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable " - "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "23.08") - std::unique_ptr CreateConvertBf16ToFp32(const ConvertBf16ToFp32QueueDescriptor& descriptor, - const WorkloadInfo& info) const override; - 
ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable " "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "23.08") std::unique_ptr CreateConvertFp16ToFp32(const ConvertFp16ToFp32QueueDescriptor& descriptor, const WorkloadInfo& info) const override; - ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable " - "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "23.08") - std::unique_ptr CreateConvertFp32ToBf16(const ConvertFp32ToBf16QueueDescriptor& descriptor, - const WorkloadInfo& info) const override; - ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable " "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "23.08") std::unique_ptr CreateConvertFp32ToFp16(const ConvertFp32ToFp16QueueDescriptor& descriptor, diff --git a/src/backends/reference/backend.mk b/src/backends/reference/backend.mk index ed942e67cd..eb2ec2df44 100644 --- a/src/backends/reference/backend.mk +++ b/src/backends/reference/backend.mk @@ -58,9 +58,7 @@ BACKEND_SOURCES := \ workloads/RefComparisonWorkload.cpp \ workloads/RefConcatWorkload.cpp \ workloads/RefConstantWorkload.cpp \ - workloads/RefConvertBf16ToFp32Workload.cpp \ workloads/RefConvertFp16ToFp32Workload.cpp \ - workloads/RefConvertFp32ToBf16Workload.cpp \ workloads/RefConvertFp32ToFp16Workload.cpp \ workloads/RefConvolution2dWorkload.cpp \ workloads/RefConvolution3dWorkload.cpp \ diff --git a/src/backends/reference/test/RefEndToEndTests.cpp b/src/backends/reference/test/RefEndToEndTests.cpp index 369e98adb1..a8c0634186 100644 --- a/src/backends/reference/test/RefEndToEndTests.cpp +++ b/src/backends/reference/test/RefEndToEndTests.cpp @@ -634,11 +634,6 @@ TEST_CASE("RefEluEndToEndTestFloat16") EluEndToEndTest(defaultBackends); } -TEST_CASE("RefEluEndToEndTestBFloat16") -{ - EluEndToEndTest(defaultBackends); -} - TEST_CASE("RefEluEndToEndTestQAsymmS8") { EluEndToEndTest(defaultBackends); @@ -1006,11 +1001,6 @@ TEST_CASE("RefHardSwishEndToEndTestFloat16") HardSwishEndToEndTest(defaultBackends); } -TEST_CASE("RefHardSwishEndToEndTestBFloat16") -{ - HardSwishEndToEndTest(defaultBackends); -} - TEST_CASE("RefHardSwishEndToEndTestQAsymmS8") { HardSwishEndToEndTest(defaultBackends); diff --git a/src/backends/reference/test/RefLayerSupportTests.cpp b/src/backends/reference/test/RefLayerSupportTests.cpp index 9a27c7c0b3..e671496ce2 100644 --- a/src/backends/reference/test/RefLayerSupportTests.cpp +++ b/src/backends/reference/test/RefLayerSupportTests.cpp @@ -49,12 +49,6 @@ TEST_CASE("IsLayerSupportedReferenceAddition") CHECK(supportChecker.IsAdditionSupported(in0, in1, out, reasonNotSupported)); } -TEST_CASE("IsLayerSupportedBFloat16Reference") -{ - armnn::RefWorkloadFactory factory; - IsLayerSupportedTests(&factory); -} - TEST_CASE("IsLayerSupportedFloat16Reference") { armnn::RefWorkloadFactory factory; @@ -117,70 +111,6 @@ TEST_CASE("IsConvertFp16ToFp32SupportedFp16OutputReference") CHECK_EQ(reasonIfUnsupported, "Layer is not supported with float16 data type output"); } -TEST_CASE("IsConvertBf16ToFp32SupportedReference") -{ - std::string reasonIfUnsupported; - - bool result = IsConvertLayerSupportedTests(reasonIfUnsupported); - - CHECK(result); -} - -TEST_CASE("IsConvertBf16ToFp32SupportedFp32InputReference") -{ - std::string reasonIfUnsupported; - - bool result = IsConvertLayerSupportedTests(reasonIfUnsupported); - - CHECK(!result); - CHECK_EQ(reasonIfUnsupported, "Reference for ConvertBf16ToFp32 layer: input type not supported\n"); -} - 
-TEST_CASE("IsConvertBf16ToFp32SupportedBf16OutputReference") -{ - std::string reasonIfUnsupported; - - bool result = IsConvertLayerSupportedTests(reasonIfUnsupported); - - CHECK(!result); - CHECK_EQ(reasonIfUnsupported, "Reference for ConvertBf16ToFp32 layer: output type not supported\n"); -} - -TEST_CASE("IsConvertFp32ToBf16SupportedReference") -{ - std::string reasonIfUnsupported; - - bool result = IsConvertLayerSupportedTests(reasonIfUnsupported); - - CHECK(result); -} - -TEST_CASE("IsConvertFp32ToBf16SupportedBf16InputReference") -{ - std::string reasonIfUnsupported; - - bool result = IsConvertLayerSupportedTests(reasonIfUnsupported); - - CHECK(!result); - CHECK_EQ(reasonIfUnsupported, "Reference for ConvertFp32ToBf16 layer: input type not supported\n"); -} - -TEST_CASE("IsConvertFp32ToBf16SupportedFp32OutputReference") -{ - std::string reasonIfUnsupported; - - bool result = IsConvertLayerSupportedTests(reasonIfUnsupported); - - CHECK(!result); - CHECK_EQ(reasonIfUnsupported, "Reference for ConvertFp32ToBf16 layer: output type not supported\n"); -} - TEST_CASE("IsConvertFp32ToFp16SupportedReference") { std::string reasonIfUnsupported; @@ -271,7 +201,9 @@ TEST_CASE("IsConstantSupportedRef") result = IsConstantLayerSupportedTests(reasonIfUnsupported); - CHECK(result); + CHECK(!result); + CHECK(reasonIfUnsupported.find("Reference constant: output is not a supported type.") != std::string::npos); + } } diff --git a/src/backends/reference/test/RefLayerTests.cpp b/src/backends/reference/test/RefLayerTests.cpp index 7375847602..750da8fba2 100644 --- a/src/backends/reference/test/RefLayerTests.cpp +++ b/src/backends/reference/test/RefLayerTests.cpp @@ -72,14 +72,6 @@ ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleConvolution2dAsymmetricPaddingNhwc, ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleConvolution2dSquareNhwc, SimpleConvolution2d3x3NhwcTest, false) -ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2d3x3Dilation3x3BFloat16, - Convolution2d3x3Dilation3x3Test, - false, - DataLayout::NCHW) -ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2d3x3Dilation3x3NhwcBFloat16, - Convolution2d3x3Dilation3x3Test, - false, - DataLayout::NHWC) ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2d3x3Dilation3x3, Convolution2d3x3Dilation3x3Test, false, @@ -113,14 +105,6 @@ ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2d3x3Dilation3x3NhwcInt16, false, DataLayout::NHWC) -ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2d2x3x3Dilation3x3BFloat16, - Convolution2d2x3x3Dilation3x3Test, - false, - DataLayout::NCHW) -ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2d2x3x3Dilation3x3NhwcBFloat16, - Convolution2d2x3x3Dilation3x3Test, - false, - DataLayout::NHWC) ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2d2x3x3Dilation3x3, Convolution2d2x3x3Dilation3x3Test, false, @@ -154,15 +138,6 @@ ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2d2x3x3Dilation3x3NhwcInt16, false, DataLayout::NHWC) -ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2d2x2Dilation2x2Padding2x2Stride3x3BFloat16, - Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test, - false, - DataLayout::NCHW) - -ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2d2x2Dilation2x2Padding2x2Stride3x3NhwcBFloat16, - Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test, - false, - DataLayout::NHWC) ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2d2x2Dilation2x2Padding2x2Stride3x3, Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test, false, @@ -199,15 +174,6 @@ ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2d2x2Dilation2x2Padding2x2Stride3x3Nhwc ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2dPerAxisQuantTestNchw, Convolution2dPerAxisQuantTest, DataLayout::NCHW); 
ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2dPerAxisQuantTestNhwc, Convolution2dPerAxisQuantTest, DataLayout::NHWC); -ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2d3x3Stride2x2Bf16, - Convolution2d3x3Stride2x2BFloat16Test, - false, - DataLayout::NHWC); -ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2d3x3Stride2x2BFloat16SmallValue, - Convolution2d3x3Stride2x2BFloat16SmallValueTest, - false, - DataLayout::NHWC); - // Convolution 3d - NDHWC ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleConvolution3d3x3x3Float32, SimpleConvolution3d3x3x3Float32Test, @@ -354,14 +320,6 @@ ARMNN_AUTO_TEST_CASE_WITH_THF(DepthwiseConvolution2d3x3Dilation3x3Nhwc, DepthwiseConvolution2d3x3Dilation3x3Test, false, DataLayout::NHWC) -ARMNN_AUTO_TEST_CASE_WITH_THF(DepthwiseConvolution2d3x3Dilation3x3BFloat16, - DepthwiseConvolution2d3x3Dilation3x3Test, - false, - DataLayout::NCHW) -ARMNN_AUTO_TEST_CASE_WITH_THF(DepthwiseConvolution2d3x3Dilation3x3NhwcBFloat16, - DepthwiseConvolution2d3x3Dilation3x3Test, - false, - DataLayout::NHWC) ARMNN_AUTO_TEST_CASE_WITH_THF(DepthwiseConvolution2d3x3Dilation3x3Int8, DepthwiseConvolution2d3x3Dilation3x3Test, false, @@ -395,14 +353,6 @@ ARMNN_AUTO_TEST_CASE_WITH_THF(DepthwiseConvolution2d2x3x3Dilation3x3Nhwc, DepthwiseConvolution2d2x3x3Dilation3x3Test, false, DataLayout::NHWC) -ARMNN_AUTO_TEST_CASE_WITH_THF(DepthwiseConvolution2d2x3x3Dilation3x3BFloat16, - DepthwiseConvolution2d2x3x3Dilation3x3Test, - false, - DataLayout::NCHW) -ARMNN_AUTO_TEST_CASE_WITH_THF(DepthwiseConvolution2d2x3x3Dilation3x3NhwcBFloat16, - DepthwiseConvolution2d2x3x3Dilation3x3Test, - false, - DataLayout::NHWC) ARMNN_AUTO_TEST_CASE_WITH_THF(DepthwiseConvolution2d2x3x3Dilation3x3Int8, DepthwiseConvolution2d2x3x3Dilation3x3Test, false, @@ -435,14 +385,6 @@ ARMNN_AUTO_TEST_CASE_WITH_THF(DepthwiseConvolution2dMult2, DepthwiseConvolution2dMult2Test, false, armnn::DataLayout::NCHW) -ARMNN_AUTO_TEST_CASE_WITH_THF(DepthwiseConvolution2dMult4BFloat16, - DepthwiseConvolution2dMult4Test, - false, - armnn::DataLayout::NCHW) -ARMNN_AUTO_TEST_CASE_WITH_THF(DepthwiseConvolution2dMult2BFloat16, - DepthwiseConvolution2dMult2Test, - false, - armnn::DataLayout::NCHW) ARMNN_AUTO_TEST_CASE_WITH_THF(DepthwiseConvolution2dDepthMul1, DepthwiseConvolution2dDepthMul1Test, @@ -864,7 +806,6 @@ ARMNN_AUTO_TEST_CASE_WITH_THF(CopyViaSplitterInt16, CopyViaSplitterInt16Test) // Concat ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleConcat, ConcatTest) -ARMNN_AUTO_TEST_CASE_WITH_THF(ConcatBFloat16, ConcatBFloat16Test) ARMNN_AUTO_TEST_CASE_WITH_THF(ConcatFloat16, ConcatFloat16Test) ARMNN_AUTO_TEST_CASE_WITH_THF(ConcatInt32, ConcatInt32Test) ARMNN_AUTO_TEST_CASE_WITH_THF(ConcatUint8, ConcatUint8Test) @@ -1063,91 +1004,78 @@ ARMNN_AUTO_TEST_CASE_WITH_THF(MultiplicationBroadcast1DVectorInt32, Multiplicati ARMNN_AUTO_TEST_CASE_WITH_THF(Multiplication5d, Multiplication5dTest) // Batch Mat Mul -ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul2DSimpleBFloat16, BatchMatMul2DSimpleTest); ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul2DSimpleFloat32, BatchMatMul2DSimpleTest); ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul2DSimpleFloat16, BatchMatMul2DSimpleTest); ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul2DSimpleQAsymmS8, BatchMatMul2DSimpleTest); ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul2DSimpleQAsymmU8, BatchMatMul2DSimpleTest); ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul2DSimpleQASymmS16, BatchMatMul2DSimpleTest); -ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul3DSimpleBFloat16, BatchMatMul3DSimpleTest); ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul3DSimpleFloat32, BatchMatMul3DSimpleTest); 
ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul3DSimpleFloat16, BatchMatMul3DSimpleTest); ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul3DSimpleQAsymmS8, BatchMatMul3DSimpleTest); ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul3DSimpleQAsymmU8, BatchMatMul3DSimpleTest); ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul3DSimpleQASymmS16, BatchMatMul3DSimpleTest); -ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMulNCHWSimpleBFloat16, BatchMatMulNCHWSimpleTest); ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMulNCHWSimpleFloat32, BatchMatMulNCHWSimpleTest); ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMulNCHWSimpleFloat16, BatchMatMulNCHWSimpleTest); ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMulNCHWSimpleQAsymmS8, BatchMatMulNCHWSimpleTest); ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMulNCHWSimpleQAsymmU8, BatchMatMulNCHWSimpleTest); ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMulNCHWSimpleQASymmS16, BatchMatMulNCHWSimpleTest); -ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMulNHWCSimpleBFloat16, BatchMatMulNHWCSimpleTest); ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMulNHWCSimpleFloat32, BatchMatMulNHWCSimpleTest); ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMulNHWCSimpleFloat16, BatchMatMulNHWCSimpleTest); ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMulNHWCSimpleQAsymmS8, BatchMatMulNHWCSimpleTest); ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMulNHWCSimpleQAsymmU8, BatchMatMulNHWCSimpleTest); ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMulNHWCSimpleQASymmS16, BatchMatMulNHWCSimpleTest); -ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul3DBatchBFloat16, BatchMatMul3DBatchTest); ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul3DBatchFloat32, BatchMatMul3DBatchTest); ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul3DBatchFloat16, BatchMatMul3DBatchTest); ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul3DBatchQAsymmS8, BatchMatMul3DBatchTest); ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul3DBatchQAsymmU8, BatchMatMul3DBatchTest); ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul3DBatchQASymmS16, BatchMatMul3DBatchTest); -ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul3DBroadcastBFloat16, BatchMatMul3DBroadcastTest); ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul3DBroadcastFloat32, BatchMatMul3DBroadcastTest); ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul3DBroadcastFloat16, BatchMatMul3DBroadcastTest); ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul3DBroadcastQAsymmS8, BatchMatMul3DBroadcastTest); ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul3DBroadcastQAsymmU8, BatchMatMul3DBroadcastTest); ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul3DBroadcastQASymmS16, BatchMatMul3DBroadcastTest); -ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul3D2DBroadcastBFloat16, BatchMatMul3D2DBroadcastTest); ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul3D2DBroadcastFloat32, BatchMatMul3D2DBroadcastTest); ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul3D2DBroadcastFloat16, BatchMatMul3D2DBroadcastTest); ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul3D2DBroadcastQAsymmS8, BatchMatMul3D2DBroadcastTest); ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul3D2DBroadcastQAsymmU8, BatchMatMul3D2DBroadcastTest); ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul3D2DBroadcastQASymmSS16, BatchMatMul3D2DBroadcastTest); -ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMulNDHWCNHWCBFloat16, BatchMatMulNDHWCNHWCTest); ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMulNDHWCNHWCFloat32, BatchMatMulNDHWCNHWCTest); ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMulNDHWCNHWCFloat16, BatchMatMulNDHWCNHWCTest); ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMulNDHWCNHWCQAsymmS8, BatchMatMulNDHWCNHWCTest); ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMulNDHWCNHWCQAsymmU8, BatchMatMulNDHWCNHWCTest); ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMulNDHWCNHWCQASymmSS16, BatchMatMulNDHWCNHWCTest); 
-ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul2DTinyBFloat16, BatchMatMul2DTinyTest); ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul2DTinyFloat32, BatchMatMul2DTinyTest); ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul2DTinyFloat16, BatchMatMul2DTinyTest); ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul2DTinyQAsymmS8, BatchMatMul2DTinyTest); ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul2DTinyQAsymmU8, BatchMatMul2DTinyTest); ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul2DTinyQASymmS16, BatchMatMul2DTinyTest); -ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul3DNonSquareBFloat16, BatchMatMul3DNonSquareTest); ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul3DNonSquareFloat32, BatchMatMul3DNonSquareTest); ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul3DNonSquareFloat16, BatchMatMul3DNonSquareTest); ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul3DNonSquareQAsymmS8, BatchMatMul3DNonSquareTest); ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul3DNonSquareQAsymmU8, BatchMatMul3DNonSquareTest); ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul3DNonSquareQASymmS16, BatchMatMul3DNonSquareTest); -ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul2DTranspSimpleBFloat16, BatchMatMul2DTranspSimpleTest); ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul2DTranspSimpleFloat32, BatchMatMul2DTranspSimpleTest); ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul2DTranspSimpleFloat16, BatchMatMul2DTranspSimpleTest); ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul2DTranspSimpleQAsymmS8, BatchMatMul2DTranspSimpleTest); ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul2DTranspSimpleQAsymmU8, BatchMatMul2DTranspSimpleTest); ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul2DTranspSimpleQASymmS16,BatchMatMul2DTranspSimpleTest); -ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul2DAdjointSimpleBFloat16, BatchMatMul2DAdjointSimpleTest); ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul2DAdjointSimpleFloat32, BatchMatMul2DAdjointSimpleTest); ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul2DAdjointSimpleFloat16, BatchMatMul2DAdjointSimpleTest); ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul2DAdjointSimpleQAsymmS8, BatchMatMul2DAdjointSimpleTest); ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul2DAdjointSimpleQAsymmU8, BatchMatMul2DAdjointSimpleTest); ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul2DAdjointSimpleQASymmS16,BatchMatMul2DAdjointSimpleTest); -ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMulNHWCParamsBFloat16, BatchMatMulNHWCParamsTest); ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMulNHWCParamsFloat32, BatchMatMulNHWCParamsTest); ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMulNHWCParamsFloat16, BatchMatMulNHWCParamsTest); ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMulNHWCParamsQAsymmS8, BatchMatMulNHWCParamsTest); @@ -1172,7 +1100,6 @@ ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize1Signed32, RankDimSize1Test) ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize1QSymmS8, RankDimSize1Test) ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize1QAsymmS8, RankDimSize1Test) -ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize1BFloat16, RankDimSize1Test) ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize2Float16, RankDimSize2Test) ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize2Float32, RankDimSize2Test) @@ -1181,7 +1108,6 @@ ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize2Signed32, RankDimSize2Test) ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize2QSymmS8, RankDimSize2Test) ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize2QAsymmS8, RankDimSize2Test) -ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize2BFloat16, RankDimSize2Test) ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize3Float16, RankDimSize3Test) ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize3Float32, RankDimSize3Test) @@ -1190,7 +1116,6 @@ ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize3Signed32, RankDimSize3Test) 
ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize3QSymmS8, RankDimSize3Test) ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize3QAsymmS8, RankDimSize3Test) -ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize3BFloat16, RankDimSize3Test) ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize4Float16, RankDimSize4Test) ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize4Float32, RankDimSize4Test) @@ -1199,7 +1124,6 @@ ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize4Signed32, RankDimSize4Test) ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize4QSymmS8, RankDimSize4Test) ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize4QAsymmS8, RankDimSize4Test) -ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize4BFloat16, RankDimSize4Test) // Resize Bilinear - NCHW ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleResizeBilinear, @@ -1650,11 +1574,6 @@ ARMNN_AUTO_TEST_CASE_WITH_THF(LogSoftmaxFloat16_3, LogSoftmaxTest3) // Pad - Constant -ARMNN_AUTO_TEST_CASE_WITH_THF(PadBFloat162d, PadBFloat162dTest) -ARMNN_AUTO_TEST_CASE_WITH_THF(PadBFloat162dCustomPadding, PadBFloat162dCustomPaddingTest) -ARMNN_AUTO_TEST_CASE_WITH_THF(PadBFloat163d, PadBFloat163dTest) -ARMNN_AUTO_TEST_CASE_WITH_THF(PadBFloat164d, PadBFloat164dTest) - ARMNN_AUTO_TEST_CASE_WITH_THF(PadFloat322d, PadFloat322dTest) ARMNN_AUTO_TEST_CASE_WITH_THF(PadFloat322dCustomPadding, PadFloat322dCustomPaddingTest) ARMNN_AUTO_TEST_CASE_WITH_THF(PadFloat323d, PadFloat323dTest) @@ -1692,8 +1611,6 @@ ARMNN_AUTO_TEST_CASE_WITH_THF(PadReflect3dInt8, PadReflect3dInt8Test) ARMNN_AUTO_TEST_CASE_WITH_THF(PadSymmetric4dFloat32, PadSymmetric4dFloat32Test) ARMNN_AUTO_TEST_CASE_WITH_THF(PadReflect4dFloat32, PadReflect4dFloat32Test) -ARMNN_AUTO_TEST_CASE_WITH_THF(PadSymmetric4dBFloat16, PadSymmetric4dBFloat16Test) -ARMNN_AUTO_TEST_CASE_WITH_THF(PadReflect4dBFloat16, PadReflect4dBFloat16Test) ARMNN_AUTO_TEST_CASE_WITH_THF(PadSymmetric4dUint8, PadSymmetric4dUint8Test) ARMNN_AUTO_TEST_CASE_WITH_THF(PadReflect4dUint8, PadReflect4dUint8Test) ARMNN_AUTO_TEST_CASE_WITH_THF(PadSymmetric4dInt8, PadSymmetric4dInt8Test) @@ -1878,17 +1795,10 @@ ARMNN_AUTO_TEST_CASE_WITH_THF(CastUIntToFloat, CastUInt8ToFloat2dTest) ARMNN_AUTO_TEST_CASE_WITH_THF(CastInt8ToUInt, CastInt8ToUInt82dTest) ARMNN_AUTO_TEST_CASE_WITH_THF(CastInt8AsymmToUInt, CastInt8AsymmToUInt82dTest) ARMNN_AUTO_TEST_CASE_WITH_THF(CastFloat16ToFloat32, CastFloat16ToFloat322dTest) -ARMNN_AUTO_TEST_CASE_WITH_THF(CastBFloat16ToFloat32, CastBFloat16ToFloat322dTest) ARMNN_AUTO_TEST_CASE_WITH_THF(CastFloatToFloat16, CastFloat32ToFloat162dTest) ARMNN_AUTO_TEST_CASE_WITH_THF(CastFloatToIn8, CastFloat32ToInt82dTest) ARMNN_AUTO_TEST_CASE_WITH_THF(CastFloatToUInt8, CastFloat32ToUInt82dTest) -// Convert from BFloat16 to Float32 -ARMNN_AUTO_TEST_CASE_WITH_THF(ConvertBf16ToFp32, ConvertBf16ToFp32Test) - -// Convert from Float32 to BFloat16 -ARMNN_AUTO_TEST_CASE_WITH_THF(ConvertFp32ToBf16, ConvertFp32ToBf16Test) - // Convert from Float16 to Float32 ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleConvertFp16ToFp32, SimpleConvertFp16ToFp32Test) // Convert from Float32 to Float16 @@ -2139,7 +2049,6 @@ ARMNN_AUTO_TEST_CASE_WITH_THF(ShapeDimSize1Signed32, ShapeDimSize1Test) ARMNN_AUTO_TEST_CASE_WITH_THF(ShapeDimSize1QSymmS8, ShapeDimSize1Test) ARMNN_AUTO_TEST_CASE_WITH_THF(ShapeDimSize1QAsymmS8, ShapeDimSize1Test) -ARMNN_AUTO_TEST_CASE_WITH_THF(ShapeDimSize1BFloat16, ShapeDimSize1Test) ARMNN_AUTO_TEST_CASE_WITH_THF(ShapeDimSize2Float16, ShapeDimSize2Test) ARMNN_AUTO_TEST_CASE_WITH_THF(ShapeDimSize2Float32, ShapeDimSize2Test) @@ -2148,7 +2057,6 @@ ARMNN_AUTO_TEST_CASE_WITH_THF(ShapeDimSize2Signed32, ShapeDimSize2Test) 
ARMNN_AUTO_TEST_CASE_WITH_THF(ShapeDimSize2QSymmS8, ShapeDimSize2Test) ARMNN_AUTO_TEST_CASE_WITH_THF(ShapeDimSize2QAsymmS8, ShapeDimSize2Test) -ARMNN_AUTO_TEST_CASE_WITH_THF(ShapeDimSize2BFloat16, ShapeDimSize2Test) ARMNN_AUTO_TEST_CASE_WITH_THF(ShapeDimSize3Float16, ShapeDimSize3Test) ARMNN_AUTO_TEST_CASE_WITH_THF(ShapeDimSize3Float32, ShapeDimSize3Test) @@ -2157,7 +2065,6 @@ ARMNN_AUTO_TEST_CASE_WITH_THF(ShapeDimSize3Signed32, ShapeDimSize3Test) ARMNN_AUTO_TEST_CASE_WITH_THF(ShapeDimSize3QSymmS8, ShapeDimSize3Test) ARMNN_AUTO_TEST_CASE_WITH_THF(ShapeDimSize3QAsymmS8, ShapeDimSize3Test) -ARMNN_AUTO_TEST_CASE_WITH_THF(ShapeDimSize3BFloat16, ShapeDimSize3Test) ARMNN_AUTO_TEST_CASE_WITH_THF(ShapeDimSize4Float16, ShapeDimSize4Test) ARMNN_AUTO_TEST_CASE_WITH_THF(ShapeDimSize4Float32, ShapeDimSize4Test) @@ -2166,7 +2073,6 @@ ARMNN_AUTO_TEST_CASE_WITH_THF(ShapeDimSize4Signed32, ShapeDimSize4Test) ARMNN_AUTO_TEST_CASE_WITH_THF(ShapeDimSize4QSymmS8, ShapeDimSize4Test) ARMNN_AUTO_TEST_CASE_WITH_THF(ShapeDimSize4QAsymmS8, ShapeDimSize4Test) -ARMNN_AUTO_TEST_CASE_WITH_THF(ShapeDimSize4BFloat16, ShapeDimSize4Test) // SpaceToDepth ARMNN_AUTO_TEST_CASE_WITH_THF(SpaceToDepthNchwAsymmQ8, SpaceToDepthNchwAsymmQ8Test) diff --git a/src/backends/reference/workloads/BaseIterator.hpp b/src/backends/reference/workloads/BaseIterator.hpp index e09371fd96..2d27951b73 100644 --- a/src/backends/reference/workloads/BaseIterator.hpp +++ b/src/backends/reference/workloads/BaseIterator.hpp @@ -260,44 +260,6 @@ private: }; -class BFloat16Decoder : public TypedIterator> -{ -public: - BFloat16Decoder(const BFloat16* data) - : TypedIterator(data) {} - - BFloat16Decoder() - : BFloat16Decoder(nullptr) {} - - float Get() const override - { - float val = 0.f; - armnnUtils::FloatingPointConverter::ConvertBFloat16ToFloat32(m_Iterator, 1, &val); - return val; - } - std::vector DecodeTensor (const TensorShape& tensorShape, - const bool isDepthwise) override - { - IgnoreUnused(isDepthwise); - - const unsigned int size = tensorShape.GetNumElements(); - std::vector decodedTensor; - decodedTensor.reserve(size); - - for (uint32_t i = 0; i < size; ++i) - { - this->operator[](i); - - float val = 0.f; - armnnUtils::FloatingPointConverter::ConvertBFloat16ToFloat32(m_Iterator, 1, &val); - decodedTensor.emplace_back(val); - } - - return decodedTensor; - } - -}; - class Float16Decoder : public TypedIterator> { public: @@ -624,28 +586,6 @@ private: const int32_t m_Offset; }; -class BFloat16Encoder : public TypedIterator> -{ -public: - BFloat16Encoder(armnn::BFloat16* data) - : TypedIterator(data) {} - - BFloat16Encoder() - : BFloat16Encoder(nullptr) {} - - void Set(float right) override - { - armnnUtils::FloatingPointConverter::ConvertFloat32ToBFloat16(&right, 1, m_Iterator); - } - - float Get() const override - { - float val = 0.f; - armnnUtils::FloatingPointConverter::ConvertBFloat16ToFloat32(m_Iterator, 1, &val); - return val; - } -}; - class Float16Encoder : public TypedIterator> { public: diff --git a/src/backends/reference/workloads/CMakeLists.txt b/src/backends/reference/workloads/CMakeLists.txt index b8835e3cdb..de6c042959 100644 --- a/src/backends/reference/workloads/CMakeLists.txt +++ b/src/backends/reference/workloads/CMakeLists.txt @@ -88,12 +88,8 @@ list(APPEND armnnRefBackendWorkloads_sources RefConcatWorkload.hpp RefConstantWorkload.cpp RefConstantWorkload.hpp - RefConvertBf16ToFp32Workload.cpp - RefConvertBf16ToFp32Workload.hpp RefConvertFp16ToFp32Workload.cpp RefConvertFp16ToFp32Workload.hpp - RefConvertFp32ToBf16Workload.cpp - 
RefConvertFp32ToBf16Workload.hpp RefConvertFp32ToFp16Workload.cpp RefConvertFp32ToFp16Workload.hpp RefConvolution2dWorkload.cpp diff --git a/src/backends/reference/workloads/Decoders.hpp b/src/backends/reference/workloads/Decoders.hpp index c2a456bfce..54e7008d50 100644 --- a/src/backends/reference/workloads/Decoders.hpp +++ b/src/backends/reference/workloads/Decoders.hpp @@ -88,10 +88,6 @@ inline std::unique_ptr> MakeDecoder(const TensorInfo& info, const info.GetQuantizationScale(), info.GetQuantizationOffset()); } - case DataType::BFloat16: - { - return std::make_unique(static_cast(data)); - } case DataType::Float16: { return std::make_unique(static_cast(data)); diff --git a/src/backends/reference/workloads/Encoders.hpp b/src/backends/reference/workloads/Encoders.hpp index a7be9e172b..d6d611494d 100644 --- a/src/backends/reference/workloads/Encoders.hpp +++ b/src/backends/reference/workloads/Encoders.hpp @@ -65,10 +65,6 @@ inline std::unique_ptr> MakeEncoder(const TensorInfo& info, void* { return std::make_unique(static_cast(data)); } - case armnn::DataType::BFloat16: - { - return std::make_unique(static_cast(data)); - } case armnn::DataType::Float16: { return std::make_unique(static_cast(data)); diff --git a/src/backends/reference/workloads/RefConvertBf16ToFp32Workload.cpp b/src/backends/reference/workloads/RefConvertBf16ToFp32Workload.cpp deleted file mode 100644 index 2fe2eafb9b..0000000000 --- a/src/backends/reference/workloads/RefConvertBf16ToFp32Workload.cpp +++ /dev/null @@ -1,39 +0,0 @@ -// -// Copyright © 2022 Arm Ltd and Contributors. All rights reserved. -// SPDX-License-Identifier: MIT -// - -#include "RefConvertBf16ToFp32Workload.hpp" -#include "RefWorkloadUtils.hpp" - -#include - -#include - -namespace armnn -{ - -void RefConvertBf16ToFp32Workload::Execute() const -{ - Execute(m_Data.m_Inputs, m_Data.m_Outputs); -} - -void RefConvertBf16ToFp32Workload::ExecuteAsync(ExecutionData& executionData) -{ - WorkingMemDescriptor* workingMemDescriptor = static_cast(executionData.m_Data); - Execute(workingMemDescriptor->m_Inputs, workingMemDescriptor->m_Outputs); -} - -void RefConvertBf16ToFp32Workload::Execute(std::vector inputs, - std::vector outputs) const -{ - ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefConvertBf16ToFp32Workload_Execute"); - - const BFloat16* const input = reinterpret_cast(inputs[0]->Map()); - float* const output = reinterpret_cast(outputs[0]->Map()); - - unsigned int numElements = GetTensorInfo(inputs[0]).GetNumElements(); - armnnUtils::FloatingPointConverter::ConvertBFloat16ToFloat32(input, numElements, output); -} - -} //namespace armnn diff --git a/src/backends/reference/workloads/RefConvertBf16ToFp32Workload.hpp b/src/backends/reference/workloads/RefConvertBf16ToFp32Workload.hpp deleted file mode 100644 index 24dcb0f682..0000000000 --- a/src/backends/reference/workloads/RefConvertBf16ToFp32Workload.hpp +++ /dev/null @@ -1,24 +0,0 @@ -// -// Copyright © 2022 Arm Ltd and Contributors. All rights reserved. 
-// SPDX-License-Identifier: MIT -// - -#pragma once - -#include "RefBaseWorkload.hpp" -#include - -namespace armnn -{ - -class RefConvertBf16ToFp32Workload : public BFloat16ToFloat32Workload -{ -public: - using BFloat16ToFloat32Workload::BFloat16ToFloat32Workload; - void Execute() const override; - void ExecuteAsync(ExecutionData& executionData) override; -private: - void Execute(std::vector inputs, std::vector outputs) const; -}; - -} //namespace armnn diff --git a/src/backends/reference/workloads/RefConvertFp32ToBf16Workload.cpp b/src/backends/reference/workloads/RefConvertFp32ToBf16Workload.cpp deleted file mode 100644 index 71ee95b2aa..0000000000 --- a/src/backends/reference/workloads/RefConvertFp32ToBf16Workload.cpp +++ /dev/null @@ -1,39 +0,0 @@ -// -// Copyright © 2022 Arm Ltd and Contributors. All rights reserved. -// SPDX-License-Identifier: MIT -// - -#include "RefConvertFp32ToBf16Workload.hpp" -#include "RefWorkloadUtils.hpp" - -#include - -#include - -namespace armnn -{ - -void RefConvertFp32ToBf16Workload::Execute() const -{ - Execute(m_Data.m_Inputs, m_Data.m_Outputs); -} - -void RefConvertFp32ToBf16Workload::ExecuteAsync(ExecutionData& executionData) -{ - WorkingMemDescriptor* workingMemDescriptor = static_cast(executionData.m_Data); - Execute(workingMemDescriptor->m_Inputs, workingMemDescriptor->m_Outputs); -} - -void RefConvertFp32ToBf16Workload::Execute(std::vector inputs, - std::vector outputs) const -{ - ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefConvertFp32ToBf16Workload_Execute"); - - const float* const input = reinterpret_cast(inputs[0]->Map()); - BFloat16* const output = reinterpret_cast(outputs[0]->Map()); - - unsigned int numElements = GetTensorInfo(inputs[0]).GetNumElements(); - armnnUtils::FloatingPointConverter::ConvertFloat32ToBFloat16(input, numElements, output); -} - -} //namespace armnn diff --git a/src/backends/reference/workloads/RefConvertFp32ToBf16Workload.hpp b/src/backends/reference/workloads/RefConvertFp32ToBf16Workload.hpp deleted file mode 100644 index c1e57ec37e..0000000000 --- a/src/backends/reference/workloads/RefConvertFp32ToBf16Workload.hpp +++ /dev/null @@ -1,24 +0,0 @@ -// -// Copyright © 2022 Arm Ltd and Contributors. All rights reserved. -// SPDX-License-Identifier: MIT -// - -#pragma once - -#include "RefBaseWorkload.hpp" -#include - -namespace armnn -{ - -class RefConvertFp32ToBf16Workload : public Float32ToBFloat16Workload -{ -public: - using Float32ToBFloat16Workload::Float32ToBFloat16Workload; - void Execute() const override; - void ExecuteAsync(ExecutionData& executionData) override; -private: - void Execute(std::vector inputs, std::vector outputs) const; -}; - -} //namespace armnn diff --git a/src/backends/reference/workloads/RefWorkloads.hpp b/src/backends/reference/workloads/RefWorkloads.hpp index e049d8db2c..afed71bfff 100644 --- a/src/backends/reference/workloads/RefWorkloads.hpp +++ b/src/backends/reference/workloads/RefWorkloads.hpp @@ -17,9 +17,7 @@ #include "RefConvolution3dWorkload.hpp" #include "RefConstantWorkload.hpp" #include "RefConcatWorkload.hpp" -#include "RefConvertBf16ToFp32Workload.hpp" #include "RefConvertFp16ToFp32Workload.hpp" -#include "RefConvertFp32ToBf16Workload.hpp" #include "RefConvertFp32ToFp16Workload.hpp" #include "RefDebugWorkload.hpp" #include "RefDepthToSpaceWorkload.hpp" -- cgit v1.2.1
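Illustrative note (not part of the patch): with the Fp32<->Bf16 conversion layers removed, the commit message above says bf16 execution is now requested by enabling fast math rather than by setting m_ReduceFp32ToBf16 (which throws if set to true). The sketch below shows roughly how a caller might configure that. It assumes the armnn public API of the 22.x releases; the backend name "CpuAcc", the "FastMathEnabled" model option, and the ExecuteNetwork flag pairing (--bf16-turbo-mode together with --enable-fast-math) are the conventional spellings and should be checked against your build.

// Minimal sketch: request bf16 via the backend fast-math option instead of
// the removed m_ReduceFp32ToBf16 reduction.
#include <armnn/ArmNN.hpp>
#include <armnn/BackendOptions.hpp>

armnn::IOptimizedNetworkPtr OptimizeWithBf16FastMath(const armnn::INetwork& network,
                                                     const armnn::IRuntime& runtime)
{
    armnn::OptimizerOptions options;
    options.m_ReduceFp32ToBf16 = false; // leave at the default; true now raises an exception

    // Fast math lets a backend choose bf16 kernels where it considers them profitable.
    armnn::BackendOptions cpuAcc("CpuAcc", {{"FastMathEnabled", true}});
    options.m_ModelOptions.push_back(cpuAcc);

    return armnn::Optimize(network, {armnn::Compute::CpuAcc},
                           runtime.GetDeviceSpec(), options);
}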