author    | Ryan OShea <ryan.oshea3@arm.com>  | 2022-11-07 16:20:48 +0000
committer | ryan.oshea3 <ryan.oshea3@arm.com> | 2022-11-16 15:22:50 +0000
commit    | 31441595009182c985dacbedc70c41ee6664d070 (patch)
tree      | 248a85295aeff4022c9b395fc97748b0a0aa6b35 /src/backends
parent    | bd18eab07a8f30492de1e462b1815189014cb8d5 (diff)
download  | armnn-31441595009182c985dacbedc70c41ee6664d070.tar.gz
IVGCVSW-7214 Disable BF16-Turbo-Mode and remove conversion layers
- Remove Bf16ToFp32 Conversion Layer
- Remove Fp32ToBf16 Conversion Layer
- Remove Bf16 Conversion tests
* Throw exception if the m_ReduceFp32ToBf16 optimizer option is set to true
* Add comments explaining that fast math must be enabled in order to use bf16
* Update docs to tell users to enable fast math for bf16
ExecuteNetwork changes
* Require bf16_turbo_mode to also have fast_math_enabled set to true
- Remove setting m_ReduceFp32ToBf16 optimizer option
Signed-off-by: Ryan OShea <ryan.oshea3@arm.com>
Change-Id: Ibaa6da9d29c96a1ce32ff5196b0847fde9f04a1c
Diffstat (limited to 'src/backends')
42 files changed, 8 insertions, 1154 deletions
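With the ReduceFp32ToBf16 conversion pass removed, the commit message above says BF16 execution is now reached by enabling fast math on the backend rather than by inserting explicit conversion layers. The sketch below is not part of this patch; it is a minimal, hedged illustration of how a caller might request that behaviour through the public Arm NN C++ API of this period (armnn::OptimizerOptions, armnn::BackendOptions and the CpuAcc "FastMathEnabled" model option). The tiny identity network is only a placeholder so the snippet is self-contained.

```cpp
// Minimal sketch (not from this patch): BF16 kernels are reached via the
// backend's fast-math option instead of the removed ReduceFp32ToBf16 pass.
#include <armnn/ArmNN.hpp>
#include <armnn/BackendOptions.hpp>

int main()
{
    using namespace armnn;

    IRuntime::CreationOptions runtimeOptions;
    IRuntimePtr runtime = IRuntime::Create(runtimeOptions);

    // Placeholder identity network; a real caller builds their model here.
    INetworkPtr network = INetwork::Create();
    IConnectableLayer* input  = network->AddInputLayer(0);
    IConnectableLayer* output = network->AddOutputLayer(0);
    input->GetOutputSlot(0).Connect(output->GetInputSlot(0));
    input->GetOutputSlot(0).SetTensorInfo(TensorInfo({1, 4}, DataType::Float32));

    OptimizerOptions optimizerOptions;
    // optimizerOptions.m_ReduceFp32ToBf16 = true; // after this change, Optimize() throws

    // Ask the CpuAcc (Neon) backend to use fast math; on hardware with BF16
    // support this lets the backend select BF16 kernels internally.
    optimizerOptions.m_ModelOptions.push_back(
        BackendOptions("CpuAcc", {{"FastMathEnabled", true}}));

    IOptimizedNetworkPtr optimizedNetwork = Optimize(*network,
                                                     {"CpuAcc"},
                                                     runtime->GetDeviceSpec(),
                                                     optimizerOptions);
    // ... LoadNetwork() and EnqueueWorkload() as usual ...
    return 0;
}
```

In ExecuteNetwork the same pairing is enforced at the command line: after this change the BF16 turbo option is rejected unless fast math is also enabled (exact flag names vary between releases).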
diff --git a/src/backends/backendsCommon/LayerSupportBase.cpp b/src/backends/backendsCommon/LayerSupportBase.cpp index 001037908d..26137f51b0 100644 --- a/src/backends/backendsCommon/LayerSupportBase.cpp +++ b/src/backends/backendsCommon/LayerSupportBase.cpp @@ -164,13 +164,6 @@ bool LayerSupportBase::IsConstantSupported(const TensorInfo&, // output return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported); } -bool LayerSupportBase::IsConvertBf16ToFp32Supported(const TensorInfo&, // input - const TensorInfo&, // output - Optional<std::string&> reasonIfUnsupported) const -{ - return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported); -} - bool LayerSupportBase::IsConvertFp16ToFp32Supported(const TensorInfo&, // input const TensorInfo&, // output Optional<std::string&> reasonIfUnsupported) const @@ -178,14 +171,6 @@ bool LayerSupportBase::IsConvertFp16ToFp32Supported(const TensorInfo&, // input return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported); } -bool LayerSupportBase::IsConvertFp32ToBf16Supported(const TensorInfo&, // input - const TensorInfo&, // output - Optional<std::string&> reasonIfUnsupported) const -{ - return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported); -} - - bool LayerSupportBase::IsConvertFp32ToFp16Supported(const TensorInfo&, // input const TensorInfo&, // output Optional<std::string&> reasonIfUnsupported) const diff --git a/src/backends/backendsCommon/LayerSupportBase.hpp b/src/backends/backendsCommon/LayerSupportBase.hpp index b18af35967..acf24a2d3a 100644 --- a/src/backends/backendsCommon/LayerSupportBase.hpp +++ b/src/backends/backendsCommon/LayerSupportBase.hpp @@ -83,19 +83,11 @@ public: bool IsConstantSupported(const TensorInfo& output, Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override; - bool IsConvertBf16ToFp32Supported(const TensorInfo& input, - const TensorInfo& output, - Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override; - ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "23.08") bool IsConvertFp16ToFp32Supported(const TensorInfo& input, const TensorInfo& output, Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override; - bool IsConvertFp32ToBf16Supported(const TensorInfo& input, - const TensorInfo& output, - Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override; - ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. 
Use IsLayerSupported instead.", "23.08") bool IsConvertFp32ToFp16Supported( const TensorInfo& input, diff --git a/src/backends/backendsCommon/WorkloadData.cpp b/src/backends/backendsCommon/WorkloadData.cpp index 753fe06edb..62dfc6a38b 100644 --- a/src/backends/backendsCommon/WorkloadData.cpp +++ b/src/backends/backendsCommon/WorkloadData.cpp @@ -2222,52 +2222,6 @@ void LstmQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const } } -void ConvertBf16ToFp32QueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const -{ - const std::string descriptorName{"ConvertBf16ToFp32QueueDescriptor"}; - - ValidateNumInputs(workloadInfo, descriptorName, 1); - ValidateNumOutputs(workloadInfo, descriptorName, 1); - - const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0]; - const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0]; - - if (inputTensorInfo.GetDataType() != DataType::BFloat16) - { - throw InvalidArgumentException(descriptorName + ": Input tensor type must be BFloat16."); - } - - if (outputTensorInfo.GetDataType() != DataType::Float32) - { - throw InvalidArgumentException(descriptorName + ": Output tensor type must be Float32."); - } - - ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output"); -} - -void ConvertFp32ToBf16QueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const -{ - const std::string descriptorName{"ConvertFp32ToBf16QueueDescriptor"}; - - ValidateNumInputs(workloadInfo, descriptorName, 1); - ValidateNumOutputs(workloadInfo, descriptorName, 1); - - const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0]; - const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0]; - - if (inputTensorInfo.GetDataType() != DataType::Float32) - { - throw InvalidArgumentException(descriptorName + ": Input tensor type must be Float32."); - } - - if (outputTensorInfo.GetDataType() != DataType::BFloat16) - { - throw InvalidArgumentException(descriptorName + ": Output tensor type must be BFloat16."); - } - - ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output"); -} - void ConvertFp32ToFp16QueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const { const std::string descriptorName{"ConvertFp32ToFp16QueueDescriptor"}; diff --git a/src/backends/backendsCommon/WorkloadFactory.cpp b/src/backends/backendsCommon/WorkloadFactory.cpp index 665ab3f86c..1283f67660 100644 --- a/src/backends/backendsCommon/WorkloadFactory.cpp +++ b/src/backends/backendsCommon/WorkloadFactory.cpp @@ -227,13 +227,6 @@ bool IWorkloadFactory::IsLayerConfigurationSupported(const BackendId& backendId, result = layerSupportObject.IsConstantSupported(OverrideDataType(output, dataType), reason); break; } - case LayerType::ConvertBf16ToFp32: - { - const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); - const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); - result = layerSupportObject.IsConvertBf16ToFp32Supported(input, output, reason); - break; - } case LayerType::ConvertFp16ToFp32: { const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); @@ -241,13 +234,6 @@ bool IWorkloadFactory::IsLayerConfigurationSupported(const BackendId& backendId, result = layerSupportObject.IsConvertFp16ToFp32Supported(input, output, reason); break; } - case LayerType::ConvertFp32ToBf16: - { - const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); - const TensorInfo& output = 
layer.GetOutputSlot(0).GetTensorInfo(); - result = layerSupportObject.IsConvertFp32ToBf16Supported(input, output, reason); - break; - } case LayerType::ConvertFp32ToFp16: { const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); @@ -1630,24 +1616,12 @@ std::unique_ptr<IWorkload> IWorkloadFactory::CreateWorkload(LayerType type, auto constantQueueDescriptor = PolymorphicDowncast<const ConstantQueueDescriptor*>(&descriptor); return CreateConstant(*constantQueueDescriptor, info); } - case LayerType::ConvertBf16ToFp32 : - { - auto convertBf16ToFp32QueueDescriptor - = PolymorphicDowncast<const ConvertBf16ToFp32QueueDescriptor*>(&descriptor); - return CreateConvertBf16ToFp32(*convertBf16ToFp32QueueDescriptor, info); - } case LayerType::ConvertFp16ToFp32: { auto convertFp16ToFp32QueueDescriptor = PolymorphicDowncast<const ConvertFp16ToFp32QueueDescriptor*>(&descriptor); return CreateConvertFp16ToFp32(*convertFp16ToFp32QueueDescriptor, info); } - case LayerType::ConvertFp32ToBf16: - { - auto convertFp32ToBf16QueueDescriptor - = PolymorphicDowncast<const ConvertFp32ToBf16QueueDescriptor*>(&descriptor); - return CreateConvertFp32ToBf16(*convertFp32ToBf16QueueDescriptor, info); - } case LayerType::ConvertFp32ToFp16: { auto convertFp32ToFp16QueueDescriptor @@ -1992,24 +1966,12 @@ std::unique_ptr<IWorkload> IWorkloadFactory::CreateConstant(const ConstantQueueD return std::unique_ptr<IWorkload>(); } -std::unique_ptr<IWorkload> IWorkloadFactory::CreateConvertBf16ToFp32(const ConvertBf16ToFp32QueueDescriptor& /*desc*/, - const WorkloadInfo& /*info*/) const -{ - return std::unique_ptr<IWorkload>(); -} - std::unique_ptr<IWorkload> IWorkloadFactory::CreateConvertFp16ToFp32(const ConvertFp16ToFp32QueueDescriptor& /*desc*/, const WorkloadInfo& /*info*/) const { return std::unique_ptr<IWorkload>(); } -std::unique_ptr<IWorkload> IWorkloadFactory::CreateConvertFp32ToBf16(const ConvertFp32ToBf16QueueDescriptor& /*desc*/, - const WorkloadInfo& /*info*/) const -{ - return std::unique_ptr<IWorkload>(); -} - std::unique_ptr<IWorkload> IWorkloadFactory::CreateConvertFp32ToFp16(const ConvertFp32ToFp16QueueDescriptor& /*desc*/, const WorkloadInfo& /*info*/) const { diff --git a/src/backends/backendsCommon/common.mk b/src/backends/backendsCommon/common.mk index 007cca57fa..3545331c8f 100644 --- a/src/backends/backendsCommon/common.mk +++ b/src/backends/backendsCommon/common.mk @@ -55,9 +55,7 @@ COMMON_TEST_SOURCES := \ test/layerTests/ConstantTestImpl.cpp \ test/layerTests/Conv2dTestImpl.cpp \ test/layerTests/Conv3dTestImpl.cpp \ - test/layerTests/ConvertBf16ToFp32TestImpl.cpp \ test/layerTests/ConvertFp16ToFp32TestImpl.cpp \ - test/layerTests/ConvertFp32ToBf16TestImpl.cpp \ test/layerTests/ConvertFp32ToFp16TestImpl.cpp \ test/layerTests/DebugTestImpl.cpp \ test/layerTests/DepthToSpaceTestImpl.cpp \ diff --git a/src/backends/backendsCommon/test/CMakeLists.txt b/src/backends/backendsCommon/test/CMakeLists.txt index 5e283990f7..232226b5df 100644 --- a/src/backends/backendsCommon/test/CMakeLists.txt +++ b/src/backends/backendsCommon/test/CMakeLists.txt @@ -83,12 +83,8 @@ list(APPEND armnnBackendsCommonUnitTests_sources layerTests/Conv2dTestImpl.hpp layerTests/Conv3dTestImpl.cpp layerTests/Conv3dTestImpl.hpp - layerTests/ConvertBf16ToFp32TestImpl.cpp - layerTests/ConvertBf16ToFp32TestImpl.hpp layerTests/ConvertFp16ToFp32TestImpl.cpp layerTests/ConvertFp16ToFp32TestImpl.hpp - layerTests/ConvertFp32ToBf16TestImpl.cpp - layerTests/ConvertFp32ToBf16TestImpl.hpp layerTests/ConvertFp32ToFp16TestImpl.cpp 
layerTests/ConvertFp32ToFp16TestImpl.hpp layerTests/DebugTestImpl.cpp diff --git a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp index 5fdcd9c57a..18f11a542e 100644 --- a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp +++ b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp @@ -630,12 +630,8 @@ DECLARE_LAYER_POLICY_2_PARAM(Concat) DECLARE_LAYER_POLICY_1_PARAM(Constant) -DECLARE_LAYER_POLICY_1_PARAM(ConvertBf16ToFp32) - DECLARE_LAYER_POLICY_1_PARAM(ConvertFp16ToFp32) -DECLARE_LAYER_POLICY_1_PARAM(ConvertFp32ToBf16) - DECLARE_LAYER_POLICY_1_PARAM(ConvertFp32ToFp16) DECLARE_LAYER_POLICY_2_PARAM(Convolution2d) diff --git a/src/backends/backendsCommon/test/LayerTests.hpp b/src/backends/backendsCommon/test/LayerTests.hpp index 25435b24ec..00bfea5452 100644 --- a/src/backends/backendsCommon/test/LayerTests.hpp +++ b/src/backends/backendsCommon/test/LayerTests.hpp @@ -16,9 +16,7 @@ #include <backendsCommon/test/layerTests/ChannelShuffleTestImpl.hpp> #include <backendsCommon/test/layerTests/ComparisonTestImpl.hpp> #include <backendsCommon/test/layerTests/ConcatTestImpl.hpp> -#include <backendsCommon/test/layerTests/ConvertBf16ToFp32TestImpl.hpp> #include <backendsCommon/test/layerTests/ConvertFp16ToFp32TestImpl.hpp> -#include <backendsCommon/test/layerTests/ConvertFp32ToBf16TestImpl.hpp> #include <backendsCommon/test/layerTests/ConvertFp32ToFp16TestImpl.hpp> #include <backendsCommon/test/layerTests/Conv2dTestImpl.hpp> #include <backendsCommon/test/layerTests/Conv3dTestImpl.hpp> diff --git a/src/backends/backendsCommon/test/layerTests/ConvertBf16ToFp32TestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ConvertBf16ToFp32TestImpl.cpp deleted file mode 100644 index 0dd8b598ac..0000000000 --- a/src/backends/backendsCommon/test/layerTests/ConvertBf16ToFp32TestImpl.cpp +++ /dev/null @@ -1,62 +0,0 @@ -// -// Copyright © 2020 Arm Ltd and Contributors. All rights reserved. 
-// SPDX-License-Identifier: MIT -// - -#include "ConvertBf16ToFp32TestImpl.hpp" - -#include <armnnTestUtils/TensorCopyUtils.hpp> -#include <armnnTestUtils/WorkloadTestUtils.hpp> - -#include <armnnTestUtils/TensorHelpers.hpp> - -LayerTestResult<float, 4> ConvertBf16ToFp32Test( - armnn::IWorkloadFactory& workloadFactory, - const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, - const armnn::ITensorHandleFactory& tensorHandleFactory) -{ - IgnoreUnused(memoryManager); - - const armnn::TensorInfo inputTensorInfo({1, 3, 2, 3}, armnn::DataType::BFloat16); - const armnn::TensorInfo outputTensorInfo({1, 3, 2, 3}, armnn::DataType::Float32); - - std::vector<armnn::BFloat16> inputValues = armnnUtils::QuantizedVector<armnn::BFloat16>( - { - -37.5f, -15.2f, -8.76f, -2.0f, -1.5f, -1.3f, -0.5f, -0.4f, 0.0f, - 1.0f, 0.4f, 0.5f, 1.3f, 1.5f, 2.0f, 8.76f, 15.2f, 37.5f - }, - 1.0f, 0); - - std::vector<float> actualOutput(outputTensorInfo.GetNumElements()); - std::vector<float> expectedOutput = - { - -37.5f, -15.2f, -8.76f, -2.0f, -1.5f, -1.3f, -0.5f, -0.4f, 0.0f, - 1.0f, 0.4f, 0.5f, 1.3f, 1.5f, 2.0f, 8.76f, 15.2f, 37.5f - }; - - std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo); - std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo); - - armnn::ConvertBf16ToFp32QueueDescriptor data; - armnn::WorkloadInfo info; - AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get()); - AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get()); - - std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::ConvertBf16ToFp32, - data, - info); - - inputHandle->Allocate(); - outputHandle->Allocate(); - - CopyDataToITensorHandle(inputHandle.get(), inputValues.data()); - - workload->Execute(); - - CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get()); - - return LayerTestResult<float, 4>(actualOutput, - expectedOutput, - outputHandle->GetShape(), - outputTensorInfo.GetShape()); -} diff --git a/src/backends/backendsCommon/test/layerTests/ConvertBf16ToFp32TestImpl.hpp b/src/backends/backendsCommon/test/layerTests/ConvertBf16ToFp32TestImpl.hpp deleted file mode 100644 index bcb0d6f124..0000000000 --- a/src/backends/backendsCommon/test/layerTests/ConvertBf16ToFp32TestImpl.hpp +++ /dev/null @@ -1,18 +0,0 @@ -// -// Copyright © 2020 Arm Ltd. All rights reserved. -// SPDX-License-Identifier: MIT -// - -#pragma once - -#include <armnnTestUtils/LayerTestResult.hpp> - -#include <BFloat16.hpp> - -#include <armnn/backends/IBackendInternal.hpp> -#include <armnn/backends/WorkloadFactory.hpp> - -LayerTestResult<float, 4> ConvertBf16ToFp32Test( - armnn::IWorkloadFactory& workloadFactory, - const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, - const armnn::ITensorHandleFactory& tensorHandleFactory); diff --git a/src/backends/backendsCommon/test/layerTests/ConvertFp32ToBf16TestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ConvertFp32ToBf16TestImpl.cpp deleted file mode 100644 index 5ee8f1dd9a..0000000000 --- a/src/backends/backendsCommon/test/layerTests/ConvertFp32ToBf16TestImpl.cpp +++ /dev/null @@ -1,84 +0,0 @@ -// -// Copyright © 2020 Arm Ltd and Contributors. All rights reserved. 
-// SPDX-License-Identifier: MIT -// - -#include "ConvertFp32ToBf16TestImpl.hpp" - -#include <armnnTestUtils/TensorCopyUtils.hpp> -#include <armnnTestUtils/WorkloadTestUtils.hpp> - -#include <armnnTestUtils/TensorHelpers.hpp> - -LayerTestResult<armnn::BFloat16, 4> ConvertFp32ToBf16Test( - armnn::IWorkloadFactory& workloadFactory, - const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, - const armnn::ITensorHandleFactory& tensorHandleFactory) -{ - IgnoreUnused(memoryManager); - - const armnn::TensorInfo inputTensorInfo({1, 2, 4, 3}, armnn::DataType::Float32); - const armnn::TensorInfo outputTensorInfo({1, 2, 4, 3}, armnn::DataType::BFloat16); - - std::vector<float> input = - { - -37.5f, -15.2f, -8.76f, - -2.0f, -1.5f, -1.3f, - -0.5f, -0.4f, 0.0f, - 1.0f, 0.4f, 0.5f, - 1.3f, 1.5f, 2.0f, - 8.76f, 15.2f, 37.5f, - 3.8f, // 0x40733333 Round down - 3.1055E+29f, // 0x707ADC3C Round up - 9.149516E-10f, // 0x307B7FFF Round down - -3.8f, // 0xC0733333 Round down - -3.1055E+29f, // 0xF07ADC3C Round up - -9.149516E-10f // 0xB07B7FFF Round down - }; - - std::vector<armnn::BFloat16> expectedOutput = armnnUtils::QuantizedVector<armnn::BFloat16>( - { - -37.5f, -15.2f, -8.76f, - -2.0f, -1.5f, -1.3f, - -0.5f, -0.4f, 0.0f, - 1.0f, 0.4f, 0.5f, - 1.3f, 1.5f, 2.0f, - 8.76f, 15.2f, 37.5f, - 3.796875f, // 0x4073 - 3.1072295E29f, // 0x707B - 9.131327E-10f, // 0x307B - -3.796875f, // 0xC073 - -3.1072295E29f, // 0xF07B - -9.131327E-10f // 0xB07B - }, - 1.0f, 0); - - std::vector<armnn::BFloat16> actualOutput(outputTensorInfo.GetNumElements()); - - std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo); - std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo); - - armnn::ConvertFp32ToBf16QueueDescriptor data; - armnn::WorkloadInfo info; - AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get()); - AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get()); - - std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::ConvertFp32ToBf16, - data, - info); - - inputHandle->Allocate(); - outputHandle->Allocate(); - - CopyDataToITensorHandle(inputHandle.get(), input.data()); - - workload->Execute(); - - CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get()); - - return LayerTestResult<armnn::BFloat16, 4>(actualOutput, - expectedOutput, - outputHandle->GetShape(), - outputTensorInfo.GetShape()); - -} diff --git a/src/backends/backendsCommon/test/layerTests/ConvertFp32ToBf16TestImpl.hpp b/src/backends/backendsCommon/test/layerTests/ConvertFp32ToBf16TestImpl.hpp deleted file mode 100644 index c2286d9c41..0000000000 --- a/src/backends/backendsCommon/test/layerTests/ConvertFp32ToBf16TestImpl.hpp +++ /dev/null @@ -1,18 +0,0 @@ -// -// Copyright © 2020 Arm Ltd. All rights reserved. 
-// SPDX-License-Identifier: MIT -// - -#pragma once - -#include <armnnTestUtils/LayerTestResult.hpp> - -#include <BFloat16.hpp> - -#include <armnn/backends/IBackendInternal.hpp> -#include <armnn/backends/WorkloadFactory.hpp> - -LayerTestResult<armnn::BFloat16, 4> ConvertFp32ToBf16Test( - armnn::IWorkloadFactory& workloadFactory, - const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, - const armnn::ITensorHandleFactory& tensorHandleFactory); diff --git a/src/backends/cl/ClLayerSupport.cpp b/src/backends/cl/ClLayerSupport.cpp index 9c40391f1a..a61a5bb640 100644 --- a/src/backends/cl/ClLayerSupport.cpp +++ b/src/backends/cl/ClLayerSupport.cpp @@ -247,14 +247,6 @@ bool ClLayerSupport::IsLayerSupported(const LayerType& type, return IsConvertFp16ToFp32Supported(infos[0], infos[1], reasonIfUnsupported); case LayerType::ConvertFp32ToFp16: return IsConvertFp32ToFp16Supported(infos[0], infos[1], reasonIfUnsupported); - case LayerType::ConvertBf16ToFp32: - return LayerSupportBase::IsConvertBf16ToFp32Supported(infos[0], - infos[1], - reasonIfUnsupported); - case LayerType::ConvertFp32ToBf16: - return LayerSupportBase::IsConvertFp32ToBf16Supported(infos[0], - infos[1], - reasonIfUnsupported); case LayerType::Convolution2d: { if (infos.size() != 4) diff --git a/src/backends/neon/NeonLayerSupport.cpp b/src/backends/neon/NeonLayerSupport.cpp index 7f311d8684..4c97855668 100644 --- a/src/backends/neon/NeonLayerSupport.cpp +++ b/src/backends/neon/NeonLayerSupport.cpp @@ -220,12 +220,8 @@ bool NeonLayerSupport::IsLayerSupported(const LayerType& type, } case LayerType::Constant: return IsConstantSupported(infos[0], reasonIfUnsupported); - case LayerType::ConvertBf16ToFp32: - return IsConvertBf16ToFp32Supported(infos[0], infos[1], reasonIfUnsupported); case LayerType::ConvertFp16ToFp32: return IsConvertFp16ToFp32Supported(infos[0], infos[1], reasonIfUnsupported); - case LayerType::ConvertFp32ToBf16: - return IsConvertFp32ToBf16Supported(infos[0], infos[1], reasonIfUnsupported); case LayerType::ConvertFp32ToFp16: return IsConvertFp32ToFp16Supported(infos[0], infos[1], reasonIfUnsupported); case LayerType::Convolution2d: @@ -765,16 +761,6 @@ bool NeonLayerSupport::IsConstantSupported(const TensorInfo& output, output); } -bool NeonLayerSupport::IsConvertBf16ToFp32Supported(const TensorInfo& input, - const TensorInfo& output, - Optional<std::string&> reasonIfUnsupported) const -{ - armnn::IgnoreUnused(input); - armnn::IgnoreUnused(output); - armnn::IgnoreUnused(reasonIfUnsupported); - return true; -} - bool NeonLayerSupport::IsConvertFp16ToFp32Supported(const TensorInfo& input, const TensorInfo& output, Optional<std::string&> reasonIfUnsupported) const @@ -785,16 +771,6 @@ bool NeonLayerSupport::IsConvertFp16ToFp32Supported(const TensorInfo& input, return true; } -bool NeonLayerSupport::IsConvertFp32ToBf16Supported(const TensorInfo& input, - const TensorInfo& output, - Optional<std::string&> reasonIfUnsupported) const -{ - armnn::IgnoreUnused(input); - armnn::IgnoreUnused(output); - armnn::IgnoreUnused(reasonIfUnsupported); - return true; -} - bool NeonLayerSupport::IsConvertFp32ToFp16Supported(const TensorInfo& input, const TensorInfo& output, Optional<std::string&> reasonIfUnsupported) const diff --git a/src/backends/neon/NeonLayerSupport.hpp b/src/backends/neon/NeonLayerSupport.hpp index e916162f93..374a9049c8 100644 --- a/src/backends/neon/NeonLayerSupport.hpp +++ b/src/backends/neon/NeonLayerSupport.hpp @@ -84,18 +84,10 @@ public: bool IsConstantSupported(const TensorInfo& output, 
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override; - bool IsConvertBf16ToFp32Supported(const TensorInfo& input, - const TensorInfo& output, - Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override; - bool IsConvertFp16ToFp32Supported(const TensorInfo& input, const TensorInfo& output, Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override; - bool IsConvertFp32ToBf16Supported(const TensorInfo& input, - const TensorInfo& output, - Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override; - bool IsConvertFp32ToFp16Supported(const TensorInfo& input, const TensorInfo& output, Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override; diff --git a/src/backends/neon/NeonWorkloadFactory.cpp b/src/backends/neon/NeonWorkloadFactory.cpp index d5a7c684d3..dccd4a3a36 100644 --- a/src/backends/neon/NeonWorkloadFactory.cpp +++ b/src/backends/neon/NeonWorkloadFactory.cpp @@ -194,24 +194,12 @@ std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateWorkload(LayerType type, auto constantQueueDescriptor = PolymorphicDowncast<const ConstantQueueDescriptor*>(&descriptor); return std::make_unique<NeonConstantWorkload>(*constantQueueDescriptor, info); } - case LayerType::ConvertBf16ToFp32 : - { - auto convertBf16ToFp32QueueDescriptor - = PolymorphicDowncast<const ConvertBf16ToFp32QueueDescriptor*>(&descriptor); - return std::make_unique<NeonConvertBf16ToFp32Workload>(*convertBf16ToFp32QueueDescriptor, info); - } case LayerType::ConvertFp16ToFp32 : { auto convertFp16ToFp32QueueDescriptor = PolymorphicDowncast<const ConvertFp16ToFp32QueueDescriptor*>(&descriptor); return std::make_unique<NeonConvertFp16ToFp32Workload>(*convertFp16ToFp32QueueDescriptor, info); } - case LayerType::ConvertFp32ToBf16 : - { - auto convertFp32ToBf16QueueDescriptor - = PolymorphicDowncast<const ConvertFp32ToBf16QueueDescriptor*>(&descriptor); - return std::make_unique<NeonConvertFp32ToBf16Workload>(*convertFp32ToBf16QueueDescriptor, info); - } case LayerType::ConvertFp32ToFp16 : { auto convertFp32ToFp16QueueDescriptor @@ -655,13 +643,6 @@ std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateConstant(const ConstantQue return std::make_unique<NeonConstantWorkload>(descriptor, info); } -std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateConvertBf16ToFp32( - const ConvertBf16ToFp32QueueDescriptor& descriptor, - const WorkloadInfo& info) const -{ - return std::make_unique<NeonConvertBf16ToFp32Workload>(descriptor, info); -} - std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateConvertFp16ToFp32( const ConvertFp16ToFp32QueueDescriptor& descriptor, const WorkloadInfo& info) const @@ -669,13 +650,6 @@ std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateConvertFp16ToFp32( return std::make_unique<NeonConvertFp16ToFp32Workload>(descriptor, info); } -std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateConvertFp32ToBf16( - const ConvertFp32ToBf16QueueDescriptor& descriptor, - const WorkloadInfo& info) const -{ - return std::make_unique<NeonConvertFp32ToBf16Workload>(descriptor, info); -} - std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateConvertFp32ToFp16( const ConvertFp32ToFp16QueueDescriptor& descriptor, const WorkloadInfo& info) const diff --git a/src/backends/neon/NeonWorkloadFactory.hpp b/src/backends/neon/NeonWorkloadFactory.hpp index 0c116086d4..e4f545900a 100644 --- a/src/backends/neon/NeonWorkloadFactory.hpp +++ b/src/backends/neon/NeonWorkloadFactory.hpp @@ -108,21 +108,11 @@ public: 
ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable " "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "23.08") - std::unique_ptr<IWorkload> CreateConvertBf16ToFp32(const ConvertBf16ToFp32QueueDescriptor& descriptor, - const WorkloadInfo& info) const override; - - ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable " - "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "23.08") std::unique_ptr<IWorkload> CreateConvertFp16ToFp32(const ConvertFp16ToFp32QueueDescriptor& descriptor, const WorkloadInfo& info) const override; ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable " "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "23.08") - std::unique_ptr<IWorkload> CreateConvertFp32ToBf16(const ConvertFp32ToBf16QueueDescriptor& descriptor, - const WorkloadInfo& info) const override; - - ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable " - "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "23.08") std::unique_ptr<IWorkload> CreateConvertFp32ToFp16(const ConvertFp32ToFp16QueueDescriptor& descriptor, const WorkloadInfo& info) const override; diff --git a/src/backends/neon/backend.mk b/src/backends/neon/backend.mk index b1c0103426..bbc55547a0 100644 --- a/src/backends/neon/backend.mk +++ b/src/backends/neon/backend.mk @@ -34,8 +34,6 @@ BACKEND_SOURCES := \ workloads/NeonComparisonWorkload.cpp \ workloads/NeonConcatWorkload.cpp \ workloads/NeonConstantWorkload.cpp \ - workloads/NeonConvertBf16ToFp32Workload.cpp \ - workloads/NeonConvertFp32ToBf16Workload.cpp \ workloads/NeonConvertFp16ToFp32Workload.cpp \ workloads/NeonConvertFp32ToFp16Workload.cpp \ workloads/NeonConvolution2dWorkload.cpp \ diff --git a/src/backends/neon/test/NeonLayerTests.cpp b/src/backends/neon/test/NeonLayerTests.cpp index 88e513e62f..2512821a85 100644 --- a/src/backends/neon/test/NeonLayerTests.cpp +++ b/src/backends/neon/test/NeonLayerTests.cpp @@ -743,12 +743,6 @@ ARMNN_AUTO_TEST_CASE_WITH_THF(ConcatUint8, ConcatUint8Test) ARMNN_AUTO_TEST_CASE_WITH_THF(ConcatUint8DifferentInputOutputQParam, ConcatDifferentInputOutputQParamTest<DataType::QAsymmU8>, false) -// Convert from BFloat16 to Float32 -ARMNN_AUTO_TEST_CASE_WITH_THF(ConvertBf16ToFp32, ConvertBf16ToFp32Test) - -// Convert from Float32 to BFloat16 -ARMNN_AUTO_TEST_CASE_WITH_THF(ConvertFp32ToBf16, ConvertFp32ToBf16Test) - // Fully Connected ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleFullyConnected, FullyConnectedFloat32Test, false, false) ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleFullyConnectedWithBias, FullyConnectedFloat32Test, true, false) @@ -798,7 +792,6 @@ ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize1QAsymmU8, RankDimSize1Test<DataType::Q ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize1Signed32, RankDimSize1Test<DataType::Signed32>) ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize1QSymmS16, RankDimSize1Test<DataType::QSymmS16>) ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize1QAsymmS8, RankDimSize1Test<DataType::QAsymmS8>) -ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize1BFloat16, RankDimSize1Test<DataType::BFloat16>) ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize2Float16, RankDimSize2Test<DataType::Float16>) ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize2Float32, RankDimSize2Test<DataType::Float32>) @@ -806,7 +799,6 @@ ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize2QAsymmU8, RankDimSize2Test<DataType::Q ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize2Signed32, RankDimSize2Test<DataType::Signed32>) ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize2QSymmS16, 
RankDimSize2Test<DataType::QSymmS16>) ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize2QAsymmS8, RankDimSize2Test<DataType::QAsymmS8>) -ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize2BFloat16, RankDimSize2Test<DataType::BFloat16>) ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize3Float16, RankDimSize3Test<DataType::Float16>) ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize3Float32, RankDimSize3Test<DataType::Float32>) @@ -814,7 +806,6 @@ ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize3QAsymmU8, RankDimSize3Test<DataType::Q ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize3Signed32, RankDimSize3Test<DataType::Signed32>) ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize3QSymmS16, RankDimSize3Test<DataType::QSymmS16>) ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize3QAsymmS8, RankDimSize3Test<DataType::QAsymmS8>) -ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize3BFloat16, RankDimSize3Test<DataType::BFloat16>) ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize4Float16, RankDimSize4Test<DataType::Float16>) ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize4Float32, RankDimSize4Test<DataType::Float32>) @@ -822,7 +813,6 @@ ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize4QAsymmU8, RankDimSize4Test<DataType::Q ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize4Signed32, RankDimSize4Test<DataType::Signed32>) ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize4QSymmS16, RankDimSize4Test<DataType::QSymmS16>) ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize4QAsymmS8, RankDimSize4Test<DataType::QAsymmS8>) -ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize4BFloat16, RankDimSize4Test<DataType::BFloat16>) // InstanceNormalization ARMNN_AUTO_TEST_CASE_WITH_THF(InstanceNormFloat32Nchw, InstanceNormFloat32Test, DataLayout::NCHW); diff --git a/src/backends/neon/workloads/CMakeLists.txt b/src/backends/neon/workloads/CMakeLists.txt index dd09ecf015..a3eb883079 100644 --- a/src/backends/neon/workloads/CMakeLists.txt +++ b/src/backends/neon/workloads/CMakeLists.txt @@ -28,12 +28,8 @@ list(APPEND armnnNeonBackendWorkloads_sources NeonConcatWorkload.hpp NeonConstantWorkload.cpp NeonConstantWorkload.hpp - NeonConvertBf16ToFp32Workload.cpp - NeonConvertBf16ToFp32Workload.hpp NeonConvertFp16ToFp32Workload.cpp NeonConvertFp16ToFp32Workload.hpp - NeonConvertFp32ToBf16Workload.cpp - NeonConvertFp32ToBf16Workload.hpp NeonConvertFp32ToFp16Workload.cpp NeonConvertFp32ToFp16Workload.hpp NeonConvolution2dWorkload.cpp diff --git a/src/backends/neon/workloads/NeonConvertBf16ToFp32Workload.cpp b/src/backends/neon/workloads/NeonConvertBf16ToFp32Workload.cpp deleted file mode 100644 index 7a2ff9ac1a..0000000000 --- a/src/backends/neon/workloads/NeonConvertBf16ToFp32Workload.cpp +++ /dev/null @@ -1,81 +0,0 @@ -// -// Copyright © 2020 Arm Ltd and Contributors. All rights reserved. 
-// SPDX-License-Identifier: MIT -// - -#include "NeonConvertBf16ToFp32Workload.hpp" - -#include <armnnUtils/FloatingPointConverter.hpp> - -#include <BFloat16.hpp> - -#include <backendsCommon/WorkloadUtils.hpp> - -namespace armnn -{ - -NeonConvertBf16ToFp32Workload::NeonConvertBf16ToFp32Workload(const ConvertBf16ToFp32QueueDescriptor& descriptor, - const WorkloadInfo& info) - : BFloat16ToFloat32Workload<ConvertBf16ToFp32QueueDescriptor>(descriptor, info) -{ - this->m_Data.ValidateInputsOutputs("NeonConvertBf16ToFp32Workload", 1, 1); - GatherTensorHandlePairs(descriptor, m_TensorHandlePairs); -} - -void NeonConvertBf16ToFp32Workload::Execute() const -{ - ARMNN_SCOPED_PROFILING_EVENT_NEON_GUID("NeonConvertBf16ToFp32Workload_Execute", this->GetGuid()); - - auto convertFunc = [](uint8_t* dst, const uint8_t* src, size_t size) - { - auto input = reinterpret_cast<const BFloat16*>(src); - auto output = reinterpret_cast<float*>(dst); - size_t numElements = size/2; // 2 bytes per Bf16 - armnnUtils::FloatingPointConverter::ConvertBFloat16ToFloat32(input, numElements, output); - }; - - for (const auto& pair : m_TensorHandlePairs) - { - CopyTensorContentsGeneric(pair.first, pair.second, convertFunc); - } -} - -void NeonConvertBf16ToFp32Workload::ReplaceInputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot) -{ - ITensorHandle* backupHandle = this->m_Data.m_Inputs[slot]; - this->m_Data.m_Inputs[slot] = tensorHandle; - try - { - Reconfigure(); - } - catch(armnn::UnimplementedException& e) - { - // Cannot reconfigure, revert the slot back and throw the exception. - this->m_Data.m_Inputs[slot] = backupHandle; - throw e; - } -} - -// Replace output tensor handle with the given TensorHandle -void NeonConvertBf16ToFp32Workload::ReplaceOutputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot) -{ - ITensorHandle* backupHandle = this->m_Data.m_Inputs[slot]; - this->m_Data.m_Inputs[slot] = tensorHandle; - try - { - Reconfigure(); - } - catch(armnn::UnimplementedException& e) - { - // Cannot reconfigure, revert the slot back and throw the exception. - this->m_Data.m_Inputs[slot] = backupHandle; - throw e; - } -} - -void NeonConvertBf16ToFp32Workload::Reconfigure() -{ - throw armnn::UnimplementedException("Reconfigure not implemented for this workload"); -} - -} //namespace armnn diff --git a/src/backends/neon/workloads/NeonConvertBf16ToFp32Workload.hpp b/src/backends/neon/workloads/NeonConvertBf16ToFp32Workload.hpp deleted file mode 100644 index 9d44ad2cac..0000000000 --- a/src/backends/neon/workloads/NeonConvertBf16ToFp32Workload.hpp +++ /dev/null @@ -1,31 +0,0 @@ -// -// Copyright © 2020 Arm Ltd and Contributors. All rights reserved. 
-// SPDX-License-Identifier: MIT -// - -#pragma once - -#include <armnn/backends/Workload.hpp> -#include <armnn/backends/WorkloadData.hpp> -#include <neon/workloads/NeonWorkloadUtils.hpp> - -namespace armnn -{ - -class NeonConvertBf16ToFp32Workload : public BFloat16ToFloat32Workload<ConvertBf16ToFp32QueueDescriptor> -{ -public: - NeonConvertBf16ToFp32Workload(const ConvertBf16ToFp32QueueDescriptor& descriptor, const WorkloadInfo& info); - virtual void Execute() const override; - // Replace input tensor handle with the given TensorHandle - void ReplaceInputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot) override; - - // Replace output tensor handle with the given TensorHandle - void ReplaceOutputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot) override; -private: - using TensorHandlePair = std::pair<const ITensorHandle*, ITensorHandle*>; - std::vector<TensorHandlePair> m_TensorHandlePairs; - virtual void Reconfigure(); -}; - -} //namespace armnn diff --git a/src/backends/neon/workloads/NeonConvertFp32ToBf16Workload.cpp b/src/backends/neon/workloads/NeonConvertFp32ToBf16Workload.cpp deleted file mode 100644 index acd1a1ea8f..0000000000 --- a/src/backends/neon/workloads/NeonConvertFp32ToBf16Workload.cpp +++ /dev/null @@ -1,82 +0,0 @@ -// -// Copyright © 2020 Arm Ltd and Contributors. All rights reserved. -// SPDX-License-Identifier: MIT -// - -#include "NeonConvertFp32ToBf16Workload.hpp" - -#include <BFloat16.hpp> -#include <Profiling.hpp> - -#include <armnnUtils/FloatingPointConverter.hpp> - -#include <backendsCommon/WorkloadUtils.hpp> - -namespace armnn -{ - -NeonConvertFp32ToBf16Workload::NeonConvertFp32ToBf16Workload(const ConvertFp32ToBf16QueueDescriptor& descriptor, - const WorkloadInfo& info) - : Float32ToBFloat16Workload<ConvertFp32ToBf16QueueDescriptor>(descriptor, info) -{ - this->m_Data.ValidateInputsOutputs("NeonConvertFp32ToBf16Workload", 1, 1); - GatherTensorHandlePairs(descriptor, m_TensorHandlePairs); -} - -void NeonConvertFp32ToBf16Workload::Execute() const -{ - ARMNN_SCOPED_PROFILING_EVENT_NEON_GUID("NeonConvertFp32ToBf16Workload_Execute", this->GetGuid()); - - auto convertFunc = [](uint8_t* dst, const uint8_t* src, size_t size) - { - auto input = reinterpret_cast<const float*>(src); - auto output = reinterpret_cast<BFloat16*>(dst); - size_t numElements = size/2; // 2 bytes per bf16 - armnnUtils::FloatingPointConverter::ConvertFloat32ToBFloat16(input, numElements, output); - }; - - for (const auto& pair : m_TensorHandlePairs) - { - CopyTensorContentsGeneric(pair.first, pair.second, convertFunc); - } -} - -void NeonConvertFp32ToBf16Workload::ReplaceInputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot) -{ - ITensorHandle* backupHandle = this->m_Data.m_Inputs[slot]; - this->m_Data.m_Inputs[slot] = tensorHandle; - try - { - Reconfigure(); - } - catch(armnn::UnimplementedException& e) - { - // Cannot reconfigure, revert the slot back and throw the exception. - this->m_Data.m_Inputs[slot] = backupHandle; - throw e; - } -} - -// Replace output tensor handle with the given TensorHandle -void NeonConvertFp32ToBf16Workload::ReplaceOutputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot) -{ - ITensorHandle* backupHandle = this->m_Data.m_Inputs[slot]; - this->m_Data.m_Inputs[slot] = tensorHandle; - try - { - Reconfigure(); - } - catch(armnn::UnimplementedException& e) - { - // Cannot reconfigure, revert the slot back and throw the exception. 
- this->m_Data.m_Inputs[slot] = backupHandle; - throw e; - } -} - -void NeonConvertFp32ToBf16Workload::Reconfigure() -{ - throw armnn::UnimplementedException("Reconfigure not implemented for this workload"); -} - -} //namespace armnn diff --git a/src/backends/neon/workloads/NeonConvertFp32ToBf16Workload.hpp b/src/backends/neon/workloads/NeonConvertFp32ToBf16Workload.hpp deleted file mode 100644 index 2304f8a1d4..0000000000 --- a/src/backends/neon/workloads/NeonConvertFp32ToBf16Workload.hpp +++ /dev/null @@ -1,31 +0,0 @@ -// -// Copyright © 2020 Arm Ltd and Contributors. All rights reserved. -// SPDX-License-Identifier: MIT -// - -#pragma once - -#include <armnn/backends/Workload.hpp> -#include <armnn/backends/WorkloadData.hpp> -#include <neon/workloads/NeonWorkloadUtils.hpp> - -namespace armnn -{ - -class NeonConvertFp32ToBf16Workload : public Float32ToBFloat16Workload<ConvertFp32ToBf16QueueDescriptor> -{ -public: - NeonConvertFp32ToBf16Workload(const ConvertFp32ToBf16QueueDescriptor& descriptor, const WorkloadInfo& info); - virtual void Execute() const override; - // Replace input tensor handle with the given TensorHandle - void ReplaceInputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot) override; - - // Replace output tensor handle with the given TensorHandle - void ReplaceOutputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot) override; -private: - using TensorHandlePair = std::pair<const ITensorHandle*, ITensorHandle*>; - std::vector<TensorHandlePair> m_TensorHandlePairs; - virtual void Reconfigure(); -}; - -} //namespace armnn diff --git a/src/backends/neon/workloads/NeonWorkloads.hpp b/src/backends/neon/workloads/NeonWorkloads.hpp index c9c5421804..01fd2f7dba 100644 --- a/src/backends/neon/workloads/NeonWorkloads.hpp +++ b/src/backends/neon/workloads/NeonWorkloads.hpp @@ -16,9 +16,7 @@ #include "NeonComparisonWorkload.hpp" #include "NeonConcatWorkload.hpp" #include "NeonConstantWorkload.hpp" -#include "NeonConvertBf16ToFp32Workload.hpp" #include "NeonConvertFp16ToFp32Workload.hpp" -#include "NeonConvertFp32ToBf16Workload.hpp" #include "NeonConvertFp32ToFp16Workload.hpp" #include "NeonConvolution2dWorkload.hpp" #include "NeonConvolution3dWorkload.hpp" diff --git a/src/backends/reference/RefLayerSupport.cpp b/src/backends/reference/RefLayerSupport.cpp index 40909019ba..669c91d628 100644 --- a/src/backends/reference/RefLayerSupport.cpp +++ b/src/backends/reference/RefLayerSupport.cpp @@ -120,12 +120,8 @@ bool RefLayerSupport::IsLayerSupported(const LayerType& type, } case LayerType::Constant: return IsConstantSupported(infos[0], reasonIfUnsupported); - case LayerType::ConvertBf16ToFp32: - return IsConvertBf16ToFp32Supported(infos[0], infos[1], reasonIfUnsupported); case LayerType::ConvertFp16ToFp32: return IsConvertFp16ToFp32Supported(infos[0], infos[1], reasonIfUnsupported); - case LayerType::ConvertFp32ToBf16: - return IsConvertFp32ToBf16Supported(infos[0], infos[1], reasonIfUnsupported); case LayerType::ConvertFp32ToFp16: return IsConvertFp32ToFp16Supported(infos[0], infos[1], reasonIfUnsupported); case LayerType::Convolution2d: @@ -518,7 +514,6 @@ bool RefLayerSupport::IsActivationSupported(const TensorInfo& input, // Define supported types. 
std::array<DataType,6> supportedTypes = { - DataType::BFloat16, DataType::Float32, DataType::Float16, DataType::QAsymmS8, @@ -585,7 +580,6 @@ bool RefLayerSupport::IsAdditionSupported(const TensorInfo& input0, bool supported = true; std::array<DataType,7> supportedTypes = { - DataType::BFloat16, DataType::Float32, DataType::Float16, DataType::QAsymmS8, @@ -623,7 +617,6 @@ bool RefLayerSupport::IsArgMinMaxSupported(const armnn::TensorInfo &input, const std::array<DataType, 8> supportedInputTypes = { - DataType::BFloat16, DataType::Float16, DataType::Float32, DataType::QAsymmS8, @@ -658,7 +651,6 @@ bool RefLayerSupport::IsBatchMatMulSupported(const TensorInfo& inputX, std::array<DataType, 6> supportedTypes = { - DataType::BFloat16, DataType::Float16, DataType::Float32, DataType::QAsymmS8, @@ -707,7 +699,6 @@ bool RefLayerSupport::IsBatchNormalizationSupported(const TensorInfo& input, std::array<DataType, 6> supportedTypes = { - DataType::BFloat16, DataType::Float32, DataType::Float16, DataType::QAsymmS8, @@ -757,7 +748,6 @@ bool RefLayerSupport::IsBatchToSpaceNdSupported(const TensorInfo& input, // Define supported types. std::array<DataType,6> supportedTypes = { - DataType::BFloat16, DataType::Float32, DataType::Float16, DataType::QAsymmS8, @@ -797,7 +787,6 @@ bool RefLayerSupport::IsCastSupported(const TensorInfo& input, { std::array<DataType, 9> supportedInputTypes = { - DataType::BFloat16, DataType::Float32, DataType::Float16, DataType::QSymmS8, @@ -832,7 +821,6 @@ bool RefLayerSupport::IsChannelShuffleSupported(const TensorInfo& input, // Define supported output and inputs types. std::array<DataType, 7> supportedTypes = { - DataType::BFloat16, DataType::Float32, DataType::Float16, DataType::QAsymmS8, @@ -864,7 +852,6 @@ bool RefLayerSupport::IsComparisonSupported(const TensorInfo& input0, std::array<DataType, 8> supportedInputTypes = { DataType::Boolean, - DataType::BFloat16, DataType::Float32, DataType::Float16, DataType::QAsymmS8, @@ -896,7 +883,6 @@ bool RefLayerSupport::IsConcatSupported(const std::vector<const TensorInfo*> inp bool supported = true; std::array<DataType,7> supportedTypes = { - DataType::BFloat16, DataType::Float32, DataType::Float16, DataType::QAsymmS8, @@ -925,7 +911,6 @@ bool RefLayerSupport::IsConstantSupported(const TensorInfo& output, { std::array<DataType,8> supportedTypes = { - DataType::BFloat16, DataType::Float16, DataType::Float32, DataType::QAsymmS8, @@ -939,21 +924,6 @@ bool RefLayerSupport::IsConstantSupported(const TensorInfo& output, "Reference constant: output is not a supported type."); } -bool RefLayerSupport::IsConvertBf16ToFp32Supported(const TensorInfo& input, - const TensorInfo& output, - Optional<std::string&> reasonIfUnsupported) const -{ - bool supported = true; - - supported &= CheckSupportRule(TypeIs(input, DataType::BFloat16), reasonIfUnsupported, - "Reference for ConvertBf16ToFp32 layer: input type not supported"); - - supported &= CheckSupportRule(TypeIs(output, DataType::Float32), reasonIfUnsupported, - "Reference for ConvertBf16ToFp32 layer: output type not supported"); - - return supported; -} - bool RefLayerSupport::IsConvertFp16ToFp32Supported(const TensorInfo& input, const TensorInfo& output, Optional<std::string&> reasonIfUnsupported) const @@ -974,21 +944,6 @@ bool RefLayerSupport::IsConvertFp16ToFp32Supported(const TensorInfo& input, &FalseFuncU8<>)); } -bool RefLayerSupport::IsConvertFp32ToBf16Supported(const TensorInfo& input, - const TensorInfo& output, - Optional<std::string&> reasonIfUnsupported) const -{ - bool supported 
= true; - - supported &= CheckSupportRule(TypeIs(input, DataType::Float32), reasonIfUnsupported, - "Reference for ConvertFp32ToBf16 layer: input type not supported"); - - supported &= CheckSupportRule(TypeIs(output, DataType::BFloat16), reasonIfUnsupported, - "Reference for ConvertFp32ToBf16 layer: output type not supported"); - - return supported; -} - bool RefLayerSupport::IsConvertFp32ToFp16Supported(const TensorInfo& input, const TensorInfo& output, Optional<std::string&> reasonIfUnsupported) const @@ -1021,7 +976,6 @@ bool RefLayerSupport::IsConvolution2dSupported(const TensorInfo& input, // Define supported types. std::array<DataType,7> supportedTypes = { - DataType::BFloat16, DataType::Float32, DataType::Float16, DataType::QAsymmS8, @@ -1036,20 +990,9 @@ bool RefLayerSupport::IsConvolution2dSupported(const TensorInfo& input, supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported, "Reference Convolution2d: output is not a supported type."); - // For Convolution2d, we allow to have BFloat16 input with Float32 output for optimization. - if (input.GetDataType() == DataType::BFloat16) - { - if (output.GetDataType() != DataType::BFloat16 && output.GetDataType() != DataType::Float32) - { - reasonIfUnsupported.value() += "Output tensor type must be BFloat16 or Float32 for BFloat16 input.\n"; - supported = false; - } - } - else - { - supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported, - "Reference Convolution2d: input and output types mismatched."); - } + supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported, + "Reference Convolution2d: input and output types mismatched."); + const DataType inputType = input.GetDataType(); if (IsQuantized8BitType(inputType)) @@ -1077,7 +1020,6 @@ bool RefLayerSupport::IsConvolution2dSupported(const TensorInfo& input, { std::array<DataType,4> biasesSupportedTypes = { - DataType::BFloat16, DataType::Float32, DataType::Float16, DataType::Signed32 @@ -1103,7 +1045,6 @@ bool RefLayerSupport::IsConvolution3dSupported(const TensorInfo& input, // Define supported types. std::array<DataType,7> supportedTypes = { - DataType::BFloat16, DataType::Float32, DataType::Float16, DataType::QAsymmS8, @@ -1147,7 +1088,6 @@ bool RefLayerSupport::IsConvolution3dSupported(const TensorInfo& input, { std::array<DataType,4> biasesSupportedTypes = { - DataType::BFloat16, DataType::Float32, DataType::Float16, DataType::Signed32 @@ -1201,7 +1141,6 @@ bool RefLayerSupport::IsDepthToSpaceSupported(const TensorInfo& input, std::array<DataType,6> supportedTypes = { - DataType::BFloat16, DataType::Float32, DataType::Float16, DataType::QAsymmS8, @@ -1234,7 +1173,6 @@ bool RefLayerSupport::IsDepthwiseConvolutionSupported(const TensorInfo& input, // Define supported types. 
std::array<DataType,7> supportedTypes = { - DataType::BFloat16, DataType::Float32, DataType::Float16, DataType::QAsymmS8, @@ -1279,7 +1217,6 @@ bool RefLayerSupport::IsDepthwiseConvolutionSupported(const TensorInfo& input, { std::array<DataType,4> biasesSupportedTypes = { - DataType::BFloat16, DataType::Float32, DataType::Float16, DataType::Signed32 @@ -1313,7 +1250,6 @@ bool RefLayerSupport::IsDequantizeSupported(const TensorInfo& input, "Reference for Dequantize layer: per-axis quantized input not supported."); std::array<DataType,3> supportedOutputTypes = { - DataType::BFloat16, DataType::Float32, DataType::Float16 }; @@ -1344,7 +1280,6 @@ bool RefLayerSupport::IsDetectionPostProcessSupported(const TensorInfo& boxEncod std::array<DataType,6> supportedInputTypes = { - DataType::BFloat16, DataType::Float32, DataType::Float16, DataType::QAsymmS8, @@ -1379,7 +1314,6 @@ bool RefLayerSupport::IsDivisionSupported(const TensorInfo& input0, bool supported = true; std::array<DataType,7> supportedTypes = { - DataType::BFloat16, DataType::Float32, DataType::Float16, DataType::QAsymmS8, @@ -1418,7 +1352,6 @@ bool RefLayerSupport::IsElementwiseUnarySupported(const TensorInfo& input, std::array<DataType, 7> supportedTypes = { - DataType::BFloat16, DataType::Float32, DataType::Float16, DataType::QAsymmS8, @@ -1513,7 +1446,6 @@ bool RefLayerSupport::IsFloorSupported(const TensorInfo& input, std::array<DataType,3> supportedTypes = { - DataType::BFloat16, DataType::Float32, DataType::Float16 }; @@ -1539,7 +1471,6 @@ bool RefLayerSupport::IsFullyConnectedSupported(const TensorInfo& input, // Define supported types. std::array<DataType,6> supportedTypes = { - DataType::BFloat16, DataType::Float32, DataType::Float16, DataType::QAsymmS8, @@ -1556,20 +1487,8 @@ bool RefLayerSupport::IsFullyConnectedSupported(const TensorInfo& input, supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported, "Reference Fully Connected: weights type not supported."); - // For FullyConnected, we allow to have BFloat16 input with Float32 output for optimization. 
- if (input.GetDataType() == DataType::BFloat16) - { - if (output.GetDataType() != DataType::BFloat16 && output.GetDataType() != DataType::Float32) - { - reasonIfUnsupported.value() += "Output tensor type must be BFloat16 or Float32 for BFloat16 input.\n"; - supported = false; - } - } - else - { - supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported, - "Reference Fully Connected: input and output types mismatched."); - } + supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported, + "Reference Fully Connected: input and output types mismatched."); supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported, "Reference Fully Connected: weights is not a supported type."); @@ -1583,7 +1502,6 @@ bool RefLayerSupport::IsFullyConnectedSupported(const TensorInfo& input, std::array<DataType, 5> supportedBiasTypes = { - DataType::BFloat16, DataType::Float32, DataType::Float16, DataType::Signed32, @@ -1615,7 +1533,6 @@ bool RefLayerSupport::IsGatherNdSupported(const armnn::TensorInfo& input0, bool supported = true; std::array<DataType,7> supportedTypes = { - DataType::BFloat16, DataType::Float32, DataType::Float16, DataType::QAsymmS8, @@ -1648,7 +1565,6 @@ bool RefLayerSupport::IsGatherSupported(const armnn::TensorInfo& input0, bool supported = true; std::array<DataType,7> supportedTypes = { - DataType::BFloat16, DataType::Float32, DataType::Float16, DataType::QAsymmS8, @@ -1692,7 +1608,6 @@ bool RefLayerSupport::IsInstanceNormalizationSupported(const TensorInfo& input, // Define supported types std::array<DataType, 3> supportedTypes = { - DataType::BFloat16, DataType::Float32, DataType::Float16 }; @@ -1724,7 +1639,6 @@ bool RefLayerSupport::IsL2NormalizationSupported(const TensorInfo& input, // Define supported types std::array<DataType, 6> supportedTypes = { - DataType::BFloat16, DataType::Float32, DataType::Float16, DataType::QAsymmS8, @@ -1784,7 +1698,6 @@ bool RefLayerSupport::IsLogSoftmaxSupported(const TensorInfo& input, std::array<DataType, 3> supportedTypes = { - DataType::BFloat16, DataType::Float32, DataType::Float16 }; @@ -1819,7 +1732,6 @@ bool RefLayerSupport::IsLstmSupported(const TensorInfo& input, bool supported = true; std::array<DataType,3> supportedTypes = { - DataType::BFloat16, DataType::Float32, DataType::QSymmS16 }; @@ -1922,7 +1834,6 @@ bool RefLayerSupport::IsMaximumSupported(const TensorInfo& input0, bool supported = true; std::array<DataType,7> supportedTypes = { - DataType::BFloat16, DataType::Float32, DataType::Float16, DataType::QAsymmS8, @@ -1963,7 +1874,6 @@ bool RefLayerSupport::IsMeanSupported(const TensorInfo& input, std::array<DataType,6> supportedTypes = { - DataType::BFloat16, DataType::Float32, DataType::Float16, DataType::QAsymmS8, @@ -2052,7 +1962,6 @@ bool RefLayerSupport::IsMinimumSupported(const TensorInfo& input0, bool supported = true; std::array<DataType,7> supportedTypes = { - DataType::BFloat16, DataType::Float32, DataType::Float16, DataType::QAsymmS8, @@ -2090,7 +1999,6 @@ bool RefLayerSupport::IsMultiplicationSupported(const TensorInfo& input0, bool supported = true; std::array<DataType,7> supportedTypes = { - DataType::BFloat16, DataType::Float32, DataType::Float16, DataType::QAsymmS8, @@ -2130,7 +2038,6 @@ bool RefLayerSupport::IsNormalizationSupported(const TensorInfo& input, // Define supported types std::array<DataType, 6> supportedTypes = { - DataType::BFloat16, DataType::Float16, DataType::Float32, DataType::QAsymmS8, @@ -2170,7 +2077,6 @@ bool 
RefLayerSupport::IsPadSupported(const TensorInfo& input, // Define supported output and inputs types. std::array<DataType,6> supportedTypes = { - DataType::BFloat16, DataType::Float32, DataType::Float16, DataType::QAsymmS8, @@ -2232,7 +2138,6 @@ bool RefLayerSupport::IsPooling2dSupported(const TensorInfo& input, // Define supported output and inputs types. std::array<DataType,6> supportedTypes = { - DataType::BFloat16, DataType::Float32, DataType::Float16, DataType::QAsymmS8, @@ -2263,7 +2168,6 @@ bool RefLayerSupport::IsPooling3dSupported(const TensorInfo& input, // Define supported output and inputs types. std::array<DataType,6> supportedTypes = { - DataType::BFloat16, DataType::Float32, DataType::Float16, DataType::QAsymmS8, @@ -2316,7 +2220,6 @@ bool RefLayerSupport::IsQuantizeSupported(const TensorInfo& input, // Define supported input types. std::array<DataType,7> supportedInputTypes = { - DataType::BFloat16, DataType::Float32, DataType::Float16, DataType::QAsymmS8, @@ -2368,7 +2271,6 @@ bool RefLayerSupport::IsReduceSupported(const TensorInfo& input, bool supported = true; std::array<DataType,7> supportedTypes = { - DataType::BFloat16, DataType::Float32, DataType::Float16, DataType::QAsymmS8, @@ -2470,7 +2372,6 @@ bool RefLayerSupport::IsSliceSupported(const TensorInfo& input, std::array<DataType, 5> supportedTypes = { - DataType::BFloat16, DataType::Float32, DataType::QAsymmS8, DataType::QAsymmU8, @@ -2498,7 +2399,6 @@ bool RefLayerSupport::IsSoftmaxSupported(const TensorInfo& input, bool supported = true; std::array<DataType,7> supportedTypes = { - DataType::BFloat16, DataType::Float32, DataType::Float16, DataType::QSymmS8, @@ -2528,7 +2428,6 @@ bool RefLayerSupport::IsSpaceToBatchNdSupported(const TensorInfo& input, bool supported = true; std::array<DataType,6> supportedTypes = { - DataType::BFloat16, DataType::Float32, DataType::Float16, DataType::QAsymmS8, @@ -2559,7 +2458,6 @@ bool RefLayerSupport::IsSpaceToDepthSupported(const TensorInfo& input, std::array<DataType,6> supportedTypes = { - DataType::BFloat16, DataType::Float32, DataType::Float16, DataType::QAsymmS8, @@ -2588,7 +2486,6 @@ bool RefLayerSupport::IsSplitterSupported(const TensorInfo& input, bool supported = true; std::array<DataType,6> supportedTypes = { - DataType::BFloat16, DataType::Float32, DataType::Float16, DataType::QAsymmS8, @@ -2620,7 +2517,6 @@ bool RefLayerSupport::IsStackSupported(const std::vector<const TensorInfo*>& inp bool supported = true; std::array<DataType,7> supportedTypes = { - DataType::BFloat16, DataType::Float32, DataType::Float16, DataType::QAsymmS8, @@ -2654,7 +2550,6 @@ bool RefLayerSupport::IsStridedSliceSupported(const TensorInfo& input, std::array<DataType,5> supportedTypes = { - DataType::BFloat16, DataType::Float32, DataType::QAsymmS8, DataType::QAsymmU8, @@ -2681,7 +2576,6 @@ bool RefLayerSupport::IsSubtractionSupported(const TensorInfo& input0, bool supported = true; std::array<DataType,7> supportedTypes = { - DataType::BFloat16, DataType::Float32, DataType::Float16, DataType::QAsymmS8, @@ -2720,7 +2614,6 @@ bool RefLayerSupport::IsPreluSupported(const TensorInfo& input, std::array<DataType, 6> supportedTypes { - DataType::BFloat16, DataType::Float32, DataType::Float16, DataType::QAsymmS8, @@ -2758,7 +2651,6 @@ bool RefLayerSupport::IsTransposeConvolution2dSupported(const TensorInfo& input, std::array<DataType,7> supportedTypes = { - DataType::BFloat16, DataType::Float32, DataType::Float16, DataType::QAsymmS8, @@ -2804,7 +2696,6 @@ bool 
RefLayerSupport::IsTransposeConvolution2dSupported(const TensorInfo& input, { std::array<DataType,4> biasesSupportedTypes = { - DataType::BFloat16, DataType::Float32, DataType::Float16, DataType::Signed32 diff --git a/src/backends/reference/RefLayerSupport.hpp b/src/backends/reference/RefLayerSupport.hpp index b64244db24..f0e9e35978 100644 --- a/src/backends/reference/RefLayerSupport.hpp +++ b/src/backends/reference/RefLayerSupport.hpp @@ -77,17 +77,10 @@ public: bool IsConstantSupported(const TensorInfo& output, Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override; - bool IsConvertBf16ToFp32Supported(const TensorInfo& input, - const TensorInfo& output, - Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override; - bool IsConvertFp16ToFp32Supported(const TensorInfo& input, const TensorInfo& output, Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override; - bool IsConvertFp32ToBf16Supported(const TensorInfo& input, - const TensorInfo& output, - Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override; bool IsConvertFp32ToFp16Supported(const TensorInfo& input, const TensorInfo& output, diff --git a/src/backends/reference/RefWorkloadFactory.cpp b/src/backends/reference/RefWorkloadFactory.cpp index 093d0d5e20..69f75cae8a 100644 --- a/src/backends/reference/RefWorkloadFactory.cpp +++ b/src/backends/reference/RefWorkloadFactory.cpp @@ -212,24 +212,12 @@ std::unique_ptr<IWorkload> RefWorkloadFactory::CreateWorkload(LayerType type, auto constantQueueDescriptor = PolymorphicDowncast<const ConstantQueueDescriptor*>(&descriptor); return std::make_unique<RefConstantWorkload>(*constantQueueDescriptor, info); } - case LayerType::ConvertBf16ToFp32 : - { - auto convertBf16ToFp32QueueDescriptor - = PolymorphicDowncast<const ConvertBf16ToFp32QueueDescriptor*>(&descriptor); - return std::make_unique<RefConvertBf16ToFp32Workload>(*convertBf16ToFp32QueueDescriptor, info); - } case LayerType::ConvertFp16ToFp32: { auto convertFp16ToFp32QueueDescriptor = PolymorphicDowncast<const ConvertFp16ToFp32QueueDescriptor*>(&descriptor); return std::make_unique<RefConvertFp16ToFp32Workload>(*convertFp16ToFp32QueueDescriptor, info); } - case LayerType::ConvertFp32ToBf16: - { - auto convertFp32ToBf16QueueDescriptor - = PolymorphicDowncast<const ConvertFp32ToBf16QueueDescriptor*>(&descriptor); - return std::make_unique<RefConvertFp32ToBf16Workload>(*convertFp32ToBf16QueueDescriptor, info); - } case LayerType::ConvertFp32ToFp16: { auto convertFp32ToFp16QueueDescriptor @@ -724,13 +712,6 @@ std::unique_ptr<IWorkload> RefWorkloadFactory::CreateConstant(const ConstantQueu return std::make_unique<RefConstantWorkload>(descriptor, info); } -std::unique_ptr<IWorkload> RefWorkloadFactory::CreateConvertBf16ToFp32( - const ConvertBf16ToFp32QueueDescriptor& descriptor, - const WorkloadInfo& info) const -{ - return std::make_unique<RefConvertBf16ToFp32Workload>(descriptor, info); -} - std::unique_ptr<IWorkload> RefWorkloadFactory::CreateConvertFp16ToFp32( const ConvertFp16ToFp32QueueDescriptor& descriptor, const WorkloadInfo& info) const @@ -738,13 +719,6 @@ std::unique_ptr<IWorkload> RefWorkloadFactory::CreateConvertFp16ToFp32( return std::make_unique<RefConvertFp16ToFp32Workload>(descriptor, info); } -std::unique_ptr<IWorkload> RefWorkloadFactory::CreateConvertFp32ToBf16( - const ConvertFp32ToBf16QueueDescriptor& descriptor, - const WorkloadInfo& info) const -{ - return std::make_unique<RefConvertFp32ToBf16Workload>(descriptor, info); -} - 
std::unique_ptr<IWorkload> RefWorkloadFactory::CreateConvertFp32ToFp16( const ConvertFp32ToFp16QueueDescriptor& descriptor, const WorkloadInfo& info) const diff --git a/src/backends/reference/RefWorkloadFactory.hpp b/src/backends/reference/RefWorkloadFactory.hpp index 53d0806ffe..22dc35a8c8 100644 --- a/src/backends/reference/RefWorkloadFactory.hpp +++ b/src/backends/reference/RefWorkloadFactory.hpp @@ -122,21 +122,11 @@ public: ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable " "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "23.08") - std::unique_ptr<IWorkload> CreateConvertBf16ToFp32(const ConvertBf16ToFp32QueueDescriptor& descriptor, - const WorkloadInfo& info) const override; - - ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable " - "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "23.08") std::unique_ptr<IWorkload> CreateConvertFp16ToFp32(const ConvertFp16ToFp32QueueDescriptor& descriptor, const WorkloadInfo& info) const override; ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable " "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "23.08") - std::unique_ptr<IWorkload> CreateConvertFp32ToBf16(const ConvertFp32ToBf16QueueDescriptor& descriptor, - const WorkloadInfo& info) const override; - - ARMNN_DEPRECATED_MSG_REMOVAL_DATE("Use ABI stable " - "CreateWorkload(LayerType, const QueueDescriptor&, const WorkloadInfo& info) instead.", "23.08") std::unique_ptr<IWorkload> CreateConvertFp32ToFp16(const ConvertFp32ToFp16QueueDescriptor& descriptor, const WorkloadInfo& info) const override; diff --git a/src/backends/reference/backend.mk b/src/backends/reference/backend.mk index ed942e67cd..eb2ec2df44 100644 --- a/src/backends/reference/backend.mk +++ b/src/backends/reference/backend.mk @@ -58,9 +58,7 @@ BACKEND_SOURCES := \ workloads/RefComparisonWorkload.cpp \ workloads/RefConcatWorkload.cpp \ workloads/RefConstantWorkload.cpp \ - workloads/RefConvertBf16ToFp32Workload.cpp \ workloads/RefConvertFp16ToFp32Workload.cpp \ - workloads/RefConvertFp32ToBf16Workload.cpp \ workloads/RefConvertFp32ToFp16Workload.cpp \ workloads/RefConvolution2dWorkload.cpp \ workloads/RefConvolution3dWorkload.cpp \ diff --git a/src/backends/reference/test/RefEndToEndTests.cpp b/src/backends/reference/test/RefEndToEndTests.cpp index 369e98adb1..a8c0634186 100644 --- a/src/backends/reference/test/RefEndToEndTests.cpp +++ b/src/backends/reference/test/RefEndToEndTests.cpp @@ -634,11 +634,6 @@ TEST_CASE("RefEluEndToEndTestFloat16") EluEndToEndTest<armnn::DataType::Float16>(defaultBackends); } -TEST_CASE("RefEluEndToEndTestBFloat16") -{ - EluEndToEndTest<armnn::DataType::BFloat16>(defaultBackends); -} - TEST_CASE("RefEluEndToEndTestQAsymmS8") { EluEndToEndTest<armnn::DataType::QAsymmS8>(defaultBackends); @@ -1006,11 +1001,6 @@ TEST_CASE("RefHardSwishEndToEndTestFloat16") HardSwishEndToEndTest<armnn::DataType::Float16>(defaultBackends); } -TEST_CASE("RefHardSwishEndToEndTestBFloat16") -{ - HardSwishEndToEndTest<armnn::DataType::BFloat16>(defaultBackends); -} - TEST_CASE("RefHardSwishEndToEndTestQAsymmS8") { HardSwishEndToEndTest<armnn::DataType::QAsymmS8>(defaultBackends); diff --git a/src/backends/reference/test/RefLayerSupportTests.cpp b/src/backends/reference/test/RefLayerSupportTests.cpp index 9a27c7c0b3..e671496ce2 100644 --- a/src/backends/reference/test/RefLayerSupportTests.cpp +++ b/src/backends/reference/test/RefLayerSupportTests.cpp @@ -49,12 +49,6 @@ TEST_CASE("IsLayerSupportedReferenceAddition") 
CHECK(supportChecker.IsAdditionSupported(in0, in1, out, reasonNotSupported)); } -TEST_CASE("IsLayerSupportedBFloat16Reference") -{ - armnn::RefWorkloadFactory factory; - IsLayerSupportedTests<armnn::RefWorkloadFactory, armnn::DataType::BFloat16>(&factory); -} - TEST_CASE("IsLayerSupportedFloat16Reference") { armnn::RefWorkloadFactory factory; @@ -117,70 +111,6 @@ TEST_CASE("IsConvertFp16ToFp32SupportedFp16OutputReference") CHECK_EQ(reasonIfUnsupported, "Layer is not supported with float16 data type output"); } -TEST_CASE("IsConvertBf16ToFp32SupportedReference") -{ - std::string reasonIfUnsupported; - - bool result = IsConvertLayerSupportedTests<armnn::RefWorkloadFactory, armnn::ConvertBf16ToFp32Layer, - armnn::DataType::BFloat16, armnn::DataType::Float32>(reasonIfUnsupported); - - CHECK(result); -} - -TEST_CASE("IsConvertBf16ToFp32SupportedFp32InputReference") -{ - std::string reasonIfUnsupported; - - bool result = IsConvertLayerSupportedTests<armnn::RefWorkloadFactory, armnn::ConvertBf16ToFp32Layer, - armnn::DataType::Float32, armnn::DataType::Float32>(reasonIfUnsupported); - - CHECK(!result); - CHECK_EQ(reasonIfUnsupported, "Reference for ConvertBf16ToFp32 layer: input type not supported\n"); -} - -TEST_CASE("IsConvertBf16ToFp32SupportedBf16OutputReference") -{ - std::string reasonIfUnsupported; - - bool result = IsConvertLayerSupportedTests<armnn::RefWorkloadFactory, armnn::ConvertBf16ToFp32Layer, - armnn::DataType::BFloat16, armnn::DataType::BFloat16>(reasonIfUnsupported); - - CHECK(!result); - CHECK_EQ(reasonIfUnsupported, "Reference for ConvertBf16ToFp32 layer: output type not supported\n"); -} - -TEST_CASE("IsConvertFp32ToBf16SupportedReference") -{ - std::string reasonIfUnsupported; - - bool result = IsConvertLayerSupportedTests<armnn::RefWorkloadFactory, armnn::ConvertFp32ToBf16Layer, - armnn::DataType::Float32, armnn::DataType::BFloat16>(reasonIfUnsupported); - - CHECK(result); -} - -TEST_CASE("IsConvertFp32ToBf16SupportedBf16InputReference") -{ - std::string reasonIfUnsupported; - - bool result = IsConvertLayerSupportedTests<armnn::RefWorkloadFactory, armnn::ConvertFp32ToBf16Layer, - armnn::DataType::BFloat16, armnn::DataType::BFloat16>(reasonIfUnsupported); - - CHECK(!result); - CHECK_EQ(reasonIfUnsupported, "Reference for ConvertFp32ToBf16 layer: input type not supported\n"); -} - -TEST_CASE("IsConvertFp32ToBf16SupportedFp32OutputReference") -{ - std::string reasonIfUnsupported; - - bool result = IsConvertLayerSupportedTests<armnn::RefWorkloadFactory, armnn::ConvertFp32ToBf16Layer, - armnn::DataType::Float32, armnn::DataType::Float32>(reasonIfUnsupported); - - CHECK(!result); - CHECK_EQ(reasonIfUnsupported, "Reference for ConvertFp32ToBf16 layer: output type not supported\n"); -} - TEST_CASE("IsConvertFp32ToFp16SupportedReference") { std::string reasonIfUnsupported; @@ -271,7 +201,9 @@ TEST_CASE("IsConstantSupportedRef") result = IsConstantLayerSupportedTests<armnn::RefWorkloadFactory, armnn::DataType::BFloat16>(reasonIfUnsupported); - CHECK(result); + CHECK(!result); + CHECK(reasonIfUnsupported.find("Reference constant: output is not a supported type.") != std::string::npos); + } } diff --git a/src/backends/reference/test/RefLayerTests.cpp b/src/backends/reference/test/RefLayerTests.cpp index 7375847602..750da8fba2 100644 --- a/src/backends/reference/test/RefLayerTests.cpp +++ b/src/backends/reference/test/RefLayerTests.cpp @@ -72,14 +72,6 @@ ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleConvolution2dAsymmetricPaddingNhwc, ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleConvolution2dSquareNhwc, 
SimpleConvolution2d3x3NhwcTest, false) -ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2d3x3Dilation3x3BFloat16, - Convolution2d3x3Dilation3x3Test<DataType::BFloat16, DataType::BFloat16>, - false, - DataLayout::NCHW) -ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2d3x3Dilation3x3NhwcBFloat16, - Convolution2d3x3Dilation3x3Test<DataType::BFloat16, DataType::BFloat16>, - false, - DataLayout::NHWC) ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2d3x3Dilation3x3, Convolution2d3x3Dilation3x3Test<DataType::Float32, DataType::Float32>, false, @@ -113,14 +105,6 @@ ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2d3x3Dilation3x3NhwcInt16, false, DataLayout::NHWC) -ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2d2x3x3Dilation3x3BFloat16, - Convolution2d2x3x3Dilation3x3Test<DataType::BFloat16, DataType::BFloat16>, - false, - DataLayout::NCHW) -ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2d2x3x3Dilation3x3NhwcBFloat16, - Convolution2d2x3x3Dilation3x3Test<DataType::BFloat16, DataType::BFloat16>, - false, - DataLayout::NHWC) ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2d2x3x3Dilation3x3, Convolution2d2x3x3Dilation3x3Test<DataType::Float32, DataType::Float32>, false, @@ -154,15 +138,6 @@ ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2d2x3x3Dilation3x3NhwcInt16, false, DataLayout::NHWC) -ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2d2x2Dilation2x2Padding2x2Stride3x3BFloat16, - Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<DataType::BFloat16, DataType::BFloat16>, - false, - DataLayout::NCHW) - -ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2d2x2Dilation2x2Padding2x2Stride3x3NhwcBFloat16, - Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<DataType::BFloat16, DataType::BFloat16>, - false, - DataLayout::NHWC) ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2d2x2Dilation2x2Padding2x2Stride3x3, Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<DataType::Float32, DataType::Float32>, false, @@ -199,15 +174,6 @@ ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2d2x2Dilation2x2Padding2x2Stride3x3Nhwc ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2dPerAxisQuantTestNchw, Convolution2dPerAxisQuantTest, DataLayout::NCHW); ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2dPerAxisQuantTestNhwc, Convolution2dPerAxisQuantTest, DataLayout::NHWC); -ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2d3x3Stride2x2Bf16, - Convolution2d3x3Stride2x2BFloat16Test, - false, - DataLayout::NHWC); -ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2d3x3Stride2x2BFloat16SmallValue, - Convolution2d3x3Stride2x2BFloat16SmallValueTest, - false, - DataLayout::NHWC); - // Convolution 3d - NDHWC ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleConvolution3d3x3x3Float32, SimpleConvolution3d3x3x3Float32Test, @@ -354,14 +320,6 @@ ARMNN_AUTO_TEST_CASE_WITH_THF(DepthwiseConvolution2d3x3Dilation3x3Nhwc, DepthwiseConvolution2d3x3Dilation3x3Test<DataType::Float32, DataType::Float32>, false, DataLayout::NHWC) -ARMNN_AUTO_TEST_CASE_WITH_THF(DepthwiseConvolution2d3x3Dilation3x3BFloat16, - DepthwiseConvolution2d3x3Dilation3x3Test<DataType::BFloat16, DataType::BFloat16>, - false, - DataLayout::NCHW) -ARMNN_AUTO_TEST_CASE_WITH_THF(DepthwiseConvolution2d3x3Dilation3x3NhwcBFloat16, - DepthwiseConvolution2d3x3Dilation3x3Test<DataType::BFloat16, DataType::BFloat16>, - false, - DataLayout::NHWC) ARMNN_AUTO_TEST_CASE_WITH_THF(DepthwiseConvolution2d3x3Dilation3x3Int8, DepthwiseConvolution2d3x3Dilation3x3Test<DataType::QAsymmS8, DataType::Signed32>, false, @@ -395,14 +353,6 @@ ARMNN_AUTO_TEST_CASE_WITH_THF(DepthwiseConvolution2d2x3x3Dilation3x3Nhwc, DepthwiseConvolution2d2x3x3Dilation3x3Test<DataType::Float32, DataType::Float32>, false, DataLayout::NHWC) 
-ARMNN_AUTO_TEST_CASE_WITH_THF(DepthwiseConvolution2d2x3x3Dilation3x3BFloat16, - DepthwiseConvolution2d2x3x3Dilation3x3Test<DataType::BFloat16, DataType::BFloat16>, - false, - DataLayout::NCHW) -ARMNN_AUTO_TEST_CASE_WITH_THF(DepthwiseConvolution2d2x3x3Dilation3x3NhwcBFloat16, - DepthwiseConvolution2d2x3x3Dilation3x3Test<DataType::BFloat16, DataType::BFloat16>, - false, - DataLayout::NHWC) ARMNN_AUTO_TEST_CASE_WITH_THF(DepthwiseConvolution2d2x3x3Dilation3x3Int8, DepthwiseConvolution2d2x3x3Dilation3x3Test<DataType::QAsymmS8, DataType::Signed32>, false, @@ -435,14 +385,6 @@ ARMNN_AUTO_TEST_CASE_WITH_THF(DepthwiseConvolution2dMult2, DepthwiseConvolution2dMult2Test<armnn::DataType::Float32, armnn::DataType::Float32>, false, armnn::DataLayout::NCHW) -ARMNN_AUTO_TEST_CASE_WITH_THF(DepthwiseConvolution2dMult4BFloat16, - DepthwiseConvolution2dMult4Test<armnn::DataType::BFloat16, armnn::DataType::BFloat16>, - false, - armnn::DataLayout::NCHW) -ARMNN_AUTO_TEST_CASE_WITH_THF(DepthwiseConvolution2dMult2BFloat16, - DepthwiseConvolution2dMult2Test<armnn::DataType::BFloat16, armnn::DataType::BFloat16>, - false, - armnn::DataLayout::NCHW) ARMNN_AUTO_TEST_CASE_WITH_THF(DepthwiseConvolution2dDepthMul1, DepthwiseConvolution2dDepthMul1Test, @@ -864,7 +806,6 @@ ARMNN_AUTO_TEST_CASE_WITH_THF(CopyViaSplitterInt16, CopyViaSplitterInt16Test) // Concat ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleConcat, ConcatTest) -ARMNN_AUTO_TEST_CASE_WITH_THF(ConcatBFloat16, ConcatBFloat16Test) ARMNN_AUTO_TEST_CASE_WITH_THF(ConcatFloat16, ConcatFloat16Test) ARMNN_AUTO_TEST_CASE_WITH_THF(ConcatInt32, ConcatInt32Test) ARMNN_AUTO_TEST_CASE_WITH_THF(ConcatUint8, ConcatUint8Test) @@ -1063,91 +1004,78 @@ ARMNN_AUTO_TEST_CASE_WITH_THF(MultiplicationBroadcast1DVectorInt32, Multiplicati ARMNN_AUTO_TEST_CASE_WITH_THF(Multiplication5d, Multiplication5dTest) // Batch Mat Mul -ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul2DSimpleBFloat16, BatchMatMul2DSimpleTest<DataType::BFloat16>); ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul2DSimpleFloat32, BatchMatMul2DSimpleTest<DataType::Float32>); ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul2DSimpleFloat16, BatchMatMul2DSimpleTest<DataType::Float16>); ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul2DSimpleQAsymmS8, BatchMatMul2DSimpleTest<DataType::QAsymmS8>); ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul2DSimpleQAsymmU8, BatchMatMul2DSimpleTest<DataType::QAsymmU8>); ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul2DSimpleQASymmS16, BatchMatMul2DSimpleTest<DataType::QSymmS16>); -ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul3DSimpleBFloat16, BatchMatMul3DSimpleTest<DataType::BFloat16>); ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul3DSimpleFloat32, BatchMatMul3DSimpleTest<DataType::Float32>); ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul3DSimpleFloat16, BatchMatMul3DSimpleTest<DataType::Float16>); ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul3DSimpleQAsymmS8, BatchMatMul3DSimpleTest<DataType::QAsymmS8>); ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul3DSimpleQAsymmU8, BatchMatMul3DSimpleTest<DataType::QAsymmU8>); ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul3DSimpleQASymmS16, BatchMatMul3DSimpleTest<DataType::QSymmS16>); -ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMulNCHWSimpleBFloat16, BatchMatMulNCHWSimpleTest<DataType::BFloat16>); ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMulNCHWSimpleFloat32, BatchMatMulNCHWSimpleTest<DataType::Float32>); ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMulNCHWSimpleFloat16, BatchMatMulNCHWSimpleTest<DataType::Float16>); ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMulNCHWSimpleQAsymmS8, BatchMatMulNCHWSimpleTest<DataType::QAsymmS8>); 
ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMulNCHWSimpleQAsymmU8, BatchMatMulNCHWSimpleTest<DataType::QAsymmU8>); ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMulNCHWSimpleQASymmS16, BatchMatMulNCHWSimpleTest<DataType::QSymmS16>); -ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMulNHWCSimpleBFloat16, BatchMatMulNHWCSimpleTest<DataType::BFloat16>); ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMulNHWCSimpleFloat32, BatchMatMulNHWCSimpleTest<DataType::Float32>); ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMulNHWCSimpleFloat16, BatchMatMulNHWCSimpleTest<DataType::Float16>); ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMulNHWCSimpleQAsymmS8, BatchMatMulNHWCSimpleTest<DataType::QAsymmS8>); ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMulNHWCSimpleQAsymmU8, BatchMatMulNHWCSimpleTest<DataType::QAsymmU8>); ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMulNHWCSimpleQASymmS16, BatchMatMulNHWCSimpleTest<DataType::QSymmS16>); -ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul3DBatchBFloat16, BatchMatMul3DBatchTest<DataType::BFloat16>); ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul3DBatchFloat32, BatchMatMul3DBatchTest<DataType::Float32>); ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul3DBatchFloat16, BatchMatMul3DBatchTest<DataType::Float16>); ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul3DBatchQAsymmS8, BatchMatMul3DBatchTest<DataType::QAsymmS8>); ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul3DBatchQAsymmU8, BatchMatMul3DBatchTest<DataType::QAsymmU8>); ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul3DBatchQASymmS16, BatchMatMul3DBatchTest<DataType::QSymmS16>); -ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul3DBroadcastBFloat16, BatchMatMul3DBroadcastTest<DataType::BFloat16>); ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul3DBroadcastFloat32, BatchMatMul3DBroadcastTest<DataType::Float32>); ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul3DBroadcastFloat16, BatchMatMul3DBroadcastTest<DataType::Float16>); ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul3DBroadcastQAsymmS8, BatchMatMul3DBroadcastTest<DataType::QAsymmS8>); ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul3DBroadcastQAsymmU8, BatchMatMul3DBroadcastTest<DataType::QAsymmU8>); ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul3DBroadcastQASymmS16, BatchMatMul3DBroadcastTest<DataType::QSymmS16>); -ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul3D2DBroadcastBFloat16, BatchMatMul3D2DBroadcastTest<DataType::BFloat16>); ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul3D2DBroadcastFloat32, BatchMatMul3D2DBroadcastTest<DataType::Float32>); ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul3D2DBroadcastFloat16, BatchMatMul3D2DBroadcastTest<DataType::Float16>); ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul3D2DBroadcastQAsymmS8, BatchMatMul3D2DBroadcastTest<DataType::QAsymmS8>); ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul3D2DBroadcastQAsymmU8, BatchMatMul3D2DBroadcastTest<DataType::QAsymmU8>); ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul3D2DBroadcastQASymmSS16, BatchMatMul3D2DBroadcastTest<DataType::QSymmS16>); -ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMulNDHWCNHWCBFloat16, BatchMatMulNDHWCNHWCTest<DataType::BFloat16>); ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMulNDHWCNHWCFloat32, BatchMatMulNDHWCNHWCTest<DataType::Float32>); ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMulNDHWCNHWCFloat16, BatchMatMulNDHWCNHWCTest<DataType::Float16>); ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMulNDHWCNHWCQAsymmS8, BatchMatMulNDHWCNHWCTest<DataType::QAsymmS8>); ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMulNDHWCNHWCQAsymmU8, BatchMatMulNDHWCNHWCTest<DataType::QAsymmU8>); ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMulNDHWCNHWCQASymmSS16, BatchMatMulNDHWCNHWCTest<DataType::QSymmS16>); -ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul2DTinyBFloat16, 
BatchMatMul2DTinyTest<DataType::BFloat16>); ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul2DTinyFloat32, BatchMatMul2DTinyTest<DataType::Float32>); ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul2DTinyFloat16, BatchMatMul2DTinyTest<DataType::Float16>); ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul2DTinyQAsymmS8, BatchMatMul2DTinyTest<DataType::QAsymmS8>); ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul2DTinyQAsymmU8, BatchMatMul2DTinyTest<DataType::QAsymmU8>); ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul2DTinyQASymmS16, BatchMatMul2DTinyTest<DataType::QSymmS16>); -ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul3DNonSquareBFloat16, BatchMatMul3DNonSquareTest<DataType::BFloat16>); ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul3DNonSquareFloat32, BatchMatMul3DNonSquareTest<DataType::Float32>); ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul3DNonSquareFloat16, BatchMatMul3DNonSquareTest<DataType::Float16>); ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul3DNonSquareQAsymmS8, BatchMatMul3DNonSquareTest<DataType::QAsymmS8>); ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul3DNonSquareQAsymmU8, BatchMatMul3DNonSquareTest<DataType::QAsymmU8>); ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul3DNonSquareQASymmS16, BatchMatMul3DNonSquareTest<DataType::QSymmS16>); -ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul2DTranspSimpleBFloat16, BatchMatMul2DTranspSimpleTest<DataType::BFloat16>); ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul2DTranspSimpleFloat32, BatchMatMul2DTranspSimpleTest<DataType::Float32>); ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul2DTranspSimpleFloat16, BatchMatMul2DTranspSimpleTest<DataType::Float16>); ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul2DTranspSimpleQAsymmS8, BatchMatMul2DTranspSimpleTest<DataType::QAsymmS8>); ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul2DTranspSimpleQAsymmU8, BatchMatMul2DTranspSimpleTest<DataType::QAsymmU8>); ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul2DTranspSimpleQASymmS16,BatchMatMul2DTranspSimpleTest<DataType::QSymmS16>); -ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul2DAdjointSimpleBFloat16, BatchMatMul2DAdjointSimpleTest<DataType::BFloat16>); ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul2DAdjointSimpleFloat32, BatchMatMul2DAdjointSimpleTest<DataType::Float32>); ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul2DAdjointSimpleFloat16, BatchMatMul2DAdjointSimpleTest<DataType::Float16>); ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul2DAdjointSimpleQAsymmS8, BatchMatMul2DAdjointSimpleTest<DataType::QAsymmS8>); ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul2DAdjointSimpleQAsymmU8, BatchMatMul2DAdjointSimpleTest<DataType::QAsymmU8>); ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMul2DAdjointSimpleQASymmS16,BatchMatMul2DAdjointSimpleTest<DataType::QSymmS16>); -ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMulNHWCParamsBFloat16, BatchMatMulNHWCParamsTest<DataType::BFloat16>); ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMulNHWCParamsFloat32, BatchMatMulNHWCParamsTest<DataType::Float32>); ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMulNHWCParamsFloat16, BatchMatMulNHWCParamsTest<DataType::Float16>); ARMNN_AUTO_TEST_CASE_WITH_THF(BatchMatMulNHWCParamsQAsymmS8, BatchMatMulNHWCParamsTest<DataType::QAsymmS8>); @@ -1172,7 +1100,6 @@ ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize1Signed32, RankDimSize1Test<DataType::S ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize1QSymmS16, RankDimSize1Test<DataType::QSymmS16>) ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize1QSymmS8, RankDimSize1Test<DataType::QSymmS8>) ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize1QAsymmS8, RankDimSize1Test<DataType::QAsymmS8>) -ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize1BFloat16, RankDimSize1Test<DataType::BFloat16>) 
ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize2Float16, RankDimSize2Test<DataType::Float16>) ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize2Float32, RankDimSize2Test<DataType::Float32>) @@ -1181,7 +1108,6 @@ ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize2Signed32, RankDimSize2Test<DataType::S ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize2QSymmS16, RankDimSize2Test<DataType::QSymmS16>) ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize2QSymmS8, RankDimSize2Test<DataType::QSymmS8>) ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize2QAsymmS8, RankDimSize2Test<DataType::QAsymmS8>) -ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize2BFloat16, RankDimSize2Test<DataType::BFloat16>) ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize3Float16, RankDimSize3Test<DataType::Float16>) ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize3Float32, RankDimSize3Test<DataType::Float32>) @@ -1190,7 +1116,6 @@ ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize3Signed32, RankDimSize3Test<DataType::S ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize3QSymmS16, RankDimSize3Test<DataType::QSymmS16>) ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize3QSymmS8, RankDimSize3Test<DataType::QSymmS8>) ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize3QAsymmS8, RankDimSize3Test<DataType::QAsymmS8>) -ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize3BFloat16, RankDimSize3Test<DataType::BFloat16>) ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize4Float16, RankDimSize4Test<DataType::Float16>) ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize4Float32, RankDimSize4Test<DataType::Float32>) @@ -1199,7 +1124,6 @@ ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize4Signed32, RankDimSize4Test<DataType::S ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize4QSymmS16, RankDimSize4Test<DataType::QSymmS16>) ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize4QSymmS8, RankDimSize4Test<DataType::QSymmS8>) ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize4QAsymmS8, RankDimSize4Test<DataType::QAsymmS8>) -ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize4BFloat16, RankDimSize4Test<DataType::BFloat16>) // Resize Bilinear - NCHW ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleResizeBilinear, @@ -1650,11 +1574,6 @@ ARMNN_AUTO_TEST_CASE_WITH_THF(LogSoftmaxFloat16_3, LogSoftmaxTest3<DataType::Flo ARMNN_AUTO_TEST_CASE_WITH_THF(LogSoftmaxFloat16_4, LogSoftmaxTest4<DataType::Float16>) // Pad - Constant -ARMNN_AUTO_TEST_CASE_WITH_THF(PadBFloat162d, PadBFloat162dTest) -ARMNN_AUTO_TEST_CASE_WITH_THF(PadBFloat162dCustomPadding, PadBFloat162dCustomPaddingTest) -ARMNN_AUTO_TEST_CASE_WITH_THF(PadBFloat163d, PadBFloat163dTest) -ARMNN_AUTO_TEST_CASE_WITH_THF(PadBFloat164d, PadBFloat164dTest) - ARMNN_AUTO_TEST_CASE_WITH_THF(PadFloat322d, PadFloat322dTest) ARMNN_AUTO_TEST_CASE_WITH_THF(PadFloat322dCustomPadding, PadFloat322dCustomPaddingTest) ARMNN_AUTO_TEST_CASE_WITH_THF(PadFloat323d, PadFloat323dTest) @@ -1692,8 +1611,6 @@ ARMNN_AUTO_TEST_CASE_WITH_THF(PadReflect3dInt8, PadReflect3dInt8Test) ARMNN_AUTO_TEST_CASE_WITH_THF(PadSymmetric4dFloat32, PadSymmetric4dFloat32Test) ARMNN_AUTO_TEST_CASE_WITH_THF(PadReflect4dFloat32, PadReflect4dFloat32Test) -ARMNN_AUTO_TEST_CASE_WITH_THF(PadSymmetric4dBFloat16, PadSymmetric4dBFloat16Test) -ARMNN_AUTO_TEST_CASE_WITH_THF(PadReflect4dBFloat16, PadReflect4dBFloat16Test) ARMNN_AUTO_TEST_CASE_WITH_THF(PadSymmetric4dUint8, PadSymmetric4dUint8Test) ARMNN_AUTO_TEST_CASE_WITH_THF(PadReflect4dUint8, PadReflect4dUint8Test) ARMNN_AUTO_TEST_CASE_WITH_THF(PadSymmetric4dInt8, PadSymmetric4dInt8Test) @@ -1878,17 +1795,10 @@ ARMNN_AUTO_TEST_CASE_WITH_THF(CastUIntToFloat, CastUInt8ToFloat2dTest) ARMNN_AUTO_TEST_CASE_WITH_THF(CastInt8ToUInt, CastInt8ToUInt82dTest) ARMNN_AUTO_TEST_CASE_WITH_THF(CastInt8AsymmToUInt, 
CastInt8AsymmToUInt82dTest) ARMNN_AUTO_TEST_CASE_WITH_THF(CastFloat16ToFloat32, CastFloat16ToFloat322dTest) -ARMNN_AUTO_TEST_CASE_WITH_THF(CastBFloat16ToFloat32, CastBFloat16ToFloat322dTest) ARMNN_AUTO_TEST_CASE_WITH_THF(CastFloatToFloat16, CastFloat32ToFloat162dTest) ARMNN_AUTO_TEST_CASE_WITH_THF(CastFloatToIn8, CastFloat32ToInt82dTest) ARMNN_AUTO_TEST_CASE_WITH_THF(CastFloatToUInt8, CastFloat32ToUInt82dTest) -// Convert from BFloat16 to Float32 -ARMNN_AUTO_TEST_CASE_WITH_THF(ConvertBf16ToFp32, ConvertBf16ToFp32Test) - -// Convert from Float32 to BFloat16 -ARMNN_AUTO_TEST_CASE_WITH_THF(ConvertFp32ToBf16, ConvertFp32ToBf16Test) - // Convert from Float16 to Float32 ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleConvertFp16ToFp32, SimpleConvertFp16ToFp32Test) // Convert from Float32 to Float16 @@ -2139,7 +2049,6 @@ ARMNN_AUTO_TEST_CASE_WITH_THF(ShapeDimSize1Signed32, ShapeDimSize1Test<DataType: ARMNN_AUTO_TEST_CASE_WITH_THF(ShapeDimSize1QSymmS16, ShapeDimSize1Test<DataType::QSymmS16>) ARMNN_AUTO_TEST_CASE_WITH_THF(ShapeDimSize1QSymmS8, ShapeDimSize1Test<DataType::QSymmS8>) ARMNN_AUTO_TEST_CASE_WITH_THF(ShapeDimSize1QAsymmS8, ShapeDimSize1Test<DataType::QAsymmS8>) -ARMNN_AUTO_TEST_CASE_WITH_THF(ShapeDimSize1BFloat16, ShapeDimSize1Test<DataType::BFloat16>) ARMNN_AUTO_TEST_CASE_WITH_THF(ShapeDimSize2Float16, ShapeDimSize2Test<DataType::Float16>) ARMNN_AUTO_TEST_CASE_WITH_THF(ShapeDimSize2Float32, ShapeDimSize2Test<DataType::Float32>) @@ -2148,7 +2057,6 @@ ARMNN_AUTO_TEST_CASE_WITH_THF(ShapeDimSize2Signed32, ShapeDimSize2Test<DataType: ARMNN_AUTO_TEST_CASE_WITH_THF(ShapeDimSize2QSymmS16, ShapeDimSize2Test<DataType::QSymmS16>) ARMNN_AUTO_TEST_CASE_WITH_THF(ShapeDimSize2QSymmS8, ShapeDimSize2Test<DataType::QSymmS8>) ARMNN_AUTO_TEST_CASE_WITH_THF(ShapeDimSize2QAsymmS8, ShapeDimSize2Test<DataType::QAsymmS8>) -ARMNN_AUTO_TEST_CASE_WITH_THF(ShapeDimSize2BFloat16, ShapeDimSize2Test<DataType::BFloat16>) ARMNN_AUTO_TEST_CASE_WITH_THF(ShapeDimSize3Float16, ShapeDimSize3Test<DataType::Float16>) ARMNN_AUTO_TEST_CASE_WITH_THF(ShapeDimSize3Float32, ShapeDimSize3Test<DataType::Float32>) @@ -2157,7 +2065,6 @@ ARMNN_AUTO_TEST_CASE_WITH_THF(ShapeDimSize3Signed32, ShapeDimSize3Test<DataType: ARMNN_AUTO_TEST_CASE_WITH_THF(ShapeDimSize3QSymmS16, ShapeDimSize3Test<DataType::QSymmS16>) ARMNN_AUTO_TEST_CASE_WITH_THF(ShapeDimSize3QSymmS8, ShapeDimSize3Test<DataType::QSymmS8>) ARMNN_AUTO_TEST_CASE_WITH_THF(ShapeDimSize3QAsymmS8, ShapeDimSize3Test<DataType::QAsymmS8>) -ARMNN_AUTO_TEST_CASE_WITH_THF(ShapeDimSize3BFloat16, ShapeDimSize3Test<DataType::BFloat16>) ARMNN_AUTO_TEST_CASE_WITH_THF(ShapeDimSize4Float16, ShapeDimSize4Test<DataType::Float16>) ARMNN_AUTO_TEST_CASE_WITH_THF(ShapeDimSize4Float32, ShapeDimSize4Test<DataType::Float32>) @@ -2166,7 +2073,6 @@ ARMNN_AUTO_TEST_CASE_WITH_THF(ShapeDimSize4Signed32, ShapeDimSize4Test<DataType: ARMNN_AUTO_TEST_CASE_WITH_THF(ShapeDimSize4QSymmS16, ShapeDimSize4Test<DataType::QSymmS16>) ARMNN_AUTO_TEST_CASE_WITH_THF(ShapeDimSize4QSymmS8, ShapeDimSize4Test<DataType::QSymmS8>) ARMNN_AUTO_TEST_CASE_WITH_THF(ShapeDimSize4QAsymmS8, ShapeDimSize4Test<DataType::QAsymmS8>) -ARMNN_AUTO_TEST_CASE_WITH_THF(ShapeDimSize4BFloat16, ShapeDimSize4Test<DataType::BFloat16>) // SpaceToDepth ARMNN_AUTO_TEST_CASE_WITH_THF(SpaceToDepthNchwAsymmQ8, SpaceToDepthNchwAsymmQ8Test) diff --git a/src/backends/reference/workloads/BaseIterator.hpp b/src/backends/reference/workloads/BaseIterator.hpp index e09371fd96..2d27951b73 100644 --- a/src/backends/reference/workloads/BaseIterator.hpp +++ 
b/src/backends/reference/workloads/BaseIterator.hpp @@ -260,44 +260,6 @@ private: }; -class BFloat16Decoder : public TypedIterator<const BFloat16, Decoder<float>> -{ -public: - BFloat16Decoder(const BFloat16* data) - : TypedIterator(data) {} - - BFloat16Decoder() - : BFloat16Decoder(nullptr) {} - - float Get() const override - { - float val = 0.f; - armnnUtils::FloatingPointConverter::ConvertBFloat16ToFloat32(m_Iterator, 1, &val); - return val; - } - std::vector<float> DecodeTensor (const TensorShape& tensorShape, - const bool isDepthwise) override - { - IgnoreUnused(isDepthwise); - - const unsigned int size = tensorShape.GetNumElements(); - std::vector<float> decodedTensor; - decodedTensor.reserve(size); - - for (uint32_t i = 0; i < size; ++i) - { - this->operator[](i); - - float val = 0.f; - armnnUtils::FloatingPointConverter::ConvertBFloat16ToFloat32(m_Iterator, 1, &val); - decodedTensor.emplace_back(val); - } - - return decodedTensor; - } - -}; - class Float16Decoder : public TypedIterator<const Half, Decoder<float>> { public: @@ -624,28 +586,6 @@ private: const int32_t m_Offset; }; -class BFloat16Encoder : public TypedIterator<armnn::BFloat16, Encoder<float>> -{ -public: - BFloat16Encoder(armnn::BFloat16* data) - : TypedIterator(data) {} - - BFloat16Encoder() - : BFloat16Encoder(nullptr) {} - - void Set(float right) override - { - armnnUtils::FloatingPointConverter::ConvertFloat32ToBFloat16(&right, 1, m_Iterator); - } - - float Get() const override - { - float val = 0.f; - armnnUtils::FloatingPointConverter::ConvertBFloat16ToFloat32(m_Iterator, 1, &val); - return val; - } -}; - class Float16Encoder : public TypedIterator<Half, Encoder<float>> { public: diff --git a/src/backends/reference/workloads/CMakeLists.txt b/src/backends/reference/workloads/CMakeLists.txt index b8835e3cdb..de6c042959 100644 --- a/src/backends/reference/workloads/CMakeLists.txt +++ b/src/backends/reference/workloads/CMakeLists.txt @@ -88,12 +88,8 @@ list(APPEND armnnRefBackendWorkloads_sources RefConcatWorkload.hpp RefConstantWorkload.cpp RefConstantWorkload.hpp - RefConvertBf16ToFp32Workload.cpp - RefConvertBf16ToFp32Workload.hpp RefConvertFp16ToFp32Workload.cpp RefConvertFp16ToFp32Workload.hpp - RefConvertFp32ToBf16Workload.cpp - RefConvertFp32ToBf16Workload.hpp RefConvertFp32ToFp16Workload.cpp RefConvertFp32ToFp16Workload.hpp RefConvolution2dWorkload.cpp diff --git a/src/backends/reference/workloads/Decoders.hpp b/src/backends/reference/workloads/Decoders.hpp index c2a456bfce..54e7008d50 100644 --- a/src/backends/reference/workloads/Decoders.hpp +++ b/src/backends/reference/workloads/Decoders.hpp @@ -88,10 +88,6 @@ inline std::unique_ptr<Decoder<float>> MakeDecoder(const TensorInfo& info, const info.GetQuantizationScale(), info.GetQuantizationOffset()); } - case DataType::BFloat16: - { - return std::make_unique<BFloat16Decoder>(static_cast<const BFloat16*>(data)); - } case DataType::Float16: { return std::make_unique<Float16Decoder>(static_cast<const Half*>(data)); diff --git a/src/backends/reference/workloads/Encoders.hpp b/src/backends/reference/workloads/Encoders.hpp index a7be9e172b..d6d611494d 100644 --- a/src/backends/reference/workloads/Encoders.hpp +++ b/src/backends/reference/workloads/Encoders.hpp @@ -65,10 +65,6 @@ inline std::unique_ptr<Encoder<float>> MakeEncoder(const TensorInfo& info, void* { return std::make_unique<Int32Encoder>(static_cast<int32_t*>(data)); } - case armnn::DataType::BFloat16: - { - return std::make_unique<BFloat16Encoder>(static_cast<armnn::BFloat16*>(data)); - } case 
armnn::DataType::Float16: { return std::make_unique<Float16Encoder>(static_cast<Half*>(data)); diff --git a/src/backends/reference/workloads/RefConvertBf16ToFp32Workload.cpp b/src/backends/reference/workloads/RefConvertBf16ToFp32Workload.cpp deleted file mode 100644 index 2fe2eafb9b..0000000000 --- a/src/backends/reference/workloads/RefConvertBf16ToFp32Workload.cpp +++ /dev/null @@ -1,39 +0,0 @@ -// -// Copyright © 2022 Arm Ltd and Contributors. All rights reserved. -// SPDX-License-Identifier: MIT -// - -#include "RefConvertBf16ToFp32Workload.hpp" -#include "RefWorkloadUtils.hpp" - -#include <armnnUtils/FloatingPointConverter.hpp> - -#include <BFloat16.hpp> - -namespace armnn -{ - -void RefConvertBf16ToFp32Workload::Execute() const -{ - Execute(m_Data.m_Inputs, m_Data.m_Outputs); -} - -void RefConvertBf16ToFp32Workload::ExecuteAsync(ExecutionData& executionData) -{ - WorkingMemDescriptor* workingMemDescriptor = static_cast<WorkingMemDescriptor*>(executionData.m_Data); - Execute(workingMemDescriptor->m_Inputs, workingMemDescriptor->m_Outputs); -} - -void RefConvertBf16ToFp32Workload::Execute(std::vector<ITensorHandle*> inputs, - std::vector<ITensorHandle*> outputs) const -{ - ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefConvertBf16ToFp32Workload_Execute"); - - const BFloat16* const input = reinterpret_cast<const BFloat16*>(inputs[0]->Map()); - float* const output = reinterpret_cast<float*>(outputs[0]->Map()); - - unsigned int numElements = GetTensorInfo(inputs[0]).GetNumElements(); - armnnUtils::FloatingPointConverter::ConvertBFloat16ToFloat32(input, numElements, output); -} - -} //namespace armnn diff --git a/src/backends/reference/workloads/RefConvertBf16ToFp32Workload.hpp b/src/backends/reference/workloads/RefConvertBf16ToFp32Workload.hpp deleted file mode 100644 index 24dcb0f682..0000000000 --- a/src/backends/reference/workloads/RefConvertBf16ToFp32Workload.hpp +++ /dev/null @@ -1,24 +0,0 @@ -// -// Copyright © 2022 Arm Ltd and Contributors. All rights reserved. -// SPDX-License-Identifier: MIT -// - -#pragma once - -#include "RefBaseWorkload.hpp" -#include <armnn/backends/WorkloadData.hpp> - -namespace armnn -{ - -class RefConvertBf16ToFp32Workload : public BFloat16ToFloat32Workload<ConvertBf16ToFp32QueueDescriptor> -{ -public: - using BFloat16ToFloat32Workload<ConvertBf16ToFp32QueueDescriptor>::BFloat16ToFloat32Workload; - void Execute() const override; - void ExecuteAsync(ExecutionData& executionData) override; -private: - void Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const; -}; - -} //namespace armnn diff --git a/src/backends/reference/workloads/RefConvertFp32ToBf16Workload.cpp b/src/backends/reference/workloads/RefConvertFp32ToBf16Workload.cpp deleted file mode 100644 index 71ee95b2aa..0000000000 --- a/src/backends/reference/workloads/RefConvertFp32ToBf16Workload.cpp +++ /dev/null @@ -1,39 +0,0 @@ -// -// Copyright © 2022 Arm Ltd and Contributors. All rights reserved. 
-// SPDX-License-Identifier: MIT -// - -#include "RefConvertFp32ToBf16Workload.hpp" -#include "RefWorkloadUtils.hpp" - -#include <armnnUtils/FloatingPointConverter.hpp> - -#include <BFloat16.hpp> - -namespace armnn -{ - -void RefConvertFp32ToBf16Workload::Execute() const -{ - Execute(m_Data.m_Inputs, m_Data.m_Outputs); -} - -void RefConvertFp32ToBf16Workload::ExecuteAsync(ExecutionData& executionData) -{ - WorkingMemDescriptor* workingMemDescriptor = static_cast<WorkingMemDescriptor*>(executionData.m_Data); - Execute(workingMemDescriptor->m_Inputs, workingMemDescriptor->m_Outputs); -} - -void RefConvertFp32ToBf16Workload::Execute(std::vector<ITensorHandle*> inputs, - std::vector<ITensorHandle*> outputs) const -{ - ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefConvertFp32ToBf16Workload_Execute"); - - const float* const input = reinterpret_cast<const float*>(inputs[0]->Map()); - BFloat16* const output = reinterpret_cast<BFloat16*>(outputs[0]->Map()); - - unsigned int numElements = GetTensorInfo(inputs[0]).GetNumElements(); - armnnUtils::FloatingPointConverter::ConvertFloat32ToBFloat16(input, numElements, output); -} - -} //namespace armnn diff --git a/src/backends/reference/workloads/RefConvertFp32ToBf16Workload.hpp b/src/backends/reference/workloads/RefConvertFp32ToBf16Workload.hpp deleted file mode 100644 index c1e57ec37e..0000000000 --- a/src/backends/reference/workloads/RefConvertFp32ToBf16Workload.hpp +++ /dev/null @@ -1,24 +0,0 @@ -// -// Copyright © 2022 Arm Ltd and Contributors. All rights reserved. -// SPDX-License-Identifier: MIT -// - -#pragma once - -#include "RefBaseWorkload.hpp" -#include <armnn/backends/WorkloadData.hpp> - -namespace armnn -{ - -class RefConvertFp32ToBf16Workload : public Float32ToBFloat16Workload<ConvertFp32ToBf16QueueDescriptor> -{ -public: - using Float32ToBFloat16Workload<ConvertFp32ToBf16QueueDescriptor>::Float32ToBFloat16Workload; - void Execute() const override; - void ExecuteAsync(ExecutionData& executionData) override; -private: - void Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const; -}; - -} //namespace armnn diff --git a/src/backends/reference/workloads/RefWorkloads.hpp b/src/backends/reference/workloads/RefWorkloads.hpp index e049d8db2c..afed71bfff 100644 --- a/src/backends/reference/workloads/RefWorkloads.hpp +++ b/src/backends/reference/workloads/RefWorkloads.hpp @@ -17,9 +17,7 @@ #include "RefConvolution3dWorkload.hpp" #include "RefConstantWorkload.hpp" #include "RefConcatWorkload.hpp" -#include "RefConvertBf16ToFp32Workload.hpp" #include "RefConvertFp16ToFp32Workload.hpp" -#include "RefConvertFp32ToBf16Workload.hpp" #include "RefConvertFp32ToFp16Workload.hpp" #include "RefDebugWorkload.hpp" #include "RefDepthToSpaceWorkload.hpp" |
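Note on the removed conversion workloads: as the deleted sources above show, RefConvertBf16ToFp32Workload and RefConvertFp32ToBf16Workload did nothing more than map the input and output tensors and run an elementwise conversion over GetNumElements() values through armnnUtils::FloatingPointConverter. The short, self-contained C++ sketch below illustrates the BFloat16 bit manipulation such a conversion relies on; it is an illustration only, not the Arm NN converter, and it truncates on the fp32-to-bf16 path where a production converter would normally round to nearest even.

// Minimal standalone sketch of elementwise fp32 <-> bf16 conversion.
// This is NOT armnnUtils::FloatingPointConverter; it is an assumed,
// simplified illustration (truncating fp32 -> bf16 instead of rounding).
#include <cstdint>
#include <cstdio>
#include <cstring>
#include <vector>

// Convert one IEEE-754 binary32 value to bfloat16 by truncation:
// keep the sign bit, the 8 exponent bits and the top 7 mantissa bits.
static uint16_t Fp32ToBf16(float value)
{
    uint32_t bits;
    std::memcpy(&bits, &value, sizeof(bits)); // safe type pun
    return static_cast<uint16_t>(bits >> 16);
}

// Convert one bfloat16 value back to binary32; this direction is exact,
// because the low 16 mantissa bits are simply zero-filled.
static float Bf16ToFp32(uint16_t value)
{
    uint32_t bits = static_cast<uint32_t>(value) << 16;
    float result;
    std::memcpy(&result, &bits, sizeof(result));
    return result;
}

int main()
{
    const std::vector<float> input = { 1.0f, -2.5f, 3.1415926f, 65504.0f };

    // fp32 -> bf16 -> fp32 round trip, elementwise, mirroring what a pair of
    // conversion layers would have done around a bf16 computation.
    for (float in : input)
    {
        uint16_t bf16 = Fp32ToBf16(in);
        float back    = Bf16ToFp32(bf16);
        std::printf("fp32 %.7f -> bf16 0x%04x -> fp32 %.7f\n",
                    static_cast<double>(in),
                    static_cast<unsigned>(bf16),
                    static_cast<double>(back));
    }
    return 0;
}

Because BFloat16 keeps the full 8-bit exponent of binary32, the bf16-to-fp32 direction loses nothing; only the fp32-to-bf16 direction discards mantissa precision, which is why the round trip above reproduces small integers exactly but not values such as pi.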