Diffstat (limited to 'src/backends/backendsCommon')
12 files changed, 0 insertions, 301 deletions
diff --git a/src/backends/backendsCommon/LayerSupportBase.cpp b/src/backends/backendsCommon/LayerSupportBase.cpp
index 001037908d..26137f51b0 100644
--- a/src/backends/backendsCommon/LayerSupportBase.cpp
+++ b/src/backends/backendsCommon/LayerSupportBase.cpp
@@ -164,13 +164,6 @@ bool LayerSupportBase::IsConstantSupported(const TensorInfo&, // output
     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
 }
 
-bool LayerSupportBase::IsConvertBf16ToFp32Supported(const TensorInfo&, // input
-                                                    const TensorInfo&, // output
-                                                    Optional<std::string&> reasonIfUnsupported) const
-{
-    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
-}
-
 bool LayerSupportBase::IsConvertFp16ToFp32Supported(const TensorInfo&, // input
                                                     const TensorInfo&, // output
                                                     Optional<std::string&> reasonIfUnsupported) const
@@ -178,14 +171,6 @@ bool LayerSupportBase::IsConvertFp16ToFp32Supported(const TensorInfo&, // input
     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
 }
 
-bool LayerSupportBase::IsConvertFp32ToBf16Supported(const TensorInfo&, // input
-                                                    const TensorInfo&, // output
-                                                    Optional<std::string&> reasonIfUnsupported) const
-{
-    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
-}
-
-
 bool LayerSupportBase::IsConvertFp32ToFp16Supported(const TensorInfo&, // input
                                                     const TensorInfo&, // output
                                                     Optional<std::string&> reasonIfUnsupported) const
diff --git a/src/backends/backendsCommon/LayerSupportBase.hpp b/src/backends/backendsCommon/LayerSupportBase.hpp
index b18af35967..acf24a2d3a 100644
--- a/src/backends/backendsCommon/LayerSupportBase.hpp
+++ b/src/backends/backendsCommon/LayerSupportBase.hpp
@@ -83,19 +83,11 @@ public:
     bool IsConstantSupported(const TensorInfo& output,
                              Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
-    bool IsConvertBf16ToFp32Supported(const TensorInfo& input,
-                                      const TensorInfo& output,
-                                      Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
-
     ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "23.08")
     bool IsConvertFp16ToFp32Supported(const TensorInfo& input,
                                       const TensorInfo& output,
                                       Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
-    bool IsConvertFp32ToBf16Supported(const TensorInfo& input,
-                                      const TensorInfo& output,
-                                      Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
-
     ARMNN_DEPRECATED_MSG_REMOVAL_DATE("This method is deprecated. Use IsLayerSupported instead.", "23.08")
     bool IsConvertFp32ToFp16Supported(
         const TensorInfo& input,
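Note for callers migrating off the removed Bf16 queries (and the now-deprecated per-layer methods): the replacement is the generic ILayerSupport::IsLayerSupported overload named in the deprecation message. A minimal caller-side sketch, assuming the overload taking a LayerType, a vector of TensorInfos and a BaseDescriptor as declared in armnn/backends/ILayerSupport.hpp; the helper name CanConvertFp16ToFp32 is hypothetical, and the exact parameter list should be checked against the header for your Arm NN version:

#include <armnn/Types.hpp>
#include <armnn/backends/ILayerSupport.hpp>

// Hypothetical helper: query support through the generic overload instead
// of the removed/deprecated per-layer methods.
bool CanConvertFp16ToFp32(const armnn::ILayerSupport& support,
                          const armnn::TensorInfo& input,
                          const armnn::TensorInfo& output)
{
    std::string reason;
    return support.IsLayerSupported(armnn::LayerType::ConvertFp16ToFp32,
                                    {input, output},          // one TensorInfo per input/output slot
                                    armnn::BaseDescriptor(),  // this layer type has no descriptor
                                    armnn::EmptyOptional(),   // no LSTM parameters
                                    armnn::EmptyOptional(),   // no QuantizedLstm parameters
                                    armnn::Optional<std::string&>(reason));
}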
diff --git a/src/backends/backendsCommon/WorkloadData.cpp b/src/backends/backendsCommon/WorkloadData.cpp
index 753fe06edb..62dfc6a38b 100644
--- a/src/backends/backendsCommon/WorkloadData.cpp
+++ b/src/backends/backendsCommon/WorkloadData.cpp
@@ -2222,52 +2222,6 @@ void LstmQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
     }
 }
 
-void ConvertBf16ToFp32QueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
-{
-    const std::string descriptorName{"ConvertBf16ToFp32QueueDescriptor"};
-
-    ValidateNumInputs(workloadInfo, descriptorName, 1);
-    ValidateNumOutputs(workloadInfo, descriptorName, 1);
-
-    const TensorInfo& inputTensorInfo  = workloadInfo.m_InputTensorInfos[0];
-    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
-
-    if (inputTensorInfo.GetDataType() != DataType::BFloat16)
-    {
-        throw InvalidArgumentException(descriptorName + ": Input tensor type must be BFloat16.");
-    }
-
-    if (outputTensorInfo.GetDataType() != DataType::Float32)
-    {
-        throw InvalidArgumentException(descriptorName + ": Output tensor type must be Float32.");
-    }
-
-    ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
-}
-
-void ConvertFp32ToBf16QueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
-{
-    const std::string descriptorName{"ConvertFp32ToBf16QueueDescriptor"};
-
-    ValidateNumInputs(workloadInfo, descriptorName, 1);
-    ValidateNumOutputs(workloadInfo, descriptorName, 1);
-
-    const TensorInfo& inputTensorInfo  = workloadInfo.m_InputTensorInfos[0];
-    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
-
-    if (inputTensorInfo.GetDataType() != DataType::Float32)
-    {
-        throw InvalidArgumentException(descriptorName + ": Input tensor type must be Float32.");
-    }
-
-    if (outputTensorInfo.GetDataType() != DataType::BFloat16)
-    {
-        throw InvalidArgumentException(descriptorName + ": Output tensor type must be BFloat16.");
-    }
-
-    ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
-}
-
 void ConvertFp32ToFp16QueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
 {
     const std::string descriptorName{"ConvertFp32ToFp16QueueDescriptor"};
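Both deleted validators follow the same shape as the surviving ConvertFp32ToFp16QueueDescriptor::Validate visible above: check slot counts, then input/output data types, then that the shapes match. A minimal sketch of how such a validator is driven, using the surviving Fp32-to-Fp16 descriptor; the shapes are arbitrary, and the direct Validate call is for illustration (in practice the workload constructors invoke it):

#include <armnn/Tensor.hpp>
#include <armnn/backends/WorkloadData.hpp>

int main()
{
    armnn::WorkloadInfo info;
    info.m_InputTensorInfos.push_back(armnn::TensorInfo({1, 2, 2, 3}, armnn::DataType::Float32));
    info.m_OutputTensorInfos.push_back(armnn::TensorInfo({1, 2, 2, 3}, armnn::DataType::Float16));

    armnn::ConvertFp32ToFp16QueueDescriptor data;
    data.Validate(info); // throws InvalidArgumentException on a count, type, or shape mismatch
    return 0;
}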
diff --git a/src/backends/backendsCommon/WorkloadFactory.cpp b/src/backends/backendsCommon/WorkloadFactory.cpp
index 665ab3f86c..1283f67660 100644
--- a/src/backends/backendsCommon/WorkloadFactory.cpp
+++ b/src/backends/backendsCommon/WorkloadFactory.cpp
@@ -227,13 +227,6 @@ bool IWorkloadFactory::IsLayerConfigurationSupported(const BackendId& backendId,
             result = layerSupportObject.IsConstantSupported(OverrideDataType(output, dataType), reason);
             break;
         }
-        case LayerType::ConvertBf16ToFp32:
-        {
-            const TensorInfo& input  = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
-            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
-            result = layerSupportObject.IsConvertBf16ToFp32Supported(input, output, reason);
-            break;
-        }
         case LayerType::ConvertFp16ToFp32:
         {
             const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
@@ -241,13 +234,6 @@ bool IWorkloadFactory::IsLayerConfigurationSupported(const BackendId& backendId,
             result = layerSupportObject.IsConvertFp16ToFp32Supported(input, output, reason);
             break;
         }
-        case LayerType::ConvertFp32ToBf16:
-        {
-            const TensorInfo& input  = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
-            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
-            result = layerSupportObject.IsConvertFp32ToBf16Supported(input, output, reason);
-            break;
-        }
         case LayerType::ConvertFp32ToFp16:
         {
             const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
@@ -1630,24 +1616,12 @@ std::unique_ptr<IWorkload> IWorkloadFactory::CreateWorkload(LayerType type,
             auto constantQueueDescriptor = PolymorphicDowncast<const ConstantQueueDescriptor*>(&descriptor);
             return CreateConstant(*constantQueueDescriptor, info);
         }
-        case LayerType::ConvertBf16ToFp32 :
-        {
-            auto convertBf16ToFp32QueueDescriptor
-                = PolymorphicDowncast<const ConvertBf16ToFp32QueueDescriptor*>(&descriptor);
-            return CreateConvertBf16ToFp32(*convertBf16ToFp32QueueDescriptor, info);
-        }
         case LayerType::ConvertFp16ToFp32:
         {
             auto convertFp16ToFp32QueueDescriptor
                 = PolymorphicDowncast<const ConvertFp16ToFp32QueueDescriptor*>(&descriptor);
             return CreateConvertFp16ToFp32(*convertFp16ToFp32QueueDescriptor, info);
         }
-        case LayerType::ConvertFp32ToBf16:
-        {
-            auto convertFp32ToBf16QueueDescriptor
-                = PolymorphicDowncast<const ConvertFp32ToBf16QueueDescriptor*>(&descriptor);
-            return CreateConvertFp32ToBf16(*convertFp32ToBf16QueueDescriptor, info);
-        }
         case LayerType::ConvertFp32ToFp16:
         {
             auto convertFp32ToFp16QueueDescriptor
@@ -1992,24 +1966,12 @@ std::unique_ptr<IWorkload> IWorkloadFactory::CreateConstant(const ConstantQueueD
     return std::unique_ptr<IWorkload>();
 }
 
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateConvertBf16ToFp32(const ConvertBf16ToFp32QueueDescriptor& /*desc*/,
-                                                                     const WorkloadInfo& /*info*/) const
-{
-    return std::unique_ptr<IWorkload>();
-}
-
 std::unique_ptr<IWorkload> IWorkloadFactory::CreateConvertFp16ToFp32(const ConvertFp16ToFp32QueueDescriptor& /*desc*/,
                                                                      const WorkloadInfo& /*info*/) const
 {
     return std::unique_ptr<IWorkload>();
 }
 
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateConvertFp32ToBf16(const ConvertFp32ToBf16QueueDescriptor& /*desc*/,
-                                                                     const WorkloadInfo& /*info*/) const
-{
-    return std::unique_ptr<IWorkload>();
-}
-
 std::unique_ptr<IWorkload> IWorkloadFactory::CreateConvertFp32ToFp16(const ConvertFp32ToFp16QueueDescriptor& /*desc*/,
                                                                      const WorkloadInfo& /*info*/) const
 {
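With the Bf16 cases and their per-layer Create* stubs gone, conversion workloads are created solely through the generic CreateWorkload entry point, mirroring how the deleted tests further down invoke it. A small sketch; the wrapper name MakeFp16ToFp32Workload is hypothetical:

#include <armnn/backends/WorkloadData.hpp>
#include <armnn/backends/WorkloadFactory.hpp>

// Dispatch on LayerType instead of calling a dedicated factory method.
// Factories return a null pointer for workload types they do not support.
std::unique_ptr<armnn::IWorkload> MakeFp16ToFp32Workload(
    armnn::IWorkloadFactory& factory,
    const armnn::ConvertFp16ToFp32QueueDescriptor& data,
    const armnn::WorkloadInfo& info)
{
    return factory.CreateWorkload(armnn::LayerType::ConvertFp16ToFp32, data, info);
}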
diff --git a/src/backends/backendsCommon/common.mk b/src/backends/backendsCommon/common.mk
index 007cca57fa..3545331c8f 100644
--- a/src/backends/backendsCommon/common.mk
+++ b/src/backends/backendsCommon/common.mk
@@ -55,9 +55,7 @@ COMMON_TEST_SOURCES := \
     test/layerTests/ConstantTestImpl.cpp \
     test/layerTests/Conv2dTestImpl.cpp \
     test/layerTests/Conv3dTestImpl.cpp \
-    test/layerTests/ConvertBf16ToFp32TestImpl.cpp \
     test/layerTests/ConvertFp16ToFp32TestImpl.cpp \
-    test/layerTests/ConvertFp32ToBf16TestImpl.cpp \
     test/layerTests/ConvertFp32ToFp16TestImpl.cpp \
     test/layerTests/DebugTestImpl.cpp \
     test/layerTests/DepthToSpaceTestImpl.cpp \
diff --git a/src/backends/backendsCommon/test/CMakeLists.txt b/src/backends/backendsCommon/test/CMakeLists.txt
index 5e283990f7..232226b5df 100644
--- a/src/backends/backendsCommon/test/CMakeLists.txt
+++ b/src/backends/backendsCommon/test/CMakeLists.txt
@@ -83,12 +83,8 @@ list(APPEND armnnBackendsCommonUnitTests_sources
     layerTests/Conv2dTestImpl.hpp
     layerTests/Conv3dTestImpl.cpp
     layerTests/Conv3dTestImpl.hpp
-    layerTests/ConvertBf16ToFp32TestImpl.cpp
-    layerTests/ConvertBf16ToFp32TestImpl.hpp
     layerTests/ConvertFp16ToFp32TestImpl.cpp
     layerTests/ConvertFp16ToFp32TestImpl.hpp
-    layerTests/ConvertFp32ToBf16TestImpl.cpp
-    layerTests/ConvertFp32ToBf16TestImpl.hpp
     layerTests/ConvertFp32ToFp16TestImpl.cpp
     layerTests/ConvertFp32ToFp16TestImpl.hpp
     layerTests/DebugTestImpl.cpp
diff --git a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
index 5fdcd9c57a..18f11a542e 100644
--- a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
+++ b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
@@ -630,12 +630,8 @@ DECLARE_LAYER_POLICY_2_PARAM(Concat)
 
 DECLARE_LAYER_POLICY_1_PARAM(Constant)
 
-DECLARE_LAYER_POLICY_1_PARAM(ConvertBf16ToFp32)
-
 DECLARE_LAYER_POLICY_1_PARAM(ConvertFp16ToFp32)
 
-DECLARE_LAYER_POLICY_1_PARAM(ConvertFp32ToBf16)
-
 DECLARE_LAYER_POLICY_1_PARAM(ConvertFp32ToFp16)
 
 DECLARE_LAYER_POLICY_2_PARAM(Convolution2d)
diff --git a/src/backends/backendsCommon/test/LayerTests.hpp b/src/backends/backendsCommon/test/LayerTests.hpp
index 25435b24ec..00bfea5452 100644
--- a/src/backends/backendsCommon/test/LayerTests.hpp
+++ b/src/backends/backendsCommon/test/LayerTests.hpp
@@ -16,9 +16,7 @@
 #include <backendsCommon/test/layerTests/ChannelShuffleTestImpl.hpp>
 #include <backendsCommon/test/layerTests/ComparisonTestImpl.hpp>
 #include <backendsCommon/test/layerTests/ConcatTestImpl.hpp>
-#include <backendsCommon/test/layerTests/ConvertBf16ToFp32TestImpl.hpp>
 #include <backendsCommon/test/layerTests/ConvertFp16ToFp32TestImpl.hpp>
-#include <backendsCommon/test/layerTests/ConvertFp32ToBf16TestImpl.hpp>
 #include <backendsCommon/test/layerTests/ConvertFp32ToFp16TestImpl.hpp>
 #include <backendsCommon/test/layerTests/Conv2dTestImpl.hpp>
 #include <backendsCommon/test/layerTests/Conv3dTestImpl.hpp>
diff --git a/src/backends/backendsCommon/test/layerTests/ConvertBf16ToFp32TestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ConvertBf16ToFp32TestImpl.cpp
deleted file mode 100644
index 0dd8b598ac..0000000000
--- a/src/backends/backendsCommon/test/layerTests/ConvertBf16ToFp32TestImpl.cpp
+++ /dev/null
@@ -1,62 +0,0 @@
-//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "ConvertBf16ToFp32TestImpl.hpp"
-
-#include <armnnTestUtils/TensorCopyUtils.hpp>
-#include <armnnTestUtils/WorkloadTestUtils.hpp>
-
-#include <armnnTestUtils/TensorHelpers.hpp>
-
-LayerTestResult<float, 4> ConvertBf16ToFp32Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    const armnn::ITensorHandleFactory& tensorHandleFactory)
-{
-    IgnoreUnused(memoryManager);
-
-    const armnn::TensorInfo inputTensorInfo({1, 3, 2, 3}, armnn::DataType::BFloat16);
-    const armnn::TensorInfo outputTensorInfo({1, 3, 2, 3}, armnn::DataType::Float32);
-
-    std::vector<armnn::BFloat16> inputValues = armnnUtils::QuantizedVector<armnn::BFloat16>(
-        {
-            -37.5f, -15.2f, -8.76f, -2.0f, -1.5f, -1.3f, -0.5f, -0.4f, 0.0f,
-            1.0f, 0.4f, 0.5f, 1.3f, 1.5f, 2.0f, 8.76f, 15.2f, 37.5f
-        },
-        1.0f, 0);
-
-    std::vector<float> actualOutput(outputTensorInfo.GetNumElements());
-    std::vector<float> expectedOutput =
-    {
-        -37.5f, -15.2f, -8.76f, -2.0f, -1.5f, -1.3f, -0.5f, -0.4f, 0.0f,
-        1.0f, 0.4f, 0.5f, 1.3f, 1.5f, 2.0f, 8.76f, 15.2f, 37.5f
-    };
-
-    std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
-    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
-
-    armnn::ConvertBf16ToFp32QueueDescriptor data;
-    armnn::WorkloadInfo info;
-    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
-    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
-
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::ConvertBf16ToFp32,
-                                                                                data,
-                                                                                info);
-
-    inputHandle->Allocate();
-    outputHandle->Allocate();
-
-    CopyDataToITensorHandle(inputHandle.get(), inputValues.data());
-
-    workload->Execute();
-
-    CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
-
-    return LayerTestResult<float, 4>(actualOutput,
-                                     expectedOutput,
-                                     outputHandle->GetShape(),
-                                     outputTensorInfo.GetShape());
-}
diff --git a/src/backends/backendsCommon/test/layerTests/ConvertBf16ToFp32TestImpl.hpp b/src/backends/backendsCommon/test/layerTests/ConvertBf16ToFp32TestImpl.hpp
deleted file mode 100644
index bcb0d6f124..0000000000
--- a/src/backends/backendsCommon/test/layerTests/ConvertBf16ToFp32TestImpl.hpp
+++ /dev/null
@@ -1,18 +0,0 @@
-//
-// Copyright © 2020 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include <armnnTestUtils/LayerTestResult.hpp>
-
-#include <BFloat16.hpp>
-
-#include <armnn/backends/IBackendInternal.hpp>
-#include <armnn/backends/WorkloadFactory.hpp>
-
-LayerTestResult<float, 4> ConvertBf16ToFp32Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    const armnn::ITensorHandleFactory& tensorHandleFactory);
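The deleted Bf16-to-Fp32 test expects the input values back bit-exactly, which is guaranteed by the format itself: a bfloat16 is the high half of an IEEE-754 binary32, so widening just restores sixteen zero mantissa bits. A standalone illustration of that widening, independent of the armnn::BFloat16 class:

#include <cstdint>
#include <cstring>

// Widen a raw bfloat16 bit pattern to float by shifting it into the high
// half of a binary32; the sixteen dropped mantissa bits come back as zeros,
// so the conversion is lossless by construction.
float Bf16BitsToFloat(uint16_t bf16)
{
    uint32_t bits = static_cast<uint32_t>(bf16) << 16;
    float result;
    std::memcpy(&result, &bits, sizeof(result)); // bit-cast without aliasing UB
    return result;
}
// Example: Bf16BitsToFloat(0x4216) == 37.5f, the largest value in the test vector.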
diff --git a/src/backends/backendsCommon/test/layerTests/ConvertFp32ToBf16TestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ConvertFp32ToBf16TestImpl.cpp
deleted file mode 100644
index 5ee8f1dd9a..0000000000
--- a/src/backends/backendsCommon/test/layerTests/ConvertFp32ToBf16TestImpl.cpp
+++ /dev/null
@@ -1,84 +0,0 @@
-//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "ConvertFp32ToBf16TestImpl.hpp"
-
-#include <armnnTestUtils/TensorCopyUtils.hpp>
-#include <armnnTestUtils/WorkloadTestUtils.hpp>
-
-#include <armnnTestUtils/TensorHelpers.hpp>
-
-LayerTestResult<armnn::BFloat16, 4> ConvertFp32ToBf16Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    const armnn::ITensorHandleFactory& tensorHandleFactory)
-{
-    IgnoreUnused(memoryManager);
-
-    const armnn::TensorInfo inputTensorInfo({1, 2, 4, 3}, armnn::DataType::Float32);
-    const armnn::TensorInfo outputTensorInfo({1, 2, 4, 3}, armnn::DataType::BFloat16);
-
-    std::vector<float> input =
-    {
-        -37.5f, -15.2f, -8.76f,
-        -2.0f, -1.5f, -1.3f,
-        -0.5f, -0.4f, 0.0f,
-        1.0f, 0.4f, 0.5f,
-        1.3f, 1.5f, 2.0f,
-        8.76f, 15.2f, 37.5f,
-        3.8f, // 0x40733333 Round down
-        3.1055E+29f, // 0x707ADC3C Round up
-        9.149516E-10f, // 0x307B7FFF Round down
-        -3.8f, // 0xC0733333 Round down
-        -3.1055E+29f, // 0xF07ADC3C Round up
-        -9.149516E-10f // 0xB07B7FFF Round down
-    };
-
-    std::vector<armnn::BFloat16> expectedOutput = armnnUtils::QuantizedVector<armnn::BFloat16>(
-        {
-            -37.5f, -15.2f, -8.76f,
-            -2.0f, -1.5f, -1.3f,
-            -0.5f, -0.4f, 0.0f,
-            1.0f, 0.4f, 0.5f,
-            1.3f, 1.5f, 2.0f,
-            8.76f, 15.2f, 37.5f,
-            3.796875f, // 0x4073
-            3.1072295E29f, // 0x707B
-            9.131327E-10f, // 0x307B
-            -3.796875f, // 0xC073
-            -3.1072295E29f, // 0xF07B
-            -9.131327E-10f // 0xB07B
-        },
-        1.0f, 0);
-
-    std::vector<armnn::BFloat16> actualOutput(outputTensorInfo.GetNumElements());
-
-    std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
-    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
-
-    armnn::ConvertFp32ToBf16QueueDescriptor data;
-    armnn::WorkloadInfo info;
-    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
-    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
-
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::ConvertFp32ToBf16,
-                                                                                data,
-                                                                                info);
-
-    inputHandle->Allocate();
-    outputHandle->Allocate();
-
-    CopyDataToITensorHandle(inputHandle.get(), input.data());
-
-    workload->Execute();
-
-    CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
-
-    return LayerTestResult<armnn::BFloat16, 4>(actualOutput,
-                                               expectedOutput,
-                                               outputHandle->GetShape(),
-                                               outputTensorInfo.GetShape());
-
-}
diff --git a/src/backends/backendsCommon/test/layerTests/ConvertFp32ToBf16TestImpl.hpp b/src/backends/backendsCommon/test/layerTests/ConvertFp32ToBf16TestImpl.hpp
deleted file mode 100644
index c2286d9c41..0000000000
--- a/src/backends/backendsCommon/test/layerTests/ConvertFp32ToBf16TestImpl.hpp
+++ /dev/null
@@ -1,18 +0,0 @@
-//
-// Copyright © 2020 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include <armnnTestUtils/LayerTestResult.hpp>
-
-#include <BFloat16.hpp>
-
-#include <armnn/backends/IBackendInternal.hpp>
-#include <armnn/backends/WorkloadFactory.hpp>
-
-LayerTestResult<armnn::BFloat16, 4> ConvertFp32ToBf16Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    const armnn::ITensorHandleFactory& tensorHandleFactory);
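The hex comments in this deleted test pin down the rounding behaviour being removed: 3.8f (0x40733333) truncates down to 0x4073 (3.796875f), while 3.1055E+29f (0x707ADC3C) rounds up to 0x707B. A self-contained sketch of that round-to-nearest-even truncation, under the assumption that armnn::BFloat16 used the standard scheme (NaN handling omitted):

#include <cstdint>
#include <cstring>

// Narrow a float to a bfloat16 bit pattern with round-to-nearest-even:
// adding 0x7FFF plus the lowest surviving bit rounds halfway cases to even.
uint16_t FloatToBf16Bits(float value)
{
    uint32_t bits;
    std::memcpy(&bits, &value, sizeof(bits));
    uint32_t lsb = (bits >> 16) & 1u; // lowest bit kept after truncation
    return static_cast<uint16_t>((bits + 0x7FFFu + lsb) >> 16);
}
// FloatToBf16Bits(3.8f)        == 0x4073 -> 3.796875f (round down)
// FloatToBf16Bits(3.1055E+29f) == 0x707B              (round up)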