diff options
author | Ryan OShea <ryan.oshea3@arm.com> | 2022-11-07 16:20:48 +0000 |
---|---|---|
committer | ryan.oshea3 <ryan.oshea3@arm.com> | 2022-11-16 15:22:50 +0000 |
commit | 31441595009182c985dacbedc70c41ee6664d070 (patch) | |
tree | 248a85295aeff4022c9b395fc97748b0a0aa6b35 /src/backends/backendsCommon/test | |
parent | bd18eab07a8f30492de1e462b1815189014cb8d5 (diff) | |
download | armnn-31441595009182c985dacbedc70c41ee6664d070.tar.gz |
IVGCVSW-7214 Disable BF16-Turbo-Mode and remove conversion layers
- Remove Bf16ToFp32 Conversion Layer
- Remove Fp32ToBf16 Conversion Layer
- Remove BF16 Conversion tests
* Throw exception if m_ReduceFp32ToBf16 optimizer option is set to true
* Provide comments to enable fast math in order to use bf16
* Update docs to inform users to enable fast math for bf16
Execute Network Changes
* Require bf16_turbo_mode to also have fast_math_enabled set to true
- Remove setting m_ReduceFp32ToBf16 optimizer option
Signed-off-by: Ryan OShea <ryan.oshea3@arm.com>
Change-Id: Ibaa6da9d29c96a1ce32ff5196b0847fde9f04a1c
Diffstat (limited to 'src/backends/backendsCommon/test')
7 files changed, 0 insertions, 192 deletions
diff --git a/src/backends/backendsCommon/test/CMakeLists.txt b/src/backends/backendsCommon/test/CMakeLists.txt index 5e283990f7..232226b5df 100644 --- a/src/backends/backendsCommon/test/CMakeLists.txt +++ b/src/backends/backendsCommon/test/CMakeLists.txt @@ -83,12 +83,8 @@ list(APPEND armnnBackendsCommonUnitTests_sources layerTests/Conv2dTestImpl.hpp layerTests/Conv3dTestImpl.cpp layerTests/Conv3dTestImpl.hpp - layerTests/ConvertBf16ToFp32TestImpl.cpp - layerTests/ConvertBf16ToFp32TestImpl.hpp layerTests/ConvertFp16ToFp32TestImpl.cpp layerTests/ConvertFp16ToFp32TestImpl.hpp - layerTests/ConvertFp32ToBf16TestImpl.cpp - layerTests/ConvertFp32ToBf16TestImpl.hpp layerTests/ConvertFp32ToFp16TestImpl.cpp layerTests/ConvertFp32ToFp16TestImpl.hpp layerTests/DebugTestImpl.cpp diff --git a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp index 5fdcd9c57a..18f11a542e 100644 --- a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp +++ b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp @@ -630,12 +630,8 @@ DECLARE_LAYER_POLICY_2_PARAM(Concat) DECLARE_LAYER_POLICY_1_PARAM(Constant) -DECLARE_LAYER_POLICY_1_PARAM(ConvertBf16ToFp32) - DECLARE_LAYER_POLICY_1_PARAM(ConvertFp16ToFp32) -DECLARE_LAYER_POLICY_1_PARAM(ConvertFp32ToBf16) - DECLARE_LAYER_POLICY_1_PARAM(ConvertFp32ToFp16) DECLARE_LAYER_POLICY_2_PARAM(Convolution2d) diff --git a/src/backends/backendsCommon/test/LayerTests.hpp b/src/backends/backendsCommon/test/LayerTests.hpp index 25435b24ec..00bfea5452 100644 --- a/src/backends/backendsCommon/test/LayerTests.hpp +++ b/src/backends/backendsCommon/test/LayerTests.hpp @@ -16,9 +16,7 @@ #include <backendsCommon/test/layerTests/ChannelShuffleTestImpl.hpp> #include <backendsCommon/test/layerTests/ComparisonTestImpl.hpp> #include <backendsCommon/test/layerTests/ConcatTestImpl.hpp> -#include <backendsCommon/test/layerTests/ConvertBf16ToFp32TestImpl.hpp> #include 
<backendsCommon/test/layerTests/ConvertFp16ToFp32TestImpl.hpp> -#include <backendsCommon/test/layerTests/ConvertFp32ToBf16TestImpl.hpp> #include <backendsCommon/test/layerTests/ConvertFp32ToFp16TestImpl.hpp> #include <backendsCommon/test/layerTests/Conv2dTestImpl.hpp> #include <backendsCommon/test/layerTests/Conv3dTestImpl.hpp> diff --git a/src/backends/backendsCommon/test/layerTests/ConvertBf16ToFp32TestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ConvertBf16ToFp32TestImpl.cpp deleted file mode 100644 index 0dd8b598ac..0000000000 --- a/src/backends/backendsCommon/test/layerTests/ConvertBf16ToFp32TestImpl.cpp +++ /dev/null @@ -1,62 +0,0 @@ -// -// Copyright © 2020 Arm Ltd and Contributors. All rights reserved. -// SPDX-License-Identifier: MIT -// - -#include "ConvertBf16ToFp32TestImpl.hpp" - -#include <armnnTestUtils/TensorCopyUtils.hpp> -#include <armnnTestUtils/WorkloadTestUtils.hpp> - -#include <armnnTestUtils/TensorHelpers.hpp> - -LayerTestResult<float, 4> ConvertBf16ToFp32Test( - armnn::IWorkloadFactory& workloadFactory, - const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, - const armnn::ITensorHandleFactory& tensorHandleFactory) -{ - IgnoreUnused(memoryManager); - - const armnn::TensorInfo inputTensorInfo({1, 3, 2, 3}, armnn::DataType::BFloat16); - const armnn::TensorInfo outputTensorInfo({1, 3, 2, 3}, armnn::DataType::Float32); - - std::vector<armnn::BFloat16> inputValues = armnnUtils::QuantizedVector<armnn::BFloat16>( - { - -37.5f, -15.2f, -8.76f, -2.0f, -1.5f, -1.3f, -0.5f, -0.4f, 0.0f, - 1.0f, 0.4f, 0.5f, 1.3f, 1.5f, 2.0f, 8.76f, 15.2f, 37.5f - }, - 1.0f, 0); - - std::vector<float> actualOutput(outputTensorInfo.GetNumElements()); - std::vector<float> expectedOutput = - { - -37.5f, -15.2f, -8.76f, -2.0f, -1.5f, -1.3f, -0.5f, -0.4f, 0.0f, - 1.0f, 0.4f, 0.5f, 1.3f, 1.5f, 2.0f, 8.76f, 15.2f, 37.5f - }; - - std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo); - 
std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo); - - armnn::ConvertBf16ToFp32QueueDescriptor data; - armnn::WorkloadInfo info; - AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get()); - AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get()); - - std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::ConvertBf16ToFp32, - data, - info); - - inputHandle->Allocate(); - outputHandle->Allocate(); - - CopyDataToITensorHandle(inputHandle.get(), inputValues.data()); - - workload->Execute(); - - CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get()); - - return LayerTestResult<float, 4>(actualOutput, - expectedOutput, - outputHandle->GetShape(), - outputTensorInfo.GetShape()); -} diff --git a/src/backends/backendsCommon/test/layerTests/ConvertBf16ToFp32TestImpl.hpp b/src/backends/backendsCommon/test/layerTests/ConvertBf16ToFp32TestImpl.hpp deleted file mode 100644 index bcb0d6f124..0000000000 --- a/src/backends/backendsCommon/test/layerTests/ConvertBf16ToFp32TestImpl.hpp +++ /dev/null @@ -1,18 +0,0 @@ -// -// Copyright © 2020 Arm Ltd. All rights reserved. 
-// SPDX-License-Identifier: MIT -// - -#pragma once - -#include <armnnTestUtils/LayerTestResult.hpp> - -#include <BFloat16.hpp> - -#include <armnn/backends/IBackendInternal.hpp> -#include <armnn/backends/WorkloadFactory.hpp> - -LayerTestResult<float, 4> ConvertBf16ToFp32Test( - armnn::IWorkloadFactory& workloadFactory, - const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, - const armnn::ITensorHandleFactory& tensorHandleFactory); diff --git a/src/backends/backendsCommon/test/layerTests/ConvertFp32ToBf16TestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ConvertFp32ToBf16TestImpl.cpp deleted file mode 100644 index 5ee8f1dd9a..0000000000 --- a/src/backends/backendsCommon/test/layerTests/ConvertFp32ToBf16TestImpl.cpp +++ /dev/null @@ -1,84 +0,0 @@ -// -// Copyright © 2020 Arm Ltd and Contributors. All rights reserved. -// SPDX-License-Identifier: MIT -// - -#include "ConvertFp32ToBf16TestImpl.hpp" - -#include <armnnTestUtils/TensorCopyUtils.hpp> -#include <armnnTestUtils/WorkloadTestUtils.hpp> - -#include <armnnTestUtils/TensorHelpers.hpp> - -LayerTestResult<armnn::BFloat16, 4> ConvertFp32ToBf16Test( - armnn::IWorkloadFactory& workloadFactory, - const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, - const armnn::ITensorHandleFactory& tensorHandleFactory) -{ - IgnoreUnused(memoryManager); - - const armnn::TensorInfo inputTensorInfo({1, 2, 4, 3}, armnn::DataType::Float32); - const armnn::TensorInfo outputTensorInfo({1, 2, 4, 3}, armnn::DataType::BFloat16); - - std::vector<float> input = - { - -37.5f, -15.2f, -8.76f, - -2.0f, -1.5f, -1.3f, - -0.5f, -0.4f, 0.0f, - 1.0f, 0.4f, 0.5f, - 1.3f, 1.5f, 2.0f, - 8.76f, 15.2f, 37.5f, - 3.8f, // 0x40733333 Round down - 3.1055E+29f, // 0x707ADC3C Round up - 9.149516E-10f, // 0x307B7FFF Round down - -3.8f, // 0xC0733333 Round down - -3.1055E+29f, // 0xF07ADC3C Round up - -9.149516E-10f // 0xB07B7FFF Round down - }; - - std::vector<armnn::BFloat16> expectedOutput = 
armnnUtils::QuantizedVector<armnn::BFloat16>( - { - -37.5f, -15.2f, -8.76f, - -2.0f, -1.5f, -1.3f, - -0.5f, -0.4f, 0.0f, - 1.0f, 0.4f, 0.5f, - 1.3f, 1.5f, 2.0f, - 8.76f, 15.2f, 37.5f, - 3.796875f, // 0x4073 - 3.1072295E29f, // 0x707B - 9.131327E-10f, // 0x307B - -3.796875f, // 0xC073 - -3.1072295E29f, // 0xF07B - -9.131327E-10f // 0xB07B - }, - 1.0f, 0); - - std::vector<armnn::BFloat16> actualOutput(outputTensorInfo.GetNumElements()); - - std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo); - std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo); - - armnn::ConvertFp32ToBf16QueueDescriptor data; - armnn::WorkloadInfo info; - AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get()); - AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get()); - - std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::ConvertFp32ToBf16, - data, - info); - - inputHandle->Allocate(); - outputHandle->Allocate(); - - CopyDataToITensorHandle(inputHandle.get(), input.data()); - - workload->Execute(); - - CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get()); - - return LayerTestResult<armnn::BFloat16, 4>(actualOutput, - expectedOutput, - outputHandle->GetShape(), - outputTensorInfo.GetShape()); - -} diff --git a/src/backends/backendsCommon/test/layerTests/ConvertFp32ToBf16TestImpl.hpp b/src/backends/backendsCommon/test/layerTests/ConvertFp32ToBf16TestImpl.hpp deleted file mode 100644 index c2286d9c41..0000000000 --- a/src/backends/backendsCommon/test/layerTests/ConvertFp32ToBf16TestImpl.hpp +++ /dev/null @@ -1,18 +0,0 @@ -// -// Copyright © 2020 Arm Ltd. All rights reserved. 
-// SPDX-License-Identifier: MIT -// - -#pragma once - -#include <armnnTestUtils/LayerTestResult.hpp> - -#include <BFloat16.hpp> - -#include <armnn/backends/IBackendInternal.hpp> -#include <armnn/backends/WorkloadFactory.hpp> - -LayerTestResult<armnn::BFloat16, 4> ConvertFp32ToBf16Test( - armnn::IWorkloadFactory& workloadFactory, - const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, - const armnn::ITensorHandleFactory& tensorHandleFactory); |