| author | Narumol Prangnawarat <narumol.prangnawarat@arm.com> | 2020-03-13 10:26:05 +0000 |
|---|---|---|
| committer | Jim Flynn <jim.flynn@arm.com> | 2020-03-17 20:56:46 +0000 |
| commit | 7ddbbae7ad3e0000d8e6a76458cac68254dc8048 (patch) | |
| tree | 43f6240df090b084528034358982e8f09706ef95 | /src/backends/backendsCommon |
| parent | f4a953f75b751452ae9303abc8565d310c55bfff (diff) | |
IVGCVSW-4515 Add ConvertBf16ToFp32Layer and Ref workload support
Signed-off-by: Narumol Prangnawarat <narumol.prangnawarat@arm.com>
Change-Id: Ida6d7e1d2c9abe0618f8b711bab9d62c011090d6
Diffstat (limited to 'src/backends/backendsCommon')
13 files changed, 139 insertions, 0 deletions
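BFloat16 keeps the sign bit, all 8 exponent bits, and the top 7 mantissa bits of an IEEE-754 binary32 value, so widening bf16 to fp32 is exact: the 16 stored bits become the upper half of the 32-bit word and the lower half is zeroed. A minimal standalone sketch of that conversion (illustrative only, not the Arm NN implementation; `Bf16ToFp32` is a hypothetical name):

```cpp
#include <cstdint>
#include <cstring>

// Hypothetical helper: widen a bf16 value, stored as its raw 16 bits,
// into a full binary32 float. The conversion is exact because bf16 is
// the upper half of a binary32.
float Bf16ToFp32(uint16_t bf16Bits)
{
    const uint32_t fp32Bits = static_cast<uint32_t>(bf16Bits) << 16;
    float result = 0.0f;
    std::memcpy(&result, &fp32Bits, sizeof(result)); // type-pun safely
    return result;
}
```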
```diff
diff --git a/src/backends/backendsCommon/LayerSupportBase.cpp b/src/backends/backendsCommon/LayerSupportBase.cpp
index e8ef46ecd8..1ac08afd7a 100644
--- a/src/backends/backendsCommon/LayerSupportBase.cpp
+++ b/src/backends/backendsCommon/LayerSupportBase.cpp
@@ -111,6 +111,13 @@ bool LayerSupportBase::IsConstantSupported(const TensorInfo& /*output*/,
     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
 }
 
+bool LayerSupportBase::IsConvertBf16ToFp32Supported(const TensorInfo& /*input*/,
+                                                    const TensorInfo& /*output*/,
+                                                    Optional<std::string&> reasonIfUnsupported) const
+{
+    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
+}
+
 bool LayerSupportBase::IsConvertFp16ToFp32Supported(const TensorInfo& /*input*/,
                                                     const TensorInfo& /*output*/,
                                                     Optional<std::string&> reasonIfUnsupported) const
diff --git a/src/backends/backendsCommon/LayerSupportBase.hpp b/src/backends/backendsCommon/LayerSupportBase.hpp
index 888bef5f89..59e8b969bd 100644
--- a/src/backends/backendsCommon/LayerSupportBase.hpp
+++ b/src/backends/backendsCommon/LayerSupportBase.hpp
@@ -61,6 +61,10 @@ public:
     bool IsConstantSupported(const TensorInfo& output,
                              Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    bool IsConvertBf16ToFp32Supported(const TensorInfo& input,
+                                      const TensorInfo& output,
+                                      Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
     bool IsConvertFp16ToFp32Supported(const TensorInfo& input,
                                       const TensorInfo& output,
                                       Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
diff --git a/src/backends/backendsCommon/Workload.hpp b/src/backends/backendsCommon/Workload.hpp
index 8ec09f98b6..d7434c0d01 100644
--- a/src/backends/backendsCommon/Workload.hpp
+++ b/src/backends/backendsCommon/Workload.hpp
@@ -177,6 +177,11 @@ using BaseUint8ComparisonWorkload = MultiTypedWorkload<QueueDescriptor,
                                                        armnn::DataType::Boolean>;
 
 template <typename QueueDescriptor>
+using BFloat16ToFloat32Workload = MultiTypedWorkload<QueueDescriptor,
+                                                     armnn::DataType::BFloat16,
+                                                     armnn::DataType::Float32>;
+
+template <typename QueueDescriptor>
 using Float16ToFloat32Workload = MultiTypedWorkload<QueueDescriptor,
                                                     armnn::DataType::Float16,
                                                     armnn::DataType::Float32>;
diff --git a/src/backends/backendsCommon/WorkloadData.cpp b/src/backends/backendsCommon/WorkloadData.cpp
index b501b3dbec..81aefa94e7 100644
--- a/src/backends/backendsCommon/WorkloadData.cpp
+++ b/src/backends/backendsCommon/WorkloadData.cpp
@@ -2016,6 +2016,29 @@ void LstmQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
     }
 }
 
+void ConvertBf16ToFp32QueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
+{
+    const std::string descriptorName{"ConvertBf16ToFp32QueueDescriptor"};
+
+    ValidateNumInputs(workloadInfo,  descriptorName, 1);
+    ValidateNumOutputs(workloadInfo, descriptorName, 1);
+
+    const TensorInfo& inputTensorInfo  = workloadInfo.m_InputTensorInfos[0];
+    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
+
+    if (inputTensorInfo.GetDataType() != DataType::BFloat16)
+    {
+        throw InvalidArgumentException(descriptorName + ": Input tensor type must be BFloat16.");
+    }
+
+    if (outputTensorInfo.GetDataType() != DataType::Float32)
+    {
+        throw InvalidArgumentException(descriptorName + ": Output tensor type must be Float32.");
+    }
+
+    ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
+}
+
 void ConvertFp32ToFp16QueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
 {
     const std::string descriptorName{"ConvertFp32ToFp16QueueDescriptor"};
```
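The new `BFloat16ToFloat32Workload` alias pins a workload's input type to `BFloat16` and its output type to `Float32`, mirroring the existing `Float16ToFloat32Workload`, while `ConvertBf16ToFp32QueueDescriptor::Validate` enforces the same pairing at runtime. A backend workload would build on the alias roughly as follows (a sketch with an assumed class name; the actual Ref workload lands with the reference-backend part of this change):

```cpp
#include <backendsCommon/Workload.hpp>
#include <backendsCommon/WorkloadData.hpp>

// Illustrative backend workload built on the new alias (assumed name,
// not part of this patch). The alias checks the BFloat16 -> Float32
// type pairing at construction; Execute() does the actual widening.
class RefConvertBf16ToFp32Workload
    : public armnn::BFloat16ToFloat32Workload<armnn::ConvertBf16ToFp32QueueDescriptor>
{
public:
    using Base = armnn::BFloat16ToFloat32Workload<armnn::ConvertBf16ToFp32QueueDescriptor>;
    using Base::Base; // inherit the (descriptor, info) constructor

    void Execute() const override
    {
        // Per-element bf16 -> fp32 widening copy; tensor-handle access omitted.
    }
};
```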
```diff
diff --git a/src/backends/backendsCommon/WorkloadData.hpp b/src/backends/backendsCommon/WorkloadData.hpp
index 06289fa039..9c392d3219 100644
--- a/src/backends/backendsCommon/WorkloadData.hpp
+++ b/src/backends/backendsCommon/WorkloadData.hpp
@@ -407,6 +407,11 @@ struct LstmQueueDescriptor : QueueDescriptorWithParameters<LstmDescriptor>
     void Validate(const WorkloadInfo& workloadInfo) const;
 };
 
+struct ConvertBf16ToFp32QueueDescriptor : QueueDescriptor
+{
+    void Validate(const WorkloadInfo& workloadInfo) const;
+};
+
 struct ConvertFp16ToFp32QueueDescriptor : QueueDescriptor
 {
     void Validate(const WorkloadInfo& workloadInfo) const;
diff --git a/src/backends/backendsCommon/WorkloadFactory.cpp b/src/backends/backendsCommon/WorkloadFactory.cpp
index 2e1ce0a674..d932eef49f 100644
--- a/src/backends/backendsCommon/WorkloadFactory.cpp
+++ b/src/backends/backendsCommon/WorkloadFactory.cpp
@@ -159,6 +159,13 @@ bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId,
             result = layerSupportObject->IsConstantSupported(OverrideDataType(output, dataType), reason);
             break;
         }
+        case LayerType::ConvertBf16ToFp32:
+        {
+            const TensorInfo& input  = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
+            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
+            result = layerSupportObject->IsConvertBf16ToFp32Supported(input, output, reason);
+            break;
+        }
         case LayerType::ConvertFp16ToFp32:
         {
             const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
@@ -1144,6 +1151,12 @@ std::unique_ptr<IWorkload> IWorkloadFactory::CreateConstant(const ConstantQueueD
     return std::unique_ptr<IWorkload>();
 }
 
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateConvertBf16ToFp32(const ConvertBf16ToFp32QueueDescriptor& /*desc*/,
+                                                                     const WorkloadInfo& /*info*/) const
+{
+    return std::unique_ptr<IWorkload>();
+}
+
 std::unique_ptr<IWorkload> IWorkloadFactory::CreateConvertFp16ToFp32(const ConvertFp16ToFp32QueueDescriptor& /*desc*/,
                                                                      const WorkloadInfo& /*info*/) const
 {
diff --git a/src/backends/backendsCommon/WorkloadFactory.hpp b/src/backends/backendsCommon/WorkloadFactory.hpp
index dae58b6d93..8c22452f2e 100644
--- a/src/backends/backendsCommon/WorkloadFactory.hpp
+++ b/src/backends/backendsCommon/WorkloadFactory.hpp
@@ -79,6 +79,9 @@ public:
     virtual std::unique_ptr<IWorkload> CreateConstant(const ConstantQueueDescriptor& descriptor,
                                                       const WorkloadInfo& info) const;
 
+    virtual std::unique_ptr<IWorkload> CreateConvertBf16ToFp32(const ConvertBf16ToFp32QueueDescriptor& descriptor,
+                                                               const WorkloadInfo& info) const;
+
     virtual std::unique_ptr<IWorkload> CreateConvertFp16ToFp32(const ConvertFp16ToFp32QueueDescriptor& descriptor,
                                                                const WorkloadInfo& info) const;
diff --git a/src/backends/backendsCommon/common.mk b/src/backends/backendsCommon/common.mk
index 56a21b386c..22de3db77c 100644
--- a/src/backends/backendsCommon/common.mk
+++ b/src/backends/backendsCommon/common.mk
@@ -44,6 +44,7 @@ COMMON_TEST_SOURCES := \
     test/layerTests/ConcatTestImpl.cpp \
     test/layerTests/ConstantTestImpl.cpp \
     test/layerTests/Conv2dTestImpl.cpp \
+    test/layerTests/ConvertBf16ToFp32TestImpl.cpp \
     test/layerTests/ConvertFp16ToFp32TestImpl.cpp \
     test/layerTests/ConvertFp32ToFp16TestImpl.cpp \
     test/layerTests/DebugTestImpl.cpp \
diff --git a/src/backends/backendsCommon/test/CMakeLists.txt b/src/backends/backendsCommon/test/CMakeLists.txt
index 0376e3e5e6..dc8031f6b4 100644
--- a/src/backends/backendsCommon/test/CMakeLists.txt
+++ b/src/backends/backendsCommon/test/CMakeLists.txt
@@ -68,6 +68,8 @@ list(APPEND armnnBackendsCommonUnitTests_sources
     layerTests/ConstantTestImpl.hpp
     layerTests/Conv2dTestImpl.cpp
     layerTests/Conv2dTestImpl.hpp
+    layerTests/ConvertBf16ToFp32TestImpl.cpp
+    layerTests/ConvertBf16ToFp32TestImpl.hpp
     layerTests/ConvertFp16ToFp32TestImpl.cpp
     layerTests/ConvertFp16ToFp32TestImpl.hpp
     layerTests/ConvertFp32ToFp16TestImpl.cpp
```
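Both base-class hooks are deliberately inert: `LayerSupportBase` reports the layer as unsupported and `IWorkloadFactory::CreateConvertBf16ToFp32` returns a null workload, so each backend opts in explicitly. An override pair in a hypothetical backend might look like this (a sketch; `MyLayerSupport` and `MyWorkloadFactory` are assumed names, and the workload class is the sketch shown above):

```cpp
// Assumes MyLayerSupport / MyWorkloadFactory declare these overrides.
bool MyLayerSupport::IsConvertBf16ToFp32Supported(const armnn::TensorInfo& input,
                                                  const armnn::TensorInfo& output,
                                                  armnn::Optional<std::string&> reasonIfUnsupported) const
{
    // Accept exactly the pairing that ConvertBf16ToFp32QueueDescriptor::Validate enforces.
    const bool supported = input.GetDataType()  == armnn::DataType::BFloat16 &&
                           output.GetDataType() == armnn::DataType::Float32;
    if (!supported && reasonIfUnsupported.has_value())
    {
        reasonIfUnsupported.value() = "Only BFloat16 -> Float32 conversion is supported.";
    }
    return supported;
}

std::unique_ptr<armnn::IWorkload> MyWorkloadFactory::CreateConvertBf16ToFp32(
    const armnn::ConvertBf16ToFp32QueueDescriptor& descriptor,
    const armnn::WorkloadInfo& info) const
{
    return std::make_unique<RefConvertBf16ToFp32Workload>(descriptor, info);
}
```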
```diff
diff --git a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
index 15608ccdd8..a070ac05e8 100644
--- a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
+++ b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
@@ -443,6 +443,8 @@ DECLARE_LAYER_POLICY_2_PARAM(Concat)
 
 DECLARE_LAYER_POLICY_1_PARAM(Constant)
 
+DECLARE_LAYER_POLICY_1_PARAM(ConvertBf16ToFp32)
+
 DECLARE_LAYER_POLICY_1_PARAM(ConvertFp16ToFp32)
 
 DECLARE_LAYER_POLICY_1_PARAM(ConvertFp32ToFp16)
diff --git a/src/backends/backendsCommon/test/LayerTests.hpp b/src/backends/backendsCommon/test/LayerTests.hpp
index 62a66df166..1c6277a333 100644
--- a/src/backends/backendsCommon/test/LayerTests.hpp
+++ b/src/backends/backendsCommon/test/LayerTests.hpp
@@ -13,6 +13,7 @@
 #include <backendsCommon/test/layerTests/BatchToSpaceNdTestImpl.hpp>
 #include <backendsCommon/test/layerTests/ComparisonTestImpl.hpp>
 #include <backendsCommon/test/layerTests/ConcatTestImpl.hpp>
+#include <backendsCommon/test/layerTests/ConvertBf16ToFp32TestImpl.hpp>
 #include <backendsCommon/test/layerTests/ConvertFp16ToFp32TestImpl.hpp>
 #include <backendsCommon/test/layerTests/ConvertFp32ToFp16TestImpl.hpp>
 #include <backendsCommon/test/layerTests/Conv2dTestImpl.hpp>
diff --git a/src/backends/backendsCommon/test/layerTests/ConvertBf16ToFp32TestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ConvertBf16ToFp32TestImpl.cpp
new file mode 100644
index 0000000000..0dc3048ee3
--- /dev/null
+++ b/src/backends/backendsCommon/test/layerTests/ConvertBf16ToFp32TestImpl.cpp
@@ -0,0 +1,56 @@
+//
+// Copyright © 2020 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "ConvertBf16ToFp32TestImpl.hpp"
+
+#include <backendsCommon/test/TensorCopyUtils.hpp>
+#include <backendsCommon/test/WorkloadTestUtils.hpp>
+
+#include <test/TensorHelpers.hpp>
+
+LayerTestResult<float, 4> ConvertBf16ToFp32Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    IgnoreUnused(memoryManager);
+
+    const armnn::TensorInfo inputTensorInfo({1, 3, 2, 3}, armnn::DataType::BFloat16);
+    const armnn::TensorInfo outputTensorInfo({1, 3, 2, 3}, armnn::DataType::Float32);
+
+    std::vector<armnn::BFloat16> inputValues = armnnUtils::QuantizedVector<armnn::BFloat16>(
+        {
+            -37.5f, -15.2f, -8.76f, -2.0f, -1.5f, -1.3f, -0.5f, -0.4f, 0.0f,
+            1.0f, 0.4f, 0.5f, 1.3f, 1.5f, 2.0f, 8.76f, 15.2f, 37.5f
+        },
+        1.0f, 0);
+
+    auto input = MakeTensor<armnn::BFloat16, 4>(inputTensorInfo, std::vector<armnn::BFloat16>(inputValues));
+
+    LayerTestResult<float, 4> ret(outputTensorInfo);
+    ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo,
+        { -37.5f, -15.2f, -8.76f, -2.0f, -1.5f, -1.3f, -0.5f, -0.4f, 0.0f,
+           1.0f, 0.4f, 0.5f, 1.3f, 1.5f, 2.0f, 8.76f, 15.2f, 37.5f });
+
+    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
+    armnn::ConvertBf16ToFp32QueueDescriptor data;
+    armnn::WorkloadInfo info;
+    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
+    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
+
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConvertBf16ToFp32(data, info);
+
+    inputHandle->Allocate();
+    outputHandle->Allocate();
+
+    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
+
+    workload->Execute();
+
+    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
+
+    return ret;
+}
```
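The expected output reuses the input literals, which depends on how the test values survive the float -> bf16 -> float round trip: values such as -37.5f, -2.0f, and 1.5f are exactly representable in bf16's 7-bit mantissa, while -15.2f and -8.76f round to the nearest bf16. The round trip can be inspected in isolation (a standalone sketch; `RoundTripBf16` is illustrative, uses round-to-nearest-even with NaN handling omitted, and armnn::BFloat16 defines its own rounding):

```cpp
#include <cstdint>
#include <cstdio>
#include <cstring>

// Round a float to the nearest bf16 and widen it back, to see how the
// test inputs survive the trip. Illustrative only, not Arm NN source.
float RoundTripBf16(float value)
{
    uint32_t bits = 0;
    std::memcpy(&bits, &value, sizeof(bits));
    const uint32_t lsb = (bits >> 16) & 1u;
    bits += 0x7FFFu + lsb; // round to nearest, ties to even (NaNs ignored)
    const uint32_t widened = (bits >> 16) << 16;
    float result = 0.0f;
    std::memcpy(&result, &widened, sizeof(result));
    return result;
}

int main()
{
    for (float v : { -37.5f, -15.2f, -8.76f, 1.5f })
    {
        std::printf("%.6f -> %.6f\n", v, RoundTripBf16(v));
    }
}
```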
```diff
diff --git a/src/backends/backendsCommon/test/layerTests/ConvertBf16ToFp32TestImpl.hpp b/src/backends/backendsCommon/test/layerTests/ConvertBf16ToFp32TestImpl.hpp
new file mode 100644
index 0000000000..717ec6a121
--- /dev/null
+++ b/src/backends/backendsCommon/test/layerTests/ConvertBf16ToFp32TestImpl.hpp
@@ -0,0 +1,17 @@
+//
+// Copyright © 2020 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "LayerTestResult.hpp"
+
+#include <BFloat16.hpp>
+
+#include <armnn/backends/IBackendInternal.hpp>
+#include <backendsCommon/WorkloadFactory.hpp>
+
+LayerTestResult<float, 4> ConvertBf16ToFp32Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
```
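With the declaration in place, a backend test suite can register the case through the usual layer-test macro, alongside the existing Fp16 conversion tests. For the reference backend that registration would look roughly like this (a sketch; the exact file is outside this diff):

```cpp
// e.g. in src/backends/reference/test/RefLayerTests.cpp (not part of this diff):
ARMNN_AUTO_TEST_CASE(ConvertBf16ToFp32, ConvertBf16ToFp32Test)
```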