From f71079328ae72a65c91e410b2bd35eabb67cb6d1 Mon Sep 17 00:00:00 2001
From: Jan Eilers
Date: Fri, 1 Nov 2019 11:09:36 +0000
Subject: Add fp16 support for dequantize

* Changed RefDequantizeWorkload to use Encoder/Decoder
* Added related unit tests for Cl, Neon and Ref

Signed-off-by: Jan Eilers
Change-Id: Ic2fd4103090dd2127c6859b49305736f7b2dfb05
---
 src/backends/backendsCommon/WorkloadFactory.cpp    |  2 +-
 .../test/layerTests/DequantizeTestImpl.cpp         | 72 +++++++++++++---------
 .../test/layerTests/DequantizeTestImpl.hpp         | 10 +++
 src/backends/cl/test/ClLayerTests.cpp              |  6 +-
 src/backends/neon/test/NeonLayerTests.cpp          |  2 +
 src/backends/reference/RefLayerSupport.cpp         |  5 +-
 src/backends/reference/backend.mk                  |  1 +
 src/backends/reference/test/RefLayerTests.cpp      |  2 +
 src/backends/reference/workloads/CMakeLists.txt    |  2 +
 src/backends/reference/workloads/Dequantize.cpp    | 29 +++++++++
 src/backends/reference/workloads/Dequantize.hpp    | 20 ++++++
 .../reference/workloads/RefDequantizeWorkload.cpp  | 20 +++---
 12 files changed, 124 insertions(+), 47 deletions(-)
 create mode 100644 src/backends/reference/workloads/Dequantize.cpp
 create mode 100644 src/backends/reference/workloads/Dequantize.hpp

diff --git a/src/backends/backendsCommon/WorkloadFactory.cpp b/src/backends/backendsCommon/WorkloadFactory.cpp
index b4b4ffca30..4a7f007c2e 100644
--- a/src/backends/backendsCommon/WorkloadFactory.cpp
+++ b/src/backends/backendsCommon/WorkloadFactory.cpp
@@ -266,7 +266,7 @@ bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId,
             const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
 
             result = layerSupportObject->IsDequantizeSupported(OverrideDataType(input, dataType),
-                                                               OverrideDataType(output, DataType::Float32),
+                                                               output,
                                                                reason);
             break;
         }
diff --git a/src/backends/backendsCommon/test/layerTests/DequantizeTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/DequantizeTestImpl.cpp
index 42673d5b99..6a3e852ed2 100644
--- a/src/backends/backendsCommon/test/layerTests/DequantizeTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/DequantizeTestImpl.cpp
@@ -17,20 +17,20 @@
 namespace
 {
 
-template<typename T, std::size_t Dim>
-LayerTestResult<float, Dim> DequantizeTestImpl(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    const armnn::TensorInfo& inputTensorInfo,
-    const armnn::TensorInfo& outputTensorInfo,
-    const std::vector<T>& inputData,
-    const std::vector<float>& expectedOutputData,
-    armnn::DequantizeQueueDescriptor descriptor)
+template<typename T, std::size_t Dim, typename T1 = float>
+LayerTestResult<T1, Dim> DequantizeTestImpl(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::TensorInfo& inputTensorInfo,
+    const armnn::TensorInfo& outputTensorInfo,
+    const std::vector<T>& inputData,
+    const std::vector<T1>& expectedOutputData,
+    armnn::DequantizeQueueDescriptor descriptor)
 {
     boost::multi_array<T, Dim> input = MakeTensor<T, Dim>(inputTensorInfo, inputData);
 
-    LayerTestResult<float, Dim> ret(outputTensorInfo);
-    ret.outputExpected = MakeTensor<float, Dim>(outputTensorInfo, expectedOutputData);
+    LayerTestResult<T1, Dim> ret(outputTensorInfo);
+    ret.outputExpected = MakeTensor<T1, Dim>(outputTensorInfo, expectedOutputData);
 
     std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
@@ -53,8 +53,10 @@ LayerTestResult<float, Dim> DequantizeTestImpl(
     return ret;
 }
 
-template <armnn::DataType ArmnnInputType>
-LayerTestResult<float, 4> DequantizeSimpleTest(
+template <armnn::DataType ArmnnInputType,
+          armnn::DataType ArmnnOutputType = armnn::DataType::Float32,
+          typename OutType = armnn::ResolveType<ArmnnOutputType>>
+LayerTestResult<OutType, 4> DequantizeSimpleTest(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
@@ -63,7 +65,7 @@ LayerTestResult<float, 4> DequantizeSimpleTest(
     armnn::DequantizeQueueDescriptor desc;
 
     const armnn::TensorInfo inputTensorInfo({1, 2, 2, 3}, ArmnnInputType, 0.5f, 0);
-    const armnn::TensorInfo outputTensorInfo({1, 2, 2, 3}, armnn::DataType::Float32);
+    const armnn::TensorInfo outputTensorInfo({1, 2, 2, 3}, ArmnnOutputType);
 
     std::vector<T> inputData = std::vector<T>(
     {
@@ -73,21 +75,19 @@ LayerTestResult<float, 4> DequantizeSimpleTest(
         20, 22, 24,
     });
 
-    std::vector<float> expectedOutputData = std::vector<float>(
+    std::vector<OutType> expectedOutputData;
+    for (OutType i = OutType(1); i <= OutType(12); ++i)
     {
-         1.0f,  2.0f,  3.0f,
-         4.0f,  5.0f,  6.0f,
-         7.0f,  8.0f,  9.0f,
-        10.0f, 11.0f, 12.0f,
-    });
-
-    return DequantizeTestImpl<T, 4>(workloadFactory,
-                                    memoryManager,
-                                    inputTensorInfo,
-                                    outputTensorInfo,
-                                    inputData,
-                                    expectedOutputData,
-                                    desc);
+        expectedOutputData.push_back(i);
+    }
+
+    return DequantizeTestImpl<T, 4, OutType>(workloadFactory,
+                                             memoryManager,
+                                             inputTensorInfo,
+                                             outputTensorInfo,
+                                             inputData,
+                                             expectedOutputData,
+                                             desc);
 }
 
 template <armnn::DataType ArmnnInputType>
@@ -149,3 +149,19 @@ LayerTestResult<float, 4> DequantizeSimpleInt16Test(
 {
     return DequantizeSimpleTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
 }
+
+LayerTestResult<armnn::Half, 4> DequantizeSimpleUint8ToFp16Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    return DequantizeSimpleTest<armnn::DataType::QuantisedAsymm8, armnn::DataType::Float16>(workloadFactory,
+                                                                                            memoryManager);
+}
+
+LayerTestResult<armnn::Half, 4> DequantizeSimpleInt16ToFp16Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    return DequantizeSimpleTest<armnn::DataType::QuantisedSymm16, armnn::DataType::Float16>(workloadFactory,
+                                                                                            memoryManager);
+}
diff --git a/src/backends/backendsCommon/test/layerTests/DequantizeTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/DequantizeTestImpl.hpp
index 55ea4b498f..4d837808a6 100644
--- a/src/backends/backendsCommon/test/layerTests/DequantizeTestImpl.hpp
+++ b/src/backends/backendsCommon/test/layerTests/DequantizeTestImpl.hpp
@@ -10,6 +10,8 @@
 #include <backendsCommon/IBackendInternal.hpp>
 #include <backendsCommon/WorkloadFactory.hpp>
 
+#include <Half.hpp>
+
 LayerTestResult<float, 4> DequantizeSimpleUint8Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
@@ -21,3 +23,11 @@ LayerTestResult<float, 4> DequantizeOffsetUint8Test(
 LayerTestResult<float, 4> DequantizeSimpleInt16Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<armnn::Half, 4> DequantizeSimpleUint8ToFp16Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<armnn::Half, 4> DequantizeSimpleInt16ToFp16Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
diff --git a/src/backends/cl/test/ClLayerTests.cpp b/src/backends/cl/test/ClLayerTests.cpp
index 4e987db0fb..909ebc73c2 100644
--- a/src/backends/cl/test/ClLayerTests.cpp
+++ b/src/backends/cl/test/ClLayerTests.cpp
@@ -710,11 +710,11 @@ ARMNN_AUTO_TEST_CASE(QuantizeSimpleUint8, QuantizeSimpleUint8Test)
 ARMNN_AUTO_TEST_CASE(QuantizeClampUint8, QuantizeClampUint8Test)
 
 // Dequantize
-// NOTE: current clframework (46a49a0a8206f0efa7afd514940e180a88ffd732)
-//       CLDequantizationLayerKernel accepts DataType::QASYMM8 input
-//       and can output DataType::F16 or DataType::F32
 ARMNN_AUTO_TEST_CASE(DequantizeSimpleUint8, DequantizeSimpleUint8Test)
 ARMNN_AUTO_TEST_CASE(DequantizeOffsetUint8, DequantizeOffsetUint8Test)
+ARMNN_AUTO_TEST_CASE(DequantizeSimpleInt16, DequantizeSimpleInt16Test)
+ARMNN_AUTO_TEST_CASE(DequantizeSimpleUint8ToFp16, DequantizeSimpleUint8ToFp16Test)
+ARMNN_AUTO_TEST_CASE(DequantizeSimpleInt16ToFp16, DequantizeSimpleInt16ToFp16Test)
 
 // TransposeConvolution2d
 ARMNN_AUTO_TEST_CASE(SimpleTransposeConvolution2dFloatNchw,
diff --git a/src/backends/neon/test/NeonLayerTests.cpp b/src/backends/neon/test/NeonLayerTests.cpp
index 50a2d1d630..046ca2ac66 100644
--- a/src/backends/neon/test/NeonLayerTests.cpp
+++ b/src/backends/neon/test/NeonLayerTests.cpp
@@ -325,8 +325,10 @@ BOOST_AUTO_TEST_CASE(DepthwiseConv2dUtils)
 }
 
 // Dequantize
+// Fp16 is only supported if __ARM_FEATURE_FP16_VECTOR_ARITHMETIC is enabled
 ARMNN_AUTO_TEST_CASE(DequantizeSimpleUint8, DequantizeSimpleUint8Test)
 ARMNN_AUTO_TEST_CASE(DequantizeOffsetUint8, DequantizeOffsetUint8Test)
+ARMNN_AUTO_TEST_CASE(DequantizeSimpleInt16, DequantizeSimpleInt16Test)
 
 // Pooling
 ARMNN_AUTO_TEST_CASE(SimpleMaxPooling2dSize3x3Stride2x4, SimpleMaxPooling2dSize3x3Stride2x4Test, true)
diff --git a/src/backends/reference/RefLayerSupport.cpp b/src/backends/reference/RefLayerSupport.cpp
index 59721582a3..716e8d9492 100644
--- a/src/backends/reference/RefLayerSupport.cpp
+++ b/src/backends/reference/RefLayerSupport.cpp
@@ -587,8 +587,9 @@ bool RefLayerSupport::IsDequantizeSupported(const TensorInfo& input,
     supported &= CheckSupportRule(TypeAnyOf(input, supportedInputTypes), reasonIfUnsupported,
                                   "Reference dequantize: input type not supported.");
 
-    std::array<DataType, 1> supportedOutputTypes = {
-        DataType::Float32
+    std::array<DataType, 2> supportedOutputTypes = {
+        DataType::Float32,
+        DataType::Float16
     };
 
     supported &= CheckSupportRule(TypeAnyOf(output, supportedOutputTypes), reasonIfUnsupported,
diff --git a/src/backends/reference/backend.mk b/src/backends/reference/backend.mk
index 7e97acdee2..5f9af59e74 100644
--- a/src/backends/reference/backend.mk
+++ b/src/backends/reference/backend.mk
@@ -31,6 +31,7 @@ BACKEND_SOURCES := \
         workloads/Debug.cpp \
         workloads/DepthToSpace.cpp \
         workloads/DetectionPostProcess.cpp \
+        workloads/Dequantize.cpp \
         workloads/ElementwiseFunction.cpp \
         workloads/FullyConnected.cpp \
         workloads/Gather.cpp \
diff --git a/src/backends/reference/test/RefLayerTests.cpp b/src/backends/reference/test/RefLayerTests.cpp
index 1b284c3876..7f280382f1 100644
--- a/src/backends/reference/test/RefLayerTests.cpp
+++ b/src/backends/reference/test/RefLayerTests.cpp
@@ -1378,6 +1378,8 @@ BOOST_AUTO_TEST_CASE(DetectionPostProcessFastNmsInt16)
 ARMNN_AUTO_TEST_CASE(DequantizeSimpleUint8, DequantizeSimpleUint8Test)
 ARMNN_AUTO_TEST_CASE(DequantizeOffsetUint8, DequantizeOffsetUint8Test)
 ARMNN_AUTO_TEST_CASE(DequantizeSimpleInt16, DequantizeSimpleInt16Test)
+ARMNN_AUTO_TEST_CASE(DequantizeSimpleUint8ToFp16, DequantizeSimpleUint8ToFp16Test)
+ARMNN_AUTO_TEST_CASE(DequantizeSimpleInt16ToFp16, DequantizeSimpleInt16ToFp16Test)
 
 // Quantize
 ARMNN_AUTO_TEST_CASE(QuantizeSimpleUint8, QuantizeSimpleUint8Test)
diff --git a/src/backends/reference/workloads/CMakeLists.txt b/src/backends/reference/workloads/CMakeLists.txt
index 7844518620..29abfedcef 100644
--- a/src/backends/reference/workloads/CMakeLists.txt
+++ b/src/backends/reference/workloads/CMakeLists.txt
@@ -28,6 +28,8 @@ list(APPEND armnnRefBackendWorkloads_sources
     DepthToSpace.hpp
     DetectionPostProcess.cpp
     DetectionPostProcess.hpp
+    Dequantize.cpp
+    Dequantize.hpp
     ElementwiseFunction.cpp
     ElementwiseFunction.hpp
     Encoders.hpp
diff --git a/src/backends/reference/workloads/Dequantize.cpp b/src/backends/reference/workloads/Dequantize.cpp
new file mode 100644
index 0000000000..fafc03e69b
--- /dev/null
+++ b/src/backends/reference/workloads/Dequantize.cpp
@@ -0,0 +1,29 @@
+//
+// Copyright © 2019 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "Dequantize.hpp"
+
+namespace armnn
+{
+
+void Dequantize(Decoder<float>& inputDecoder,
+                Encoder<float>& outputEncoder,
+                const TensorInfo& inputInfo,
+                const TensorInfo& outputInfo)
+{
+    BOOST_ASSERT(inputInfo.GetNumElements() == outputInfo.GetNumElements());
+    for (unsigned int i = 0; i < inputInfo.GetNumElements(); i++)
+    {
+        // inputDecoder.Get() dequantizes the data element from whatever
+        // type is given by inputInfo to fp32 (if MakeDecoder supports that dequantization).
+        // outputEncoder.Set() transforms the data element to whatever type is
+        // given by outputInfo (if MakeEncoder supports that transformation).
+        outputEncoder.Set(inputDecoder.Get());
+        ++outputEncoder;
+        ++inputDecoder;
+    }
+}
+
+} // armnn namespace
\ No newline at end of file
diff --git a/src/backends/reference/workloads/Dequantize.hpp b/src/backends/reference/workloads/Dequantize.hpp
new file mode 100644
index 0000000000..c01b454833
--- /dev/null
+++ b/src/backends/reference/workloads/Dequantize.hpp
@@ -0,0 +1,20 @@
+//
+// Copyright © 2019 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <armnn/Tensor.hpp>
+#include "Encoders.hpp"
+#include "Decoders.hpp"
+
+namespace armnn
+{
+
+void Dequantize(Decoder<float>& inputDecoder,
+                Encoder<float>& outputEncoder,
+                const TensorInfo& inputInfo,
+                const TensorInfo& outputInfo);
+
+} //namespace armnn
diff --git a/src/backends/reference/workloads/RefDequantizeWorkload.cpp b/src/backends/reference/workloads/RefDequantizeWorkload.cpp
index d861c50730..e6f5c6b359 100644
--- a/src/backends/reference/workloads/RefDequantizeWorkload.cpp
+++ b/src/backends/reference/workloads/RefDequantizeWorkload.cpp
@@ -5,6 +5,9 @@
 
 #include "RefDequantizeWorkload.hpp"
 #include "RefWorkloadUtils.hpp"
+#include "Encoders.hpp"
+#include "Decoders.hpp"
+#include "Dequantize.hpp"
 
 namespace armnn
 {
@@ -14,21 +17,12 @@ void RefDequantizeWorkload::Execute() const
     ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefDequantizeWorkload_Execute");
 
     const TensorInfo& inputInfo = GetTensorInfo(m_Data.m_Inputs[0]);
-    const DataType& inputDataType = inputInfo.GetDataType();
+    const TensorInfo& outputInfo = GetTensorInfo(m_Data.m_Outputs[0]);
 
-    float* outputData = GetOutputTensorData<float>(0, m_Data);
+    auto inputDecoder  = MakeDecoder<float>(inputInfo,  m_Data.m_Inputs[0]->Map());
+    auto outputEncoder = MakeEncoder<float>(outputInfo, m_Data.m_Outputs[0]->Map());
 
-    switch (inputDataType)
-    {
-        case DataType::QuantisedAsymm8:
-            Dequantize<uint8_t>(GetInputTensorData<uint8_t>(0, m_Data), outputData, inputInfo);
-            break;
-        case DataType::QuantisedSymm16:
-            Dequantize<int16_t>(GetInputTensorData<int16_t>(0, m_Data), outputData, inputInfo);
-            break;
-        default:
-            throw InvalidArgumentException("RefDequantizeWorkload: Unsupported input data type");
-    }
+    Dequantize(*inputDecoder, *outputEncoder, inputInfo, outputInfo);
 }
 
 } // namespace armnn
-- 
cgit v1.2.1
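
Background sketch (illustrative only, not part of the change): the patch replaces per-type Dequantize overloads with ArmNN's Encoder/Decoder pattern, in which MakeDecoder/MakeEncoder pick a concrete per-element transcoder from the TensorInfo at runtime, and the workload loop stays type-agnostic. The standalone snippet below models that idea with plain structs and a template instead of ArmNN's virtual Decoder<float>/Encoder<float> hierarchy, and checks the same numbers as DequantizeSimpleTest (scale 0.5, offset 0, inputs 2..24). All names in it are hypothetical, not ArmNN APIs.

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    // A Decoder reads the current element and returns it dequantized to fp32
    // using the standard affine formula: realValue = scale * (quantized - offset).
    struct QAsymm8Decoder
    {
        const uint8_t* ptr;
        float          scale;
        int32_t        offset;

        float Get() const
        {
            return scale * (static_cast<float>(*ptr) - static_cast<float>(offset));
        }
        QAsymm8Decoder& operator++() { ++ptr; return *this; }
    };

    // An Encoder takes an fp32 value and stores it as the output element type.
    // (For a Float16 output, ArmNN's encoder narrows to half precision here.)
    struct Float32Encoder
    {
        float* ptr;

        void Set(float value) { *ptr = value; }
        Float32Encoder& operator++() { ++ptr; return *this; }
    };

    // Mirrors the loop in Dequantize.cpp: the algorithm never needs to know
    // the concrete input or output types, only the Get/Set/advance protocol.
    template <typename DecoderT, typename EncoderT>
    void DequantizeLoop(DecoderT decoder, EncoderT encoder, unsigned int numElements)
    {
        for (unsigned int i = 0; i < numElements; ++i)
        {
            encoder.Set(decoder.Get());
            ++decoder;
            ++encoder;
        }
    }

    int main()
    {
        // Same quantization parameters and data as DequantizeSimpleTest above.
        const std::vector<uint8_t> input = { 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24 };
        std::vector<float> output(input.size(), 0.0f);

        DequantizeLoop(QAsymm8Decoder{ input.data(), 0.5f, 0 },
                       Float32Encoder{ output.data() },
                       static_cast<unsigned int>(input.size()));

        for (float v : output)
        {
            std::printf("%.1f ", v);   // prints 1.0 2.0 ... 12.0
        }
        std::printf("\n");
        return 0;
    }

In the real workload both sides are chosen at runtime from inputInfo/outputInfo, which is what lets the one RefDequantizeWorkload serve QuantisedAsymm8 and QuantisedSymm16 inputs with either Float32 or the newly added Float16 output.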