diff options
author | Jan Eilers <jan.eilers@arm.com> | 2019-11-01 11:09:36 +0000 |
---|---|---|
committer | Jan Eilers <jan.eilers@arm.com> | 2019-11-04 12:09:08 +0000 |
commit | f71079328ae72a65c91e410b2bd35eabb67cb6d1 (patch) | |
tree | e5460c94ea84f0ffb6ec09df820912cd9bd750ec /src/backends/backendsCommon/test/layerTests/DequantizeTestImpl.cpp | |
parent | 7ff9a6096e3c1facbd6786993a6437b9f72069d2 (diff) | |
download | armnn-f71079328ae72a65c91e410b2bd35eabb67cb6d1.tar.gz |
Add fp16 support for dequantize
* Changed RefDequantizeWorkload to use Encoder/Decoder
* Added related unit tests for Cl, Neon and Ref
Signed-off-by: Jan Eilers <jan.eilers@arm.com>
Change-Id: Ic2fd4103090dd2127c6859b49305736f7b2dfb05
Diffstat (limited to 'src/backends/backendsCommon/test/layerTests/DequantizeTestImpl.cpp')
-rw-r--r-- | src/backends/backendsCommon/test/layerTests/DequantizeTestImpl.cpp | 72 |
1 file changed, 44 insertions(+), 28 deletions(-)
diff --git a/src/backends/backendsCommon/test/layerTests/DequantizeTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/DequantizeTestImpl.cpp index 42673d5b99..6a3e852ed2 100644 --- a/src/backends/backendsCommon/test/layerTests/DequantizeTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/DequantizeTestImpl.cpp @@ -17,20 +17,20 @@ namespace { -template<typename T, std::size_t Dim> -LayerTestResult<float, Dim> DequantizeTestImpl( - armnn::IWorkloadFactory& workloadFactory, - const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, - const armnn::TensorInfo& inputTensorInfo, - const armnn::TensorInfo& outputTensorInfo, - const std::vector<T>& inputData, - const std::vector<float>& expectedOutputData, - armnn::DequantizeQueueDescriptor descriptor) +template<typename T, std::size_t Dim, typename T1=float> +LayerTestResult<T1, Dim> DequantizeTestImpl( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::TensorInfo& inputTensorInfo, + const armnn::TensorInfo& outputTensorInfo, + const std::vector<T>& inputData, + const std::vector<T1>& expectedOutputData, + armnn::DequantizeQueueDescriptor descriptor) { boost::multi_array<T, Dim> input = MakeTensor<T, Dim>(inputTensorInfo, inputData); - LayerTestResult<float, Dim> ret(outputTensorInfo); - ret.outputExpected = MakeTensor<float, Dim>(outputTensorInfo, expectedOutputData); + LayerTestResult<T1, Dim> ret(outputTensorInfo); + ret.outputExpected = MakeTensor<T1, Dim>(outputTensorInfo, expectedOutputData); std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo); std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo); @@ -53,8 +53,10 @@ LayerTestResult<float, Dim> DequantizeTestImpl( return ret; } -template <armnn::DataType ArmnnInputType> -LayerTestResult<float, 4> DequantizeSimpleTest( +template <armnn::DataType 
ArmnnInputType, + armnn::DataType ArmnnOutputType=armnn::DataType::Float32, + typename OutType=armnn::ResolveType<ArmnnOutputType>> +LayerTestResult<OutType, 4> DequantizeSimpleTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { @@ -63,7 +65,7 @@ LayerTestResult<float, 4> DequantizeSimpleTest( armnn::DequantizeQueueDescriptor desc; const armnn::TensorInfo inputTensorInfo({1, 2, 2, 3}, ArmnnInputType, 0.5f, 0); - const armnn::TensorInfo outputTensorInfo({1, 2, 2, 3}, armnn::DataType::Float32); + const armnn::TensorInfo outputTensorInfo({1, 2, 2, 3}, ArmnnOutputType); std::vector<T> inputData = std::vector<T>( { @@ -73,21 +75,19 @@ LayerTestResult<float, 4> DequantizeSimpleTest( 20, 22, 24, }); - std::vector<float> expectedOutputData = std::vector<float>( + std::vector<OutType> expectedOutputData; + for (OutType i = OutType(1); i <= OutType(12); ++i) { - 1.0f, 2.0f, 3.0f, - 4.0f, 5.0f, 6.0f, - 7.0f, 8.0f, 9.0f, - 10.0f, 11.0f, 12.0f, - }); - - return DequantizeTestImpl<T, 4>(workloadFactory, - memoryManager, - inputTensorInfo, - outputTensorInfo, - inputData, - expectedOutputData, - desc); + expectedOutputData.push_back(i); + } + + return DequantizeTestImpl<T, 4, OutType>(workloadFactory, + memoryManager, + inputTensorInfo, + outputTensorInfo, + inputData, + expectedOutputData, + desc); } template <armnn::DataType ArmnnInputType> @@ -149,3 +149,19 @@ LayerTestResult<float, 4> DequantizeSimpleInt16Test( { return DequantizeSimpleTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager); } + +LayerTestResult<armnn::Half, 4> DequantizeSimpleUint8ToFp16Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) +{ + return DequantizeSimpleTest<armnn::DataType::QuantisedAsymm8, armnn::DataType::Float16>(workloadFactory, + memoryManager); +} + +LayerTestResult<armnn::Half, 4> DequantizeSimpleInt16ToFp16Test( + 
armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) +{ + return DequantizeSimpleTest<armnn::DataType::QuantisedSymm16, armnn::DataType::Float16>(workloadFactory, + memoryManager); +} |