diff options
-rw-r--r--  src/backends/backendsCommon/test/CMakeLists.txt       |   1 +
-rw-r--r--  src/backends/backendsCommon/test/LayerTests.cpp       |  22 ++++++
-rw-r--r--  src/backends/backendsCommon/test/LayerTests.hpp       |  12 +++
-rw-r--r--  src/backends/backendsCommon/test/QuantizeTestImpl.hpp | 126 +++++++++++++
-rw-r--r--  src/backends/reference/test/RefLayerTests.cpp         |   5 ++
5 files changed, 166 insertions(+), 0 deletions(-)
diff --git a/src/backends/backendsCommon/test/CMakeLists.txt b/src/backends/backendsCommon/test/CMakeLists.txt
index 47d52312b6..bc190dd818 100644
--- a/src/backends/backendsCommon/test/CMakeLists.txt
+++ b/src/backends/backendsCommon/test/CMakeLists.txt
@@ -34,6 +34,7 @@ list(APPEND armnnBackendsCommonUnitTests_sources
     PermuteTestImpl.hpp
     Pooling2dTestImpl.hpp
     QuantizeHelper.hpp
+    QuantizeTestImpl.hpp
     ReshapeTestImpl.hpp
     RuntimeTestImpl.hpp
     SoftmaxTestImpl.hpp
diff --git a/src/backends/backendsCommon/test/LayerTests.cpp b/src/backends/backendsCommon/test/LayerTests.cpp
index be1d43541c..e8fb84f534 100644
--- a/src/backends/backendsCommon/test/LayerTests.cpp
+++ b/src/backends/backendsCommon/test/LayerTests.cpp
@@ -44,6 +44,7 @@
 #include "ConvertFp32ToFp16TestImpl.hpp"
 #include "DebugTestImpl.hpp"
 #include "DequantizeTestImpl.hpp"
+#include "QuantizeTestImpl.hpp"
 
 // 3-channel 16x8 image used as common input data for a number of Conv2d tests.
 static std::vector<float> ConvInput3x8x16({
@@ -8977,3 +8978,24 @@ LayerTestResult<float, 4> DequantizeUint8Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
     return DequantizeSimpleTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
 }
+
+LayerTestResult<uint8_t, 4> QuantizeSimpleUint8Test( // thin wrapper: run QuantizeSimpleTest with QAsymm8 output
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    return QuantizeSimpleTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
+}
+
+LayerTestResult<uint8_t, 4> QuantizeClampUint8Test( // thin wrapper: clamp/saturation test, QAsymm8 output
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    return QuantizeClampTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
+}
+
+LayerTestResult<int16_t, 4> QuantizeClampInt16Test( // thin wrapper: clamp/saturation test, QSymm16 output
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    return QuantizeClampTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
+}
diff --git 
a/src/backends/backendsCommon/test/LayerTests.hpp b/src/backends/backendsCommon/test/LayerTests.hpp
index c14ba7b35f..b9f73a4717 100644
--- a/src/backends/backendsCommon/test/LayerTests.hpp
+++ b/src/backends/backendsCommon/test/LayerTests.hpp
@@ -1443,3 +1443,15 @@ LayerTestResult<uint8_t, 4> GatherMultiDimParamsMultiDimIndicesUint8Test(
 LayerTestResult<float, 4> DequantizeUint8Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<uint8_t, 4> QuantizeSimpleUint8Test( // defined in LayerTests.cpp; exact-value quantize to QAsymm8
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<uint8_t, 4> QuantizeClampUint8Test( // defined in LayerTests.cpp; saturation at uint8 bounds
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<int16_t, 4> QuantizeClampInt16Test( // defined in LayerTests.cpp; saturation at int16 bounds
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
diff --git a/src/backends/backendsCommon/test/QuantizeTestImpl.hpp b/src/backends/backendsCommon/test/QuantizeTestImpl.hpp
new file mode 100644
index 0000000000..fee68f073e
--- /dev/null
+++ b/src/backends/backendsCommon/test/QuantizeTestImpl.hpp
@@ -0,0 +1,126 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved. 
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include "WorkloadTestUtils.hpp"
+
+#include <test/TensorHelpers.hpp>
+
+#include <armnn/ArmNN.hpp>
+#include <armnn/Tensor.hpp>
+#include <armnn/TypesUtils.hpp>
+
+#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/IBackendInternal.hpp>
+#include <backendsCommon/WorkloadFactory.hpp> // NOTE(review): std::unique_ptr / std::numeric_limits below rely on transitive includes — consider adding <memory> and <limits> explicitly
+
+
+namespace
+{
+
+template<typename T, std::size_t Dim>
+LayerTestResult<T, Dim> QuantizeTestImpl( // shared driver: build a Quantize workload, run it, capture output
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::TensorInfo& inputTensorInfo,
+    const armnn::TensorInfo& outputTensorInfo,
+    const std::vector<float>& inputData,
+    const std::vector<T>& expectedOutputData,
+    armnn::QuantizeQueueDescriptor descriptor) // by value: the in/out tensor handles are appended locally below
+{
+    boost::multi_array<float, Dim> input = MakeTensor<float, Dim>(inputTensorInfo, inputData);
+
+    LayerTestResult<T, Dim> ret(outputTensorInfo); // caller compares ret.output against ret.outputExpected
+    ret.outputExpected = MakeTensor<T, Dim>(outputTensorInfo, expectedOutputData);
+
+    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
+    armnn::WorkloadInfo info;
+    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
+    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
+
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateQuantize(descriptor, info);
+
+    inputHandle->Allocate(); // handles must be allocated before any data copy
+    outputHandle->Allocate();
+
+    CopyDataToITensorHandle(inputHandle.get(), input.data());
+
+    ExecuteWorkload(*workload, memoryManager);
+
+    CopyDataFromITensorHandle(ret.output.data(), outputHandle.get()); // read back the quantized result
+
+    return ret;
+}
+
+template <armnn::DataType ArmnnOutputType, typename T = armnn::ResolveType<ArmnnOutputType>>
+LayerTestResult<T, 4> QuantizeSimpleTest( // quantizes a 1x2x2x3 Float32 tensor and checks exact values
+    armnn::IWorkloadFactory& workloadFactory,
+    const 
armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    armnn::QuantizeQueueDescriptor desc;
+
+    const armnn::TensorInfo inputTensorInfo({1, 2, 2, 3}, armnn::DataType::Float32);
+    const armnn::TensorInfo outputTensorInfo({1, 2, 2, 3}, ArmnnOutputType, 0.5f, 1); // scale 0.5, offset 1
+
+    std::vector<float> inputData = std::vector<float>(
+    {
+         1.0f,  2.0f,  3.0f,
+         4.0f,  5.0f,  6.0f,
+         7.0f,  8.0f,  9.0f,
+        10.0f, 11.0f, 12.0f,
+    });
+
+    std::vector<T> expectedOutputData = std::vector<T>( // expected = input / scale + offset, e.g. 1.0 / 0.5 + 1 = 3
+    {
+         3,  5,  7,
+         9, 11, 13,
+        15, 17, 19,
+        21, 23, 25,
+    });
+
+    return QuantizeTestImpl<T, 4>(workloadFactory,
+                                  memoryManager,
+                                  inputTensorInfo,
+                                  outputTensorInfo,
+                                  inputData,
+                                  expectedOutputData,
+                                  desc);
+}
+
+template <armnn::DataType ArmnnOutputType, typename T = armnn::ResolveType<ArmnnOutputType>>
+LayerTestResult<T, 4> QuantizeClampTest( // verifies out-of-range inputs saturate to T's numeric limits
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    armnn::QuantizeQueueDescriptor desc;
+
+    const armnn::TensorInfo inputTensorInfo({1, 1, 2, 1}, armnn::DataType::Float32);
+    const armnn::TensorInfo outputTensorInfo({1, 1, 2, 1}, ArmnnOutputType, 0.0001f, 0); // tiny scale pushes +/-100 far outside T's range
+
+    const T max = std::numeric_limits<T>::max();
+    const T min = std::numeric_limits<T>::lowest();
+
+    std::vector<float> inputData = std::vector<float>(
+    {
+        -100.0f, 100.0f
+    });
+
+    std::vector<T> expectedOutputData = std::vector<T>( // both endpoints must clamp rather than wrap
+    {
+        min, max
+    });
+
+    return QuantizeTestImpl<T, 4>(workloadFactory,
+                                  memoryManager,
+                                  inputTensorInfo,
+                                  outputTensorInfo,
+                                  inputData,
+                                  expectedOutputData,
+                                  desc);
+}
+
+} // anonymous namespace
diff --git a/src/backends/reference/test/RefLayerTests.cpp b/src/backends/reference/test/RefLayerTests.cpp
index c0e2e785da..5149b9f132 100644
--- a/src/backends/reference/test/RefLayerTests.cpp
+++ b/src/backends/reference/test/RefLayerTests.cpp
@@ -533,4 +533,9 @@ BOOST_AUTO_TEST_CASE(DetectionPostProcessFastNmsUint8)
 
 // Dequantize
 ARMNN_AUTO_TEST_CASE(DequantizeUint8, 
DequantizeUint8Test)
+// Quantize
+ARMNN_AUTO_TEST_CASE(QuantizeSimpleUint8, QuantizeSimpleUint8Test) // exact quantized values, QAsymm8
+ARMNN_AUTO_TEST_CASE(QuantizeClampUint8, QuantizeClampUint8Test)   // saturation at uint8 bounds
+ARMNN_AUTO_TEST_CASE(QuantizeClampInt16, QuantizeClampInt16Test)   // saturation at int16 bounds
+
 BOOST_AUTO_TEST_SUITE_END() |