author    | Aron Virginas-Tar <Aron.Virginas-Tar@arm.com> | 2019-08-28 18:08:46 +0100
committer | mike.kelly <mike.kelly@arm.com> | 2019-08-30 10:58:54 +0000
commit    | 00d306e4db5153a4f4d280de4d4cf3e03788fefb (patch)
tree      | 329c15f71c662e199a24dc0812bf95cb389ddbd8 /src/backends/backendsCommon/test/layerTests/DequantizeTestImpl.cpp
parent    | 08b518687d2bf2683a2c5f571d3e76d71d67d048 (diff)
download  | armnn-00d306e4db5153a4f4d280de4d4cf3e03788fefb.tar.gz
IVGCVSW-3381 Break up LayerTests.hpp into more manageable files
Signed-off-by: Aron Virginas-Tar <Aron.Virginas-Tar@arm.com>
Change-Id: Icf39434f09fd340ad664cb3b97b8bee6d9da4838
Diffstat (limited to 'src/backends/backendsCommon/test/layerTests/DequantizeTestImpl.cpp')
-rw-r--r-- | src/backends/backendsCommon/test/layerTests/DequantizeTestImpl.cpp | 151
1 file changed, 151 insertions, 0 deletions
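Background (not part of the commit): the tests in the new file below exercise affine dequantization, real = scale * (quantized - offset). The following standalone C++ sketch reproduces the expected outputs from the diff's test vectors; the Dequantize helper and main function here are hypothetical illustrations written for this note, not Arm NN APIs.

#include <cassert>
#include <cstdint>

// Affine dequantization: real = scale * (quantized - offset).
// Standalone illustration; not part of the Arm NN commit below.
float Dequantize(int32_t quantized, float scale, int32_t offset)
{
    return scale * static_cast<float>(quantized - offset);
}

int main()
{
    // DequantizeSimpleTest uses scale = 0.5, offset = 0:
    // input 2 -> 1.0f, input 24 -> 12.0f.
    assert(Dequantize(2,  0.5f, 0) == 1.0f);
    assert(Dequantize(24, 0.5f, 0) == 12.0f);

    // DequantizeOffsetTest uses scale = 0.5, offset = 1:
    // input 3 -> (3 - 1) * 0.5 = 1.0f, input 25 -> 12.0f.
    assert(Dequantize(3,  0.5f, 1) == 1.0f);
    assert(Dequantize(25, 0.5f, 1) == 12.0f);

    return 0;
}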
diff --git a/src/backends/backendsCommon/test/layerTests/DequantizeTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/DequantizeTestImpl.cpp
new file mode 100644
index 0000000000..42673d5b99
--- /dev/null
+++ b/src/backends/backendsCommon/test/layerTests/DequantizeTestImpl.cpp
@@ -0,0 +1,151 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "DequantizeTestImpl.hpp"
+
+#include <ResolveType.hpp>
+
+#include <armnn/ArmNN.hpp>
+
+#include <backendsCommon/test/TensorCopyUtils.hpp>
+#include <backendsCommon/test/WorkloadTestUtils.hpp>
+
+#include <test/TensorHelpers.hpp>
+
+namespace
+{
+
+template<typename T, std::size_t Dim>
+LayerTestResult<float, Dim> DequantizeTestImpl(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::TensorInfo& inputTensorInfo,
+    const armnn::TensorInfo& outputTensorInfo,
+    const std::vector<T>& inputData,
+    const std::vector<float>& expectedOutputData,
+    armnn::DequantizeQueueDescriptor descriptor)
+{
+    boost::multi_array<T, Dim> input = MakeTensor<T, Dim>(inputTensorInfo, inputData);
+
+    LayerTestResult<float, Dim> ret(outputTensorInfo);
+    ret.outputExpected = MakeTensor<float, Dim>(outputTensorInfo, expectedOutputData);
+
+    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
+    armnn::WorkloadInfo info;
+    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
+    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
+
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateDequantize(descriptor, info);
+
+    inputHandle->Allocate();
+    outputHandle->Allocate();
+
+    CopyDataToITensorHandle(inputHandle.get(), input.data());
+
+    ExecuteWorkload(*workload, memoryManager);
+
+    CopyDataFromITensorHandle(ret.output.data(), outputHandle.get());
+
+    return ret;
+}
+
+template <armnn::DataType ArmnnInputType>
+LayerTestResult<float, 4> DequantizeSimpleTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    using T = armnn::ResolveType<ArmnnInputType>;
+
+    armnn::DequantizeQueueDescriptor desc;
+
+    const armnn::TensorInfo inputTensorInfo({1, 2, 2, 3}, ArmnnInputType, 0.5f, 0);
+    const armnn::TensorInfo outputTensorInfo({1, 2, 2, 3}, armnn::DataType::Float32);
+
+    std::vector<T> inputData = std::vector<T>(
+    {
+         2,  4,  6,
+         8, 10, 12,
+        14, 16, 18,
+        20, 22, 24,
+    });
+
+    std::vector<float> expectedOutputData = std::vector<float>(
+    {
+         1.0f,  2.0f,  3.0f,
+         4.0f,  5.0f,  6.0f,
+         7.0f,  8.0f,  9.0f,
+        10.0f, 11.0f, 12.0f,
+    });
+
+    return DequantizeTestImpl<T, 4>(workloadFactory,
+                                    memoryManager,
+                                    inputTensorInfo,
+                                    outputTensorInfo,
+                                    inputData,
+                                    expectedOutputData,
+                                    desc);
+}
+
+template <armnn::DataType ArmnnInputType>
+LayerTestResult<float, 4> DequantizeOffsetTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    using T = armnn::ResolveType<ArmnnInputType>;
+
+    armnn::DequantizeQueueDescriptor desc;
+
+    const armnn::TensorInfo inputTensorInfo({1, 2, 2, 3}, ArmnnInputType, 0.5f, 1);
+    const armnn::TensorInfo outputTensorInfo({1, 2, 2, 3}, armnn::DataType::Float32);
+
+    std::vector<T> inputData = std::vector<T>(
+    {
+         3,  5,  7,
+         9, 11, 13,
+        15, 17, 19,
+        21, 23, 25,
+    });
+
+    std::vector<float> expectedOutputData = std::vector<float>(
+    {
+         1.0f,  2.0f,  3.0f,
+         4.0f,  5.0f,  6.0f,
+         7.0f,  8.0f,  9.0f,
+        10.0f, 11.0f, 12.0f,
+    });
+
+    return DequantizeTestImpl<T, 4>(workloadFactory,
+                                    memoryManager,
+                                    inputTensorInfo,
+                                    outputTensorInfo,
+                                    inputData,
+                                    expectedOutputData,
+                                    desc);
+}
+
+} // anonymous namespace
+
+LayerTestResult<float, 4> DequantizeSimpleUint8Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    return DequantizeSimpleTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
+}
+
+LayerTestResult<float, 4> DequantizeOffsetUint8Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    return DequantizeOffsetTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
+}
+
+LayerTestResult<float, 4> DequantizeSimpleInt16Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    return DequantizeSimpleTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
+}
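Usage note (hedged; based on the surrounding Arm NN test infrastructure of this era, not on this diff): per-type entry points like the three above are typically registered in a backend's test suite via the ARMNN_AUTO_TEST_CASE macro, for example in src/backends/reference/test/RefLayerTests.cpp. The registrations below are an illustrative sketch of that pattern, not quoted from this commit.

// Illustrative registrations in a backend test suite (test-case names assumed,
// not taken from this diff); each macro binds a named Boost.Test case to one
// of the layer-test entry points declared in DequantizeTestImpl.
ARMNN_AUTO_TEST_CASE(DequantizeSimpleUint8, DequantizeSimpleUint8Test)
ARMNN_AUTO_TEST_CASE(DequantizeOffsetUint8, DequantizeOffsetUint8Test)
ARMNN_AUTO_TEST_CASE(DequantizeSimpleInt16, DequantizeSimpleInt16Test)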