From 00d306e4db5153a4f4d280de4d4cf3e03788fefb Mon Sep 17 00:00:00 2001
From: Aron Virginas-Tar
Date: Wed, 28 Aug 2019 18:08:46 +0100
Subject: IVGCVSW-3381 Break up LayerTests.hpp into more manageable files

Signed-off-by: Aron Virginas-Tar
Change-Id: Icf39434f09fd340ad664cb3b97b8bee6d9da4838
---
 .../test/layerTests/QuantizeTestImpl.cpp | 147 +++++++++++++++++++++
 1 file changed, 147 insertions(+)
 create mode 100644 src/backends/backendsCommon/test/layerTests/QuantizeTestImpl.cpp

diff --git a/src/backends/backendsCommon/test/layerTests/QuantizeTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/QuantizeTestImpl.cpp
new file mode 100644
index 0000000000..7d5d73bf01
--- /dev/null
+++ b/src/backends/backendsCommon/test/layerTests/QuantizeTestImpl.cpp
@@ -0,0 +1,147 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "QuantizeTestImpl.hpp"
+
+#include <ResolveType.hpp>
+
+#include <armnn/ArmNN.hpp>
+
+#include <backendsCommon/IBackendInternal.hpp>
+#include <backendsCommon/WorkloadFactory.hpp>
+
+#include <backendsCommon/test/TensorCopyUtils.hpp>
+#include <backendsCommon/test/WorkloadTestUtils.hpp>
+
+#include <test/TensorHelpers.hpp>
+
+namespace
+{
+
+template<typename T, std::size_t Dim>
+LayerTestResult<T, Dim> QuantizeTestImpl(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::TensorInfo& inputTensorInfo,
+    const armnn::TensorInfo& outputTensorInfo,
+    const std::vector<float>& inputData,
+    const std::vector<T>& expectedOutputData,
+    armnn::QuantizeQueueDescriptor descriptor)
+{
+    boost::multi_array<float, Dim> input = MakeTensor<float, Dim>(inputTensorInfo, inputData);
+
+    LayerTestResult<T, Dim> ret(outputTensorInfo);
+    ret.outputExpected = MakeTensor<T, Dim>(outputTensorInfo, expectedOutputData);
+
+    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
+    armnn::WorkloadInfo info;
+    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
+    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
+
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateQuantize(descriptor, info);
+
+    inputHandle->Allocate();
+    outputHandle->Allocate();
+
+    CopyDataToITensorHandle(inputHandle.get(), input.data());
+
+    ExecuteWorkload(*workload, memoryManager);
+
+    CopyDataFromITensorHandle(ret.output.data(), outputHandle.get());
+
+    return ret;
+}
+
+template <armnn::DataType ArmnnOutputType, typename T = armnn::ResolveType<ArmnnOutputType>>
+LayerTestResult<T, 4> QuantizeSimpleTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    armnn::QuantizeQueueDescriptor desc;
+
+    const armnn::TensorInfo inputTensorInfo({1, 2, 2, 3}, armnn::DataType::Float32);
+    const armnn::TensorInfo outputTensorInfo({1, 2, 2, 3}, ArmnnOutputType, 0.5f, 1);
+
+    std::vector<float> inputData = std::vector<float>(
+    {
+         1.0f,  2.0f,  3.0f,
+         4.0f,  5.0f,  6.0f,
+         7.0f,  8.0f,  9.0f,
+        10.0f, 11.0f, 12.0f,
+    });
+
+    std::vector<T> expectedOutputData = std::vector<T>(
+    {
+         3,  5,  7,
+         9, 11, 13,
+        15, 17, 19,
+        21, 23, 25,
+    });
+
+    return QuantizeTestImpl<T, 4>(workloadFactory,
+                                  memoryManager,
+                                  inputTensorInfo,
+                                  outputTensorInfo,
+                                  inputData,
+                                  expectedOutputData,
+                                  desc);
+}
+
+template <armnn::DataType ArmnnOutputType, typename T = armnn::ResolveType<ArmnnOutputType>>
+LayerTestResult<T, 4> QuantizeClampTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    armnn::QuantizeQueueDescriptor desc;
+
+    const armnn::TensorInfo inputTensorInfo({1, 1, 2, 1}, armnn::DataType::Float32);
+    const armnn::TensorInfo outputTensorInfo({1, 1, 2, 1}, ArmnnOutputType, 0.0001f, 0);
+
+    const T max = std::numeric_limits<T>::max();
+    const T min = std::numeric_limits<T>::lowest();
+
+    std::vector<float> inputData = std::vector<float>(
+    {
+        -100.0f, 100.0f
+    });
+
+    std::vector<T> expectedOutputData = std::vector<T>(
+    {
+        min, max
+    });
+
+    return QuantizeTestImpl<T, 4>(workloadFactory,
+                                  memoryManager,
+                                  inputTensorInfo,
+                                  outputTensorInfo,
+                                  inputData,
+                                  expectedOutputData,
+                                  desc);
+}
+
+} // anonymous namespace
+
+LayerTestResult<uint8_t, 4> QuantizeSimpleUint8Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    return QuantizeSimpleTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
+}
+
+LayerTestResult<uint8_t, 4> QuantizeClampUint8Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    return QuantizeClampTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
+}
+
+LayerTestResult<int16_t, 4> QuantizeClampInt16Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    return QuantizeClampTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
+}
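Note on the expected values above: these tests assume ArmNN's affine quantization scheme, q = round(x / scale) + offset, saturated to the representable range of the output type. With scale 0.5 and offset 1 in QuantizeSimpleTest, 1.0f maps to 3, 2.0f to 5, and so on up to 12.0f -> 25; QuantizeClampTest uses a tiny scale (0.0001) so +/-100.0f saturate at the output type's numeric limits. Below is a minimal standalone sketch of that reference arithmetic; QuantizeRef is a hypothetical helper written for illustration, not an ArmNN API, and the rounding mode is immaterial for these exact-valued test vectors.

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <iostream>
#include <limits>

// Reference affine quantization: q = round(x / scale) + offset,
// saturated to the representable range of the target type T.
// Hypothetical helper for illustration only, not part of ArmNN.
template <typename T>
T QuantizeRef(float x, float scale, int32_t offset)
{
    float q = std::round(x / scale) + static_cast<float>(offset);
    q = std::max(q, static_cast<float>(std::numeric_limits<T>::lowest()));
    q = std::min(q, static_cast<float>(std::numeric_limits<T>::max()));
    return static_cast<T>(q);
}

int main()
{
    // QuantizeSimpleTest vectors: scale 0.5, offset 1.
    std::cout << +QuantizeRef<uint8_t>(1.0f, 0.5f, 1)  << '\n'; // 3
    std::cout << +QuantizeRef<uint8_t>(12.0f, 0.5f, 1) << '\n'; // 25

    // QuantizeClampTest vectors: scale 0.0001, offset 0 -> saturation.
    std::cout << +QuantizeRef<uint8_t>(-100.0f, 0.0001f, 0) << '\n'; // 0 (lowest)
    std::cout << +QuantizeRef<uint8_t>(100.0f, 0.0001f, 0)  << '\n'; // 255 (max)
    std::cout << QuantizeRef<int16_t>(-100.0f, 0.0001f, 0)  << '\n'; // -32768
    std::cout << QuantizeRef<int16_t>(100.0f, 0.0001f, 0)   << '\n'; // 32767
}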