diff options
author | Aron Virginas-Tar <Aron.Virginas-Tar@arm.com> | 2019-10-22 10:00:28 +0100 |
---|---|---|
committer | Matteo Martincigh <matteo.martincigh@arm.com> | 2019-10-25 15:01:24 +0000 |
commit | 48623a0f6f4681ce0d9525b1587b7f96bfd58519 (patch) | |
tree | f5dbf25937e7b6641274e0953d09ca84acb51772 /src/backends/backendsCommon/test/layerTests/ResizeTestImpl.hpp | |
parent | c82c8732fb514b412012002bd951a84039eca696 (diff) | |
download | armnn-48623a0f6f4681ce0d9525b1587b7f96bfd58519.tar.gz |
IVGCVSW-4018 Move QuantizeHelper.hpp to armnnUtils
* Moved QuantizeHelper.hpp to armnnUtils
* Reordered parameters for QuantizedVector and added default
values for qScale and qOffset to make life easier when
using the function for non-quantized types such as Float16
Signed-off-by: Aron Virginas-Tar <Aron.Virginas-Tar@arm.com>
Change-Id: I28c263dfa425f1316feccb4116839a84f5d568e5
Diffstat (limited to 'src/backends/backendsCommon/test/layerTests/ResizeTestImpl.hpp')
-rw-r--r-- | src/backends/backendsCommon/test/layerTests/ResizeTestImpl.hpp | 119 |
1 file changed, 65 insertions, 54 deletions
diff --git a/src/backends/backendsCommon/test/layerTests/ResizeTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/ResizeTestImpl.hpp index bb2392ff01..56ce51a844 100644 --- a/src/backends/backendsCommon/test/layerTests/ResizeTestImpl.hpp +++ b/src/backends/backendsCommon/test/layerTests/ResizeTestImpl.hpp @@ -8,6 +8,7 @@ #include "LayerTestResult.hpp" #include <Permute.hpp> +#include <QuantizeHelper.hpp> #include <ResolveType.hpp> #include <TensorUtils.hpp> @@ -76,9 +77,10 @@ LayerTestResult<T, 4> ResizeBilinearNopTest( inputData = tmp; } - auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(), - inputTensorInfo.GetQuantizationOffset(), - inputData)); + auto input = MakeTensor<T, 4>(inputTensorInfo, + armnnUtils::QuantizedVector<T>(inputData, + inputTensorInfo.GetQuantizationScale(), + inputTensorInfo.GetQuantizationOffset())); LayerTestResult<T, 4> result(outputTensorInfo); result.outputExpected = input; @@ -174,15 +176,16 @@ LayerTestResult<T, 4> SimpleResizeBilinearTest( outputData = tmp1; } - auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(), - inputTensorInfo.GetQuantizationOffset(), - inputData)); + auto input = MakeTensor<T, 4>(inputTensorInfo, + armnnUtils::QuantizedVector<T>(inputData, + inputTensorInfo.GetQuantizationScale(), + inputTensorInfo.GetQuantizationOffset())); LayerTestResult<T, 4> result(outputTensorInfo); result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, - QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(), - outputTensorInfo.GetQuantizationOffset(), - outputData)); + armnnUtils::QuantizedVector<T>(outputData, + outputTensorInfo.GetQuantizationScale(), + outputTensorInfo.GetQuantizationOffset())); std::unique_ptr <armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo); std::unique_ptr <armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo); @@ 
-278,15 +281,16 @@ LayerTestResult<T, 4> ResizeBilinearSqMinTest( outputData = tmp1; } - auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(), - inputTensorInfo.GetQuantizationOffset(), - inputData)); + auto input = MakeTensor<T, 4>(inputTensorInfo, + armnnUtils::QuantizedVector<T>(inputData, + inputTensorInfo.GetQuantizationScale(), + inputTensorInfo.GetQuantizationOffset())); LayerTestResult<T, 4> result(outputTensorInfo); result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, - QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(), - outputTensorInfo.GetQuantizationOffset(), - outputData)); + armnnUtils::QuantizedVector<T>(outputData, + outputTensorInfo.GetQuantizationScale(), + outputTensorInfo.GetQuantizationOffset())); std::unique_ptr <armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo); std::unique_ptr <armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo); @@ -377,15 +381,16 @@ LayerTestResult<T, 4> ResizeBilinearMinTest( outputData = tmp1; } - auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(), - inputTensorInfo.GetQuantizationOffset(), - inputData)); + auto input = MakeTensor<T, 4>(inputTensorInfo, + armnnUtils::QuantizedVector<T>(inputData, + inputTensorInfo.GetQuantizationScale(), + inputTensorInfo.GetQuantizationOffset())); LayerTestResult<T, 4> result(outputTensorInfo); result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, - QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(), - outputTensorInfo.GetQuantizationOffset(), - outputData)); + armnnUtils::QuantizedVector<T>(outputData, + outputTensorInfo.GetQuantizationScale(), + outputTensorInfo.GetQuantizationOffset())); std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo); std::unique_ptr<armnn::ITensorHandle> outputHandle = 
workloadFactory.CreateTensorHandle(outputTensorInfo); @@ -484,15 +489,16 @@ LayerTestResult<T, 4> ResizeBilinearMagTest( outputData = tmp1; } - auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(), - inputTensorInfo.GetQuantizationOffset(), - inputData)); + auto input = MakeTensor<T, 4>(inputTensorInfo, + armnnUtils::QuantizedVector<T>(inputData, + inputTensorInfo.GetQuantizationScale(), + inputTensorInfo.GetQuantizationOffset())); LayerTestResult<T, 4> result(outputTensorInfo); result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, - QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(), - outputTensorInfo.GetQuantizationOffset(), - outputData)); + armnnUtils::QuantizedVector<T>(outputData, + outputTensorInfo.GetQuantizationScale(), + outputTensorInfo.GetQuantizationOffset())); std::unique_ptr <armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo); std::unique_ptr <armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo); @@ -573,9 +579,10 @@ LayerTestResult<T, 4> ResizeNearestNeighborNopTest( inputData = tmp; } - auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(), - inputTensorInfo.GetQuantizationOffset(), - inputData)); + auto input = MakeTensor<T, 4>(inputTensorInfo, + armnnUtils::QuantizedVector<T>(inputData, + inputTensorInfo.GetQuantizationScale(), + inputTensorInfo.GetQuantizationOffset())); LayerTestResult<T, 4> result(outputTensorInfo); result.outputExpected = input; @@ -670,15 +677,16 @@ LayerTestResult<T, 4> SimpleResizeNearestNeighborTest( outputData = tmp1; } - auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(), - inputTensorInfo.GetQuantizationOffset(), - inputData)); + auto input = MakeTensor<T, 4>(inputTensorInfo, + armnnUtils::QuantizedVector<T>(inputData, + inputTensorInfo.GetQuantizationScale(), + 
inputTensorInfo.GetQuantizationOffset())); LayerTestResult<T, 4> result(outputTensorInfo); result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, - QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(), - outputTensorInfo.GetQuantizationOffset(), - outputData)); + armnnUtils::QuantizedVector<T>(outputData, + outputTensorInfo.GetQuantizationScale(), + outputTensorInfo.GetQuantizationOffset())); std::unique_ptr <armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo); std::unique_ptr <armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo); @@ -773,15 +781,16 @@ LayerTestResult<T, 4> ResizeNearestNeighborSqMinTest( outputData = tmp1; } - auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(), - inputTensorInfo.GetQuantizationOffset(), - inputData)); + auto input = MakeTensor<T, 4>(inputTensorInfo, + armnnUtils::QuantizedVector<T>(inputData, + inputTensorInfo.GetQuantizationScale(), + inputTensorInfo.GetQuantizationOffset())); LayerTestResult<T, 4> result(outputTensorInfo); result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, - QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(), - outputTensorInfo.GetQuantizationOffset(), - outputData)); + armnnUtils::QuantizedVector<T>(outputData, + outputTensorInfo.GetQuantizationScale(), + outputTensorInfo.GetQuantizationOffset())); std::unique_ptr <armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo); std::unique_ptr <armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo); @@ -871,15 +880,16 @@ LayerTestResult<T, 4> ResizeNearestNeighborMinTest( outputData = tmp1; } - auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(), - inputTensorInfo.GetQuantizationOffset(), - inputData)); + auto input = MakeTensor<T, 4>(inputTensorInfo, + armnnUtils::QuantizedVector<T>(inputData, + 
inputTensorInfo.GetQuantizationScale(), + inputTensorInfo.GetQuantizationOffset())); LayerTestResult<T, 4> result(outputTensorInfo); result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, - QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(), - outputTensorInfo.GetQuantizationOffset(), - outputData)); + armnnUtils::QuantizedVector<T>(outputData, + outputTensorInfo.GetQuantizationScale(), + outputTensorInfo.GetQuantizationOffset())); std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo); std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo); @@ -978,15 +988,16 @@ LayerTestResult<T, 4> ResizeNearestNeighborMagTest( outputData = tmp1; } - auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(), - inputTensorInfo.GetQuantizationOffset(), - inputData)); + auto input = MakeTensor<T, 4>(inputTensorInfo, + armnnUtils::QuantizedVector<T>(inputData, + inputTensorInfo.GetQuantizationScale(), + inputTensorInfo.GetQuantizationOffset())); LayerTestResult<T, 4> result(outputTensorInfo); result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, - QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(), - outputTensorInfo.GetQuantizationOffset(), - outputData)); + armnnUtils::QuantizedVector<T>(outputData, + outputTensorInfo.GetQuantizationScale(), + outputTensorInfo.GetQuantizationOffset())); std::unique_ptr <armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo); std::unique_ptr <armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo); |