From 48623a0f6f4681ce0d9525b1587b7f96bfd58519 Mon Sep 17 00:00:00 2001
From: Aron Virginas-Tar
Date: Tue, 22 Oct 2019 10:00:28 +0100
Subject: IVGCVSW-4018 Move QuantizeHelper.hpp to armnnUtils

* Moved QuantizeHelper.hpp to armnnUtils
* Reordered parameters for QuantizedVector and added default values for
  qScale and qOffset to make life easier when using the function for
  non-quantized types such as Float16

Signed-off-by: Aron Virginas-Tar
Change-Id: I28c263dfa425f1316feccb4116839a84f5d568e5
---
 src/backends/cl/test/Fp16SupportTest.cpp |  1 -
 src/backends/cl/test/OpenClTimerTest.cpp | 43 +++++++++++---------------------
 2 files changed, 15 insertions(+), 29 deletions(-)

(limited to 'src/backends/cl/test')

diff --git a/src/backends/cl/test/Fp16SupportTest.cpp b/src/backends/cl/test/Fp16SupportTest.cpp
index dac1ebcd02..f117c92e49 100644
--- a/src/backends/cl/test/Fp16SupportTest.cpp
+++ b/src/backends/cl/test/Fp16SupportTest.cpp
@@ -12,7 +12,6 @@
 #include
 #include
 #include
-#include

 #include
 #include

diff --git a/src/backends/cl/test/OpenClTimerTest.cpp b/src/backends/cl/test/OpenClTimerTest.cpp
index 1eeb9ed98f..13620c4311 100644
--- a/src/backends/cl/test/OpenClTimerTest.cpp
+++ b/src/backends/cl/test/OpenClTimerTest.cpp
@@ -51,40 +51,27 @@ BOOST_AUTO_TEST_CASE(OpenClTimerBatchNorm)
     const unsigned int height   = 3;
     const unsigned int channels = 2;
     const unsigned int num      = 1;
-    int32_t qOffset = 0;
-    float qScale = 0.f;

-    TensorInfo inputTensorInfo( {num, channels, height, width}, DataType::Float32);
+    TensorInfo inputTensorInfo({num, channels, height, width}, DataType::Float32);
     TensorInfo outputTensorInfo({num, channels, height, width}, DataType::Float32);
     TensorInfo tensorInfo({channels}, DataType::Float32);

-    // Set quantization parameters if the requested type is a quantized type.
-    if(IsQuantizedType<float>())
-    {
-        inputTensorInfo.SetQuantizationScale(qScale);
-        inputTensorInfo.SetQuantizationOffset(qOffset);
-        outputTensorInfo.SetQuantizationScale(qScale);
-        outputTensorInfo.SetQuantizationOffset(qOffset);
-        tensorInfo.SetQuantizationScale(qScale);
-        tensorInfo.SetQuantizationOffset(qOffset);
-    }
-
     auto input = MakeTensor<float, 4>(inputTensorInfo,
-                                      QuantizedVector<float>(qScale, qOffset,
-                                      {
-                                           1.f, 4.f,
-                                           4.f, 2.f,
-                                           1.f, 6.f,
-
-                                           1.f, 1.f,
-                                           4.f, 1.f,
-                                          -2.f, 4.f
-                                      }));
+                                      {
+                                           1.f, 4.f,
+                                           4.f, 2.f,
+                                           1.f, 6.f,
+
+                                           1.f, 1.f,
+                                           4.f, 1.f,
+                                          -2.f, 4.f
+                                      });

     // these values are per-channel of the input
-    auto mean     = MakeTensor<float, 1>(tensorInfo, QuantizedVector<float>(qScale, qOffset, {3, -2}));
-    auto variance = MakeTensor<float, 1>(tensorInfo, QuantizedVector<float>(qScale, qOffset, {4, 9}));
-    auto beta     = MakeTensor<float, 1>(tensorInfo, QuantizedVector<float>(qScale, qOffset, {3, 2}));
-    auto gamma    = MakeTensor<float, 1>(tensorInfo, QuantizedVector<float>(qScale, qOffset, {2, 1}));
+    auto mean     = MakeTensor<float, 1>(tensorInfo, { 3.f, -2.f });
+    auto variance = MakeTensor<float, 1>(tensorInfo, { 4.f, 9.f });
+    auto beta     = MakeTensor<float, 1>(tensorInfo, { 3.f, 2.f });
+    auto gamma    = MakeTensor<float, 1>(tensorInfo, { 2.f, 1.f });

     std::unique_ptr<ITensorHandle> inputHandle  = workloadFactory.CreateTensorHandle(inputTensorInfo);
     std::unique_ptr<ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
--
cgit v1.2.1
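
For readers unfamiliar with the helper this commit reworks, below is a minimal sketch of what a "values first, qScale/qOffset with defaults" QuantizedVector might look like after the reorder described in the commit message. This is an illustrative approximation, not the actual armnnUtils/QuantizeHelper.hpp: the names QuantizedVector, qScale and qOffset come from the commit, while the default values, the function body and the quantize/pass-through split are assumptions made for the example.

// Illustrative sketch only -- not the actual armnnUtils implementation.
// Data comes first; qScale/qOffset follow with (assumed) defaults, so
// callers working with non-quantized types can omit them entirely.
#include <cmath>
#include <cstdint>
#include <type_traits>
#include <vector>

template <typename T>
std::vector<T> QuantizedVector(const std::vector<float>& values,
                               float qScale = 1.0f,  // default value assumed
                               int32_t qOffset = 0)  // default value assumed
{
    std::vector<T> result;
    result.reserve(values.size());
    for (float value : values)
    {
        if (std::is_integral<T>::value)
        {
            // Quantized (integer) element types: apply affine quantization.
            result.push_back(static_cast<T>(std::round(value / qScale) + qOffset));
        }
        else
        {
            // Non-quantized element types: pass the value through unchanged.
            result.push_back(static_cast<T>(value));
        }
    }
    return result;
}

int main()
{
    // Non-quantized callers can drop the helper entirely (as the updated test
    // above now does) or simply rely on the defaults:
    auto plain = QuantizedVector<float>({ 1.f, 4.f, -2.f });

    // Quantized callers still pass scale and offset, now after the data:
    auto quantized = QuantizedVector<uint8_t>({ 1.f, 4.f, 2.f }, 0.5f, 10);

    return static_cast<int>(plain.size() + quantized.size());
}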