author | Aron Virginas-Tar <Aron.Virginas-Tar@arm.com> | 2019-10-22 10:00:28 +0100
---|---|---
committer | Matteo Martincigh <matteo.martincigh@arm.com> | 2019-10-25 15:01:24 +0000
commit | 48623a0f6f4681ce0d9525b1587b7f96bfd58519 (patch) |
tree | f5dbf25937e7b6641274e0953d09ca84acb51772 /src/backends/cl/test |
parent | c82c8732fb514b412012002bd951a84039eca696 (diff) |
download | armnn-48623a0f6f4681ce0d9525b1587b7f96bfd58519.tar.gz |
IVGCVSW-4018 Move QuantizeHelper.hpp to armnnUtils
* Moved QuantizeHelper.hpp to armnnUtils
* Reordered parameters for QuantizedVector and added default
  values for qScale and qOffset to make life easier when
  using the function for non-quantized types such as Float16
  (see the usage sketch below)
Signed-off-by: Aron Virginas-Tar <Aron.Virginas-Tar@arm.com>
Change-Id: I28c263dfa425f1316feccb4116839a84f5d568e5
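To illustrate the ergonomic point the commit message makes, here is a minimal self-contained sketch of a QuantizedVector-style helper with the described parameter order: data first, `qScale`/`qOffset` defaulted. This is not the armnnUtils source; the exact signature is inferred from the commit message, and `std::is_floating_point` stands in for armnn's quantized-type dispatch.

```cpp
#include <cmath>
#include <cstdint>
#include <type_traits>
#include <vector>

// Stand-in for the reordered QuantizedVector described above. Assumed shape:
// values first, then qScale and qOffset with defaults, so non-quantized
// callers can omit the quantization parameters entirely.
template <typename T>
std::vector<T> QuantizedVector(const std::vector<float>& values,
                               float qScale = 1.0f,
                               int32_t qOffset = 0)
{
    std::vector<T> result;
    result.reserve(values.size());
    for (float v : values)
    {
        if constexpr (std::is_floating_point_v<T>)
        {
            result.push_back(static_cast<T>(v)); // non-quantized: plain conversion
        }
        else
        {
            // quantized: affine mapping from real value to integer storage
            result.push_back(static_cast<T>(std::round(v / qScale) + qOffset));
        }
    }
    return result;
}

int main()
{
    // Quantized type: scale and offset are passed explicitly, after the data.
    auto q = QuantizedVector<uint8_t>({ 1.f, 4.f, 4.f, 2.f }, 0.5f, 10);

    // Non-quantized type (Float16 in armnn; float here): the defaults apply,
    // so the call site carries no quantization boilerplate.
    auto f = QuantizedVector<float>({ 1.f, 4.f, 4.f, 2.f });
    (void)q; (void)f;
}
```

With the old parameter order (`QuantizedVector<float>(qScale, qOffset, {...})`), callers had to supply dummy scale/offset values even for float data, which is exactly the boilerplate the OpenClTimerTest.cpp hunk below removes.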
Diffstat (limited to 'src/backends/cl/test')
-rw-r--r-- | src/backends/cl/test/Fp16SupportTest.cpp |  1
-rw-r--r-- | src/backends/cl/test/OpenClTimerTest.cpp | 43
2 files changed, 15 insertions, 29 deletions
```diff
diff --git a/src/backends/cl/test/Fp16SupportTest.cpp b/src/backends/cl/test/Fp16SupportTest.cpp
index dac1ebcd02..f117c92e49 100644
--- a/src/backends/cl/test/Fp16SupportTest.cpp
+++ b/src/backends/cl/test/Fp16SupportTest.cpp
@@ -12,7 +12,6 @@
 #include <Graph.hpp>
 #include <Optimizer.hpp>
 #include <backendsCommon/CpuTensorHandle.hpp>
-#include <backendsCommon/test/QuantizeHelper.hpp>

 #include <boost/core/ignore_unused.hpp>
 #include <boost/test/unit_test.hpp>
diff --git a/src/backends/cl/test/OpenClTimerTest.cpp b/src/backends/cl/test/OpenClTimerTest.cpp
index 1eeb9ed98f..13620c4311 100644
--- a/src/backends/cl/test/OpenClTimerTest.cpp
+++ b/src/backends/cl/test/OpenClTimerTest.cpp
@@ -51,40 +51,27 @@ BOOST_AUTO_TEST_CASE(OpenClTimerBatchNorm)
     const unsigned int height = 3;
     const unsigned int channels = 2;
     const unsigned int num = 1;

-    int32_t qOffset = 0;
-    float qScale = 0.f;
-    TensorInfo inputTensorInfo({num, channels, height, width}, DataType::Float32);
+    TensorInfo inputTensorInfo( {num, channels, height, width}, DataType::Float32);
     TensorInfo outputTensorInfo({num, channels, height, width}, DataType::Float32);
     TensorInfo tensorInfo({channels}, DataType::Float32);

-    // Set quantization parameters if the requested type is a quantized type.
-    if(IsQuantizedType<float>())
-    {
-        inputTensorInfo.SetQuantizationScale(qScale);
-        inputTensorInfo.SetQuantizationOffset(qOffset);
-        outputTensorInfo.SetQuantizationScale(qScale);
-        outputTensorInfo.SetQuantizationOffset(qOffset);
-        tensorInfo.SetQuantizationScale(qScale);
-        tensorInfo.SetQuantizationOffset(qOffset);
-    }
-
     auto input = MakeTensor<float, 4>(inputTensorInfo,
-        QuantizedVector<float>(qScale, qOffset,
-        {
-             1.f, 4.f,
-             4.f, 2.f,
-             1.f, 6.f,
-
-             1.f, 1.f,
-             4.f, 1.f,
-            -2.f, 4.f
-        }));
+        {
+             1.f, 4.f,
+             4.f, 2.f,
+             1.f, 6.f,
+
+             1.f, 1.f,
+             4.f, 1.f,
+            -2.f, 4.f
+        });
+
     // these values are per-channel of the input
-    auto mean = MakeTensor<float, 1>(tensorInfo, QuantizedVector<float>(qScale, qOffset, {3, -2}));
-    auto variance = MakeTensor<float, 1>(tensorInfo, QuantizedVector<float>(qScale, qOffset, {4, 9}));
-    auto beta = MakeTensor<float, 1>(tensorInfo, QuantizedVector<float>(qScale, qOffset, {3, 2}));
-    auto gamma = MakeTensor<float, 1>(tensorInfo, QuantizedVector<float>(qScale, qOffset, {2, 1}));
+    auto mean = MakeTensor<float, 1>(tensorInfo, { 3.f, -2.f });
+    auto variance = MakeTensor<float, 1>(tensorInfo, { 4.f, 9.f });
+    auto beta = MakeTensor<float, 1>(tensorInfo, { 3.f, 2.f });
+    auto gamma = MakeTensor<float, 1>(tensorInfo, { 2.f, 1.f });

     std::unique_ptr<ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
     std::unique_ptr<ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
```
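A note on why the deleted block in OpenClTimerTest.cpp was safe to remove outright: the test works with `float`, and `IsQuantizedType<float>()` is always false, so the `SetQuantization*` calls inside the `if` were dead code. A small sketch of that reasoning, using a simplified stand-in for armnn's `IsQuantizedType` trait (its exact definition is an assumption here):

```cpp
#include <cstdint>
#include <iostream>
#include <type_traits>

// Simplified stand-in for armnn's IsQuantizedType<T>(): true only for the
// integer storage types used by quantized tensors. Illustrative only; the
// real trait may cover a different set of types.
template <typename T>
constexpr bool IsQuantizedType()
{
    return std::is_same<T, uint8_t>::value ||
           std::is_same<T, int8_t>::value  ||
           std::is_same<T, int16_t>::value;
}

int main()
{
    // The removed branch was guarded by IsQuantizedType<float>(), which can
    // never be true, so the quantization-parameter setup never executed.
    std::cout << std::boolalpha
              << IsQuantizedType<float>()   << '\n'   // false
              << IsQuantizedType<uint8_t>() << '\n';  // true
}
```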