author     Aron Virginas-Tar <Aron.Virginas-Tar@arm.com>    2019-10-22 10:00:28 +0100
committer  Matteo Martincigh <matteo.martincigh@arm.com>    2019-10-25 15:01:24 +0000
commit     48623a0f6f4681ce0d9525b1587b7f96bfd58519 (patch)
tree       f5dbf25937e7b6641274e0953d09ca84acb51772 /src/backends/backendsCommon/test/layerTests/L2NormalizationTestImpl.cpp
parent     c82c8732fb514b412012002bd951a84039eca696 (diff)
download   armnn-48623a0f6f4681ce0d9525b1587b7f96bfd58519.tar.gz
IVGCVSW-4018 Move QuantizeHelper.hpp to armnnUtils
* Moved QuantizeHelper.hpp to armnnUtils
* Reordered parameters for QuantizedVector and added default values for qScale and qOffset to make life easier when using the function for non-quantized types such as Float16

Signed-off-by: Aron Virginas-Tar <Aron.Virginas-Tar@arm.com>
Change-Id: I28c263dfa425f1316feccb4116839a84f5d568e5
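For reference, below is a minimal sketch of the reordered armnnUtils::QuantizedVector helper described above, inferred from the call sites changed in this diff. The parameter names qScale and qOffset and the defaults come from the commit message; the rounding behaviour and the integral/float split shown here are assumptions for illustration, not the actual QuantizeHelper.hpp implementation.

    #include <cmath>
    #include <cstdint>
    #include <type_traits>
    #include <vector>

    // Sketch only: values come first, quantization parameters last, with defaults
    // so non-quantized types (e.g. Float16) can omit them entirely.
    template <typename T>
    std::vector<T> QuantizedVector(const std::vector<float>& values,
                                   float qScale = 1.0f,
                                   int32_t qOffset = 0)
    {
        std::vector<T> result;
        result.reserve(values.size());
        for (float value : values)
        {
            if constexpr (std::is_integral<T>::value)
            {
                // Quantized (integer) types: affine quantization round(value / scale) + offset.
                result.push_back(static_cast<T>(std::lround(value / qScale) + qOffset));
            }
            else
            {
                // Float-like types: with the default scale/offset this is just an
                // element-wise conversion, so callers can pass the data alone.
                result.push_back(static_cast<T>(value));
            }
        }
        return result;
    }

A quantized call site would then read QuantizedVector<uint8_t>(data, scale, offset), while a Float16 test could simply write QuantizedVector<armnn::Half>(data), which is the convenience the commit message refers to for non-quantized types.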
Diffstat (limited to 'src/backends/backendsCommon/test/layerTests/L2NormalizationTestImpl.cpp')
-rw-r--r--   src/backends/backendsCommon/test/layerTests/L2NormalizationTestImpl.cpp   28
1 file changed, 12 insertions(+), 16 deletions(-)
diff --git a/src/backends/backendsCommon/test/layerTests/L2NormalizationTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/L2NormalizationTestImpl.cpp
index 5c75b6f540..569f5af227 100644
--- a/src/backends/backendsCommon/test/layerTests/L2NormalizationTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/L2NormalizationTestImpl.cpp
@@ -6,6 +6,7 @@
#include "L2NormalizationTestImpl.hpp"
#include <Permute.hpp>
+#include <QuantizeHelper.hpp>
#include <ResolveType.hpp>
#include <TensorUtils.hpp>
@@ -44,10 +45,10 @@ LayerTestResult<T, 4> L2NormalizationTestImpl(
inputData = tmp;
}
- auto inputTensor = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(
- inputTensorInfo.GetQuantizationScale(),
- inputTensorInfo.GetQuantizationOffset(),
- inputData));
+ auto inputTensor = MakeTensor<T, 4>(inputTensorInfo,
+ armnnUtils::QuantizedVector<T>(inputData,
+ inputTensorInfo.GetQuantizationScale(),
+ inputTensorInfo.GetQuantizationOffset()));
std::vector<float> expectedOutputData = expectedOutputValues;
if (layout == armnn::DataLayout::NHWC)
@@ -59,10 +60,11 @@ LayerTestResult<T, 4> L2NormalizationTestImpl(
}
LayerTestResult<T, 4> result(outputTensorInfo);
- result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(
- outputTensorInfo.GetQuantizationScale(),
- outputTensorInfo.GetQuantizationOffset(),
- expectedOutputData));
+ result.outputExpected =
+ MakeTensor<T, 4>(outputTensorInfo,
+ armnnUtils::QuantizedVector<T>(expectedOutputData,
+ outputTensorInfo.GetQuantizationScale(),
+ outputTensorInfo.GetQuantizationOffset()));
std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
@@ -693,16 +695,10 @@ LayerTestResult<float, 2> L2Normalization2dShapeTest(
const armnn::TensorInfo inputTensorInfo(inputOutputTensorShape, armnn::DataType::Float32, 0.f, 0);
const armnn::TensorInfo outputTensorInfo(inputOutputTensorShape, armnn::DataType::Float32, 0.f, 0);
- auto inputTensor = MakeTensor<float, 2>(inputTensorInfo, QuantizedVector<float>(
- inputTensorInfo.GetQuantizationScale(),
- inputTensorInfo.GetQuantizationOffset(),
- inputData));
+ auto inputTensor = MakeTensor<float, 2>(inputTensorInfo, inputData);
LayerTestResult<float, 2> result(outputTensorInfo);
- result.outputExpected = MakeTensor<float, 2>(outputTensorInfo, QuantizedVector<float>(
- outputTensorInfo.GetQuantizationScale(),
- outputTensorInfo.GetQuantizationOffset(),
- expectedOutputData));
+ result.outputExpected = MakeTensor<float, 2>(outputTensorInfo, expectedOutputData);
std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);