Diffstat (limited to 'src/backends/cl')
-rw-r--r--  src/backends/cl/test/Fp16SupportTest.cpp |  1 -
-rw-r--r--  src/backends/cl/test/OpenClTimerTest.cpp | 43 +++++++++++++++----------------------------
2 files changed, 15 insertions(+), 29 deletions(-)
diff --git a/src/backends/cl/test/Fp16SupportTest.cpp b/src/backends/cl/test/Fp16SupportTest.cpp
index dac1ebcd02..f117c92e49 100644
--- a/src/backends/cl/test/Fp16SupportTest.cpp
+++ b/src/backends/cl/test/Fp16SupportTest.cpp
@@ -12,7 +12,6 @@
#include <Graph.hpp>
#include <Optimizer.hpp>
#include <backendsCommon/CpuTensorHandle.hpp>
-#include <backendsCommon/test/QuantizeHelper.hpp>
#include <boost/core/ignore_unused.hpp>
#include <boost/test/unit_test.hpp>
diff --git a/src/backends/cl/test/OpenClTimerTest.cpp b/src/backends/cl/test/OpenClTimerTest.cpp
index 1eeb9ed98f..13620c4311 100644
--- a/src/backends/cl/test/OpenClTimerTest.cpp
+++ b/src/backends/cl/test/OpenClTimerTest.cpp
@@ -51,40 +51,27 @@ BOOST_AUTO_TEST_CASE(OpenClTimerBatchNorm)
const unsigned int height = 3;
const unsigned int channels = 2;
const unsigned int num = 1;
- int32_t qOffset = 0;
- float qScale = 0.f;
- TensorInfo inputTensorInfo({num, channels, height, width}, DataType::Float32);
+ TensorInfo inputTensorInfo( {num, channels, height, width}, DataType::Float32);
TensorInfo outputTensorInfo({num, channels, height, width}, DataType::Float32);
TensorInfo tensorInfo({channels}, DataType::Float32);
- // Set quantization parameters if the requested type is a quantized type.
- if(IsQuantizedType<float>())
- {
- inputTensorInfo.SetQuantizationScale(qScale);
- inputTensorInfo.SetQuantizationOffset(qOffset);
- outputTensorInfo.SetQuantizationScale(qScale);
- outputTensorInfo.SetQuantizationOffset(qOffset);
- tensorInfo.SetQuantizationScale(qScale);
- tensorInfo.SetQuantizationOffset(qOffset);
- }
-
auto input = MakeTensor<float, 4>(inputTensorInfo,
- QuantizedVector<float>(qScale, qOffset,
- {
- 1.f, 4.f,
- 4.f, 2.f,
- 1.f, 6.f,
-
- 1.f, 1.f,
- 4.f, 1.f,
- -2.f, 4.f
- }));
+ {
+ 1.f, 4.f,
+ 4.f, 2.f,
+ 1.f, 6.f,
+
+ 1.f, 1.f,
+ 4.f, 1.f,
+ -2.f, 4.f
+ });
+
// these values are per-channel of the input
- auto mean = MakeTensor<float, 1>(tensorInfo, QuantizedVector<float>(qScale, qOffset, {3, -2}));
- auto variance = MakeTensor<float, 1>(tensorInfo, QuantizedVector<float>(qScale, qOffset, {4, 9}));
- auto beta = MakeTensor<float, 1>(tensorInfo, QuantizedVector<float>(qScale, qOffset, {3, 2}));
- auto gamma = MakeTensor<float, 1>(tensorInfo, QuantizedVector<float>(qScale, qOffset, {2, 1}));
+ auto mean = MakeTensor<float, 1>(tensorInfo, { 3.f, -2.f });
+ auto variance = MakeTensor<float, 1>(tensorInfo, { 4.f, 9.f });
+ auto beta = MakeTensor<float, 1>(tensorInfo, { 3.f, 2.f });
+ auto gamma = MakeTensor<float, 1>(tensorInfo, { 2.f, 1.f });
std::unique_ptr<ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
std::unique_ptr<ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
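
Context for the OpenClTimerTest.cpp hunk: since the tensors are declared as DataType::Float32, IsQuantizedType<float>() evaluates to false, so the deleted quantization branch never executed and QuantizedVector<float>(...) only passed the float literals through unchanged. Below is a minimal stand-alone sketch of that pass-through behaviour; the helpers are simplified stand-ins for illustration, not ArmNN's actual QuantizeHelper implementation.

// Sketch only: simplified stand-ins showing why a quantize helper is a
// no-op for float test data (not the ArmNN implementation).
#include <cstdint>
#include <iostream>
#include <type_traits>
#include <vector>

// Mirrors the idea of armnn::IsQuantizedType<T>(): only integer element
// types are treated as quantized; float is not.
template <typename T>
constexpr bool IsQuantizedTypeSketch()
{
    return std::is_integral<T>::value;
}

// Mirrors the idea of QuantizedVector<T>(scale, offset, values): for a
// non-quantized T the input values are returned unchanged.
template <typename T>
std::vector<T> QuantizedVectorSketch(float scale, int32_t offset,
                                     const std::vector<float>& values)
{
    std::vector<T> result;
    result.reserve(values.size());
    for (float v : values)
    {
        if (IsQuantizedTypeSketch<T>())
        {
            result.push_back(static_cast<T>(v / scale + offset)); // quantize integers
        }
        else
        {
            result.push_back(static_cast<T>(v)); // pass-through for float
        }
    }
    return result;
}

int main()
{
    const std::vector<float> data = { 1.f, 4.f, 4.f, 2.f, 1.f, 6.f };
    // With T = float the helper returns the input values unchanged, so a
    // Float32 test can supply the literals directly.
    auto passedThrough = QuantizedVectorSketch<float>(0.f, 0, data);
    std::cout << (passedThrough == data ? "identical" : "different") << "\n";
}

Because the helper is an identity for float, removing it along with the unused qScale/qOffset locals leaves the batch-norm test data untouched.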