path: root/tests/validation
author    Georgios Pinitas <georgios.pinitas@arm.com>    2019-02-18 20:08:02 +0000
committer Giuseppe Rossini <giuseppe.rossini@arm.com>    2019-03-05 11:44:18 +0000
commit    574775c7fa78a094bbeb7f9f87aca832936884e2 (patch)
tree      a405e7a265865acc1348860514de28de2835ce24 /tests/validation
parent    79fa9a22022824735986f74557bf38095eb2284d (diff)
download  ComputeLibrary-574775c7fa78a094bbeb7f9f87aca832936884e2.tar.gz
COMPMID-1937: Adds support for DequantizationLayer for NEON/CL.
Change-Id: I4b73edd176a277294e0e42e642460bc61210778a
Signed-off-by: Georgios Pinitas <georgios.pinitas@arm.com>
Reviewed-on: https://review.mlplatform.org/c/744
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Giuseppe Rossini <giuseppe.rossini@arm.com>
Diffstat (limited to 'tests/validation')
-rw-r--r--  tests/validation/CL/DequantizationLayer.cpp              99
-rw-r--r--  tests/validation/NEON/DequantizationLayer.cpp           120
-rw-r--r--  tests/validation/fixtures/DequantizationLayerFixture.h   87
-rw-r--r--  tests/validation/reference/DequantizationLayer.cpp       32
-rw-r--r--  tests/validation/reference/DequantizationLayer.h          6
5 files changed, 128 insertions, 216 deletions
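The tests below exercise the new two-argument configure() API: the scale/offset now travel with the QASYMM8 input tensor's QuantizationInfo instead of a separate min/max tensor. A minimal usage sketch follows; it is not part of this commit, and the header paths, the TensorInfo constructor taking a QuantizationInfo, and the main() wrapper are assumptions about the library's public API at the time, with the scale/offset values borrowed from the Configuration test.

// Sketch (assumed headers and TensorInfo ctor): dequantize a QASYMM8 tensor to F32 with the new API.
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/Tensor.h"
#include "arm_compute/runtime/NEON/functions/NEDequantizationLayer.h"

using namespace arm_compute;

int main()
{
    // Quantized input with scale 0.5 and offset -10, matching the Configuration test below.
    Tensor src, dst;
    src.allocator()->init(TensorInfo(TensorShape(16U, 16U), 1, DataType::QASYMM8, QuantizationInfo(0.5f, -10)));
    dst.allocator()->init(TensorInfo(TensorShape(16U, 16U), 1, DataType::F32));

    // No min/max tensor argument any more; the scale/offset come from src's QuantizationInfo.
    NEDequantizationLayer dequant;
    dequant.configure(&src, &dst);

    src.allocator()->allocate();
    dst.allocator()->allocate();
    // ... fill src with quantized data ...
    dequant.run();
    return 0;
}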
diff --git a/tests/validation/CL/DequantizationLayer.cpp b/tests/validation/CL/DequantizationLayer.cpp
index 5303566922..b1b0d81c6d 100644
--- a/tests/validation/CL/DequantizationLayer.cpp
+++ b/tests/validation/CL/DequantizationLayer.cpp
@@ -40,107 +40,94 @@ namespace test
{
namespace validation
{
-namespace
-{
-const auto DequantizationShapes = concat(datasets::Small3DShapes(),
- datasets::Small4DShapes());
-} // namespace
-
TEST_SUITE(CL)
TEST_SUITE(DequantizationLayer)
// *INDENT-OFF*
// clang-format off
-DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
- framework::dataset::make("InputInfo", { TensorInfo(TensorShape(16U, 16U, 16U, 5U), 1, DataType::F32), // Wrong input data type
- TensorInfo(TensorShape(16U, 5U, 16U), 1, DataType::U8), // Invalid shape
- TensorInfo(TensorShape(16U, 16U, 16U, 5U), 1, DataType::U8), // Wrong output data type
- TensorInfo(TensorShape(16U, 16U, 2U, 5U), 1, DataType::U8), // Missmatching shapes
- TensorInfo(TensorShape(17U, 16U, 16U, 5U), 1, DataType::U8), // Shrink window
- TensorInfo(TensorShape(16U, 16U, 16U, 5U), 1, DataType::U8), // Valid
+DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(
+ framework::dataset::make("InputInfo", { TensorInfo(TensorShape(16U, 16U, 16U, 5U), 1, DataType::F32), // Wrong input data type
+ TensorInfo(TensorShape(16U, 16U, 16U, 5U), 1, DataType::QASYMM8), // Wrong output data type
+ TensorInfo(TensorShape(16U, 16U, 2U, 5U), 1, DataType::QASYMM8), // Missmatching shapes
+ TensorInfo(TensorShape(17U, 16U, 16U, 5U), 1, DataType::QASYMM8), // Valid
+ TensorInfo(TensorShape(16U, 16U, 16U, 5U), 1, DataType::QASYMM8), // Valid
}),
framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(16U, 16U, 16U, 5U), 1, DataType::F32),
- TensorInfo(TensorShape(16U, 5U, 16U), 1, DataType::U8),
TensorInfo(TensorShape(16U, 16U, 16U, 5U), 1, DataType::U8),
TensorInfo(TensorShape(16U, 16U, 16U, 5U), 1, DataType::F32),
TensorInfo(TensorShape(17U, 16U, 16U, 5U), 1, DataType::F32),
TensorInfo(TensorShape(16U, 16U, 16U, 5U), 1, DataType::F32),
})),
- framework::dataset::make("MinMax",{ TensorInfo(TensorShape(2U), 1, DataType::F32),
- TensorInfo(TensorShape(2U), 1, DataType::U8),
- TensorInfo(TensorShape(2U), 1, DataType::F32),
- TensorInfo(TensorShape(2U), 1, DataType::F32),
- TensorInfo(TensorShape(2U), 1, DataType::U8),
- TensorInfo(TensorShape(2U), 1, DataType::U8),
- })),
- framework::dataset::make("Expected", { false, false, false, false, false, true})),
- input_info, output_info, min_max, expected)
+ framework::dataset::make("Expected", { false, false, false, true, true})),
+ input_info, output_info, expected)
{
- ARM_COMPUTE_EXPECT(bool(CLDequantizationLayer::validate(&input_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false), &min_max.clone()->set_is_resizable(false))) == expected, framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(bool(CLDequantizationLayer::validate(&input_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false))) == expected, framework::LogLevel::ERRORS);
}
// clang-format on
// *INDENT-ON*
-DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(DequantizationShapes, framework::dataset::make("DataType", DataType::U8)), shape, data_type)
+DATA_TEST_CASE(Configuration,
+ framework::DatasetMode::ALL,
+ combine(datasets::SmallShapes(), framework::dataset::make("DataType", { DataType::F16, DataType::F32 })),
+ shape, data_type)
{
- TensorShape shape_min_max = shape;
- shape_min_max.set(Window::DimX, 2);
-
- // Remove Y and Z dimensions and keep the batches
- shape_min_max.remove_dimension(1);
- shape_min_max.remove_dimension(1);
-
// Create tensors
- CLTensor src = create_tensor<CLTensor>(shape, data_type);
- CLTensor dst = create_tensor<CLTensor>(shape, DataType::F32);
- CLTensor min_max = create_tensor<CLTensor>(shape_min_max, DataType::F32);
+ CLTensor src = create_tensor<CLTensor>(shape, DataType::QASYMM8, 1, QuantizationInfo(0.5f, -10));
+ CLTensor dst = create_tensor<CLTensor>(shape, data_type);
ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(min_max.info()->is_resizable(), framework::LogLevel::ERRORS);
// Create and configure function
CLDequantizationLayer dequant_layer;
- dequant_layer.configure(&src, &dst, &min_max);
+ dequant_layer.configure(&src, &dst);
// Validate valid region
const ValidRegion valid_region = shape_to_valid_region(shape);
validate(src.info()->valid_region(), valid_region);
validate(dst.info()->valid_region(), valid_region);
- // Validate valid region of min_max tensor
- const ValidRegion valid_region_min_max = shape_to_valid_region(shape_min_max);
- validate(min_max.info()->valid_region(), valid_region_min_max);
-
// Validate padding
- const PaddingSize padding = PaddingCalculator(shape.x(), 4).required_padding();
- validate(src.info()->padding(), padding);
- validate(dst.info()->padding(), padding);
-
- // Validate padding of min_max tensor
- const PaddingSize padding_min_max = PaddingCalculator(shape_min_max.x(), 2).required_padding();
- validate(min_max.info()->padding(), padding_min_max);
+ validate(src.info()->padding(), PaddingSize());
+ validate(dst.info()->padding(), PaddingSize());
}
template <typename T>
using CLDequantizationLayerFixture = DequantizationValidationFixture<CLTensor, CLAccessor, CLDequantizationLayer, T>;
-TEST_SUITE(Integer)
-TEST_SUITE(U8)
-FIXTURE_DATA_TEST_CASE(RunSmall, CLDequantizationLayerFixture<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(concat(datasets::Small3DShapes(), datasets::Small4DShapes()),
- framework::dataset::make("DataType", DataType::U8)))
+TEST_SUITE(FP16)
+FIXTURE_DATA_TEST_CASE(RunSmall, CLDequantizationLayerFixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallShapes(),
+ framework::dataset::make("DataType", DataType::F16)),
+ framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.1f, 128.0f) })))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference);
+}
+FIXTURE_DATA_TEST_CASE(RunLarge, CLDequantizationLayerFixture<half>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeShapes(),
+ framework::dataset::make("DataType", DataType::F16)),
+ framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.1f, 128.0f) })))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference);
+}
+TEST_SUITE_END() // FP16
+
+TEST_SUITE(FP32)
+FIXTURE_DATA_TEST_CASE(RunSmall, CLDequantizationLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallShapes(),
+ framework::dataset::make("DataType", DataType::F32)),
+ framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.1f, 128.0f) })))
{
// Validate output
validate(CLAccessor(_target), _reference);
}
-FIXTURE_DATA_TEST_CASE(RunLarge, CLDequantizationLayerFixture<uint8_t>, framework::DatasetMode::NIGHTLY, combine(concat(datasets::Large3DShapes(), datasets::Large4DShapes()),
- framework::dataset::make("DataType", DataType::U8)))
+FIXTURE_DATA_TEST_CASE(RunLarge, CLDequantizationLayerFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeShapes(),
+ framework::dataset::make("DataType", DataType::F32)),
+ framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.1f, 128.0f) })))
{
// Validate output
validate(CLAccessor(_target), _reference);
}
-TEST_SUITE_END() // U8
-TEST_SUITE_END() // Integer
+TEST_SUITE_END() // FP32
TEST_SUITE_END() // DequantizationLayer
TEST_SUITE_END() // CL
diff --git a/tests/validation/NEON/DequantizationLayer.cpp b/tests/validation/NEON/DequantizationLayer.cpp
index 48a6b227c1..0ae20b7b5d 100644
--- a/tests/validation/NEON/DequantizationLayer.cpp
+++ b/tests/validation/NEON/DequantizationLayer.cpp
@@ -42,8 +42,11 @@ namespace validation
{
namespace
{
-/** Tolerance for float operations */
-constexpr AbsoluteTolerance<float> tolerance_f32(0.001f);
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+const auto data_types = framework::dataset::make("DataType", { DataType::F16, DataType::F32 });
+#else /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
+const auto data_types = framework::dataset::make("DataType", { DataType::F32 });
+#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
} // namespace
TEST_SUITE(NEON)
@@ -51,96 +54,91 @@ TEST_SUITE(DequantizationLayer)
// *INDENT-OFF*
// clang-format off
-DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
- framework::dataset::make("InputInfo", { TensorInfo(TensorShape(16U, 16U, 16U, 5U), 1, DataType::F32), // Wrong input data type
- TensorInfo(TensorShape(16U, 5U, 16U), 1, DataType::U8), // Invalid shape
- TensorInfo(TensorShape(16U, 16U, 16U, 5U), 1, DataType::U8), // Wrong output data type
- TensorInfo(TensorShape(16U, 16U, 2U, 5U), 1, DataType::U8), // Missmatching shapes
- TensorInfo(TensorShape(17U, 16U, 16U, 5U), 1, DataType::U8), // Shrink window
- TensorInfo(TensorShape(16U, 16U, 16U, 5U), 1, DataType::U8), // Valid
- }),
- framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(16U, 16U, 16U, 5U), 1, DataType::F32),
- TensorInfo(TensorShape(16U, 5U, 16U), 1, DataType::U8),
- TensorInfo(TensorShape(16U, 16U, 16U, 5U), 1, DataType::U8),
- TensorInfo(TensorShape(16U, 16U, 16U, 5U), 1, DataType::F32),
- TensorInfo(TensorShape(17U, 16U, 16U, 5U), 1, DataType::F32),
- TensorInfo(TensorShape(16U, 16U, 16U, 5U), 1, DataType::F32),
- })),
- framework::dataset::make("MinMax",{ TensorInfo(TensorShape(2U), 1, DataType::F32),
- TensorInfo(TensorShape(2U), 1, DataType::U8),
- TensorInfo(TensorShape(2U), 1, DataType::F32),
- TensorInfo(TensorShape(2U), 1, DataType::F32),
- TensorInfo(TensorShape(2U), 1, DataType::U8),
- TensorInfo(TensorShape(2U), 1, DataType::U8),
- })),
- framework::dataset::make("Expected", { false, false, false, false, false, true})),
- input_info, output_info, min_max, expected)
+DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(
+ framework::dataset::make("InputInfo", { TensorInfo(TensorShape(16U, 16U, 16U, 5U), 1, DataType::F32), // Wrong input data type
+ TensorInfo(TensorShape(16U, 16U, 16U, 5U), 1, DataType::QASYMM8), // Wrong output data type
+ TensorInfo(TensorShape(16U, 16U, 2U, 5U), 1, DataType::QASYMM8), // Missmatching shapes
+ TensorInfo(TensorShape(17U, 16U, 16U, 5U), 1, DataType::QASYMM8), // Valid
+ TensorInfo(TensorShape(16U, 16U, 16U, 5U), 1, DataType::QASYMM8), // Valid
+ }),
+ framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(16U, 16U, 16U, 5U), 1, DataType::F32),
+ TensorInfo(TensorShape(16U, 16U, 16U, 5U), 1, DataType::U8),
+ TensorInfo(TensorShape(16U, 16U, 16U, 5U), 1, DataType::F32),
+ TensorInfo(TensorShape(17U, 16U, 16U, 5U), 1, DataType::F32),
+ TensorInfo(TensorShape(16U, 16U, 16U, 5U), 1, DataType::F32),
+ })),
+ framework::dataset::make("Expected", { false, false, false, true, true})),
+ input_info, output_info, expected)
{
- ARM_COMPUTE_EXPECT(bool(NEDequantizationLayer::validate(&input_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false), &min_max.clone()->set_is_resizable(false))) == expected, framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(bool(NEDequantizationLayer::validate(&input_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false))) == expected, framework::LogLevel::ERRORS);
}
// clang-format on
// *INDENT-ON*
-DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(datasets::Small3DShapes(), framework::dataset::make("DataType", DataType::U8)), shape, data_type)
+DATA_TEST_CASE(Configuration,
+ framework::DatasetMode::ALL,
+ combine(datasets::SmallShapes(), data_types),
+ shape, data_type)
{
- TensorShape shape_min_max = shape;
- shape_min_max.set(Window::DimX, 2);
-
- // Remove Y and Z dimensions and keep the batches
- shape_min_max.remove_dimension(1);
- shape_min_max.remove_dimension(1);
-
// Create tensors
- Tensor src = create_tensor<Tensor>(shape, data_type);
- Tensor dst = create_tensor<Tensor>(shape, DataType::F32);
- Tensor min_max = create_tensor<Tensor>(shape_min_max, DataType::F32);
+ Tensor src = create_tensor<Tensor>(shape, DataType::QASYMM8, 1, QuantizationInfo(0.5f, -10));
+ Tensor dst = create_tensor<Tensor>(shape, data_type);
ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(min_max.info()->is_resizable(), framework::LogLevel::ERRORS);
// Create and configure function
NEDequantizationLayer dequant_layer;
- dequant_layer.configure(&src, &dst, &min_max);
+ dequant_layer.configure(&src, &dst);
// Validate valid region
const ValidRegion valid_region = shape_to_valid_region(shape);
validate(src.info()->valid_region(), valid_region);
validate(dst.info()->valid_region(), valid_region);
- // Validate valid region of min_max tensor
- const ValidRegion valid_region_min_max = shape_to_valid_region(shape_min_max);
- validate(min_max.info()->valid_region(), valid_region_min_max);
-
// Validate padding
- const PaddingSize padding = PaddingCalculator(shape.x(), 8).required_padding();
- validate(src.info()->padding(), padding);
- validate(dst.info()->padding(), padding);
-
- // Validate padding of min_max tensor
- const PaddingSize padding_min_max = PaddingCalculator(shape_min_max.x(), 2).required_padding();
- validate(min_max.info()->padding(), padding_min_max);
+ validate(src.info()->padding(), PaddingSize());
+ validate(dst.info()->padding(), PaddingSize());
}
template <typename T>
using NEDequantizationLayerFixture = DequantizationValidationFixture<Tensor, Accessor, NEDequantizationLayer, T>;
-TEST_SUITE(Integer)
-TEST_SUITE(U8)
-FIXTURE_DATA_TEST_CASE(RunSmall, NEDequantizationLayerFixture<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(concat(datasets::Small3DShapes(), datasets::Small4DShapes()),
- framework::dataset::make("DataType", DataType::U8)))
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+TEST_SUITE(FP16)
+FIXTURE_DATA_TEST_CASE(RunSmall, NEDequantizationLayerFixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallShapes(),
+ framework::dataset::make("DataType", DataType::F16)),
+ framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.1f, 128.0f) })))
+{
+ // Validate output
+ validate(Accessor(_target), _reference);
+}
+FIXTURE_DATA_TEST_CASE(RunLarge, NEDequantizationLayerFixture<half>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeShapes(),
+ framework::dataset::make("DataType", DataType::F16)),
+ framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.1f, 128.0f) })))
+{
+ // Validate output
+ validate(Accessor(_target), _reference);
+}
+TEST_SUITE_END() // FP16
+#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
+
+TEST_SUITE(FP32)
+FIXTURE_DATA_TEST_CASE(RunSmall, NEDequantizationLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallShapes(),
+ framework::dataset::make("DataType", DataType::F32)),
+ framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.1f, 128.0f) })))
{
// Validate output
- validate(Accessor(_target), _reference, tolerance_f32);
+ validate(Accessor(_target), _reference);
}
-FIXTURE_DATA_TEST_CASE(RunLarge, NEDequantizationLayerFixture<uint8_t>, framework::DatasetMode::NIGHTLY, combine(concat(datasets::Large3DShapes(), datasets::Large4DShapes()),
- framework::dataset::make("DataType", DataType::U8)))
+FIXTURE_DATA_TEST_CASE(RunLarge, NEDequantizationLayerFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeShapes(),
+ framework::dataset::make("DataType", DataType::F32)),
+ framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.1f, 128.0f) })))
{
// Validate output
- validate(Accessor(_target), _reference, tolerance_f32);
+ validate(Accessor(_target), _reference);
}
-TEST_SUITE_END() // U8
-TEST_SUITE_END() // Integer
+TEST_SUITE_END() // FP32
TEST_SUITE_END() // DequantizationLayer
TEST_SUITE_END() // NEON
diff --git a/tests/validation/fixtures/DequantizationLayerFixture.h b/tests/validation/fixtures/DequantizationLayerFixture.h
index 0bf3522cd6..2e3712dff2 100644
--- a/tests/validation/fixtures/DequantizationLayerFixture.h
+++ b/tests/validation/fixtures/DequantizationLayerFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018 ARM Limited.
+ * Copyright (c) 2017-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -47,10 +47,10 @@ class DequantizationValidationFixture : public framework::Fixture
{
public:
template <typename...>
- void setup(TensorShape shape, DataType data_type)
+ void setup(TensorShape shape, DataType data_type, QuantizationInfo qinfo)
{
- _target = compute_target(shape, data_type);
- _reference = compute_reference(shape, data_type);
+ _target = compute_target(shape, data_type, qinfo);
+ _reference = compute_reference(shape, data_type, qinfo);
}
protected:
@@ -60,80 +60,28 @@ protected:
library->fill_tensor_uniform(tensor, 0);
}
- template <typename U>
- void fill_min_max(U &&tensor)
- {
- std::mt19937 gen(library->seed());
- std::uniform_real_distribution<float> distribution(-1.0f, 1.0f);
-
- Window window;
-
- window.set(0, Window::Dimension(0, tensor.shape()[0], 2));
-
- for(unsigned int d = 1; d < tensor.shape().num_dimensions(); ++d)
- {
- window.set(d, Window::Dimension(0, tensor.shape()[d], 1));
- }
-
- execute_window_loop(window, [&](const Coordinates & id)
- {
- const float n1 = distribution(gen);
- const float n2 = distribution(gen);
-
- float min = 0.0f;
- float max = 0.0f;
-
- if(n1 < n2)
- {
- min = n1;
- max = n2;
- }
- else
- {
- min = n2;
- max = n1;
- }
-
- auto out_ptr = reinterpret_cast<float *>(tensor(id));
- out_ptr[0] = min;
- out_ptr[1] = max;
- });
- }
-
- TensorType compute_target(const TensorShape &shape, DataType data_type)
+ TensorType compute_target(const TensorShape &shape, DataType data_type, QuantizationInfo qinfo)
{
- TensorShape shape_min_max = shape;
- shape_min_max.set(Window::DimX, 2);
-
- // Remove Y and Z dimensions and keep the batches
- shape_min_max.remove_dimension(1);
- shape_min_max.remove_dimension(1);
-
// Create tensors
- TensorType src = create_tensor<TensorType>(shape, data_type);
- TensorType dst = create_tensor<TensorType>(shape, DataType::F32);
- TensorType min_max = create_tensor<TensorType>(shape_min_max, DataType::F32);
+ TensorType src = create_tensor<TensorType>(shape, DataType::QASYMM8, 1, qinfo);
+ TensorType dst = create_tensor<TensorType>(shape, data_type);
// Create and configure function
FunctionType dequantization_layer;
- dequantization_layer.configure(&src, &dst, &min_max);
+ dequantization_layer.configure(&src, &dst);
ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(min_max.info()->is_resizable(), framework::LogLevel::ERRORS);
// Allocate tensors
src.allocator()->allocate();
dst.allocator()->allocate();
- min_max.allocator()->allocate();
ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!min_max.info()->is_resizable(), framework::LogLevel::ERRORS);
// Fill tensors
fill(AccessorType(src));
- fill_min_max(AccessorType(min_max));
// Compute function
dequantization_layer.run();
@@ -141,28 +89,19 @@ protected:
return dst;
}
- SimpleTensor<float> compute_reference(const TensorShape &shape, DataType data_type)
+ SimpleTensor<T> compute_reference(const TensorShape &shape, DataType data_type, QuantizationInfo qinfo)
{
- TensorShape shape_min_max = shape;
- shape_min_max.set(Window::DimX, 2);
-
- // Remove Y and Z dimensions and keep the batches
- shape_min_max.remove_dimension(1);
- shape_min_max.remove_dimension(1);
-
// Create reference
- SimpleTensor<T> src{ shape, data_type };
- SimpleTensor<float> min_max{ shape_min_max, data_type };
+ SimpleTensor<uint8_t> src{ shape, DataType::QASYMM8, 1, qinfo };
// Fill reference
fill(src);
- fill_min_max(min_max);
- return reference::dequantization_layer<T>(src, min_max);
+ return reference::dequantization_layer<T>(src);
}
- TensorType _target{};
- SimpleTensor<float> _reference{};
+ TensorType _target{};
+ SimpleTensor<T> _reference{};
};
} // namespace validation
} // namespace test
diff --git a/tests/validation/reference/DequantizationLayer.cpp b/tests/validation/reference/DequantizationLayer.cpp
index 33096a1d81..df50c14ec7 100644
--- a/tests/validation/reference/DequantizationLayer.cpp
+++ b/tests/validation/reference/DequantizationLayer.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -31,36 +31,24 @@ namespace validation
{
namespace reference
{
-template <typename T, typename std::enable_if<std::is_integral<T>::value, int>::type>
-SimpleTensor<float> dequantization_layer(const SimpleTensor<T> &src, const SimpleTensor<float> &min_max)
+template <typename T>
+SimpleTensor<T> dequantization_layer(const SimpleTensor<uint8_t> &src)
{
- // Create reference
- SimpleTensor<float> dst{ src.shape(), DataType::F32 };
+ const DataType dst_data_type = std::is_same<T, float>::value ? DataType::F32 : DataType::F16;
+ const QuantizationInfo &quantization_info = src.quantization_info();
- // Compute reference
- const int width = src.shape().x();
- const int height = src.shape().y();
- const int depth = src.shape().z();
- const int stride_w = width * height * depth;
- const int num_batches = min_max.shape().total_size_upper(1);
+ SimpleTensor<T> dst{ src.shape(), dst_data_type };
- for(int k = 0; k < num_batches; ++k)
+ for(int i = 0; i < src.num_elements(); ++i)
{
- const float min = min_max[k * 2 + 0];
- const float max = min_max[k * 2 + 1];
- const float range = max - min;
- const float scaling = range / 255.0f;
-
- for(int i = 0; i < stride_w; ++i)
- {
- dst[i + k * stride_w] = (static_cast<float>(src[i + k * stride_w]) * scaling) + min;
- }
+ dst[i] = static_cast<T>(quantization_info.dequantize(src[i]));
}
return dst;
}
-template SimpleTensor<float> dequantization_layer(const SimpleTensor<uint8_t> &src, const SimpleTensor<float> &min_max);
+template SimpleTensor<half> dequantization_layer(const SimpleTensor<uint8_t> &src);
+template SimpleTensor<float> dequantization_layer(const SimpleTensor<uint8_t> &src);
} // namespace reference
} // namespace validation
} // namespace test
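For context on the call the reference now delegates to (the formula itself is not shown in this diff): QuantizationInfo::dequantize for QASYMM8 is assumed to apply real = scale * (quantized - offset), so with the QuantizationInfo(0.5f, -10) used in the tests a raw value of 20 maps to 0.5f * (20 - (-10)) = 15.0f. A standalone sketch of that arithmetic, under that assumption:

// Sketch of the assumed asymmetric dequantization rule, independent of the library.
#include <cstdint>

float dequantize_qasymm8(uint8_t q, float scale, int offset)
{
    // real value = scale * (quantized value - zero-point offset)
    return scale * static_cast<float>(static_cast<int>(q) - offset);
}
// dequantize_qasymm8(20, 0.5f, -10) == 15.0f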
diff --git a/tests/validation/reference/DequantizationLayer.h b/tests/validation/reference/DequantizationLayer.h
index 1a8adcf9d8..1d0e54b442 100644
--- a/tests/validation/reference/DequantizationLayer.h
+++ b/tests/validation/reference/DequantizationLayer.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -35,8 +35,8 @@ namespace validation
{
namespace reference
{
-template <typename T, typename std::enable_if<std::is_integral<T>::value, int>::type = 0>
-SimpleTensor<float> dequantization_layer(const SimpleTensor<T> &src, const SimpleTensor<float> &min_max);
+template <typename T>
+SimpleTensor<T> dequantization_layer(const SimpleTensor<uint8_t> &src);
} // namespace reference
} // namespace validation
} // namespace test