author    John Kesapides <john.kesapides@arm.com>  2019-03-04 16:29:22 +0000
committer Pablo Marquez <pablo.tello@arm.com>      2019-03-13 13:34:48 +0000
commit    adfb2737046028c042f0aecaff87733a442da29f
tree      23b08fb9529075277e51dc1ae7e6489f690c9698
parent    381fcf20c3ee028e14c154ff4b75bc7410f91168
COMPMID-1935 Add support for QASYMM8 in NEQuantizeLayer
Change-Id: I2b63a644d8e34f91c830d9ac398debcbdca3e497
Signed-off-by: John Kesapides <john.kesapides@arm.com>
Reviewed-on: https://review.mlplatform.org/c/829
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Diffstat (limited to 'tests')
 tests/validation/NEON/QuantizationLayer.cpp          | 53
 tests/validation/fixtures/QuantizationLayerFixture.h | 65
 tests/validation/reference/QuantizationLayer.cpp     | 21
 tests/validation/reference/QuantizationLayer.h       |  5
 4 files changed, 123 insertions(+), 21 deletions(-)
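This commit teaches NEQuantizationLayer to emit QASYMM8 using the scale and zero-point carried by the destination tensor's QuantizationInfo. A minimal usage sketch of that path, assuming the standard Arm Compute Library runtime API (the shape and the QuantizationInfo(0.5f, 10) parameters are illustrative, mirroring the values the new tests pin):

#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/NEON/functions/NEQuantizationLayer.h"
#include "arm_compute/runtime/Tensor.h"

using namespace arm_compute;

int main()
{
    // F32 source; QASYMM8 destination carrying the quantization parameters
    Tensor src, dst;
    src.allocator()->init(TensorInfo(TensorShape(16U, 16U), 1, DataType::F32));
    dst.allocator()->init(TensorInfo(TensorShape(16U, 16U), 1, DataType::QASYMM8, QuantizationInfo(0.5f, 10)));

    NEQuantizationLayer quantize;
    quantize.configure(&src, &dst);

    src.allocator()->allocate();
    dst.allocator()->allocate();
    // ... fill src with F32 data, then:
    quantize.run();
    return 0;
}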
diff --git a/tests/validation/NEON/QuantizationLayer.cpp b/tests/validation/NEON/QuantizationLayer.cpp
index 6526539d1a..487eb70120 100644
--- a/tests/validation/NEON/QuantizationLayer.cpp
+++ b/tests/validation/NEON/QuantizationLayer.cpp
@@ -55,21 +55,17 @@ TEST_SUITE(QuantizationLayer)
// *INDENT-OFF*
// clang-format off
DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(
- framework::dataset::make("InputInfo", { TensorInfo(TensorShape(16U, 16U, 16U, 5U), 1, DataType::U8), // Wrong input data type
- TensorInfo(TensorShape(16U, 5U, 16U), 1, DataType::U8), // Invalid shape
+ framework::dataset::make("InputInfo", { TensorInfo(TensorShape(16U, 16U, 16U, 5U), 1, DataType::QASYMM8), // Wrong input data type
TensorInfo(TensorShape(16U, 16U, 16U, 5U), 1, DataType::F32), // Wrong output data type
- TensorInfo(TensorShape(16U, 16U, 2U, 5U), 1, DataType::U8), // Missmatching shapes
- TensorInfo(TensorShape(17U, 16U, 16U, 5U), 1, DataType::U8), // Shrink window
+ TensorInfo(TensorShape(16U, 16U, 2U, 5U), 1, DataType::F32), // Missmatching shapes
TensorInfo(TensorShape(16U, 16U, 16U, 5U), 1, DataType::F32), // Valid
}),
framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(16U, 16U, 16U, 5U), 1, DataType::F32),
- TensorInfo(TensorShape(16U, 5U, 16U), 1, DataType::U8),
TensorInfo(TensorShape(16U, 16U, 16U, 5U), 1, DataType::U16),
- TensorInfo(TensorShape(16U, 16U, 16U, 5U), 1, DataType::F32),
- TensorInfo(TensorShape(17U, 16U, 16U, 5U), 1, DataType::F32),
- TensorInfo(TensorShape(16U, 16U, 16U, 5U), 1, DataType::U8),
+ TensorInfo(TensorShape(16U, 16U, 16U, 5U), 1, DataType::QASYMM8),
+ TensorInfo(TensorShape(16U, 16U, 16U, 5U), 1, DataType::QASYMM8),
})),
- framework::dataset::make("Expected", { false, false, false, false, false, true})),
+ framework::dataset::make("Expected", { false, false, false, true})),
input_info, output_info, expected)
{
ARM_COMPUTE_EXPECT(bool(NEQuantizationLayer::validate(&input_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false))) == expected, framework::LogLevel::ERRORS);
@@ -81,7 +77,7 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(QuantizationS
{
// Create tensors
Tensor src = create_tensor<Tensor>(shape, data_type);
- Tensor dst = create_tensor<Tensor>(shape, DataType::U8);
+ Tensor dst = create_tensor<Tensor>(shape, DataType::QASYMM8);
ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
@@ -96,30 +92,51 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(QuantizationS
validate(dst.info()->valid_region(), valid_region);
// Validate padding
- const PaddingSize padding = PaddingCalculator(shape.x(), 8).required_padding();
- validate(src.info()->padding(), padding);
- validate(dst.info()->padding(), padding);
+ validate(src.info()->padding(), PaddingSize());
+ validate(dst.info()->padding(), PaddingSize());
}
template <typename T>
-using NEQuantizationLayerFixture = QuantizationValidationFixture<Tensor, Accessor, NEQuantizationLayer, T>;
+using NEQuantizationLayerFixture = QAsymm8QuantizationValidationFixture<Tensor, Accessor, NEQuantizationLayer, T>;
TEST_SUITE(Float)
TEST_SUITE(FP32)
-FIXTURE_DATA_TEST_CASE(RunSmall, NEQuantizationLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(concat(datasets::Small3DShapes(), datasets::Small4DShapes()),
- framework::dataset::make("DataType", DataType::F32)))
+FIXTURE_DATA_TEST_CASE(RunSmall, NEQuantizationLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(concat(datasets::Small3DShapes(), datasets::Small4DShapes()),
+ framework::dataset::make("DataType", DataType::F32)),
+ framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.5f, 10) })))
{
// Validate output
validate(Accessor(_target), _reference, tolerance_u8);
}
-FIXTURE_DATA_TEST_CASE(RunLarge, NEQuantizationLayerFixture<float>, framework::DatasetMode::NIGHTLY, combine(concat(datasets::Large3DShapes(), datasets::Large4DShapes()),
- framework::dataset::make("DataType", DataType::F32)))
+FIXTURE_DATA_TEST_CASE(RunLarge, NEQuantizationLayerFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(concat(datasets::Large3DShapes(), datasets::Large4DShapes()),
+ framework::dataset::make("DataType", DataType::F32)),
+ framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.5f, 10) })))
{
// Validate output
validate(Accessor(_target), _reference, tolerance_u8);
}
TEST_SUITE_END() // FP32
TEST_SUITE_END() // Float
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+TEST_SUITE(Half)
+TEST_SUITE(FP16)
+FIXTURE_DATA_TEST_CASE(RunSmall, NEQuantizationLayerFixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(concat(datasets::Small3DShapes(), datasets::Small4DShapes()),
+ framework::dataset::make("DataType", DataType::F16)),
+ framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.5f, 10) })))
+{
+ // Validate output
+ validate(Accessor(_target), _reference, tolerance_u8);
+}
+FIXTURE_DATA_TEST_CASE(RunLarge, NEQuantizationLayerFixture<half>, framework::DatasetMode::NIGHTLY, combine(combine(concat(datasets::Large3DShapes(), datasets::Large4DShapes()),
+ framework::dataset::make("DataType", DataType::F16)),
+ framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.5f, 10) })))
+{
+ // Validate output
+ validate(Accessor(_target), _reference, tolerance_u8);
+}
+TEST_SUITE_END() // FP16
+TEST_SUITE_END() // Half
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
TEST_SUITE_END() // QuantizationLayer
TEST_SUITE_END() // NEON
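All of the new datasets above pin QuantizationInfo(0.5f, 10), i.e. scale 0.5 and zero-point 10. As a reminder of what validate() is comparing against, the QASYMM8 mapping reduces to q = clamp(round(x / scale) + offset, 0, 255). A standalone restatement (quantize_qasymm8 is a hypothetical helper for illustration, not library code; std::lround is only one of the rounding policies the library uses, see the reference change later in this commit):

#include <algorithm>
#include <cmath>
#include <cstdint>

uint8_t quantize_qasymm8(float x, float scale, int offset)
{
    const int q = static_cast<int>(std::lround(x / scale)) + offset;
    return static_cast<uint8_t>(std::max(0, std::min(q, 255)));
}

// With scale = 0.5f, offset = 10:
//   quantize_qasymm8( 2.3f, 0.5f, 10) == 15   (2.3 / 0.5 = 4.6 -> 5, +10)
//   quantize_qasymm8(-10.f, 0.5f, 10) ==  0   (clamped at the low end)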
diff --git a/tests/validation/fixtures/QuantizationLayerFixture.h b/tests/validation/fixtures/QuantizationLayerFixture.h
index 8590b7193b..65de405788 100644
--- a/tests/validation/fixtures/QuantizationLayerFixture.h
+++ b/tests/validation/fixtures/QuantizationLayerFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018 ARM Limited.
+ * Copyright (c) 2017-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -103,6 +103,69 @@ protected:
TensorType _target{};
SimpleTensor<uint8_t> _reference{};
};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class QAsymm8QuantizationValidationFixture : public framework::Fixture
+{
+public:
+ template <typename...>
+ void setup(TensorShape shape, DataType data_type, QuantizationInfo quant_info)
+ {
+ _target = compute_target(shape, data_type, quant_info);
+ _reference = compute_reference(shape, data_type, quant_info);
+ }
+
+protected:
+ template <typename U>
+ void fill(U &&tensor)
+ {
+ library->fill_tensor_uniform(tensor, 0);
+ }
+
+ TensorType compute_target(const TensorShape &shape, DataType data_type, QuantizationInfo quant_info)
+ {
+ // Create tensors
+ TensorType src = create_tensor<TensorType>(shape, data_type);
+ TensorType dst = create_tensor<TensorType>(shape, DataType::QASYMM8, 1, quant_info);
+
+ // Create and configure function
+ FunctionType quantization_layer;
+ quantization_layer.configure(&src, &dst);
+
+ ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+
+ // Allocate tensors
+ src.allocator()->allocate();
+ dst.allocator()->allocate();
+
+ ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+
+ // Fill tensors
+ fill(AccessorType(src));
+
+ // Compute function
+ quantization_layer.run();
+
+ return dst;
+ }
+
+ SimpleTensor<uint8_t> compute_reference(const TensorShape &shape, DataType data_type, QuantizationInfo quant_info)
+ {
+ // Create reference
+ SimpleTensor<T> src{ shape, data_type };
+
+ // Fill reference
+ fill(src);
+
+ return reference::quantization_layer<T>(src, quant_info);
+ }
+
+ TensorType _target{};
+ SimpleTensor<uint8_t> _reference{};
+};
+
} // namespace validation
} // namespace test
} // namespace arm_compute
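Stripped of the framework macros, the new fixture encodes a plain target-vs-reference pattern: compute_target() runs the NEON function, compute_reference() runs the scalar model on identical input, and the test body compares the two. A conceptual sketch of how the framework drives it (FIXTURE_DATA_TEST_CASE generates a class deriving from the fixture, which is how the test body can read the protected _target and _reference members):

// Conceptual flow only, not the framework's actual plumbing
QAsymm8QuantizationValidationFixture<Tensor, Accessor, NEQuantizationLayer, float> fixture;
fixture.setup(TensorShape(16U, 16U, 16U), DataType::F32, QuantizationInfo(0.5f, 10));
// setup() has now produced _target (NEON output) and _reference (scalar
// output); the generated test body then runs
//     validate(Accessor(_target), _reference, tolerance_u8);
// to compare them element-wise within the stated tolerance.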
diff --git a/tests/validation/reference/QuantizationLayer.cpp b/tests/validation/reference/QuantizationLayer.cpp
index d7ce490209..3d6c5bc13d 100644
--- a/tests/validation/reference/QuantizationLayer.cpp
+++ b/tests/validation/reference/QuantizationLayer.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -81,6 +81,25 @@ SimpleTensor<uint8_t> quantization_layer(const SimpleTensor<T> &src)
}
template SimpleTensor<uint8_t> quantization_layer(const SimpleTensor<float> &src);
+
+template <typename T>
+SimpleTensor<uint8_t> quantization_layer(const SimpleTensor<T> &src, const QuantizationInfo quantization_info)
+{
+ // Create reference
+ SimpleTensor<uint8_t> dst{ src.shape(), DataType::QASYMM8, 1, quantization_info };
+
+ for(int i = 0; i < src.num_elements(); ++i)
+ {
+#ifdef __aarch64__
+ dst[i] = quantization_info.quantize((src[i]), RoundingPolicy::TO_NEAREST_EVEN);
+#else // __aarch64__
+ dst[i] = quantization_info.quantize((src[i]), RoundingPolicy::TO_ZERO);
+#endif // __aarch64__
+ }
+ return dst;
+}
+template SimpleTensor<uint8_t> quantization_layer(const SimpleTensor<half> &src, const QuantizationInfo quantization_info);
+template SimpleTensor<uint8_t> quantization_layer(const SimpleTensor<float> &src, const QuantizationInfo quantization_info);
} // namespace reference
} // namespace validation
} // namespace test
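The #ifdef split above makes the reference round like the aarch64 kernel (round to nearest, ties to even) on aarch64 builds and truncate toward zero elsewhere; for fractional scaled values the two policies can differ by one, which is what the tests' tolerance_u8 is there to absorb. A worked example with the test parameters scale = 0.5 and offset = 10 (standalone sketch, not library code; std::nearbyint models TO_NEAREST_EVEN under the default FE_TONEAREST mode):

#include <cmath>
#include <cstdio>

int main()
{
    const float x = 2.3f, scale = 0.5f;
    const int   offset = 10;
    // x / scale = 4.6
    std::printf("TO_NEAREST_EVEN: %d\n", static_cast<int>(std::nearbyint(x / scale)) + offset); // 15
    std::printf("TO_ZERO:         %d\n", static_cast<int>(std::trunc(x / scale)) + offset);     // 14
    return 0;
}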
diff --git a/tests/validation/reference/QuantizationLayer.h b/tests/validation/reference/QuantizationLayer.h
index 7c5572ccf8..60d8ea4023 100644
--- a/tests/validation/reference/QuantizationLayer.h
+++ b/tests/validation/reference/QuantizationLayer.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -37,6 +37,9 @@ namespace reference
{
template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type = 0>
SimpleTensor<uint8_t> quantization_layer(const SimpleTensor<T> &src);
+
+template <typename T>
+SimpleTensor<uint8_t> quantization_layer(const SimpleTensor<T> &src, const QuantizationInfo quantization_info);
} // namespace reference
} // namespace validation
} // namespace test
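For completeness, calling the new reference overload declared above looks like this (a sketch; tensor filling is elided and namespaces are spelled out for clarity):

using namespace arm_compute;

test::SimpleTensor<float> src{ TensorShape(8U, 8U), DataType::F32 };
// ... fill src with F32 data ...
test::SimpleTensor<uint8_t> dst =
    test::validation::reference::quantization_layer<float>(src, QuantizationInfo(0.5f, 10));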