From 4c5469b192665c94118a8a558787cb9cec2d0765 Mon Sep 17 00:00:00 2001 From: Georgios Pinitas Date: Tue, 21 May 2019 13:32:43 +0100 Subject: COMPMID-2225: Add interface support for new quantized data types. Add support for: -QSYMM8, 8-bit quantized symmetric -QSYMM8_PER_CHANNEL, 8-bit quantized symmetric with per channel quantization Change-Id: I00c4ff98e44af37419470af61419ee95d0de2463 Signed-off-by: Georgios Pinitas Reviewed-on: https://review.mlplatform.org/c/1236 Tested-by: Arm Jenkins Reviewed-by: Gian Marco Iodice Comments-Addressed: Arm Jenkins --- tests/validation/CL/UNIT/TensorAllocator.cpp | 42 ++++++++++++++++++++++++++++ 1 file changed, 42 insertions(+) (limited to 'tests/validation/CL/UNIT/TensorAllocator.cpp') diff --git a/tests/validation/CL/UNIT/TensorAllocator.cpp b/tests/validation/CL/UNIT/TensorAllocator.cpp index e5b37d8387..4b8e105240 100644 --- a/tests/validation/CL/UNIT/TensorAllocator.cpp +++ b/tests/validation/CL/UNIT/TensorAllocator.cpp @@ -66,6 +66,7 @@ TEST_SUITE(CL) TEST_SUITE(UNIT) TEST_SUITE(TensorAllocator) +/** Validates import memory interface when importing cl buffer objects */ TEST_CASE(ImportMemoryBuffer, framework::DatasetMode::ALL) { // Init tensor info @@ -106,6 +107,7 @@ TEST_CASE(ImportMemoryBuffer, framework::DatasetMode::ALL) ARM_COMPUTE_EXPECT(t4.cl_buffer().get() != buf.get(), framework::LogLevel::ERRORS); } +/** Validates import memory interface when importing malloced memory */ TEST_CASE(ImportMemoryMalloc, framework::DatasetMode::ALL) { // Check if import extension is supported @@ -168,6 +170,7 @@ TEST_CASE(ImportMemoryMalloc, framework::DatasetMode::ALL) } #if !defined(BARE_METAL) +/** Validates import memory interface when importing memory mapped objects */ TEST_CASE(ImportMemoryMappedFile, framework::DatasetMode::ALL) { // Check if import extension is supported @@ -235,6 +238,45 @@ TEST_CASE(ImportMemoryMappedFile, framework::DatasetMode::ALL) } #endif // !defined(BARE_METAL) +/** Validates symmetric per channel 
quantization */ +TEST_CASE(Symm8PerChannelQuantizationInfo, framework::DatasetMode::ALL) +{ + // Create tensor + CLTensor tensor; + const std::vector<float> scale = { 0.25f, 1.4f, 3.2f, 2.3f, 4.7f }; + const TensorInfo info(TensorShape(32U, 16U), 1, DataType::QSYMM8_PER_CHANNEL, QuantizationInfo(scale)); + tensor.allocator()->init(info); + + // Check quantization information + ARM_COMPUTE_EXPECT(!tensor.info()->quantization_info().empty(), framework::LogLevel::ERRORS); + ARM_COMPUTE_EXPECT(!tensor.info()->quantization_info().scale.empty(), framework::LogLevel::ERRORS); + ARM_COMPUTE_EXPECT(tensor.info()->quantization_info().scale.size() == scale.size(), framework::LogLevel::ERRORS); + ARM_COMPUTE_EXPECT(tensor.info()->quantization_info().offset.empty(), framework::LogLevel::ERRORS); + + CLQuantization quantization = tensor.quantization(); + ARM_COMPUTE_ASSERT(quantization.scale != nullptr); + ARM_COMPUTE_ASSERT(quantization.offset != nullptr); + + // Check OpenCL quantization arrays before allocating + ARM_COMPUTE_EXPECT(quantization.scale->max_num_values() == 0, framework::LogLevel::ERRORS); + ARM_COMPUTE_EXPECT(quantization.offset->max_num_values() == 0, framework::LogLevel::ERRORS); + + // Check OpenCL quantization arrays after allocating + tensor.allocator()->allocate(); + ARM_COMPUTE_EXPECT(quantization.scale->max_num_values() == scale.size(), framework::LogLevel::ERRORS); + ARM_COMPUTE_EXPECT(quantization.offset->max_num_values() == 0, framework::LogLevel::ERRORS); + + // Validate that the scale values are the same + auto cl_scale_buffer = quantization.scale->cl_buffer(); + void *mapped_ptr = CLScheduler::get().queue().enqueueMapBuffer(cl_scale_buffer, CL_TRUE, CL_MAP_READ, 0, scale.size()); + auto cl_scale_ptr = static_cast<float *>(mapped_ptr); + for(unsigned int i = 0; i < scale.size(); ++i) + { + ARM_COMPUTE_EXPECT(cl_scale_ptr[i] == scale[i], framework::LogLevel::ERRORS); + } + CLScheduler::get().queue().enqueueUnmapMemObject(cl_scale_buffer, mapped_ptr); +} + 
TEST_SUITE_END() // TensorAllocator TEST_SUITE_END() // UNIT TEST_SUITE_END() // CL -- cgit v1.2.1