author     Georgios Pinitas <georgios.pinitas@arm.com>  2019-05-21 13:32:43 +0100
committer  Georgios Pinitas <georgios.pinitas@arm.com>  2019-06-03 14:51:29 +0000
commit     4c5469b192665c94118a8a558787cb9cec2d0765
tree       168aa969de8243bdbb1f25247dd9f54d037ae32c /tests/validation/CL/UNIT/TensorAllocator.cpp
parent     43a129e94df41f9ac8bc78b702da5a387ada0494
COMPMID-2225: Add interface support for new quantized data types.
Add support for:
 - QSYMM8, 8-bit quantized symmetric
 - QSYMM8_PER_CHANNEL, 8-bit quantized symmetric with per channel quantization

Change-Id: I00c4ff98e44af37419470af61419ee95d0de2463
Signed-off-by: Georgios Pinitas <georgios.pinitas@arm.com>
Reviewed-on: https://review.mlplatform.org/c/1236
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Gian Marco Iodice <gianmarco.iodice@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Diffstat (limited to 'tests/validation/CL/UNIT/TensorAllocator.cpp')
-rw-r--r--  tests/validation/CL/UNIT/TensorAllocator.cpp  42
1 file changed, 42 insertions, 0 deletions
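
For context on the data types named in the commit message: QSYMM8 maps real values to signed 8-bit integers through a scale only (there is no zero-point offset), and QSYMM8_PER_CHANNEL keeps one such scale per channel. A minimal standalone sketch of that arithmetic, assuming nothing about the library's own quantize/dequantize helpers (the function names below are illustrative, not Compute Library API):

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <vector>

// Symmetric 8-bit quantization: real_value = scale * quantized_value, no offset.
// For per-channel quantization, each channel c uses its own scale[c].
int8_t quantize_symm8(float value, float scale)
{
    const long q = std::lround(value / scale);
    return static_cast<int8_t>(std::max(-127L, std::min(127L, q))); // clamp to a symmetric int8 range
}

float dequantize_symm8(int8_t q, float scale)
{
    return static_cast<float>(q) * scale;
}

int main()
{
    // Per-channel scales, mirroring the vector used by the new test case below.
    const std::vector<float> scale = { 0.25f, 1.4f, 3.2f, 2.3f, 4.7f };

    float max_error = 0.0f;
    for(const float s : scale)
    {
        const int8_t q = quantize_symm8(3.0f, s); // quantize against this channel's scale
        max_error      = std::max(max_error, std::abs(dequantize_symm8(q, s) - 3.0f));
    }
    // The round-trip error stays within half a quantization step of the largest scale (here 4.7f / 2).
    return max_error <= 4.7f / 2.0f ? 0 : 1;
}
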
diff --git a/tests/validation/CL/UNIT/TensorAllocator.cpp b/tests/validation/CL/UNIT/TensorAllocator.cpp
index e5b37d8387..4b8e105240 100644
--- a/tests/validation/CL/UNIT/TensorAllocator.cpp
+++ b/tests/validation/CL/UNIT/TensorAllocator.cpp
@@ -66,6 +66,7 @@ TEST_SUITE(CL)
TEST_SUITE(UNIT)
TEST_SUITE(TensorAllocator)
+/** Validates the import memory interface when importing OpenCL buffer objects */
TEST_CASE(ImportMemoryBuffer, framework::DatasetMode::ALL)
{
// Init tensor info
@@ -106,6 +107,7 @@ TEST_CASE(ImportMemoryBuffer, framework::DatasetMode::ALL)
ARM_COMPUTE_EXPECT(t4.cl_buffer().get() != buf.get(), framework::LogLevel::ERRORS);
}
+/** Validates the import memory interface when importing malloc()-allocated memory */
TEST_CASE(ImportMemoryMalloc, framework::DatasetMode::ALL)
{
// Check if import extension is supported
@@ -168,6 +170,7 @@ TEST_CASE(ImportMemoryMalloc, framework::DatasetMode::ALL)
}
#if !defined(BARE_METAL)
+/** Validates the import memory interface when importing memory-mapped files */
TEST_CASE(ImportMemoryMappedFile, framework::DatasetMode::ALL)
{
// Check if import extension is supported
@@ -235,6 +238,45 @@ TEST_CASE(ImportMemoryMappedFile, framework::DatasetMode::ALL)
}
#endif // !defined(BARE_METAL)
+/** Validates symmetric per-channel quantization */
+TEST_CASE(Symm8PerChannelQuantizationInfo, framework::DatasetMode::ALL)
+{
+ // Create tensor
+ CLTensor tensor;
+ const std::vector<float> scale = { 0.25f, 1.4f, 3.2f, 2.3f, 4.7f };
+ const TensorInfo info(TensorShape(32U, 16U), 1, DataType::QSYMM8_PER_CHANNEL, QuantizationInfo(scale));
+ tensor.allocator()->init(info);
+
+ // Check quantization information
+ ARM_COMPUTE_EXPECT(!tensor.info()->quantization_info().empty(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(!tensor.info()->quantization_info().scale.empty(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(tensor.info()->quantization_info().scale.size() == scale.size(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(tensor.info()->quantization_info().offset.empty(), framework::LogLevel::ERRORS);
+
+ CLQuantization quantization = tensor.quantization();
+ ARM_COMPUTE_ASSERT(quantization.scale != nullptr);
+ ARM_COMPUTE_ASSERT(quantization.offset != nullptr);
+
+ // Check OpenCL quantization arrays before allocating
+ ARM_COMPUTE_EXPECT(quantization.scale->max_num_values() == 0, framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(quantization.offset->max_num_values() == 0, framework::LogLevel::ERRORS);
+
+ // Check OpenCL quantization arrays after allocating
+ tensor.allocator()->allocate();
+ ARM_COMPUTE_EXPECT(quantization.scale->max_num_values() == scale.size(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(quantization.offset->max_num_values() == 0, framework::LogLevel::ERRORS);
+
+ // Validate that the scale values are the same
+ auto cl_scale_buffer = quantization.scale->cl_buffer();
+ void *mapped_ptr = CLScheduler::get().queue().enqueueMapBuffer(cl_scale_buffer, CL_TRUE, CL_MAP_READ, 0, scale.size() * sizeof(float)); // map size is in bytes
+ auto cl_scale_ptr = static_cast<float *>(mapped_ptr);
+ for(unsigned int i = 0; i < scale.size(); ++i)
+ {
+ ARM_COMPUTE_EXPECT(cl_scale_ptr[i] == scale[i], framework::LogLevel::ERRORS);
+ }
+ CLScheduler::get().queue().enqueueUnmapMemObject(cl_scale_buffer, mapped_ptr);
+}
+
TEST_SUITE_END() // TensorAllocator
TEST_SUITE_END() // UNIT
TEST_SUITE_END() // CL
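
As a side note on the readback step at the end of the new test case: the device-side scale array is validated by mapping its cl::Buffer back to the host. Below is a minimal sketch of that map/read/unmap pattern using the standard Khronos OpenCL C++ bindings (CL/cl2.hpp), written outside the test framework; the queue and buffer are assumed to be valid and to hold num_values floats, and the mapped region is sized in bytes (num_values * sizeof(float)), not in elements:

#include <CL/cl2.hpp>

#include <cstddef>
#include <vector>

// Blocking read-back of num_values floats from a device buffer via map/unmap.
// `queue` and `buffer` are assumed to be valid and already populated.
std::vector<float> read_scales(cl::CommandQueue &queue, const cl::Buffer &buffer, std::size_t num_values)
{
    const std::size_t size_in_bytes = num_values * sizeof(float);

    // Map the whole scale region for reading; CL_TRUE makes the call blocking.
    void *mapped_ptr = queue.enqueueMapBuffer(buffer, CL_TRUE, CL_MAP_READ, 0, size_in_bytes);

    const float       *scales = static_cast<const float *>(mapped_ptr);
    std::vector<float> result(scales, scales + num_values);

    // Release the mapping once the host-side copy has been taken.
    queue.enqueueUnmapMemObject(buffer, mapped_ptr);
    return result;
}
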