author    Georgios Pinitas <georgios.pinitas@arm.com>  2019-04-03 15:11:16 +0100
committer Georgios Pinitas <georgios.pinitas@arm.com>  2019-04-09 16:51:03 +0000
commit    4d0351cf322df51baa5a445f637008992aa37809 (patch)
tree      411e748a6bb5b64920780382e0613596dcd55ccf /tests
parent    a4bba9c594c4022c9f85192bb8fd3593ad1a8d3c (diff)
COMPMID-2057: Implement and test import memory interfaces.
Change-Id: I1559bea47ae6403177d248e2f7be47d5f1a6513f
Signed-off-by: Georgios Pinitas <georgios.pinitas@arm.com>
Reviewed-on: https://review.mlplatform.org/c/956
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Michalis Spyrou <michalis.spyrou@arm.com>
Diffstat (limited to 'tests')
-rw-r--r-- tests/validation/CL/UNIT/TensorAllocator.cpp   | 121
-rw-r--r-- tests/validation/NEON/UNIT/TensorAllocator.cpp |  77
2 files changed, 175 insertions, 23 deletions
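
The interface change under test: TensorAllocator::import_memory() drops its size argument on both backends, taking the buffer's size (and, where given, alignment) from the TensorInfo supplied to init(). A minimal sketch of the reworked call sequence, inferred from the tests below; the shape and storage names are illustrative, not part of the commit:

    // Sketch: import a caller-owned buffer under the new single-argument
    // interface; import_memory() returns a bool-convertible Status.
    const TensorInfo info(TensorShape(24U, 16U, 3U), 1, DataType::F32);
    Tensor           tensor;
    tensor.allocator()->init(info);

    auto storage = support::cpp14::make_unique<uint8_t[]>(info.total_size());
    if(bool(tensor.allocator()->import_memory(storage.get())))
    {
        // The tensor now wraps the imported region and is not resizable;
        // free() only releases the binding, ownership stays with the caller.
        tensor.allocator()->free();
    }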
diff --git a/tests/validation/CL/UNIT/TensorAllocator.cpp b/tests/validation/CL/UNIT/TensorAllocator.cpp
index 849eee84d0..abe06c544b 100644
--- a/tests/validation/CL/UNIT/TensorAllocator.cpp
+++ b/tests/validation/CL/UNIT/TensorAllocator.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018 ARM Limited.
+ * Copyright (c) 2018-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -25,10 +25,16 @@
#include "arm_compute/runtime/CL/CLMemoryGroup.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
+#include "arm_compute/runtime/CL/functions/CLActivationLayer.h"
+#include "tests/CL/CLAccessor.h"
+#include "tests/Globals.h"
#include "tests/framework/Asserts.h"
#include "tests/framework/Macros.h"
+#include "tests/validation/Validation.h"
+#include "tests/validation/reference/ActivationLayer.h"
#include <memory>
+#include <random>
namespace arm_compute
{
@@ -36,14 +42,33 @@ namespace test
{
namespace validation
{
+namespace
+{
+cl_mem import_malloc_memory_helper(void *ptr, size_t size)
+{
+ const cl_import_properties_arm import_properties[] =
+ {
+ CL_IMPORT_TYPE_ARM,
+ CL_IMPORT_TYPE_HOST_ARM,
+ 0
+ };
+
+ cl_int err = CL_SUCCESS;
+ cl_mem buf = clImportMemoryARM(CLKernelLibrary::get().context().get(), CL_MEM_READ_WRITE, import_properties, ptr, size, &err);
+ ARM_COMPUTE_EXPECT(err == CL_SUCCESS, framework::LogLevel::ERRORS);
+
+ return buf;
+}
+} // namespace
+
TEST_SUITE(CL)
TEST_SUITE(UNIT)
TEST_SUITE(TensorAllocator)
-TEST_CASE(ImportMemory, framework::DatasetMode::ALL)
+TEST_CASE(ImportMemoryBuffer, framework::DatasetMode::ALL)
{
// Init tensor info
- TensorInfo info(TensorShape(24U, 16U, 3U), 1, DataType::F32);
+ const TensorInfo info(TensorShape(24U, 16U, 3U), 1, DataType::F32);
// Allocate memory buffer
const size_t total_size = info.total_size();
@@ -62,20 +87,88 @@ TEST_CASE(ImportMemory, framework::DatasetMode::ALL)
ARM_COMPUTE_EXPECT(!bool(t2.allocator()->import_memory(buf)), framework::LogLevel::ERRORS);
ARM_COMPUTE_EXPECT(t2.info()->is_resizable(), framework::LogLevel::ERRORS);
- // Positive case : Set raw pointer
- CLTensor t3;
- t3.allocator()->init(info);
- ARM_COMPUTE_EXPECT(bool(t3.allocator()->import_memory(buf)), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!t3.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(t3.cl_buffer().get() == buf.get(), framework::LogLevel::ERRORS);
- t3.allocator()->free();
+ // Negative case : Invalid buffer size
+ CLTensor t3;
+ const TensorInfo info_neg(TensorShape(32U, 16U, 3U), 1, DataType::F32);
+ t3.allocator()->init(info_neg);
+ ARM_COMPUTE_EXPECT(!bool(t3.allocator()->import_memory(buf)), framework::LogLevel::ERRORS);
ARM_COMPUTE_EXPECT(t3.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(t3.cl_buffer().get() != buf.get(), framework::LogLevel::ERRORS);
+
+ // Positive case : Set raw pointer
+ CLTensor t4;
+ t4.allocator()->init(info);
+ ARM_COMPUTE_EXPECT(bool(t4.allocator()->import_memory(buf)), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(!t4.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(t4.cl_buffer().get() == buf.get(), framework::LogLevel::ERRORS);
+ t4.allocator()->free();
+ ARM_COMPUTE_EXPECT(t4.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(t4.cl_buffer().get() != buf.get(), framework::LogLevel::ERRORS);
+}
+
+TEST_CASE(ImportMemoryMalloc, framework::DatasetMode::ALL)
+{
+ // Check if import extension is supported
+ if(!device_supports_extension(CLKernelLibrary::get().get_device(), "cl_arm_import_memory"))
+ {
+ return;
+ }
+ else
+ {
+ const ActivationLayerInfo act_info(ActivationLayerInfo::ActivationFunction::RELU);
+ const TensorShape shape = TensorShape(24U, 16U, 3U);
+ const DataType data_type = DataType::F32;
+
+ // Create tensor
+ const TensorInfo info(shape, 1, data_type);
+ CLTensor tensor;
+ tensor.allocator()->init(info);
+
+ // Create and configure activation function
+ CLActivationLayer act_func;
+ act_func.configure(&tensor, nullptr, act_info);
+
+ // Allocate and import tensor
+ const size_t total_size_in_elems = tensor.info()->tensor_shape().total_size();
+ const size_t total_size_in_bytes = tensor.info()->total_size();
+ const size_t alignment = CLKernelLibrary::get().get_device().getInfo<CL_DEVICE_GLOBAL_MEM_CACHELINE_SIZE>();
+ size_t space = total_size_in_bytes + alignment;
+ auto raw_data = support::cpp14::make_unique<uint8_t[]>(space);
+
+ void *aligned_ptr = raw_data.get();
+ support::cpp11::align(alignment, total_size_in_bytes, aligned_ptr, space);
+
+ cl::Buffer wrapped_buffer(import_malloc_memory_helper(aligned_ptr, total_size_in_bytes));
+ ARM_COMPUTE_EXPECT(bool(tensor.allocator()->import_memory(wrapped_buffer)), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(!tensor.info()->is_resizable(), framework::LogLevel::ERRORS);
+
+ // Fill tensor
+ std::uniform_real_distribution<float> distribution(-5.f, 5.f);
+ std::mt19937 gen(library->seed());
+ auto *typed_ptr = reinterpret_cast<float *>(aligned_ptr);
+ for(unsigned int i = 0; i < total_size_in_elems; ++i)
+ {
+ typed_ptr[i] = distribution(gen);
+ }
+
+ // Execute function and sync
+ act_func.run();
+ CLScheduler::get().sync();
+
+ // Validate result by checking that the input has no negative values
+ for(unsigned int i = 0; i < total_size_in_elems; ++i)
+ {
+ ARM_COMPUTE_EXPECT(typed_ptr[i] >= 0, framework::LogLevel::ERRORS);
+ }
+
+ // Release resources
+ tensor.allocator()->free();
+ ARM_COMPUTE_EXPECT(tensor.info()->is_resizable(), framework::LogLevel::ERRORS);
+ }
}
-TEST_SUITE_END()
-TEST_SUITE_END()
-TEST_SUITE_END()
+TEST_SUITE_END() // TensorAllocator
+TEST_SUITE_END() // UNIT
+TEST_SUITE_END() // CL
} // namespace validation
} // namespace test
} // namespace arm_compute
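
For context, the CL test's import_malloc_memory_helper wraps host memory in a cl_mem through the cl_arm_import_memory extension. A condensed standalone sketch of that pattern, assuming a device that reports the extension and a TensorInfo named info (illustrative):

    // Sketch: align host memory to the device cache line, then import it
    // with clImportMemoryARM before handing it to a CLTensor.
    const size_t bytes     = info.total_size();
    const size_t alignment = CLKernelLibrary::get().get_device().getInfo<CL_DEVICE_GLOBAL_MEM_CACHELINE_SIZE>();
    size_t       space     = bytes + alignment;
    auto         storage   = support::cpp14::make_unique<uint8_t[]>(space);
    void        *ptr       = storage.get();
    support::cpp11::align(alignment, bytes, ptr, space);

    const cl_import_properties_arm props[] = { CL_IMPORT_TYPE_ARM, CL_IMPORT_TYPE_HOST_ARM, 0 };
    cl_int     err = CL_SUCCESS;
    cl::Buffer wrapped(clImportMemoryARM(CLKernelLibrary::get().context().get(),
                                         CL_MEM_READ_WRITE, props, ptr, bytes, &err));
    // err must be CL_SUCCESS before calling tensor.allocator()->import_memory(wrapped).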
diff --git a/tests/validation/NEON/UNIT/TensorAllocator.cpp b/tests/validation/NEON/UNIT/TensorAllocator.cpp
index 384a00855b..7ba83c11b3 100644
--- a/tests/validation/NEON/UNIT/TensorAllocator.cpp
+++ b/tests/validation/NEON/UNIT/TensorAllocator.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018 ARM Limited.
+ * Copyright (c) 2017-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -26,12 +26,19 @@
#include "arm_compute/core/utils/misc/Utility.h"
#include "arm_compute/runtime/MemoryGroup.h"
#include "arm_compute/runtime/MemoryRegion.h"
+#include "arm_compute/runtime/NEON/functions/NEActivationLayer.h"
#include "support/ToolchainSupport.h"
+#include "tests/Globals.h"
#include "tests/Utils.h"
#include "tests/framework/Asserts.h"
#include "tests/framework/Macros.h"
+#include "tests/validation/Validation.h"
+#include "tests/validation/reference/ActivationLayer.h"
+
+#include <memory>
+#include <random>
namespace arm_compute
{
@@ -52,29 +59,30 @@ TEST_CASE(ImportMemory, framework::DatasetMode::ALL)
const size_t total_size = info.total_size();
auto data = support::cpp14::make_unique<uint8_t[]>(total_size);
- // Negative case : Import pointer with zero size
+ // Negative case : Import nullptr
Tensor t1;
t1.allocator()->init(info);
- ARM_COMPUTE_EXPECT(!bool(t1.allocator()->import_memory(data.get(), 0)), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(!bool(t1.allocator()->import_memory(nullptr)), framework::LogLevel::ERRORS);
ARM_COMPUTE_EXPECT(t1.info()->is_resizable(), framework::LogLevel::ERRORS);
- // Negative case : Import nullptr
- Tensor t2;
- t2.allocator()->init(info);
- ARM_COMPUTE_EXPECT(!bool(t2.allocator()->import_memory(nullptr, total_size)), framework::LogLevel::ERRORS);
+ // Negative case : Import misaligned pointer
+ Tensor t2;
+ const size_t required_alignment = 339;
+ t2.allocator()->init(info, required_alignment);
+ ARM_COMPUTE_EXPECT(!bool(t2.allocator()->import_memory(data.get())), framework::LogLevel::ERRORS);
ARM_COMPUTE_EXPECT(t2.info()->is_resizable(), framework::LogLevel::ERRORS);
// Negative case : Import memory to a tensor that is memory managed
Tensor t3;
MemoryGroup mg;
t3.allocator()->set_associated_memory_group(&mg);
- ARM_COMPUTE_EXPECT(!bool(t3.allocator()->import_memory(data.get(), total_size)), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(!bool(t3.allocator()->import_memory(data.get())), framework::LogLevel::ERRORS);
ARM_COMPUTE_EXPECT(t3.info()->is_resizable(), framework::LogLevel::ERRORS);
// Positive case : Set raw pointer
Tensor t4;
t4.allocator()->init(info);
- ARM_COMPUTE_EXPECT(bool(t4.allocator()->import_memory(data.get(), total_size)), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(bool(t4.allocator()->import_memory(data.get())), framework::LogLevel::ERRORS);
ARM_COMPUTE_EXPECT(!t4.info()->is_resizable(), framework::LogLevel::ERRORS);
ARM_COMPUTE_EXPECT(t4.buffer() == reinterpret_cast<uint8_t *>(data.get()), framework::LogLevel::ERRORS);
t4.allocator()->free();
@@ -82,6 +90,57 @@ TEST_CASE(ImportMemory, framework::DatasetMode::ALL)
ARM_COMPUTE_EXPECT(t4.buffer() == nullptr, framework::LogLevel::ERRORS);
}
+TEST_CASE(ImportMemoryMalloc, framework::DatasetMode::ALL)
+{
+ const ActivationLayerInfo act_info(ActivationLayerInfo::ActivationFunction::RELU);
+ const TensorShape shape = TensorShape(24U, 16U, 3U);
+ const DataType data_type = DataType::F32;
+
+ // Create tensor
+ const TensorInfo info(shape, 1, data_type);
+ const size_t required_alignment = 64;
+ Tensor tensor;
+ tensor.allocator()->init(info, required_alignment);
+
+ // Create and configure activation function
+ NEActivationLayer act_func;
+ act_func.configure(&tensor, nullptr, act_info);
+
+ // Allocate and import tensor
+ const size_t total_size_in_elems = tensor.info()->tensor_shape().total_size();
+ const size_t total_size_in_bytes = tensor.info()->total_size();
+ size_t space = total_size_in_bytes + required_alignment;
+ auto raw_data = support::cpp14::make_unique<uint8_t[]>(space);
+
+ void *aligned_ptr = raw_data.get();
+ support::cpp11::align(required_alignment, total_size_in_bytes, aligned_ptr, space);
+
+ ARM_COMPUTE_EXPECT(bool(tensor.allocator()->import_memory(aligned_ptr)), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(!tensor.info()->is_resizable(), framework::LogLevel::ERRORS);
+
+ // Fill tensor
+ std::uniform_real_distribution<float> distribution(-5.f, 5.f);
+ std::mt19937 gen(library->seed());
+ auto *typed_ptr = reinterpret_cast<float *>(aligned_ptr);
+ for(unsigned int i = 0; i < total_size_in_elems; ++i)
+ {
+ typed_ptr[i] = distribution(gen);
+ }
+
+ // Execute function and sync
+ act_func.run();
+
+ // Validate result by checking that the input has no negative values
+ for(unsigned int i = 0; i < total_size_in_elems; ++i)
+ {
+ ARM_COMPUTE_EXPECT(typed_ptr[i] >= 0, framework::LogLevel::ERRORS);
+ }
+
+ // Release resources
+ tensor.allocator()->free();
+ ARM_COMPUTE_EXPECT(tensor.info()->is_resizable(), framework::LogLevel::ERRORS);
+}
+
TEST_CASE(AlignedAlloc, framework::DatasetMode::ALL)
{
// Init tensor info
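
The NEON variant additionally exercises the new alignment-aware init(info, alignment) overload: a misaligned pointer is rejected (the deliberately odd 339-byte case above) while a properly aligned one is accepted. A condensed sketch of that contract, with illustrative values:

    // Sketch: a tensor initialised with an explicit alignment requirement
    // only accepts imported pointers that satisfy it.
    const size_t required_alignment = 64;
    Tensor t;
    t.allocator()->init(info, required_alignment);

    size_t space   = info.total_size() + required_alignment;
    auto   storage = support::cpp14::make_unique<uint8_t[]>(space);
    void  *aligned = storage.get();
    support::cpp11::align(required_alignment, info.total_size(), aligned, space);

    // An unaligned pointer here would make import_memory() return a failing
    // status; the aligned one succeeds and binds the tensor to the buffer.
    ARM_COMPUTE_EXPECT(bool(t.allocator()->import_memory(aligned)), framework::LogLevel::ERRORS);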