From df3103622b7de05f4e35b22a2c94b4a46eab4efc Mon Sep 17 00:00:00 2001
From: Georgios Pinitas
Date: Wed, 14 Nov 2018 13:16:56 +0000
Subject: COMPMID-1088: Use IMemoryRegion in interfaces where possible

-Simplifies the import memory interface
-Changes the use of void** handles to appropriate interfaces.

Change-Id: I5918c855c11f46352058864623336b352162a4b7
---
 tests/validation/CL/UNIT/TensorAllocator.cpp   | 19 +++++++-------
 tests/validation/NEON/UNIT/TensorAllocator.cpp | 35 ++++++++++++--------------
 2 files changed, 26 insertions(+), 28 deletions(-)

diff --git a/tests/validation/CL/UNIT/TensorAllocator.cpp b/tests/validation/CL/UNIT/TensorAllocator.cpp
index a34a37eb7b..849eee84d0 100644
--- a/tests/validation/CL/UNIT/TensorAllocator.cpp
+++ b/tests/validation/CL/UNIT/TensorAllocator.cpp
@@ -45,31 +45,32 @@ TEST_CASE(ImportMemory, framework::DatasetMode::ALL)
     // Init tensor info
     TensorInfo info(TensorShape(24U, 16U, 3U), 1, DataType::F32);
 
-    // Allocate memory
-    auto buf = std::make_shared<CLBufferMemoryRegion>(CLScheduler::get().context(), CL_MEM_ALLOC_HOST_PTR | CL_MEM_READ_WRITE, info.total_size());
+    // Allocate memory buffer
+    const size_t total_size = info.total_size();
+    auto         buf        = cl::Buffer(CLScheduler::get().context(), CL_MEM_READ_WRITE, total_size);
 
-    // Negative case : Import empty memory
+    // Negative case : Import nullptr
     CLTensor t1;
     t1.allocator()->init(info);
-    ARM_COMPUTE_EXPECT(!bool(t1.allocator()->import_memory(CLMemory())), framework::LogLevel::ERRORS);
+    ARM_COMPUTE_EXPECT(!bool(t1.allocator()->import_memory(cl::Buffer())), framework::LogLevel::ERRORS);
     ARM_COMPUTE_EXPECT(t1.info()->is_resizable(), framework::LogLevel::ERRORS);
 
     // Negative case : Import memory to a tensor that is memory managed
     CLTensor      t2;
     CLMemoryGroup mg;
     t2.allocator()->set_associated_memory_group(&mg);
-    ARM_COMPUTE_EXPECT(!bool(t2.allocator()->import_memory(CLMemory(buf))), framework::LogLevel::ERRORS);
+    ARM_COMPUTE_EXPECT(!bool(t2.allocator()->import_memory(buf)), framework::LogLevel::ERRORS);
     ARM_COMPUTE_EXPECT(t2.info()->is_resizable(), framework::LogLevel::ERRORS);
 
-    // Positive case : Set managed pointer
+    // Positive case : Set raw pointer
     CLTensor t3;
     t3.allocator()->init(info);
-    ARM_COMPUTE_EXPECT(bool(t3.allocator()->import_memory(CLMemory(buf))), framework::LogLevel::ERRORS);
+    ARM_COMPUTE_EXPECT(bool(t3.allocator()->import_memory(buf)), framework::LogLevel::ERRORS);
     ARM_COMPUTE_EXPECT(!t3.info()->is_resizable(), framework::LogLevel::ERRORS);
-    ARM_COMPUTE_EXPECT(t3.cl_buffer().get() == buf->cl_data().get(), framework::LogLevel::ERRORS);
+    ARM_COMPUTE_EXPECT(t3.cl_buffer().get() == buf.get(), framework::LogLevel::ERRORS);
     t3.allocator()->free();
     ARM_COMPUTE_EXPECT(t3.info()->is_resizable(), framework::LogLevel::ERRORS);
-    ARM_COMPUTE_EXPECT(t3.buffer() == nullptr, framework::LogLevel::ERRORS);
+    ARM_COMPUTE_EXPECT(t3.cl_buffer().get() != buf.get(), framework::LogLevel::ERRORS);
 }
 
 TEST_SUITE_END()
diff --git a/tests/validation/NEON/UNIT/TensorAllocator.cpp b/tests/validation/NEON/UNIT/TensorAllocator.cpp
index 7781107210..384a00855b 100644
--- a/tests/validation/NEON/UNIT/TensorAllocator.cpp
+++ b/tests/validation/NEON/UNIT/TensorAllocator.cpp
@@ -49,37 +49,34 @@ TEST_CASE(ImportMemory, framework::DatasetMode::ALL)
     TensorInfo info(TensorShape(24U, 16U, 3U), 1, DataType::F32);
 
     // Allocate memory buffer
-    auto buf = std::make_shared<MemoryRegion>(info.total_size());
+    const size_t total_size = info.total_size();
+    auto         data       = support::cpp14::make_unique<uint8_t[]>(total_size);
 
-    // Negative case : Import empty memory
+    // Negative case : Import pointer with zero size
     Tensor t1;
     t1.allocator()->init(info);
-    ARM_COMPUTE_EXPECT(!bool(t1.allocator()->import_memory(Memory())), framework::LogLevel::ERRORS);
+    ARM_COMPUTE_EXPECT(!bool(t1.allocator()->import_memory(data.get(), 0)), framework::LogLevel::ERRORS);
     ARM_COMPUTE_EXPECT(t1.info()->is_resizable(), framework::LogLevel::ERRORS);
 
-    // Negative case : Import memory to a tensor that is memory managed
-    Tensor      t2;
-    MemoryGroup mg;
-    t2.allocator()->set_associated_memory_group(&mg);
-    ARM_COMPUTE_EXPECT(!bool(t2.allocator()->import_memory(Memory(buf.get()))), framework::LogLevel::ERRORS);
+    // Negative case : Import nullptr
+    Tensor t2;
+    t2.allocator()->init(info);
+    ARM_COMPUTE_EXPECT(!bool(t2.allocator()->import_memory(nullptr, total_size)), framework::LogLevel::ERRORS);
     ARM_COMPUTE_EXPECT(t2.info()->is_resizable(), framework::LogLevel::ERRORS);
 
-    // Positive case : Set raw pointer
-    Tensor t3;
-    t3.allocator()->init(info);
-    ARM_COMPUTE_EXPECT(bool(t3.allocator()->import_memory(Memory(buf.get()))), framework::LogLevel::ERRORS);
-    ARM_COMPUTE_EXPECT(!t3.info()->is_resizable(), framework::LogLevel::ERRORS);
-    ARM_COMPUTE_EXPECT(t3.buffer() == reinterpret_cast<uint8_t *>(buf->buffer()), framework::LogLevel::ERRORS);
-    t3.allocator()->free();
+    // Negative case : Import memory to a tensor that is memory managed
+    Tensor      t3;
+    MemoryGroup mg;
+    t3.allocator()->set_associated_memory_group(&mg);
+    ARM_COMPUTE_EXPECT(!bool(t3.allocator()->import_memory(data.get(), total_size)), framework::LogLevel::ERRORS);
     ARM_COMPUTE_EXPECT(t3.info()->is_resizable(), framework::LogLevel::ERRORS);
-    ARM_COMPUTE_EXPECT(t3.buffer() == nullptr, framework::LogLevel::ERRORS);
 
-    // Positive case : Set managed pointer
+    // Positive case : Set raw pointer
     Tensor t4;
     t4.allocator()->init(info);
-    ARM_COMPUTE_EXPECT(bool(t4.allocator()->import_memory(Memory(buf))), framework::LogLevel::ERRORS);
+    ARM_COMPUTE_EXPECT(bool(t4.allocator()->import_memory(data.get(), total_size)), framework::LogLevel::ERRORS);
     ARM_COMPUTE_EXPECT(!t4.info()->is_resizable(), framework::LogLevel::ERRORS);
-    ARM_COMPUTE_EXPECT(t4.buffer() == reinterpret_cast<uint8_t *>(buf->buffer()), framework::LogLevel::ERRORS);
+    ARM_COMPUTE_EXPECT(t4.buffer() == reinterpret_cast<uint8_t *>(data.get()), framework::LogLevel::ERRORS);
     t4.allocator()->free();
     ARM_COMPUTE_EXPECT(t4.info()->is_resizable(), framework::LogLevel::ERRORS);
     ARM_COMPUTE_EXPECT(t4.buffer() == nullptr, framework::LogLevel::ERRORS);
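
Reviewer note: below is a minimal usage sketch of the CPU-side import path that the updated NEON test exercises, showing how client code would hand a caller-owned buffer to a Tensor after this change. The include paths, the two-argument TensorAllocator::import_memory(void *, size_t) overload, and the bool() conversion of its return value are inferred from the hunks above rather than confirmed against the full library headers, so treat this as an assumption-laden sketch, not the library's documented API.

    #include "arm_compute/core/TensorInfo.h"
    #include "arm_compute/core/TensorShape.h"
    #include "arm_compute/core/Types.h"
    #include "arm_compute/runtime/Tensor.h"

    #include <cstdint>
    #include <memory>

    int main()
    {
        using namespace arm_compute;

        // Describe the tensor and allocate a caller-owned backing buffer of matching size.
        TensorInfo   info(TensorShape(24U, 16U, 3U), 1, DataType::F32);
        const size_t total_size = info.total_size();
        auto         data       = std::make_unique<uint8_t[]>(total_size); // caller keeps ownership

        // Bind the external buffer to the tensor; the allocator does not take ownership.
        Tensor tensor;
        tensor.allocator()->init(info);
        const auto status = tensor.allocator()->import_memory(data.get(), total_size); // assumed overload
        if(!bool(status))
        {
            return 1; // import rejected, e.g. nullptr, zero size, or a memory-managed tensor
        }

        // ... configure and run functions on `tensor` here ...

        // Unbind before `data` goes out of scope; the tensor becomes resizable again.
        tensor.allocator()->free();
        return 0;
    }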