Diffstat (limited to 'tests/validation/CL/UNIT/TensorAllocator.cpp')
-rw-r--r--  tests/validation/CL/UNIT/TensorAllocator.cpp  121
1 file changed, 107 insertions(+), 14 deletions(-)
diff --git a/tests/validation/CL/UNIT/TensorAllocator.cpp b/tests/validation/CL/UNIT/TensorAllocator.cpp
index 849eee84d0..abe06c544b 100644
--- a/tests/validation/CL/UNIT/TensorAllocator.cpp
+++ b/tests/validation/CL/UNIT/TensorAllocator.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018 ARM Limited.
+ * Copyright (c) 2018-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -25,10 +25,16 @@
#include "arm_compute/runtime/CL/CLMemoryGroup.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
+#include "arm_compute/runtime/CL/functions/CLActivationLayer.h"
+#include "tests/CL/CLAccessor.h"
+#include "tests/Globals.h"
#include "tests/framework/Asserts.h"
#include "tests/framework/Macros.h"
+#include "tests/validation/Validation.h"
+#include "tests/validation/reference/ActivationLayer.h"
#include <memory>
+#include <random>
namespace arm_compute
{
@@ -36,14 +42,33 @@ namespace test
{
namespace validation
{
+namespace
+{
+cl_mem import_malloc_memory_helper(void *ptr, size_t size)
+{
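+ // Property list (key, value, zero terminator) requesting a host-pointer import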
+ const cl_import_properties_arm import_properties[] =
+ {
+ CL_IMPORT_TYPE_ARM,
+ CL_IMPORT_TYPE_HOST_ARM,
+ 0
+ };
+
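+ // Wrap the existing host allocation in a cl_mem; the import performs no copy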
+ cl_int err = CL_SUCCESS;
+ cl_mem buf = clImportMemoryARM(CLKernelLibrary::get().context().get(), CL_MEM_READ_WRITE, import_properties, ptr, size, &err);
+ ARM_COMPUTE_EXPECT(err == CL_SUCCESS, framework::LogLevel::ERRORS);
+
+ return buf;
+}
+} // namespace
+
TEST_SUITE(CL)
TEST_SUITE(UNIT)
TEST_SUITE(TensorAllocator)
-TEST_CASE(ImportMemory, framework::DatasetMode::ALL)
+TEST_CASE(ImportMemoryBuffer, framework::DatasetMode::ALL)
{
// Init tensor info
- TensorInfo info(TensorShape(24U, 16U, 3U), 1, DataType::F32);
+ const TensorInfo info(TensorShape(24U, 16U, 3U), 1, DataType::F32);
// Allocate memory buffer
const size_t total_size = info.total_size();
@@ -62,20 +87,88 @@ TEST_CASE(ImportMemory, framework::DatasetMode::ALL)
ARM_COMPUTE_EXPECT(!bool(t2.allocator()->import_memory(buf)), framework::LogLevel::ERRORS);
ARM_COMPUTE_EXPECT(t2.info()->is_resizable(), framework::LogLevel::ERRORS);
- // Positive case : Set raw pointer
- CLTensor t3;
- t3.allocator()->init(info);
- ARM_COMPUTE_EXPECT(bool(t3.allocator()->import_memory(buf)), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!t3.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(t3.cl_buffer().get() == buf.get(), framework::LogLevel::ERRORS);
- t3.allocator()->free();
+ // Negative case : Invalid buffer size
+ CLTensor t3;
+ const TensorInfo info_neg(TensorShape(32U, 16U, 3U), 1, DataType::F32);
+ t3.allocator()->init(info_neg);
+ ARM_COMPUTE_EXPECT(!bool(t3.allocator()->import_memory(buf)), framework::LogLevel::ERRORS);
ARM_COMPUTE_EXPECT(t3.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(t3.cl_buffer().get() != buf.get(), framework::LogLevel::ERRORS);
+
+ // Positive case : Set raw pointer
+ CLTensor t4;
+ t4.allocator()->init(info);
+ ARM_COMPUTE_EXPECT(bool(t4.allocator()->import_memory(buf)), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(!t4.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(t4.cl_buffer().get() == buf.get(), framework::LogLevel::ERRORS);
+ t4.allocator()->free();
+ ARM_COMPUTE_EXPECT(t4.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(t4.cl_buffer().get() != buf.get(), framework::LogLevel::ERRORS);
+}
+
+TEST_CASE(ImportMemoryMalloc, framework::DatasetMode::ALL)
+{
+ // Check if import extension is supported
+ if(!device_supports_extension(CLKernelLibrary::get().get_device(), "cl_arm_import_memory"))
+ {
+ return;
+ }
+ else
+ {
+ const ActivationLayerInfo act_info(ActivationLayerInfo::ActivationFunction::RELU);
+ const TensorShape shape = TensorShape(24U, 16U, 3U);
+ const DataType data_type = DataType::F32;
+
+ // Create tensor
+ const TensorInfo info(shape, 1, data_type);
+ CLTensor tensor;
+ tensor.allocator()->init(info);
+
+ // Create and configure activation function
+ CLActivationLayer act_func;
+ act_func.configure(&tensor, nullptr, act_info);
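+ // A null output tensor makes the activation layer run in-place on the imported buffer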
+
+ // Allocate and import tensor
+ const size_t total_size_in_elems = tensor.info()->tensor_shape().total_size();
+ const size_t total_size_in_bytes = tensor.info()->total_size();
+ const size_t alignment = CLKernelLibrary::get().get_device().getInfo<CL_DEVICE_GLOBAL_MEM_CACHELINE_SIZE>();
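+ // Pad the host allocation so a cache-line-aligned pointer can be carved out of it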
+ size_t space = total_size_in_bytes + alignment;
+ auto raw_data = support::cpp14::make_unique<uint8_t[]>(space);
+
+ void *aligned_ptr = raw_data.get();
+ support::cpp11::align(alignment, total_size_in_bytes, aligned_ptr, space);
+
+ cl::Buffer wrapped_buffer(import_malloc_memory_helper(aligned_ptr, total_size_in_bytes));
+ ARM_COMPUTE_EXPECT(bool(tensor.allocator()->import_memory(wrapped_buffer)), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(!tensor.info()->is_resizable(), framework::LogLevel::ERRORS);
+
+ // Fill tensor
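+ // Writes through the host pointer are visible to the device, as the imported cl_mem aliases this allocation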
+ std::uniform_real_distribution<float> distribution(-5.f, 5.f);
+ std::mt19937 gen(library->seed());
+ auto *typed_ptr = reinterpret_cast<float *>(aligned_ptr);
+ for(unsigned int i = 0; i < total_size_in_elems; ++i)
+ {
+ typed_ptr[i] = distribution(gen);
+ }
+
+ // Execute function and sync
+ act_func.run();
+ CLScheduler::get().sync();
+
+ // Validate the result by checking that the imported buffer (updated in-place) contains no negative values
+ for(unsigned int i = 0; i < total_size_in_elems; ++i)
+ {
+ ARM_COMPUTE_EXPECT(typed_ptr[i] >= 0, framework::LogLevel::ERRORS);
+ }
+
+ // Release resources
+ tensor.allocator()->free();
+ ARM_COMPUTE_EXPECT(tensor.info()->is_resizable(), framework::LogLevel::ERRORS);
+ }
}
-TEST_SUITE_END()
-TEST_SUITE_END()
-TEST_SUITE_END()
+TEST_SUITE_END() // TensorAllocator
+TEST_SUITE_END() // UNIT
+TEST_SUITE_END() // CL
} // namespace validation
} // namespace test
} // namespace arm_compute