about summary refs log tree commit diff
path: root/src/runtime/CL/CLTensorAllocator.cpp
diff options
context:
space:
mode:
Diffstat (limited to 'src/runtime/CL/CLTensorAllocator.cpp')
-rw-r--r--  src/runtime/CL/CLTensorAllocator.cpp  18
1 files changed, 15 insertions, 3 deletions
diff --git a/src/runtime/CL/CLTensorAllocator.cpp b/src/runtime/CL/CLTensorAllocator.cpp
index fc789fa4b9..c82e9dfc67 100644
--- a/src/runtime/CL/CLTensorAllocator.cpp
+++ b/src/runtime/CL/CLTensorAllocator.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2020 Arm Limited.
+ * Copyright (c) 2016-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -31,9 +31,11 @@
namespace arm_compute
{
const cl::Buffer CLTensorAllocator::_empty_buffer = cl::Buffer();
-
namespace
{
+/** Global user-defined allocator that can be used for all internal allocations of a CLTensor */
+static IAllocator *static_global_cl_allocator = nullptr;
+
/** Helper function used to allocate the backing memory of a tensor
*
* @param[in] context OpenCL context to use
@@ -130,7 +132,11 @@ void CLTensorAllocator::allocate()
if(_associated_memory_group == nullptr)
{
// Perform memory allocation
- if(_ctx == nullptr)
+ if(static_global_cl_allocator != nullptr)
+ {
+ _memory.set_owned_region(static_global_cl_allocator->make_region(info().total_size(), 0));
+ }
+ else if(_ctx == nullptr)
{
auto legacy_ctx = CLCoreRuntimeContext(nullptr, CLScheduler::get().context(), CLScheduler::get().queue());
_memory.set_owned_region(allocate_region(&legacy_ctx, info().total_size(), 0));
@@ -142,6 +148,7 @@ void CLTensorAllocator::allocate()
}
else
{
+ // Finalize memory management instead
_associated_memory_group->finalize_memory(_owner, _memory, info().total_size(), alignment());
}
@@ -194,6 +201,11 @@ void CLTensorAllocator::set_associated_memory_group(IMemoryGroup *associated_mem
_associated_memory_group = associated_memory_group;
}
+void CLTensorAllocator::set_global_allocator(IAllocator *allocator)
+{
+ static_global_cl_allocator = allocator;
+}
+
uint8_t *CLTensorAllocator::lock()
{
if(_ctx)