From 035004e65dbffb6534ad4183cf8f95da0544fd28 Mon Sep 17 00:00:00 2001
From: Georgios Pinitas
Date: Tue, 13 Apr 2021 19:44:17 +0100
Subject: Add support for a global allocator for OpenCL tensors

Give the user the ability to specify an allocator that can be used by
all the internal function tensors. As this allocator is global, it
needs to outlive all the tensors/functions that use it.

Resolves: COMPMID-4212, COMPMID-4213

Signed-off-by: Georgios Pinitas
Change-Id: I251871c242879976819ebca1452404133a8e62d7
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/5420
Tested-by: Arm Jenkins
Reviewed-by: Michele Di Giorgio
Comments-Addressed: Arm Jenkins
---
 src/runtime/CL/CLTensorAllocator.cpp | 18 +++++++++++++++---
 1 file changed, 15 insertions(+), 3 deletions(-)

(limited to 'src/runtime/CL/CLTensorAllocator.cpp')

diff --git a/src/runtime/CL/CLTensorAllocator.cpp b/src/runtime/CL/CLTensorAllocator.cpp
index fc789fa4b9..c82e9dfc67 100644
--- a/src/runtime/CL/CLTensorAllocator.cpp
+++ b/src/runtime/CL/CLTensorAllocator.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2020 Arm Limited.
+ * Copyright (c) 2016-2021 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -31,9 +31,11 @@
 namespace arm_compute
 {
 const cl::Buffer CLTensorAllocator::_empty_buffer = cl::Buffer();
-
 namespace
 {
+/** Global user-defined allocator that can be used for all internal allocations of a CLTensor */
+static IAllocator *static_global_cl_allocator = nullptr;
+
 /** Helper function used to allocate the backing memory of a tensor
  *
  * @param[in] context OpenCL context to use
@@ -130,7 +132,11 @@ void CLTensorAllocator::allocate()
     if(_associated_memory_group == nullptr)
     {
         // Perform memory allocation
-        if(_ctx == nullptr)
+        if(static_global_cl_allocator != nullptr)
+        {
+            _memory.set_owned_region(static_global_cl_allocator->make_region(info().total_size(), 0));
+        }
+        else if(_ctx == nullptr)
         {
             auto legacy_ctx = CLCoreRuntimeContext(nullptr, CLScheduler::get().context(), CLScheduler::get().queue());
             _memory.set_owned_region(allocate_region(&legacy_ctx, info().total_size(), 0));
@@ -142,6 +148,7 @@ void CLTensorAllocator::allocate()
     }
     else
     {
+        // Finalize memory management instead
         _associated_memory_group->finalize_memory(_owner, _memory, info().total_size(), alignment());
     }

@@ -194,6 +201,11 @@ void CLTensorAllocator::set_associated_memory_group(IMemoryGroup *associated_mem
     _associated_memory_group = associated_memory_group;
 }

+void CLTensorAllocator::set_global_allocator(IAllocator *allocator)
+{
+    static_global_cl_allocator = allocator;
+}
+
 uint8_t *CLTensorAllocator::lock()
 {
     if(_ctx)
--
cgit v1.2.1
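
For reference, here is a minimal usage sketch of the new entry point. It is not part of the patch: it assumes CLTensorAllocator::set_global_allocator() is exposed as a static member in the public header and uses CLBufferAllocator as a stand-in IAllocator implementation (any long-lived allocator would do). As the commit message notes, the allocator object must outlive every tensor and function that allocates through it.

    // Usage sketch (not part of the patch). Assumes set_global_allocator() is a
    // static member of CLTensorAllocator and that CLBufferAllocator can be
    // default-constructed in this version of the library.
    #include "arm_compute/core/TensorInfo.h"
    #include "arm_compute/core/TensorShape.h"
    #include "arm_compute/core/Types.h"
    #include "arm_compute/runtime/CL/CLBufferAllocator.h"
    #include "arm_compute/runtime/CL/CLScheduler.h"
    #include "arm_compute/runtime/CL/CLTensor.h"
    #include "arm_compute/runtime/CL/CLTensorAllocator.h"

    using namespace arm_compute;

    // Kept at namespace scope so it outlives every CLTensor/function using it.
    static CLBufferAllocator g_cl_allocator{};

    int main()
    {
        CLScheduler::get().default_init();

        // Route internal CLTensor allocations (those not managed by a memory
        // group) through the user-provided allocator from this point on.
        CLTensorAllocator::set_global_allocator(&g_cl_allocator);

        CLTensor tensor;
        tensor.allocator()->init(TensorInfo(TensorShape(16U, 16U), 1, DataType::F32));
        tensor.allocator()->allocate(); // backing region comes from g_cl_allocator

        return 0;
    }

Note that the patch stores the raw pointer in a file-scope static and takes no ownership, so keeping the allocator alive for the whole workload, or clearing it with set_global_allocator(nullptr) once nothing uses it any more, remains the caller's responsibility.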