diff options
author | Georgios Pinitas <georgios.pinitas@arm.com> | 2021-04-13 19:44:17 +0100 |
---|---|---|
committer | Georgios Pinitas <georgios.pinitas@arm.com> | 2021-04-14 14:37:44 +0000 |
commit | 035004e65dbffb6534ad4183cf8f95da0544fd28 (patch) | |
tree | 6ba1c45e6fc39c27672448cd5f5ed7e86cd61eed /src/runtime/CL | |
parent | 327225d3b2f716d5c62d801a7fafc7d377521f34 (diff) | |
download | ComputeLibrary-035004e65dbffb6534ad4183cf8f95da0544fd28.tar.gz |
Add support for a global allocator for OpenCL tensors
Give the user the ability to specify an allocator to be used for all
internal function tensors. Because this allocator is global, it must
outlive every tensor and function that uses it.
Resolves: COMPMID-4212, COMPMID-4213
Signed-off-by: Georgios Pinitas <georgios.pinitas@arm.com>
Change-Id: I251871c242879976819ebca1452404133a8e62d7
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/5420
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Michele Di Giorgio <michele.digiorgio@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Diffstat (limited to 'src/runtime/CL')
-rw-r--r-- | src/runtime/CL/CLTensorAllocator.cpp | 18 |
1 file changed, 15 insertions, 3 deletions
diff --git a/src/runtime/CL/CLTensorAllocator.cpp b/src/runtime/CL/CLTensorAllocator.cpp index fc789fa4b9..c82e9dfc67 100644 --- a/src/runtime/CL/CLTensorAllocator.cpp +++ b/src/runtime/CL/CLTensorAllocator.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-2020 Arm Limited. + * Copyright (c) 2016-2021 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -31,9 +31,11 @@ namespace arm_compute { const cl::Buffer CLTensorAllocator::_empty_buffer = cl::Buffer(); - namespace { +/** Global user-defined allocator that can be used for all internal allocations of a CLTensor */ +static IAllocator *static_global_cl_allocator = nullptr; + /** Helper function used to allocate the backing memory of a tensor * * @param[in] context OpenCL context to use @@ -130,7 +132,11 @@ void CLTensorAllocator::allocate() if(_associated_memory_group == nullptr) { // Perform memory allocation - if(_ctx == nullptr) + if(static_global_cl_allocator != nullptr) + { + _memory.set_owned_region(static_global_cl_allocator->make_region(info().total_size(), 0)); + } + else if(_ctx == nullptr) { auto legacy_ctx = CLCoreRuntimeContext(nullptr, CLScheduler::get().context(), CLScheduler::get().queue()); _memory.set_owned_region(allocate_region(&legacy_ctx, info().total_size(), 0)); @@ -142,6 +148,7 @@ void CLTensorAllocator::allocate() } else { + // Finalize memory management instead _associated_memory_group->finalize_memory(_owner, _memory, info().total_size(), alignment()); } @@ -194,6 +201,11 @@ void CLTensorAllocator::set_associated_memory_group(IMemoryGroup *associated_mem _associated_memory_group = associated_memory_group; } +void CLTensorAllocator::set_global_allocator(IAllocator *allocator) +{ + static_global_cl_allocator = allocator; +} + uint8_t *CLTensorAllocator::lock() { if(_ctx) |