author     Georgios Pinitas <georgios.pinitas@arm.com>    2021-05-04 21:39:57 +0100
committer  Georgios Pinitas <georgios.pinitas@arm.com>    2021-05-12 05:50:43 +0000
commit     2cd5b31d729984f938e2253532424daf157029c4 (patch)
tree       00ea795cfd043742507feafd04ae9b9cce01adca /src/runtime/CL/CLTensorAllocator.cpp
parent     ee22030441e869a7c4ff632d27a7d45c6168a14e (diff)
Remove unused CLCoreRuntimeContext
CLCoreRuntimeContext is currently unused and is planned to be replaced by the Context infrastructure.

Signed-off-by: Georgios Pinitas <georgios.pinitas@arm.com>
Change-Id: Ic2874800960ca954f647e8867e7db951ce823e1c
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/5571
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Michalis Spyrou <michalis.spyrou@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Diffstat (limited to 'src/runtime/CL/CLTensorAllocator.cpp')
-rw-r--r--  src/runtime/CL/CLTensorAllocator.cpp | 27
1 file changed, 6 insertions(+), 21 deletions(-)
diff --git a/src/runtime/CL/CLTensorAllocator.cpp b/src/runtime/CL/CLTensorAllocator.cpp
index c82e9dfc67..f85b8ae777 100644
--- a/src/runtime/CL/CLTensorAllocator.cpp
+++ b/src/runtime/CL/CLTensorAllocator.cpp
@@ -38,29 +38,27 @@ static IAllocator *static_global_cl_allocator = nullptr;
/** Helper function used to allocate the backing memory of a tensor
*
- * @param[in] context OpenCL context to use
* @param[in] size Size of the allocation
* @param[in] alignment Alignment of the allocation
*
* @return A wrapped memory region
*/
-std::unique_ptr<ICLMemoryRegion> allocate_region(CLCoreRuntimeContext *ctx, size_t size, cl_uint alignment)
+std::unique_ptr<ICLMemoryRegion> allocate_region(size_t size, cl_uint alignment)
{
// Try fine-grain SVM
-    std::unique_ptr<ICLMemoryRegion> region = std::make_unique<CLFineSVMMemoryRegion>(ctx,
-                                                                                      CL_MEM_READ_WRITE | CL_MEM_SVM_FINE_GRAIN_BUFFER,
+    std::unique_ptr<ICLMemoryRegion> region = std::make_unique<CLFineSVMMemoryRegion>(CL_MEM_READ_WRITE | CL_MEM_SVM_FINE_GRAIN_BUFFER,
                                                                                       size,
                                                                                       alignment);
// Try coarse-grain SVM in case of failure
if(region != nullptr && region->ptr() == nullptr)
{
- region = std::make_unique<CLCoarseSVMMemoryRegion>(ctx, CL_MEM_READ_WRITE, size, alignment);
+ region = std::make_unique<CLCoarseSVMMemoryRegion>(CL_MEM_READ_WRITE, size, alignment);
}
// Try legacy buffer memory in case of failure
if(region != nullptr && region->ptr() == nullptr)
{
- region = std::make_unique<CLBufferMemoryRegion>(ctx, CL_MEM_ALLOC_HOST_PTR | CL_MEM_READ_WRITE, size);
+ region = std::make_unique<CLBufferMemoryRegion>(CL_MEM_ALLOC_HOST_PTR | CL_MEM_READ_WRITE, size);
}
return region;
}
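
For context, the fallback chain in allocate_region() mirrors a standard OpenCL allocation pattern: prefer fine-grained SVM, fall back to coarse-grained SVM, and finally fall back to a plain buffer object. A minimal standalone sketch of that pattern against the raw OpenCL C API follows; the helper name try_allocate and its signature are illustrative only, not part of ComputeLibrary.

#define CL_TARGET_OPENCL_VERSION 200
#include <CL/cl.h>
#include <cstddef>

// Illustrative-only helper mirroring the fallback order above.
// Returns an SVM pointer on success, otherwise leaves a buffer in *buffer_out.
void *try_allocate(cl_context ctx, size_t size, cl_uint alignment, cl_mem *buffer_out)
{
    // 1) Fine-grained SVM: host and device share the allocation coherently.
    void *ptr = clSVMAlloc(ctx, CL_MEM_READ_WRITE | CL_MEM_SVM_FINE_GRAIN_BUFFER, size, alignment);
    if(ptr != nullptr)
    {
        return ptr;
    }
    // 2) Coarse-grained SVM: host access requires map/unmap.
    ptr = clSVMAlloc(ctx, CL_MEM_READ_WRITE, size, alignment);
    if(ptr != nullptr)
    {
        return ptr;
    }
    // 3) Legacy buffer object as a last resort.
    cl_int err   = CL_SUCCESS;
    *buffer_out  = clCreateBuffer(ctx, CL_MEM_ALLOC_HOST_PTR | CL_MEM_READ_WRITE, size, nullptr, &err);
    return nullptr; // caller checks *buffer_out and err
}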
@@ -136,14 +134,9 @@ void CLTensorAllocator::allocate()
{
_memory.set_owned_region(static_global_cl_allocator->make_region(info().total_size(), 0));
}
- else if(_ctx == nullptr)
- {
- auto legacy_ctx = CLCoreRuntimeContext(nullptr, CLScheduler::get().context(), CLScheduler::get().queue());
- _memory.set_owned_region(allocate_region(&legacy_ctx, info().total_size(), 0));
- }
else
{
- _memory.set_owned_region(allocate_region(_ctx->core_runtime_context(), info().total_size(), 0));
+ _memory.set_owned_region(allocate_region(info().total_size(), 0));
}
}
else
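
With the legacy-context branch removed, allocation routes either through an installed global allocator or through allocate_region(), which now relies on CLScheduler's context implicitly. A minimal usage sketch under that assumption (the shape and data type are arbitrary examples):

#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "arm_compute/runtime/CL/CLTensor.h"

using namespace arm_compute;

int main()
{
    // The scheduler's context and queue are the only implicit dependency left.
    CLScheduler::get().default_init();

    CLTensor tensor;
    tensor.allocator()->init(TensorInfo(TensorShape(16U, 16U), 1, DataType::F32));
    tensor.allocator()->allocate(); // no runtime context needed any more
    return 0;
}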
@@ -178,15 +171,7 @@ Status CLTensorAllocator::import_memory(cl::Buffer buffer)
ARM_COMPUTE_RETURN_ERROR_ON(buffer.getInfo<CL_MEM_CONTEXT>().get() != CLScheduler::get().context().get());
ARM_COMPUTE_RETURN_ERROR_ON(_associated_memory_group != nullptr);
- if(_ctx == nullptr)
- {
- auto legacy_ctx = CLCoreRuntimeContext(nullptr, CLScheduler::get().context(), CLScheduler::get().queue());
- _memory.set_owned_region(std::make_unique<CLBufferMemoryRegion>(buffer, &legacy_ctx));
- }
- else
- {
- _memory.set_owned_region(std::make_unique<CLBufferMemoryRegion>(buffer, _ctx->core_runtime_context()));
- }
+ _memory.set_owned_region(std::make_unique<CLBufferMemoryRegion>(buffer));
info().set_is_resizable(false);
return Status{};
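
After the change, import_memory() takes any cl::Buffer created on the scheduler's context; the CL_MEM_CONTEXT guard above rejects buffers from other contexts. A hedged usage sketch (the buffer size is matched to the tensor's total_size(); error handling is minimal):

#include "arm_compute/core/Error.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "arm_compute/runtime/CL/CLTensor.h"

using namespace arm_compute;

void import_external_buffer()
{
    CLScheduler::get().default_init();

    TensorInfo info(TensorShape(8U, 8U), 1, DataType::F32);
    cl::Buffer external(CLScheduler::get().context(), CL_MEM_READ_WRITE, info.total_size());

    CLTensor tensor;
    tensor.allocator()->init(info);
    // Wraps the existing buffer instead of allocating; on success the
    // tensor is marked non-resizable, as in the code above.
    ARM_COMPUTE_ERROR_THROW_ON(tensor.allocator()->import_memory(external));
}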