diff options
author | Georgios Pinitas <georgios.pinitas@arm.com> | 2021-05-04 21:39:57 +0100 |
---|---|---|
committer | Georgios Pinitas <georgios.pinitas@arm.com> | 2021-05-12 05:50:43 +0000 |
commit | 2cd5b31d729984f938e2253532424daf157029c4 (patch) | |
tree | 00ea795cfd043742507feafd04ae9b9cce01adca /src/runtime | |
parent | ee22030441e869a7c4ff632d27a7d45c6168a14e (diff) | |
download | ComputeLibrary-2cd5b31d729984f938e2253532424daf157029c4.tar.gz |
Remove unused CLCoreRuntimeContext
CLCoreRuntimeContext is currently unused and is planned to be replaced
by the Context infrastructure.
Signed-off-by: Georgios Pinitas <georgios.pinitas@arm.com>
Change-Id: Ic2874800960ca954f647e8867e7db951ce823e1c
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/5571
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Michalis Spyrou <michalis.spyrou@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Diffstat (limited to 'src/runtime')
-rw-r--r-- | src/runtime/CL/CLBufferAllocator.cpp | 20 | ||||
-rw-r--r-- | src/runtime/CL/CLMemoryRegion.cpp | 35 | ||||
-rw-r--r-- | src/runtime/CL/CLRuntimeContext.cpp | 8 | ||||
-rw-r--r-- | src/runtime/CL/CLTensorAllocator.cpp | 27 |
4 files changed, 27 insertions, 63 deletions
diff --git a/src/runtime/CL/CLBufferAllocator.cpp b/src/runtime/CL/CLBufferAllocator.cpp index 3673d65111..e06ef3d37d 100644 --- a/src/runtime/CL/CLBufferAllocator.cpp +++ b/src/runtime/CL/CLBufferAllocator.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2020 Arm Limited. + * Copyright (c) 2017-2021 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -23,7 +23,6 @@ */ #include "arm_compute/runtime/CL/CLBufferAllocator.h" -#include "arm_compute/core/CL/CLCoreRuntimeContext.h" #include "arm_compute/core/CL/OpenCL.h" #include "arm_compute/core/Error.h" #include "arm_compute/runtime/CL/CLMemoryRegion.h" @@ -33,23 +32,10 @@ namespace arm_compute { -CLBufferAllocator::CLBufferAllocator(CLCoreRuntimeContext *ctx) - : _ctx(ctx) -{ -} - void *CLBufferAllocator::allocate(size_t size, size_t alignment) { ARM_COMPUTE_UNUSED(alignment); - cl_mem buf; - if(_ctx == nullptr) - { - buf = clCreateBuffer(CLScheduler::get().context().get(), CL_MEM_ALLOC_HOST_PTR | CL_MEM_READ_WRITE, size, nullptr, nullptr); - } - else - { - buf = clCreateBuffer(_ctx->context().get(), CL_MEM_ALLOC_HOST_PTR | CL_MEM_READ_WRITE, size, nullptr, nullptr); - } + cl_mem buf{ clCreateBuffer(CLScheduler::get().context().get(), CL_MEM_ALLOC_HOST_PTR | CL_MEM_READ_WRITE, size, nullptr, nullptr) }; return static_cast<void *>(buf); } @@ -62,6 +48,6 @@ void CLBufferAllocator::free(void *ptr) std::unique_ptr<IMemoryRegion> CLBufferAllocator::make_region(size_t size, size_t alignment) { ARM_COMPUTE_UNUSED(alignment); - return std::make_unique<CLBufferMemoryRegion>(_ctx, CL_MEM_ALLOC_HOST_PTR | CL_MEM_READ_WRITE, size); + return std::make_unique<CLBufferMemoryRegion>(CL_MEM_ALLOC_HOST_PTR | CL_MEM_READ_WRITE, size); } } // namespace arm_compute diff --git a/src/runtime/CL/CLMemoryRegion.cpp b/src/runtime/CL/CLMemoryRegion.cpp index 0952139a8b..780a563d63 100644 --- a/src/runtime/CL/CLMemoryRegion.cpp +++ b/src/runtime/CL/CLMemoryRegion.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2020 Arm Limited. 
+ * Copyright (c) 2018-2021 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -23,16 +23,15 @@ */ #include "arm_compute/runtime/CL/CLMemoryRegion.h" -#include "arm_compute/core/CL/CLCoreRuntimeContext.h" #include "arm_compute/core/Error.h" #include "arm_compute/runtime/CL/CLScheduler.h" namespace arm_compute { -ICLMemoryRegion::ICLMemoryRegion(CLCoreRuntimeContext *ctx, size_t size) +ICLMemoryRegion::ICLMemoryRegion(size_t size) : IMemoryRegion(size), - _queue((ctx != nullptr) ? ctx->queue() : CLScheduler::get().queue()), - _ctx((ctx != nullptr) ? ctx->context() : CLScheduler::get().context()), + _queue(CLScheduler::get().queue()), + _ctx(CLScheduler::get().context()), _mapping(nullptr), _mem() { @@ -59,17 +58,17 @@ std::unique_ptr<IMemoryRegion> ICLMemoryRegion::extract_subregion(size_t offset, return nullptr; } -CLBufferMemoryRegion::CLBufferMemoryRegion(CLCoreRuntimeContext *ctx, cl_mem_flags flags, size_t size) - : ICLMemoryRegion(ctx, size) +CLBufferMemoryRegion::CLBufferMemoryRegion(cl_mem_flags flags, size_t size) + : ICLMemoryRegion(size) { if(_size != 0) { - _mem = cl::Buffer((ctx != nullptr) ? ctx->context() : CLScheduler::get().context(), flags, _size); + _mem = cl::Buffer(CLScheduler::get().context(), flags, _size); } } -CLBufferMemoryRegion::CLBufferMemoryRegion(const cl::Buffer &buffer, CLCoreRuntimeContext *ctx) - : ICLMemoryRegion(ctx, buffer.getInfo<CL_MEM_SIZE>()) +CLBufferMemoryRegion::CLBufferMemoryRegion(const cl::Buffer &buffer) + : ICLMemoryRegion(buffer.getInfo<CL_MEM_SIZE>()) { _mem = buffer; } @@ -93,15 +92,15 @@ void CLBufferMemoryRegion::unmap(cl::CommandQueue &q) _mapping = nullptr; } -ICLSVMMemoryRegion::ICLSVMMemoryRegion(CLCoreRuntimeContext *ctx, cl_mem_flags flags, size_t size, size_t alignment) - : ICLMemoryRegion(ctx, size), _ptr(nullptr) +ICLSVMMemoryRegion::ICLSVMMemoryRegion(cl_mem_flags flags, size_t size, size_t alignment) + : ICLMemoryRegion(size), _ptr(nullptr) { if(size != 0) { - _ptr = clSVMAlloc((ctx != nullptr) ? 
ctx->context().get() : CLScheduler::get().context().get(), flags, size, alignment); + _ptr = clSVMAlloc(CLScheduler::get().context().get(), flags, size, alignment); if(_ptr != nullptr) { - _mem = cl::Buffer((ctx != nullptr) ? ctx->context() : CLScheduler::get().context(), CL_MEM_READ_WRITE | CL_MEM_USE_HOST_PTR, _size, _ptr); + _mem = cl::Buffer(CLScheduler::get().context(), CL_MEM_READ_WRITE | CL_MEM_USE_HOST_PTR, _size, _ptr); } } } @@ -127,8 +126,8 @@ void *ICLSVMMemoryRegion::ptr() return _ptr; } -CLCoarseSVMMemoryRegion::CLCoarseSVMMemoryRegion(CLCoreRuntimeContext *ctx, cl_mem_flags flags, size_t size, size_t alignment) - : ICLSVMMemoryRegion(ctx, flags, size, alignment) +CLCoarseSVMMemoryRegion::CLCoarseSVMMemoryRegion(cl_mem_flags flags, size_t size, size_t alignment) + : ICLSVMMemoryRegion(flags, size, alignment) { } @@ -147,8 +146,8 @@ void CLCoarseSVMMemoryRegion::unmap(cl::CommandQueue &q) _mapping = nullptr; } -CLFineSVMMemoryRegion::CLFineSVMMemoryRegion(CLCoreRuntimeContext *ctx, cl_mem_flags flags, size_t size, size_t alignment) - : ICLSVMMemoryRegion(ctx, flags, size, alignment) +CLFineSVMMemoryRegion::CLFineSVMMemoryRegion(cl_mem_flags flags, size_t size, size_t alignment) + : ICLSVMMemoryRegion(flags, size, alignment) { } diff --git a/src/runtime/CL/CLRuntimeContext.cpp b/src/runtime/CL/CLRuntimeContext.cpp index 0c1d011f9a..5083b4b0c5 100644 --- a/src/runtime/CL/CLRuntimeContext.cpp +++ b/src/runtime/CL/CLRuntimeContext.cpp @@ -29,7 +29,7 @@ namespace arm_compute { CLRuntimeContext::CLRuntimeContext() - : _gpu_owned_scheduler(std::make_unique<CLScheduler>()), _gpu_scheduler(_gpu_owned_scheduler.get()), _symbols(), _core_context(), _backend_type() + : _gpu_owned_scheduler(std::make_unique<CLScheduler>()), _gpu_scheduler(_gpu_owned_scheduler.get()), _symbols(), _backend_type() { _symbols.load_default(); auto ctx_dev_err = create_opencl_context_and_device(_backend_type); @@ -40,7 +40,6 @@ CLRuntimeContext::CLRuntimeContext() 
_gpu_owned_scheduler->init(ctx, queue, dev, &_tuner); const std::string cl_kernels_folder("./cl_kernels"); CLKernelLibrary::get().init(cl_kernels_folder, ctx, dev); - _core_context = CLCoreRuntimeContext(&CLKernelLibrary::get(), _gpu_owned_scheduler->context(), _gpu_owned_scheduler->queue()); } CLKernelLibrary &CLRuntimeContext::kernel_library() @@ -48,11 +47,6 @@ CLKernelLibrary &CLRuntimeContext::kernel_library() return CLKernelLibrary::get(); } -CLCoreRuntimeContext *CLRuntimeContext::core_runtime_context() -{ - return &_core_context; -} - void CLRuntimeContext::set_gpu_scheduler(CLScheduler *scheduler) { ARM_COMPUTE_ERROR_ON_NULLPTR(scheduler); diff --git a/src/runtime/CL/CLTensorAllocator.cpp b/src/runtime/CL/CLTensorAllocator.cpp index c82e9dfc67..f85b8ae777 100644 --- a/src/runtime/CL/CLTensorAllocator.cpp +++ b/src/runtime/CL/CLTensorAllocator.cpp @@ -38,29 +38,27 @@ static IAllocator *static_global_cl_allocator = nullptr; /** Helper function used to allocate the backing memory of a tensor * - * @param[in] context OpenCL context to use * @param[in] size Size of the allocation * @param[in] alignment Alignment of the allocation * * @return A wrapped memory region */ -std::unique_ptr<ICLMemoryRegion> allocate_region(CLCoreRuntimeContext *ctx, size_t size, cl_uint alignment) +std::unique_ptr<ICLMemoryRegion> allocate_region(size_t size, cl_uint alignment) { // Try fine-grain SVM - std::unique_ptr<ICLMemoryRegion> region = std::make_unique<CLFineSVMMemoryRegion>(ctx, - CL_MEM_READ_WRITE | CL_MEM_SVM_FINE_GRAIN_BUFFER, + std::unique_ptr<ICLMemoryRegion> region = std::make_unique<CLFineSVMMemoryRegion>(CL_MEM_READ_WRITE | CL_MEM_SVM_FINE_GRAIN_BUFFER, size, alignment); // Try coarse-grain SVM in case of failure if(region != nullptr && region->ptr() == nullptr) { - region = std::make_unique<CLCoarseSVMMemoryRegion>(ctx, CL_MEM_READ_WRITE, size, alignment); + region = std::make_unique<CLCoarseSVMMemoryRegion>(CL_MEM_READ_WRITE, size, alignment); } // Try legacy 
buffer memory in case of failure if(region != nullptr && region->ptr() == nullptr) { - region = std::make_unique<CLBufferMemoryRegion>(ctx, CL_MEM_ALLOC_HOST_PTR | CL_MEM_READ_WRITE, size); + region = std::make_unique<CLBufferMemoryRegion>(CL_MEM_ALLOC_HOST_PTR | CL_MEM_READ_WRITE, size); } return region; } @@ -136,14 +134,9 @@ void CLTensorAllocator::allocate() { _memory.set_owned_region(static_global_cl_allocator->make_region(info().total_size(), 0)); } - else if(_ctx == nullptr) - { - auto legacy_ctx = CLCoreRuntimeContext(nullptr, CLScheduler::get().context(), CLScheduler::get().queue()); - _memory.set_owned_region(allocate_region(&legacy_ctx, info().total_size(), 0)); - } else { - _memory.set_owned_region(allocate_region(_ctx->core_runtime_context(), info().total_size(), 0)); + _memory.set_owned_region(allocate_region(info().total_size(), 0)); } } else @@ -178,15 +171,7 @@ Status CLTensorAllocator::import_memory(cl::Buffer buffer) ARM_COMPUTE_RETURN_ERROR_ON(buffer.getInfo<CL_MEM_CONTEXT>().get() != CLScheduler::get().context().get()); ARM_COMPUTE_RETURN_ERROR_ON(_associated_memory_group != nullptr); - if(_ctx == nullptr) - { - auto legacy_ctx = CLCoreRuntimeContext(nullptr, CLScheduler::get().context(), CLScheduler::get().queue()); - _memory.set_owned_region(std::make_unique<CLBufferMemoryRegion>(buffer, &legacy_ctx)); - } - else - { - _memory.set_owned_region(std::make_unique<CLBufferMemoryRegion>(buffer, _ctx->core_runtime_context())); - } + _memory.set_owned_region(std::make_unique<CLBufferMemoryRegion>(buffer)); info().set_is_resizable(false); return Status{}; |