author     Georgios Pinitas <georgios.pinitas@arm.com>    2018-11-14 13:16:56 +0000
committer  Isabella Gottardi <isabella.gottardi@arm.com>  2018-11-21 09:52:04 +0000
commit     df3103622b7de05f4e35b22a2c94b4a46eab4efc (patch)
tree       17e10253e7a069c69d10bea0882b699b99d74b86 /src
parent     c47ef20d69e8ea0f519fdc679435cd7037fc18fe (diff)
download   ComputeLibrary-df3103622b7de05f4e35b22a2c94b4a46eab4efc.tar.gz
COMPMID-1088: Use IMemoryRegion in interfaces where possible
- Simplifies the import memory interface
- Replaces the use of void** handles with appropriate interfaces

Change-Id: I5918c855c11f46352058864623336b352162a4b7
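The effect of the change is visible in the allocator hunks below: TensorAllocator::import_memory() now takes a raw pointer and size, and CLTensorAllocator::import_memory() takes a cl::Buffer, instead of pre-built Memory/CLMemory objects. A minimal usage sketch of the new entry points, assuming the library's usual tensor setup (the shape, data type and variable names are illustrative and not part of this patch):

// Sketch only: exercises the simplified import interfaces introduced by this commit.
#include "arm_compute/core/Error.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/runtime/Tensor.h"
#include "arm_compute/runtime/CL/CLTensor.h"

using namespace arm_compute;

void import_preallocated(void *host_ptr, size_t host_size, const cl::Buffer &cl_buf)
{
    const TensorInfo info(TensorShape(16U, 16U), 1, DataType::F32);

    // CPU tensor: import a raw host pointer directly; no Memory wrapper is needed any more.
    Tensor tensor;
    tensor.allocator()->init(info);
    const Status status = tensor.allocator()->import_memory(host_ptr, host_size);

    // OpenCL tensor: import an existing cl::Buffer; the allocator wraps it in a
    // CLBufferMemoryRegion internally (see CLTensorAllocator.cpp below). The buffer
    // must belong to the context used by CLScheduler.
    CLTensor cl_tensor;
    cl_tensor.allocator()->init(info);
    const Status cl_status = cl_tensor.allocator()->import_memory(cl_buf);

    ARM_COMPUTE_UNUSED(status, cl_status);
}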
Diffstat (limited to 'src')
-rw-r--r--  src/core/CL/OpenCL.cpp                           19
-rw-r--r--  src/runtime/BlobMemoryPool.cpp                   14
-rw-r--r--  src/runtime/CL/CLMemory.cpp                      34
-rw-r--r--  src/runtime/CL/CLMemoryRegion.cpp                11
-rw-r--r--  src/runtime/CL/CLTensorAllocator.cpp             73
-rw-r--r--  src/runtime/GLES_COMPUTE/GCBufferAllocator.cpp   18
-rw-r--r--  src/runtime/GLES_COMPUTE/GCMemory.cpp            81
-rw-r--r--  src/runtime/GLES_COMPUTE/GCMemoryRegion.cpp      96
-rw-r--r--  src/runtime/GLES_COMPUTE/GCTensorAllocator.cpp   40
-rw-r--r--  src/runtime/ISimpleLifetimeManager.cpp            5
-rw-r--r--  src/runtime/Memory.cpp                           18
-rw-r--r--  src/runtime/OffsetMemoryPool.cpp                 16
-rw-r--r--  src/runtime/TensorAllocator.cpp                  26
13 files changed, 326 insertions, 125 deletions
diff --git a/src/core/CL/OpenCL.cpp b/src/core/CL/OpenCL.cpp
index 486bb6a1bd..6725f36a5d 100644
--- a/src/core/CL/OpenCL.cpp
+++ b/src/core/CL/OpenCL.cpp
@@ -106,6 +106,7 @@ bool CLSymbols::load(const std::string &library)
LOAD_FUNCTION_PTR(clReleaseMemObject, handle);
LOAD_FUNCTION_PTR(clGetDeviceInfo, handle);
LOAD_FUNCTION_PTR(clGetDeviceIDs, handle);
+ LOAD_FUNCTION_PTR(clGetMemObjectInfo, handle);
LOAD_FUNCTION_PTR(clRetainEvent, handle);
LOAD_FUNCTION_PTR(clGetPlatformIDs, handle);
LOAD_FUNCTION_PTR(clGetKernelWorkGroupInfo, handle);
@@ -796,6 +797,24 @@ cl_int clGetDeviceInfo(cl_device_id device,
}
}
+cl_int clGetMemObjectInfo(cl_mem memobj,
+ cl_mem_info param_name,
+ size_t param_value_size,
+ void *param_value,
+ size_t *param_value_size_ret)
+{
+ arm_compute::CLSymbols::get().load_default();
+ auto func = arm_compute::CLSymbols::get().clGetMemObjectInfo_ptr;
+ if(func != nullptr)
+ {
+ return func(memobj, param_name, param_value_size, param_value, param_value_size_ret);
+ }
+ else
+ {
+ return CL_OUT_OF_RESOURCES;
+ }
+}
+
cl_int clRetainEvent(cl_event event)
{
arm_compute::CLSymbols::get().load_default();
diff --git a/src/runtime/BlobMemoryPool.cpp b/src/runtime/BlobMemoryPool.cpp
index 29505e57fc..e09451cd62 100644
--- a/src/runtime/BlobMemoryPool.cpp
+++ b/src/runtime/BlobMemoryPool.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -52,7 +52,7 @@ void BlobMemoryPool::acquire(MemoryMappings &handles)
for(auto &handle : handles)
{
ARM_COMPUTE_ERROR_ON(handle.first == nullptr);
- *handle.first = _blobs[handle.second];
+ handle.first->set_region(_blobs[handle.second].get());
}
}
@@ -61,7 +61,7 @@ void BlobMemoryPool::release(MemoryMappings &handles)
for(auto &handle : handles)
{
ARM_COMPUTE_ERROR_ON(handle.first == nullptr);
- *handle.first = nullptr;
+ handle.first->set_region(nullptr);
}
}
@@ -82,17 +82,11 @@ void BlobMemoryPool::allocate_blobs(const std::vector<size_t> &sizes)
for(const auto &size : sizes)
{
- _blobs.push_back(_allocator->allocate(size, 0));
+ _blobs.push_back(_allocator->make_region(size, 0));
}
}
void BlobMemoryPool::free_blobs()
{
- ARM_COMPUTE_ERROR_ON(!_allocator);
-
- for(auto &blob : _blobs)
- {
- _allocator->free(blob);
- }
_blobs.clear();
}
\ No newline at end of file
diff --git a/src/runtime/CL/CLMemory.cpp b/src/runtime/CL/CLMemory.cpp
index bbc513d783..5bea85cfae 100644
--- a/src/runtime/CL/CLMemory.cpp
+++ b/src/runtime/CL/CLMemory.cpp
@@ -24,23 +24,20 @@
#include "arm_compute/runtime/CL/CLMemory.h"
#include "arm_compute/core/Error.h"
+#include "arm_compute/core/utils/misc/Cast.h"
namespace arm_compute
{
CLMemory::CLMemory()
: _region(nullptr), _region_owned(nullptr)
{
- create_empty_region();
}
CLMemory::CLMemory(std::shared_ptr<ICLMemoryRegion> memory)
: _region(nullptr), _region_owned(std::move(memory))
{
- if(_region_owned == nullptr)
- {
- create_empty_region();
- }
- _region = _region_owned.get();
+ _region_owned = memory;
+ _region = _region_owned.get();
}
CLMemory::CLMemory(ICLMemoryRegion *memory)
@@ -49,19 +46,36 @@ CLMemory::CLMemory(ICLMemoryRegion *memory)
_region = memory;
}
-ICLMemoryRegion *CLMemory::region()
+ICLMemoryRegion *CLMemory::cl_region()
+{
+ return _region;
+}
+
+ICLMemoryRegion *CLMemory::cl_region() const
+{
+ return _region;
+}
+
+IMemoryRegion *CLMemory::region()
{
return _region;
}
-ICLMemoryRegion *CLMemory::region() const
+IMemoryRegion *CLMemory::region() const
{
return _region;
}
-void CLMemory::create_empty_region()
+void CLMemory::set_region(IMemoryRegion *region)
+{
+ auto cl_region = utils::cast::polymorphic_downcast<ICLMemoryRegion *>(region);
+ _region_owned = nullptr;
+ _region = cl_region;
+}
+
+void CLMemory::set_owned_region(std::unique_ptr<IMemoryRegion> region)
{
- _region_owned = std::make_shared<CLBufferMemoryRegion>(cl::Context(), CL_MEM_ALLOC_HOST_PTR | CL_MEM_READ_WRITE, 0);
+ _region_owned = utils::cast::polymorphic_downcast_unique_ptr<ICLMemoryRegion>(std::move(region));
_region = _region_owned.get();
}
} // namespace arm_compute
\ No newline at end of file
diff --git a/src/runtime/CL/CLMemoryRegion.cpp b/src/runtime/CL/CLMemoryRegion.cpp
index 15fd7f333e..9578d73934 100644
--- a/src/runtime/CL/CLMemoryRegion.cpp
+++ b/src/runtime/CL/CLMemoryRegion.cpp
@@ -48,9 +48,10 @@ void *ICLMemoryRegion::buffer() const
return _mapping;
}
-void **ICLMemoryRegion::handle()
+std::unique_ptr<IMemoryRegion> ICLMemoryRegion::extract_subregion(size_t offset, size_t size)
{
- return reinterpret_cast<void **>(&_mem);
+ ARM_COMPUTE_UNUSED(offset, size);
+ return nullptr;
}
CLBufferMemoryRegion::CLBufferMemoryRegion(cl::Context ctx, cl_mem_flags flags, size_t size)
@@ -62,6 +63,12 @@ CLBufferMemoryRegion::CLBufferMemoryRegion(cl::Context ctx, cl_mem_flags flags,
}
}
+CLBufferMemoryRegion::CLBufferMemoryRegion(const cl::Buffer &buffer)
+ : ICLMemoryRegion(buffer.getInfo<CL_MEM_CONTEXT>(), buffer.getInfo<CL_MEM_SIZE>())
+{
+ _mem = buffer;
+}
+
void *CLBufferMemoryRegion::ptr()
{
return nullptr;
diff --git a/src/runtime/CL/CLTensorAllocator.cpp b/src/runtime/CL/CLTensorAllocator.cpp
index dd716f77ff..0307498335 100644
--- a/src/runtime/CL/CLTensorAllocator.cpp
+++ b/src/runtime/CL/CLTensorAllocator.cpp
@@ -28,86 +28,87 @@
#include "arm_compute/runtime/CL/CLMemoryGroup.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
-using namespace arm_compute;
+namespace arm_compute
+{
+const cl::Buffer CLTensorAllocator::_empty_buffer = cl::Buffer();
namespace
{
-std::shared_ptr<arm_compute::ICLMemoryRegion> allocate_region(cl::Context context, size_t size, cl_uint alignment)
+std::unique_ptr<ICLMemoryRegion> allocate_region(cl::Context context, size_t size, cl_uint alignment)
{
// Try fine-grain SVM
- std::shared_ptr<ICLMemoryRegion> region = std::make_shared<CLFineSVMMemoryRegion>(context, CL_MEM_READ_WRITE | CL_MEM_SVM_FINE_GRAIN_BUFFER, size, alignment);
+ std::unique_ptr<ICLMemoryRegion> region = support::cpp14::make_unique<CLFineSVMMemoryRegion>(context,
+ CL_MEM_READ_WRITE | CL_MEM_SVM_FINE_GRAIN_BUFFER,
+ size,
+ alignment);
// Try coarse-grain SVM in case of failure
if(region != nullptr && region->ptr() == nullptr)
{
- region = std::make_shared<CLCoarseSVMMemoryRegion>(context, CL_MEM_READ_WRITE, size, alignment);
+ region = support::cpp14::make_unique<CLCoarseSVMMemoryRegion>(context, CL_MEM_READ_WRITE, size, alignment);
}
// Try legacy buffer memory in case of failure
if(region != nullptr && region->ptr() == nullptr)
{
- region = std::make_shared<CLBufferMemoryRegion>(context, CL_MEM_ALLOC_HOST_PTR | CL_MEM_READ_WRITE, size);
+ region = support::cpp14::make_unique<CLBufferMemoryRegion>(context, CL_MEM_ALLOC_HOST_PTR | CL_MEM_READ_WRITE, size);
}
return region;
}
} // namespace
CLTensorAllocator::CLTensorAllocator(CLTensor *owner)
- : _associated_memory_group(nullptr), _memory(), _owner(owner)
+ : _associated_memory_group(nullptr), _memory(), _mapping(nullptr), _owner(owner)
{
}
uint8_t *CLTensorAllocator::data()
{
- ARM_COMPUTE_ERROR_ON(_memory.region() == nullptr);
- return reinterpret_cast<uint8_t *>(_memory.region()->buffer());
+ return _mapping;
}
const cl::Buffer &CLTensorAllocator::cl_data() const
{
- ARM_COMPUTE_ERROR_ON(_memory.region() == nullptr);
- return _memory.region()->cl_data();
+ return _memory.region() == nullptr ? _empty_buffer : _memory.cl_region()->cl_data();
}
void CLTensorAllocator::allocate()
{
- ARM_COMPUTE_ERROR_ON(_memory.region() == nullptr);
-
if(_associated_memory_group == nullptr)
{
- if(_memory.region()->cl_data().get() != nullptr)
+ if(_memory.region() != nullptr && _memory.cl_region()->cl_data().get() != nullptr)
{
// Memory is already allocated. Reuse it if big enough, otherwise fire an assertion
- ARM_COMPUTE_ERROR_ON_MSG(info().total_size() > _memory.region()->size(), "Reallocation of a bigger memory region is not allowed!");
+ ARM_COMPUTE_ERROR_ON_MSG(info().total_size() > _memory.region()->size(),
+ "Reallocation of a bigger memory region is not allowed!");
}
else
{
// Perform memory allocation
- _memory = CLMemory(allocate_region(CLScheduler::get().context(), info().total_size(), 0));
+ _memory.set_owned_region(allocate_region(CLScheduler::get().context(), info().total_size(), 0));
}
}
else
{
- _associated_memory_group->finalize_memory(_owner, _memory.region()->handle(), info().total_size());
- _memory.region()->set_size(info().total_size());
+ _associated_memory_group->finalize_memory(_owner, _memory, info().total_size());
}
info().set_is_resizable(false);
}
void CLTensorAllocator::free()
{
- if(_associated_memory_group == nullptr)
- {
- _memory = CLMemory();
- info().set_is_resizable(true);
- }
+ _mapping = nullptr;
+ _memory.set_region(nullptr);
+ info().set_is_resizable(true);
}
-arm_compute::Status CLTensorAllocator::import_memory(CLMemory memory)
+arm_compute::Status CLTensorAllocator::import_memory(cl::Buffer buffer)
{
- ARM_COMPUTE_ERROR_ON(_memory.region() == nullptr);
- ARM_COMPUTE_RETURN_ERROR_ON(memory.region()->cl_data().get() == nullptr);
+ ARM_COMPUTE_RETURN_ERROR_ON(buffer.get() == nullptr);
+ ARM_COMPUTE_RETURN_ERROR_ON(buffer.getInfo<CL_MEM_SIZE>() == 0);
+ ARM_COMPUTE_RETURN_ERROR_ON(buffer.getInfo<CL_MEM_CONTEXT>().get() != CLScheduler::get().context().get());
ARM_COMPUTE_RETURN_ERROR_ON(_associated_memory_group != nullptr);
- _memory = memory;
+
+ _memory.set_owned_region(support::cpp14::make_unique<CLBufferMemoryRegion>(buffer));
info().set_is_resizable(false);
return Status{};
@@ -115,11 +116,10 @@ arm_compute::Status CLTensorAllocator::import_memory(CLMemory memory)
void CLTensorAllocator::set_associated_memory_group(CLMemoryGroup *associated_memory_group)
{
- ARM_COMPUTE_ERROR_ON(_memory.region() == nullptr);
ARM_COMPUTE_ERROR_ON(associated_memory_group == nullptr);
ARM_COMPUTE_ERROR_ON(_associated_memory_group != nullptr);
- ARM_COMPUTE_ERROR_ON(_memory.region()->cl_data().get() != nullptr);
- _memory = CLMemory(std::make_shared<CLBufferMemoryRegion>(CLScheduler::get().context(), CL_MEM_ALLOC_HOST_PTR | CL_MEM_READ_WRITE, 0));
+ ARM_COMPUTE_ERROR_ON(_memory.region() != nullptr && _memory.cl_region()->cl_data().get() != nullptr);
+
_associated_memory_group = associated_memory_group;
}
@@ -136,16 +136,23 @@ void CLTensorAllocator::unlock()
uint8_t *CLTensorAllocator::map(cl::CommandQueue &q, bool blocking)
{
+ ARM_COMPUTE_ERROR_ON(_mapping != nullptr);
ARM_COMPUTE_ERROR_ON(_memory.region() == nullptr);
ARM_COMPUTE_ERROR_ON(_memory.region()->buffer() != nullptr);
- _memory.region()->map(q, blocking);
- return reinterpret_cast<uint8_t *>(_memory.region()->buffer());
+
+ _mapping = reinterpret_cast<uint8_t *>(_memory.cl_region()->map(q, blocking));
+ return _mapping;
}
void CLTensorAllocator::unmap(cl::CommandQueue &q, uint8_t *mapping)
{
- ARM_COMPUTE_UNUSED(mapping);
+ ARM_COMPUTE_ERROR_ON(_mapping == nullptr);
+ ARM_COMPUTE_ERROR_ON(_mapping != mapping);
ARM_COMPUTE_ERROR_ON(_memory.region() == nullptr);
ARM_COMPUTE_ERROR_ON(_memory.region()->buffer() == nullptr);
- _memory.region()->unmap(q);
+ ARM_COMPUTE_UNUSED(mapping);
+
+ _memory.cl_region()->unmap(q);
+ _mapping = nullptr;
}
+} // namespace arm_compute
diff --git a/src/runtime/GLES_COMPUTE/GCBufferAllocator.cpp b/src/runtime/GLES_COMPUTE/GCBufferAllocator.cpp
index cdd12c3ad5..70a1f4f8ff 100644
--- a/src/runtime/GLES_COMPUTE/GCBufferAllocator.cpp
+++ b/src/runtime/GLES_COMPUTE/GCBufferAllocator.cpp
@@ -22,10 +22,10 @@
* SOFTWARE.
*/
#include "arm_compute/runtime/GLES_COMPUTE/GCBufferAllocator.h"
-#include "arm_compute/runtime/GLES_COMPUTE/GCTensorAllocator.h"
#include "arm_compute/core/Error.h"
#include "arm_compute/core/GLES_COMPUTE/OpenGLES.h"
+#include "arm_compute/runtime/GLES_COMPUTE/GCMemoryRegion.h"
#include <cstddef>
@@ -34,24 +34,26 @@ namespace arm_compute
void *GCBufferAllocator::allocate(size_t size, size_t alignment)
{
ARM_COMPUTE_UNUSED(alignment);
- auto *gl_buffer = new GLBufferWrapper();
- ARM_COMPUTE_GL_CHECK(glBindBuffer(GL_SHADER_STORAGE_BUFFER, gl_buffer->_ssbo_name));
+
+ auto *gl_ssbo_name = new GLuint;
+ ARM_COMPUTE_GL_CHECK(glBindBuffer(GL_SHADER_STORAGE_BUFFER, *gl_ssbo_name));
ARM_COMPUTE_GL_CHECK(glBufferData(GL_SHADER_STORAGE_BUFFER, static_cast<GLsizeiptr>(size), nullptr, GL_STATIC_DRAW));
ARM_COMPUTE_GL_CHECK(glBindBuffer(GL_SHADER_STORAGE_BUFFER, 0));
- return reinterpret_cast<void *>(gl_buffer);
+ return reinterpret_cast<void *>(gl_ssbo_name);
}
void GCBufferAllocator::free(void *ptr)
{
ARM_COMPUTE_ERROR_ON(ptr == nullptr);
- auto *gl_buffer = reinterpret_cast<GLBufferWrapper *>(ptr);
- delete gl_buffer;
+ auto *gl_ssbo_name = reinterpret_cast<GLuint *>(ptr);
+ ARM_COMPUTE_GL_CHECK(glDeleteBuffers(1, gl_ssbo_name));
+ delete gl_ssbo_name;
}
std::unique_ptr<IMemoryRegion> GCBufferAllocator::make_region(size_t size, size_t alignment)
{
- ARM_COMPUTE_UNUSED(size, alignment);
- return nullptr;
+ ARM_COMPUTE_UNUSED(alignment);
+ return arm_compute::support::cpp14::make_unique<GCBufferMemoryRegion>(size);
}
} // namespace arm_compute
diff --git a/src/runtime/GLES_COMPUTE/GCMemory.cpp b/src/runtime/GLES_COMPUTE/GCMemory.cpp
new file mode 100644
index 0000000000..fed4a158a3
--- /dev/null
+++ b/src/runtime/GLES_COMPUTE/GCMemory.cpp
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/runtime/GLES_COMPUTE/GCMemory.h"
+
+#include "arm_compute/core/utils/misc/Cast.h"
+#include "arm_compute/runtime/GLES_COMPUTE/GCMemoryRegion.h"
+
+namespace arm_compute
+{
+GCMemory::GCMemory()
+ : _region(nullptr), _region_owned(nullptr)
+{
+}
+
+GCMemory::GCMemory(std::shared_ptr<IGCMemoryRegion> memory)
+ : _region(nullptr), _region_owned(std::move(memory))
+{
+ _region_owned = memory;
+ _region = _region_owned.get();
+}
+
+GCMemory::GCMemory(IGCMemoryRegion *memory)
+ : _region(memory), _region_owned(nullptr)
+{
+ _region = memory;
+}
+
+IGCMemoryRegion *GCMemory::gc_region()
+{
+ return _region;
+}
+
+IGCMemoryRegion *GCMemory::gc_region() const
+{
+ return _region;
+}
+
+IMemoryRegion *GCMemory::region()
+{
+ return _region;
+}
+
+IMemoryRegion *GCMemory::region() const
+{
+ return _region;
+}
+
+void GCMemory::set_region(IMemoryRegion *region)
+{
+ auto gc_region = utils::cast::polymorphic_downcast<IGCMemoryRegion *>(region);
+ _region_owned = nullptr;
+ _region = gc_region;
+}
+
+void GCMemory::set_owned_region(std::unique_ptr<IMemoryRegion> region)
+{
+ _region_owned = utils::cast::polymorphic_downcast_unique_ptr<IGCMemoryRegion>(std::move(region));
+ _region = _region_owned.get();
+}
+} // namespace arm_compute
diff --git a/src/runtime/GLES_COMPUTE/GCMemoryRegion.cpp b/src/runtime/GLES_COMPUTE/GCMemoryRegion.cpp
new file mode 100644
index 0000000000..45fd6e8944
--- /dev/null
+++ b/src/runtime/GLES_COMPUTE/GCMemoryRegion.cpp
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/runtime/GLES_COMPUTE/GCMemoryRegion.h"
+
+#include "arm_compute/core/Error.h"
+
+namespace arm_compute
+{
+IGCMemoryRegion::IGCMemoryRegion(size_t size)
+ : IMemoryRegion(size), _mapping(nullptr), _ssbo_name(0)
+{
+}
+
+const GLuint &IGCMemoryRegion::gc_ssbo_name() const
+{
+ return _ssbo_name;
+}
+
+void *IGCMemoryRegion::buffer()
+{
+ return _mapping;
+}
+
+void *IGCMemoryRegion::buffer() const
+{
+ return _mapping;
+}
+
+GCBufferMemoryRegion::GCBufferMemoryRegion(size_t size)
+ : IGCMemoryRegion(size)
+{
+ ARM_COMPUTE_GL_CHECK(glGenBuffers(1, &_ssbo_name));
+ ARM_COMPUTE_GL_CHECK(glBindBuffer(GL_SHADER_STORAGE_BUFFER, _ssbo_name));
+ ARM_COMPUTE_GL_CHECK(glBufferData(GL_SHADER_STORAGE_BUFFER, static_cast<GLsizeiptr>(size), nullptr, GL_STATIC_DRAW));
+ ARM_COMPUTE_GL_CHECK(glBindBuffer(GL_SHADER_STORAGE_BUFFER, 0));
+}
+
+GCBufferMemoryRegion::~GCBufferMemoryRegion()
+{
+ ARM_COMPUTE_GL_CHECK(glDeleteBuffers(1, &_ssbo_name));
+}
+
+void *GCBufferMemoryRegion::ptr()
+{
+ return nullptr;
+}
+
+void *GCBufferMemoryRegion::map(bool blocking)
+{
+ ARM_COMPUTE_ERROR_ON(_mapping != nullptr);
+ ARM_COMPUTE_UNUSED(blocking);
+
+ ARM_COMPUTE_GL_CHECK(glBindBuffer(GL_SHADER_STORAGE_BUFFER, _ssbo_name));
+ void *p = ARM_COMPUTE_GL_CHECK(glMapBufferRange(GL_SHADER_STORAGE_BUFFER, 0, static_cast<GLsizeiptr>(size()), GL_MAP_READ_BIT | GL_MAP_WRITE_BIT));
+ _mapping = reinterpret_cast<uint8_t *>(p);
+
+ return _mapping;
+}
+
+void GCBufferMemoryRegion::unmap()
+{
+ ARM_COMPUTE_ERROR_ON(_mapping == nullptr);
+
+ ARM_COMPUTE_GL_CHECK(glBindBuffer(GL_SHADER_STORAGE_BUFFER, _ssbo_name));
+ ARM_COMPUTE_GL_CHECK(glUnmapBuffer(GL_SHADER_STORAGE_BUFFER));
+ ARM_COMPUTE_GL_CHECK(glBindBuffer(GL_SHADER_STORAGE_BUFFER, 0));
+ _mapping = nullptr;
+}
+
+std::unique_ptr<IMemoryRegion> GCBufferMemoryRegion::extract_subregion(size_t offset, size_t size)
+{
+ ARM_COMPUTE_UNUSED(offset, size);
+ return nullptr;
+}
+} // namespace arm_compute
\ No newline at end of file
diff --git a/src/runtime/GLES_COMPUTE/GCTensorAllocator.cpp b/src/runtime/GLES_COMPUTE/GCTensorAllocator.cpp
index abd2b483d3..a0dd540a7c 100644
--- a/src/runtime/GLES_COMPUTE/GCTensorAllocator.cpp
+++ b/src/runtime/GLES_COMPUTE/GCTensorAllocator.cpp
@@ -26,21 +26,17 @@
#include "arm_compute/core/Error.h"
#include "arm_compute/core/TensorInfo.h"
+#include "arm_compute/runtime/GLES_COMPUTE/GCMemoryRegion.h"
#include "arm_compute/runtime/GLES_COMPUTE/GCScheduler.h"
#include "support/ToolchainSupport.h"
using namespace arm_compute;
GCTensorAllocator::GCTensorAllocator(GCTensor *owner)
- : _associated_memory_group(nullptr), _gl_buffer(), _mapping(nullptr), _owner(owner)
+ : _associated_memory_group(nullptr), _memory(), _mapping(nullptr), _owner(owner)
{
}
-GCTensorAllocator::~GCTensorAllocator()
-{
- _gl_buffer = support::cpp14::make_unique<GLBufferWrapper>();
-}
-
uint8_t *GCTensorAllocator::data()
{
return _mapping;
@@ -50,32 +46,28 @@ void GCTensorAllocator::allocate()
{
if(_associated_memory_group == nullptr)
{
- _gl_buffer = support::cpp14::make_unique<GLBufferWrapper>();
- ARM_COMPUTE_GL_CHECK(glBindBuffer(GL_SHADER_STORAGE_BUFFER, _gl_buffer->_ssbo_name));
- ARM_COMPUTE_GL_CHECK(glBufferData(GL_SHADER_STORAGE_BUFFER, static_cast<GLsizeiptr>(info().total_size()), nullptr, GL_STATIC_DRAW));
- ARM_COMPUTE_GL_CHECK(glBindBuffer(GL_SHADER_STORAGE_BUFFER, 0));
+ _memory.set_owned_region(support::cpp14::make_unique<GCBufferMemoryRegion>(info().total_size()));
}
else
{
- _associated_memory_group->finalize_memory(_owner, reinterpret_cast<void **>(&_gl_buffer), info().total_size());
+ _associated_memory_group->finalize_memory(_owner, _memory, info().total_size());
}
info().set_is_resizable(false);
}
void GCTensorAllocator::free()
{
- if(_associated_memory_group == nullptr)
- {
- _gl_buffer.reset();
- info().set_is_resizable(true);
- }
+ _mapping = nullptr;
+ _memory.set_region(nullptr);
+ info().set_is_resizable(true);
}
void GCTensorAllocator::set_associated_memory_group(GCMemoryGroup *associated_memory_group)
{
ARM_COMPUTE_ERROR_ON(associated_memory_group == nullptr);
ARM_COMPUTE_ERROR_ON(_associated_memory_group != nullptr);
- ARM_COMPUTE_ERROR_ON(_gl_buffer.get() != nullptr);
+ ARM_COMPUTE_ERROR_ON(_memory.region() != nullptr && _memory.gc_region()->gc_ssbo_name() != 0);
+
_associated_memory_group = associated_memory_group;
}
@@ -91,27 +83,23 @@ void GCTensorAllocator::unlock()
GLuint GCTensorAllocator::get_gl_ssbo_name() const
{
- return _gl_buffer->_ssbo_name;
+ return (_memory.region() == nullptr) ? static_cast<GLuint>(0) : _memory.gc_region()->gc_ssbo_name();
}
uint8_t *GCTensorAllocator::map(bool blocking)
{
ARM_COMPUTE_ERROR_ON(_mapping != nullptr);
- ARM_COMPUTE_UNUSED(blocking);
-
- ARM_COMPUTE_GL_CHECK(glBindBuffer(GL_SHADER_STORAGE_BUFFER, _gl_buffer->_ssbo_name));
- void *p = ARM_COMPUTE_GL_CHECK(glMapBufferRange(GL_SHADER_STORAGE_BUFFER, 0, static_cast<GLsizeiptr>(info().total_size()), GL_MAP_READ_BIT | GL_MAP_WRITE_BIT));
- _mapping = reinterpret_cast<uint8_t *>(p);
+ ARM_COMPUTE_ERROR_ON(_memory.region() == nullptr);
+ _mapping = reinterpret_cast<uint8_t *>(_memory.gc_region()->map(blocking));
return _mapping;
}
void GCTensorAllocator::unmap()
{
ARM_COMPUTE_ERROR_ON(_mapping == nullptr);
+ ARM_COMPUTE_ERROR_ON(_memory.region() == nullptr);
- ARM_COMPUTE_GL_CHECK(glBindBuffer(GL_SHADER_STORAGE_BUFFER, _gl_buffer->_ssbo_name));
- ARM_COMPUTE_GL_CHECK(glUnmapBuffer(GL_SHADER_STORAGE_BUFFER));
- ARM_COMPUTE_GL_CHECK(glBindBuffer(GL_SHADER_STORAGE_BUFFER, 0));
+ _memory.gc_region()->unmap();
_mapping = nullptr;
}
\ No newline at end of file
diff --git a/src/runtime/ISimpleLifetimeManager.cpp b/src/runtime/ISimpleLifetimeManager.cpp
index faaff8a63e..7d928d6a7a 100644
--- a/src/runtime/ISimpleLifetimeManager.cpp
+++ b/src/runtime/ISimpleLifetimeManager.cpp
@@ -25,6 +25,7 @@
#include "arm_compute/core/Error.h"
#include "arm_compute/runtime/IAllocator.h"
+#include "arm_compute/runtime/IMemory.h"
#include "arm_compute/runtime/IMemoryGroup.h"
#include "arm_compute/runtime/IMemoryPool.h"
#include "support/ToolchainSupport.h"
@@ -70,7 +71,7 @@ void ISimpleLifetimeManager::start_lifetime(void *obj)
_active_elements.insert(std::make_pair(obj, obj));
}
-void ISimpleLifetimeManager::end_lifetime(void *obj, void **handle, size_t size)
+void ISimpleLifetimeManager::end_lifetime(void *obj, IMemory &obj_memory, size_t size)
{
ARM_COMPUTE_ERROR_ON(obj == nullptr);
@@ -80,7 +81,7 @@ void ISimpleLifetimeManager::end_lifetime(void *obj, void **handle, size_t size)
// Update object fields and mark object as complete
Element &el = active_object_it->second;
- el.handle = handle;
+ el.handle = &obj_memory;
el.size = size;
el.status = true;
diff --git a/src/runtime/Memory.cpp b/src/runtime/Memory.cpp
index 15bbb17675..d116624679 100644
--- a/src/runtime/Memory.cpp
+++ b/src/runtime/Memory.cpp
@@ -30,17 +30,13 @@ namespace arm_compute
Memory::Memory()
: _region(nullptr), _region_owned(nullptr)
{
- create_empty_region();
}
Memory::Memory(std::shared_ptr<IMemoryRegion> memory)
: _region(nullptr), _region_owned(std::move(memory))
{
- if(_region_owned == nullptr)
- {
- create_empty_region();
- }
- _region = _region_owned.get();
+ _region_owned = memory;
+ _region = _region_owned.get();
}
Memory::Memory(IMemoryRegion *memory)
@@ -59,9 +55,15 @@ IMemoryRegion *Memory::region() const
return _region;
}
-void Memory::create_empty_region()
+void Memory::set_region(IMemoryRegion *region)
+{
+ _region_owned = nullptr;
+ _region = region;
+}
+
+void Memory::set_owned_region(std::unique_ptr<IMemoryRegion> region)
{
- _region_owned = std::make_shared<MemoryRegion>(0);
+ _region_owned = std::move(region);
_region = _region_owned.get();
}
} // namespace arm_compute
diff --git a/src/runtime/OffsetMemoryPool.cpp b/src/runtime/OffsetMemoryPool.cpp
index 96f54f890f..36eaf0ba1a 100644
--- a/src/runtime/OffsetMemoryPool.cpp
+++ b/src/runtime/OffsetMemoryPool.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -28,6 +28,7 @@
#include "arm_compute/core/Error.h"
#include "arm_compute/runtime/IAllocator.h"
#include "arm_compute/runtime/IMemoryPool.h"
+#include "arm_compute/runtime/MemoryRegion.h"
#include "arm_compute/runtime/Types.h"
#include "support/ToolchainSupport.h"
@@ -37,14 +38,7 @@ OffsetMemoryPool::OffsetMemoryPool(IAllocator *allocator, size_t blob_size)
: _allocator(allocator), _blob(), _blob_size(blob_size)
{
ARM_COMPUTE_ERROR_ON(!allocator);
- _blob = _allocator->allocate(_blob_size, 0);
-}
-
-OffsetMemoryPool::~OffsetMemoryPool()
-{
- ARM_COMPUTE_ERROR_ON(!_allocator);
- _allocator->free(_blob);
- _blob = nullptr;
+ _blob = _allocator->make_region(blob_size, 0);
}
void OffsetMemoryPool::acquire(MemoryMappings &handles)
@@ -55,7 +49,7 @@ void OffsetMemoryPool::acquire(MemoryMappings &handles)
for(auto &handle : handles)
{
ARM_COMPUTE_ERROR_ON(handle.first == nullptr);
- *handle.first = reinterpret_cast<uint8_t *>(_blob) + handle.second;
+ handle.first->set_owned_region(_blob->extract_subregion(handle.second, _blob_size - handle.second));
}
}
@@ -64,7 +58,7 @@ void OffsetMemoryPool::release(MemoryMappings &handles)
for(auto &handle : handles)
{
ARM_COMPUTE_ERROR_ON(handle.first == nullptr);
- *handle.first = nullptr;
+ handle.first->set_region(nullptr);
}
}
diff --git a/src/runtime/TensorAllocator.cpp b/src/runtime/TensorAllocator.cpp
index c84a2719d8..5fa51d7140 100644
--- a/src/runtime/TensorAllocator.cpp
+++ b/src/runtime/TensorAllocator.cpp
@@ -127,39 +127,35 @@ void TensorAllocator::init(const TensorAllocator &allocator, const Coordinates &
uint8_t *TensorAllocator::data() const
{
- ARM_COMPUTE_ERROR_ON(_memory.region() == nullptr);
- return reinterpret_cast<uint8_t *>(_memory.region()->buffer());
+ return (_memory.region() == nullptr) ? nullptr : reinterpret_cast<uint8_t *>(_memory.region()->buffer());
}
void TensorAllocator::allocate()
{
- ARM_COMPUTE_ERROR_ON(_memory.region() == nullptr);
- ARM_COMPUTE_ERROR_ON(_memory.region()->buffer() != nullptr);
-
if(_associated_memory_group == nullptr)
{
- _memory = Memory(std::make_shared<MemoryRegion>(info().total_size(), alignment()));
+ _memory.set_owned_region(support::cpp14::make_unique<MemoryRegion>(info().total_size(), alignment()));
}
else
{
- _associated_memory_group->finalize_memory(_owner, reinterpret_cast<void **>(_memory.region()->handle()), info().total_size());
- _memory.region()->set_size(info().total_size());
+ _associated_memory_group->finalize_memory(_owner, _memory, info().total_size());
}
info().set_is_resizable(false);
}
void TensorAllocator::free()
{
- _memory = Memory();
+ _memory.set_region(nullptr);
info().set_is_resizable(true);
}
-arm_compute::Status TensorAllocator::import_memory(Memory memory)
+arm_compute::Status TensorAllocator::import_memory(void *memory, size_t size)
{
- ARM_COMPUTE_ERROR_ON(_memory.region() == nullptr);
- ARM_COMPUTE_RETURN_ERROR_ON(memory.region()->buffer() == nullptr);
+ ARM_COMPUTE_RETURN_ERROR_ON(memory == nullptr);
+ ARM_COMPUTE_RETURN_ERROR_ON(size == 0);
ARM_COMPUTE_RETURN_ERROR_ON(_associated_memory_group != nullptr);
- _memory = memory;
+
+ _memory.set_owned_region(support::cpp14::make_unique<MemoryRegion>(memory, info().total_size()));
info().set_is_resizable(false);
return Status{};
@@ -167,10 +163,10 @@ arm_compute::Status TensorAllocator::import_memory(Memory memory)
void TensorAllocator::set_associated_memory_group(MemoryGroup *associated_memory_group)
{
- ARM_COMPUTE_ERROR_ON(_memory.region() == nullptr);
ARM_COMPUTE_ERROR_ON(associated_memory_group == nullptr);
ARM_COMPUTE_ERROR_ON(_associated_memory_group != nullptr);
- ARM_COMPUTE_ERROR_ON(_memory.region()->buffer() != nullptr);
+ ARM_COMPUTE_ERROR_ON(_memory.region() != nullptr && _memory.region()->buffer() != nullptr);
+
_associated_memory_group = associated_memory_group;
}