author    Georgios Pinitas <georgios.pinitas@arm.com>  2018-05-03 20:47:16 +0100
committer Anthony Barbier <anthony.barbier@arm.com>    2018-11-02 16:51:50 +0000
commit    3d1489de593574e65ef1e64a7ae64e4e56c2978b (patch)
tree      f87f3df521cb5ed8bd383dad89cbeb92c49670ac /src/graph/backends
parent    54d6fae4dbb4f556cc5ec484c51681ad84c015a7 (diff)
COMPMID-605: Transition buffer memory manager
Change-Id: Ide7c6124eb19f13f15f517e62d705646a0cd1ecd
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/130184
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
Tested-by: Jenkins <bsgcomp@arm.com>
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
Diffstat (limited to 'src/graph/backends')
 src/graph/backends/CL/CLDeviceBackend.cpp     | 12
 src/graph/backends/CL/CLSubTensorHandle.cpp   | 33
 src/graph/backends/CL/CLTensorHandle.cpp      | 35
 src/graph/backends/GLES/GCDeviceBackend.cpp   | 12
 src/graph/backends/GLES/GCTensorHandle.cpp    | 35
 src/graph/backends/NEON/NEDeviceBackend.cpp   | 12
 src/graph/backends/NEON/NESubTensorHandle.cpp | 35
 src/graph/backends/NEON/NETensorHandle.cpp    | 35
 8 files changed, 180 insertions(+), 29 deletions(-)
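
[Editor's note] For orientation, here is a minimal sketch of the MemoryManagerContext shape the hunks below rely on. Field types are inferred from the assignments in the setup_backend_context() hunks; the real definition lives in the graph headers, so treat this as an assumption, not the verbatim struct.

    // Sketch only: types inferred from usage in the device backends below.
    struct MemoryManagerContext
    {
        Target                                       target{ Target::UNSPECIFIED }; // backend this context serves
        std::shared_ptr<arm_compute::IMemoryManager> intra_mm{ nullptr };    // manages memory within a layer
        std::shared_ptr<arm_compute::IMemoryManager> cross_mm{ nullptr };    // manages transition buffers across layers
        std::shared_ptr<arm_compute::IMemoryGroup>   cross_group{ nullptr }; // group serviced by cross_mm
    };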
diff --git a/src/graph/backends/CL/CLDeviceBackend.cpp b/src/graph/backends/CL/CLDeviceBackend.cpp
index 37cbcd72d7..7f2be674f6 100644
--- a/src/graph/backends/CL/CLDeviceBackend.cpp
+++ b/src/graph/backends/CL/CLDeviceBackend.cpp
@@ -37,6 +37,7 @@
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/runtime/BlobLifetimeManager.h"
#include "arm_compute/runtime/CL/CLBufferAllocator.h"
+#include "arm_compute/runtime/CL/CLMemoryGroup.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "arm_compute/runtime/MemoryManagerOnDemand.h"
#include "arm_compute/runtime/PoolManager.h"
@@ -107,8 +108,10 @@ void CLDeviceBackend::setup_backend_context(GraphContext &ctx)
if(ctx.memory_management_ctx(Target::CL) == nullptr)
{
MemoryManagerContext mm_ctx;
- mm_ctx.target = Target::CL;
- mm_ctx.mm = create_memory_manager(MemoryManagerAffinity::Buffer);
+ mm_ctx.target = Target::CL;
+ mm_ctx.intra_mm = create_memory_manager(MemoryManagerAffinity::Buffer);
+ mm_ctx.cross_mm = create_memory_manager(MemoryManagerAffinity::Buffer);
+ mm_ctx.cross_group = std::make_shared<CLMemoryGroup>(mm_ctx.cross_mm);
ctx.insert_memory_management_ctx(std::move(mm_ctx));
}
@@ -119,6 +122,11 @@ bool CLDeviceBackend::is_backend_supported()
return arm_compute::opencl_is_available();
}
+IAllocator *CLDeviceBackend::backend_allocator()
+{
+ return &_allocator;
+}
+
std::unique_ptr<ITensorHandle> CLDeviceBackend::create_tensor(const Tensor &tensor)
{
// Get tensor descriptor
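
[Editor's note] The new backend_allocator() accessor exposes the device allocator so graph-level code can back the cross memory manager's pools. A hedged sketch of such a caller; populate(IAllocator &, size_t) is assumed from later Compute Library releases, and the pool count is illustrative:

    // Hypothetical caller, not part of this patch.
    void finalize_cross_memory(IDeviceBackend &backend, MemoryManagerContext &mm_ctx)
    {
        // Assumed API: fill the cross memory manager's pool from the device allocator.
        mm_ctx.cross_mm->populate(*backend.backend_allocator(), 1 /* num_pools, illustrative */);
    }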
diff --git a/src/graph/backends/CL/CLSubTensorHandle.cpp b/src/graph/backends/CL/CLSubTensorHandle.cpp
index a1bc8a1dd3..016dca753b 100644
--- a/src/graph/backends/CL/CLSubTensorHandle.cpp
+++ b/src/graph/backends/CL/CLSubTensorHandle.cpp
@@ -32,11 +32,12 @@ namespace graph
namespace backends
{
CLSubTensorHandle::CLSubTensorHandle(ITensorHandle *parent_handle, const TensorShape &shape, const Coordinates &coords, bool extend_parent)
- : _sub_tensor()
+ : _sub_tensor(), _parent_handle(nullptr)
{
ARM_COMPUTE_ERROR_ON(!parent_handle);
auto parent_tensor = arm_compute::utils::cast::polymorphic_downcast<ICLTensor *>(&parent_handle->tensor());
_sub_tensor = arm_compute::CLSubTensor(parent_tensor, shape, coords, extend_parent);
+ _parent_handle = parent_handle;
}
void CLSubTensorHandle::allocate()
@@ -44,14 +45,15 @@ void CLSubTensorHandle::allocate()
// noop
}
-const arm_compute::ITensor &CLSubTensorHandle::tensor() const
+void CLSubTensorHandle::free()
{
- return _sub_tensor;
+ // noop
}
-arm_compute::ITensor &CLSubTensorHandle::tensor()
+void CLSubTensorHandle::manage(IMemoryGroup *mg)
{
- return _sub_tensor;
+ ARM_COMPUTE_UNUSED(mg);
+ // noop
}
void CLSubTensorHandle::map(bool blocking)
@@ -69,10 +71,31 @@ void CLSubTensorHandle::release_if_unused()
// noop
}
+const arm_compute::ITensor &CLSubTensorHandle::tensor() const
+{
+ return _sub_tensor;
+}
+
+arm_compute::ITensor &CLSubTensorHandle::tensor()
+{
+ return _sub_tensor;
+}
+
+ITensorHandle *CLSubTensorHandle::parent_handle()
+{
+ ARM_COMPUTE_ERROR_ON(_parent_handle == nullptr);
+ return _parent_handle->parent_handle();
+}
+
bool CLSubTensorHandle::is_subtensor() const
{
return true;
}
+
+Target CLSubTensorHandle::target() const
+{
+ return Target::CL;
+}
} // namespace backends
} // namespace graph
} // namespace arm_compute
\ No newline at end of file
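
[Editor's note] Note that parent_handle() recurses: a sub-tensor of a sub-tensor still resolves to the one concrete handle that owns the storage, because concrete handles (see CLTensorHandle below) return this. An illustration:

    // Illustration only: regardless of nesting depth, this yields the backing
    // CLTensorHandle, the only handle in the chain that actually allocates.
    ITensorHandle *resolve_root(ITensorHandle &handle)
    {
        return handle.parent_handle();
    }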
diff --git a/src/graph/backends/CL/CLTensorHandle.cpp b/src/graph/backends/CL/CLTensorHandle.cpp
index 563c4d9ac6..219d9d0301 100644
--- a/src/graph/backends/CL/CLTensorHandle.cpp
+++ b/src/graph/backends/CL/CLTensorHandle.cpp
@@ -23,6 +23,9 @@
*/
#include "arm_compute/graph/backends/CL/CLTensorHandle.h"
+#include "arm_compute/core/utils/misc/Cast.h"
+#include "arm_compute/runtime/CL/CLMemoryGroup.h"
+
namespace arm_compute
{
namespace graph
@@ -40,14 +43,18 @@ void CLTensorHandle::allocate()
_tensor.allocator()->allocate();
}
-const arm_compute::ITensor &CLTensorHandle::tensor() const
+void CLTensorHandle::free()
{
- return _tensor;
+ _tensor.allocator()->free();
}
-arm_compute::ITensor &CLTensorHandle::tensor()
+void CLTensorHandle::manage(IMemoryGroup *mg)
{
- return _tensor;
+ if(mg != nullptr)
+ {
+ auto *cl_mg = arm_compute::utils::cast::polymorphic_downcast<CLMemoryGroup *>(mg);
+ cl_mg->manage(&_tensor);
+ }
}
void CLTensorHandle::map(bool blocking)
@@ -69,10 +76,30 @@ void CLTensorHandle::release_if_unused()
}
}
+const arm_compute::ITensor &CLTensorHandle::tensor() const
+{
+ return _tensor;
+}
+
+arm_compute::ITensor &CLTensorHandle::tensor()
+{
+ return _tensor;
+}
+
+ITensorHandle *CLTensorHandle::parent_handle()
+{
+ return this;
+}
+
bool CLTensorHandle::is_subtensor() const
{
return false;
}
+
+Target CLTensorHandle::target() const
+{
+ return Target::CL;
+}
} // namespace backends
} // namespace graph
} // namespace arm_compute
\ No newline at end of file
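
[Editor's note] manage() is where a transition buffer meets the cross memory group: the handle downcasts the group to the backend-specific CLMemoryGroup and registers its tensor, deferring the actual backing memory to the memory manager. A sketch of the calling side, with hypothetical names:

    // Hypothetical graph-side helper, not from this patch.
    void prepare_transition_buffer(MemoryManagerContext &mm_ctx, ITensorHandle &handle)
    {
        handle.manage(mm_ctx.cross_group.get()); // noop for sub-tensor handles
        handle.allocate();                       // backing memory now comes via cross_mm
    }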
diff --git a/src/graph/backends/GLES/GCDeviceBackend.cpp b/src/graph/backends/GLES/GCDeviceBackend.cpp
index 0185598965..770cca5d42 100644
--- a/src/graph/backends/GLES/GCDeviceBackend.cpp
+++ b/src/graph/backends/GLES/GCDeviceBackend.cpp
@@ -36,6 +36,7 @@
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/runtime/BlobLifetimeManager.h"
#include "arm_compute/runtime/GLES_COMPUTE/GCBufferAllocator.h"
+#include "arm_compute/runtime/GLES_COMPUTE/GCMemoryGroup.h"
#include "arm_compute/runtime/GLES_COMPUTE/GCScheduler.h"
#include "arm_compute/runtime/MemoryManagerOnDemand.h"
#include "arm_compute/runtime/PoolManager.h"
@@ -68,8 +69,10 @@ void GCDeviceBackend::setup_backend_context(GraphContext &ctx)
if(ctx.memory_management_ctx(Target::GC) == nullptr)
{
MemoryManagerContext mm_ctx;
- mm_ctx.target = Target::GC;
- mm_ctx.mm = create_memory_manager(MemoryManagerAffinity::Buffer);
+ mm_ctx.target = Target::GC;
+ mm_ctx.intra_mm = create_memory_manager(MemoryManagerAffinity::Buffer);
+ mm_ctx.cross_mm = create_memory_manager(MemoryManagerAffinity::Buffer);
+ mm_ctx.cross_group = std::make_shared<GCMemoryGroup>(mm_ctx.cross_mm);
ctx.insert_memory_management_ctx(std::move(mm_ctx));
}
@@ -80,6 +83,11 @@ bool GCDeviceBackend::is_backend_supported()
return arm_compute::opengles31_is_available();
}
+IAllocator *GCDeviceBackend::backend_allocator()
+{
+ return &_allocator;
+}
+
std::unique_ptr<ITensorHandle> GCDeviceBackend::create_tensor(const Tensor &tensor)
{
// Get tensor descriptor
diff --git a/src/graph/backends/GLES/GCTensorHandle.cpp b/src/graph/backends/GLES/GCTensorHandle.cpp
index ae7c778130..4e5c652120 100644
--- a/src/graph/backends/GLES/GCTensorHandle.cpp
+++ b/src/graph/backends/GLES/GCTensorHandle.cpp
@@ -23,6 +23,9 @@
*/
#include "arm_compute/graph/backends/GLES/GCTensorHandle.h"
+#include "arm_compute/core/utils/misc/Cast.h"
+#include "arm_compute/runtime/GLES_COMPUTE/GCMemoryGroup.h"
+
namespace arm_compute
{
namespace graph
@@ -40,14 +43,18 @@ void GCTensorHandle::allocate()
_tensor.allocator()->allocate();
}
-const arm_compute::ITensor &GCTensorHandle::tensor() const
+void GCTensorHandle::free()
{
- return _tensor;
+ _tensor.allocator()->free();
}
-arm_compute::ITensor &GCTensorHandle::tensor()
+void GCTensorHandle::manage(IMemoryGroup *mg)
{
- return _tensor;
+ if(mg != nullptr)
+ {
+ auto *gc_mg = arm_compute::utils::cast::polymorphic_downcast<GCMemoryGroup *>(mg);
+ gc_mg->manage(&_tensor);
+ }
}
void GCTensorHandle::map(bool blocking)
@@ -69,10 +76,30 @@ void GCTensorHandle::release_if_unused()
}
}
+const arm_compute::ITensor &GCTensorHandle::tensor() const
+{
+ return _tensor;
+}
+
+arm_compute::ITensor &GCTensorHandle::tensor()
+{
+ return _tensor;
+}
+
+ITensorHandle *GCTensorHandle::parent_handle()
+{
+ return this;
+}
+
bool GCTensorHandle::is_subtensor() const
{
return false;
}
+
+Target GCTensorHandle::target() const
+{
+ return Target::GC;
+}
} // namespace backends
} // namespace graph
} // namespace arm_compute
\ No newline at end of file
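
[Editor's note] All three manage() overrides rely on arm_compute::utils::cast::polymorphic_downcast. It behaves like a static_cast that additionally verifies the dynamic type in debug builds; a simplified sketch, not the verbatim utility:

    // Simplified sketch: cheap cast in release builds, type-checked in debug.
    template <typename Target, typename Source>
    inline Target polymorphic_downcast(Source *v)
    {
        ARM_COMPUTE_ERROR_ON(dynamic_cast<Target>(v) != static_cast<Target>(v));
        return static_cast<Target>(v);
    }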
diff --git a/src/graph/backends/NEON/NEDeviceBackend.cpp b/src/graph/backends/NEON/NEDeviceBackend.cpp
index def6c39003..7c2db40260 100644
--- a/src/graph/backends/NEON/NEDeviceBackend.cpp
+++ b/src/graph/backends/NEON/NEDeviceBackend.cpp
@@ -37,6 +37,7 @@
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/runtime/Allocator.h"
#include "arm_compute/runtime/BlobLifetimeManager.h"
+#include "arm_compute/runtime/MemoryGroup.h"
#include "arm_compute/runtime/MemoryManagerOnDemand.h"
#include "arm_compute/runtime/OffsetLifetimeManager.h"
#include "arm_compute/runtime/PoolManager.h"
@@ -74,8 +75,10 @@ void NEDeviceBackend::setup_backend_context(GraphContext &ctx)
if(ctx.memory_management_ctx(Target::NEON) == nullptr)
{
MemoryManagerContext mm_ctx;
- mm_ctx.target = Target::NEON;
- mm_ctx.mm = create_memory_manager(MemoryManagerAffinity::Buffer);
+ mm_ctx.target = Target::NEON;
+ mm_ctx.intra_mm = create_memory_manager(MemoryManagerAffinity::Offset);
+ mm_ctx.cross_mm = create_memory_manager(MemoryManagerAffinity::Offset);
+ mm_ctx.cross_group = std::make_shared<MemoryGroup>(mm_ctx.cross_mm);
ctx.insert_memory_management_ctx(std::move(mm_ctx));
}
@@ -86,6 +89,11 @@ bool NEDeviceBackend::is_backend_supported()
return true;
}
+IAllocator *NEDeviceBackend::backend_allocator()
+{
+ return &_allocator;
+}
+
std::unique_ptr<ITensorHandle> NEDeviceBackend::create_tensor(const Tensor &tensor)
{
// Get tensor descriptor
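
[Editor's note] Unlike CL and GLES, NEON creates both managers with MemoryManagerAffinity::Offset, which matches the OffsetLifetimeManager include added above: one contiguous blob with per-tensor offsets rather than one blob per lifetime bucket. A hedged sketch of the dispatch presumably inside create_memory_manager, based only on the headers each backend includes:

    // Assumed shape of the factory; not the verbatim implementation.
    std::shared_ptr<IMemoryManager> create_memory_manager_sketch(MemoryManagerAffinity affinity)
    {
        std::shared_ptr<ILifetimeManager> lifetime_mgr;
        if(affinity == MemoryManagerAffinity::Buffer)
        {
            lifetime_mgr = std::make_shared<BlobLifetimeManager>(); // blob per lifetime bucket
        }
        else
        {
            lifetime_mgr = std::make_shared<OffsetLifetimeManager>(); // single blob + offsets
        }
        auto pool_mgr = std::make_shared<PoolManager>();
        return std::make_shared<MemoryManagerOnDemand>(lifetime_mgr, pool_mgr);
    }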
diff --git a/src/graph/backends/NEON/NESubTensorHandle.cpp b/src/graph/backends/NEON/NESubTensorHandle.cpp
index c48ba6b9d6..c0acedd9f2 100644
--- a/src/graph/backends/NEON/NESubTensorHandle.cpp
+++ b/src/graph/backends/NEON/NESubTensorHandle.cpp
@@ -30,10 +30,11 @@ namespace graph
namespace backends
{
NESubTensorHandle::NESubTensorHandle(ITensorHandle *parent_handle, const TensorShape &shape, const Coordinates &coords, bool extend_parent)
- : _sub_tensor()
+ : _sub_tensor(), _parent_handle(nullptr)
{
ARM_COMPUTE_ERROR_ON(!parent_handle);
- _sub_tensor = arm_compute::SubTensor(&parent_handle->tensor(), shape, coords, extend_parent);
+ _sub_tensor = arm_compute::SubTensor(&parent_handle->tensor(), shape, coords, extend_parent);
+ _parent_handle = parent_handle;
}
void NESubTensorHandle::allocate()
@@ -41,14 +42,15 @@ void NESubTensorHandle::allocate()
// noop
}
-const arm_compute::ITensor &NESubTensorHandle::tensor() const
+void NESubTensorHandle::free()
{
- return _sub_tensor;
+ // noop
}
-arm_compute::ITensor &NESubTensorHandle::tensor()
+void NESubTensorHandle::manage(IMemoryGroup *mg)
{
- return _sub_tensor;
+ ARM_COMPUTE_UNUSED(mg);
+ // noop
}
void NESubTensorHandle::map(bool blocking)
@@ -66,10 +68,31 @@ void NESubTensorHandle::release_if_unused()
// noop
}
+const arm_compute::ITensor &NESubTensorHandle::tensor() const
+{
+ return _sub_tensor;
+}
+
+arm_compute::ITensor &NESubTensorHandle::tensor()
+{
+ return _sub_tensor;
+}
+
+ITensorHandle *NESubTensorHandle::parent_handle()
+{
+ ARM_COMPUTE_ERROR_ON(_parent_handle == nullptr);
+ return _parent_handle->parent_handle();
+}
+
bool NESubTensorHandle::is_subtensor() const
{
return true;
}
+
+Target NESubTensorHandle::target() const
+{
+ return Target::NEON;
+}
} // namespace backends
} // namespace graph
} // namespace arm_compute
\ No newline at end of file
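
[Editor's note] As on the CL side, allocate(), free(), and manage() are deliberate noops here: a sub-tensor aliases its parent's storage, so only the root handle ever touches memory. A hypothetical allocation pass that leans on this, with illustrative names:

    // Illustrative pass over a graph's tensor handles.
    for(auto &handle : tensor_handles)
    {
        if(!handle->is_subtensor())
        {
            handle->manage(cross_group); // sub-tensors would ignore this anyway
            handle->allocate();
        }
    }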
diff --git a/src/graph/backends/NEON/NETensorHandle.cpp b/src/graph/backends/NEON/NETensorHandle.cpp
index 8508ac9511..5892116caf 100644
--- a/src/graph/backends/NEON/NETensorHandle.cpp
+++ b/src/graph/backends/NEON/NETensorHandle.cpp
@@ -23,6 +23,9 @@
*/
#include "arm_compute/graph/backends/NEON/NETensorHandle.h"
+#include "arm_compute/core/utils/misc/Cast.h"
+#include "arm_compute/runtime/MemoryGroup.h"
+
namespace arm_compute
{
namespace graph
@@ -40,14 +43,18 @@ void NETensorHandle::allocate()
_tensor.allocator()->allocate();
}
-const arm_compute::ITensor &NETensorHandle::tensor() const
+void NETensorHandle::free()
{
- return _tensor;
+ _tensor.allocator()->free();
}
-arm_compute::ITensor &NETensorHandle::tensor()
+void NETensorHandle::manage(IMemoryGroup *mg)
{
- return _tensor;
+ if(mg != nullptr)
+ {
+ auto *ne_mg = arm_compute::utils::cast::polymorphic_downcast<MemoryGroup *>(mg);
+ ne_mg->manage(&_tensor);
+ }
}
void NETensorHandle::map(bool blocking)
@@ -68,10 +75,30 @@ void NETensorHandle::release_if_unused()
}
}
+const arm_compute::ITensor &NETensorHandle::tensor() const
+{
+ return _tensor;
+}
+
+arm_compute::ITensor &NETensorHandle::tensor()
+{
+ return _tensor;
+}
+
+ITensorHandle *NETensorHandle::parent_handle()
+{
+ return this;
+}
+
bool NETensorHandle::is_subtensor() const
{
return false;
}
+
+Target NETensorHandle::target() const
+{
+ return Target::NEON;
+}
} // namespace backends
} // namespace graph
} // namespace arm_compute
\ No newline at end of file
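
[Editor's note] Finally, the new target() accessor lets graph code map any handle back to its backend's memory context; GraphContext::memory_management_ctx(Target) is the same lookup the device backends use above:

    // Sketch: recover the per-backend managers for an arbitrary handle.
    MemoryManagerContext *mm_ctx = ctx.memory_management_ctx(handle->target());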