authorGeorgios Pinitas <georgios.pinitas@arm.com>2018-05-03 20:47:16 +0100
committerAnthony Barbier <anthony.barbier@arm.com>2018-11-02 16:51:50 +0000
commit3d1489de593574e65ef1e64a7ae64e4e56c2978b (patch)
treef87f3df521cb5ed8bd383dad89cbeb92c49670ac /arm_compute/graph
parent54d6fae4dbb4f556cc5ec484c51681ad84c015a7 (diff)
downloadComputeLibrary-3d1489de593574e65ef1e64a7ae64e4e56c2978b.tar.gz
COMPMID-605: Transition buffer memory manager
Change-Id: Ide7c6124eb19f13f15f517e62d705646a0cd1ecd
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/130184
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
Tested-by: Jenkins <bsgcomp@arm.com>
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
Diffstat (limited to 'arm_compute/graph')
-rw-r--r--arm_compute/graph/GraphContext.h11
-rw-r--r--arm_compute/graph/IDeviceBackend.h5
-rw-r--r--arm_compute/graph/ITensorHandle.h29
-rw-r--r--arm_compute/graph/Types.h8
-rw-r--r--arm_compute/graph/Workload.h17
-rw-r--r--arm_compute/graph/backends/CL/CLDeviceBackend.h1
-rw-r--r--arm_compute/graph/backends/CL/CLSubTensorHandle.h21
-rw-r--r--arm_compute/graph/backends/CL/CLTensorHandle.h14
-rw-r--r--arm_compute/graph/backends/GLES/GCDeviceBackend.h1
-rw-r--r--arm_compute/graph/backends/GLES/GCTensorHandle.h14
-rw-r--r--arm_compute/graph/backends/NEON/NEDeviceBackend.h1
-rw-r--r--arm_compute/graph/backends/NEON/NESubTensorHandle.h21
-rw-r--r--arm_compute/graph/backends/NEON/NETensorHandle.h14
-rw-r--r--arm_compute/graph/backends/Utils.h2
-rw-r--r--arm_compute/graph/detail/CrossLayerMemoryManagerHelpers.h52
15 files changed, 165 insertions, 46 deletions
diff --git a/arm_compute/graph/GraphContext.h b/arm_compute/graph/GraphContext.h
index 2f9ab665ce..1831cc2c8b 100644
--- a/arm_compute/graph/GraphContext.h
+++ b/arm_compute/graph/GraphContext.h
@@ -38,8 +38,10 @@ namespace graph
/** Contains structs required for memory management */
struct MemoryManagerContext
{
- Target target = { Target::UNSPECIFIED }; /**< Target */
- std::shared_ptr<arm_compute::IMemoryManager> mm = { nullptr }; /**< Memory manager */
+ Target target = { Target::UNSPECIFIED }; /**< Target */
+ std::shared_ptr<arm_compute::IMemoryManager> intra_mm = { nullptr }; /**< Intra-function memory manager */
+ std::shared_ptr<arm_compute::IMemoryManager> cross_mm = { nullptr }; /**< Cross-function memory manager */
+ std::shared_ptr<arm_compute::IMemoryGroup> cross_group = { nullptr }; /**< Cross-function memory group */
};
/** Graph context **/
@@ -82,6 +84,11 @@ public:
* @return Management context for the target if exists else nullptr
*/
MemoryManagerContext *memory_management_ctx(Target target);
+ /** Gets the memory managers map
+ *
+ * @return Memory manager contexts
+ */
+ std::map<Target, MemoryManagerContext> &memory_managers();
/** Finalizes memory managers in graph context */
void finalize();
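Editor's note (not part of the commit): the new memory_managers() accessor exposes the per-target contexts as a std::map<Target, MemoryManagerContext>. A minimal sketch of how a caller might walk it, using only the declarations shown above; the reporting itself is illustrative.

    #include <cstdio>
    #include "arm_compute/graph/GraphContext.h"

    // Walk every registered backend context and report whether a cross-function
    // (transition) memory manager has been set up for it.
    void report_transition_managers(arm_compute::graph::GraphContext &ctx)
    {
        for(auto &mm_ctx : ctx.memory_managers())
        {
            const bool has_cross = (mm_ctx.second.cross_mm != nullptr) && (mm_ctx.second.cross_group != nullptr);
            std::printf("Target %d: transition memory %s\n",
                        static_cast<int>(mm_ctx.first), has_cross ? "managed" : "unmanaged");
        }
    }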
diff --git a/arm_compute/graph/IDeviceBackend.h b/arm_compute/graph/IDeviceBackend.h
index fa6fbae1ea..f28cb1ab42 100644
--- a/arm_compute/graph/IDeviceBackend.h
+++ b/arm_compute/graph/IDeviceBackend.h
@@ -61,6 +61,11 @@ public:
* @return True if the backend is supported else false
*/
virtual bool is_backend_supported() = 0;
+ /** Gets a backend memory allocator
+ *
+ * @return Backend memory allocator
+ */
+ virtual IAllocator *backend_allocator() = 0;
/** Create a backend Tensor
*
* @param[in] tensor The tensor we want to create a backend tensor for
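Editor's note: a hedged sketch of what the new backend_allocator() hook enables. A graph-level manager can feed backend-appropriate memory into the cross-function (transition) memory manager. IMemoryManager::populate(IAllocator &, size_t) is assumed from the runtime API and is not shown in this diff; the backends namespace placement follows the backend headers further down.

    #include "arm_compute/graph/GraphContext.h"
    #include "arm_compute/graph/IDeviceBackend.h"

    // Populate the cross-function (transition) manager from the backend's allocator.
    void populate_cross_memory(arm_compute::graph::backends::IDeviceBackend &backend,
                               arm_compute::graph::MemoryManagerContext     &mm_ctx)
    {
        arm_compute::IAllocator *allocator = backend.backend_allocator();
        if(allocator != nullptr && mm_ctx.cross_mm != nullptr)
        {
            // Assumption: IMemoryManager::populate(IAllocator &allocator, size_t num_pools)
            mm_ctx.cross_mm->populate(*allocator, 1);
        }
    }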
diff --git a/arm_compute/graph/ITensorHandle.h b/arm_compute/graph/ITensorHandle.h
index cc7132e316..261ebf5474 100644
--- a/arm_compute/graph/ITensorHandle.h
+++ b/arm_compute/graph/ITensorHandle.h
@@ -25,9 +25,13 @@
#define __ARM_COMPUTE_GRAPH_ITENSORHANDLE_H__
#include "arm_compute/core/ITensor.h"
+#include "arm_compute/graph/Types.h"
namespace arm_compute
{
+// Forward declarations
+class IMemoryGroup;
+
namespace graph
{
/** Tensor handle interface object */
@@ -38,10 +42,13 @@ public:
virtual ~ITensorHandle() = default;
/** Allocates backend memory for the handle */
virtual void allocate() = 0;
- /** Backend tensor object accessor */
- virtual arm_compute::ITensor &tensor() = 0;
- /** Backend tensor object const accessor */
- virtual const arm_compute::ITensor &tensor() const = 0;
+ /** Frees backend memory of the handle */
+ virtual void free() = 0;
+ /** Set backend tensor to be managed by a memory group
+ *
+ * @param[in] mg Memory group
+ */
+ virtual void manage(IMemoryGroup *mg) = 0;
/** Maps backend tensor object
*
* @param[in] blocking Flags if the mapping operations should be blocking
@@ -58,11 +65,25 @@ public:
* on the other hand if a sub-tensor is marked as unused then the parent tensor won't be released
*/
virtual void release_if_unused() = 0;
+ /** Backend tensor object accessor */
+ virtual arm_compute::ITensor &tensor() = 0;
+ /** Backend tensor object const accessor */
+ virtual const arm_compute::ITensor &tensor() const = 0;
+ /** Returns the parent tensor handle if this is a sub-tensor, else this handle
+ *
+ * @return Parent tensor handle
+ */
+ virtual ITensorHandle *parent_handle() = 0;
/** Checks if a backing tensor is a sub-tensor object or not
*
* @return True if the backend tensor is a sub-tensor else false
*/
virtual bool is_subtensor() const = 0;
+ /** Returns target type
+ *
+ * @return Target type
+ */
+ virtual Target target() const = 0;
};
} // namespace graph
} // namespace arm_compute
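Editor's note: the reworked handle interface separates ownership (manage/allocate/free) from access (tensor/map/unmap). A minimal sketch of the intended lifecycle for a transition buffer, assuming mg is the cross-function memory group of the handle's target; the helper name is illustrative.

    #include "arm_compute/graph/ITensorHandle.h"
    #include "arm_compute/runtime/IMemoryGroup.h"

    // Register a handle with a memory group before allocation so that its backing
    // memory is acquired/released by the group rather than owned by the tensor.
    void prepare_transition_buffer(arm_compute::graph::ITensorHandle &handle, arm_compute::IMemoryGroup *mg)
    {
        // Sub-tensors borrow storage from their parent, so manage the owning handle.
        arm_compute::graph::ITensorHandle *owner = handle.is_subtensor() ? handle.parent_handle() : &handle;
        owner->manage(mg);  // defer backing memory to the memory group
        owner->allocate();  // memory is provided when the group is acquired
    }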
diff --git a/arm_compute/graph/Types.h b/arm_compute/graph/Types.h
index 02e5d92983..b195ed7eda 100644
--- a/arm_compute/graph/Types.h
+++ b/arm_compute/graph/Types.h
@@ -76,10 +76,10 @@ class TensorDescriptor;
/** Graph configuration structure */
struct GraphConfig
{
- bool use_function_memory_manager{ false }; /**< Use a memory manager to manage per-function auxiliary memory */
- bool use_transition_memory_manager{ false }; /**< Use a memory manager to manage transition buffer memory */
- bool use_tuner{ false }; /**< Use a tuner in tunable backends */
- int num_threads{ -1 }; /**< Number of threads to use (thread capable backends), if 0 the backend will auto-initialize, if -1 the backend will stay as it is. */
+ bool use_function_memory_manager{ true }; /**< Use a memory manager to manage per-function auxiliary memory */
+ bool use_transition_memory_manager{ true }; /**< Use a memory manager to manage transition buffer memory */
+ bool use_tuner{ false }; /**< Use a tuner in tunable backends */
+ int num_threads{ -1 }; /**< Number of threads to use (thread capable backends), if 0 the backend will auto-initialize, if -1 the backend will stay as it is. */
};
/**< Device target types */
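Editor's note: both memory managers now default to enabled. A short sketch of opting out, assuming the config is handed to the graph front-end in the usual way (the exact entry point is outside this diff).

    #include "arm_compute/graph/Types.h"

    arm_compute::graph::GraphConfig make_config()
    {
        arm_compute::graph::GraphConfig config{};
        config.use_transition_memory_manager = false; // disable cross-layer (transition) buffer reuse
        config.use_function_memory_manager   = true;  // keep per-function auxiliary memory reuse
        config.num_threads                   = -1;    // leave backend thread count unchanged
        return config;
    }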
diff --git a/arm_compute/graph/Workload.h b/arm_compute/graph/Workload.h
index 35066c474d..e9368eefd0 100644
--- a/arm_compute/graph/Workload.h
+++ b/arm_compute/graph/Workload.h
@@ -24,7 +24,9 @@
#ifndef __ARM_COMPUTE_GRAPH_WORKLOAD_H__
#define __ARM_COMPUTE_GRAPH_WORKLOAD_H__
+#include "arm_compute/graph/GraphContext.h"
#include "arm_compute/runtime/IFunction.h"
+#include "arm_compute/runtime/IMemoryGroup.h"
#include <functional>
#include <memory>
@@ -68,10 +70,8 @@ public:
struct ExecutionTask
{
// TODO (geopin01) : Support vector of functions?
- std::unique_ptr<arm_compute::IFunction> task = {}; /**< Task to execute */
- INode *node = {}; /**< Node bound to this workload */
- std::vector<ITensorHandle *> commit_handles = {}; /**< Handles needs to sync for this task to execute */
- std::vector<ITensorHandle *> release_handles = {}; /**< Handles that can be released after this node execution */
+ std::unique_ptr<arm_compute::IFunction> task = {}; /**< Task to execute */
+ INode *node = {}; /**< Node bound to this workload */
/** Function operator */
void operator()();
@@ -83,10 +83,11 @@ struct ExecutionTask
/** Execution workload */
struct ExecutionWorkload
{
- std::vector<Tensor *> inputs = {}; /**< Input handles */
- std::vector<Tensor *> outputs = {}; /**< Output handles */
- std::vector<ExecutionTask> tasks = {}; /**< Execution workload */
- Graph *graph = nullptr; /**< Graph bound to the workload */
+ std::vector<Tensor *> inputs = {}; /**< Input handles */
+ std::vector<Tensor *> outputs = {}; /**< Output handles */
+ std::vector<ExecutionTask> tasks = {}; /**< Execution workload */
+ Graph *graph = { nullptr }; /**< Graph bound to the workload */
+ GraphContext *ctx = { nullptr }; /**< Graph execution context */
};
} // namespace graph
} // namespace arm_compute
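Editor's note: with the per-task commit/release handle lists removed, running a workload reduces to invoking each task; acquiring and releasing transition buffers is expected to happen around the whole workload instead. Illustrative only, using just the declarations above.

    #include "arm_compute/graph/Workload.h"

    // Execute every task in submission order; each ExecutionTask wraps an IFunction.
    void run_tasks(arm_compute::graph::ExecutionWorkload &workload)
    {
        for(auto &task : workload.tasks)
        {
            task(); // ExecutionTask::operator() runs the bound function
        }
    }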
diff --git a/arm_compute/graph/backends/CL/CLDeviceBackend.h b/arm_compute/graph/backends/CL/CLDeviceBackend.h
index 5adbe0e1a8..ab39d0fb1b 100644
--- a/arm_compute/graph/backends/CL/CLDeviceBackend.h
+++ b/arm_compute/graph/backends/CL/CLDeviceBackend.h
@@ -55,6 +55,7 @@ public:
void initialize_backend() override;
void setup_backend_context(GraphContext &ctx) override;
bool is_backend_supported() override;
+ IAllocator *backend_allocator() override;
std::unique_ptr<ITensorHandle> create_tensor(const Tensor &tensor) override;
std::unique_ptr<ITensorHandle> create_subtensor(ITensorHandle *parent, TensorShape shape, Coordinates coords, bool extend_parent) override;
std::unique_ptr<arm_compute::IFunction> configure_node(INode &node, GraphContext &ctx) override;
diff --git a/arm_compute/graph/backends/CL/CLSubTensorHandle.h b/arm_compute/graph/backends/CL/CLSubTensorHandle.h
index 4be5842c70..0c515a1c53 100644
--- a/arm_compute/graph/backends/CL/CLSubTensorHandle.h
+++ b/arm_compute/graph/backends/CL/CLSubTensorHandle.h
@@ -52,18 +52,27 @@ public:
CLSubTensorHandle(CLSubTensorHandle &&) = default;
/** Allow instances of this class to be moved */
CLSubTensorHandle &operator=(CLSubTensorHandle &&) = default;
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ CLSubTensorHandle(const CLSubTensorHandle &) = delete;
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ CLSubTensorHandle &operator=(const CLSubTensorHandle &) = delete;
// Inherited overridden methods
- void allocate() override;
+ void allocate() override;
+ void free() override;
+ void manage(IMemoryGroup *mg) override;
+ void map(bool blocking) override;
+ void unmap() override;
+ void release_if_unused() override;
arm_compute::ITensor &tensor() override;
const arm_compute::ITensor &tensor() const override;
- void map(bool blocking) override;
- void unmap() override;
- void release_if_unused() override;
- bool is_subtensor() const override;
+ ITensorHandle *parent_handle() override;
+ bool is_subtensor() const override;
+ Target target() const override;
private:
- arm_compute::CLSubTensor _sub_tensor; /**< Backend Sub-Tensor */
+ arm_compute::CLSubTensor _sub_tensor; /**< Backend Sub-Tensor */
+ ITensorHandle *_parent_handle; /**< Parent handle */
};
} // namespace backends
} // namespace graph
diff --git a/arm_compute/graph/backends/CL/CLTensorHandle.h b/arm_compute/graph/backends/CL/CLTensorHandle.h
index 8f5a70cbbb..23997325d8 100644
--- a/arm_compute/graph/backends/CL/CLTensorHandle.h
+++ b/arm_compute/graph/backends/CL/CLTensorHandle.h
@@ -51,13 +51,17 @@ public:
CLTensorHandle &operator=(CLTensorHandle &&) = default;
// Inherited overridden methods
- void allocate() override;
+ void allocate() override;
+ void free() override;
+ void manage(IMemoryGroup *mg) override;
+ void map(bool blocking) override;
+ void unmap() override;
+ void release_if_unused() override;
arm_compute::ITensor &tensor() override;
const arm_compute::ITensor &tensor() const override;
- void map(bool blocking) override;
- void unmap() override;
- void release_if_unused() override;
- bool is_subtensor() const override;
+ ITensorHandle *parent_handle() override;
+ bool is_subtensor() const override;
+ Target target() const override;
private:
arm_compute::CLTensor _tensor; /**< Backend Tensor */
diff --git a/arm_compute/graph/backends/GLES/GCDeviceBackend.h b/arm_compute/graph/backends/GLES/GCDeviceBackend.h
index be81a8f1f2..dc0e2b07dc 100644
--- a/arm_compute/graph/backends/GLES/GCDeviceBackend.h
+++ b/arm_compute/graph/backends/GLES/GCDeviceBackend.h
@@ -45,6 +45,7 @@ public:
void initialize_backend() override;
void setup_backend_context(GraphContext &ctx) override;
bool is_backend_supported() override;
+ IAllocator *backend_allocator() override;
std::unique_ptr<ITensorHandle> create_tensor(const Tensor &tensor) override;
std::unique_ptr<ITensorHandle> create_subtensor(ITensorHandle *parent, TensorShape shape, Coordinates coords, bool extend_parent) override;
std::unique_ptr<arm_compute::IFunction> configure_node(INode &node, GraphContext &ctx) override;
diff --git a/arm_compute/graph/backends/GLES/GCTensorHandle.h b/arm_compute/graph/backends/GLES/GCTensorHandle.h
index 774268fd3f..29b0319d15 100644
--- a/arm_compute/graph/backends/GLES/GCTensorHandle.h
+++ b/arm_compute/graph/backends/GLES/GCTensorHandle.h
@@ -51,13 +51,17 @@ public:
GCTensorHandle &operator=(GCTensorHandle &&) = default;
// Inherited overridden methods
- void allocate() override;
+ void allocate() override;
+ void free() override;
+ void manage(IMemoryGroup *mg) override;
+ void map(bool blocking) override;
+ void unmap() override;
+ void release_if_unused() override;
arm_compute::ITensor &tensor() override;
const arm_compute::ITensor &tensor() const override;
- void map(bool blocking) override;
- void unmap() override;
- void release_if_unused() override;
- bool is_subtensor() const override;
+ ITensorHandle *parent_handle() override;
+ bool is_subtensor() const override;
+ Target target() const override;
private:
arm_compute::GCTensor _tensor; /**< Backend Tensor */
diff --git a/arm_compute/graph/backends/NEON/NEDeviceBackend.h b/arm_compute/graph/backends/NEON/NEDeviceBackend.h
index b23c83adea..c1e2e0c078 100644
--- a/arm_compute/graph/backends/NEON/NEDeviceBackend.h
+++ b/arm_compute/graph/backends/NEON/NEDeviceBackend.h
@@ -44,6 +44,7 @@ public:
void initialize_backend() override;
void setup_backend_context(GraphContext &ctx) override;
bool is_backend_supported() override;
+ IAllocator *backend_allocator() override;
std::unique_ptr<ITensorHandle> create_tensor(const Tensor &tensor) override;
std::unique_ptr<ITensorHandle> create_subtensor(ITensorHandle *parent, TensorShape shape, Coordinates coords, bool extend_parent) override;
std::unique_ptr<arm_compute::IFunction> configure_node(INode &node, GraphContext &ctx) override;
diff --git a/arm_compute/graph/backends/NEON/NESubTensorHandle.h b/arm_compute/graph/backends/NEON/NESubTensorHandle.h
index 11dcec60f3..101d3e6644 100644
--- a/arm_compute/graph/backends/NEON/NESubTensorHandle.h
+++ b/arm_compute/graph/backends/NEON/NESubTensorHandle.h
@@ -52,18 +52,27 @@ public:
NESubTensorHandle(NESubTensorHandle &&) = default;
/** Allow instances of this class to be moved */
NESubTensorHandle &operator=(NESubTensorHandle &&) = default;
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ NESubTensorHandle(const NESubTensorHandle &) = delete;
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ NESubTensorHandle &operator=(const NESubTensorHandle &) = delete;
// Inherited overridden methods
- void allocate() override;
+ void allocate() override;
+ void free() override;
+ void manage(IMemoryGroup *mg) override;
+ void map(bool blocking) override;
+ void unmap() override;
+ void release_if_unused() override;
arm_compute::ITensor &tensor() override;
const arm_compute::ITensor &tensor() const override;
- void map(bool blocking) override;
- void unmap() override;
- void release_if_unused() override;
- bool is_subtensor() const override;
+ ITensorHandle *parent_handle() override;
+ bool is_subtensor() const override;
+ Target target() const override;
private:
- arm_compute::SubTensor _sub_tensor; /**< Backend Sub-Tensor */
+ arm_compute::SubTensor _sub_tensor; /**< Backend Sub-Tensor */
+ ITensorHandle *_parent_handle; /**< Parent handle */
};
} // namespace backends
} // namespace graph
diff --git a/arm_compute/graph/backends/NEON/NETensorHandle.h b/arm_compute/graph/backends/NEON/NETensorHandle.h
index 06ccdd83cc..150e0c97c8 100644
--- a/arm_compute/graph/backends/NEON/NETensorHandle.h
+++ b/arm_compute/graph/backends/NEON/NETensorHandle.h
@@ -51,13 +51,17 @@ public:
NETensorHandle &operator=(NETensorHandle &&) = default;
// Inherited overridden methods
- void allocate() override;
+ void allocate() override;
+ void free() override;
+ void manage(IMemoryGroup *mg) override;
+ void map(bool blocking) override;
+ void unmap() override;
+ void release_if_unused() override;
arm_compute::ITensor &tensor() override;
const arm_compute::ITensor &tensor() const override;
- void map(bool blocking) override;
- void unmap() override;
- void release_if_unused() override;
- bool is_subtensor() const override;
+ ITensorHandle *parent_handle() override;
+ bool is_subtensor() const override;
+ Target target() const override;
private:
arm_compute::Tensor _tensor; /**< Backend Tensor */
diff --git a/arm_compute/graph/backends/Utils.h b/arm_compute/graph/backends/Utils.h
index b902d17c0e..c7a50d93c6 100644
--- a/arm_compute/graph/backends/Utils.h
+++ b/arm_compute/graph/backends/Utils.h
@@ -88,7 +88,7 @@ inline bool is_in_place_operation(void *input, void *output)
inline std::shared_ptr<IMemoryManager> get_memory_manager(GraphContext &ctx, Target target)
{
bool enabled = ctx.config().use_function_memory_manager && (ctx.memory_management_ctx(target) != nullptr);
- return enabled ? ctx.memory_management_ctx(target)->mm : nullptr;
+ return enabled ? ctx.memory_management_ctx(target)->intra_mm : nullptr;
}
} // namespace backends
} // namespace graph
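Editor's note: for symmetry with get_memory_manager() above, a hypothetical counterpart for the transition (cross-function) manager could look as follows. This helper is not part of the diff and is shown only to illustrate the lookup pattern.

    #include <memory>
    #include "arm_compute/graph/GraphContext.h"

    inline std::shared_ptr<arm_compute::IMemoryManager> get_transition_memory_manager(arm_compute::graph::GraphContext &ctx,
                                                                                      arm_compute::graph::Target        target)
    {
        auto      *mm_ctx  = ctx.memory_management_ctx(target);
        const bool enabled = ctx.config().use_transition_memory_manager && (mm_ctx != nullptr);
        return enabled ? mm_ctx->cross_mm : nullptr;
    }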
diff --git a/arm_compute/graph/detail/CrossLayerMemoryManagerHelpers.h b/arm_compute/graph/detail/CrossLayerMemoryManagerHelpers.h
new file mode 100644
index 0000000000..b7424c8e88
--- /dev/null
+++ b/arm_compute/graph/detail/CrossLayerMemoryManagerHelpers.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_GRAPH_DETAIL_CROSS_LAYER_MEMORY_MANAGER_HELPERS_H__
+#define __ARM_COMPUTE_GRAPH_DETAIL_CROSS_LAYER_MEMORY_MANAGER_HELPERS_H__
+
+#include <vector>
+
+namespace arm_compute
+{
+namespace graph
+{
+// Forward declarations
+class Graph;
+class GraphContext;
+struct ExecutionWorkload;
+class ITransMemoryManager;
+class ITensorHandle;
+
+namespace detail
+{
+/** Configures transition manager and execution workload
+ *
+ * @param[in] g Graph to configure
+ * @param[in] ctx Graph context
+ * @param[in] workload Workload to configure
+ */
+void configure_transition_manager(Graph &g, GraphContext &ctx, ExecutionWorkload &workload);
+} // namespace detail
+} // namespace graph
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_GRAPH_DETAIL_CROSS_LAYER_MEMORY_MANAGER_HELPERS_H__ */
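Editor's note: a hedged end-to-end sketch of where the new helper slots in. After a workload has been built for a graph, transition buffers get wired into the context's cross-function memory groups, then the context's memory managers are finalized. Graph and workload construction are not shown, and the wrapper function is illustrative.

    #include "arm_compute/graph/Graph.h"
    #include "arm_compute/graph/GraphContext.h"
    #include "arm_compute/graph/Workload.h"
    #include "arm_compute/graph/detail/CrossLayerMemoryManagerHelpers.h"

    void finalize_memory(arm_compute::graph::Graph             &g,
                         arm_compute::graph::GraphContext      &ctx,
                         arm_compute::graph::ExecutionWorkload &workload)
    {
        if(ctx.config().use_transition_memory_manager)
        {
            arm_compute::graph::detail::configure_transition_manager(g, ctx, workload);
        }
        ctx.finalize(); // finalize all registered memory managers
    }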