path: root/arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadSketch.h
author    SiCong Li <sicong.li@arm.com>  2023-01-12 12:54:49 +0000
committer SiCong Li <sicong.li@arm.com>  2023-01-20 15:21:31 +0000
commit    5a2bc0169d942f7029d73a3afff1eab18b6f65ef (patch)
tree      efa7850199b40109b73b0ee35bb8f57531c983e9 /arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadSketch.h
parent    a18d85c6d2c0025938c2dc10e553eb82c01922f2 (diff)
download  ComputeLibrary-5a2bc0169d942f7029d73a3afff1eab18b6f65ef.tar.gz
Add Auxiliary tensors
The assign_memory_descriptors method could not automatically assign Auxiliary tensors. Therefore, changes are made to allow developers to explicitly mark auxiliary tensors.

However, to avoid ambiguity between auxiliary and "intermediate" tensors, we solidify the definitions of both:

Intermediate tensors are a strictly topological term. They are the "inner" tensors within a workload, hidden from the user, as opposed to the input and output tensors exposed to the user.

Auxiliary tensors are a subcategory of intermediate tensors that additionally concern memory allocation: they are intermediate tensors that need real memory backing.

For more details please see the documentation of the MemoryType enum.

Rename MemoryType::NoAlloc to MemoryType::Virtual.

Partially resolves: COMPMID-5523

Signed-off-by: SiCong Li <sicong.li@arm.com>
Change-Id: Ibde44c2ec1570be9423e0fb38b53bb136ffc36dd
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/8940
Benchmark: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Gian Marco Iodice <gianmarco.iodice@arm.com>
Reviewed-by: Gunes Bayir <gunes.bayir@arm.com>
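Below is a minimal, illustrative usage sketch of the public API this patch touches. The GpuWorkloadContext/CLCompileContext setup is an assumption for the example and is not part of this change; only create_tensor_info() is shown by the diff.

// Illustrative sketch only: context construction is an assumption; this commit
// only changes how create_tensor_info() registers the resulting TensorInfo.
using namespace arm_compute;
using namespace arm_compute::experimental::dynamic_fusion;

auto               cl_compile_ctx = CLKernelLibrary::get().get_compile_context();
GpuWorkloadContext context{ &cl_compile_ctx };
GpuWorkloadSketch  sketch{ &context };

// Tensor infos created through the sketch are now registered with it:
// each receives a unique id and has its memory descriptor recorded in the sketch.
TensorInfo src_info = sketch.create_tensor_info(TensorShape(16U, 16U, 3U), 1, DataType::F32);
TensorInfo dst_info = sketch.create_tensor_info(); // default info, shape resolved by the operator later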
Diffstat (limited to 'arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadSketch.h')
-rw-r--r--  arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadSketch.h  17
1 file changed, 6 insertions(+), 11 deletions(-)
diff --git a/arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadSketch.h b/arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadSketch.h
index f19ad6dfc5..422edb35f1 100644
--- a/arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadSketch.h
+++ b/arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadSketch.h
@@ -70,18 +70,9 @@ public:
     TensorInfo create_tensor_info(Args &&... args)
     {
         auto tensor_info = TensorInfo(std::forward<Args>(args)...);
-        tensor_info.set_id(allocate_new_tensor_id());
+        register_new_tensor(tensor_info);
         return tensor_info;
     }
-    /** Create a @ref TensorInfo associated with the workload sketch by copying from an existing tensor info
-     * @note The newly copied tensor will have a different identity within the workload than the one copied from
-     *       To copy the identity of @p tensor_info as well, use @ref TensorInfo 's copy constructors instead
-     *
-     * @param[in] tensor_info @ref ITensorInfo to copy from
-     *
-     * @return TensorInfo Newly created tensor info
-     */
-    TensorInfo create_tensor_info(const ITensorInfo &tensor_info);
     /** Create a default @ref TensorInfo associated with the workload sketch
      * It is usually used by user input or output tensors
      *
@@ -90,7 +81,11 @@ public:
     TensorInfo create_tensor_info();
 
 private:
-    ITensorInfo::Id allocate_new_tensor_id();
+    /** Register a new tensor by setting a new id to it and register its memory descriptor in the sketch
+     *
+     * @param[in,out] tensor_info @ref ITensorInfo that will be registered
+     */
+    void register_new_tensor(ITensorInfo &tensor_info);
     std::unique_ptr<Implementation> _impl; /**< Internal opaque implementation*/
 };
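
For reference, a conceptual sketch of what the new private helper is documented to do (assign a fresh id and record a memory descriptor in the sketch). The Implementation-side helper names and the MemoryDescriptor/MemoryType::User default are assumptions, not necessarily the code in this commit:

// Conceptual sketch only. allocate_new_tensor_id(), register_memory_descriptor()
// and MemoryDescriptor{ MemoryType::User } are assumed names/defaults.
void GpuWorkloadSketch::register_new_tensor(ITensorInfo &tensor_info)
{
    // Give the tensor a unique identity within this sketch's workload
    tensor_info.set_id(_impl->allocate_new_tensor_id());
    // Tensor infos created directly by the user are workload inputs/outputs, so they
    // need real memory backing, unlike Virtual (formerly NoAlloc) intermediate tensors
    _impl->register_memory_descriptor(tensor_info, MemoryDescriptor{ MemoryType::User });
}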