path: root/src/cpu/utils/CpuAuxTensorHandler.h
author Gunes Bayir <gunes.bayir@arm.com> 2024-03-04 14:55:24 +0000
committer Gunes Bayir <gunes.bayir@arm.com> 2024-03-04 16:37:14 +0000
commit bf053734c468ebc9fd2e535ab8c357b55fdaad43 (patch)
tree 85a292b8a31c3bec2c3f4d32a5cec6ac48741a36 /src/cpu/utils/CpuAuxTensorHandler.h
parent 6fe9eafe0707387e65f9b3c188f4145f64415ce3 (diff)
Fix performance regression in fixed-format kernels
Fix the performance regression in CpuGemmConv2d caused by importing memory at every run for fixed-format kernels. This is done by adding a bypass_import parameter to the auxiliary tensor handler class (CpuAuxTensorHandler) and using it in CpuGemmConv2d, so that the memory import happens only when the associated tensor is used in the gemm pack. Also improve the documentation of CpuAuxTensorHandler.

Resolves: ARMCL-1126
Co-authored-by: SiCong Li <sicong.li@arm.com>
Change-Id: Idb26bdb2d19419074a6e7f2497a1741ae200603f
Signed-off-by: Gunes Bayir <gunes.bayir@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/11240
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Pablo Marquez Tello <pablo.tello@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Benchmark: Arm Jenkins <bsgcomp@arm.com>
Diffstat (limited to 'src/cpu/utils/CpuAuxTensorHandler.h')
-rw-r--r-- src/cpu/utils/CpuAuxTensorHandler.h | 78
1 file changed, 72 insertions(+), 6 deletions(-)
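The pattern this commit introduces, as a hedged sketch (not code from the patch itself; the slot id, tensor names, operator, and condition below are hypothetical, and only the CpuAuxTensorHandler constructor signature comes from this diff): tie both bypass flags to the inverse of actual use, so the expensive import is skipped on runs where the aux tensor never joins the gemm pack.

    // Hedged sketch of a CpuGemmConv2d-style call site; names are illustrative.
    constexpr int kAuxSlot = 0;                    // hypothetical slot id
    const bool use_aux = !is_fixed_format_kernel;  // hypothetical condition: fixed-format
                                                   // runs never touch the aux tensor
    CpuAuxTensorHandler aux(kAuxSlot, aux_info, run_pack,
                            /* pack_inject   */ false,
                            /* bypass_alloc  */ !use_aux,
                            /* bypass_import */ !use_aux);
    if (use_aux)
    {
        run_pack.add_tensor(kAuxSlot, aux.get()); // aux tensor joins the gemm pack
    }
    op.run(run_pack); // the import happened if and only if the tensor is used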
diff --git a/src/cpu/utils/CpuAuxTensorHandler.h b/src/cpu/utils/CpuAuxTensorHandler.h
index 0a39fdba81..3b980ce60b 100644
--- a/src/cpu/utils/CpuAuxTensorHandler.h
+++ b/src/cpu/utils/CpuAuxTensorHandler.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -35,12 +35,74 @@ namespace arm_compute
{
namespace cpu
{
-/* Tensor handler to wrap and handle tensor allocations on workspace buffers */
+/** Tensor handler to wrap and handle tensor allocations on workspace buffers
+ *
+ * @note Important: Despite the impression given by its name, the handler owns, rather than merely points to, the
+ * underlying tensor memory.
+ *
+ * @note About memory handling using bypass_* flags
+ * The bypass_alloc / bypass_import flags are meant to skip the expensive auxiliary tensor memory allocations or
+ * imports that are not needed during runtime, e.g. when the handler is not used at all in some branch of execution.
+ *
+ * If not handled correctly, these two flags can lead to performance issues (failing to bypass when we should) or
+ * memory bugs (bypassing when we should not).
+ *
+ * Make sure:
+ *
+ * 1. The aux tensor handlers must always be declared at the root level, or the same level as the run/prepare
+ * methods that potentially use them.
+ *
+ * Once the handler is destroyed (e.g. when going out of scope), the memory it owns (returned by the get()
+ * method) will also be destroyed.
+ *
+ * Thus it is important to ensure the handler is still in scope whenever it is used by an operator / kernel.
+ *
+ * 2. The handler's bypass_alloc and bypass_import flags should always be the inverse of whether the handler is
+ * used in its surrounding scope by run/prepare (this usually means being added to some tensor pack).
+ *
+ * This ensures we bypass if and only if the aux tensor is not used by the op / kernel later.
+ *
+ *
+ * So the general usage pattern goes like this:
+ *
+ * bool use_aux_tensor = some_condition_about_when_to_use_the_aux_tensor;
+ *
+ * CpuAuxTensorHandler aux_handler {..., !use_aux_tensor}; // i.e. bypass_alloc / bypass_import
+ *
+ * if (use_aux_tensor)
+ * {
+ * tensor_pack.add_tensor(aux_handler.get());
+ * }
+ * op.run(tensor_pack);
+ */
class CpuAuxTensorHandler
{
public:
- CpuAuxTensorHandler(
- int slot_id, TensorInfo &info, ITensorPack &pack, bool pack_inject = false, bool bypass_alloc = false)
+ /** Create a temporary tensor handle, by either importing an existing tensor from a tensor pack or allocating a
+ * new one.
+ *
+ * @param[in] slot_id Slot id of the tensor to be retrieved from the tensor pack.
+ * If no such tensor exists in the tensor pack, a new tensor will be allocated.
+ * @param[in] info Tensor info containing the requested size of the new tensor.
+ * If the requested size is larger than the tensor retrieved from the tensor pack,
+ * a new tensor will be allocated.
+ * @param[in,out] pack Tensor pack from which to retrieve the old tensor. When @p pack_inject is true, the new
+ * tensor will also be added to it.
+ * @param[in] pack_inject In case of a newly allocated tensor, whether to add this tensor back to the
+ * @p pack.
+ * @param[in] bypass_alloc Bypass memory allocation in case of a new tensor.
+ * This is to prevent unnecessary memory operations when the handler object is not
+ * used.
+ * @param[in] bypass_import Bypass the memory import in case of a retrieved tensor.
+ * This is to prevent unnecessary memory operations when the handler object is not
+ * used.
+ */
+ CpuAuxTensorHandler(int slot_id,
+ TensorInfo &info,
+ ITensorPack &pack,
+ bool pack_inject = false,
+ bool bypass_alloc = false,
+ bool bypass_import = false)
: _tensor()
{
if (info.total_size() == 0)
@@ -67,7 +129,10 @@ public:
}
else
{
- _tensor.allocator()->import_memory(packed_tensor->buffer());
+ if (!bypass_import)
+ {
+ _tensor.allocator()->import_memory(packed_tensor->buffer());
+ }
}
}
@@ -76,7 +141,8 @@ public:
*
* @param[in] info New tensor info to "assign" to @p tensor
* @param[in] tensor Tensor to be assigned a new @ref TensorInfo
- * @param[in] bypass_import Bypass importing @p tensor's memory into the handler
+ * @param[in] bypass_import Bypass importing @p tensor's memory into the handler.
+ * This is to prevent unnecessary memory operations when the handler object is not used.
*/
CpuAuxTensorHandler(TensorInfo &info, const ITensor &tensor, bool bypass_import = false) : _tensor()
{
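The second constructor supports the same discipline, shown here as a hedged sketch under assumed names (reshaped_info, weights, pack, kSlot, op, and the condition are illustrative; the signature and bypass_import semantics are the ones documented above): it wraps an existing tensor's memory under a new TensorInfo, and the import can be skipped whenever that re-interpreted view is never consumed.

    // Hedged sketch; names are hypothetical stand-ins.
    const bool use_reshaped_view = some_condition; // hypothetical condition

    // View 'weights' through 'reshaped_info' without copying; skip the
    // import when the view is unused, mirroring the run/prepare scope rule.
    CpuAuxTensorHandler view(reshaped_info, weights,
                             /* bypass_import */ !use_reshaped_view);
    if (use_reshaped_view)
    {
        pack.add_tensor(kSlot, view.get()); // the view joins the tensor pack
    }
    op.run(pack);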