author    SiCong Li <sicong.li@arm.com>  2023-11-10 12:16:32 +0000
committer Anitha Raj <anitha.raj@arm.com>  2023-11-10 14:44:07 +0000
commit    0e1ccebfd52248fd8ead2614eaf45828d1fab340 (patch)
tree      defdbe671552dfb97336daaa9df0ccca9a95c643
parent    a63ece730acda74df26281e6341f6fedfb209554 (diff)
download  ComputeLibrary-0e1ccebfd52248fd8ead2614eaf45828d1fab340.tar.gz
Fix CpuGemmConv2d int8 segfault
Bypass importing the memory of the original weights into the reinterpreted_weights auxiliary tensor if another weight transformation path is selected (which would have freed the original weights and their tensor info).

Resolves COMPMID-6635

Signed-off-by: SiCong Li <sicong.li@arm.com>
Change-Id: Ib8a345c3ac542bc3745d6a67db822b55df37e827
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/10698
Benchmark: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Anitha Raj <Anitha.Raj@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
-rw-r--r--  src/cpu/operators/CpuGemmConv2d.cpp  |  9
-rw-r--r--  src/cpu/utils/CpuAuxTensorHandler.h  | 15
2 files changed, 16 insertions, 8 deletions
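
For context, the segfault comes from an auxiliary tensor handler unconditionally importing the buffer of the original weights, even on weight-transform paths that have already released that buffer. A minimal, stand-alone sketch of that failure mode is below; the Handler struct and the raw buffer are simplified stand-ins, not the actual Compute Library types:

#include <cstdint>
#include <cstdio>

// Simplified stand-in for an auxiliary handler that aliases another tensor's buffer.
struct Handler
{
    explicit Handler(const uint8_t *src_buffer) : imported(src_buffer) {} // unconditional "import"
    const uint8_t *imported;
};

int main()
{
    uint8_t *weights = new uint8_t[64]();  // original weight buffer
    Handler  h(weights);                   // handler imports (aliases) the buffer
    delete[] weights;                      // a different transform path frees the weights
    std::printf("%d\n", h.imported[0]);    // use-after-free: this read can segfault
    return 0;
}

The fix below makes the import conditional, so the handler never holds a pointer into memory that another path may have freed.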
diff --git a/src/cpu/operators/CpuGemmConv2d.cpp b/src/cpu/operators/CpuGemmConv2d.cpp
index 117527ccc1..31c873c2ba 100644
--- a/src/cpu/operators/CpuGemmConv2d.cpp
+++ b/src/cpu/operators/CpuGemmConv2d.cpp
@@ -836,10 +836,13 @@ void CpuGemmConv2d::run(ITensorPack &tensors)
gemm_pack.add_const_tensor(TensorType::ACL_SRC_0, gemm_input_to_use);
gemm_pack.add_tensor(TensorType::ACL_DST, gemm_output_to_use);
// Allocate reshaped weights if required
- auto weights = gemm_pack.get_const_tensor(TensorType::ACL_SRC_1);
+ auto weights = gemm_pack.get_const_tensor(TensorType::ACL_SRC_1);
+ ARM_COMPUTE_ERROR_ON_NULLPTR(weights);
+ // Re-interpreted weights. Only tensor shape is changed. Only memory import, no allocation
CpuAuxTensorHandler reinterpreted_wei(
- _weights_reshaped,
- *weights); // Re-interpreted weights. Only tensor shape is changed. No allocation
+ _weights_reshaped, *weights,
+ /* import only if we chose the ReinterpretThenTranspose path, because otherwise the weight may have been freed */
+ !(_run_wt && _wt_method == WeightTransformMethod::ReinterpretThenTranspose));
CpuAuxTensorHandler reshaped_wei(offset_int_vec(WeightsReshaped), _weights_reshaped, tensors);
// Update the weights to use if it has been reshaped
if (_run_wt)
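
The condition passed as bypass_import mirrors the path selection that follows the hunk. As a hedged sketch of how the two handlers are then consumed (the branch structure and names follow the diff above; the exact code in CpuGemmConv2d::run may differ):

// Sketch (assumption): pick which weights feed the GEMM, depending on the transform path.
if (_run_wt)
{
    if (_wt_method == WeightTransformMethod::ReinterpretThenTranspose)
    {
        // The original weight memory was imported into reinterpreted_wei above.
        gemm_pack.add_const_tensor(TensorType::ACL_SRC_1, reinterpreted_wei.get());
    }
    else
    {
        // Another transform path produced freshly reshaped weights; the original
        // buffer may already be freed, which is why the import above is bypassed.
        gemm_pack.add_const_tensor(TensorType::ACL_SRC_1, reshaped_wei.get());
    }
}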
diff --git a/src/cpu/utils/CpuAuxTensorHandler.h b/src/cpu/utils/CpuAuxTensorHandler.h
index 627216837b..0a39fdba81 100644
--- a/src/cpu/utils/CpuAuxTensorHandler.h
+++ b/src/cpu/utils/CpuAuxTensorHandler.h
@@ -74,15 +74,20 @@ public:
/** Create a temporary handle to the original tensor with a new @ref TensorInfo
* This is useful if we want to change a tensor's tensor info at run time without modifying the original tensor
*
- * @param[in] info New tensor info to "assign" to @p tensor
- * @param[in] tensor Tensor to be assigned a new @ref TensorInfo
+ * @param[in] info New tensor info to "assign" to @p tensor
+ * @param[in] tensor Tensor to be assigned a new @ref TensorInfo
+ * @param[in] bypass_import Bypass importing @p tensor's memory into the handler
*/
- CpuAuxTensorHandler(TensorInfo &info, const ITensor &tensor) : _tensor()
+ CpuAuxTensorHandler(TensorInfo &info, const ITensor &tensor, bool bypass_import = false) : _tensor()
{
_tensor.allocator()->soft_init(info);
- if (info.total_size() <= tensor.info()->total_size())
+ if (!bypass_import)
{
- _tensor.allocator()->import_memory(tensor.buffer());
+ ARM_COMPUTE_ERROR_ON(tensor.info() == nullptr);
+ if (info.total_size() <= tensor.info()->total_size())
+ {
+ _tensor.allocator()->import_memory(tensor.buffer());
+ }
}
}
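
A hedged usage sketch of the amended constructor follows; the tensors, shapes, and data type are made up for illustration, includes and namespaces are elided, and CpuAuxTensorHandler is an internal utility rather than public API:

// Illustrative only: alias an existing tensor's memory under a new TensorInfo.
TensorInfo src_info(TensorShape(16U, 4U), 1, DataType::QASYMM8_SIGNED);
Tensor     src;
src.allocator()->init(src_info);
src.allocator()->allocate();

TensorInfo reinterpreted_info(TensorShape(4U, 16U), 1, DataType::QASYMM8_SIGNED);

// Default behaviour: import src's buffer if reinterpreted_info fits inside it.
CpuAuxTensorHandler aliased(reinterpreted_info, src);

// bypass_import = true: only soft_init the new info and never touch src's buffer,
// so the handler stays safe even if src's memory has been (or is about to be) released.
CpuAuxTensorHandler detached(reinterpreted_info, src, /* bypass_import */ true);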