From 0e1ccebfd52248fd8ead2614eaf45828d1fab340 Mon Sep 17 00:00:00 2001
From: SiCong Li
Date: Fri, 10 Nov 2023 12:16:32 +0000
Subject: Fix CpuGemmConv2d int8 segfault

Bypass importing the memory of the original weights into the
reinterpreted_weights auxiliary tensor if another weight transformation
path is selected (which would have freed the original weights and its
tensor info).

Resolves COMPMID-6635
Signed-off-by: SiCong Li
Change-Id: Ib8a345c3ac542bc3745d6a67db822b55df37e827
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/10698
Benchmark: Arm Jenkins
Reviewed-by: Anitha Raj
Comments-Addressed: Arm Jenkins
Tested-by: Arm Jenkins
---
 src/cpu/operators/CpuGemmConv2d.cpp |  9 ++++++---
 src/cpu/utils/CpuAuxTensorHandler.h | 15 ++++++++++-----
 2 files changed, 16 insertions(+), 8 deletions(-)

diff --git a/src/cpu/operators/CpuGemmConv2d.cpp b/src/cpu/operators/CpuGemmConv2d.cpp
index 117527ccc1..31c873c2ba 100644
--- a/src/cpu/operators/CpuGemmConv2d.cpp
+++ b/src/cpu/operators/CpuGemmConv2d.cpp
@@ -836,10 +836,13 @@ void CpuGemmConv2d::run(ITensorPack &tensors)
     gemm_pack.add_const_tensor(TensorType::ACL_SRC_0, gemm_input_to_use);
     gemm_pack.add_tensor(TensorType::ACL_DST, gemm_output_to_use);
     // Allocate reshaped weights if required
-    auto                weights = gemm_pack.get_const_tensor(TensorType::ACL_SRC_1);
+    auto weights = gemm_pack.get_const_tensor(TensorType::ACL_SRC_1);
+    ARM_COMPUTE_ERROR_ON_NULLPTR(weights);
+    // Re-interpreted weights. Only tensor shape is changed. Only memory import, no allocation
     CpuAuxTensorHandler reinterpreted_wei(
-        _weights_reshaped,
-        *weights); // Re-interpreted weights. Only tensor shape is changed. No allocation
+        _weights_reshaped, *weights,
+        /* import only if we chose the ReinterpretThenTranspose path, because otherwise the weight may have been freed */
+        !(_run_wt && _wt_method == WeightTransformMethod::ReinterpretThenTranspose));
     CpuAuxTensorHandler reshaped_wei(offset_int_vec(WeightsReshaped), _weights_reshaped, tensors);
     // Update the weights to use if it has been reshaped
     if (_run_wt)
diff --git a/src/cpu/utils/CpuAuxTensorHandler.h b/src/cpu/utils/CpuAuxTensorHandler.h
index 627216837b..0a39fdba81 100644
--- a/src/cpu/utils/CpuAuxTensorHandler.h
+++ b/src/cpu/utils/CpuAuxTensorHandler.h
@@ -74,15 +74,20 @@ public:
     /** Create a temporary handle to the original tensor with a new @ref TensorInfo
      * This is useful if we want to change a tensor's tensor info at run time without modifying the original tensor
      *
-     * @param[in] info   New tensor info to "assign" to @p tensor
-     * @param[in] tensor Tensor to be assigned a new @ref TensorInfo
+     * @param[in] info          New tensor info to "assign" to @p tensor
+     * @param[in] tensor        Tensor to be assigned a new @ref TensorInfo
+     * @param[in] bypass_import Bypass importing @p tensor's memory into the handler
      */
-    CpuAuxTensorHandler(TensorInfo &info, const ITensor &tensor) : _tensor()
+    CpuAuxTensorHandler(TensorInfo &info, const ITensor &tensor, bool bypass_import = false) : _tensor()
     {
         _tensor.allocator()->soft_init(info);
-        if (info.total_size() <= tensor.info()->total_size())
+        if (!bypass_import)
         {
-            _tensor.allocator()->import_memory(tensor.buffer());
+            ARM_COMPUTE_ERROR_ON(tensor.info() == nullptr);
+            if (info.total_size() <= tensor.info()->total_size())
+            {
+                _tensor.allocator()->import_memory(tensor.buffer());
+            }
         }
     }

--
cgit v1.2.1
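
Not part of the patch above: a minimal, self-contained C++ sketch of the bypass pattern the change introduces, for readers without the ComputeLibrary tree at hand. FakeTensorInfo, FakeTensor and FakeAuxTensorHandler are hypothetical stand-ins for the library's TensorInfo, ITensor and CpuAuxTensorHandler; the real classes go through a tensor allocator, which the sketch reduces to a raw pointer. The design point is that the bypass decision belongs to the caller, which knows whether the selected weight-transform path has already released the original weights, so the handler never touches a potentially stale buffer.

#include <cstddef>
#include <cstdint>
#include <iostream>
#include <vector>

// Hypothetical stand-ins, reduced to what the sketch needs.
struct FakeTensorInfo
{
    size_t total_size{0};
};

struct FakeTensor
{
    FakeTensorInfo      info;
    std::vector<int8_t> storage;
    const int8_t       *buffer() const { return storage.data(); }
};

class FakeAuxTensorHandler
{
public:
    // Mirrors the patched constructor: reinterpret `tensor` with `info`, importing its
    // memory only if `bypass_import` is false and the new view fits the original buffer.
    FakeAuxTensorHandler(const FakeTensorInfo &info, const FakeTensor &tensor, bool bypass_import = false)
        : _info(info)
    {
        if (!bypass_import && info.total_size <= tensor.info.total_size)
        {
            _imported_buffer = tensor.buffer();
        }
    }

    // Returns nullptr if the import was bypassed.
    const int8_t *get() const { return _imported_buffer; }

private:
    FakeTensorInfo _info;
    const int8_t  *_imported_buffer{nullptr};
};

int main()
{
    FakeTensor     weights{{16}, std::vector<int8_t>(16, 1)};
    FakeTensorInfo reshaped_info{16};

    // Caller-side pattern from CpuGemmConv2d::run(): bypass the import whenever the
    // chosen weight-transform path means the original weights may already be freed.
    const bool           reinterpret_then_transpose = false; // e.g. another transform path was selected
    FakeAuxTensorHandler reinterpreted_wei(reshaped_info, weights,
                                           /* bypass_import */ !reinterpret_then_transpose);

    // Prints "imported: 0" here: no import, hence no access to a possibly freed buffer.
    std::cout << "imported: " << (reinterpreted_wei.get() != nullptr) << '\n';
    return 0;
}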