-rw-r--r--  src/cpu/operators/CpuGemmConv2d.cpp |  9 ++++++---
-rw-r--r--  src/cpu/utils/CpuAuxTensorHandler.h | 15 ++++++++++-----
2 files changed, 16 insertions(+), 8 deletions(-)
diff --git a/src/cpu/operators/CpuGemmConv2d.cpp b/src/cpu/operators/CpuGemmConv2d.cpp
index 117527ccc1..31c873c2ba 100644
--- a/src/cpu/operators/CpuGemmConv2d.cpp
+++ b/src/cpu/operators/CpuGemmConv2d.cpp
@@ -836,10 +836,13 @@ void CpuGemmConv2d::run(ITensorPack &tensors)
gemm_pack.add_const_tensor(TensorType::ACL_SRC_0, gemm_input_to_use);
gemm_pack.add_tensor(TensorType::ACL_DST, gemm_output_to_use);
// Allocate reshaped weights if required
- auto weights = gemm_pack.get_const_tensor(TensorType::ACL_SRC_1);
+ auto weights = gemm_pack.get_const_tensor(TensorType::ACL_SRC_1);
+ ARM_COMPUTE_ERROR_ON_NULLPTR(weights);
+ // Re-interpreted weights. Only tensor shape is changed. Only memory import, no allocation
CpuAuxTensorHandler reinterpreted_wei(
- _weights_reshaped,
- *weights); // Re-interpreted weights. Only tensor shape is changed. No allocation
+ _weights_reshaped, *weights,
+ /* import only if we chose the ReinterpretThenTranspose path, because otherwise the weight may have been freed */
+ !(_run_wt && _wt_method == WeightTransformMethod::ReinterpretThenTranspose));
CpuAuxTensorHandler reshaped_wei(offset_int_vec(WeightsReshaped), _weights_reshaped, tensors);
// Update the weights to use if it has been reshaped
if (_run_wt)
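
For context, a brief restatement of the intent behind the new third argument at this call site. The condition mirrors the one in the hunk above; the local `uses_original_buffer` is hypothetical, introduced only for readability, and is not part of CpuGemmConv2d.

    // Illustrative sketch, assuming the names _run_wt, _wt_method and
    // WeightTransformMethod from the surrounding code: the handler may touch
    // the original weights' buffer only on the ReinterpretThenTranspose path.
    // On every other path the original weights may already have been released
    // after the weight transformation, so their (possibly dangling) buffer
    // must not be imported.
    const bool uses_original_buffer =
        _run_wt && _wt_method == WeightTransformMethod::ReinterpretThenTranspose;
    CpuAuxTensorHandler reinterpreted_wei(_weights_reshaped, *weights,
                                          /* bypass_import = */ !uses_original_buffer);
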
diff --git a/src/cpu/utils/CpuAuxTensorHandler.h b/src/cpu/utils/CpuAuxTensorHandler.h
index 627216837b..0a39fdba81 100644
--- a/src/cpu/utils/CpuAuxTensorHandler.h
+++ b/src/cpu/utils/CpuAuxTensorHandler.h
@@ -74,15 +74,20 @@ public:
/** Create a temporary handle to the original tensor with a new @ref TensorInfo
* This is useful if we want to change a tensor's tensor info at run time without modifying the original tensor
*
- * @param[in] info New tensor info to "assign" to @p tensor
- * @param[in] tensor Tensor to be assigned a new @ref TensorInfo
+ * @param[in] info New tensor info to "assign" to @p tensor
+ * @param[in] tensor Tensor to be assigned a new @ref TensorInfo
+ * @param[in] bypass_import Bypass importing @p tensor's memory into the handler
*/
- CpuAuxTensorHandler(TensorInfo &info, const ITensor &tensor) : _tensor()
+ CpuAuxTensorHandler(TensorInfo &info, const ITensor &tensor, bool bypass_import = false) : _tensor()
{
_tensor.allocator()->soft_init(info);
- if (info.total_size() <= tensor.info()->total_size())
+ if (!bypass_import)
{
- _tensor.allocator()->import_memory(tensor.buffer());
+ ARM_COMPUTE_ERROR_ON(tensor.info() == nullptr);
+ if (info.total_size() <= tensor.info()->total_size())
+ {
+ _tensor.allocator()->import_memory(tensor.buffer());
+ }
}
}
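
A minimal usage sketch of the extended constructor, assuming the signature shown above; the tensor, the shapes and the example() function are illustrative and not taken from the patch.

    #include "arm_compute/core/TensorInfo.h"
    #include "arm_compute/runtime/Tensor.h"
    #include "src/cpu/utils/CpuAuxTensorHandler.h"

    using namespace arm_compute;

    void example()
    {
        // A tensor that owns a 4x16 FP32 buffer (256 bytes).
        Tensor original;
        original.allocator()->init(TensorInfo(TensorShape(4U, 16U), 1, DataType::F32));
        original.allocator()->allocate();

        // Re-interpret the same buffer as a flat 64-element FP32 tensor.
        // The new info's total_size() fits in the original buffer, so the
        // handler imports original.buffer() without allocating anything.
        TensorInfo flat_info(TensorShape(64U), 1, DataType::F32);
        cpu::CpuAuxTensorHandler view(flat_info, original); // bypass_import defaults to false

        // If 'original' may already have been freed at this point of the run,
        // the caller now passes bypass_import = true and the handler never
        // dereferences tensor.buffer().
        cpu::CpuAuxTensorHandler no_view(flat_info, original, /* bypass_import */ true);
    }

With bypass_import set, the handler still soft-inits its internal tensor with the new info, so downstream code can query the re-interpreted shape; only the memory import is skipped.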