From d80792a2d342439d5577b9bc0c9410fbeb70ea5d Mon Sep 17 00:00:00 2001
From: Sheri Zhang
Date: Thu, 5 Nov 2020 10:43:37 +0000
Subject: COMPMID-3887: NEGEMMConvolutionLayer hangs up on num_threads>18

When storing the tensor shape information, the size was clamped to 32 bits,
which caused the allocated memory to be too small.

Signed-off-by: Sheri Zhang
Change-Id: I9f7dfcd5595a143b0ed4f6973e20bcd9d776b673
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/4331
Comments-Addressed: Arm Jenkins
Tested-by: Arm Jenkins
Reviewed-by: Georgios Pinitas
---
 arm_compute/core/TensorShape.h |  4 ++--
 arm_compute/core/Window.inl    | 12 ++++++------
 2 files changed, 8 insertions(+), 8 deletions(-)

(limited to 'arm_compute')

diff --git a/arm_compute/core/TensorShape.h b/arm_compute/core/TensorShape.h
index 218774360e..b455a07767 100644
--- a/arm_compute/core/TensorShape.h
+++ b/arm_compute/core/TensorShape.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2019 Arm Limited.
+ * Copyright (c) 2016-2020 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -36,7 +36,7 @@
 namespace arm_compute
 {
 /** Shape of a tensor */
-class TensorShape : public Dimensions<uint32_t>
+class TensorShape : public Dimensions<size_t>
 {
 public:
     /** Constructor to initialize the tensor shape.
diff --git a/arm_compute/core/Window.inl b/arm_compute/core/Window.inl
index 14a432a0c0..6100d09a1c 100644
--- a/arm_compute/core/Window.inl
+++ b/arm_compute/core/Window.inl
@@ -197,15 +197,15 @@ inline Window Window::split_window(size_t dimension, size_t id, size_t total) co
     {
         if(d == dimension)
         {
-            int start = _dims[d].start();
-            int end = _dims[d].end();
-            const int step = _dims[d].step();
+            int start = _dims[d].start();
+            int end = _dims[d].end();
+            const int step = _dims[d].step();
 
             const int num_it = num_iterations(d);
             const int rem = num_it % total;
-            int work = num_it / total;
+            int work = num_it / total;
 
-            int it_start = work * id;
+            int it_start = work * id;
 
             if(int(id) < rem)
             {
@@ -277,7 +277,7 @@ inline void Window::use_tensor_dimensions(const TensorShape &shape, size_t first
 {
     for(unsigned int n = first_dimension; n < shape.num_dimensions(); ++n)
     {
-        set(n, Window::Dimension(0, std::max(shape[n], static_cast<uint32_t>(1))));
+        set(n, Window::Dimension(0, std::max(shape[n], static_cast<size_t>(1))));
     }
 }
-- 
cgit v1.2.1
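
For illustration only (not part of the patch): a minimal standalone C++ sketch of the failure mode the commit message describes. When per-dimension sizes are stored as 32-bit integers, the total element count silently wraps once it exceeds 2^32 - 1, so a buffer allocated from that value ends up far too small. The helper names (total_elements_32bit, total_elements_64bit) and the example dimensions are hypothetical, not taken from the library.

// Minimal sketch, assuming hypothetical helpers: 32-bit vs. size_t
// accumulation of a tensor's element count.
#include <cstdint>
#include <iostream>

// Total element count computed with 32-bit storage: the running product
// wraps around (unsigned overflow) once it exceeds 2^32 - 1.
static uint32_t total_elements_32bit(const uint32_t dims[], int num_dims)
{
    uint32_t total = 1;
    for(int i = 0; i < num_dims; ++i)
    {
        total *= dims[i];
    }
    return total;
}

// The same computation with size_t storage (the type the patch switches
// TensorShape to), which keeps the full value on 64-bit platforms.
static size_t total_elements_64bit(const size_t dims[], int num_dims)
{
    size_t total = 1;
    for(int i = 0; i < num_dims; ++i)
    {
        total *= dims[i];
    }
    return total;
}

int main()
{
    // Hypothetical shape whose element count is 4096 * 4096 * 512 = 2^33.
    const uint32_t d32[] = { 4096U, 4096U, 512U };
    const size_t   d64[] = { 4096U, 4096U, 512U };

    // Wraps to 0: a buffer sized from this value would be far smaller
    // than what the kernels later write into it.
    std::cout << "32-bit total: " << total_elements_32bit(d32, 3) << '\n';

    // Prints 8589934592, the correct element count.
    std::cout << "64-bit total: " << total_elements_64bit(d64, 3) << '\n';

    return 0;
}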