From 96880cf00707d394938ec7fe31c21c79a2ac3f0c Mon Sep 17 00:00:00 2001
From: Georgios Pinitas
Date: Fri, 20 Oct 2017 18:52:20 +0100
Subject: COMPMID-640: FullyConnectedLayer failures on both NEON/CL

Change-Id: Idd830cff054114123229c189e423b753b8064146
Reviewed-on: http://mpd-gerrit.cambridge.arm.com/92623
Reviewed-by: Robert Hughes
Tested-by: Kaizen
Reviewed-by: Anthony Barbier
---
 src/core/AccessWindowTranspose.cpp                   | 4 ++--
 src/core/CL/cl_kernels/gemm.cl                       | 6 +++---
 src/runtime/CL/functions/CLFullyConnectedLayer.cpp   | 2 +-
 src/runtime/NEON/functions/NEFullyConnectedLayer.cpp | 2 +-
 4 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/src/core/AccessWindowTranspose.cpp b/src/core/AccessWindowTranspose.cpp
index b1043305b8..4506a0b44c 100644
--- a/src/core/AccessWindowTranspose.cpp
+++ b/src/core/AccessWindowTranspose.cpp
@@ -201,8 +201,8 @@ bool AccessWindowTranspose::update_padding_if_needed(const Window &window) const
     PaddingSize padding;
     padding.left   = std::max(0, -min_x);
     padding.right  = std::max(0, max_x - shape[0]);
-    padding.top    = shape.num_dimensions() == 1 ? 0 : std::max(0, -min_y);
-    padding.bottom = shape.num_dimensions() == 1 ? 0 : std::max(0, max_y - shape[1]);
+    padding.top    = std::max(0, -min_y);
+    padding.bottom = std::max(0, max_y - shape[1]);
 
     // Update strides in tensor info
     return _info->extend_padding(padding);
diff --git a/src/core/CL/cl_kernels/gemm.cl b/src/core/CL/cl_kernels/gemm.cl
index 35a2e4704f..7f2a08bc2c 100644
--- a/src/core/CL/cl_kernels/gemm.cl
+++ b/src/core/CL/cl_kernels/gemm.cl
@@ -1040,7 +1040,7 @@ __kernel void gemm_mm_floating_point(IMAGE_DECLARATION(src0),
     VECTOR_TYPE acc3 = 0.0f;
 #endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
 
-    for(; src_addr.s0 <= (end_row_vec_a - 2 * sizeof(DATA_TYPE)); src_addr += (int2)(2 * sizeof(DATA_TYPE), 2 * src1_stride_y))
+    for(; src_addr.s0 <= (end_row_vec_a - 2 * (int)sizeof(DATA_TYPE)); src_addr += (int2)(2 * sizeof(DATA_TYPE), 2 * src1_stride_y))
     {
         // Load values from matrix A
         VEC_DATA_TYPE(DATA_TYPE, 2)
@@ -1344,7 +1344,7 @@ __kernel void gemm_mm_qs16(IMAGE_DECLARATION(src0),
 #endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
 
     // This for loop performs 4 accumulations per iteration
-    for(; src_addr.s0 <= (end_row_vec_a - 2 * sizeof(short)); src_addr += (int2)(2 * sizeof(short), 2 * src1_stride_y))
+    for(; src_addr.s0 <= (end_row_vec_a - 2 * (int)sizeof(short)); src_addr += (int2)(2 * sizeof(short), 2 * src1_stride_y))
     {
         short2 a0 = vload2(0, (__global short *)(src0_ptr + src_addr.s0 + 0 * src0_stride_y));
 #if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
@@ -1629,7 +1629,7 @@ __kernel void gemm_lc_vm_f32(IMAGE_DECLARATION(src0),
 
     float4 acc = 0.0f;
 
-    for(; src_addr.s0 <= (end_row_vec_a - 2 * sizeof(float)); src_addr += (int2)(2 * sizeof(float), 2 * src1_stride_y))
+    for(; src_addr.s0 <= (end_row_vec_a - 2 * (int)sizeof(float)); src_addr += (int2)(2 * sizeof(float), 2 * src1_stride_y))
     {
         float2 a0 = vload2(0, (__global float *)(src0_ptr + src_addr.s0));
         float4 b0 = vload4(0, (__global float *)(src1_ptr + src_addr.s1));
diff --git a/src/runtime/CL/functions/CLFullyConnectedLayer.cpp b/src/runtime/CL/functions/CLFullyConnectedLayer.cpp
index ee1558fe71..03d5dbdfd1 100644
--- a/src/runtime/CL/functions/CLFullyConnectedLayer.cpp
+++ b/src/runtime/CL/functions/CLFullyConnectedLayer.cpp
@@ -85,7 +85,7 @@ void CLFullyConnectedLayer::configure(const ICLTensor *input, const ICLTensor *w
 {
     ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QS8, DataType::QS16, DataType::F16, DataType::F32);
     ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input, weights, output);
-    ARM_COMPUTE_ERROR_ON(weights->info()->num_dimensions() != 2);
+    ARM_COMPUTE_ERROR_ON(weights->info()->num_dimensions() > 2);
 
     _are_weights_reshaped = transpose_weights ? are_weights_reshaped : true;
     _is_fc_after_conv     = true;
diff --git a/src/runtime/NEON/functions/NEFullyConnectedLayer.cpp b/src/runtime/NEON/functions/NEFullyConnectedLayer.cpp
index 2e8d10598d..fc04e28972 100644
--- a/src/runtime/NEON/functions/NEFullyConnectedLayer.cpp
+++ b/src/runtime/NEON/functions/NEFullyConnectedLayer.cpp
@@ -133,7 +133,7 @@ void NEFullyConnectedLayer::configure(const ITensor *input, const ITensor *weigh
     const int    num_input_dimensions = input->info()->tensor_shape().num_dimensions() - num_batch_dimensions;
     const size_t linear_input_size    = input->info()->tensor_shape().total_size_lower(num_input_dimensions);
 
-    _linearize_input      = input->info()->tensor_shape().x() != linear_input_size;
+    _linearize_input      = (input->info()->tensor_shape().x() != linear_input_size) || (num_input_dimensions > 1 && linear_input_size == 1);
     _are_weights_reshaped = are_weights_reshaped;
    _accumulate_biases    = biases != nullptr;
     _is_batched_fc_layer  = num_batch_dimensions > 0;
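
Note on the gemm.cl change (not part of the original patch): in OpenCL C, as in C and C++, sizeof yields an unsigned size_t, so end_row_vec_a - 2 * sizeof(DATA_TYPE) is evaluated in unsigned arithmetic. When end_row_vec_a is smaller than 2 * sizeof(DATA_TYPE), the subtraction wraps around to a huge value, the loop guard stays true, and the vectorised loop can read past the end of matrix A. The (int) cast keeps the subtraction signed. A minimal host-side C++ sketch of the same effect (illustrative only, assuming a 64-bit size_t; the variable names mirror the kernel):

#include <cstdio>

int main()
{
    const int end_row_vec_a = 4; // fewer bytes left than one 2-element step
    const int src_addr_s0   = 0;

    // Unsigned arithmetic: 4 - 8 wraps to 0xFFFFFFFFFFFFFFFC, so the
    // guard is (wrongly) true and the vectorised loop body would run.
    const bool buggy_guard = src_addr_s0 <= (end_row_vec_a - 2 * sizeof(float));

    // Signed arithmetic after the cast: 4 - 8 == -4, the guard is false,
    // and the remaining elements are left for scalar handling.
    const bool fixed_guard = src_addr_s0 <= (end_row_vec_a - 2 * (int)sizeof(float));

    printf("buggy guard: %d, fixed guard: %d\n", buggy_guard, fixed_guard);
    return 0;
}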
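
Likewise for the NEFullyConnectedLayer change: the old predicate x() != linear_input_size misses multi-dimensional inputs whose flattened size is 1 (e.g. a 1x1x1 input in a batched layer), because x() and linear_input_size are then both 1 even though the input still has to be collapsed to one dimension. A standalone sketch of the two predicates (total_size_lower and needs_linearization here are hypothetical stand-ins, not the Compute Library API):

#include <cassert>
#include <cstddef>
#include <vector>

// Hypothetical stand-in for TensorShape::total_size_lower(n):
// product of the first n dimensions.
static std::size_t total_size_lower(const std::vector<std::size_t> &shape, int n)
{
    std::size_t size = 1;
    for(int i = 0; i < n; ++i)
    {
        size *= shape[i];
    }
    return size;
}

// Mirrors the fixed predicate from NEFullyConnectedLayer::configure(),
// with shape[0] playing the role of tensor_shape().x().
static bool needs_linearization(const std::vector<std::size_t> &shape, int num_input_dimensions)
{
    const std::size_t linear_input_size = total_size_lower(shape, num_input_dimensions);
    return (shape[0] != linear_input_size) || (num_input_dimensions > 1 && linear_input_size == 1);
}

int main()
{
    assert(needs_linearization({8, 8, 3}, 3)); // 8 != 192: flatten to a 192-element vector
    assert(!needs_linearization({192}, 1));    // already linear
    assert(needs_linearization({1, 1, 1}, 3)); // old predicate returned false here: 1 == 1
    return 0;
}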