From 358ca205c9e41f523517ffa55a9057308b736040 Mon Sep 17 00:00:00 2001
From: Georgios Pinitas
Date: Thu, 7 Dec 2017 16:47:52 +0000
Subject: COMPMID-617: Adds CLFullyConnectedLayer validation support

Change-Id: I4d2eb9872a3165fdcaa7784596e441cbe563dbc2
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/112577
Tested-by: Jenkins
Reviewed-by: Ioan-Cristian Szabo
Reviewed-by: Anthony Barbier
---
 .../CL/functions/CLGEMMLowpMatrixMultiplyCore.cpp | 104 ++++++++++++++-------
 1 file changed, 70 insertions(+), 34 deletions(-)

(limited to 'src/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.cpp')

diff --git a/src/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.cpp b/src/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.cpp
index 5c6f5b4ed0..ddcab6a256 100644
--- a/src/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.cpp
+++ b/src/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.cpp
@@ -29,9 +29,11 @@
 #include "arm_compute/core/TensorInfo.h"
 #include "arm_compute/core/Types.h"
 #include "arm_compute/core/Validate.h"
+#include "arm_compute/core/utils/misc/ShapeCalculator.h"
 #include "arm_compute/runtime/CL/CLScheduler.h"

 using namespace arm_compute;
+using namespace arm_compute::misc::shape_calculator;

 CLGEMMLowpMatrixMultiplyCore::CLGEMMLowpMatrixMultiplyCore(std::shared_ptr<IMemoryManager> memory_manager)
     : _memory_group(std::move(memory_manager)), _mm_kernel(), _mtx_a_reshape_kernel(), _mtx_b_reshape_kernel(), _mtx_a_reduction_kernel(), _mtx_b_reduction_kernel(), _offset_contribution_kernel(),
@@ -41,14 +43,9 @@ CLGEMMLowpMatrixMultiplyCore::CLGEMMLowpMatrixMultiplyCore(std::shared_ptr<IMemoryManager> memory_manager)

 void CLGEMMLowpMatrixMultiplyCore::configure(const ICLTensor *a, const ICLTensor *b, ICLTensor *output, const GEMMInfo &gemm_info)
 {
-    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(a, 1, DataType::QASYMM8);
-    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::S32);
-    ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(a, b);
-    ARM_COMPUTE_ERROR_ON_MSG((a)->info()->dimension(0) != (b)->info()->dimension(1), "The product AB is defined only if the number of columns in A is equal to the number of rows in B");
-    ARM_COMPUTE_ERROR_ON_MSG((a)->info()->dimension(1) != (output)->info()->dimension(1), "The output matrix must have the same number of rows as the matrix A");
-    ARM_COMPUTE_ERROR_ON_MSG((b)->info()->dimension(0) != (output)->info()->dimension(0), "The output matrix must have the same number of columns as the matrix B");
-    ARM_COMPUTE_ERROR_ON_MSG(gemm_info.is_a_reshaped(), "Matrix A already reshaped is not supported");
-    ARM_COMPUTE_ERROR_ON_MSG(gemm_info.is_b_reshaped(), "Matrix B already reshaped is not supported");
+    ARM_COMPUTE_ERROR_ON_NULLPTR(a, b, output);
+    ARM_COMPUTE_UNUSED(gemm_info);
+    ARM_COMPUTE_ERROR_THROW_ON(CLGEMMLowpMatrixMultiplyCore::validate(a->info(), b->info(), output->info(), gemm_info));

     _reshape_b_only_on_first_run = gemm_info.reshape_b_only_on_first_run();
     _a_offset = a->info()->quantization_info().offset;
@@ -65,18 +62,8 @@ void CLGEMMLowpMatrixMultiplyCore::configure(const ICLTensor *a, const ICLTensor
         matrix_a = &_tmp_a;
         matrix_b = &_tmp_b;

-        // The interleaved output matrix will have the following shape: [ a_height * 4, ceil(a_width / 4.0f) ]
-        TensorShape shape_tmp_a = a->info()->tensor_shape();
-        shape_tmp_a.set(0, a->info()->dimension(0) * 4);
-        shape_tmp_a.set(1, std::ceil(a->info()->dimension(1) / 4.f));
-
-        // The transpose1xW output matrix will have the following shape: [ b_height * 16, ceil(b_width / 16.0f) ]
-        TensorShape shape_tmp_b = b->info()->tensor_shape();
-        shape_tmp_b.set(0, b->info()->dimension(1) * 16);
-        shape_tmp_b.set(1, std::ceil(b->info()->dimension(0) / 16.f));
-
-        TensorInfo info_a(shape_tmp_a, 1, a->info()->data_type());
-        TensorInfo info_b(shape_tmp_b, 1, b->info()->data_type());
+        TensorInfo info_a(compute_interleaved_shape(*a->info()), 1, a->info()->data_type());
+        TensorInfo info_b(compute_transpose1xW_shape(*b->info()), 1, b->info()->data_type());
         _tmp_a.allocator()->init(info_a);
         _tmp_b.allocator()->init(info_b);
         _memory_group.manage(&_tmp_a);
@@ -95,13 +82,7 @@ void CLGEMMLowpMatrixMultiplyCore::configure(const ICLTensor *a, const ICLTensor
     // Initialize matrix B reduction kernel only if _a_offset is not equal to 0
     if(_a_offset != 0)
     {
-        TensorShape shape_vector_sum_col = b->info()->tensor_shape();
-
-        if(shape_vector_sum_col.num_dimensions() > 1)
-        {
-            shape_vector_sum_col.remove_dimension(1);
-        }
-        TensorInfo info_vector_sum_col(shape_vector_sum_col, 1, DataType::S32);
+        TensorInfo info_vector_sum_col(compute_reductionA_shape(*b->info()), 1, DataType::S32);

         _vector_sum_col.allocator()->init(info_vector_sum_col);
         _memory_group.manage(&_vector_sum_col);
@@ -112,13 +93,7 @@ void CLGEMMLowpMatrixMultiplyCore::configure(const ICLTensor *a, const ICLTensor
     // Initialize Matrix A reduction kernel only if _b_offset is not equal to 0
     if(_b_offset != 0)
     {
-        TensorShape shape_vector_sum_row = a->info()->tensor_shape();
-        shape_vector_sum_row.set(Window::DimX, a->info()->dimension(1));
-        if(a->info()->num_dimensions() > 1)
-        {
-            shape_vector_sum_row.remove_dimension(1);
-        }
-        TensorInfo info_vector_sum_row(shape_vector_sum_row, 1, DataType::S32);
+        TensorInfo info_vector_sum_row(compute_reductionB_shape(*a->info()), 1, DataType::S32);

         _vector_sum_row.allocator()->init(info_vector_sum_row);
         _memory_group.manage(&_vector_sum_row);
@@ -147,6 +122,67 @@ void CLGEMMLowpMatrixMultiplyCore::configure(const ICLTensor *a, const ICLTensor
     }
 }

+Status CLGEMMLowpMatrixMultiplyCore::validate(const ITensorInfo *a, const ITensorInfo *b, const ITensorInfo *output, const GEMMInfo &gemm_info)
+{
+    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(a, 1, DataType::QASYMM8);
+    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::S32);
+    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(a, b);
+    ARM_COMPUTE_RETURN_ERROR_ON_MSG((a)->dimension(0) != (b)->dimension(1),
+                                    "The product AB is defined only if the number of columns in A is equal to the number of rows in B");
+    ARM_COMPUTE_RETURN_ERROR_ON_MSG((a)->dimension(1) != (output)->dimension(1),
+                                    "The output matrix must have the same number of rows as the matrix A");
+    ARM_COMPUTE_RETURN_ERROR_ON_MSG((b)->dimension(0) != (output)->dimension(0),
+                                    "The output matrix must have the same number of columns as the matrix B");
+    ARM_COMPUTE_RETURN_ERROR_ON_MSG(gemm_info.is_a_reshaped(), "Matrix A already reshaped is not supported");
+    ARM_COMPUTE_RETURN_ERROR_ON_MSG(gemm_info.is_b_reshaped(), "Matrix B already reshaped is not supported");
+
+    int32_t a_offset = a->quantization_info().offset;
+    int32_t b_offset = b->quantization_info().offset;
+    bool is_interleaved_transposed = a->dimension(1) > 16;
+
+    if(is_interleaved_transposed)
+    {
+        TensorInfo info_a(compute_interleaved_shape(*a), 1, a->data_type());
+        TensorInfo info_b(compute_transpose1xW_shape(*b), 1, b->data_type());
+
+        ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMInterleave4x4Kernel::validate(a, &info_a));
+        ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMTranspose1xWKernel::validate(b, &info_b));
+        ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMLowpMatrixMultiplyKernel::validate(&info_a, &info_b, output));
+    }
+    else
+    {
+        ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMLowpMatrixMultiplyKernel::validate(a, b, output));
+    }
+
+    TensorInfo info_vector_sum_col, info_vector_sum_row;
+
+    // Validate matrix B reduction kernel only if _a_offset is not equal to 0
+    if(a_offset != 0)
+    {
+        info_vector_sum_col = TensorInfo(compute_reductionA_shape(*b), 1, DataType::S32);
+
+        // Configure Matrix B reduction kernel
+        ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMLowpMatrixBReductionKernel::validate(b, &info_vector_sum_col));
+    }
+
+    // Validate Matrix A reduction kernel only if _b_offset is not equal to 0
+    if(b_offset != 0)
+    {
+        info_vector_sum_row = TensorInfo(compute_reductionB_shape(*a), 1, DataType::S32);
+
+        // Configure matrix A reduction kernel
+        ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMLowpMatrixAReductionKernel::validate(a, &info_vector_sum_row));
+    }
+
+    // Validate offset contribution kernel
+    ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMLowpOffsetContributionKernel::validate(output,
+                                                                             a_offset == 0 ? nullptr : &info_vector_sum_col,
+                                                                             b_offset == 0 ? nullptr : &info_vector_sum_row,
+                                                                             a_offset, b_offset));
+
+    return Status{};
+}
+
 void CLGEMMLowpMatrixMultiplyCore::run()
 {
     _memory_group.acquire();
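
Appended note (not part of the patch): the change above promotes the configure-time assertions into a static CLGEMMLowpMatrixMultiplyCore::validate() entry point that can be run on bare ITensorInfo descriptors before anything is configured or allocated. The sketch below is one way that entry point might be exercised; the matrix dimensions, quantization parameters and the Status accessors used for reporting are illustrative assumptions, not taken from this commit, and minor API details may differ between library versions.

#include "arm_compute/core/Error.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.h"

#include <iostream>

using namespace arm_compute;

int main()
{
    // Illustrative GEMM dimensions: A is M x K, B is K x N, output is M x N.
    const unsigned int M = 64, K = 128, N = 32;

    // TensorShape is (width, height), i.e. (columns, rows). QASYMM8 inputs carry
    // quantization info; the non-zero offsets below make validate() also check
    // the matrix A/B reduction kernels.
    const TensorInfo a(TensorShape(K, M), 1, DataType::QASYMM8, QuantizationInfo(0.05f, 10));
    const TensorInfo b(TensorShape(N, K), 1, DataType::QASYMM8, QuantizationInfo(0.05f, 3));
    const TensorInfo dst(TensorShape(N, M), 1, DataType::S32);

    // Static check on tensor metadata only; nothing is configured or allocated.
    const Status status = CLGEMMLowpMatrixMultiplyCore::validate(&a, &b, &dst, GEMMInfo());

    if(status.error_code() != ErrorCode::OK)
    {
        std::cout << "GEMMLowp rejected: " << status.error_description() << std::endl;
        return 1;
    }

    std::cout << "GEMMLowp accepted" << std::endl;
    return 0;
}

With M = 64 the A matrix has more than 16 rows, so the sketch exercises the interleave4x4/transpose1xW branch of validate() as well as both reduction kernels, since both quantization offsets are non-zero.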
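
Appended note (not part of the patch): the deleted configure() lines hand-computed the shapes that compute_interleaved_shape(), compute_transpose1xW_shape(), compute_reductionA_shape() and compute_reductionB_shape() now return. The standalone sketch below restates that arithmetic with hypothetical helper names (Shape2D, interleaved_shape and transpose1xW_shape are not library API) purely to document what the helpers are expected to produce; centralising this arithmetic in shape_calculator keeps configure() and the new validate() path in agreement by construction.

#include <cmath>
#include <cstdio>

// Illustrative helpers mirroring the shape arithmetic removed from configure().
// Shapes are given as {width, height}, matching TensorShape's (x, y) order.
struct Shape2D
{
    unsigned int width;
    unsigned int height;
};

// CLGEMMInterleave4x4Kernel output: [ width * 4, ceil(height / 4) ]
Shape2D interleaved_shape(Shape2D in)
{
    return { in.width * 4, static_cast<unsigned int>(std::ceil(in.height / 4.0f)) };
}

// CLGEMMTranspose1xWKernel output: [ height * 16, ceil(width / 16) ]
Shape2D transpose1xW_shape(Shape2D in)
{
    return { in.height * 16, static_cast<unsigned int>(std::ceil(in.width / 16.0f)) };
}

int main()
{
    const Shape2D a{ 128, 64 }; // A: K = 128 columns, M = 64 rows
    const Shape2D b{ 32, 128 }; // B: N = 32 columns, K = 128 rows

    const Shape2D tmp_a = interleaved_shape(a);  // expected {512, 16}
    const Shape2D tmp_b = transpose1xW_shape(b); // expected {2048, 2}

    std::printf("tmp_a = {%u, %u}\n", tmp_a.width, tmp_a.height);
    std::printf("tmp_b = {%u, %u}\n", tmp_b.width, tmp_b.height);

    // The reduction kernels produce 1-D vectors:
    //   vector_sum_col (from B): length = b.width  = 32 column sums
    //   vector_sum_row (from A): length = a.height = 64 row sums
    return 0;
}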