Diffstat (limited to 'src/cpu/kernels')
6 files changed, 402 insertions, 29 deletions
diff --git a/src/cpu/kernels/CpuGemmLowpOffsetContributionKernel.cpp b/src/cpu/kernels/CpuGemmLowpOffsetContributionKernel.cpp index e290783021..2a76a5958d 100644 --- a/src/cpu/kernels/CpuGemmLowpOffsetContributionKernel.cpp +++ b/src/cpu/kernels/CpuGemmLowpOffsetContributionKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2022 Arm Limited. + * Copyright (c) 2017-2022,2024 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -51,17 +51,19 @@ Status validate_arguments(const ITensorInfo *mm_result, int32_t a_offset, int32_t b_offset) { - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(mm_result, 1, DataType::S32); + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(mm_result, 1, DataType::S32, DataType::F32); - // If a_offset == 0, vector_sum_col can be a nullptr - if (a_offset != 0) + // We run if the offset is nonzero or a sum col has been provided, we need + // the second option in case the QuantizationInfo is dynamic + if (a_offset != 0 || vector_sum_col != nullptr) { ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(vector_sum_col, 1, DataType::S32); ARM_COMPUTE_RETURN_ERROR_ON(vector_sum_col->dimension(0) != mm_result->dimension(0)); } - // If b_offset == 0, vector_sum_row can be a nullptr - if (b_offset != 0) + // We run if the offset is nonzero or a sum row has been provided, we need + // the second option in case the QuantizationInfo is dynamic + if (b_offset != 0 || vector_sum_row != nullptr) { ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(vector_sum_row, 1, DataType::S32); @@ -86,7 +88,7 @@ Status validate_arguments(const ITensorInfo *mm_result, ARM_COMPUTE_RETURN_ERROR_ON_MSG(vector_sum_row_shape[1] != output_shape[output_batch_idx], "mm_result tensor must have the same number of batches of output tensor"); - if (a_offset != 0) + if (vector_sum_col != nullptr) { TensorShape vector_sum_col_shape = vector_sum_col->tensor_shape(); vector_sum_col_shape.collapse_from(1); @@ -102,6 +104,275 @@ Status validate_arguments(const ITensorInfo *mm_result, return Status{}; } +void run_offset_contribution_float(const Window &window, + ITensor *mm_result, + const ITensor *vector_sum_col, + const ITensor *vector_sum_row, + int32_t a_offset, + int32_t b_offset, + int32_t k_offset, + float scale, + bool slide_vector_sum_col, + bool is_gemm3d) +{ + Window collapsed_window = window.collapse_if_possible(window, Window::DimZ); + collapsed_window.set(Window::DimX, Window::Dimension(0, 1, 1)); + + const int height_input = is_gemm3d ? mm_result->info()->dimension(1) : 0; + const int depth_input = is_gemm3d ? mm_result->info()->dimension(2) : 1; + + const int window_start_x = window.x().start(); + const int window_end_x = window.x().end(); + const int window_step_x = 16; + + // if vector_sum_col is nullptr then stride_y is 0, else get stride_y + const size_t sum_col_stride_y = (vector_sum_col != nullptr) ? 
(vector_sum_col->info()->strides_in_bytes().y()) : 0; + Iterator mm_result_it(mm_result, collapsed_window); + + if ((a_offset != 0) && (b_offset != 0) && (vector_sum_col != nullptr) && (vector_sum_row != nullptr)) // true, true + { + // Set window for vector_sum_col + Window win_vector_sum_col(collapsed_window); + win_vector_sum_col.set(Window::DimY, Window::Dimension(0, 0, 0)); + win_vector_sum_col.set(Window::DimZ, Window::Dimension(0, 0, 0)); + + // Set window for vector_sum_row + Window win_vector_sum_row(collapsed_window); + win_vector_sum_row.set(Window::DimX, Window::Dimension(0, 0, 0)); + win_vector_sum_row.set(Window::DimY, Window::Dimension(0, 0, 0)); + win_vector_sum_row.set(Window::DimZ, Window::Dimension(0, 0, 0)); + + Iterator vector_sum_col_it(vector_sum_col, win_vector_sum_col); + Iterator vector_sum_row_it(vector_sum_row, win_vector_sum_row); + + const size_t sum_row_stride_y = vector_sum_row->info()->strides_in_bytes().y(); + + // Offset in case vector_sum_col is batched + const int vector_sum_col_batch_offset = + slide_vector_sum_col ? vector_sum_col->info()->strides_in_bytes().z() : 0; + + execute_window_loop( + collapsed_window, + [&](const Coordinates &id) + { + const int batch_id = id.z() / depth_input; + const size_t batch_offset_col = batch_id * (sum_col_stride_y); + auto vector_sum_col_ptr = reinterpret_cast<const int32_t *>(vector_sum_col_it.ptr() + batch_offset_col + + batch_id * vector_sum_col_batch_offset); + auto mm_result_ptr = reinterpret_cast<float *>(mm_result_it.ptr()); + + // Compute the leftover term due to b_offset. + int32_t b_offset_term_s32 = + *(reinterpret_cast<const int32_t *>(vector_sum_row_it.ptr() + batch_id * sum_row_stride_y) + + id.y() + (id.z() % depth_input) * height_input); + b_offset_term_s32 *= b_offset; + + const int32x4_t b_offset_term_s32_vec = vdupq_n_s32(b_offset_term_s32); + + int x = window_start_x; + for (; x <= (window_end_x - window_step_x); x += window_step_x) + { + // Compute the leftover term due to a_offset. 
+ int32x4x4_t a_offset_term_s32 = { + {vld1q_s32(vector_sum_col_ptr + x + 0), vld1q_s32(vector_sum_col_ptr + x + 4), + vld1q_s32(vector_sum_col_ptr + x + 8), vld1q_s32(vector_sum_col_ptr + x + 12)}}; + + a_offset_term_s32.val[0] = vmulq_n_s32(a_offset_term_s32.val[0], a_offset); + a_offset_term_s32.val[1] = vmulq_n_s32(a_offset_term_s32.val[1], a_offset); + a_offset_term_s32.val[2] = vmulq_n_s32(a_offset_term_s32.val[2], a_offset); + a_offset_term_s32.val[3] = vmulq_n_s32(a_offset_term_s32.val[3], a_offset); + + // Add a_offset_term_s32 and b_offset_term_s32 + int32x4x4_t offset_term_s32 = { + {vdupq_n_s32(k_offset), vdupq_n_s32(k_offset), vdupq_n_s32(k_offset), vdupq_n_s32(k_offset)}}; + + offset_term_s32.val[0] = + vaddq_s32(offset_term_s32.val[0], vaddq_s32(a_offset_term_s32.val[0], b_offset_term_s32_vec)); + offset_term_s32.val[1] = + vaddq_s32(offset_term_s32.val[1], vaddq_s32(a_offset_term_s32.val[1], b_offset_term_s32_vec)); + offset_term_s32.val[2] = + vaddq_s32(offset_term_s32.val[2], vaddq_s32(a_offset_term_s32.val[2], b_offset_term_s32_vec)); + offset_term_s32.val[3] = + vaddq_s32(offset_term_s32.val[3], vaddq_s32(a_offset_term_s32.val[3], b_offset_term_s32_vec)); + + float32x4x4_t in_f32 = {{vld1q_f32(mm_result_ptr + x + 0), vld1q_f32(mm_result_ptr + x + 4), + vld1q_f32(mm_result_ptr + x + 8), vld1q_f32(mm_result_ptr + x + 12)}}; + + // Convert and scale the S32 offsets to match the already scaled GEMM results + float32x4x4_t offset_terms_scaled = {{ + vmulq_n_f32(vcvtq_f32_s32(offset_term_s32.val[0]), scale), + vmulq_n_f32(vcvtq_f32_s32(offset_term_s32.val[1]), scale), + vmulq_n_f32(vcvtq_f32_s32(offset_term_s32.val[2]), scale), + vmulq_n_f32(vcvtq_f32_s32(offset_term_s32.val[3]), scale), + }}; + + // Add the offset terms to the GEMM result + in_f32.val[0] = vaddq_f32(in_f32.val[0], offset_terms_scaled.val[0]); + in_f32.val[1] = vaddq_f32(in_f32.val[1], offset_terms_scaled.val[1]); + in_f32.val[2] = vaddq_f32(in_f32.val[2], offset_terms_scaled.val[2]); + in_f32.val[3] = vaddq_f32(in_f32.val[3], offset_terms_scaled.val[3]); + + // Store the result with the offset contribution + vst1q_f32(mm_result_ptr + x + 0, in_f32.val[0]); + vst1q_f32(mm_result_ptr + x + 4, in_f32.val[1]); + vst1q_f32(mm_result_ptr + x + 8, in_f32.val[2]); + vst1q_f32(mm_result_ptr + x + 12, in_f32.val[3]); + } + + // Left-overs loop + for (; x < window_end_x; ++x) + { + // Compute the leftover term due to a_offset. 
+ int32_t a_offset_term_s32 = *(vector_sum_col_ptr + x); + + a_offset_term_s32 *= a_offset; + + // Add the offset terms to GEMM's result + // Store the result with the offset contribution + mm_result_ptr[x] += (k_offset + a_offset_term_s32 + b_offset_term_s32) * scale; + } + }, + vector_sum_col_it, vector_sum_row_it, mm_result_it); + } + else if ((a_offset == 0) && (b_offset != 0) && (vector_sum_row != nullptr)) // false, true + { + ARM_COMPUTE_ERROR_ON_NULLPTR(vector_sum_row); + + // Set window for vector_sum_row + Window win_vector_sum_row(collapsed_window); + win_vector_sum_row.set(Window::DimX, Window::Dimension(0, 0, 0)); + win_vector_sum_row.set(Window::DimY, Window::Dimension(0, 0, 0)); + win_vector_sum_row.set(Window::DimZ, Window::Dimension(0, 0, 0)); + + Iterator vector_sum_row_it(vector_sum_row, win_vector_sum_row); + + const size_t sum_row_stride_y = vector_sum_row->info()->strides_in_bytes().y(); + + execute_window_loop( + collapsed_window, + [&](const Coordinates &id) + { + const int batch_id = id.z() / depth_input; + auto mm_result_ptr = reinterpret_cast<float *>(mm_result_it.ptr()); + + // Compute the leftover term due to b_offset. + int32_t row_sum = + *(reinterpret_cast<const int32_t *>(vector_sum_row_it.ptr() + batch_id * sum_row_stride_y) + + id.y() + (id.z() % depth_input) * height_input); + float scaled_b_offset_term_f32 = row_sum * b_offset * scale; + + const float32x4_t b_offset_term_f32_vec = vdupq_n_f32(scaled_b_offset_term_f32); + + int x = window_start_x; + for (; x <= (window_end_x - window_step_x); x += window_step_x) + { + float32x4x4_t in_f32 = {{vld1q_f32(mm_result_ptr + x + 0), vld1q_f32(mm_result_ptr + x + 4), + vld1q_f32(mm_result_ptr + x + 8), vld1q_f32(mm_result_ptr + x + 12)}}; + + // Add the offset terms to GEMM's result + in_f32.val[0] = vaddq_f32(in_f32.val[0], b_offset_term_f32_vec); + in_f32.val[1] = vaddq_f32(in_f32.val[1], b_offset_term_f32_vec); + in_f32.val[2] = vaddq_f32(in_f32.val[2], b_offset_term_f32_vec); + in_f32.val[3] = vaddq_f32(in_f32.val[3], b_offset_term_f32_vec); + + // Store the result with the offset contribution + vst1q_f32(mm_result_ptr + x + 0, in_f32.val[0]); + vst1q_f32(mm_result_ptr + x + 4, in_f32.val[1]); + vst1q_f32(mm_result_ptr + x + 8, in_f32.val[2]); + vst1q_f32(mm_result_ptr + x + 12, in_f32.val[3]); + } + + // Left-overs loop + for (; x < window_end_x; ++x) + { + // Add the offset terms to GEMM's result + // Store the result with the offset contribution + mm_result_ptr[x] += scaled_b_offset_term_f32; + } + }, + vector_sum_row_it, mm_result_it); + } + else if ((a_offset != 0) && (b_offset == 0) && (vector_sum_col != nullptr)) // true, false + { + // Set window for vector_sum_col + Window win_vector_sum_col(collapsed_window); + win_vector_sum_col.set(Window::DimY, Window::Dimension(0, 0, 0)); + win_vector_sum_col.set(Window::DimZ, Window::Dimension(0, 0, 0)); + + Iterator vector_sum_col_it(vector_sum_col, win_vector_sum_col); + + // Offset in case vector_sum_col is batched + const int vector_sum_col_batch_offset = + slide_vector_sum_col ? 
vector_sum_col->info()->strides_in_bytes().z() : 0; + + execute_window_loop( + collapsed_window, + [&](const Coordinates &id) + { + const int batch_id = id.z() / depth_input; + const size_t batch_offset_col = + batch_id * + (sum_col_stride_y); // Value to offset vector_sum_col_ptr to allow for iteration of y values in tensor + auto vector_sum_col_ptr = reinterpret_cast<const int32_t *>(vector_sum_col_it.ptr() + batch_offset_col + + batch_id * vector_sum_col_batch_offset); + auto mm_result_ptr = reinterpret_cast<float *>(mm_result_it.ptr()); + + int x = window_start_x; + for (; x <= (window_end_x - window_step_x); x += window_step_x) + { + // Compute the leftover term due to a_offset. + int32x4x4_t a_offset_term_s32 = { + {vld1q_s32(vector_sum_col_ptr + x + 0), vld1q_s32(vector_sum_col_ptr + x + 4), + vld1q_s32(vector_sum_col_ptr + x + 8), vld1q_s32(vector_sum_col_ptr + x + 12)}}; + + a_offset_term_s32.val[0] = vmulq_n_s32(a_offset_term_s32.val[0], a_offset); + a_offset_term_s32.val[1] = vmulq_n_s32(a_offset_term_s32.val[1], a_offset); + a_offset_term_s32.val[2] = vmulq_n_s32(a_offset_term_s32.val[2], a_offset); + a_offset_term_s32.val[3] = vmulq_n_s32(a_offset_term_s32.val[3], a_offset); + + float32x4x4_t a_offset_term_scaled = {{ + vmulq_n_f32(vcvtq_f32_s32(a_offset_term_s32.val[0]), scale), + vmulq_n_f32(vcvtq_f32_s32(a_offset_term_s32.val[1]), scale), + vmulq_n_f32(vcvtq_f32_s32(a_offset_term_s32.val[2]), scale), + vmulq_n_f32(vcvtq_f32_s32(a_offset_term_s32.val[3]), scale), + }}; + + float32x4x4_t in_f32 = {{vld1q_f32(mm_result_ptr + x + 0), vld1q_f32(mm_result_ptr + x + 4), + vld1q_f32(mm_result_ptr + x + 8), vld1q_f32(mm_result_ptr + x + 12)}}; + + // Add the offset terms to GEMM's result + in_f32.val[0] = vaddq_f32(in_f32.val[0], a_offset_term_scaled.val[0]); + in_f32.val[1] = vaddq_f32(in_f32.val[1], a_offset_term_scaled.val[1]); + in_f32.val[2] = vaddq_f32(in_f32.val[2], a_offset_term_scaled.val[2]); + in_f32.val[3] = vaddq_f32(in_f32.val[3], a_offset_term_scaled.val[3]); + + // Store the result with the offset contribution + vst1q_f32(mm_result_ptr + x + 0, in_f32.val[0]); + vst1q_f32(mm_result_ptr + x + 4, in_f32.val[1]); + vst1q_f32(mm_result_ptr + x + 8, in_f32.val[2]); + vst1q_f32(mm_result_ptr + x + 12, in_f32.val[3]); + } + + // Left-overs loop + for (; x < window_end_x; ++x) + { + // Compute the leftover term due to a_offset. 
+ const int32_t a_offset_term_s32 = *(vector_sum_col_ptr + x); + + // Add the offset terms to GEMM's result + // Store the result with the offset contribution + mm_result_ptr[x] += a_offset_term_s32 * a_offset * scale; + } + }, + vector_sum_col_it, mm_result_it); + } + else // false, false + { + // No offset contribution from matrix A and matrix B + return; + } +} + void run_offset_contribution(const Window &window, ITensor *mm_result, const ITensor *vector_sum_col, @@ -361,7 +632,8 @@ void CpuGemmLowpOffsetContributionKernel::configure(ITensorInfo *mm_result, ITensorInfo *vector_sum_row, int32_t k, int32_t a_offset, - int32_t b_offset) + int32_t b_offset, + float scale) { // Perform validate step ARM_COMPUTE_UNUSED(vector_sum_row); @@ -370,10 +642,11 @@ void CpuGemmLowpOffsetContributionKernel::configure(ITensorInfo *mm_result, _a_offset = a_offset; _b_offset = b_offset; - _k_offset = a_offset * b_offset * k; + _k = k; - // If a_offset == 0, vector_sum_col can be a nullptr - if (a_offset != 0) + _scale = scale; + + if (vector_sum_col != nullptr) { // Check if vector_sum_col_shape should be slidden or not // Don't slide vector_sum_col_shape along the y dimension if vector_sum_col_shape has just 1 dimension and vector_sum_row_shape more than 1 @@ -386,6 +659,21 @@ void CpuGemmLowpOffsetContributionKernel::configure(ITensorInfo *mm_result, ICpuKernel::configure(win); } +void CpuGemmLowpOffsetContributionKernel::set_a_offset(int32_t a_offset) +{ + _a_offset = a_offset; +} + +void CpuGemmLowpOffsetContributionKernel::set_b_offset(int32_t b_offset) +{ + _b_offset = b_offset; +} + +void CpuGemmLowpOffsetContributionKernel::set_scale(float scale) +{ + _scale = scale; +} + Status CpuGemmLowpOffsetContributionKernel::validate(const ITensorInfo *mm_result, const ITensorInfo *vector_sum_col, const ITensorInfo *vector_sum_row, @@ -410,8 +698,18 @@ void CpuGemmLowpOffsetContributionKernel::run_op(ITensorPack &tensors, const Win const bool reinterpret_as_3d = vector_sum_row != nullptr && mm_result->info()->num_dimensions() > 1 && mm_result->info()->tensor_shape().y() != vector_sum_row->info()->tensor_shape().x(); - run_offset_contribution(window, mm_result, vector_sum_col, vector_sum_row, _a_offset, _b_offset, _k_offset, - _slide_vector_sum_col, reinterpret_as_3d); + // check to see what is the output type of result + auto k_offset = _a_offset * _b_offset * _k; + if (mm_result->info()->data_type() == DataType::F32) + { + run_offset_contribution_float(window, mm_result, vector_sum_col, vector_sum_row, _a_offset, _b_offset, k_offset, + _scale, _slide_vector_sum_col, reinterpret_as_3d); + } + else + { + run_offset_contribution(window, mm_result, vector_sum_col, vector_sum_row, _a_offset, _b_offset, k_offset, + _slide_vector_sum_col, reinterpret_as_3d); + } } const char *CpuGemmLowpOffsetContributionKernel::name() const diff --git a/src/cpu/kernels/CpuGemmLowpOffsetContributionKernel.h b/src/cpu/kernels/CpuGemmLowpOffsetContributionKernel.h index 08b2d47529..ecbfb0c282 100644 --- a/src/cpu/kernels/CpuGemmLowpOffsetContributionKernel.h +++ b/src/cpu/kernels/CpuGemmLowpOffsetContributionKernel.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2022 Arm Limited. + * Copyright (c) 2017-2022,2024 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -21,12 +21,14 @@ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ -#ifndef ARM_COMPUTE_CPU_GEMMLOWP_OFFSETCONTRIBUTION_KERNEL_H -#define ARM_COMPUTE_CPU_GEMMLOWP_OFFSETCONTRIBUTION_KERNEL_H +#ifndef ACL_SRC_CPU_KERNELS_CPUGEMMLOWPOFFSETCONTRIBUTIONKERNEL_H +#define ACL_SRC_CPU_KERNELS_CPUGEMMLOWPOFFSETCONTRIBUTIONKERNEL_H #include "src/core/common/Macros.h" #include "src/cpu/ICpuKernel.h" +#include <cstdint> + namespace arm_compute { namespace cpu @@ -62,13 +64,16 @@ public: * @param[in] k Number of matrix A columns or Matrix B rows * @param[in] a_offset Offset to be added to each element of the matrix A. * @param[in] b_offset Offset to be added to each element of the matrix B. + * @param[in] scale (Optional) multiplies the contribution to make it the same scale as the dst in the case where mm_result is float + * (and so has already been scaled). Default is 1.0 */ void configure(ITensorInfo *mm_result, ITensorInfo *vector_sum_col, ITensorInfo *vector_sum_row, int32_t k, int32_t a_offset, - int32_t b_offset); + int32_t b_offset, + float scale = 1.0f); /** Static function to check if given info will lead to a valid configuration * * Similar to CpuGemmLowpOffsetContributionKernel::configure() @@ -81,6 +86,29 @@ public: int32_t a_offset, int32_t b_offset); + /** Set the a offset + * Warning: if a_offset is non-zero then vector_sum_col must be set in run_op. + * Run configure or validate again if you aren't sure + * + * @param[in] a_offset Offset to be added to each element of the matrix A. + */ + void set_a_offset(int32_t a_offset); + + /** Set the b offset + * Warning: if b_offset is non-zero then vector_sum_row must be set in run_op. + * Run configure or validate again if you aren't sure + * + * @param[in] b_offset Offset to be added to each element of the matrix B. + */ + void set_b_offset(int32_t b_offset); + + /** Set the dequantize scale + * + * @param[in] scale Multiplies the contribution to make it the same scale as the dst in the case where + * mm_result is float (and so has already been scaled). + */ + void set_scale(float scale); + // Inherited methods overridden: void run_op(ITensorPack &tensors, const Window &window, const ThreadInfo &info) override; const char *name() const override; @@ -88,10 +116,11 @@ public: private: int32_t _a_offset{0}; int32_t _b_offset{0}; - int32_t _k_offset{0}; + int32_t _k{0}; // Number of columns of A or rows of B, used in last offset term + float _scale{1.0}; bool _slide_vector_sum_col{true}; }; } // namespace kernels } // namespace cpu } // namespace arm_compute -#endif /* ARM_COMPUTE_CPU_GEMMLOWP_OFFSETCONTRIBUTION_KERNEL_H */ +#endif // ACL_SRC_CPU_KERNELS_CPUGEMMLOWPOFFSETCONTRIBUTIONKERNEL_H diff --git a/src/cpu/kernels/CpuGemmLowpOffsetContributionOutputStageKernel.cpp b/src/cpu/kernels/CpuGemmLowpOffsetContributionOutputStageKernel.cpp index d008842398..3c113f2828 100644 --- a/src/cpu/kernels/CpuGemmLowpOffsetContributionOutputStageKernel.cpp +++ b/src/cpu/kernels/CpuGemmLowpOffsetContributionOutputStageKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2021, 2023 Arm Limited. + * Copyright (c) 2019-2021, 2023-2024 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -919,7 +919,7 @@ void CpuGemmLowpOffsetContributionOutputStageKernel::configure(const ITensorInfo _a_offset = a_offset; _b_offset = b_offset; - _k_offset = a_offset * b_offset * k; + _k = k; _output_stage = output_stage; // If a_offset == 0, vector_sum_col can be a nullptr @@ -958,6 +958,16 @@ Status CpuGemmLowpOffsetContributionOutputStageKernel::validate(const ITensorInf return Status{}; } +void CpuGemmLowpOffsetContributionOutputStageKernel::set_a_offset(int32_t a_offset) +{ + _a_offset = a_offset; +} + +void CpuGemmLowpOffsetContributionOutputStageKernel::set_b_offset(int32_t b_offset) +{ + _b_offset = b_offset; +} + void CpuGemmLowpOffsetContributionOutputStageKernel::run_op(ITensorPack &tensors, const Window &window, const ThreadInfo &info) @@ -993,10 +1003,11 @@ void CpuGemmLowpOffsetContributionOutputStageKernel::run_op(ITensorPack &te // Check if symmetric per-channel execution const bool is_symm = _output_stage.is_quantized_per_channel; + auto k_offset = _a_offset * _b_offset * _k; if (is_symm) { run_offset_contribution_output_stage_symm(window, mm_result, vector_sum_col, vector_sum_row, bias, dst, - _a_offset, _b_offset, _k_offset, _is_vector_sum_col_batched, + _a_offset, _b_offset, k_offset, _is_vector_sum_col_batched, _output_stage, reinterpret_as_3d, is_bounded_relu, is_fixed_point); } else @@ -1004,13 +1015,13 @@ void CpuGemmLowpOffsetContributionOutputStageKernel::run_op(ITensorPack &te if (is_signed) { run_offset_contribution_output_stage<int8_t>( - window, mm_result, vector_sum_col, vector_sum_row, bias, dst, _a_offset, _b_offset, _k_offset, + window, mm_result, vector_sum_col, vector_sum_row, bias, dst, _a_offset, _b_offset, k_offset, _is_vector_sum_col_batched, _output_stage, reinterpret_as_3d, is_bounded_relu, is_fixed_point); } else { run_offset_contribution_output_stage<uint8_t>( - window, mm_result, vector_sum_col, vector_sum_row, bias, dst, _a_offset, _b_offset, _k_offset, + window, mm_result, vector_sum_col, vector_sum_row, bias, dst, _a_offset, _b_offset, k_offset, _is_vector_sum_col_batched, _output_stage, reinterpret_as_3d, is_bounded_relu, is_fixed_point); } } diff --git a/src/cpu/kernels/CpuGemmLowpOffsetContributionOutputStageKernel.h b/src/cpu/kernels/CpuGemmLowpOffsetContributionOutputStageKernel.h index af477d4756..ff706ff3dc 100644 --- a/src/cpu/kernels/CpuGemmLowpOffsetContributionOutputStageKernel.h +++ b/src/cpu/kernels/CpuGemmLowpOffsetContributionOutputStageKernel.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2022 Arm Limited. + * Copyright (c) 2019-2022, 2024 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -21,8 +21,8 @@ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ -#ifndef ARM_COMPUTE_CPU_GEMMLOWP_OFFSETCONTRIBUTION_OUTPUTSTAGE_KERNEL_H -#define ARM_COMPUTE_CPU_GEMMLOWP_OFFSETCONTRIBUTION_OUTPUTSTAGE_KERNEL_H +#ifndef ACL_SRC_CPU_KERNELS_CPUGEMMLOWPOFFSETCONTRIBUTIONOUTPUTSTAGEKERNEL_H +#define ACL_SRC_CPU_KERNELS_CPUGEMMLOWPOFFSETCONTRIBUTIONOUTPUTSTAGEKERNEL_H #include "arm_compute/core/KernelDescriptors.h" @@ -110,6 +110,22 @@ public: int32_t b_offset, GEMMLowpOutputStageInfo output_stage); + /** Set the a offset + * Warning: if a_offset is non-zero then vector_sum_col must be set in run_op. + * Run configure or validate again if you aren't sure + * + * @param[in] a_offset Offset to be added to each element of the matrix A. 
+ */ + void set_a_offset(int32_t a_offset); + + /** Set the b offset + * Warning: if b_offset is non-zero then vector_sum_col must be set in run_op. + * Run configure or validate again if you aren't sure + * + * @param[in] b_offset Offset to be added to each element of the matrix B. + */ + void set_b_offset(int32_t b_offset); + // Inherited methods overridden: void run_op(ITensorPack &tensors, const Window &window, const ThreadInfo &info) override; const char *name() const override; @@ -118,11 +134,11 @@ private: /** Function to use for the particular tensors passed to configure() */ int32_t _a_offset{0}; int32_t _b_offset{0}; - int32_t _k_offset{0}; + int32_t _k{0}; // Number of columns of A or rows of B, used in last offset term bool _is_vector_sum_col_batched{true}; GEMMLowpOutputStageInfo _output_stage{GEMMLowpOutputStageInfo()}; }; } // namespace kernels } // namespace cpu } // namespace arm_compute -#endif /* ARM_COMPUTE_CPU_GEMMLOWP_OFFSETCONTRIBUTION_OUTPUTSTAGE_KERNEL_H */ +#endif // ACL_SRC_CPU_KERNELS_CPUGEMMLOWPOFFSETCONTRIBUTIONOUTPUTSTAGEKERNEL_H diff --git a/src/cpu/kernels/assembly/arm_gemm.hpp b/src/cpu/kernels/assembly/arm_gemm.hpp index 5d7cf79857..941fed0ba8 100644 --- a/src/cpu/kernels/assembly/arm_gemm.hpp +++ b/src/cpu/kernels/assembly/arm_gemm.hpp @@ -260,6 +260,19 @@ public: } }; +struct DequantizeFloat +{ +public: + float scale = 0; + + DequantizeFloat() = default; + + // Constructor + DequantizeFloat(const float scale) : scale(scale) + { + } +}; + struct Nothing { }; diff --git a/src/cpu/kernels/assembly/gemm_common.hpp b/src/cpu/kernels/assembly/gemm_common.hpp index 4825814e31..45d1e43274 100644 --- a/src/cpu/kernels/assembly/gemm_common.hpp +++ b/src/cpu/kernels/assembly/gemm_common.hpp @@ -166,6 +166,12 @@ public: { } + /*** Dequanize scale interface (optional) ***/ + /* Set the dequantize scale for GEMMs when converting from int to float (float out = scale * float(int out) ) */ + virtual void set_dequantize_scale(const float) + { + } + /*** Introspection interface ***/ /* Get the configuration of this GEMM */ virtual GemmConfig get_config() = 0; |
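Note on the new F32 path: when the destination is F32, mm_result already holds scale * sum_k(a_q[k] * b_q[k]), so the remaining terms of the expansion sum_k((a_q + a_off) * (b_q + b_off)) = sum_k(a_q * b_q) + a_off * sum(b_q) + b_off * sum(a_q) + K * a_off * b_off must also be scaled before they are added, which is exactly what run_offset_contribution_float() does. A minimal scalar sketch of the per-element computation performed by the NEON code above, with illustrative names that are not part of the ACL API (whatever sign convention the caller uses for a_offset/b_offset carries through unchanged):

#include <cstdint>
#include <vector>

// mm_result is M x N row-major and already scaled to F32; vector_sum_col holds the
// per-column reductions of matrix B, vector_sum_row the per-row reductions of matrix A.
void offset_contribution_f32_ref(std::vector<float>         &mm_result,
                                 const std::vector<int32_t> &vector_sum_col, // size N
                                 const std::vector<int32_t> &vector_sum_row, // size M
                                 int M, int N, int K,
                                 int32_t a_offset, int32_t b_offset, float scale)
{
    const int32_t k_offset = a_offset * b_offset * K; // same product run_op() now derives from _k
    for (int y = 0; y < M; ++y)
    {
        for (int x = 0; x < N; ++x)
        {
            const int32_t term = k_offset + a_offset * vector_sum_col[x] + b_offset * vector_sum_row[y];
            // Convert and scale the S32 term so it matches the already scaled F32 GEMM result.
            mm_result[y * N + x] += static_cast<float>(term) * scale;
        }
    }
}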
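The switch from caching _k_offset = a_offset * b_offset * k at configure() time to storing _k and recomputing the product in run_op() matters once set_a_offset() and set_b_offset() can change the offsets after configuration: a cached product would go stale. A rough sketch of that design choice with plain types, assuming a caller that only learns the real quantization parameters at run time (how the caller derives the offsets and scale from its QuantizationInfo is outside this diff):

#include <cstdint>

// Illustrative stand-in for the kernel's new state; not the ACL class itself.
struct OffsetContributionState
{
    int32_t a_offset{0};
    int32_t b_offset{0};
    int32_t k{0};        // columns of A / rows of B, kept instead of the old cached k_offset
    float   scale{1.0f};

    // Equivalent of set_a_offset()/set_b_offset()/set_scale(): safe to call between runs.
    void update(int32_t new_a_offset, int32_t new_b_offset, float new_scale)
    {
        a_offset = new_a_offset;
        b_offset = new_b_offset;
        scale    = new_scale;
    }

    // Equivalent of the run_op() change: the k_offset term is recomputed on every run,
    // so it always reflects the latest offsets.
    int32_t current_k_offset() const
    {
        return a_offset * b_offset * k;
    }
};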
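On the assembly side, the DequantizeFloat descriptor and GemmCommon::set_dequantize_scale() carry the same scale down to arm_gemm; per the header comment the backend applies float out = scale * float(int out). A one-line sketch of that relation (helper name is illustrative, not an arm_gemm symbol):

#include <cstdint>

inline float dequantize_accumulator(int32_t acc, float scale)
{
    return scale * static_cast<float>(acc); // float out = scale * float(int out)
}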