From a084b46835d20fdfe6e590b91b7ca64fba3542df Mon Sep 17 00:00:00 2001
From: Aleksandr Nikolaev
Date: Thu, 25 Jun 2020 12:25:52 +0100
Subject: [ONCPUML-97]: Implement "int8" support for 2D decomposition at high core counts

Interleaved2d functionality was extended to uint8 and int8 kernels.

Change-Id: If78facbce56e9ec7b2f4c23436af0bd5db7f7b69
Signed-off-by: Aleksandr Nikolaev
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/3467
Tested-by: Arm Jenkins
Reviewed-by: Georgios Pinitas
Comments-Addressed: Arm Jenkins
---
 arm_compute/graph/Types.h                             |  2 +-
 src/core/NEON/kernels/arm_gemm/gemm_int8.cpp          | 19 +++++++++++++++++--
 src/core/NEON/kernels/arm_gemm/gemm_qint8.cpp         | 10 +++++++++-
 src/core/NEON/kernels/arm_gemm/gemm_quint8.cpp        | 10 +++++++++-
 src/core/NEON/kernels/arm_gemm/gemm_uint8.cpp         | 19 +++++++++++++++++--
 src/core/NEON/kernels/assembly/arm_gemm.hpp           |  1 +
 src/core/NEON/kernels/assembly/gemm_common.hpp        |  2 +-
 src/runtime/NEON/functions/NEGEMMAssemblyDispatch.cpp |  8 +++++++-
 8 files changed, 62 insertions(+), 9 deletions(-)

diff --git a/arm_compute/graph/Types.h b/arm_compute/graph/Types.h
index cb2f7fe0ca..3a4d0a6070 100644
--- a/arm_compute/graph/Types.h
+++ b/arm_compute/graph/Types.h
@@ -79,7 +79,7 @@ struct TensorDescriptor;
 /** Graph configuration structure */
 struct GraphConfig
 {
-    bool use_function_memory_manager{ true };   /**< Use a memory manager to manage per-funcion auxilary memory */
+    bool use_function_memory_manager{ true };   /**< Use a memory manager to manage per-function auxilary memory */
     bool use_function_weights_manager{ true };  /**< Use a weights manager to manage transformed weights */
     bool use_transition_memory_manager{ true }; /**< Use a memory manager to manager transition buffer memory */
     bool use_tuner{ false };                    /**< Use a tuner in tunable backends */
diff --git a/src/core/NEON/kernels/arm_gemm/gemm_int8.cpp b/src/core/NEON/kernels/arm_gemm/gemm_int8.cpp
index d1d137e090..3ee47492db 100644
--- a/src/core/NEON/kernels/arm_gemm/gemm_int8.cpp
+++ b/src/core/NEON/kernels/arm_gemm/gemm_int8.cpp
@@ -28,6 +28,7 @@
 #include "gemm_hybrid.hpp"
 #include "gemm_implementation.hpp"
 #include "gemm_interleaved.hpp"
+#include "gemm_interleaved_pretransposed_2d.hpp"
 
 #include "kernels/a64_gemm_s16_12x8.hpp"
 #include "kernels/a64_gemm_s8_12x8.hpp"
@@ -106,16 +107,30 @@ static const GemmImplementation gemm_s8_methods[] = {
     [](const GemmArgs &args) { return args._Nsize<=256 && args._Ksize>128; },
     [](const GemmArgs &args) { return new GemmHybrid(args); }
 },
+{
+    GemmMethod::GEMM_INTERLEAVED_2D,
+    "gemm_s8_12x8_2d",
+    [](const GemmArgs &args) { return args._ci->has_dotprod(); },
+    [](const GemmArgs &args) { return (args._maxthreads >= 8) && (args._Msize >= 8) && (args._Nsize >= 8); },
+    [](const GemmArgs &args) { return new GemmInterleavedPretransposed2d(args); }
+},
 {
     GemmMethod::GEMM_INTERLEAVED,
-    "gemm_s8_12x8",
+    "gemm_s8_12x8_1d",
     [](const GemmArgs &args) { return args._ci->has_dotprod(); },
     nullptr,
     [](const GemmArgs &args) { return new GemmInterleaved(args); }
 },
+{
+    GemmMethod::GEMM_INTERLEAVED_2D,
+    "gemm_s8_4x4_2d",
+    nullptr,
+    [](const GemmArgs &args) { return (args._maxthreads >= 8) && (args._Msize >= 8) && (args._Nsize >= 8); },
+    [](const GemmArgs &args) { return new GemmInterleavedPretransposed2d(args); }
+},
 {
     GemmMethod::GEMM_INTERLEAVED,
-    "gemm_s8_4x4",
+    "gemm_s8_4x4_1d",
     nullptr,
     nullptr,
     [](const GemmArgs &args) { return new GemmInterleaved(args); }
diff --git a/src/core/NEON/kernels/arm_gemm/gemm_qint8.cpp b/src/core/NEON/kernels/arm_gemm/gemm_qint8.cpp
index 67f28d38e2..04cac6095c 100644
--- a/src/core/NEON/kernels/arm_gemm/gemm_qint8.cpp
+++ b/src/core/NEON/kernels/arm_gemm/gemm_qint8.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019 Arm Limited.
+ * Copyright (c) 2019-2020 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -75,6 +75,14 @@ static const GemmImplementation gemm_qint8_methods
     [](const GemmArgs &args, const Requantize32 &) { return args._Nsize<=256 && args._Ksize>128; },
     [](const GemmArgs &args, const Requantize32 &qp) { return new GemmHybridQuantized(args, qp); }
 },
+/** QUANTIZE_WRAPPER_2D enables 2D parallelisation hint for IScheduler in NEGEMMAssemblyDispatch */
+{
+    GemmMethod::QUANTIZE_WRAPPER_2D,
+    "quantized_wrapper_2d",
+    nullptr,
+    [](const GemmArgs &args, const Requantize32 &) { return (args._maxthreads >= 8) && (args._Msize >= 8) && (args._Nsize >= 8);},
+    [](const GemmArgs &args, const Requantize32 &qp) { return new QuantizeWrapper(args, qp); }
+},
 {
     GemmMethod::QUANTIZE_WRAPPER,
     "quantized_wrapper",
diff --git a/src/core/NEON/kernels/arm_gemm/gemm_quint8.cpp b/src/core/NEON/kernels/arm_gemm/gemm_quint8.cpp
index b9e2bf6c26..7b08041005 100644
--- a/src/core/NEON/kernels/arm_gemm/gemm_quint8.cpp
+++ b/src/core/NEON/kernels/arm_gemm/gemm_quint8.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019 Arm Limited.
+ * Copyright (c) 2019-2020 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -75,6 +75,14 @@ static const GemmImplementation gemm_quint8_meth
     [](const GemmArgs &args, const Requantize32 &) { return args._Nsize<=256 && args._Ksize>128; },
     [](const GemmArgs &args, const Requantize32 &qp) { return new GemmHybridQuantized(args, qp); }
 },
+/** QUANTIZE_WRAPPER_2D enables 2D parallelisation hint for IScheduler in NEGEMMAssemblyDispatch */
+{
+    GemmMethod::QUANTIZE_WRAPPER_2D,
+    "quantized_wrapper_2d",
+    nullptr,
+    [](const GemmArgs &args, const Requantize32 &) { return (args._maxthreads >= 8) && (args._Msize >= 8) && (args._Nsize >= 8);},
+    [](const GemmArgs &args, const Requantize32 &qp) { return new QuantizeWrapper(args, qp); }
+},
 {
     GemmMethod::QUANTIZE_WRAPPER,
     "quantized_wrapper",
diff --git a/src/core/NEON/kernels/arm_gemm/gemm_uint8.cpp b/src/core/NEON/kernels/arm_gemm/gemm_uint8.cpp
index d5a9e585b5..caab2e2cc2 100644
--- a/src/core/NEON/kernels/arm_gemm/gemm_uint8.cpp
+++ b/src/core/NEON/kernels/arm_gemm/gemm_uint8.cpp
@@ -27,6 +27,7 @@
 #include "gemm_common.hpp"
 #include "gemm_implementation.hpp"
 #include "gemm_interleaved.hpp"
+#include "gemm_interleaved_pretransposed_2d.hpp"
 #include "gemm_hybrid.hpp"
 
 #include "kernels/a64_gemm_u16_12x8.hpp"
@@ -106,16 +107,30 @@ static const GemmImplementation gemm_u8_methods[] = {
     [](const GemmArgs &args) { return args._Nsize<=256 && args._Ksize>128; },
     [](const GemmArgs &args) { return new GemmHybrid(args); }
 },
+{
+    GemmMethod::GEMM_INTERLEAVED_2D,
+    "gemm_u8_12x8_2d",
+    [](const GemmArgs &args) { return args._ci->has_dotprod(); },
+    [](const GemmArgs &args) { return (args._maxthreads >= 8) && (args._Msize >= 8) && (args._Nsize >= 8) ; },
+    [](const GemmArgs &args) { return new GemmInterleavedPretransposed2d(args); }
+},
 {
     GemmMethod::GEMM_INTERLEAVED,
-    "gemm_u8_12x8",
+    "gemm_u8_12x8_1d",
     [](const GemmArgs &args) { return args._ci->has_dotprod(); },
     nullptr,
     [](const GemmArgs &args) { return new GemmInterleaved(args); }
 },
+{
+    GemmMethod::GEMM_INTERLEAVED_2D,
+    "gemm_u8_4x4_2d",
+    nullptr,
+    [](const GemmArgs &args) { return (args._maxthreads >= 8) && (args._Msize >= 8) && (args._Nsize >= 8); },
+    [](const GemmArgs &args) { return new GemmInterleavedPretransposed2d(args); }
+},
 {
     GemmMethod::GEMM_INTERLEAVED,
-    "gemm_u8_4x4",
+    "gemm_u8_4x4_1d",
     nullptr,
     nullptr,
     [](const GemmArgs &args) { return new GemmInterleaved(args); }
diff --git a/src/core/NEON/kernels/assembly/arm_gemm.hpp b/src/core/NEON/kernels/assembly/arm_gemm.hpp
index 1a613e2160..e3e15478fb 100644
--- a/src/core/NEON/kernels/assembly/arm_gemm.hpp
+++ b/src/core/NEON/kernels/assembly/arm_gemm.hpp
@@ -42,6 +42,7 @@ enum class GemmMethod
     GEMM_INTERLEAVED,
     GEMM_INTERLEAVED_2D,
     QUANTIZE_WRAPPER,
+    QUANTIZE_WRAPPER_2D,
     GEMM_HYBRID_QUANTIZED
 };
 
diff --git a/src/core/NEON/kernels/assembly/gemm_common.hpp b/src/core/NEON/kernels/assembly/gemm_common.hpp
index 8feecf440e..e9e56842c7 100644
--- a/src/core/NEON/kernels/assembly/gemm_common.hpp
+++ b/src/core/NEON/kernels/assembly/gemm_common.hpp
@@ -77,7 +77,7 @@ public:
         return false;
     }
 
-    /** Main execute member fucntion
+    /** Main execute member function
      * @param [in] work_range specifies the range of work we want to be computed, total range defined by get_window_size()
      * @param [in] thread_locator where are we inside of the thread space
      * @naram [in] threadid a unique threadid
diff --git a/src/runtime/NEON/functions/NEGEMMAssemblyDispatch.cpp b/src/runtime/NEON/functions/NEGEMMAssemblyDispatch.cpp
index b09ea2ec57..8e9f393da5 100644
--- a/src/runtime/NEON/functions/NEGEMMAssemblyDispatch.cpp
+++ b/src/runtime/NEON/functions/NEGEMMAssemblyDispatch.cpp
@@ -438,12 +438,18 @@ void Fallback::run()
         const int granule_threshold = 200;
         scheduling_hint = IScheduler::Hints(Window::DimX, IScheduler::StrategyHint::DYNAMIC, granule_threshold);
     }
-    else if(_kernel_info.method == arm_gemm::GemmMethod::GEMM_INTERLEAVED_2D && (_d->info()->data_type() == DataType::F32 || _d->info()->data_type() == DataType::F16))
+    else if(_kernel_info.method == arm_gemm::GemmMethod::GEMM_INTERLEAVED_2D && (_d->info()->data_type() == DataType::F32 || _d->info()->data_type() == DataType::F16 || _d->info()->data_type() == DataType::U8 || _d->info()->data_type() == DataType::S8) )
     {
         //GEMM_INTERLEAVED supports 2D parallelism, IScheduler::split_dimensions_all signals to parallelise over all window dimensions
         const int granule_threshold = 200;
         scheduling_hint = IScheduler::Hints(IScheduler::split_dimensions_all, IScheduler::StrategyHint::STATIC, granule_threshold);
     }
+    else if(_kernel_info.method == arm_gemm::GemmMethod::QUANTIZE_WRAPPER_2D && (_d->info()->data_type() == DataType::QASYMM8 || _d->info()->data_type() == DataType::QASYMM8_SIGNED))
+    {
+        //special case for QASYMM8 to support 2D parallelism, scheduler here may be tweaked differently compared to FP32 case
+        const int granule_threshold = 200;
+        scheduling_hint = IScheduler::Hints(IScheduler::split_dimensions_all, IScheduler::StrategyHint::STATIC, granule_threshold);
+    }
 
     NEScheduler::get().schedule(_optimised_kernel.get(), scheduling_hint);
 }
-- 
cgit v1.2.1
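
Note on the selection heuristic used by the new entries above: each *_2d and quantized_wrapper_2d row is only recommended when at least 8 threads are available and both output dimensions are at least 8, and it is listed ahead of its 1D counterpart so the 2D decomposition is preferred whenever that gate holds. The following is a minimal, self-contained C++ sketch of that pattern, not part of the patch and not the real arm_gemm code: Args, Entry, wants_2d and pick are hypothetical stand-ins for GemmArgs, GemmImplementation and the library's implementation-lookup logic, and "skip an entry when its recommendation hint fails" is a simplification of the library's actual preference ordering; only the >= 8 thresholds and the entry ordering are taken from the patch itself.

// Simplified, hypothetical stand-ins for arm_gemm's GemmArgs / GemmImplementation table.
#include <cstdio>
#include <functional>

enum class Method { GEMM_INTERLEAVED_2D, GEMM_INTERLEAVED };

struct Args
{
    unsigned int maxthreads;  // threads the scheduler may use
    unsigned int Msize;       // output rows
    unsigned int Nsize;       // output columns
    bool         has_dotprod; // CPU supports the dot-product extension
};

struct Entry
{
    Method                            method;
    const char                       *name;
    std::function<bool(const Args &)> is_supported;   // empty => always supported
    std::function<bool(const Args &)> is_recommended; // empty => always recommended
};

// The gate added by this patch: only prefer the 2D decomposition when there are
// enough threads and the output is at least 8x8 in both dimensions.
static bool wants_2d(const Args &a)
{
    return (a.maxthreads >= 8) && (a.Msize >= 8) && (a.Nsize >= 8);
}

// 2D entries are listed ahead of their 1D fallbacks, mirroring the table order in gemm_int8.cpp.
static const Entry table[] = {
    { Method::GEMM_INTERLEAVED_2D, "gemm_s8_12x8_2d", [](const Args &a) { return a.has_dotprod; }, wants_2d },
    { Method::GEMM_INTERLEAVED,    "gemm_s8_12x8_1d", [](const Args &a) { return a.has_dotprod; }, {} },
    { Method::GEMM_INTERLEAVED_2D, "gemm_s8_4x4_2d",  {},                                          wants_2d },
    { Method::GEMM_INTERLEAVED,    "gemm_s8_4x4_1d",  {},                                          {} },
};

// Simplified selection: take the first entry that is supported and, when a
// recommendation hint is present, also recommended for these arguments.
static const Entry *pick(const Args &a)
{
    for(const Entry &e : table)
    {
        if(e.is_supported && !e.is_supported(a))
        {
            continue;
        }
        if(e.is_recommended && !e.is_recommended(a))
        {
            continue;
        }
        return &e;
    }
    return nullptr;
}

int main()
{
    const Args big{ 16, 512, 512, true };  // many threads, large output
    const Args small{ 4, 16, 16, true };   // too few threads for the 2D gate
    std::printf("%s\n", pick(big)->name);   // prints: gemm_s8_12x8_2d
    std::printf("%s\n", pick(small)->name); // prints: gemm_s8_12x8_1d
    return 0;
}

Run as written, the sketch selects the 2D variant for the large, heavily threaded case and falls back to the 1D interleaved kernel when only four threads are available, which is the behaviour the new table entries aim for at high core counts.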