author    Georgios Pinitas <georgios.pinitas@arm.com>  2019-10-14 19:03:09 +0100
committer Georgios Pinitas <georgios.pinitas@arm.com>  2019-10-23 12:08:12 +0000
commit    48b3ef89de5f21a0169d8416e3d54081f82c7bf8 (patch)
tree      f857d733ccf446c704823dc7ac796a96eb55095e /src/core/NEON/kernels/arm_gemm/gemm_quint8.cpp
parent    1dce3101ef8d77c8cf0af7dfd4af6595a0136b91 (diff)
download  ComputeLibrary-48b3ef89de5f21a0169d8416e3d54081f82c7bf8.tar.gz
COMPMID-2577: Fuse bias addition and activation in gemm assembly kernels
Change-Id: I7f52112d2d05b1ea3d3f3d4b19b8eafab05d6c44
Signed-off-by: Georgios Pinitas <georgios.pinitas@arm.com>
Reviewed-on: https://review.mlplatform.org/c/2141
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Pablo Marquez <pablo.tello@arm.com>
Diffstat (limited to 'src/core/NEON/kernels/arm_gemm/gemm_quint8.cpp')
-rw-r--r--   src/core/NEON/kernels/arm_gemm/gemm_quint8.cpp   32
1 file changed, 16 insertions(+), 16 deletions(-)
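
For orientation before reading the hunks: each brace-initialized block in the diff below is one entry in the gemm_quint8_methods selection table, listing in order a GemmMethod tag, a kernel name, an optional is-supported predicate, an optional is-recommended heuristic, and a factory lambda that builds the kernel. What follows is a minimal, self-contained C++ sketch of that shape and of how such a table might be walked; the names QuantizedGemmImpl, GemmCommonBase and choose, and the empty stand-in definitions of GemmArgs and ARequantizeLayer32, are illustrative assumptions for this sketch, not the library's actual declarations.

    #include <cstddef>
    #include <functional>

    enum class GemmMethod { GEMM_HYBRID_QUANTIZED, QUANTIZE_WRAPPER, DEFAULT };

    // Empty stand-ins for the library types referenced in the diff.
    struct GemmArgs {};            // post-patch: no longer templated on the operand type
    struct ARequantizeLayer32 {};  // requantization parameters (output stage)
    struct GemmCommonBase {};      // stand-in for the common GEMM interface

    // Approximate shape of one selection-table entry.
    struct QuantizedGemmImpl {
        GemmMethod  method;
        const char *name;
        // Hard constraints; nullptr means "always supported".
        std::function<bool(const GemmArgs &, const ARequantizeLayer32 &)> is_supported;
        // Heuristic preference; nullptr means "no opinion".
        std::function<bool(const GemmArgs &, const ARequantizeLayer32 &)> is_recommended;
        // Builds the chosen kernel wrapper.
        std::function<GemmCommonBase *(const GemmArgs &, const ARequantizeLayer32 &)> instantiate;
    };

    // Illustrative selection walk: return the first entry that is both supported
    // and recommended, falling back to the first supported entry otherwise.
    inline QuantizedGemmImpl *choose(QuantizedGemmImpl *table, std::size_t n,
                                     const GemmArgs &args, const ARequantizeLayer32 &qp) {
        QuantizedGemmImpl *fallback = nullptr;
        for (std::size_t i = 0; i < n; i++) {
            QuantizedGemmImpl &c = table[i];
            if (c.is_supported && !c.is_supported(args, qp)) {
                continue;  // hard constraint failed
            }
            if (!c.is_recommended || c.is_recommended(args, qp)) {
                return &c; // supported and recommended (or no heuristic given)
            }
            if (fallback == nullptr) {
                fallback = &c;  // supported but not preferred; remember as fallback
            }
        }
        return fallback;
    }

    // One entry mirroring the post-patch form in the diff: the predicates now take a
    // plain GemmArgs (previously GemmArgs<uint8_t>), and the alpha/beta equality checks
    // have been dropped from the support predicates.
    QuantizedGemmImpl example = {
        GemmMethod::GEMM_HYBRID_QUANTIZED,
        "hybrid_u8u32_dot_16x4",
        [](const GemmArgs &, const ARequantizeLayer32 &) { return true; },   // is_supported
        [](const GemmArgs &, const ARequantizeLayer32 &) { return false; },  // is_recommended
        [](const GemmArgs &, const ARequantizeLayer32 &) -> GemmCommonBase * { return nullptr; }  // instantiate
    };
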
diff --git a/src/core/NEON/kernels/arm_gemm/gemm_quint8.cpp b/src/core/NEON/kernels/arm_gemm/gemm_quint8.cpp
index a402332552..079c04ae06 100644
--- a/src/core/NEON/kernels/arm_gemm/gemm_quint8.cpp
+++ b/src/core/NEON/kernels/arm_gemm/gemm_quint8.cpp
@@ -42,45 +42,45 @@ static const GemmImplementation<uint8_t, uint8_t, ARequantizeLayer32> gemm_quint
{
GemmMethod::GEMM_HYBRID_QUANTIZED,
"smallK_hybrid_u8u32_dot_1VLx8",
- [](const GemmArgs<uint8_t> &args, const ARequantizeLayer32 &) { return args._Ksize<=64 && args._alpha==1 && args._beta==0 && !args._trA && args._pretransposed_hint; },
+ [](const GemmArgs &args, const ARequantizeLayer32 &) { return args._Ksize<=64 && !args._trA && args._pretransposed_hint; },
nullptr,
- [](const GemmArgs<uint8_t> &args, const ARequantizeLayer32 &qp) { return new GemmHybridQuantized<smallK_hybrid_u8u32_dot_1VLx8, uint8_t, uint8_t>(args, qp); }
+ [](const GemmArgs &args, const ARequantizeLayer32 &qp) { return new GemmHybridQuantized<smallK_hybrid_u8u32_dot_1VLx8, uint8_t, uint8_t>(args, qp); }
},
{
GemmMethod::GEMM_HYBRID_QUANTIZED,
"hybrid_u8u32_dot_4VLx4",
- [](const GemmArgs<uint8_t> &args, const ARequantizeLayer32 &) { return args._Ksize>=16 && args._alpha==1 && !args._trA && !args._trB && args._pretransposed_hint; },
- [](const GemmArgs<uint8_t> &args, const ARequantizeLayer32 &) { return ((args._Ksize <= 128) && (args._Nsize <= 128)) || ((args._nmulti > 1) && ((args._Msize / args._maxthreads) < 8)); },
- [](const GemmArgs<uint8_t> &args, const ARequantizeLayer32 &qp) { return new GemmHybridQuantized<hybrid_u8u32_dot_4VLx4, uint8_t, uint8_t>(args, qp); }
+ [](const GemmArgs &args, const ARequantizeLayer32 &) { return args._Ksize>=16 && !args._trA && !args._trB && args._pretransposed_hint; },
+ [](const GemmArgs &args, const ARequantizeLayer32 &) { return ((args._Ksize <= 128) && (args._Nsize <= 128)) || ((args._nmulti > 1) && ((args._Msize / args._maxthreads) < 8)); },
+ [](const GemmArgs &args, const ARequantizeLayer32 &qp) { return new GemmHybridQuantized<hybrid_u8u32_dot_4VLx4, uint8_t, uint8_t>(args, qp); }
},
#endif
{
GemmMethod::GEMM_HYBRID_QUANTIZED,
"smallK_hybrid_u8u32_dot_4x8",
- [](const GemmArgs<uint8_t> &args, const ARequantizeLayer32 &) { return args._ci->has_dotprod() && (args._Nsize % 4 == 0) && (args._Ksize<=32) && args._alpha==1 && args._beta==0 && !args._trA && args._pretransposed_hint; },
+ [](const GemmArgs &args, const ARequantizeLayer32 &) { return args._ci->has_dotprod() && (args._Nsize % 4 == 0) && (args._Ksize<=32) && !args._trA && args._pretransposed_hint; },
nullptr,
- [](const GemmArgs<uint8_t> &args, const ARequantizeLayer32 &qp) { return new GemmHybridQuantized<smallK_hybrid_u8u32_dot_4x8, uint8_t, uint8_t>(args, qp); }
+ [](const GemmArgs &args, const ARequantizeLayer32 &qp) { return new GemmHybridQuantized<smallK_hybrid_u8u32_dot_4x8, uint8_t, uint8_t>(args, qp); }
},
{
GemmMethod::GEMM_HYBRID_QUANTIZED,
"smallK_hybrid_u8u32_dot_4x6",
- [](const GemmArgs<uint8_t> &args, const ARequantizeLayer32 &) { return args._ci->has_dotprod() && (args._Nsize % 4 == 0) && (args._Ksize>32) && (args._Ksize<=64) && args._alpha==1 && args._beta==0 && !args._trA && args._pretransposed_hint; },
+ [](const GemmArgs &args, const ARequantizeLayer32 &) { return args._ci->has_dotprod() && (args._Nsize % 4 == 0) && (args._Ksize>32) && (args._Ksize<=64) && !args._trA && args._pretransposed_hint; },
nullptr,
- [](const GemmArgs<uint8_t> &args, const ARequantizeLayer32 &qp) { return new GemmHybridQuantized<smallK_hybrid_u8u32_dot_4x6, uint8_t, uint8_t>(args, qp); }
+ [](const GemmArgs &args, const ARequantizeLayer32 &qp) { return new GemmHybridQuantized<smallK_hybrid_u8u32_dot_4x6, uint8_t, uint8_t>(args, qp); }
},
{
GemmMethod::GEMM_HYBRID_QUANTIZED,
"hybrid_u8u32_dot_16x4",
- [](const GemmArgs<uint8_t> &args, const ARequantizeLayer32 &) { return args._ci->has_dotprod() && args._Ksize>=16 && !args._trA && !args._trB && args._pretransposed_hint; },
- [](const GemmArgs<uint8_t> &args, const ARequantizeLayer32 &) { return args._Nsize<=256 && args._Ksize>128; },
- [](const GemmArgs<uint8_t> &args, const ARequantizeLayer32 &qp) { return new GemmHybridQuantized<hybrid_u8u32_dot_16x4, uint8_t, uint8_t>(args, qp); }
+ [](const GemmArgs &args, const ARequantizeLayer32 &) { return args._ci->has_dotprod() && args._Ksize>=16 && !args._trA && !args._trB && args._pretransposed_hint; },
+ [](const GemmArgs &args, const ARequantizeLayer32 &) { return args._Nsize<=256 && args._Ksize>128; },
+ [](const GemmArgs &args, const ARequantizeLayer32 &qp) { return new GemmHybridQuantized<hybrid_u8u32_dot_16x4, uint8_t, uint8_t>(args, qp); }
},
{
GemmMethod::QUANTIZE_WRAPPER,
"quantized_wrapper",
nullptr,
nullptr,
- [](const GemmArgs<uint8_t> &args, const ARequantizeLayer32 &qp) { return new QuantizeWrapper<uint8_t, uint8_t, uint32_t>(args, qp); }
+ [](const GemmArgs &args, const ARequantizeLayer32 &qp) { return new QuantizeWrapper<uint8_t, uint8_t, uint32_t>(args, qp); }
},
{
GemmMethod::DEFAULT,
@@ -96,9 +96,9 @@ const GemmImplementation<uint8_t, uint8_t, ARequantizeLayer32> *gemm_implementat
return gemm_quint8_methods;
}
-template UniqueGemmCommon<uint8_t, uint8_t> gemm<uint8_t, uint8_t, ARequantizeLayer32>(const GemmArgs<uint8_t> &args, const ARequantizeLayer32 &os);
-template KernelDescription get_gemm_method<uint8_t, uint8_t, ARequantizeLayer32>(const GemmArgs<uint8_t> &args, const ARequantizeLayer32 &os);
-template std::vector<KernelDescription> get_compatible_kernels<uint8_t, uint8_t, ARequantizeLayer32>(const GemmArgs<uint8_t> &args, const ARequantizeLayer32 &os);
+template UniqueGemmCommon<uint8_t, uint8_t> gemm<uint8_t, uint8_t, ARequantizeLayer32>(const GemmArgs &args, const ARequantizeLayer32 &os);
+template KernelDescription get_gemm_method<uint8_t, uint8_t, ARequantizeLayer32>(const GemmArgs &args, const ARequantizeLayer32 &os);
+template std::vector<KernelDescription> get_compatible_kernels<uint8_t, uint8_t, ARequantizeLayer32>(const GemmArgs &args, const ARequantizeLayer32 &os);
} // namespace arm_gemm