aboutsummaryrefslogtreecommitdiff
path: root/src/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.cpp
diff options
context:
space:
mode:
authormorgolock <pablo.tello@arm.com>2020-09-01 11:14:27 +0100
committerPablo Marquez <pablo.tello@arm.com>2020-09-03 09:38:33 +0000
commitec4dee8c68a3d0f6d63db184bfb2f4589429778e (patch)
treeb4e2da6d11bd81075f1b0404151d780a13600d12 /src/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.cpp
parent12e0209d9369f9aea14946453ad1b0887292a6ba (diff)
downloadComputeLibrary-ec4dee8c68a3d0f6d63db184bfb2f4589429778e.tar.gz
COMPMID-3750: Disable asm kernels when shifts are negative.
Change-Id: I65a738221a6c6fc3527ececda42f7a7e547755c1 Signed-off-by: morgolock <pablo.tello@arm.com> Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/3896 Tested-by: Arm Jenkins <bsgcomp@arm.com> Reviewed-by: Gian Marco Iodice <gianmarco.iodice@arm.com> Reviewed-by: Michele Di Giorgio <michele.digiorgio@arm.com> Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Diffstat (limited to 'src/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.cpp')
-rw-r--r--src/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.cpp29
1 file changed, 25 insertions, 4 deletions
diff --git a/src/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.cpp b/src/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.cpp
index 83db146a8a..dada6d16da 100644
--- a/src/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.cpp
+++ b/src/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.cpp
@@ -117,8 +117,18 @@ void NEGEMMLowpMatrixMultiplyCore::configure(const ITensor *a, const ITensor *b,
{
if(is_data_type_quantized_asymmetric(a_to_use->info()->data_type()) && info.gemmlowp_output_stage().type == GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT)
{
- _asm_glue.configure(a_to_use, b, c, output, gemm_info);
- _fused_assembly_path = _asm_glue.is_configured();
+ // Result shifts < 0 are not supported by asm kernels
+ const std::vector<int32_t> &shifts = info.gemmlowp_output_stage().gemmlowp_shifts;
+ const bool is_asm_supported = info.gemmlowp_output_stage().gemmlowp_shift >= 0
+ && std::all_of(shifts.cbegin(), shifts.cend(), [](int32_t val)
+ {
+ return val >= 0;
+ });
+ if(is_asm_supported)
+ {
+ _asm_glue.configure(a_to_use, b, c, output, gemm_info);
+ _fused_assembly_path = _asm_glue.is_configured();
+ }
}
else
{
@@ -329,8 +339,19 @@ Status NEGEMMLowpMatrixMultiplyCore::validate(const ITensorInfo *a, const ITenso
bool run_optimised_requantized = false;
if(is_data_type_quantized_asymmetric(a_to_use->data_type()) && info.gemmlowp_output_stage().type == GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT)
{
- run_optimised = bool(NEGEMMAssemblyDispatch::validate(a_to_use, b, c, output, gemm_info));
- run_optimised_requantized = run_optimised;
+ // Result shifts < 0 are not supported by asm kernels
+ const std::vector<int32_t> &shifts = info.gemmlowp_output_stage().gemmlowp_shifts;
+ const bool is_asm_supported = info.gemmlowp_output_stage().gemmlowp_shift >= 0
+ && std::all_of(shifts.cbegin(), shifts.cend(), [](int32_t val)
+ {
+ return val >= 0;
+ });
+
+ if(is_asm_supported)
+ {
+ run_optimised = bool(NEGEMMAssemblyDispatch::validate(a_to_use, b, c, output, gemm_info));
+ run_optimised_requantized = run_optimised;
+ }
}
else
{