author    Pablo Tello <pablo.tello@arm.com>    2017-11-13 16:44:08 +0000
committer Anthony Barbier <anthony.barbier@arm.com>    2018-11-02 16:35:24 +0000
commit    6681d24ccc084a0d98d84edadc8aeb5416159261 (patch)
tree      e90b7a771df5daac6bc2ac38d70d906fc9908e77 /src/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.cpp
parent    2f8e077378a757128540428b0a1318f80b6dea75 (diff)
download  ComputeLibrary-6681d24ccc084a0d98d84edadc8aeb5416159261.tar.gz
COMPMID-675 - Fixed mismatches in GEMMLowpMatrixMultiplyKernel dotproduct path
Change-Id: I791a08c1e333ce6fc5d537f50ab731fbe066e9c9
Reviewed-on: http://mpd-gerrit.cambridge.arm.com/95737
Tested-by: Kaizen <jeremy.johnson+kaizengerrit@arm.com>
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
Reviewed-by: Gian Marco Iodice <gianmarco.iodice@arm.com>
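
Background note (not part of the original commit message): the dot-product path used to remap the unsigned 8-bit operands to signed by subtracting 128 and compensated by subtracting 128 from a_offset and b_offset as well; this patch switches to the native gemm_u8_12x8 kernel, so the compensation is removed. Below is a minimal sketch of the arithmetic identity the old code relied on, assuming the QASYMM8 (value - offset) convention; the variable names are illustrative only, not the library's API.

#include <cassert>

int main()
{
    // Each GEMMLowp term has the form (value - offset), matching the
    // QASYMM8 convention real = scale * (quantized - offset).
    // The old s8 dot-product path remapped u8 inputs to s8 by
    // subtracting 128 and shifted the offset by the same amount,
    // which leaves every term, and hence the accumulated result,
    // unchanged:
    //   a_u8 - a_offset == (a_u8 - 128) - (a_offset - 128)
    const int a_offset = 3; // example quantization offset
    for(int a_u8 = 0; a_u8 < 256; ++a_u8)
    {
        const int a_s8 = a_u8 - 128; // what the old path fed the kernel
        assert(a_u8 - a_offset == a_s8 - (a_offset - 128));
    }
    return 0;
}

The removed hunk in the diff below is exactly that compensation; with the u8 kernel the operands stay unsigned end to end.
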
Diffstat (limited to 'src/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.cpp')
-rw-r--r-- src/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.cpp | 13
1 file changed, 4 insertions(+), 9 deletions(-)
diff --git a/src/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.cpp b/src/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.cpp
index 929ee41220..0fff6c9ca1 100644
--- a/src/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.cpp
+++ b/src/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.cpp
@@ -41,7 +41,7 @@
namespace arm_compute
{
#include "arm_compute/core/NEON/kernels/assembly/gemm_interleaved.hpp"
-#include "arm_compute/core/NEON/kernels/assembly/kernels/a64_gemm_s8_12x8.hpp"
+#include "arm_compute/core/NEON/kernels/assembly/kernels/a64_gemm_u8_12x8.hpp"
} // namespace arm_compute
using namespace arm_compute;
@@ -75,20 +75,15 @@ void NEGEMMLowpMatrixMultiplyCore::configure(const ITensor *a, const ITensor *b,
{
dot_product_path = true;
- // If the DOT product instruction is available, the computation will be performed in int8_t
- // In order to take into account this, we need to subtract -128 from a_offset and b_offset
- _a_offset -= 128;
- _b_offset -= 128;
-
// Configure matrix multiply kernel
struct CPUInfo ci = NEScheduler::get().cpu_info();
const int M = output->info()->tensor_shape().y();
const int N = output->info()->tensor_shape().x();
const int K = a->info()->tensor_shape().x();
- GemmInterleaved<gemm_s8_12x8, int8_t, int32_t> gemm(&ci, M, N, K, false, false);
- constexpr size_t alignment = 4096;
- _workspace.allocator()->init(TensorInfo(TensorShape{ (gemm.get_working_size() + alignment - 1) * NEScheduler::get().num_threads() }, 1, DataType::U8));
+ const size_t workbench_size = GemmInterleaved<gemm_u8_12x8, gemm_u8_12x8::operand_type, gemm_u8_12x8::result_type>(&ci, M, N, K, false, false).get_working_size();
+ constexpr size_t alignment = 4096;
+ _workspace.allocator()->init(TensorInfo(TensorShape{ (workbench_size + alignment - 1) * NEScheduler::get().num_threads() }, 1, DataType::U8));
_memory_group.manage(&_workspace);
// Configure matrix multiplication kernel
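
A side note on the workspace sizing visible above (illustrative, not the library's scheduler code): each thread is budgeted get_working_size() plus alignment - 1 spare bytes, so its slice of the shared U8 buffer can be rounded up to a 4096-byte boundary without overrunning the allocation. A rough sketch of that carve-up, using hypothetical helpers align_up and thread_workspace:

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

// Hypothetical helper: round ptr up to the next multiple of alignment
// (alignment must be a power of two, e.g. 4096).
static uint8_t *align_up(uint8_t *ptr, size_t alignment)
{
    const uintptr_t p = reinterpret_cast<uintptr_t>(ptr);
    return reinterpret_cast<uint8_t *>((p + alignment - 1) & ~(alignment - 1));
}

// Hypothetical per-thread slice: each slice spans
// (working_size + alignment - 1) bytes, so aligning its start consumes
// at most alignment - 1 bytes and never spills into the next slice.
static uint8_t *thread_workspace(uint8_t *base, size_t working_size, size_t alignment, unsigned int thread_id)
{
    uint8_t *slice = base + thread_id * (working_size + alignment - 1);
    return align_up(slice, alignment);
}

int main()
{
    const size_t       working_size = 1000; // stand-in for gemm.get_working_size()
    const size_t       alignment    = 4096;
    const unsigned int num_threads  = 4;    // stand-in for NEScheduler::get().num_threads()

    // Mirrors the total size passed to _workspace.allocator()->init() above.
    std::vector<uint8_t> buffer((working_size + alignment - 1) * num_threads);

    for(unsigned int t = 0; t < num_threads; ++t)
    {
        uint8_t *ws = thread_workspace(buffer.data(), working_size, alignment, t);
        assert(ws + working_size <= buffer.data() + buffer.size());
    }
    return 0;
}
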