aboutsummaryrefslogtreecommitdiff
path: root/src/core/NEON/kernels/NEGEMMMatrixAdditionKernel.cpp
diff options
context:
space:
mode:
authorPablo Tello <pablo.tello@arm.com>2017-06-28 17:27:56 +0100
committerAnthony Barbier <anthony.barbier@arm.com>2018-09-17 14:15:39 +0100
commit221f38176b0d4dbc212441779d9bbac3cc0eecfa (patch)
treee838d673b35c5b40d4b484a3645cc7ae3c9d3ecc /src/core/NEON/kernels/NEGEMMMatrixAdditionKernel.cpp
parent6410fb2a14427713251f5d97144ac5d4f17c988c (diff)
downloadComputeLibrary-221f38176b0d4dbc212441779d9bbac3cc0eecfa.tar.gz
COMPMID-421: Fixed FP16 support in Neon GEMM.
Fixed GEMM FP16 problem with matrices that are not a multiple of 32. Added a new test suite NEON/GEMM/Float16/SmallGEMM. Implemented an FP16 function to multiply a vector by a matrix. Change-Id: Ie6c692885a48d0206bd6fe748332fa83bc286d67 Reviewed-on: http://mpd-gerrit.cambridge.arm.com/79118 Tested-by: Kaizen <jeremy.johnson+kaizengerrit@arm.com> Reviewed-by: Moritz Pflanzer <moritz.pflanzer@arm.com>
Diffstat (limited to 'src/core/NEON/kernels/NEGEMMMatrixAdditionKernel.cpp')
-rw-r--r--src/core/NEON/kernels/NEGEMMMatrixAdditionKernel.cpp47
1 file changed, 6 insertions, 41 deletions
diff --git a/src/core/NEON/kernels/NEGEMMMatrixAdditionKernel.cpp b/src/core/NEON/kernels/NEGEMMMatrixAdditionKernel.cpp
index 71dd4c7aa1..7d659ab2e6 100644
--- a/src/core/NEON/kernels/NEGEMMMatrixAdditionKernel.cpp
+++ b/src/core/NEON/kernels/NEGEMMMatrixAdditionKernel.cpp
@@ -52,25 +52,8 @@ void matrix_addition_f32(const ITensor *input, ITensor *output, const Window &wi
const auto in_ptr = reinterpret_cast<const float *>(in.ptr());
const auto out_ptr = reinterpret_cast<float *>(out.ptr());
- float32x4x4_t alpha_ab =
- {
- {
- vld1q_f32(out_ptr + 0),
- vld1q_f32(out_ptr + 4),
- vld1q_f32(out_ptr + 8),
- vld1q_f32(out_ptr + 12)
- }
- };
-
- const float32x4x4_t c =
- {
- {
- vld1q_f32(in_ptr + 0),
- vld1q_f32(in_ptr + 4),
- vld1q_f32(in_ptr + 8),
- vld1q_f32(in_ptr + 12)
- }
- };
+ float32x4x4_t alpha_ab = vld4q_f32(out_ptr);
+ const float32x4x4_t c = vld4q_f32(in_ptr);
// Multiply matrix C by its weight and accumulate
alpha_ab.val[0] = vmlaq_f32(alpha_ab.val[0], c.val[0], beta_f32);
@@ -78,10 +61,7 @@ void matrix_addition_f32(const ITensor *input, ITensor *output, const Window &wi
alpha_ab.val[2] = vmlaq_f32(alpha_ab.val[2], c.val[2], beta_f32);
alpha_ab.val[3] = vmlaq_f32(alpha_ab.val[3], c.val[3], beta_f32);
- vst1q_f32(out_ptr + 0, alpha_ab.val[0]);
- vst1q_f32(out_ptr + 4, alpha_ab.val[1]);
- vst1q_f32(out_ptr + 8, alpha_ab.val[2]);
- vst1q_f32(out_ptr + 12, alpha_ab.val[3]);
+ vst4q_f32(out_ptr, alpha_ab);
},
in, out);
}
@@ -99,28 +79,13 @@ void matrix_addition_f16(const ITensor *input, ITensor *output, const Window &wi
const auto in_ptr = reinterpret_cast<const float16_t *>(in.ptr());
const auto out_ptr = reinterpret_cast<float16_t *>(out.ptr());
- float16x8x2_t alpha_ab =
- {
- {
- vld1q_f16(out_ptr + 0),
- vld1q_f16(out_ptr + 8)
- }
- };
-
- float16x8x2_t c =
- {
- {
- vld1q_f16(in_ptr + 0),
- vld1q_f16(in_ptr + 8)
- }
- };
-
+ float16x8x2_t alpha_ab = vld2q_f16(out_ptr);
+ const float16x8x2_t c = vld2q_f16(in_ptr);
// Multiply matrix C by its weight and accumulate
alpha_ab.val[0] = vaddq_f16(alpha_ab.val[0], vmulq_f16(c.val[0], beta_f16));
alpha_ab.val[1] = vaddq_f16(alpha_ab.val[1], vmulq_f16(c.val[1], beta_f16));
- vst1q_f16(out_ptr + 0, alpha_ab.val[0]);
- vst1q_f16(out_ptr + 8, alpha_ab.val[1]);
+ vst2q_f16(out_ptr + 0, alpha_ab);
},
in, out);
}