author    Pablo Tello <pablo.tello@arm.com>    2017-11-02 16:09:35 +0000
committer Anthony Barbier <anthony.barbier@arm.com>    2018-11-02 16:35:24 +0000
commit    6ff12a0f7765f62b8d0fa8554021e1cac2789f19 (patch)
tree      9338db697789106b49ea391634be8b3c08ef9f97 /src/core
parent    97988a4b3ef0f840432daf95b6e4b2ad7e5feefd (diff)
COMPMID-662: Integrated the new a64_s8_gemm_12x8 + dot product kernel into ACL.
Change-Id: Id8f919e486a132fc58346c9f84fccbeeb83d19b3
Reviewed-on: http://mpd-gerrit.cambridge.arm.com/94233
Tested-by: Kaizen <jeremy.johnson+kaizengerrit@arm.com>
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
Reviewed-by: Gian Marco Iodice <gianmarco.iodice@arm.com>
Diffstat (limited to 'src/core')
-rw-r--r--  src/core/NEON/kernels/NEGEMMLowpFinalizeKernel.cpp           |  38
-rw-r--r--  src/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.cpp     | 130
-rw-r--r--  src/core/NEON/kernels/NEGEMMLowpReductionKernel.cpp          | 174
-rw-r--r--  src/core/NEON/kernels/arm64/NEGEMMLowpAArch64V8P4Kernel.cpp  | 483
4 files changed, 218 insertions, 607 deletions
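At its core, this change swaps the unsigned NEON widening multiply-accumulate chain for its signed counterpart throughout the lowp GEMM path. For reference, a minimal standalone sketch of the before/after pattern (illustrative only, not taken from the patch; the helper names are hypothetical):

#include <arm_neon.h>

// Before: unsigned path, u8 widened to u16, accumulated into u32 lanes.
uint32x4_t mac_u8(uint32x4_t acc, uint8x8_t a, uint8x8_t b)
{
    const uint16x4_t a16 = vget_low_u16(vmovl_u8(a));
    const uint16x4_t b16 = vget_low_u16(vmovl_u8(b));
    return vmlal_u16(acc, b16, a16);
}

// After: signed path, s8 widened to s16, accumulated into s32 lanes.
int32x4_t mac_s8(int32x4_t acc, int8x8_t a, int8x8_t b)
{
    const int16x4_t a16 = vget_low_s16(vmovl_s8(a));
    const int16x4_t b16 = vget_low_s16(vmovl_s8(b));
    return vmlal_s16(acc, b16, a16);
}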
diff --git a/src/core/NEON/kernels/NEGEMMLowpFinalizeKernel.cpp b/src/core/NEON/kernels/NEGEMMLowpFinalizeKernel.cpp
index 400c6d9d8c..255e486365 100644
--- a/src/core/NEON/kernels/NEGEMMLowpFinalizeKernel.cpp
+++ b/src/core/NEON/kernels/NEGEMMLowpFinalizeKernel.cpp
@@ -145,7 +145,7 @@ void NEGEMMLowpFinalizeKernel::finalize(const Window &window)
in_s32.val[3] = vshlq_s32(in_s32.val[3], shift_s32);
- // Convert S32 to U16
+ // Convert S32 to S16
- const int16x8x2_t in_u16 =
+ const int16x8x2_t in_s16 =
{
{
vcombine_s16(vqmovn_s32(in_s32.val[0]), vqmovn_s32(in_s32.val[1])),
@@ -153,10 +153,10 @@ void NEGEMMLowpFinalizeKernel::finalize(const Window &window)
}
};
- // Convert U16 to U8
- const uint8x16_t out_u8 = vcombine_u8(vqmovun_s16(in_u16.val[0]), vqmovun_s16(in_u16.val[1]));
+ // Convert S16 to S8
+ const int8x16_t out_s8 = vcombine_s8(vqmovn_s16(in_s16.val[0]), vqmovn_s16(in_s16.val[1]));
- vst1q_u8(out.ptr(), out_u8);
+ vst1q_s8(reinterpret_cast<int8_t *>(out.ptr()), out_s8);
},
vector_sum_col, vector_sum_row, mm_result, out);
}
@@ -209,7 +209,7 @@ void NEGEMMLowpFinalizeKernel::finalize(const Window &window)
in_s32.val[3] = vshlq_s32(in_s32.val[3], shift_s32);
- // Convert S32 to U16
+ // Convert S32 to S16
- const int16x8x2_t in_u16 =
+ const int16x8x2_t in_s16 =
{
{
vcombine_s16(vqmovn_s32(in_s32.val[0]), vqmovn_s32(in_s32.val[1])),
@@ -217,10 +217,10 @@ void NEGEMMLowpFinalizeKernel::finalize(const Window &window)
}
};
- // Convert U16 to U8
- const uint8x16_t out_u8 = vcombine_u8(vqmovun_s16(in_u16.val[0]), vqmovun_s16(in_u16.val[1]));
+ // Convert S16 to S8
+ const int8x16_t out_s8 = vcombine_s8(vqmovn_s16(in_s16.val[0]), vqmovn_s16(in_s16.val[1]));
- vst1q_u8(out.ptr(), out_u8);
+ vst1q_s8(reinterpret_cast<int8_t *>(out.ptr()), out_s8);
},
vector_sum_row, mm_result, out);
}
@@ -295,8 +295,8 @@ void NEGEMMLowpFinalizeKernel::finalize(const Window &window)
in_s32.val[2] = vshlq_s32(in_s32.val[2], shift_s32);
in_s32.val[3] = vshlq_s32(in_s32.val[3], shift_s32);
- // Convert S32 to U16
- const int16x8x2_t in_u16 =
+ // Convert S32 to S16
+ const int16x8x2_t in_s16 =
{
{
vcombine_s16(vqmovn_s32(in_s32.val[0]), vqmovn_s32(in_s32.val[1])),
@@ -304,10 +304,10 @@ void NEGEMMLowpFinalizeKernel::finalize(const Window &window)
}
};
- // Convert U16 to U8
- const uint8x16_t out_u8 = vcombine_u8(vqmovun_s16(in_u16.val[0]), vqmovun_s16(in_u16.val[1]));
+ // Convert S16 to S8
+ const int8x16_t out_s8 = vcombine_s8(vqmovn_s16(in_s16.val[0]), vqmovn_s16(in_s16.val[1]));
- vst1q_u8(out.ptr(), out_u8);
+ vst1q_s8(reinterpret_cast<int8_t *>(out.ptr()), out_s8);
},
vector_sum_col, mm_result, out);
}
@@ -346,8 +346,8 @@ void NEGEMMLowpFinalizeKernel::finalize(const Window &window)
in_s32.val[2] = vshlq_s32(in_s32.val[2], shift_s32);
in_s32.val[3] = vshlq_s32(in_s32.val[3], shift_s32);
- // Convert S32 to U16
- const int16x8x2_t in_u16 =
+ // Convert S32 to S16
+ const int16x8x2_t in_s16 =
{
{
vcombine_s16(vqmovn_s32(in_s32.val[0]), vqmovn_s32(in_s32.val[1])),
@@ -355,10 +355,10 @@ void NEGEMMLowpFinalizeKernel::finalize(const Window &window)
}
};
- // Convert U16 to U8
- const uint8x16_t out_u8 = vcombine_u8(vqmovun_s16(in_u16.val[0]), vqmovun_s16(in_u16.val[1]));
+ // Convert S16 to S8
+ const int8x16_t out_s8 = vcombine_s8(vqmovn_s16(in_s16.val[0]), vqmovn_s16(in_s16.val[1]));
- vst1q_u8(out.ptr(), out_u8);
+ vst1q_s8(reinterpret_cast<int8_t *>(out.ptr()), out_s8);
},
mm_result, out);
}
@@ -375,7 +375,7 @@ void NEGEMMLowpFinalizeKernel::configure(const ITensor *vector_sum_col, const IT
int32_t c_offset, int32_t c_mult_int, int32_t shift)
{
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(mm_result, 1, DataType::S32);
- ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::U8);
+ ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::S8);
TensorShape mm_result_shape = mm_result->info()->tensor_shape();
TensorShape output_shape = output->info()->tensor_shape();
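The finalize kernel now narrows with signed saturation at every step: vqmovn_s16 replaces the unsigned-saturating vqmovun_s16. A self-contained sketch of the S32 -> S16 -> S8 conversion used in the hunks above (the function name is hypothetical):

#include <arm_neon.h>

// Narrow sixteen S32 values to sixteen S8 values with signed saturation,
// mirroring the finalize kernel's pattern.
int8x16_t narrow_s32_to_s8(const int32x4x4_t &in_s32)
{
    // S32 -> S16 with signed saturation
    const int16x8x2_t in_s16 =
    {
        {
            vcombine_s16(vqmovn_s32(in_s32.val[0]), vqmovn_s32(in_s32.val[1])),
            vcombine_s16(vqmovn_s32(in_s32.val[2]), vqmovn_s32(in_s32.val[3]))
        }
    };
    // S16 -> S8 with signed saturation
    return vcombine_s8(vqmovn_s16(in_s16.val[0]), vqmovn_s16(in_s16.val[1]));
}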
diff --git a/src/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.cpp b/src/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.cpp
index 3e614a8bfc..4b9c9f3e64 100644
--- a/src/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.cpp
+++ b/src/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.cpp
@@ -52,7 +52,7 @@ NEGEMMLowpMatrixMultiplyKernel::NEGEMMLowpMatrixMultiplyKernel()
void NEGEMMLowpMatrixMultiplyKernel::configure(const ITensor *input0, const ITensor *input1, ITensor *output)
{
- ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input0, 1, DataType::U8);
+ ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input0, 1, DataType::S8);
ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input0, input1);
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::S32);
@@ -127,115 +127,115 @@ void NEGEMMLowpMatrixMultiplyKernel::run(const Window &window, const ThreadInfo
// All the values needed for computing a single 4x4 block will be read from consecutive memory positions
execute_window_loop(window, [&](const Coordinates & id)
{
- const uint8_t *mtx_a0 = ina.ptr();
- const uint8_t *mtx_b0 = inb.ptr();
+ auto *mtx_a0 = reinterpret_cast<const int8_t *>(ina.ptr());
+ auto *mtx_b0 = reinterpret_cast<const int8_t *>(inb.ptr());
- // Note: Since the input are all positives, we can use uint32_t
+ // Note: The inputs are signed 8-bit values, so the accumulators are int32_t
// Accumulators for the block 0
- uint32x4x4_t c0 =
+ int32x4x4_t c0 =
{
{
- vdupq_n_u32(0),
- vdupq_n_u32(0),
- vdupq_n_u32(0),
- vdupq_n_u32(0)
+ vdupq_n_s32(0),
+ vdupq_n_s32(0),
+ vdupq_n_s32(0),
+ vdupq_n_s32(0)
}
};
// Accumulators for the block 1
- uint32x4x4_t c1 =
+ int32x4x4_t c1 =
{
{
- vdupq_n_u32(0),
- vdupq_n_u32(0),
- vdupq_n_u32(0),
- vdupq_n_u32(0)
+ vdupq_n_s32(0),
+ vdupq_n_s32(0),
+ vdupq_n_s32(0),
+ vdupq_n_s32(0)
}
};
// Accumulators for the block 2
- uint32x4x4_t c2 =
+ int32x4x4_t c2 =
{
{
- vdupq_n_u32(0),
- vdupq_n_u32(0),
- vdupq_n_u32(0),
- vdupq_n_u32(0)
+ vdupq_n_s32(0),
+ vdupq_n_s32(0),
+ vdupq_n_s32(0),
+ vdupq_n_s32(0)
}
};
// Accumulators for the block 3
- uint32x4x4_t c3 =
+ int32x4x4_t c3 =
{
{
- vdupq_n_u32(0),
- vdupq_n_u32(0),
- vdupq_n_u32(0),
- vdupq_n_u32(0)
+ vdupq_n_s32(0),
+ vdupq_n_s32(0),
+ vdupq_n_s32(0),
+ vdupq_n_s32(0)
}
};
for(int k = 0; k < width_b; k += 16, mtx_a0 += 4, mtx_b0 += 16)
{
- const uint8x8_t a00_u8 = vld1_u8(mtx_a0);
- const uint8x16_t b00_u8 = vld1q_u8(mtx_b0);
+ const int8x8_t a00_s8 = vld1_s8(mtx_a0);
+ const int8x16_t b00_s8 = vld1q_s8(mtx_b0);
- // Convert a00_u8 to uint16_t and get the lower part
- const uint16x4_t a00_u16 = vget_low_u16(vmovl_u8(a00_u8));
+ // Convert a00_s8 to int16_t and get the lower part
+ const int16x4_t a00_s16 = vget_low_s16(vmovl_s8(a00_s8));
- // Convert b00_u8 to int16_t
- const uint16x4x4_t b00_u16 =
+ // Convert b00_s8 to int16_t
+ const int16x4x4_t b00_s16 =
{
{
- vget_low_u16(vmovl_u8(vget_low_u8(b00_u8))),
- vget_high_u16(vmovl_u8(vget_low_u8(b00_u8))),
- vget_low_u16(vmovl_u8(vget_high_u8(b00_u8))),
- vget_high_u16(vmovl_u8(vget_high_u8(b00_u8)))
+ vget_low_s16(vmovl_s8(vget_low_s8(b00_s8))),
+ vget_high_s16(vmovl_s8(vget_low_s8(b00_s8))),
+ vget_low_s16(vmovl_s8(vget_high_s8(b00_s8))),
+ vget_high_s16(vmovl_s8(vget_high_s8(b00_s8)))
}
};
// 4x4 block 0
- c0.val[0] = vmlal_lane_u16(c0.val[0], b00_u16.val[0], a00_u16, 0);
- c0.val[1] = vmlal_lane_u16(c0.val[1], b00_u16.val[1], a00_u16, 0);
- c0.val[2] = vmlal_lane_u16(c0.val[2], b00_u16.val[2], a00_u16, 0);
- c0.val[3] = vmlal_lane_u16(c0.val[3], b00_u16.val[3], a00_u16, 0);
+ c0.val[0] = vmlal_lane_s16(c0.val[0], b00_s16.val[0], a00_s16, 0);
+ c0.val[1] = vmlal_lane_s16(c0.val[1], b00_s16.val[1], a00_s16, 0);
+ c0.val[2] = vmlal_lane_s16(c0.val[2], b00_s16.val[2], a00_s16, 0);
+ c0.val[3] = vmlal_lane_s16(c0.val[3], b00_s16.val[3], a00_s16, 0);
// 4x4 block 1
- c1.val[0] = vmlal_lane_u16(c1.val[0], b00_u16.val[0], a00_u16, 1);
- c1.val[1] = vmlal_lane_u16(c1.val[1], b00_u16.val[1], a00_u16, 1);
- c1.val[2] = vmlal_lane_u16(c1.val[2], b00_u16.val[2], a00_u16, 1);
- c1.val[3] = vmlal_lane_u16(c1.val[3], b00_u16.val[3], a00_u16, 1);
+ c1.val[0] = vmlal_lane_s16(c1.val[0], b00_s16.val[0], a00_s16, 1);
+ c1.val[1] = vmlal_lane_s16(c1.val[1], b00_s16.val[1], a00_s16, 1);
+ c1.val[2] = vmlal_lane_s16(c1.val[2], b00_s16.val[2], a00_s16, 1);
+ c1.val[3] = vmlal_lane_s16(c1.val[3], b00_s16.val[3], a00_s16, 1);
// 4x4 block 2
- c2.val[0] = vmlal_lane_u16(c2.val[0], b00_u16.val[0], a00_u16, 2);
- c2.val[1] = vmlal_lane_u16(c2.val[1], b00_u16.val[1], a00_u16, 2);
- c2.val[2] = vmlal_lane_u16(c2.val[2], b00_u16.val[2], a00_u16, 2);
- c2.val[3] = vmlal_lane_u16(c2.val[3], b00_u16.val[3], a00_u16, 2);
+ c2.val[0] = vmlal_lane_s16(c2.val[0], b00_s16.val[0], a00_s16, 2);
+ c2.val[1] = vmlal_lane_s16(c2.val[1], b00_s16.val[1], a00_s16, 2);
+ c2.val[2] = vmlal_lane_s16(c2.val[2], b00_s16.val[2], a00_s16, 2);
+ c2.val[3] = vmlal_lane_s16(c2.val[3], b00_s16.val[3], a00_s16, 2);
// 4x4 block 3
- c3.val[0] = vmlal_lane_u16(c3.val[0], b00_u16.val[0], a00_u16, 3);
- c3.val[1] = vmlal_lane_u16(c3.val[1], b00_u16.val[1], a00_u16, 3);
- c3.val[2] = vmlal_lane_u16(c3.val[2], b00_u16.val[2], a00_u16, 3);
- c3.val[3] = vmlal_lane_u16(c3.val[3], b00_u16.val[3], a00_u16, 3);
+ c3.val[0] = vmlal_lane_s16(c3.val[0], b00_s16.val[0], a00_s16, 3);
+ c3.val[1] = vmlal_lane_s16(c3.val[1], b00_s16.val[1], a00_s16, 3);
+ c3.val[2] = vmlal_lane_s16(c3.val[2], b00_s16.val[2], a00_s16, 3);
+ c3.val[3] = vmlal_lane_s16(c3.val[3], b00_s16.val[3], a00_s16, 3);
}
auto mtx_out = reinterpret_cast<int32_t *>(out.ptr());
- vst1q_s32(mtx_out + 0 * out_stride + 0, vreinterpretq_s32_u32(c0.val[0]));
- vst1q_s32(mtx_out + 0 * out_stride + 4, vreinterpretq_s32_u32(c0.val[1]));
- vst1q_s32(mtx_out + 0 * out_stride + 8, vreinterpretq_s32_u32(c0.val[2]));
- vst1q_s32(mtx_out + 0 * out_stride + 12, vreinterpretq_s32_u32(c0.val[3]));
- vst1q_s32(mtx_out + 1 * out_stride + 0, vreinterpretq_s32_u32(c1.val[0]));
- vst1q_s32(mtx_out + 1 * out_stride + 4, vreinterpretq_s32_u32(c1.val[1]));
- vst1q_s32(mtx_out + 1 * out_stride + 8, vreinterpretq_s32_u32(c1.val[2]));
- vst1q_s32(mtx_out + 1 * out_stride + 12, vreinterpretq_s32_u32(c1.val[3]));
- vst1q_s32(mtx_out + 2 * out_stride + 0, vreinterpretq_s32_u32(c2.val[0]));
- vst1q_s32(mtx_out + 2 * out_stride + 4, vreinterpretq_s32_u32(c2.val[1]));
- vst1q_s32(mtx_out + 2 * out_stride + 8, vreinterpretq_s32_u32(c2.val[2]));
- vst1q_s32(mtx_out + 2 * out_stride + 12, vreinterpretq_s32_u32(c2.val[3]));
- vst1q_s32(mtx_out + 3 * out_stride + 0, vreinterpretq_s32_u32(c3.val[0]));
- vst1q_s32(mtx_out + 3 * out_stride + 4, vreinterpretq_s32_u32(c3.val[1]));
- vst1q_s32(mtx_out + 3 * out_stride + 8, vreinterpretq_s32_u32(c3.val[2]));
- vst1q_s32(mtx_out + 3 * out_stride + 12, vreinterpretq_s32_u32(c3.val[3]));
+ vst1q_s32(mtx_out + 0 * out_stride + 0, c0.val[0]);
+ vst1q_s32(mtx_out + 0 * out_stride + 4, c0.val[1]);
+ vst1q_s32(mtx_out + 0 * out_stride + 8, c0.val[2]);
+ vst1q_s32(mtx_out + 0 * out_stride + 12, c0.val[3]);
+ vst1q_s32(mtx_out + 1 * out_stride + 0, c1.val[0]);
+ vst1q_s32(mtx_out + 1 * out_stride + 4, c1.val[1]);
+ vst1q_s32(mtx_out + 1 * out_stride + 8, c1.val[2]);
+ vst1q_s32(mtx_out + 1 * out_stride + 12, c1.val[3]);
+ vst1q_s32(mtx_out + 2 * out_stride + 0, c2.val[0]);
+ vst1q_s32(mtx_out + 2 * out_stride + 4, c2.val[1]);
+ vst1q_s32(mtx_out + 2 * out_stride + 8, c2.val[2]);
+ vst1q_s32(mtx_out + 2 * out_stride + 12, c2.val[3]);
+ vst1q_s32(mtx_out + 3 * out_stride + 0, c3.val[0]);
+ vst1q_s32(mtx_out + 3 * out_stride + 4, c3.val[1]);
+ vst1q_s32(mtx_out + 3 * out_stride + 8, c3.val[2]);
+ vst1q_s32(mtx_out + 3 * out_stride + 12, c3.val[3]);
},
ina, inb, out);
}
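Each of the four 4x4 blocks above applies the same pattern: one lane of the widened A vector scales all four B columns per k-step. A sketch of one such step for block 0 (hypothetical helper; the lane index must be a compile-time constant for vmlal_lane_s16):

#include <arm_neon.h>

// One k-step for one 4x4 output block: b holds a 4x4 tile of widened B
// values, a holds four widened A values; lane 0 of a scales every column.
void block0_step(int32x4x4_t &c, int16x4_t a, const int16x4x4_t &b)
{
    c.val[0] = vmlal_lane_s16(c.val[0], b.val[0], a, 0);
    c.val[1] = vmlal_lane_s16(c.val[1], b.val[1], a, 0);
    c.val[2] = vmlal_lane_s16(c.val[2], b.val[2], a, 0);
    c.val[3] = vmlal_lane_s16(c.val[3], b.val[3], a, 0);
}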
diff --git a/src/core/NEON/kernels/NEGEMMLowpReductionKernel.cpp b/src/core/NEON/kernels/NEGEMMLowpReductionKernel.cpp
index 3f841bbf59..9df13ce0e3 100644
--- a/src/core/NEON/kernels/NEGEMMLowpReductionKernel.cpp
+++ b/src/core/NEON/kernels/NEGEMMLowpReductionKernel.cpp
@@ -51,7 +51,7 @@ INEGEMMLowpReductionKernel::INEGEMMLowpReductionKernel()
void NEGEMMLowpMatrixAReductionKernel::configure(const ITensor *mtx_a_interleaved4x4, ITensor *vector_sum_row, int32_t num_mtx_a_cols, bool is_interleaved4x4)
{
- ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(mtx_a_interleaved4x4, 1, DataType::U8);
+ ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(mtx_a_interleaved4x4, 1, DataType::S8);
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(vector_sum_row, 1, DataType::S32);
_input = mtx_a_interleaved4x4;
@@ -97,9 +97,9 @@ void NEGEMMLowpMatrixAReductionKernel::run(const Window &window, const ThreadInf
execute_window_loop(collapsed_window, [&](const Coordinates & id)
{
- // Note: Since the input is unsigned char, we can safely use unsigned int for the accumulation
+ // Note: The input is signed char, so the accumulation is done in signed int
- uint32x4_t sum_row = vdupq_n_u32(0);
+ int32x4_t sum_row = vdupq_n_s32(0);
- const uint8_t *matrix_a = in.ptr() + (id.x() / 4) * _input->info()->strides_in_bytes()[1] + id.y() * _input->info()->strides_in_bytes()[2];
+ auto matrix_a = reinterpret_cast<const int8_t *>(in.ptr() + (id.x() / 4) * _input->info()->strides_in_bytes()[1] + id.y() * _input->info()->strides_in_bytes()[2]);
#if __arm__
asm volatile("PLD [%0, #128*4]" ::"r"(matrix_a));
@@ -109,43 +109,43 @@ void NEGEMMLowpMatrixAReductionKernel::run(const Window &window, const ThreadInf
// This for loop performs 4 accumulations
for(; i <= (_k - 4); i += 4)
{
- const uint8x16_t a0_u8 = vld1q_u8(matrix_a + i * 4);
+ const int8x16_t a0_s8 = vld1q_s8(matrix_a + i * 4);
- // Convert U8 to U16
+ // Convert S8 to S16
- uint16x4x4_t a0_u16 =
+ int16x4x4_t a0_s16 =
{
{
- vget_low_u16(vmovl_u8(vget_low_u8(a0_u8))),
- vget_high_u16(vmovl_u8(vget_low_u8(a0_u8))),
- vget_low_u16(vmovl_u8(vget_high_u8(a0_u8))),
- vget_high_u16(vmovl_u8(vget_high_u8(a0_u8)))
+ vget_low_s16(vmovl_s8(vget_low_s8(a0_s8))),
+ vget_high_s16(vmovl_s8(vget_low_s8(a0_s8))),
+ vget_low_s16(vmovl_s8(vget_high_s8(a0_s8))),
+ vget_high_s16(vmovl_s8(vget_high_s8(a0_s8)))
}
};
- // Accumulate to U16
+ // Accumulate to S16
- a0_u16.val[0] = vadd_u16(a0_u16.val[0], a0_u16.val[1]);
- a0_u16.val[0] = vadd_u16(a0_u16.val[0], a0_u16.val[2]);
- a0_u16.val[0] = vadd_u16(a0_u16.val[0], a0_u16.val[3]);
+ a0_s16.val[0] = vadd_s16(a0_s16.val[0], a0_s16.val[1]);
+ a0_s16.val[0] = vadd_s16(a0_s16.val[0], a0_s16.val[2]);
+ a0_s16.val[0] = vadd_s16(a0_s16.val[0], a0_s16.val[3]);
- // Accumulate to U32
+ // Accumulate to S32
- sum_row = vaddw_u16(sum_row, a0_u16.val[0]);
+ sum_row = vaddw_s16(sum_row, a0_s16.val[0]);
}
// This for loop performs the leftover accumulations
for(; i < _k; ++i)
{
- const uint8x8_t a0_u8 = vld1_u8(matrix_a + i * 4);
+ const int8x8_t a0_s8 = vld1_s8(matrix_a + i * 4);
- // Convert U8 to U16
+ // Convert S8 to S16
- const uint16x4_t a0_u16 = vget_low_u16(vmovl_u8(a0_u8));
+ const int16x4_t a0_s16 = vget_low_s16(vmovl_s8(a0_s8));
- // Accumulate to U32
+ // Accumulate to S32
- sum_row = vaddw_u16(sum_row, a0_u16);
+ sum_row = vaddw_s16(sum_row, a0_s16);
}
auto vector_sum_row = reinterpret_cast<int32_t *>(out.ptr());
- vst1q_s32(vector_sum_row, vreinterpretq_s32_u32(sum_row));
+ vst1q_s32(vector_sum_row, sum_row);
},
in, out);
}
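The interleaved path's inner step collapses a 4x4 tile of widened values into one partial sum before widening into the S32 row accumulator. A standalone sketch (hypothetical helper):

#include <arm_neon.h>

// Collapse four s16x4 partial sums into one, then widen into the S32
// row accumulator with vaddw_s16.
int32x4_t accumulate_row(int32x4_t sum_row, int16x4x4_t a0_s16)
{
    a0_s16.val[0] = vadd_s16(a0_s16.val[0], a0_s16.val[1]);
    a0_s16.val[0] = vadd_s16(a0_s16.val[0], a0_s16.val[2]);
    a0_s16.val[0] = vadd_s16(a0_s16.val[0], a0_s16.val[3]);
    return vaddw_s16(sum_row, a0_s16.val[0]);
}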
@@ -154,10 +154,10 @@ void NEGEMMLowpMatrixAReductionKernel::run(const Window &window, const ThreadInf
execute_window_loop(collapsed_window, [&](const Coordinates & id)
{
- // Note: Since the input is unsigned char, we can safely use unsigned int for the accumulation
+ // Note: The input is signed char, so the accumulation is done in signed int
- uint32x4_t sum_row_s32 = vdupq_n_u32(0);
- unsigned int sum_row = 0;
+ int32x4_t sum_row_s32 = vdupq_n_s32(0);
+ int32_t sum_row = 0;
- const uint8_t *matrix_a = in.ptr() + id.x() * _input->info()->strides_in_bytes()[1] + +id.y() * _input->info()->strides_in_bytes()[2];
+ auto matrix_a = reinterpret_cast<const int8_t *>(in.ptr() + id.x() * _input->info()->strides_in_bytes()[1] + +id.y() * _input->info()->strides_in_bytes()[2]);
#if __arm__
asm volatile("PLD [%0, #128*4]" ::"r"(matrix_a));
@@ -167,29 +167,29 @@ void NEGEMMLowpMatrixAReductionKernel::run(const Window &window, const ThreadInf
// This for loop performs 16 accumulations
for(; i <= (_k - 16); i += 16)
{
- const uint8x16_t a0_u8 = vld1q_u8(matrix_a + i);
+ const int8x16_t a0_s8 = vld1q_s8(matrix_a + i);
- // Partial accumulations in U16
+ // Partial accumulations in S16
- const uint16x8_t tmp_sum0 = vaddl_u8(vget_low_u8(a0_u8), vget_high_u8(a0_u8));
+ const int16x8_t tmp_sum0 = vaddl_s8(vget_low_s8(a0_s8), vget_high_s8(a0_s8));
- // Accumulate to U32
+ // Accumulate to S32
- sum_row_s32 = vaddq_u32(sum_row_s32, vpaddlq_u16(tmp_sum0));
+ sum_row_s32 = vaddq_s32(sum_row_s32, vpaddlq_s16(tmp_sum0));
}
// This for loop performs the leftover accumulations
for(; i < _k; ++i)
{
- sum_row += static_cast<unsigned int>(matrix_a[i]);
+ sum_row += static_cast<int32_t>(matrix_a[i]);
}
#if defined(__aarch64__)
// Reduction operation available on 64 bit architectures only
- sum_row += vaddvq_u32(sum_row_s32);
+ sum_row += vaddvq_s32(sum_row_s32);
#else // __aarch64__
- uint32x2_t tmp = vpadd_u32(vget_high_u32(sum_row_s32), vget_low_u32(sum_row_s32));
- tmp = vpadd_u32(tmp, tmp);
+ int32x2_t tmp = vpadd_s32(vget_high_s32(sum_row_s32), vget_low_s32(sum_row_s32));
+ tmp = vpadd_s32(tmp, tmp);
- sum_row += vget_lane_u32(tmp, 0);
+ sum_row += vget_lane_s32(tmp, 0);
#endif // __aarch64__
*(reinterpret_cast<int *>(out.ptr())) = static_cast<int>(sum_row);
@@ -200,7 +200,7 @@ void NEGEMMLowpMatrixAReductionKernel::run(const Window &window, const ThreadInf
void NEGEMMLowpMatrixBReductionKernel::configure(const ITensor *mtx_b_transposed1xW, ITensor *vector_sum_col, int32_t num_mtx_b_rows, bool is_transposed1xW)
{
- ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(mtx_b_transposed1xW, 1, DataType::U8);
+ ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(mtx_b_transposed1xW, 1, DataType::S8);
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(vector_sum_col, 1, DataType::S32);
_input = mtx_b_transposed1xW;
@@ -246,17 +246,17 @@ void NEGEMMLowpMatrixBReductionKernel::run(const Window &window, const ThreadInf
execute_window_loop(collapsed_window, [&](const Coordinates & id)
{
- // Note: Since the input is unsigned char, we can safely use unsigned int for the accumulation
+ // Note: The input is signed char, so the accumulation is done in signed int
- uint32x4x4_t sum_col =
+ int32x4x4_t sum_col =
{
{
- vdupq_n_u32(0),
- vdupq_n_u32(0),
- vdupq_n_u32(0),
- vdupq_n_u32(0)
+ vdupq_n_s32(0),
+ vdupq_n_s32(0),
+ vdupq_n_s32(0),
+ vdupq_n_s32(0)
}
};
- const uint8_t *matrix_b = in.ptr() + (id.x() / 16) * _input->info()->strides_in_bytes()[1] + id.y() * _input->info()->strides_in_bytes()[2];
+ auto matrix_b = reinterpret_cast<const int8_t *>(in.ptr() + (id.x() / 16) * _input->info()->strides_in_bytes()[1] + id.y() * _input->info()->strides_in_bytes()[2]);
#if __arm__
asm volatile("PLD [%0, #128*4]" ::"r"(matrix_b));
@@ -265,14 +265,14 @@ void NEGEMMLowpMatrixBReductionKernel::run(const Window &window, const ThreadInf
int i = 0;
for(; i < _k; ++i)
{
- const uint8x16_t b0_u8 = vld1q_u8(matrix_b + i * 16);
+ const int8x16_t b0_s8 = vld1q_s8(matrix_b + i * 16);
- // Convert U8 to U16
- const uint16x8x2_t b0_u16 =
+ // Convert S8 to S16
+ const int16x8x2_t b0_s16 =
{
{
- vmovl_u8(vget_low_u8(b0_u8)),
- vmovl_u8(vget_high_u8(b0_u8))
+ vmovl_s8(vget_low_s8(b0_s8)),
+ vmovl_s8(vget_high_s8(b0_s8))
}
};
@@ -280,20 +280,20 @@ void NEGEMMLowpMatrixBReductionKernel::run(const Window &window, const ThreadInf
sum_col =
{
{
- vaddw_u16(sum_col.val[0], vget_low_u16(b0_u16.val[0])),
- vaddw_u16(sum_col.val[1], vget_high_u16(b0_u16.val[0])),
- vaddw_u16(sum_col.val[2], vget_low_u16(b0_u16.val[1])),
- vaddw_u16(sum_col.val[3], vget_high_u16(b0_u16.val[1]))
+ vaddw_s16(sum_col.val[0], vget_low_s16(b0_s16.val[0])),
+ vaddw_s16(sum_col.val[1], vget_high_s16(b0_s16.val[0])),
+ vaddw_s16(sum_col.val[2], vget_low_s16(b0_s16.val[1])),
+ vaddw_s16(sum_col.val[3], vget_high_s16(b0_s16.val[1]))
}
};
}
auto vector_sum_col = reinterpret_cast<int32_t *>(out.ptr());
- vst1q_s32(vector_sum_col + 0, vreinterpretq_s32_u32(sum_col.val[0]));
- vst1q_s32(vector_sum_col + 4, vreinterpretq_s32_u32(sum_col.val[1]));
- vst1q_s32(vector_sum_col + 8, vreinterpretq_s32_u32(sum_col.val[2]));
- vst1q_s32(vector_sum_col + 12, vreinterpretq_s32_u32(sum_col.val[3]));
+ vst1q_s32(vector_sum_col + 0, sum_col.val[0]);
+ vst1q_s32(vector_sum_col + 4, sum_col.val[1]);
+ vst1q_s32(vector_sum_col + 8, sum_col.val[2]);
+ vst1q_s32(vector_sum_col + 12, sum_col.val[3]);
},
in, out);
}
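The B-reduction accumulates whole columns by widening twice: s8 -> s16 with vmovl_s8, then s16 -> s32 with vaddw_s16. A standalone sketch of one 16-column accumulation step as used above (hypothetical helper):

#include <arm_neon.h>

// Add sixteen signed 8-bit column entries into four S32x4 accumulators.
void accumulate_columns(int32x4x4_t &sum_col, int8x16_t b0_s8)
{
    const int16x8_t lo = vmovl_s8(vget_low_s8(b0_s8));
    const int16x8_t hi = vmovl_s8(vget_high_s8(b0_s8));
    sum_col.val[0] = vaddw_s16(sum_col.val[0], vget_low_s16(lo));
    sum_col.val[1] = vaddw_s16(sum_col.val[1], vget_high_s16(lo));
    sum_col.val[2] = vaddw_s16(sum_col.val[2], vget_low_s16(hi));
    sum_col.val[3] = vaddw_s16(sum_col.val[3], vget_high_s16(hi));
}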
@@ -326,17 +326,17 @@ void NEGEMMLowpMatrixBReductionKernel::run(const Window &window, const ThreadInf
}
- // Note: Since the input is unsigned char, we can safely use unsigned int for the accumulation
+ // Note: The input is signed char, so the accumulation is done in signed int
- uint32x4x4_t sum_col =
+ int32x4x4_t sum_col =
{
{
- vdupq_n_u32(0),
- vdupq_n_u32(0),
- vdupq_n_u32(0),
- vdupq_n_u32(0)
+ vdupq_n_s32(0),
+ vdupq_n_s32(0),
+ vdupq_n_s32(0),
+ vdupq_n_s32(0)
}
};
- const uint8_t *matrix_b = inb.ptr() + id.y() * _input->info()->strides_in_bytes()[2];
+ auto matrix_b = reinterpret_cast<const int8_t *>(inb.ptr() + id.y() * _input->info()->strides_in_bytes()[2]);
#if __arm__
asm volatile("PLD [%0, #128*4]" ::"r"(matrix_b));
@@ -347,10 +347,10 @@ void NEGEMMLowpMatrixBReductionKernel::run(const Window &window, const ThreadInf
// This for loop performs 4 accumulations
for(; i <= (_k - 4); i += 4)
{
- const uint8x16_t b0_u8 = vld1q_u8(matrix_b + 0 * in_b_stride);
- const uint8x16_t b1_u8 = vld1q_u8(matrix_b + 1 * in_b_stride);
- const uint8x16_t b2_u8 = vld1q_u8(matrix_b + 2 * in_b_stride);
- const uint8x16_t b3_u8 = vld1q_u8(matrix_b + 3 * in_b_stride);
+ const int8x16_t b0_s8 = vld1q_s8(matrix_b + 0 * in_b_stride);
+ const int8x16_t b1_s8 = vld1q_s8(matrix_b + 1 * in_b_stride);
+ const int8x16_t b2_s8 = vld1q_s8(matrix_b + 2 * in_b_stride);
+ const int8x16_t b3_s8 = vld1q_s8(matrix_b + 3 * in_b_stride);
#if __arm__
asm volatile("PLD [%0, #128*1]" ::"r"(matrix_b + 1 * in_b_stride));
@@ -360,31 +360,31 @@ void NEGEMMLowpMatrixBReductionKernel::run(const Window &window, const ThreadInf
#endif /* __arm__ */
- // Partial accumulation in u16
+ // Partial accumulation in s16
- uint16x8x2_t tmp_sum =
+ int16x8x2_t tmp_sum =
{
{
- vdupq_n_u16(0),
- vdupq_n_u16(0)
+ vdupq_n_s16(0),
+ vdupq_n_s16(0)
}
};
- tmp_sum.val[0] = vaddw_u8(tmp_sum.val[0], vget_low_u8(b0_u8));
- tmp_sum.val[0] = vaddw_u8(tmp_sum.val[0], vget_low_u8(b1_u8));
- tmp_sum.val[0] = vaddw_u8(tmp_sum.val[0], vget_low_u8(b2_u8));
- tmp_sum.val[0] = vaddw_u8(tmp_sum.val[0], vget_low_u8(b3_u8));
- tmp_sum.val[1] = vaddw_u8(tmp_sum.val[1], vget_high_u8(b0_u8));
- tmp_sum.val[1] = vaddw_u8(tmp_sum.val[1], vget_high_u8(b1_u8));
- tmp_sum.val[1] = vaddw_u8(tmp_sum.val[1], vget_high_u8(b2_u8));
- tmp_sum.val[1] = vaddw_u8(tmp_sum.val[1], vget_high_u8(b3_u8));
+ tmp_sum.val[0] = vaddw_s8(tmp_sum.val[0], vget_low_s8(b0_s8));
+ tmp_sum.val[0] = vaddw_s8(tmp_sum.val[0], vget_low_s8(b1_s8));
+ tmp_sum.val[0] = vaddw_s8(tmp_sum.val[0], vget_low_s8(b2_s8));
+ tmp_sum.val[0] = vaddw_s8(tmp_sum.val[0], vget_low_s8(b3_s8));
+ tmp_sum.val[1] = vaddw_s8(tmp_sum.val[1], vget_high_s8(b0_s8));
+ tmp_sum.val[1] = vaddw_s8(tmp_sum.val[1], vget_high_s8(b1_s8));
+ tmp_sum.val[1] = vaddw_s8(tmp_sum.val[1], vget_high_s8(b2_s8));
+ tmp_sum.val[1] = vaddw_s8(tmp_sum.val[1], vget_high_s8(b3_s8));
- // Accumulate to U32
+ // Accumulate to S32
sum_col =
{
{
- vaddw_u16(sum_col.val[0], vget_low_u16(tmp_sum.val[0])),
- vaddw_u16(sum_col.val[1], vget_high_u16(tmp_sum.val[0])),
- vaddw_u16(sum_col.val[2], vget_low_u16(tmp_sum.val[1])),
- vaddw_u16(sum_col.val[3], vget_high_u16(tmp_sum.val[1]))
+ vaddw_s16(sum_col.val[0], vget_low_s16(tmp_sum.val[0])),
+ vaddw_s16(sum_col.val[1], vget_high_s16(tmp_sum.val[0])),
+ vaddw_s16(sum_col.val[2], vget_low_s16(tmp_sum.val[1])),
+ vaddw_s16(sum_col.val[3], vget_high_s16(tmp_sum.val[1]))
}
};
@@ -394,14 +394,14 @@ void NEGEMMLowpMatrixBReductionKernel::run(const Window &window, const ThreadInf
// This for loop performs the leftover accumulations
for(; i < _k; ++i)
{
- const uint8x16_t b0_u8 = vld1q_u8(matrix_b + 0 * in_b_stride);
+ const int8x16_t b0_s8 = vld1q_s8(matrix_b + 0 * in_b_stride);
- // Convert U8 to U16
- const uint16x8x2_t b0_u16 =
+ // Convert S8 to S16
+ const int16x8x2_t b0_s16 =
{
{
- vmovl_u8(vget_low_u8(b0_u8)),
- vmovl_u8(vget_high_u8(b0_u8))
+ vmovl_s8(vget_low_s8(b0_s8)),
+ vmovl_s8(vget_high_s8(b0_s8))
}
};
@@ -409,10 +409,10 @@ void NEGEMMLowpMatrixBReductionKernel::run(const Window &window, const ThreadInf
sum_col =
{
{
- vaddw_u16(sum_col.val[0], vget_low_u16(b0_u16.val[0])),
- vaddw_u16(sum_col.val[1], vget_high_u16(b0_u16.val[0])),
- vaddw_u16(sum_col.val[2], vget_low_u16(b0_u16.val[1])),
- vaddw_u16(sum_col.val[3], vget_high_u16(b0_u16.val[1]))
+ vaddw_s16(sum_col.val[0], vget_low_s16(b0_s16.val[0])),
+ vaddw_s16(sum_col.val[1], vget_high_s16(b0_s16.val[0])),
+ vaddw_s16(sum_col.val[2], vget_low_s16(b0_s16.val[1])),
+ vaddw_s16(sum_col.val[3], vget_high_s16(b0_s16.val[1]))
}
};
@@ -421,11 +421,11 @@ void NEGEMMLowpMatrixBReductionKernel::run(const Window &window, const ThreadInf
auto vector_sum_col = reinterpret_cast<int32_t *>(out.ptr());
- vst1q_s32(vector_sum_col + 0, vreinterpretq_s32_u32(sum_col.val[0]));
- vst1q_s32(vector_sum_col + 4, vreinterpretq_s32_u32(sum_col.val[1]));
- vst1q_s32(vector_sum_col + 8, vreinterpretq_s32_u32(sum_col.val[2]));
- vst1q_s32(vector_sum_col + 12, vreinterpretq_s32_u32(sum_col.val[3]));
+ vst1q_s32(vector_sum_col + 0, sum_col.val[0]);
+ vst1q_s32(vector_sum_col + 4, sum_col.val[1]);
+ vst1q_s32(vector_sum_col + 8, sum_col.val[2]);
+ vst1q_s32(vector_sum_col + 12, sum_col.val[3]);
},
inb, out);
}
-}
\ No newline at end of file
+}
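The A-reduction's non-interleaved path sums a full row sixteen elements at a time. A self-contained sketch of that signed horizontal reduction (hypothetical function; assumes an AArch64 target, where vaddvq_s32 is available):

#include <arm_neon.h>
#include <cstdint>

// Sum 16 consecutive signed 8-bit values: widen pairwise s8 -> s16 with
// vaddl_s8, then s16 -> s32 with vpaddlq_s16, then reduce horizontally.
int32_t sum16_s8(const int8_t *p)
{
    const int8x16_t v   = vld1q_s8(p);
    const int16x8_t s16 = vaddl_s8(vget_low_s8(v), vget_high_s8(v));
    const int32x4_t s32 = vpaddlq_s16(s16);
    return vaddvq_s32(s32); // AArch64-only horizontal add
}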
diff --git a/src/core/NEON/kernels/arm64/NEGEMMLowpAArch64V8P4Kernel.cpp b/src/core/NEON/kernels/arm64/NEGEMMLowpAArch64V8P4Kernel.cpp
index 8728e77d9e..5fe198f455 100644
--- a/src/core/NEON/kernels/arm64/NEGEMMLowpAArch64V8P4Kernel.cpp
+++ b/src/core/NEON/kernels/arm64/NEGEMMLowpAArch64V8P4Kernel.cpp
@@ -35,6 +35,12 @@
#include "arm_compute/core/Window.h"
#include "support/ToolchainSupport.h"
+namespace arm_compute
+{
+#include "arm_compute/core/NEON/kernels/assembly/gemm_interleaved.hpp"
+#include "arm_compute/core/NEON/kernels/assembly/kernels/a64_gemm_s8_12x8.hpp"
+} // namespace arm_compute
+
#include <arm_neon.h>
#include <cstddef>
#include <cstdint>
@@ -42,91 +48,22 @@
// Enable only if compiled for AArch64-V8.2-A targets
#ifdef ARM_COMPUTE_AARCH64_V8_2
-#define ASM_PREFETCH(address) "PRFM PLDL1KEEP, " address "\n"
-#define ASM_PREFETCHL2(address) "PRFM PLDL2KEEP, " address "\n"
-#define ASM_PREFETCHW(address) "PRFM PSTL1KEEP, " address "\n"
-#define ASM_PREFETCHWL2(address) "PRFM PSTL2KEEP, " address "\n"
-
-static inline void stincpld(uint32x4_t v0, uint32x4_t v1, uint32x4_t v2, uint32x4_t v3,
- uint32x4_t v4, uint32x4_t v5, uint32x4_t v6, uint32x4_t v7,
- uint32_t *&ptr0, uint32_t *&ptr1, uint32_t *&ptr2, uint32_t *&ptr3,
- uint32_t *&ptr4, uint32_t *&ptr5, uint32_t *&ptr6, uint32_t *&ptr7)
-{
- __asm __volatile(
- "LDR q0, [%[ptr0]]\n"
- "LDR q1, [%[ptr1]]\n"
- "LDR q2, [%[ptr2]]\n"
- "LDR q3, [%[ptr3]]\n"
- "LDR q4, [%[ptr4]]\n"
- "LDR q5, [%[ptr5]]\n"
- "LDR q6, [%[ptr6]]\n"
- "LDR q7, [%[ptr7]]\n"
- "ADD v0.4s, v0.4s, %[v0].4s\n" ASM_PREFETCH("[%[ptr0], #80]") "ADD v1.4s, v1.4s, %[v1].4s\n" ASM_PREFETCH("[%[ptr1], #80]") "ADD v2.4s, v2.4s, %[v2].4s\n" ASM_PREFETCH("[%[ptr2], #80]")
- "ADD v3.4s, v3.4s, %[v3].4s\n" ASM_PREFETCH("[%[ptr3], #80]") "ADD v4.4s, v4.4s, %[v4].4s\n" ASM_PREFETCH("[%[ptr4], #80]") "ADD v5.4s, v5.4s, %[v5].4s\n" ASM_PREFETCH("[%[ptr5], #80]")
- "ADD v6.4s, v6.4s, %[v6].4s\n" ASM_PREFETCH("[%[ptr6], #80]") "ADD v7.4s, v7.4s, %[v7].4s\n" ASM_PREFETCH("[%[ptr7], #80]")
- "STR q0, [%[ptr0]], #16\n"
- "STR q1, [%[ptr1]], #16\n"
- "STR q2, [%[ptr2]], #16\n"
- "STR q3, [%[ptr3]], #16\n"
- "STR q4, [%[ptr4]], #16\n"
- "STR q5, [%[ptr5]], #16\n"
- "STR q6, [%[ptr6]], #16\n"
- "STR q7, [%[ptr7]], #16\n"
- : [ptr0] "+r"(ptr0), [ptr1] "+r"(ptr1), [ptr2] "+r"(ptr2), [ptr3] "+r"(ptr3),
- [ptr4] "+r"(ptr4), [ptr5] "+r"(ptr5), [ptr6] "+r"(ptr6), [ptr7] "+r"(ptr7)
- : [v0] "w"(v0), [v1] "w"(v1), [v2] "w"(v2), [v3] "w"(v3),
- [v4] "w"(v4), [v5] "w"(v5), [v6] "w"(v6), [v7] "w"(v7)
- : "x20", "x21", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "memory");
-}
-
-static inline void stinc(uint32x4_t v0, uint32x4_t v1, uint32x4_t v2, uint32x4_t v3,
- uint32x4_t v4, uint32x4_t v5, uint32x4_t v6, uint32x4_t v7,
- uint32_t *&ptr0, uint32_t *&ptr1, uint32_t *&ptr2, uint32_t *&ptr3,
- uint32_t *&ptr4, uint32_t *&ptr5, uint32_t *&ptr6, uint32_t *&ptr7)
-{
- __asm __volatile(
- "LDR q0, [%[ptr0]]\n"
- "LDR q1, [%[ptr1]]\n"
- "LDR q2, [%[ptr2]]\n"
- "LDR q3, [%[ptr3]]\n"
- "LDR q4, [%[ptr4]]\n"
- "LDR q5, [%[ptr5]]\n"
- "LDR q6, [%[ptr6]]\n"
- "LDR q7, [%[ptr7]]\n"
- "ADD v0.4s, v0.4s, %[v0].4s\n"
- "ADD v1.4s, v1.4s, %[v1].4s\n"
- "ADD v2.4s, v2.4s, %[v2].4s\n"
- "ADD v3.4s, v3.4s, %[v3].4s\n"
- "ADD v4.4s, v4.4s, %[v4].4s\n"
- "ADD v5.4s, v5.4s, %[v5].4s\n"
- "ADD v6.4s, v6.4s, %[v6].4s\n"
- "ADD v7.4s, v7.4s, %[v7].4s\n"
- "STR q0, [%[ptr0]], #16\n"
- "STR q1, [%[ptr1]], #16\n"
- "STR q2, [%[ptr2]], #16\n"
- "STR q3, [%[ptr3]], #16\n"
- "STR q4, [%[ptr4]], #16\n"
- "STR q5, [%[ptr5]], #16\n"
- "STR q6, [%[ptr6]], #16\n"
- "STR q7, [%[ptr7]], #16\n"
- : [ptr0] "+r"(ptr0), [ptr1] "+r"(ptr1), [ptr2] "+r"(ptr2), [ptr3] "+r"(ptr3),
- [ptr4] "+r"(ptr4), [ptr5] "+r"(ptr5), [ptr6] "+r"(ptr6), [ptr7] "+r"(ptr7)
- : [v0] "w"(v0), [v1] "w"(v1), [v2] "w"(v2), [v3] "w"(v3),
- [v4] "w"(v4), [v5] "w"(v5), [v6] "w"(v6), [v7] "w"(v7)
- : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "memory");
-}
-
namespace arm_compute
{
-void NEGEMMLowpAArch64V8P4Kernel::internal_configure(const ITensor *input0, const ITensor *input1, ITensor *output)
+void NEGEMMLowpAArch64V8P4Kernel::internal_configure(const ITensor *input0, const ITensor *input1, ITensor *output, ITensor *workspace, float alpha, float beta, bool transform_0, bool transform_1)
{
- ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input0, 1, DataType::U8);
- ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::U32);
+ ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input0, 1, DataType::S8);
+ ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::S32);
ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input0, input1);
- _input0 = input0;
- _input1 = input1;
- _output = output;
+ _input0 = input0;
+ _input1 = input1;
+ _output = output;
+ _workspace = workspace;
+ _alpha = alpha;
+ _beta = beta;
+ _transform_0 = transform_0;
+ _transform_1 = transform_1;
// Configure kernel window
Window win = calculate_max_window(*output->info());
@@ -149,375 +86,49 @@ bool NEGEMMLowpAArch64V8P4Kernel::is_parallelisable() const
return false;
}
-#define _UDOT_MACRO \
- ".altmacro\n" \
- ".macro udot opd:req, opn:req, opm:req\n" \
- "local vd, vn, vm, h, l\n" \
- ".irp reg,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31\n" \
- ".ifeqs \"\\opd\",\"v\\reg\\.4s\"\n" \
- ".set vd,\\reg\n" \
- ".endif\n" \
- ".ifeqs \"\\opn\",\"v\\reg\\.16b\"\n" \
- ".set vn,\\reg\n" \
- ".endif\n" \
- ".irp idx,0,1,2,3\n" \
- ".ifeqs \"\\opm\",\"v\\reg\\.4b[\\idx\\]\"\n" \
- ".set vm,\\reg\n" \
- ".set h,\\idx / 2\n" \
- ".set l,\\idx %% 2\n" \
- ".endif\n" \
- ".endr\n" \
- ".endr\n" \
- ".ifndef vd\n" \
- ".error \"Bad operand \\opd\"\n" \
- ".exitm\n" \
- ".endif\n" \
- ".ifndef vn\n" \
- ".error \"Bad operand \\opn\"\n" \
- ".exitm\n" \
- ".endif\n" \
- ".ifndef vm\n" \
- ".error \"Bad operand \\opm\"\n" \
- ".exitm\n" \
- ".endif\n" \
- ".ifndef h\n" \
- ".error \"Bad operand \\opm\"\n" \
- ".exitm\n" \
- ".endif\n" \
- ".ifndef l\n" \
- ".error \"Bad operand \\opm\"\n" \
- ".exitm\n" \
- ".endif\n" \
- ".int 0x6f80e000 | vd | (vn << 5) | (vm << 16) | (l << 21) | (h << 11)\n" \
- ".endm\n"
-
-#define _PREFETCH_ \
- __asm __volatile( \
- "" ASM_PREFETCH("[%[a_ptr], #64]") \
- ASM_PREFETCH("[%[a_ptr], #128]") \
- ASM_PREFETCH("[%[a_ptr], #192]") \
- : \
- : \
- [a_ptr] "r"(a_ptr), [b_ptr] "r"(b_ptr) \
- : "x20", "x21", "memory"); \
- __asm __volatile( \
- "" ASM_PREFETCH("[%[b_ptr]]") \
- ASM_PREFETCH("[%[b_ptr], #64]") \
- ASM_PREFETCH("[%[b_ptr], #128]") \
- ASM_PREFETCH("[%[b_ptr], #192]") \
- : \
- : \
- [b_ptr] "r"(b_ptr) \
- : "x20", "x21"); \
- __asm __volatile( \
- "" \
- : [r00] "+w"(r00), [r01] "+w"(r01), \
- [r10] "+w"(r10), [r11] "+w"(r11), \
- [r20] "+w"(r20), [r21] "+w"(r21), \
- [r30] "+w"(r30), [r31] "+w"(r31), \
- [a0] "+w"(a0), [a1] "+w"(a1), \
- [b0] "+w"(b0), [b1] "+w"(b1), [b2] "=w"(b2), \
- [a_ptr] "+r"(a_ptr), [b_ptr] "+r"(b_ptr) \
- : \
- :); \
- __asm __volatile( \
- "" \
- : [r02] "+w"(r02), \
- [r12] "+w"(r12), \
- [r22] "+w"(r22), \
- [r32] "+w"(r32), \
- [r40] "+w"(r40), \
- [r50] "+w"(r50), \
- [r60] "+w"(r60), \
- [r70] "+w"(r70), \
- [a0a] "=w"(a0a), [a1a] "=w"(a1a), \
- [b0] "+w"(b0), [b2] "+w"(b2), [b5] "=&w"(b5) \
- : \
- :); \
- __asm __volatile( \
- "" \
- : \
- [r41] "+w"(r41), [r42] "+w"(r42), \
- [r51] "+w"(r51), [r52] "+w"(r52), \
- [r61] "+w"(r61), [r62] "+w"(r62), \
- [r71] "+w"(r71), [r72] "+w"(r72), \
- [a1] "+w"(a1), \
- [b0] "+w"(b0), [b1] "+w"(b1), [b2] "+w"(b2), \
- [b_ptr] "+r"(b_ptr), [k] "+r"(k) \
- : \
- :);
-
void NEGEMMLowpAArch64V8P4Kernel::run(const Window &window, const ThreadInfo &info)
{
- ARM_COMPUTE_UNUSED(info);
ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INEKernel::window(), window);
- const int x_block = 348;
- const int k_block = 1664;
- const int nthreads = 1;
- const int M = _output->info()->tensor_shape().y();
- const int N = _output->info()->tensor_shape().x();
- const int K = _input0->info()->tensor_shape().x() >> 3;
-
- int yblocksperthread = ((M / nthreads) + 7) / 8;
-
- if(yblocksperthread < 1)
- {
- yblocksperthread = 1;
- }
-
- const int lda = _input0->info()->strides_in_bytes().y();
- const int ldb = _input1->info()->strides_in_bytes().y();
- const int ldc = _output->info()->strides_in_bytes().y();
- const int ldc2 = _output->info()->strides_in_bytes().x();
- const int ldc3 = ldc / sizeof(uint32_t);
+ const int lda = _input0->info()->strides_in_bytes().y();
+ const int ldb = _input1->info()->strides_in_bytes().y();
+ const int ldc = _output->info()->strides_in_bytes().y() / sizeof(int32_t);
- const int threadid = 0;
- int y0 = threadid * yblocksperthread * 8;
- int ymax = (threadid + 1) * yblocksperthread * 8;
- if(y0 >= M)
- {
- return;
- }
- if(ymax > M)
- {
- ymax = M;
- }
- for(int k0 = 0; k0 < K; k0 += k_block)
- {
- int kmax = k0 + k_block;
- if(kmax > K)
- {
- kmax = K;
- }
+ const auto in1_ptr = reinterpret_cast<const int8_t *>(_input1->buffer());
- for(int x0 = 0; x0 < N; x0 += x_block)
- {
- int xmax = x0 + x_block;
- if(xmax > N)
- {
- xmax = N;
- }
+ const int M = std::min(_output->info()->tensor_shape().y(), static_cast<size_t>(window.y().end())) - window.y().start();
+ const int N = _output->info()->tensor_shape().x();
+ const int K = _input0->info()->tensor_shape().x();
- for(int y = y0; y < ymax; y += 8)
- {
- auto c_ptr0 = reinterpret_cast<uint32_t *>(_output->buffer() + (y * ldc) + x0 * ldc2);
- uint32_t *c_ptr1 = c_ptr0 + ldc3;
- uint32_t *c_ptr2 = c_ptr1 + ldc3;
- uint32_t *c_ptr3 = c_ptr2 + ldc3;
- uint32_t *c_ptr4 = c_ptr3 + ldc3;
- uint32_t *c_ptr5 = c_ptr4 + ldc3;
- uint32_t *c_ptr6 = c_ptr5 + ldc3;
- uint32_t *c_ptr7 = c_ptr6 + ldc3;
+ // Only iterate over batches
+ Window win(window);
+ win.set(0, Window::Dimension(0, 1, 1));
+ win.set(1, Window::Dimension(0, 1, 1));
- __asm __volatile(
- "" ASM_PREFETCH("[%[c_ptr0]]")
- ASM_PREFETCH("[%[c_ptr1]]")
- ASM_PREFETCH("[%[c_ptr2]]")
- ASM_PREFETCH("[%[c_ptr3]]")
- ASM_PREFETCH("[%[c_ptr4]]")
- ASM_PREFETCH("[%[c_ptr5]]")
- ASM_PREFETCH("[%[c_ptr6]]")
- ASM_PREFETCH("[%[c_ptr7]]")
- :
- : [c_ptr0] "r"(c_ptr0), [c_ptr1] "r"(c_ptr1), [c_ptr2] "r"(c_ptr2), [c_ptr3] "r"(c_ptr3),
- [c_ptr4] "r"(c_ptr4), [c_ptr5] "r"(c_ptr5), [c_ptr6] "r"(c_ptr6), [c_ptr7] "r"(c_ptr7)
- : "x20", "x21");
+ Iterator in0(_input0, window);
+ Iterator out(_output, window);
- for(int x = x0; x < xmax; x += 12)
- {
- register uint32x4_t r00 asm("v8");
- register uint32x4_t r10 asm("v9");
- register uint32x4_t r20 asm("v10");
- register uint32x4_t r30 asm("v11");
- register uint32x4_t r40 asm("v12");
- register uint32x4_t r50 asm("v13");
- register uint32x4_t r60 asm("v14");
- register uint32x4_t r70 asm("v15");
- register uint32x4_t r01 asm("v16");
- register uint32x4_t r11 asm("v17");
- register uint32x4_t r21 asm("v18");
- register uint32x4_t r31 asm("v19");
- register uint32x4_t r41 asm("v20");
- register uint32x4_t r51 asm("v21");
- register uint32x4_t r61 asm("v22");
- register uint32x4_t r71 asm("v23");
- register uint32x4_t r02 asm("v24");
- register uint32x4_t r12 asm("v25");
- register uint32x4_t r22 asm("v26");
- register uint32x4_t r32 asm("v27");
- register uint32x4_t r42 asm("v28");
- register uint32x4_t r52 asm("v29");
- register uint32x4_t r62 asm("v30");
- register uint32x4_t r72 asm("v31");
+ GemmInterleaved<gemm_s8_12x8, int8_t, int32_t> gemm(&info.cpu_info, M, N, K, !_transform_0, !_transform_1);
- register uint8x16_t a0 asm("v0");
- register uint8x16_t a1 asm("v1");
- register uint8x16_t b0 asm("v2");
- register uint8x16_t b1 asm("v3");
- register uint8x16_t b2 asm("v4");
- register uint8x16_t a0a asm("v5");
- register uint8x16_t a1a asm("v6");
- register uint8x16_t b5 asm("v7");
- const uint8_t *a_ptr = _input0->buffer() + ((y / 8) * lda) + (k0 * 8);
- const uint8_t *b_ptr = _input1->buffer() + ((x / 12) * ldb) + (k0 * 12);
+ constexpr size_t alignment = 4096;
+ const size_t offset = (gemm.get_working_size() + alignment - 1) * info.thread_id;
+ void *workspace = _workspace->buffer() + offset;
+ size_t workspace_size = _workspace->info()->total_size();
- r00 = r01 = r02 = r10 = r11 = r12 = r20 = r21 = r22 = r30 = r31 = r32 = vdupq_n_u32(0);
- r40 = r41 = r42 = r50 = r51 = r52 = r60 = r61 = r62 = r70 = r71 = r72 = vdupq_n_u32(0);
-
- int k = ((kmax - k0) / 8) - 1;
-
- a0 = vld1q_u8(a_ptr);
- b0 = vld1q_u8(b_ptr);
- a1 = vld1q_u8(a_ptr + 16);
- b1 = vld1q_u8(b_ptr + 16);
-
- _PREFETCH_
-
- __asm __volatile(
- _UDOT_MACRO
- "1:\n"
- "udot v8.4s , %[b0].16b, %[a0].4b[0]\n"
- "udot v9.4s , %[b0].16b, %[a0].4b[1]\n"
- "ldr %q[b2], [%[b_ptr], #32]\n"
- "udot v10.4s, %[b0].16b, %[a0].4b[2]\n"
- "udot v11.4s, %[b0].16b, %[a0].4b[3]\n"
- "ldr %q[a0a], [%[a_ptr], #32]\n"
- "udot v12.4s, %[b0].16b, %[a1].4b[0]\n"
- "udot v13.4s, %[b0].16b, %[a1].4b[1]\n"
- "ldr %q[a1a], [%[a_ptr], #48]\n"
- "udot v14.4s, %[b0].16b, %[a1].4b[2]\n"
- "udot v15.4s, %[b0].16b, %[a1].4b[3]\n"
- "ldr %q[b0], [%[b_ptr], #48]\n"
-
- "udot v16.4s, %[b1].16b, %[a0].4b[0]\n"
- "udot v17.4s, %[b1].16b, %[a0].4b[1]\n" ASM_PREFETCH("[%[a_ptr], #256]")
- "udot v18.4s, %[b1].16b, %[a0].4b[2]\n"
- "udot v19.4s, %[b1].16b, %[a0].4b[3]\n"
- "udot v20.4s, %[b1].16b, %[a1].4b[0]\n"
- "udot v21.4s, %[b1].16b, %[a1].4b[1]\n"
- "udot v22.4s, %[b1].16b, %[a1].4b[2]\n"
- "udot v23.4s, %[b1].16b, %[a1].4b[3]\n"
- "ldr %q[b1], [%[b_ptr], #64]\n"
-
- "udot v24.4s, %[b2].16b, %[a0].4b[0]\n"
- "udot v25.4s, %[b2].16b, %[a0].4b[1]\n" ASM_PREFETCH("[%[b_ptr], #256]")
- "udot v26.4s, %[b2].16b, %[a0].4b[2]\n"
- "udot v27.4s, %[b2].16b, %[a0].4b[3]\n"
- "udot v28.4s, %[b2].16b, %[a1].4b[0]\n"
- "udot v29.4s, %[b2].16b, %[a1].4b[1]\n"
- "udot v30.4s, %[b2].16b, %[a1].4b[2]\n"
- "udot v31.4s, %[b2].16b, %[a1].4b[3]\n"
- "ldr %q[b2], [%[b_ptr], #80]\n"
-
- "udot v8.4s , %[b0].16b, %[a0a].4b[0]\n"
- "udot v9.4s , %[b0].16b, %[a0a].4b[1]\n"
- "ldr %q[a0], [%[a_ptr], #64]\n"
- "udot v10.4s, %[b0].16b, %[a0a].4b[2]\n"
- "udot v11.4s, %[b0].16b, %[a0a].4b[3]\n"
- "udot v12.4s, %[b0].16b, %[a1a].4b[0]\n"
- "ldr %q[a1], [%[a_ptr], #80]\n"
- "udot v13.4s, %[b0].16b, %[a1a].4b[1]\n"
- "udot v14.4s, %[b0].16b, %[a1a].4b[2]\n"
- "udot v15.4s, %[b0].16b, %[a1a].4b[3]\n"
- "ldr %q[b0], [%[b_ptr], #96]\n"
-
- "udot v16.4s, %[b1].16b, %[a0a].4b[0]\n"
- "udot v17.4s, %[b1].16b, %[a0a].4b[1]\n" ASM_PREFETCH("[%[b_ptr], #320]")
- "udot v18.4s, %[b1].16b, %[a0a].4b[2]\n"
- "udot v19.4s, %[b1].16b, %[a0a].4b[3]\n"
- "udot v20.4s, %[b1].16b, %[a1a].4b[0]\n"
- "udot v21.4s, %[b1].16b, %[a1a].4b[1]\n"
- "udot v22.4s, %[b1].16b, %[a1a].4b[2]\n"
- "udot v23.4s, %[b1].16b, %[a1a].4b[3]\n"
- "ldr %q[b1], [%[b_ptr], #112]\n"
-
- "udot v24.4s, %[b2].16b, %[a0a].4b[0]\n"
- "udot v25.4s, %[b2].16b, %[a0a].4b[1]\n"
- "add %[a_ptr], %[a_ptr], #64\n"
- "udot v26.4s, %[b2].16b, %[a0a].4b[2]\n"
- "udot v27.4s, %[b2].16b, %[a0a].4b[3]\n"
- "add %[b_ptr], %[b_ptr], #96\n"
- "udot v28.4s, %[b2].16b, %[a1a].4b[0]\n"
- "udot v29.4s, %[b2].16b, %[a1a].4b[1]\n"
- "subs %w[k], %w[k], #1\n"
- "udot v30.4s, %[b2].16b, %[a1a].4b[2]\n"
- "udot v31.4s, %[b2].16b, %[a1a].4b[3]\n"
-
- "bne 1b\n"
-
- "udot v8.4s , %[b0].16b, %[a0].4b[0]\n"
- "udot v9.4s , %[b0].16b, %[a0].4b[1]\n"
- "ldr %q[b2], [%[b_ptr], #32]\n"
- "udot v10.4s, %[b0].16b, %[a0].4b[2]\n"
- "udot v11.4s, %[b0].16b, %[a0].4b[3]\n"
- "ldr %q[a0a], [%[a_ptr], #32]\n"
- "udot v12.4s, %[b0].16b, %[a1].4b[0]\n"
- "udot v13.4s, %[b0].16b, %[a1].4b[1]\n"
- "ldr %q[a1a], [%[a_ptr], #48]\n"
- "udot v14.4s, %[b0].16b, %[a1].4b[2]\n"
- "udot v15.4s, %[b0].16b, %[a1].4b[3]\n"
- "ldr %q[b0], [%[b_ptr], #48]\n"
-
- "udot v16.4s, %[b1].16b, %[a0].4b[0]\n"
- "udot v17.4s, %[b1].16b, %[a0].4b[1]\n"
- "udot v18.4s, %[b1].16b, %[a0].4b[2]\n"
- "udot v19.4s, %[b1].16b, %[a0].4b[3]\n"
- "udot v20.4s, %[b1].16b, %[a1].4b[0]\n"
- "udot v21.4s, %[b1].16b, %[a1].4b[1]\n"
- "udot v22.4s, %[b1].16b, %[a1].4b[2]\n"
- "udot v23.4s, %[b1].16b, %[a1].4b[3]\n"
- "ldr %q[b1], [%[b_ptr], #64]\n"
-
- "udot v24.4s, %[b2].16b, %[a0].4b[0]\n"
- "udot v25.4s, %[b2].16b, %[a0].4b[1]\n"
- "udot v26.4s, %[b2].16b, %[a0].4b[2]\n"
- "udot v27.4s, %[b2].16b, %[a0].4b[3]\n"
- "udot v28.4s, %[b2].16b, %[a1].4b[0]\n"
- "udot v29.4s, %[b2].16b, %[a1].4b[1]\n"
- "udot v30.4s, %[b2].16b, %[a1].4b[2]\n"
- "udot v31.4s, %[b2].16b, %[a1].4b[3]\n"
- "ldr %q[b2], [%[b_ptr], #80]\n"
-
- "udot v8.4s , %[b0].16b, %[a0a].4b[0]\n" ASM_PREFETCH("[%[c_ptr0]]") "udot v9.4s , %[b0].16b, %[a0a].4b[1]\n" ASM_PREFETCH("[%[c_ptr1]]") "udot v10.4s, %[b0].16b, %[a0a].4b[2]\n"
- ASM_PREFETCH("[%[c_ptr2]]") "udot v11.4s, %[b0].16b, %[a0a].4b[3]\n" ASM_PREFETCH("[%[c_ptr3]]") "udot v12.4s, %[b0].16b, %[a1a].4b[0]\n" ASM_PREFETCH("[%[c_ptr4]]")
- "udot v13.4s, %[b0].16b, %[a1a].4b[1]\n" ASM_PREFETCH("[%[c_ptr5]]") "udot v14.4s, %[b0].16b, %[a1a].4b[2]\n" ASM_PREFETCH("[%[c_ptr6]]") "udot v15.4s, %[b0].16b, %[a1a].4b[3]\n"
- ASM_PREFETCH("[%[c_ptr7]]")
-
- "udot v16.4s, %[b1].16b, %[a0a].4b[0]\n" ASM_PREFETCH("[%[c_ptr0], #48]") "udot v17.4s, %[b1].16b, %[a0a].4b[1]\n" ASM_PREFETCH("[%[c_ptr1], #48]") "udot v18.4s, %[b1].16b, %[a0a].4b[2]\n"
- ASM_PREFETCH("[%[c_ptr2], #48]") "udot v19.4s, %[b1].16b, %[a0a].4b[3]\n" ASM_PREFETCH("[%[c_ptr3], #48]") "udot v20.4s, %[b1].16b, %[a1a].4b[0]\n" ASM_PREFETCH("[%[c_ptr4], #48]")
- "udot v21.4s, %[b1].16b, %[a1a].4b[1]\n" ASM_PREFETCH("[%[c_ptr5], #48]") "udot v22.4s, %[b1].16b, %[a1a].4b[2]\n" ASM_PREFETCH("[%[c_ptr6], #48]") "udot v23.4s, %[b1].16b, %[a1a].4b[3]\n"
- ASM_PREFETCH("[%[c_ptr7], #48]")
-
- "udot v24.4s, %[b2].16b, %[a0a].4b[0]\n"
- "udot v25.4s, %[b2].16b, %[a0a].4b[1]\n"
- "udot v26.4s, %[b2].16b, %[a0a].4b[2]\n"
- "udot v27.4s, %[b2].16b, %[a0a].4b[3]\n"
- "add %[b_ptr], %[b_ptr], #96\n"
- "udot v28.4s, %[b2].16b, %[a1a].4b[0]\n"
- "udot v29.4s, %[b2].16b, %[a1a].4b[1]\n"
- "udot v30.4s, %[b2].16b, %[a1a].4b[2]\n"
- "udot v31.4s, %[b2].16b, %[a1a].4b[3]\n"
-
- // Clean up macro namespace
- ".purgem udot\n"
-
- :
- [a_ptr] "+r"(a_ptr), [b_ptr] "+r"(b_ptr),
- [a0] "+w"(a0), [a1] "+w"(a1), [a0a] "+w"(a0a), [a1a] "+w"(a1a),
- [b0] "+w"(b0), [b1] "+w"(b1), [b2] "+w"(b2), [k] "+r"(k)
- : [c_ptr0] "r"(c_ptr0), [c_ptr1] "r"(c_ptr1), [c_ptr2] "r"(c_ptr2), [c_ptr3] "r"(c_ptr3),
- [c_ptr4] "r"(c_ptr4), [c_ptr5] "r"(c_ptr5), [c_ptr6] "r"(c_ptr6), [c_ptr7] "r"(c_ptr7)
- : "x20", "x21");
-
- stincpld(r00, r10, r20, r30, r40, r50, r60, r70, c_ptr0, c_ptr1, c_ptr2, c_ptr3, c_ptr4, c_ptr5, c_ptr6, c_ptr7);
- stinc(r01, r11, r21, r31, r41, r51, r61, r71, c_ptr0, c_ptr1, c_ptr2, c_ptr3, c_ptr4, c_ptr5, c_ptr6, c_ptr7);
- stinc(r02, r12, r22, r32, r42, r52, r62, r72, c_ptr0, c_ptr1, c_ptr2, c_ptr3, c_ptr4, c_ptr5, c_ptr6, c_ptr7);
- }
- }
- }
+ if(support::cpp11::align(alignment, gemm.get_working_size(), workspace, workspace_size) == nullptr)
+ {
+ ARM_COMPUTE_ERROR("Not enough space to align buffer!");
}
+
+ execute_window_loop(win, [&](const Coordinates & id)
+ {
+ gemm.execute(reinterpret_cast<const int8_t *>(in0.ptr()), lda,
+ reinterpret_cast<const int8_t *>(in1_ptr), ldb,
+ reinterpret_cast<int32_t *>(out.ptr()), ldc,
+ _alpha, _beta, workspace);
+ },
+ in0, out);
}
} // namespace arm_compute
#endif /* ARM_COMPUTE_AARCH64_V8_2 */
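The hand-written udot inline assembly removed above is replaced by the prebuilt a64_gemm_s8_12x8 kernel, whose inner loop is built on the ARMv8.2-A sdot (signed dot product) instruction. A scalar reference of what one 32-bit sdot lane computes (illustrative only, not the kernel):

#include <cstdint>

// One lane of sdot: the accumulator gains the dot product of four signed
// 8-bit elements from each operand.
int32_t sdot_lane(int32_t acc, const int8_t a[4], const int8_t b[4])
{
    for(int i = 0; i < 4; ++i)
    {
        acc += static_cast<int32_t>(a[i]) * static_cast<int32_t>(b[i]);
    }
    return acc;
}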