author    Gian Marco <gianmarco.iodice@arm.com>    2017-11-08 12:24:09 +0000
committer Anthony Barbier <anthony.barbier@arm.com>    2018-11-02 16:35:24 +0000
commit    e75a02b60736f37c34388c23c0ccee230f65da59
tree      f8e9423e40589e99bd8be6c1e740b17792e2058e /src/core/NEON
parent    6c0348f4cbf6e30a715780f50aebf6dd0a2a8fc3
COMPMID-675 - Reworked NEGEMMLowp interface/function
The new interface makes NEGEMMLowp able to work with ASYMM8 data types.

Implemented 2 new functions:
- NEGEMMLowpMatrixMultiplyCore
- NEGEMMLowpOutputStage

These functions should make the integration in Android NN doable.

For more information about GEMMLowp:
https://github.com/google/gemmlowp/blob/master/doc/low-precision.md

Change-Id: Ie2c775f45234f68ca53dba644b3a912b997fd890
Reviewed-on: http://mpd-gerrit.cambridge.arm.com/95504
Tested-by: Kaizen <jeremy.johnson+kaizengerrit@arm.com>
Reviewed-by: Pablo Tello <pablo.tello@arm.com>
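For context, here is a minimal usage sketch of the reworked interface. It is not part of this commit: the tensor shapes are made up, and it assumes NEGEMMLowpMatrixMultiplyCore follows the configure(input_a, input_b, output) pattern implied by the kernels below; exact runtime headers and signatures may differ.

#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.h"
#include "arm_compute/runtime/Tensor.h"

using namespace arm_compute;

int main()
{
    // Hypothetical shapes: A is 4x32 (QASYMM8), B is 32x16 (QASYMM8)
    Tensor a, b, mm_result;
    a.allocator()->init(TensorInfo(TensorShape(32U, 4U), 1, DataType::QASYMM8));
    b.allocator()->init(TensorInfo(TensorShape(16U, 32U), 1, DataType::QASYMM8));
    mm_result.allocator()->init(TensorInfo(TensorShape(16U, 4U), 1, DataType::S32));

    NEGEMMLowpMatrixMultiplyCore mm;
    mm.configure(&a, &b, &mm_result); // QASYMM8 x QASYMM8 -> S32

    a.allocator()->allocate();
    b.allocator()->allocate();
    mm_result.allocator()->allocate();

    // ... fill a and b with quantized data, then:
    mm.run();

    // A NEGEMMLowpOutputStage function would then requantize mm_result (S32)
    // down to QASYMM8, as NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel does below.
    return 0;
}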
Diffstat (limited to 'src/core/NEON')
-rw-r--r--  src/core/NEON/kernels/NEGEMMInterleave4x4Kernel.cpp | 3
-rw-r--r--  src/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.cpp | 128
-rw-r--r--  src/core/NEON/kernels/NEGEMMLowpOffsetContributionKernel.cpp (renamed from src/core/NEON/kernels/NEGEMMLowpFinalizeKernel.cpp) | 451
-rw-r--r--  src/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel.cpp | 141
-rw-r--r--  src/core/NEON/kernels/NEGEMMLowpReductionKernel.cpp | 176
-rw-r--r--  src/core/NEON/kernels/NEGEMMTranspose1xWKernel.cpp | 3
6 files changed, 437 insertions, 465 deletions
diff --git a/src/core/NEON/kernels/NEGEMMInterleave4x4Kernel.cpp b/src/core/NEON/kernels/NEGEMMInterleave4x4Kernel.cpp
index ae5d456141..a29b661a00 100644
--- a/src/core/NEON/kernels/NEGEMMInterleave4x4Kernel.cpp
+++ b/src/core/NEON/kernels/NEGEMMInterleave4x4Kernel.cpp
@@ -132,7 +132,8 @@ NEGEMMInterleave4x4Kernel::NEGEMMInterleave4x4Kernel()
void NEGEMMInterleave4x4Kernel::configure(const ITensor *input, ITensor *output)
{
- ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QS8, DataType::QS16, DataType::U8, DataType::S8, DataType::U16, DataType::S16, DataType::U32, DataType::S32, DataType::F16,
+ ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::QS8, DataType::QS16, DataType::U8, DataType::S8, DataType::U16, DataType::S16, DataType::U32, DataType::S32,
+ DataType::F16,
DataType::F32);
ARM_COMPUTE_ERROR_ON_NULLPTR(output);
diff --git a/src/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.cpp b/src/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.cpp
index 4b9c9f3e64..1352f34e3c 100644
--- a/src/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.cpp
+++ b/src/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.cpp
@@ -52,7 +52,7 @@ NEGEMMLowpMatrixMultiplyKernel::NEGEMMLowpMatrixMultiplyKernel()
void NEGEMMLowpMatrixMultiplyKernel::configure(const ITensor *input0, const ITensor *input1, ITensor *output)
{
- ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input0, 1, DataType::S8);
+ ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input0, 1, DataType::QASYMM8);
ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input0, input1);
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::S32);
@@ -127,115 +127,115 @@ void NEGEMMLowpMatrixMultiplyKernel::run(const Window &window, const ThreadInfo
// All the values needed for computing a single 4x4 block will be read from consecutive memory positions
execute_window_loop(window, [&](const Coordinates & id)
{
- auto *mtx_a0 = reinterpret_cast<const int8_t *>(ina.ptr());
- auto *mtx_b0 = reinterpret_cast<const int8_t *>(inb.ptr());
+ const uint8_t *mtx_a0 = ina.ptr();
+ const uint8_t *mtx_b0 = inb.ptr();
// Note: Since the inputs are all positive, we can use uint32_t
// Accumulators for the block 0
- int32x4x4_t c0 =
+ uint32x4x4_t c0 =
{
{
- vdupq_n_s32(0),
- vdupq_n_s32(0),
- vdupq_n_s32(0),
- vdupq_n_s32(0)
+ vdupq_n_u32(0),
+ vdupq_n_u32(0),
+ vdupq_n_u32(0),
+ vdupq_n_u32(0)
}
};
// Accumulators for the block 1
- int32x4x4_t c1 =
+ uint32x4x4_t c1 =
{
{
- vdupq_n_s32(0),
- vdupq_n_s32(0),
- vdupq_n_s32(0),
- vdupq_n_s32(0)
+ vdupq_n_u32(0),
+ vdupq_n_u32(0),
+ vdupq_n_u32(0),
+ vdupq_n_u32(0)
}
};
// Accumulators for the block 2
- int32x4x4_t c2 =
+ uint32x4x4_t c2 =
{
{
- vdupq_n_s32(0),
- vdupq_n_s32(0),
- vdupq_n_s32(0),
- vdupq_n_s32(0)
+ vdupq_n_u32(0),
+ vdupq_n_u32(0),
+ vdupq_n_u32(0),
+ vdupq_n_u32(0)
}
};
// Accumulators for the block 3
- int32x4x4_t c3 =
+ uint32x4x4_t c3 =
{
{
- vdupq_n_s32(0),
- vdupq_n_s32(0),
- vdupq_n_s32(0),
- vdupq_n_s32(0)
+ vdupq_n_u32(0),
+ vdupq_n_u32(0),
+ vdupq_n_u32(0),
+ vdupq_n_u32(0)
}
};
for(int k = 0; k < width_b; k += 16, mtx_a0 += 4, mtx_b0 += 16)
{
- const int8x8_t a00_s8 = vld1_s8(mtx_a0);
- const int8x16_t b00_s8 = vld1q_s8(mtx_b0);
+ const uint8x8_t a00_u8 = vld1_u8(mtx_a0);
+ const uint8x16_t b00_u8 = vld1q_u8(mtx_b0);
// Convert a00_u8 to uint16_t and get the lower part
- const int16x4_t a00_s16 = vget_low_s16(vmovl_s8(a00_s8));
+ const uint16x4_t a00_u16 = vget_low_u16(vmovl_u8(a00_u8));
- // Convert b00_s8 to int16_t
- const int16x4x4_t b00_s16 =
+ // Convert b00_u8 to uint16_t
+ const uint16x4x4_t b00_u16 =
{
{
- vget_low_s16(vmovl_s8(vget_low_s8(b00_s8))),
- vget_high_s16(vmovl_s8(vget_low_s8(b00_s8))),
- vget_low_s16(vmovl_s8(vget_high_s8(b00_s8))),
- vget_high_s16(vmovl_s8(vget_high_s8(b00_s8)))
+ vget_low_u16(vmovl_u8(vget_low_u8(b00_u8))),
+ vget_high_u16(vmovl_u8(vget_low_u8(b00_u8))),
+ vget_low_u16(vmovl_u8(vget_high_u8(b00_u8))),
+ vget_high_u16(vmovl_u8(vget_high_u8(b00_u8)))
}
};
// 4x4 block 0
- c0.val[0] = vmlal_lane_s16(c0.val[0], b00_s16.val[0], a00_s16, 0);
- c0.val[1] = vmlal_lane_s16(c0.val[1], b00_s16.val[1], a00_s16, 0);
- c0.val[2] = vmlal_lane_s16(c0.val[2], b00_s16.val[2], a00_s16, 0);
- c0.val[3] = vmlal_lane_s16(c0.val[3], b00_s16.val[3], a00_s16, 0);
+ c0.val[0] = vmlal_lane_u16(c0.val[0], b00_u16.val[0], a00_u16, 0);
+ c0.val[1] = vmlal_lane_u16(c0.val[1], b00_u16.val[1], a00_u16, 0);
+ c0.val[2] = vmlal_lane_u16(c0.val[2], b00_u16.val[2], a00_u16, 0);
+ c0.val[3] = vmlal_lane_u16(c0.val[3], b00_u16.val[3], a00_u16, 0);
// 4x4 block 1
- c1.val[0] = vmlal_lane_s16(c1.val[0], b00_s16.val[0], a00_s16, 1);
- c1.val[1] = vmlal_lane_s16(c1.val[1], b00_s16.val[1], a00_s16, 1);
- c1.val[2] = vmlal_lane_s16(c1.val[2], b00_s16.val[2], a00_s16, 1);
- c1.val[3] = vmlal_lane_s16(c1.val[3], b00_s16.val[3], a00_s16, 1);
+ c1.val[0] = vmlal_lane_u16(c1.val[0], b00_u16.val[0], a00_u16, 1);
+ c1.val[1] = vmlal_lane_u16(c1.val[1], b00_u16.val[1], a00_u16, 1);
+ c1.val[2] = vmlal_lane_u16(c1.val[2], b00_u16.val[2], a00_u16, 1);
+ c1.val[3] = vmlal_lane_u16(c1.val[3], b00_u16.val[3], a00_u16, 1);
// 4x4 block 2
- c2.val[0] = vmlal_lane_s16(c2.val[0], b00_s16.val[0], a00_s16, 2);
- c2.val[1] = vmlal_lane_s16(c2.val[1], b00_s16.val[1], a00_s16, 2);
- c2.val[2] = vmlal_lane_s16(c2.val[2], b00_s16.val[2], a00_s16, 2);
- c2.val[3] = vmlal_lane_s16(c2.val[3], b00_s16.val[3], a00_s16, 2);
+ c2.val[0] = vmlal_lane_u16(c2.val[0], b00_u16.val[0], a00_u16, 2);
+ c2.val[1] = vmlal_lane_u16(c2.val[1], b00_u16.val[1], a00_u16, 2);
+ c2.val[2] = vmlal_lane_u16(c2.val[2], b00_u16.val[2], a00_u16, 2);
+ c2.val[3] = vmlal_lane_u16(c2.val[3], b00_u16.val[3], a00_u16, 2);
// 4x4 block 3
- c3.val[0] = vmlal_lane_s16(c3.val[0], b00_s16.val[0], a00_s16, 3);
- c3.val[1] = vmlal_lane_s16(c3.val[1], b00_s16.val[1], a00_s16, 3);
- c3.val[2] = vmlal_lane_s16(c3.val[2], b00_s16.val[2], a00_s16, 3);
- c3.val[3] = vmlal_lane_s16(c3.val[3], b00_s16.val[3], a00_s16, 3);
+ c3.val[0] = vmlal_lane_u16(c3.val[0], b00_u16.val[0], a00_u16, 3);
+ c3.val[1] = vmlal_lane_u16(c3.val[1], b00_u16.val[1], a00_u16, 3);
+ c3.val[2] = vmlal_lane_u16(c3.val[2], b00_u16.val[2], a00_u16, 3);
+ c3.val[3] = vmlal_lane_u16(c3.val[3], b00_u16.val[3], a00_u16, 3);
}
auto mtx_out = reinterpret_cast<int32_t *>(out.ptr());
- vst1q_s32(mtx_out + 0 * out_stride + 0, c0.val[0]);
- vst1q_s32(mtx_out + 0 * out_stride + 4, c0.val[1]);
- vst1q_s32(mtx_out + 0 * out_stride + 8, c0.val[2]);
- vst1q_s32(mtx_out + 0 * out_stride + 12, c0.val[3]);
- vst1q_s32(mtx_out + 1 * out_stride + 0, c1.val[0]);
- vst1q_s32(mtx_out + 1 * out_stride + 4, c1.val[1]);
- vst1q_s32(mtx_out + 1 * out_stride + 8, c1.val[2]);
- vst1q_s32(mtx_out + 1 * out_stride + 12, c1.val[3]);
- vst1q_s32(mtx_out + 2 * out_stride + 0, c2.val[0]);
- vst1q_s32(mtx_out + 2 * out_stride + 4, c2.val[1]);
- vst1q_s32(mtx_out + 2 * out_stride + 8, c2.val[2]);
- vst1q_s32(mtx_out + 2 * out_stride + 12, c2.val[3]);
- vst1q_s32(mtx_out + 3 * out_stride + 0, c3.val[0]);
- vst1q_s32(mtx_out + 3 * out_stride + 4, c3.val[1]);
- vst1q_s32(mtx_out + 3 * out_stride + 8, c3.val[2]);
- vst1q_s32(mtx_out + 3 * out_stride + 12, c3.val[3]);
+ vst1q_s32(mtx_out + 0 * out_stride + 0, vreinterpretq_s32_u32(c0.val[0]));
+ vst1q_s32(mtx_out + 0 * out_stride + 4, vreinterpretq_s32_u32(c0.val[1]));
+ vst1q_s32(mtx_out + 0 * out_stride + 8, vreinterpretq_s32_u32(c0.val[2]));
+ vst1q_s32(mtx_out + 0 * out_stride + 12, vreinterpretq_s32_u32(c0.val[3]));
+ vst1q_s32(mtx_out + 1 * out_stride + 0, vreinterpretq_s32_u32(c1.val[0]));
+ vst1q_s32(mtx_out + 1 * out_stride + 4, vreinterpretq_s32_u32(c1.val[1]));
+ vst1q_s32(mtx_out + 1 * out_stride + 8, vreinterpretq_s32_u32(c1.val[2]));
+ vst1q_s32(mtx_out + 1 * out_stride + 12, vreinterpretq_s32_u32(c1.val[3]));
+ vst1q_s32(mtx_out + 2 * out_stride + 0, vreinterpretq_s32_u32(c2.val[0]));
+ vst1q_s32(mtx_out + 2 * out_stride + 4, vreinterpretq_s32_u32(c2.val[1]));
+ vst1q_s32(mtx_out + 2 * out_stride + 8, vreinterpretq_s32_u32(c2.val[2]));
+ vst1q_s32(mtx_out + 2 * out_stride + 12, vreinterpretq_s32_u32(c2.val[3]));
+ vst1q_s32(mtx_out + 3 * out_stride + 0, vreinterpretq_s32_u32(c3.val[0]));
+ vst1q_s32(mtx_out + 3 * out_stride + 4, vreinterpretq_s32_u32(c3.val[1]));
+ vst1q_s32(mtx_out + 3 * out_stride + 8, vreinterpretq_s32_u32(c3.val[2]));
+ vst1q_s32(mtx_out + 3 * out_stride + 12, vreinterpretq_s32_u32(c3.val[3]));
},
ina, inb, out);
}
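For reference, a scalar sketch of the accumulation this kernel vectorizes. The flat row-major layout and names are illustrative; the real kernel reads the interleaved (4x4) and transposed (1xW) inputs shown above.

#include <cstdint>

// Scalar equivalent of the u8 x u8 -> s32 matrix multiplication above.
// Since both inputs are non-negative, the accumulator can be unsigned,
// exactly as the kernel's uint32x4x4_t accumulators are.
void gemmlowp_mm_reference(const uint8_t *a, const uint8_t *b, int32_t *c,
                           int m, int n, int k)
{
    for(int i = 0; i < m; ++i)
    {
        for(int j = 0; j < n; ++j)
        {
            uint32_t acc = 0;
            for(int l = 0; l < k; ++l)
            {
                acc += static_cast<uint32_t>(a[i * k + l]) * b[l * n + j];
            }
            // Stored as S32, mirroring the kernel's vreinterpretq_s32_u32 stores
            c[i * n + j] = static_cast<int32_t>(acc);
        }
    }
}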
diff --git a/src/core/NEON/kernels/NEGEMMLowpFinalizeKernel.cpp b/src/core/NEON/kernels/NEGEMMLowpOffsetContributionKernel.cpp
index 255e486365..bd550db54c 100644
--- a/src/core/NEON/kernels/NEGEMMLowpFinalizeKernel.cpp
+++ b/src/core/NEON/kernels/NEGEMMLowpOffsetContributionKernel.cpp
@@ -21,7 +21,7 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#include "arm_compute/core/NEON/kernels/NEGEMMLowpFinalizeKernel.h"
+#include "arm_compute/core/NEON/kernels/NEGEMMLowpOffsetContributionKernel.h"
#include "arm_compute/core/AccessWindowStatic.h"
#include "arm_compute/core/Error.h"
@@ -44,15 +44,117 @@ namespace arm_compute
class Coordinates;
} // namespace arm_compute
-template <bool add_a_offset, bool add_b_offset>
-void NEGEMMLowpFinalizeKernel::finalize(const Window &window)
+NEGEMMLowpOffsetContributionKernel::NEGEMMLowpOffsetContributionKernel()
+ : _vector_sum_col(nullptr), _vector_sum_row(nullptr), _mm_result(nullptr), _a_offset(0), _b_offset(0), _k_offset(0), _slide_vector_sum_col(true)
{
- const int32x4_t c_offset_s32 = vdupq_n_s32(_c_offset);
- const int32x4_t shift_s32 = vdupq_n_s32(-_shift);
+}
+
+void NEGEMMLowpOffsetContributionKernel::configure(ITensor *mm_result, const ITensor *vector_sum_col, const ITensor *vector_sum_row, int32_t k, int32_t a_offset, int32_t b_offset)
+{
+ ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(mm_result, 1, DataType::S32);
+
+ // If a_offset == 0, vector_sum_col can be a nullptr
+ if(a_offset != 0)
+ {
+ ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(vector_sum_col, 1, DataType::S32);
+ ARM_COMPUTE_ERROR_ON(vector_sum_col->info()->dimension(0) != mm_result->info()->dimension(0));
+
+ TensorShape vector_sum_col_shape = vector_sum_col->info()->tensor_shape();
+ vector_sum_col_shape.collapse(1);
+
+ // Check if vector_sum_col_shape should be slid or not
+ // Don't slide vector_sum_col_shape along the y dimension if vector_sum_col_shape has just 1 dimension and vector_sum_row_shape has more than 1
+ // This scenario can happen when the matrix multiplication is used to perform a convolution operation
+ _slide_vector_sum_col = vector_sum_col_shape[1] != 1;
+ }
+
+ // If b_offset == 0, vector_sum_row can be a nullptr
+ if(b_offset != 0)
+ {
+ ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(vector_sum_row, 1, DataType::S32);
+ ARM_COMPUTE_ERROR_ON(vector_sum_row->info()->dimension(0) != mm_result->info()->dimension(1));
+
+ TensorShape output_shape = mm_result->info()->tensor_shape();
+ TensorShape vector_sum_row_shape = vector_sum_row->info()->tensor_shape();
+ vector_sum_row_shape.collapse(1);
+ output_shape.collapse(2);
+
+ ARM_COMPUTE_ERROR_ON_MSG(vector_sum_row_shape[1] != output_shape[2], "mm_result tensor must have the same number of batches as the output tensor");
+
+ if(a_offset != 0)
+ {
+ TensorShape vector_sum_col_shape = vector_sum_col->info()->tensor_shape();
+ vector_sum_col_shape.collapse(1);
+
+ ARM_COMPUTE_ERROR_ON_MSG(vector_sum_col_shape[1] != 1
+ && vector_sum_col_shape[1] != vector_sum_row_shape[1],
+ "vector_sum_col tensor must have the same number of batches of vector_sum_row_shape or the number of batches must be set to 1");
+ }
+ }
+
+ _vector_sum_col = vector_sum_col;
+ _vector_sum_row = vector_sum_row;
+ _mm_result = mm_result;
+ _a_offset = a_offset;
+ _b_offset = b_offset;
+ _k_offset = a_offset * b_offset * k;
+
+ constexpr unsigned int num_elems_processed_per_iteration = 16;
+
+ // Configure kernel window
+ Window win = calculate_max_window(*mm_result->info(), Steps(num_elems_processed_per_iteration));
+
+ AccessWindowHorizontal mm_result_access(mm_result->info(), 0, num_elems_processed_per_iteration);
+
+ // Depending on a_offset and b_offset, we can have 4 cases:
+ // a_offset != 0 && b_offset != 0
+ // a_offset = 0 && b_offset != 0
+ // a_offset != 0 && b_offset = 0
+ // a_offset = 0 && b_offset = 0
+ if(a_offset != 0 && b_offset != 0)
+ {
+ AccessWindowStatic vector_sum_row_access(vector_sum_row->info(), 0, 0, vector_sum_row->info()->dimension(0), 0);
+ AccessWindowHorizontal vector_sum_col_access(vector_sum_col->info(), 0, num_elems_processed_per_iteration);
+
+ update_window_and_padding(win,
+ vector_sum_col_access,
+ vector_sum_row_access,
+ mm_result_access);
+ }
+ else if(a_offset == 0 && b_offset != 0)
+ {
+ AccessWindowStatic vector_sum_row_access(vector_sum_row->info(), 0, 0, vector_sum_row->info()->dimension(0), 0);
+
+ update_window_and_padding(win,
+ vector_sum_row_access,
+ mm_result_access);
+ }
+ else if(a_offset != 0 && b_offset == 0)
+ {
+ AccessWindowHorizontal vector_sum_col_access(vector_sum_col->info(), 0, num_elems_processed_per_iteration);
+
+ update_window_and_padding(win,
+ vector_sum_col_access,
+ mm_result_access);
+ }
+ else
+ {
+ update_window_and_padding(win,
+ mm_result_access);
+ }
+
+ INEKernel::configure(win);
+}
+
+void NEGEMMLowpOffsetContributionKernel::run(const Window &window, const ThreadInfo &info)
+{
+ ARM_COMPUTE_UNUSED(info);
+ ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
+ ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INEKernel::window(), window);
Window collapsed_window = window.collapse_if_possible(IKernel::window(), Window::DimZ);
- if(add_a_offset && add_b_offset) // true, true
+ if(_a_offset != 0 && _b_offset != 0) // true, true
{
// Set window for vector_sum_col
Window win_vector_sum_col(collapsed_window);
@@ -70,7 +172,6 @@ void NEGEMMLowpFinalizeKernel::finalize(const Window &window)
Iterator vector_sum_col(_vector_sum_col, win_vector_sum_col);
Iterator vector_sum_row(_vector_sum_row, win_vector_sum_row);
Iterator mm_result(_mm_result, window);
- Iterator out(_output, window);
execute_window_loop(window, [&](const Coordinates & id)
{
@@ -110,12 +211,6 @@ void NEGEMMLowpFinalizeKernel::finalize(const Window &window)
offset_term_s32.val[2] = vaddq_s32(offset_term_s32.val[2], vaddq_s32(a_offset_term_s32.val[2], b_offset_term_s32));
offset_term_s32.val[3] = vaddq_s32(offset_term_s32.val[3], vaddq_s32(a_offset_term_s32.val[3], b_offset_term_s32));
- // Add c_offset
- offset_term_s32.val[0] = vaddq_s32(offset_term_s32.val[0], c_offset_s32);
- offset_term_s32.val[1] = vaddq_s32(offset_term_s32.val[1], c_offset_s32);
- offset_term_s32.val[2] = vaddq_s32(offset_term_s32.val[2], c_offset_s32);
- offset_term_s32.val[3] = vaddq_s32(offset_term_s32.val[3], c_offset_s32);
-
int32x4x4_t in_s32 =
{
{
@@ -132,35 +227,15 @@ void NEGEMMLowpFinalizeKernel::finalize(const Window &window)
in_s32.val[2] = vaddq_s32(in_s32.val[2], offset_term_s32.val[2]);
in_s32.val[3] = vaddq_s32(in_s32.val[3], offset_term_s32.val[3]);
- // Multiply by c_mult_int
- in_s32.val[0] = vmulq_n_s32(in_s32.val[0], _c_mult_int);
- in_s32.val[1] = vmulq_n_s32(in_s32.val[1], _c_mult_int);
- in_s32.val[2] = vmulq_n_s32(in_s32.val[2], _c_mult_int);
- in_s32.val[3] = vmulq_n_s32(in_s32.val[3], _c_mult_int);
-
- // Shift final result (negative value shift right)
- in_s32.val[0] = vshlq_s32(in_s32.val[0], shift_s32);
- in_s32.val[1] = vshlq_s32(in_s32.val[1], shift_s32);
- in_s32.val[2] = vshlq_s32(in_s32.val[2], shift_s32);
- in_s32.val[3] = vshlq_s32(in_s32.val[3], shift_s32);
-
- // Convert S32 to U16
- const int16x8x2_t in_s16 =
- {
- {
- vcombine_s16(vqmovn_s32(in_s32.val[0]), vqmovn_s32(in_s32.val[1])),
- vcombine_s16(vqmovn_s32(in_s32.val[2]), vqmovn_s32(in_s32.val[3])),
- }
- };
-
- // Convert S16 to S8
- const int8x16_t out_s8 = vcombine_s8(vqmovn_s16(in_s16.val[0]), vqmovn_s16(in_s16.val[1]));
-
- vst1q_s8(reinterpret_cast<int8_t *>(out.ptr()), out_s8);
+ // Store the result with the offset contribution
+ vst1q_s32(reinterpret_cast<int32_t *>(mm_result.ptr()) + 0, in_s32.val[0]);
+ vst1q_s32(reinterpret_cast<int32_t *>(mm_result.ptr()) + 4, in_s32.val[1]);
+ vst1q_s32(reinterpret_cast<int32_t *>(mm_result.ptr()) + 8, in_s32.val[2]);
+ vst1q_s32(reinterpret_cast<int32_t *>(mm_result.ptr()) + 12, in_s32.val[3]);
},
- vector_sum_col, vector_sum_row, mm_result, out);
+ vector_sum_col, vector_sum_row, mm_result);
}
- else if(!add_a_offset && add_b_offset) // false, true
+ else if((_a_offset == 0) && (_b_offset != 0)) // false, true
{
// Set window for vector_sum_row
Window win_vector_sum_row(collapsed_window);
@@ -169,7 +244,6 @@ void NEGEMMLowpFinalizeKernel::finalize(const Window &window)
Iterator vector_sum_row(_vector_sum_row, win_vector_sum_row);
Iterator mm_result(_mm_result, window);
- Iterator out(_output, window);
execute_window_loop(window, [&](const Coordinates & id)
{
@@ -177,9 +251,6 @@ void NEGEMMLowpFinalizeKernel::finalize(const Window &window)
int32x4_t b_offset_term_s32 = vld1q_dup_s32(reinterpret_cast<const int32_t *>(vector_sum_row.ptr()) + id.y());
b_offset_term_s32 = vmulq_n_s32(b_offset_term_s32, _b_offset);
- // Add b_offset_term_s32 and c_offset_term_s32
- int32x4_t offset_term_s32 = vaddq_s32(b_offset_term_s32, c_offset_s32);
-
int32x4x4_t in_s32 =
{
{
@@ -191,40 +262,20 @@ void NEGEMMLowpFinalizeKernel::finalize(const Window &window)
};
// Add the offset terms to GEMM's result
- in_s32.val[0] = vaddq_s32(in_s32.val[0], offset_term_s32);
- in_s32.val[1] = vaddq_s32(in_s32.val[1], offset_term_s32);
- in_s32.val[2] = vaddq_s32(in_s32.val[2], offset_term_s32);
- in_s32.val[3] = vaddq_s32(in_s32.val[3], offset_term_s32);
-
- // Multiply by c_mult_int
- in_s32.val[0] = vmulq_n_s32(in_s32.val[0], _c_mult_int);
- in_s32.val[1] = vmulq_n_s32(in_s32.val[1], _c_mult_int);
- in_s32.val[2] = vmulq_n_s32(in_s32.val[2], _c_mult_int);
- in_s32.val[3] = vmulq_n_s32(in_s32.val[3], _c_mult_int);
-
- // Shift final result (negative value shift right)
- in_s32.val[0] = vshlq_s32(in_s32.val[0], shift_s32);
- in_s32.val[1] = vshlq_s32(in_s32.val[1], shift_s32);
- in_s32.val[2] = vshlq_s32(in_s32.val[2], shift_s32);
- in_s32.val[3] = vshlq_s32(in_s32.val[3], shift_s32);
-
- // Convert S32 to U16
- const int16x8x2_t in_s16 =
- {
- {
- vcombine_s16(vqmovn_s32(in_s32.val[0]), vqmovn_s32(in_s32.val[1])),
- vcombine_s16(vqmovn_s32(in_s32.val[2]), vqmovn_s32(in_s32.val[3])),
- }
- };
-
- // Convert S16 to S8
- const int8x16_t out_s8 = vcombine_s8(vqmovn_s16(in_s16.val[0]), vqmovn_s16(in_s16.val[1]));
-
- vst1q_s8(reinterpret_cast<int8_t *>(out.ptr()), out_s8);
+ in_s32.val[0] = vaddq_s32(in_s32.val[0], b_offset_term_s32);
+ in_s32.val[1] = vaddq_s32(in_s32.val[1], b_offset_term_s32);
+ in_s32.val[2] = vaddq_s32(in_s32.val[2], b_offset_term_s32);
+ in_s32.val[3] = vaddq_s32(in_s32.val[3], b_offset_term_s32);
+
+ // Store the result with the offset contribution
+ vst1q_s32(reinterpret_cast<int32_t *>(mm_result.ptr()) + 0, in_s32.val[0]);
+ vst1q_s32(reinterpret_cast<int32_t *>(mm_result.ptr()) + 4, in_s32.val[1]);
+ vst1q_s32(reinterpret_cast<int32_t *>(mm_result.ptr()) + 8, in_s32.val[2]);
+ vst1q_s32(reinterpret_cast<int32_t *>(mm_result.ptr()) + 12, in_s32.val[3]);
},
- vector_sum_row, mm_result, out);
+ vector_sum_row, mm_result);
}
- else if(add_a_offset && !add_b_offset) // true, false
+ else if((_a_offset != 0) && (_b_offset == 0)) // true, false
{
// Set window for vector_sum_col
Window win_vector_sum_col(collapsed_window);
@@ -236,7 +287,6 @@ void NEGEMMLowpFinalizeKernel::finalize(const Window &window)
Iterator vector_sum_col(_vector_sum_col, win_vector_sum_col);
Iterator mm_result(_mm_result, window);
- Iterator out(_output, window);
execute_window_loop(window, [&](const Coordinates & id)
{
@@ -256,17 +306,6 @@ void NEGEMMLowpFinalizeKernel::finalize(const Window &window)
a_offset_term_s32.val[2] = vmulq_n_s32(a_offset_term_s32.val[2], _a_offset);
a_offset_term_s32.val[3] = vmulq_n_s32(a_offset_term_s32.val[3], _a_offset);
- // Add a_offset_term_s32 and b_offset_term_s32
- int32x4x4_t offset_term_s32 =
- {
- {
- vaddq_s32(c_offset_s32, a_offset_term_s32.val[0]),
- vaddq_s32(c_offset_s32, a_offset_term_s32.val[1]),
- vaddq_s32(c_offset_s32, a_offset_term_s32.val[2]),
- vaddq_s32(c_offset_s32, a_offset_term_s32.val[3])
- }
- };
-
int32x4x4_t in_s32 =
{
{
@@ -278,232 +317,22 @@ void NEGEMMLowpFinalizeKernel::finalize(const Window &window)
};
// Add the offset terms to GEMM's result
- in_s32.val[0] = vaddq_s32(in_s32.val[0], offset_term_s32.val[0]);
- in_s32.val[1] = vaddq_s32(in_s32.val[1], offset_term_s32.val[1]);
- in_s32.val[2] = vaddq_s32(in_s32.val[2], offset_term_s32.val[2]);
- in_s32.val[3] = vaddq_s32(in_s32.val[3], offset_term_s32.val[3]);
-
- // Multiply by c_mult_int
- in_s32.val[0] = vmulq_n_s32(in_s32.val[0], _c_mult_int);
- in_s32.val[1] = vmulq_n_s32(in_s32.val[1], _c_mult_int);
- in_s32.val[2] = vmulq_n_s32(in_s32.val[2], _c_mult_int);
- in_s32.val[3] = vmulq_n_s32(in_s32.val[3], _c_mult_int);
-
- // Shift final result (negative value shift right)
- in_s32.val[0] = vshlq_s32(in_s32.val[0], shift_s32);
- in_s32.val[1] = vshlq_s32(in_s32.val[1], shift_s32);
- in_s32.val[2] = vshlq_s32(in_s32.val[2], shift_s32);
- in_s32.val[3] = vshlq_s32(in_s32.val[3], shift_s32);
-
- // Convert S32 to S16
- const int16x8x2_t in_s16 =
- {
- {
- vcombine_s16(vqmovn_s32(in_s32.val[0]), vqmovn_s32(in_s32.val[1])),
- vcombine_s16(vqmovn_s32(in_s32.val[2]), vqmovn_s32(in_s32.val[3]))
- }
- };
-
- // Convert S16 to S8
- const int8x16_t out_s8 = vcombine_s8(vqmovn_s16(in_s16.val[0]), vqmovn_s16(in_s16.val[1]));
-
- vst1q_s8(reinterpret_cast<int8_t *>(out.ptr()), out_s8);
+ in_s32.val[0] = vaddq_s32(in_s32.val[0], a_offset_term_s32.val[0]);
+ in_s32.val[1] = vaddq_s32(in_s32.val[1], a_offset_term_s32.val[1]);
+ in_s32.val[2] = vaddq_s32(in_s32.val[2], a_offset_term_s32.val[2]);
+ in_s32.val[3] = vaddq_s32(in_s32.val[3], a_offset_term_s32.val[3]);
+
+ // Store the result with the offset contribution
+ vst1q_s32(reinterpret_cast<int32_t *>(mm_result.ptr()) + 0, in_s32.val[0]);
+ vst1q_s32(reinterpret_cast<int32_t *>(mm_result.ptr()) + 4, in_s32.val[1]);
+ vst1q_s32(reinterpret_cast<int32_t *>(mm_result.ptr()) + 8, in_s32.val[2]);
+ vst1q_s32(reinterpret_cast<int32_t *>(mm_result.ptr()) + 12, in_s32.val[3]);
},
- vector_sum_col, mm_result, out);
+ vector_sum_col, mm_result);
}
else // false, false
{
- Iterator mm_result(_mm_result, window);
- Iterator out(_output, window);
-
- execute_window_loop(window, [&](const Coordinates & id)
- {
- int32x4x4_t in_s32 =
- {
- {
- vld1q_s32(reinterpret_cast<const int32_t *>(mm_result.ptr()) + 0),
- vld1q_s32(reinterpret_cast<const int32_t *>(mm_result.ptr()) + 4),
- vld1q_s32(reinterpret_cast<const int32_t *>(mm_result.ptr()) + 8),
- vld1q_s32(reinterpret_cast<const int32_t *>(mm_result.ptr()) + 12)
- }
- };
-
- // Add the offset terms to GEMM's result
- in_s32.val[0] = vaddq_s32(in_s32.val[0], c_offset_s32);
- in_s32.val[1] = vaddq_s32(in_s32.val[1], c_offset_s32);
- in_s32.val[2] = vaddq_s32(in_s32.val[2], c_offset_s32);
- in_s32.val[3] = vaddq_s32(in_s32.val[3], c_offset_s32);
-
- // Multiply by c_mult_int
- in_s32.val[0] = vmulq_n_s32(in_s32.val[0], _c_mult_int);
- in_s32.val[1] = vmulq_n_s32(in_s32.val[1], _c_mult_int);
- in_s32.val[2] = vmulq_n_s32(in_s32.val[2], _c_mult_int);
- in_s32.val[3] = vmulq_n_s32(in_s32.val[3], _c_mult_int);
-
- // Shift final result (negative value shift right)
- in_s32.val[0] = vshlq_s32(in_s32.val[0], shift_s32);
- in_s32.val[1] = vshlq_s32(in_s32.val[1], shift_s32);
- in_s32.val[2] = vshlq_s32(in_s32.val[2], shift_s32);
- in_s32.val[3] = vshlq_s32(in_s32.val[3], shift_s32);
-
- // Convert S32 to S16
- const int16x8x2_t in_s16 =
- {
- {
- vcombine_s16(vqmovn_s32(in_s32.val[0]), vqmovn_s32(in_s32.val[1])),
- vcombine_s16(vqmovn_s32(in_s32.val[2]), vqmovn_s32(in_s32.val[3]))
- }
- };
-
- // Convert U16 to S8
- const int8x16_t out_s8 = vcombine_s8(vqmovn_s16(in_s16.val[0]), vqmovn_s16(in_s16.val[1]));
-
- vst1q_s8(reinterpret_cast<int8_t *>(out.ptr()), out_s8);
- },
- mm_result, out);
+ // No offset contribution from matrix A and matrix B
+ return;
}
}
-
-NEGEMMLowpFinalizeKernel::NEGEMMLowpFinalizeKernel()
- : _func(nullptr), _vector_sum_col(nullptr), _vector_sum_row(nullptr), _mm_result(nullptr), _output(nullptr), _a_offset(0), _b_offset(0), _c_offset(0), _k_offset(0), _c_mult_int(0), _shift(0),
- _slide_vector_sum_col(true)
-{
-}
-
-void NEGEMMLowpFinalizeKernel::configure(const ITensor *vector_sum_col, const ITensor *vector_sum_row, const ITensor *mm_result, ITensor *output, int32_t num_mtx_a_cols, int32_t a_offset,
- int32_t b_offset,
- int32_t c_offset, int32_t c_mult_int, int32_t shift)
-{
- ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(mm_result, 1, DataType::S32);
- ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::S8);
-
- TensorShape mm_result_shape = mm_result->info()->tensor_shape();
- TensorShape output_shape = output->info()->tensor_shape();
-
- mm_result_shape.collapse(2);
- output_shape.collapse(2);
-
- ARM_COMPUTE_ERROR_ON_MSG(mm_result_shape[2] != output_shape[2], "mm_result tensor must have the same number of batches of output tensor");
-
- // If a_offset == 0, vector_sum_col can be a nullptr
- if(a_offset != 0)
- {
- ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(vector_sum_col, 1, DataType::S32);
- ARM_COMPUTE_ERROR_ON(vector_sum_col->info()->dimension(0) != mm_result->info()->dimension(0));
-
- TensorShape vector_sum_col_shape = vector_sum_col->info()->tensor_shape();
- vector_sum_col_shape.collapse(1);
-
- // Check if vector_sum_col_shape should be slidden or not
- // Don't slide vector_sum_col_shape along the y dimension if vector_sum_col_shape has just 1 dimension and vector_sum_row_shape more than 1
- // This scenario can happen when the the matrix multiplication is used to perform a convolution operation
- _slide_vector_sum_col = vector_sum_col_shape[1] != 1;
- }
-
- // If b_offset == 0, vector_sum_row can be a nullptr
- if(b_offset != 0)
- {
- ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(vector_sum_row, 1, DataType::S32);
- ARM_COMPUTE_ERROR_ON(vector_sum_row->info()->dimension(0) != mm_result->info()->dimension(1));
-
- TensorShape vector_sum_row_shape = vector_sum_row->info()->tensor_shape();
- vector_sum_row_shape.collapse(1);
-
- ARM_COMPUTE_ERROR_ON_MSG(vector_sum_row_shape[1] != output_shape[2], "mm_result tensor must have the same number of batches of output tensor");
-
- if(a_offset != 0)
- {
- TensorShape vector_sum_col_shape = vector_sum_col->info()->tensor_shape();
- vector_sum_col_shape.collapse(1);
-
- ARM_COMPUTE_ERROR_ON_MSG(vector_sum_col_shape[1] != 1
- && vector_sum_col_shape[1] != vector_sum_row_shape[1],
- "vector_sum_col tensor must have the same number of batches of vector_sum_row_shape or the number of batches must be set to 1");
- }
- }
-
- _vector_sum_col = vector_sum_col;
- _vector_sum_row = vector_sum_row;
- _mm_result = mm_result;
- _output = output;
- _a_offset = a_offset;
- _b_offset = b_offset;
- _k_offset = a_offset * b_offset * num_mtx_a_cols;
- _c_offset = c_offset;
- _c_mult_int = c_mult_int;
- _shift = shift;
-
- constexpr unsigned int num_elems_processed_per_iteration = 16;
-
- // Configure kernel window
- Window win = calculate_max_window(*output->info(), Steps(num_elems_processed_per_iteration));
-
- AccessWindowHorizontal mm_result_access(mm_result->info(), 0, num_elems_processed_per_iteration);
- AccessWindowHorizontal output_result_access(output->info(), 0, num_elems_processed_per_iteration);
-
- // Accordingly with a_offset and b_offset, we can have 4 cases:
- // a_offset != 0 && b_offset != 0
- // a_offset = 0 && b_offset != 0
- // a_offset != 0 && b_offset = 0
- // a_offset = 0 && b_offset = 0
- if(a_offset != 0 && b_offset != 0)
- {
- // Set the function to use
- _func = &NEGEMMLowpFinalizeKernel::finalize<true, true>;
-
- AccessWindowStatic vector_sum_row_access(vector_sum_row->info(), 0, 0, vector_sum_row->info()->dimension(0), 0);
- AccessWindowHorizontal vector_sum_col_access(vector_sum_col->info(), 0, num_elems_processed_per_iteration);
-
- update_window_and_padding(win,
- vector_sum_col_access,
- vector_sum_row_access,
- mm_result_access,
- output_result_access);
- }
- else if(a_offset == 0 && b_offset != 0)
- {
- // Set the function to use
- _func = &NEGEMMLowpFinalizeKernel::finalize<false, true>;
-
- AccessWindowStatic vector_sum_row_access(vector_sum_row->info(), 0, 0, vector_sum_row->info()->dimension(0), 0);
-
- update_window_and_padding(win,
- vector_sum_row_access,
- mm_result_access,
- output_result_access);
- }
- else if(a_offset != 0 && b_offset == 0)
- {
- // Set the function to use
- _func = &NEGEMMLowpFinalizeKernel::finalize<true, false>;
-
- AccessWindowHorizontal vector_sum_col_access(vector_sum_col->info(), 0, num_elems_processed_per_iteration);
-
- update_window_and_padding(win,
- vector_sum_col_access,
- mm_result_access,
- output_result_access);
- }
- else
- {
- // Set the function to use
- _func = &NEGEMMLowpFinalizeKernel::finalize<false, false>;
-
- update_window_and_padding(win,
- mm_result_access,
- output_result_access);
- }
-
- output_result_access.set_valid_region(win, ValidRegion(Coordinates(0, 0), output->info()->tensor_shape()));
-
- INEKernel::configure(win);
-}
-
-void NEGEMMLowpFinalizeKernel::run(const Window &window, const ThreadInfo &info)
-{
- ARM_COMPUTE_UNUSED(info);
- ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
- ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INEKernel::window(), window);
-
- (this->*_func)(window);
-}
diff --git a/src/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel.cpp b/src/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel.cpp
new file mode 100644
index 0000000000..aa3c280788
--- /dev/null
+++ b/src/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel.cpp
@@ -0,0 +1,141 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel.h"
+
+#include "arm_compute/core/Error.h"
+#include "arm_compute/core/Helpers.h"
+#include "arm_compute/core/ITensor.h"
+#include "arm_compute/core/Types.h"
+#include "arm_compute/core/Validate.h"
+#include "arm_compute/core/Window.h"
+
+#include <arm_neon.h>
+#include <cstddef>
+#include <cstdint>
+
+using namespace arm_compute;
+
+namespace arm_compute
+{
+class Coordinates;
+} // namespace arm_compute
+
+NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel::NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel()
+ : _input(nullptr), _output(nullptr), _result_offset(0), _result_mult_int(0), _result_shift(0)
+{
+}
+
+void NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel::configure(const ITensor *input, ITensor *output, int result_offset, int result_mult_int, int result_shift)
+{
+ ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::S32);
+ ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::QASYMM8);
+
+ _input = input;
+ _output = output;
+ _result_offset = result_offset;
+ _result_mult_int = result_mult_int;
+ _result_shift = result_shift;
+
+ constexpr unsigned int num_elems_processed_per_iteration = 16;
+
+ // Configure kernel window
+ Window win = calculate_max_window(*output->info(), Steps(num_elems_processed_per_iteration));
+
+ AccessWindowHorizontal input_access(input->info(), 0, num_elems_processed_per_iteration);
+ AccessWindowHorizontal output_result_access(output->info(), 0, num_elems_processed_per_iteration);
+
+ update_window_and_padding(win,
+ input_access,
+ output_result_access);
+
+ output_result_access.set_valid_region(win, ValidRegion(Coordinates(0, 0), output->info()->tensor_shape()));
+
+ INEKernel::configure(win);
+}
+
+void NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel::run(const Window &window, const ThreadInfo &info)
+{
+ ARM_COMPUTE_UNUSED(info);
+ ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
+ ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INEKernel::window(), window);
+
+ const int32x4_t result_offset_s32 = vdupq_n_s32(_result_offset);
+ const int32x4_t result_shift_s32 = vdupq_n_s32(-_result_shift);
+ const int32x4_t zero_s32 = vdupq_n_s32(0);
+
+ Iterator in(_input, window);
+ Iterator out(_output, window);
+
+ execute_window_loop(window, [&](const Coordinates & id)
+ {
+ int32x4x4_t in_s32 =
+ {
+ {
+ vld1q_s32(reinterpret_cast<const int32_t *>(in.ptr()) + 0),
+ vld1q_s32(reinterpret_cast<const int32_t *>(in.ptr()) + 4),
+ vld1q_s32(reinterpret_cast<const int32_t *>(in.ptr()) + 8),
+ vld1q_s32(reinterpret_cast<const int32_t *>(in.ptr()) + 12)
+ }
+ };
+
+ // Add the offset terms to GEMM's result
+ in_s32.val[0] = vaddq_s32(in_s32.val[0], result_offset_s32);
+ in_s32.val[1] = vaddq_s32(in_s32.val[1], result_offset_s32);
+ in_s32.val[2] = vaddq_s32(in_s32.val[2], result_offset_s32);
+ in_s32.val[3] = vaddq_s32(in_s32.val[3], result_offset_s32);
+
+ // Multiply by result_mult_int
+ in_s32.val[0] = vmulq_n_s32(in_s32.val[0], _result_mult_int);
+ in_s32.val[1] = vmulq_n_s32(in_s32.val[1], _result_mult_int);
+ in_s32.val[2] = vmulq_n_s32(in_s32.val[2], _result_mult_int);
+ in_s32.val[3] = vmulq_n_s32(in_s32.val[3], _result_mult_int);
+
+ // Shift final result (negative value shift right)
+ in_s32.val[0] = vshlq_s32(in_s32.val[0], result_shift_s32);
+ in_s32.val[1] = vshlq_s32(in_s32.val[1], result_shift_s32);
+ in_s32.val[2] = vshlq_s32(in_s32.val[2], result_shift_s32);
+ in_s32.val[3] = vshlq_s32(in_s32.val[3], result_shift_s32);
+
+ // Saturate negative values
+ in_s32.val[0] = vmaxq_s32(in_s32.val[0], zero_s32);
+ in_s32.val[1] = vmaxq_s32(in_s32.val[1], zero_s32);
+ in_s32.val[2] = vmaxq_s32(in_s32.val[2], zero_s32);
+ in_s32.val[3] = vmaxq_s32(in_s32.val[3], zero_s32);
+
+ // Convert S32 to S16
+ const int16x8x2_t in_s16 =
+ {
+ {
+ vcombine_s16(vqmovn_s32(in_s32.val[0]), vqmovn_s32(in_s32.val[1])),
+ vcombine_s16(vqmovn_s32(in_s32.val[2]), vqmovn_s32(in_s32.val[3]))
+ }
+ };
+
+ // Convert S16 to U8
+ const uint8x16_t out_u8 = vcombine_u8(vqmovun_s16(in_s16.val[0]), vqmovun_s16(in_s16.val[1]));
+
+ vst1q_u8(out.ptr(), out_u8);
+ },
+ in, out);
+}
\ No newline at end of file
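Per element, the new output-stage kernel computes the following. A scalar sketch of the intrinsics above (the NEON path narrows via saturating S32->S16 and S16->U8 moves; the final clamp below captures the same [0, 255] range):

#include <algorithm>
#include <cstdint>

// (in + result_offset) * result_mult_int, shifted right, clamped to [0, 255]
uint8_t quantize_down_reference(int32_t in, int32_t result_offset,
                                int32_t result_mult_int, int32_t result_shift)
{
    int32_t tmp = (in + result_offset) * result_mult_int;
    tmp >>= result_shift;                     // vshlq_s32 with a negative shift
    tmp = std::max<int32_t>(tmp, 0);          // saturate negative values (vmaxq_s32)
    return static_cast<uint8_t>(std::min<int32_t>(tmp, 255)); // narrow to U8 (vqmovn/vqmovun)
}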
diff --git a/src/core/NEON/kernels/NEGEMMLowpReductionKernel.cpp b/src/core/NEON/kernels/NEGEMMLowpReductionKernel.cpp
index 9df13ce0e3..81d9b5bb81 100644
--- a/src/core/NEON/kernels/NEGEMMLowpReductionKernel.cpp
+++ b/src/core/NEON/kernels/NEGEMMLowpReductionKernel.cpp
@@ -49,12 +49,12 @@ INEGEMMLowpReductionKernel::INEGEMMLowpReductionKernel()
{
}
-void NEGEMMLowpMatrixAReductionKernel::configure(const ITensor *mtx_a_interleaved4x4, ITensor *vector_sum_row, int32_t num_mtx_a_cols, bool is_interleaved4x4)
+void NEGEMMLowpMatrixAReductionKernel::configure(const ITensor *mtx_a, ITensor *vector_sum_row, int32_t num_mtx_a_cols, bool is_interleaved4x4)
{
- ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(mtx_a_interleaved4x4, 1, DataType::S8);
+ ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(mtx_a, 1, DataType::QASYMM8);
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(vector_sum_row, 1, DataType::S32);
- _input = mtx_a_interleaved4x4;
+ _input = mtx_a;
_output = vector_sum_row;
_k = num_mtx_a_cols;
_is_reshaped = is_interleaved4x4;
@@ -97,9 +97,9 @@ void NEGEMMLowpMatrixAReductionKernel::run(const Window &window, const ThreadInf
execute_window_loop(collapsed_window, [&](const Coordinates & id)
{
// Note: Since the input is unsigned char, we can safely use unsigned int for the accumulation
- int32x4_t sum_row = vdupq_n_s32(0);
+ uint32x4_t sum_row = vdupq_n_u32(0);
- auto matrix_a = reinterpret_cast<const int8_t *>(in.ptr() + (id.x() / 4) * _input->info()->strides_in_bytes()[1] + id.y() * _input->info()->strides_in_bytes()[2]);
+ const uint8_t *matrix_a = (in.ptr() + (id.x() / 4) * _input->info()->strides_in_bytes()[1] + id.y() * _input->info()->strides_in_bytes()[2]);
#if __arm__
asm volatile("PLD [%0, #128*4]" ::"r"(matrix_a));
@@ -109,43 +109,43 @@ void NEGEMMLowpMatrixAReductionKernel::run(const Window &window, const ThreadInf
// This for loop performs 4 accumulations
for(; i <= (_k - 4); i += 4)
{
- const int8x16_t a0_s8 = vld1q_s8(matrix_a + i * 4);
+ const uint8x16_t a0_u8 = vld1q_u8(matrix_a + i * 4);
// Convert U8 to U16
- int16x4x4_t a0_s16 =
+ uint16x4x4_t a0_u16 =
{
{
- vget_low_s16(vmovl_s8(vget_low_s8(a0_s8))),
- vget_high_s16(vmovl_s8(vget_low_s8(a0_s8))),
- vget_low_s16(vmovl_s8(vget_high_s8(a0_s8))),
- vget_high_s16(vmovl_s8(vget_high_s8(a0_s8)))
+ vget_low_u16(vmovl_u8(vget_low_u8(a0_u8))),
+ vget_high_u16(vmovl_u8(vget_low_u8(a0_u8))),
+ vget_low_u16(vmovl_u8(vget_high_u8(a0_u8))),
+ vget_high_u16(vmovl_u8(vget_high_u8(a0_u8)))
}
};
// Accumulate to U16
- a0_s16.val[0] = vadd_s16(a0_s16.val[0], a0_s16.val[1]);
- a0_s16.val[0] = vadd_s16(a0_s16.val[0], a0_s16.val[2]);
- a0_s16.val[0] = vadd_s16(a0_s16.val[0], a0_s16.val[3]);
+ a0_u16.val[0] = vadd_u16(a0_u16.val[0], a0_u16.val[1]);
+ a0_u16.val[0] = vadd_u16(a0_u16.val[0], a0_u16.val[2]);
+ a0_u16.val[0] = vadd_u16(a0_u16.val[0], a0_u16.val[3]);
// Accumulate to U32
- sum_row = vaddw_s16(sum_row, a0_s16.val[0]);
+ sum_row = vaddw_u16(sum_row, a0_u16.val[0]);
}
// This for loop performs the leftover accumulations
for(; i < _k; ++i)
{
- const int8x8_t a0_s8 = vld1_s8(matrix_a + i * 4);
+ const uint8x8_t a0_u8 = vld1_u8(matrix_a + i * 4);
// Convert U8 to U16
- const int16x4_t a0_s16 = vget_low_s16(vmovl_s8(a0_s8));
+ const uint16x4_t a0_u16 = vget_low_u16(vmovl_u8(a0_u8));
// Accumulate to U32
- sum_row = vaddw_s16(sum_row, a0_s16);
+ sum_row = vaddw_u16(sum_row, a0_u16);
}
auto vector_sum_row = reinterpret_cast<int32_t *>(out.ptr());
- vst1q_s32(vector_sum_row, sum_row);
+ vst1q_s32(vector_sum_row, vreinterpretq_s32_u32(sum_row));
},
in, out);
}
@@ -154,10 +154,10 @@ void NEGEMMLowpMatrixAReductionKernel::run(const Window &window, const ThreadInf
execute_window_loop(collapsed_window, [&](const Coordinates & id)
{
// Note: Since the input is unsigned char, we can safely use unsigned int for the accumulation
- int32x4_t sum_row_s32 = vdupq_n_s32(0);
- int32_t sum_row = 0;
+ uint32x4_t sum_row_u32 = vdupq_n_u32(0);
+ uint32_t sum_row = 0;
- auto matrix_a = reinterpret_cast<const int8_t *>(in.ptr() + id.x() * _input->info()->strides_in_bytes()[1] + +id.y() * _input->info()->strides_in_bytes()[2]);
+ const uint8_t *matrix_a = (in.ptr() + id.x() * _input->info()->strides_in_bytes()[1] + id.y() * _input->info()->strides_in_bytes()[2]);
#if __arm__
asm volatile("PLD [%0, #128*4]" ::"r"(matrix_a));
@@ -167,29 +167,29 @@ void NEGEMMLowpMatrixAReductionKernel::run(const Window &window, const ThreadInf
// This for loop performs 16 accumulations
for(; i <= (_k - 16); i += 16)
{
- const int8x16_t a0_s8 = vld1q_s8(matrix_a + i);
+ const uint8x16_t a0_u8 = vld1q_u8(matrix_a + i);
// Partial accumulations in U16
- const int16x8_t tmp_sum0 = vaddl_s8(vget_low_s8(a0_s8), vget_high_s8(a0_s8));
+ const uint16x8_t tmp_sum0 = vaddl_u8(vget_low_u8(a0_u8), vget_high_u8(a0_u8));
// Accumulate to U32
- sum_row_s32 = vaddq_s32(sum_row_s32, vpaddlq_s16(tmp_sum0));
+ sum_row_u32 = vaddq_u32(sum_row_u32, vpaddlq_u16(tmp_sum0));
}
// This for loop performs the leftover accumulations
for(; i < _k; ++i)
{
- sum_row += static_cast<int32_t>(matrix_a[i]);
+ sum_row += static_cast<uint32_t>(matrix_a[i]);
}
#if defined(__aarch64__)
// Reduction operation available on 64 bit architectures only
- sum_row += vaddvq_s32(sum_row_s32);
+ sum_row += vaddvq_u32(sum_row_u32);
#else // __aarch64__
- int32x2_t tmp = vpadd_s32(vget_high_s32(sum_row_s32), vget_low_s32(sum_row_s32));
- tmp = vpadd_s32(tmp, tmp);
+ uint32x2_t tmp = vpadd_u32(vget_high_u32(sum_row_u32), vget_low_u32(sum_row_u32));
+ tmp = vpadd_u32(tmp, tmp);
- sum_row += vget_lane_s32(tmp, 0);
+ sum_row += vget_lane_u32(tmp, 0);
#endif // __aarch64__
*(reinterpret_cast<int *>(out.ptr())) = static_cast<int>(sum_row);
@@ -198,12 +198,12 @@ void NEGEMMLowpMatrixAReductionKernel::run(const Window &window, const ThreadInf
}
}
-void NEGEMMLowpMatrixBReductionKernel::configure(const ITensor *mtx_b_transposed1xW, ITensor *vector_sum_col, int32_t num_mtx_b_rows, bool is_transposed1xW)
+void NEGEMMLowpMatrixBReductionKernel::configure(const ITensor *mtx_b, ITensor *vector_sum_col, int32_t num_mtx_b_rows, bool is_transposed1xW)
{
- ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(mtx_b_transposed1xW, 1, DataType::S8);
+ ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(mtx_b, 1, DataType::QASYMM8);
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(vector_sum_col, 1, DataType::S32);
- _input = mtx_b_transposed1xW;
+ _input = mtx_b;
_output = vector_sum_col;
_k = num_mtx_b_rows;
_is_reshaped = is_transposed1xW;
@@ -246,17 +246,17 @@ void NEGEMMLowpMatrixBReductionKernel::run(const Window &window, const ThreadInf
execute_window_loop(collapsed_window, [&](const Coordinates & id)
{
// Note: Since the input is unsigned char, we can safely use unsigned int for the accumulation
- int32x4x4_t sum_col =
+ uint32x4x4_t sum_col =
{
{
- vdupq_n_s32(0),
- vdupq_n_s32(0),
- vdupq_n_s32(0),
- vdupq_n_s32(0)
+ vdupq_n_u32(0),
+ vdupq_n_u32(0),
+ vdupq_n_u32(0),
+ vdupq_n_u32(0)
}
};
- auto matrix_b = reinterpret_cast<const int8_t *>(in.ptr() + (id.x() / 16) * _input->info()->strides_in_bytes()[1] + id.y() * _input->info()->strides_in_bytes()[2]);
+ const uint8_t *matrix_b = in.ptr() + (id.x() / 16) * _input->info()->strides_in_bytes()[1] + id.y() * _input->info()->strides_in_bytes()[2];
#if __arm__
asm volatile("PLD [%0, #128*4]" ::"r"(matrix_b));
@@ -265,14 +265,14 @@ void NEGEMMLowpMatrixBReductionKernel::run(const Window &window, const ThreadInf
int i = 0;
for(; i < _k; ++i)
{
- const int8x16_t b0_s8 = vld1q_s8(matrix_b + i * 16);
+ const uint8x16_t b0_u8 = vld1q_u8(matrix_b + i * 16);
// Convert U8 to U16
- const int16x8x2_t b0_s16 =
+ const uint16x8x2_t b0_u16 =
{
{
- vmovl_s8(vget_low_s8(b0_s8)),
- vmovl_s8(vget_high_s8(b0_s8))
+ vmovl_u8(vget_low_u8(b0_u8)),
+ vmovl_u8(vget_high_u8(b0_u8))
}
};
@@ -280,20 +280,20 @@ void NEGEMMLowpMatrixBReductionKernel::run(const Window &window, const ThreadInf
sum_col =
{
{
- vaddw_s16(sum_col.val[0], vget_low_s16(b0_s16.val[0])),
- vaddw_s16(sum_col.val[1], vget_high_s16(b0_s16.val[0])),
- vaddw_s16(sum_col.val[2], vget_low_s16(b0_s16.val[1])),
- vaddw_s16(sum_col.val[3], vget_high_s16(b0_s16.val[1]))
+ vaddw_u16(sum_col.val[0], vget_low_u16(b0_u16.val[0])),
+ vaddw_u16(sum_col.val[1], vget_high_u16(b0_u16.val[0])),
+ vaddw_u16(sum_col.val[2], vget_low_u16(b0_u16.val[1])),
+ vaddw_u16(sum_col.val[3], vget_high_u16(b0_u16.val[1]))
}
};
}
auto vector_sum_col = reinterpret_cast<int32_t *>(out.ptr());
- vst1q_s32(vector_sum_col + 0, sum_col.val[0]);
- vst1q_s32(vector_sum_col + 4, sum_col.val[1]);
- vst1q_s32(vector_sum_col + 8, sum_col.val[2]);
- vst1q_s32(vector_sum_col + 12, sum_col.val[3]);
+ vst1q_s32(vector_sum_col + 0, vreinterpretq_s32_u32(sum_col.val[0]));
+ vst1q_s32(vector_sum_col + 4, vreinterpretq_s32_u32(sum_col.val[1]));
+ vst1q_s32(vector_sum_col + 8, vreinterpretq_s32_u32(sum_col.val[2]));
+ vst1q_s32(vector_sum_col + 12, vreinterpretq_s32_u32(sum_col.val[3]));
},
in, out);
}
@@ -326,17 +326,17 @@ void NEGEMMLowpMatrixBReductionKernel::run(const Window &window, const ThreadInf
}
// Note: Since the input is unsigned char, we can safely use unsigned int for the accumulation
- int32x4x4_t sum_col =
+ uint32x4x4_t sum_col =
{
{
- vdupq_n_s32(0),
- vdupq_n_s32(0),
- vdupq_n_s32(0),
- vdupq_n_s32(0)
+ vdupq_n_u32(0),
+ vdupq_n_u32(0),
+ vdupq_n_u32(0),
+ vdupq_n_u32(0)
}
};
- auto matrix_b = reinterpret_cast<const int8_t *>(inb.ptr() + id.y() * _input->info()->strides_in_bytes()[2]);
+ const uint8_t *matrix_b = inb.ptr() + id.y() * _input->info()->strides_in_bytes()[2];
#if __arm__
asm volatile("PLD [%0, #128*4]" ::"r"(matrix_b));
@@ -347,10 +347,10 @@ void NEGEMMLowpMatrixBReductionKernel::run(const Window &window, const ThreadInf
// This for loop performs 4 accumulations
for(; i <= (_k - 4); i += 4)
{
- const int8x16_t b0_s8 = vld1q_s8(matrix_b + 0 * in_b_stride);
- const int8x16_t b1_s8 = vld1q_s8(matrix_b + 1 * in_b_stride);
- const int8x16_t b2_s8 = vld1q_s8(matrix_b + 2 * in_b_stride);
- const int8x16_t b3_s8 = vld1q_s8(matrix_b + 3 * in_b_stride);
+ const uint8x16_t b0_u8 = vld1q_u8(matrix_b + 0 * in_b_stride);
+ const uint8x16_t b1_u8 = vld1q_u8(matrix_b + 1 * in_b_stride);
+ const uint8x16_t b2_u8 = vld1q_u8(matrix_b + 2 * in_b_stride);
+ const uint8x16_t b3_u8 = vld1q_u8(matrix_b + 3 * in_b_stride);
#if __arm__
asm volatile("PLD [%0, #128*1]" ::"r"(matrix_b + 1 * in_b_stride));
@@ -360,31 +360,31 @@ void NEGEMMLowpMatrixBReductionKernel::run(const Window &window, const ThreadInf
#endif /* __arm__ */
// Partial accumulation in u16
- int16x8x2_t tmp_sum =
+ uint16x8x2_t tmp_sum =
{
{
- vdupq_n_s16(0),
- vdupq_n_s16(0)
+ vdupq_n_u16(0),
+ vdupq_n_u16(0)
}
};
- tmp_sum.val[0] = vaddw_s8(tmp_sum.val[0], vget_low_s8(b0_s8));
- tmp_sum.val[0] = vaddw_s8(tmp_sum.val[0], vget_low_s8(b1_s8));
- tmp_sum.val[0] = vaddw_s8(tmp_sum.val[0], vget_low_s8(b2_s8));
- tmp_sum.val[0] = vaddw_s8(tmp_sum.val[0], vget_low_s8(b3_s8));
- tmp_sum.val[1] = vaddw_s8(tmp_sum.val[1], vget_high_s8(b0_s8));
- tmp_sum.val[1] = vaddw_s8(tmp_sum.val[1], vget_high_s8(b1_s8));
- tmp_sum.val[1] = vaddw_s8(tmp_sum.val[1], vget_high_s8(b2_s8));
- tmp_sum.val[1] = vaddw_s8(tmp_sum.val[1], vget_high_s8(b3_s8));
+ tmp_sum.val[0] = vaddw_u8(tmp_sum.val[0], vget_low_u8(b0_u8));
+ tmp_sum.val[0] = vaddw_u8(tmp_sum.val[0], vget_low_u8(b1_u8));
+ tmp_sum.val[0] = vaddw_u8(tmp_sum.val[0], vget_low_u8(b2_u8));
+ tmp_sum.val[0] = vaddw_u8(tmp_sum.val[0], vget_low_u8(b3_u8));
+ tmp_sum.val[1] = vaddw_u8(tmp_sum.val[1], vget_high_u8(b0_u8));
+ tmp_sum.val[1] = vaddw_u8(tmp_sum.val[1], vget_high_u8(b1_u8));
+ tmp_sum.val[1] = vaddw_u8(tmp_sum.val[1], vget_high_u8(b2_u8));
+ tmp_sum.val[1] = vaddw_u8(tmp_sum.val[1], vget_high_u8(b3_u8));
// Accumulate to U32
sum_col =
{
{
- vaddw_s16(sum_col.val[0], vget_low_s16(tmp_sum.val[0])),
- vaddw_s16(sum_col.val[1], vget_high_s16(tmp_sum.val[0])),
- vaddw_s16(sum_col.val[2], vget_low_s16(tmp_sum.val[1])),
- vaddw_s16(sum_col.val[3], vget_high_s16(tmp_sum.val[1]))
+ vaddw_u16(sum_col.val[0], vget_low_u16(tmp_sum.val[0])),
+ vaddw_u16(sum_col.val[1], vget_high_u16(tmp_sum.val[0])),
+ vaddw_u16(sum_col.val[2], vget_low_u16(tmp_sum.val[1])),
+ vaddw_u16(sum_col.val[3], vget_high_u16(tmp_sum.val[1]))
}
};
@@ -394,14 +394,14 @@ void NEGEMMLowpMatrixBReductionKernel::run(const Window &window, const ThreadInf
// This for loop performs the leftover accumulations
for(; i < _k; ++i)
{
- const int8x16_t b0_s8 = vld1q_s8(matrix_b + 0 * in_b_stride);
+ const uint8x16_t b0_u8 = vld1q_u8(matrix_b + 0 * in_b_stride);
// Convert U8 to U16
- const int16x8x2_t b0_s16 =
+ const uint16x8x2_t b0_u16 =
{
{
- vmovl_s8(vget_low_s8(b0_s8)),
- vmovl_s8(vget_high_s8(b0_s8))
+ vmovl_u8(vget_low_u8(b0_u8)),
+ vmovl_u8(vget_high_u8(b0_u8))
}
};
@@ -409,10 +409,10 @@ void NEGEMMLowpMatrixBReductionKernel::run(const Window &window, const ThreadInf
sum_col =
{
{
- vaddw_s16(sum_col.val[0], vget_low_s16(b0_s16.val[0])),
- vaddw_s16(sum_col.val[1], vget_high_s16(b0_s16.val[0])),
- vaddw_s16(sum_col.val[2], vget_low_s16(b0_s16.val[1])),
- vaddw_s16(sum_col.val[3], vget_high_s16(b0_s16.val[1]))
+ vaddw_u16(sum_col.val[0], vget_low_u16(b0_u16.val[0])),
+ vaddw_u16(sum_col.val[1], vget_high_u16(b0_u16.val[0])),
+ vaddw_u16(sum_col.val[2], vget_low_u16(b0_u16.val[1])),
+ vaddw_u16(sum_col.val[3], vget_high_u16(b0_u16.val[1]))
}
};
@@ -421,10 +421,10 @@ void NEGEMMLowpMatrixBReductionKernel::run(const Window &window, const ThreadInf
auto vector_sum_col = reinterpret_cast<int32_t *>(out.ptr());
- vst1q_s32(vector_sum_col + 0, sum_col.val[0]);
- vst1q_s32(vector_sum_col + 4, sum_col.val[1]);
- vst1q_s32(vector_sum_col + 8, sum_col.val[2]);
- vst1q_s32(vector_sum_col + 12, sum_col.val[3]);
+ vst1q_s32(vector_sum_col + 0, vreinterpretq_s32_u32(sum_col.val[0]));
+ vst1q_s32(vector_sum_col + 4, vreinterpretq_s32_u32(sum_col.val[1]));
+ vst1q_s32(vector_sum_col + 8, vreinterpretq_s32_u32(sum_col.val[2]));
+ vst1q_s32(vector_sum_col + 12, vreinterpretq_s32_u32(sum_col.val[3]));
},
inb, out);
}
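A scalar sketch of what the two reduction kernels produce (illustrative flat layout; the real kernels also handle the interleaved 4x4 and transposed 1xW reshapes and the unreshaped fallback paths above):

#include <cstdint>

// vector_sum_row[i] = sum of row i of A; vector_sum_col[j] = sum of column j of B.
// Inputs are u8, so plain unsigned accumulation is safe, as in the kernels above.
void gemmlowp_reductions_reference(const uint8_t *a, const uint8_t *b,
                                   int32_t *vector_sum_row, int32_t *vector_sum_col,
                                   int m, int n, int k)
{
    for(int i = 0; i < m; ++i)
    {
        uint32_t acc = 0;
        for(int l = 0; l < k; ++l)
        {
            acc += a[i * k + l];
        }
        vector_sum_row[i] = static_cast<int32_t>(acc);
    }
    for(int j = 0; j < n; ++j)
    {
        uint32_t acc = 0;
        for(int l = 0; l < k; ++l)
        {
            acc += b[l * n + j];
        }
        vector_sum_col[j] = static_cast<int32_t>(acc);
    }
}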
diff --git a/src/core/NEON/kernels/NEGEMMTranspose1xWKernel.cpp b/src/core/NEON/kernels/NEGEMMTranspose1xWKernel.cpp
index 7f4ee1ec49..7f83144e12 100644
--- a/src/core/NEON/kernels/NEGEMMTranspose1xWKernel.cpp
+++ b/src/core/NEON/kernels/NEGEMMTranspose1xWKernel.cpp
@@ -43,7 +43,8 @@ using namespace arm_compute;
void NEGEMMTranspose1xWKernel::configure(const ITensor *input, ITensor *output)
{
- ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QS8, DataType::QS16, DataType::U8, DataType::S8, DataType::U16, DataType::S16, DataType::U32, DataType::S32, DataType::F16,
+ ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::QS8, DataType::QS16, DataType::U8, DataType::S8, DataType::U16, DataType::S16, DataType::U32, DataType::S32,
+ DataType::F16,
DataType::F32);
ARM_COMPUTE_ERROR_ON_NULLPTR(output);