From ba27e4467dfc04e23ce9483330be062e9aaebdc5 Mon Sep 17 00:00:00 2001
From: Michalis Spyrou
Date: Tue, 28 May 2019 10:04:57 +0100
Subject: COMPMID-2236: QUANTIZED_16BIT_LSTM operator for NEON

Change-Id: I554023508e09b790ecc1bbdada529697d6c7b616
Signed-off-by: giuros01
Reviewed-on: https://review.mlplatform.org/c/1551
Comments-Addressed: Arm Jenkins
Tested-by: Arm Jenkins
Reviewed-by: Michalis Spyrou
---
 .../NEON/kernels/NEDequantizationLayerKernel.cpp   | 71 +++++++++++++++++++++-
 src/core/NEON/kernels/NEStridedSliceKernel.cpp     |  2 +-
 .../NEON/kernels/NEWidthConcatenateLayerKernel.cpp |  2 +-
 3 files changed, 71 insertions(+), 4 deletions(-)

(limited to 'src/core')

diff --git a/src/core/NEON/kernels/NEDequantizationLayerKernel.cpp b/src/core/NEON/kernels/NEDequantizationLayerKernel.cpp
index bf0a2ca7bf..d11f04a82f 100644
--- a/src/core/NEON/kernels/NEDequantizationLayerKernel.cpp
+++ b/src/core/NEON/kernels/NEDequantizationLayerKernel.cpp
@@ -28,6 +28,7 @@
 #include "arm_compute/core/Error.h"
 #include "arm_compute/core/Helpers.h"
 #include "arm_compute/core/NEON/NEAsymm.h"
+#include "arm_compute/core/NEON/NESymm.h"
 #include "arm_compute/core/NEON/wrapper/wrapper.h"
 #include "arm_compute/core/Utils.h"
 #include "arm_compute/core/Validate.h"
@@ -42,7 +43,7 @@ namespace
 Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output)
 {
     ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output);
-    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::QSYMM8);
+    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::QSYMM8, DataType::QSYMM16);
 
     if(output->tensor_shape().total_size() > 0)
     {
@@ -94,6 +95,27 @@ inline void store_result(float16_t *ptr, const float32x4x4_t &v)
 }
 #endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
 
+template <typename T>
+inline void store_result(T *ptr, const float32x4x2_t &v)
+{
+    ARM_COMPUTE_UNUSED(ptr, v);
+}
+
+template <>
+inline void store_result<float>(float *ptr, const float32x4x2_t &v)
+{
+    wrapper::vstore(ptr, v.val[0]);
+    wrapper::vstore(ptr + 4, v.val[1]);
+}
+
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+template <>
+inline void store_result<float16_t>(float16_t *ptr, const float32x4x2_t &v)
+{
+    wrapper::vstore(ptr, vcombine_f16(vcvt_f16_f32(v.val[0]), vcvt_f16_f32(v.val[1])));
+}
+#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
+
 template <typename T>
 void run_dequantization_qasymm8(const ITensor *input, ITensor *output, const Window &window)
 {
@@ -179,6 +201,48 @@ void run_dequantization_qsymm8(const ITensor *input, ITensor *output, const Wind
     in, out);
 }
 
+template <typename T>
+void run_dequantization_qsymm16(const ITensor *input, ITensor *output, const Window &window)
+{
+    const UniformQuantizationInfo &qinfo = input->info()->quantization_info().uniform();
+    const float                    scale = qinfo.scale;
+
+    const int  window_step_x  = 8;
+    const auto window_start_x = static_cast<int>(window.x().start());
+    const auto window_end_x   = static_cast<int>(window.x().end());
+
+    // Collapse window and reset first dimension to handle tail calculations manually
+    Window win_collapsed = window.collapse_if_possible(window, Window::DimZ);
+    win_collapsed.set(Window::DimX, Window::Dimension(0, 1, 1));
+
+    // Create iterators
+    Iterator in(input, win_collapsed);
+    Iterator out(output, win_collapsed);
+
+    execute_window_loop(win_collapsed, [&](const Coordinates &)
+    {
+        const auto in_ptr  = reinterpret_cast<const int16_t *>(in.ptr());
+        const auto out_ptr = reinterpret_cast<T *>(out.ptr());
+
+        int x = window_start_x;
+        for(; x <= (window_end_x - window_step_x); x += window_step_x)
+        {
+            const auto vin  = wrapper::vloadq(in_ptr + x);
+            const auto vdeq = vdequantize_int16(vin, scale);
+
+            store_result<T>(reinterpret_cast<T *>(out_ptr + x), vdeq);
+        }
+
+        // Compute left-over elements
+        for(; x < window_end_x; ++x)
+        {
+            int16_t val    = *(in_ptr + x);
+            *(out_ptr + x) = static_cast<T>(dequantize_qsymm16(val, scale));
+        }
+    },
+    in, out);
+}
+
 template <typename T>
 void run_dequantization_core(const ITensor *input, ITensor *output, const Window &window)
 {
@@ -190,6 +254,9 @@ void run_dequantization_core(const ITensor *input, ITensor *output, const Window
         case DataType::QSYMM8:
             run_dequantization_qsymm8<T>(input, output, window);
             break;
+        case DataType::QSYMM16:
+            run_dequantization_qsymm16<T>(input, output, window);
+            break;
         default:
             ARM_COMPUTE_ERROR("Unsupported data type.");
     }
@@ -244,4 +311,4 @@ void NEDequantizationLayerKernel::run(const Window &window, const ThreadInfo &in
             ARM_COMPUTE_ERROR("Unsupported data type.");
     }
 }
-} // namespace arm_compute
\ No newline at end of file
+} // namespace arm_compute
diff --git a/src/core/NEON/kernels/NEStridedSliceKernel.cpp b/src/core/NEON/kernels/NEStridedSliceKernel.cpp
index ece291e0a3..c33e699999 100644
--- a/src/core/NEON/kernels/NEStridedSliceKernel.cpp
+++ b/src/core/NEON/kernels/NEStridedSliceKernel.cpp
@@ -45,7 +45,7 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output,
     ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output);
     ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1,
                                                          DataType::U8, DataType::S8, DataType::QASYMM8,
-                                                         DataType::U16, DataType::S16,
+                                                         DataType::U16, DataType::S16, DataType::QSYMM16,
                                                          DataType::U32, DataType::S32,
                                                          DataType::F16, DataType::F32);
 
diff --git a/src/core/NEON/kernels/NEWidthConcatenateLayerKernel.cpp b/src/core/NEON/kernels/NEWidthConcatenateLayerKernel.cpp
index 28f655c529..7b1ad9c2e8 100644
--- a/src/core/NEON/kernels/NEWidthConcatenateLayerKernel.cpp
+++ b/src/core/NEON/kernels/NEWidthConcatenateLayerKernel.cpp
@@ -61,7 +61,7 @@ Status validate_arguments(const ITensorInfo *input, unsigned int width_offset, c
     ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1,
                                                          DataType::U8, DataType::S8, DataType::QASYMM8,
                                                          DataType::U16, DataType::S16, DataType::F16,
-                                                         DataType::U32, DataType::F32);
+                                                         DataType::U32, DataType::S32, DataType::F32);
     ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
     ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(0) + width_offset > output->dimension(0));
 
--
cgit v1.2.1
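
For reference, a minimal usage sketch (not part of this commit) of how the new QSYMM16 dequantization path could be exercised through the public NEDequantizationLayer function. The tensor shape and the 1/32768 scale are illustrative assumptions in the spirit of the 16-bit LSTM use case, not values taken from the patch.

#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/NEON/functions/NEDequantizationLayer.h"
#include "arm_compute/runtime/Tensor.h"

using namespace arm_compute;

int main()
{
    // Hypothetical QSYMM16 input (e.g. an LSTM cell-state tensor); shape and scale chosen for illustration only.
    Tensor src;
    Tensor dst;
    src.allocator()->init(TensorInfo(TensorShape(16U, 2U), 1, DataType::QSYMM16, QuantizationInfo(1.f / 32768.f)));
    dst.allocator()->init(TensorInfo(TensorShape(16U, 2U), 1, DataType::F32));

    // NEDequantizationLayer dispatches to NEDequantizationLayerKernel, which after this
    // patch accepts QSYMM16 and uses vdequantize_int16() for the vectorised path.
    NEDequantizationLayer dequant;
    dequant.configure(&src, &dst);

    src.allocator()->allocate();
    dst.allocator()->allocate();

    // ... fill src with quantised int16 data ...

    dequant.run();
    return 0;
}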