From c3a74200a66ea5fb718b8406bed2043bc097930e Mon Sep 17 00:00:00 2001 From: Sang-Hoon Park Date: Fri, 22 Nov 2019 16:05:46 +0000 Subject: COMPMID-2775 [NE] add support for QASYMM8_SIGNED to SoftmaxLayer Change-Id: Ic46d4143929c8c9b548355d85c78542faf25d612 Signed-off-by: Sang-Hoon Park Reviewed-on: https://review.mlplatform.org/c/2376 Reviewed-by: Michele Di Giorgio Tested-by: Arm Jenkins Comments-Addressed: Arm Jenkins Reviewed-by: Georgios Pinitas --- arm_compute/core/NEON/NEColorConvertHelper.inl | 20 ++--- arm_compute/core/NEON/NEMath.h | 17 +++- arm_compute/core/NEON/NEMath.inl | 25 +++++- src/core/NEON/kernels/NESoftmaxLayerKernel.cpp | 106 +++++++++++++++++++------ tests/validation/NEON/SoftmaxLayer.cpp | 23 ++++++ 5 files changed, 154 insertions(+), 37 deletions(-) diff --git a/arm_compute/core/NEON/NEColorConvertHelper.inl b/arm_compute/core/NEON/NEColorConvertHelper.inl index 62c6eb5aea..7145d6f206 100644 --- a/arm_compute/core/NEON/NEColorConvertHelper.inl +++ b/arm_compute/core/NEON/NEColorConvertHelper.inl @@ -83,7 +83,7 @@ inline void rgb_to_u8_conversion(const uint8x16x3_t &in, uint8x16_t &out) rgb2u8_red_coef, rgb2u8_green_coef, rgb2u8_blue_coef); //Conversion from 1(Greyscale) 4 floats to 1(Greyscale) 4 uint8s - arm_compute::convert_float32x4x4_to_unit8x16(out_float32, out); + arm_compute::convert_float32x4x4_to_uint8x16(out_float32, out); } inline void rgb_to_yuv_calculation(const float32x4_t &rvec, const float32x4_t &gvec, const float32x4_t &bvec, @@ -214,12 +214,12 @@ inline void rgb_to_yuv_conversion(uint8x16x3_t &vec_top, uint8x16x3_t &vec_botto fyvec_bottom.val[i], fuvec_bottom.val[i], fvvec_bottom.val[i]); } - arm_compute::convert_float32x4x4_to_unit8x16(fyvec_top, vec_top.val[0]); - arm_compute::convert_float32x4x4_to_unit8x16(fuvec_top, vec_top.val[1]); - arm_compute::convert_float32x4x4_to_unit8x16(fvvec_top, vec_top.val[2]); - arm_compute::convert_float32x4x4_to_unit8x16(fyvec_bottom, vec_bottom.val[0]); - 
arm_compute::convert_float32x4x4_to_unit8x16(fuvec_bottom, vec_bottom.val[1]); - arm_compute::convert_float32x4x4_to_unit8x16(fvvec_bottom, vec_bottom.val[2]); + arm_compute::convert_float32x4x4_to_uint8x16(fyvec_top, vec_top.val[0]); + arm_compute::convert_float32x4x4_to_uint8x16(fuvec_top, vec_top.val[1]); + arm_compute::convert_float32x4x4_to_uint8x16(fvvec_top, vec_top.val[2]); + arm_compute::convert_float32x4x4_to_uint8x16(fyvec_bottom, vec_bottom.val[0]); + arm_compute::convert_float32x4x4_to_uint8x16(fuvec_bottom, vec_bottom.val[1]); + arm_compute::convert_float32x4x4_to_uint8x16(fvvec_bottom, vec_bottom.val[2]); } inline void store_rgb_to_nv12(const uint8x16_t &rvec_top, const uint8x16_t &gvec_top, const uint8x16_t &bvec_top, @@ -298,9 +298,9 @@ inline void store_rgb_to_yuv4(const uint8x16_t &rvec, const uint8x16_t &gvec, co } uint8x16_t yvec, uvec, vvec; - arm_compute::convert_float32x4x4_to_unit8x16(fyvec, yvec); - arm_compute::convert_float32x4x4_to_unit8x16(fuvec, uvec); - arm_compute::convert_float32x4x4_to_unit8x16(fvvec, vvec); + arm_compute::convert_float32x4x4_to_uint8x16(fyvec, yvec); + arm_compute::convert_float32x4x4_to_uint8x16(fuvec, uvec); + arm_compute::convert_float32x4x4_to_uint8x16(fvvec, vvec); vst1q_u8(out_y, yvec); vst1q_u8(out_u, uvec); diff --git a/arm_compute/core/NEON/NEMath.h b/arm_compute/core/NEON/NEMath.h index aa3054306c..54f8252250 100644 --- a/arm_compute/core/NEON/NEMath.h +++ b/arm_compute/core/NEON/NEMath.h @@ -165,6 +165,14 @@ int32_t rounding_divide_by_pow2(int32_t x, int exponent); */ float32x4x4_t convert_uint8x16_to_float32x4x4(const uint8x16_t &in); +/** Converts from int8x16 to float32x4x4_t + * + * @param[in] in Vector of int8 to be converted + * + * @return Converted vector of float + */ +float32x4x4_t convert_int8x16_to_float32x4x4(const int8x16_t &in); + /** Converts from two float32x4x3_t to just one uint8x8x3_t * * @param[in] in1 First input vector of float to be converted @@ -178,7 +186,14 @@ void 
convert_float32x4x3_to_uint8x8x3(const float32x4x3_t &in1, const float32x4x * @param[in] in Vector of float to be converted * @param[out] out Converted vector of uint8 to store the result */ -void convert_float32x4x4_to_unit8x16(const float32x4x4_t &in, uint8x16_t &out); +void convert_float32x4x4_to_uint8x16(const float32x4x4_t &in, uint8x16_t &out); + +/** Converts from float32x4x4_t to just one int8x16_t + * + * @param[in] in Vector of float to be converted + * @param[out] out Converted vector of int8 to store the result + */ +void convert_float32x4x4_to_int8x16(const float32x4x4_t &in, int8x16_t &out); /** Calculate sine. * diff --git a/arm_compute/core/NEON/NEMath.inl b/arm_compute/core/NEON/NEMath.inl index 179f1b6299..5d8b82c281 100644 --- a/arm_compute/core/NEON/NEMath.inl +++ b/arm_compute/core/NEON/NEMath.inl @@ -331,6 +331,20 @@ inline float32x4x4_t convert_uint8x16_to_float32x4x4(const uint8x16_t &in) return out; } +inline float32x4x4_t convert_int8x16_to_float32x4x4(const int8x16_t &in) +{ + float32x4x4_t out; + + const auto tmp1 = vmovl_s8(vget_low_s8(in)); + out.val[0] = vcvtq_f32_s32(vmovl_s16(vget_low_s16(tmp1))); + out.val[1] = vcvtq_f32_s32(vmovl_s16(vget_high_s16(tmp1))); + + const auto tmp2 = vmovl_s8(vget_high_s8(in)); + out.val[2] = vcvtq_f32_s32(vmovl_s16(vget_low_s16(tmp2))); + out.val[3] = vcvtq_f32_s32(vmovl_s16(vget_high_s16(tmp2))); + return out; +} + inline void convert_float32x4x3_to_uint8x8x3(const float32x4x3_t &in1, const float32x4x3_t &in2, uint8x8x3_t &out) { out.val[0] = vqmovn_u16(vcombine_u16(vqmovn_u32(vcvtq_u32_f32(in1.val[0])), @@ -341,7 +355,7 @@ inline void convert_float32x4x3_to_uint8x8x3(const float32x4x3_t &in1, const flo vqmovn_u32(vcvtq_u32_f32(in2.val[2])))); } -inline void convert_float32x4x4_to_unit8x16(const float32x4x4_t &in, uint8x16_t &out) +inline void convert_float32x4x4_to_uint8x16(const float32x4x4_t &in, uint8x16_t &out) { const auto low = vcombine_u16(vqmovn_u32(vcvtq_u32_f32(in.val[0])),
vqmovn_u32(vcvtq_u32_f32(in.val[1]))); @@ -350,6 +364,15 @@ inline void convert_float32x4x4_to_unit8x16(const float32x4x4_t &in, uint8x16_t out = vcombine_u8(vqmovn_u16(low), vqmovn_u16(high)); } +inline void convert_float32x4x4_to_int8x16(const float32x4x4_t &in, int8x16_t &out) +{ + const auto low = vcombine_s16(vqmovn_s32(vcvtq_s32_f32(in.val[0])), + vqmovn_s32(vcvtq_s32_f32(in.val[1]))); + const auto high = vcombine_s16(vqmovn_s32(vcvtq_s32_f32(in.val[2])), + vqmovn_s32(vcvtq_s32_f32(in.val[3]))); + out = vcombine_s8(vqmovn_s16(low), vqmovn_s16(high)); +} + #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC /** Exponent polynomial coefficients */ /** Logarithm polynomial coefficients */ diff --git a/src/core/NEON/kernels/NESoftmaxLayerKernel.cpp b/src/core/NEON/kernels/NESoftmaxLayerKernel.cpp index a3ecce3a1e..95cbdf582b 100644 --- a/src/core/NEON/kernels/NESoftmaxLayerKernel.cpp +++ b/src/core/NEON/kernels/NESoftmaxLayerKernel.cpp @@ -44,12 +44,46 @@ namespace arm_compute { +template +int_vec_type convert_float_to_int(const float_vec_type &in); + +template +float_vec_type convert_int_to_float(const int_vec_type &in); + +template <> +uint8x16_t convert_float_to_int(const float32x4x4_t &in) +{ + uint8x16_t out; + convert_float32x4x4_to_uint8x16(in, out); + return out; +} + +template <> +int8x16_t convert_float_to_int(const float32x4x4_t &in) +{ + int8x16_t out; + convert_float32x4x4_to_int8x16(in, out); + return out; +} + +template <> +float32x4x4_t convert_int_to_float(const uint8x16_t &in) +{ + return convert_uint8x16_to_float32x4x4(in); +} + +template <> +float32x4x4_t convert_int_to_float(const int8x16_t &in) +{ + return convert_int8x16_to_float32x4x4(in); +} + namespace { Status validate_arguments_logits_1d_max(const ITensorInfo &input, const ITensorInfo &output) { ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(&input); - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&input, 1, DataType::QASYMM8, DataType::F16, DataType::F32); + 
ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&input, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::F16, DataType::F32); // Validate in case of configured output if(output.total_size() != 0) @@ -156,6 +190,9 @@ void NELogits1DMaxKernel::configure(const ITensor *input, ITensor *output) case DataType::QASYMM8: _func = &logits_1d_max; break; + case DataType::QASYMM8_SIGNED: + _func = &logits_1d_max; + break; #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC case DataType::F16: _func = &logits_1d_max; @@ -203,12 +240,12 @@ void NELogits1DMaxKernel::run(const Window &window, const ThreadInfo &info) namespace { Status validate_arguments_logits_softmax(const ITensorInfo &input, const ITensorInfo &max, - const ITensorInfo &output, const float beta, const ITensorInfo &tmp) + const ITensorInfo &output, const float beta, const ITensorInfo &tmp, bool is_log) { ARM_COMPUTE_UNUSED(beta); // Check input ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(&input); - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&input, 1, DataType::QASYMM8, DataType::F16, DataType::F32); + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&input, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::F16, DataType::F32); const bool is_quantized_asymmetric = is_data_type_quantized_asymmetric(input.data_type()); @@ -220,7 +257,7 @@ Status validate_arguments_logits_softmax(const ITensorInfo &input, const ITensor // Check output if configured if(output.total_size() != 0) { - const QuantizationInfo output_quantization = is_quantized_asymmetric ? QuantizationInfo(1.f / 256.f, 0) : output.quantization_info(); + const QuantizationInfo output_quantization = is_quantized_asymmetric ? 
arm_compute::get_softmax_output_quantization_info(input.data_type(), is_log) : output.quantization_info(); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(&input, &output); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(&input, &output); ARM_COMPUTE_RETURN_ERROR_ON(output.quantization_info() != output_quantization); @@ -240,12 +277,12 @@ Status validate_arguments_logits_softmax(const ITensorInfo &input, const ITensor } std::pair validate_and_configure_window_logits_softmax(ITensorInfo &input, ITensorInfo &max, - ITensorInfo &output, ITensorInfo &tmp) + ITensorInfo &output, ITensorInfo &tmp, bool is_log) { const bool is_quantized_asymmetric = is_data_type_quantized_asymmetric(input.data_type()); // Output auto initialization if not yet initialized - const QuantizationInfo output_quantization = is_quantized_asymmetric ? QuantizationInfo(1.f / 256.f, 0) : output.quantization_info(); + const QuantizationInfo output_quantization = is_quantized_asymmetric ? arm_compute::get_softmax_output_quantization_info(input.data_type(), is_log) : output.quantization_info(); auto_init_if_empty(output, TensorInfo(input).set_quantization_info(output_quantization).reset_padding()); // Tmp auto initialization if not yet initialized @@ -269,9 +306,13 @@ std::pair validate_and_configure_window_logits_softmax(ITensorIn return std::make_pair(err, win); } -template +template void logits_1d_softmax_qasymm8(const ITensor &in, const ITensor &max, void *const tmp, ITensor &out, const float beta, const Window &window) { + static_assert(std::is_same::value + || std::is_same::value, + "quantized type should be either qasymm8_t or qasymm8_signed_t."); + const int start_x = in.info()->valid_region().anchor.x(); const int input_width = in.info()->valid_region().shape.x(); @@ -286,8 +327,8 @@ void logits_1d_softmax_qasymm8(const ITensor &in, const ITensor &max, void *cons execute_window_loop(window, [&](const Coordinates &) { /* Get pointers */ - const auto in_ptr = reinterpret_cast(in_it.ptr()) + 
start_x; - const auto out_ptr = reinterpret_cast(out_it.ptr()) + start_x; + const auto in_ptr = reinterpret_cast(in_it.ptr()) + start_x; + const auto out_ptr = reinterpret_cast(out_it.ptr()) + start_x; const auto tmp_ptr = reinterpret_cast(tmp); float sum{}; @@ -296,8 +337,8 @@ void logits_1d_softmax_qasymm8(const ITensor &in, const ITensor &max, void *cons /* Compute exponentials and sum */ { /* Get max value */ - const auto max_val = *reinterpret_cast(max_it.ptr()); - const auto vec_max = vdupq_n_u8(max_val); + const auto max_val = *reinterpret_cast(max_it.ptr()); + const auto vec_max = wrapper::vdup_n(max_val, wrapper::traits::vector_128_tag{}); /* Init sum to zero */ float32x4x4_t vec_sum = @@ -313,8 +354,8 @@ void logits_1d_softmax_qasymm8(const ITensor &in, const ITensor &max, void *cons for(; x <= (input_width - vec_size); x += vec_size) { auto vec_elements = wrapper::vloadq(in_ptr + x); - vec_elements = vsubq_u8(vec_max, vec_elements); - auto vec_elements_flt = convert_uint8x16_to_float32x4x4(vec_elements); + vec_elements = wrapper::vsub(vec_max, vec_elements); + auto vec_elements_flt = convert_int_to_float(vec_elements); if(is_log) { @@ -374,12 +415,14 @@ void logits_1d_softmax_qasymm8(const ITensor &in, const ITensor &max, void *cons /* Normalize exponentials */ { + constexpr bool is_qasymm8_signed = std::is_same::value; /* Loop over row and compute softmax */ int x = 0; for(; x <= (input_width - vec_size); x += vec_size) { + using int_vec_type = wrapper::traits::neon_vector_t; float32x4x4_t vec_in = vld4q_f32(tmp_ptr + x); - uint8x16_t normalized_value{}; + int_vec_type normalized_value{}; if(is_log) { const float32x4x4_t sub = @@ -389,31 +432,41 @@ void logits_1d_softmax_qasymm8(const ITensor &in, const ITensor &max, void *cons vsubq_f32(vec_in.val[2], vdupq_n_f32(sum)), vsubq_f32(vec_in.val[3], vdupq_n_f32(sum)), }; - convert_float32x4x4_to_unit8x16(sub, normalized_value); + normalized_value = convert_float_to_int(sub); } else { - const float32x4x4_t 
mul = + float32x4x4_t mul = { vmulq_f32(vec_in.val[0], vdupq_n_f32(sum_inversed)), vmulq_f32(vec_in.val[1], vdupq_n_f32(sum_inversed)), vmulq_f32(vec_in.val[2], vdupq_n_f32(sum_inversed)), vmulq_f32(vec_in.val[3], vdupq_n_f32(sum_inversed)), }; - convert_float32x4x4_to_unit8x16(mul, normalized_value); + + if(is_qasymm8_signed) + { + const auto offset_vec = wrapper::vdup_n(128.f, wrapper::traits::vector_128_tag{}); + mul.val[0] = wrapper::vsub(mul.val[0], offset_vec); + mul.val[1] = wrapper::vsub(mul.val[1], offset_vec); + mul.val[2] = wrapper::vsub(mul.val[2], offset_vec); + mul.val[3] = wrapper::vsub(mul.val[3], offset_vec); + } + + normalized_value = convert_float_to_int(mul); } - vst1q_u8(out_ptr + x, normalized_value); + wrapper::vstore(out_ptr + x, normalized_value); } /* Run remaining elements */ for(; x < input_width; ++x) { if(is_log) { - out_ptr[x] = utils::cast::saturate_cast(tmp_ptr[x] - sum); + out_ptr[x] = utils::cast::saturate_cast(tmp_ptr[x] - sum); } else { - out_ptr[x] = utils::cast::saturate_cast(tmp_ptr[x] * sum_inversed); + out_ptr[x] = utils::cast::saturate_cast((tmp_ptr[x] * sum_inversed) - (is_qasymm8_signed ? 
128.f : 0)); } } } @@ -556,15 +609,18 @@ void NELogits1DSoftmaxKernel::configure(const ITensor *input, const ITen ARM_COMPUTE_ERROR_ON_NULLPTR(input, max, output, tmp); ARM_COMPUTE_ERROR_ON_NULLPTR(input->info(), max->info(), output->info(), tmp->info()); // Perform validation step - ARM_COMPUTE_ERROR_THROW_ON(validate_arguments_logits_softmax(*input->info(), *max->info(), *output->info(), beta, *tmp->info())); + ARM_COMPUTE_ERROR_THROW_ON(validate_arguments_logits_softmax(*input->info(), *max->info(), *output->info(), beta, *tmp->info(), IS_LOG)); // Configure kernel window - auto win_config = validate_and_configure_window_logits_softmax(*input->info(), *max->info(), *output->info(), *tmp->info()); + auto win_config = validate_and_configure_window_logits_softmax(*input->info(), *max->info(), *output->info(), *tmp->info(), IS_LOG); ARM_COMPUTE_ERROR_THROW_ON(win_config.first); switch(input->info()->data_type()) { case DataType::QASYMM8: - _func = &logits_1d_softmax_qasymm8; + _func = &logits_1d_softmax_qasymm8; + break; + case DataType::QASYMM8_SIGNED: + _func = &logits_1d_softmax_qasymm8; break; #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC case DataType::F16: @@ -594,8 +650,8 @@ Status NELogits1DSoftmaxKernel::validate(const ITensorInfo *input, const { ARM_COMPUTE_ERROR_ON_NULLPTR(input, max, output, tmp); - ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments_logits_softmax(*input, *max, *output, beta, *tmp)); - ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window_logits_softmax(*input->clone(), *max->clone(), *output->clone(), *tmp->clone()).first); + ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments_logits_softmax(*input, *max, *output, beta, *tmp, IS_LOG)); + ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window_logits_softmax(*input->clone(), *max->clone(), *output->clone(), *tmp->clone(), IS_LOG).first); return Status{}; } diff --git a/tests/validation/NEON/SoftmaxLayer.cpp b/tests/validation/NEON/SoftmaxLayer.cpp index 7f8c622ef9..cbf7729bc3 100644 --- 
a/tests/validation/NEON/SoftmaxLayer.cpp +++ b/tests/validation/NEON/SoftmaxLayer.cpp @@ -48,6 +48,7 @@ RelativeTolerance tolerance_f16(half(0.2)); /** Tolerance for quantized operations */ constexpr AbsoluteTolerance tolerance_qasymm8(1); +constexpr AbsoluteTolerance tolerance_qasymm8_signed(1); /** CNN data types */ const auto CNNDataTypes = framework::dataset::make("DataType", @@ -241,6 +242,28 @@ FIXTURE_DATA_TEST_CASE(RunLarge, NESoftmaxLayerQuantizedFixture, framew validate(Accessor(_target), _reference, tolerance_qasymm8); } TEST_SUITE_END() //QASYMM8 + +TEST_SUITE(QASYMM8_SIGNED) +FIXTURE_DATA_TEST_CASE(RunSmall2D, NESoftmaxLayerQuantizedFixture, framework::DatasetMode::ALL, combine(combine(combine(datasets::SoftmaxLayerSmallShapes(), + framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)), + combine(framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.5f, -10) }), + framework::dataset::make("Beta", { 1.0f, 2.f }))), + framework::dataset::make("Axis", { 1 }))) +{ + // Validate output + validate(Accessor(_target), _reference, tolerance_qasymm8_signed); +} +FIXTURE_DATA_TEST_CASE(RunSmall4D, NESoftmaxLayerQuantizedFixture, framework::DatasetMode::ALL, combine(combine(combine(datasets::Small4DShapes(), + framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)), + combine(framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.5f, -10) }), + framework::dataset::make("Beta", { 1.0f, 2.f }))), + framework::dataset::make("Axis", { 1, 2, 3 }))) +{ + // Validate output + validate(Accessor(_target), _reference, tolerance_qasymm8_signed); +} +TEST_SUITE_END() //QASYMM8_SIGNED + TEST_SUITE_END() //Quantized TEST_SUITE_END() //SoftmaxLayer -- cgit v1.2.1