From 0d008f77b0085619c446d0ab5dc1228a80776706 Mon Sep 17 00:00:00 2001
From: Sang-Hoon Park
Date: Fri, 13 Mar 2020 14:56:05 +0000
Subject: COMPMID-3281: Implement QSYMM16 Layer Normalization for NEON QLSTM

- Reference kernel is modified to use the same algorithm as NEON kernel.
- NEON kernel is implemented.
- Tests for validation and run are added.

Change-Id: I3533bc2bd12c6e9cc75d837ecf193f74ceddf796
Signed-off-by: Sang-Hoon Park
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/2948
Comments-Addressed: Arm Jenkins
Tested-by: Arm Jenkins
Reviewed-by: Michele Di Giorgio
---
 .../reference/QLSTMLayerNormalization.cpp          | 74 ++++++++++++----------
 1 file changed, 40 insertions(+), 34 deletions(-)

(limited to 'tests/validation/reference/QLSTMLayerNormalization.cpp')

diff --git a/tests/validation/reference/QLSTMLayerNormalization.cpp b/tests/validation/reference/QLSTMLayerNormalization.cpp
index 0e24de6584..dd6517f81f 100644
--- a/tests/validation/reference/QLSTMLayerNormalization.cpp
+++ b/tests/validation/reference/QLSTMLayerNormalization.cpp
@@ -26,10 +26,9 @@
 
 #include "ArithmeticOperations.h"
 #include "MeanStdDevNormalizationLayer.h"
 #include "PixelWiseMultiplication.h"
+#include "arm_compute/core/utils/misc/Utility.h"
 #include "src/core/utils/quantization/AsymmHelpers.cpp"
-#include "support/ToolchainSupport.h"
-
 namespace arm_compute
 {
 namespace test
@@ -38,53 +37,60 @@ namespace validation
 {
 namespace reference
 {
-SimpleTensor<float> qlstm_layer_normalization_float_compute(SimpleTensor<float> src, SimpleTensor<float> weight, SimpleTensor<float> bias)
-{
-    SimpleTensor<float> output = mean_std_normalization_layer(src);
-    output                     = pixel_wise_multiplication(output, weight, 1, ConvertPolicy::SATURATE, RoundingPolicy::TO_ZERO, DataType::F32);
-    return arithmetic_operation(ArithmeticOperation::ADD, output, bias, DataType::F32, ConvertPolicy::SATURATE);
-}
-
 SimpleTensor<int16_t> qlstm_layer_normalization(const SimpleTensor<int16_t> &src, const SimpleTensor<int16_t> &weight, const SimpleTensor<int32_t> &bias)
 {
     ARM_COMPUTE_ERROR_ON(src.shape().num_dimensions() > 2);
+    SimpleTensor<int16_t> output{ src.shape(), DataType::QSYMM16 };
 
-    SimpleTensor<float> converted_src{ src.shape(), DataType::F32 };
-    SimpleTensor<float> converted_weight{ weight.shape(), DataType::F32 };
-    SimpleTensor<float> converted_bias{ bias.shape(), DataType::F32 };
-
-    const auto iq_info = src.quantization_info().uniform();
+    const auto wq_info = weight.quantization_info().uniform();
     int        output_multiplier{};
     int        output_shift{};
-    quantization::calculate_quantized_multiplier(iq_info.scale, &output_multiplier, &output_shift);
-
-    const float layer_norm_scale = output_multiplier * std::pow(2, static_cast<double>(output_shift - 31));
-    const float bias_scale       = std::pow(2., -10) * layer_norm_scale;
+    const auto s = quantization::calculate_quantized_multiplier(wq_info.scale, &output_multiplier, &output_shift);
+    output_shift *= -1;
 
-    for(int i = 0; i < src.num_elements(); i++)
+    if(!bool(s))
     {
-        converted_src[i] = static_cast<float>(src[i]);
+        output_multiplier = 0;
+        output_shift      = 0;
     }
 
-    for(int i = 0; i < bias.num_elements(); i++)
-    {
-        converted_bias[i] = static_cast<float>(bias[i]) * bias_scale;
-    }
+    const uint32_t num_batch = src.shape()[1];
+    const uint32_t num_input = src.shape()[0];
 
-    for(int i = 0; i < weight.num_elements(); i++)
+    for(uint32_t batch_idx = 0; batch_idx < num_batch; ++batch_idx)
     {
-        converted_weight[i] = weight[i] * layer_norm_scale;
-    }
+        int64_t sum{};
+        int64_t sum_sq{};
 
-    SimpleTensor<float>   output_float = qlstm_layer_normalization_float_compute(converted_src, converted_weight, converted_bias);
-    SimpleTensor<int16_t> output{ output_float.shape(), DataType::QSYMM16 };
+        for(uint32_t input_idx = 0; input_idx < num_input; ++input_idx)
+        {
+            const auto index = batch_idx * num_input + input_idx;
+            const auto val   = static_cast<int32_t>(src[index]);
+            sum += val;
+            sum_sq += val * val;
+        }
 
-    for(int i = 0; i < output.num_elements(); i++)
-    {
-        const auto output_val_s32 = static_cast<int32_t>(support::cpp11::round(output_float[i] * std::pow(2, 12)));
-        output[i]                 = utility::clamp<int32_t, int16_t>(output_val_s32, std::numeric_limits<int16_t>::min());
-    }
+        const auto temp     = static_cast<int64_t>(0x100000) / num_input;
+        const auto mean     = sum * 1024 / static_cast<int64_t>(num_input);
+        const auto variance = ((sum_sq * temp) - (mean * mean)) / 0x100000;
+
+        int32_t stddev_invsqrt_mul{};
+        int32_t stddev_invsqrt_shift{};
+        quantization::get_invsqrt_quantized_multiplier_exp(variance, -1, stddev_invsqrt_mul, stddev_invsqrt_shift);
+        for(uint32_t input_idx = 0; input_idx < num_input; ++input_idx)
+        {
+            const auto    index           = batch_idx * num_input + input_idx;
+            const auto    val             = static_cast<int32_t>(src[index]);
+            const auto    shifted         = (val << 10) - mean;
+            const auto    rescaled        = quantization::multiply_by_quantized_multiplier(shifted, stddev_invsqrt_mul, stddev_invsqrt_shift);
+            const int64_t weighted        = rescaled * weight[input_idx] + bias[input_idx];
+            const auto    reverse_shifted = static_cast<int32_t>((weighted + 512) >> 10);
+            auto          out_val         = quantization::multiply_by_quantized_multiplier(reverse_shifted, output_multiplier, output_shift + 12);
+            out_val                       = arm_compute::utility::clamp<decltype(out_val), int16_t>(out_val, std::numeric_limits<int16_t>::min());
+            output[index]                 = static_cast<int16_t>(out_val);
+        }
+    }
 
     return output;
 }
 } // namespace reference
-- 
cgit v1.2.1
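
Note on the arithmetic in the new reference kernel, for readers following the patch: each batch row is normalized in two integer passes. The first pass accumulates `sum` and `sum_sq` in 64-bit; the mean is kept in Q10 (scaled by 1024) and the variance is derived through a 2^20 (Q20) scaling so both squared terms share one fixed-point domain. The second pass centers each value in Q10, multiplies by a fixed-point 1/stddev obtained from `get_invsqrt_quantized_multiplier_exp`, applies the weight and bias, and requantizes to QSYMM16. The sketch below is not part of the patch; it mirrors the two-pass flow outside the library, substitutes a plain floating-point 1/sqrt for the fixed-point inverse-sqrt helper, and omits the weight-scale requantization (`output_multiplier` / `output_shift + 12`) that the real kernel performs, so its outputs are only indicative.

```cpp
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <cstdio>
#include <vector>

int main()
{
    // One "batch row" of QSYMM16 values, as in the kernel's inner loops.
    const std::vector<int16_t> src    = { 100, -200, 300, -400 };
    const std::vector<int16_t> weight = { 512, 512, 512, 512 };
    const std::vector<int32_t> bias   = { 0, 0, 0, 0 };
    const int64_t              n      = static_cast<int64_t>(src.size());

    // Pass 1: 64-bit accumulation of sum and sum of squares.
    int64_t sum{};
    int64_t sum_sq{};
    for(const int16_t v : src)
    {
        sum += v;
        sum_sq += static_cast<int64_t>(v) * v;
    }

    // Mean in Q10 (x1024); variance via the Q20 (2^20) scaling used in the patch.
    const int64_t temp     = static_cast<int64_t>(0x100000) / n;
    const int64_t mean     = sum * 1024 / n;
    const int64_t variance = ((sum_sq * temp) - (mean * mean)) / 0x100000;

    // Stand-in for get_invsqrt_quantized_multiplier_exp(): the real kernel
    // produces a fixed-point multiplier/shift pair, not a double.
    const double inv_stddev = 1.0 / std::sqrt(static_cast<double>(variance));

    // Pass 2: center in Q10, normalize, apply weight/bias, undo Q10, clamp.
    for(size_t i = 0; i < src.size(); ++i)
    {
        const int64_t shifted  = (static_cast<int64_t>(src[i]) << 10) - mean;
        const double  weighted = static_cast<double>(shifted) * inv_stddev * weight[i] + bias[i];
        int64_t       out      = std::llround(weighted / 1024.0);
        out                    = std::max<int64_t>(INT16_MIN, std::min<int64_t>(INT16_MAX, out));
        std::printf("out[%zu] = %d\n", i, static_cast<int>(out));
    }
    return 0;
}
```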