diff options
Diffstat (limited to 'src/core')
-rw-r--r--  src/core/CL/kernels/CLQLSTMLayerNormalizationKernel.cpp   | 10 ++++++++--
-rw-r--r--  src/core/NEON/kernels/NEQLSTMLayerNormalizationKernel.cpp |  5 +----
2 files changed, 9 insertions, 6 deletions
diff --git a/src/core/CL/kernels/CLQLSTMLayerNormalizationKernel.cpp b/src/core/CL/kernels/CLQLSTMLayerNormalizationKernel.cpp index b9767e8ec2..d9da3cb36e 100644 --- a/src/core/CL/kernels/CLQLSTMLayerNormalizationKernel.cpp +++ b/src/core/CL/kernels/CLQLSTMLayerNormalizationKernel.cpp @@ -31,11 +31,17 @@ namespace arm_compute { namespace { +QuantizationInfo compute_output_qinfo() +{ + return QuantizationInfo(1.f / 4096); +} + std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, ITensorInfo *output) { ARM_COMPUTE_ERROR_ON_NULLPTR(input); // Output auto inizialitation if not yet initialized auto_init_if_empty(*output, *input); + output->set_quantization_info(compute_output_qinfo()); const uint32_t temp_num_elems_processed_per_iteration = max_cl_vector_width / input->element_size(); /* If width is less then step, then make step same as width to avoid global size being step instead of actual width. */ @@ -48,7 +54,7 @@ std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, ITen return std::make_pair(Status{}, win); } -Status validate_arguments(const ITensorInfo *input, ITensorInfo *output, const ITensorInfo *weight, const ITensorInfo *bias) +Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, const ITensorInfo *weight, const ITensorInfo *bias) { ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, weight, bias, output); @@ -129,7 +135,7 @@ void CLQLSTMLayerNormalizationKernel::configure(const ICLTensor *input, ICLTenso configure(CLKernelLibrary::get().get_compile_context(), input, output, weight, bias); } -Status CLQLSTMLayerNormalizationKernel::validate(const ITensorInfo *input, ITensorInfo *output, const ITensorInfo *weight, const ITensorInfo *bias) +Status CLQLSTMLayerNormalizationKernel::validate(const ITensorInfo *input, const ITensorInfo *output, const ITensorInfo *weight, const ITensorInfo *bias) { ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output, weight, bias)); 
ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input->clone().get(), output->clone().get()).first); diff --git a/src/core/NEON/kernels/NEQLSTMLayerNormalizationKernel.cpp b/src/core/NEON/kernels/NEQLSTMLayerNormalizationKernel.cpp index e966c6bdba..29ffee867b 100644 --- a/src/core/NEON/kernels/NEQLSTMLayerNormalizationKernel.cpp +++ b/src/core/NEON/kernels/NEQLSTMLayerNormalizationKernel.cpp @@ -172,10 +172,7 @@ void NEQLSTMLayerNormalizationKernel::run(const Window &window, const ThreadInfo inline QuantizationInfo NEQLSTMLayerNormalizationKernel::compute_output_qinfo() { - const UniformQuantizationInfo iq_info = _input->info()->quantization_info().uniform(); - const UniformQuantizationInfo wq_info = _weight->info()->quantization_info().uniform(); - const float output_scale = (wq_info.scale * iq_info.scale) * 1024; - return QuantizationInfo(output_scale); + return QuantizationInfo(1.f / 4096); } inline std::pair<int64_t, int64_t> NEQLSTMLayerNormalizationKernel::sum_qsymm16(const int16_t *input_ptr) |