 src/core/NEON/kernels/NEGEMMLowpOffsetContributionOutputStageKernel.cpp | 10 +++++-----
 src/runtime/NEON/functions/NEFullyConnectedLayer.cpp                    |  8 ++++----
 2 files changed, 9 insertions(+), 9 deletions(-)
diff --git a/src/core/NEON/kernels/NEGEMMLowpOffsetContributionOutputStageKernel.cpp b/src/core/NEON/kernels/NEGEMMLowpOffsetContributionOutputStageKernel.cpp
index 86abb2d65c..5d2df6d2c9 100644
--- a/src/core/NEON/kernels/NEGEMMLowpOffsetContributionOutputStageKernel.cpp
+++ b/src/core/NEON/kernels/NEGEMMLowpOffsetContributionOutputStageKernel.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019 ARM Limited.
+ * Copyright (c) 2019-2020 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -909,11 +909,11 @@ get_configured_function(const ITensor *mm_result, const ITensor *vector_sum_row,
                                    && mm_result->info()->tensor_shape().y() != vector_sum_row->info()->tensor_shape().x();
 
     // Check if we need to clamp the result using min and max
-    PixelValue type_min = 0;
-    PixelValue type_max = 0;
+    PixelValue type_min{};
+    PixelValue type_max{};
     std::tie(type_min, type_max) = get_min_max(output->info()->data_type());
-    int type_min_int = type_min.get<int>();
-    int type_max_int = type_max.get<int>();
+    int32_t type_min_int = type_min.get<int32_t>();
+    int32_t type_max_int = type_max.get<int32_t>();
     const bool is_bounded_relu = !(output_stage.gemmlowp_min_bound == type_min_int && output_stage.gemmlowp_max_bound == type_max_int);
 
     // Check if we need to perform fixed point requantization
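
Note on the hunk above: the kernel only needs a bounded ReLU when the output stage's requested bounds are narrower than the full range of the output data type, which is what is_bounded_relu captures; switching the comparison to int32_t fixes the width of that check. A minimal standalone sketch of the same idea, written against std::numeric_limits for a QASYMM8 (uint8_t) output rather than the library's PixelValue/get_min_max helpers (the function name below is illustrative, not ACL API):

#include <cstdint>
#include <limits>

// Hypothetical standalone check: clamping is only required when the
// requested bounds are narrower than the natural range of the type.
// gemmlowp_min_bound / gemmlowp_max_bound mirror the fields used above.
bool needs_bounded_relu(int32_t gemmlowp_min_bound, int32_t gemmlowp_max_bound)
{
    // Assuming a QASYMM8 (uint8_t) output, as in the kernel's common path.
    const int32_t type_min = std::numeric_limits<uint8_t>::min(); // 0
    const int32_t type_max = std::numeric_limits<uint8_t>::max(); // 255
    return !(gemmlowp_min_bound == type_min && gemmlowp_max_bound == type_max);
}

Keeping both sides of the comparison as int32_t avoids any platform-dependent width of plain int, which is the point of the type change in this hunk.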
diff --git a/src/runtime/NEON/functions/NEFullyConnectedLayer.cpp b/src/runtime/NEON/functions/NEFullyConnectedLayer.cpp
index 01746eb3db..4c264e4832 100644
--- a/src/runtime/NEON/functions/NEFullyConnectedLayer.cpp
+++ b/src/runtime/NEON/functions/NEFullyConnectedLayer.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2019 ARM Limited.
+ * Copyright (c) 2017-2020 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -357,9 +357,9 @@ Status NEFullyConnectedLayer::validate(const ITensorInfo *input, const ITensorIn
         const UniformQuantizationInfo wq_info = weights->quantization_info().uniform();
         const UniformQuantizationInfo oq_info = output->quantization_info().uniform();
 
-        float multiplier = (iq_info.scale * wq_info.scale) / oq_info.scale;
-        int   output_multiplier;
-        int   output_shift;
+        float   multiplier = (iq_info.scale * wq_info.scale) / oq_info.scale;
+        int32_t output_multiplier;
+        int32_t output_shift;
         ARM_COMPUTE_RETURN_ON_ERROR(quantization::calculate_quantized_multiplier(multiplier, &output_multiplier, &output_shift));
         ARM_COMPUTE_RETURN_ON_ERROR(NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint::validate(&gemmlowp_output, biases, output));
     }
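
In this validate() path the three quantization scales collapse into a single real multiplier, (iq_info.scale * wq_info.scale) / oq_info.scale, which calculate_quantized_multiplier() then decomposes into an int32_t fixed-point multiplier and shift; that is why output_multiplier and output_shift are now explicitly 32-bit. A rough standalone sketch of that decomposition in the usual gemmlowp style (an illustration only, not the library's routine; the function name below is made up):

#include <cmath>
#include <cstdint>

// Sketch: decompose a real requantization multiplier in (0, 1) into a
// Q0.31 fixed-point multiplier and a right shift, in the spirit of
// quantization::calculate_quantized_multiplier used above.
void quantize_multiplier_smaller_than_one(float multiplier, int32_t *quant_multiplier, int32_t *right_shift)
{
    int    exponent = 0;
    double q        = std::frexp(static_cast<double>(multiplier), &exponent); // multiplier = q * 2^exponent, q in [0.5, 1)
    auto   q_fixed  = static_cast<int64_t>(std::llround(q * (1ll << 31)));
    if(q_fixed == (1ll << 31)) // rounding may bump q up to exactly 1.0
    {
        q_fixed /= 2;
        ++exponent;
    }
    *quant_multiplier = static_cast<int32_t>(q_fixed);
    *right_shift      = -exponent; // non-negative for multipliers below 1.0
}

Since the combined multiplier is normally below 1.0, the resulting shift is non-negative and is applied as a rounding right shift after the fixed-point multiply.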