From e7be8a072967f9ae547468a7625e11477ea32221 Mon Sep 17 00:00:00 2001
From: Michalis Spyrou
Date: Thu, 12 Dec 2019 16:16:09 +0000
Subject: COMPMID-2980 (Nightly) armv7a build failures

Change-Id: I8c2a20fc345694d1ad6e0fe63e4f22fb73e6c1df
Signed-off-by: Michalis Spyrou
Signed-off-by: Michele Di Giorgio
Reviewed-on: https://review.mlplatform.org/c/2463
Tested-by: Arm Jenkins
---
 .../NEON/functions/NEDepthwiseConvolutionLayer.cpp    |  6 +++---
 src/runtime/NEON/functions/NEFullyConnectedLayer.cpp  |  6 +++---
 .../NEON/functions/NEGEMMConvolutionLayer.cpp         | 20 +++++++++++---------
 src/runtime/NEON/functions/NELSTMLayerQuantized.cpp   |  8 ++++----
 .../NEDepthwiseConvolutionAssemblyDispatch.cpp        | 14 +++++++-------
 5 files changed, 28 insertions(+), 26 deletions(-)

(limited to 'src/runtime/NEON')

diff --git a/src/runtime/NEON/functions/NEDepthwiseConvolutionLayer.cpp b/src/runtime/NEON/functions/NEDepthwiseConvolutionLayer.cpp
index 5e47dd56ae..ca4fe732a7 100644
--- a/src/runtime/NEON/functions/NEDepthwiseConvolutionLayer.cpp
+++ b/src/runtime/NEON/functions/NEDepthwiseConvolutionLayer.cpp
@@ -202,9 +202,9 @@ void NEDepthwiseConvolutionLayer::NEDepthwiseConvolutionLayerOptimizedInternal::
         const UniformQuantizationInfo wq_info = weights->info()->quantization_info().uniform();
         const UniformQuantizationInfo oq_info = (output->info()->total_size() == 0) ? iq_info : output->info()->quantization_info().uniform();
 
-        float multiplier = (iq_info.scale * wq_info.scale) / oq_info.scale;
-        int   output_multiplier;
-        int   output_shift;
+        float   multiplier = (iq_info.scale * wq_info.scale) / oq_info.scale;
+        int32_t output_multiplier;
+        int32_t output_shift;
         quantization::calculate_quantized_multiplier_less_than_one(multiplier, &output_multiplier, &output_shift);
         _output_stage_kernel.configure(&_accumulator, biases, _is_nchw ? output : &_permuted_output, output_multiplier, output_shift, oq_info.offset);
         _accumulator.allocator()->allocate();
diff --git a/src/runtime/NEON/functions/NEFullyConnectedLayer.cpp b/src/runtime/NEON/functions/NEFullyConnectedLayer.cpp
index ee622f4699..b3b90f8599 100644
--- a/src/runtime/NEON/functions/NEFullyConnectedLayer.cpp
+++ b/src/runtime/NEON/functions/NEFullyConnectedLayer.cpp
@@ -255,9 +255,9 @@ void NEFullyConnectedLayer::configure(const ITensor *input, const ITensor *weigh
         const UniformQuantizationInfo wq_info = weights->info()->quantization_info().uniform();
         const UniformQuantizationInfo oq_info = output->info()->quantization_info().uniform();
 
-        float multiplier = (iq_info.scale * wq_info.scale) / oq_info.scale;
-        int   output_multiplier;
-        int   output_shift;
+        float   multiplier = (iq_info.scale * wq_info.scale) / oq_info.scale;
+        int32_t output_multiplier;
+        int32_t output_shift;
         quantization::calculate_quantized_multiplier_less_than_one(multiplier, &output_multiplier, &output_shift);
         _gemmlowp_output_stage.configure(&_gemmlowp_output, biases, output, output_multiplier, output_shift, oq_info.offset);
         _gemmlowp_output.allocator()->allocate();
diff --git a/src/runtime/NEON/functions/NEGEMMConvolutionLayer.cpp b/src/runtime/NEON/functions/NEGEMMConvolutionLayer.cpp
index bb9620b293..0507c6b2bd 100644
--- a/src/runtime/NEON/functions/NEGEMMConvolutionLayer.cpp
+++ b/src/runtime/NEON/functions/NEGEMMConvolutionLayer.cpp
@@ -33,7 +33,8 @@
 #include
 #include
 
-using namespace arm_compute;
+namespace arm_compute
+{
 using namespace arm_compute::misc::shape_calculator;
 
 NEConvolutionLayerReshapeWeights::NEConvolutionLayerReshapeWeights()
@@ -131,11 +132,11 @@ void NEGEMMConvolutionLayer::configure_mm(const ITensor *input, const ITensor *w
         }
 
         // Merge activation with output stage
-        PixelValue type_min = 0;
-        PixelValue type_max = 0;
+        PixelValue type_min{};
+        PixelValue type_max{};
         std::tie(type_min, type_max) = get_min_max(data_type);
-        int min_activation = type_min.get();
-        int max_activation = type_max.get();
+        int32_t min_activation = type_min.get();
+        int32_t max_activation = type_max.get();
 
         if(supported_acts.count(act_info.activation()) != 0)
         {
@@ -190,11 +191,11 @@ Status NEGEMMConvolutionLayer::validate_mm(const ITensorInfo *input, const ITens
         const UniformQuantizationInfo uoqinfo = oqinfo.uniform();
 
         // Merge activation with output stage
-        PixelValue type_min = 0;
-        PixelValue type_max = 0;
+        PixelValue type_min{};
+        PixelValue type_max{};
         std::tie(type_min, type_max) = get_min_max(data_type);
-        int min_activation = type_min.get();
-        int max_activation = type_max.get();
+        int32_t min_activation = type_min.get();
+        int32_t max_activation = type_max.get();
         const std::set supported_acts = { ActivationLayerInfo::ActivationFunction::RELU,
                                           ActivationLayerInfo::ActivationFunction::BOUNDED_RELU,
                                           ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU
@@ -595,3 +596,4 @@ void NEGEMMConvolutionLayer::prepare()
         _is_prepared = true;
     }
 }
+} // namespace arm_compute
diff --git a/src/runtime/NEON/functions/NELSTMLayerQuantized.cpp b/src/runtime/NEON/functions/NELSTMLayerQuantized.cpp
index cfd996b538..cdfc035400 100644
--- a/src/runtime/NEON/functions/NELSTMLayerQuantized.cpp
+++ b/src/runtime/NEON/functions/NELSTMLayerQuantized.cpp
@@ -136,8 +136,8 @@ void NELSTMLayerQuantized::configure(const ITensor *input,
     _output_lowp.allocator()->init(TensorInfo(_output_highp.info()->tensor_shape(), 1, DataType::QSYMM16, qsymm_3));
 
     const float multiplier = 4096.f * qasymm.uniform().scale * qweights.uniform().scale;
-    int output_multiplier = 0;
-    int output_shift      = 0;
+    int32_t output_multiplier = 0;
+    int32_t output_shift      = 0;
     quantization::calculate_quantized_multiplier(multiplier, &output_multiplier, &output_shift);
 
     _memory_group.manage(&_output_lowp);
@@ -342,8 +342,8 @@ Status NELSTMLayerQuantized::validate(const ITensorInfo *input,
     const TensorInfo output_lowp(output_highp.tensor_shape(), 1, DataType::QSYMM16, qsymm_3);
 
     const float multiplier = 4096.f * qasymm.uniform().scale * qweights.uniform().scale;
-    int output_multiplier = 0;
-    int output_shift      = 0;
+    int32_t output_multiplier = 0;
+    int32_t output_shift      = 0;
     ARM_COMPUTE_RETURN_ON_ERROR(quantization::calculate_quantized_multiplier(multiplier, &output_multiplier, &output_shift));
 
     // _output_stage
diff --git a/src/runtime/NEON/functions/assembly/NEDepthwiseConvolutionAssemblyDispatch.cpp b/src/runtime/NEON/functions/assembly/NEDepthwiseConvolutionAssemblyDispatch.cpp
index c564e22d46..3235eee19a 100644
--- a/src/runtime/NEON/functions/assembly/NEDepthwiseConvolutionAssemblyDispatch.cpp
+++ b/src/runtime/NEON/functions/assembly/NEDepthwiseConvolutionAssemblyDispatch.cpp
@@ -263,8 +263,8 @@ std::unique_ptr create_convolver(const ITensor
 
         // Calculate rescale parameters
         const float fmultipler = iqinfo.scale * wqinfo.scale / oqinfo.scale;
-        int qmultiplier        = 0;
-        int qshift             = 0;
+        int32_t qmultiplier    = 0;
+        int32_t qshift         = 0;
         quantization::calculate_quantized_multiplier_less_than_one(fmultipler, &qmultiplier, &qshift);
         qasymm8::QAsymm8RescaleParams rescale_params(qshift, qmultiplier, fmultipler);
 
@@ -285,15 +285,15 @@ std::unique_ptr create_convolver(const ITensor
         const qasymm8::QAsymm8Params oqinfo{ static_cast(output_qinfo.offset), output_qinfo.scale };
 
         // Calculate rescale parameters
-        std::vector<float> fmultipliers;
-        std::vector<int>   qmultipliers;
-        std::vector<int>   qshifts;
+        std::vector<float>   fmultipliers;
+        std::vector<int32_t> qmultipliers;
+        std::vector<int32_t> qshifts;
 
         for(auto const s : wqinfo.scales)
         {
             const float fmultipler = iqinfo.scale * s / oqinfo.scale;
-            int qmultiplier        = 0;
-            int qshift             = 0;
+            int32_t qmultiplier    = 0;
+            int32_t qshift         = 0;
             quantization::calculate_quantized_multiplier_less_than_one(fmultipler, &qmultiplier, &qshift);
             fmultipliers.push_back(fmultipler);
             qmultipliers.push_back(qmultiplier);
-- 
cgit v1.2.1
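
Note on the change pattern (not part of the patch): the edits above consistently replace plain int with int32_t for the out-parameters handed to the quantization helpers. The sketch below uses a hypothetical, simplified stand-in for calculate_quantized_multiplier_less_than_one, not the library implementation, to illustrate why the caller's declared type has to match the helper's int32_t* signature.

// Minimal sketch: a stand-in with the int32_t* out-parameter signature that
// the arm_compute quantization helpers expose (simplified, illustrative only).
#include <cstdint>

namespace sketch
{
// Hypothetical helper; the real one lives in arm_compute::quantization.
void calculate_quantized_multiplier_less_than_one(float multiplier, int32_t *quant_multiplier, int32_t *shift)
{
    // Toy normalisation of the multiplier into [0.5, 1) plus a Q0.31 encoding,
    // just so the out-parameters receive plausible values.
    int32_t s = 0;
    while(multiplier < 0.5f && s < 31)
    {
        multiplier *= 2.f;
        ++s;
    }
    *quant_multiplier = static_cast<int32_t>(multiplier * (1ll << 31));
    *shift            = s;
}
} // namespace sketch

int main()
{
    // Declaring the out-parameters as int32_t, as the patch does, matches the
    // helper's signature on every toolchain. With plain int the call fails to
    // compile wherever int32_t is not a typedef of int (e.g. long int on some
    // 32-bit toolchains), which is the kind of armv7a nightly breakage this
    // change addresses.
    int32_t output_multiplier = 0;
    int32_t output_shift      = 0;
    sketch::calculate_quantized_multiplier_less_than_one(0.25f, &output_multiplier, &output_shift);
    return (output_shift == 1) ? 0 : 1;
}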