author    Michalis Spyrou <michalis.spyrou@arm.com>    2019-12-12 16:16:09 +0000
committer Michalis Spyrou <michalis.spyrou@arm.com>    2019-12-19 16:58:40 +0000
commit    e7be8a072967f9ae547468a7625e11477ea32221 (patch)
tree      1ea27ef9ac9d4896decfac4e5431e80ec84e3885 /src/runtime/NEON
parent    62bdd8c4d605d75214ac3ca674cd647911ea9bbc (diff)
download  ComputeLibrary-e7be8a072967f9ae547468a7625e11477ea32221.tar.gz
COMPMID-2980 (Nightly) armv7a build failures
Change-Id: I8c2a20fc345694d1ad6e0fe63e4f22fb73e6c1df
Signed-off-by: Michalis Spyrou <michalis.spyrou@arm.com>
Signed-off-by: Michele Di Giorgio <michele.digiorgio@arm.com>
Reviewed-on: https://review.mlplatform.org/c/2463
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Diffstat (limited to 'src/runtime/NEON')
-rw-r--r--  src/runtime/NEON/functions/NEDepthwiseConvolutionLayer.cpp                      |  6
-rw-r--r--  src/runtime/NEON/functions/NEFullyConnectedLayer.cpp                             |  6
-rw-r--r--  src/runtime/NEON/functions/NEGEMMConvolutionLayer.cpp                            | 20
-rw-r--r--  src/runtime/NEON/functions/NELSTMLayerQuantized.cpp                              |  8
-rw-r--r--  src/runtime/NEON/functions/assembly/NEDepthwiseConvolutionAssemblyDispatch.cpp   | 14
5 files changed, 28 insertions(+), 26 deletions(-)
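
Why the type change fixes the build (a hedged sketch, not part of the original commit message): every hunk below replaces plain int locals with int32_t where their addresses are passed to quantization::calculate_quantized_multiplier() or calculate_quantized_multiplier_less_than_one(), which the patch implies take their results through int32_t* out-parameters. One plausible failure mode on a 32-bit armv7a toolchain is that int32_t is not the same type as int (some C libraries typedef it to long int), so an int* argument does not convert to int32_t* and the nightly build breaks. The standalone sketch below illustrates that pattern; compute_fixed_point() is a hypothetical stand-in for the library helper, not its real API.

// Minimal sketch (assumptions noted above): a helper returning results
// through int32_t out-parameters, mirroring the calls touched by this patch.
#include <cstdint>

// Hypothetical stand-in for quantization::calculate_quantized_multiplier_less_than_one().
void compute_fixed_point(float multiplier, int32_t *quant_multiplier, int32_t *shift)
{
    // The arithmetic is irrelevant here; only the pointer types matter.
    *quant_multiplier = static_cast<int32_t>(multiplier * (1 << 15));
    *shift            = 15;
}

int main()
{
    const float multiplier = 0.25f;

    // Before the patch: plain 'int' locals. If the toolchain defines int32_t
    // as a distinct type (e.g. long int on some 32-bit armv7a C libraries),
    // &output_multiplier has type int* and does not convert to int32_t*, so
    // the call below fails to compile.
    //
    // int output_multiplier;
    // int output_shift;

    // After the patch: the locals match the out-parameter type exactly, so
    // the code builds regardless of how int32_t is spelled.
    int32_t output_multiplier = 0;
    int32_t output_shift      = 0;
    compute_fixed_point(multiplier, &output_multiplier, &output_shift);

    return 0;
}

The remaining hunks are in the same spirit: NEGEMMConvolutionLayer.cpp drops the file-wide "using namespace arm_compute;" in favour of an explicit "namespace arm_compute { ... }" block, PixelValue locals are value-initialized with {} instead of = 0, and get<int>() / std::vector<int> become get<int32_t>() / std::vector<int32_t> to match the new types.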
diff --git a/src/runtime/NEON/functions/NEDepthwiseConvolutionLayer.cpp b/src/runtime/NEON/functions/NEDepthwiseConvolutionLayer.cpp
index 5e47dd56ae..ca4fe732a7 100644
--- a/src/runtime/NEON/functions/NEDepthwiseConvolutionLayer.cpp
+++ b/src/runtime/NEON/functions/NEDepthwiseConvolutionLayer.cpp
@@ -202,9 +202,9 @@ void NEDepthwiseConvolutionLayer::NEDepthwiseConvolutionLayerOptimizedInternal::
const UniformQuantizationInfo wq_info = weights->info()->quantization_info().uniform();
const UniformQuantizationInfo oq_info = (output->info()->total_size() == 0) ? iq_info : output->info()->quantization_info().uniform();
- float multiplier = (iq_info.scale * wq_info.scale) / oq_info.scale;
- int output_multiplier;
- int output_shift;
+ float multiplier = (iq_info.scale * wq_info.scale) / oq_info.scale;
+ int32_t output_multiplier;
+ int32_t output_shift;
quantization::calculate_quantized_multiplier_less_than_one(multiplier, &output_multiplier, &output_shift);
_output_stage_kernel.configure(&_accumulator, biases, _is_nchw ? output : &_permuted_output, output_multiplier, output_shift, oq_info.offset);
_accumulator.allocator()->allocate();
diff --git a/src/runtime/NEON/functions/NEFullyConnectedLayer.cpp b/src/runtime/NEON/functions/NEFullyConnectedLayer.cpp
index ee622f4699..b3b90f8599 100644
--- a/src/runtime/NEON/functions/NEFullyConnectedLayer.cpp
+++ b/src/runtime/NEON/functions/NEFullyConnectedLayer.cpp
@@ -255,9 +255,9 @@ void NEFullyConnectedLayer::configure(const ITensor *input, const ITensor *weigh
const UniformQuantizationInfo wq_info = weights->info()->quantization_info().uniform();
const UniformQuantizationInfo oq_info = output->info()->quantization_info().uniform();
- float multiplier = (iq_info.scale * wq_info.scale) / oq_info.scale;
- int output_multiplier;
- int output_shift;
+ float multiplier = (iq_info.scale * wq_info.scale) / oq_info.scale;
+ int32_t output_multiplier;
+ int32_t output_shift;
quantization::calculate_quantized_multiplier_less_than_one(multiplier, &output_multiplier, &output_shift);
_gemmlowp_output_stage.configure(&_gemmlowp_output, biases, output, output_multiplier, output_shift, oq_info.offset);
_gemmlowp_output.allocator()->allocate();
diff --git a/src/runtime/NEON/functions/NEGEMMConvolutionLayer.cpp b/src/runtime/NEON/functions/NEGEMMConvolutionLayer.cpp
index bb9620b293..0507c6b2bd 100644
--- a/src/runtime/NEON/functions/NEGEMMConvolutionLayer.cpp
+++ b/src/runtime/NEON/functions/NEGEMMConvolutionLayer.cpp
@@ -33,7 +33,8 @@
#include <set>
#include <tuple>
-using namespace arm_compute;
+namespace arm_compute
+{
using namespace arm_compute::misc::shape_calculator;
NEConvolutionLayerReshapeWeights::NEConvolutionLayerReshapeWeights()
@@ -131,11 +132,11 @@ void NEGEMMConvolutionLayer::configure_mm(const ITensor *input, const ITensor *w
}
// Merge activation with output stage
- PixelValue type_min = 0;
- PixelValue type_max = 0;
+ PixelValue type_min{};
+ PixelValue type_max{};
std::tie(type_min, type_max) = get_min_max(data_type);
- int min_activation = type_min.get<int>();
- int max_activation = type_max.get<int>();
+ int32_t min_activation = type_min.get<int32_t>();
+ int32_t max_activation = type_max.get<int32_t>();
if(supported_acts.count(act_info.activation()) != 0)
{
@@ -190,11 +191,11 @@ Status NEGEMMConvolutionLayer::validate_mm(const ITensorInfo *input, const ITens
const UniformQuantizationInfo uoqinfo = oqinfo.uniform();
// Merge activation with output stage
- PixelValue type_min = 0;
- PixelValue type_max = 0;
+ PixelValue type_min{};
+ PixelValue type_max{};
std::tie(type_min, type_max) = get_min_max(data_type);
- int min_activation = type_min.get<int>();
- int max_activation = type_max.get<int>();
+ int32_t min_activation = type_min.get<int32_t>();
+ int32_t max_activation = type_max.get<int32_t>();
const std::set<ActivationLayerInfo::ActivationFunction> supported_acts = { ActivationLayerInfo::ActivationFunction::RELU,
ActivationLayerInfo::ActivationFunction::BOUNDED_RELU,
@@ -595,3 +596,4 @@ void NEGEMMConvolutionLayer::prepare()
_is_prepared = true;
}
}
+} // namespace arm_compute
diff --git a/src/runtime/NEON/functions/NELSTMLayerQuantized.cpp b/src/runtime/NEON/functions/NELSTMLayerQuantized.cpp
index cfd996b538..cdfc035400 100644
--- a/src/runtime/NEON/functions/NELSTMLayerQuantized.cpp
+++ b/src/runtime/NEON/functions/NELSTMLayerQuantized.cpp
@@ -136,8 +136,8 @@ void NELSTMLayerQuantized::configure(const ITensor *input,
_output_lowp.allocator()->init(TensorInfo(_output_highp.info()->tensor_shape(), 1, DataType::QSYMM16, qsymm_3));
const float multiplier = 4096.f * qasymm.uniform().scale * qweights.uniform().scale;
- int output_multiplier = 0;
- int output_shift = 0;
+ int32_t output_multiplier = 0;
+ int32_t output_shift = 0;
quantization::calculate_quantized_multiplier(multiplier, &output_multiplier, &output_shift);
_memory_group.manage(&_output_lowp);
@@ -342,8 +342,8 @@ Status NELSTMLayerQuantized::validate(const ITensorInfo *input,
const TensorInfo output_lowp(output_highp.tensor_shape(), 1, DataType::QSYMM16, qsymm_3);
const float multiplier = 4096.f * qasymm.uniform().scale * qweights.uniform().scale;
- int output_multiplier = 0;
- int output_shift = 0;
+ int32_t output_multiplier = 0;
+ int32_t output_shift = 0;
ARM_COMPUTE_RETURN_ON_ERROR(quantization::calculate_quantized_multiplier(multiplier, &output_multiplier, &output_shift));
// _output_stage
diff --git a/src/runtime/NEON/functions/assembly/NEDepthwiseConvolutionAssemblyDispatch.cpp b/src/runtime/NEON/functions/assembly/NEDepthwiseConvolutionAssemblyDispatch.cpp
index c564e22d46..3235eee19a 100644
--- a/src/runtime/NEON/functions/assembly/NEDepthwiseConvolutionAssemblyDispatch.cpp
+++ b/src/runtime/NEON/functions/assembly/NEDepthwiseConvolutionAssemblyDispatch.cpp
@@ -263,8 +263,8 @@ std::unique_ptr<depthwise::IDepthwiseConvolution> create_convolver(const ITensor
// Calculate rescale parameters
const float fmultipler = iqinfo.scale * wqinfo.scale / oqinfo.scale;
- int qmultiplier = 0;
- int qshift = 0;
+ int32_t qmultiplier = 0;
+ int32_t qshift = 0;
quantization::calculate_quantized_multiplier_less_than_one(fmultipler, &qmultiplier, &qshift);
qasymm8::QAsymm8RescaleParams rescale_params(qshift, qmultiplier, fmultipler);
@@ -285,15 +285,15 @@ std::unique_ptr<depthwise::IDepthwiseConvolution> create_convolver(const ITensor
const qasymm8::QAsymm8Params oqinfo{ static_cast<uint8_t>(output_qinfo.offset), output_qinfo.scale };
// Calculate rescale parameters
- std::vector<float> fmultipliers;
- std::vector<int> qmultipliers;
- std::vector<int> qshifts;
+ std::vector<float> fmultipliers;
+ std::vector<int32_t> qmultipliers;
+ std::vector<int32_t> qshifts;
for(auto const s : wqinfo.scales)
{
const float fmultipler = iqinfo.scale * s / oqinfo.scale;
- int qmultiplier = 0;
- int qshift = 0;
+ int32_t qmultiplier = 0;
+ int32_t qshift = 0;
quantization::calculate_quantized_multiplier_less_than_one(fmultipler, &qmultiplier, &qshift);
fmultipliers.push_back(fmultipler);
qmultipliers.push_back(qmultiplier);