aboutsummaryrefslogtreecommitdiff
path: root/src/core/NEON/kernels
diff options
context:
space:
mode:
authorMichalis Spyrou <michalis.spyrou@arm.com>2019-04-26 14:54:54 +0100
committerMichalis Spyrou <michalis.spyrou@arm.com>2019-05-01 10:06:58 +0000
commita4f378dcd39addd4a63db1c0848f2c120804f4eb (patch)
tree6fa8a0071bef32d2bdef0e5469678a7cfecea348 /src/core/NEON/kernels
parent8ec0bb6d9027bb7505d6fa0eada42a52c6e1073b (diff)
downloadComputeLibrary-a4f378dcd39addd4a63db1c0848f2c120804f4eb.tar.gz
COMPMID-1995: Fix clang-tidy warnings
- Remove VirtualCall checks - Fix some unused variables errors - Use std::array instead of C style arrays - Various fixes Change-Id: Ife6170b7102de42b8f04e298dcf8476bf90779f0 Signed-off-by: Michalis Spyrou <michalis.spyrou@arm.com> Reviewed-on: https://review.mlplatform.org/c/1049 Tested-by: Arm Jenkins <bsgcomp@arm.com> Reviewed-by: Michele Di Giorgio <michele.digiorgio@arm.com>
Diffstat (limited to 'src/core/NEON/kernels')
-rw-r--r--src/core/NEON/kernels/NEAbsoluteDifferenceKernel.cpp8
-rw-r--r--src/core/NEON/kernels/NEAccumulateKernel.cpp10
-rw-r--r--src/core/NEON/kernels/NEActivationLayerKernel.cpp4
-rw-r--r--src/core/NEON/kernels/NEArithmeticAdditionKernel.cpp24
-rw-r--r--src/core/NEON/kernels/NEArithmeticSubtractionKernel.cpp28
-rw-r--r--src/core/NEON/kernels/NEBitwiseAndKernel.cpp4
-rw-r--r--src/core/NEON/kernels/NEBitwiseNotKernel.cpp4
-rw-r--r--src/core/NEON/kernels/NEBitwiseOrKernel.cpp4
-rw-r--r--src/core/NEON/kernels/NEBitwiseXorKernel.cpp4
-rw-r--r--src/core/NEON/kernels/NEBox3x3Kernel.cpp6
-rw-r--r--src/core/NEON/kernels/NECannyEdgeKernel.cpp8
-rw-r--r--src/core/NEON/kernels/NEChannelCombineKernel.cpp12
-rw-r--r--src/core/NEON/kernels/NEChannelExtractKernel.cpp10
-rw-r--r--src/core/NEON/kernels/NEConvolutionKernel.cpp40
-rw-r--r--src/core/NEON/kernels/NECopyKernel.cpp2
-rw-r--r--src/core/NEON/kernels/NEDepthConvertLayerKernel.cpp28
-rw-r--r--src/core/NEON/kernels/NEDequantizationLayerKernel.cpp2
-rw-r--r--src/core/NEON/kernels/NEDerivativeKernel.cpp8
-rw-r--r--src/core/NEON/kernels/NEDirectConvolutionLayerOutputStageKernel.cpp12
-rw-r--r--src/core/NEON/kernels/NEElementwiseOperationKernel.cpp8
-rw-r--r--src/core/NEON/kernels/NEElementwiseUnaryKernel.cpp2
-rw-r--r--src/core/NEON/kernels/NEFastCornersKernel.cpp74
-rw-r--r--src/core/NEON/kernels/NEFillInnerBorderKernel.cpp6
-rw-r--r--src/core/NEON/kernels/NEFloorKernel.cpp6
-rw-r--r--src/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.cpp6
-rw-r--r--src/core/NEON/kernels/NEGEMMLowpOffsetContributionOutputStageKernel.cpp4
-rw-r--r--src/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel.cpp4
-rw-r--r--src/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel.cpp6
-rw-r--r--src/core/NEON/kernels/NEGEMMMatrixAccumulateBiasesKernel.cpp6
-rw-r--r--src/core/NEON/kernels/NEGEMMMatrixAdditionKernel.cpp6
-rw-r--r--src/core/NEON/kernels/NEGEMMMatrixMultiplyKernel.cpp6
-rw-r--r--src/core/NEON/kernels/NEGaussian3x3Kernel.cpp4
-rw-r--r--src/core/NEON/kernels/NEGaussian5x5Kernel.cpp8
-rw-r--r--src/core/NEON/kernels/NEGaussianPyramidKernel.cpp10
-rw-r--r--src/core/NEON/kernels/NEHOGDescriptorKernel.cpp4
-rw-r--r--src/core/NEON/kernels/NEHarrisCornersKernel.cpp4
-rw-r--r--src/core/NEON/kernels/NEIntegralImageKernel.cpp6
-rw-r--r--src/core/NEON/kernels/NEL2NormalizeLayerKernel.cpp8
-rw-r--r--src/core/NEON/kernels/NEMagnitudePhaseKernel.cpp8
-rw-r--r--src/core/NEON/kernels/NEMeanStdDevKernel.cpp4
-rw-r--r--src/core/NEON/kernels/NEMedian3x3Kernel.cpp4
-rw-r--r--src/core/NEON/kernels/NEMemsetKernel.cpp2
-rw-r--r--src/core/NEON/kernels/NEMinMaxLayerKernel.cpp6
-rw-r--r--src/core/NEON/kernels/NEMinMaxLocationKernel.cpp12
-rw-r--r--src/core/NEON/kernels/NENonLinearFilterKernel.cpp38
-rw-r--r--src/core/NEON/kernels/NENonMaximaSuppression3x3Kernel.cpp4
-rw-r--r--src/core/NEON/kernels/NEPixelWiseMultiplicationKernel.cpp8
-rw-r--r--src/core/NEON/kernels/NEQuantizationLayerKernel.cpp4
-rw-r--r--src/core/NEON/kernels/NEScharr3x3Kernel.cpp8
-rw-r--r--src/core/NEON/kernels/NESelectKernel.cpp2
-rw-r--r--src/core/NEON/kernels/NESobel3x3Kernel.cpp10
-rw-r--r--src/core/NEON/kernels/NESobel5x5Kernel.cpp14
-rw-r--r--src/core/NEON/kernels/NESobel7x7Kernel.cpp14
-rw-r--r--src/core/NEON/kernels/NETableLookupKernel.cpp6
-rw-r--r--src/core/NEON/kernels/NEThresholdKernel.cpp6
-rw-r--r--src/core/NEON/kernels/NEUpsampleLayerKernel.cpp12
56 files changed, 278 insertions, 280 deletions
diff --git a/src/core/NEON/kernels/NEAbsoluteDifferenceKernel.cpp b/src/core/NEON/kernels/NEAbsoluteDifferenceKernel.cpp
index e0c2891592..62285e0578 100644
--- a/src/core/NEON/kernels/NEAbsoluteDifferenceKernel.cpp
+++ b/src/core/NEON/kernels/NEAbsoluteDifferenceKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2017 ARM Limited.
+ * Copyright (c) 2016-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -49,7 +49,7 @@ void abs_diff_U8_U8_U8(const ITensor *in1, const ITensor *in2, ITensor *out, con
Iterator input2(in2, window);
Iterator output(out, window);
- execute_window_loop(window, [&](const Coordinates & id)
+ execute_window_loop(window, [&](const Coordinates &)
{
const uint8x16_t input1_val = vld1q_u8(input1.ptr());
const uint8x16_t input2_val = vld1q_u8(input2.ptr());
@@ -78,7 +78,7 @@ void abs_diff_S16_S16_S16(const ITensor *in1, const ITensor *in2, ITensor *out,
Iterator input2(in2, window);
Iterator output(out, window);
- execute_window_loop(window, [&](const Coordinates & id)
+ execute_window_loop(window, [&](const Coordinates &)
{
int16x8x2_t input1_val = vld2q_s16(reinterpret_cast<const int16_t *>(input1.ptr()));
int16x8x2_t input2_val = vld2q_s16(reinterpret_cast<const int16_t *>(input2.ptr()));
@@ -93,7 +93,7 @@ void abs_diff_U8_S16_S16(const ITensor *in1, const ITensor *in2, ITensor *out, c
Iterator input2(in2, window);
Iterator output(out, window);
- execute_window_loop(window, [&](const Coordinates & id)
+ execute_window_loop(window, [&](const Coordinates &)
{
const uint8x16_t input1_val = vld1q_u8(input1.ptr());
const int16x8x2_t input2_val =
diff --git a/src/core/NEON/kernels/NEAccumulateKernel.cpp b/src/core/NEON/kernels/NEAccumulateKernel.cpp
index dae08008fd..d601adc1af 100644
--- a/src/core/NEON/kernels/NEAccumulateKernel.cpp
+++ b/src/core/NEON/kernels/NEAccumulateKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2017 ARM Limited.
+ * Copyright (c) 2016-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -126,7 +126,7 @@ void NEAccumulateWeightedFP16Kernel::run(const Window &window, const ThreadInfo
const float16x8_t scale_val = vdupq_n_f16(1.f - _alpha);
const float16x8_t scale_val2 = vdupq_n_f16(_alpha);
- execute_window_loop(window, [&](const Coordinates & id)
+ execute_window_loop(window, [&](const Coordinates &)
{
fp16::acc_we_v16_u8(input.ptr(), accum.ptr(), scale_val, scale_val2);
},
@@ -271,7 +271,7 @@ void NEAccumulateKernel::run(const Window &window, const ThreadInfo &info)
Iterator input(_input, window);
Iterator accum(_output, window);
- execute_window_loop(window, [&](const Coordinates & id)
+ execute_window_loop(window, [&](const Coordinates &)
{
acc_v16_u8(input.ptr(), accum.ptr());
},
@@ -314,7 +314,7 @@ void NEAccumulateWeightedKernel::run(const Window &window, const ThreadInfo &inf
const float32x4_t scale_val = vdupq_n_f32(1.f - _alpha);
const float32x4_t scale_val2 = vdupq_n_f32(_alpha);
- execute_window_loop(window, [&](const Coordinates & id)
+ execute_window_loop(window, [&](const Coordinates &)
{
acc_we_v16_u8(input.ptr(), accum.ptr(), scale_val, scale_val2);
},
@@ -353,7 +353,7 @@ void NEAccumulateSquaredKernel::run(const Window &window, const ThreadInfo &info
Iterator input(_input, window);
Iterator accum(_output, window);
- execute_window_loop(window, [&](const Coordinates & id)
+ execute_window_loop(window, [&](const Coordinates &)
{
acc_sq_v16_u8(input.ptr(), _shift, accum.ptr());
},
diff --git a/src/core/NEON/kernels/NEActivationLayerKernel.cpp b/src/core/NEON/kernels/NEActivationLayerKernel.cpp
index cf31cb841a..8de8db9ad9 100644
--- a/src/core/NEON/kernels/NEActivationLayerKernel.cpp
+++ b/src/core/NEON/kernels/NEActivationLayerKernel.cpp
@@ -195,7 +195,7 @@ NEActivationLayerKernel::activation(const Window &window)
const auto a = static_cast<T>(_act_info.a());
const auto b = static_cast<T>(_act_info.b());
- execute_window_loop(win_collapsed, [&](const Coordinates & id)
+ execute_window_loop(win_collapsed, [&](const Coordinates &)
{
const auto input_ptr = reinterpret_cast<const T *>(input.ptr());
const auto output_ptr = reinterpret_cast<T *>(output.ptr());
@@ -327,7 +327,7 @@ typename std::enable_if<std::is_same<T, qasymm8_t>::value, void>::type NEActivat
float32x4_t vs = vdupq_n_f32(s);
float32x4_t vo = vdupq_n_f32(o);
- execute_window_loop(win_collapsed, [&](const Coordinates & id)
+ execute_window_loop(win_collapsed, [&](const Coordinates &)
{
const auto input_ptr = reinterpret_cast<const T *>(input.ptr());
const auto output_ptr = reinterpret_cast<T *>(output.ptr());
diff --git a/src/core/NEON/kernels/NEArithmeticAdditionKernel.cpp b/src/core/NEON/kernels/NEArithmeticAdditionKernel.cpp
index ffa578f40e..ca79a0a419 100644
--- a/src/core/NEON/kernels/NEArithmeticAdditionKernel.cpp
+++ b/src/core/NEON/kernels/NEArithmeticAdditionKernel.cpp
@@ -84,7 +84,7 @@ void add_same(const ITensor *in1, const ITensor *in2, ITensor *out, ConvertPolic
Iterator non_broadcast_input(non_broadcast_tensor, non_broadcast_win);
Iterator output(out, win);
- execute_window_loop(win, [&](const Coordinates & id)
+ execute_window_loop(win, [&](const Coordinates &)
{
const auto non_broadcast_input_ptr = reinterpret_cast<const T *>(non_broadcast_input.ptr());
const auto output_ptr = reinterpret_cast<T *>(output.ptr());
@@ -120,7 +120,7 @@ void add_same(const ITensor *in1, const ITensor *in2, ITensor *out, ConvertPolic
Iterator input2(in2, input2_win);
Iterator output(out, win);
- execute_window_loop(win, [&](const Coordinates & id)
+ execute_window_loop(win, [&](const Coordinates &)
{
const auto input1_ptr = reinterpret_cast<const T *>(input1.ptr());
const auto input2_ptr = reinterpret_cast<const T *>(input2.ptr());
@@ -165,8 +165,8 @@ void add_QASYMM8_QASYMM8_QASYMM8(const ITensor *in1, const ITensor *in2, ITensor
const auto window_end_x = static_cast<int>(window.x().end());
const bool is_broadcast_across_x = (input1_win.x().step() == 0) || (input2_win.x().step() == 0);
- const float output_scale = out->info()->quantization_info().scale;
- const int output_offset = out->info()->quantization_info().offset;
+ const float output_scale = out->info()->quantization_info().scale;
+ const int output_offset = out->info()->quantization_info().offset;
const float32x4_t vscale1 = vdupq_n_f32(in1->info()->quantization_info().scale);
const float32x4_t vscale2 = vdupq_n_f32(in2->info()->quantization_info().scale);
@@ -192,7 +192,7 @@ void add_QASYMM8_QASYMM8_QASYMM8(const ITensor *in1, const ITensor *in2, ITensor
Iterator non_broadcast_input(non_broadcast_tensor, non_broadcast_win);
Iterator output(out, win);
- execute_window_loop(win, [&](const Coordinates & id)
+ execute_window_loop(win, [&](const Coordinates &)
{
const auto non_broadcast_input_ptr = reinterpret_cast<const uint8_t *>(non_broadcast_input.ptr());
const auto output_ptr = reinterpret_cast<uint8_t *>(output.ptr());
@@ -234,7 +234,7 @@ void add_QASYMM8_QASYMM8_QASYMM8(const ITensor *in1, const ITensor *in2, ITensor
vcvtnq_s32_f32(vmlaq_f32(voffseto, vaddq_f32(af.val[1], bf.val[1]), invvscaleo)),
vcvtnq_s32_f32(vmlaq_f32(voffseto, vaddq_f32(af.val[2], bf.val[2]), invvscaleo)),
vcvtnq_s32_f32(vmlaq_f32(voffseto, vaddq_f32(af.val[3], bf.val[3]), invvscaleo)),
-#else //__aarch64__
+#else //__aarch64__
vcvtq_s32_f32(vmlaq_f32(voffseto, vaddq_f32(af.val[0], bf.val[0]), invvscaleo)),
vcvtq_s32_f32(vmlaq_f32(voffseto, vaddq_f32(af.val[1], bf.val[1]), invvscaleo)),
vcvtq_s32_f32(vmlaq_f32(voffseto, vaddq_f32(af.val[2], bf.val[2]), invvscaleo)),
@@ -252,7 +252,7 @@ void add_QASYMM8_QASYMM8_QASYMM8(const ITensor *in1, const ITensor *in2, ITensor
for(; x < window_end_x; ++x)
{
const float afs = static_cast<int32_t>(*(non_broadcast_input_ptr + x) - non_broadcast_qinfo.offset) * non_broadcast_qinfo.scale;
- *(output_ptr + x) = out->info()->quantization_info().quantize((afs + bfs),RoundingPolicy::TO_NEAREST_UP);
+ *(output_ptr + x) = out->info()->quantization_info().quantize((afs + bfs), RoundingPolicy::TO_NEAREST_UP);
}
},
broadcast_input, non_broadcast_input, output);
@@ -270,7 +270,7 @@ void add_QASYMM8_QASYMM8_QASYMM8(const ITensor *in1, const ITensor *in2, ITensor
Iterator input2(in2, input2_win);
Iterator output(out, win);
- execute_window_loop(win, [&](const Coordinates & id)
+ execute_window_loop(win, [&](const Coordinates &)
{
const auto input1_ptr = reinterpret_cast<const uint8_t *>(input1.ptr());
const auto input2_ptr = reinterpret_cast<const uint8_t *>(input2.ptr());
@@ -311,7 +311,7 @@ void add_QASYMM8_QASYMM8_QASYMM8(const ITensor *in1, const ITensor *in2, ITensor
vcvtnq_s32_f32(vmlaq_f32(voffseto, vaddq_f32(af.val[1], bf.val[1]), invvscaleo)),
vcvtnq_s32_f32(vmlaq_f32(voffseto, vaddq_f32(af.val[2], bf.val[2]), invvscaleo)),
vcvtnq_s32_f32(vmlaq_f32(voffseto, vaddq_f32(af.val[3], bf.val[3]), invvscaleo)),
-#else //__aarch64__
+#else //__aarch64__
vcvtq_s32_f32(vmlaq_f32(voffseto, vaddq_f32(af.val[0], bf.val[0]), invvscaleo)),
vcvtq_s32_f32(vmlaq_f32(voffseto, vaddq_f32(af.val[1], bf.val[1]), invvscaleo)),
vcvtq_s32_f32(vmlaq_f32(voffseto, vaddq_f32(af.val[2], bf.val[2]), invvscaleo)),
@@ -330,7 +330,7 @@ void add_QASYMM8_QASYMM8_QASYMM8(const ITensor *in1, const ITensor *in2, ITensor
{
const float afs = static_cast<int32_t>((*(input1_ptr + x)) - input1_qinfo.offset) * input1_qinfo.scale;
const float bfs = static_cast<int32_t>((*(input2_ptr + x)) - input2_qinfo.offset) * input2_qinfo.scale;
- *(output_ptr + x) = out->info()->quantization_info().quantize((afs + bfs),RoundingPolicy::TO_NEAREST_UP);
+ *(output_ptr + x) = out->info()->quantization_info().quantize((afs + bfs), RoundingPolicy::TO_NEAREST_UP);
}
},
input1, input2, output);
@@ -357,7 +357,7 @@ void add_S16_U8_S16(const ITensor *in1, const ITensor *in2, ITensor *out, Conver
const auto window_start_x = static_cast<int>(window.x().start());
const auto window_end_x = static_cast<int>(window.x().end());
- execute_window_loop(win, [&](const Coordinates & id)
+ execute_window_loop(win, [&](const Coordinates &)
{
const auto input1_ptr = reinterpret_cast<const int16_t *>(input1.ptr());
const auto input2_ptr = reinterpret_cast<const uint8_t *>(input2.ptr());
@@ -427,7 +427,7 @@ void add_U8_U8_S16(const ITensor *in1, const ITensor *in2, ITensor *out, Convert
const auto window_start_x = static_cast<int>(window.x().start());
const auto window_end_x = static_cast<int>(window.x().end());
- execute_window_loop(win, [&](const Coordinates & id)
+ execute_window_loop(win, [&](const Coordinates &)
{
const auto input1_ptr = reinterpret_cast<const uint8_t *>(input1.ptr());
const auto input2_ptr = reinterpret_cast<const uint8_t *>(input2.ptr());
diff --git a/src/core/NEON/kernels/NEArithmeticSubtractionKernel.cpp b/src/core/NEON/kernels/NEArithmeticSubtractionKernel.cpp
index ff5893de96..45e1562d8d 100644
--- a/src/core/NEON/kernels/NEArithmeticSubtractionKernel.cpp
+++ b/src/core/NEON/kernels/NEArithmeticSubtractionKernel.cpp
@@ -55,7 +55,7 @@ void sub_wrap_U8_U8_U8(const ITensor *in1, const ITensor *in2, ITensor *out, con
Iterator input2(in2, window.broadcast_if_dimension_le_one(in2->info()->tensor_shape()));
Iterator output(out, window);
- execute_window_loop(window, [&](const Coordinates & id)
+ execute_window_loop(window, [&](const Coordinates &)
{
const uint8x16_t ta1 = vld1q_u8(input1.ptr());
const uint8x16_t ta2 = vld1q_u8(input2.ptr());
@@ -71,7 +71,7 @@ void sub_saturate_U8_U8_U8(const ITensor *in1, const ITensor *in2, ITensor *out,
Iterator input2(in2, window.broadcast_if_dimension_le_one(in2->info()->tensor_shape()));
Iterator output(out, window);
- execute_window_loop(window, [&](const Coordinates & id)
+ execute_window_loop(window, [&](const Coordinates &)
{
const uint8x16_t ta1 = vld1q_u8(input1.ptr());
const uint8x16_t ta2 = vld1q_u8(input2.ptr());
@@ -87,7 +87,7 @@ void sub_saturate_QAYSMM8_QAYSMM8_QAYSMM8(const ITensor *in1, const ITensor *in2
Iterator input2(in2, window.broadcast_if_dimension_le_one(in2->info()->tensor_shape()));
Iterator output(out, window);
- execute_window_loop(window, [&](const Coordinates & id)
+ execute_window_loop(window, [&](const Coordinates &)
{
const float32x4x4_t ta1 = vdequantize(vld1q_u8(reinterpret_cast<const qasymm8_t *>(input1.ptr())), in1->info()->quantization_info());
const float32x4x4_t ta2 = vdequantize(vld1q_u8(reinterpret_cast<const qasymm8_t *>(input2.ptr())), in2->info()->quantization_info());
@@ -115,7 +115,7 @@ void sub_wrap_S16_S16_S16(const ITensor *in1, const ITensor *in2, ITensor *out,
Iterator input2(in2, window.broadcast_if_dimension_le_one(in2->info()->tensor_shape()));
Iterator output(out, window);
- execute_window_loop(window, [&](const Coordinates & id)
+ execute_window_loop(window, [&](const Coordinates &)
{
const int16x8x2_t ta1 = vld2q_s16(reinterpret_cast<const int16_t *>(input1.ptr()));
const int16x8x2_t ta2 = vld2q_s16(reinterpret_cast<const int16_t *>(input2.ptr()));
@@ -139,7 +139,7 @@ void sub_saturate_S16_S16_S16(const ITensor *in1, const ITensor *in2, ITensor *o
Iterator input2(in2, window.broadcast_if_dimension_le_one(in2->info()->tensor_shape()));
Iterator output(out, window);
- execute_window_loop(window, [&](const Coordinates & id)
+ execute_window_loop(window, [&](const Coordinates &)
{
const int16x8x2_t ta1 = vld2q_s16(reinterpret_cast<const int16_t *>(input1.ptr()));
const int16x8x2_t ta2 = vld2q_s16(reinterpret_cast<const int16_t *>(input2.ptr()));
@@ -179,7 +179,7 @@ void sub_F16_F16_F16(const ITensor *in1, const ITensor *in2, ITensor *out, const
Iterator input2(in2, window.broadcast_if_dimension_le_one(in2->info()->tensor_shape()));
Iterator output(out, window);
- execute_window_loop(window, [&](const Coordinates & id)
+ execute_window_loop(window, [&](const Coordinates &)
{
const float16x8x2_t a = vld2q_f16(reinterpret_cast<const float16_t *>(input1.ptr()));
const float16x8x2_t b = vld2q_f16(reinterpret_cast<const float16_t *>(input2.ptr()));
@@ -202,7 +202,7 @@ void sub_F32_F32_F32(const ITensor *in1, const ITensor *in2, ITensor *out, const
Iterator input2(in2, window.broadcast_if_dimension_le_one(in2->info()->tensor_shape()));
Iterator output(out, window);
- execute_window_loop(window, [&](const Coordinates & id)
+ execute_window_loop(window, [&](const Coordinates &)
{
const float32x4x4_t ta1 = vld4q_f32(reinterpret_cast<const float *>(input1.ptr()));
const float32x4x4_t ta2 = vld4q_f32(reinterpret_cast<const float *>(input2.ptr()));
@@ -227,7 +227,7 @@ void sub_wrap_S16_U8_S16(const ITensor *in1, const ITensor *in2, ITensor *out, c
Iterator input2(in2, window.broadcast_if_dimension_le_one(in2->info()->tensor_shape()));
Iterator output(out, window);
- execute_window_loop(window, [&](const Coordinates & id)
+ execute_window_loop(window, [&](const Coordinates &)
{
const uint8x16_t bv_0 = vld1q_u8(input2.ptr());
int16x8_t a1_0 = vld1q_s16(reinterpret_cast<const int16_t *>(input1.ptr()));
@@ -248,7 +248,7 @@ void sub_saturate_S16_U8_S16(const ITensor *in1, const ITensor *in2, ITensor *ou
Iterator input2(in2, window.broadcast_if_dimension_le_one(in2->info()->tensor_shape()));
Iterator output(out, window);
- execute_window_loop(window, [&](const Coordinates & id)
+ execute_window_loop(window, [&](const Coordinates &)
{
const uint8x16_t bv_0 = vld1q_u8(input2.ptr());
int16x8_t a1_0 = vld1q_s16(reinterpret_cast<const int16_t *>(input1.ptr()));
@@ -269,7 +269,7 @@ void sub_wrap_U8_S16_S16(const ITensor *in1, const ITensor *in2, ITensor *out, c
Iterator input2(in2, window.broadcast_if_dimension_le_one(in2->info()->tensor_shape()));
Iterator output(out, window);
- execute_window_loop(window, [&](const Coordinates & id)
+ execute_window_loop(window, [&](const Coordinates &)
{
const uint8x16_t bv_0 = vld1q_u8(input1.ptr());
int16x8_t a1_0 = vld1q_s16(reinterpret_cast<const int16_t *>(input2.ptr()));
@@ -290,7 +290,7 @@ void sub_saturate_U8_S16_S16(const ITensor *in1, const ITensor *in2, ITensor *ou
Iterator input2(in2, window.broadcast_if_dimension_le_one(in2->info()->tensor_shape()));
Iterator output(out, window);
- execute_window_loop(window, [&](const Coordinates & id)
+ execute_window_loop(window, [&](const Coordinates &)
{
const uint8x16_t bv_0 = vld1q_u8(input1.ptr());
int16x8_t a1_0 = vld1q_s16(reinterpret_cast<const int16_t *>(input2.ptr()));
@@ -311,7 +311,7 @@ void sub_wrap_U8_U8_S16(const ITensor *in1, const ITensor *in2, ITensor *out, co
Iterator input2(in2, window.broadcast_if_dimension_le_one(in2->info()->tensor_shape()));
Iterator output(out, window);
- execute_window_loop(window, [&](const Coordinates & id)
+ execute_window_loop(window, [&](const Coordinates &)
{
const uint8x16_t av_0 = vld1q_u8(input1.ptr());
const uint8x16_t bv_0 = vld1q_u8(input2.ptr());
@@ -333,7 +333,7 @@ void sub_saturate_U8_U8_S16(const ITensor *in1, const ITensor *in2, ITensor *out
Iterator input2(in2, window.broadcast_if_dimension_le_one(in2->info()->tensor_shape()));
Iterator output(out, window);
- execute_window_loop(window, [&](const Coordinates & id)
+ execute_window_loop(window, [&](const Coordinates &)
{
const uint8x16_t av_0 = vld1q_u8(input1.ptr());
const uint8x16_t bv_0 = vld1q_u8(input2.ptr());
@@ -515,5 +515,5 @@ BorderSize NEArithmeticSubtractionKernel::border_size() const
{
const unsigned int replicateSize = _output->info()->dimension(0) - std::min(_input1->info()->dimension(0), _input2->info()->dimension(0));
const unsigned int border = std::min<unsigned int>(num_elems_processed_per_iteration - 1U, replicateSize);
- return BorderSize(0, border, 0, 0);
+ return BorderSize{ 0, border, 0, 0 };
} \ No newline at end of file
diff --git a/src/core/NEON/kernels/NEBitwiseAndKernel.cpp b/src/core/NEON/kernels/NEBitwiseAndKernel.cpp
index ed83286acf..71312a923d 100644
--- a/src/core/NEON/kernels/NEBitwiseAndKernel.cpp
+++ b/src/core/NEON/kernels/NEBitwiseAndKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2018 ARM Limited.
+ * Copyright (c) 2016-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -106,7 +106,7 @@ void NEBitwiseAndKernel::run(const Window &window, const ThreadInfo &info)
Iterator input2(_input2, window);
Iterator output(_output, window);
- execute_window_loop(window, [&](const Coordinates & id)
+ execute_window_loop(window, [&](const Coordinates &)
{
bitwise_and<uint8_t>(input1.ptr(), input2.ptr(), output.ptr());
},
diff --git a/src/core/NEON/kernels/NEBitwiseNotKernel.cpp b/src/core/NEON/kernels/NEBitwiseNotKernel.cpp
index 08d7fe2610..5791dcc704 100644
--- a/src/core/NEON/kernels/NEBitwiseNotKernel.cpp
+++ b/src/core/NEON/kernels/NEBitwiseNotKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2017 ARM Limited.
+ * Copyright (c) 2016-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -89,7 +89,7 @@ void NEBitwiseNotKernel::run(const Window &window, const ThreadInfo &info)
Iterator input(_input, window);
Iterator output(_output, window);
- execute_window_loop(window, [&](const Coordinates & id)
+ execute_window_loop(window, [&](const Coordinates &)
{
bitwise_not_U8_U8(input.ptr(), output.ptr());
},
diff --git a/src/core/NEON/kernels/NEBitwiseOrKernel.cpp b/src/core/NEON/kernels/NEBitwiseOrKernel.cpp
index 1b17cc283c..8aed9bb6da 100644
--- a/src/core/NEON/kernels/NEBitwiseOrKernel.cpp
+++ b/src/core/NEON/kernels/NEBitwiseOrKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2017 ARM Limited.
+ * Copyright (c) 2016-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -102,7 +102,7 @@ void NEBitwiseOrKernel::run(const Window &window, const ThreadInfo &info)
Iterator input2(_input2, window);
Iterator output(_output, window);
- execute_window_loop(window, [&](const Coordinates & id)
+ execute_window_loop(window, [&](const Coordinates &)
{
bitwise_or_U8_U8_U8(input1.ptr(), input2.ptr(), output.ptr());
},
diff --git a/src/core/NEON/kernels/NEBitwiseXorKernel.cpp b/src/core/NEON/kernels/NEBitwiseXorKernel.cpp
index 9451e8a08d..e2dcb95f64 100644
--- a/src/core/NEON/kernels/NEBitwiseXorKernel.cpp
+++ b/src/core/NEON/kernels/NEBitwiseXorKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2017 ARM Limited.
+ * Copyright (c) 2016-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -98,7 +98,7 @@ void NEBitwiseXorKernel::run(const Window &window, const ThreadInfo &info)
Iterator input2(_input2, window);
Iterator output(_output, window);
- execute_window_loop(window, [&](const Coordinates & id)
+ execute_window_loop(window, [&](const Coordinates &)
{
bitwise_xor_U8_U8_U8(input1.ptr(), input2.ptr(), output.ptr());
},
diff --git a/src/core/NEON/kernels/NEBox3x3Kernel.cpp b/src/core/NEON/kernels/NEBox3x3Kernel.cpp
index 0c9700526b..7a53f93b86 100644
--- a/src/core/NEON/kernels/NEBox3x3Kernel.cpp
+++ b/src/core/NEON/kernels/NEBox3x3Kernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2017 ARM Limited.
+ * Copyright (c) 2016-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -49,7 +49,7 @@ void NEBox3x3FP16Kernel::run(const Window &window, const ThreadInfo &info)
const float16x8_t oneovernine = vdupq_n_f16(1.0f / 9.0f);
- execute_window_loop(window, [&](const Coordinates & id)
+ execute_window_loop(window, [&](const Coordinates &)
{
const uint8x16_t top_data = vld1q_u8(input_top_ptr + input.offset());
const uint8x16_t mid_data = vld1q_u8(input_mid_ptr + input.offset());
@@ -160,7 +160,7 @@ void NEBox3x3Kernel::run(const Window &window, const ThreadInfo &info)
const float32x4_t oneovernine = vdupq_n_f32(1.0f / 9.0f);
- execute_window_loop(window, [&](const Coordinates & id)
+ execute_window_loop(window, [&](const Coordinates &)
{
const uint8x16_t top_data = vld1q_u8(input_top_ptr + input.offset());
const uint8x16_t mid_data = vld1q_u8(input_mid_ptr + input.offset());
diff --git a/src/core/NEON/kernels/NECannyEdgeKernel.cpp b/src/core/NEON/kernels/NECannyEdgeKernel.cpp
index fa51a7bb0b..8d822bd701 100644
--- a/src/core/NEON/kernels/NECannyEdgeKernel.cpp
+++ b/src/core/NEON/kernels/NECannyEdgeKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2018 ARM Limited.
+ * Copyright (c) 2016-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -950,7 +950,7 @@ void NEGradientKernel::run(const Window &window, const ThreadInfo &info)
Iterator magnitude(_magnitude, window);
Iterator phase(_phase, window);
- execute_window_loop(window, [&](const Coordinates & id)
+ execute_window_loop(window, [&](const Coordinates &)
{
(*_func)(gx.ptr(), gy.ptr(), magnitude.ptr(), phase.ptr());
},
@@ -1034,7 +1034,7 @@ void NEEdgeNonMaxSuppressionKernel::run(const Window &window, const ThreadInfo &
const size_t input1_stride = _magnitude->info()->strides_in_bytes()[1];
const size_t input1_stride_ushort = input1_stride / data_size_from_type(_magnitude->info()->data_type());
- execute_window_loop(window, [&](const Coordinates & id)
+ execute_window_loop(window, [&](const Coordinates &)
{
(*_func)(magnitude.ptr(), phase.ptr(), output.ptr(), input1_stride_ushort, _lower_thr, _upper_thr);
},
@@ -1113,7 +1113,7 @@ void NEEdgeTraceKernel::run(const Window &window, const ThreadInfo &info)
const size_t input_stride = _input->info()->strides_in_bytes()[1];
const size_t output_stride = _output->info()->strides_in_bytes()[1];
- execute_window_loop(window, [&](const Coordinates & id)
+ execute_window_loop(window, [&](const Coordinates &)
{
edge_trace_U8_U8(input.ptr(), output.ptr(), input_stride, output_stride);
},
diff --git a/src/core/NEON/kernels/NEChannelCombineKernel.cpp b/src/core/NEON/kernels/NEChannelCombineKernel.cpp
index 28fb4bdb10..539154d671 100644
--- a/src/core/NEON/kernels/NEChannelCombineKernel.cpp
+++ b/src/core/NEON/kernels/NEChannelCombineKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2018 ARM Limited.
+ * Copyright (c) 2016-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -286,7 +286,7 @@ void NEChannelCombineKernel::combine_3C(const Window &win)
Iterator p2(_planes[2], win);
Iterator out(_output, win);
- execute_window_loop(win, [&](const Coordinates & id)
+ execute_window_loop(win, [&](const Coordinates &)
{
const auto p0_ptr = static_cast<uint8_t *>(p0.ptr());
const auto p1_ptr = static_cast<uint8_t *>(p1.ptr());
@@ -315,7 +315,7 @@ void NEChannelCombineKernel::combine_4C(const Window &win)
Iterator p3(_planes[3], win);
Iterator out(_output, win);
- execute_window_loop(win, [&](const Coordinates & id)
+ execute_window_loop(win, [&](const Coordinates &)
{
const auto p0_ptr = static_cast<uint8_t *>(p0.ptr());
const auto p1_ptr = static_cast<uint8_t *>(p1.ptr());
@@ -353,7 +353,7 @@ void NEChannelCombineKernel::combine_YUV_1p(const Window &win)
constexpr auto shift = is_uyvy ? 1 : 0;
- execute_window_loop(win, [&](const Coordinates & id)
+ execute_window_loop(win, [&](const Coordinates &)
{
const auto p0_ptr = static_cast<uint8_t *>(p0.ptr());
const auto p1_ptr = static_cast<uint8_t *>(p1.ptr());
@@ -409,7 +409,7 @@ void NEChannelCombineKernel::combine_YUV_2p(const Window &win)
// Increase step size after iterator is created to calculate stride correctly for multi channel format
out_win.set_dimension_step(Window::DimX, out_win.x().step() * _x_subsampling[1]);
- execute_window_loop(out_win, [&](const Coordinates & id)
+ execute_window_loop(out_win, [&](const Coordinates &)
{
const uint8x8x2_t pixels =
{
@@ -444,7 +444,7 @@ void NEChannelCombineKernel::copy_plane(const Window &win, uint32_t plane_id)
Iterator in(_planes[plane_id], tmp_win);
Iterator out(_output_multi->plane(plane_id), tmp_win);
- execute_window_loop(tmp_win, [&](const Coordinates & id)
+ execute_window_loop(tmp_win, [&](const Coordinates &)
{
const uint8x8_t pixels = vld1_u8(in.ptr());
diff --git a/src/core/NEON/kernels/NEChannelExtractKernel.cpp b/src/core/NEON/kernels/NEChannelExtractKernel.cpp
index 98b2f280d4..61e1304963 100644
--- a/src/core/NEON/kernels/NEChannelExtractKernel.cpp
+++ b/src/core/NEON/kernels/NEChannelExtractKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2018 ARM Limited.
+ * Copyright (c) 2016-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -192,7 +192,7 @@ void NEChannelExtractKernel::extract_1C_from_2C_img(const Window &win)
Iterator in(_input, win);
Iterator out(_output, win);
- execute_window_loop(win, [&](const Coordinates & id)
+ execute_window_loop(win, [&](const Coordinates &)
{
const auto in_ptr = static_cast<uint8_t *>(in.ptr());
const auto out_ptr = static_cast<uint8_t *>(out.ptr());
@@ -207,7 +207,7 @@ void NEChannelExtractKernel::extract_1C_from_3C_img(const Window &win)
Iterator in(_input, win);
Iterator out(_output, win);
- execute_window_loop(win, [&](const Coordinates & id)
+ execute_window_loop(win, [&](const Coordinates &)
{
const auto in_ptr = static_cast<uint8_t *>(in.ptr());
const auto out_ptr = static_cast<uint8_t *>(out.ptr());
@@ -222,7 +222,7 @@ void NEChannelExtractKernel::extract_1C_from_4C_img(const Window &win)
Iterator in(_input, win);
Iterator out(_output, win);
- execute_window_loop(win, [&](const Coordinates & id)
+ execute_window_loop(win, [&](const Coordinates &)
{
const auto in_ptr = static_cast<uint8_t *>(in.ptr());
const auto out_ptr = static_cast<uint8_t *>(out.ptr());
@@ -242,7 +242,7 @@ void NEChannelExtractKernel::extract_YUYV_uv(const Window &win)
Iterator in(_input, win);
Iterator out(_output, win_out);
- execute_window_loop(win, [&](const Coordinates & id)
+ execute_window_loop(win, [&](const Coordinates &)
{
const auto in_ptr = static_cast<uint8_t *>(in.ptr());
const auto out_ptr = static_cast<uint8_t *>(out.ptr());
diff --git a/src/core/NEON/kernels/NEConvolutionKernel.cpp b/src/core/NEON/kernels/NEConvolutionKernel.cpp
index 0a10546b7b..b154340bee 100644
--- a/src/core/NEON/kernels/NEConvolutionKernel.cpp
+++ b/src/core/NEON/kernels/NEConvolutionKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2018 ARM Limited.
+ * Copyright (c) 2016-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -317,7 +317,7 @@ NEConvolutionKernel<matrix_size>::NEConvolutionKernel()
template <unsigned int matrix_size>
BorderSize NEConvolutionKernel<matrix_size>::border_size() const
{
- return BorderSize(matrix_size / 2);
+ return BorderSize{ matrix_size / 2 };
}
template <unsigned int matrix_size>
@@ -388,7 +388,7 @@ void NEConvolutionKernel<3>::convolution(const Window &win)
const unsigned char *input_mid_ptr = _input->buffer() + _input->info()->offset_element_in_bytes(Coordinates(-1, 0));
const unsigned char *input_low_ptr = _input->buffer() + _input->info()->offset_element_in_bytes(Coordinates(-1, 1));
- execute_window_loop(win, [&](const Coordinates & id)
+ execute_window_loop(win, [&](const Coordinates &)
{
int32x4_t out = vdupq_n_s32(0);
int32x4_t out2 = vdupq_n_s32(0);
@@ -437,7 +437,7 @@ void NEConvolutionKernel<5>::convolution(const Window &win)
const unsigned char *input_low1_ptr = _input->buffer() + _input->info()->offset_element_in_bytes(Coordinates(-2, 1));
const unsigned char *input_low2_ptr = _input->buffer() + _input->info()->offset_element_in_bytes(Coordinates(-2, 2));
- execute_window_loop(win, [&](const Coordinates & id)
+ execute_window_loop(win, [&](const Coordinates &)
{
int32x4_t out = vdupq_n_s32(0);
int32x4_t out2 = vdupq_n_s32(0);
@@ -496,7 +496,7 @@ void NEConvolutionKernel<7>::convolution(const Window &win)
const unsigned char *input_low2_ptr = _input->buffer() + _input->info()->offset_element_in_bytes(Coordinates(-3, 2));
const unsigned char *input_low3_ptr = _input->buffer() + _input->info()->offset_element_in_bytes(Coordinates(-3, 3));
- execute_window_loop(win, [&](const Coordinates & id)
+ execute_window_loop(win, [&](const Coordinates &)
{
int32x4_t out = vdupq_n_s32(0);
int32x4_t out2 = vdupq_n_s32(0);
@@ -565,7 +565,7 @@ void NEConvolutionKernel<9>::convolution(const Window &win)
const unsigned char *input_low3_ptr = _input->buffer() + _input->info()->offset_element_in_bytes(Coordinates(-4, 3));
const unsigned char *input_low4_ptr = _input->buffer() + _input->info()->offset_element_in_bytes(Coordinates(-4, 4));
- execute_window_loop(win, [&](const Coordinates & id)
+ execute_window_loop(win, [&](const Coordinates &)
{
int32x4_t out = vdupq_n_s32(0);
int32x4_t out2 = vdupq_n_s32(0);
@@ -728,7 +728,7 @@ inline void NESeparableConvolutionHorKernel<5>::convolve<uint16_t>(const Window
Iterator input(_input, win_in);
Iterator output(_output, window);
- execute_window_loop(window, [&](const Coordinates & id)
+ execute_window_loop(window, [&](const Coordinates &)
{
const uint8x16_t data = vld1q_u8(input.ptr());
@@ -761,7 +761,7 @@ inline void NESeparableConvolutionHorKernel<5>::convolve<int16_t>(const Window &
Iterator input(_input, win_in);
Iterator output(_output, window);
- execute_window_loop(window, [&](const Coordinates & id)
+ execute_window_loop(window, [&](const Coordinates &)
{
const uint8x16_t data = vld1q_u8(input.ptr());
@@ -794,7 +794,7 @@ void NESeparableConvolutionHorKernel<5>::convolve<int32_t>(const Window &window)
Iterator input(_input, win_in);
Iterator output(_output, window);
- execute_window_loop(window, [&](const Coordinates & id)
+ execute_window_loop(window, [&](const Coordinates &)
{
const uint8x16_t data = vld1q_u8(input.ptr());
@@ -840,7 +840,7 @@ inline void NESeparableConvolutionHorKernel<7>::convolve<uint16_t>(const Window
Iterator input(_input, win_in);
Iterator output(_output, window);
- execute_window_loop(window, [&](const Coordinates & id)
+ execute_window_loop(window, [&](const Coordinates &)
{
const uint8x16_t data = vld1q_u8(input.ptr());
@@ -875,7 +875,7 @@ inline void NESeparableConvolutionHorKernel<7>::convolve<int16_t>(const Window &
Iterator input(_input, win_in);
Iterator output(_output, window);
- execute_window_loop(window, [&](const Coordinates & id)
+ execute_window_loop(window, [&](const Coordinates &)
{
const uint8x16_t data = vld1q_u8(input.ptr());
@@ -910,7 +910,7 @@ void NESeparableConvolutionHorKernel<7>::convolve<int32_t>(const Window &window)
Iterator input(_input, win_in);
Iterator output(_output, window);
- execute_window_loop(window, [&](const Coordinates & id)
+ execute_window_loop(window, [&](const Coordinates &)
{
const uint8x16_t data = vld1q_u8(input.ptr());
@@ -962,7 +962,7 @@ inline void NESeparableConvolutionHorKernel<9>::convolve<uint16_t>(const Window
Iterator input(_input, win_in);
Iterator output(_output, window);
- execute_window_loop(window, [&](const Coordinates & id)
+ execute_window_loop(window, [&](const Coordinates &)
{
const uint8x16_t data = vld1q_u8(input.ptr());
@@ -999,7 +999,7 @@ inline void NESeparableConvolutionHorKernel<9>::convolve<int16_t>(const Window &
Iterator input(_input, win_in);
Iterator output(_output, window);
- execute_window_loop(window, [&](const Coordinates & id)
+ execute_window_loop(window, [&](const Coordinates &)
{
const uint8x16_t data = vld1q_u8(input.ptr());
@@ -1036,7 +1036,7 @@ void NESeparableConvolutionHorKernel<9>::convolve<int32_t>(const Window &window)
Iterator input(_input, win_in);
Iterator output(_output, window);
- execute_window_loop(window, [&](const Coordinates & id)
+ execute_window_loop(window, [&](const Coordinates &)
{
const uint8x16_t data = vld1q_u8(input.ptr());
@@ -1096,7 +1096,7 @@ NESeparableConvolutionVertKernel<matrix_size>::NESeparableConvolutionVertKernel(
template <unsigned int matrix_size>
BorderSize NESeparableConvolutionVertKernel<matrix_size>::border_size() const
{
- return BorderSize(matrix_size / 2, 0);
+ return BorderSize{ matrix_size / 2, 0 };
}
template <unsigned int matrix_size>
@@ -1209,7 +1209,7 @@ void NESeparableConvolutionVertKernel<matrix_size>::convolution_u16(const Window
input_ptrs[k_half + i] = _input->ptr_to_element(Coordinates(0, i));
}
- execute_window_loop(win, [&](const Coordinates & id)
+ execute_window_loop(win, [&](const Coordinates &)
{
uint16x8_t out0 = vdupq_n_u16(0);
uint16x8_t out1 = vdupq_n_u16(0);
@@ -1275,7 +1275,7 @@ void NESeparableConvolutionVertKernel<matrix_size>::convolution_s16(const Window
input_ptrs[k_half + i] = _input->ptr_to_element(Coordinates(0, i));
}
- execute_window_loop(win, [&](const Coordinates & id)
+ execute_window_loop(win, [&](const Coordinates &)
{
int16x8_t out0 = vdupq_n_s16(0);
int16x8_t out1 = vdupq_n_s16(0);
@@ -1343,7 +1343,7 @@ void NESeparableConvolutionVertKernel<matrix_size>::convolution_s32(const Window
const int32x4_t zero = vdupq_n_s32(0);
- execute_window_loop(win, [&](const Coordinates & id)
+ execute_window_loop(win, [&](const Coordinates &)
{
int32x4x2_t out0 =
{
@@ -1576,7 +1576,7 @@ void NEConvolutionRectangleKernel::convolution(const Window &win)
input_ptrs[k_row_half + i] = _input->buffer() + _input->info()->offset_element_in_bytes(Coordinates(-k_col_half, i));
}
- execute_window_loop(win, [&](const Coordinates & id)
+ execute_window_loop(win, [&](const Coordinates &)
{
int32x4_t out = vdupq_n_s32(0);
int32x4_t out2 = vdupq_n_s32(0);
diff --git a/src/core/NEON/kernels/NECopyKernel.cpp b/src/core/NEON/kernels/NECopyKernel.cpp
index c5103f07c8..4722c05507 100644
--- a/src/core/NEON/kernels/NECopyKernel.cpp
+++ b/src/core/NEON/kernels/NECopyKernel.cpp
@@ -70,7 +70,7 @@ void NECopyKernel::run(const Window &window, const ThreadInfo &info)
Iterator input_it(_input, out_slice);
Iterator output_it(_output, out_slice);
- execute_window_loop(out_slice, [&](const Coordinates & id)
+ execute_window_loop(out_slice, [&](const Coordinates &)
{
memcpy(output_it.ptr(), input_it.ptr(), _output->info()->dimension(0) * _output->info()->element_size());
},
diff --git a/src/core/NEON/kernels/NEDepthConvertLayerKernel.cpp b/src/core/NEON/kernels/NEDepthConvertLayerKernel.cpp
index 54337551a7..cbc90a058f 100644
--- a/src/core/NEON/kernels/NEDepthConvertLayerKernel.cpp
+++ b/src/core/NEON/kernels/NEDepthConvertLayerKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2018 ARM Limited.
+ * Copyright (c) 2016-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -148,7 +148,7 @@ void NEDepthConvertLayerKernel::run(const Window &window, const ThreadInfo &info
const float32x4_t scale = vdupq_n_f32(_input->info()->quantization_info().scale);
const int32x4_t offset = vdupq_n_s32(_input->info()->quantization_info().offset);
- execute_window_loop(window, [&](const Coordinates & id)
+ execute_window_loop(window, [&](const Coordinates &)
{
const uint8x16_t texels_u8 = vld1q_u8(input.ptr());
const uint16x8x2_t texels_u16 =
@@ -184,7 +184,7 @@ void NEDepthConvertLayerKernel::run(const Window &window, const ThreadInfo &info
const float16x8_t scale = vdupq_n_f16(static_cast<float16_t>(_input->info()->quantization_info().scale));
const int16x8_t offset = vdupq_n_s16(static_cast<int16_t>(_input->info()->quantization_info().offset));
- execute_window_loop(window, [&](const Coordinates & id)
+ execute_window_loop(window, [&](const Coordinates &)
{
const uint8x16_t texels_u8 = vld1q_u8(input.ptr());
const int16x8x2_t texels_s16 =
@@ -216,7 +216,7 @@ void NEDepthConvertLayerKernel::run(const Window &window, const ThreadInfo &info
case DataType::S16:
{
/* Up-conversion U8 -> S16 */
- execute_window_loop(window, [&](const Coordinates & id)
+ execute_window_loop(window, [&](const Coordinates &)
{
const uint8x16_t texels_u8 = vld1q_u8(input.ptr());
@@ -237,7 +237,7 @@ void NEDepthConvertLayerKernel::run(const Window &window, const ThreadInfo &info
case DataType::S32:
{
/* Up-conversion U8 -> S32 */
- execute_window_loop(window, [&](const Coordinates & id)
+ execute_window_loop(window, [&](const Coordinates &)
{
const uint8x16_t texels_u8 = vld1q_u8(input.ptr());
@@ -260,7 +260,7 @@ void NEDepthConvertLayerKernel::run(const Window &window, const ThreadInfo &info
case DataType::U16:
{
/* Up-conversion U8 -> U16 */
- execute_window_loop(window, [&](const Coordinates & id)
+ execute_window_loop(window, [&](const Coordinates &)
{
const uint8x16_t texels_u8 = vld1q_u8(input.ptr());
@@ -294,7 +294,7 @@ void NEDepthConvertLayerKernel::run(const Window &window, const ThreadInfo &info
/* Down-conversion S16 -> U8 */
if(ConvertPolicy::SATURATE == _policy)
{
- execute_window_loop(window, [&](const Coordinates & id)
+ execute_window_loop(window, [&](const Coordinates &)
{
const int16x8x2_t texels =
{
@@ -310,7 +310,7 @@ void NEDepthConvertLayerKernel::run(const Window &window, const ThreadInfo &info
}
else
{
- execute_window_loop(window, [&](const Coordinates & id)
+ execute_window_loop(window, [&](const Coordinates &)
{
const int16x8x2_t texels =
{
@@ -332,7 +332,7 @@ void NEDepthConvertLayerKernel::run(const Window &window, const ThreadInfo &info
const int32x4_t b = vdupq_n_s32(_shift);
/* Up-conversion S16 -> S32 */
- execute_window_loop(window, [&](const Coordinates & id)
+ execute_window_loop(window, [&](const Coordinates &)
{
const int16x8x2_t texels =
{
@@ -376,7 +376,7 @@ void NEDepthConvertLayerKernel::run(const Window &window, const ThreadInfo &info
/* Down-conversion U16 -> U8 */
if(ConvertPolicy::SATURATE == _policy)
{
- execute_window_loop(window, [&](const Coordinates & id)
+ execute_window_loop(window, [&](const Coordinates &)
{
const uint16x8x2_t texels =
{
@@ -392,7 +392,7 @@ void NEDepthConvertLayerKernel::run(const Window &window, const ThreadInfo &info
}
else
{
- execute_window_loop(window, [&](const Coordinates & id)
+ execute_window_loop(window, [&](const Coordinates &)
{
const uint16x8x2_t texels =
{
@@ -413,7 +413,7 @@ void NEDepthConvertLayerKernel::run(const Window &window, const ThreadInfo &info
const int32x4_t b = vdupq_n_s32(_shift);
/* Up-conversion U16 -> U32 */
- execute_window_loop(window, [&](const Coordinates & id)
+ execute_window_loop(window, [&](const Coordinates &)
{
const uint16x8x2_t texels =
{
@@ -504,7 +504,7 @@ void NEDepthConvertLayerKernel::run(const Window &window, const ThreadInfo &info
const int32x4_t zero_val_vec = vdupq_n_s32(0);
/* Down-conversion F32 -> QASYMM8 */
- execute_window_loop(window, [&](const Coordinates & id)
+ execute_window_loop(window, [&](const Coordinates &)
{
const float32x4x4_t texels =
{
@@ -535,7 +535,7 @@ void NEDepthConvertLayerKernel::run(const Window &window, const ThreadInfo &info
const float32x4_t scale = vdupq_n_f32(1.f / (1 << _shift));
/* Down-conversion F32 -> F16 */
- execute_window_loop(window, [&](const Coordinates & id)
+ execute_window_loop(window, [&](const Coordinates &)
{
const float32x4x4_t texels =
{
diff --git a/src/core/NEON/kernels/NEDequantizationLayerKernel.cpp b/src/core/NEON/kernels/NEDequantizationLayerKernel.cpp
index 119aa4ad9a..1520225249 100644
--- a/src/core/NEON/kernels/NEDequantizationLayerKernel.cpp
+++ b/src/core/NEON/kernels/NEDequantizationLayerKernel.cpp
@@ -111,7 +111,7 @@ void run_dequantization(const ITensor *input, ITensor *output, const Window &win
Iterator in(input, win_collapsed);
Iterator out(output, win_collapsed);
- execute_window_loop(win_collapsed, [&](const Coordinates & id)
+ execute_window_loop(win_collapsed, [&](const Coordinates &)
{
const auto in_ptr = reinterpret_cast<const uint8_t *>(in.ptr());
const auto out_ptr = reinterpret_cast<T *>(out.ptr());
diff --git a/src/core/NEON/kernels/NEDerivativeKernel.cpp b/src/core/NEON/kernels/NEDerivativeKernel.cpp
index cfed324773..1d7237a5b4 100644
--- a/src/core/NEON/kernels/NEDerivativeKernel.cpp
+++ b/src/core/NEON/kernels/NEDerivativeKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2017 ARM Limited.
+ * Copyright (c) 2016-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -124,7 +124,7 @@ void NEDerivativeKernel::derivative_x(const Window &window)
Iterator out_x(_output_x, window);
/* Apply 1-D centered point discrete derivative mask ([-1 0 1]) along the X direction */
- execute_window_loop(window, [&](const Coordinates & id)
+ execute_window_loop(window, [&](const Coordinates &)
{
/* Load left and right data */
const uint8x16_t l_data = vld1q_u8(in.ptr() - 1);
@@ -153,7 +153,7 @@ void NEDerivativeKernel::derivative_y(const Window &window)
const size_t stride = _input->info()->strides_in_bytes()[1];
/* Apply 1-D centered point discrete derivative mask ([-1 0 1]^T) along the Y direction */
- execute_window_loop(window, [&](const Coordinates & id)
+ execute_window_loop(window, [&](const Coordinates &)
{
/* Load top and bottom data */
const uint8x16_t t_data = vld1q_u8(in.ptr() - stride);
@@ -183,7 +183,7 @@ void NEDerivativeKernel::derivative_xy(const Window &window)
const size_t stride = _input->info()->strides_in_bytes()[1];
/* Apply 1-D centered point discrete derivative masks ([-1 0 1] and [-1 0 1]^T) along the X and Y directions */
- execute_window_loop(window, [&](const Coordinates & id)
+ execute_window_loop(window, [&](const Coordinates &)
{
/* Load top, bottom, left and right data */
const uint8x16_t t_data = vld1q_u8(in.ptr() - stride);
diff --git a/src/core/NEON/kernels/NEDirectConvolutionLayerOutputStageKernel.cpp b/src/core/NEON/kernels/NEDirectConvolutionLayerOutputStageKernel.cpp
index 09836f1d6b..7e113935c5 100644
--- a/src/core/NEON/kernels/NEDirectConvolutionLayerOutputStageKernel.cpp
+++ b/src/core/NEON/kernels/NEDirectConvolutionLayerOutputStageKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018 ARM Limited.
+ * Copyright (c) 2017-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -266,7 +266,7 @@ void output_stage_nhwc(ITensor *input, const ITensor *bias, const Window &window
if(in_place) // In place accumulate
{
- execute_window_loop(window, [&](const Coordinates & id)
+ execute_window_loop(window, [&](const Coordinates &)
{
// Get bias and pointer to input
const auto in_ptr = reinterpret_cast<T1 *>(in.ptr());
@@ -287,7 +287,7 @@ void output_stage_nhwc(ITensor *input, const ITensor *bias, const Window &window
else // Out of place accumulate
{
Iterator out(output, window);
- execute_window_loop(window, [&](const Coordinates & id)
+ execute_window_loop(window, [&](const Coordinates &)
{
// Get bias and pointer to input
const auto in_ptr = reinterpret_cast<T1 *>(in.ptr());
@@ -363,7 +363,7 @@ void output_stage_nchw<int32_t, uint8_t, false, false>(ITensor *input, const ITe
Iterator in(input, window);
Iterator out(output, window);
- execute_window_loop(window, [&](const Coordinates & id)
+ execute_window_loop(window, [&](const Coordinates &)
{
// Get bias and pointer to input
const auto in_ptr = reinterpret_cast<int32_t *>(in.ptr());
@@ -399,7 +399,7 @@ void output_stage_nhwc<int32_t, uint8_t, false, true>(ITensor *input, const ITen
Iterator bi(bias, window_bias);
Iterator out(output, window);
- execute_window_loop(window, [&](const Coordinates & id)
+ execute_window_loop(window, [&](const Coordinates &)
{
// Get bias and pointer to input
const auto in_ptr = reinterpret_cast<int32_t *>(in.ptr());
@@ -433,7 +433,7 @@ void output_stage_nhwc<int32_t, uint8_t, false, false>(ITensor *input, const ITe
Iterator in(input, window);
Iterator out(output, window);
- execute_window_loop(window, [&](const Coordinates & id)
+ execute_window_loop(window, [&](const Coordinates &)
{
// Get pointer to input
const auto in_ptr = reinterpret_cast<int32_t *>(in.ptr());
diff --git a/src/core/NEON/kernels/NEElementwiseOperationKernel.cpp b/src/core/NEON/kernels/NEElementwiseOperationKernel.cpp
index aa458c2119..6b87ea017b 100644
--- a/src/core/NEON/kernels/NEElementwiseOperationKernel.cpp
+++ b/src/core/NEON/kernels/NEElementwiseOperationKernel.cpp
@@ -506,7 +506,7 @@ void elementwise_op(const ITensor *in1, const ITensor *in2, ITensor *out, const
Iterator non_broadcast_input(non_broadcast_tensor, non_broadcast_win);
Iterator output(out, win);
- execute_window_loop(win, [&](const Coordinates & id)
+ execute_window_loop(win, [&](const Coordinates &)
{
auto output_ptr = reinterpret_cast<OutputScalarType *>(output.ptr());
const auto non_broadcast_input_ptr = reinterpret_cast<const InputScalarType *>(non_broadcast_input.ptr());
@@ -531,7 +531,7 @@ void elementwise_op(const ITensor *in1, const ITensor *in2, ITensor *out, const
Iterator input2(in2, input2_win);
Iterator output(out, win);
- execute_window_loop(win, [&](const Coordinates & id)
+ execute_window_loop(win, [&](const Coordinates &)
{
auto output_ptr = reinterpret_cast<OutputScalarType *>(output.ptr());
const auto input1_ptr = reinterpret_cast<const InputScalarType *>(input1.ptr());
@@ -599,7 +599,7 @@ void elementwise_op_quantized(const ITensor *in1, const ITensor *in2, ITensor *o
Iterator non_broadcast_input(non_broadcast_tensor, non_broadcast_win);
Iterator output(out, win);
- execute_window_loop(win, [&](const Coordinates & id)
+ execute_window_loop(win, [&](const Coordinates &)
{
const auto non_broadcast_input_ptr = reinterpret_cast<const uint8_t *>(non_broadcast_input.ptr());
const auto output_ptr = reinterpret_cast<uint8_t *>(output.ptr());
@@ -640,7 +640,7 @@ void elementwise_op_quantized(const ITensor *in1, const ITensor *in2, ITensor *o
Iterator input2(in2, input2_win);
Iterator output(out, win);
- execute_window_loop(win, [&](const Coordinates & id)
+ execute_window_loop(win, [&](const Coordinates &)
{
const auto input1_ptr = reinterpret_cast<const uint8_t *>(input1.ptr());
const auto input2_ptr = reinterpret_cast<const uint8_t *>(input2.ptr());
diff --git a/src/core/NEON/kernels/NEElementwiseUnaryKernel.cpp b/src/core/NEON/kernels/NEElementwiseUnaryKernel.cpp
index 7ecc4d1c44..34696d872a 100644
--- a/src/core/NEON/kernels/NEElementwiseUnaryKernel.cpp
+++ b/src/core/NEON/kernels/NEElementwiseUnaryKernel.cpp
@@ -87,7 +87,7 @@ void elementwise_op(const ITensor *in, ITensor *out, const Window &window)
Iterator input(in, win);
Iterator output(out, win);
- execute_window_loop(win, [&](const Coordinates & id)
+ execute_window_loop(win, [&](const Coordinates &)
{
auto output_ptr = reinterpret_cast<ScalarType *>(output.ptr());
const auto input_ptr = reinterpret_cast<const ScalarType *>(input.ptr());
diff --git a/src/core/NEON/kernels/NEFastCornersKernel.cpp b/src/core/NEON/kernels/NEFastCornersKernel.cpp
index 919efd2ae2..81bcc8bc3c 100644
--- a/src/core/NEON/kernels/NEFastCornersKernel.cpp
+++ b/src/core/NEON/kernels/NEFastCornersKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2017 ARM Limited.
+ * Copyright (c) 2016-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -49,32 +49,30 @@ inline uint8x8x2_t create_permutation_index(size_t k)
{
ARM_COMPUTE_ERROR_ON(k >= PERMUTATIONS);
- static const uint8_t permutations_table[PERMUTATIONS][PERM_SIZE]
- {
- { 0, 1, 2, 3, 4, 5, 6, 7, 8, 255, 255, 255, 255, 255, 255, 255 },
- { 15, 0, 1, 2, 3, 4, 5, 6, 7, 255, 255, 255, 255, 255, 255, 255 },
- { 14, 15, 0, 1, 2, 3, 4, 5, 6, 255, 255, 255, 255, 255, 255, 255 },
- { 13, 14, 15, 0, 1, 2, 3, 4, 5, 255, 255, 255, 255, 255, 255, 255 },
- { 12, 13, 14, 15, 0, 1, 2, 3, 4, 255, 255, 255, 255, 255, 255, 255 },
- { 11, 12, 13, 14, 15, 0, 1, 2, 3, 255, 255, 255, 255, 255, 255, 255 },
- { 10, 11, 12, 13, 14, 15, 0, 1, 2, 255, 255, 255, 255, 255, 255, 255 },
- { 9, 10, 11, 12, 13, 14, 15, 0, 1, 255, 255, 255, 255, 255, 255, 255 },
- { 8, 9, 10, 11, 12, 13, 14, 15, 0, 255, 255, 255, 255, 255, 255, 255 },
- { 7, 8, 9, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255 },
- { 6, 7, 8, 9, 10, 11, 12, 13, 14, 255, 255, 255, 255, 255, 255, 255 },
- { 5, 6, 7, 8, 9, 10, 11, 12, 13, 255, 255, 255, 255, 255, 255, 255 },
- { 4, 5, 6, 7, 8, 9, 10, 11, 12, 255, 255, 255, 255, 255, 255, 255 },
- { 3, 4, 5, 6, 7, 8, 9, 10, 11, 255, 255, 255, 255, 255, 255, 255 },
- { 2, 3, 4, 5, 6, 7, 8, 9, 10, 255, 255, 255, 255, 255, 255, 255 },
- { 1, 2, 3, 4, 5, 6, 7, 8, 9, 255, 255, 255, 255, 255, 255, 255 }
-
- };
+ static const std::array<std::array<uint8_t, PERMUTATIONS>, PERM_SIZE> permutations_table{ { { 0, 1, 2, 3, 4, 5, 6, 7, 8, 255, 255, 255, 255, 255, 255, 255 },
+ { 15, 0, 1, 2, 3, 4, 5, 6, 7, 255, 255, 255, 255, 255, 255, 255 },
+ { 14, 15, 0, 1, 2, 3, 4, 5, 6, 255, 255, 255, 255, 255, 255, 255 },
+ { 13, 14, 15, 0, 1, 2, 3, 4, 5, 255, 255, 255, 255, 255, 255, 255 },
+ { 12, 13, 14, 15, 0, 1, 2, 3, 4, 255, 255, 255, 255, 255, 255, 255 },
+ { 11, 12, 13, 14, 15, 0, 1, 2, 3, 255, 255, 255, 255, 255, 255, 255 },
+ { 10, 11, 12, 13, 14, 15, 0, 1, 2, 255, 255, 255, 255, 255, 255, 255 },
+ { 9, 10, 11, 12, 13, 14, 15, 0, 1, 255, 255, 255, 255, 255, 255, 255 },
+ { 8, 9, 10, 11, 12, 13, 14, 15, 0, 255, 255, 255, 255, 255, 255, 255 },
+ { 7, 8, 9, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255 },
+ { 6, 7, 8, 9, 10, 11, 12, 13, 14, 255, 255, 255, 255, 255, 255, 255 },
+ { 5, 6, 7, 8, 9, 10, 11, 12, 13, 255, 255, 255, 255, 255, 255, 255 },
+ { 4, 5, 6, 7, 8, 9, 10, 11, 12, 255, 255, 255, 255, 255, 255, 255 },
+ { 3, 4, 5, 6, 7, 8, 9, 10, 11, 255, 255, 255, 255, 255, 255, 255 },
+ { 2, 3, 4, 5, 6, 7, 8, 9, 10, 255, 255, 255, 255, 255, 255, 255 },
+ { 1, 2, 3, 4, 5, 6, 7, 8, 9, 255, 255, 255, 255, 255, 255, 255 }
+
+ } };
const uint8x8x2_t index =
{
{
- vld1_u8(permutations_table[k]),
- vld1_u8(permutations_table[k] + 8)
+ vld1_u8(permutations_table[k].data()),
+ vld1_u8(permutations_table[k].data() + 8)
}
};
@@ -112,7 +110,7 @@ inline uint8x8x4_t create_circle_index_register()
. . 9 8 7 . . .
*/
- static const uint8_t top_right[8] =
+ static const std::array<uint8_t, 8> top_right =
{
/* The register r.val[0] will be used to retrieve these texels:
. . . 0 1 . . .
@@ -130,7 +128,7 @@ inline uint8x8x4_t create_circle_index_register()
255
};
- static const uint8_t bottom_right[8] =
+ static const std::array<uint8_t, 8> bottom_right =
{
/* The register r.val[1] will be used to retrieve these texels:
. . . . . . 5 .
@@ -147,7 +145,7 @@ inline uint8x8x4_t create_circle_index_register()
20 /* low table, third row, elem 5, value 7 in the diagram above*/
};
- static const uint8_t top_left[8] =
+ static const std::array<uint8_t, 8> top_left =
{
/* The register r.val[2] will be used to retrieve these texels:
. . F . . . . .
@@ -165,7 +163,7 @@ inline uint8x8x4_t create_circle_index_register()
2 /* top table, first row, elem 3, value F in the diagram above*/
};
- static const uint8_t bottom_left[8] =
+ static const std::array<uint8_t, 8> bottom_left =
{
/* The register r.val[3] will be used to retrieve these texels:
B . . . . . . .
@@ -185,10 +183,10 @@ inline uint8x8x4_t create_circle_index_register()
const uint8x8x4_t reg =
{
{
- vld1_u8(top_right),
- vld1_u8(bottom_right),
- vld1_u8(top_left),
- vld1_u8(bottom_left)
+ vld1_u8(top_right.data()),
+ vld1_u8(bottom_right.data()),
+ vld1_u8(top_left.data()),
+ vld1_u8(bottom_left.data())
}
};
@@ -268,7 +266,7 @@ inline bool is_permutation_corner(const uint8x16_t &permutation, const uint8x16_
return is_permutation_brighter(permutation, pg) || is_permutation_darker(permutation, pl);
}
-inline bool point_is_fast_corner(uint8_t p, uint8_t threshold, const uint8x8x2_t &tbl_circle_texels, uint8x8x2_t perm_indices[PERMUTATIONS])
+inline bool point_is_fast_corner(uint8_t p, uint8_t threshold, const uint8x8x2_t &tbl_circle_texels, std::array<uint8x8x2_t, PERMUTATIONS> &perm_indices)
{
/*
This function determines whether the point 'p' is a corner.
@@ -287,7 +285,7 @@ inline bool point_is_fast_corner(uint8_t p, uint8_t threshold, const uint8x8x2_t
return corner_detected;
}
-inline uint8x8x2_t create_circle_tbl(const uint8_t *const __restrict buffer[7], size_t in_offset, const uint8x8x4_t &circle_index_r)
+inline uint8x8x2_t create_circle_tbl(const std::array<uint8_t *const __restrict, 7> &buffer, size_t in_offset, const uint8x8x4_t &circle_index_r)
{
/*
This function builds a LUT holding the 16 texels in the Brensenham circle radius 3.
@@ -329,7 +327,7 @@ inline uint8x8x2_t create_circle_tbl(const uint8_t *const __restrict buffer[7],
return tbl_circle_texels;
}
-inline uint8_t get_point_score(uint8_t p, uint8_t tolerance, const uint8x8x2_t &tbl_circle, uint8x8x2_t perm_indices[PERMUTATIONS])
+inline uint8_t get_point_score(uint8_t p, uint8_t tolerance, const uint8x8x2_t &tbl_circle, std::array<uint8x8x2_t, PERMUTATIONS> &perm_indices)
{
uint8_t b = 255;
uint8_t a = tolerance;
@@ -411,7 +409,7 @@ void NEFastCornersKernel::run(const Window &window, const ThreadInfo &info)
Iterator in(_input, window);
Iterator out(_output, window);
- const uint8_t *const __restrict in_row[7] =
+ const std::array<uint8_t *const __restrict, 7> in_row
{
_input->ptr_to_element(Coordinates(-3, -3)),
_input->ptr_to_element(Coordinates(-3, -2)),
@@ -429,7 +427,7 @@ void NEFastCornersKernel::run(const Window &window, const ThreadInfo &info)
return p_is_in_ab && q_is_in_ab;
};
- execute_window_loop(window, [&](const Coordinates & id)
+ execute_window_loop(window, [&](const Coordinates &)
{
const size_t in_offset = in.offset();
const uint8_t p0 = *in.ptr();
@@ -455,11 +453,11 @@ void NEFastCornersKernel::run(const Window &window, const ThreadInfo &info)
/* at this stage we use the full test with the 16 permutations to classify the point as corner or not */
const uint8x8x2_t tbl_circle_texel = create_circle_tbl(in_row, in_offset, circle_index_r);
- if(point_is_fast_corner(p0, _threshold, tbl_circle_texel, perm_index.data()))
+ if(point_is_fast_corner(p0, _threshold, tbl_circle_texel, perm_index))
{
if(_non_max_suppression)
{
- score = get_point_score(p0, _threshold, tbl_circle_texel, perm_index.data());
+ score = get_point_score(p0, _threshold, tbl_circle_texel, perm_index);
}
else
{
diff --git a/src/core/NEON/kernels/NEFillInnerBorderKernel.cpp b/src/core/NEON/kernels/NEFillInnerBorderKernel.cpp
index d1cff6f62d..50060b2376 100644
--- a/src/core/NEON/kernels/NEFillInnerBorderKernel.cpp
+++ b/src/core/NEON/kernels/NEFillInnerBorderKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2017 ARM Limited.
+ * Copyright (c) 2016-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -111,7 +111,7 @@ void NEFillInnerBorderKernel::fill_value_single_channel(const Window &window)
Iterator vertical_it(_tensor, vertical);
- execute_window_loop(vertical, [&](const Coordinates & id)
+ execute_window_loop(vertical, [&](const Coordinates &)
{
std::fill_n(reinterpret_cast<T *>(vertical_it.ptr()), _border_size.left, constant_border_value);
std::fill_n(reinterpret_cast<T *>(vertical_it.ptr()) + width - _border_size.right, _border_size.right, constant_border_value);
@@ -122,7 +122,7 @@ void NEFillInnerBorderKernel::fill_value_single_channel(const Window &window)
// All values are set at once
Iterator horizontal_it(_tensor, window);
- execute_window_loop(window, [&](const Coordinates & id)
+ execute_window_loop(window, [&](const Coordinates &)
{
for(size_t i = 0; i < _border_size.top; ++i)
{
diff --git a/src/core/NEON/kernels/NEFloorKernel.cpp b/src/core/NEON/kernels/NEFloorKernel.cpp
index 6551d9ed57..43554a097c 100644
--- a/src/core/NEON/kernels/NEFloorKernel.cpp
+++ b/src/core/NEON/kernels/NEFloorKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018 ARM Limited.
+ * Copyright (c) 2017-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -111,7 +111,7 @@ void NEFloorKernel::run(const Window &window, const ThreadInfo &info)
if(data_type == DataType::F32)
{
- execute_window_loop(window, [&](const Coordinates & id)
+ execute_window_loop(window, [&](const Coordinates &)
{
const float32x4_t res = vfloorq_f32(vld1q_f32(reinterpret_cast<const float *>(input.ptr())));
vst1q_f32(reinterpret_cast<float *>(output.ptr()), res);
@@ -121,7 +121,7 @@ void NEFloorKernel::run(const Window &window, const ThreadInfo &info)
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
else if(data_type == DataType::F16)
{
- execute_window_loop(window, [&](const Coordinates & id)
+ execute_window_loop(window, [&](const Coordinates &)
{
const float16x8_t res = vfloorq_f16(vld1q_f16(reinterpret_cast<const float16_t *>(input.ptr())));
vst1q_f16(reinterpret_cast<float16_t *>(output.ptr()), res);
diff --git a/src/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.cpp b/src/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.cpp
index a100cd2bf6..b561d1e3f4 100644
--- a/src/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.cpp
+++ b/src/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, 2018 ARM Limited.
+ * Copyright (c) 2017-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -479,7 +479,7 @@ void inline vector_matrix_multiply_s8(Iterator &ina, Iterator &inb, Iterator &ou
void inline matrix_multiply_u8(Iterator &ina, Iterator &inb, Iterator &out, int width_b, size_t out_stride, const Window &window)
{
- execute_window_loop(window, [&](const Coordinates & id)
+ execute_window_loop(window, [&](const Coordinates &)
{
const uint8_t *mtx_a0 = ina.ptr();
const uint8_t *mtx_b0 = inb.ptr();
@@ -599,7 +599,7 @@ void inline matrix_multiply_s8(Iterator &ina, Iterator &inb, Iterator &out, int
// The implementation assumes that the matrix A and Matrix B have been reshaped respectively with NEGEMMInterleave4x4 and NEGEMMTranspose1xW
// The reshaping of the matrices helps to have a cache friendly implementation and helps to avoid the data re-arrangements needed for computing 16x4 elements per iteration
// All the values needed for computing a single 4x4 block will be read from consecutive memory positions
- execute_window_loop(window, [&](const Coordinates & id)
+ execute_window_loop(window, [&](const Coordinates &)
{
auto *mtx_a0 = reinterpret_cast<const int8_t *>(ina.ptr());
auto *mtx_b0 = reinterpret_cast<const int8_t *>(inb.ptr());
diff --git a/src/core/NEON/kernels/NEGEMMLowpOffsetContributionOutputStageKernel.cpp b/src/core/NEON/kernels/NEGEMMLowpOffsetContributionOutputStageKernel.cpp
index 16a10a8463..46e53cec12 100644
--- a/src/core/NEON/kernels/NEGEMMLowpOffsetContributionOutputStageKernel.cpp
+++ b/src/core/NEON/kernels/NEGEMMLowpOffsetContributionOutputStageKernel.cpp
@@ -444,7 +444,7 @@ void run_offset_contribution_output_stage(const Window &window,
if(bias != nullptr)
{
Iterator bias_it = get_bias_it(collapsed_window, bias);
- execute_window_loop(collapsed_window, [&](const Coordinates & id)
+ execute_window_loop(collapsed_window, [&](const Coordinates &)
{
run_offset_contribution_output_stage_window<false, false, true, is_bounded_relu, is_fixed_point>(nullptr, nullptr, reinterpret_cast<const int32_t *>(bias_it.ptr()), mm_result_it, out_it,
result_offset_s32, result_shift_s32, min_u8, max_u8, a_offset, b_offset, k_offset,
@@ -454,7 +454,7 @@ void run_offset_contribution_output_stage(const Window &window,
}
else
{
- execute_window_loop(collapsed_window, [&](const Coordinates & id)
+ execute_window_loop(collapsed_window, [&](const Coordinates &)
{
run_offset_contribution_output_stage_window<false, false, false, is_bounded_relu, is_fixed_point>(nullptr, nullptr, nullptr, mm_result_it, out_it,
result_offset_s32, result_shift_s32, min_u8, max_u8, a_offset, b_offset, k_offset,
diff --git a/src/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel.cpp b/src/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel.cpp
index d3cfc7a8fa..4906e6a987 100644
--- a/src/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel.cpp
+++ b/src/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel.cpp
@@ -114,7 +114,7 @@ void NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel::run(const Window
win_biases.set(Window::DimY, Window::Dimension(0, 1, 1));
Iterator bias(_bias, win_biases);
- execute_window_loop(win_collapsed, [&](const Coordinates & id)
+ execute_window_loop(win_collapsed, [&](const Coordinates &)
{
// Compute 16 elements per iteration
int x = window_start_x;
@@ -165,7 +165,7 @@ void NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel::run(const Window
}
else
{
- execute_window_loop(win_collapsed, [&](const Coordinates & id)
+ execute_window_loop(win_collapsed, [&](const Coordinates &)
{
// Compute 16 elements per iteration
int x = window_start_x;
diff --git a/src/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel.cpp b/src/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel.cpp
index 573373f6b1..a221bd7925 100644
--- a/src/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel.cpp
+++ b/src/core/NEON/kernels/NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -183,7 +183,7 @@ void NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel::run(const Window &window)
win_biases.set(Window::DimY, Window::Dimension(0, 1, 1));
Iterator bias(_bias, win_biases);
- execute_window_loop(win, [&](const Coordinates & id)
+ execute_window_loop(win, [&](const Coordinates &)
{
// Compute 16 elements per iteration
int x = window_start_x;
@@ -245,7 +245,7 @@ void NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel::run(const Window &window)
}
else
{
- execute_window_loop(win, [&](const Coordinates & id)
+ execute_window_loop(win, [&](const Coordinates &)
{
// Compute 16 elements per iteration
int x = window_start_x;
diff --git a/src/core/NEON/kernels/NEGEMMMatrixAccumulateBiasesKernel.cpp b/src/core/NEON/kernels/NEGEMMMatrixAccumulateBiasesKernel.cpp
index 42353ed0eb..5ac2323896 100644
--- a/src/core/NEON/kernels/NEGEMMMatrixAccumulateBiasesKernel.cpp
+++ b/src/core/NEON/kernels/NEGEMMMatrixAccumulateBiasesKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018 ARM Limited.
+ * Copyright (c) 2017-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -122,7 +122,7 @@ void NEGEMMMatrixAccumulateBiasesKernel::run(const Window &window, const ThreadI
{
case DataType::F32:
{
- execute_window_loop(window, [&](const Coordinates & id)
+ execute_window_loop(window, [&](const Coordinates &)
{
const float32x4x4_t accum = vld4q_f32(reinterpret_cast<const float *>(in0_out.ptr()));
const float32x4x4_t biases = vld4q_f32(reinterpret_cast<const float *>(in1.ptr()));
@@ -144,7 +144,7 @@ void NEGEMMMatrixAccumulateBiasesKernel::run(const Window &window, const ThreadI
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
case DataType::F16:
{
- execute_window_loop(window, [&](const Coordinates & id)
+ execute_window_loop(window, [&](const Coordinates &)
{
const float16x8x2_t accum = vld2q_f16(reinterpret_cast<const float16_t *>(in0_out.ptr()));
const float16x8x2_t biases = vld2q_f16(reinterpret_cast<const float16_t *>(in1.ptr()));
diff --git a/src/core/NEON/kernels/NEGEMMMatrixAdditionKernel.cpp b/src/core/NEON/kernels/NEGEMMMatrixAdditionKernel.cpp
index 757dbbc399..86bea849e4 100644
--- a/src/core/NEON/kernels/NEGEMMMatrixAdditionKernel.cpp
+++ b/src/core/NEON/kernels/NEGEMMMatrixAdditionKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2018 ARM Limited.
+ * Copyright (c) 2016-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -60,7 +60,7 @@ void matrix_addition_f32(const ITensor *input, ITensor *output, const Window &wi
Iterator in(input, window);
Iterator out(output, window);
- execute_window_loop(window, [&](const Coordinates & id)
+ execute_window_loop(window, [&](const Coordinates &)
{
const auto in_ptr = reinterpret_cast<const float *>(in.ptr());
const auto out_ptr = reinterpret_cast<float *>(out.ptr());
@@ -87,7 +87,7 @@ void matrix_addition_f16(const ITensor *input, ITensor *output, const Window &wi
Iterator in(input, window);
Iterator out(output, window);
- execute_window_loop(window, [&](const Coordinates & id)
+ execute_window_loop(window, [&](const Coordinates &)
{
const auto in_ptr = reinterpret_cast<const float16_t *>(in.ptr());
const auto out_ptr = reinterpret_cast<float16_t *>(out.ptr());
diff --git a/src/core/NEON/kernels/NEGEMMMatrixMultiplyKernel.cpp b/src/core/NEON/kernels/NEGEMMMatrixMultiplyKernel.cpp
index f182fb24c3..a82fae7521 100644
--- a/src/core/NEON/kernels/NEGEMMMatrixMultiplyKernel.cpp
+++ b/src/core/NEON/kernels/NEGEMMMatrixMultiplyKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018 ARM Limited.
+ * Copyright (c) 2017-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -388,7 +388,7 @@ void matrix_matrix_multiply_f32(const ITensor *input0, const ITensor *input1, IT
// The implementation assumes that the matrix A and Matrix B have been reshaped respectively with NEGEMMInterleave4x4 and NEGEMMTranspose1xW
// The reshaping of the matrices helps to have a cache friendly implementation and helps to avoid the data re-arrangements needed for computing 16x4 elements per iteration
// All the values needed for computing a single 4x4 block will be read from consecutive memory positions
- execute_window_loop(window, [&](const Coordinates & id)
+ execute_window_loop(window, [&](const Coordinates &)
{
auto mtx_a0 = reinterpret_cast<const float *>(ina.ptr());
auto mtx_b0 = reinterpret_cast<const float *>(inb.ptr());
@@ -687,7 +687,7 @@ void matrix_matrix_multiply_f16(const ITensor *input0, const ITensor *input1, IT
const float16x8_t alpha_f16 = vdupq_n_f16(alpha);
- execute_window_loop(window, [&](const Coordinates & id)
+ execute_window_loop(window, [&](const Coordinates &)
{
const auto *mtx_a0 = reinterpret_cast<const float16_t *>(ina.ptr());
const auto *mtx_b0 = reinterpret_cast<const float16_t *>(inb.ptr());
diff --git a/src/core/NEON/kernels/NEGaussian3x3Kernel.cpp b/src/core/NEON/kernels/NEGaussian3x3Kernel.cpp
index 048c22933c..f41298067f 100644
--- a/src/core/NEON/kernels/NEGaussian3x3Kernel.cpp
+++ b/src/core/NEON/kernels/NEGaussian3x3Kernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2017 ARM Limited.
+ * Copyright (c) 2016-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -80,7 +80,7 @@ void NEGaussian3x3Kernel::run(const Window &window, const ThreadInfo &info)
static const int16x8_t two = vdupq_n_s16(2);
static const int16x8_t four = vdupq_n_s16(4);
- execute_window_loop(window, [&](const Coordinates & id)
+ execute_window_loop(window, [&](const Coordinates &)
{
uint8x16_t top_data = vld1q_u8(input_top_ptr + input.offset());
uint8x16_t mid_data = vld1q_u8(input_mid_ptr + input.offset());
diff --git a/src/core/NEON/kernels/NEGaussian5x5Kernel.cpp b/src/core/NEON/kernels/NEGaussian5x5Kernel.cpp
index b62e2816c0..0e4549e640 100644
--- a/src/core/NEON/kernels/NEGaussian5x5Kernel.cpp
+++ b/src/core/NEON/kernels/NEGaussian5x5Kernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2017 ARM Limited.
+ * Copyright (c) 2016-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -88,7 +88,7 @@ void NEGaussian5x5HorKernel::run(const Window &window, const ThreadInfo &info)
static const int16x8_t six = vdupq_n_s16(6);
static const int16x8_t four = vdupq_n_s16(4);
- execute_window_loop(window, [&](const Coordinates & id)
+ execute_window_loop(window, [&](const Coordinates &)
{
uint8x16_t data = vld1q_u8(input.ptr());
@@ -112,7 +112,7 @@ void NEGaussian5x5HorKernel::run(const Window &window, const ThreadInfo &info)
BorderSize NEGaussian5x5VertKernel::border_size() const
{
- return BorderSize(2, 0);
+ return BorderSize{ 2, 0 };
}
void NEGaussian5x5VertKernel::configure(const ITensor *input, ITensor *output, bool border_undefined)
@@ -159,7 +159,7 @@ void NEGaussian5x5VertKernel::run(const Window &window, const ThreadInfo &info)
const uint16x8_t six = vdupq_n_u16(6);
const uint16x8_t four = vdupq_n_u16(4);
- execute_window_loop(window, [&](const Coordinates & id)
+ execute_window_loop(window, [&](const Coordinates &)
{
const size_t input_offset_high_s16 = input.offset();
const size_t input_offset_low_s16 = input.offset() + 16;
diff --git a/src/core/NEON/kernels/NEGaussianPyramidKernel.cpp b/src/core/NEON/kernels/NEGaussianPyramidKernel.cpp
index 7a123e2f57..13cee19f5c 100644
--- a/src/core/NEON/kernels/NEGaussianPyramidKernel.cpp
+++ b/src/core/NEON/kernels/NEGaussianPyramidKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2017 ARM Limited.
+ * Copyright (c) 2016-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -47,7 +47,7 @@ NEGaussianPyramidHorKernel::NEGaussianPyramidHorKernel()
BorderSize NEGaussianPyramidHorKernel::border_size() const
{
- return BorderSize(0, 2);
+ return BorderSize{ 0, 2 };
}
void NEGaussianPyramidHorKernel::configure(const ITensor *input, ITensor *output)
@@ -126,7 +126,7 @@ void NEGaussianPyramidHorKernel::run(const Window &window, const ThreadInfo &inf
Iterator out(_output, win_out);
- execute_window_loop(window, [&](const Coordinates & id)
+ execute_window_loop(window, [&](const Coordinates &)
{
const uint8x16x2_t data_2q = vld2q_u8(in.ptr());
const uint8x16_t &data_even = data_2q.val[0];
@@ -155,7 +155,7 @@ NEGaussianPyramidVertKernel::NEGaussianPyramidVertKernel()
BorderSize NEGaussianPyramidVertKernel::border_size() const
{
- return BorderSize(2, 0);
+ return BorderSize{ 2, 0 };
}
void NEGaussianPyramidVertKernel::configure(const ITensor *input, ITensor *output)
@@ -236,7 +236,7 @@ void NEGaussianPyramidVertKernel::run(const Window &window, const ThreadInfo &in
const uint8_t *input_low_ptr = _input->buffer() + _input->info()->offset_element_in_bytes(Coordinates(0, 3));
const uint8_t *input_low2_ptr = _input->buffer() + _input->info()->offset_element_in_bytes(Coordinates(0, 4));
- execute_window_loop(window, [&](const Coordinates & id)
+ execute_window_loop(window, [&](const Coordinates &)
{
// Low data
const uint16x8_t data_low_t2 = vreinterpretq_u16_s16(vld1q_s16(reinterpret_cast<const int16_t *>(input_top2_ptr + in.offset())));
diff --git a/src/core/NEON/kernels/NEHOGDescriptorKernel.cpp b/src/core/NEON/kernels/NEHOGDescriptorKernel.cpp
index c204395586..c58b1c024a 100644
--- a/src/core/NEON/kernels/NEHOGDescriptorKernel.cpp
+++ b/src/core/NEON/kernels/NEHOGDescriptorKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2018 ARM Limited.
+ * Copyright (c) 2016-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -695,7 +695,7 @@ void NEHOGOrientationBinningKernel::run(const Window &window, const ThreadInfo &
Iterator phase(_input_phase, win_phase);
Iterator out(_output, window);
- execute_window_loop(window, [&](const Coordinates & id)
+ execute_window_loop(window, [&](const Coordinates &)
{
const auto mag_row_ptr = reinterpret_cast<const int16_t *>(mag.ptr());
const auto phase_row_ptr = reinterpret_cast<const uint8_t *>(phase.ptr());
diff --git a/src/core/NEON/kernels/NEHarrisCornersKernel.cpp b/src/core/NEON/kernels/NEHarrisCornersKernel.cpp
index 61221c1070..34e68e71cb 100644
--- a/src/core/NEON/kernels/NEHarrisCornersKernel.cpp
+++ b/src/core/NEON/kernels/NEHarrisCornersKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2018 ARM Limited.
+ * Copyright (c) 2016-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -719,7 +719,7 @@ void NEHarrisScoreKernel<block_size>::run(const Window &window, const ThreadInfo
const size_t input_stride = _input1->info()->strides_in_bytes()[1] / element_size_from_data_type(_input1->info()->data_type());
- execute_window_loop(window, [&](const Coordinates & id)
+ execute_window_loop(window, [&](const Coordinates &)
{
(*_func)(input1.ptr(), input2.ptr(), output.ptr(), input_stride, _norm_factor, _sensitivity, _strength_thresh);
},
diff --git a/src/core/NEON/kernels/NEIntegralImageKernel.cpp b/src/core/NEON/kernels/NEIntegralImageKernel.cpp
index 16a3cf7f07..b6db5f0e6a 100644
--- a/src/core/NEON/kernels/NEIntegralImageKernel.cpp
+++ b/src/core/NEON/kernels/NEIntegralImageKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2017 ARM Limited.
+ * Copyright (c) 2016-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -63,7 +63,7 @@ void NEIntegralImageKernel::configure(const ITensor *input, ITensor *output)
BorderSize NEIntegralImageKernel::border_size() const
{
- return BorderSize(1, 0, 0, 1);
+ return BorderSize{ 1, 0, 0, 1 };
}
bool NEIntegralImageKernel::is_parallelisable() const
@@ -83,7 +83,7 @@ void NEIntegralImageKernel::run(const Window &window, const ThreadInfo &info)
const auto output_top_left = reinterpret_cast<const uint32_t *>(_output->ptr_to_element(Coordinates(-1, -1)));
const auto output_top_mid = reinterpret_cast<const uint32_t *>(_output->ptr_to_element(Coordinates(0, -1)));
- execute_window_loop(window, [&](const Coordinates & id)
+ execute_window_loop(window, [&](const Coordinates &)
{
const uint8x16_t input_pixels = vld1q_u8(input.ptr());
diff --git a/src/core/NEON/kernels/NEL2NormalizeLayerKernel.cpp b/src/core/NEON/kernels/NEL2NormalizeLayerKernel.cpp
index cda041de66..efdcc44e0e 100644
--- a/src/core/NEON/kernels/NEL2NormalizeLayerKernel.cpp
+++ b/src/core/NEON/kernels/NEL2NormalizeLayerKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018 ARM Limited.
+ * Copyright (c) 2017-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -61,7 +61,7 @@ void l2_normalize_X(const ITensor *in, const ITensor *sum, ITensor *out, float e
const auto sum_value = *reinterpret_cast<const T *>(sum_it.ptr());
const auto vec_normalize_value = wrapper::vdup_n(static_cast<T>(1.f / std::sqrt(std::max(sum_value, static_cast<T>(epsilon)))), ExactTagType{});
- execute_window_loop(in_slice, [&](const Coordinates & id)
+ execute_window_loop(in_slice, [&](const Coordinates &)
{
const auto in_ptr = reinterpret_cast<const T *>(input_it.ptr());
const auto out_ptr = reinterpret_cast<T *>(output_it.ptr());
@@ -93,7 +93,7 @@ void l2_normalize_Y(const ITensor *in, const ITensor *sum, ITensor *out, float e
auto eps = wrapper::vdup_n(static_cast<T>(epsilon), ExactTagType{});
- execute_window_loop(in_slice, [&](const Coordinates & id)
+ execute_window_loop(in_slice, [&](const Coordinates &)
{
const auto in_ptr = reinterpret_cast<const T *>(input_it.ptr());
const auto sum_ptr = reinterpret_cast<const T *>(sum_it.ptr());
@@ -127,7 +127,7 @@ void l2_normalize_Z(const ITensor *in, const ITensor *sum, ITensor *out, float e
auto eps = wrapper::vdup_n(static_cast<T>(epsilon), ExactTagType{});
- execute_window_loop(in_slice, [&](const Coordinates & id)
+ execute_window_loop(in_slice, [&](const Coordinates &)
{
const auto in_ptr = reinterpret_cast<const T *>(input_it.ptr());
const auto sum_ptr = reinterpret_cast<const T *>(sum_it.ptr());
diff --git a/src/core/NEON/kernels/NEMagnitudePhaseKernel.cpp b/src/core/NEON/kernels/NEMagnitudePhaseKernel.cpp
index 4a318f02c1..8c09898403 100644
--- a/src/core/NEON/kernels/NEMagnitudePhaseKernel.cpp
+++ b/src/core/NEON/kernels/NEMagnitudePhaseKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2018 ARM Limited.
+ * Copyright (c) 2016-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -323,7 +323,7 @@ void NEMagnitudePhaseKernel<mag_type, phase_type>::magnitude(const Window &windo
Iterator gy(_gy, window);
Iterator magnitude(_magnitude, window);
- execute_window_loop(window, [&](const Coordinates & id)
+ execute_window_loop(window, [&](const Coordinates &)
{
const int16x8x2_t input1 =
{
@@ -369,7 +369,7 @@ void NEMagnitudePhaseKernel<mag_type, phase_type>::phase(const Window &window)
Iterator gy(_gy, window);
Iterator phase(_phase, window);
- execute_window_loop(window, [&](const Coordinates & id)
+ execute_window_loop(window, [&](const Coordinates &)
{
const int16x8x2_t input1 =
{
@@ -415,7 +415,7 @@ void NEMagnitudePhaseKernel<mag_type, phase_type>::magnitude_phase(const Window
Iterator magnitude(_magnitude, window);
Iterator phase(_phase, window);
- execute_window_loop(window, [&](const Coordinates & id)
+ execute_window_loop(window, [&](const Coordinates &)
{
const int16x8x2_t input1 =
{
diff --git a/src/core/NEON/kernels/NEMeanStdDevKernel.cpp b/src/core/NEON/kernels/NEMeanStdDevKernel.cpp
index 7895b009d6..0af63059fb 100644
--- a/src/core/NEON/kernels/NEMeanStdDevKernel.cpp
+++ b/src/core/NEON/kernels/NEMeanStdDevKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2017 ARM Limited.
+ * Copyright (c) 2016-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -51,7 +51,7 @@ std::pair<uint64x1_t, uint64x1_t> accumulate(const Window &window, Iterator &ite
uint64x1_t sum_squared = vdup_n_u64(0);
// Calculate sum
- execute_window_loop(window, [&](const Coordinates & id)
+ execute_window_loop(window, [&](const Coordinates &)
{
const uint8x16_t in_data = vld1q_u8(iterator.ptr());
diff --git a/src/core/NEON/kernels/NEMedian3x3Kernel.cpp b/src/core/NEON/kernels/NEMedian3x3Kernel.cpp
index 5bcdc7bc7a..9dc1bc9a36 100644
--- a/src/core/NEON/kernels/NEMedian3x3Kernel.cpp
+++ b/src/core/NEON/kernels/NEMedian3x3Kernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2017 ARM Limited.
+ * Copyright (c) 2016-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -87,7 +87,7 @@ void NEMedian3x3Kernel::run(const Window &window, const ThreadInfo &info)
Iterator input(_input, window);
Iterator output(_output, window);
- execute_window_loop(window, [&](const Coordinates & id)
+ execute_window_loop(window, [&](const Coordinates &)
{
const uint8x16_t top_data = vld1q_u8(input_top_ptr + input.offset());
const uint8x16_t mid_data = vld1q_u8(input_mid_ptr + input.offset());
diff --git a/src/core/NEON/kernels/NEMemsetKernel.cpp b/src/core/NEON/kernels/NEMemsetKernel.cpp
index 2b57b1595b..a0fab99c55 100644
--- a/src/core/NEON/kernels/NEMemsetKernel.cpp
+++ b/src/core/NEON/kernels/NEMemsetKernel.cpp
@@ -67,7 +67,7 @@ void NEMemsetKernel::run(const Window &window, const ThreadInfo &info)
collapsed.set(Window::DimX, Window::Dimension(0, 1, 1));
Iterator tensor_it(_tensor, collapsed);
- execute_window_loop(collapsed, [&](const Coordinates & id)
+ execute_window_loop(collapsed, [&](const Coordinates &)
{
uint8_t *base_addr = start_valid_region + tensor_it.offset();
// Set memory
diff --git a/src/core/NEON/kernels/NEMinMaxLayerKernel.cpp b/src/core/NEON/kernels/NEMinMaxLayerKernel.cpp
index 5d1b4b3aa4..fe3af0b44f 100644
--- a/src/core/NEON/kernels/NEMinMaxLayerKernel.cpp
+++ b/src/core/NEON/kernels/NEMinMaxLayerKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018 ARM Limited.
+ * Copyright (c) 2017-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -144,7 +144,7 @@ void NEMinMaxLayerKernel::run(const Window &window, const ThreadInfo &info)
float carry_min_scalar = std::numeric_limits<float>::max();
float carry_max_scalar = std::numeric_limits<float>::lowest();
- execute_window_loop(window_input, [&](const Coordinates & id)
+ execute_window_loop(window_input, [&](const Coordinates &)
{
int x = x_start;
const auto in_ptr = reinterpret_cast<const float *>(input.ptr() + id_batch[1] * _input->info()->strides_in_bytes()[3]);
@@ -203,7 +203,7 @@ void NEMinMaxLayerKernel::reset()
Iterator output(_output, window_output);
- execute_window_loop(window_output, [&](const Coordinates & id)
+ execute_window_loop(window_output, [&](const Coordinates &)
{
vst1_f32(reinterpret_cast<float *>(output.ptr()), reset_values);
},
diff --git a/src/core/NEON/kernels/NEMinMaxLocationKernel.cpp b/src/core/NEON/kernels/NEMinMaxLocationKernel.cpp
index befece2741..08b27e319e 100644
--- a/src/core/NEON/kernels/NEMinMaxLocationKernel.cpp
+++ b/src/core/NEON/kernels/NEMinMaxLocationKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2018 ARM Limited.
+ * Copyright (c) 2016-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -152,7 +152,7 @@ void NEMinMaxKernel::minmax_U8(Window win)
Iterator input(_input, win);
- execute_window_loop(win, [&](const Coordinates & id)
+ execute_window_loop(win, [&](const Coordinates &)
{
int x = x_start;
@@ -209,7 +209,7 @@ void NEMinMaxKernel::minmax_S16(Window win)
Iterator input(_input, win);
- execute_window_loop(win, [&](const Coordinates & id)
+ execute_window_loop(win, [&](const Coordinates &)
{
int x = x_start;
const auto in_ptr = reinterpret_cast<const int16_t *>(input.ptr());
@@ -268,7 +268,7 @@ void NEMinMaxKernel::minmax_F32(Window win)
Iterator input(_input, win);
- execute_window_loop(win, [&](const Coordinates & id)
+ execute_window_loop(win, [&](const Coordinates &)
{
int x = x_start;
const auto in_ptr = reinterpret_cast<const float *>(input.ptr());
@@ -323,11 +323,11 @@ bool NEMinMaxLocationKernel::is_parallelisable() const
template <class T, std::size_t... N>
struct NEMinMaxLocationKernel::create_func_table<T, utility::index_sequence<N...>>
{
- static const NEMinMaxLocationKernel::MinMaxLocFunction func_table[sizeof...(N)];
+ static const std::array<NEMinMaxLocationKernel::MinMaxLocFunction, sizeof...(N)> func_table;
};
template <class T, std::size_t... N>
-const NEMinMaxLocationKernel::MinMaxLocFunction NEMinMaxLocationKernel::create_func_table<T, utility::index_sequence<N...>>::func_table[sizeof...(N)] =
+const std::array<NEMinMaxLocationKernel::MinMaxLocFunction, sizeof...(N)> NEMinMaxLocationKernel::create_func_table<T, utility::index_sequence<N...>>::func_table
{
&NEMinMaxLocationKernel::minmax_loc<T, bool(N & 8), bool(N & 4), bool(N & 2), bool(N & 1)>...
};
diff --git a/src/core/NEON/kernels/NENonLinearFilterKernel.cpp b/src/core/NEON/kernels/NENonLinearFilterKernel.cpp
index 52dbe26f42..00536f092c 100644
--- a/src/core/NEON/kernels/NENonLinearFilterKernel.cpp
+++ b/src/core/NEON/kernels/NENonLinearFilterKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2018 ARM Limited.
+ * Copyright (c) 2016-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -117,7 +117,7 @@ inline void sort9(uint8x8_t &p0, uint8x8_t &p1, uint8x8_t &p2,
sort(p4, p2);
}
-inline void sort21(uint8x8_t p[21])
+inline void sort21(std::array<uint8x8_t, 21> &p)
{
sort(p[0], p[1]);
sort(p[2], p[3]);
@@ -222,7 +222,7 @@ inline void sort21(uint8x8_t p[21])
sort(p[10], p[16]);
}
-inline void sort25(uint8x8_t p[25])
+inline void sort25(std::array<uint8x8_t, 25> &p)
{
sort(p[1], p[2]);
sort(p[0], p[1]);
@@ -429,7 +429,7 @@ void NENonLinearFilterKernel::median_filter_box<3, 3>(const Window &win)
const auto input_mid_ptr = static_cast<const unsigned char *>(_input->ptr_to_element(Coordinates(-1, 0)));
const auto input_bot_ptr = static_cast<const unsigned char *>(_input->ptr_to_element(Coordinates(-1, 1)));
- execute_window_loop(win, [&](const Coordinates & id)
+ execute_window_loop(win, [&](const Coordinates &)
{
const uint8x16_t top_data = vld1q_u8(input_top_ptr + input.offset());
const uint8x16_t mid_data = vld1q_u8(input_mid_ptr + input.offset());
@@ -463,7 +463,7 @@ void NENonLinearFilterKernel::median_filter_box<5, 5>(const Window &win)
const auto input_bot_ptr = static_cast<const unsigned char *>(_input->ptr_to_element(Coordinates(-2, 1)));
const auto input_bot2_ptr = static_cast<const unsigned char *>(_input->ptr_to_element(Coordinates(-2, 2)));
- execute_window_loop(win, [&](const Coordinates & id)
+ execute_window_loop(win, [&](const Coordinates &)
{
const uint8x16_t top2_data = vld1q_u8(input_top2_ptr + input.offset());
const uint8x16_t top_data = vld1q_u8(input_top_ptr + input.offset());
@@ -471,7 +471,7 @@ void NENonLinearFilterKernel::median_filter_box<5, 5>(const Window &win)
const uint8x16_t bot_data = vld1q_u8(input_bot_ptr + input.offset());
const uint8x16_t bot2_data = vld1q_u8(input_bot2_ptr + input.offset());
- const uint8x8_t d[] =
+ const std::array<uint8x8_t, 10> d =
{
vget_low_u8(top2_data),
vget_high_u8(top2_data),
@@ -485,7 +485,7 @@ void NENonLinearFilterKernel::median_filter_box<5, 5>(const Window &win)
vget_high_u8(bot2_data)
};
- uint8x8_t p[25];
+ std::array<uint8x8_t, 25> p{ 0 };
for(unsigned int i = 0; i < 5; ++i)
{
const unsigned int idx_d = i * 2;
@@ -524,7 +524,7 @@ void NENonLinearFilterKernel::min_filter_box(const Window &win)
input_ptrs[k_row_half + i] = _input->buffer() + _input->info()->offset_element_in_bytes(Coordinates(-k_col_half, i));
}
- execute_window_loop(win, [&](const Coordinates & id)
+ execute_window_loop(win, [&](const Coordinates &)
{
// Get min of rows
uint8x16_t rows_min = vld1q_u8(input_ptrs[0] + input.offset());
@@ -563,7 +563,7 @@ void NENonLinearFilterKernel::max_filter_box(const Window &win)
input_ptrs[k_row_half + i] = _input->buffer() + _input->info()->offset_element_in_bytes(Coordinates(-k_col_half, i));
}
- execute_window_loop(win, [&](const Coordinates & id)
+ execute_window_loop(win, [&](const Coordinates &)
{
uint8x16_t rows_max = vld1q_u8(input_ptrs[0] + input.offset());
@@ -593,7 +593,7 @@ void NENonLinearFilterKernel::median_filter_cross<3, 3>(const Window &win)
const auto input_mid_ptr = static_cast<const unsigned char *>(_input->ptr_to_element(Coordinates(-1, 0)));
const auto input_bot_ptr = static_cast<const unsigned char *>(_input->ptr_to_element(Coordinates(0, 1)));
- execute_window_loop(win, [&](const Coordinates & id)
+ execute_window_loop(win, [&](const Coordinates &)
{
const uint8x8_t top_data = vld1_u8(input_top_ptr + input.offset());
const uint8x16_t mid_data = vld1q_u8(input_mid_ptr + input.offset());
@@ -624,7 +624,7 @@ void NENonLinearFilterKernel::median_filter_cross<5, 5>(const Window &win)
const auto input_bot_ptr = static_cast<const unsigned char *>(_input->ptr_to_element(Coordinates(0, 1)));
const auto input_bot2_ptr = static_cast<const unsigned char *>(_input->ptr_to_element(Coordinates(0, 2)));
- execute_window_loop(win, [&](const Coordinates & id)
+ execute_window_loop(win, [&](const Coordinates &)
{
const uint8x8_t top2_data = vld1_u8(input_top2_ptr + input.offset());
const uint8x8_t top_data = vld1_u8(input_top_ptr + input.offset());
@@ -671,7 +671,7 @@ void NENonLinearFilterKernel::min_filter_cross(const Window &win)
input_ptrs[k_row_half + i] = _input->buffer() + _input->info()->offset_element_in_bytes(Coordinates(0, i));
}
- execute_window_loop(win, [&](const Coordinates & id)
+ execute_window_loop(win, [&](const Coordinates &)
{
uint8x8_t rows_min = vld1_u8(input_ptrs[0] + input.offset());
@@ -717,7 +717,7 @@ void NENonLinearFilterKernel::max_filter_cross(const Window &win)
input_ptrs[k_row_half + i] = _input->buffer() + _input->info()->offset_element_in_bytes(Coordinates(0, i));
}
- execute_window_loop(win, [&](const Coordinates & id)
+ execute_window_loop(win, [&](const Coordinates &)
{
uint8x8_t rows_max = vld1_u8(input_ptrs[0] + input.offset());
@@ -754,7 +754,7 @@ void NENonLinearFilterKernel::median_filter_disk<5, 5>(const Window &win)
const auto input_bot_ptr = static_cast<const unsigned char *>(_input->ptr_to_element(Coordinates(-2, 1)));
const auto input_bot2_ptr = static_cast<const unsigned char *>(_input->ptr_to_element(Coordinates(-2, 2)));
- execute_window_loop(win, [&](const Coordinates & id)
+ execute_window_loop(win, [&](const Coordinates &)
{
const uint8x16_t top2_data = vextq_u8(vld1q_u8(input_top2_ptr + input.offset()), zero, 1);
const uint8x16_t top_data = vld1q_u8(input_top_ptr + input.offset());
@@ -762,7 +762,7 @@ void NENonLinearFilterKernel::median_filter_disk<5, 5>(const Window &win)
const uint8x16_t bot_data = vld1q_u8(input_bot_ptr + input.offset());
const uint8x16_t bot2_data = vextq_u8(vld1q_u8(input_bot2_ptr + input.offset()), zero, 1);
- uint8x8_t d[] =
+ std::array<uint8x8_t, 10> d =
{
vget_low_u8(top2_data),
vget_high_u8(top2_data),
@@ -776,7 +776,7 @@ void NENonLinearFilterKernel::median_filter_disk<5, 5>(const Window &win)
vget_high_u8(bot2_data)
};
- uint8x8_t p[21];
+ std::array<uint8x8_t, 21> p{ 0 };
p[0] = d[0];
p[1] = vext_u8(d[0], d[1], 1);
p[2] = vext_u8(d[0], d[1], 2);
@@ -816,7 +816,7 @@ void NENonLinearFilterKernel::min_filter_disk<5, 5>(const Window &win)
const auto input_bot_ptr = static_cast<const unsigned char *>(_input->ptr_to_element(Coordinates(-2, 1)));
const auto input_bot2_ptr = static_cast<const unsigned char *>(_input->ptr_to_element(Coordinates(-2, 2)));
- execute_window_loop(win, [&](const Coordinates & id)
+ execute_window_loop(win, [&](const Coordinates &)
{
const uint8x16_t top2_data = vextq_u8(vld1q_u8(input_top2_ptr + input.offset()), zero, 1);
const uint8x16_t top_data = vld1q_u8(input_top_ptr + input.offset());
@@ -849,7 +849,7 @@ void NENonLinearFilterKernel::max_filter_disk<5, 5>(const Window &win)
const auto input_bot_ptr = static_cast<const unsigned char *>(_input->ptr_to_element(Coordinates(-2, 1)));
const auto input_bot2_ptr = static_cast<const unsigned char *>(_input->ptr_to_element(Coordinates(-2, 2)));
- execute_window_loop(win, [&](const Coordinates & id)
+ execute_window_loop(win, [&](const Coordinates &)
{
const uint8x16_t top2_data = vextq_u8(vld1q_u8(input_top2_ptr + input.offset()), zero, 1);
const uint8x16_t top_data = vld1q_u8(input_top_ptr + input.offset());
@@ -889,7 +889,7 @@ void NENonLinearFilterKernel::non_linear_filter_generic(const Window &win)
std::array<uint8_t, mask_size> vals{ {} };
- execute_window_loop(win, [&](const Coordinates & id)
+ execute_window_loop(win, [&](const Coordinates &)
{
// Clear array
std::fill(std::begin(vals), std::end(vals), 0);
diff --git a/src/core/NEON/kernels/NENonMaximaSuppression3x3Kernel.cpp b/src/core/NEON/kernels/NENonMaximaSuppression3x3Kernel.cpp
index 8f97e6ac16..674a7c88cc 100644
--- a/src/core/NEON/kernels/NENonMaximaSuppression3x3Kernel.cpp
+++ b/src/core/NEON/kernels/NENonMaximaSuppression3x3Kernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2017 ARM Limited.
+ * Copyright (c) 2016-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -506,7 +506,7 @@ void NENonMaximaSuppression3x3Kernel::run(const Window &window, const ThreadInfo
const size_t input_stride = _input->info()->strides_in_bytes()[1] / element_size_from_data_type(_input->info()->data_type());
- execute_window_loop(window, [&](const Coordinates & id)
+ execute_window_loop(window, [&](const Coordinates &)
{
_func(input.ptr(), output.ptr(), input_stride);
},
diff --git a/src/core/NEON/kernels/NEPixelWiseMultiplicationKernel.cpp b/src/core/NEON/kernels/NEPixelWiseMultiplicationKernel.cpp
index e3166e02b6..b565300906 100644
--- a/src/core/NEON/kernels/NEPixelWiseMultiplicationKernel.cpp
+++ b/src/core/NEON/kernels/NEPixelWiseMultiplicationKernel.cpp
@@ -627,7 +627,7 @@ void NEPixelWiseMultiplicationKernel::run(const Window &window, const ThreadInfo
if(_func_qasymm8 != nullptr)
{
- execute_window_loop(collapsed, [&](const Coordinates & id)
+ execute_window_loop(collapsed, [&](const Coordinates &)
{
(*_func_qasymm8)(input1.ptr(), input2.ptr(), output.ptr(), _scale,
_input1->info()->quantization_info(), _input2->info()->quantization_info(), _output->info()->quantization_info());
@@ -638,7 +638,7 @@ void NEPixelWiseMultiplicationKernel::run(const Window &window, const ThreadInfo
}
else if(_func_int != nullptr)
{
- execute_window_loop(collapsed, [&](const Coordinates & id)
+ execute_window_loop(collapsed, [&](const Coordinates &)
{
(*_func_int)(input1.ptr(), input2.ptr(), output.ptr(), _scale_exponent);
collapsed.slide_window_slice_3D(slice_input1);
@@ -649,7 +649,7 @@ void NEPixelWiseMultiplicationKernel::run(const Window &window, const ThreadInfo
else
{
ARM_COMPUTE_ERROR_ON(_func_float == nullptr);
- execute_window_loop(collapsed, [&](const Coordinates & id)
+ execute_window_loop(collapsed, [&](const Coordinates &)
{
(*_func_float)(input1.ptr(), input2.ptr(), output.ptr(), _scale);
collapsed.slide_window_slice_3D(slice_input1);
@@ -663,6 +663,6 @@ BorderSize NEPixelWiseMultiplicationKernel::border_size() const
{
const unsigned int replicateSize = _output->info()->dimension(0) - std::min(_input1->info()->dimension(0), _input2->info()->dimension(0));
const unsigned int border = std::min<unsigned int>(num_elems_processed_per_iteration - 1U, replicateSize);
- return BorderSize(0, border, 0, 0);
+ return BorderSize{ 0, border, 0, 0 };
}
} // namespace arm_compute
diff --git a/src/core/NEON/kernels/NEQuantizationLayerKernel.cpp b/src/core/NEON/kernels/NEQuantizationLayerKernel.cpp
index 136457c34e..4deeb1c7cc 100644
--- a/src/core/NEON/kernels/NEQuantizationLayerKernel.cpp
+++ b/src/core/NEON/kernels/NEQuantizationLayerKernel.cpp
@@ -51,7 +51,7 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output)
return Status{};
}
-inline const float32x4x4_t load_value(const float *input_ptr)
+inline float32x4x4_t load_value(const float *input_ptr)
{
return { wrapper::vloadq(input_ptr),
wrapper::vloadq(input_ptr + 4),
@@ -119,7 +119,7 @@ void NEQuantizationLayerKernel::quantize(const Window &window, const Quantizatio
Iterator input(_input, win_collapsed);
Iterator output(_output, win_collapsed);
- execute_window_loop(win_collapsed, [&](const Coordinates & id)
+ execute_window_loop(win_collapsed, [&](const Coordinates &)
{
auto input_ptr = reinterpret_cast<const T *>(input.ptr());
auto output_ptr = reinterpret_cast<uint8_t *>(output.ptr());
diff --git a/src/core/NEON/kernels/NEScharr3x3Kernel.cpp b/src/core/NEON/kernels/NEScharr3x3Kernel.cpp
index f23c31bc0a..3add699f62 100644
--- a/src/core/NEON/kernels/NEScharr3x3Kernel.cpp
+++ b/src/core/NEON/kernels/NEScharr3x3Kernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2017 ARM Limited.
+ * Copyright (c) 2016-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -161,7 +161,7 @@ void NEScharr3x3Kernel::run(const Window &window, const ThreadInfo &info)
if(_run_scharr_x && _run_scharr_y)
{
- execute_window_loop(window, [&](const Coordinates & id)
+ execute_window_loop(window, [&](const Coordinates &)
{
const uint8x16_t top_data = vld1q_u8(input_top_ptr + input.offset());
@@ -197,7 +197,7 @@ void NEScharr3x3Kernel::run(const Window &window, const ThreadInfo &info)
}
else if(_run_scharr_x)
{
- execute_window_loop(window, [&](const Coordinates & id)
+ execute_window_loop(window, [&](const Coordinates &)
{
const uint8x16_t top_data = vld1q_u8(input_top_ptr + input.offset());
@@ -232,7 +232,7 @@ void NEScharr3x3Kernel::run(const Window &window, const ThreadInfo &info)
}
else if(_run_scharr_y)
{
- execute_window_loop(window, [&](const Coordinates & id)
+ execute_window_loop(window, [&](const Coordinates &)
{
const uint8x16_t top_data = vld1q_u8(input_top_ptr + input.offset());
diff --git a/src/core/NEON/kernels/NESelectKernel.cpp b/src/core/NEON/kernels/NESelectKernel.cpp
index f2697bcc6d..c03e5f0bca 100644
--- a/src/core/NEON/kernels/NESelectKernel.cpp
+++ b/src/core/NEON/kernels/NESelectKernel.cpp
@@ -54,7 +54,7 @@ void select_op(const ITensor *cond, const ITensor *in1, const ITensor *in2, ITen
Iterator input2(in2, win);
Iterator output(out, win);
- execute_window_loop(win, [&](const Coordinates & id)
+ execute_window_loop(win, [&](const Coordinates &)
{
auto output_ptr = reinterpret_cast<ScalarType *>(output.ptr());
const auto condition_ptr = reinterpret_cast<const uint8_t *>(condition.ptr());
diff --git a/src/core/NEON/kernels/NESobel3x3Kernel.cpp b/src/core/NEON/kernels/NESobel3x3Kernel.cpp
index 5a80630a76..7a27203bef 100644
--- a/src/core/NEON/kernels/NESobel3x3Kernel.cpp
+++ b/src/core/NEON/kernels/NESobel3x3Kernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2017 ARM Limited.
+ * Copyright (c) 2016-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -42,7 +42,7 @@ NESobel3x3Kernel::NESobel3x3Kernel()
BorderSize NESobel3x3Kernel::border_size() const
{
- return BorderSize(1);
+ return BorderSize{ 1 };
}
void NESobel3x3Kernel::configure(const ITensor *input, ITensor *output_x, ITensor *output_y, bool border_undefined)
@@ -117,7 +117,7 @@ void NESobel3x3Kernel::run(const Window &window, const ThreadInfo &info)
if(_run_sobel_y && _run_sobel_x)
{
- execute_window_loop(window, [&](const Coordinates & id)
+ execute_window_loop(window, [&](const Coordinates &)
{
const uint8x16_t top_data = vld1q_u8(input_top_ptr + input.offset());
const uint8x16_t mid_data = vld1q_u8(input_mid_ptr + input.offset());
@@ -181,7 +181,7 @@ void NESobel3x3Kernel::run(const Window &window, const ThreadInfo &info)
}
else if(_run_sobel_x)
{
- execute_window_loop(window, [&](const Coordinates & id)
+ execute_window_loop(window, [&](const Coordinates &)
{
const uint8x16_t top_data = vld1q_u8(input_top_ptr + input.offset());
const uint8x16_t mid_data = vld1q_u8(input_mid_ptr + input.offset());
@@ -229,7 +229,7 @@ void NESobel3x3Kernel::run(const Window &window, const ThreadInfo &info)
}
else if(_run_sobel_y)
{
- execute_window_loop(window, [&](const Coordinates & id)
+ execute_window_loop(window, [&](const Coordinates &)
{
const uint8x16_t top_data = vld1q_u8(input_top_ptr + input.offset());
const uint8x16_t bot_data = vld1q_u8(input_bot_ptr + input.offset());
diff --git a/src/core/NEON/kernels/NESobel5x5Kernel.cpp b/src/core/NEON/kernels/NESobel5x5Kernel.cpp
index 30e7817aa4..a92cfc2308 100644
--- a/src/core/NEON/kernels/NESobel5x5Kernel.cpp
+++ b/src/core/NEON/kernels/NESobel5x5Kernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2017 ARM Limited.
+ * Copyright (c) 2016-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -120,7 +120,7 @@ void NESobel5x5HorKernel::run(const Window &window, const ThreadInfo &info)
static const int16x8_t two = vdupq_n_s16(2);
static const int16x8_t minustwo = vdupq_n_s16(-2);
- execute_window_loop(window, [&](const Coordinates & id)
+ execute_window_loop(window, [&](const Coordinates &)
{
const uint8x16_t data = vld1q_u8(input.ptr());
@@ -154,7 +154,7 @@ void NESobel5x5HorKernel::run(const Window &window, const ThreadInfo &info)
static const int16x8_t two = vdupq_n_s16(2);
static const int16x8_t minustwo = vdupq_n_s16(-2);
- execute_window_loop(window, [&](const Coordinates & id)
+ execute_window_loop(window, [&](const Coordinates &)
{
const uint8x16_t data = vld1q_u8(input.ptr());
@@ -180,7 +180,7 @@ void NESobel5x5HorKernel::run(const Window &window, const ThreadInfo &info)
static const int16x8_t six = vdupq_n_s16(6);
static const int16x8_t four = vdupq_n_s16(4);
- execute_window_loop(window, [&](const Coordinates & id)
+ execute_window_loop(window, [&](const Coordinates &)
{
const uint8x16_t data = vld1q_u8(input.ptr());
@@ -211,7 +211,7 @@ NESobel5x5VertKernel::NESobel5x5VertKernel()
BorderSize NESobel5x5VertKernel::border_size() const
{
- return BorderSize(2, 0);
+ return BorderSize{ 2, 0 };
}
void NESobel5x5VertKernel::configure(ITensor *input_x, ITensor *input_y, ITensor *output_x, ITensor *output_y, bool border_undefined)
@@ -312,7 +312,7 @@ void NESobel5x5VertKernel::run(const Window &window, const ThreadInfo &info)
if(_run_sobel_x)
{
- execute_window_loop(window, [&](const Coordinates & id)
+ execute_window_loop(window, [&](const Coordinates &)
{
// Convert offset from uint8_t* to uint16_t*
const size_t input_offset_high_s16 = input_x.offset() / 2;
@@ -361,7 +361,7 @@ void NESobel5x5VertKernel::run(const Window &window, const ThreadInfo &info)
if(_run_sobel_y)
{
- execute_window_loop(window, [&](const Coordinates & id)
+ execute_window_loop(window, [&](const Coordinates &)
{
// Convert offset from uint8_t* to uint16_t*
const size_t input_offset_high_s16 = input_y.offset() / 2;
diff --git a/src/core/NEON/kernels/NESobel7x7Kernel.cpp b/src/core/NEON/kernels/NESobel7x7Kernel.cpp
index 40a3e31a39..f2b42cc5dd 100644
--- a/src/core/NEON/kernels/NESobel7x7Kernel.cpp
+++ b/src/core/NEON/kernels/NESobel7x7Kernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2017 ARM Limited.
+ * Copyright (c) 2016-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -215,7 +215,7 @@ void NESobel7x7HorKernel::run(const Window &window, const ThreadInfo &info)
if(_run_sobel_y && _run_sobel_x)
{
- execute_window_loop(window, [&](const Coordinates & id)
+ execute_window_loop(window, [&](const Coordinates &)
{
const uint8x16_t data = vld1q_u8(input.ptr() - 3);
@@ -244,7 +244,7 @@ void NESobel7x7HorKernel::run(const Window &window, const ThreadInfo &info)
}
else if(_run_sobel_x)
{
- execute_window_loop(window, [&](const Coordinates & id)
+ execute_window_loop(window, [&](const Coordinates &)
{
const uint8x16_t data = vld1q_u8(input.ptr() - 3);
@@ -269,7 +269,7 @@ void NESobel7x7HorKernel::run(const Window &window, const ThreadInfo &info)
}
else if(_run_sobel_y)
{
- execute_window_loop(window, [&](const Coordinates & id)
+ execute_window_loop(window, [&](const Coordinates &)
{
const uint8x16_t data = vld1q_u8(input.ptr() - 3);
@@ -301,7 +301,7 @@ NESobel7x7VertKernel::NESobel7x7VertKernel()
BorderSize NESobel7x7VertKernel::border_size() const
{
- return BorderSize(3, 0);
+ return BorderSize{ 3, 0 };
}
void NESobel7x7VertKernel::configure(const ITensor *input_x, const ITensor *input_y, ITensor *output_x, ITensor *output_y, bool border_undefined)
@@ -382,7 +382,7 @@ void NESobel7x7VertKernel::run(const Window &window, const ThreadInfo &info)
if(_run_sobel_x)
{
- execute_window_loop(window, [&](const Coordinates & id)
+ execute_window_loop(window, [&](const Coordinates &)
{
auto in_ptr = reinterpret_cast<int32_t *>(input_x.ptr()) - 3 * in_x_stride;
@@ -453,7 +453,7 @@ void NESobel7x7VertKernel::run(const Window &window, const ThreadInfo &info)
if(_run_sobel_y)
{
- execute_window_loop(window, [&](const Coordinates & id)
+ execute_window_loop(window, [&](const Coordinates &)
{
auto in_ptr = reinterpret_cast<int32_t *>(input_y.ptr()) - 3 * in_y_stride;
diff --git a/src/core/NEON/kernels/NETableLookupKernel.cpp b/src/core/NEON/kernels/NETableLookupKernel.cpp
index 958f4a9cfb..536c2201c2 100644
--- a/src/core/NEON/kernels/NETableLookupKernel.cpp
+++ b/src/core/NEON/kernels/NETableLookupKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2017 ARM Limited.
+ * Copyright (c) 2016-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -61,7 +61,7 @@ void NETableLookupKernel::tableLookup(const Window &window)
Iterator input = Iterator(_input, window);
Iterator output = Iterator(_output, window);
- execute_window_loop(window, [&](const Coordinates & id)
+ execute_window_loop(window, [&](const Coordinates &)
{
auto input_ptr = reinterpret_cast<const T *>(input.ptr());
auto output_ptr = reinterpret_cast<T *>(output.ptr());
@@ -92,7 +92,7 @@ void NETableLookupKernel::tableLookup<uint8_t>(const Window &window)
Iterator input = Iterator(_input, window);
Iterator output = Iterator(_output, window);
- execute_window_loop(window, [&](const Coordinates & id)
+ execute_window_loop(window, [&](const Coordinates &)
{
const uint8_t *input_ptr = input.ptr();
uint8_t *output_ptr = output.ptr();
diff --git a/src/core/NEON/kernels/NEThresholdKernel.cpp b/src/core/NEON/kernels/NEThresholdKernel.cpp
index 5ef06931cc..ae9c62bc92 100644
--- a/src/core/NEON/kernels/NEThresholdKernel.cpp
+++ b/src/core/NEON/kernels/NEThresholdKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2017 ARM Limited.
+ * Copyright (c) 2016-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -86,7 +86,7 @@ inline void NEThresholdKernel::run_binary(const Window &window)
Iterator input(_input, window);
Iterator output(_output, window);
- execute_window_loop(window, [&](const Coordinates & id)
+ execute_window_loop(window, [&](const Coordinates &)
{
const uint8x16_t data = vld1q_u8(input.ptr());
const uint8x16_t mask = vcgtq_u8(data, threshold);
@@ -106,7 +106,7 @@ inline void NEThresholdKernel::run_range(const Window &window)
Iterator input(_input, window);
Iterator output(_output, window);
- execute_window_loop(window, [&](const Coordinates & id)
+ execute_window_loop(window, [&](const Coordinates &)
{
const uint8x16_t data = vld1q_u8(input.ptr());
diff --git a/src/core/NEON/kernels/NEUpsampleLayerKernel.cpp b/src/core/NEON/kernels/NEUpsampleLayerKernel.cpp
index aae85c6741..d3d88b3bf0 100644
--- a/src/core/NEON/kernels/NEUpsampleLayerKernel.cpp
+++ b/src/core/NEON/kernels/NEUpsampleLayerKernel.cpp
@@ -130,7 +130,7 @@ void NEUpsampleLayerKernel::upsample_f32_nchw(const arm_compute::Window &window)
Iterator output(_output, window_out);
const int offset_y_out = _output->info()->strides_in_bytes().y() / sizeof(float);
- execute_window_loop(window_out, [&](const Coordinates & id)
+ execute_window_loop(window_out, [&](const Coordinates &)
{
const float32x4_t data = vld1q_f32(reinterpret_cast<const float *>(input.ptr()));
const float32x4_t data_out1 = { vgetq_lane_f32(data, 0), vgetq_lane_f32(data, 0), vgetq_lane_f32(data, 1), vgetq_lane_f32(data, 1) };
@@ -157,7 +157,7 @@ void NEUpsampleLayerKernel::upsample_f32_nhwc(const arm_compute::Window &window)
const int offset_y_out = _output->info()->strides_in_bytes().y() / sizeof(float);
const int offset_z_out = _output->info()->strides_in_bytes().z() / sizeof(float);
- execute_window_loop(window_out, [&](const Coordinates & id)
+ execute_window_loop(window_out, [&](const Coordinates &)
{
const float32x4_t data = vld1q_f32(reinterpret_cast<const float *>(input.ptr()));
auto out = reinterpret_cast<float *>(output.ptr());
@@ -182,7 +182,7 @@ void NEUpsampleLayerKernel::upsample_qasymm8_nchw(const arm_compute::Window &win
Iterator output(_output, window_out);
const int offset_y_out = _output->info()->strides_in_bytes().y() / sizeof(uint8_t);
- execute_window_loop(window_out, [&](const Coordinates & id)
+ execute_window_loop(window_out, [&](const Coordinates &)
{
const uint8x16_t data = vld1q_u8(reinterpret_cast<const uint8_t *>(input.ptr()));
const uint8x16_t data_out1 = { vgetq_lane_u8(data, 0), vgetq_lane_u8(data, 0), vgetq_lane_u8(data, 1), vgetq_lane_u8(data, 1),
@@ -218,7 +218,7 @@ void NEUpsampleLayerKernel::upsample_qasymm8_nhwc(const arm_compute::Window &win
const int offset_y_out = _output->info()->strides_in_bytes().y() / sizeof(uint8_t);
const int offset_z_out = _output->info()->strides_in_bytes().z() / sizeof(uint8_t);
- execute_window_loop(window_out, [&](const Coordinates & id)
+ execute_window_loop(window_out, [&](const Coordinates &)
{
const uint8x16_t data = vld1q_u8(reinterpret_cast<const uint8_t *>(input.ptr()));
auto out = reinterpret_cast<uint8_t *>(output.ptr());
@@ -245,7 +245,7 @@ void NEUpsampleLayerKernel::upsample_f16_nchw(const arm_compute::Window &window)
Iterator output(_output, window_out);
const int offset_y_out = _output->info()->strides_in_bytes().y() / sizeof(float16_t);
- execute_window_loop(window_out, [&](const Coordinates & id)
+ execute_window_loop(window_out, [&](const Coordinates &)
{
const float16x8_t data = vld1q_f16(reinterpret_cast<const float16_t *>(input.ptr()));
const float16x8_t data_out1 = { vgetq_lane_f16(data, 0), vgetq_lane_f16(data, 0), vgetq_lane_f16(data, 1), vgetq_lane_f16(data, 1),
@@ -278,7 +278,7 @@ void NEUpsampleLayerKernel::upsample_f16_nhwc(const arm_compute::Window &window)
const int offset_y_out = _output->info()->strides_in_bytes().y() / sizeof(float16_t);
const int offset_z_out = _output->info()->strides_in_bytes().z() / sizeof(float16_t);
- execute_window_loop(window_out, [&](const Coordinates & id)
+ execute_window_loop(window_out, [&](const Coordinates &)
{
const float16x8_t data = vld1q_f16(reinterpret_cast<const float16_t *>(input.ptr()));
auto out = reinterpret_cast<float16_t *>(output.ptr());