Diffstat (limited to 'src')
-rw-r--r--  src/core/AccessWindowAutoPadding.cpp | 6
-rw-r--r--  src/core/CL/kernels/CLComparisonKernel.cpp | 4
-rw-r--r--  src/core/CL/kernels/CLElementwiseOperationKernel.cpp | 4
-rw-r--r--  src/core/CL/kernels/CLGaussian5x5Kernel.cpp | 12
-rw-r--r--  src/core/CL/kernels/CLGaussianPyramidKernel.cpp | 6
-rw-r--r--  src/core/CL/kernels/CLMinMaxLayerKernel.cpp | 4
-rw-r--r--  src/core/CPP/kernels/CPPUpsampleKernel.cpp | 2
-rw-r--r--  src/core/Error.cpp | 11
-rw-r--r--  src/core/GLES_COMPUTE/GCKernelLibrary.cpp | 6
-rw-r--r--  src/core/GLES_COMPUTE/kernels/GCPoolingLayerKernel.cpp | 7
-rw-r--r--  src/core/NEON/kernels/NECropKernel.cpp | 2
-rw-r--r--  src/core/NEON/kernels/NEDilateKernel.cpp | 5
-rw-r--r--  src/core/NEON/kernels/NEDirectConvolutionLayerKernel.cpp | 14
-rw-r--r--  src/core/NEON/kernels/NEErodeKernel.cpp | 5
-rw-r--r--  src/core/NEON/kernels/NEFillBorderKernel.cpp | 8
-rw-r--r--  src/core/NEON/kernels/NEGEMMInterleave4x4Kernel.cpp | 4
-rw-r--r--  src/core/NEON/kernels/NELKTrackerKernel.cpp | 12
-rw-r--r--  src/core/NEON/kernels/NEPoolingLayerKernel.cpp | 3
-rw-r--r--  src/core/NEON/kernels/NERemapKernel.cpp | 10
-rw-r--r--  src/core/NEON/kernels/NEScaleKernel.cpp | 5
-rw-r--r--  src/core/utils/helpers/tensor_transform.cpp | 1
-rw-r--r--  src/core/utils/logging/LoggerRegistry.cpp | 4
-rw-r--r--  src/runtime/BlobLifetimeManager.cpp | 4
-rw-r--r--  src/runtime/CL/CLHelpers.cpp | 18
-rw-r--r--  src/runtime/CL/CLMemory.cpp | 6
-rw-r--r--  src/runtime/CL/CLMultiHOG.cpp | 9
-rw-r--r--  src/runtime/CL/CLPyramid.cpp | 14
-rw-r--r--  src/runtime/CL/CLTensorAllocator.cpp | 2
-rw-r--r--  src/runtime/CL/CLTuner.cpp | 2
-rw-r--r--  src/runtime/CL/functions/CLConvolution.cpp | 12
-rw-r--r--  src/runtime/CL/functions/CLDepthConcatenateLayer.cpp | 7
-rw-r--r--  src/runtime/CL/functions/CLDepthwiseConvolutionLayer.cpp | 3
-rw-r--r--  src/runtime/CL/functions/CLFFT1D.cpp | 2
-rw-r--r--  src/runtime/CL/functions/CLGEMMConvolutionLayer.cpp | 4
-rw-r--r--  src/runtime/CL/functions/CLGaussianPyramid.cpp | 14
-rw-r--r--  src/runtime/CL/functions/CLHOGMultiDetection.cpp | 29
-rw-r--r--  src/runtime/CL/functions/CLHarrisCorners.cpp | 8
-rw-r--r--  src/runtime/CL/functions/CLLaplacianPyramid.cpp | 6
-rw-r--r--  src/runtime/CL/functions/CLLaplacianReconstruct.cpp | 8
-rw-r--r--  src/runtime/CL/functions/CLOpticalFlow.cpp | 16
-rw-r--r--  src/runtime/CL/functions/CLPadLayer.cpp | 17
-rw-r--r--  src/runtime/CL/functions/CLReduceMean.cpp | 18
-rw-r--r--  src/runtime/CL/functions/CLReductionOperation.cpp | 28
-rw-r--r--  src/runtime/CL/functions/CLSplit.cpp | 6
-rw-r--r--  src/runtime/CL/functions/CLStackLayer.cpp | 4
-rw-r--r--  src/runtime/CL/functions/CLUnstack.cpp | 4
-rw-r--r--  src/runtime/CL/functions/CLWidthConcatenateLayer.cpp | 2
-rw-r--r--  src/runtime/CL/tuners/CLLWSList.cpp | 4
-rw-r--r--  src/runtime/CPP/functions/CPPDetectionOutputLayer.cpp | 4
-rw-r--r--  src/runtime/CPUUtils.cpp | 38
-rw-r--r--  src/runtime/Distribution1D.cpp | 7
-rw-r--r--  src/runtime/GLES_COMPUTE/GCMemory.cpp | 6
-rw-r--r--  src/runtime/GLES_COMPUTE/GCScheduler.cpp | 10
-rw-r--r--  src/runtime/HOG.cpp | 11
-rw-r--r--  src/runtime/LutAllocator.cpp | 10
-rw-r--r--  src/runtime/Memory.cpp | 6
-rw-r--r--  src/runtime/MultiHOG.cpp | 9
-rw-r--r--  src/runtime/NEON/functions/NEConcatenateLayer.cpp | 1
-rw-r--r--  src/runtime/NEON/functions/NEGEMMAssemblyDispatch.cpp | 2
-rw-r--r--  src/runtime/NEON/functions/NEHarrisCorners.cpp | 6
-rw-r--r--  src/runtime/NEON/functions/NEHistogram.cpp | 8
-rw-r--r--  src/runtime/NEON/functions/NELaplacianPyramid.cpp | 6
-rw-r--r--  src/runtime/NEON/functions/NELaplacianReconstruct.cpp | 8
-rw-r--r--  src/runtime/NEON/functions/NEPadLayer.cpp | 22
-rw-r--r--  src/runtime/NEON/functions/NEReduceMean.cpp | 18
-rw-r--r--  src/runtime/NEON/functions/NESplit.cpp | 4
-rw-r--r--  src/runtime/NEON/functions/NEStackLayer.cpp | 4
-rw-r--r--  src/runtime/NEON/functions/NEUnstack.cpp | 2
-rw-r--r--  src/runtime/NEON/functions/NEWidthConcatenateLayer.cpp | 4
-rw-r--r--  src/runtime/Pyramid.cpp | 26
70 files changed, 305 insertions, 289 deletions
diff --git a/src/core/AccessWindowAutoPadding.cpp b/src/core/AccessWindowAutoPadding.cpp
index 74af99bbb9..cfb36e1da1 100644
--- a/src/core/AccessWindowAutoPadding.cpp
+++ b/src/core/AccessWindowAutoPadding.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018 ARM Limited.
+ * Copyright (c) 2017-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -47,10 +47,10 @@ ValidRegion AccessWindowAutoPadding::compute_valid_region() const
{
if(_info == nullptr)
{
- return ValidRegion();
+ return ValidRegion{};
}
- return ValidRegion(Coordinates(), _info->tensor_shape());
+ return ValidRegion{ Coordinates(), _info->tensor_shape() };
}
void AccessWindowAutoPadding::set_valid_region()
diff --git a/src/core/CL/kernels/CLComparisonKernel.cpp b/src/core/CL/kernels/CLComparisonKernel.cpp
index f5f5a0fbd6..4f44851ef8 100644
--- a/src/core/CL/kernels/CLComparisonKernel.cpp
+++ b/src/core/CL/kernels/CLComparisonKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018 ARM Limited.
+ * Copyright (c) 2018-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -219,6 +219,6 @@ BorderSize CLComparisonKernel::border_size() const
const unsigned int replicateSize = _output->info()->dimension(0) - std::min(_input1->info()->dimension(0), _input2->info()->dimension(0));
const unsigned int border = std::min<unsigned int>(num_elems_processed_per_iteration - 1U, replicateSize);
- return BorderSize(0, border, 0, 0);
+ return BorderSize{ 0, border, 0, 0 };
}
} // namespace arm_compute
diff --git a/src/core/CL/kernels/CLElementwiseOperationKernel.cpp b/src/core/CL/kernels/CLElementwiseOperationKernel.cpp
index 37eeeb78bf..63c9244961 100644
--- a/src/core/CL/kernels/CLElementwiseOperationKernel.cpp
+++ b/src/core/CL/kernels/CLElementwiseOperationKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018 ARM Limited.
+ * Copyright (c) 2018-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -281,7 +281,7 @@ BorderSize CLElementwiseOperationKernel::border_size() const
{
const unsigned int replicateSize = _output->info()->dimension(0) - std::min(_input1->info()->dimension(0), _input2->info()->dimension(0));
const unsigned int border = std::min<unsigned int>(num_elems_processed_per_iteration - 1U, replicateSize);
- return BorderSize(0, border, 0, 0);
+ return BorderSize{ 0, border, 0, 0 };
}
/** Arithmetic operations with saturation*/
diff --git a/src/core/CL/kernels/CLGaussian5x5Kernel.cpp b/src/core/CL/kernels/CLGaussian5x5Kernel.cpp
index bd523c883d..3b45b07ed9 100644
--- a/src/core/CL/kernels/CLGaussian5x5Kernel.cpp
+++ b/src/core/CL/kernels/CLGaussian5x5Kernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2017 ARM Limited.
+ * Copyright (c) 2016-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -29,17 +29,17 @@ using namespace arm_compute;
void CLGaussian5x5HorKernel::configure(const ICLTensor *input, ICLTensor *output, bool border_undefined)
{
- const int16_t matrix[] = { 1, 4, 6, 4, 1 };
+ const std::array<int16_t, 5> matrix = { 1, 4, 6, 4, 1 };
// Set arguments
- CLSeparableConvolution5x5HorKernel::configure(input, output, matrix, border_undefined);
+ CLSeparableConvolution5x5HorKernel::configure(input, output, matrix.data(), border_undefined);
}
void CLGaussian5x5VertKernel::configure(const ICLTensor *input, ICLTensor *output, bool border_undefined)
{
- const uint32_t scale = 256;
- const int16_t matrix[] = { 1, 4, 6, 4, 1 };
+ const uint32_t scale = 256;
+ const std::array<int16_t, 5> matrix = { 1, 4, 6, 4, 1 };
// Set arguments
- CLSeparableConvolution5x5VertKernel::configure(input, output, matrix, scale, border_undefined);
+ CLSeparableConvolution5x5VertKernel::configure(input, output, matrix.data(), scale, border_undefined);
}
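The hunk above illustrates a pattern used throughout this commit: fixed-size coefficient tables move from raw C arrays to std::array, and legacy interfaces that still take a raw pointer receive .data(). A minimal, standalone sketch of the idea (the helper below is illustrative, not the kernel's configure() API):

// Sketch only: a fixed-size table as std::array, handed to a pointer-based API.
#include <array>
#include <cstdint>
#include <numeric>

// Hypothetical stand-in for a configure() call that expects a raw pointer.
static int64_t sum_coefficients(const int16_t *coeffs, size_t count)
{
    return std::accumulate(coeffs, coeffs + count, int64_t{ 0 });
}

int main()
{
    const std::array<int16_t, 5> matrix = { 1, 4, 6, 4, 1 };
    // .data()/.size() keep the old pointer-based interface working unchanged.
    return sum_coefficients(matrix.data(), matrix.size()) == 16 ? 0 : 1;
}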
diff --git a/src/core/CL/kernels/CLGaussianPyramidKernel.cpp b/src/core/CL/kernels/CLGaussianPyramidKernel.cpp
index 6b729c8585..c9c7bf39a9 100644
--- a/src/core/CL/kernels/CLGaussianPyramidKernel.cpp
+++ b/src/core/CL/kernels/CLGaussianPyramidKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018 ARM Limited.
+ * Copyright (c) 2017-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -38,7 +38,7 @@ CLGaussianPyramidHorKernel::CLGaussianPyramidHorKernel()
BorderSize CLGaussianPyramidHorKernel::border_size() const
{
- return BorderSize(0, 2);
+ return BorderSize{ 0, 2 };
}
void CLGaussianPyramidHorKernel::configure(const ICLTensor *input, ICLTensor *output)
@@ -130,7 +130,7 @@ CLGaussianPyramidVertKernel::CLGaussianPyramidVertKernel()
BorderSize CLGaussianPyramidVertKernel::border_size() const
{
- return BorderSize(2, 0);
+ return BorderSize{ 2, 0 };
}
void CLGaussianPyramidVertKernel::configure(const ICLTensor *input, ICLTensor *output)
diff --git a/src/core/CL/kernels/CLMinMaxLayerKernel.cpp b/src/core/CL/kernels/CLMinMaxLayerKernel.cpp
index fa7b678e86..92b5f8d505 100644
--- a/src/core/CL/kernels/CLMinMaxLayerKernel.cpp
+++ b/src/core/CL/kernels/CLMinMaxLayerKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018 ARM Limited.
+ * Copyright (c) 2017-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -127,7 +127,7 @@ void CLMinMaxLayerKernel::reset(cl::CommandQueue &queue)
Iterator output(_output, window_output);
// Reset output
- execute_window_loop(window_output, [&](const Coordinates & id)
+ execute_window_loop(window_output, [&](const Coordinates &)
{
auto *ptr = reinterpret_cast<float *>(output.ptr());
ptr[0] = std::numeric_limits<float>::max();
diff --git a/src/core/CPP/kernels/CPPUpsampleKernel.cpp b/src/core/CPP/kernels/CPPUpsampleKernel.cpp
index f04728d30d..d29c0f72f1 100644
--- a/src/core/CPP/kernels/CPPUpsampleKernel.cpp
+++ b/src/core/CPP/kernels/CPPUpsampleKernel.cpp
@@ -94,7 +94,7 @@ void CPPUpsampleKernel::run(const Window &window, const ThreadInfo &info)
Iterator in(_input, window);
Iterator out(_output, window_out);
- execute_window_loop(window, [&](const Coordinates & id)
+ execute_window_loop(window, [&](const Coordinates &)
{
memcpy(out.ptr(), in.ptr(), element_size);
},
diff --git a/src/core/Error.cpp b/src/core/Error.cpp
index e7b43655a2..45cce66804 100644
--- a/src/core/Error.cpp
+++ b/src/core/Error.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2018 ARM Limited.
+ * Copyright (c) 2016-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -23,6 +23,7 @@
*/
#include "arm_compute/core/Error.h"
+#include <array>
#include <cstdarg>
#include <cstdio>
#include <iostream>
@@ -32,11 +33,11 @@ using namespace arm_compute;
Status arm_compute::create_error_va_list(ErrorCode error_code, const char *function, const char *file, const int line, const char *msg, va_list args)
{
- char out[512];
- int offset = snprintf(out, sizeof(out), "in %s %s:%d: ", function, file, line);
- vsnprintf(out + offset, sizeof(out) - offset, msg, args);
+ std::array<char, 512> out{ 0 };
+ int offset = snprintf(out.data(), out.size(), "in %s %s:%d: ", function, file, line);
+ vsnprintf(out.data() + offset, out.size() - offset, msg, args);
- return Status(error_code, std::string(out));
+ return Status(error_code, std::string(out.data()));
}
Status arm_compute::create_error(ErrorCode error_code, const char *function, const char *file, const int line, const char *msg, ...)
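The Error.cpp change replaces a stack char[512] with std::array<char, 512>, writing through .data() and using .size() where sizeof() was used before. A hedged sketch of the same shape, assuming an illustrative helper name rather than the library's create_error_va_list signature:

// Sketch only: formatting into a std::array-backed buffer.
#include <array>
#include <cstdarg>
#include <cstdio>
#include <string>

static std::string format_message(const char *fmt, ...)
{
    std::array<char, 512> out{ 0 };
    va_list args;
    va_start(args, fmt);
    vsnprintf(out.data(), out.size(), fmt, args);
    va_end(args);
    return std::string(out.data()); // stops at the terminating '\0'
}

int main()
{
    return format_message("in %s:%d", "file.cpp", 42) == "in file.cpp:42" ? 0 : 1;
}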
diff --git a/src/core/GLES_COMPUTE/GCKernelLibrary.cpp b/src/core/GLES_COMPUTE/GCKernelLibrary.cpp
index 25ac02e8f4..0af8c7d4cc 100644
--- a/src/core/GLES_COMPUTE/GCKernelLibrary.cpp
+++ b/src/core/GLES_COMPUTE/GCKernelLibrary.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018 ARM Limited.
+ * Copyright (c) 2017-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -387,7 +387,7 @@ GCKernel GCKernelLibrary::create_kernel(const std::string &shader_name, const St
return kernel;
}
-const std::string GCKernelLibrary::preprocess_shader(const std::string &shader_source) const
+std::string GCKernelLibrary::preprocess_shader(const std::string &shader_source) const
{
enum class ParserStage
{
@@ -399,7 +399,7 @@ const std::string GCKernelLibrary::preprocess_shader(const std::string &shader_s
// Define a GLES compute shader parser function
std::function<std::string(const std::string &, ParserStage, int)> cs_parser;
- cs_parser = [&](const std::string & src, ParserStage stage, int nested_level) -> std::string
+ cs_parser = [&](const std::string & src, ParserStage stage, int) -> std::string
{
std::string dst;
diff --git a/src/core/GLES_COMPUTE/kernels/GCPoolingLayerKernel.cpp b/src/core/GLES_COMPUTE/kernels/GCPoolingLayerKernel.cpp
index f225ebde6b..50171a1015 100644
--- a/src/core/GLES_COMPUTE/kernels/GCPoolingLayerKernel.cpp
+++ b/src/core/GLES_COMPUTE/kernels/GCPoolingLayerKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018 ARM Limited.
+ * Copyright (c) 2017-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -333,7 +333,10 @@ void GCPoolingLayerKernel::run(const Window &window)
ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(IKernel::window(), window);
- unsigned int pool_pad_x, pool_pad_y, pool_stride_x, pool_stride_y = 0;
+ unsigned int pool_pad_x;
+ unsigned int pool_pad_y;
+ unsigned int pool_stride_x;
+ unsigned int pool_stride_y;
std::tie(pool_pad_x, pool_pad_y) = _pool_info.pad_stride_info().pad();
std::tie(pool_stride_x, pool_stride_y) = _pool_info.pad_stride_info().stride();
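The reason the declaration above is split: in a comma-separated declaration such as `unsigned int a, b, c, d = 0;` the initializer applies only to the last variable, leaving the others uninitialized until std::tie assigns them. A small sketch (variable names are placeholders, not the kernel's members):

// Sketch only: one declaration per line makes the initialization explicit.
#include <tuple>
#include <utility>

int main()
{
    // unsigned int pad_x, pad_y, stride_x, stride_y = 0; // only stride_y would be initialized
    unsigned int pad_x    = 0;
    unsigned int pad_y    = 0;
    unsigned int stride_x = 0;
    unsigned int stride_y = 0;
    std::tie(pad_x, pad_y)       = std::make_pair(1U, 2U);
    std::tie(stride_x, stride_y) = std::make_pair(2U, 2U);
    return (pad_x + pad_y + stride_x + stride_y) == 7U ? 0 : 1;
}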
diff --git a/src/core/NEON/kernels/NECropKernel.cpp b/src/core/NEON/kernels/NECropKernel.cpp
index b6fe5819e4..f16eb3e6bd 100644
--- a/src/core/NEON/kernels/NECropKernel.cpp
+++ b/src/core/NEON/kernels/NECropKernel.cpp
@@ -178,7 +178,7 @@ inline void out_of_bounds_crop_window(const ITensor *output, float *output_ptr,
template <bool is_height_flipped, bool has_cols_in_bounds, bool has_cols_out_of_bounds_before, bool has_cols_out_of_bounds_after>
inline void execute_window(const ITensor *input, const ITensor *output, Coordinates input_offset, float extrapolation_value,
- const uint32_t rows_out_of_bounds[], const uint32_t cols_out_of_bounds[], NECropKernel::InBoundsCropFunction *in_bounds_crop_function)
+ const std::array<uint32_t, 2> &rows_out_of_bounds, const std::array<uint32_t, 2> &cols_out_of_bounds, NECropKernel::InBoundsCropFunction *in_bounds_crop_function)
{
// Output is always float.
const int window_step_x = 16 / sizeof(float);
diff --git a/src/core/NEON/kernels/NEDilateKernel.cpp b/src/core/NEON/kernels/NEDilateKernel.cpp
index 3ee00a47d3..e761815f9e 100644
--- a/src/core/NEON/kernels/NEDilateKernel.cpp
+++ b/src/core/NEON/kernels/NEDilateKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2017 ARM Limited.
+ * Copyright (c) 2016-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -94,7 +94,8 @@ void NEDilateKernel::run(const Window &window, const ThreadInfo &info)
uint8x8_t bot_high_data = vget_high_u8(bot_data);
uint8x8_t bot_low_data = vget_low_u8(bot_data);
- uint8x8_t p0, p1;
+ uint8x8_t p0;
+ uint8x8_t p1;
p0 = top_low_data;
p1 = vext_u8(top_low_data, top_high_data, 1);
diff --git a/src/core/NEON/kernels/NEDirectConvolutionLayerKernel.cpp b/src/core/NEON/kernels/NEDirectConvolutionLayerKernel.cpp
index 162c4b1ace..d557cfa1bd 100644
--- a/src/core/NEON/kernels/NEDirectConvolutionLayerKernel.cpp
+++ b/src/core/NEON/kernels/NEDirectConvolutionLayerKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018 ARM Limited.
+ * Copyright (c) 2017-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -192,12 +192,12 @@ public:
execute_window_loop(window_out, [&](const Coordinates & id)
{
- const uint8_t *input_ptr = in.ptr() - conv_pad_left * input_stride_x - conv_pad_top * input_stride_y;
- uint8_t *out_ptr = out.ptr();
- int ih = 0;
- int oh = 0;
- float32x4_t accum0[small_tensor_size_optim] = { vdupq_n_f32(0), vdupq_n_f32(0), vdupq_n_f32(0), vdupq_n_f32(0), vdupq_n_f32(0), vdupq_n_f32(0), vdupq_n_f32(0), vdupq_n_f32(0) };
- float32x4_t accum1[small_tensor_size_optim] = { vdupq_n_f32(0), vdupq_n_f32(0), vdupq_n_f32(0), vdupq_n_f32(0), vdupq_n_f32(0), vdupq_n_f32(0), vdupq_n_f32(0), vdupq_n_f32(0) };
+ const uint8_t *input_ptr = in.ptr() - conv_pad_left * input_stride_x - conv_pad_top * input_stride_y;
+ uint8_t *out_ptr = out.ptr();
+ int ih = 0;
+ int oh = 0;
+ std::array<float32x4_t, 8> accum0 = { vdupq_n_f32(0), vdupq_n_f32(0), vdupq_n_f32(0), vdupq_n_f32(0), vdupq_n_f32(0), vdupq_n_f32(0), vdupq_n_f32(0), vdupq_n_f32(0) };
+ std::array<float32x4_t, 8> accum1 = { vdupq_n_f32(0), vdupq_n_f32(0), vdupq_n_f32(0), vdupq_n_f32(0), vdupq_n_f32(0), vdupq_n_f32(0), vdupq_n_f32(0), vdupq_n_f32(0) };
for(int oz = 0; oz < range_z; ++oz)
{
accum0[0] = accum0[1] = accum0[2] = accum0[3] = accum0[4] = accum0[5] = accum0[6] = accum0[7] = vdupq_n_f32(0.f);
diff --git a/src/core/NEON/kernels/NEErodeKernel.cpp b/src/core/NEON/kernels/NEErodeKernel.cpp
index 88c20f8174..2a538ecd0f 100644
--- a/src/core/NEON/kernels/NEErodeKernel.cpp
+++ b/src/core/NEON/kernels/NEErodeKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2017 ARM Limited.
+ * Copyright (c) 2016-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -94,7 +94,8 @@ void NEErodeKernel::run(const Window &window, const ThreadInfo &info)
uint8x8_t bot_high_data = vget_high_u8(bot_data);
uint8x8_t bot_low_data = vget_low_u8(bot_data);
- uint8x8_t p0, p1;
+ uint8x8_t p0;
+ uint8x8_t p1;
p0 = top_low_data;
p1 = vext_u8(top_low_data, top_high_data, 1);
diff --git a/src/core/NEON/kernels/NEFillBorderKernel.cpp b/src/core/NEON/kernels/NEFillBorderKernel.cpp
index f4046e0851..4127dc8fbd 100644
--- a/src/core/NEON/kernels/NEFillBorderKernel.cpp
+++ b/src/core/NEON/kernels/NEFillBorderKernel.cpp
@@ -168,7 +168,7 @@ void NEFillBorderKernel::fill_replicate_single_channel(const Window &window)
Iterator vertical_it(_tensor, vertical);
- execute_window_loop(vertical, [&](const Coordinates & id)
+ execute_window_loop(vertical, [&](const Coordinates &)
{
uint8_t *base_addr = start_valid_region + vertical_it.offset();
// Fill left and right borders
@@ -188,7 +188,7 @@ void NEFillBorderKernel::fill_replicate_single_channel(const Window &window)
Iterator plane_it(_tensor, window);
// Iterate over all XY planes
- execute_window_loop(window, [&](const Coordinates & id)
+ execute_window_loop(window, [&](const Coordinates &)
{
uint8_t *base_addr = start_valid_region + plane_it.offset();
// Top border
@@ -224,7 +224,7 @@ void NEFillBorderKernel::fill_constant_value_single_channel(const Window &window
Iterator vertical_it(_tensor, vertical);
- execute_window_loop(vertical, [&](const Coordinates & id)
+ execute_window_loop(vertical, [&](const Coordinates &)
{
uint8_t *base_addr = start_valid_region + vertical_it.offset();
// Fill left and right borders
@@ -244,7 +244,7 @@ void NEFillBorderKernel::fill_constant_value_single_channel(const Window &window
Iterator plane_it(_tensor, window);
// Iterate over all XY planes
- execute_window_loop(window, [&](const Coordinates & id)
+ execute_window_loop(window, [&](const Coordinates &)
{
uint8_t *base_addr = start_valid_region + plane_it.offset();
// Top border
diff --git a/src/core/NEON/kernels/NEGEMMInterleave4x4Kernel.cpp b/src/core/NEON/kernels/NEGEMMInterleave4x4Kernel.cpp
index 7769d9eb8c..c929983162 100644
--- a/src/core/NEON/kernels/NEGEMMInterleave4x4Kernel.cpp
+++ b/src/core/NEON/kernels/NEGEMMInterleave4x4Kernel.cpp
@@ -126,7 +126,7 @@ void gemm_interleave_16bit_elements(const ITensor *input, ITensor *output, const
win_out.set_dimension_step(Window::DimX, 16);
Iterator out(output, win_out);
- execute_window_loop(window, [&](const Coordinates & id)
+ execute_window_loop(window, [&](const Coordinates &)
{
const uint16x4x4_t data =
{
@@ -154,7 +154,7 @@ void gemm_interleave_32bit_elements(const ITensor *input, ITensor *output, const
win_out.set_dimension_step(Window::DimX, 16);
Iterator out(output, win_out);
- execute_window_loop(window, [&](const Coordinates & id)
+ execute_window_loop(window, [&](const Coordinates &)
{
const uint32x4x4_t data =
{
diff --git a/src/core/NEON/kernels/NELKTrackerKernel.cpp b/src/core/NEON/kernels/NELKTrackerKernel.cpp
index 83593e7f0d..ddf869e303 100644
--- a/src/core/NEON/kernels/NELKTrackerKernel.cpp
+++ b/src/core/NEON/kernels/NELKTrackerKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2018 ARM Limited.
+ * Copyright (c) 2016-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -405,9 +405,9 @@ void NELKTrackerKernel::run(const Window &window, const ThreadInfo &info)
init_keypoints(list_start, list_end);
- const int buffer_size = _window_dimension * _window_dimension;
- int32_t bilinear_ix[buffer_size];
- int32_t bilinear_iy[buffer_size];
+ const int buffer_size = _window_dimension * _window_dimension;
+ std::vector<int32_t> bilinear_ix(buffer_size);
+ std::vector<int32_t> bilinear_iy(buffer_size);
const int half_window = _window_dimension / 2;
@@ -444,7 +444,7 @@ void NELKTrackerKernel::run(const Window &window, const ThreadInfo &info)
int iA12 = 0;
int iA22 = 0;
- std::tie(iA11, iA12, iA22) = compute_spatial_gradient_matrix(old_keypoint, bilinear_ix, bilinear_iy);
+ std::tie(iA11, iA12, iA22) = compute_spatial_gradient_matrix(old_keypoint, bilinear_ix.data(), bilinear_iy.data());
const float A11 = iA11 * FLT_SCALE;
const float A12 = iA12 * FLT_SCALE;
@@ -490,7 +490,7 @@ void NELKTrackerKernel::run(const Window &window, const ThreadInfo &info)
int ib1 = 0;
int ib2 = 0;
- std::tie(ib1, ib2) = compute_image_mismatch_vector(old_keypoint, new_keypoint, bilinear_ix, bilinear_iy);
+ std::tie(ib1, ib2) = compute_image_mismatch_vector(old_keypoint, new_keypoint, bilinear_ix.data(), bilinear_iy.data());
double b1 = ib1 * FLT_SCALE;
double b2 = ib2 * FLT_SCALE;
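Here a runtime-sized stack array (a variable-length array, which is a non-standard extension in C++) becomes a std::vector, with .data() passed to the existing pointer-based helpers. A self-contained sketch of the pattern, with an illustrative helper in place of compute_spatial_gradient_matrix:

// Sketch only: std::vector in place of a VLA, raw pointer via .data().
#include <cstdint>
#include <numeric>
#include <vector>

static int64_t sum_window(const int32_t *values, size_t count)
{
    return std::accumulate(values, values + count, int64_t{ 0 });
}

int main()
{
    const int window_dimension = 5;                   // known only at run time
    const int buffer_size      = window_dimension * window_dimension;
    std::vector<int32_t> bilinear_ix(buffer_size, 1); // heap storage, value-initialised
    return sum_window(bilinear_ix.data(), bilinear_ix.size()) == buffer_size ? 0 : 1;
}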
diff --git a/src/core/NEON/kernels/NEPoolingLayerKernel.cpp b/src/core/NEON/kernels/NEPoolingLayerKernel.cpp
index 0b90d9f290..ac2ffa1988 100644
--- a/src/core/NEON/kernels/NEPoolingLayerKernel.cpp
+++ b/src/core/NEON/kernels/NEPoolingLayerKernel.cpp
@@ -352,7 +352,8 @@ void NEPoolingLayerKernel::configure(const ITensor *input, ITensor *output, cons
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments_pool_info(pool_size.x(), pool_size.y()));
// Check output dimensions
- unsigned int pooled_w, pooled_h;
+ unsigned int pooled_w;
+ unsigned int pooled_h;
std::tie(pooled_w, pooled_h) = scaled_dimensions(input->info()->dimension(idx_width),
input->info()->dimension(idx_height),
pool_size.x(),
diff --git a/src/core/NEON/kernels/NERemapKernel.cpp b/src/core/NEON/kernels/NERemapKernel.cpp
index edb3ffe1df..3c871de73a 100644
--- a/src/core/NEON/kernels/NERemapKernel.cpp
+++ b/src/core/NEON/kernels/NERemapKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018 ARM Limited.
+ * Copyright (c) 2017-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -113,8 +113,8 @@ void NERemapKernel::configure(const ITensor *input, const ITensor *map_x, const
AccessWindowStatic input_access(input->info(), -border_size().left, -border_size().top, access_right, input->info()->dimension(1) + border_size().bottom);
AccessWindowHorizontal output_access(output->info(), 0, num_elems_processed_per_iteration);
- AccessWindowHorizontal mapx_access(map_x->info(), 0, num_elems_processed_per_iteration);
- AccessWindowHorizontal mapy_access(map_y->info(), 0, num_elems_processed_per_iteration);
+ AccessWindowHorizontal mapx_access(map_x->info(), 0, num_elems_processed_per_iteration);
+ AccessWindowHorizontal mapy_access(map_y->info(), 0, num_elems_processed_per_iteration);
update_window_and_padding(win, input_access, mapx_access, mapy_access, output_access);
@@ -140,7 +140,7 @@ void NERemapKernel::remap_nearest(const Window &window)
const float32x4_t height = vdupq_n_f32(static_cast<float>(_input->info()->dimension(1)));
const int32x4_t in_stride = vdupq_n_s32(static_cast<int32_t>(_input->info()->strides_in_bytes()[1]));
- execute_window_loop(window, [&](const Coordinates & id)
+ execute_window_loop(window, [&](const Coordinates &)
{
const auto mapx_ptr = reinterpret_cast<const float *>(mapx.ptr());
const auto mapy_ptr = reinterpret_cast<const float *>(mapy.ptr());
@@ -190,7 +190,7 @@ void NERemapKernel::remap_bilinear(const Window &window)
const size_t height = _input->info()->dimension(1);
const size_t in_stride = _input->info()->strides_in_bytes()[1];
- execute_window_loop(window, [&](const Coordinates & id)
+ execute_window_loop(window, [&](const Coordinates &)
{
const auto mapx_ptr = reinterpret_cast<float *>(mapx.ptr());
const auto mapy_ptr = reinterpret_cast<float *>(mapy.ptr());
diff --git a/src/core/NEON/kernels/NEScaleKernel.cpp b/src/core/NEON/kernels/NEScaleKernel.cpp
index 8c4a70cdad..33540393e4 100644
--- a/src/core/NEON/kernels/NEScaleKernel.cpp
+++ b/src/core/NEON/kernels/NEScaleKernel.cpp
@@ -249,7 +249,10 @@ inline void scale_bilinear_nhwc_core(const ITensor *input, const ITensor *offset
if(is_valid(offset, -border_size, input_width - 1 + border_size, in_yi, -border_size, input_height - 1 + border_size))
{
- T a00 = 0, a01 = 0, a10 = 0, a11 = 0;
+ T a00 = 0;
+ T a01 = 0;
+ T a10 = 0;
+ T a11 = 0;
if(border_mode == BorderMode::CONSTANT)
{
diff --git a/src/core/utils/helpers/tensor_transform.cpp b/src/core/utils/helpers/tensor_transform.cpp
index 7c56390fed..f6a54a59ee 100644
--- a/src/core/utils/helpers/tensor_transform.cpp
+++ b/src/core/utils/helpers/tensor_transform.cpp
@@ -117,6 +117,7 @@ std::tuple<Coordinates, Coordinates, Coordinates> calculate_strided_slice_coords
Coordinates starts_abs{};
Coordinates ends_abs{};
Coordinates final_strides{};
+
for(unsigned int i = 0; i < input_shape.num_dimensions(); ++i)
{
const int start_i = calculate_start_on_index(input_shape, i, starts, strides, begin_mask);
diff --git a/src/core/utils/logging/LoggerRegistry.cpp b/src/core/utils/logging/LoggerRegistry.cpp
index 3a466963fd..055e770c75 100644
--- a/src/core/utils/logging/LoggerRegistry.cpp
+++ b/src/core/utils/logging/LoggerRegistry.cpp
@@ -42,12 +42,12 @@ LoggerRegistry &LoggerRegistry::get()
return _instance;
}
-void LoggerRegistry::create_logger(const std::string &name, LogLevel log_level, std::vector<std::shared_ptr<Printer>> printers)
+void LoggerRegistry::create_logger(const std::string &name, LogLevel log_level, const std::vector<std::shared_ptr<Printer>> &printers)
{
std::lock_guard<arm_compute::Mutex> lock(_mtx);
if((_loggers.find(name) == _loggers.end()) && (_reserved_loggers.find(name) == _reserved_loggers.end()))
{
- _loggers[name] = std::make_shared<Logger>(name, log_level, std::move(printers));
+ _loggers[name] = std::make_shared<Logger>(name, log_level, printers);
}
}
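The LoggerRegistry change takes the vector of shared_ptr printers by const reference instead of by value followed by a move, so the caller's vector is not copied at the call site and the callee copies it only where it is stored. A rough sketch under that assumption (Printer and the class below are placeholders, not the library's types):

// Sketch only: pass-by-const-reference for a shared_ptr container.
#include <memory>
#include <string>
#include <vector>

struct Printer
{
    std::string name;
};

class Sink
{
public:
    void set_printers(const std::vector<std::shared_ptr<Printer>> &printers)
    {
        _printers = printers; // single copy of the vector where it is retained
    }
    size_t count() const { return _printers.size(); }

private:
    std::vector<std::shared_ptr<Printer>> _printers;
};

int main()
{
    std::vector<std::shared_ptr<Printer>> printers = { std::make_shared<Printer>() };
    Sink sink;
    sink.set_printers(printers);
    return sink.count() == 1 ? 0 : 1;
}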
diff --git a/src/runtime/BlobLifetimeManager.cpp b/src/runtime/BlobLifetimeManager.cpp
index c5d42b1be1..1323bb3f8c 100644
--- a/src/runtime/BlobLifetimeManager.cpp
+++ b/src/runtime/BlobLifetimeManager.cpp
@@ -66,7 +66,7 @@ void BlobLifetimeManager::update_blobs_and_mappings()
std::vector<BlobInfo> group_sizes;
std::transform(std::begin(_free_blobs), std::end(_free_blobs), std::back_inserter(group_sizes), [](const Blob & b)
{
- return BlobInfo(b.max_size, b.max_alignment);
+ return BlobInfo{ b.max_size, b.max_alignment };
});
// Update blob sizes
@@ -75,7 +75,7 @@ void BlobLifetimeManager::update_blobs_and_mappings()
group_sizes.resize(max_size);
std::transform(std::begin(_blobs), std::end(_blobs), std::begin(group_sizes), std::begin(_blobs), [](BlobInfo lhs, BlobInfo rhs)
{
- return BlobInfo(std::max(lhs.size, rhs.size), std::max(lhs.alignment, rhs.alignment));
+ return BlobInfo{ std::max(lhs.size, rhs.size), std::max(lhs.alignment, rhs.alignment) };
});
// Calculate group mappings
diff --git a/src/runtime/CL/CLHelpers.cpp b/src/runtime/CL/CLHelpers.cpp
index 533e6fabfa..8bc7b8eb7b 100644
--- a/src/runtime/CL/CLHelpers.cpp
+++ b/src/runtime/CL/CLHelpers.cpp
@@ -47,7 +47,7 @@ void printf_callback(const char *buffer, unsigned int len, size_t complete, void
* @return A pointer to the context properties which can be used to create an opencl context
*/
-void initialise_context_properties(const cl::Platform &platform, const cl::Device &device, cl_context_properties prop[7])
+void initialise_context_properties(const cl::Platform &platform, const cl::Device &device, std::array<cl_context_properties, 7> &prop)
{
ARM_COMPUTE_UNUSED(device);
#if defined(ARM_COMPUTE_ASSERTS_ENABLED)
@@ -55,7 +55,7 @@ void initialise_context_properties(const cl::Platform &platform, const cl::Devic
if(arm_compute::device_supports_extension(device, "cl_arm_printf"))
{
// Create a cl_context with a printf_callback and user specified buffer size.
- cl_context_properties properties_printf[] =
+ std::array<cl_context_properties, 7> properties_printf =
{
CL_CONTEXT_PLATFORM, reinterpret_cast<cl_context_properties>(platform()),
// Enable a printf callback function for this context.
@@ -65,17 +65,17 @@ void initialise_context_properties(const cl::Platform &platform, const cl::Devic
CL_PRINTF_BUFFERSIZE_ARM, 0x1000,
0
};
- std::copy_n(properties_printf, 7, prop);
+ prop = properties_printf;
}
else
#endif // defined(ARM_COMPUTE_ASSERTS_ENABLED)
{
- cl_context_properties properties[] =
+ std::array<cl_context_properties, 3> properties =
{
CL_CONTEXT_PLATFORM, reinterpret_cast<cl_context_properties>(platform()),
0
};
- std::copy_n(properties, 3, prop);
+ std::copy(properties.begin(), properties.end(), prop.begin());
};
}
} //namespace
@@ -94,11 +94,11 @@ create_opencl_context_and_device()
std::vector<cl::Device> platform_devices;
p.getDevices(CL_DEVICE_TYPE_DEFAULT, &platform_devices);
ARM_COMPUTE_ERROR_ON_MSG(platform_devices.size() == 0, "Couldn't find any OpenCL device");
- device = platform_devices[0];
- cl_int err = CL_SUCCESS;
- cl_context_properties properties[7] = { 0, 0, 0, 0, 0, 0, 0 };
+ device = platform_devices[0];
+ cl_int err = CL_SUCCESS;
+ std::array<cl_context_properties, 7> properties = { 0, 0, 0, 0, 0, 0, 0 };
initialise_context_properties(p, device, properties);
- cl::Context cl_context = cl::Context(device, properties, nullptr, nullptr, &err);
+ cl::Context cl_context = cl::Context(device, properties.data(), nullptr, nullptr, &err);
ARM_COMPUTE_ERROR_ON_MSG(err != CL_SUCCESS, "Failed to create OpenCL context");
return std::make_tuple(cl_context, device, err);
}
diff --git a/src/runtime/CL/CLMemory.cpp b/src/runtime/CL/CLMemory.cpp
index 5bea85cfae..557378b6f1 100644
--- a/src/runtime/CL/CLMemory.cpp
+++ b/src/runtime/CL/CLMemory.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018 ARM Limited.
+ * Copyright (c) 2018-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -33,8 +33,8 @@ CLMemory::CLMemory()
{
}
-CLMemory::CLMemory(std::shared_ptr<ICLMemoryRegion> memory)
- : _region(nullptr), _region_owned(std::move(memory))
+CLMemory::CLMemory(const std::shared_ptr<ICLMemoryRegion> &memory)
+ : _region(nullptr), _region_owned(memory)
{
_region_owned = memory;
_region = _region_owned.get();
diff --git a/src/runtime/CL/CLMultiHOG.cpp b/src/runtime/CL/CLMultiHOG.cpp
index 88d45acd12..2577ec08ac 100644
--- a/src/runtime/CL/CLMultiHOG.cpp
+++ b/src/runtime/CL/CLMultiHOG.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -30,8 +30,9 @@
using namespace arm_compute;
CLMultiHOG::CLMultiHOG(size_t num_models)
- : _num_models(num_models), _model(arm_compute::support::cpp14::make_unique<CLHOG[]>(_num_models))
+ : _num_models(num_models), _model()
{
+ _model.resize(_num_models);
}
size_t CLMultiHOG::num_models() const
@@ -42,11 +43,11 @@ size_t CLMultiHOG::num_models() const
ICLHOG *CLMultiHOG::cl_model(size_t index)
{
ARM_COMPUTE_ERROR_ON(index >= _num_models);
- return (_model.get() + index);
+ return (&_model[index]);
}
const ICLHOG *CLMultiHOG::cl_model(size_t index) const
{
ARM_COMPUTE_ERROR_ON(index >= _num_models);
- return (_model.get() + index);
+ return (&_model[index]);
}
\ No newline at end of file
diff --git a/src/runtime/CL/CLPyramid.cpp b/src/runtime/CL/CLPyramid.cpp
index 865f389f7f..6d5dba0031 100644
--- a/src/runtime/CL/CLPyramid.cpp
+++ b/src/runtime/CL/CLPyramid.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2017 ARM Limited.
+ * Copyright (c) 2016-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -35,7 +35,7 @@
using namespace arm_compute;
CLPyramid::CLPyramid()
- : _info(), _pyramid(nullptr)
+ : _info(), _pyramid()
{
}
@@ -51,8 +51,8 @@ void CLPyramid::init_auto_padding(const PyramidInfo &info)
void CLPyramid::internal_init(const PyramidInfo &info, bool auto_padding)
{
- _info = info;
- _pyramid = arm_compute::support::cpp14::make_unique<CLTensor[]>(_info.num_levels());
+ _info = info;
+ _pyramid.resize(_info.num_levels());
size_t w = _info.width();
size_t h = _info.height();
@@ -109,11 +109,9 @@ void CLPyramid::internal_init(const PyramidInfo &info, bool auto_padding)
void CLPyramid::allocate()
{
- ARM_COMPUTE_ERROR_ON(_pyramid == nullptr);
-
for(size_t i = 0; i < _info.num_levels(); ++i)
{
- (_pyramid.get() + i)->allocator()->allocate();
+ _pyramid[i].allocator()->allocate();
}
}
@@ -126,5 +124,5 @@ CLTensor *CLPyramid::get_pyramid_level(size_t index) const
{
ARM_COMPUTE_ERROR_ON(index >= _info.num_levels());
- return (_pyramid.get() + index);
+ return &_pyramid[index];
}
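CLMultiHOG, CLPyramid and the function diffs below all swap `make_unique<T[]>(n)` owning arrays for std::vector<T>: elements are reached with operator[], `&v[i]` replaces `v.get() + i`, and emptiness checks replace null checks. A compact sketch of the idea ("Level" is a stand-in type, not the library's CLTensor):

// Sketch only: std::vector as the container for per-level objects.
#include <vector>

struct Level
{
    bool allocated = false;
    void allocate() { allocated = true; }
};

int main()
{
    std::vector<Level> pyramid;
    pyramid.resize(4);            // was: make_unique<Level[]>(4)
    for(auto &level : pyramid)
    {
        level.allocate();
    }
    Level *third = &pyramid[2];   // was: pyramid.get() + 2
    return third->allocated ? 0 : 1;
}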
diff --git a/src/runtime/CL/CLTensorAllocator.cpp b/src/runtime/CL/CLTensorAllocator.cpp
index 2ce64551ae..101e4f1cd4 100644
--- a/src/runtime/CL/CLTensorAllocator.cpp
+++ b/src/runtime/CL/CLTensorAllocator.cpp
@@ -34,7 +34,7 @@ const cl::Buffer CLTensorAllocator::_empty_buffer = cl::Buffer();
namespace
{
-std::unique_ptr<ICLMemoryRegion> allocate_region(cl::Context context, size_t size, cl_uint alignment)
+std::unique_ptr<ICLMemoryRegion> allocate_region(const cl::Context &context, size_t size, cl_uint alignment)
{
// Try fine-grain SVM
std::unique_ptr<ICLMemoryRegion> region = support::cpp14::make_unique<CLFineSVMMemoryRegion>(context,
diff --git a/src/runtime/CL/CLTuner.cpp b/src/runtime/CL/CLTuner.cpp
index 8f8d3e7c3a..929def24cc 100644
--- a/src/runtime/CL/CLTuner.cpp
+++ b/src/runtime/CL/CLTuner.cpp
@@ -275,7 +275,7 @@ void CLTuner::save_to_file(const std::string &filename) const
std::ofstream fs;
fs.exceptions(std::ifstream::failbit | std::ifstream::badbit);
fs.open(filename, std::ios::out);
- for(auto kernel_data : _lws_table)
+ for(auto const &kernel_data : _lws_table)
{
fs << kernel_data.first << ";" << kernel_data.second[0] << ";" << kernel_data.second[1] << ";" << kernel_data.second[2] << std::endl;
}
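The CLTuner change binds the range-for variable as `auto const &` so each map entry is read in place instead of being copied per iteration. A small illustration, assuming a plain std::map rather than CLTuner's _lws_table:

// Sketch only: iterate a map by const reference to avoid per-element copies.
#include <map>
#include <string>

int main()
{
    const std::map<std::string, int> table = { { "kernel_a", 4 }, { "kernel_b", 8 } };
    int sum = 0;
    for(auto const &entry : table) // no copy of the key/value pair
    {
        sum += entry.second;
    }
    return sum == 12 ? 0 : 1;
}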
diff --git a/src/runtime/CL/functions/CLConvolution.cpp b/src/runtime/CL/functions/CLConvolution.cpp
index 2f43ce1974..f09585e7ec 100644
--- a/src/runtime/CL/functions/CLConvolution.cpp
+++ b/src/runtime/CL/functions/CLConvolution.cpp
@@ -58,13 +58,13 @@ void CLConvolutionSquare<matrix_size>::configure(ICLTensor *input, ICLTensor *ou
{
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8);
ARM_COMPUTE_ERROR_ON(conv == nullptr);
- int16_t conv_col[matrix_size];
- int16_t conv_row[matrix_size];
- _is_separable = separate_matrix(conv, conv_col, conv_row, matrix_size);
+ std::array<int16_t, matrix_size> conv_col{ 0 };
+ std::array<int16_t, matrix_size> conv_row{ 0 };
+ _is_separable = separate_matrix(conv, conv_col.data(), conv_row.data(), matrix_size);
if(_is_separable)
{
- std::pair<DataType, DataType> type_pair = data_type_for_convolution(conv_col, conv_row, matrix_size);
+ std::pair<DataType, DataType> type_pair = data_type_for_convolution(conv_col.data(), conv_row.data(), matrix_size);
_tmp.allocator()->init(TensorInfo(input->info()->tensor_shape(), 1, type_pair.first));
// Manage intermediate buffers
@@ -75,8 +75,8 @@ void CLConvolutionSquare<matrix_size>::configure(ICLTensor *input, ICLTensor *ou
scale = calculate_matrix_scale(conv, matrix_size);
}
- _kernel_hor.configure(input, &_tmp, conv_row, border_mode == BorderMode::UNDEFINED);
- _kernel_vert.configure(&_tmp, output, conv_col, scale, border_mode == BorderMode::UNDEFINED, type_pair.second);
+ _kernel_hor.configure(input, &_tmp, conv_row.data(), border_mode == BorderMode::UNDEFINED);
+ _kernel_vert.configure(&_tmp, output, conv_col.data(), scale, border_mode == BorderMode::UNDEFINED, type_pair.second);
_border_handler.configure(input, _kernel_hor.border_size(), border_mode, PixelValue(constant_border_value));
// Allocate intermediate buffer
diff --git a/src/runtime/CL/functions/CLDepthConcatenateLayer.cpp b/src/runtime/CL/functions/CLDepthConcatenateLayer.cpp
index 4a5f845631..f687e54552 100644
--- a/src/runtime/CL/functions/CLDepthConcatenateLayer.cpp
+++ b/src/runtime/CL/functions/CLDepthConcatenateLayer.cpp
@@ -36,8 +36,7 @@
using namespace arm_compute;
CLDepthConcatenateLayer::CLDepthConcatenateLayer() // NOLINT
- : _inputs_vector(),
- _concat_kernels_vector(),
+ : _concat_kernels_vector(),
_border_handlers_vector(),
_num_inputs(0)
{
@@ -53,8 +52,8 @@ void CLDepthConcatenateLayer::configure(const std::vector<ICLTensor *> &inputs_v
inputs_vector_info.emplace_back(inputs_vector.at(i)->info());
}
- _concat_kernels_vector = arm_compute::support::cpp14::make_unique<CLDepthConcatenateLayerKernel[]>(_num_inputs);
- _border_handlers_vector = arm_compute::support::cpp14::make_unique<CLFillBorderKernel[]>(_num_inputs);
+ _concat_kernels_vector.resize(_num_inputs);
+ _border_handlers_vector.resize(_num_inputs);
TensorShape output_shape = arm_compute::misc::shape_calculator::calculate_concatenate_shape(inputs_vector_info, Window::DimZ);
diff --git a/src/runtime/CL/functions/CLDepthwiseConvolutionLayer.cpp b/src/runtime/CL/functions/CLDepthwiseConvolutionLayer.cpp
index 8211104bda..97b0a01331 100644
--- a/src/runtime/CL/functions/CLDepthwiseConvolutionLayer.cpp
+++ b/src/runtime/CL/functions/CLDepthwiseConvolutionLayer.cpp
@@ -322,7 +322,8 @@ void CLDepthwiseConvolutionLayer::configure(ICLTensor *input, const ICLTensor *w
const QuantizationInfo output_quant_info = (output->info()->total_size() == 0) ? input->info()->quantization_info() : output->info()->quantization_info();
float multiplier = input->info()->quantization_info().scale * weights->info()->quantization_info().scale / output_quant_info.scale;
- int output_multiplier, output_shift;
+ int output_multiplier;
+ int output_shift;
quantization::calculate_quantized_multiplier_less_than_one(multiplier, &output_multiplier, &output_shift);
_output_stage_kernel.configure(&_output_reshaped, biases, output, output_multiplier, output_shift, output_quant_info.offset);
_output_reshaped.allocator()->allocate();
diff --git a/src/runtime/CL/functions/CLFFT1D.cpp b/src/runtime/CL/functions/CLFFT1D.cpp
index 67111e7e5c..49b5a2a2e6 100644
--- a/src/runtime/CL/functions/CLFFT1D.cpp
+++ b/src/runtime/CL/functions/CLFFT1D.cpp
@@ -62,7 +62,7 @@ void CLFFT1D::configure(const ICLTensor *input, ICLTensor *output, const FFT1DIn
// Create and configure FFT kernels
unsigned int Nx = 1;
_num_ffts = decomposed_vector.size();
- _fft_kernels = arm_compute::support::cpp14::make_unique<CLFFTRadixStageKernel[]>(_num_ffts);
+ _fft_kernels.resize(_num_ffts);
for(unsigned int i = 0; i < _num_ffts; ++i)
{
const unsigned int radix_for_stage = decomposed_vector.at(i);
diff --git a/src/runtime/CL/functions/CLGEMMConvolutionLayer.cpp b/src/runtime/CL/functions/CLGEMMConvolutionLayer.cpp
index 40ce6b4e0f..03d516f703 100644
--- a/src/runtime/CL/functions/CLGEMMConvolutionLayer.cpp
+++ b/src/runtime/CL/functions/CLGEMMConvolutionLayer.cpp
@@ -372,7 +372,9 @@ Status CLGEMMConvolutionLayer::validate(const ITensorInfo *input, const ITensorI
const unsigned int kernel_width = weights->dimension(idx_width);
const unsigned int kernel_height = weights->dimension(idx_height);
- TensorInfo im2col_reshaped_info, info_gemm, weights_reshaped_info;
+ TensorInfo im2col_reshaped_info{};
+ TensorInfo info_gemm{};
+ TensorInfo weights_reshaped_info{};
const ITensorInfo *gemm_input_to_use = input;
const ITensorInfo *gemm_output_to_use = output;
const ITensorInfo *weights_to_use = weights;
diff --git a/src/runtime/CL/functions/CLGaussianPyramid.cpp b/src/runtime/CL/functions/CLGaussianPyramid.cpp
index fd82769004..b671b23c87 100644
--- a/src/runtime/CL/functions/CLGaussianPyramid.cpp
+++ b/src/runtime/CL/functions/CLGaussianPyramid.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018 ARM Limited.
+ * Copyright (c) 2017-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -76,10 +76,10 @@ void CLGaussianPyramidHalf::configure(ICLTensor *input, CLPyramid *pyramid, Bord
if(num_levels > 1)
{
- _horizontal_border_handler = arm_compute::support::cpp14::make_unique<CLFillBorderKernel[]>(num_levels - 1);
- _vertical_border_handler = arm_compute::support::cpp14::make_unique<CLFillBorderKernel[]>(num_levels - 1);
- _horizontal_reduction = arm_compute::support::cpp14::make_unique<CLGaussianPyramidHorKernel[]>(num_levels - 1);
- _vertical_reduction = arm_compute::support::cpp14::make_unique<CLGaussianPyramidVertKernel[]>(num_levels - 1);
+ _horizontal_border_handler.resize(num_levels - 1);
+ _vertical_border_handler.resize(num_levels - 1);
+ _horizontal_reduction.resize(num_levels - 1);
+ _vertical_reduction.resize(num_levels - 1);
// Apply half scale to the X dimension of the tensor shape
TensorShape tensor_shape = pyramid->info()->tensor_shape();
@@ -153,8 +153,8 @@ void CLGaussianPyramidOrb::configure(ICLTensor *input, CLPyramid *pyramid, Borde
if(num_levels > 1)
{
- _gauss5x5 = arm_compute::support::cpp14::make_unique<CLGaussian5x5[]>(num_levels - 1);
- _scale_nearest = arm_compute::support::cpp14::make_unique<CLScaleKernel[]>(num_levels - 1);
+ _gauss5x5.resize(num_levels - 1);
+ _scale_nearest.resize(num_levels - 1);
PyramidInfo pyramid_info(num_levels - 1, SCALE_PYRAMID_ORB, pyramid->info()->tensor_shape(), Format::U8);
diff --git a/src/runtime/CL/functions/CLHOGMultiDetection.cpp b/src/runtime/CL/functions/CLHOGMultiDetection.cpp
index 0865f50fd5..f799d61b16 100644
--- a/src/runtime/CL/functions/CLHOGMultiDetection.cpp
+++ b/src/runtime/CL/functions/CLHOGMultiDetection.cpp
@@ -128,12 +128,11 @@ void CLHOGMultiDetection::configure(ICLTensor *input, const ICLMultiHOG *multi_h
_num_block_norm_kernel = input_block_norm.size(); // Number of CLHOGBlockNormalizationKernel kernels to compute
_num_hog_detect_kernel = input_hog_detect.size(); // Number of CLHOGDetector functions to compute
- _orient_bin_kernel = arm_compute::support::cpp14::make_unique<CLHOGOrientationBinningKernel[]>(_num_orient_bin_kernel);
- _block_norm_kernel = arm_compute::support::cpp14::make_unique<CLHOGBlockNormalizationKernel[]>(_num_block_norm_kernel);
- _hog_detect_kernel = arm_compute::support::cpp14::make_unique<CLHOGDetector[]>(_num_hog_detect_kernel);
- _non_maxima_kernel = arm_compute::support::cpp14::make_unique<CPPDetectionWindowNonMaximaSuppressionKernel>();
- _hog_space = arm_compute::support::cpp14::make_unique<CLTensor[]>(_num_orient_bin_kernel);
- _hog_norm_space = arm_compute::support::cpp14::make_unique<CLTensor[]>(_num_block_norm_kernel);
+ _orient_bin_kernel.resize(_num_orient_bin_kernel);
+ _block_norm_kernel.resize(_num_block_norm_kernel);
+ _hog_detect_kernel.resize(_num_hog_detect_kernel);
+ _hog_space.resize(_num_orient_bin_kernel);
+ _hog_norm_space.resize(_num_block_norm_kernel);
// Allocate tensors for magnitude and phase
TensorInfo info_mag(shape_img, Format::S16);
@@ -172,10 +171,10 @@ void CLHOGMultiDetection::configure(ICLTensor *input, const ICLMultiHOG *multi_h
_hog_space[i].allocator()->init(info_space);
// Manage intermediate buffers
- _memory_group.manage(_hog_space.get() + i);
+ _memory_group.manage(&_hog_space[i]);
// Initialise orientation binning kernel
- _orient_bin_kernel[i].configure(&_mag, &_phase, _hog_space.get() + i, multi_hog->model(idx_multi_hog)->info());
+ _orient_bin_kernel[i].configure(&_mag, &_phase, &_hog_space[i], multi_hog->model(idx_multi_hog)->info());
}
// Allocate intermediate tensors
@@ -193,10 +192,10 @@ void CLHOGMultiDetection::configure(ICLTensor *input, const ICLMultiHOG *multi_h
_hog_norm_space[i].allocator()->init(tensor_info);
// Manage intermediate buffers
- _memory_group.manage(_hog_norm_space.get() + i);
+ _memory_group.manage(&_hog_norm_space[i]);
// Initialize block normalization kernel
- _block_norm_kernel[i].configure(_hog_space.get() + idx_orient_bin, _hog_norm_space.get() + i, multi_hog->model(idx_multi_hog)->info());
+ _block_norm_kernel[i].configure(&_hog_space[idx_orient_bin], &_hog_norm_space[i], multi_hog->model(idx_multi_hog)->info());
}
// Allocate intermediate tensors
@@ -212,13 +211,13 @@ void CLHOGMultiDetection::configure(ICLTensor *input, const ICLMultiHOG *multi_h
{
const size_t idx_block_norm = input_hog_detect[i];
- _hog_detect_kernel[i].configure(_hog_norm_space.get() + idx_block_norm, multi_hog->cl_model(i), detection_windows, detection_window_strides->at(i), threshold, i);
+ _hog_detect_kernel[i].configure(&_hog_norm_space[idx_block_norm], multi_hog->cl_model(i), detection_windows, detection_window_strides->at(i), threshold, i);
}
detection_window_strides->unmap(CLScheduler::get().queue());
// Configure non maxima suppression kernel
- _non_maxima_kernel->configure(_detection_windows, min_distance);
+ _non_maxima_kernel.configure(_detection_windows, min_distance);
// Allocate intermediate tensors
for(size_t i = 0; i < _num_block_norm_kernel; ++i)
@@ -242,13 +241,13 @@ void CLHOGMultiDetection::run()
// Run orientation binning kernel
for(size_t i = 0; i < _num_orient_bin_kernel; ++i)
{
- CLScheduler::get().enqueue(*(_orient_bin_kernel.get() + i), false);
+ CLScheduler::get().enqueue(_orient_bin_kernel[i], false);
}
// Run block normalization kernel
for(size_t i = 0; i < _num_block_norm_kernel; ++i)
{
- CLScheduler::get().enqueue(*(_block_norm_kernel.get() + i), false);
+ CLScheduler::get().enqueue(_block_norm_kernel[i], false);
}
// Run HOG detector kernel
@@ -262,7 +261,7 @@ void CLHOGMultiDetection::run()
{
// Map detection windows array before computing non maxima suppression
_detection_windows->map(CLScheduler::get().queue(), true);
- Scheduler::get().schedule(_non_maxima_kernel.get(), Window::DimY);
+ Scheduler::get().schedule(&_non_maxima_kernel, Window::DimY);
_detection_windows->unmap(CLScheduler::get().queue());
}
}
diff --git a/src/runtime/CL/functions/CLHarrisCorners.cpp b/src/runtime/CL/functions/CLHarrisCorners.cpp
index 342d1cad49..67f550d318 100644
--- a/src/runtime/CL/functions/CLHarrisCorners.cpp
+++ b/src/runtime/CL/functions/CLHarrisCorners.cpp
@@ -55,7 +55,7 @@ CLHarrisCorners::CLHarrisCorners(std::shared_ptr<IMemoryManager> memory_manager)
_gy(),
_score(),
_nonmax(),
- _corners_list(nullptr),
+ _corners_list(),
_num_corner_candidates(0),
_corners(nullptr)
{
@@ -84,7 +84,7 @@ void CLHarrisCorners::configure(ICLImage *input, float threshold, float min_dist
_score.allocator()->init(info_f32);
_nonmax.allocator()->init(info_f32);
- _corners_list = arm_compute::support::cpp14::make_unique<InternalKeypoint[]>(shape.x() * shape.y());
+ _corners_list.resize(shape.x() * shape.y());
// Manage intermediate buffers
_memory_group.manage(&_gx);
@@ -146,13 +146,13 @@ void CLHarrisCorners::configure(ICLImage *input, float threshold, float min_dist
_score.allocator()->allocate();
// Init corner candidates kernel
- _candidates.configure(&_nonmax, _corners_list.get(), &_num_corner_candidates);
+ _candidates.configure(&_nonmax, _corners_list.data(), &_num_corner_candidates);
// Allocate intermediate buffers
_nonmax.allocator()->allocate();
// Init euclidean distance
- _sort_euclidean.configure(_corners_list.get(), _corners, &_num_corner_candidates, min_dist);
+ _sort_euclidean.configure(_corners_list.data(), _corners, &_num_corner_candidates, min_dist);
}
void CLHarrisCorners::run()
diff --git a/src/runtime/CL/functions/CLLaplacianPyramid.cpp b/src/runtime/CL/functions/CLLaplacianPyramid.cpp
index 559b57fd8d..a11851898c 100644
--- a/src/runtime/CL/functions/CLLaplacianPyramid.cpp
+++ b/src/runtime/CL/functions/CLLaplacianPyramid.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018 ARM Limited.
+ * Copyright (c) 2017-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -70,8 +70,8 @@ void CLLaplacianPyramid::configure(ICLTensor *input, CLPyramid *pyramid, ICLTens
// Create Gaussian Pyramid function
_gaussian_pyr_function.configure(input, &_gauss_pyr, border_mode, constant_border_value);
- _convf = arm_compute::support::cpp14::make_unique<CLGaussian5x5[]>(_num_levels);
- _subf = arm_compute::support::cpp14::make_unique<CLArithmeticSubtraction[]>(_num_levels);
+ _convf.resize(_num_levels);
+ _subf.resize(_num_levels);
for(unsigned int i = 0; i < _num_levels; ++i)
{
diff --git a/src/runtime/CL/functions/CLLaplacianReconstruct.cpp b/src/runtime/CL/functions/CLLaplacianReconstruct.cpp
index 911c9b3b27..13116bf08d 100644
--- a/src/runtime/CL/functions/CLLaplacianReconstruct.cpp
+++ b/src/runtime/CL/functions/CLLaplacianReconstruct.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018 ARM Limited.
+ * Copyright (c) 2017-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -63,8 +63,8 @@ void CLLaplacianReconstruct::configure(const CLPyramid *pyramid, ICLTensor *inpu
_tmp_pyr.init(pyramid_info);
// Allocate add and scale functions. Level 0 does not need to be scaled.
- _addf = arm_compute::support::cpp14::make_unique<CLArithmeticAddition[]>(num_levels);
- _scalef = arm_compute::support::cpp14::make_unique<CLScale[]>(num_levels - 1);
+ _addf.resize(num_levels);
+ _scalef.resize(num_levels - 1);
const size_t last_level = num_levels - 1;
@@ -85,7 +85,7 @@ void CLLaplacianReconstruct::configure(const CLPyramid *pyramid, ICLTensor *inpu
void CLLaplacianReconstruct::run()
{
- ARM_COMPUTE_ERROR_ON_MSG(_addf == nullptr, "Unconfigured function");
+ ARM_COMPUTE_ERROR_ON_MSG(_addf.empty(), "Unconfigured function");
const size_t last_level = _tmp_pyr.info()->num_levels() - 1;
diff --git a/src/runtime/CL/functions/CLOpticalFlow.cpp b/src/runtime/CL/functions/CLOpticalFlow.cpp
index 7ef1c83d04..a013a1fe19 100644
--- a/src/runtime/CL/functions/CLOpticalFlow.cpp
+++ b/src/runtime/CL/functions/CLOpticalFlow.cpp
@@ -84,12 +84,12 @@ void CLOpticalFlow::configure(const CLPyramid *old_pyramid, const CLPyramid *new
const int old_values_list_length = list_length * window_dimension * window_dimension;
// Create kernels and tensors
- _tracker_init_kernel = arm_compute::support::cpp14::make_unique<CLLKTrackerInitKernel[]>(_num_levels);
- _tracker_stage0_kernel = arm_compute::support::cpp14::make_unique<CLLKTrackerStage0Kernel[]>(_num_levels);
- _tracker_stage1_kernel = arm_compute::support::cpp14::make_unique<CLLKTrackerStage1Kernel[]>(_num_levels);
- _func_scharr = arm_compute::support::cpp14::make_unique<CLScharr3x3[]>(_num_levels);
- _scharr_gx = arm_compute::support::cpp14::make_unique<CLTensor[]>(_num_levels);
- _scharr_gy = arm_compute::support::cpp14::make_unique<CLTensor[]>(_num_levels);
+ _tracker_init_kernel.resize(_num_levels);
+ _tracker_stage0_kernel.resize(_num_levels);
+ _tracker_stage1_kernel.resize(_num_levels);
+ _func_scharr.resize(_num_levels);
+ _scharr_gx.resize(_num_levels);
+ _scharr_gy.resize(_num_levels);
// Create internal keypoint arrays
_old_points_internal = arm_compute::support::cpp14::make_unique<CLLKInternalKeypointArray>(list_length);
@@ -118,8 +118,8 @@ void CLOpticalFlow::configure(const CLPyramid *old_pyramid, const CLPyramid *new
_scharr_gy[i].allocator()->init(tensor_info);
// Manage intermediate buffers
- _memory_group.manage(_scharr_gx.get() + i);
- _memory_group.manage(_scharr_gy.get() + i);
+ _memory_group.manage(&_scharr_gx[i]);
+ _memory_group.manage(&_scharr_gy[i]);
// Init Scharr kernel
_func_scharr[i].configure(old_ith_input, &_scharr_gx[i], &_scharr_gy[i], border_mode, constant_border_value);
diff --git a/src/runtime/CL/functions/CLPadLayer.cpp b/src/runtime/CL/functions/CLPadLayer.cpp
index dba7f23f3b..99e312183a 100644
--- a/src/runtime/CL/functions/CLPadLayer.cpp
+++ b/src/runtime/CL/functions/CLPadLayer.cpp
@@ -31,7 +31,7 @@
namespace arm_compute
{
CLPadLayer::CLPadLayer()
- : _copy_kernel(), _mode(), _padding(), _memset_kernel(), _num_dimensions(0), _slice_functions(nullptr), _concat_functions(nullptr), _slice_results(nullptr), _concat_results(nullptr)
+ : _copy_kernel(), _mode(), _padding(), _memset_kernel(), _num_dimensions(0), _slice_functions(), _concat_functions(), _slice_results(), _concat_results()
{
}
@@ -67,11 +67,16 @@ void CLPadLayer::configure_reflect_symmetric_mode(ICLTensor *input, ICLTensor *o
// Two strided slice functions will be required for each dimension padded as well as a
// concatenate function and the tensors to hold the temporary results.
- _slice_functions = arm_compute::support::cpp14::make_unique<CLStridedSlice[]>(2 * _num_dimensions);
- _slice_results = arm_compute::support::cpp14::make_unique<CLTensor[]>(2 * _num_dimensions);
- _concat_functions = arm_compute::support::cpp14::make_unique<CLConcatenateLayer[]>(_num_dimensions);
- _concat_results = arm_compute::support::cpp14::make_unique<CLTensor[]>(_num_dimensions - 1);
- Coordinates starts_before, ends_before, starts_after, ends_after, strides;
+ _slice_functions.resize(2 * _num_dimensions);
+ _slice_results.resize(2 * _num_dimensions);
+ _concat_functions.resize(_num_dimensions);
+ _concat_results.resize(_num_dimensions - 1);
+
+ Coordinates starts_before{};
+ Coordinates ends_before{};
+ Coordinates starts_after{};
+ Coordinates ends_after{};
+ Coordinates strides{};
ICLTensor *prev = input;
for(uint32_t i = 0; i < _num_dimensions; ++i)
{
diff --git a/src/runtime/CL/functions/CLReduceMean.cpp b/src/runtime/CL/functions/CLReduceMean.cpp
index 702ce34a4d..15091f9066 100644
--- a/src/runtime/CL/functions/CLReduceMean.cpp
+++ b/src/runtime/CL/functions/CLReduceMean.cpp
@@ -40,10 +40,10 @@ void CLReduceMean::configure(ICLTensor *input, const Coordinates &reduction_axis
{
ARM_COMPUTE_ERROR_ON_NULLPTR(input);
- _reduction_ops = reduction_axis.num_dimensions();
- _reduction_kernels = arm_compute::support::cpp14::make_unique<CLReductionOperation[]>(_reduction_ops);
- _reduced_outs = arm_compute::support::cpp14::make_unique<CLTensor[]>(_reduction_ops - (keep_dims ? 1 : 0));
- _keep_dims = keep_dims;
+ _reduction_ops = reduction_axis.num_dimensions();
+ _reduction_kernels.resize(_reduction_ops);
+ _reduced_outs.resize(_reduction_ops - (keep_dims ? 1 : 0));
+ _keep_dims = keep_dims;
Coordinates axis_local = reduction_axis;
const int input_dims = input->info()->num_dimensions();
@@ -57,9 +57,9 @@ void CLReduceMean::configure(ICLTensor *input, const Coordinates &reduction_axis
// Perform reduction for every axis
for(unsigned int i = 0; i < _reduction_ops; ++i)
{
- TensorShape out_shape = i == 0 ? input->info()->tensor_shape() : (_reduced_outs.get() + i - 1)->info()->tensor_shape();
+ TensorShape out_shape = i == 0 ? input->info()->tensor_shape() : (&_reduced_outs[i - 1])->info()->tensor_shape();
out_shape.set(axis_local[i], 1);
- auto in = (i == 0) ? input : (_reduced_outs.get() + i - 1);
+ auto in = (i == 0) ? input : (&_reduced_outs[i - 1]);
if(i == _reduction_ops - 1 && keep_dims)
{
@@ -68,8 +68,8 @@ void CLReduceMean::configure(ICLTensor *input, const Coordinates &reduction_axis
else
{
_reduced_outs[i].allocator()->init(TensorInfo(out_shape, input->info()->num_channels(), input->info()->data_type(), input->info()->quantization_info()));
- _memory_group.manage(_reduced_outs.get() + i);
- _reduction_kernels[i].configure(in, _reduced_outs.get() + i, axis_local[i], ReductionOperation::MEAN_SUM);
+ _memory_group.manage(&_reduced_outs[i]);
+ _reduction_kernels[i].configure(in, &_reduced_outs[i], axis_local[i], ReductionOperation::MEAN_SUM);
}
}
@@ -92,7 +92,7 @@ void CLReduceMean::configure(ICLTensor *input, const Coordinates &reduction_axis
out_shape.remove_dimension(axis_local[i] - i);
}
auto_init_if_empty(*output->info(), input->info()->clone()->set_tensor_shape(out_shape));
- _reshape.configure(_reduced_outs.get() + _reduction_ops - 1, output);
+ _reshape.configure(&_reduced_outs[_reduction_ops - 1], output);
}
}
diff --git a/src/runtime/CL/functions/CLReductionOperation.cpp b/src/runtime/CL/functions/CLReductionOperation.cpp
index bb285d7cc8..9f99d2db6f 100644
--- a/src/runtime/CL/functions/CLReductionOperation.cpp
+++ b/src/runtime/CL/functions/CLReductionOperation.cpp
@@ -71,7 +71,7 @@ Status CLReductionOperation::validate(const ITensorInfo *input, const ITensorInf
else
{
// Create temporary tensor infos
- auto sums_vector = arm_compute::support::cpp14::make_unique<TensorInfo[]>(num_of_stages - 1);
+ std::vector<TensorInfo> sums_vector(num_of_stages - 1);
// Create intermediate tensor info
TensorShape shape{ input->tensor_shape() };
@@ -110,17 +110,17 @@ Status CLReductionOperation::validate(const ITensorInfo *input, const ITensorInf
}
// Validate ReductionOperation only on first kernel
- ARM_COMPUTE_RETURN_ON_ERROR(CLReductionOperationKernel::validate(input, sums_vector.get(), axis, first_kernel_op));
+ ARM_COMPUTE_RETURN_ON_ERROR(CLReductionOperationKernel::validate(input, &sums_vector[0], axis, first_kernel_op));
// Validate ReductionOperation on intermediate stages
for(unsigned int i = 1; i < num_of_stages - 1; ++i)
{
- ARM_COMPUTE_RETURN_ON_ERROR(CLReductionOperationKernel::validate(sums_vector.get() + i - 1, sums_vector.get() + i, axis, intermediate_kernel_op));
+ ARM_COMPUTE_RETURN_ON_ERROR(CLReductionOperationKernel::validate(&sums_vector[i - 1], &sums_vector[i], axis, intermediate_kernel_op));
}
// Validate ReductionOperation on the last stage
const unsigned int last_stage = num_of_stages - 1;
- ARM_COMPUTE_RETURN_ON_ERROR(CLReductionOperationKernel::validate(sums_vector.get() + last_stage - 1, output, axis, last_kernel_op, input->dimension(0)));
+ ARM_COMPUTE_RETURN_ON_ERROR(CLReductionOperationKernel::validate(&sums_vector[last_stage - 1], output, axis, last_kernel_op, input->dimension(0)));
}
return Status{};
@@ -133,7 +133,7 @@ void CLReductionOperation::configure(ICLTensor *input, ICLTensor *output, unsign
_is_serial = is_data_type_quantized(input->info()->data_type()) || axis != 0;
// Configure reduction operation kernels
- _reduction_kernels_vector = arm_compute::support::cpp14::make_unique<CLReductionOperationKernel[]>(_num_of_stages);
+ _reduction_kernels_vector.resize(_num_of_stages);
// Create temporary tensors
if(_is_serial)
@@ -142,8 +142,8 @@ void CLReductionOperation::configure(ICLTensor *input, ICLTensor *output, unsign
}
else
{
- _border_handlers_vector = arm_compute::support::cpp14::make_unique<CLFillBorderKernel[]>(_num_of_stages);
- _results_vector = arm_compute::support::cpp14::make_unique<CLTensor[]>(_num_of_stages - 1);
+ _border_handlers_vector.resize(_num_of_stages);
+ _results_vector.resize(_num_of_stages - 1);
TensorShape shape{ input->info()->tensor_shape() };
for(unsigned int i = 0; i < _num_of_stages - 1; i++)
{
@@ -152,7 +152,7 @@ void CLReductionOperation::configure(ICLTensor *input, ICLTensor *output, unsign
}
// Apply ReductionOperation only on first kernel
- _memory_group.manage(_results_vector.get());
+ _memory_group.manage(&_results_vector[0]);
ReductionOperation first_kernel_op;
ReductionOperation intermediate_kernel_op;
@@ -183,23 +183,23 @@ void CLReductionOperation::configure(ICLTensor *input, ICLTensor *output, unsign
ARM_COMPUTE_ERROR("Not supported");
}
- _reduction_kernels_vector[0].configure(input, _results_vector.get(), axis, first_kernel_op);
+ _reduction_kernels_vector[0].configure(input, &_results_vector[0], axis, first_kernel_op);
_border_handlers_vector[0].configure(input, _reduction_kernels_vector[0].border_size(), BorderMode::CONSTANT, pixelValue);
// Apply ReductionOperation on intermediate stages
for(unsigned int i = 1; i < _num_of_stages - 1; ++i)
{
- _memory_group.manage(_results_vector.get() + i);
- _reduction_kernels_vector[i].configure(_results_vector.get() + i - 1, _results_vector.get() + i, axis, intermediate_kernel_op);
- _border_handlers_vector[i].configure(_results_vector.get() + i - 1, _reduction_kernels_vector[i].border_size(), BorderMode::CONSTANT, pixelValue);
+ _memory_group.manage(&_results_vector[i]);
+ _reduction_kernels_vector[i].configure(&_results_vector[i - 1], &_results_vector[i], axis, intermediate_kernel_op);
+ _border_handlers_vector[i].configure(&_results_vector[i - 1], _reduction_kernels_vector[i].border_size(), BorderMode::CONSTANT, pixelValue);
_results_vector[i - 1].allocator()->allocate();
}
// Apply ReductionOperation on the last stage
const unsigned int last_stage = _num_of_stages - 1;
const unsigned int input_width = input->info()->dimension(0);
- _reduction_kernels_vector[last_stage].configure(_results_vector.get() + last_stage - 1, output, axis, last_kernel_op, input_width);
- _border_handlers_vector[last_stage].configure(_results_vector.get() + last_stage - 1, _reduction_kernels_vector[last_stage].border_size(), BorderMode::CONSTANT, pixelValue);
+ _reduction_kernels_vector[last_stage].configure(&_results_vector[last_stage - 1], output, axis, last_kernel_op, input_width);
+ _border_handlers_vector[last_stage].configure(&_results_vector[last_stage - 1], _reduction_kernels_vector[last_stage].border_size(), BorderMode::CONSTANT, pixelValue);
_results_vector[last_stage - 1].allocator()->allocate();
}
}
diff --git a/src/runtime/CL/functions/CLSplit.cpp b/src/runtime/CL/functions/CLSplit.cpp
index f0843517e7..8d37d538c8 100644
--- a/src/runtime/CL/functions/CLSplit.cpp
+++ b/src/runtime/CL/functions/CLSplit.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018 ARM Limited.
+ * Copyright (c) 2018-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -42,8 +42,8 @@ CLSplit::CLSplit()
void CLSplit::configure(const ICLTensor *input, const std::vector<ICLTensor *> &outputs, unsigned int axis)
{
// Create Slice functions
- _num_outputs = outputs.size();
- _slice_functions = arm_compute::support::cpp14::make_unique<CLSlice[]>(_num_outputs);
+ _num_outputs = outputs.size();
+ _slice_functions.resize(_num_outputs);
// Get output shape
const TensorShape output_shape = arm_compute::misc::shape_calculator::compute_split_shape(input->info(), axis, _num_outputs);
diff --git a/src/runtime/CL/functions/CLStackLayer.cpp b/src/runtime/CL/functions/CLStackLayer.cpp
index 71327fead4..2700b49272 100644
--- a/src/runtime/CL/functions/CLStackLayer.cpp
+++ b/src/runtime/CL/functions/CLStackLayer.cpp
@@ -46,8 +46,8 @@ CLStackLayer::CLStackLayer() // NOLINT
void CLStackLayer::configure(const std::vector<ICLTensor *> &input, int axis, ICLTensor *output)
{
- _num_inputs = input.size();
- _stack_kernels = arm_compute::support::cpp14::make_unique<CLStackLayerKernel[]>(_num_inputs);
+ _num_inputs = input.size();
+ _stack_kernels.resize(_num_inputs);
// Wrap around negative values
const unsigned int axis_u = wrap_around(axis, static_cast<int>(input[0]->info()->num_dimensions() + 1));
diff --git a/src/runtime/CL/functions/CLUnstack.cpp b/src/runtime/CL/functions/CLUnstack.cpp
index 428d09148b..eb1dd8cd44 100644
--- a/src/runtime/CL/functions/CLUnstack.cpp
+++ b/src/runtime/CL/functions/CLUnstack.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018 ARM Limited.
+ * Copyright (c) 2018-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -74,7 +74,7 @@ void CLUnstack::configure(const ICLTensor *input, const std::vector<ICLTensor *>
// Wrap around negative values
const unsigned int axis_u = wrap_axis(axis, input->info());
_num_slices = std::min(outputs_vector_info.size(), input->info()->dimension(axis_u));
- _strided_slice_vector = arm_compute::support::cpp14::make_unique<CLStridedSlice[]>(_num_slices);
+ _strided_slice_vector.resize(_num_slices);
Coordinates slice_start;
int32_t slice_end_mask;
diff --git a/src/runtime/CL/functions/CLWidthConcatenateLayer.cpp b/src/runtime/CL/functions/CLWidthConcatenateLayer.cpp
index 6e42377a07..a8667c3138 100644
--- a/src/runtime/CL/functions/CLWidthConcatenateLayer.cpp
+++ b/src/runtime/CL/functions/CLWidthConcatenateLayer.cpp
@@ -109,7 +109,7 @@ void CLWidthConcatenateLayer::configure(std::vector<ICLTensor *> inputs_vector,
break;
default:
// Configure generic case WidthConcatenate kernels
- _concat_kernels_vector = arm_compute::support::cpp14::make_unique<CLWidthConcatenateLayerKernel[]>(_num_inputs);
+ _concat_kernels_vector.resize(_num_inputs);
unsigned int width_offset = 0;
for(unsigned int i = 0; i < _num_inputs; ++i)
diff --git a/src/runtime/CL/tuners/CLLWSList.cpp b/src/runtime/CL/tuners/CLLWSList.cpp
index 97134b1b2c..6eb251420c 100644
--- a/src/runtime/CL/tuners/CLLWSList.cpp
+++ b/src/runtime/CL/tuners/CLLWSList.cpp
@@ -36,7 +36,7 @@ cl::NDRange CLLWSListExhaustive::operator[](size_t index)
{
ARM_COMPUTE_ERROR_ON(index >= size());
auto coords = index2coords(search_space_shape, index);
- return cl::NDRange(coords[0] + 1, coords[1] + 1, coords[2] + 1);
+ return cl::NDRange{ coords[0] + 1U, coords[1] + 1U, coords[2] + 1U };
}
CLLWSListExhaustive::CLLWSListExhaustive(const cl::NDRange &gws)
@@ -49,7 +49,7 @@ cl::NDRange CLLWSListNormal::operator[](size_t index)
{
ARM_COMPUTE_ERROR_ON(index >= size());
auto coords = index2coords(search_space_shape, index);
- return cl::NDRange(_lws_x[coords[0]], _lws_y[coords[1]], _lws_z[coords[2]]);
+ return cl::NDRange{ _lws_x[coords[0]], _lws_y[coords[1]], _lws_z[coords[2]] };
}
CLLWSListNormal::CLLWSListNormal(const cl::NDRange &gws)
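The NDRange changes above switch to brace initialization with explicitly unsigned operands. A hedged aside on what that buys: list-initialization rejects narrowing conversions at compile time, so a signed value slipping into a size_t-based range is diagnosed rather than silently converted. Range below is a hypothetical stand-in for a size_t triple, not the OpenCL type.

#include <cstddef>

struct Range { std::size_t x, y, z; };   // hypothetical stand-in for a size_t-based NDRange

int main()
{
    unsigned int c0 = 1, c1 = 2, c2 = 3;
    // int s = -1;
    // Range bad{ s, c1, c2 };            // ill-formed: int -> size_t narrows in list-initialization
    Range r{ c0 + 1U, c1 + 1U, c2 + 1U };  // unsigned operands convert without narrowing
    return static_cast<int>(r.x + r.y + r.z);
}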
diff --git a/src/runtime/CPP/functions/CPPDetectionOutputLayer.cpp b/src/runtime/CPP/functions/CPPDetectionOutputLayer.cpp
index 79e619cfd6..9a141cb73a 100644
--- a/src/runtime/CPP/functions/CPPDetectionOutputLayer.cpp
+++ b/src/runtime/CPP/functions/CPPDetectionOutputLayer.cpp
@@ -600,7 +600,7 @@ void CPPDetectionOutputLayer::run()
if(_info.keep_top_k() > -1 && num_det > _info.keep_top_k())
{
std::vector<std::pair<float, std::pair<int, int>>> score_index_pairs;
- for(auto it : indices)
+ for(auto const &it : indices)
{
const int label = it.first;
const std::vector<int> &label_indices = it.second;
@@ -614,7 +614,7 @@ void CPPDetectionOutputLayer::run()
for(auto idx : label_indices)
{
ARM_COMPUTE_ERROR_ON(idx > static_cast<int>(scores.size()));
- score_index_pairs.push_back(std::make_pair(scores[idx], std::make_pair(label, idx)));
+ score_index_pairs.emplace_back(std::make_pair(scores[idx], std::make_pair(label, idx)));
}
}
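The detection-output loop now iterates the map by const reference and constructs the score/index pairs in place. A small self-contained sketch of the same idiom; the container contents here are made up for illustration.

#include <cstdio>
#include <map>
#include <utility>
#include <vector>

int main()
{
    const std::map<int, std::vector<int>> indices{ { 0, { 3, 1 } }, { 2, { 7 } } };
    std::vector<std::pair<float, std::pair<int, int>>> score_index_pairs;

    for(const auto &it : indices)          // const reference: no per-iteration pair copy
    {
        const int label = it.first;
        for(const int idx : it.second)
        {
            score_index_pairs.emplace_back(0.5f, std::make_pair(label, idx));  // construct in place
        }
    }
    std::printf("%zu pairs collected\n", score_index_pairs.size());
    return 0;
}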
diff --git a/src/runtime/CPUUtils.cpp b/src/runtime/CPUUtils.cpp
index f3355a740b..f7240db99e 100644
--- a/src/runtime/CPUUtils.cpp
+++ b/src/runtime/CPUUtils.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018 ARM Limited.
+ * Copyright (c) 2018-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -54,16 +54,16 @@
/* Make sure the bits we care about are defined, just in case asm/hwcap.h is
* out of date (or for bare metal mode) */
#ifndef HWCAP_ASIMDHP
-#define HWCAP_ASIMDHP (1 << 10)
-#endif /* HWCAP_ASIMDHP */
+#define HWCAP_ASIMDHP (1 << 10) // NOLINT
+#endif /* HWCAP_ASIMDHP */
#ifndef HWCAP_CPUID
-#define HWCAP_CPUID (1 << 11)
-#endif /* HWCAP_CPUID */
+#define HWCAP_CPUID (1 << 11) // NOLINT
+#endif /* HWCAP_CPUID */
#ifndef HWCAP_ASIMDDP
-#define HWCAP_ASIMDDP (1 << 20)
-#endif /* HWCAP_ASIMDDP */
+#define HWCAP_ASIMDDP (1 << 20) // NOLINT
+#endif /* HWCAP_ASIMDDP */
namespace
{
@@ -146,12 +146,12 @@ CPUModel midr_to_model(const unsigned int midr)
break;
}
}
- else if(implementer == 0x48) // HiSilicon CPUs
+ else if(implementer == 0x48)
{
// Only CPUs we have code paths for are detected. All other CPUs can be safely classed as "GENERIC"
switch(cpunum)
{
- case 0xd40: // A76 (Kirin 980)
+ case 0xd40: // A76
model = CPUModel::GENERIC_FP16_DOT;
break;
default:
@@ -220,8 +220,8 @@ void populate_models_cpuinfo(std::vector<CPUModel> &cpusv)
while(bool(getline(file, line)))
{
- regmatch_t match[2];
- ret_status = regexec(&proc_regex, line.c_str(), 2, match, 0);
+ std::array<regmatch_t, 2> match;
+ ret_status = regexec(&proc_regex, line.c_str(), 2, match.data(), 0);
if(ret_status == 0)
{
std::string id = line.substr(match[1].rm_so, (match[1].rm_eo - match[1].rm_so));
@@ -244,7 +244,7 @@ void populate_models_cpuinfo(std::vector<CPUModel> &cpusv)
continue;
}
- ret_status = regexec(&imp_regex, line.c_str(), 2, match, 0);
+ ret_status = regexec(&imp_regex, line.c_str(), 2, match.data(), 0);
if(ret_status == 0)
{
std::string subexp = line.substr(match[1].rm_so, (match[1].rm_eo - match[1].rm_so));
@@ -254,7 +254,7 @@ void populate_models_cpuinfo(std::vector<CPUModel> &cpusv)
continue;
}
- ret_status = regexec(&var_regex, line.c_str(), 2, match, 0);
+ ret_status = regexec(&var_regex, line.c_str(), 2, match.data(), 0);
if(ret_status == 0)
{
std::string subexp = line.substr(match[1].rm_so, (match[1].rm_eo - match[1].rm_so));
@@ -264,7 +264,7 @@ void populate_models_cpuinfo(std::vector<CPUModel> &cpusv)
continue;
}
- ret_status = regexec(&part_regex, line.c_str(), 2, match, 0);
+ ret_status = regexec(&part_regex, line.c_str(), 2, match.data(), 0);
if(ret_status == 0)
{
std::string subexp = line.substr(match[1].rm_so, (match[1].rm_eo - match[1].rm_so));
@@ -274,7 +274,7 @@ void populate_models_cpuinfo(std::vector<CPUModel> &cpusv)
continue;
}
- ret_status = regexec(&rev_regex, line.c_str(), 2, match, 0);
+ ret_status = regexec(&rev_regex, line.c_str(), 2, match.data(), 0);
if(ret_status == 0)
{
std::string subexp = line.substr(match[1].rm_so, (match[1].rm_eo - match[1].rm_so));
@@ -302,8 +302,7 @@ void populate_models_cpuinfo(std::vector<CPUModel> &cpusv)
int get_max_cpus()
{
- int max_cpus = 1;
-#if !defined(BARE_METAL) && (defined(__arm__) || defined(__aarch64__))
+ int max_cpus = 1;
std::ifstream CPUspresent;
CPUspresent.open("/sys/devices/system/cpu/present", std::ios::in);
bool success = false;
@@ -341,7 +340,6 @@ int get_max_cpus()
{
max_cpus = std::thread::hardware_concurrency();
}
-#endif /* BARE_METAL */
return max_cpus;
}
#endif /* !defined(BARE_METAL) && (defined(__arm__) || defined(__aarch64__)) */
@@ -427,8 +425,8 @@ unsigned int get_threads_hint()
std::string line;
while(bool(getline(cpuinfo, line)))
{
- regmatch_t match[2];
- ret_status = regexec(&cpu_part_rgx, line.c_str(), 2, match, 0);
+ std::array<regmatch_t, 2> match;
+ ret_status = regexec(&cpu_part_rgx, line.c_str(), 2, match.data(), 0);
if(ret_status == 0)
{
std::string cpu_part = line.substr(match[1].rm_so, (match[1].rm_eo - match[1].rm_so));
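Several of the cpuinfo parsers above swap a raw regmatch_t[2] for std::array<regmatch_t, 2> and pass .data() to regexec. A POSIX-only sketch of that interop follows, assuming a /proc/cpuinfo-style line; the pattern and the sample line are illustrative, not the library's actual regexes.

#include <array>
#include <cstdio>
#include <regex.h>
#include <string>

int main()
{
    regex_t re{};
    if(regcomp(&re, "^CPU part[[:space:]]*:[[:space:]]*(0x[0-9a-f]+)$", REG_EXTENDED) != 0)
    {
        return 1;
    }
    const std::string line = "CPU part        : 0xd0a";
    std::array<regmatch_t, 2> match{};                         // fixed-size buffer instead of a raw C array
    if(regexec(&re, line.c_str(), match.size(), match.data(), 0) == 0)
    {
        const std::string part = line.substr(match[1].rm_so, match[1].rm_eo - match[1].rm_so);
        std::printf("part id: %s\n", part.c_str());
    }
    regfree(&re);
    return 0;
}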
diff --git a/src/runtime/Distribution1D.cpp b/src/runtime/Distribution1D.cpp
index 3431834c48..9e6fce4e03 100644
--- a/src/runtime/Distribution1D.cpp
+++ b/src/runtime/Distribution1D.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2017 ARM Limited.
+ * Copyright (c) 2016-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -31,12 +31,11 @@
using namespace arm_compute;
Distribution1D::Distribution1D(size_t num_bins, int32_t offset, uint32_t range)
- : IDistribution1D(num_bins, offset, range), _data(arm_compute::support::cpp14::make_unique<uint32_t[]>(num_bins))
+ : IDistribution1D(num_bins, offset, range), _data(num_bins)
{
}
uint32_t *Distribution1D::buffer() const
{
- ARM_COMPUTE_ERROR_ON(nullptr == _data);
- return _data.get();
+ return _data.data();
}
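Distribution1D::buffer() stays a const member yet still returns uint32_t*. The headers are outside this diff (it is limited to src/), so presumably the member was declared mutable, for example mutable std::vector<uint32_t>; otherwise _data.data() would yield const uint32_t* here and the change would not compile. A sketch of that assumption:

#include <cstddef>
#include <cstdint>
#include <vector>

class Buffer
{
public:
    explicit Buffer(std::size_t num_bins) : _data(num_bins) {}

    // Returning a mutable pointer from a const accessor only works because the
    // member is declared mutable; a plain member would yield const uint32_t* here.
    uint32_t *buffer() const { return _data.data(); }

private:
    mutable std::vector<uint32_t> _data;
};

int main()
{
    const Buffer hist(16);
    hist.buffer()[0] = 1U;   // legal: the storage is mutable by design
    return 0;
}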
diff --git a/src/runtime/GLES_COMPUTE/GCMemory.cpp b/src/runtime/GLES_COMPUTE/GCMemory.cpp
index fed4a158a3..f1457c4d6e 100644
--- a/src/runtime/GLES_COMPUTE/GCMemory.cpp
+++ b/src/runtime/GLES_COMPUTE/GCMemory.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018 ARM Limited.
+ * Copyright (c) 2018-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -33,8 +33,8 @@ GCMemory::GCMemory()
{
}
-GCMemory::GCMemory(std::shared_ptr<IGCMemoryRegion> memory)
- : _region(nullptr), _region_owned(std::move(memory))
+GCMemory::GCMemory(const std::shared_ptr<IGCMemoryRegion> &memory)
+ : _region(nullptr), _region_owned(memory)
{
_region_owned = memory;
_region = _region_owned.get();
diff --git a/src/runtime/GLES_COMPUTE/GCScheduler.cpp b/src/runtime/GLES_COMPUTE/GCScheduler.cpp
index f7812730fc..6a39e7c360 100644
--- a/src/runtime/GLES_COMPUTE/GCScheduler.cpp
+++ b/src/runtime/GLES_COMPUTE/GCScheduler.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018 ARM Limited.
+ * Copyright (c) 2017-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -97,7 +97,7 @@ void GCScheduler::setup_context()
ARM_COMPUTE_ERROR_ON_MSG((strstr(egl_extension_st, "EGL_KHR_surfaceless_context") == nullptr), "Failed to query EGL_KHR_surfaceless_context");
ARM_COMPUTE_UNUSED(egl_extension_st);
- const EGLint config_attribs[] =
+ const std::array<EGLint, 3> config_attribs =
{
EGL_RENDERABLE_TYPE, EGL_OPENGL_ES3_BIT_KHR,
EGL_NONE
@@ -105,7 +105,7 @@ void GCScheduler::setup_context()
EGLConfig cfg;
EGLint count;
- res = eglChooseConfig(_display, config_attribs, &cfg, 1, &count);
+ res = eglChooseConfig(_display, config_attribs.data(), &cfg, 1, &count);
ARM_COMPUTE_ERROR_ON_MSG(res == EGL_FALSE, "Failed to choose config: 0x%x.", eglGetError());
ARM_COMPUTE_UNUSED(res);
@@ -114,7 +114,7 @@ void GCScheduler::setup_context()
ARM_COMPUTE_ERROR_ON_MSG(res == EGL_FALSE, "Failed to bind api: 0x%x.", eglGetError());
- const EGLint attribs[] =
+ const std::array<EGLint, 3> attribs =
{
EGL_CONTEXT_CLIENT_VERSION, 3,
EGL_NONE
@@ -122,7 +122,7 @@ void GCScheduler::setup_context()
_context = eglCreateContext(_display,
cfg,
EGL_NO_CONTEXT,
- attribs);
+ attribs.data());
ARM_COMPUTE_ERROR_ON_MSG(_context == EGL_NO_CONTEXT, "Failed to create context: 0x%x.", eglGetError());
ARM_COMPUTE_UNUSED(res);
diff --git a/src/runtime/HOG.cpp b/src/runtime/HOG.cpp
index 01640bb0ac..e9f38c4d20 100644
--- a/src/runtime/HOG.cpp
+++ b/src/runtime/HOG.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2017 ARM Limited.
+ * Copyright (c) 2016-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -29,20 +29,19 @@
using namespace arm_compute;
HOG::HOG()
- : IHOG(), _info(), _descriptor(nullptr)
+ : IHOG(), _info(), _descriptor()
{
}
void HOG::init(const HOGInfo &input)
{
- ARM_COMPUTE_ERROR_ON(nullptr != _descriptor);
- _info = input;
- _descriptor = arm_compute::support::cpp14::make_unique<float[]>(_info.descriptor_size());
+ _info = input;
+ _descriptor.resize(_info.descriptor_size());
}
float *HOG::descriptor() const
{
- return _descriptor.get();
+ return _descriptor.data();
}
const HOGInfo *HOG::info() const
diff --git a/src/runtime/LutAllocator.cpp b/src/runtime/LutAllocator.cpp
index eb9051cc0a..0db5217c90 100644
--- a/src/runtime/LutAllocator.cpp
+++ b/src/runtime/LutAllocator.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2017 ARM Limited.
+ * Copyright (c) 2016-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -28,23 +28,23 @@
using namespace arm_compute;
LutAllocator::LutAllocator()
- : _buffer(nullptr)
+ : _buffer()
{
}
uint8_t *LutAllocator::data() const
{
- return _buffer.get();
+ return _buffer.data();
}
void LutAllocator::allocate()
{
- _buffer = arm_compute::support::cpp14::make_unique<uint8_t[]>(size());
+ _buffer.resize(size());
}
uint8_t *LutAllocator::lock()
{
- return _buffer.get();
+ return _buffer.data();
}
void LutAllocator::unlock()
diff --git a/src/runtime/Memory.cpp b/src/runtime/Memory.cpp
index d116624679..c6b956d929 100644
--- a/src/runtime/Memory.cpp
+++ b/src/runtime/Memory.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018 ARM Limited.
+ * Copyright (c) 2017-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -32,8 +32,8 @@ Memory::Memory()
{
}
-Memory::Memory(std::shared_ptr<IMemoryRegion> memory)
- : _region(nullptr), _region_owned(std::move(memory))
+Memory::Memory(const std::shared_ptr<IMemoryRegion> &memory)
+ : _region(nullptr), _region_owned(memory)
{
_region_owned = memory;
_region = _region_owned.get();
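Memory's constructor (like GCMemory's above) now takes the shared_ptr by const reference and copies it, instead of taking it by value and moving. A neutral sketch of the resulting ref-counting, with Region and Memory as stand-ins; with either signature the callee ends up holding one additional reference.

#include <memory>

struct Region { int size = 0; };

class Memory
{
public:
    // Pass by const reference: one ref-count increment when the member is copy-initialized.
    explicit Memory(const std::shared_ptr<Region> &region) : _owned(region), _raw(_owned.get()) {}

private:
    std::shared_ptr<Region> _owned;
    Region                 *_raw;
};

int main()
{
    auto region = std::make_shared<Region>();
    Memory m(region);                        // caller keeps its own reference alive
    return region.use_count() == 2 ? 0 : 1;  // one count for the caller, one for the member
}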
diff --git a/src/runtime/MultiHOG.cpp b/src/runtime/MultiHOG.cpp
index e0b60b1137..154bbd7acd 100644
--- a/src/runtime/MultiHOG.cpp
+++ b/src/runtime/MultiHOG.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2017 ARM Limited.
+ * Copyright (c) 2016-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -30,8 +30,9 @@
using namespace arm_compute;
MultiHOG::MultiHOG(size_t num_models)
- : _num_models(num_models), _model(arm_compute::support::cpp14::make_unique<HOG[]>(_num_models))
+ : _num_models(num_models), _model()
{
+ _model.resize(_num_models);
}
size_t MultiHOG::num_models() const
@@ -42,11 +43,11 @@ size_t MultiHOG::num_models() const
IHOG *MultiHOG::model(size_t index)
{
ARM_COMPUTE_ERROR_ON(index >= _num_models);
- return (_model.get() + index);
+ return (&_model[index]);
}
const IHOG *MultiHOG::model(size_t index) const
{
ARM_COMPUTE_ERROR_ON(index >= _num_models);
- return (_model.get() + index);
+ return (&_model[index]);
}
diff --git a/src/runtime/NEON/functions/NEConcatenateLayer.cpp b/src/runtime/NEON/functions/NEConcatenateLayer.cpp
index b8cfa2b8f2..71af560fb0 100644
--- a/src/runtime/NEON/functions/NEConcatenateLayer.cpp
+++ b/src/runtime/NEON/functions/NEConcatenateLayer.cpp
@@ -51,6 +51,7 @@ void NEConcatenateLayer::configure(const std::vector<ITensor *> &inputs_vector,
_num_inputs = inputs_vector.size();
std::vector<ITensorInfo *> inputs_vector_info;
+ inputs_vector_info.reserve(_num_inputs);
for(unsigned int i = 0; i < _num_inputs; ++i)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(inputs_vector.at(i));
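The added reserve() sizes the pointer vector before the push_back loop, so the loop performs a single allocation instead of repeated regrowth. A small sketch of the idiom:

#include <cstdio>
#include <vector>

int main()
{
    const unsigned int num_inputs = 8;
    std::vector<const int *> infos;
    infos.reserve(num_inputs);            // one allocation instead of several regrowths

    static const int dummy[8] = {};
    for(unsigned int i = 0; i < num_inputs; ++i)
    {
        infos.push_back(&dummy[i]);       // no reallocation; previously stored pointers stay valid
    }
    std::printf("capacity %zu, size %zu\n", infos.capacity(), infos.size());
    return 0;
}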
diff --git a/src/runtime/NEON/functions/NEGEMMAssemblyDispatch.cpp b/src/runtime/NEON/functions/NEGEMMAssemblyDispatch.cpp
index fe1f2da457..55e067f52d 100644
--- a/src/runtime/NEON/functions/NEGEMMAssemblyDispatch.cpp
+++ b/src/runtime/NEON/functions/NEGEMMAssemblyDispatch.cpp
@@ -35,7 +35,7 @@ namespace arm_compute
{
namespace
{
-std::unique_ptr<IFunction> create_function_all_types(arm_gemm::KernelDescription gemm_kernel_info,
+std::unique_ptr<IFunction> create_function_all_types(const arm_gemm::KernelDescription &gemm_kernel_info,
const ITensor *a, const ITensor *b, ITensor *d, float alpha, float beta, bool pretranspose_hint,
std::shared_ptr<IMemoryManager> memory_manager)
diff --git a/src/runtime/NEON/functions/NEHarrisCorners.cpp b/src/runtime/NEON/functions/NEHarrisCorners.cpp
index 15cecc25a0..3eadbee45d 100644
--- a/src/runtime/NEON/functions/NEHarrisCorners.cpp
+++ b/src/runtime/NEON/functions/NEHarrisCorners.cpp
@@ -90,7 +90,7 @@ void NEHarrisCorners::configure(IImage *input, float threshold, float min_dist,
_score.allocator()->init(tensor_info_score);
_nonmax.allocator()->init(tensor_info_score);
- _corners_list = arm_compute::support::cpp14::make_unique<InternalKeypoint[]>(shape.x() * shape.y());
+ _corners_list.resize(shape.x() * shape.y());
// Set/init Sobel kernel accordingly with gradient_size
switch(gradient_size)
@@ -171,13 +171,13 @@ void NEHarrisCorners::configure(IImage *input, float threshold, float min_dist,
_score.allocator()->allocate();
// Init corner candidates kernel
- _candidates.configure(&_nonmax, _corners_list.get(), &_num_corner_candidates);
+ _candidates.configure(&_nonmax, _corners_list.data(), &_num_corner_candidates);
// Allocate once all the configure methods have been called
_nonmax.allocator()->allocate();
// Init euclidean distance
- _sort_euclidean.configure(_corners_list.get(), corners, &_num_corner_candidates, min_dist);
+ _sort_euclidean.configure(_corners_list.data(), corners, &_num_corner_candidates, min_dist);
}
void NEHarrisCorners::run()
diff --git a/src/runtime/NEON/functions/NEHistogram.cpp b/src/runtime/NEON/functions/NEHistogram.cpp
index f333ecb1f8..d56bd7cb16 100644
--- a/src/runtime/NEON/functions/NEHistogram.cpp
+++ b/src/runtime/NEON/functions/NEHistogram.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2017 ARM Limited.
+ * Copyright (c) 2016-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -34,7 +34,7 @@
using namespace arm_compute;
NEHistogram::NEHistogram()
- : _histogram_kernel(), _local_hist(), _window_lut(arm_compute::support::cpp14::make_unique<uint32_t[]>(window_lut_default_size)), _local_hist_size(0)
+ : _histogram_kernel(), _local_hist(), _window_lut(window_lut_default_size), _local_hist_size(0)
{
}
@@ -45,10 +45,10 @@ void NEHistogram::configure(const IImage *input, IDistribution1D *output)
// Allocate space for threads local histograms
_local_hist_size = output->num_bins() * NEScheduler::get().num_threads();
- _local_hist = arm_compute::support::cpp14::make_unique<uint32_t[]>(_local_hist_size);
+ _local_hist.resize(_local_hist_size);
// Configure kernel
- _histogram_kernel.configure(input, output, _local_hist.get(), _window_lut.get());
+ _histogram_kernel.configure(input, output, _local_hist.data(), _window_lut.data());
}
void NEHistogram::run()
diff --git a/src/runtime/NEON/functions/NELaplacianPyramid.cpp b/src/runtime/NEON/functions/NELaplacianPyramid.cpp
index 0e149d4176..5174a1357e 100644
--- a/src/runtime/NEON/functions/NELaplacianPyramid.cpp
+++ b/src/runtime/NEON/functions/NELaplacianPyramid.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2017 ARM Limited.
+ * Copyright (c) 2016-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -92,8 +92,8 @@ void NELaplacianPyramid::configure(const ITensor *input, IPyramid *pyramid, ITen
// Create Gaussian Pyramid function
_gaussian_pyr_function.configure(input, &_gauss_pyr, border_mode, constant_border_value);
- _convf = arm_compute::support::cpp14::make_unique<NEGaussian5x5[]>(_num_levels);
- _subf = arm_compute::support::cpp14::make_unique<NEArithmeticSubtraction[]>(_num_levels);
+ _convf.resize(_num_levels);
+ _subf.resize(_num_levels);
for(unsigned int i = 0; i < _num_levels; ++i)
{
diff --git a/src/runtime/NEON/functions/NELaplacianReconstruct.cpp b/src/runtime/NEON/functions/NELaplacianReconstruct.cpp
index 9ad9689b13..b2d889b07f 100644
--- a/src/runtime/NEON/functions/NELaplacianReconstruct.cpp
+++ b/src/runtime/NEON/functions/NELaplacianReconstruct.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2018 ARM Limited.
+ * Copyright (c) 2016-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -64,8 +64,8 @@ void NELaplacianReconstruct::configure(const IPyramid *pyramid, ITensor *input,
_tmp_pyr.init(pyramid_info);
// Allocate add and scale functions. Level 0 does not need to be scaled.
- _addf = arm_compute::support::cpp14::make_unique<NEArithmeticAddition[]>(num_levels);
- _scalef = arm_compute::support::cpp14::make_unique<NEScale[]>(num_levels - 1);
+ _addf.resize(num_levels);
+ _scalef.resize(num_levels - 1);
const size_t last_level = num_levels - 1;
@@ -86,7 +86,7 @@ void NELaplacianReconstruct::configure(const IPyramid *pyramid, ITensor *input,
void NELaplacianReconstruct::run()
{
- ARM_COMPUTE_ERROR_ON_MSG(_addf == nullptr, "Unconfigured function");
+ ARM_COMPUTE_ERROR_ON_MSG(_addf.empty(), "Unconfigured function");
const size_t last_level = _tmp_pyr.info()->num_levels() - 1;
diff --git a/src/runtime/NEON/functions/NEPadLayer.cpp b/src/runtime/NEON/functions/NEPadLayer.cpp
index 6af2ee8868..c608edfdee 100644
--- a/src/runtime/NEON/functions/NEPadLayer.cpp
+++ b/src/runtime/NEON/functions/NEPadLayer.cpp
@@ -76,8 +76,7 @@ uint32_t last_padding_dimension(const PaddingList &padding)
} // namespace
NEPadLayer::NEPadLayer()
- : _copy_kernel(), _mode(), _padding(), _memset_kernel(), _num_dimensions(0), _slice_functions(nullptr), _concat_functions(nullptr), _slice_results(nullptr), _concat_results(nullptr),
- _output_subtensor()
+ : _copy_kernel(), _mode(), _padding(), _memset_kernel(), _num_dimensions(0), _slice_functions(), _concat_functions(), _slice_results(), _concat_results(), _output_subtensor()
{
}
@@ -108,11 +107,16 @@ void NEPadLayer::configure_reflect_symmetric_mode(ITensor *input, ITensor *outpu
// Two strided slice functions will be required for each dimension padded as well as a
// concatenate function and the tensors to hold the temporary results.
- _slice_functions = arm_compute::support::cpp14::make_unique<NEStridedSlice[]>(2 * _num_dimensions);
- _slice_results = arm_compute::support::cpp14::make_unique<Tensor[]>(2 * _num_dimensions);
- _concat_functions = arm_compute::support::cpp14::make_unique<NEConcatenateLayer[]>(_num_dimensions);
- _concat_results = arm_compute::support::cpp14::make_unique<Tensor[]>(_num_dimensions - 1);
- Coordinates starts_before, ends_before, starts_after, ends_after, strides;
+ _slice_functions.resize(2 * _num_dimensions);
+ _slice_results.resize(2 * _num_dimensions);
+ _concat_functions.resize(_num_dimensions);
+ _concat_results.resize(_num_dimensions - 1);
+
+ Coordinates starts_before{};
+ Coordinates ends_before{};
+ Coordinates starts_after{};
+ Coordinates ends_after{};
+ Coordinates strides{};
ITensor *prev = input;
for(uint32_t i = 0; i < _num_dimensions; ++i)
{
@@ -158,7 +162,7 @@ void NEPadLayer::configure_reflect_symmetric_mode(ITensor *input, ITensor *outpu
if(i < prev->info()->num_dimensions())
{
_slice_functions[2 * i].configure(prev, &_slice_results[2 * i], starts_before, ends_before, strides, begin_mask_before, end_mask_before);
- concat_vector.push_back(&_slice_results[2 * i]);
+ concat_vector.emplace_back(&_slice_results[2 * i]);
}
else
{
@@ -172,7 +176,7 @@ void NEPadLayer::configure_reflect_symmetric_mode(ITensor *input, ITensor *outpu
if(i < prev->info()->num_dimensions())
{
_slice_functions[2 * i + 1].configure(prev, &_slice_results[2 * i + 1], starts_after, ends_after, strides, begin_mask_after, end_mask_after);
- concat_vector.push_back(&_slice_results[2 * i + 1]);
+ concat_vector.emplace_back(&_slice_results[2 * i + 1]);
}
else
{
diff --git a/src/runtime/NEON/functions/NEReduceMean.cpp b/src/runtime/NEON/functions/NEReduceMean.cpp
index 98d3ab943d..38adaa2a92 100644
--- a/src/runtime/NEON/functions/NEReduceMean.cpp
+++ b/src/runtime/NEON/functions/NEReduceMean.cpp
@@ -78,10 +78,10 @@ void NEReduceMean::configure(ITensor *input, const Coordinates &reduction_axis,
{
ARM_COMPUTE_ERROR_ON_NULLPTR(input);
- _reduction_ops = reduction_axis.num_dimensions();
- _reduction_kernels = arm_compute::support::cpp14::make_unique<NEReductionOperation[]>(_reduction_ops);
- _reduced_outs = arm_compute::support::cpp14::make_unique<Tensor[]>(_reduction_ops - (keep_dims ? 1 : 0));
- _keep_dims = keep_dims;
+ _reduction_ops = reduction_axis.num_dimensions();
+ _reduction_kernels.resize(_reduction_ops);
+ _reduced_outs.resize(_reduction_ops - (keep_dims ? 1 : 0));
+ _keep_dims = keep_dims;
Coordinates axis_local = reduction_axis;
const int input_dims = input->info()->num_dimensions();
@@ -96,9 +96,9 @@ void NEReduceMean::configure(ITensor *input, const Coordinates &reduction_axis,
// Perform reduction for every axis
for(unsigned int i = 0; i < _reduction_ops; ++i)
{
- TensorShape out_shape = i == 0 ? input->info()->tensor_shape() : (_reduced_outs.get() + i - 1)->info()->tensor_shape();
+ TensorShape out_shape = i == 0 ? input->info()->tensor_shape() : (&_reduced_outs[i - 1])->info()->tensor_shape();
out_shape.set(axis_local[i], 1);
- auto in = (i == 0) ? input : (_reduced_outs.get() + i - 1);
+ auto in = (i == 0) ? input : (&_reduced_outs[i - 1]);
if(i == _reduction_ops - 1 && keep_dims)
{
@@ -107,8 +107,8 @@ void NEReduceMean::configure(ITensor *input, const Coordinates &reduction_axis,
else
{
_reduced_outs[i].allocator()->init(TensorInfo(out_shape, input->info()->num_channels(), input->info()->data_type(), input->info()->quantization_info()));
- _memory_group.manage(_reduced_outs.get() + i);
- _reduction_kernels[i].configure(in, _reduced_outs.get() + i, axis_local[i], ReductionOperation::MEAN_SUM);
+ _memory_group.manage(&_reduced_outs[i]);
+ _reduction_kernels[i].configure(in, &_reduced_outs[i], axis_local[i], ReductionOperation::MEAN_SUM);
}
}
@@ -131,7 +131,7 @@ void NEReduceMean::configure(ITensor *input, const Coordinates &reduction_axis,
out_shape.remove_dimension(axis_local[i] - i);
}
auto_init_if_empty(*output->info(), input->info()->clone()->set_tensor_shape(out_shape));
- _reshape.configure(_reduced_outs.get() + _reduction_ops - 1, output);
+ _reshape.configure(&_reduced_outs[_reduction_ops - 1], output);
}
}
diff --git a/src/runtime/NEON/functions/NESplit.cpp b/src/runtime/NEON/functions/NESplit.cpp
index e947657934..0373ab6f88 100644
--- a/src/runtime/NEON/functions/NESplit.cpp
+++ b/src/runtime/NEON/functions/NESplit.cpp
@@ -42,8 +42,8 @@ NESplit::NESplit()
void NESplit::configure(const ITensor *input, const std::vector<ITensor *> &outputs, unsigned int axis)
{
// Create Slice functions
- _num_outputs = outputs.size();
- _slice_functions = arm_compute::support::cpp14::make_unique<NESlice[]>(_num_outputs);
+ _num_outputs = outputs.size();
+ _slice_functions.resize(_num_outputs);
// Get output shape
const TensorShape output_shape = arm_compute::misc::shape_calculator::compute_split_shape(input->info(), axis, _num_outputs);
diff --git a/src/runtime/NEON/functions/NEStackLayer.cpp b/src/runtime/NEON/functions/NEStackLayer.cpp
index 2f49c225a4..32350b052c 100644
--- a/src/runtime/NEON/functions/NEStackLayer.cpp
+++ b/src/runtime/NEON/functions/NEStackLayer.cpp
@@ -43,8 +43,8 @@ NEStackLayer::NEStackLayer() // NOLINT
void NEStackLayer::configure(const std::vector<ITensor *> &input, int axis, ITensor *output)
{
- _num_inputs = input.size();
- _stack_kernels = arm_compute::support::cpp14::make_unique<NEStackLayerKernel[]>(_num_inputs);
+ _num_inputs = input.size();
+ _stack_kernels.resize(_num_inputs);
// Wrap around negative values
const unsigned int axis_u = wrap_around(axis, static_cast<int>(input[0]->info()->num_dimensions() + 1));
diff --git a/src/runtime/NEON/functions/NEUnstack.cpp b/src/runtime/NEON/functions/NEUnstack.cpp
index 7532020973..21f35f8312 100644
--- a/src/runtime/NEON/functions/NEUnstack.cpp
+++ b/src/runtime/NEON/functions/NEUnstack.cpp
@@ -74,7 +74,7 @@ void NEUnstack::configure(const ITensor *input, const std::vector<ITensor *> &ou
// Wrap around negative values
const unsigned int axis_u = wrap_axis(axis, input->info());
_num_slices = std::min(outputs_vector_info.size(), input->info()->dimension(axis_u));
- _strided_slice_vector = arm_compute::support::cpp14::make_unique<NEStridedSlice[]>(_num_slices);
+ _strided_slice_vector.resize(_num_slices);
Coordinates slice_start;
int32_t slice_end_mask;
diff --git a/src/runtime/NEON/functions/NEWidthConcatenateLayer.cpp b/src/runtime/NEON/functions/NEWidthConcatenateLayer.cpp
index 9fce13cbd7..25b5216305 100644
--- a/src/runtime/NEON/functions/NEWidthConcatenateLayer.cpp
+++ b/src/runtime/NEON/functions/NEWidthConcatenateLayer.cpp
@@ -79,7 +79,7 @@ inline void NEWidthConcatenateLayer::configure_internal(std::vector<TensorType *
unsigned int width_offset = 0;
- _concat_kernels_vector = arm_compute::support::cpp14::make_unique<NEWidthConcatenateLayerKernel[]>(_num_inputs);
+ _concat_kernels_vector.resize(_num_inputs);
for(unsigned int i = 0; i < _num_inputs; ++i)
{
@@ -112,6 +112,6 @@ void NEWidthConcatenateLayer::run()
{
for(unsigned i = 0; i < _num_inputs; ++i)
{
- NEScheduler::get().schedule(_concat_kernels_vector.get() + i, Window::DimY);
+ NEScheduler::get().schedule(&_concat_kernels_vector[i], Window::DimY);
}
}
diff --git a/src/runtime/Pyramid.cpp b/src/runtime/Pyramid.cpp
index ebd65702a6..bc7b5501a0 100644
--- a/src/runtime/Pyramid.cpp
+++ b/src/runtime/Pyramid.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2017 ARM Limited.
+ * Copyright (c) 2016-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -45,8 +45,8 @@ void Pyramid::init_auto_padding(const PyramidInfo &info)
void Pyramid::internal_init(const PyramidInfo &info, bool auto_padding)
{
- _info = info;
- _pyramid = arm_compute::support::cpp14::make_unique<Tensor[]>(_info.num_levels());
+ _info = info;
+ _pyramid.resize(_info.num_levels());
size_t w = _info.width();
size_t h = _info.height();
@@ -56,11 +56,11 @@ void Pyramid::internal_init(const PyramidInfo &info, bool auto_padding)
TensorShape tensor_shape = _info.tensor_shape();
// Note: Look-up table used by the OpenVX sample implementation
- const float c_orbscale[4] = { 0.5f,
- SCALE_PYRAMID_ORB,
- SCALE_PYRAMID_ORB * SCALE_PYRAMID_ORB,
- SCALE_PYRAMID_ORB *SCALE_PYRAMID_ORB * SCALE_PYRAMID_ORB
- };
+ const std::array<float, 4> c_orbscale = { 0.5f,
+ SCALE_PYRAMID_ORB,
+ SCALE_PYRAMID_ORB * SCALE_PYRAMID_ORB,
+ SCALE_PYRAMID_ORB *SCALE_PYRAMID_ORB * SCALE_PYRAMID_ORB
+ };
for(size_t i = 0; i < _info.num_levels(); ++i)
{
@@ -71,7 +71,7 @@ void Pyramid::internal_init(const PyramidInfo &info, bool auto_padding)
tensor_info.auto_padding();
}
- (_pyramid.get() + i)->allocator()->init(tensor_info);
+ _pyramid[i].allocator()->init(tensor_info);
if(is_orb_scale)
{
@@ -99,11 +99,9 @@ void Pyramid::internal_init(const PyramidInfo &info, bool auto_padding)
void Pyramid::allocate()
{
- ARM_COMPUTE_ERROR_ON(_pyramid == nullptr);
-
for(size_t i = 0; i < _info.num_levels(); ++i)
{
- (_pyramid.get() + i)->allocator()->allocate();
+ _pyramid[i].allocator()->allocate();
}
}
@@ -116,5 +114,5 @@ Tensor *Pyramid::get_pyramid_level(size_t index) const
{
ARM_COMPUTE_ERROR_ON(index >= _info.num_levels());
- return (_pyramid.get() + index);
-}
+ return &_pyramid[index];
+}
\ No newline at end of file
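Pyramid's level-scale lookup table becomes a std::array and level access becomes plain indexing into the tensor vector. A compact sketch of the same table-plus-indexing shape; the scale constant and the 640-pixel base width are illustrative values, not taken from the library.

#include <array>
#include <cstddef>
#include <cstdio>
#include <vector>

int main()
{
    constexpr float scale = 0.8408964153f;   // illustrative ORB-style scale factor
    const std::array<float, 4> orb_scale = { 0.5f, scale, scale * scale, scale * scale * scale };

    std::vector<float> level_width(orb_scale.size());
    for(std::size_t i = 0; i < orb_scale.size(); ++i)
    {
        level_width[i] = 640.0f * orb_scale[i];   // plain indexing instead of (ptr.get() + i)
    }
    std::printf("smallest level width: %.1f\n", level_width.back());
    return 0;
}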