Diffstat (limited to 'src/runtime/CL')
-rw-r--r--  src/runtime/CL/CLHelpers.cpp                               | 18
-rw-r--r--  src/runtime/CL/CLMemory.cpp                                |  6
-rw-r--r--  src/runtime/CL/CLMultiHOG.cpp                              |  9
-rw-r--r--  src/runtime/CL/CLPyramid.cpp                               | 14
-rw-r--r--  src/runtime/CL/CLTensorAllocator.cpp                       |  2
-rw-r--r--  src/runtime/CL/CLTuner.cpp                                 |  2
-rw-r--r--  src/runtime/CL/functions/CLConvolution.cpp                 | 12
-rw-r--r--  src/runtime/CL/functions/CLDepthConcatenateLayer.cpp      |  7
-rw-r--r--  src/runtime/CL/functions/CLDepthwiseConvolutionLayer.cpp  |  3
-rw-r--r--  src/runtime/CL/functions/CLFFT1D.cpp                       |  2
-rw-r--r--  src/runtime/CL/functions/CLGEMMConvolutionLayer.cpp       |  4
-rw-r--r--  src/runtime/CL/functions/CLGaussianPyramid.cpp             | 14
-rw-r--r--  src/runtime/CL/functions/CLHOGMultiDetection.cpp          | 29
-rw-r--r--  src/runtime/CL/functions/CLHarrisCorners.cpp               |  8
-rw-r--r--  src/runtime/CL/functions/CLLaplacianPyramid.cpp            |  6
-rw-r--r--  src/runtime/CL/functions/CLLaplacianReconstruct.cpp        |  8
-rw-r--r--  src/runtime/CL/functions/CLOpticalFlow.cpp                 | 16
-rw-r--r--  src/runtime/CL/functions/CLPadLayer.cpp                    | 17
-rw-r--r--  src/runtime/CL/functions/CLReduceMean.cpp                  | 18
-rw-r--r--  src/runtime/CL/functions/CLReductionOperation.cpp          | 28
-rw-r--r--  src/runtime/CL/functions/CLSplit.cpp                       |  6
-rw-r--r--  src/runtime/CL/functions/CLStackLayer.cpp                  |  4
-rw-r--r--  src/runtime/CL/functions/CLUnstack.cpp                     |  4
-rw-r--r--  src/runtime/CL/functions/CLWidthConcatenateLayer.cpp      |  2
-rw-r--r--  src/runtime/CL/tuners/CLLWSList.cpp                        |  4
25 files changed, 124 insertions(+), 119 deletions(-)
diff --git a/src/runtime/CL/CLHelpers.cpp b/src/runtime/CL/CLHelpers.cpp
index 533e6fabfa..8bc7b8eb7b 100644
--- a/src/runtime/CL/CLHelpers.cpp
+++ b/src/runtime/CL/CLHelpers.cpp
@@ -47,7 +47,7 @@ void printf_callback(const char *buffer, unsigned int len, size_t complete, void
* @return A pointer to the context properties which can be used to create an opencl context
*/
-void initialise_context_properties(const cl::Platform &platform, const cl::Device &device, cl_context_properties prop[7])
+void initialise_context_properties(const cl::Platform &platform, const cl::Device &device, std::array<cl_context_properties, 7> &prop)
{
ARM_COMPUTE_UNUSED(device);
#if defined(ARM_COMPUTE_ASSERTS_ENABLED)
@@ -55,7 +55,7 @@ void initialise_context_properties(const cl::Platform &platform, const cl::Devic
if(arm_compute::device_supports_extension(device, "cl_arm_printf"))
{
// Create a cl_context with a printf_callback and user specified buffer size.
- cl_context_properties properties_printf[] =
+ std::array<cl_context_properties, 7> properties_printf =
{
CL_CONTEXT_PLATFORM, reinterpret_cast<cl_context_properties>(platform()),
// Enable a printf callback function for this context.
@@ -65,17 +65,17 @@ void initialise_context_properties(const cl::Platform &platform, const cl::Devic
CL_PRINTF_BUFFERSIZE_ARM, 0x1000,
0
};
- std::copy_n(properties_printf, 7, prop);
+ prop = properties_printf;
}
else
#endif // defined(ARM_COMPUTE_ASSERTS_ENABLED)
{
- cl_context_properties properties[] =
+ std::array<cl_context_properties, 3> properties =
{
CL_CONTEXT_PLATFORM, reinterpret_cast<cl_context_properties>(platform()),
0
};
- std::copy_n(properties, 3, prop);
+ std::copy(properties.begin(), properties.end(), prop.begin());
};
}
} //namespace
@@ -94,11 +94,11 @@ create_opencl_context_and_device()
std::vector<cl::Device> platform_devices;
p.getDevices(CL_DEVICE_TYPE_DEFAULT, &platform_devices);
ARM_COMPUTE_ERROR_ON_MSG(platform_devices.size() == 0, "Couldn't find any OpenCL device");
- device = platform_devices[0];
- cl_int err = CL_SUCCESS;
- cl_context_properties properties[7] = { 0, 0, 0, 0, 0, 0, 0 };
+ device = platform_devices[0];
+ cl_int err = CL_SUCCESS;
+ std::array<cl_context_properties, 7> properties = { 0, 0, 0, 0, 0, 0, 0 };
initialise_context_properties(p, device, properties);
- cl::Context cl_context = cl::Context(device, properties, nullptr, nullptr, &err);
+ cl::Context cl_context = cl::Context(device, properties.data(), nullptr, nullptr, &err);
ARM_COMPUTE_ERROR_ON_MSG(err != CL_SUCCESS, "Failed to create OpenCL context");
return std::make_tuple(cl_context, device, err);
}
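
The `std::array<cl_context_properties, 7> &` parameter keeps the element count in the type, whereas the old `cl_context_properties prop[7]` parameter silently decayed to a bare pointer, losing the size. A minimal standalone sketch of the difference, using `long` as a stand-in for `cl_context_properties`:

    #include <array>
    #include <iostream>

    // C-style array parameter: the "7" is decorative, the type is really long*.
    void fill_decayed(long prop[7])
    {
        prop[0] = 1;
    }

    // std::array by reference: the length is part of the type, so a caller
    // passing an array of the wrong size fails to compile.
    void fill_checked(std::array<long, 7> &prop)
    {
        prop.fill(0);
        prop[0] = 1;
    }

    int main()
    {
        std::array<long, 7> props{}; // value-initialised: all seven zeros
        fill_checked(props);
        fill_decayed(props.data());  // .data() keeps C interop available
        std::cout << props.size() << std::endl; // 7
    }
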
diff --git a/src/runtime/CL/CLMemory.cpp b/src/runtime/CL/CLMemory.cpp
index 5bea85cfae..557378b6f1 100644
--- a/src/runtime/CL/CLMemory.cpp
+++ b/src/runtime/CL/CLMemory.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018 ARM Limited.
+ * Copyright (c) 2018-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -33,8 +33,8 @@ CLMemory::CLMemory()
{
}
-CLMemory::CLMemory(std::shared_ptr<ICLMemoryRegion> memory)
- : _region(nullptr), _region_owned(std::move(memory))
+CLMemory::CLMemory(const std::shared_ptr<ICLMemoryRegion> &memory)
+ : _region(nullptr), _region_owned(memory)
{
_region_owned = memory;
_region = _region_owned.get();
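
Passing the `std::shared_ptr` by const reference bumps the reference count exactly once, in the member initialiser. It also sidesteps a subtlety in the old signature: with by-value plus `std::move` in the initialiser, `memory` was already empty by the time the body assigned from it again. A sketch of the const-reference form with a hypothetical `Region` type:

    #include <cassert>
    #include <memory>

    struct Region { };

    class Memory
    {
    public:
        // One refcount increment, in the initialiser list; the parameter
        // itself is never copied or moved from.
        explicit Memory(const std::shared_ptr<Region> &memory)
            : _region_owned(memory), _region(_region_owned.get())
        {
        }

        Region *region() const { return _region; }

    private:
        std::shared_ptr<Region> _region_owned;
        Region                 *_region;
    };

    int main()
    {
        auto region = std::make_shared<Region>();
        Memory mem(region);
        assert(region.use_count() == 2); // caller + Memory
        assert(mem.region() == region.get());
    }
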
diff --git a/src/runtime/CL/CLMultiHOG.cpp b/src/runtime/CL/CLMultiHOG.cpp
index 88d45acd12..2577ec08ac 100644
--- a/src/runtime/CL/CLMultiHOG.cpp
+++ b/src/runtime/CL/CLMultiHOG.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -30,8 +30,9 @@
using namespace arm_compute;
CLMultiHOG::CLMultiHOG(size_t num_models)
- : _num_models(num_models), _model(arm_compute::support::cpp14::make_unique<CLHOG[]>(_num_models))
+ : _num_models(num_models), _model()
{
+ _model.resize(_num_models);
}
size_t CLMultiHOG::num_models() const
@@ -42,11 +43,11 @@ size_t CLMultiHOG::num_models() const
ICLHOG *CLMultiHOG::cl_model(size_t index)
{
ARM_COMPUTE_ERROR_ON(index >= _num_models);
- return (_model.get() + index);
+ return (&_model[index]);
}
const ICLHOG *CLMultiHOG::cl_model(size_t index) const
{
ARM_COMPUTE_ERROR_ON(index >= _num_models);
- return (_model.get() + index);
+ return (&_model[index]);
}
\ No newline at end of file
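
Replacing `make_unique<CLHOG[]>` with a `std::vector` member is the recurring pattern in this patch: `resize` value-initialises the elements, the container tracks its own size, and element access becomes `&_model[index]` instead of pointer arithmetic on the `unique_ptr`. A condensed sketch with a stand-in `Model` type:

    #include <cassert>
    #include <cstddef>
    #include <vector>

    struct Model { int id = 0; };

    class MultiModel
    {
    public:
        explicit MultiModel(std::size_t num_models) : _models()
        {
            _models.resize(num_models); // default-constructs each Model
        }

        Model *model(std::size_t index)
        {
            assert(index < _models.size());
            return &_models[index]; // was: _models.get() + index
        }

    private:
        std::vector<Model> _models;
    };

    int main()
    {
        MultiModel mm(3);
        mm.model(1)->id = 42;
    }
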
diff --git a/src/runtime/CL/CLPyramid.cpp b/src/runtime/CL/CLPyramid.cpp
index 865f389f7f..6d5dba0031 100644
--- a/src/runtime/CL/CLPyramid.cpp
+++ b/src/runtime/CL/CLPyramid.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2017 ARM Limited.
+ * Copyright (c) 2016-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -35,7 +35,7 @@
using namespace arm_compute;
CLPyramid::CLPyramid()
- : _info(), _pyramid(nullptr)
+ : _info(), _pyramid()
{
}
@@ -51,8 +51,8 @@ void CLPyramid::init_auto_padding(const PyramidInfo &info)
void CLPyramid::internal_init(const PyramidInfo &info, bool auto_padding)
{
- _info = info;
- _pyramid = arm_compute::support::cpp14::make_unique<CLTensor[]>(_info.num_levels());
+ _info = info;
+ _pyramid.resize(_info.num_levels());
size_t w = _info.width();
size_t h = _info.height();
@@ -109,11 +109,9 @@ void CLPyramid::internal_init(const PyramidInfo &info, bool auto_padding)
void CLPyramid::allocate()
{
- ARM_COMPUTE_ERROR_ON(_pyramid == nullptr);
-
for(size_t i = 0; i < _info.num_levels(); ++i)
{
- (_pyramid.get() + i)->allocator()->allocate();
+ _pyramid[i].allocator()->allocate();
}
}
@@ -126,5 +124,5 @@ CLTensor *CLPyramid::get_pyramid_level(size_t index) const
{
ARM_COMPUTE_ERROR_ON(index >= _info.num_levels());
- return (_pyramid.get() + index);
+ return &_pyramid[index];
}
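
One subtlety here: `get_pyramid_level` is a `const` member yet hands back a mutable `CLTensor *`. With a `std::vector` member that only compiles if the vector is declared `mutable` in the header (which this patch presumably does), because `operator[]` on a const vector yields a const reference. The shape of it, with stand-in types:

    #include <cstddef>
    #include <vector>

    struct Tensor { };

    class Pyramid
    {
    public:
        explicit Pyramid(std::size_t levels)
        {
            _pyramid.resize(levels);
        }

        Tensor *get_level(std::size_t index) const
        {
            return &_pyramid[index]; // needs the mutable below to compile
        }

    private:
        mutable std::vector<Tensor> _pyramid;
    };

    int main()
    {
        const Pyramid p(4);
        Tensor *level2 = p.get_level(2);
        (void)level2;
    }
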
diff --git a/src/runtime/CL/CLTensorAllocator.cpp b/src/runtime/CL/CLTensorAllocator.cpp
index 2ce64551ae..101e4f1cd4 100644
--- a/src/runtime/CL/CLTensorAllocator.cpp
+++ b/src/runtime/CL/CLTensorAllocator.cpp
@@ -34,7 +34,7 @@ const cl::Buffer CLTensorAllocator::_empty_buffer = cl::Buffer();
namespace
{
-std::unique_ptr<ICLMemoryRegion> allocate_region(cl::Context context, size_t size, cl_uint alignment)
+std::unique_ptr<ICLMemoryRegion> allocate_region(const cl::Context &context, size_t size, cl_uint alignment)
{
// Try fine-grain SVM
std::unique_ptr<ICLMemoryRegion> region = support::cpp14::make_unique<CLFineSVMMemoryRegion>(context,
diff --git a/src/runtime/CL/CLTuner.cpp b/src/runtime/CL/CLTuner.cpp
index 8f8d3e7c3a..929def24cc 100644
--- a/src/runtime/CL/CLTuner.cpp
+++ b/src/runtime/CL/CLTuner.cpp
@@ -275,7 +275,7 @@ void CLTuner::save_to_file(const std::string &filename) const
std::ofstream fs;
fs.exceptions(std::ifstream::failbit | std::ifstream::badbit);
fs.open(filename, std::ios::out);
- for(auto kernel_data : _lws_table)
+ for(auto const &kernel_data : _lws_table)
{
fs << kernel_data.first << ";" << kernel_data.second[0] << ";" << kernel_data.second[1] << ";" << kernel_data.second[2] << std::endl;
}
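
`auto const &kernel_data` binds to the map's stored pair instead of copying it; with a plain `auto`, each iteration would copy the key string and the value. A generic sketch mirroring the loop above:

    #include <array>
    #include <iostream>
    #include <map>
    #include <string>

    int main()
    {
        std::map<std::string, std::array<int, 3>> lws_table{ { "kernel_a", { 4, 4, 1 } } };

        // for(auto kernel_data : lws_table)     // copies key string + value per iteration
        for(auto const &kernel_data : lws_table) // binds to the stored pair, no copies
        {
            std::cout << kernel_data.first << ";" << kernel_data.second[0] << ";"
                      << kernel_data.second[1] << ";" << kernel_data.second[2] << std::endl;
        }
    }
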
diff --git a/src/runtime/CL/functions/CLConvolution.cpp b/src/runtime/CL/functions/CLConvolution.cpp
index 2f43ce1974..f09585e7ec 100644
--- a/src/runtime/CL/functions/CLConvolution.cpp
+++ b/src/runtime/CL/functions/CLConvolution.cpp
@@ -58,13 +58,13 @@ void CLConvolutionSquare<matrix_size>::configure(ICLTensor *input, ICLTensor *ou
{
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8);
ARM_COMPUTE_ERROR_ON(conv == nullptr);
- int16_t conv_col[matrix_size];
- int16_t conv_row[matrix_size];
- _is_separable = separate_matrix(conv, conv_col, conv_row, matrix_size);
+ std::array<int16_t, matrix_size> conv_col{ 0 };
+ std::array<int16_t, matrix_size> conv_row{ 0 };
+ _is_separable = separate_matrix(conv, conv_col.data(), conv_row.data(), matrix_size);
if(_is_separable)
{
- std::pair<DataType, DataType> type_pair = data_type_for_convolution(conv_col, conv_row, matrix_size);
+ std::pair<DataType, DataType> type_pair = data_type_for_convolution(conv_col.data(), conv_row.data(), matrix_size);
_tmp.allocator()->init(TensorInfo(input->info()->tensor_shape(), 1, type_pair.first));
// Manage intermediate buffers
@@ -75,8 +75,8 @@ void CLConvolutionSquare<matrix_size>::configure(ICLTensor *input, ICLTensor *ou
scale = calculate_matrix_scale(conv, matrix_size);
}
- _kernel_hor.configure(input, &_tmp, conv_row, border_mode == BorderMode::UNDEFINED);
- _kernel_vert.configure(&_tmp, output, conv_col, scale, border_mode == BorderMode::UNDEFINED, type_pair.second);
+ _kernel_hor.configure(input, &_tmp, conv_row.data(), border_mode == BorderMode::UNDEFINED);
+ _kernel_vert.configure(&_tmp, output, conv_col.data(), scale, border_mode == BorderMode::UNDEFINED, type_pair.second);
_border_handler.configure(input, _kernel_hor.border_size(), border_mode, PixelValue(constant_border_value));
// Allocate intermediate buffer
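
`std::array<int16_t, matrix_size>` with a `{ 0 }` initialiser gives zero-filled stack storage, and `.data()` feeds the existing pointer-based helpers without changing their signatures. A standalone sketch (the `split` helper below is a stand-in for `separate_matrix`, not the library function):

    #include <array>
    #include <cstddef>
    #include <cstdint>

    // Stand-in for a legacy helper that takes raw pointers.
    bool split(const int16_t *conv, int16_t *col, int16_t *row, std::size_t n)
    {
        for(std::size_t i = 0; i < n; ++i)
        {
            col[i] = conv[i];
            row[i] = conv[i];
        }
        return true;
    }

    int main()
    {
        constexpr std::size_t matrix_size = 3;
        std::array<int16_t, matrix_size> conv{ 1, 2, 1 };
        std::array<int16_t, matrix_size> conv_col{ 0 }; // first element 0, rest value-initialised to 0
        std::array<int16_t, matrix_size> conv_row{ 0 };
        return split(conv.data(), conv_col.data(), conv_row.data(), matrix_size) ? 0 : 1;
    }
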
diff --git a/src/runtime/CL/functions/CLDepthConcatenateLayer.cpp b/src/runtime/CL/functions/CLDepthConcatenateLayer.cpp
index 4a5f845631..f687e54552 100644
--- a/src/runtime/CL/functions/CLDepthConcatenateLayer.cpp
+++ b/src/runtime/CL/functions/CLDepthConcatenateLayer.cpp
@@ -36,8 +36,7 @@
using namespace arm_compute;
CLDepthConcatenateLayer::CLDepthConcatenateLayer() // NOLINT
- : _inputs_vector(),
- _concat_kernels_vector(),
+ : _concat_kernels_vector(),
_border_handlers_vector(),
_num_inputs(0)
{
@@ -53,8 +52,8 @@ void CLDepthConcatenateLayer::configure(const std::vector<ICLTensor *> &inputs_v
inputs_vector_info.emplace_back(inputs_vector.at(i)->info());
}
- _concat_kernels_vector = arm_compute::support::cpp14::make_unique<CLDepthConcatenateLayerKernel[]>(_num_inputs);
- _border_handlers_vector = arm_compute::support::cpp14::make_unique<CLFillBorderKernel[]>(_num_inputs);
+ _concat_kernels_vector.resize(_num_inputs);
+ _border_handlers_vector.resize(_num_inputs);
TensorShape output_shape = arm_compute::misc::shape_calculator::calculate_concatenate_shape(inputs_vector_info, Window::DimZ);
diff --git a/src/runtime/CL/functions/CLDepthwiseConvolutionLayer.cpp b/src/runtime/CL/functions/CLDepthwiseConvolutionLayer.cpp
index 8211104bda..97b0a01331 100644
--- a/src/runtime/CL/functions/CLDepthwiseConvolutionLayer.cpp
+++ b/src/runtime/CL/functions/CLDepthwiseConvolutionLayer.cpp
@@ -322,7 +322,8 @@ void CLDepthwiseConvolutionLayer::configure(ICLTensor *input, const ICLTensor *w
const QuantizationInfo output_quant_info = (output->info()->total_size() == 0) ? input->info()->quantization_info() : output->info()->quantization_info();
float multiplier = input->info()->quantization_info().scale * weights->info()->quantization_info().scale / output_quant_info.scale;
- int output_multiplier, output_shift;
+ int output_multiplier;
+ int output_shift;
quantization::calculate_quantized_multiplier_less_than_one(multiplier, &output_multiplier, &output_shift);
_output_stage_kernel.configure(&_output_reshaped, biases, output, output_multiplier, output_shift, output_quant_info.offset);
_output_reshaped.allocator()->allocate();
diff --git a/src/runtime/CL/functions/CLFFT1D.cpp b/src/runtime/CL/functions/CLFFT1D.cpp
index 67111e7e5c..49b5a2a2e6 100644
--- a/src/runtime/CL/functions/CLFFT1D.cpp
+++ b/src/runtime/CL/functions/CLFFT1D.cpp
@@ -62,7 +62,7 @@ void CLFFT1D::configure(const ICLTensor *input, ICLTensor *output, const FFT1DIn
// Create and configure FFT kernels
unsigned int Nx = 1;
_num_ffts = decomposed_vector.size();
- _fft_kernels = arm_compute::support::cpp14::make_unique<CLFFTRadixStageKernel[]>(_num_ffts);
+ _fft_kernels.resize(_num_ffts);
for(unsigned int i = 0; i < _num_ffts; ++i)
{
const unsigned int radix_for_stage = decomposed_vector.at(i);
diff --git a/src/runtime/CL/functions/CLGEMMConvolutionLayer.cpp b/src/runtime/CL/functions/CLGEMMConvolutionLayer.cpp
index 40ce6b4e0f..03d516f703 100644
--- a/src/runtime/CL/functions/CLGEMMConvolutionLayer.cpp
+++ b/src/runtime/CL/functions/CLGEMMConvolutionLayer.cpp
@@ -372,7 +372,9 @@ Status CLGEMMConvolutionLayer::validate(const ITensorInfo *input, const ITensorI
const unsigned int kernel_width = weights->dimension(idx_width);
const unsigned int kernel_height = weights->dimension(idx_height);
- TensorInfo im2col_reshaped_info, info_gemm, weights_reshaped_info;
+ TensorInfo im2col_reshaped_info{};
+ TensorInfo info_gemm{};
+ TensorInfo weights_reshaped_info{};
const ITensorInfo *gemm_input_to_use = input;
const ITensorInfo *gemm_output_to_use = output;
const ITensorInfo *weights_to_use = weights;
diff --git a/src/runtime/CL/functions/CLGaussianPyramid.cpp b/src/runtime/CL/functions/CLGaussianPyramid.cpp
index fd82769004..b671b23c87 100644
--- a/src/runtime/CL/functions/CLGaussianPyramid.cpp
+++ b/src/runtime/CL/functions/CLGaussianPyramid.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018 ARM Limited.
+ * Copyright (c) 2017-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -76,10 +76,10 @@ void CLGaussianPyramidHalf::configure(ICLTensor *input, CLPyramid *pyramid, Bord
if(num_levels > 1)
{
- _horizontal_border_handler = arm_compute::support::cpp14::make_unique<CLFillBorderKernel[]>(num_levels - 1);
- _vertical_border_handler = arm_compute::support::cpp14::make_unique<CLFillBorderKernel[]>(num_levels - 1);
- _horizontal_reduction = arm_compute::support::cpp14::make_unique<CLGaussianPyramidHorKernel[]>(num_levels - 1);
- _vertical_reduction = arm_compute::support::cpp14::make_unique<CLGaussianPyramidVertKernel[]>(num_levels - 1);
+ _horizontal_border_handler.resize(num_levels - 1);
+ _vertical_border_handler.resize(num_levels - 1);
+ _horizontal_reduction.resize(num_levels - 1);
+ _vertical_reduction.resize(num_levels - 1);
// Apply half scale to the X dimension of the tensor shape
TensorShape tensor_shape = pyramid->info()->tensor_shape();
@@ -153,8 +153,8 @@ void CLGaussianPyramidOrb::configure(ICLTensor *input, CLPyramid *pyramid, Borde
if(num_levels > 1)
{
- _gauss5x5 = arm_compute::support::cpp14::make_unique<CLGaussian5x5[]>(num_levels - 1);
- _scale_nearest = arm_compute::support::cpp14::make_unique<CLScaleKernel[]>(num_levels - 1);
+ _gauss5x5.resize(num_levels - 1);
+ _scale_nearest.resize(num_levels - 1);
PyramidInfo pyramid_info(num_levels - 1, SCALE_PYRAMID_ORB, pyramid->info()->tensor_shape(), Format::U8);
diff --git a/src/runtime/CL/functions/CLHOGMultiDetection.cpp b/src/runtime/CL/functions/CLHOGMultiDetection.cpp
index 0865f50fd5..f799d61b16 100644
--- a/src/runtime/CL/functions/CLHOGMultiDetection.cpp
+++ b/src/runtime/CL/functions/CLHOGMultiDetection.cpp
@@ -128,12 +128,11 @@ void CLHOGMultiDetection::configure(ICLTensor *input, const ICLMultiHOG *multi_h
_num_block_norm_kernel = input_block_norm.size(); // Number of CLHOGBlockNormalizationKernel kernels to compute
_num_hog_detect_kernel = input_hog_detect.size(); // Number of CLHOGDetector functions to compute
- _orient_bin_kernel = arm_compute::support::cpp14::make_unique<CLHOGOrientationBinningKernel[]>(_num_orient_bin_kernel);
- _block_norm_kernel = arm_compute::support::cpp14::make_unique<CLHOGBlockNormalizationKernel[]>(_num_block_norm_kernel);
- _hog_detect_kernel = arm_compute::support::cpp14::make_unique<CLHOGDetector[]>(_num_hog_detect_kernel);
- _non_maxima_kernel = arm_compute::support::cpp14::make_unique<CPPDetectionWindowNonMaximaSuppressionKernel>();
- _hog_space = arm_compute::support::cpp14::make_unique<CLTensor[]>(_num_orient_bin_kernel);
- _hog_norm_space = arm_compute::support::cpp14::make_unique<CLTensor[]>(_num_block_norm_kernel);
+ _orient_bin_kernel.resize(_num_orient_bin_kernel);
+ _block_norm_kernel.resize(_num_block_norm_kernel);
+ _hog_detect_kernel.resize(_num_hog_detect_kernel);
+ _hog_space.resize(_num_orient_bin_kernel);
+ _hog_norm_space.resize(_num_block_norm_kernel);
// Allocate tensors for magnitude and phase
TensorInfo info_mag(shape_img, Format::S16);
@@ -172,10 +171,10 @@ void CLHOGMultiDetection::configure(ICLTensor *input, const ICLMultiHOG *multi_h
_hog_space[i].allocator()->init(info_space);
// Manage intermediate buffers
- _memory_group.manage(_hog_space.get() + i);
+ _memory_group.manage(&_hog_space[i]);
// Initialise orientation binning kernel
- _orient_bin_kernel[i].configure(&_mag, &_phase, _hog_space.get() + i, multi_hog->model(idx_multi_hog)->info());
+ _orient_bin_kernel[i].configure(&_mag, &_phase, &_hog_space[i], multi_hog->model(idx_multi_hog)->info());
}
// Allocate intermediate tensors
@@ -193,10 +192,10 @@ void CLHOGMultiDetection::configure(ICLTensor *input, const ICLMultiHOG *multi_h
_hog_norm_space[i].allocator()->init(tensor_info);
// Manage intermediate buffers
- _memory_group.manage(_hog_norm_space.get() + i);
+ _memory_group.manage(&_hog_norm_space[i]);
// Initialize block normalization kernel
- _block_norm_kernel[i].configure(_hog_space.get() + idx_orient_bin, _hog_norm_space.get() + i, multi_hog->model(idx_multi_hog)->info());
+ _block_norm_kernel[i].configure(&_hog_space[idx_orient_bin], &_hog_norm_space[i], multi_hog->model(idx_multi_hog)->info());
}
// Allocate intermediate tensors
@@ -212,13 +211,13 @@ void CLHOGMultiDetection::configure(ICLTensor *input, const ICLMultiHOG *multi_h
{
const size_t idx_block_norm = input_hog_detect[i];
- _hog_detect_kernel[i].configure(_hog_norm_space.get() + idx_block_norm, multi_hog->cl_model(i), detection_windows, detection_window_strides->at(i), threshold, i);
+ _hog_detect_kernel[i].configure(&_hog_norm_space[idx_block_norm], multi_hog->cl_model(i), detection_windows, detection_window_strides->at(i), threshold, i);
}
detection_window_strides->unmap(CLScheduler::get().queue());
// Configure non maxima suppression kernel
- _non_maxima_kernel->configure(_detection_windows, min_distance);
+ _non_maxima_kernel.configure(_detection_windows, min_distance);
// Allocate intermediate tensors
for(size_t i = 0; i < _num_block_norm_kernel; ++i)
@@ -242,13 +241,13 @@ void CLHOGMultiDetection::run()
// Run orientation binning kernel
for(size_t i = 0; i < _num_orient_bin_kernel; ++i)
{
- CLScheduler::get().enqueue(*(_orient_bin_kernel.get() + i), false);
+ CLScheduler::get().enqueue(_orient_bin_kernel[i], false);
}
// Run block normalization kernel
for(size_t i = 0; i < _num_block_norm_kernel; ++i)
{
- CLScheduler::get().enqueue(*(_block_norm_kernel.get() + i), false);
+ CLScheduler::get().enqueue(_block_norm_kernel[i], false);
}
// Run HOG detector kernel
@@ -262,7 +261,7 @@ void CLHOGMultiDetection::run()
{
// Map detection windows array before computing non maxima suppression
_detection_windows->map(CLScheduler::get().queue(), true);
- Scheduler::get().schedule(_non_maxima_kernel.get(), Window::DimY);
+ Scheduler::get().schedule(&_non_maxima_kernel, Window::DimY);
_detection_windows->unmap(CLScheduler::get().queue());
}
}
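
Alongside the array-to-vector moves, `_non_maxima_kernel` drops its `unique_ptr` wrapper entirely: a single always-present kernel can simply live inside the function object, removing a heap allocation and a level of indirection. The shape of the change, sketched with a hypothetical `Kernel`:

    #include <memory>

    struct Kernel
    {
        void configure(float min_distance) { _min_distance = min_distance; }
        float _min_distance = 0.f;
    };

    // Before: heap-allocated, accessed through ->, address taken via .get()
    struct FunctionBefore
    {
        FunctionBefore() : _kernel(std::make_unique<Kernel>()) { }
        void configure() { _kernel->configure(1.f); }
        std::unique_ptr<Kernel> _kernel;
    };

    // After: a plain member, accessed through ., address taken via &
    struct FunctionAfter
    {
        void configure() { _kernel.configure(1.f); }
        Kernel _kernel;
    };

    int main()
    {
        FunctionBefore before;
        before.configure();
        FunctionAfter after;
        after.configure();
    }
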
diff --git a/src/runtime/CL/functions/CLHarrisCorners.cpp b/src/runtime/CL/functions/CLHarrisCorners.cpp
index 342d1cad49..67f550d318 100644
--- a/src/runtime/CL/functions/CLHarrisCorners.cpp
+++ b/src/runtime/CL/functions/CLHarrisCorners.cpp
@@ -55,7 +55,7 @@ CLHarrisCorners::CLHarrisCorners(std::shared_ptr<IMemoryManager> memory_manager)
_gy(),
_score(),
_nonmax(),
- _corners_list(nullptr),
+ _corners_list(),
_num_corner_candidates(0),
_corners(nullptr)
{
@@ -84,7 +84,7 @@ void CLHarrisCorners::configure(ICLImage *input, float threshold, float min_dist
_score.allocator()->init(info_f32);
_nonmax.allocator()->init(info_f32);
- _corners_list = arm_compute::support::cpp14::make_unique<InternalKeypoint[]>(shape.x() * shape.y());
+ _corners_list.resize(shape.x() * shape.y());
// Manage intermediate buffers
_memory_group.manage(&_gx);
@@ -146,13 +146,13 @@ void CLHarrisCorners::configure(ICLImage *input, float threshold, float min_dist
_score.allocator()->allocate();
// Init corner candidates kernel
- _candidates.configure(&_nonmax, _corners_list.get(), &_num_corner_candidates);
+ _candidates.configure(&_nonmax, _corners_list.data(), &_num_corner_candidates);
// Allocate intermediate buffers
_nonmax.allocator()->allocate();
// Init euclidean distance
- _sort_euclidean.configure(_corners_list.get(), _corners, &_num_corner_candidates, min_dist);
+ _sort_euclidean.configure(_corners_list.data(), _corners, &_num_corner_candidates, min_dist);
}
void CLHarrisCorners::run()
diff --git a/src/runtime/CL/functions/CLLaplacianPyramid.cpp b/src/runtime/CL/functions/CLLaplacianPyramid.cpp
index 559b57fd8d..a11851898c 100644
--- a/src/runtime/CL/functions/CLLaplacianPyramid.cpp
+++ b/src/runtime/CL/functions/CLLaplacianPyramid.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018 ARM Limited.
+ * Copyright (c) 2017-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -70,8 +70,8 @@ void CLLaplacianPyramid::configure(ICLTensor *input, CLPyramid *pyramid, ICLTens
// Create Gaussian Pyramid function
_gaussian_pyr_function.configure(input, &_gauss_pyr, border_mode, constant_border_value);
- _convf = arm_compute::support::cpp14::make_unique<CLGaussian5x5[]>(_num_levels);
- _subf = arm_compute::support::cpp14::make_unique<CLArithmeticSubtraction[]>(_num_levels);
+ _convf.resize(_num_levels);
+ _subf.resize(_num_levels);
for(unsigned int i = 0; i < _num_levels; ++i)
{
diff --git a/src/runtime/CL/functions/CLLaplacianReconstruct.cpp b/src/runtime/CL/functions/CLLaplacianReconstruct.cpp
index 911c9b3b27..13116bf08d 100644
--- a/src/runtime/CL/functions/CLLaplacianReconstruct.cpp
+++ b/src/runtime/CL/functions/CLLaplacianReconstruct.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018 ARM Limited.
+ * Copyright (c) 2017-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -63,8 +63,8 @@ void CLLaplacianReconstruct::configure(const CLPyramid *pyramid, ICLTensor *inpu
_tmp_pyr.init(pyramid_info);
// Allocate add and scale functions. Level 0 does not need to be scaled.
- _addf = arm_compute::support::cpp14::make_unique<CLArithmeticAddition[]>(num_levels);
- _scalef = arm_compute::support::cpp14::make_unique<CLScale[]>(num_levels - 1);
+ _addf.resize(num_levels);
+ _scalef.resize(num_levels - 1);
const size_t last_level = num_levels - 1;
@@ -85,7 +85,7 @@ void CLLaplacianReconstruct::configure(const CLPyramid *pyramid, ICLTensor *inpu
void CLLaplacianReconstruct::run()
{
- ARM_COMPUTE_ERROR_ON_MSG(_addf == nullptr, "Unconfigured function");
+ ARM_COMPUTE_ERROR_ON_MSG(_addf.empty(), "Unconfigured function");
const size_t last_level = _tmp_pyr.info()->num_levels() - 1;
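
With `_addf` now a container, the "has configure() run?" guard becomes `_addf.empty()` rather than a null check on the old `unique_ptr`. A sketch:

    #include <cassert>
    #include <cstddef>
    #include <vector>

    struct AddStage { };

    class Reconstruct
    {
    public:
        void configure(std::size_t num_levels)
        {
            _addf.resize(num_levels);
        }

        void run()
        {
            // Was: a nullptr comparison against a unique_ptr<AddStage[]>
            assert(!_addf.empty() && "Unconfigured function");
        }

    private:
        std::vector<AddStage> _addf;
    };

    int main()
    {
        Reconstruct r;
        r.configure(4);
        r.run();
    }
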
diff --git a/src/runtime/CL/functions/CLOpticalFlow.cpp b/src/runtime/CL/functions/CLOpticalFlow.cpp
index 7ef1c83d04..a013a1fe19 100644
--- a/src/runtime/CL/functions/CLOpticalFlow.cpp
+++ b/src/runtime/CL/functions/CLOpticalFlow.cpp
@@ -84,12 +84,12 @@ void CLOpticalFlow::configure(const CLPyramid *old_pyramid, const CLPyramid *new
const int old_values_list_length = list_length * window_dimension * window_dimension;
// Create kernels and tensors
- _tracker_init_kernel = arm_compute::support::cpp14::make_unique<CLLKTrackerInitKernel[]>(_num_levels);
- _tracker_stage0_kernel = arm_compute::support::cpp14::make_unique<CLLKTrackerStage0Kernel[]>(_num_levels);
- _tracker_stage1_kernel = arm_compute::support::cpp14::make_unique<CLLKTrackerStage1Kernel[]>(_num_levels);
- _func_scharr = arm_compute::support::cpp14::make_unique<CLScharr3x3[]>(_num_levels);
- _scharr_gx = arm_compute::support::cpp14::make_unique<CLTensor[]>(_num_levels);
- _scharr_gy = arm_compute::support::cpp14::make_unique<CLTensor[]>(_num_levels);
+ _tracker_init_kernel.resize(_num_levels);
+ _tracker_stage0_kernel.resize(_num_levels);
+ _tracker_stage1_kernel.resize(_num_levels);
+ _func_scharr.resize(_num_levels);
+ _scharr_gx.resize(_num_levels);
+ _scharr_gy.resize(_num_levels);
// Create internal keypoint arrays
_old_points_internal = arm_compute::support::cpp14::make_unique<CLLKInternalKeypointArray>(list_length);
@@ -118,8 +118,8 @@ void CLOpticalFlow::configure(const CLPyramid *old_pyramid, const CLPyramid *new
_scharr_gy[i].allocator()->init(tensor_info);
// Manage intermediate buffers
- _memory_group.manage(_scharr_gx.get() + i);
- _memory_group.manage(_scharr_gy.get() + i);
+ _memory_group.manage(&_scharr_gx[i]);
+ _memory_group.manage(&_scharr_gy[i]);
// Init Scharr kernel
_func_scharr[i].configure(old_ith_input, &_scharr_gx[i], &_scharr_gy[i], border_mode, constant_border_value);
diff --git a/src/runtime/CL/functions/CLPadLayer.cpp b/src/runtime/CL/functions/CLPadLayer.cpp
index dba7f23f3b..99e312183a 100644
--- a/src/runtime/CL/functions/CLPadLayer.cpp
+++ b/src/runtime/CL/functions/CLPadLayer.cpp
@@ -31,7 +31,7 @@
namespace arm_compute
{
CLPadLayer::CLPadLayer()
- : _copy_kernel(), _mode(), _padding(), _memset_kernel(), _num_dimensions(0), _slice_functions(nullptr), _concat_functions(nullptr), _slice_results(nullptr), _concat_results(nullptr)
+ : _copy_kernel(), _mode(), _padding(), _memset_kernel(), _num_dimensions(0), _slice_functions(), _concat_functions(), _slice_results(), _concat_results()
{
}
@@ -67,11 +67,16 @@ void CLPadLayer::configure_reflect_symmetric_mode(ICLTensor *input, ICLTensor *o
// Two strided slice functions will be required for each dimension padded as well as a
// concatenate function and the tensors to hold the temporary results.
- _slice_functions = arm_compute::support::cpp14::make_unique<CLStridedSlice[]>(2 * _num_dimensions);
- _slice_results = arm_compute::support::cpp14::make_unique<CLTensor[]>(2 * _num_dimensions);
- _concat_functions = arm_compute::support::cpp14::make_unique<CLConcatenateLayer[]>(_num_dimensions);
- _concat_results = arm_compute::support::cpp14::make_unique<CLTensor[]>(_num_dimensions - 1);
- Coordinates starts_before, ends_before, starts_after, ends_after, strides;
+ _slice_functions.resize(2 * _num_dimensions);
+ _slice_results.resize(2 * _num_dimensions);
+ _concat_functions.resize(_num_dimensions);
+ _concat_results.resize(_num_dimensions - 1);
+
+ Coordinates starts_before{};
+ Coordinates ends_before{};
+ Coordinates starts_after{};
+ Coordinates ends_after{};
+ Coordinates strides{};
ICLTensor *prev = input;
for(uint32_t i = 0; i < _num_dimensions; ++i)
{
diff --git a/src/runtime/CL/functions/CLReduceMean.cpp b/src/runtime/CL/functions/CLReduceMean.cpp
index 702ce34a4d..15091f9066 100644
--- a/src/runtime/CL/functions/CLReduceMean.cpp
+++ b/src/runtime/CL/functions/CLReduceMean.cpp
@@ -40,10 +40,10 @@ void CLReduceMean::configure(ICLTensor *input, const Coordinates &reduction_axis
{
ARM_COMPUTE_ERROR_ON_NULLPTR(input);
- _reduction_ops = reduction_axis.num_dimensions();
- _reduction_kernels = arm_compute::support::cpp14::make_unique<CLReductionOperation[]>(_reduction_ops);
- _reduced_outs = arm_compute::support::cpp14::make_unique<CLTensor[]>(_reduction_ops - (keep_dims ? 1 : 0));
- _keep_dims = keep_dims;
+ _reduction_ops = reduction_axis.num_dimensions();
+ _reduction_kernels.resize(_reduction_ops);
+ _reduced_outs.resize(_reduction_ops - (keep_dims ? 1 : 0));
+ _keep_dims = keep_dims;
Coordinates axis_local = reduction_axis;
const int input_dims = input->info()->num_dimensions();
@@ -57,9 +57,9 @@ void CLReduceMean::configure(ICLTensor *input, const Coordinates &reduction_axis
// Perform reduction for every axis
for(unsigned int i = 0; i < _reduction_ops; ++i)
{
- TensorShape out_shape = i == 0 ? input->info()->tensor_shape() : (_reduced_outs.get() + i - 1)->info()->tensor_shape();
+ TensorShape out_shape = i == 0 ? input->info()->tensor_shape() : (&_reduced_outs[i - 1])->info()->tensor_shape();
out_shape.set(axis_local[i], 1);
- auto in = (i == 0) ? input : (_reduced_outs.get() + i - 1);
+ auto in = (i == 0) ? input : (&_reduced_outs[i - 1]);
if(i == _reduction_ops - 1 && keep_dims)
{
@@ -68,8 +68,8 @@ void CLReduceMean::configure(ICLTensor *input, const Coordinates &reduction_axis
else
{
_reduced_outs[i].allocator()->init(TensorInfo(out_shape, input->info()->num_channels(), input->info()->data_type(), input->info()->quantization_info()));
- _memory_group.manage(_reduced_outs.get() + i);
- _reduction_kernels[i].configure(in, _reduced_outs.get() + i, axis_local[i], ReductionOperation::MEAN_SUM);
+ _memory_group.manage(&_reduced_outs[i]);
+ _reduction_kernels[i].configure(in, &_reduced_outs[i], axis_local[i], ReductionOperation::MEAN_SUM);
}
}
@@ -92,7 +92,7 @@ void CLReduceMean::configure(ICLTensor *input, const Coordinates &reduction_axis
out_shape.remove_dimension(axis_local[i] - i);
}
auto_init_if_empty(*output->info(), input->info()->clone()->set_tensor_shape(out_shape));
- _reshape.configure(_reduced_outs.get() + _reduction_ops - 1, output);
+ _reshape.configure(&_reduced_outs[_reduction_ops - 1], output);
}
}
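
Each reduction stage reads the previous stage's intermediate from the vector, so `_reduced_outs.get() + i - 1` becomes plain indexing (and `(&_reduced_outs[i - 1])->info()` could be tightened further to `_reduced_outs[i - 1].info()`). The chaining pattern, condensed with stand-in types:

    #include <vector>

    struct Tensor { int value = 0; };

    // Stand-in for a reduction stage writing in -> out.
    void reduce(const Tensor *in, Tensor *out)
    {
        out->value = in->value + 1;
    }

    int main()
    {
        Tensor input{ 0 };
        const unsigned int reduction_ops = 3;

        std::vector<Tensor> reduced_outs(reduction_ops); // was make_unique<Tensor[]>
        for(unsigned int i = 0; i < reduction_ops; ++i)
        {
            const Tensor *in = (i == 0) ? &input : &reduced_outs[i - 1];
            reduce(in, &reduced_outs[i]);
        }
        return reduced_outs[reduction_ops - 1].value == 3 ? 0 : 1;
    }
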
diff --git a/src/runtime/CL/functions/CLReductionOperation.cpp b/src/runtime/CL/functions/CLReductionOperation.cpp
index bb285d7cc8..9f99d2db6f 100644
--- a/src/runtime/CL/functions/CLReductionOperation.cpp
+++ b/src/runtime/CL/functions/CLReductionOperation.cpp
@@ -71,7 +71,7 @@ Status CLReductionOperation::validate(const ITensorInfo *input, const ITensorInf
else
{
// Create temporary tensor infos
- auto sums_vector = arm_compute::support::cpp14::make_unique<TensorInfo[]>(num_of_stages - 1);
+ std::vector<TensorInfo> sums_vector(num_of_stages - 1);
// Create intermediate tensor info
TensorShape shape{ input->tensor_shape() };
@@ -110,17 +110,17 @@ Status CLReductionOperation::validate(const ITensorInfo *input, const ITensorInf
}
// Validate ReductionOperation only on first kernel
- ARM_COMPUTE_RETURN_ON_ERROR(CLReductionOperationKernel::validate(input, sums_vector.get(), axis, first_kernel_op));
+ ARM_COMPUTE_RETURN_ON_ERROR(CLReductionOperationKernel::validate(input, &sums_vector[0], axis, first_kernel_op));
// Validate ReductionOperation on intermediate stages
for(unsigned int i = 1; i < num_of_stages - 1; ++i)
{
- ARM_COMPUTE_RETURN_ON_ERROR(CLReductionOperationKernel::validate(sums_vector.get() + i - 1, sums_vector.get() + i, axis, intermediate_kernel_op));
+ ARM_COMPUTE_RETURN_ON_ERROR(CLReductionOperationKernel::validate(&sums_vector[i - 1], &sums_vector[i], axis, intermediate_kernel_op));
}
// Validate ReductionOperation on the last stage
const unsigned int last_stage = num_of_stages - 1;
- ARM_COMPUTE_RETURN_ON_ERROR(CLReductionOperationKernel::validate(sums_vector.get() + last_stage - 1, output, axis, last_kernel_op, input->dimension(0)));
+ ARM_COMPUTE_RETURN_ON_ERROR(CLReductionOperationKernel::validate(&sums_vector[last_stage - 1], output, axis, last_kernel_op, input->dimension(0)));
}
return Status{};
@@ -133,7 +133,7 @@ void CLReductionOperation::configure(ICLTensor *input, ICLTensor *output, unsign
_is_serial = is_data_type_quantized(input->info()->data_type()) || axis != 0;
// Configure reduction operation kernels
- _reduction_kernels_vector = arm_compute::support::cpp14::make_unique<CLReductionOperationKernel[]>(_num_of_stages);
+ _reduction_kernels_vector.resize(_num_of_stages);
// Create temporary tensors
if(_is_serial)
@@ -142,8 +142,8 @@ void CLReductionOperation::configure(ICLTensor *input, ICLTensor *output, unsign
}
else
{
- _border_handlers_vector = arm_compute::support::cpp14::make_unique<CLFillBorderKernel[]>(_num_of_stages);
- _results_vector = arm_compute::support::cpp14::make_unique<CLTensor[]>(_num_of_stages - 1);
+ _border_handlers_vector.resize(_num_of_stages);
+ _results_vector.resize(_num_of_stages - 1);
TensorShape shape{ input->info()->tensor_shape() };
for(unsigned int i = 0; i < _num_of_stages - 1; i++)
{
@@ -152,7 +152,7 @@ void CLReductionOperation::configure(ICLTensor *input, ICLTensor *output, unsign
}
// Apply ReductionOperation only on first kernel
- _memory_group.manage(_results_vector.get());
+ _memory_group.manage(&_results_vector[0]);
ReductionOperation first_kernel_op;
ReductionOperation intermediate_kernel_op;
@@ -183,23 +183,23 @@ void CLReductionOperation::configure(ICLTensor *input, ICLTensor *output, unsign
ARM_COMPUTE_ERROR("Not supported");
}
- _reduction_kernels_vector[0].configure(input, _results_vector.get(), axis, first_kernel_op);
+ _reduction_kernels_vector[0].configure(input, &_results_vector[0], axis, first_kernel_op);
_border_handlers_vector[0].configure(input, _reduction_kernels_vector[0].border_size(), BorderMode::CONSTANT, pixelValue);
// Apply ReductionOperation on intermediate stages
for(unsigned int i = 1; i < _num_of_stages - 1; ++i)
{
- _memory_group.manage(_results_vector.get() + i);
- _reduction_kernels_vector[i].configure(_results_vector.get() + i - 1, _results_vector.get() + i, axis, intermediate_kernel_op);
- _border_handlers_vector[i].configure(_results_vector.get() + i - 1, _reduction_kernels_vector[i].border_size(), BorderMode::CONSTANT, pixelValue);
+ _memory_group.manage(&_results_vector[i]);
+ _reduction_kernels_vector[i].configure(&_results_vector[i - 1], &_results_vector[i], axis, intermediate_kernel_op);
+ _border_handlers_vector[i].configure(&_results_vector[i - 1], _reduction_kernels_vector[i].border_size(), BorderMode::CONSTANT, pixelValue);
_results_vector[i - 1].allocator()->allocate();
}
// Apply ReductionOperation on the last stage
const unsigned int last_stage = _num_of_stages - 1;
const unsigned int input_width = input->info()->dimension(0);
- _reduction_kernels_vector[last_stage].configure(_results_vector.get() + last_stage - 1, output, axis, last_kernel_op, input_width);
- _border_handlers_vector[last_stage].configure(_results_vector.get() + last_stage - 1, _reduction_kernels_vector[last_stage].border_size(), BorderMode::CONSTANT, pixelValue);
+ _reduction_kernels_vector[last_stage].configure(&_results_vector[last_stage - 1], output, axis, last_kernel_op, input_width);
+ _border_handlers_vector[last_stage].configure(&_results_vector[last_stage - 1], _reduction_kernels_vector[last_stage].border_size(), BorderMode::CONSTANT, pixelValue);
_results_vector[last_stage - 1].allocator()->allocate();
}
}
diff --git a/src/runtime/CL/functions/CLSplit.cpp b/src/runtime/CL/functions/CLSplit.cpp
index f0843517e7..8d37d538c8 100644
--- a/src/runtime/CL/functions/CLSplit.cpp
+++ b/src/runtime/CL/functions/CLSplit.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018 ARM Limited.
+ * Copyright (c) 2018-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -42,8 +42,8 @@ CLSplit::CLSplit()
void CLSplit::configure(const ICLTensor *input, const std::vector<ICLTensor *> &outputs, unsigned int axis)
{
// Create Slice functions
- _num_outputs = outputs.size();
- _slice_functions = arm_compute::support::cpp14::make_unique<CLSlice[]>(_num_outputs);
+ _num_outputs = outputs.size();
+ _slice_functions.resize(_num_outputs);
// Get output shape
const TensorShape output_shape = arm_compute::misc::shape_calculator::compute_split_shape(input->info(), axis, _num_outputs);
diff --git a/src/runtime/CL/functions/CLStackLayer.cpp b/src/runtime/CL/functions/CLStackLayer.cpp
index 71327fead4..2700b49272 100644
--- a/src/runtime/CL/functions/CLStackLayer.cpp
+++ b/src/runtime/CL/functions/CLStackLayer.cpp
@@ -46,8 +46,8 @@ CLStackLayer::CLStackLayer() // NOLINT
void CLStackLayer::configure(const std::vector<ICLTensor *> &input, int axis, ICLTensor *output)
{
- _num_inputs = input.size();
- _stack_kernels = arm_compute::support::cpp14::make_unique<CLStackLayerKernel[]>(_num_inputs);
+ _num_inputs = input.size();
+ _stack_kernels.resize(_num_inputs);
// Wrap around negative values
const unsigned int axis_u = wrap_around(axis, static_cast<int>(input[0]->info()->num_dimensions() + 1));
diff --git a/src/runtime/CL/functions/CLUnstack.cpp b/src/runtime/CL/functions/CLUnstack.cpp
index 428d09148b..eb1dd8cd44 100644
--- a/src/runtime/CL/functions/CLUnstack.cpp
+++ b/src/runtime/CL/functions/CLUnstack.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018 ARM Limited.
+ * Copyright (c) 2018-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -74,7 +74,7 @@ void CLUnstack::configure(const ICLTensor *input, const std::vector<ICLTensor *>
// Wrap around negative values
const unsigned int axis_u = wrap_axis(axis, input->info());
_num_slices = std::min(outputs_vector_info.size(), input->info()->dimension(axis_u));
- _strided_slice_vector = arm_compute::support::cpp14::make_unique<CLStridedSlice[]>(_num_slices);
+ _strided_slice_vector.resize(_num_slices);
Coordinates slice_start;
int32_t slice_end_mask;
diff --git a/src/runtime/CL/functions/CLWidthConcatenateLayer.cpp b/src/runtime/CL/functions/CLWidthConcatenateLayer.cpp
index 6e42377a07..a8667c3138 100644
--- a/src/runtime/CL/functions/CLWidthConcatenateLayer.cpp
+++ b/src/runtime/CL/functions/CLWidthConcatenateLayer.cpp
@@ -109,7 +109,7 @@ void CLWidthConcatenateLayer::configure(std::vector<ICLTensor *> inputs_vector,
break;
default:
// Configure generic case WidthConcatenate kernels
- _concat_kernels_vector = arm_compute::support::cpp14::make_unique<CLWidthConcatenateLayerKernel[]>(_num_inputs);
+ _concat_kernels_vector.resize(_num_inputs);
unsigned int width_offset = 0;
for(unsigned int i = 0; i < _num_inputs; ++i)
diff --git a/src/runtime/CL/tuners/CLLWSList.cpp b/src/runtime/CL/tuners/CLLWSList.cpp
index 97134b1b2c..6eb251420c 100644
--- a/src/runtime/CL/tuners/CLLWSList.cpp
+++ b/src/runtime/CL/tuners/CLLWSList.cpp
@@ -36,7 +36,7 @@ cl::NDRange CLLWSListExhaustive::operator[](size_t index)
{
ARM_COMPUTE_ERROR_ON(index >= size());
auto coords = index2coords(search_space_shape, index);
- return cl::NDRange(coords[0] + 1, coords[1] + 1, coords[2] + 1);
+ return cl::NDRange{ coords[0] + 1U, coords[1] + 1U, coords[2] + 1U };
}
CLLWSListExhaustive::CLLWSListExhaustive(const cl::NDRange &gws)
@@ -49,7 +49,7 @@ cl::NDRange CLLWSListNormal::operator[](size_t index)
{
ARM_COMPUTE_ERROR_ON(index >= size());
auto coords = index2coords(search_space_shape, index);
- return cl::NDRange(_lws_x[coords[0]], _lws_y[coords[1]], _lws_z[coords[2]]);
+ return cl::NDRange{ _lws_x[coords[0]], _lws_y[coords[1]], _lws_z[coords[2]] };
}
CLLWSListNormal::CLLWSListNormal(const cl::NDRange &gws)
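
The brace-initialised `cl::NDRange{ coords[0] + 1U, ... }` relies on list-initialisation rejecting narrowing conversions; the `1U` makes the arithmetic unsigned so a signed coordinate does not narrow into the range's `size_t` slots. A self-contained sketch with a stand-in range type:

    #include <cstddef>

    struct NDRange
    {
        NDRange(std::size_t x, std::size_t y, std::size_t z)
            : sizes{ x, y, z }
        {
        }
        std::size_t sizes[3];
    };

    int main()
    {
        int coords[3] = { 0, 1, 2 };

        // NDRange bad{ coords[0] + 1, coords[1] + 1, coords[2] + 1 };
        // error: narrowing conversion from int to std::size_t in list-initialisation

        NDRange good{ coords[0] + 1U, coords[1] + 1U, coords[2] + 1U };
        return static_cast<int>(good.sizes[2]) - 3; // 0 on success
    }
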