author    Georgios Pinitas <georgios.pinitas@arm.com>    2017-09-08 19:47:30 +0100
committer Anthony Barbier <anthony.barbier@arm.com>      2018-11-02 16:35:24 +0000
commit    baf174e85ddb5399355281cd34d0f459d92124a7 (patch)
tree      d69904df66f7e5ad55edd268d16735542445f36f /src/runtime/CL
parent    1c8409d7ce90ea449437076574c98a4ea90d9368 (diff)
COMPMID-485: Memory Manager
Change-Id: Ib421b7622838f050038cd81e7426bb1413a7d6e6
Reviewed-on: http://mpd-gerrit.cambridge.arm.com/87376
Tested-by: Kaizen <jeremy.johnson+kaizengerrit@arm.com>
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
Diffstat (limited to 'src/runtime/CL')
 src/runtime/CL/CLBufferAllocator.cpp               | 49
 src/runtime/CL/CLTensor.cpp                        |  2
 src/runtime/CL/CLTensorAllocator.cpp               | 35
 src/runtime/CL/functions/CLConvolutionLayer.cpp    | 32
 src/runtime/CL/functions/CLFullyConnectedLayer.cpp | 11
 src/runtime/CL/functions/CLSoftmaxLayer.cpp        | 14
 6 files changed, 119 insertions(+), 24 deletions(-)
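
The pieces in this diff fit together as follows: CLBufferAllocator gives the memory manager a way to create raw cl_mem buffers through the generic IAllocator interface, CLTensorAllocator learns to defer allocation to a CLMemoryGroup, and each function now accepts a std::shared_ptr<IMemoryManager>, registers its intermediate tensors with its group at configure() time, and brackets run() with acquire()/release(). A minimal wiring sketch follows; it is not part of this patch, and MemoryManagerOnDemand, BlobLifetimeManager and PoolManager live outside src/runtime/CL and are assumed from the rest of this change, as is the exact pool-finalization call.

    // Sketch only, not part of this patch. The concrete manager classes
    // (MemoryManagerOnDemand, BlobLifetimeManager, PoolManager) are assumed
    // from the parts of this change outside src/runtime/CL.
    #include "arm_compute/core/CL/ICLTensor.h"
    #include "arm_compute/runtime/BlobLifetimeManager.h"
    #include "arm_compute/runtime/MemoryManagerOnDemand.h"
    #include "arm_compute/runtime/PoolManager.h"
    #include "arm_compute/runtime/CL/CLScheduler.h"
    #include "arm_compute/runtime/CL/functions/CLSoftmaxLayer.h"

    #include <memory>

    using namespace arm_compute;

    void softmax_with_memory_manager(ICLTensor *in, ICLTensor *out)
    {
        CLScheduler::get().default_init();

        auto lifetime_mgr = std::make_shared<BlobLifetimeManager>();
        auto pool_mgr     = std::make_shared<PoolManager>();
        auto memory_mgr   = std::make_shared<MemoryManagerOnDemand>(lifetime_mgr, pool_mgr);

        CLSoftmaxLayer softmax(memory_mgr);
        softmax.configure(in, out); // intermediates are manage()'d, not yet backed
        // ... allocate the manager's pools here (finalization API assumed) ...
        softmax.run();              // acquire() -> enqueue kernels -> release()
    }
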
diff --git a/src/runtime/CL/CLBufferAllocator.cpp b/src/runtime/CL/CLBufferAllocator.cpp
new file mode 100644
index 0000000000..9a5c13ac5a
--- /dev/null
+++ b/src/runtime/CL/CLBufferAllocator.cpp
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/runtime/CL/CLBufferAllocator.h"
+
+#include "arm_compute/core/CL/OpenCL.h"
+#include "arm_compute/core/Error.h"
+
+#include <cstddef>
+
+using namespace arm_compute;
+
+CLBufferAllocator::CLBufferAllocator(cl::Context context)
+    : _context(std::move(context))
+{
+}
+
+void *CLBufferAllocator::allocate(size_t size, size_t alignment)
+{
+    ARM_COMPUTE_UNUSED(alignment);
+    cl_mem buf = clCreateBuffer(_context.get(), CL_MEM_ALLOC_HOST_PTR | CL_MEM_READ_WRITE, size, nullptr, nullptr);
+    return static_cast<void *>(buf);
+}
+
+void CLBufferAllocator::free(void *ptr)
+{
+    ARM_COMPUTE_ERROR_ON(ptr == nullptr);
+    clReleaseMemObject(static_cast<cl_mem>(ptr));
+}
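
This new allocator is the CL backing for the generic IAllocator interface: allocate() returns the cl_mem handle cast to void * (it is not a host pointer), and the alignment hint is ignored because OpenCL controls buffer alignment internally. A minimal standalone sketch of its contract:

    // Minimal sketch of the allocator contract; normally the memory manager,
    // not user code, drives these calls when building its pools.
    #include "arm_compute/runtime/CL/CLBufferAllocator.h"
    #include "arm_compute/runtime/CL/CLScheduler.h"

    using namespace arm_compute;

    void buffer_allocator_demo()
    {
        CLScheduler::get().default_init();
        CLBufferAllocator allocator(CLScheduler::get().context());

        void *buf = allocator.allocate(1024, 0); // returns a cl_mem; alignment unused
        // ... hand the handle to a memory pool or kernel ...
        allocator.free(buf);                     // clReleaseMemObject under the hood
    }
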
diff --git a/src/runtime/CL/CLTensor.cpp b/src/runtime/CL/CLTensor.cpp
index eefa0331d5..bc513d139b 100644
--- a/src/runtime/CL/CLTensor.cpp
+++ b/src/runtime/CL/CLTensor.cpp
@@ -28,7 +28,7 @@
using namespace arm_compute;
CLTensor::CLTensor()
- : _allocator()
+ : _allocator(this)
{
}
diff --git a/src/runtime/CL/CLTensorAllocator.cpp b/src/runtime/CL/CLTensorAllocator.cpp
index 8112a7148f..ad165fad7d 100644
--- a/src/runtime/CL/CLTensorAllocator.cpp
+++ b/src/runtime/CL/CLTensorAllocator.cpp
@@ -25,15 +25,21 @@
#include "arm_compute/core/Error.h"
#include "arm_compute/core/TensorInfo.h"
+#include "arm_compute/runtime/CL/CLMemoryGroup.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
using namespace arm_compute;
-CLTensorAllocator::CLTensorAllocator()
- : _buffer(), _mapping(nullptr)
+CLTensorAllocator::CLTensorAllocator(CLTensor *owner)
+ : _associated_memory_group(nullptr), _buffer(), _mapping(nullptr), _owner(owner)
{
}
+CLTensorAllocator::~CLTensorAllocator()
+{
+ _buffer = cl::Buffer();
+}
+
uint8_t *CLTensorAllocator::data()
{
return _mapping;
@@ -47,17 +53,32 @@ const cl::Buffer &CLTensorAllocator::cl_data() const
 void CLTensorAllocator::allocate()
 {
     ARM_COMPUTE_ERROR_ON(_buffer.get() != nullptr);
-
-    _buffer = cl::Buffer(CLScheduler::get().context(), CL_MEM_ALLOC_HOST_PTR | CL_MEM_READ_WRITE, info().total_size());
+    if(_associated_memory_group == nullptr)
+    {
+        _buffer = cl::Buffer(CLScheduler::get().context(), CL_MEM_ALLOC_HOST_PTR | CL_MEM_READ_WRITE, info().total_size());
+    }
+    else
+    {
+        _associated_memory_group->finalize_memory(_owner, reinterpret_cast<void **>(&_buffer()), info().total_size());
+    }
     info().set_is_resizable(false);
 }

 void CLTensorAllocator::free()
 {
-    ARM_COMPUTE_ERROR_ON(_buffer.get() == nullptr);
+    if(_associated_memory_group == nullptr)
+    {
+        _buffer = cl::Buffer();
+        info().set_is_resizable(true);
+    }
+}

-    _buffer = cl::Buffer();
-    info().set_is_resizable(true);
+void CLTensorAllocator::set_associated_memory_group(CLMemoryGroup *associated_memory_group)
+{
+    ARM_COMPUTE_ERROR_ON(associated_memory_group == nullptr);
+    ARM_COMPUTE_ERROR_ON(_associated_memory_group != nullptr);
+    ARM_COMPUTE_ERROR_ON(_buffer.get() != nullptr);
+    _associated_memory_group = associated_memory_group;
 }
uint8_t *CLTensorAllocator::lock()
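
allocate() now has two paths: a standalone tensor still creates its own cl::Buffer immediately, while a tensor registered with a CLMemoryGroup records its size via finalize_memory() and only receives backing memory when the group is acquired. A sketch contrasting the two, assuming CLMemoryGroup::manage() is what calls the new set_associated_memory_group():

    // Sketch of the two allocation paths; CLMemoryGroup::manage() is assumed
    // to call set_associated_memory_group() on the tensor's allocator.
    #include "arm_compute/core/TensorInfo.h"
    #include "arm_compute/runtime/CL/CLMemoryGroup.h"
    #include "arm_compute/runtime/CL/CLTensor.h"

    #include <memory>

    using namespace arm_compute;

    void allocation_paths(std::shared_ptr<IMemoryManager> mm)
    {
        const TensorInfo info(TensorShape(16U, 16U), 1, DataType::F32);

        // Path 1: unmanaged tensor; allocate() creates a cl::Buffer on the spot.
        CLTensor standalone;
        standalone.allocator()->init(info);
        standalone.allocator()->allocate();

        // Path 2: managed tensor; allocate() only finalizes the lifetime.
        CLMemoryGroup group(std::move(mm));
        CLTensor scratch;
        scratch.allocator()->init(info);
        group.manage(&scratch);          // lifetime starts; allocator is group-owned
        scratch.allocator()->allocate(); // finalize_memory(): no buffer bound yet
        // scratch is only backed between group.acquire() and group.release().
    }
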
diff --git a/src/runtime/CL/functions/CLConvolutionLayer.cpp b/src/runtime/CL/functions/CLConvolutionLayer.cpp
index 0bbec94e78..4b1bfd8b8f 100644
--- a/src/runtime/CL/functions/CLConvolutionLayer.cpp
+++ b/src/runtime/CL/functions/CLConvolutionLayer.cpp
@@ -30,12 +30,13 @@
#include "arm_compute/runtime/CL/CLScheduler.h"
#include <cmath>
+#include <memory>
#include <tuple>
using namespace arm_compute;
-CLConvolutionLayerReshapeWeights::CLConvolutionLayerReshapeWeights()
- : _weights_reshape_kernel(), _weights_transposed_kernel(), _weights_reshaped(), _transpose1xW(false)
+CLConvolutionLayerReshapeWeights::CLConvolutionLayerReshapeWeights(std::shared_ptr<IMemoryManager> memory_manager)
+ : _memory_group(std::move(memory_manager)), _weights_reshape_kernel(), _weights_transposed_kernel(), _weights_reshaped(), _transpose1xW(false)
{
}
@@ -68,6 +69,7 @@ void CLConvolutionLayerReshapeWeights::configure(const ICLTensor *weights, const
TensorInfo info_wr(shape_wr, 1, dt, fixed_point_position);
_weights_reshaped.allocator()->init(info_wr);
+ _memory_group.manage(&_weights_reshaped);
_weights_reshape_kernel.configure(weights, biases, &_weights_reshaped);
_weights_transposed_kernel.configure(&_weights_reshaped, output);
_weights_reshaped.allocator()->allocate();
@@ -80,17 +82,21 @@ void CLConvolutionLayerReshapeWeights::configure(const ICLTensor *weights, const
void CLConvolutionLayerReshapeWeights::run()
{
+ _memory_group.acquire();
+
cl::CommandQueue q = CLScheduler::get().queue();
CLScheduler::get().enqueue(_weights_reshape_kernel);
if(_transpose1xW)
{
CLScheduler::get().enqueue(_weights_transposed_kernel);
}
+
+ _memory_group.release();
}
-CLConvolutionLayer::CLConvolutionLayer()
- : _reshape_weights(), _input_im2col_kernel(), _input_interleave_kernel(), _mm_kernel(), _output_col2im_kernel(), _input_im2col_reshaped(), _input_interleaved_reshaped(), _weights_reshaped(),
- _weights_transposed(), _gemm_output(), _has_bias(false), _is_fully_connected_convolution(false), _are_weights_reshaped(false)
+CLConvolutionLayer::CLConvolutionLayer(std::shared_ptr<IMemoryManager> memory_manager)
+ : _memory_group(std::move(memory_manager)), _reshape_weights(), _input_im2col_kernel(), _input_interleave_kernel(), _mm_kernel(), _output_col2im_kernel(), _input_im2col_reshaped(),
+ _input_interleaved_reshaped(), _weights_reshaped(), _weights_transposed(), _gemm_output(), _has_bias(false), _is_fully_connected_convolution(false), _are_weights_reshaped(false)
{
}
@@ -179,6 +185,7 @@ void CLConvolutionLayer::configure(const ICLTensor *input, const ICLTensor *weig
shape_im2col.set(1, mat_input_rows);
shape_im2col.set(2, 1);
_input_im2col_reshaped.allocator()->init(TensorInfo(shape_im2col, 1, dt, fixed_point_position));
+ _memory_group.manage(&_input_im2col_reshaped);
// Create tensor (interleave) to prepare input tensor for GEMM
if(!_is_fully_connected_convolution)
@@ -187,6 +194,7 @@ void CLConvolutionLayer::configure(const ICLTensor *input, const ICLTensor *weig
shape_interleaved.set(0, shape_interleaved.x() * 4);
shape_interleaved.set(1, std::ceil(shape_interleaved.y() / 4.f));
_input_interleaved_reshaped.allocator()->init(TensorInfo(shape_interleaved, 1, dt, fixed_point_position));
+ _memory_group.manage(&_input_interleaved_reshaped);
}
// Create GEMM output tensor
@@ -194,6 +202,7 @@ void CLConvolutionLayer::configure(const ICLTensor *input, const ICLTensor *weig
shape_gemm.set(0, mat_weights_cols);
shape_gemm.set(1, mat_input_rows);
_gemm_output.allocator()->init(TensorInfo(shape_gemm, 1, dt, fixed_point_position));
+ _memory_group.manage(&_gemm_output);
// Configure kernels
_input_im2col_kernel.configure(input, &_input_im2col_reshaped, Size2D(kernel_width, kernel_height), conv_info, _has_bias);
@@ -208,8 +217,11 @@ void CLConvolutionLayer::configure(const ICLTensor *input, const ICLTensor *weig
{
_input_interleave_kernel.configure(&_input_im2col_reshaped, &_input_interleaved_reshaped);
_mm_kernel.configure(&_input_interleaved_reshaped, weights, &_gemm_output, 1.0f);
+ _input_interleaved_reshaped.allocator()->allocate();
}
+ _input_im2col_reshaped.allocator()->allocate();
_output_col2im_kernel.configure(&_gemm_output, output, std::make_pair(conv_w, conv_h));
+ _gemm_output.allocator()->allocate();
ARM_COMPUTE_ERROR_ON_MSG((output->info()->dimension(0) != conv_w) || (output->info()->dimension(1) != conv_h), "Output shape does not match the expected one");
@@ -218,12 +230,6 @@ void CLConvolutionLayer::configure(const ICLTensor *input, const ICLTensor *weig
{
_weights_reshaped.allocator()->allocate();
}
- _input_im2col_reshaped.allocator()->allocate();
- if(!_is_fully_connected_convolution)
- {
- _input_interleaved_reshaped.allocator()->allocate();
- }
- _gemm_output.allocator()->allocate();
}
void CLConvolutionLayer::run()
@@ -235,6 +241,8 @@ void CLConvolutionLayer::run()
_reshape_weights.run();
}
+ _memory_group.acquire();
+
// Run input reshaping
CLScheduler::get().enqueue(_input_im2col_kernel);
if(!_is_fully_connected_convolution)
@@ -247,4 +255,6 @@ void CLConvolutionLayer::run()
// Reshape output matrix
CLScheduler::get().enqueue(_output_col2im_kernel, false);
+
+ _memory_group.release();
}
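
The important change in configure() above is the ordering: each intermediate is handed to _memory_group.manage() before the kernel that writes it is configured, and its allocate() call moves to just after the last kernel that reads it is configured (which is why the trailing block of allocate() calls is deleted). That start/end bracketing is what lets the lifetime manager overlap non-conflicting scratch tensors. The pattern, distilled into a sketch with hypothetical stage tensors:

    // Distilled configure()/run() pattern from the diff above (sketch only;
    // kernel configuration shown as comments).
    #include "arm_compute/runtime/CL/CLMemoryGroup.h"
    #include "arm_compute/runtime/CL/CLTensor.h"

    using namespace arm_compute;

    struct PipelineSketch
    {
        CLMemoryGroup _memory_group;        // default-constructed: no manager
        CLTensor      _stage0{}, _stage1{}; // hypothetical intermediates

        void configure()
        {
            _memory_group.manage(&_stage0);  // _stage0 lifetime starts
            // kernel_a.configure(input, &_stage0);

            _memory_group.manage(&_stage1);  // _stage1 lifetime starts
            // kernel_b.configure(&_stage0, &_stage1);
            _stage0.allocator()->allocate(); // kernel B was _stage0's last reader

            // kernel_c.configure(&_stage1, output);
            _stage1.allocator()->allocate(); // kernel C was _stage1's last reader
        }

        void run()
        {
            _memory_group.acquire();         // bind pool memory to the stages
            // enqueue kernels A, B, C ...
            _memory_group.release();         // return the memory to the pool
        }
    };

The CLFullyConnectedLayer diff below follows exactly this pattern for its im2col output, and both it and CLConvolutionLayer keep the one-shot weights reshape outside the acquire()/release() bracket, since reshaped weights are persistent rather than per-run scratch.
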
diff --git a/src/runtime/CL/functions/CLFullyConnectedLayer.cpp b/src/runtime/CL/functions/CLFullyConnectedLayer.cpp
index f7cea551f6..ee1558fe71 100644
--- a/src/runtime/CL/functions/CLFullyConnectedLayer.cpp
+++ b/src/runtime/CL/functions/CLFullyConnectedLayer.cpp
@@ -39,9 +39,9 @@ void CLFullyConnectedLayerReshapeWeights::configure(const ICLTensor *input, ICLT
_kernel = std::move(k);
}
-CLFullyConnectedLayer::CLFullyConnectedLayer()
- : _im2col_kernel(), _reshape_weights_kernel(), _mm_kernel(), _accumulate_biases_kernel(), _im2col_output(), _reshape_weights_output(), _are_weights_reshaped(true), _is_fc_after_conv(true),
- _accumulate_biases(false)
+CLFullyConnectedLayer::CLFullyConnectedLayer(std::shared_ptr<IMemoryManager> memory_manager)
+ : _memory_group(std::move(memory_manager)), _im2col_kernel(), _reshape_weights_kernel(), _mm_kernel(), _accumulate_biases_kernel(), _im2col_output(), _reshape_weights_output(),
+ _are_weights_reshaped(true), _is_fc_after_conv(true), _accumulate_biases(false)
{
}
@@ -63,6 +63,7 @@ void CLFullyConnectedLayer::configure_conv_fc(const ICLTensor *input, const ICLT
_im2col_output.allocator()->init(TensorInfo(shape_im2col, 1, dt, fixed_point_position));
// Configure im2col kernel
+ _memory_group.manage(&_im2col_output);
_im2col_kernel.configure(input, &_im2col_output, Size2D(1, 1), PadStrideInfo(1, 1, 0, 0), false);
// Configure matrix multiply kernel
@@ -158,6 +159,8 @@ void CLFullyConnectedLayer::run()
_reshape_weights_kernel.run();
}
+ _memory_group.acquire();
+
// Linearize input if it comes from a convolutional layer
if(_is_fc_after_conv)
{
@@ -172,4 +175,6 @@ void CLFullyConnectedLayer::run()
{
CLScheduler::get().enqueue(_accumulate_biases_kernel);
}
+
+ _memory_group.release();
}
diff --git a/src/runtime/CL/functions/CLSoftmaxLayer.cpp b/src/runtime/CL/functions/CLSoftmaxLayer.cpp
index 850eb2c6f8..7505a2c974 100644
--- a/src/runtime/CL/functions/CLSoftmaxLayer.cpp
+++ b/src/runtime/CL/functions/CLSoftmaxLayer.cpp
@@ -25,12 +25,13 @@
#include "arm_compute/core/CL/kernels/CLSoftmaxLayerKernel.h"
#include "arm_compute/core/Helpers.h"
+#include "arm_compute/runtime/CL/CLMemoryGroup.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
using namespace arm_compute;
-CLSoftmaxLayer::CLSoftmaxLayer()
- : _max_kernel(), _shift_exp_sum_kernel(), _norm_kernel(), _max(), _sum(), _tmp()
+CLSoftmaxLayer::CLSoftmaxLayer(std::shared_ptr<IMemoryManager> memory_manager)
+ : _memory_group(std::move(memory_manager)), _max_kernel(), _shift_exp_sum_kernel(), _norm_kernel(), _max(), _sum(), _tmp()
{
}
@@ -47,6 +48,11 @@ void CLSoftmaxLayer::configure(const ICLTensor *input, ICLTensor *output)
_max.allocator()->init(tensor_info_max_sum);
_sum.allocator()->init(tensor_info_max_sum);
+ // Manage intermediate buffers
+ _memory_group.manage(&_tmp);
+ _memory_group.manage(&_max);
+ _memory_group.manage(&_sum);
+
// Configure Kernels
_max_kernel.configure(input, &_max);
_shift_exp_sum_kernel.configure(input, &_max, &_tmp, &_sum);
@@ -60,7 +66,11 @@ void CLSoftmaxLayer::configure(const ICLTensor *input, ICLTensor *output)
void CLSoftmaxLayer::run()
{
+ _memory_group.acquire();
+
CLScheduler::get().enqueue(_max_kernel, false);
CLScheduler::get().enqueue(_shift_exp_sum_kernel, false);
CLScheduler::get().enqueue(_norm_kernel);
+
+ _memory_group.release();
}
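
Since every run() now brackets its kernels with acquire()/release(), the three softmax intermediates (_tmp, _max and _sum) hold pool memory only while the layer is executing. The payoff comes when several functions share one manager: layers that run sequentially never hold the pool at the same time, so their scratch memory can be reused. A hedged sketch, with the manager classes assumed as in the overview above:

    // Sharing one memory manager across functions (sketch): sequential layers
    // acquire and release the same pool, so their scratch tensors can alias.
    auto mm = std::make_shared<MemoryManagerOnDemand>(std::make_shared<BlobLifetimeManager>(),
                                                      std::make_shared<PoolManager>());

    CLConvolutionLayer conv(mm);    // im2col / interleave / GEMM scratch
    CLSoftmaxLayer     softmax(mm); // _tmp / _max / _sum
    // conv.configure(...);  softmax.configure(...);
    // ... allocate the manager's pools (finalization API assumed) ...

    // conv.run();    -> acquire(): conv scratch occupies the pool -> release()
    // softmax.run(); -> acquire(): softmax scratch reuses the same pool -> release()
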