about summary refs log tree commit diff
path: root/src/runtime/GLES_COMPUTE/functions
diff options
context:
space:
mode:
author: Michalis Spyrou <michalis.spyrou@arm.com> 2018-03-15 14:41:34 +0000
committer: Anthony Barbier <anthony.barbier@arm.com> 2018-11-02 16:49:16 +0000
commit: 9e9cbafa9e6cc6b543c89a96d52fc9c5fde04ceb (patch)
tree: eb8ae82627c447e530e2745788c371f708c887a5 /src/runtime/GLES_COMPUTE/functions
parent: be0ae93c50bfa3e588111585025278daa8cb0694 (diff)
download: ComputeLibrary-9e9cbafa9e6cc6b543c89a96d52fc9c5fde04ceb.tar.gz
COMPMID-1004 GLES: Add memory manager to GLES functions
Change-Id: I80fc9c0dd02afd79b501abde751036f9599b7bf2 Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/125103 Tested-by: Jenkins <bsgcomp@arm.com> Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
Diffstat (limited to 'src/runtime/GLES_COMPUTE/functions')
-rw-r--r-- src/runtime/GLES_COMPUTE/functions/GCConvolutionLayer.cpp | 13
-rw-r--r-- src/runtime/GLES_COMPUTE/functions/GCFullyConnectedLayer.cpp | 9
-rw-r--r-- src/runtime/GLES_COMPUTE/functions/GCGEMM.cpp | 7
-rw-r--r-- src/runtime/GLES_COMPUTE/functions/GCNormalizationLayer.cpp | 9
-rw-r--r-- src/runtime/GLES_COMPUTE/functions/GCSoftmaxLayer.cpp | 15
5 files changed, 39 insertions, 14 deletions
diff --git a/src/runtime/GLES_COMPUTE/functions/GCConvolutionLayer.cpp b/src/runtime/GLES_COMPUTE/functions/GCConvolutionLayer.cpp
index 5689722340..f4c073668a 100644
--- a/src/runtime/GLES_COMPUTE/functions/GCConvolutionLayer.cpp
+++ b/src/runtime/GLES_COMPUTE/functions/GCConvolutionLayer.cpp
@@ -91,9 +91,9 @@ void GCConvolutionLayerReshapeWeights::run()
}
}
-GCConvolutionLayer::GCConvolutionLayer()
- : _reshape_weights(), _input_im2col_kernel(), _input_interleave_kernel(), _mm_kernel(), _output_col2im_kernel(), _fill_border(), _input_im2col_reshaped(), _input_interleaved_reshaped(),
- _weights_reshaped(), _weights_transposed(), _gemm_output(), _tmp_output(), _append_bias(false), _is_fully_connected_convolution(false), _are_weights_reshaped(false)
+GCConvolutionLayer::GCConvolutionLayer(std::shared_ptr<IMemoryManager> memory_manager)
+ : _memory_group(std::move(memory_manager)), _reshape_weights(), _input_im2col_kernel(), _input_interleave_kernel(), _mm_kernel(), _output_col2im_kernel(), _fill_border(), _input_im2col_reshaped(),
+ _input_interleaved_reshaped(), _weights_reshaped(), _weights_transposed(), _gemm_output(), _tmp_output(), _append_bias(false), _is_fully_connected_convolution(false), _are_weights_reshaped(false)
{
}
@@ -196,6 +196,7 @@ void GCConvolutionLayer::configure(const IGCTensor *input, const IGCTensor *weig
// FIXME: input->clone() doesn't work with subtensors for grouped convolutions.
TensorInfo im2col_reshaped_info(shape_im2col, 1, dt, input->info()->fixed_point_position());
_input_im2col_reshaped.allocator()->init(im2col_reshaped_info);
+ _memory_group.manage(&_input_im2col_reshaped);
// Create tensor (interleave) to prepare input tensor for GEMM
if(run_interleaved)
@@ -207,6 +208,7 @@ void GCConvolutionLayer::configure(const IGCTensor *input, const IGCTensor *weig
// FIXME: input->clone() doesn't work with subtensors for grouped convolutions.
TensorInfo interleaved_info(shape_interleaved, 1, dt, input->info()->fixed_point_position());
_input_interleaved_reshaped.allocator()->init(interleaved_info);
+ _memory_group.manage(&_input_interleaved_reshaped);
}
// Create GEMM output tensor
@@ -218,6 +220,7 @@ void GCConvolutionLayer::configure(const IGCTensor *input, const IGCTensor *weig
// FIXME: input->clone() doesn't work with subtensors for grouped convolutions.
TensorInfo info_gemm(shape_gemm, 1, gemm_data_type, input->info()->fixed_point_position());
_gemm_output.allocator()->init(info_gemm);
+ _memory_group.manage(&_gemm_output);
// Configure kernels
if(dt == DataType::F16)
@@ -263,6 +266,8 @@ void GCConvolutionLayer::run()
_reshape_weights.run();
}
+ _memory_group.acquire();
+
// Run im2col
GCScheduler::get().dispatch(_fill_border);
GCScheduler::get().memory_barrier();
@@ -282,4 +287,6 @@ void GCConvolutionLayer::run()
GCScheduler::get().memory_barrier();
// Reshape output matrix
GCScheduler::get().dispatch(_output_col2im_kernel, false);
+
+ _memory_group.release();
}
diff --git a/src/runtime/GLES_COMPUTE/functions/GCFullyConnectedLayer.cpp b/src/runtime/GLES_COMPUTE/functions/GCFullyConnectedLayer.cpp
index 9e4f0f6c95..0f8f8e6c94 100644
--- a/src/runtime/GLES_COMPUTE/functions/GCFullyConnectedLayer.cpp
+++ b/src/runtime/GLES_COMPUTE/functions/GCFullyConnectedLayer.cpp
@@ -38,9 +38,9 @@ void GCFullyConnectedLayerReshapeWeights::configure(const IGCTensor *input, IGCT
_kernel = std::move(k);
}
-GCFullyConnectedLayer::GCFullyConnectedLayer()
- : _im2col_kernel(), _reshape_weights_kernel(), _mm_kernel(), _accumulate_biases_kernel(), _im2col_output(), _reshape_weights_output(), _are_weights_reshaped(true), _is_fc_after_conv(true),
- _accumulate_biases(false)
+GCFullyConnectedLayer::GCFullyConnectedLayer(std::shared_ptr<IMemoryManager> memory_manager)
+ : _memory_group(std::move(memory_manager)), _im2col_kernel(), _reshape_weights_kernel(), _mm_kernel(), _accumulate_biases_kernel(), _im2col_output(), _reshape_weights_output(),
+ _are_weights_reshaped(true), _is_fc_after_conv(true), _accumulate_biases(false)
{
}
@@ -61,6 +61,7 @@ void GCFullyConnectedLayer::configure_conv_fc(const IGCTensor *input, const IGCT
_im2col_output.allocator()->init(TensorInfo(shape_im2col, 1, dt));
// Configure im2col kernel
+ _memory_group.manage(&_im2col_output);
_im2col_kernel.configure(input, &_im2col_output, Size2D(1, 1), PadStrideInfo(1, 1, 0, 0), false);
// Configure matrix multiply kernel
@@ -156,6 +157,7 @@ void GCFullyConnectedLayer::run()
_reshape_weights_kernel.run();
}
+ _memory_group.acquire();
// Linearize input if it comes from a convolutional layer
if(_is_fc_after_conv)
{
@@ -177,4 +179,5 @@ void GCFullyConnectedLayer::run()
GCScheduler::get().dispatch(_accumulate_biases_kernel);
}
+ _memory_group.release();
}
diff --git a/src/runtime/GLES_COMPUTE/functions/GCGEMM.cpp b/src/runtime/GLES_COMPUTE/functions/GCGEMM.cpp
index 5122c20504..46424a59f5 100644
--- a/src/runtime/GLES_COMPUTE/functions/GCGEMM.cpp
+++ b/src/runtime/GLES_COMPUTE/functions/GCGEMM.cpp
@@ -40,8 +40,8 @@
using namespace arm_compute;
using namespace arm_compute::gles_compute;
-GCGEMM::GCGEMM()
- : _interleave_kernel(), _transpose_kernel(), _mm_kernel(), _ma_kernel(), _tmp_a(), _tmp_b(), _is_interleaved_transposed(false), _run_addition(false)
+GCGEMM::GCGEMM(std::shared_ptr<IMemoryManager> memory_manager)
+ : _memory_group(std::move(memory_manager)), _interleave_kernel(), _transpose_kernel(), _mm_kernel(), _ma_kernel(), _tmp_a(), _tmp_b(), _is_interleaved_transposed(false), _run_addition(false)
{
}
@@ -88,6 +88,7 @@ void GCGEMM::configure(const IGCTensor *a, const IGCTensor *b, const IGCTensor *
TensorInfo info_a(shape_tmp_a, 1, a->info()->data_type(), a->info()->fixed_point_position());
_tmp_a.allocator()->init(info_a);
+ _memory_group.manage(&_tmp_a);
TensorInfo info_b(shape_tmp_b, 1, b->info()->data_type(), b->info()->fixed_point_position());
_tmp_b.allocator()->init(info_b);
@@ -118,6 +119,7 @@ void GCGEMM::configure(const IGCTensor *a, const IGCTensor *b, const IGCTensor *
void GCGEMM::run()
{
+ _memory_group.acquire();
if(_is_interleaved_transposed)
{
// Run interleave kernel
@@ -137,4 +139,5 @@ void GCGEMM::run()
GCScheduler::get().memory_barrier();
GCScheduler::get().dispatch(_ma_kernel);
}
+ _memory_group.release();
}
diff --git a/src/runtime/GLES_COMPUTE/functions/GCNormalizationLayer.cpp b/src/runtime/GLES_COMPUTE/functions/GCNormalizationLayer.cpp
index fc3882dbda..13213d2b54 100644
--- a/src/runtime/GLES_COMPUTE/functions/GCNormalizationLayer.cpp
+++ b/src/runtime/GLES_COMPUTE/functions/GCNormalizationLayer.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -33,8 +33,8 @@
using namespace arm_compute;
-GCNormalizationLayer::GCNormalizationLayer()
- : _squared_input(), _norm_kernel(), _multiply_kernel(), _border_handler()
+GCNormalizationLayer::GCNormalizationLayer(std::shared_ptr<IMemoryManager> memory_manager)
+ : _memory_group(std::move(memory_manager)), _squared_input(), _norm_kernel(), _multiply_kernel(), _border_handler()
{
}
@@ -43,6 +43,7 @@ void GCNormalizationLayer::configure(const IGCTensor *input, IGCTensor *output,
ARM_COMPUTE_ERROR_ON(input == nullptr);
_squared_input.allocator()->init(TensorInfo(input->info()->tensor_shape(), 1, input->info()->data_type()));
+ _memory_group.manage(&_squared_input);
_norm_kernel.configure(input, &_squared_input, output, norm_info);
_multiply_kernel.configure(input, input, &_squared_input, 1.0f);
@@ -55,9 +56,11 @@ void GCNormalizationLayer::configure(const IGCTensor *input, IGCTensor *output,
void GCNormalizationLayer::run()
{
+ _memory_group.acquire();
GCScheduler::get().dispatch(_multiply_kernel, false);
GCScheduler::get().memory_barrier();
GCScheduler::get().dispatch(_border_handler, false);
GCScheduler::get().memory_barrier();
GCScheduler::get().dispatch(_norm_kernel, true);
+ _memory_group.release();
}
diff --git a/src/runtime/GLES_COMPUTE/functions/GCSoftmaxLayer.cpp b/src/runtime/GLES_COMPUTE/functions/GCSoftmaxLayer.cpp
index 5221c5cc5d..1748a5952b 100644
--- a/src/runtime/GLES_COMPUTE/functions/GCSoftmaxLayer.cpp
+++ b/src/runtime/GLES_COMPUTE/functions/GCSoftmaxLayer.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -29,8 +29,8 @@
using namespace arm_compute;
-GCSoftmaxLayer::GCSoftmaxLayer()
- : _max_kernel(), _shift_exp_sum_kernel(), _norm_kernel(), _max(), _sum(), _tmp()
+GCSoftmaxLayer::GCSoftmaxLayer(std::shared_ptr<IMemoryManager> memory_manager)
+ : _memory_group(std::move(memory_manager)), _max_kernel(), _shift_exp_sum_kernel(), _norm_kernel(), _max(), _sum(), _tmp()
{
}
@@ -50,6 +50,11 @@ void GCSoftmaxLayer::configure(const IGCTensor *input, IGCTensor *output, float
_max.allocator()->init(tensor_info_max_sum);
_sum.allocator()->init(tensor_info_max_sum);
+ // Manage intermediate buffers
+ _memory_group.manage(&_tmp);
+ _memory_group.manage(&_max);
+ _memory_group.manage(&_sum);
+
// Configure Kernels
_max_kernel.configure(input, &_max);
_shift_exp_sum_kernel.configure(input, &_max, &_tmp, &_sum);
@@ -63,9 +68,13 @@ void GCSoftmaxLayer::configure(const IGCTensor *input, IGCTensor *output, float
void GCSoftmaxLayer::run()
{
+ _memory_group.acquire();
+
GCScheduler::get().dispatch(_max_kernel, false);
GCScheduler::get().memory_barrier();
GCScheduler::get().dispatch(_shift_exp_sum_kernel, false);
GCScheduler::get().memory_barrier();
GCScheduler::get().dispatch(_norm_kernel);
+
+ _memory_group.release();
}