author    Georgios Pinitas <georgios.pinitas@arm.com>  2018-06-04 19:27:13 +0100
committer Anthony Barbier <anthony.barbier@arm.com>    2018-11-02 16:52:54 +0000
commit    17812ba9f7cf2c8f5121c11760ac45fbbdb7aeaf (patch)
tree      28c7bb65a8306e82de91a644fdcc1c0947c6f6d7 /src/runtime/CL/functions
parent    f8d8f3aff04faf731f20411ecb91027eab4365c5 (diff)
download  ComputeLibrary-17812ba9f7cf2c8f5121c11760ac45fbbdb7aeaf.tar.gz
COMPMID-817: Tuner: Port kernels to new design.
Change-Id: Iaabb1153c2abe0400ec79d51a21347debe92d642
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/134062
Tested-by: Jenkins <bsgcomp@arm.com>
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
Diffstat (limited to 'src/runtime/CL/functions')
-rw-r--r--  src/runtime/CL/functions/CLDepthwiseConvolutionLayer.cpp  | 2
-rw-r--r--  src/runtime/CL/functions/CLFlattenLayer.cpp               | 4
-rw-r--r--  src/runtime/CL/functions/CLFullyConnectedLayer.cpp        | 1
-rw-r--r--  src/runtime/CL/functions/CLGEMM.cpp                       | 2
-rw-r--r--  src/runtime/CL/functions/CLGEMMConvolutionLayer.cpp       | 8
-rw-r--r--  src/runtime/CL/functions/CLLocallyConnectedLayer.cpp      | 2
-rw-r--r--  src/runtime/CL/functions/CLPoolingLayer.cpp               | 3
7 files changed, 18 insertions(+), 4 deletions(-)
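
The change repeated across these functions is the same two-line pattern: configure the OpenCL kernel as before, then hand it to the scheduler's static tuner. A minimal sketch of that pattern follows; ExampleFunction, _example_kernel and the tensor arguments are hypothetical placeholders for illustration only, while the CLScheduler::get().tune_kernel_static(...) call is the one introduced by this patch.

#include "arm_compute/runtime/CL/CLScheduler.h"

using namespace arm_compute;

// Hypothetical configure() method illustrating the pattern added in this patch.
// ExampleFunction and _example_kernel stand in for any of the functions/kernels
// touched by the diff (im2col, v2mm, GEMM matrix multiply, col2im, pooling, ...).
void ExampleFunction::configure(const ICLTensor *input, ICLTensor *output)
{
    // 1) Configure the kernel exactly as before.
    _example_kernel.configure(input, output);

    // 2) New in this patch: ask the scheduler's tuner to pick a static
    //    configuration (e.g. a local work-group size) for the configured kernel.
    CLScheduler::get().tune_kernel_static(_example_kernel);
}

Note that tune_kernel_static() is only expected to have an effect when the CLScheduler was initialised with a tuner (for example a CLTuner passed to CLScheduler::get().default_init()); without one the call should be a no-op. This reflects the assumed behaviour of the tuner design this patch targets rather than anything stated in the diff itself.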
diff --git a/src/runtime/CL/functions/CLDepthwiseConvolutionLayer.cpp b/src/runtime/CL/functions/CLDepthwiseConvolutionLayer.cpp
index 676a121a76..c2b24e3c20 100644
--- a/src/runtime/CL/functions/CLDepthwiseConvolutionLayer.cpp
+++ b/src/runtime/CL/functions/CLDepthwiseConvolutionLayer.cpp
@@ -134,6 +134,7 @@ void CLDepthwiseConvolutionLayer::configure(ICLTensor *input, const ICLTensor *w
_input_reshaped.allocator()->init(input->info()->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(shape_im2col));
_im2col_kernel.set_target(gpu_target);
_im2col_kernel.configure(input, &_input_reshaped, Size2D(weights_w, weights_h), conv_info, append_bias, depth_multiplier);
+ CLScheduler::get().tune_kernel_static(_im2col_kernel);
// Weights reshape configuration
const TensorShape shape_weights_reshape(patch_size, weights_z);
@@ -149,6 +150,7 @@ void CLDepthwiseConvolutionLayer::configure(ICLTensor *input, const ICLTensor *w
_v2mm_output.allocator()->init(input->info()->clone()->set_is_resizable(true).reset_padding().set_data_type(v2mm_dt).set_tensor_shape(shape_v2mm_out));
_v2mm_kernel.set_target(gpu_target);
_v2mm_kernel.configure(&_input_reshaped, &_weights_reshaped, &_v2mm_output);
+ CLScheduler::get().tune_kernel_static(_v2mm_kernel);
_output_reshaped.allocator()->init(_v2mm_output.info()->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(output_shape));
_vector_to_tensor_kernel.configure(&_v2mm_output, (_is_quantized) ? &_output_reshaped : output, conv_w, conv_h);
diff --git a/src/runtime/CL/functions/CLFlattenLayer.cpp b/src/runtime/CL/functions/CLFlattenLayer.cpp
index 9f571b2036..f5809a218a 100644
--- a/src/runtime/CL/functions/CLFlattenLayer.cpp
+++ b/src/runtime/CL/functions/CLFlattenLayer.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -25,6 +25,7 @@
#include "arm_compute/core/CL/kernels/CLIm2ColKernel.h"
#include "arm_compute/core/Size2D.h"
+#include "arm_compute/runtime/CL/CLScheduler.h"
#include "support/ToolchainSupport.h"
using namespace arm_compute;
@@ -34,4 +35,5 @@ void CLFlattenLayer::configure(const ICLTensor *input, ICLTensor *output)
auto k = arm_compute::support::cpp14::make_unique<CLIm2ColKernel>();
k->configure(input, output, Size2D(1, 1), PadStrideInfo(1, 1, 0, 0), false);
_kernel = std::move(k);
+ CLScheduler::get().tune_kernel_static(*_kernel);
}
diff --git a/src/runtime/CL/functions/CLFullyConnectedLayer.cpp b/src/runtime/CL/functions/CLFullyConnectedLayer.cpp
index 44bf28374f..9248bc559b 100644
--- a/src/runtime/CL/functions/CLFullyConnectedLayer.cpp
+++ b/src/runtime/CL/functions/CLFullyConnectedLayer.cpp
@@ -117,6 +117,7 @@ void CLFullyConnectedLayer::configure_conv_fc(const ICLTensor *input, const ICLT
// Configure im2col kernel
_memory_group.manage(&_im2col_output);
_im2col_kernel.configure(input, &_im2col_output, Size2D(1, 1), PadStrideInfo(1, 1, 0, 0), false);
+ CLScheduler::get().tune_kernel_static(_im2col_kernel);
// Configure matrix multiply kernel
configure_mm(&_im2col_output, weights, output);
diff --git a/src/runtime/CL/functions/CLGEMM.cpp b/src/runtime/CL/functions/CLGEMM.cpp
index 7f37520f10..a0ec66f804 100644
--- a/src/runtime/CL/functions/CLGEMM.cpp
+++ b/src/runtime/CL/functions/CLGEMM.cpp
@@ -143,7 +143,9 @@ void CLGEMM::configure(const ICLTensor *a, const ICLTensor *b, const ICLTensor *
_transpose_kernel.configure(b, &_tmp_b, mult_transpose1xW_width);
}
+ // Configure and tune matrix multiply kernel
_mm_kernel.configure(matrix_a, matrix_b, output, alpha, _is_interleaved_transposed, GEMMReshapeInfo(m, n, k, mult_transpose1xW_width, mult_interleave4x4_height));
+ CLScheduler::get().tune_kernel_static(_mm_kernel);
if(_is_interleaved_transposed)
{
diff --git a/src/runtime/CL/functions/CLGEMMConvolutionLayer.cpp b/src/runtime/CL/functions/CLGEMMConvolutionLayer.cpp
index 4f87043373..27bed44098 100644
--- a/src/runtime/CL/functions/CLGEMMConvolutionLayer.cpp
+++ b/src/runtime/CL/functions/CLGEMMConvolutionLayer.cpp
@@ -230,10 +230,11 @@ void CLGEMMConvolutionLayer::configure(const ICLTensor *input, const ICLTensor *
_gemm_output.allocator()->init(info_gemm);
_memory_group.manage(&_gemm_output);
- // Configure im2col
+ // Configure and tune im2col
_im2col_kernel.configure(input, &_im2col_output, Size2D(kernel_width, kernel_height), conv_info, append_bias, dilation);
+ CLScheduler::get().tune_kernel_static(_im2col_kernel);
- // Configure GEMM
+ // Configure and tune GEMM
configure_mm(&_im2col_output, weights, &_gemm_output);
_im2col_output.allocator()->allocate();
@@ -250,8 +251,9 @@ void CLGEMMConvolutionLayer::configure(const ICLTensor *input, const ICLTensor *
_gemmlowp_output_stage.configure(&_gemm_output, biases, &_tmp_output, output_multiplier, output_shift, output_quant_info.offset);
}
- // Configure Col2Im
+ // Configure and tune Col2Im
_col2im_kernel.configure(_is_quantized ? &_tmp_output : &_gemm_output, output, std::make_pair(conv_w, conv_h));
+ CLScheduler::get().tune_kernel_static(_col2im_kernel);
if(_is_quantized)
{
_tmp_output.allocator()->allocate();
diff --git a/src/runtime/CL/functions/CLLocallyConnectedLayer.cpp b/src/runtime/CL/functions/CLLocallyConnectedLayer.cpp
index 986fe00973..31d5cd5a7e 100644
--- a/src/runtime/CL/functions/CLLocallyConnectedLayer.cpp
+++ b/src/runtime/CL/functions/CLLocallyConnectedLayer.cpp
@@ -163,6 +163,8 @@ void CLLocallyConnectedLayer::configure(const ICLTensor *input, const ICLTensor
_weights_reshaped.allocator()->allocate();
_input_im2col_reshaped.allocator()->allocate();
_gemm_output.allocator()->allocate();
+
+ CLScheduler::get().tune_kernel_static(_input_im2col_kernel);
}
void CLLocallyConnectedLayer::run()
diff --git a/src/runtime/CL/functions/CLPoolingLayer.cpp b/src/runtime/CL/functions/CLPoolingLayer.cpp
index 17875a38ad..cbe1ce3b47 100644
--- a/src/runtime/CL/functions/CLPoolingLayer.cpp
+++ b/src/runtime/CL/functions/CLPoolingLayer.cpp
@@ -63,6 +63,9 @@ void CLPoolingLayer::configure(ICLTensor *input, ICLTensor *output, const Poolin
ARM_COMPUTE_ERROR("Data layout not supported");
}
_border_handler.configure(input, _kernel->border_size(), border_mode, pixel_value);
+
+ // Tune kernels
+ CLScheduler::get().tune_kernel_static(*_kernel);
}
Status CLPoolingLayer::validate(const ITensorInfo *input, const ITensorInfo *output, const PoolingLayerInfo &pool_info)