about summary refs log tree commit diff
path: root/src/runtime
diff options
context:
space:
mode:
Diffstat (limited to 'src/runtime')
-rw-r--r-- src/runtime/CL/functions/CLConvolutionLayer.cpp       | 3
-rw-r--r-- src/runtime/CL/functions/CLDirectConvolutionLayer.cpp | 8
-rw-r--r-- src/runtime/CL/functions/CLGEMM.cpp                   | 3
3 files changed, 13 insertions(+), 1 deletion(-)
diff --git a/src/runtime/CL/functions/CLConvolutionLayer.cpp b/src/runtime/CL/functions/CLConvolutionLayer.cpp
index ff94e9d7a2..b1b83985d0 100644
--- a/src/runtime/CL/functions/CLConvolutionLayer.cpp
+++ b/src/runtime/CL/functions/CLConvolutionLayer.cpp
@@ -113,6 +113,9 @@ void CLConvolutionLayer::configure(const ICLTensor *input, const ICLTensor *weig
const DataType dt = input->info()->data_type();
const int fixed_point_position = input->info()->fixed_point_position();
+ // Set the GPU target for matrix multiply
+ _mm_kernel.set_target(CLScheduler::get().target());
+
_has_bias = (biases != nullptr);
_are_weights_reshaped = weights_info.are_reshaped();
diff --git a/src/runtime/CL/functions/CLDirectConvolutionLayer.cpp b/src/runtime/CL/functions/CLDirectConvolutionLayer.cpp
index 65be417afb..6fafd9c7f1 100644
--- a/src/runtime/CL/functions/CLDirectConvolutionLayer.cpp
+++ b/src/runtime/CL/functions/CLDirectConvolutionLayer.cpp
@@ -38,13 +38,21 @@ CLDirectConvolutionLayer::CLDirectConvolutionLayer()
void CLDirectConvolutionLayer::configure(ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info)
{
+ // Set GPU target
+ _direct_conv_kernel.set_target(CLScheduler::get().target());
+
+ // Configure direct convolution
_direct_conv_kernel.configure(input, weights, biases, output, conv_info);
+ // Configure border handler
_input_border_handler.configure(input, _direct_conv_kernel.border_size(), BorderMode::CONSTANT, PixelValue(0));
}
void CLDirectConvolutionLayer::run()
{
+ // Run border handler
CLScheduler::get().enqueue(_input_border_handler, false);
+
+ // Run direct convolution
CLScheduler::get().enqueue(_direct_conv_kernel);
}
diff --git a/src/runtime/CL/functions/CLGEMM.cpp b/src/runtime/CL/functions/CLGEMM.cpp
index 935e856333..e81d8a6b97 100644
--- a/src/runtime/CL/functions/CLGEMM.cpp
+++ b/src/runtime/CL/functions/CLGEMM.cpp
@@ -59,6 +59,8 @@ void CLGEMM::configure(const ICLTensor *a, const ICLTensor *b, const ICLTensor *
ARM_COMPUTE_ERROR_ON_MSG(a->info()->dimension(0) != b->info()->dimension(1), "The product AB is defined only if the number of columns in A is equal to the number of rows in B");
+ _mm_kernel.set_target(CLScheduler::get().target());
+
// Check if the first input tensor is a vector. If so, all the kernels for reshaping the tensors can be skipped
if(a->info()->dimension(1) != 1)
{
@@ -87,7 +89,6 @@ void CLGEMM::configure(const ICLTensor *a, const ICLTensor *b, const ICLTensor *
_transpose_kernel.configure(b, &_tmp_b);
// Configure matrix multiply kernel
- _mm_kernel.set_target(CLScheduler::get().target());
_mm_kernel.configure(&_tmp_a, &_tmp_b, output, alpha);
// Allocate intermediate tensors