path: root/src/runtime/CL/functions/CLGEMM.cpp
author     Gian Marco Iodice <gianmarco.iodice@arm.com>  2017-08-16 18:38:32 +0100
committer  Anthony Barbier <anthony.barbier@arm.com>     2018-11-02 16:35:24 +0000
commit     1246b63ca04cb067f26ae860688647224d6ba24e (patch)
tree       a805ff1faa1de1b06f3569926ec2f09b63ecdb5f /src/runtime/CL/functions/CLGEMM.cpp
parent     f583fb74ecaad9d57672e1422d68566a78bb503e (diff)
download   ComputeLibrary-1246b63ca04cb067f26ae860688647224d6ba24e.tar.gz
COMPMID-477 - Optimized Direct Convolution 3x3 and 5x5 (f32) for Bifrost.
Each work-item computes 4x3 output elements in case of 3x3 convolution and 4x2 in case of 5x5 convolution.

Change-Id: I6ebbaff8b7e971c1f90d5845c0b58d2a40f39df5
Reviewed-on: http://mpd-gerrit.cambridge.arm.com/84345
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
Tested-by: Kaizen <jeremy.johnson+kaizengerrit@arm.com>
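As a rough, standalone illustration of the work-item tiling described in the message above (not the actual OpenCL kernel shipped with this patch): each work-item owns a small block of outputs, e.g. a 4x3 tile for the 3x3 case. The function name conv3x3_tile_4x3, the row-major layouts and the 4-wide by 3-high tile orientation below are assumptions made for the sketch.

// Hypothetical sketch: one "work-item" producing a 4x3 output tile of a 3x3
// convolution (stride 1, no padding). Not ComputeLibrary code.
#include <cstddef>

void conv3x3_tile_4x3(const float *in, std::size_t in_w,   // input plane, row-major, width in_w
                      const float *w,                      // 3x3 weights, row-major
                      float *out, std::size_t out_w,       // output plane, row-major, width out_w
                      std::size_t x0, std::size_t y0)      // top-left output coordinate of this tile
{
    for(std::size_t dy = 0; dy < 3; ++dy)     // 3 output rows per work-item
    {
        for(std::size_t dx = 0; dx < 4; ++dx) // 4 output columns per work-item
        {
            float acc = 0.f;
            for(std::size_t ky = 0; ky < 3; ++ky)
            {
                for(std::size_t kx = 0; kx < 3; ++kx)
                {
                    acc += in[(y0 + dy + ky) * in_w + (x0 + dx + kx)] * w[ky * 3 + kx];
                }
            }
            out[(y0 + dy) * out_w + (x0 + dx)] = acc;
        }
    }
}

int main()
{
    float in[6 * 5]  = {}; // 6x5 input plane (width 6, height 5)
    float w[9]       = {}; // 3x3 weights
    float out[4 * 3] = {}; // 4x3 output plane (width 4, height 3)
    conv3x3_tile_4x3(in, 6, w, out, 4, 0, 0);
    return 0;
}

The point of this blocking is reuse: the four horizontally adjacent outputs read overlapping input values, which the real kernel can keep in registers instead of reloading per output element.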
Diffstat (limited to 'src/runtime/CL/functions/CLGEMM.cpp')
-rw-r--r--  src/runtime/CL/functions/CLGEMM.cpp  3
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/src/runtime/CL/functions/CLGEMM.cpp b/src/runtime/CL/functions/CLGEMM.cpp
index 935e856333..e81d8a6b97 100644
--- a/src/runtime/CL/functions/CLGEMM.cpp
+++ b/src/runtime/CL/functions/CLGEMM.cpp
@@ -59,6 +59,8 @@ void CLGEMM::configure(const ICLTensor *a, const ICLTensor *b, const ICLTensor *
     ARM_COMPUTE_ERROR_ON_MSG(a->info()->dimension(0) != b->info()->dimension(1), "The product AB is defined only if the number of columns in A is equal to the number of rows in B");
 
+    _mm_kernel.set_target(CLScheduler::get().target());
+
     // Check if the first input tensor is a vector. If so, all the kernels for reshaping the tensors can be skipped
     if(a->info()->dimension(1) != 1)
     {
@@ -87,7 +89,6 @@ void CLGEMM::configure(const ICLTensor *a, const ICLTensor *b, const ICLTensor *
         _transpose_kernel.configure(b, &_tmp_b);
 
         // Configure matrix multiply kernel
-        _mm_kernel.set_target(CLScheduler::get().target());
         _mm_kernel.configure(&_tmp_a, &_tmp_b, output, alpha);
 
         // Allocate intermediate tensors
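The change to CLGEMM itself is small: as the two hunks show, the _mm_kernel.set_target(CLScheduler::get().target()) call moves from next to the reshape-kernel configuration up to before the vector check, so the matrix-multiply kernel receives the GPU target on both the reshaped path and the vector path. Below is a minimal standalone mock of that control flow; MockMMKernel, configure_gemm and the printed strings are invented for illustration and are not ComputeLibrary APIs.

// Standalone mock (not ComputeLibrary code) of the configure() control flow
// after this patch: the device target is set once, before the branch.
#include <iostream>
#include <string>

struct MockMMKernel
{
    std::string target{"<unset>"};
    void set_target(const std::string &t) { target = t; }
    void configure(bool reshaped) const
    {
        std::cout << "mm kernel configured (reshaped=" << reshaped << ") for target '" << target << "'\n";
    }
};

void configure_gemm(bool a_is_vector, const std::string &device_target)
{
    MockMMKernel mm_kernel;

    // After the patch: the target is set unconditionally, up front.
    mm_kernel.set_target(device_target);

    if(!a_is_vector)
    {
        // General case: A is interleaved and B transposed before the multiply.
        mm_kernel.configure(/*reshaped=*/true);
    }
    else
    {
        // Vector case: the reshape kernels are skipped. Before the patch this
        // path did not call set_target, so the kernel kept its default target.
        mm_kernel.configure(/*reshaped=*/false);
    }
}

int main()
{
    configure_gemm(false, "Bifrost");
    configure_gemm(true, "Bifrost");
    return 0;
}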