about summary refs log tree commit diff
path: root/src/runtime/heuristics/matmul_native
diff options
context:
space:
mode:
Diffstat (limited to 'src/runtime/heuristics/matmul_native')
-rw-r--r-- src/runtime/heuristics/matmul_native/ClMatMulNativeDefaultConfigValhall.cpp | 396
-rw-r--r-- src/runtime/heuristics/matmul_native/ClMatMulNativeDefaultConfigValhall.h   |  11
-rw-r--r-- src/runtime/heuristics/matmul_native/ClMatMulNativeHelpers.cpp              |  42
-rw-r--r-- src/runtime/heuristics/matmul_native/ClMatMulNativeHelpers.h                |  15
-rw-r--r-- src/runtime/heuristics/matmul_native/ClMatMulNativeKernelConfig.h           |   4
-rw-r--r-- src/runtime/heuristics/matmul_native/IClMatMulNativeKernelConfig.h          |   9
6 files changed, 185 insertions, 292 deletions
diff --git a/src/runtime/heuristics/matmul_native/ClMatMulNativeDefaultConfigValhall.cpp b/src/runtime/heuristics/matmul_native/ClMatMulNativeDefaultConfigValhall.cpp
index 01102b3d60..b3c8d891dc 100644
--- a/src/runtime/heuristics/matmul_native/ClMatMulNativeDefaultConfigValhall.cpp
+++ b/src/runtime/heuristics/matmul_native/ClMatMulNativeDefaultConfigValhall.cpp
@@ -28,30 +28,33 @@
#include "arm_compute/core/GPUTarget.h"
#include "arm_compute/core/KernelDescriptors.h"
#include "arm_compute/core/TensorInfo.h"
-#include "src/gpu/cl/kernels/ClMatMulNativeKernel.h"
-#include <utility>
+#include "src/gpu/cl/kernels/ClMatMulNativeKernel.h"
#include "src/runtime/heuristics/matmul_native/ClMatMulNativeHelpers.h"
+#include <utility>
+
namespace arm_compute
{
namespace cl_matmul
{
-ClMatMulNativeDefaultConfigValhall::ClMatMulNativeDefaultConfigValhall(GPUTarget gpu)
- : IClMatMulNativeKernelConfig(gpu)
+ClMatMulNativeDefaultConfigValhall::ClMatMulNativeDefaultConfigValhall(GPUTarget gpu) : IClMatMulNativeKernelConfig(gpu)
{
}
-MatMulKernelInfo ClMatMulNativeDefaultConfigValhall::configure(const ITensorInfo *lhs, const ITensorInfo *rhs, const MatMulInfo &info)
+MatMulKernelInfo
+ClMatMulNativeDefaultConfigValhall::configure(const ITensorInfo *lhs, const ITensorInfo *rhs, const MatMulInfo &info)
{
- using ConfigurationFunctionExecutorPtr = MatMulKernelInfo (ClMatMulNativeDefaultConfigValhall::*)(unsigned int m, unsigned int n, unsigned int k, unsigned int b, bool rhs_lock_padding, const MatMulInfo & info);
+ using ConfigurationFunctionExecutorPtr = MatMulKernelInfo (ClMatMulNativeDefaultConfigValhall::*)(
+ unsigned int m, unsigned int n, unsigned int k, unsigned int b, bool rhs_lock_padding, const MatMulInfo &info);
- ClMatMulNativeConfigArray<ConfigurationFunctionExecutorPtr> configs_G710(&ClMatMulNativeDefaultConfigValhall::configure_G710_f32,
- &ClMatMulNativeDefaultConfigValhall::configure_G710_f16,
- &ClMatMulNativeDefaultConfigValhall::configure_G710_u8);
+ ClMatMulNativeConfigArray<ConfigurationFunctionExecutorPtr> configs_G710(
+ &ClMatMulNativeDefaultConfigValhall::configure_G710_f32,
+ &ClMatMulNativeDefaultConfigValhall::configure_G710_f16,
+ &ClMatMulNativeDefaultConfigValhall::configure_G710_u8);
ConfigurationFunctionExecutorPtr func = nullptr;
- switch(_target)
+ switch (_target)
{
case GPUTarget::G710:
default:
@@ -67,7 +70,7 @@ MatMulKernelInfo ClMatMulNativeDefaultConfigValhall::configure(const ITensorInfo
const bool is_batched = lhs_shape.num_dimensions() > 2;
- if(is_batched == true)
+ if (is_batched == true)
{
lhs_shape.collapse_from(2);
}
@@ -81,103 +84,48 @@ MatMulKernelInfo ClMatMulNativeDefaultConfigValhall::configure(const ITensorInfo
return (this->*func)(m, n, k, b, rhs->lock_paddings(), info);
}
-MatMulKernelInfo ClMatMulNativeDefaultConfigValhall::configure_G710_f32(unsigned int m, unsigned int n, unsigned int k, unsigned int b, bool rhs_lock_padding, const MatMulInfo &info)
+MatMulKernelInfo ClMatMulNativeDefaultConfigValhall::configure_G710_f32(
+ unsigned int m, unsigned int n, unsigned int k, unsigned int b, bool rhs_lock_padding, const MatMulInfo &info)
{
- const MatMulNativeConfigsMatrix configs_mnkb_best_nt_nt =
- {
- { 3136, 64, 64, 36, 4, 4, 16, 1 },
- { 4096, 48, 32, 36, 4, 4, 4, 1 },
- { 688, 92, 68, 32, 2, 8, 4, 1 },
- { 24, 464, 412, 24, 2, 8, 4, 1 },
- { 112, 184, 144, 28, 4, 4, 16, 1 },
- { 5776, 64, 32, 36, 2, 4, 16, 1 },
- { 1568, 64, 40, 36, 2, 8, 8, 1 },
- { 2920, 64, 64, 24, 4, 4, 16, 1 }
- };
-
- const MatMulNativeConfigsMatrix configs_mnkb_fallback_nt_nt =
- {
- { 3136, 64, 64, 36, 4, 4, 8, 0 },
- { 4096, 48, 32, 36, 4, 4, 8, 0 },
- { 688, 92, 68, 32, 5, 4, 4, 0 },
- { 24, 464, 412, 24, 6, 2, 8, 0 },
- { 112, 184, 144, 28, 6, 4, 4, 0 },
- { 5776, 64, 32, 36, 5, 4, 4, 0 },
- { 1568, 64, 40, 36, 4, 4, 8, 0 },
- { 2920, 64, 64, 24, 4, 4, 8, 0 }
- };
-
- const MatMulNativeConfigsMatrix configs_mnkb_best_nt_t =
- {
- { 3136, 64, 64, 36, 4, 4, 4, 1 },
- { 4096, 48, 32, 36, 2, 2, 16, 1 },
- { 688, 92, 68, 32, 4, 4, 4, 1 },
- { 24, 464, 412, 24, 6, 2, 8, 1 },
- { 112, 184, 144, 28, 4, 2, 16, 1 },
- { 5776, 64, 32, 36, 4, 4, 4, 1 },
- { 1568, 64, 40, 36, 4, 4, 8, 1 },
- { 2920, 64, 64, 24, 4, 4, 4, 1 }
- };
-
- const MatMulNativeConfigsMatrix configs_mnkb_fallback_nt_t =
- {
- { 3136, 64, 64, 36, 5, 4, 4, 0 },
- { 4096, 48, 32, 36, 5, 4, 4, 0 },
- { 688, 92, 68, 32, 5, 4, 4, 0 },
- { 24, 464, 412, 24, 6, 2, 4, 0 },
- { 112, 184, 144, 28, 5, 4, 4, 0 },
- { 5776, 64, 32, 36, 5, 4, 4, 0 },
- { 1568, 64, 40, 36, 5, 4, 4, 0 },
- { 2920, 64, 64, 24, 6, 2, 4, 0 }
- };
-
- const MatMulNativeConfigsMatrix configs_mnkb_best_t_nt =
- {
- { 3136, 64, 64, 36, 4, 4, 16, 1 },
- { 4096, 48, 32, 36, 4, 4, 4, 1 },
- { 688, 92, 68, 32, 2, 8, 4, 1 },
- { 24, 464, 412, 24, 2, 8, 4, 1 },
- { 112, 184, 144, 28, 4, 4, 16, 1 },
- { 5776, 64, 32, 36, 2, 8, 8, 1 },
- { 1568, 64, 40, 36, 4, 4, 8, 1 },
- { 2920, 64, 64, 24, 4, 4, 16, 1 }
- };
-
- const MatMulNativeConfigsMatrix configs_mnkb_fallback_t_nt =
- {
- { 3136, 64, 64, 36, 4, 4, 4, 0 },
- { 4096, 48, 32, 36, 4, 4, 4, 0 },
- { 688, 92, 68, 32, 4, 4, 4, 0 },
- { 24, 464, 412, 24, 4, 4, 4, 0 },
- { 112, 184, 144, 28, 4, 4, 4, 0 },
- { 5776, 64, 32, 36, 4, 4, 8, 0 },
- { 1568, 64, 40, 36, 4, 4, 4, 0 },
- { 2920, 64, 64, 24, 4, 4, 4, 0 }
- };
-
- const MatMulNativeConfigsMatrix configs_mnkb_best_t_t =
- {
- { 3136, 64, 64, 36, 4, 4, 4, 1 },
- { 4096, 48, 32, 36, 4, 4, 4, 1 },
- { 688, 92, 68, 32, 4, 4, 4, 1 },
- { 24, 464, 412, 24, 2, 2, 16, 1 },
- { 112, 184, 144, 28, 4, 4, 4, 1 },
- { 5776, 64, 32, 36, 4, 4, 4, 1 },
- { 1568, 64, 40, 36, 4, 4, 4, 1 },
- { 2920, 64, 64, 24, 4, 4, 4, 1 }
- };
-
- const MatMulNativeConfigsMatrix configs_mnkb_fallback_t_t =
- {
- { 3136, 64, 64, 36, 4, 4, 4, 0 },
- { 4096, 48, 32, 36, 4, 4, 4, 0 },
- { 688, 92, 68, 32, 4, 4, 4, 0 },
- { 24, 464, 412, 24, 4, 2, 8, 0 },
- { 112, 184, 144, 28, 4, 4, 4, 0 },
- { 5776, 64, 32, 36, 4, 4, 4, 0 },
- { 1568, 64, 40, 36, 4, 4, 4, 0 },
- { 2920, 64, 64, 24, 4, 4, 4, 0 }
- };
+ const MatMulNativeConfigsMatrix configs_mnkb_best_nt_nt = {
+ {3136, 64, 64, 36, 4, 4, 16, 1}, {4096, 48, 32, 36, 4, 4, 4, 1}, {688, 92, 68, 32, 2, 8, 4, 1},
+ {24, 464, 412, 24, 2, 8, 4, 1}, {112, 184, 144, 28, 4, 4, 16, 1}, {5776, 64, 32, 36, 2, 4, 16, 1},
+ {1568, 64, 40, 36, 2, 8, 8, 1}, {2920, 64, 64, 24, 4, 4, 16, 1}};
+
+ const MatMulNativeConfigsMatrix configs_mnkb_fallback_nt_nt = {
+ {3136, 64, 64, 36, 4, 4, 8, 0}, {4096, 48, 32, 36, 4, 4, 8, 0}, {688, 92, 68, 32, 5, 4, 4, 0},
+ {24, 464, 412, 24, 6, 2, 8, 0}, {112, 184, 144, 28, 6, 4, 4, 0}, {5776, 64, 32, 36, 5, 4, 4, 0},
+ {1568, 64, 40, 36, 4, 4, 8, 0}, {2920, 64, 64, 24, 4, 4, 8, 0}};
+
+ const MatMulNativeConfigsMatrix configs_mnkb_best_nt_t = {
+ {3136, 64, 64, 36, 4, 4, 4, 1}, {4096, 48, 32, 36, 2, 2, 16, 1}, {688, 92, 68, 32, 4, 4, 4, 1},
+ {24, 464, 412, 24, 6, 2, 8, 1}, {112, 184, 144, 28, 4, 2, 16, 1}, {5776, 64, 32, 36, 4, 4, 4, 1},
+ {1568, 64, 40, 36, 4, 4, 8, 1}, {2920, 64, 64, 24, 4, 4, 4, 1}};
+
+ const MatMulNativeConfigsMatrix configs_mnkb_fallback_nt_t = {
+ {3136, 64, 64, 36, 5, 4, 4, 0}, {4096, 48, 32, 36, 5, 4, 4, 0}, {688, 92, 68, 32, 5, 4, 4, 0},
+ {24, 464, 412, 24, 6, 2, 4, 0}, {112, 184, 144, 28, 5, 4, 4, 0}, {5776, 64, 32, 36, 5, 4, 4, 0},
+ {1568, 64, 40, 36, 5, 4, 4, 0}, {2920, 64, 64, 24, 6, 2, 4, 0}};
+
+ const MatMulNativeConfigsMatrix configs_mnkb_best_t_nt = {
+ {3136, 64, 64, 36, 4, 4, 16, 1}, {4096, 48, 32, 36, 4, 4, 4, 1}, {688, 92, 68, 32, 2, 8, 4, 1},
+ {24, 464, 412, 24, 2, 8, 4, 1}, {112, 184, 144, 28, 4, 4, 16, 1}, {5776, 64, 32, 36, 2, 8, 8, 1},
+ {1568, 64, 40, 36, 4, 4, 8, 1}, {2920, 64, 64, 24, 4, 4, 16, 1}};
+
+ const MatMulNativeConfigsMatrix configs_mnkb_fallback_t_nt = {
+ {3136, 64, 64, 36, 4, 4, 4, 0}, {4096, 48, 32, 36, 4, 4, 4, 0}, {688, 92, 68, 32, 4, 4, 4, 0},
+ {24, 464, 412, 24, 4, 4, 4, 0}, {112, 184, 144, 28, 4, 4, 4, 0}, {5776, 64, 32, 36, 4, 4, 8, 0},
+ {1568, 64, 40, 36, 4, 4, 4, 0}, {2920, 64, 64, 24, 4, 4, 4, 0}};
+
+ const MatMulNativeConfigsMatrix configs_mnkb_best_t_t = {
+ {3136, 64, 64, 36, 4, 4, 4, 1}, {4096, 48, 32, 36, 4, 4, 4, 1}, {688, 92, 68, 32, 4, 4, 4, 1},
+ {24, 464, 412, 24, 2, 2, 16, 1}, {112, 184, 144, 28, 4, 4, 4, 1}, {5776, 64, 32, 36, 4, 4, 4, 1},
+ {1568, 64, 40, 36, 4, 4, 4, 1}, {2920, 64, 64, 24, 4, 4, 4, 1}};
+
+ const MatMulNativeConfigsMatrix configs_mnkb_fallback_t_t = {
+ {3136, 64, 64, 36, 4, 4, 4, 0}, {4096, 48, 32, 36, 4, 4, 4, 0}, {688, 92, 68, 32, 4, 4, 4, 0},
+ {24, 464, 412, 24, 4, 2, 8, 0}, {112, 184, 144, 28, 4, 4, 4, 0}, {5776, 64, 32, 36, 4, 4, 4, 0},
+ {1568, 64, 40, 36, 4, 4, 4, 0}, {2920, 64, 64, 24, 4, 4, 4, 0}};
const bool adj_lhs = info.adj_lhs();
const bool adj_rhs = info.adj_rhs();
@@ -185,17 +133,17 @@ MatMulKernelInfo ClMatMulNativeDefaultConfigValhall::configure_G710_f32(unsigned
const MatMulNativeConfigsMatrix *configs_best_to_use = nullptr;
const MatMulNativeConfigsMatrix *configs_fallback_to_use = nullptr;
- if((adj_lhs == false) && (adj_rhs == false))
+ if ((adj_lhs == false) && (adj_rhs == false))
{
configs_best_to_use = &configs_mnkb_best_nt_nt;
configs_fallback_to_use = &configs_mnkb_fallback_nt_nt;
}
- else if((adj_lhs == false) && (adj_rhs == true))
+ else if ((adj_lhs == false) && (adj_rhs == true))
{
configs_best_to_use = &configs_mnkb_best_nt_t;
configs_fallback_to_use = &configs_mnkb_fallback_nt_t;
}
- else if((adj_lhs == true) && (adj_rhs == false))
+ else if ((adj_lhs == true) && (adj_rhs == false))
{
configs_best_to_use = &configs_mnkb_best_t_nt;
configs_fallback_to_use = &configs_mnkb_fallback_t_nt;
@@ -209,108 +157,51 @@ MatMulKernelInfo ClMatMulNativeDefaultConfigValhall::configure_G710_f32(unsigned
MatMulKernelInfo desc0 = find_info(*configs_best_to_use, adj_lhs, adj_rhs, m, n, k, b);
MatMulKernelInfo desc1 = find_info(*configs_fallback_to_use, adj_lhs, adj_rhs, m, n, k, b);
- return select_info(desc0,
- desc1,
- m, n, k, b, DataType::F32, rhs_lock_padding);
+ return select_info(desc0, desc1, m, n, k, b, DataType::F32, rhs_lock_padding);
}
-MatMulKernelInfo ClMatMulNativeDefaultConfigValhall::configure_G710_f16(unsigned int m, unsigned int n, unsigned int k, unsigned int b, bool rhs_lock_padding, const MatMulInfo &info)
+MatMulKernelInfo ClMatMulNativeDefaultConfigValhall::configure_G710_f16(
+ unsigned int m, unsigned int n, unsigned int k, unsigned int b, bool rhs_lock_padding, const MatMulInfo &info)
{
- const MatMulNativeConfigsMatrix configs_mnkb_best_nt_nt =
- {
- { 3136, 64, 64, 36, 4, 4, 16, 1 },
- { 4096, 48, 32, 36, 4, 4, 8, 1 },
- { 688, 92, 68, 32, 4, 4, 16, 1 },
- { 24, 464, 412, 24, 4, 4, 4, 1 },
- { 112, 184, 144, 28, 4, 4, 16, 1 },
- { 5776, 64, 32, 36, 4, 4, 8, 1 },
- { 1568, 64, 40, 36, 4, 4, 8, 1 },
- { 2920, 64, 64, 24, 4, 4, 16, 1 }
- };
-
- const MatMulNativeConfigsMatrix configs_mnkb_fallback_nt_nt =
- {
- { 3136, 64, 64, 36, 6, 4, 8, 0 },
- { 4096, 48, 32, 36, 6, 4, 8, 0 },
- { 688, 92, 68, 32, 6, 4, 8, 0 },
- { 24, 464, 412, 24, 4, 4, 8, 0 },
- { 112, 184, 144, 28, 6, 4, 8, 0 },
- { 5776, 64, 32, 36, 6, 4, 8, 0 },
- { 1568, 64, 40, 36, 6, 4, 8, 0 },
- { 2920, 64, 64, 24, 6, 4, 8, 0 }
- };
-
- const MatMulNativeConfigsMatrix configs_mnkb_best_nt_t =
- {
- { 3136, 64, 64, 36, 6, 4, 8, 1 },
- { 4096, 48, 32, 36, 6, 4, 8, 1 },
- { 688, 92, 68, 32, 4, 4, 4, 1 },
- { 24, 464, 412, 24, 6, 2, 4, 1 },
- { 112, 184, 144, 28, 4, 2, 16, 1 },
- { 5776, 64, 32, 36, 6, 4, 8, 1 },
- { 1568, 64, 40, 36, 6, 4, 8, 1 },
- { 2920, 64, 64, 24, 6, 4, 8, 1 }
- };
-
- const MatMulNativeConfigsMatrix configs_mnkb_fallback_nt_t =
- {
- { 3136, 64, 64, 36, 6, 2, 16, 0 },
- { 4096, 48, 32, 36, 5, 4, 8, 0 },
- { 688, 92, 68, 32, 6, 2, 16, 0 },
- { 24, 464, 412, 24, 6, 2, 16, 0 },
- { 112, 184, 144, 28, 6, 2, 16, 0 },
- { 5776, 64, 32, 36, 5, 4, 8, 0 },
- { 1568, 64, 40, 36, 5, 4, 8, 0 },
- { 2920, 64, 64, 24, 6, 2, 16, 0 }
- };
-
- const MatMulNativeConfigsMatrix configs_mnkb_best_t_nt =
- {
- { 3136, 64, 64, 36, 4, 4, 16, 1 },
- { 4096, 48, 32, 36, 4, 4, 4, 1 },
- { 688, 92, 68, 32, 4, 4, 4, 1 },
- { 24, 464, 412, 24, 4, 4, 4, 1 },
- { 112, 184, 144, 28, 4, 4, 4, 1 },
- { 5776, 64, 32, 36, 4, 4, 4, 1 },
- { 1568, 64, 40, 36, 4, 4, 4, 1 },
- { 2920, 64, 64, 24, 4, 4, 4, 1 }
- };
-
- const MatMulNativeConfigsMatrix configs_mnkb_fallback_t_nt =
- {
- { 3136, 64, 64, 36, 4, 4, 4, 0 },
- { 4096, 48, 32, 36, 4, 4, 4, 0 },
- { 688, 92, 68, 32, 4, 4, 4, 0 },
- { 24, 464, 412, 24, 4, 4, 4, 0 },
- { 112, 184, 144, 28, 4, 4, 4, 0 },
- { 5776, 64, 32, 36, 4, 4, 4, 0 },
- { 1568, 64, 40, 36, 4, 4, 4, 0 },
- { 2920, 64, 64, 24, 4, 4, 4, 0 }
- };
-
- const MatMulNativeConfigsMatrix configs_mnkb_best_t_t =
- {
- { 3136, 64, 64, 36, 4, 4, 16, 1 },
- { 4096, 48, 32, 36, 4, 4, 8, 1 },
- { 688, 92, 68, 32, 4, 4, 4, 1 },
- { 24, 464, 412, 24, 4, 2, 8, 1 },
- { 112, 184, 144, 28, 4, 2, 16, 1 },
- { 5776, 64, 32, 36, 4, 4, 16, 1 },
- { 1568, 64, 40, 36, 4, 4, 8, 1 },
- { 2920, 64, 64, 24, 4, 4, 16, 1 }
- };
-
- const MatMulNativeConfigsMatrix configs_mnkb_fallback_t_t =
- {
- { 3136, 64, 64, 36, 4, 4, 8, 0 },
- { 4096, 48, 32, 36, 4, 4, 8, 0 },
- { 688, 92, 68, 32, 4, 4, 8, 0 },
- { 24, 464, 412, 24, 4, 4, 8, 0 },
- { 112, 184, 144, 28, 4, 4, 8, 0 },
- { 5776, 64, 32, 36, 4, 4, 8, 0 },
- { 1568, 64, 40, 36, 4, 4, 8, 0 },
- { 2920, 64, 64, 24, 4, 4, 8, 0 }
- };
+ const MatMulNativeConfigsMatrix configs_mnkb_best_nt_nt = {
+ {3136, 64, 64, 36, 4, 4, 16, 1}, {4096, 48, 32, 36, 4, 4, 8, 1}, {688, 92, 68, 32, 4, 4, 16, 1},
+ {24, 464, 412, 24, 4, 4, 4, 1}, {112, 184, 144, 28, 4, 4, 16, 1}, {5776, 64, 32, 36, 4, 4, 8, 1},
+ {1568, 64, 40, 36, 4, 4, 8, 1}, {2920, 64, 64, 24, 4, 4, 16, 1}};
+
+ const MatMulNativeConfigsMatrix configs_mnkb_fallback_nt_nt = {
+ {3136, 64, 64, 36, 6, 4, 8, 0}, {4096, 48, 32, 36, 6, 4, 8, 0}, {688, 92, 68, 32, 6, 4, 8, 0},
+ {24, 464, 412, 24, 4, 4, 8, 0}, {112, 184, 144, 28, 6, 4, 8, 0}, {5776, 64, 32, 36, 6, 4, 8, 0},
+ {1568, 64, 40, 36, 6, 4, 8, 0}, {2920, 64, 64, 24, 6, 4, 8, 0}};
+
+ const MatMulNativeConfigsMatrix configs_mnkb_best_nt_t = {
+ {3136, 64, 64, 36, 6, 4, 8, 1}, {4096, 48, 32, 36, 6, 4, 8, 1}, {688, 92, 68, 32, 4, 4, 4, 1},
+ {24, 464, 412, 24, 6, 2, 4, 1}, {112, 184, 144, 28, 4, 2, 16, 1}, {5776, 64, 32, 36, 6, 4, 8, 1},
+ {1568, 64, 40, 36, 6, 4, 8, 1}, {2920, 64, 64, 24, 6, 4, 8, 1}};
+
+ const MatMulNativeConfigsMatrix configs_mnkb_fallback_nt_t = {
+ {3136, 64, 64, 36, 6, 2, 16, 0}, {4096, 48, 32, 36, 5, 4, 8, 0}, {688, 92, 68, 32, 6, 2, 16, 0},
+ {24, 464, 412, 24, 6, 2, 16, 0}, {112, 184, 144, 28, 6, 2, 16, 0}, {5776, 64, 32, 36, 5, 4, 8, 0},
+ {1568, 64, 40, 36, 5, 4, 8, 0}, {2920, 64, 64, 24, 6, 2, 16, 0}};
+
+ const MatMulNativeConfigsMatrix configs_mnkb_best_t_nt = {
+ {3136, 64, 64, 36, 4, 4, 16, 1}, {4096, 48, 32, 36, 4, 4, 4, 1}, {688, 92, 68, 32, 4, 4, 4, 1},
+ {24, 464, 412, 24, 4, 4, 4, 1}, {112, 184, 144, 28, 4, 4, 4, 1}, {5776, 64, 32, 36, 4, 4, 4, 1},
+ {1568, 64, 40, 36, 4, 4, 4, 1}, {2920, 64, 64, 24, 4, 4, 4, 1}};
+
+ const MatMulNativeConfigsMatrix configs_mnkb_fallback_t_nt = {
+ {3136, 64, 64, 36, 4, 4, 4, 0}, {4096, 48, 32, 36, 4, 4, 4, 0}, {688, 92, 68, 32, 4, 4, 4, 0},
+ {24, 464, 412, 24, 4, 4, 4, 0}, {112, 184, 144, 28, 4, 4, 4, 0}, {5776, 64, 32, 36, 4, 4, 4, 0},
+ {1568, 64, 40, 36, 4, 4, 4, 0}, {2920, 64, 64, 24, 4, 4, 4, 0}};
+
+ const MatMulNativeConfigsMatrix configs_mnkb_best_t_t = {
+ {3136, 64, 64, 36, 4, 4, 16, 1}, {4096, 48, 32, 36, 4, 4, 8, 1}, {688, 92, 68, 32, 4, 4, 4, 1},
+ {24, 464, 412, 24, 4, 2, 8, 1}, {112, 184, 144, 28, 4, 2, 16, 1}, {5776, 64, 32, 36, 4, 4, 16, 1},
+ {1568, 64, 40, 36, 4, 4, 8, 1}, {2920, 64, 64, 24, 4, 4, 16, 1}};
+
+ const MatMulNativeConfigsMatrix configs_mnkb_fallback_t_t = {
+ {3136, 64, 64, 36, 4, 4, 8, 0}, {4096, 48, 32, 36, 4, 4, 8, 0}, {688, 92, 68, 32, 4, 4, 8, 0},
+ {24, 464, 412, 24, 4, 4, 8, 0}, {112, 184, 144, 28, 4, 4, 8, 0}, {5776, 64, 32, 36, 4, 4, 8, 0},
+ {1568, 64, 40, 36, 4, 4, 8, 0}, {2920, 64, 64, 24, 4, 4, 8, 0}};
const bool adj_lhs = info.adj_lhs();
const bool adj_rhs = info.adj_rhs();
@@ -318,17 +209,17 @@ MatMulKernelInfo ClMatMulNativeDefaultConfigValhall::configure_G710_f16(unsigned
const MatMulNativeConfigsMatrix *configs_best_to_use = nullptr;
const MatMulNativeConfigsMatrix *configs_fallback_to_use = nullptr;
- if((adj_lhs == false) && (adj_rhs == false))
+ if ((adj_lhs == false) && (adj_rhs == false))
{
configs_best_to_use = &configs_mnkb_best_nt_nt;
configs_fallback_to_use = &configs_mnkb_fallback_nt_nt;
}
- else if((adj_lhs == false) && (adj_rhs == true))
+ else if ((adj_lhs == false) && (adj_rhs == true))
{
configs_best_to_use = &configs_mnkb_best_nt_t;
configs_fallback_to_use = &configs_mnkb_fallback_nt_t;
}
- else if((adj_lhs == true) && (adj_rhs == false))
+ else if ((adj_lhs == true) && (adj_rhs == false))
{
configs_best_to_use = &configs_mnkb_best_t_nt;
configs_fallback_to_use = &configs_mnkb_fallback_t_nt;
@@ -342,75 +233,46 @@ MatMulKernelInfo ClMatMulNativeDefaultConfigValhall::configure_G710_f16(unsigned
MatMulKernelInfo desc0 = find_info(*configs_best_to_use, adj_lhs, adj_rhs, m, n, k, b);
MatMulKernelInfo desc1 = find_info(*configs_fallback_to_use, adj_lhs, adj_rhs, m, n, k, b);
- return select_info(desc0,
- desc1,
- m, n, k, b, DataType::F16, rhs_lock_padding);
+ return select_info(desc0, desc1, m, n, k, b, DataType::F16, rhs_lock_padding);
}
-MatMulKernelInfo ClMatMulNativeDefaultConfigValhall::configure_G710_u8(unsigned int m, unsigned int n, unsigned int k, unsigned int b, bool rhs_lock_padding, const MatMulInfo &info)
+MatMulKernelInfo ClMatMulNativeDefaultConfigValhall::configure_G710_u8(
+ unsigned int m, unsigned int n, unsigned int k, unsigned int b, bool rhs_lock_padding, const MatMulInfo &info)
{
ARM_COMPUTE_UNUSED(rhs_lock_padding);
- const MatMulNativeConfigsMatrix configs_mnkb_best_nt_nt =
- {
- { 3136, 64, 64, 36, 6, 4, 4, 0 },
- { 4096, 48, 32, 36, 6, 4, 4, 0 },
- { 688, 92, 68, 32, 2, 8, 4, 0 },
- { 24, 464, 412, 24, 4, 4, 4, 0 },
- { 112, 184, 144, 28, 6, 4, 4, 0 },
- { 5776, 64, 32, 36, 6, 4, 4, 0 },
- { 1568, 64, 40, 36, 6, 4, 4, 0 },
- { 2920, 64, 64, 24, 5, 4, 4, 0 }
- };
-
- const MatMulNativeConfigsMatrix configs_mnkb_best_nt_t =
- {
- { 3136, 64, 64, 36, 4, 4, 16, 0 },
- { 4096, 48, 32, 36, 4, 4, 16, 0 },
- { 688, 92, 68, 32, 4, 4, 16, 0 },
- { 24, 464, 412, 24, 6, 2, 16, 0 },
- { 112, 184, 144, 28, 4, 4, 16, 0 },
- { 5776, 64, 32, 36, 4, 4, 16, 0 },
- { 1568, 64, 40, 36, 6, 4, 4, 0 },
- { 2920, 64, 64, 24, 4, 4, 16, 0 }
- };
-
- const MatMulNativeConfigsMatrix configs_mnkb_best_t_nt =
- {
- { 3136, 64, 64, 36, 4, 4, 8, 0 },
- { 4096, 48, 32, 36, 4, 4, 8, 0 },
- { 688, 92, 68, 32, 4, 4, 4, 0 },
- { 24, 464, 412, 24, 4, 4, 4, 0 },
- { 112, 184, 144, 28, 4, 4, 8, 0 },
- { 5776, 64, 32, 36, 4, 4, 8, 0 },
- { 1568, 64, 40, 36, 4, 4, 8, 0 },
- { 2920, 64, 64, 24, 4, 4, 8, 0 }
- };
-
- const MatMulNativeConfigsMatrix configs_mnkb_best_t_t =
- {
- { 3136, 64, 64, 36, 4, 2, 16, 0 },
- { 4096, 48, 32, 36, 4, 4, 4, 0 },
- { 688, 92, 68, 32, 4, 4, 8, 0 },
- { 24, 464, 412, 24, 4, 2, 16, 0 },
- { 112, 184, 144, 28, 4, 2, 16, 0 },
- { 5776, 64, 32, 36, 4, 4, 4, 0 },
- { 1568, 64, 40, 36, 4, 4, 8, 0 },
- { 2920, 64, 64, 24, 4, 2, 16, 0 }
- };
+ const MatMulNativeConfigsMatrix configs_mnkb_best_nt_nt = {
+ {3136, 64, 64, 36, 6, 4, 4, 0}, {4096, 48, 32, 36, 6, 4, 4, 0}, {688, 92, 68, 32, 2, 8, 4, 0},
+ {24, 464, 412, 24, 4, 4, 4, 0}, {112, 184, 144, 28, 6, 4, 4, 0}, {5776, 64, 32, 36, 6, 4, 4, 0},
+ {1568, 64, 40, 36, 6, 4, 4, 0}, {2920, 64, 64, 24, 5, 4, 4, 0}};
+
+ const MatMulNativeConfigsMatrix configs_mnkb_best_nt_t = {
+ {3136, 64, 64, 36, 4, 4, 16, 0}, {4096, 48, 32, 36, 4, 4, 16, 0}, {688, 92, 68, 32, 4, 4, 16, 0},
+ {24, 464, 412, 24, 6, 2, 16, 0}, {112, 184, 144, 28, 4, 4, 16, 0}, {5776, 64, 32, 36, 4, 4, 16, 0},
+ {1568, 64, 40, 36, 6, 4, 4, 0}, {2920, 64, 64, 24, 4, 4, 16, 0}};
+
+ const MatMulNativeConfigsMatrix configs_mnkb_best_t_nt = {
+ {3136, 64, 64, 36, 4, 4, 8, 0}, {4096, 48, 32, 36, 4, 4, 8, 0}, {688, 92, 68, 32, 4, 4, 4, 0},
+ {24, 464, 412, 24, 4, 4, 4, 0}, {112, 184, 144, 28, 4, 4, 8, 0}, {5776, 64, 32, 36, 4, 4, 8, 0},
+ {1568, 64, 40, 36, 4, 4, 8, 0}, {2920, 64, 64, 24, 4, 4, 8, 0}};
+
+ const MatMulNativeConfigsMatrix configs_mnkb_best_t_t = {
+ {3136, 64, 64, 36, 4, 2, 16, 0}, {4096, 48, 32, 36, 4, 4, 4, 0}, {688, 92, 68, 32, 4, 4, 8, 0},
+ {24, 464, 412, 24, 4, 2, 16, 0}, {112, 184, 144, 28, 4, 2, 16, 0}, {5776, 64, 32, 36, 4, 4, 4, 0},
+ {1568, 64, 40, 36, 4, 4, 8, 0}, {2920, 64, 64, 24, 4, 2, 16, 0}};
const bool adj_lhs = info.adj_lhs();
const bool adj_rhs = info.adj_rhs();
- if((adj_lhs == false) && (adj_rhs == false))
+ if ((adj_lhs == false) && (adj_rhs == false))
{
return find_info(configs_mnkb_best_nt_nt, adj_lhs, adj_rhs, m, n, k, b);
}
- else if((adj_lhs == false) && (adj_rhs == true))
+ else if ((adj_lhs == false) && (adj_rhs == true))
{
return find_info(configs_mnkb_best_nt_t, adj_lhs, adj_rhs, m, n, k, b);
}
- else if((adj_lhs == true) && (adj_rhs == false))
+ else if ((adj_lhs == true) && (adj_rhs == false))
{
return find_info(configs_mnkb_best_t_nt, adj_lhs, adj_rhs, m, n, k, b);
}
@@ -419,5 +281,5 @@ MatMulKernelInfo ClMatMulNativeDefaultConfigValhall::configure_G710_u8(unsigned
return find_info(configs_mnkb_best_t_t, adj_lhs, adj_rhs, m, n, k, b);
}
}
-} // namespace opencl
+} // namespace cl_matmul
} // namespace arm_compute
diff --git a/src/runtime/heuristics/matmul_native/ClMatMulNativeDefaultConfigValhall.h b/src/runtime/heuristics/matmul_native/ClMatMulNativeDefaultConfigValhall.h
index fe167d18dd..6b39db6a3f 100644
--- a/src/runtime/heuristics/matmul_native/ClMatMulNativeDefaultConfigValhall.h
+++ b/src/runtime/heuristics/matmul_native/ClMatMulNativeDefaultConfigValhall.h
@@ -44,10 +44,13 @@ public:
MatMulKernelInfo configure(const ITensorInfo *lhs, const ITensorInfo *rhs, const MatMulInfo &info) override;
private:
- MatMulKernelInfo configure_G710_f32(unsigned int m, unsigned int n, unsigned int k, unsigned int b, bool rhs_lock_padding, const MatMulInfo &info);
- MatMulKernelInfo configure_G710_f16(unsigned int m, unsigned int n, unsigned int k, unsigned int b, bool rhs_lock_padding, const MatMulInfo &info);
- MatMulKernelInfo configure_G710_u8(unsigned int m, unsigned int n, unsigned int k, unsigned int b, bool rhs_lock_padding, const MatMulInfo &info);
+ MatMulKernelInfo configure_G710_f32(
+ unsigned int m, unsigned int n, unsigned int k, unsigned int b, bool rhs_lock_padding, const MatMulInfo &info);
+ MatMulKernelInfo configure_G710_f16(
+ unsigned int m, unsigned int n, unsigned int k, unsigned int b, bool rhs_lock_padding, const MatMulInfo &info);
+ MatMulKernelInfo configure_G710_u8(
+ unsigned int m, unsigned int n, unsigned int k, unsigned int b, bool rhs_lock_padding, const MatMulInfo &info);
};
-} // namespace opencl
+} // namespace cl_matmul
} // namespace arm_compute
#endif /* SRC_RUNTIME_HEURISTICS_MATMUL_NATIVE_CLMATMULNATIVEDEFAULTCONFIGVALHALL */
diff --git a/src/runtime/heuristics/matmul_native/ClMatMulNativeHelpers.cpp b/src/runtime/heuristics/matmul_native/ClMatMulNativeHelpers.cpp
index 1e06e84d4d..89cad30214 100644
--- a/src/runtime/heuristics/matmul_native/ClMatMulNativeHelpers.cpp
+++ b/src/runtime/heuristics/matmul_native/ClMatMulNativeHelpers.cpp
@@ -26,6 +26,7 @@
#include "arm_compute/core/KernelDescriptors.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/TensorShape.h"
+
#include "src/gpu/cl/kernels/ClMatMulNativeKernel.h"
#include <limits>
@@ -37,22 +38,32 @@ namespace cl_matmul
{
MatMulKernelInfo select_info(const MatMulKernelInfo &info0,
const MatMulKernelInfo &info1,
- unsigned int m, unsigned int n, unsigned int k, unsigned int b, DataType data_type, bool rhs_lock_padding)
+ unsigned int m,
+ unsigned int n,
+ unsigned int k,
+ unsigned int b,
+ DataType data_type,
+ bool rhs_lock_padding)
{
- ARM_COMPUTE_ERROR_ON_MSG(info1.export_rhs_to_cl_image == true, "The fallback MatMul configuration cannot have export_to_cl_image = true");
- ARM_COMPUTE_ERROR_ON_MSG(info0.adj_lhs != info1.adj_lhs, "The MatMul configurations must have the same adj_lhs value");
- ARM_COMPUTE_ERROR_ON_MSG(info0.adj_rhs != info1.adj_rhs, "The MatMul configurations must have the same adj_rhs value");
+ ARM_COMPUTE_ERROR_ON_MSG(info1.export_rhs_to_cl_image == true,
+ "The fallback MatMul configuration cannot have export_to_cl_image = true");
+ ARM_COMPUTE_ERROR_ON_MSG(info0.adj_lhs != info1.adj_lhs,
+ "The MatMul configurations must have the same adj_lhs value");
+ ARM_COMPUTE_ERROR_ON_MSG(info0.adj_rhs != info1.adj_rhs,
+ "The MatMul configurations must have the same adj_rhs value");
const bool adj_lhs = info0.adj_lhs;
const bool adj_rhs = info0.adj_rhs;
- TensorInfo lhs_info = !adj_lhs ? TensorInfo(TensorShape(k, m, b), 1, data_type) : TensorInfo(TensorShape(m, k, b), 1, data_type);
- TensorInfo rhs_info = !adj_rhs ? TensorInfo(TensorShape(n, k, b), 1, data_type) : TensorInfo(TensorShape(k, n, b), 1, data_type);
+ TensorInfo lhs_info =
+ !adj_lhs ? TensorInfo(TensorShape(k, m, b), 1, data_type) : TensorInfo(TensorShape(m, k, b), 1, data_type);
+ TensorInfo rhs_info =
+ !adj_rhs ? TensorInfo(TensorShape(n, k, b), 1, data_type) : TensorInfo(TensorShape(k, n, b), 1, data_type);
TensorInfo dst_info;
- if(rhs_lock_padding == false)
+ if (rhs_lock_padding == false)
{
- if(bool(opencl::kernels::ClMatMulNativeKernel::validate(&lhs_info, &rhs_info, nullptr, &dst_info, info0)))
+ if (bool(opencl::kernels::ClMatMulNativeKernel::validate(&lhs_info, &rhs_info, nullptr, &dst_info, info0)))
{
return info0;
}
@@ -67,7 +78,13 @@ MatMulKernelInfo select_info(const MatMulKernelInfo &info0,
}
}
-MatMulKernelInfo find_info(const MatMulNativeConfigsMatrix &configs, bool adj_lhs, bool adj_rhs, unsigned int m, unsigned int n, unsigned int k, unsigned int b)
+MatMulKernelInfo find_info(const MatMulNativeConfigsMatrix &configs,
+ bool adj_lhs,
+ bool adj_rhs,
+ unsigned int m,
+ unsigned int n,
+ unsigned int k,
+ unsigned int b)
{
size_t min_acc = std::numeric_limits<size_t>::max();
size_t min_idx = 0;
@@ -76,12 +93,13 @@ MatMulKernelInfo find_info(const MatMulNativeConfigsMatrix &configs, bool adj_lh
const size_t num_rows = configs.size();
const size_t num_cols = configs[0].size();
- ARM_COMPUTE_ERROR_ON_MSG(num_cols != 8U, "The entry should have 8 integer values representing: M, N, K, B, M0, N0. K0, IMG_RHS");
+    ARM_COMPUTE_ERROR_ON_MSG(num_cols != 8U,
+                             "The entry should have 8 integer values representing: M, N, K, B, M0, N0, K0, IMG_RHS");
ARM_COMPUTE_UNUSED(num_cols);
// Find nearest GeMM workload
// Note: the workload does not depend on the K dimension
- for(size_t y = 0; y < num_rows; ++y)
+ for (size_t y = 0; y < num_rows; ++y)
{
size_t mc0 = static_cast<size_t>(configs[y][0]);
size_t nc0 = static_cast<size_t>(configs[y][1]);
@@ -94,7 +112,7 @@ MatMulKernelInfo find_info(const MatMulNativeConfigsMatrix &configs, bool adj_lh
acc += (k - kc0) * (k - kc0);
acc += (b - bc0) * (b - bc0);
acc = std::sqrt(acc);
- if(acc < min_acc)
+ if (acc < min_acc)
{
min_acc = acc;
min_idx = y;
diff --git a/src/runtime/heuristics/matmul_native/ClMatMulNativeHelpers.h b/src/runtime/heuristics/matmul_native/ClMatMulNativeHelpers.h
index 3881617558..a114fffa68 100644
--- a/src/runtime/heuristics/matmul_native/ClMatMulNativeHelpers.h
+++ b/src/runtime/heuristics/matmul_native/ClMatMulNativeHelpers.h
@@ -52,7 +52,12 @@ using MatMulNativeConfigsMatrix = std::vector<std::vector<int32_t>>;
*/
MatMulKernelInfo select_info(const MatMulKernelInfo &info0,
const MatMulKernelInfo &info1,
- unsigned int m, unsigned int n, unsigned int k, unsigned int b, DataType data_type, bool rhs_lock_padding);
+ unsigned int m,
+ unsigned int n,
+ unsigned int k,
+ unsigned int b,
+ DataType data_type,
+ bool rhs_lock_padding);
/** Find the preferred configurations for the MatMul Native kernel using the MatMulNativeConfigsMatrix provided by the user
*
@@ -66,7 +71,13 @@ MatMulKernelInfo select_info(const MatMulKernelInfo &info0,
*
* @return @ref MatMulKernelInfo
*/
-MatMulKernelInfo find_info(const MatMulNativeConfigsMatrix &configs, bool adj_lhs, bool adj_rhs, unsigned int m, unsigned int n, unsigned int k, unsigned int b);
+MatMulKernelInfo find_info(const MatMulNativeConfigsMatrix &configs,
+ bool adj_lhs,
+ bool adj_rhs,
+ unsigned int m,
+ unsigned int n,
+ unsigned int k,
+ unsigned int b);
} // namespace cl_matmul
} // namespace arm_compute
#endif /* SRC_RUNTIME_HEURISTICS_MATMUL_NATIVE_CLMATMULNATIVEHELPERS */
diff --git a/src/runtime/heuristics/matmul_native/ClMatMulNativeKernelConfig.h b/src/runtime/heuristics/matmul_native/ClMatMulNativeKernelConfig.h
index a2dbfc7dd5..b10018a6d2 100644
--- a/src/runtime/heuristics/matmul_native/ClMatMulNativeKernelConfig.h
+++ b/src/runtime/heuristics/matmul_native/ClMatMulNativeKernelConfig.h
@@ -45,7 +45,7 @@ public:
*/
static std::unique_ptr<IClMatMulNativeKernelConfig> create(GPUTarget gpu)
{
- switch(get_arch_from_target(gpu))
+ switch (get_arch_from_target(gpu))
{
case GPUTarget::MIDGARD:
case GPUTarget::BIFROST:
@@ -56,6 +56,6 @@ public:
}
}
};
-} // namespace opencl
+} // namespace cl_matmul
} // namespace arm_compute
#endif /* SRC_RUNTIME_HEURISTICS_MATMUL_NATIVE_CLMATMULNATIVEKERNELCONFIG */
diff --git a/src/runtime/heuristics/matmul_native/IClMatMulNativeKernelConfig.h b/src/runtime/heuristics/matmul_native/IClMatMulNativeKernelConfig.h
index 4f548bd01d..b9b091100c 100644
--- a/src/runtime/heuristics/matmul_native/IClMatMulNativeKernelConfig.h
+++ b/src/runtime/heuristics/matmul_native/IClMatMulNativeKernelConfig.h
@@ -28,6 +28,7 @@
#include "arm_compute/core/KernelDescriptors.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/function_info/MatMulInfo.h"
+
#include "src/core/common/Macros.h"
namespace arm_compute
@@ -53,8 +54,7 @@ public:
* @param[in] func_int8 Function to call for matmul native Int8 (QASYMM8, QASYMM8_SIGNED, QSYMM8_PER_CHANNEL)
*
*/
- ClMatMulNativeConfigArray(T func_f32, T func_f16, T func_int8)
- : _configs{ func_f32, func_f16, func_int8 }
+ ClMatMulNativeConfigArray(T func_f32, T func_f16, T func_int8) : _configs{func_f32, func_f16, func_int8}
{
}
@@ -66,7 +66,7 @@ public:
*/
T get_function(DataType data_type)
{
- switch(data_type)
+ switch (data_type)
{
case DataType::F32:
return _configs.at(DT_F32);
@@ -93,8 +93,7 @@ public:
*
* @param[in] arch GPU target
*/
- IClMatMulNativeKernelConfig(GPUTarget arch)
- : _target(arch)
+ IClMatMulNativeKernelConfig(GPUTarget arch) : _target(arch)
{
}
ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(IClMatMulNativeKernelConfig);