Diffstat (limited to 'src/core/NEON/kernels/arm_gemm/gemm_interleaved_pretransposed_2d.hpp')
-rw-r--r--  src/core/NEON/kernels/arm_gemm/gemm_interleaved_pretransposed_2d.hpp  118
1 file changed, 91 insertions, 27 deletions
diff --git a/src/core/NEON/kernels/arm_gemm/gemm_interleaved_pretransposed_2d.hpp b/src/core/NEON/kernels/arm_gemm/gemm_interleaved_pretransposed_2d.hpp
index ebe33ab271..bdccd05326 100644
--- a/src/core/NEON/kernels/arm_gemm/gemm_interleaved_pretransposed_2d.hpp
+++ b/src/core/NEON/kernels/arm_gemm/gemm_interleaved_pretransposed_2d.hpp
@@ -35,6 +35,7 @@
#include <algorithm>
#include <cassert>
+#include <cmath>
// Some macros used to decide how much working space to allocate.
// Round allocations up to the next cache line.
@@ -301,6 +302,36 @@ class GemmInterleavedPretransposed2d : public GemmCommon<To, Tr> {
}
}
+ static unsigned int get_k_block_size(const GemmArgs &args) {
+ // Work out blocking parameters, or override from provided GemmConfig
+ if (args._cfg && args._cfg->inner_block_size) {
+ return args._cfg->inner_block_size;
+ }
+
+ const unsigned int L1_size = args._ci->get_L1_cache_size();
+ unsigned int k_block;
+
+ // k_block: Find out how much of the larger array can be loaded into half the cache.
+ // This should account for associative caches.
+ k_block = (L1_size / 2) / (sizeof(Toi) * (std::max(strategy::out_width(), strategy::out_height())));
+
+ // Needs to be (at least a single) multiple of the K unroll level.
+ k_block /= strategy::k_unroll();
+ k_block = std::max(k_block, 1U) * strategy::k_unroll();
+
+ // Now tune to presented problem size; this is how many blocks we need.
+ unsigned int numk_blocks = iceildiv(args._Ksize, k_block);
+
+ // So divide the space equally into that many blocks.
+ k_block = iceildiv(args._Ksize, numk_blocks);
+
+ // And round UP to the K unroll level required.
+ k_block = iceildiv(k_block, strategy::k_unroll());
+ k_block *= strategy::k_unroll();
+
+ return k_block;
+ }
+
public:
GemmInterleavedPretransposed2d(GemmInterleavedPretransposed2d &) = delete;
GemmInterleavedPretransposed2d & operator= (GemmInterleavedPretransposed2d &) = delete;
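
To make the blocking arithmetic in get_k_block_size() above concrete, here is a minimal standalone sketch that reproduces the same steps with assumed numbers: a 32 KB L1 data cache, fp32 operands (sizeof(Toi) == 4), a hypothetical 12x8 kernel with k_unroll() == 1, and K == 1000. iceildiv is a local stand-in for the arm_gemm helper; the real values come from CPUInfo and the strategy type.

// Standalone sketch of the k_block sizing logic, with assumed parameters.
#include <algorithm>
#include <cstdio>

static unsigned int iceildiv(unsigned int a, unsigned int b) { return (a + b - 1) / b; }

int main() {
    const unsigned int L1_size   = 32 * 1024; // assumed L1 data cache size
    const unsigned int elem_size = 4;         // sizeof(Toi) for fp32
    const unsigned int out_width = 12, out_height = 8, k_unroll = 1;
    const unsigned int Ksize     = 1000;

    // Fit half the L1 with the larger of the two interleaved panels.
    unsigned int k_block = (L1_size / 2) / (elem_size * std::max(out_width, out_height));

    // Round down to a multiple of k_unroll, keeping at least one unroll.
    k_block = std::max(k_block / k_unroll, 1U) * k_unroll;

    // Split K evenly over the number of blocks that implies, then round the
    // per-block size back up to a multiple of k_unroll.
    const unsigned int num_k_blocks = iceildiv(Ksize, k_block);
    k_block = iceildiv(iceildiv(Ksize, num_k_blocks), k_unroll) * k_unroll;

    std::printf("k_block = %u (%u blocks over K=%u)\n", k_block, num_k_blocks, Ksize);
    return 0;
}

With these assumed numbers, half the L1 holds 341 K-elements of the wider panel, which spans K = 1000 in three blocks, so the final k_block comes out at 334.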
@@ -315,8 +346,8 @@ public:
, _nmulti(args._nmulti)
, _act(args._act)
, _maxthreads(args._maxthreads)
- , _nthreads(args._maxthreads)
-
+ , _nthreads(args._maxthreads)
+ , _k_block(get_k_block_size(args))
// Work out the rounded size of M - needed for some buffers.
, _Mround_div ( iceildiv(_Msize, strategy::out_height()) )
, _Mround ( _Mround_div * strategy::out_height() )
@@ -326,32 +357,8 @@ public:
{
assert(_maxthreads > 0);
- const unsigned int L1_size = _ci->get_L1_cache_size();
const unsigned int L2_size = _ci->get_L2_cache_size();
- // Work out blocking parameters, or override from provided GemmConfig
- if (args._cfg && args._cfg->inner_block_size) {
- _k_block = args._cfg->inner_block_size;
- } else {
- // k_block: Find out how much of the larger array can be loaded into half the cache.
- // This should account for associative caches.
- _k_block = (L1_size / 2) / (sizeof(Toi) * (std::max(strategy::out_width(), strategy::out_height())));
-
- // Needs to be (at least a single) multiple of the K unroll level.
- _k_block /= strategy::k_unroll();
- _k_block = std::max(_k_block, 1U) * strategy::k_unroll();
-
- // Now tune to presented problem size; this is how many blocks we need.
- unsigned int num_k_blocks = iceildiv(_Ksize, _k_block);
-
- // So divide the space equally into that many blocks.
- _k_block = iceildiv(_Ksize, num_k_blocks);
-
- // And round UP to the K unroll level required.
- _k_block = iceildiv(_k_block, strategy::k_unroll());
- _k_block *= strategy::k_unroll();
- }
-
if (args._cfg && args._cfg->outer_block_size) {
_x_block = args._cfg->outer_block_size;
} else {
@@ -381,6 +388,10 @@ public:
return { m, n };
}
+ bool supports_dynamic_scheduling() const override {
+ return true;
+ }
+
// set_nthreads: pass on to buffer manager to avoid it waiting for non-existent threads.
void set_nthreads(int nthreads) override {
_nthreads = std::min(nthreads, _maxthreads);
@@ -495,7 +506,60 @@ public:
_B_transposed = reinterpret_cast<Toi *>(in_buffer);
}
- ~GemmInterleavedPretransposed2d() override { }
+ // Estimate cycles for given problem given provided parameters
+ static uint64_t estimate_cycles(const GemmArgs &args, const PerformanceParameters &params) {
+ unsigned int k_blocks = iceildiv(args._Ksize, get_k_block_size(args));
+ unsigned int m_blocks = iceildiv(args._Msize, strategy::out_height()) * args._nbatches;
+ unsigned int n_blocks = iceildiv(args._Nsize, strategy::out_width());
+
+ uint64_t total_macs = static_cast<uint64_t>(args._nbatches) * args._nmulti * roundup(args._Msize, strategy::out_height()) * roundup(args._Nsize, strategy::out_width()) * roundup(args._Ksize, strategy::k_unroll());
+ uint64_t prepare_bytes = static_cast<uint64_t>(args._nbatches) * args._nmulti * roundup(args._Msize, strategy::out_height()) * roundup(args._Ksize, strategy::k_unroll()) * sizeof(Toi);
+ uint64_t merge_bytes = static_cast<uint64_t>(args._nbatches) * args._nmulti * k_blocks * roundup(args._Msize, strategy::out_height()) * roundup(args._Nsize, strategy::out_width()) * sizeof(Tr);
+
+ // Wide problems incur extra preparation cost, as it is done per thread.
+ // Duplicate the logic the scheduler will later use to figure out how much that will affect us
+ float ratio = m_blocks / static_cast<float>(n_blocks);
+
+ unsigned int ideal_height = static_cast<unsigned int>(std::sqrt(args._maxthreads * ratio) + 0.5);
+ unsigned int height = 1;
+
+ if (ideal_height == 0) {
+ height = 1;
+ } else {
+ for (unsigned int adj=0; adj<ideal_height; adj++) {
+ const unsigned int round_down = ideal_height - adj;
+ if (args._maxthreads % round_down == 0) {
+ height = round_down;
+ break;
+ }
+
+ const unsigned int round_up = ideal_height + adj;
+ if (args._maxthreads % round_up == 0) {
+ height = round_up;
+ break;
+ }
+ }
+ }
+
+ // We've computed the height here - we need to multiply the amount of preparation effort by the width (which is total threads / height)
+ prepare_bytes *= (args._maxthreads / height);
+
+ float mac_cycles = static_cast<float>(total_macs) / params.kernel_macs_cycle;
+ float prepare_cycles = static_cast<float>(prepare_bytes) / params.prepare_bytes_cycle;
+ float merge_cycles = static_cast<float>(merge_bytes) / params.merge_bytes_cycle;
+
+ float total_cycles = mac_cycles + prepare_cycles + merge_cycles;
+
+ // We can't thread over multis, which might be a problem in some
+ // threaded cases. Penalize that here.
+ float parallelism_available = static_cast<float>(iceildiv(args._Msize, strategy::out_height()) * args._nbatches * iceildiv(args._Nsize, strategy::out_width())) * 0.9;
+
+ if (parallelism_available < args._maxthreads) {
+ total_cycles *= (static_cast<float>(args._maxthreads) / parallelism_available);
+ }
+
+ return static_cast<uint64_t>(total_cycles);
+ }
};
} // namespace arm_gemm
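
As a rough, non-authoritative walk-through of the cost model added in estimate_cycles(), the sketch below reproduces the same arithmetic with assumed inputs: M = N = K = 1024, one batch and one multi, a hypothetical 12x8 fp32 kernel with k_unroll() == 1, four threads, and made-up PerformanceParameters of 16 MACs, 8 prepare bytes and 4 merge bytes per cycle. iceildiv/roundup are local stand-ins for the arm_gemm helpers, and the low-parallelism penalty at the end of the real function is omitted because these shapes provide far more than four units of work.

// Standalone sketch of the estimate_cycles cost model, with assumed parameters.
#include <cmath>
#include <cstdint>
#include <cstdio>

static unsigned int iceildiv(unsigned int a, unsigned int b) { return (a + b - 1) / b; }
static unsigned int roundup(unsigned int a, unsigned int b)  { return iceildiv(a, b) * b; }

int main() {
    const unsigned int M = 1024, N = 1024, K = 1024, batches = 1, multis = 1;
    const unsigned int out_h = 8, out_w = 12, k_unroll = 1, max_threads = 4;
    const unsigned int k_block = 334;  // e.g. from the k_block sizing sketch above
    const float macs_cycle = 16.0f, prep_bytes_cycle = 8.0f, merge_bytes_cycle = 4.0f;

    const unsigned int k_blocks = iceildiv(K, k_block);
    const unsigned int m_blocks = iceildiv(M, out_h) * batches;
    const unsigned int n_blocks = iceildiv(N, out_w);

    // Arithmetic, A-panel preparation and C-merge volumes, rounded up to whole tiles.
    uint64_t macs  = uint64_t(batches) * multis * roundup(M, out_h) * roundup(N, out_w) * roundup(K, k_unroll);
    uint64_t prep  = uint64_t(batches) * multis * roundup(M, out_h) * roundup(K, k_unroll) * sizeof(float);
    uint64_t merge = uint64_t(batches) * multis * k_blocks * roundup(M, out_h) * roundup(N, out_w) * sizeof(float);

    // Mirror the scheduler's 2D split: pick a thread-grid height near
    // sqrt(threads * m_blocks / n_blocks) that divides the thread count.
    unsigned int ideal = unsigned(std::sqrt(max_threads * (m_blocks / float(n_blocks))) + 0.5f);
    unsigned int height = 1;
    for (unsigned int adj = 0; ideal && adj < ideal; adj++) {
        if (max_threads % (ideal - adj) == 0) { height = ideal - adj; break; }
        if (max_threads % (ideal + adj) == 0) { height = ideal + adj; break; }
    }

    // A-panel preparation is repeated once per column of the thread grid.
    prep *= (max_threads / height);

    float cycles = macs / macs_cycle + prep / prep_bytes_cycle + merge / merge_bytes_cycle;
    std::printf("estimated cycles ~= %.0f (thread grid height %u)\n", cycles, height);
    return 0;
}

The interesting term is the prepare cost: because the A-panel interleave is redone per column of the 2D thread grid, wide-and-short problems (small grid height) multiply prepare_bytes by nearly the full thread count, which is exactly what the scheduler-mirroring loop in the new code estimates.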