Diffstat (limited to 'arm_compute/core/NEON/kernels/assembly/NEGEMMInterleavedPrepareBWrapperKernel.h')
-rw-r--r--  arm_compute/core/NEON/kernels/assembly/NEGEMMInterleavedPrepareBWrapperKernel.h | 140
1 file changed, 131 insertions(+), 9 deletions(-)
diff --git a/arm_compute/core/NEON/kernels/assembly/NEGEMMInterleavedPrepareBWrapperKernel.h b/arm_compute/core/NEON/kernels/assembly/NEGEMMInterleavedPrepareBWrapperKernel.h
index e46c33018b..ba3223f66d 100644
--- a/arm_compute/core/NEON/kernels/assembly/NEGEMMInterleavedPrepareBWrapperKernel.h
+++ b/arm_compute/core/NEON/kernels/assembly/NEGEMMInterleavedPrepareBWrapperKernel.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018 ARM Limited.
+ * Copyright (c) 2018-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -24,14 +24,16 @@
#ifndef __ARM_COMPUTE_NEGEMMINTERLEAVEDPREPAREBWRAPPERKERNEL_H__
#define __ARM_COMPUTE_NEGEMMINTERLEAVEDPREPAREBWRAPPERKERNEL_H__
+#include "arm_compute/core/Helpers.h"
+#include "arm_compute/core/ITensor.h"
#include "arm_compute/core/NEON/INEKernel.h"
#include "arm_compute/core/NEON/kernels/assembly/Helpers.h"
#include "arm_compute/core/NEON/kernels/assembly/INEGEMMWrapperKernel.h"
+#include "arm_compute/core/Utils.h"
+#include "arm_compute/core/Validate.h"
namespace arm_compute
{
-class ITensor;
-
/** Unit of work for @ref NEGEMMInterleavedPrepareBWrapperKernel to process */
struct PrepareBWorkload
{
@@ -56,6 +58,84 @@ struct PrepareBWorkload
unsigned int _kmax; /**< Last value to process along the K dimension. */
};
+namespace detail
+{
+// Call the lambda function for each workload generated by the passed window.
+template <typename strategy, bool use_buffer_manager, typename Lambda>
+void for_each_element_in_window(const Window &window, const ITensor *b, ITensor *transformed_b, unsigned int N, unsigned int K, Lambda &&lambda)
+{
+ unsigned int wl_index = 0;
+ unsigned int num_buffers = 0, reshaped_block_size = 0;
+
+ if(use_buffer_manager)
+ {
+ num_buffers = transformed_b->info()->tensor_shape()[1];
+ reshaped_block_size = transformed_b->info()->strides_in_bytes().y();
+ }
+
+ unsigned int offset_transformed_b = transformed_b->info()->offset_first_element_in_bytes();
+ execute_window_loop(window, [&](const Coordinates & coordinates)
+ {
+ const unsigned int x0 = coordinates.x();
+ const unsigned int k0 = coordinates.y();
+ const unsigned int multi = coordinates.z();
+
+ const unsigned int offset_b = b->info()->offset_element_in_bytes(Coordinates(0, 0, multi));
+ const unsigned int xmax = std::min(x0 + window.x().step(), N);
+ const unsigned int kmax = std::min(k0 + window.y().step(), K);
+
+ /* Figure out the size of each block. */
+ unsigned int x_size = (xmax - x0);
+ unsigned int k_size = (kmax - k0);
+
+ /* Round sizes up as needed. */
+ x_size = ceil_to_multiple(x_size, strategy::out_width());
+ k_size = ceil_to_multiple(k_size, strategy::k_unroll());
+
+ lambda(PrepareBWorkload(offset_b, offset_transformed_b, x0, xmax, k0, kmax));
+
+ // Each workload represents one block:
+ if(use_buffer_manager)
+ {
+ // Rotate through the BufferManager's buffers:
+ wl_index++;
+ offset_transformed_b = (wl_index % num_buffers) * reshaped_block_size;
+ }
+ else
+ {
+ offset_transformed_b += (x_size * k_size * sizeof(typename strategy::operand_type));
+ }
+ });
+}
+
+// Calculate the size of transformed_b:
+template <typename strategy>
+unsigned int get_B_pretransposed_array_size(unsigned int N, unsigned int K, const BlockSizes &bs, unsigned int multis)
+{
+ // How many full blocks do N and K contain?
+ size_t num_full_k = K / bs.k_block;
+ size_t num_full_x = N / bs.x_block;
+
+ ARM_COMPUTE_ERROR_ON(bs.x_block % strategy::out_width() != 0);
+ ARM_COMPUTE_ERROR_ON(bs.k_block % strategy::k_unroll() != 0);
+
+ size_t normal_x_size = bs.x_block;
+ size_t normal_k_size = bs.k_block;
+
+ // Round up the leftovers to be a multiple of the strategy processing size:
+ size_t left_over_x_size = ceil_to_multiple(N % bs.x_block, strategy::out_width());
+ size_t left_over_k_size = ceil_to_multiple(K % bs.k_block, strategy::k_unroll());
+
+ // Calculate the total size of the buffer:
+ size_t total = num_full_k * normal_k_size * (num_full_x * normal_x_size + left_over_x_size);
+ total += left_over_k_size * (left_over_x_size + num_full_x * normal_x_size);
+
+ total *= multis;
+
+ return total;
+}
+} // namespace detail
+
/** Common interface for the templated wrappers around the B reshape NEON assembly implementations */
class NEGEMMInterleavedPrepareBWrapperKernel : public INEKernel
{
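
As a quick sanity check on the size formula in detail::get_B_pretransposed_array_size() above, the following self-contained sketch reproduces its arithmetic with illustrative numbers (N = 100, K = 70, x_block = 48, k_block = 64, out_width = 12 and k_unroll = 4 are assumptions for the example, not values taken from a real strategy):

#include <cstddef>
#include <iostream>

// Local stand-in for arm_compute's ceil_to_multiple() helper.
static std::size_t ceil_to_multiple(std::size_t value, std::size_t multiple)
{
    return ((value + multiple - 1) / multiple) * multiple;
}

int main()
{
    const std::size_t N = 100, K = 70, multis = 1;
    const std::size_t x_block = 48, k_block = 64;   // as chosen by calculate_block_sizes()
    const std::size_t out_width = 12, k_unroll = 4; // strategy::out_width() / strategy::k_unroll()

    const std::size_t num_full_x  = N / x_block;                              // 2 full X blocks
    const std::size_t num_full_k  = K / k_block;                              // 1 full K block
    const std::size_t left_over_x = ceil_to_multiple(N % x_block, out_width); // 4 rounded up to 12
    const std::size_t left_over_k = ceil_to_multiple(K % k_block, k_unroll);  // 6 rounded up to 8

    std::size_t total = num_full_k * k_block * (num_full_x * x_block + left_over_x); // 64 * 108 = 6912
    total += left_over_k * (num_full_x * x_block + left_over_x);                     // 8 * 108 = 864
    total *= multis;

    std::cout << total << " elements\n"; // prints "7776 elements"
}

The same rounding is what allows for_each_element_in_window() to advance offset_transformed_b by a fixed x_size * k_size block per workload, or, when a buffer manager is used, to rotate through num_buffers equally sized buffers.
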
@@ -93,7 +173,7 @@ public:
/** Equivalent to arm_gemm::GemmInterleaved's strategy::transforms::PrepareB() but using Compute Library types.
*/
-template <typename To, bool use_dot = false>
+template <typename strategy>
class NEGEMMInterleavedPrepareBWrapperKernelTemplate : public NEGEMMInterleavedPrepareBWrapperKernel
{
public:
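
For readers tracking the template change just above: the single strategy parameter replaces the old <typename To, bool use_dot> pair. Inferred from its uses in this header, a strategy must provide an operand_type, out_width(), k_unroll(), a CPUInfo-based constructor and a transforms.PrepareB() member. The skeleton below illustrates that contract; it is a hypothetical stand-in, not an ACL declaration (real strategies live in the arm_gemm kernels):

struct CPUInfo; // ACL type, forward-declared for this sketch

struct example_strategy
{
    using operand_type = float;                    // element type of B, used for offset arithmetic
    static unsigned int out_width() { return 12; } // illustrative: columns produced per kernel pass
    static unsigned int k_unroll() { return 1; }   // illustrative: K-dimension unroll factor

    explicit example_strategy(const CPUInfo &) {}  // constructed per call from info.cpu_info

    struct transforms_t
    {
        // Reshape the [x0, xmax) x [k0, kmax) block of B into the pretransposed layout.
        void PrepareB(operand_type *out, const operand_type *in, int ldin,
                      unsigned int x0, unsigned int xmax,
                      unsigned int k0, unsigned int kmax, bool transpose)
        {
            (void)out; (void)in; (void)ldin; (void)x0; (void)xmax; (void)k0; (void)kmax; (void)transpose;
        }
    } transforms;
};
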
@@ -105,13 +185,55 @@ public:
* @param[in] ci CPU information
* @param[in] params M, N, K sizes.
*/
- void configure(const ITensor *b, ITensor *transformed_b, bool transpose_b, const CPUInfo &ci, const INEGEMMWrapperKernel::Params &params);
+ void configure(const ITensor *b, ITensor *transformed_b, bool transpose_b, const CPUInfo &ci, const INEGEMMWrapperKernel::Params &params)
+ {
+ const unsigned int multis = b->info()->tensor_shape().z();
+ _Nsize = b->info()->tensor_shape().x();
+ _Ksize = b->info()->tensor_shape().y();
+ _b = b;
+ _transformed_b = transformed_b;
+ _transpose_b = transpose_b;
+
+ _block_sizes = calculate_block_sizes<strategy>(ci, params.M, params.N, params.K);
+
+ auto_init_if_empty(*transformed_b->info(), b->info()->clone()->set_tensor_shape(TensorShape{ detail::get_B_pretransposed_array_size<strategy>(_Nsize, _Ksize, _block_sizes, multis) }));
+
+ Window window;
+ window.set(Window::DimX, Window::Dimension(0, ceil_to_multiple(_Nsize, _block_sizes.x_block), _block_sizes.x_block));
+ window.set(Window::DimY, Window::Dimension(0, ceil_to_multiple(_Ksize, _block_sizes.k_block), _block_sizes.k_block));
+ window.set(Window::DimZ, Window::Dimension(0, multis));
+
+ INEKernel::configure(window);
+ }
// Inherited methods overridden:
- void transform(const PrepareBWorkload &wl, const ThreadInfo &info) override;
- void create_workloads(std::vector<PrepareBWorkload> &workloads) override;
- void run(const Window &window, const ThreadInfo &info) override;
- BlockSizes block_sizes() const override;
+ void transform(const PrepareBWorkload &wl, const ThreadInfo &info) override
+ {
+ strategy strat(info.cpu_info);
+ strat.transforms.PrepareB(reinterpret_cast<typename strategy::operand_type *>(_transformed_b->buffer() + wl._offset_transformed_b),
+ reinterpret_cast<typename strategy::operand_type *>(_b->buffer() + wl._offset_b),
+ _b->info()->strides_in_bytes().y() / sizeof(typename strategy::operand_type),
+ wl._x0, wl._xmax, wl._k0, wl._kmax, _transpose_b);
+ }
+ void create_workloads(std::vector<PrepareBWorkload> &workloads) override
+ {
+ detail::for_each_element_in_window<strategy, true>(window(), _b, _transformed_b, _Nsize, _Ksize, [&workloads](PrepareBWorkload && wl)
+ {
+ workloads.push_back(std::move(wl));
+ });
+ }
+ void run(const Window &window, const ThreadInfo &info) override
+ {
+ ARM_COMPUTE_ERROR_ON_MISMATCHING_WINDOWS(window, INEKernel::window());
+ detail::for_each_element_in_window<strategy, false>(window, _b, _transformed_b, _Nsize, _Ksize, [&](PrepareBWorkload && wl)
+ {
+ this->transform(wl, info);
+ });
+ }
+ BlockSizes block_sizes() const override
+ {
+ return _block_sizes;
+ }
private:
const ITensor *_b{ nullptr };
ITensor       *_transformed_b{ nullptr };
unsigned int   _Nsize{ 0 };
unsigned int   _Ksize{ 0 };
bool           _transpose_b{ false };
BlockSizes     _block_sizes{};
};
} // namespace arm_compute
#endif /* __ARM_COMPUTE_NEGEMMINTERLEAVEDPREPAREBWRAPPERKERNEL_H__ */
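
To close, a hedged usage sketch (not part of the patch) of how a caller might drive this kernel; the strategy name arm_gemm::sgemm_12x8, the tensors, the ThreadInfo and the exact helper calls are assumptions for illustration:

// Assumed setup: ITensor *a, *b, *c and ITensor transformed_b exist, `ci` is
// the current CPUInfo and `info` a ThreadInfo for the calling thread.
NEGEMMInterleavedPrepareBWrapperKernelTemplate<arm_gemm::sgemm_12x8> prepare_b;
const auto params = INEGEMMWrapperKernel::extract_parameters(a, b, c);
prepare_b.configure(b, &transformed_b, /* transpose_b = */ false, ci, params);

// Either collect one PrepareBWorkload per (x_block, k_block, multi) tile for a scheduler...
std::vector<PrepareBWorkload> workloads;
prepare_b.create_workloads(workloads);

// ...or transform the whole window on the calling thread:
prepare_b.run(prepare_b.window(), info);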