author     Pablo Tello <pablo.tello@arm.com>          2017-09-29 16:43:25 +0100
committer  Anthony Barbier <anthony.barbier@arm.com>  2018-11-02 16:35:24 +0000
commit     bf2fb95c99ebd215b3c0d93cb970461185ef9716 (patch)
tree       ef9ea161a5b4bf04d057681eb435605f3d1fa5ab /src
parent     dd715f2a88827241a3fb9e4a2d8be82455f649f7 (diff)
download   ComputeLibrary-bf2fb95c99ebd215b3c0d93cb970461185ef9716.tar.gz
COMPMID-481: Add gemmlowp_aarch64_v8p4 kernel.
Change-Id: I15496b16ffd636f5bff76572e750df7e15c80830
Reviewed-on: http://mpd-gerrit.cambridge.arm.com/90532
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
Tested-by: Kaizen <jeremy.johnson+kaizengerrit@arm.com>
Diffstat (limited to 'src')
-rw-r--r--  src/core/NEON/kernels/NEGEMMInterleaveBlockedKernel.cpp      131
-rw-r--r--  src/core/NEON/kernels/arm64/NEGEMMLowpAArch64V8P4Kernel.cpp  519
-rw-r--r--  src/runtime/NEON/functions/NEGEMMLowp.cpp                    102
3 files changed, 730 insertions(+), 22 deletions(-)
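
This patch introduces three pieces: a blocked interleave kernel that packs the u8 operands into the tile layout the assembly expects, an AArch64 v8.2-A dot-product (UDOT) GEMM kernel, and a new NEGEMMLowp::configure(a, b, output) overload that dispatches to that kernel when the CPU reports dot-product support. The snippet below is a minimal sketch of how the new entry point might be driven; the tensor shapes and the fill step are illustrative assumptions, not taken from this patch, and the overload only configures successfully on a dot-product capable AArch64 CPU.

// Minimal usage sketch (assumed shapes; requires a CPU reporting dot-product support,
// otherwise this configure() overload raises "Not implemented").
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/NEON/functions/NEGEMMLowp.h"
#include "arm_compute/runtime/Tensor.h"

using namespace arm_compute;

int main()
{
    // A is 32x64 (MxK), B is 64x48 (KxN), C accumulates u32 results into 32x48 (MxN).
    // arm_compute shapes are (width, height), i.e. (columns, rows).
    Tensor a, b, c;
    a.allocator()->init(TensorInfo(TensorShape(64U, 32U), 1, DataType::U8));
    b.allocator()->init(TensorInfo(TensorShape(48U, 64U), 1, DataType::U8));
    c.allocator()->init(TensorInfo(TensorShape(48U, 32U), 1, DataType::U32));

    NEGEMMLowp gemmlowp(nullptr); // no memory manager
    gemmlowp.configure(&a, &b, &c);

    a.allocator()->allocate();
    b.allocator()->allocate();
    c.allocator()->allocate();

    // ... fill a and b with quantized u8 data here ...

    gemmlowp.run(); // packs A and B into blocked layouts, then runs the UDOT kernel
    return 0;
}
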
diff --git a/src/core/NEON/kernels/NEGEMMInterleaveBlockedKernel.cpp b/src/core/NEON/kernels/NEGEMMInterleaveBlockedKernel.cpp
new file mode 100644
index 0000000000..a9c624abd0
--- /dev/null
+++ b/src/core/NEON/kernels/NEGEMMInterleaveBlockedKernel.cpp
@@ -0,0 +1,131 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/core/NEON/kernels/NEGEMMInterleaveBlockedKernel.h"
+
+#include "arm_compute/core/Error.h"
+#include "arm_compute/core/Helpers.h"
+#include "arm_compute/core/ITensor.h"
+#include "arm_compute/core/NEON/INEKernel.h"
+#include "arm_compute/core/Types.h"
+#include "arm_compute/core/Validate.h"
+#include "arm_compute/core/Window.h"
+
+#include <arm_neon.h>
+#include <cstddef>
+#include <cstdint>
+#include <tuple>
+
+using namespace arm_compute;
+
+namespace
+{
+inline void gemm_interleave_8bit_elements(const ITensor *input, ITensor *output, const Window &window, unsigned int block_width, unsigned int block_height, bool transpose)
+{
+ const size_t in_stride = input->info()->strides_in_bytes()[1];
+ const float scale_y_factor = 1.f / float(block_height);
+
+ // Set window for output tensor
+ Window win_out(window);
+ win_out.scale(Window::DimY, scale_y_factor);
+ Iterator in(input, window);
+
+ win_out.set_dimension_step(Window::DimX, block_width * block_height);
+ Iterator out(output, win_out);
+ execute_window_loop(window, [&](const Coordinates & id)
+ {
+ int j = 0;
+ for(unsigned int z = 0; z < block_height; ++z)
+ {
+ for(unsigned int b = 0; b < block_width; ++b)
+ {
+ if(!transpose)
+ {
+ const bool inbounds = (id.x() + b) < input->info()->dimension(0) && (id.y() + z) < input->info()->dimension(1);
+ *(out.ptr() + j++) = (inbounds) ? *(in.ptr() + z * in_stride + b) : 0;
+ }
+ else
+ {
+ const bool inbounds = (id.x() + b) < input->info()->dimension(1) && (id.y() + z) < input->info()->dimension(0);
+ const uint8_t value = (inbounds) ? *(input->buffer() + (id.x() + b) * in_stride + (id.y() + z)) : 0;
+ *(out.ptr() + j++) = value;
+ }
+ }
+ }
+ },
+ in, out);
+}
+
+} // namespace
+
+NEGEMMInterleaveBlockedKernel::NEGEMMInterleaveBlockedKernel()
+ : _block_height(0), _block_width(0), _transpose(false)
+{
+}
+
+void NEGEMMInterleaveBlockedKernel::configure(const ITensor *input, ITensor *output, unsigned int block_height, unsigned int block_width, bool transpose)
+{
+ ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8);
+ ARM_COMPUTE_ERROR_ON_NULLPTR(output);
+ ARM_COMPUTE_ERROR_ON_MSG(block_height < 1, "Block height must be greater than 0");
+ ARM_COMPUTE_ERROR_ON_MSG(block_width < 1, "Block width must be greater than 0");
+
+ TensorShape output_shape = input->info()->tensor_shape();
+ const float interleave_by_f32 = block_height;
+ output_shape.set(0, input->info()->dimension(0) * interleave_by_f32);
+ output_shape.set(1, std::ceil(static_cast<float>(input->info()->dimension(1)) / interleave_by_f32));
+ // Output auto initialization if not yet initialized
+ auto_init_if_empty(*output->info(), output_shape, 1, input->info()->data_type(), input->info()->fixed_point_position());
+ ARM_COMPUTE_ERROR_ON_MISMATCHING_DIMENSIONS(output->info()->tensor_shape(), output_shape);
+ ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
+ ARM_COMPUTE_ERROR_ON_MISMATCHING_FIXED_POINT(input, output);
+
+ _input = input;
+ _output = output;
+ _block_height = block_height;
+ _block_width = block_width;
+ _transpose = transpose;
+
+ const unsigned int num_elems_processed_per_iteration_x = block_width;
+ const unsigned int num_elems_processed_per_iteration_y = block_height;
+
+ // Configure kernel window
+ Window win = calculate_max_window(*input->info(), Steps(num_elems_processed_per_iteration_x, num_elems_processed_per_iteration_y));
+ const float scaley_factor = 1.f / interleave_by_f32;
+
+ AccessWindowRectangle output_access(output->info(), 0, 0, num_elems_processed_per_iteration_x * num_elems_processed_per_iteration_y, 1, num_elems_processed_per_iteration_y, scaley_factor);
+ AccessWindowRectangle input_access(input->info(), 0, 0, num_elems_processed_per_iteration_x, num_elems_processed_per_iteration_y);
+ update_window_and_padding(win, output_access, input_access);
+
+ output_access.set_valid_region(win, input->info()->valid_region());
+
+ INEKernel::configure(win);
+}
+
+void NEGEMMInterleaveBlockedKernel::run(const Window &window, const ThreadInfo &info)
+{
+ ARM_COMPUTE_UNUSED(info);
+ ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
+ ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INEKernel::window(), window);
+ gemm_interleave_8bit_elements(_input, _output, window, _block_width, _block_height, _transpose);
+}
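
For reference, the blocked interleave implemented above is equivalent to the scalar routine sketched below (illustration only, not part of the patch): each block_height x block_width tile of the source is written out contiguously, tile row by tile row, with elements that fall outside the tensor zero-filled, and tiles are visited left to right, then top to bottom.

// Scalar reference of gemm_interleave_8bit_elements() with transpose == false
// (illustrative sketch, not the NEON kernel itself).
#include <cstddef>
#include <cstdint>
#include <vector>

std::vector<uint8_t> interleave_blocked_ref(const uint8_t *in, int rows, int cols,
                                            int block_h, int block_w)
{
    const int tiles_x = (cols + block_w - 1) / block_w;
    const int tiles_y = (rows + block_h - 1) / block_h;
    std::vector<uint8_t> out;
    out.reserve(static_cast<std::size_t>(tiles_x) * tiles_y * block_h * block_w);

    for(int ty = 0; ty < tiles_y; ++ty)
    {
        for(int tx = 0; tx < tiles_x; ++tx)
        {
            for(int z = 0; z < block_h; ++z) // row inside the tile
            {
                for(int b = 0; b < block_w; ++b) // column inside the tile
                {
                    const int y = ty * block_h + z;
                    const int x = tx * block_w + b;
                    const bool inbounds = (y < rows) && (x < cols);
                    out.push_back(inbounds ? in[y * cols + x] : 0);
                }
            }
        }
    }
    return out;
}
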
diff --git a/src/core/NEON/kernels/arm64/NEGEMMLowpAArch64V8P4Kernel.cpp b/src/core/NEON/kernels/arm64/NEGEMMLowpAArch64V8P4Kernel.cpp
new file mode 100644
index 0000000000..939f1b7c40
--- /dev/null
+++ b/src/core/NEON/kernels/arm64/NEGEMMLowpAArch64V8P4Kernel.cpp
@@ -0,0 +1,519 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/core/NEON/kernels/arm64/NEGEMMLowpAArch64V8P4Kernel.h"
+
+#include "arm_compute/core/AccessWindowStatic.h"
+#include "arm_compute/core/Error.h"
+#include "arm_compute/core/Helpers.h"
+#include "arm_compute/core/IAccessWindow.h"
+#include "arm_compute/core/ITensor.h"
+#include "arm_compute/core/TensorInfo.h"
+#include "arm_compute/core/Types.h"
+#include "arm_compute/core/Utils.h"
+#include "arm_compute/core/Validate.h"
+#include "arm_compute/core/Window.h"
+#include "support/ToolchainSupport.h"
+
+#include <arm_neon.h>
+#include <cstddef>
+#include <cstdint>
+
+#define ASM_PREFETCH(address) "PRFM PLDL1KEEP, " address "\n"
+#define ASM_PREFETCHL2(address) "PRFM PLDL2KEEP, " address "\n"
+#define ASM_PREFETCHW(address) "PRFM PSTL1KEEP, " address "\n"
+#define ASM_PREFETCHWL2(address) "PRFM PSTL2KEEP, " address "\n"
+
+static inline void stincpld(uint32x4_t v0, uint32x4_t v1, uint32x4_t v2, uint32x4_t v3,
+ uint32x4_t v4, uint32x4_t v5, uint32x4_t v6, uint32x4_t v7,
+ uint32_t *&ptr0, uint32_t *&ptr1, uint32_t *&ptr2, uint32_t *&ptr3,
+ uint32_t *&ptr4, uint32_t *&ptr5, uint32_t *&ptr6, uint32_t *&ptr7)
+{
+ __asm __volatile(
+ "LDR q0, [%[ptr0]]\n"
+ "LDR q1, [%[ptr1]]\n"
+ "LDR q2, [%[ptr2]]\n"
+ "LDR q3, [%[ptr3]]\n"
+ "LDR q4, [%[ptr4]]\n"
+ "LDR q5, [%[ptr5]]\n"
+ "LDR q6, [%[ptr6]]\n"
+ "LDR q7, [%[ptr7]]\n"
+ "ADD v0.4s, v0.4s, %[v0].4s\n" ASM_PREFETCH("[%[ptr0], #80]") "ADD v1.4s, v1.4s, %[v1].4s\n" ASM_PREFETCH("[%[ptr1], #80]") "ADD v2.4s, v2.4s, %[v2].4s\n" ASM_PREFETCH("[%[ptr2], #80]")
+ "ADD v3.4s, v3.4s, %[v3].4s\n" ASM_PREFETCH("[%[ptr3], #80]") "ADD v4.4s, v4.4s, %[v4].4s\n" ASM_PREFETCH("[%[ptr4], #80]") "ADD v5.4s, v5.4s, %[v5].4s\n" ASM_PREFETCH("[%[ptr5], #80]")
+ "ADD v6.4s, v6.4s, %[v6].4s\n" ASM_PREFETCH("[%[ptr6], #80]") "ADD v7.4s, v7.4s, %[v7].4s\n" ASM_PREFETCH("[%[ptr7], #80]")
+ "STR q0, [%[ptr0]], #16\n"
+ "STR q1, [%[ptr1]], #16\n"
+ "STR q2, [%[ptr2]], #16\n"
+ "STR q3, [%[ptr3]], #16\n"
+ "STR q4, [%[ptr4]], #16\n"
+ "STR q5, [%[ptr5]], #16\n"
+ "STR q6, [%[ptr6]], #16\n"
+ "STR q7, [%[ptr7]], #16\n"
+ : [ptr0] "+r"(ptr0), [ptr1] "+r"(ptr1), [ptr2] "+r"(ptr2), [ptr3] "+r"(ptr3),
+ [ptr4] "+r"(ptr4), [ptr5] "+r"(ptr5), [ptr6] "+r"(ptr6), [ptr7] "+r"(ptr7)
+ : [v0] "w"(v0), [v1] "w"(v1), [v2] "w"(v2), [v3] "w"(v3),
+ [v4] "w"(v4), [v5] "w"(v5), [v6] "w"(v6), [v7] "w"(v7)
+ : "x20", "x21", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "memory");
+}
+
+static inline void stinc(uint32x4_t v0, uint32x4_t v1, uint32x4_t v2, uint32x4_t v3,
+ uint32x4_t v4, uint32x4_t v5, uint32x4_t v6, uint32x4_t v7,
+ uint32_t *&ptr0, uint32_t *&ptr1, uint32_t *&ptr2, uint32_t *&ptr3,
+ uint32_t *&ptr4, uint32_t *&ptr5, uint32_t *&ptr6, uint32_t *&ptr7)
+{
+ __asm __volatile(
+ "LDR q0, [%[ptr0]]\n"
+ "LDR q1, [%[ptr1]]\n"
+ "LDR q2, [%[ptr2]]\n"
+ "LDR q3, [%[ptr3]]\n"
+ "LDR q4, [%[ptr4]]\n"
+ "LDR q5, [%[ptr5]]\n"
+ "LDR q6, [%[ptr6]]\n"
+ "LDR q7, [%[ptr7]]\n"
+ "ADD v0.4s, v0.4s, %[v0].4s\n"
+ "ADD v1.4s, v1.4s, %[v1].4s\n"
+ "ADD v2.4s, v2.4s, %[v2].4s\n"
+ "ADD v3.4s, v3.4s, %[v3].4s\n"
+ "ADD v4.4s, v4.4s, %[v4].4s\n"
+ "ADD v5.4s, v5.4s, %[v5].4s\n"
+ "ADD v6.4s, v6.4s, %[v6].4s\n"
+ "ADD v7.4s, v7.4s, %[v7].4s\n"
+ "STR q0, [%[ptr0]], #16\n"
+ "STR q1, [%[ptr1]], #16\n"
+ "STR q2, [%[ptr2]], #16\n"
+ "STR q3, [%[ptr3]], #16\n"
+ "STR q4, [%[ptr4]], #16\n"
+ "STR q5, [%[ptr5]], #16\n"
+ "STR q6, [%[ptr6]], #16\n"
+ "STR q7, [%[ptr7]], #16\n"
+ : [ptr0] "+r"(ptr0), [ptr1] "+r"(ptr1), [ptr2] "+r"(ptr2), [ptr3] "+r"(ptr3),
+ [ptr4] "+r"(ptr4), [ptr5] "+r"(ptr5), [ptr6] "+r"(ptr6), [ptr7] "+r"(ptr7)
+ : [v0] "w"(v0), [v1] "w"(v1), [v2] "w"(v2), [v3] "w"(v3),
+ [v4] "w"(v4), [v5] "w"(v5), [v6] "w"(v6), [v7] "w"(v7)
+ : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "memory");
+}
+
+namespace arm_compute
+{
+void NEGEMMLowpAArch64V8P4Kernel::internal_configure(const ITensor *input0, const ITensor *input1, ITensor *output)
+{
+ ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input0, 1, DataType::U8);
+ ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::U32);
+ ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input0, input1);
+
+ _input0 = input0;
+ _input1 = input1;
+ _output = output;
+
+ // Configure kernel window
+ Window win = calculate_max_window(*output->info());
+
+ AccessWindowRectangle output_access(output->info(), 0, 0, 12, 8);
+
+ const int input0_access_end = ceil_to_multiple(input0->info()->tensor_shape().x(), 8);
+ const int input1_access_end = ceil_to_multiple(input1->info()->tensor_shape().x(), 12);
+
+ update_window_and_padding(win,
+ AccessWindowStatic(input0->info(), 0, 0, input0_access_end, input0->info()->tensor_shape().y()),
+ AccessWindowStatic(input1->info(), 0, 0, input1_access_end, input1->info()->tensor_shape().y()),
+ output_access);
+
+ INEKernel::configure(win);
+}
+
+bool NEGEMMLowpAArch64V8P4Kernel::is_parallelisable() const
+{
+ return false;
+}
+
+#define _UDOT_MACRO \
+ ".altmacro\n" \
+ ".macro udot opd:req, opn:req, opm:req\n" \
+ "local vd, vn, vm, h, l\n" \
+ ".irp reg,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31\n" \
+ ".ifeqs \"\\opd\",\"v\\reg\\.4s\"\n" \
+ ".set vd,\\reg\n" \
+ ".endif\n" \
+ ".ifeqs \"\\opn\",\"v\\reg\\.16b\"\n" \
+ ".set vn,\\reg\n" \
+ ".endif\n" \
+ ".irp idx,0,1,2,3\n" \
+ ".ifeqs \"\\opm\",\"v\\reg\\.4b[\\idx\\]\"\n" \
+ ".set vm,\\reg\n" \
+ ".set h,\\idx / 2\n" \
+ ".set l,\\idx %% 2\n" \
+ ".endif\n" \
+ ".endr\n" \
+ ".endr\n" \
+ ".ifndef vd\n" \
+ ".error \"Bad operand \\opd\"\n" \
+ ".exitm\n" \
+ ".endif\n" \
+ ".ifndef vn\n" \
+ ".error \"Bad operand \\opn\"\n" \
+ ".exitm\n" \
+ ".endif\n" \
+ ".ifndef vm\n" \
+ ".error \"Bad operand \\opm\"\n" \
+ ".exitm\n" \
+ ".endif\n" \
+ ".ifndef h\n" \
+ ".error \"Bad operand \\opm\"\n" \
+ ".exitm\n" \
+ ".endif\n" \
+ ".ifndef l\n" \
+ ".error \"Bad operand \\opm\"\n" \
+ ".exitm\n" \
+ ".endif\n" \
+ ".int 0x6f80e000 | vd | (vn << 5) | (vm << 16) | (l << 21) | (h << 11)\n" \
+ ".endm\n"
+
+#define _PREFETCH_ \
+ __asm __volatile( \
+ "" ASM_PREFETCH("[%[a_ptr], #64]") \
+ ASM_PREFETCH("[%[a_ptr], #128]") \
+ ASM_PREFETCH("[%[a_ptr], #192]") \
+ : \
+ : \
+ [a_ptr] "r"(a_ptr), [b_ptr] "r"(b_ptr) \
+ : "x20", "x21", "memory"); \
+ __asm __volatile( \
+ "" ASM_PREFETCH("[%[b_ptr]]") \
+ ASM_PREFETCH("[%[b_ptr], #64]") \
+ ASM_PREFETCH("[%[b_ptr], #128]") \
+ ASM_PREFETCH("[%[b_ptr], #192]") \
+ : \
+ : \
+ [b_ptr] "r"(b_ptr) \
+ : "x20", "x21"); \
+ __asm __volatile( \
+ "" \
+ : [r00] "+w"(r00), [r01] "+w"(r01), \
+ [r10] "+w"(r10), [r11] "+w"(r11), \
+ [r20] "+w"(r20), [r21] "+w"(r21), \
+ [r30] "+w"(r30), [r31] "+w"(r31), \
+ [a0] "+w"(a0), [a1] "+w"(a1), \
+ [b0] "+w"(b0), [b1] "+w"(b1), [b2] "=w"(b2), \
+ [a_ptr] "+r"(a_ptr), [b_ptr] "+r"(b_ptr) \
+ : \
+ :); \
+ __asm __volatile( \
+ "" \
+ : [r02] "+w"(r02), \
+ [r12] "+w"(r12), \
+ [r22] "+w"(r22), \
+ [r32] "+w"(r32), \
+ [r40] "+w"(r40), \
+ [r50] "+w"(r50), \
+ [r60] "+w"(r60), \
+ [r70] "+w"(r70), \
+ [a0a] "=w"(a0a), [a1a] "=w"(a1a), \
+ [b0] "+w"(b0), [b2] "+w"(b2), [b5] "=&w"(b5) \
+ : \
+ :); \
+ __asm __volatile( \
+ "" \
+ : \
+ [r41] "+w"(r41), [r42] "+w"(r42), \
+ [r51] "+w"(r51), [r52] "+w"(r52), \
+ [r61] "+w"(r61), [r62] "+w"(r62), \
+ [r71] "+w"(r71), [r72] "+w"(r72), \
+ [a1] "+w"(a1), \
+ [b0] "+w"(b0), [b1] "+w"(b1), [b2] "+w"(b2), \
+ [b_ptr] "+r"(b_ptr), [k] "+r"(k) \
+ : \
+ :);
+
+void NEGEMMLowpAArch64V8P4Kernel::run(const Window &window, const ThreadInfo &info)
+{
+ ARM_COMPUTE_UNUSED(info);
+ ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
+ ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INEKernel::window(), window);
+
+ const int x_block = 348;
+ const int k_block = 1664;
+ const int nthreads = 1;
+ const int M = _output->info()->tensor_shape().y();
+ const int N = _output->info()->tensor_shape().x();
+ const int K = _input0->info()->tensor_shape().x() >> 3;
+
+ int yblocksperthread = ((M / nthreads) + 7) / 8;
+
+ if(yblocksperthread < 1)
+ {
+ yblocksperthread = 1;
+ }
+
+ const int lda = _input0->info()->strides_in_bytes().y();
+ const int ldb = _input1->info()->strides_in_bytes().y();
+ const int ldc = _output->info()->strides_in_bytes().y();
+ const int ldc2 = _output->info()->strides_in_bytes().x();
+ const int ldc3 = ldc / sizeof(uint32_t);
+
+ const int threadid = 0;
+ int y0 = threadid * yblocksperthread * 8;
+ int ymax = (threadid + 1) * yblocksperthread * 8;
+ if(y0 >= M)
+ {
+ return;
+ }
+ if(ymax > M)
+ {
+ ymax = M;
+ }
+ for(int k0 = 0; k0 < K; k0 += k_block)
+ {
+ int kmax = k0 + k_block;
+ if(kmax > K)
+ {
+ kmax = K;
+ }
+
+ for(int x0 = 0; x0 < N; x0 += x_block)
+ {
+ int xmax = x0 + x_block;
+ if(xmax > N)
+ {
+ xmax = N;
+ }
+
+ for(int y = y0; y < ymax; y += 8)
+ {
+ auto c_ptr0 = reinterpret_cast<uint32_t *>(_output->buffer() + (y * ldc) + x0 * ldc2);
+ uint32_t *c_ptr1 = c_ptr0 + ldc3;
+ uint32_t *c_ptr2 = c_ptr1 + ldc3;
+ uint32_t *c_ptr3 = c_ptr2 + ldc3;
+ uint32_t *c_ptr4 = c_ptr3 + ldc3;
+ uint32_t *c_ptr5 = c_ptr4 + ldc3;
+ uint32_t *c_ptr6 = c_ptr5 + ldc3;
+ uint32_t *c_ptr7 = c_ptr6 + ldc3;
+
+ __asm __volatile(
+ "" ASM_PREFETCH("[%[c_ptr0]]")
+ ASM_PREFETCH("[%[c_ptr1]]")
+ ASM_PREFETCH("[%[c_ptr2]]")
+ ASM_PREFETCH("[%[c_ptr3]]")
+ ASM_PREFETCH("[%[c_ptr4]]")
+ ASM_PREFETCH("[%[c_ptr5]]")
+ ASM_PREFETCH("[%[c_ptr6]]")
+ ASM_PREFETCH("[%[c_ptr7]]")
+ :
+ : [c_ptr0] "r"(c_ptr0), [c_ptr1] "r"(c_ptr1), [c_ptr2] "r"(c_ptr2), [c_ptr3] "r"(c_ptr3),
+ [c_ptr4] "r"(c_ptr4), [c_ptr5] "r"(c_ptr5), [c_ptr6] "r"(c_ptr6), [c_ptr7] "r"(c_ptr7)
+ : "x20", "x21");
+
+ for(int x = x0; x < xmax; x += 12)
+ {
+ register uint32x4_t r00 asm("v8");
+ register uint32x4_t r10 asm("v9");
+ register uint32x4_t r20 asm("v10");
+ register uint32x4_t r30 asm("v11");
+ register uint32x4_t r40 asm("v12");
+ register uint32x4_t r50 asm("v13");
+ register uint32x4_t r60 asm("v14");
+ register uint32x4_t r70 asm("v15");
+ register uint32x4_t r01 asm("v16");
+ register uint32x4_t r11 asm("v17");
+ register uint32x4_t r21 asm("v18");
+ register uint32x4_t r31 asm("v19");
+ register uint32x4_t r41 asm("v20");
+ register uint32x4_t r51 asm("v21");
+ register uint32x4_t r61 asm("v22");
+ register uint32x4_t r71 asm("v23");
+ register uint32x4_t r02 asm("v24");
+ register uint32x4_t r12 asm("v25");
+ register uint32x4_t r22 asm("v26");
+ register uint32x4_t r32 asm("v27");
+ register uint32x4_t r42 asm("v28");
+ register uint32x4_t r52 asm("v29");
+ register uint32x4_t r62 asm("v30");
+ register uint32x4_t r72 asm("v31");
+
+ register uint8x16_t a0 asm("v0");
+ register uint8x16_t a1 asm("v1");
+ register uint8x16_t b0 asm("v2");
+ register uint8x16_t b1 asm("v3");
+ register uint8x16_t b2 asm("v4");
+ register uint8x16_t a0a asm("v5");
+ register uint8x16_t a1a asm("v6");
+ register uint8x16_t b5 asm("v7");
+ const uint8_t *a_ptr = _input0->buffer() + ((y / 8) * lda) + (k0 * 8);
+ const uint8_t *b_ptr = _input1->buffer() + ((x / 12) * ldb) + (k0 * 12);
+
+ r00 = r01 = r02 = r10 = r11 = r12 = r20 = r21 = r22 = r30 = r31 = r32 = vdupq_n_u32(0);
+ r40 = r41 = r42 = r50 = r51 = r52 = r60 = r61 = r62 = r70 = r71 = r72 = vdupq_n_u32(0);
+
+ int k = ((kmax - k0) / 8) - 1;
+
+ a0 = vld1q_u8(a_ptr);
+ b0 = vld1q_u8(b_ptr);
+ a1 = vld1q_u8(a_ptr + 16);
+ b1 = vld1q_u8(b_ptr + 16);
+
+ _PREFETCH_
+
+ __asm __volatile(
+ _UDOT_MACRO
+ "1:\n"
+ "udot v8.4s , %[b0].16b, %[a0].4b[0]\n"
+ "udot v9.4s , %[b0].16b, %[a0].4b[1]\n"
+ "ldr %q[b2], [%[b_ptr], #32]\n"
+ "udot v10.4s, %[b0].16b, %[a0].4b[2]\n"
+ "udot v11.4s, %[b0].16b, %[a0].4b[3]\n"
+ "ldr %q[a0a], [%[a_ptr], #32]\n"
+ "udot v12.4s, %[b0].16b, %[a1].4b[0]\n"
+ "udot v13.4s, %[b0].16b, %[a1].4b[1]\n"
+ "ldr %q[a1a], [%[a_ptr], #48]\n"
+ "udot v14.4s, %[b0].16b, %[a1].4b[2]\n"
+ "udot v15.4s, %[b0].16b, %[a1].4b[3]\n"
+ "ldr %q[b0], [%[b_ptr], #48]\n"
+
+ "udot v16.4s, %[b1].16b, %[a0].4b[0]\n"
+ "udot v17.4s, %[b1].16b, %[a0].4b[1]\n" ASM_PREFETCH("[%[a_ptr], #256]")
+ "udot v18.4s, %[b1].16b, %[a0].4b[2]\n"
+ "udot v19.4s, %[b1].16b, %[a0].4b[3]\n"
+ "udot v20.4s, %[b1].16b, %[a1].4b[0]\n"
+ "udot v21.4s, %[b1].16b, %[a1].4b[1]\n"
+ "udot v22.4s, %[b1].16b, %[a1].4b[2]\n"
+ "udot v23.4s, %[b1].16b, %[a1].4b[3]\n"
+ "ldr %q[b1], [%[b_ptr], #64]\n"
+
+ "udot v24.4s, %[b2].16b, %[a0].4b[0]\n"
+ "udot v25.4s, %[b2].16b, %[a0].4b[1]\n" ASM_PREFETCH("[%[b_ptr], #256]")
+ "udot v26.4s, %[b2].16b, %[a0].4b[2]\n"
+ "udot v27.4s, %[b2].16b, %[a0].4b[3]\n"
+ "udot v28.4s, %[b2].16b, %[a1].4b[0]\n"
+ "udot v29.4s, %[b2].16b, %[a1].4b[1]\n"
+ "udot v30.4s, %[b2].16b, %[a1].4b[2]\n"
+ "udot v31.4s, %[b2].16b, %[a1].4b[3]\n"
+ "ldr %q[b2], [%[b_ptr], #80]\n"
+
+ "udot v8.4s , %[b0].16b, %[a0a].4b[0]\n"
+ "udot v9.4s , %[b0].16b, %[a0a].4b[1]\n"
+ "ldr %q[a0], [%[a_ptr], #64]\n"
+ "udot v10.4s, %[b0].16b, %[a0a].4b[2]\n"
+ "udot v11.4s, %[b0].16b, %[a0a].4b[3]\n"
+ "udot v12.4s, %[b0].16b, %[a1a].4b[0]\n"
+ "ldr %q[a1], [%[a_ptr], #80]\n"
+ "udot v13.4s, %[b0].16b, %[a1a].4b[1]\n"
+ "udot v14.4s, %[b0].16b, %[a1a].4b[2]\n"
+ "udot v15.4s, %[b0].16b, %[a1a].4b[3]\n"
+ "ldr %q[b0], [%[b_ptr], #96]\n"
+
+ "udot v16.4s, %[b1].16b, %[a0a].4b[0]\n"
+ "udot v17.4s, %[b1].16b, %[a0a].4b[1]\n" ASM_PREFETCH("[%[b_ptr], #320]")
+ "udot v18.4s, %[b1].16b, %[a0a].4b[2]\n"
+ "udot v19.4s, %[b1].16b, %[a0a].4b[3]\n"
+ "udot v20.4s, %[b1].16b, %[a1a].4b[0]\n"
+ "udot v21.4s, %[b1].16b, %[a1a].4b[1]\n"
+ "udot v22.4s, %[b1].16b, %[a1a].4b[2]\n"
+ "udot v23.4s, %[b1].16b, %[a1a].4b[3]\n"
+ "ldr %q[b1], [%[b_ptr], #112]\n"
+
+ "udot v24.4s, %[b2].16b, %[a0a].4b[0]\n"
+ "udot v25.4s, %[b2].16b, %[a0a].4b[1]\n"
+ "add %[a_ptr], %[a_ptr], #64\n"
+ "udot v26.4s, %[b2].16b, %[a0a].4b[2]\n"
+ "udot v27.4s, %[b2].16b, %[a0a].4b[3]\n"
+ "add %[b_ptr], %[b_ptr], #96\n"
+ "udot v28.4s, %[b2].16b, %[a1a].4b[0]\n"
+ "udot v29.4s, %[b2].16b, %[a1a].4b[1]\n"
+ "subs %w[k], %w[k], #1\n"
+ "udot v30.4s, %[b2].16b, %[a1a].4b[2]\n"
+ "udot v31.4s, %[b2].16b, %[a1a].4b[3]\n"
+
+ "bne 1b\n"
+
+ "udot v8.4s , %[b0].16b, %[a0].4b[0]\n"
+ "udot v9.4s , %[b0].16b, %[a0].4b[1]\n"
+ "ldr %q[b2], [%[b_ptr], #32]\n"
+ "udot v10.4s, %[b0].16b, %[a0].4b[2]\n"
+ "udot v11.4s, %[b0].16b, %[a0].4b[3]\n"
+ "ldr %q[a0a], [%[a_ptr], #32]\n"
+ "udot v12.4s, %[b0].16b, %[a1].4b[0]\n"
+ "udot v13.4s, %[b0].16b, %[a1].4b[1]\n"
+ "ldr %q[a1a], [%[a_ptr], #48]\n"
+ "udot v14.4s, %[b0].16b, %[a1].4b[2]\n"
+ "udot v15.4s, %[b0].16b, %[a1].4b[3]\n"
+ "ldr %q[b0], [%[b_ptr], #48]\n"
+
+ "udot v16.4s, %[b1].16b, %[a0].4b[0]\n"
+ "udot v17.4s, %[b1].16b, %[a0].4b[1]\n"
+ "udot v18.4s, %[b1].16b, %[a0].4b[2]\n"
+ "udot v19.4s, %[b1].16b, %[a0].4b[3]\n"
+ "udot v20.4s, %[b1].16b, %[a1].4b[0]\n"
+ "udot v21.4s, %[b1].16b, %[a1].4b[1]\n"
+ "udot v22.4s, %[b1].16b, %[a1].4b[2]\n"
+ "udot v23.4s, %[b1].16b, %[a1].4b[3]\n"
+ "ldr %q[b1], [%[b_ptr], #64]\n"
+
+ "udot v24.4s, %[b2].16b, %[a0].4b[0]\n"
+ "udot v25.4s, %[b2].16b, %[a0].4b[1]\n"
+ "udot v26.4s, %[b2].16b, %[a0].4b[2]\n"
+ "udot v27.4s, %[b2].16b, %[a0].4b[3]\n"
+ "udot v28.4s, %[b2].16b, %[a1].4b[0]\n"
+ "udot v29.4s, %[b2].16b, %[a1].4b[1]\n"
+ "udot v30.4s, %[b2].16b, %[a1].4b[2]\n"
+ "udot v31.4s, %[b2].16b, %[a1].4b[3]\n"
+ "ldr %q[b2], [%[b_ptr], #80]\n"
+
+ "udot v8.4s , %[b0].16b, %[a0a].4b[0]\n" ASM_PREFETCH("[%[c_ptr0]]") "udot v9.4s , %[b0].16b, %[a0a].4b[1]\n" ASM_PREFETCH("[%[c_ptr1]]") "udot v10.4s, %[b0].16b, %[a0a].4b[2]\n"
+ ASM_PREFETCH("[%[c_ptr2]]") "udot v11.4s, %[b0].16b, %[a0a].4b[3]\n" ASM_PREFETCH("[%[c_ptr3]]") "udot v12.4s, %[b0].16b, %[a1a].4b[0]\n" ASM_PREFETCH("[%[c_ptr4]]")
+ "udot v13.4s, %[b0].16b, %[a1a].4b[1]\n" ASM_PREFETCH("[%[c_ptr5]]") "udot v14.4s, %[b0].16b, %[a1a].4b[2]\n" ASM_PREFETCH("[%[c_ptr6]]") "udot v15.4s, %[b0].16b, %[a1a].4b[3]\n"
+ ASM_PREFETCH("[%[c_ptr7]]")
+
+ "udot v16.4s, %[b1].16b, %[a0a].4b[0]\n" ASM_PREFETCH("[%[c_ptr0], #48]") "udot v17.4s, %[b1].16b, %[a0a].4b[1]\n" ASM_PREFETCH("[%[c_ptr1], #48]") "udot v18.4s, %[b1].16b, %[a0a].4b[2]\n"
+ ASM_PREFETCH("[%[c_ptr2], #48]") "udot v19.4s, %[b1].16b, %[a0a].4b[3]\n" ASM_PREFETCH("[%[c_ptr3], #48]") "udot v20.4s, %[b1].16b, %[a1a].4b[0]\n" ASM_PREFETCH("[%[c_ptr4], #48]")
+ "udot v21.4s, %[b1].16b, %[a1a].4b[1]\n" ASM_PREFETCH("[%[c_ptr5], #48]") "udot v22.4s, %[b1].16b, %[a1a].4b[2]\n" ASM_PREFETCH("[%[c_ptr6], #48]") "udot v23.4s, %[b1].16b, %[a1a].4b[3]\n"
+ ASM_PREFETCH("[%[c_ptr7], #48]")
+
+ "udot v24.4s, %[b2].16b, %[a0a].4b[0]\n"
+ "udot v25.4s, %[b2].16b, %[a0a].4b[1]\n"
+ "udot v26.4s, %[b2].16b, %[a0a].4b[2]\n"
+ "udot v27.4s, %[b2].16b, %[a0a].4b[3]\n"
+ "add %[b_ptr], %[b_ptr], #96\n"
+ "udot v28.4s, %[b2].16b, %[a1a].4b[0]\n"
+ "udot v29.4s, %[b2].16b, %[a1a].4b[1]\n"
+ "udot v30.4s, %[b2].16b, %[a1a].4b[2]\n"
+ "udot v31.4s, %[b2].16b, %[a1a].4b[3]\n"
+
+ // Clean up macro namespace
+ ".purgem udot\n"
+
+ :
+ [a_ptr] "+r"(a_ptr), [b_ptr] "+r"(b_ptr),
+ [a0] "+w"(a0), [a1] "+w"(a1), [a0a] "+w"(a0a), [a1a] "+w"(a1a),
+ [b0] "+w"(b0), [b1] "+w"(b1), [b2] "+w"(b2), [k] "+r"(k)
+ : [c_ptr0] "r"(c_ptr0), [c_ptr1] "r"(c_ptr1), [c_ptr2] "r"(c_ptr2), [c_ptr3] "r"(c_ptr3),
+ [c_ptr4] "r"(c_ptr4), [c_ptr5] "r"(c_ptr5), [c_ptr6] "r"(c_ptr6), [c_ptr7] "r"(c_ptr7)
+ : "x20", "x21");
+
+ stincpld(r00, r10, r20, r30, r40, r50, r60, r70, c_ptr0, c_ptr1, c_ptr2, c_ptr3, c_ptr4, c_ptr5, c_ptr6, c_ptr7);
+ stinc(r01, r11, r21, r31, r41, r51, r61, r71, c_ptr0, c_ptr1, c_ptr2, c_ptr3, c_ptr4, c_ptr5, c_ptr6, c_ptr7);
+ stinc(r02, r12, r22, r32, r42, r52, r62, r72, c_ptr0, c_ptr1, c_ptr2, c_ptr3, c_ptr4, c_ptr5, c_ptr6, c_ptr7);
+ }
+ }
+ }
+ }
+}
+} // namespace arm_compute
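
The _UDOT_MACRO defined above hand-assembles the Armv8.2-A UDOT (by element) instruction and emits its encoding with .int, presumably so the file still builds with assemblers that do not know the dot-product extension. Semantically, each emitted "udot vd.4s, vn.16b, vm.4b[idx]" accumulates, into every 32-bit lane of vd, the dot product of four u8 values of vn with the four u8 values held in lane idx of vm. A scalar sketch of that behaviour (illustration only, not the instruction encoding) is:

// Scalar reference of one "udot vd.4s, vn.16b, vm.4b[idx]" as used in the kernel above.
#include <cstdint>

void udot_lane_ref(uint32_t d[4], const uint8_t n[16], const uint8_t m[16], int idx)
{
    for(int lane = 0; lane < 4; ++lane) // one 32-bit accumulator per output lane
    {
        uint32_t acc = 0;
        for(int i = 0; i < 4; ++i) // four u8 x u8 products per lane
        {
            acc += uint32_t(n[lane * 4 + i]) * uint32_t(m[idx * 4 + i]);
        }
        d[lane] += acc;
    }
}
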
diff --git a/src/runtime/NEON/functions/NEGEMMLowp.cpp b/src/runtime/NEON/functions/NEGEMMLowp.cpp
index 7413b28d03..90e47ceca0 100644
--- a/src/runtime/NEON/functions/NEGEMMLowp.cpp
+++ b/src/runtime/NEON/functions/NEGEMMLowp.cpp
@@ -26,28 +26,100 @@
#include "arm_compute/core/Error.h"
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/ITensor.h"
+#include "arm_compute/core/NEON/kernels/arm64/NEGEMMLowpAArch64V8P4Kernel.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/runtime/NEON/NEScheduler.h"
#include "arm_compute/runtime/TensorAllocator.h"
+#include "support/ToolchainSupport.h"
using namespace arm_compute;
+#define NEGEMMLOWP_VALIDATE_DIMENSIONS(a, b, output) \
+ ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN((a), 1, DataType::U8); \
+ ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN((b), 1, DataType::U8); \
+ ARM_COMPUTE_ERROR_ON_MSG((a)->info()->dimension(0) != (b)->info()->dimension(1), "The product AB is defined only if the number of columns in A is equal to the number of rows in B"); \
+ ARM_COMPUTE_ERROR_ON_MSG((a)->info()->dimension(1) != (output)->info()->dimension(1), "The C matrix must have the same number of rows as the matrix A"); \
+ ARM_COMPUTE_ERROR_ON_MSG((b)->info()->dimension(0) != (output)->info()->dimension(0), "The C matrix must have the same number of columns as the matrix B");
+
NEGEMMLowp::NEGEMMLowp(std::shared_ptr<IMemoryManager> memory_manager)
- : _memory_group(std::move(memory_manager)), _interleave_kernel(), _transpose_kernel(), _mm_kernel(), _tmp_a(), _tmp_b()
+ : _memory_group(std::move(memory_manager)), _interleave_kernel(), _transpose_kernel(), _mm_kernel(), _mm_optimised_kernel(nullptr), _interleave_blocked(), _interleave_blocked_transposed(), _tmp_a(),
+ _tmp_b()
{
}
+void NEGEMMLowp::configure(const ITensor *a, const ITensor *b, ITensor *output)
+{
+ NEGEMMLOWP_VALIDATE_DIMENSIONS(a, b, output);
+ ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::U32);
+
+ const struct CPUInfo ci = NEScheduler::get().cpu_info();
+ const int cpu_has_dotprod = static_cast<int>(ci.CPU) & static_cast<int>(CPUTarget::DOT);
+ if(cpu_has_dotprod != 0)
+ {
+#if defined(__aarch64__)
+ // NEGEMMLowpAArch64V8P4Kernel is only compiled for AArch64 targets
+ _mm_optimised_kernel = support::cpp14::make_unique<NEGEMMLowpAArch64V8P4Kernel>();
+ TensorShape shape_a_int = a->info()->tensor_shape();
+ shape_a_int.set(0, a->info()->dimension(0) * 8.f);
+ shape_a_int.set(1, std::ceil(a->info()->dimension(1) / 8.f));
+
+ TensorShape shape_b_int = b->info()->tensor_shape();
+ shape_b_int.set(0, b->info()->dimension(0) * 12.f);
+ shape_b_int.set(1, std::ceil(b->info()->dimension(1) / 12.f));
+
+ TensorInfo info_a_int(shape_a_int, 1, a->info()->data_type());
+ TensorInfo info_b_int(shape_b_int, 1, b->info()->data_type());
+ _tmp_a.allocator()->init(info_a_int);
+ _tmp_b.allocator()->init(info_b_int);
+
+ _memory_group.manage(&_tmp_a);
+ _memory_group.manage(&_tmp_b);
+
+ _interleave_blocked.configure(a, &_tmp_a, 8, 4, false);
+ _interleave_blocked_transposed.configure(b, &_tmp_b, 12, 4, true);
+ _mm_optimised_kernel->configure(&_tmp_a, &_tmp_b, output);
+
+ _tmp_a.allocator()->allocate();
+ _tmp_b.allocator()->allocate();
+#endif /* defined(__aarch64__) */
+ }
+ else
+ {
+ ARM_COMPUTE_ERROR("Not implemented");
+ // This is in the process of being updated, for more info please refer to COMPMID-624.
+ }
+}
+
+void NEGEMMLowp::run()
+{
+ _memory_group.acquire();
+
+ if(_mm_optimised_kernel != nullptr)
+ {
+ NEScheduler::get().schedule(&_interleave_blocked, Window::DimY);
+ NEScheduler::get().schedule(&_interleave_blocked_transposed, Window::DimY);
+ NEScheduler::get().schedule(_mm_optimised_kernel.get(), Window::DimY);
+ }
+ else
+ {
+ /* Run interleave kernel */
+ NEScheduler::get().schedule(&_interleave_kernel, Window::DimY);
+ /* Run transpose kernel */
+ NEScheduler::get().schedule(&_transpose_kernel, Window::DimY);
+ /* Run matrix multiply kernel */
+ NEScheduler::get().schedule(&_mm_kernel, Window::DimY);
+ }
+
+ _memory_group.release();
+}
+
void NEGEMMLowp::configure(const ITensor *a, const ITensor *b, ITensor *output, int32_t a_offset, int32_t b_offset, int32_t output_offset, int32_t output_mult_int, int32_t shift)
{
- ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(a, 1, DataType::U8);
- ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(b, 1, DataType::U8);
- ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::U8);
+ NEGEMMLOWP_VALIDATE_DIMENSIONS(a, b, output);
ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(a, b, output);
- ARM_COMPUTE_ERROR_ON_MSG(a->info()->dimension(0) != b->info()->dimension(1), "The product AB is defined only if the number of columns in A is equal to the number of rows in B");
- ARM_COMPUTE_ERROR_ON_MSG(a->info()->dimension(1) != output->info()->dimension(1), "The C matrix must have the same number of rows as the matrix A");
- ARM_COMPUTE_ERROR_ON_MSG(b->info()->dimension(0) != output->info()->dimension(0), "The C matrix must have the same number of columns as the matrix C");
+ ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::U8);
/* The interleaved output matrix will have the following shape: [ a_height * 4, ceil(a_width / 4.0f) ] */
TensorShape shape_tmp_a = a->info()->tensor_shape();
@@ -75,18 +147,4 @@ void NEGEMMLowp::configure(const ITensor *a, const ITensor *b, ITensor *output,
_tmp_b.allocator()->allocate();
}
-void NEGEMMLowp::run()
-{
- _memory_group.acquire();
-
- /* Run interleave kernel */
- NEScheduler::get().schedule(&_interleave_kernel, Window::DimY);
-
- /* Run transpose kernel */
- NEScheduler::get().schedule(&_transpose_kernel, Window::DimY);
-
- /* Run matrix multiply kernel */
- NEScheduler::get().schedule(&_mm_kernel, Window::DimY);
-
- _memory_group.release();
-}
+#undef NEGEMMLOWP_VALIDATE_DIMENSIONS