-rw-r--r--  arm_compute/core/NEON/kernels/arm64/NEGEMMLowpAArch64Kernel.h              |   8
-rw-r--r--  arm_compute/core/NEON/kernels/assembly/kernels/a64_gemm_u8_4x4.hpp         |  61
-rw-r--r--  arm_compute/core/NEON/kernels/assembly/kernels/a64_gemm_u8_4x4/generic.hpp | 465
-rw-r--r--  src/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.cpp                   |   2
-rw-r--r--  src/core/NEON/kernels/arm64/NEGEMMLowpAArch64Kernel.cpp                    | 149
-rw-r--r--  src/runtime/NEON/functions/NEGEMMLowpAssemblyMatrixMultiplyCore.cpp        |  27
-rw-r--r--  tests/validation/CPP/GEMMLowp.cpp                                           |  28
-rw-r--r--  tests/validation/CPP/GEMMLowp.h                                             |  11
-rw-r--r--  tests/validation/NEON/GEMMLowp.cpp                                          |  17
-rw-r--r--  tests/validation/fixtures/GEMMLowpAssemblyFixture.h                         |  42
-rw-r--r--  tests/validation/fixtures/GEMMLowpFixture.h                                 |   2
11 files changed, 737 insertions(+), 75 deletions(-)
diff --git a/arm_compute/core/NEON/kernels/arm64/NEGEMMLowpAArch64Kernel.h b/arm_compute/core/NEON/kernels/arm64/NEGEMMLowpAArch64Kernel.h
index e8e71cf3d7..a93df033de 100644
--- a/arm_compute/core/NEON/kernels/arm64/NEGEMMLowpAArch64Kernel.h
+++ b/arm_compute/core/NEON/kernels/arm64/NEGEMMLowpAArch64Kernel.h
@@ -37,11 +37,19 @@ class ITensor;
class NEGEMMLowpAArch64Kernel : public NEGEMMAssemblyBaseKernel
{
public:
+ /** Default constructor */
+ NEGEMMLowpAArch64Kernel();
+
// Inherited methods overridden:
void run(const Window &window, const ThreadInfo &info) override;
protected:
void internal_configure(const ITensor *input0, const ITensor *input1, ITensor *output, ITensor *workspace, float alpha, float beta, bool transform_0, bool transform_1) override;
+
+private:
+ using NEGEMMLowpAArch64 = void(const ITensor *input0, const ITensor *input1, ITensor *output, ITensor *workspace, float alpha, float beta, bool transform_0, bool transform_1, const Window &window,
+ const ThreadInfo &info);
+ NEGEMMLowpAArch64 *_func;
};
} // namespace arm_compute
#endif /* ARM_COMPUTE_AARCH64_V8A */
diff --git a/arm_compute/core/NEON/kernels/assembly/kernels/a64_gemm_u8_4x4.hpp b/arm_compute/core/NEON/kernels/assembly/kernels/a64_gemm_u8_4x4.hpp
new file mode 100644
index 0000000000..3561bfec96
--- /dev/null
+++ b/arm_compute/core/NEON/kernels/assembly/kernels/a64_gemm_u8_4x4.hpp
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#pragma once
+
+#ifdef __aarch64__
+
+// Load the actual kernel
+#include "a64_gemm_u8_4x4/generic.hpp"
+
+class gemm_u8_4x4 {
+public:
+ typedef uint8_t operand_type;
+ typedef uint32_t result_type;
+
+ typedef void (*kern_type)(const uint8_t *, const uint8_t *, uint32_t *, int, int, int);
+
+ /* Describes the data layout for A input */
+ static const int A_interleave = 4;
+ static const int A_block = 16;
+ static const bool A_transpose = false;
+
+ /* Same for B input */
+ static const int B_interleave = 4;
+ static const int B_block = 16;
+ static const bool B_transpose = true;
+
+ /* Kernel blocking parameters */
+ static const int out_width = 4;
+ static const int out_height = 4;
+ static const int k_unroll = 16;
+
+ kern_type kernel = nullptr;
+
+ gemm_u8_4x4(const CPUInfo *ci) {
+ kernel = a64_gemm_u8_4x4;
+ }
+};
+
+#endif // __aarch64__
+
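Note: gemm_u8_4x4 is a strategy descriptor rather than a complete GEMM. It tells the GemmInterleaved driver how the packed A and B panels are laid out (groups of 4 interleaved rows/columns, 16-deep blocks along K) and which micro-kernel to call on each 4x4 output tile. Below is a minimal sketch of how such a strategy is consumed, modelled on the gemm_interleaved_u8() wrapper added later in this patch; run_u8_gemm, the alpha/beta values and the pre-aligned workspace pointer are illustrative, not part of the change.

    #include <cstdint>

    #include "arm_compute/core/NEON/kernels/assembly/gemm_interleaved.hpp"
    #include "arm_compute/core/NEON/kernels/assembly/kernels/a64_gemm_u8_4x4.hpp"

    // Sketch only: the tensor/window plumbing of the real kernel is omitted.
    void run_u8_gemm(const CPUInfo *ci, int M, int N, int K,
                     const uint8_t *a, int lda, const uint8_t *b, int ldb,
                     uint32_t *c, int ldc, void *aligned_workspace)
    {
        // The strategy's blocking parameters (out_width, out_height, k_unroll)
        // drive the interleaving performed internally by GemmInterleaved.
        GemmInterleaved<gemm_u8_4x4, uint8_t, uint32_t> gemm(ci, M, N, K, false, false);

        // aligned_workspace must provide at least gemm.get_working_size() bytes.
        gemm.execute(a, lda, b, ldb, c, ldc, 1.0f, 0.0f, aligned_workspace);
    }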
diff --git a/arm_compute/core/NEON/kernels/assembly/kernels/a64_gemm_u8_4x4/generic.hpp b/arm_compute/core/NEON/kernels/assembly/kernels/a64_gemm_u8_4x4/generic.hpp
new file mode 100644
index 0000000000..e48c373f21
--- /dev/null
+++ b/arm_compute/core/NEON/kernels/assembly/kernels/a64_gemm_u8_4x4/generic.hpp
@@ -0,0 +1,465 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#pragma once
+
+#ifdef __aarch64__
+
+#include <arm_neon.h>
+
+inline void a64_gemm_u8_4x4(const uint8_t *Apanel, const uint8_t *Bpanel, uint32_t *Cpanel, int ablocks, int bblocks, int K) {
+ const uint8_t *a_ptr = Apanel;
+ uint32_t *c_ptr = Cpanel;
+ K /= 16;
+ int oddk = (K & 1);
+
+ for (int yb=0; yb<ablocks; yb++) {
+ const uint8_t *a_ptr0 = a_ptr;
+ const uint8_t *b_ptr = Bpanel;
+
+ for (int xb=0; xb<bblocks; xb++) {
+ a_ptr = a_ptr0;
+
+ int k = ((K+1)/2)-1;
+
+ register uint8x16_t b0 asm("v4");
+ register uint8x16_t b1 asm("v5");
+ register uint8x16_t b2 asm("v6");
+ register uint8x16_t b3 asm("v7");
+ register uint8x16_t b0a asm("v8");
+ register uint8x16_t b1a asm("v9");
+ register uint8x16_t b2a asm("v10");
+ register uint8x16_t b3a asm("v11");
+
+ __asm __volatile (
+ "movi v16.4s, #0x0\n"
+ "ldr q0, [%[a_ptr]]\n"
+ "movi v17.4s, #0x0\n"
+ "ldr %q[b0], [%[b_ptr]]\n"
+ "movi v18.4s, #0x0\n"
+ "ldr %q[b1], [%[b_ptr], #16]\n"
+ "movi v19.4s, #0x0\n"
+ "ldr %q[b2], [%[b_ptr], #32]\n"
+ "movi v20.4s, #0x0\n"
+ "ldr %q[b3], [%[b_ptr], #48]\n"
+ "movi v21.4s, #0x0\n"
+ "ldr q1, [%[a_ptr], #16]\n"
+ "movi v22.4s, #0x0\n"
+ "ldr q2, [%[a_ptr], #32]\n"
+ "movi v23.4s, #0x0\n"
+ "ldr q3, [%[a_ptr], #48]\n"
+ "movi v24.4s, #0x0\n"
+ ASM_PREFETCH("[%[b_ptr], #64]")
+ "movi v25.4s, #0x0\n"
+ ASM_PREFETCH("[%[a_ptr], #64]")
+ "movi v26.4s, #0x0\n"
+ ASM_PREFETCH("[%[b_ptr], #128]")
+ "movi v27.4s, #0x0\n"
+ ASM_PREFETCH("[%[a_ptr], #128]")
+ "movi v28.4s, #0x0\n"
+ ASM_PREFETCH("[%[b_ptr], #192]")
+ "movi v29.4s, #0x0\n"
+ ASM_PREFETCH("[%[a_ptr], #192]")
+ "movi v30.4s, #0x0\n"
+ ASM_PREFETCH("[%[b_ptr], #256]")
+ "movi v31.4s, #0x0\n"
+ ASM_PREFETCH("[%[a_ptr], #256]")
+
+ // Loop structure optimized for A57 (after r0).
+
+ // Unavoidably, the multiply will "dribble" if
+ // dual issued with an add.
+
+ // Minimize the effect of this by making sure
+ // there are 2 adds to run under the dribbled
+ // multiply.
+
+ // Pipeline in blocks of 8 multiplies - combine
+ // this iteration's multiplies with adds from
+ // the previous iteration.
+
+ // So the first block doesn't have any adds to
+ // do - but because all the adds are at the
+ // start of the block it's only the first couple
+ // of multiplies that need to be pulled out.
+
+ // Start of unroll 0 (first iteration)
+ "umull v12.8h, v0.8b, %[b0].8b\n"
+ "umull v13.8h, v0.8b, %[b1].8b\n"
+
+ // Skip loop if we are doing zero iterations of it.
+ "cbz %w[k], 4f\n"
+
+ // Unroll 0 continuation (branch target)
+ "1:\n"
+ "umull v14.8h, v0.8b, %[b2].8b\n"
+ "subs %w[k], %w[k], #1\n"
+ "umull v15.8h, v0.8b, %[b3].8b\n"
+ "ldr %q[b0a], [%[b_ptr], #64]\n"
+ "umlal2 v12.8h, v0.16b, %[b0].16b\n"
+ "umlal2 v13.8h, v0.16b, %[b1].16b\n"
+ "ldr %q[b1a], [%[b_ptr], #80]\n"
+ "umlal2 v14.8h, v0.16b, %[b2].16b\n"
+ "umlal2 v15.8h, v0.16b, %[b3].16b\n"
+ "ldr q0, [%[a_ptr], #64]\n"
+
+ "uadalp v16.4s, v12.8h\n"
+ "umull v12.8h, v1.8b, %[b0].8b\n"
+ "uadalp v17.4s, v13.8h\n"
+ "uadalp v18.4s, v14.8h\n"
+ "umull v13.8h, v1.8b, %[b1].8b\n"
+ "uadalp v19.4s, v15.8h\n"
+ "umull v14.8h, v1.8b, %[b2].8b\n"
+ "ldr %q[b2a], [%[b_ptr], #96]\n"
+ "umull v15.8h, v1.8b, %[b3].8b\n"
+ "umlal2 v12.8h, v1.16b, %[b0].16b\n"
+ "ldr %q[b3a], [%[b_ptr], #112]\n"
+ "umlal2 v13.8h, v1.16b, %[b1].16b\n"
+ "add %[b_ptr], %[b_ptr], #128\n"
+ "umlal2 v14.8h, v1.16b, %[b2].16b\n"
+ "umlal2 v15.8h, v1.16b, %[b3].16b\n"
+ "ldr q1, [%[a_ptr], #80]\n"
+
+ "uadalp v20.4s, v12.8h\n"
+ "umull v12.8h, v2.8b, %[b0].8b\n"
+ "uadalp v21.4s, v13.8h\n"
+ "uadalp v22.4s, v14.8h\n"
+ "umull v13.8h, v2.8b, %[b1].8b\n"
+ "uadalp v23.4s, v15.8h\n"
+ "umull v14.8h, v2.8b, %[b2].8b\n"
+ "umull v15.8h, v2.8b, %[b3].8b\n"
+ "umlal2 v12.8h, v2.16b, %[b0].16b\n"
+ ASM_PREFETCH("[%[b_ptr], #192]")
+ "umlal2 v13.8h, v2.16b, %[b1].16b\n"
+ "umlal2 v14.8h, v2.16b, %[b2].16b\n"
+ ASM_PREFETCH("[%[a_ptr], #320]")
+ "umlal2 v15.8h, v2.16b, %[b3].16b\n"
+ "ldr q2, [%[a_ptr], #96]\n"
+
+ "uadalp v24.4s, v12.8h\n"
+ "umull v12.8h, v3.8b, %[b0].8b\n"
+ "uadalp v25.4s, v13.8h\n"
+ "uadalp v26.4s, v14.8h\n"
+ "umull v13.8h, v3.8b, %[b1].8b\n"
+ "uadalp v27.4s, v15.8h\n"
+ "umull v14.8h, v3.8b, %[b2].8b\n"
+ "umull v15.8h, v3.8b, %[b3].8b\n"
+ "umlal2 v12.8h, v3.16b, %[b0].16b\n"
+ "ldr %q[b0], [%[b_ptr], #0]\n"
+ "umlal2 v13.8h, v3.16b, %[b1].16b\n"
+ "umlal2 v14.8h, v3.16b, %[b2].16b\n"
+ "umlal2 v15.8h, v3.16b, %[b3].16b\n"
+ "ldr q3, [%[a_ptr], #112]\n"
+
+ // Unroll 1
+ "uadalp v28.4s, v12.8h\n"
+ "umull v12.8h, v0.8b, %[b0a].8b\n"
+ "uadalp v29.4s, v13.8h\n"
+ "uadalp v30.4s, v14.8h\n"
+ "umull v13.8h, v0.8b, %[b1a].8b\n"
+ "uadalp v31.4s, v15.8h\n"
+ "umull v14.8h, v0.8b, %[b2a].8b\n"
+ "umull v15.8h, v0.8b, %[b3a].8b\n"
+ "ldr %q[b1], [%[b_ptr], #16]\n"
+ "umlal2 v12.8h, v0.16b, %[b0a].16b\n"
+ "umlal2 v13.8h, v0.16b, %[b1a].16b\n"
+ "ldr %q[b2], [%[b_ptr], #32]\n"
+ "umlal2 v14.8h, v0.16b, %[b2a].16b\n"
+ "umlal2 v15.8h, v0.16b, %[b3a].16b\n"
+ "ldr q0, [%[a_ptr], #128]\n"
+
+ "uadalp v16.4s, v12.8h\n"
+ "umull v12.8h, v1.8b, %[b0a].8b\n"
+ "uadalp v17.4s, v13.8h\n"
+ "uadalp v18.4s, v14.8h\n"
+ "umull v13.8h, v1.8b, %[b1a].8b\n"
+ "uadalp v19.4s, v15.8h\n"
+ "add %[a_ptr], %[a_ptr], #128\n"
+ "umull v14.8h, v1.8b, %[b2a].8b\n"
+ "umull v15.8h, v1.8b, %[b3a].8b\n"
+ "ldr %q[b3], [%[b_ptr], #48]\n"
+ "umlal2 v12.8h, v1.16b, %[b0a].16b\n"
+ "umlal2 v13.8h, v1.16b, %[b1a].16b\n"
+ "umlal2 v14.8h, v1.16b, %[b2a].16b\n"
+ "umlal2 v15.8h, v1.16b, %[b3a].16b\n"
+ "ldr q1, [%[a_ptr], #16]\n"
+
+ "uadalp v20.4s, v12.8h\n"
+ "umull v12.8h, v2.8b, %[b0a].8b\n"
+ "uadalp v21.4s, v13.8h\n"
+ "uadalp v22.4s, v14.8h\n"
+ "umull v13.8h, v2.8b, %[b1a].8b\n"
+ "uadalp v23.4s, v15.8h\n"
+ "umull v14.8h, v2.8b, %[b2a].8b\n"
+ "umull v15.8h, v2.8b, %[b3a].8b\n"
+ "umlal2 v12.8h, v2.16b, %[b0a].16b\n"
+ ASM_PREFETCH("[%[b_ptr], #256]")
+ "umlal2 v13.8h, v2.16b, %[b1a].16b\n"
+ "umlal2 v14.8h, v2.16b, %[b2a].16b\n"
+ ASM_PREFETCH("[%[a_ptr], #256]")
+ "umlal2 v15.8h, v2.16b, %[b3a].16b\n"
+ "ldr q2, [%[a_ptr], #32]\n"
+
+ "uadalp v24.4s, v12.8h\n"
+ "umull v12.8h, v3.8b, %[b0a].8b\n"
+ "uadalp v25.4s, v13.8h\n"
+ "uadalp v26.4s, v14.8h\n"
+ "umull v13.8h, v3.8b, %[b1a].8b\n"
+ "uadalp v27.4s, v15.8h\n"
+ "umull v14.8h, v3.8b, %[b2a].8b\n"
+ "umull v15.8h, v3.8b, %[b3a].8b\n"
+ "umlal2 v12.8h, v3.16b, %[b0a].16b\n"
+ "umlal2 v13.8h, v3.16b, %[b1a].16b\n"
+ "umlal2 v14.8h, v3.16b, %[b2a].16b\n"
+ "umlal2 v15.8h, v3.16b, %[b3a].16b\n"
+ "ldr q3, [%[a_ptr], #48]\n"
+
+ // Start of unroll 0 for next iteration.
+ "uadalp v28.4s, v12.8h\n"
+ "umull v12.8h, v0.8b, %[b0].8b\n"
+ "uadalp v29.4s, v13.8h\n"
+ "uadalp v30.4s, v14.8h\n"
+ "umull v13.8h, v0.8b, %[b1].8b\n"
+ "uadalp v31.4s, v15.8h\n"
+ "bne 1b\n"
+
+ // Target to use when K=1 or 2 (i.e. zero iterations of main loop)
+ "4:\n"
+
+ // Branch to alternative tail for odd K
+ "cbnz %w[oddk], 2f\n"
+
+ // Detached final iteration (even K)
+ "umull v14.8h, v0.8b, %[b2].8b\n"
+ "umull v15.8h, v0.8b, %[b3].8b\n"
+ "ldr %q[b0a], [%[b_ptr], #64]\n"
+ "umlal2 v12.8h, v0.16b, %[b0].16b\n"
+ "umlal2 v13.8h, v0.16b, %[b1].16b\n"
+ "ldr %q[b1a], [%[b_ptr], #80]\n"
+ "umlal2 v14.8h, v0.16b, %[b2].16b\n"
+ "umlal2 v15.8h, v0.16b, %[b3].16b\n"
+ "ldr q0, [%[a_ptr], #64]\n"
+
+ "uadalp v16.4s, v12.8h\n"
+ "umull v12.8h, v1.8b, %[b0].8b\n"
+ "uadalp v17.4s, v13.8h\n"
+ "uadalp v18.4s, v14.8h\n"
+ "umull v13.8h, v1.8b, %[b1].8b\n"
+ "uadalp v19.4s, v15.8h\n"
+ "umull v14.8h, v1.8b, %[b2].8b\n"
+ "ldr %q[b2a], [%[b_ptr], #96]\n"
+ "umull v15.8h, v1.8b, %[b3].8b\n"
+ "umlal2 v12.8h, v1.16b, %[b0].16b\n"
+ "ldr %q[b3a], [%[b_ptr], #112]\n"
+ "umlal2 v13.8h, v1.16b, %[b1].16b\n"
+ "add %[b_ptr], %[b_ptr], #128\n"
+ "umlal2 v14.8h, v1.16b, %[b2].16b\n"
+ "umlal2 v15.8h, v1.16b, %[b3].16b\n"
+ "ldr q1, [%[a_ptr], #80]\n"
+
+ "uadalp v20.4s, v12.8h\n"
+ "umull v12.8h, v2.8b, %[b0].8b\n"
+ "uadalp v21.4s, v13.8h\n"
+ "uadalp v22.4s, v14.8h\n"
+ "umull v13.8h, v2.8b, %[b1].8b\n"
+ "uadalp v23.4s, v15.8h\n"
+ "umull v14.8h, v2.8b, %[b2].8b\n"
+ "umull v15.8h, v2.8b, %[b3].8b\n"
+ "umlal2 v12.8h, v2.16b, %[b0].16b\n"
+ "umlal2 v13.8h, v2.16b, %[b1].16b\n"
+ "umlal2 v14.8h, v2.16b, %[b2].16b\n"
+ "umlal2 v15.8h, v2.16b, %[b3].16b\n"
+ "ldr q2, [%[a_ptr], #96]\n"
+
+ "uadalp v24.4s, v12.8h\n"
+ "umull v12.8h, v3.8b, %[b0].8b\n"
+ "uadalp v25.4s, v13.8h\n"
+ "uadalp v26.4s, v14.8h\n"
+ "umull v13.8h, v3.8b, %[b1].8b\n"
+ "uadalp v27.4s, v15.8h\n"
+ "umull v14.8h, v3.8b, %[b2].8b\n"
+ "umull v15.8h, v3.8b, %[b3].8b\n"
+ "umlal2 v12.8h, v3.16b, %[b0].16b\n"
+ "umlal2 v13.8h, v3.16b, %[b1].16b\n"
+ "umlal2 v14.8h, v3.16b, %[b2].16b\n"
+ "umlal2 v15.8h, v3.16b, %[b3].16b\n"
+ "ldr q3, [%[a_ptr], #112]\n"
+
+ // Unroll 1
+ "uadalp v28.4s, v12.8h\n"
+ "umull v12.8h, v0.8b, %[b0a].8b\n"
+ "uadalp v29.4s, v13.8h\n"
+ "uadalp v30.4s, v14.8h\n"
+ "umull v13.8h, v0.8b, %[b1a].8b\n"
+ "uadalp v31.4s, v15.8h\n"
+ "umull v14.8h, v0.8b, %[b2a].8b\n"
+ "add %[a_ptr], %[a_ptr], #128\n"
+ "umull v15.8h, v0.8b, %[b3a].8b\n"
+ "umlal2 v12.8h, v0.16b, %[b0a].16b\n"
+ "umlal2 v13.8h, v0.16b, %[b1a].16b\n"
+ "umlal2 v14.8h, v0.16b, %[b2a].16b\n"
+ "umlal2 v15.8h, v0.16b, %[b3a].16b\n"
+
+ "uadalp v16.4s, v12.8h\n"
+ "umull v12.8h, v1.8b, %[b0a].8b\n"
+ "uadalp v17.4s, v13.8h\n"
+ "uadalp v18.4s, v14.8h\n"
+ "umull v13.8h, v1.8b, %[b1a].8b\n"
+ "uadalp v19.4s, v15.8h\n"
+ "umull v14.8h, v1.8b, %[b2a].8b\n"
+ "umull v15.8h, v1.8b, %[b3a].8b\n"
+ "umlal2 v12.8h, v1.16b, %[b0a].16b\n"
+ "addp v16.4s, v16.4s, v17.4s\n"
+ "umlal2 v13.8h, v1.16b, %[b1a].16b\n"
+ "addp v17.4s, v18.4s, v19.4s\n"
+ "umlal2 v14.8h, v1.16b, %[b2a].16b\n"
+ "umlal2 v15.8h, v1.16b, %[b3a].16b\n"
+
+ "uadalp v20.4s, v12.8h\n"
+ "umull v12.8h, v2.8b, %[b0a].8b\n"
+ "uadalp v21.4s, v13.8h\n"
+ "uadalp v22.4s, v14.8h\n"
+ "umull v13.8h, v2.8b, %[b1a].8b\n"
+ "uadalp v23.4s, v15.8h\n"
+ "addp v16.4s, v16.4s, v17.4s\n"
+ "umull v14.8h, v2.8b, %[b2a].8b\n"
+ "addp v18.4s, v20.4s, v21.4s\n"
+ "addp v19.4s, v22.4s, v23.4s\n"
+ "umull v15.8h, v2.8b, %[b3a].8b\n"
+ "umlal2 v12.8h, v2.16b, %[b0a].16b\n"
+ "str q16, [%[c_ptr]]\n"
+ "umlal2 v13.8h, v2.16b, %[b1a].16b\n"
+ "umlal2 v14.8h, v2.16b, %[b2a].16b\n"
+ "umlal2 v15.8h, v2.16b, %[b3a].16b\n"
+
+ "uadalp v24.4s, v12.8h\n"
+ "umull v12.8h, v3.8b, %[b0a].8b\n"
+ "uadalp v25.4s, v13.8h\n"
+ "uadalp v26.4s, v14.8h\n"
+ "umull v13.8h, v3.8b, %[b1a].8b\n"
+ "uadalp v27.4s, v15.8h\n"
+ "addp v17.4s, v18.4s, v19.4s\n"
+ "umull v14.8h, v3.8b, %[b2a].8b\n"
+ "addp v20.4s, v24.4s, v25.4s\n"
+ "addp v21.4s, v26.4s, v27.4s\n"
+ "umull v15.8h, v3.8b, %[b3a].8b\n"
+ "umlal2 v12.8h, v3.16b, %[b0a].16b\n"
+ "str q17, [%[c_ptr], #16]\n"
+ "umlal2 v13.8h, v3.16b, %[b1a].16b\n"
+ "umlal2 v14.8h, v3.16b, %[b2a].16b\n"
+ "addp v18.4s, v20.4s, v21.4s\n"
+ "umlal2 v15.8h, v3.16b, %[b3a].16b\n"
+ "b 3f\n"
+
+ // Detached final iteration (odd K)
+ "2:\n"
+ "umull v14.8h, v0.8b, %[b2].8b\n"
+ "add %[a_ptr], %[a_ptr], #64\n"
+ "umull v15.8h, v0.8b, %[b3].8b\n"
+ "add %[b_ptr], %[b_ptr], #64\n"
+ "umlal2 v12.8h, v0.16b, %[b0].16b\n"
+ "umlal2 v13.8h, v0.16b, %[b1].16b\n"
+ "umlal2 v14.8h, v0.16b, %[b2].16b\n"
+ "umlal2 v15.8h, v0.16b, %[b3].16b\n"
+
+ "uadalp v16.4s, v12.8h\n"
+ "umull v12.8h, v1.8b, %[b0].8b\n"
+ "uadalp v17.4s, v13.8h\n"
+ "uadalp v18.4s, v14.8h\n"
+ "umull v13.8h, v1.8b, %[b1].8b\n"
+ "uadalp v19.4s, v15.8h\n"
+ "umull v14.8h, v1.8b, %[b2].8b\n"
+ "umull v15.8h, v1.8b, %[b3].8b\n"
+ "umlal2 v12.8h, v1.16b, %[b0].16b\n"
+ "addp v16.4s, v16.4s, v17.4s\n"
+ "umlal2 v13.8h, v1.16b, %[b1].16b\n"
+ "addp v17.4s, v18.4s, v19.4s\n"
+ "umlal2 v14.8h, v1.16b, %[b2].16b\n"
+ "umlal2 v15.8h, v1.16b, %[b3].16b\n"
+
+ "uadalp v20.4s, v12.8h\n"
+ "umull v12.8h, v2.8b, %[b0].8b\n"
+ "uadalp v21.4s, v13.8h\n"
+ "uadalp v22.4s, v14.8h\n"
+ "umull v13.8h, v2.8b, %[b1].8b\n"
+ "uadalp v23.4s, v15.8h\n"
+ "addp v16.4s, v16.4s, v17.4s\n"
+ "umull v14.8h, v2.8b, %[b2].8b\n"
+ "addp v18.4s, v20.4s, v21.4s\n"
+ "addp v19.4s, v22.4s, v23.4s\n"
+ "umull v15.8h, v2.8b, %[b3].8b\n"
+ "umlal2 v12.8h, v2.16b, %[b0].16b\n"
+ "str q16, [%[c_ptr]]\n"
+ "umlal2 v13.8h, v2.16b, %[b1].16b\n"
+ "umlal2 v14.8h, v2.16b, %[b2].16b\n"
+ "umlal2 v15.8h, v2.16b, %[b3].16b\n"
+
+ "uadalp v24.4s, v12.8h\n"
+ "umull v12.8h, v3.8b, %[b0].8b\n"
+ "uadalp v25.4s, v13.8h\n"
+ "uadalp v26.4s, v14.8h\n"
+ "umull v13.8h, v3.8b, %[b1].8b\n"
+ "uadalp v27.4s, v15.8h\n"
+ "addp v17.4s, v18.4s, v19.4s\n"
+ "umull v14.8h, v3.8b, %[b2].8b\n"
+ "addp v20.4s, v24.4s, v25.4s\n"
+ "addp v21.4s, v26.4s, v27.4s\n"
+ "umull v15.8h, v3.8b, %[b3].8b\n"
+ "umlal2 v12.8h, v3.16b, %[b0].16b\n"
+ "str q17, [%[c_ptr], #16]\n"
+ "umlal2 v13.8h, v3.16b, %[b1].16b\n"
+ "umlal2 v14.8h, v3.16b, %[b2].16b\n"
+ "addp v18.4s, v20.4s, v21.4s\n"
+ "umlal2 v15.8h, v3.16b, %[b3].16b\n"
+
+ "3:\n"
+
+ // Final additions
+ "uadalp v28.4s, v12.8h\n"
+ "str q18, [%[c_ptr], #32]\n"
+ "uadalp v29.4s, v13.8h\n"
+ "uadalp v30.4s, v14.8h\n"
+ "uadalp v31.4s, v15.8h\n"
+
+ // Horizontal reduction, phase 1
+ "addp v22.4s, v28.4s, v29.4s\n"
+ "addp v23.4s, v30.4s, v31.4s\n"
+
+ // Horizontal reduction, phase 2
+ "addp v19.4s, v22.4s, v23.4s\n"
+ "str q19, [%[c_ptr], #48]\n"
+ "add %[c_ptr], %[c_ptr], #64\n"
+
+ :
+ [a_ptr] "+r" (a_ptr), [b_ptr] "+r" (b_ptr), [c_ptr] "+r" (c_ptr),
+ [b0] "+w" (b0), [b1] "+w" (b1), [b2] "+w" (b2), [b3] "+w" (b3),
+ [b0a] "+w" (b0a), [b1a] "+w" (b1a), [b2a] "+w" (b2a), [b3a] "+w" (b3a),
+ [k] "+r" (k)
+ : [oddk] "r" (oddk)
+ : "x20", "x21", "v0","v1","v2","v3","v12","v13","v14","v15","v16","v17","v18","v19",
+ "v20","v21","v22","v23","v24","v25","v26","v27","v28","v29","v30","v31", "cc");
+ }
+ }
+}
+
+#endif // __aarch64__
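Note: for readers who do not want to trace the NEON registers, here is a minimal scalar sketch of the contract the assembly above implements, assuming the panel layout declared by gemm_u8_4x4 (4 interleaved rows/columns per block, 16-deep along K) and assuming the packing stage has already padded K to a multiple of 16. gemm_u8_4x4_ref is a hypothetical name, not part of the patch.

    #include <cstdint>

    // Scalar model of a64_gemm_u8_4x4: each output block is a 4x4 tile of
    // uint32_t dot products between one interleaved block of A rows and one
    // interleaved block of B columns.
    inline void gemm_u8_4x4_ref(const uint8_t *Apanel, const uint8_t *Bpanel,
                                uint32_t *Cpanel, int ablocks, int bblocks, int K) {
        const uint8_t *a_ptr = Apanel;
        uint32_t *c_ptr = Cpanel;

        for (int yb = 0; yb < ablocks; yb++) {
            const uint8_t *a_ptr0 = a_ptr;
            const uint8_t *b_ptr  = Bpanel;

            for (int xb = 0; xb < bblocks; xb++) {
                a_ptr = a_ptr0;
                uint32_t acc[4][4] = {};

                // Panels hold 16 consecutive k values (A_block/B_block) for each
                // of the 4 interleaved rows (A) or columns (B).
                for (int kb = 0; kb < K; kb += 16) {
                    for (int i = 0; i < 4; i++)
                        for (int j = 0; j < 4; j++)
                            for (int k = 0; k < 16; k++)
                                acc[i][j] += static_cast<uint32_t>(a_ptr[i * 16 + k]) *
                                             static_cast<uint32_t>(b_ptr[j * 16 + k]);
                    a_ptr += 4 * 16;
                    b_ptr += 4 * 16;
                }

                // The 4x4 tile is written out row-major; c_ptr advances by 16 values.
                for (int i = 0; i < 4; i++)
                    for (int j = 0; j < 4; j++)
                        *c_ptr++ = acc[i][j];
            }
        }
    }

The inline assembly produces the same 16 sums per tile; its umull/umlal2/uadalp/addp structure is a throughput optimisation for A57-class cores, as the comment at the top of the loop explains.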
diff --git a/src/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.cpp b/src/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.cpp
index 2bc251e91f..208a60ce27 100644
--- a/src/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.cpp
+++ b/src/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.cpp
@@ -49,7 +49,7 @@ namespace
{
Error validate_arguments(const ITensorInfo *input0, const ITensorInfo *input1, const ITensorInfo *output)
{
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input0, 1, DataType::QASYMM8, DataType::S8);
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input0, 1, DataType::QASYMM8, DataType::S8, DataType::U8);
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input0, input1);
ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::S32);
diff --git a/src/core/NEON/kernels/arm64/NEGEMMLowpAArch64Kernel.cpp b/src/core/NEON/kernels/arm64/NEGEMMLowpAArch64Kernel.cpp
index b75a8ab251..db37201687 100644
--- a/src/core/NEON/kernels/arm64/NEGEMMLowpAArch64Kernel.cpp
+++ b/src/core/NEON/kernels/arm64/NEGEMMLowpAArch64Kernel.cpp
@@ -39,6 +39,7 @@ namespace arm_compute
{
#include "arm_compute/core/NEON/kernels/assembly/gemm_interleaved.hpp"
#include "arm_compute/core/NEON/kernels/assembly/kernels/a64_gemm_s8_4x4.hpp"
+#include "arm_compute/core/NEON/kernels/assembly/kernels/a64_gemm_u8_4x4.hpp"
} // namespace arm_compute
#include <arm_neon.h>
@@ -50,10 +51,101 @@ namespace arm_compute
namespace arm_compute
{
+NEGEMMLowpAArch64Kernel::NEGEMMLowpAArch64Kernel()
+ : _func(nullptr)
+{
+}
+
+void gemm_interleaved_s8(const ITensor *input0, const ITensor *input1, ITensor *output, ITensor *workspace, float alpha, float beta, bool transform_0, bool transform_1, const Window &window,
+ const ThreadInfo &info)
+{
+ const int lda = input0->info()->strides_in_bytes().y();
+ const int ldb = input1->info()->strides_in_bytes().y();
+ const int ldc = output->info()->strides_in_bytes().y() / sizeof(int32_t);
+
+ const auto in1_ptr = reinterpret_cast<const int8_t *>(input1->buffer());
+
+ const int M = std::min(output->info()->tensor_shape().y(), static_cast<size_t>(window.y().end())) - window.y().start();
+ const int N = output->info()->tensor_shape().x();
+ const int K = input0->info()->tensor_shape().x();
+
+ // Only iterate over batches
+ Window win(window);
+ win.set(0, Window::Dimension(0, 1, 1));
+ win.set(1, Window::Dimension(0, 1, 1));
+
+ Iterator in0(input0, window);
+ Iterator out(output, window);
+
+ GemmInterleaved<gemm_s8_4x4, int8_t, int32_t> gemm(&info.cpu_info, M, N, K, !transform_1, !transform_1);
+
+ constexpr size_t alignment = 4096;
+ const size_t offset = (gemm.get_working_size() + alignment - 1) * info.thread_id;
+ void *_workspace = workspace->buffer() + offset;
+ size_t workspace_size = workspace->info()->total_size();
+
+ if(support::cpp11::align(alignment, gemm.get_working_size(), _workspace, workspace_size) == nullptr)
+ {
+ ARM_COMPUTE_ERROR("Not enough space to align buffer!");
+ }
+
+ execute_window_loop(win, [&](const Coordinates & id)
+ {
+ gemm.execute(reinterpret_cast<const int8_t *>(in0.ptr()), lda,
+ reinterpret_cast<const int8_t *>(in1_ptr), ldb,
+ reinterpret_cast<int32_t *>(out.ptr()), ldc,
+ alpha, beta, _workspace);
+ },
+ in0, out);
+}
+
+void gemm_interleaved_u8(const ITensor *input0, const ITensor *input1, ITensor *output, ITensor *workspace, float alpha, float beta, bool transform_0, bool transform_1, const Window &window,
+ const ThreadInfo &info)
+{
+ const int lda = input0->info()->strides_in_bytes().y();
+ const int ldb = input1->info()->strides_in_bytes().y();
+ const int ldc = output->info()->strides_in_bytes().y() / sizeof(uint32_t);
+
+ const auto in1_ptr = reinterpret_cast<const uint8_t *>(input1->buffer());
+
+ const int M = std::min(output->info()->tensor_shape().y(), static_cast<size_t>(window.y().end())) - window.y().start();
+ const int N = output->info()->tensor_shape().x();
+ const int K = input0->info()->tensor_shape().x();
+
+ // Only iterate over batches
+ Window win(window);
+ win.set(0, Window::Dimension(0, 1, 1));
+ win.set(1, Window::Dimension(0, 1, 1));
+
+ Iterator in0(input0, window);
+ Iterator out(output, window);
+
+ GemmInterleaved<gemm_u8_4x4, uint8_t, uint32_t> gemm(&info.cpu_info, M, N, K, !transform_1, !transform_1);
+
+ constexpr size_t alignment = 4096;
+ const size_t offset = (gemm.get_working_size() + alignment - 1) * info.thread_id;
+ void *_workspace = workspace->buffer() + offset;
+ size_t workspace_size = workspace->info()->total_size();
+
+ if(support::cpp11::align(alignment, gemm.get_working_size(), _workspace, workspace_size) == nullptr)
+ {
+ ARM_COMPUTE_ERROR("Not enough space to align buffer!");
+ }
+
+ execute_window_loop(win, [&](const Coordinates & id)
+ {
+ gemm.execute(reinterpret_cast<const uint8_t *>(in0.ptr()), lda,
+ reinterpret_cast<const uint8_t *>(in1_ptr), ldb,
+ reinterpret_cast<uint32_t *>(out.ptr()), ldc,
+ alpha, beta, _workspace);
+ },
+ in0, out);
+}
+
void NEGEMMLowpAArch64Kernel::internal_configure(const ITensor *input0, const ITensor *input1, ITensor *output, ITensor *workspace, float alpha, float beta, bool transform_0, bool transform_1)
{
- ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input0, 1, DataType::S8);
- ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::S32);
+ ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input0, 1, DataType::S8, DataType::U8);
+ ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::S32, DataType::U32);
ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input0, input1);
_input0 = input0;
@@ -65,6 +157,19 @@ void NEGEMMLowpAArch64Kernel::internal_configure(const ITensor *input0, const IT
_transform_0 = transform_0;
_transform_1 = transform_1;
+ switch(input0->info()->data_type())
+ {
+ case DataType::S8:
+ _func = &gemm_interleaved_s8;
+ break;
+ case DataType::U8:
+ _func = &gemm_interleaved_u8;
+ break;
+ default:
+ ARM_COMPUTE_ERROR("Element size not supported");
+ break;
+ }
+
// Configure kernel window
Window win = calculate_max_window(*output->info());
@@ -85,45 +190,9 @@ void NEGEMMLowpAArch64Kernel::run(const Window &window, const ThreadInfo &info)
{
ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INEKernel::window(), window);
+ ARM_COMPUTE_ERROR_ON(_func == nullptr);
- const int lda = _input0->info()->strides_in_bytes().y();
- const int ldb = _input1->info()->strides_in_bytes().y();
- const int ldc = _output->info()->strides_in_bytes().y() / sizeof(int32_t);
-
- const auto in1_ptr = reinterpret_cast<const int8_t *>(_input1->buffer());
-
- const int M = std::min(_output->info()->tensor_shape().y(), static_cast<size_t>(window.y().end())) - window.y().start();
- const int N = _output->info()->tensor_shape().x();
- const int K = _input0->info()->tensor_shape().x();
-
- // Only iterate over batches
- Window win(window);
- win.set(0, Window::Dimension(0, 1, 1));
- win.set(1, Window::Dimension(0, 1, 1));
-
- Iterator in0(_input0, window);
- Iterator out(_output, window);
-
- GemmInterleaved<gemm_s8_4x4, int8_t, int32_t> gemm(&info.cpu_info, M, N, K, !_transform_1, !_transform_1);
-
- constexpr size_t alignment = 4096;
- const size_t offset = (gemm.get_working_size() + alignment - 1) * info.thread_id;
- void *workspace = _workspace->buffer() + offset;
- size_t workspace_size = _workspace->info()->total_size();
-
- if(support::cpp11::align(alignment, gemm.get_working_size(), workspace, workspace_size) == nullptr)
- {
- ARM_COMPUTE_ERROR("Not enough space to align buffer!");
- }
-
- execute_window_loop(win, [&](const Coordinates & id)
- {
- gemm.execute(reinterpret_cast<const int8_t *>(in0.ptr()), lda,
- reinterpret_cast<const int8_t *>(in1_ptr), ldb,
- reinterpret_cast<int32_t *>(out.ptr()), ldc,
- _alpha, _beta, workspace);
- },
- in0, out);
+ (*_func)(_input0, _input1, _output, _workspace, _alpha, _beta, _transform_0, _transform_1, window, info);
}
} // namespace arm_compute
#endif /* ARM_COMPUTE_AARCH64_V8A */
diff --git a/src/runtime/NEON/functions/NEGEMMLowpAssemblyMatrixMultiplyCore.cpp b/src/runtime/NEON/functions/NEGEMMLowpAssemblyMatrixMultiplyCore.cpp
index 708daeb265..1bf437eb5f 100644
--- a/src/runtime/NEON/functions/NEGEMMLowpAssemblyMatrixMultiplyCore.cpp
+++ b/src/runtime/NEON/functions/NEGEMMLowpAssemblyMatrixMultiplyCore.cpp
@@ -43,6 +43,7 @@ namespace arm_compute
#include "arm_compute/core/NEON/kernels/assembly/gemm_interleaved.hpp"
#include "arm_compute/core/NEON/kernels/assembly/kernels/a64_gemm_s8_12x8.hpp"
#include "arm_compute/core/NEON/kernels/assembly/kernels/a64_gemm_s8_4x4.hpp"
+#include "arm_compute/core/NEON/kernels/assembly/kernels/a64_gemm_u8_4x4.hpp"
} // namespace arm_compute
@@ -55,8 +56,8 @@ NEGEMMLowpAssemblyMatrixMultiplyCore::NEGEMMLowpAssemblyMatrixMultiplyCore(std::
void NEGEMMLowpAssemblyMatrixMultiplyCore::configure(const ITensor *a, const ITensor *b, ITensor *output)
{
- ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(a, 1, DataType::S8);
- ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::S32);
+ ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(a, 1, DataType::U8, DataType::S8);
+ ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::U32, DataType::S32);
ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(a, b);
ARM_COMPUTE_ERROR_ON_MSG((a)->info()->dimension(0) != (b)->info()->dimension(1), "The product AB is defined only if the number of columns in A is equal to the number of rows in B");
ARM_COMPUTE_ERROR_ON_MSG((a)->info()->dimension(1) != (output)->info()->dimension(1), "The output matrix must have the same number of rows as the matrix A");
@@ -92,9 +93,25 @@ void NEGEMMLowpAssemblyMatrixMultiplyCore::configure(const ITensor *a, const ITe
#elif defined(ARM_COMPUTE_AARCH64_V8A)
if(1)
{
- // Configure matrix multiply kernel
- GemmInterleaved<gemm_s8_4x4, int8_t, int32_t> gemm(&ci, M, N, K, false, false);
- _workspace.allocator()->init(TensorInfo(TensorShape{ (gemm.get_working_size() + workspace_alignment - 1) * NEScheduler::get().num_threads() }, 1, DataType::U8));
+ switch(a->info()->data_type())
+ {
+ case DataType::S8:
+ {
+ // Configure matrix multiply kernel
+ GemmInterleaved<gemm_s8_4x4, int8_t, int32_t> gemm(&ci, M, N, K, false, false);
+ _workspace.allocator()->init(TensorInfo(TensorShape{ (gemm.get_working_size() + workspace_alignment - 1) * NEScheduler::get().num_threads() }, 1, DataType::U8));
+ }
+ break;
+ case DataType::U8:
+ {
+ // Configure matrix multiply kernel
+ GemmInterleaved<gemm_u8_4x4, uint8_t, uint32_t> gemm(&ci, M, N, K, false, false);
+ _workspace.allocator()->init(TensorInfo(TensorShape{ (gemm.get_working_size() + workspace_alignment - 1) * NEScheduler::get().num_threads() }, 1, DataType::U8));
+ }
+ break;
+ default:
+ ARM_COMPUTE_ERROR("Datatype not supported");
+ }
_memory_group.manage(&_workspace);
// Configure matrix multiplication kernel
auto k = arm_compute::support::cpp14::make_unique<NEGEMMLowpAArch64Kernel>();
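Note: the configure() code above and the per-thread wrappers in NEGEMMLowpAArch64Kernel.cpp rely on the same workspace sizing rule: each thread gets a slab of get_working_size() + workspace_alignment - 1 bytes, the smallest slab that still guarantees get_working_size() usable bytes after std::align rounds the slab start up to a 4096-byte boundary. A small self-contained illustration of that arithmetic follows; the sizes are made up and do not come from the patch.

    #include <cassert>
    #include <cstddef>
    #include <memory>

    int main()
    {
        constexpr size_t alignment    = 4096;   // same alignment constant as the kernel
        constexpr size_t working_size = 10000;  // hypothetical per-thread requirement
        constexpr size_t slab         = working_size + alignment - 1;
        constexpr int    num_threads  = 4;

        // Mirrors the TensorShape{ (working_size + alignment - 1) * num_threads } allocation.
        std::unique_ptr<char[]> buffer(new char[slab * num_threads]);

        for (int tid = 0; tid < num_threads; ++tid)
        {
            void  *ptr   = buffer.get() + slab * tid; // start of thread tid's slab
            size_t space = slab;
            // Rounding up can skip at most alignment - 1 bytes, so working_size
            // bytes always remain inside the slab and slabs never overlap.
            assert(std::align(alignment, working_size, ptr, space) != nullptr);
        }
        return 0;
    }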
diff --git a/tests/validation/CPP/GEMMLowp.cpp b/tests/validation/CPP/GEMMLowp.cpp
index bf002cf2b5..35b8a6486e 100644
--- a/tests/validation/CPP/GEMMLowp.cpp
+++ b/tests/validation/CPP/GEMMLowp.cpp
@@ -63,19 +63,21 @@ void quantize_down_int32_to_uint8_scale(const SimpleTensor<T> *in, const SimpleT
}
} // namespace
-template <typename T>
-SimpleTensor<int32_t> gemmlowp_matrix_multiply_core(const SimpleTensor<T> &a, const SimpleTensor<T> &b, int32_t a_offset, int32_t b_offset)
+template <typename T_out, typename T_in>
+SimpleTensor<T_out> gemmlowp_matrix_multiply_core(const SimpleTensor<T_in> &a, const SimpleTensor<T_in> &b, int32_t a_offset, int32_t b_offset)
{
- TensorShape shape(b.shape()[0], a.shape()[1]);
+ static_assert(std::is_same<typename std::decay<T_out>::type, int32_t>::value, "Only int32_t is allowed for the output");
- SimpleTensor<int32_t> c(shape, DataType::S32);
+ TensorShape shape(b.shape()[0], a.shape()[1]);
+ DataType dt = std::is_same<T_out, int32_t>::value ? DataType::S32 : DataType::U32;
+ SimpleTensor<T_out> c(shape, dt);
const int K = a.shape().x();
const int b_width = b.shape().x();
const int rows = c.shape().y(); //M
const int cols = c.shape().x(); //N
- std::vector<int32_t> acc;
+ std::vector<T_out> acc;
acc.resize(cols);
for(int i = 0; i < rows; ++i)
@@ -86,11 +88,11 @@ SimpleTensor<int32_t> gemmlowp_matrix_multiply_core(const SimpleTensor<T> &a, co
}
for(int k = 0; k < K; ++k)
{
- const int32_t tmp_a = a_offset + static_cast<int32_t>(a[k + i * K]);
+ const T_out tmp_a = a_offset + static_cast<T_out>(a[k + i * K]);
for(int j = 0; j < b_width; ++j)
{
- const int32_t tmp_b = b_offset + static_cast<int32_t>(b[j + k * b_width]);
- const int32_t mult_as_int = tmp_a * tmp_b;
+ const T_out tmp_b = b_offset + static_cast<T_out>(b[j + k * b_width]);
+ const T_out mult_as_int = tmp_a * tmp_b;
acc[j] += mult_as_int;
}
}
@@ -104,9 +106,10 @@ SimpleTensor<int32_t> gemmlowp_matrix_multiply_core(const SimpleTensor<T> &a, co
}
// used to validate assembly kernels which don't know anything about offsets
-SimpleTensor<int32_t> gemmlowp(const SimpleTensor<int8_t> &a, const SimpleTensor<int8_t> &b)
+template <typename T1, typename T2>
+SimpleTensor<T1> gemmlowp(const SimpleTensor<T2> &a, const SimpleTensor<T2> &b)
{
- return gemmlowp_matrix_multiply_core(a, b, 0, 0);
+ return gemmlowp_matrix_multiply_core<T1, T2>(a, b, 0, 0);
}
template <typename T>
@@ -130,11 +133,14 @@ SimpleTensor<uint8_t> gemmlowp_quantize_down_int32_to_uint8_scale(const SimpleTe
return dst;
}
-template SimpleTensor<int32_t> gemmlowp_matrix_multiply_core(const SimpleTensor<uint8_t> &a, const SimpleTensor<uint8_t> &b, int32_t a_offset, int32_t b_offset);
template SimpleTensor<uint8_t> gemmlowp_quantize_down_int32_to_uint8_scale(const SimpleTensor<int32_t> &a, int32_t result_offset, int32_t result_mult_int, int32_t result_shift, int32_t min,
int32_t max);
template SimpleTensor<uint8_t> gemmlowp_quantize_down_int32_to_uint8_scale(const SimpleTensor<int32_t> &a, const SimpleTensor<int32_t> &b, int32_t result_offset, int32_t result_mult_int,
int32_t result_shift, int32_t min, int32_t max);
+template SimpleTensor<int32_t> gemmlowp_matrix_multiply_core(const SimpleTensor<int8_t> &a, const SimpleTensor<int8_t> &b, int32_t a_offset, int32_t b_offset);
+template SimpleTensor<int32_t> gemmlowp_matrix_multiply_core(const SimpleTensor<uint8_t> &a, const SimpleTensor<uint8_t> &b, int32_t a_offset, int32_t b_offset);
+template SimpleTensor<int32_t> gemmlowp(const SimpleTensor<int8_t> &a, const SimpleTensor<int8_t> &b);
+template SimpleTensor<int32_t> gemmlowp(const SimpleTensor<uint8_t> &a, const SimpleTensor<uint8_t> &b);
} // namespace reference
} // namespace validation
} // namespace test
diff --git a/tests/validation/CPP/GEMMLowp.h b/tests/validation/CPP/GEMMLowp.h
index ee33d8e0c0..6c72b56e7a 100644
--- a/tests/validation/CPP/GEMMLowp.h
+++ b/tests/validation/CPP/GEMMLowp.h
@@ -35,13 +35,16 @@ namespace validation
{
namespace reference
{
-SimpleTensor<int32_t> gemmlowp(const SimpleTensor<int8_t> &a, const SimpleTensor<int8_t> &b);
-
template <typename T>
-SimpleTensor<int32_t> gemmlowp_matrix_multiply_core(const SimpleTensor<T> &a, const SimpleTensor<T> &b, int32_t a_offset, int32_t b_offset);
+SimpleTensor<uint8_t> gemmlowp_quantize_down_int32_to_uint8_scale(const SimpleTensor<T> &in, int32_t result_offset, int32_t result_mult_int, int32_t result_shift, int32_t min = 0, int32_t max = 0);
+template <typename T1, typename T2>
+SimpleTensor<T1> gemmlowp_matrix_multiply_core(const SimpleTensor<T2> &a, const SimpleTensor<T2> &b, int32_t a_offset, int32_t b_offset);
template <typename T>
-SimpleTensor<uint8_t> gemmlowp_quantize_down_int32_to_uint8_scale(const SimpleTensor<T> &in, int32_t result_offset, int32_t result_mult_int, int32_t result_shift, int32_t min = 0, int32_t max = 0);
+SimpleTensor<uint8_t> gemmlowp_quantize_down_int32_to_uint8_scale(const SimpleTensor<T> &in, int32_t result_offset, int32_t result_mult_int, int32_t result_shift);
+
+template <typename T1, typename T2>
+SimpleTensor<T1> gemmlowp(const SimpleTensor<T2> &a, const SimpleTensor<T2> &b);
template <typename T>
SimpleTensor<uint8_t> gemmlowp_quantize_down_int32_to_uint8_scale(const SimpleTensor<T> &in, const SimpleTensor<T> &bias, int32_t result_offset, int32_t result_mult_int, int32_t result_shift,
diff --git a/tests/validation/NEON/GEMMLowp.cpp b/tests/validation/NEON/GEMMLowp.cpp
index 1418578a51..6366223820 100644
--- a/tests/validation/NEON/GEMMLowp.cpp
+++ b/tests/validation/NEON/GEMMLowp.cpp
@@ -58,14 +58,27 @@ const auto data_matrix_multiply = framework::dataset::make("M", 12, 20) * framew
TEST_SUITE(NEON)
TEST_SUITE(ASSEMBLY_MATRIX_MULTIPLY)
-using NEGEMMAssemblyFixture = GEMMLowpAssemblyFixture<Tensor, Accessor, NEGEMMLowpAssemblyMatrixMultiplyCore>;
-FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMAssemblyFixture, framework::DatasetMode::PRECOMMIT, data_matrix_multiply)
+
+using NEGEMMAssemblyFixture_S8 = GEMMLowpAssemblyFixture<Tensor, Accessor, NEGEMMLowpAssemblyMatrixMultiplyCore, int8_t>;
+using NEGEMMAssemblyFixture_U8 = GEMMLowpAssemblyFixture<Tensor, Accessor, NEGEMMLowpAssemblyMatrixMultiplyCore, uint8_t>;
+
+TEST_SUITE(S8)
+FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMAssemblyFixture_S8, framework::DatasetMode::PRECOMMIT, data_matrix_multiply)
{
// Validate output
validate(Accessor(_target), _reference);
}
TEST_SUITE_END()
+TEST_SUITE(U8)
+FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMAssemblyFixture_U8, framework::DatasetMode::PRECOMMIT, data_matrix_multiply)
+{
+ // Validate output
+ validate(Accessor(_target), _reference);
+}
+TEST_SUITE_END()
+TEST_SUITE_END()
+
TEST_SUITE(GEMMLowp)
TEST_SUITE(INTERLEAVE_BLOCKED)
diff --git a/tests/validation/fixtures/GEMMLowpAssemblyFixture.h b/tests/validation/fixtures/GEMMLowpAssemblyFixture.h
index a2587440fb..38e08f7992 100644
--- a/tests/validation/fixtures/GEMMLowpAssemblyFixture.h
+++ b/tests/validation/fixtures/GEMMLowpAssemblyFixture.h
@@ -42,7 +42,7 @@ namespace test
{
namespace validation
{
-template <typename TensorType, typename AccessorType, typename FunctionType>
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T2>
class GEMMLowpAssemblyFixture : public framework::Fixture
{
public:
@@ -66,9 +66,11 @@ protected:
TensorType compute_target(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_c)
{
+ DataType dt_in = std::is_same<T2, int8_t>::value ? DataType::S8 : DataType::U8;
+
// Create tensors
- TensorType a = create_tensor<TensorType>(shape_a, DataType::S8, 1);
- TensorType b = create_tensor<TensorType>(shape_b, DataType::S8, 1);
+ TensorType a = create_tensor<TensorType>(shape_a, dt_in, 1);
+ TensorType b = create_tensor<TensorType>(shape_b, dt_in, 1);
TensorType c = create_tensor<TensorType>(shape_c, DataType::S32, 1);
// Create and configure function
@@ -89,8 +91,16 @@ protected:
ARM_COMPUTE_EXPECT(!c.info()->is_resizable(), framework::LogLevel::ERRORS);
// Fill tensors
- fill(AccessorType(a), 0, -128, 127);
- fill(AccessorType(b), 1, -128, 127);
+ if(dt_in == DataType::S8)
+ {
+ fill(AccessorType(a), 0, -128, 127);
+ fill(AccessorType(b), 1, -128, 127);
+ }
+ else
+ {
+ fill(AccessorType(a), 0, 0, 128);
+ fill(AccessorType(b), 1, 0, 128);
+ }
fill(AccessorType(c), 2, 0, 0);
// Compute GEMM function
@@ -100,15 +110,25 @@ protected:
SimpleTensor<int32_t> compute_reference(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_c)
{
+ DataType dt = std::is_same<T2, int8_t>::value ? DataType::S8 : DataType::U8;
+
// Create reference
- SimpleTensor<int8_t> a{ shape_a, DataType::S8, 1 };
- SimpleTensor<int8_t> b{ shape_b, DataType::S8, 1 };
+ SimpleTensor<T2> a{ shape_a, dt, 1 };
+ SimpleTensor<T2> b{ shape_b, dt, 1 };
// Fill reference
- fill(a, 0, -128, 127);
- fill(b, 1, -128, 127);
-
- return reference::gemmlowp(a, b);
+ if(dt == DataType::S8)
+ {
+ fill(a, 0, -128, 127);
+ fill(b, 1, -128, 127);
+ }
+ else
+ {
+ fill(a, 0, 0, 128);
+ fill(b, 1, 0, 128);
+ }
+
+ return reference::gemmlowp<int32_t, T2>(a, b);
}
TensorType _target{};
diff --git a/tests/validation/fixtures/GEMMLowpFixture.h b/tests/validation/fixtures/GEMMLowpFixture.h
index a99e9323c8..60b89bc653 100644
--- a/tests/validation/fixtures/GEMMLowpFixture.h
+++ b/tests/validation/fixtures/GEMMLowpFixture.h
@@ -110,7 +110,7 @@ protected:
fill(a, 0);
fill(b, 1);
- return reference::gemmlowp_matrix_multiply_core<uint8_t>(a, b, a_offset, b_offset);
+ return reference::gemmlowp_matrix_multiply_core<int32_t, uint8_t>(a, b, a_offset, b_offset);
}
TensorType _target{};