 SConscript                                                                                           |   2
 arm_compute/core/NEON/kernels/assembly/transforms/a32_interleave_6way_32bit.hpp                      |   7
 arm_compute/core/NEON/kernels/assembly/transforms/a32_transpose_interleave_8way_32bit.hpp            |   2
 arm_compute/core/NEON/kernels/assembly/transforms/a64_block16_interleave4_8bit.hpp                   | 120
 arm_compute/core/NEON/kernels/assembly/transforms/a64_interleave_8way_16bit.hpp                      |   7
 arm_compute/core/NEON/kernels/assembly/transforms/a64_interleave_8way_32bit.hpp                      |  21
 arm_compute/core/NEON/kernels/assembly/transforms/a64_interleave_8way_half_to_float.hpp              | 189
 arm_compute/core/NEON/kernels/assembly/transforms/a64_transpose_interleave_12way_16bit.hpp           | 145
 arm_compute/core/NEON/kernels/assembly/transforms/a64_transpose_interleave_12way_half_to_float.hpp   | 120
 arm_compute/core/NEON/kernels/assembly/transforms/a64_transpose_interleave_24way_16bit.hpp           |   2
 arm_compute/core/NEON/kernels/assembly/transforms/list.hpp                                           |   9
 arm_compute/core/NEON/kernels/assembly/transforms/transpose_interleave_common.hpp                    |   2
 12 files changed, 598 insertions(+), 28 deletions(-)
diff --git a/SConscript b/SConscript
index 80b88ce095..d813033676 100644
--- a/SConscript
+++ b/SConscript
@@ -178,7 +178,7 @@ if env['neon']:
# build winograd sources for either v7a / v8a
core_files += Glob('src/core/NEON/kernels/winograd/*.cpp')
core_files += Glob('src/core/NEON/kernels/winograd/transforms/*.cpp')
- arm_compute_env.Append(CPPPATH = ["arm_compute/core/NEON/kernels/winograd/"])
+ arm_compute_env.Append(CPPPATH = ["arm_compute/core/NEON/kernels/winograd/", "arm_compute/core/NEON/kernels/assembly/"])
if env['arch'] == "armv7a":
core_files += Glob('src/core/NEON/kernels/arm32/*.cpp')
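The new include path is what lets every transform header below drop its relative include of the shared assembly macros; a header in transforms/ now resolves the plain name through the CPPPATH entry added here. In effect:

// Before: each transform header reached up one directory level.
// #include "../asmlib.hpp"
// After: resolved via the CPPPATH entry added above.
#include "asmlib.hpp"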
diff --git a/arm_compute/core/NEON/kernels/assembly/transforms/a32_interleave_6way_32bit.hpp b/arm_compute/core/NEON/kernels/assembly/transforms/a32_interleave_6way_32bit.hpp
index 1c1f85c11c..4a1b5d2bf2 100644
--- a/arm_compute/core/NEON/kernels/assembly/transforms/a32_interleave_6way_32bit.hpp
+++ b/arm_compute/core/NEON/kernels/assembly/transforms/a32_interleave_6way_32bit.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -25,13 +25,12 @@
#ifdef __arm__
-#include "../asmlib.hpp"
-
#include <arm_neon.h>
+#include "asmlib.hpp"
template<>
template<typename T>
-void TransformImpl<6, 1, false, 4, 4>::Transform(T *out, const T *in, int ldin, int y0, int ymax, int k0, int kmax) {
+inline void TransformImpl<6, 1, false, 4, 4>::Transform(T *out, const T *in, int ldin, int y0, int ymax, int k0, int kmax) {
uint32_t *outptr = reinterpret_cast<uint32_t *>(out);
const uint32_t *inptr = reinterpret_cast<const uint32_t *>(in);
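The other recurring change is the inline keyword on Transform. The member template itself (the overload taking typename T) may legally be defined in any number of translation units, but the full specialisations this patch adds later (template<> template<>) are ordinary functions for one-definition-rule purposes, so the whole family is marked inline once list.hpp is included from more than one source file. The distinction, using signatures taken from this patch:

// Member template: implicitly safe to define in several TUs.
template<> template<typename T>
inline void TransformImpl<6, 1, false, 4, 4>::Transform(
    T *out, const T *in, int ldin, int y0, int ymax, int k0, int kmax);

// Full specialisation: an ordinary function, so "inline" is required
// when it is defined in a header (see a64_interleave_8way_half_to_float.hpp).
template<> template<>
inline void TransformImpl<8, 1, false, 4, 2>::Transform(
    float *out, const __fp16 *in, int ldin, int y0, int ymax, int k0, int kmax);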
diff --git a/arm_compute/core/NEON/kernels/assembly/transforms/a32_transpose_interleave_8way_32bit.hpp b/arm_compute/core/NEON/kernels/assembly/transforms/a32_transpose_interleave_8way_32bit.hpp
index a5a5a1058f..a7e17fa074 100644
--- a/arm_compute/core/NEON/kernels/assembly/transforms/a32_transpose_interleave_8way_32bit.hpp
+++ b/arm_compute/core/NEON/kernels/assembly/transforms/a32_transpose_interleave_8way_32bit.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
diff --git a/arm_compute/core/NEON/kernels/assembly/transforms/a64_block16_interleave4_8bit.hpp b/arm_compute/core/NEON/kernels/assembly/transforms/a64_block16_interleave4_8bit.hpp
new file mode 100644
index 0000000000..ac84567b54
--- /dev/null
+++ b/arm_compute/core/NEON/kernels/assembly/transforms/a64_block16_interleave4_8bit.hpp
@@ -0,0 +1,120 @@
+/*
+ * Copyright (c) 2017-2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#pragma once
+
+#ifdef __aarch64__
+
+#include <arm_neon.h>
+#include "asmlib.hpp"
+
+template<>
+template<typename T>
+inline void TransformImpl<4, 16, false, 1, 1>::Transform(T *out, const T *in, int ldin, int y0, int ymax, int k0, int kmax) {
+ uint8_t *outptr = (uint8_t *)out;
+ const uint8_t *inptr = (uint8_t *)in;
+
+ uint8_t zerobuff[16] = { 0 };
+
+ for (int y=y0; y<ymax; y+=4) {
+ const uint8_t *inptr0 = inptr + y * ldin + k0;
+ const uint8_t *inptr1 = inptr0 + ldin;
+ const uint8_t *inptr2 = inptr1 + ldin;
+ const uint8_t *inptr3 = inptr2 + ldin;
+
+ prefetch_2x(inptr0);
+ prefetch_2x(inptr1);
+ prefetch_2x(inptr2);
+ prefetch_2x(inptr3);
+
+ int x=(kmax-k0);
+ for (;x>15;x-=16) {
+ /* Cope with ragged cases by copying from a buffer of zeroes instead */
+ if ((y + 3) >= ymax) {
+ switch ((y + 3) - ymax) {
+ /* Everything falls through in here */
+ case 2:
+ inptr1 = zerobuff;
+ case 1:
+ inptr2 = zerobuff;
+ case 0:
+ inptr3 = zerobuff;
+ default:
+ break;
+ }
+ }
+
+ __asm __volatile (
+ "LDR q0, [%[inptr0]], #16\n"
+ ASM_PREFETCH("[%[inptr0], #176]")
+ "LDR q1, [%[inptr1]], #16\n"
+ ASM_PREFETCH("[%[inptr1], #176]")
+ "STP q0, q1, [%[outptr]], #32\n"
+ "LDR q0, [%[inptr2]], #16\n"
+ ASM_PREFETCH("[%[inptr2], #176]")
+ "LDR q1, [%[inptr3]], #16\n"
+ ASM_PREFETCH("[%[inptr3], #176]")
+ "STP q0, q1, [%[outptr]], #32\n"
+ : [inptr0] "+r" (inptr0), [inptr1] "+r" (inptr1), [inptr2] "+r" (inptr2), [inptr3] "+r" (inptr3),
+ [outptr] "+r" (outptr)
+ :
+ : "v0", "v1"
+ );
+ }
+
+ if (x>0) {
+ /* Need to duplicate this here, in case we didn't run the main loop. */
+ if ((y + 3) >= ymax) {
+ switch ((y + 3) - ymax) {
+ /* Everything falls through in here */
+ case 2:
+ inptr1 = zerobuff;
+ case 1:
+ inptr2 = zerobuff;
+ case 0:
+ inptr3 = zerobuff;
+ default:
+ break;
+ }
+ }
+
+ /* We have to write out 16 values, copy as many legal values as there are and pad with 0 */
+ auto f = [&outptr, x](const uint8_t *&p) {
+ for (int i=0; i<16; i++) {
+ if (i < x) {
+ *outptr++ = *p++;
+ } else {
+ *outptr++ = 0;
+ }
+ }
+ };
+
+ f(inptr0);
+ f(inptr1);
+ f(inptr2);
+ f(inptr3);
+ }
+ }
+}
+
+#endif // __aarch64__
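The asm loop above copies whole 16-byte blocks from four rows at a time; the switch redirects rows past ymax to zerobuff, and the lambda pads the ragged column tail with zeros. A minimal scalar sketch of the resulting layout, assuming y0 == 0 and k0 == 0 (interleave_4x16_ref is an illustrative name, not part of the patch):

#include <cstdint>

// Reference packing: for each 4-row group and each 16-column block,
// emit 16 bytes per row back-to-back, zero padding out-of-range cells.
static void interleave_4x16_ref(uint8_t *out, const uint8_t *in, int ldin, int rows, int cols) {
    for (int y = 0; y < rows; y += 4) {
        for (int x = 0; x < cols; x += 16) {
            for (int r = 0; r < 4; r++) {
                for (int i = 0; i < 16; i++) {
                    const int yy = y + r, xx = x + i;
                    *out++ = (yy < rows && xx < cols) ? in[yy * ldin + xx] : 0;
                }
            }
        }
    }
}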
diff --git a/arm_compute/core/NEON/kernels/assembly/transforms/a64_interleave_8way_16bit.hpp b/arm_compute/core/NEON/kernels/assembly/transforms/a64_interleave_8way_16bit.hpp
index 0c23cebe63..bdc05473b4 100644
--- a/arm_compute/core/NEON/kernels/assembly/transforms/a64_interleave_8way_16bit.hpp
+++ b/arm_compute/core/NEON/kernels/assembly/transforms/a64_interleave_8way_16bit.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -26,8 +26,7 @@
#ifdef __aarch64__
#include <arm_neon.h>
-#include "../asmlib.hpp"
-
+#include "asmlib.hpp"
template<>
template<typename T>
@@ -76,8 +75,6 @@ void TransformImpl<8, 1, false, 2, 2>::Transform(T *out, const T *in, int ldin,
inptr6 = zerobuff;
case 0:
inptr7 = zerobuff;
- default:
- break;
}
}
diff --git a/arm_compute/core/NEON/kernels/assembly/transforms/a64_interleave_8way_32bit.hpp b/arm_compute/core/NEON/kernels/assembly/transforms/a64_interleave_8way_32bit.hpp
index 6317424598..bd5125afab 100644
--- a/arm_compute/core/NEON/kernels/assembly/transforms/a64_interleave_8way_32bit.hpp
+++ b/arm_compute/core/NEON/kernels/assembly/transforms/a64_interleave_8way_32bit.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -25,13 +25,12 @@
#ifdef __aarch64__
-#include "../asmlib.hpp"
-
#include <arm_neon.h>
+#include "asmlib.hpp"
template<>
template<typename T>
-void TransformImpl<8, 1, false, 4, 4>::Transform(T *out, const T *in, int ldin, int y0, int ymax, int k0, int kmax) {
+inline void TransformImpl<8, 1, false, 4, 4>::Transform(T *out, const T *in, int ldin, int y0, int ymax, int k0, int kmax) {
uint32_t *outptr = (uint32_t *)out;
const uint32_t *inptr = (uint32_t *)in;
@@ -92,47 +91,46 @@ void TransformImpl<8, 1, false, 4, 4>::Transform(T *out, const T *in, int ldin,
"ZIP1 v17.4s, v2.4s, v6.4s\n" // q17=B0D0B1D1
"LDP q8, q9, [%[inptr4]], #32\n"
"LDP q10, q11, [%[inptr5]], #32\n"
- ASM_PREFETCH("[%[inptr1], #128]")
"LDP q12, q13, [%[inptr6]], #32\n"
"ZIP1 v18.4s, v8.4s, v12.4s\n"
+ ASM_PREFETCH("[%[inptr1], #128]")
"LDP q14, q15, [%[inptr7]], #32\n"
"ZIP1 v19.4s, v10.4s, v14.4s\n"
- ASM_PREFETCH("[%[inptr2], #128]")
"ZIP1 v20.4s, v16.4s, v17.4s\n" // q20=A0B0C0D0
+ ASM_PREFETCH("[%[inptr2], #128]")
"ZIP1 v21.4s, v18.4s, v19.4s\n"
"ZIP2 v22.4s, v16.4s, v17.4s\n"
"ZIP2 v23.4s, v18.4s, v19.4s\n"
- ASM_PREFETCH("[%[inptr3], #128]")
"ZIP2 v16.4s, v0.4s, v4.4s\n"
+ ASM_PREFETCH("[%[inptr3], #128]")
"ZIP2 v17.4s, v2.4s, v6.4s\n"
"STP q20, q21, [%[outptr]], #32\n" // Write back the first element of each source
"ZIP2 v18.4s, v8.4s, v12.4s\n"
- ASM_PREFETCH("[%[inptr4], #128]")
"ZIP2 v19.4s, v10.4s, v14.4s\n"
"STP q22, q23, [%[outptr]], #32\n" // Write back the second element of each source
"ZIP1 v20.4s, v16.4s, v17.4s\n"
+ ASM_PREFETCH("[%[inptr4], #128]")
"ZIP1 v21.4s, v18.4s, v19.4s\n"
- ASM_PREFETCH("[%[inptr5], #128]")
"ZIP2 v22.4s, v16.4s, v17.4s\n"
"ZIP2 v23.4s, v18.4s, v19.4s\n"
"ZIP1 v16.4s, v1.4s, v5.4s\n"
+ ASM_PREFETCH("[%[inptr5], #128]")
"ZIP1 v17.4s, v3.4s, v7.4s\n"
- ASM_PREFETCH("[%[inptr6], #128]")
"STP q20, q21, [%[outptr]], #32\n" // Third element
"ZIP1 v18.4s, v9.4s, v13.4s\n"
"ZIP1 v19.4s, v11.4s, v15.4s\n"
"STP q22, q23, [%[outptr]], #32\n" // Fourth element
- ASM_PREFETCH("[%[inptr7], #128]")
"ZIP1 v20.4s, v16.4s, v17.4s\n"
"ZIP1 v21.4s, v18.4s, v19.4s\n"
"ZIP2 v22.4s, v16.4s, v17.4s\n"
+ ASM_PREFETCH("[%[inptr6], #128]")
"ZIP2 v23.4s, v18.4s, v19.4s\n"
"ZIP2 v16.4s, v1.4s, v5.4s\n"
@@ -140,6 +138,7 @@ void TransformImpl<8, 1, false, 4, 4>::Transform(T *out, const T *in, int ldin,
"STP q20, q21, [%[outptr]], #32\n" // Fifth element
"ZIP2 v18.4s, v9.4s, v13.4s\n"
+ ASM_PREFETCH("[%[inptr7], #128]")
"ZIP2 v19.4s, v11.4s, v15.4s\n"
"STP q22, q23, [%[outptr]], #32\n" // Sixth element
diff --git a/arm_compute/core/NEON/kernels/assembly/transforms/a64_interleave_8way_half_to_float.hpp b/arm_compute/core/NEON/kernels/assembly/transforms/a64_interleave_8way_half_to_float.hpp
new file mode 100644
index 0000000000..3c9e05223d
--- /dev/null
+++ b/arm_compute/core/NEON/kernels/assembly/transforms/a64_interleave_8way_half_to_float.hpp
@@ -0,0 +1,189 @@
+/*
+ * Copyright (c) 2017-2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#pragma once
+
+#if defined(__aarch64__) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
+
+#include <arm_neon.h>
+#include "asmlib.hpp"
+
+template<>
+template<>
+inline void TransformImpl<8, 1, false, 4, 2>::Transform(float *out, const __fp16 *in, int ldin, int y0, int ymax, int k0, int kmax) {
+ float *outptr = out;
+ const __fp16 *inptr = in;
+
+ __fp16 zerobuff[8] = { 0 };
+
+ for (int y=y0; y<ymax; y+=8) {
+ const __fp16 *inptr0 = inptr + y * ldin + k0;
+ const __fp16 *inptr1 = inptr0 + ldin;
+ const __fp16 *inptr2 = inptr1 + ldin;
+ const __fp16 *inptr3 = inptr2 + ldin;
+ const __fp16 *inptr4 = inptr3 + ldin;
+ const __fp16 *inptr5 = inptr4 + ldin;
+ const __fp16 *inptr6 = inptr5 + ldin;
+ const __fp16 *inptr7 = inptr6 + ldin;
+
+ prefetch_2x(inptr0);
+ prefetch_2x(inptr1);
+ prefetch_2x(inptr2);
+ prefetch_2x(inptr3);
+ prefetch_2x(inptr4);
+ prefetch_2x(inptr5);
+ prefetch_2x(inptr6);
+ prefetch_2x(inptr7);
+
+ int x=(kmax-k0);
+ for (;x>7;x-=8) {
+ /* Cope with ragged cases by copying from a buffer of zeroes instead */
+ if ((y + 7) >= ymax) {
+ switch ((y + 7) - ymax) {
+ /* Everything falls through in here */
+ case 6:
+ inptr1 = zerobuff;
+ case 5:
+ inptr2 = zerobuff;
+ case 4:
+ inptr3 = zerobuff;
+ case 3:
+ inptr4 = zerobuff;
+ case 2:
+ inptr5 = zerobuff;
+ case 1:
+ inptr6 = zerobuff;
+ case 0:
+ inptr7 = zerobuff;
+ default:
+ break;
+ }
+ }
+
+ __asm __volatile (
+ // Load up 8 elements (2 vectors) from each of 8 sources.
+ "LDR q0, [%[inptr0]], #16\n"
+ "LDR q2, [%[inptr1]], #16\n"
+ "FCVTL2 v1.4s, v0.8h\n"
+ "FCVTL v0.4s, v0.4h\n"
+ "LDR q4, [%[inptr2]], #16\n" // q4=C0C1C2C3
+ "FCVTL2 v3.4s, v2.8h\n"
+ "FCVTL v2.4s, v2.4h\n"
+ "FCVTL2 v5.4s, v4.8h\n"
+ "FCVTL v4.4s, v4.4h\n"
+ "ZIP1 v16.4s, v0.4s, v4.4s\n" // q16=A0C0A1C1
+ ASM_PREFETCH("[%[inptr0], #128]")
+ "LDR q6, [%[inptr3]], #16\n" // q6=D0D1D2D3
+ "FCVTL2 v7.4s, v6.8h\n"
+ "FCVTL v6.4s, v6.4h\n"
+ "ZIP1 v17.4s, v2.4s, v6.4s\n" // q17=B0D0B1D1
+ "LDR q8, [%[inptr4]], #16\n"
+ "LDR q10, [%[inptr5]], #16\n"
+ "FCVTL2 v9.4s, v8.8h\n"
+ "FCVTL v8.4s, v8.4h\n"
+ ASM_PREFETCH("[%[inptr1], #128]")
+ "LDR q12, [%[inptr6]], #16\n"
+ "FCVTL2 v11.4s, v10.8h\n"
+ "FCVTL v10.4s, v10.4h\n"
+ "FCVTL2 v13.4s, v12.8h\n"
+ "FCVTL v12.4s, v12.4h\n"
+ "ZIP1 v18.4s, v8.4s, v12.4s\n"
+ "LDR q14, [%[inptr7]], #16\n"
+ "FCVTL2 v15.4s, v14.8h\n"
+ "FCVTL v14.4s, v14.4h\n"
+ "ZIP1 v19.4s, v10.4s, v14.4s\n"
+
+ ASM_PREFETCH("[%[inptr2], #128]")
+ "ZIP1 v20.4s, v16.4s, v17.4s\n" // q20=A0B0C0D0
+ "ZIP1 v21.4s, v18.4s, v19.4s\n"
+ "ZIP2 v22.4s, v16.4s, v17.4s\n"
+ "ZIP2 v23.4s, v18.4s, v19.4s\n"
+ ASM_PREFETCH("[%[inptr3], #128]")
+
+ "ZIP2 v16.4s, v0.4s, v4.4s\n"
+ "ZIP2 v17.4s, v2.4s, v6.4s\n"
+ "STP q20, q21, [%[outptr]], #32\n" // Write back the first element of each source
+
+ "ZIP2 v18.4s, v8.4s, v12.4s\n"
+ ASM_PREFETCH("[%[inptr4], #128]")
+ "ZIP2 v19.4s, v10.4s, v14.4s\n"
+ "STP q22, q23, [%[outptr]], #32\n" // Write back the second element of each source
+
+ "ZIP1 v20.4s, v16.4s, v17.4s\n"
+ "ZIP1 v21.4s, v18.4s, v19.4s\n"
+ ASM_PREFETCH("[%[inptr5], #128]")
+ "ZIP2 v22.4s, v16.4s, v17.4s\n"
+ "ZIP2 v23.4s, v18.4s, v19.4s\n"
+
+ "ZIP1 v16.4s, v1.4s, v5.4s\n"
+ "ZIP1 v17.4s, v3.4s, v7.4s\n"
+ ASM_PREFETCH("[%[inptr6], #128]")
+ "STP q20, q21, [%[outptr]], #32\n" // Third element
+
+ "ZIP1 v18.4s, v9.4s, v13.4s\n"
+ "ZIP1 v19.4s, v11.4s, v15.4s\n"
+ "STP q22, q23, [%[outptr]], #32\n" // Fourth element
+ ASM_PREFETCH("[%[inptr7], #128]")
+
+ "ZIP1 v20.4s, v16.4s, v17.4s\n"
+ "ZIP1 v21.4s, v18.4s, v19.4s\n"
+ "ZIP2 v22.4s, v16.4s, v17.4s\n"
+ "ZIP2 v23.4s, v18.4s, v19.4s\n"
+
+ "ZIP2 v16.4s, v1.4s, v5.4s\n"
+ "ZIP2 v17.4s, v3.4s, v7.4s\n"
+ "STP q20, q21, [%[outptr]], #32\n" // Fifth element
+
+ "ZIP2 v18.4s, v9.4s, v13.4s\n"
+ "ZIP2 v19.4s, v11.4s, v15.4s\n"
+ "STP q22, q23, [%[outptr]], #32\n" // Sixth element
+
+ "ZIP1 v20.4s, v16.4s, v17.4s\n"
+ "ZIP1 v21.4s, v18.4s, v19.4s\n"
+ "STP q20, q21, [%[outptr]], #32\n" // Seventh element
+
+ "ZIP2 v22.4s, v16.4s, v17.4s\n"
+ "ZIP2 v23.4s, v18.4s, v19.4s\n"
+ "STP q22, q23, [%[outptr]], #32\n" // Eighth element
+ : [inptr0] "+r" (inptr0), [inptr1] "+r" (inptr1), [inptr2] "+r" (inptr2), [inptr3] "+r" (inptr3),
+ [inptr4] "+r" (inptr4), [inptr5] "+r" (inptr5), [inptr6] "+r" (inptr6), [inptr7] "+r" (inptr7), [outptr] "+r" (outptr)
+ :
+ : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12",
+ "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23"
+ );
+ }
+
+ for (;x>0;x--) {
+ *outptr++ = *inptr0++;
+ *outptr++ = *inptr1++;
+ *outptr++ = *inptr2++;
+ *outptr++ = *inptr3++;
+ *outptr++ = *inptr4++;
+ *outptr++ = *inptr5++;
+ *outptr++ = *inptr6++;
+ *outptr++ = *inptr7++;
+ }
+ }
+}
+
+#endif // __aarch64__ && __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
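The FCVTL/FCVTL2 pairs above widen each q register of eight __fp16 values into two float32x4 registers before the usual 8-way ZIP interleave, so the packed output is float even though the source is half precision. The same widening step can be written with ACLE intrinsics; a minimal sketch (fcvtl_pair is an illustrative helper, not part of the patch):

#include <arm_neon.h>

// Widen 8 halves to 8 floats: low half via FCVTL, high half via FCVTL2.
static inline void fcvtl_pair(float16x8_t v, float32x4_t *lo, float32x4_t *hi) {
    *lo = vcvt_f32_f16(vget_low_f16(v)); // FCVTL  v0.4s, v0.4h
    *hi = vcvt_high_f32_f16(v);          // FCVTL2 v1.4s, v0.8h
}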
diff --git a/arm_compute/core/NEON/kernels/assembly/transforms/a64_transpose_interleave_12way_16bit.hpp b/arm_compute/core/NEON/kernels/assembly/transforms/a64_transpose_interleave_12way_16bit.hpp
new file mode 100644
index 0000000000..6e07064a0c
--- /dev/null
+++ b/arm_compute/core/NEON/kernels/assembly/transforms/a64_transpose_interleave_12way_16bit.hpp
@@ -0,0 +1,145 @@
+/*
+ * Copyright (c) 2017-2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#pragma once
+
+#ifdef __aarch64__
+
+#include "transpose_interleave_common.hpp"
+
+// Generic unblocked transposed 6x32-bit sized specialisation
+template <>
+template <typename T>
+inline void TransformImpl<6, 1, true, 4, 4>::Transform(
+ T* out, const T* const in, const int stride,
+ const int x0, const int xmax, const int k0, const int kmax
+) {
+ // Redirect to a 12 x uint16_t specialisation
+ TransformImpl<12, 1, true, 2, 2>::Transform(
+ reinterpret_cast<uint16_t *>(out),
+ reinterpret_cast<const uint16_t * const>(in),
+ stride*2, x0*2, xmax*2, k0, kmax
+ );
+}
+
+// Generic 12x16-bit sized specialisation
+template <>
+template <typename T>
+inline void TransformImpl<12, 1, true, 2, 2>::Transform(
+ T* out, const T* const in, const int stride,
+ const int x0, const int xmax, const int k0, const int kmax
+) {
+ // Redirect to a uint16_t specialisation
+ Transform(
+ reinterpret_cast<uint16_t *>(out),
+ reinterpret_cast<const uint16_t * const>(in),
+ stride, x0, xmax, k0, kmax
+ );
+}
+
+// Specialised 12 x uint16_t version
+template <>
+inline void TransposeInterleaveCommon<12, uint16_t, uint16_t>::moveblock_1x1(const uint16_t *&in0, uint16_t *out) {
+ __asm volatile (
+ "LDR q0, [%[in0]]\n"
+ "STR q0, [%[out]]\n"
+ "LDR d1, [%[in0], #0x10]\n"
+ "STR d1, [%[out], #0x10]\n"
+ "ADD %x[in0], %x[in0], #0x18\n"
+ ASM_PREFETCH("[%[in0], #192]")
+ : [in0] "+r" (in0),
+ [out] "+r" (out)
+ :
+ : "v0", "v1", "memory"
+ );
+}
+
+template <>
+inline void TransposeInterleaveCommon<12, uint16_t, uint16_t>::moveblock_1x2(const uint16_t *&in0, const uint16_t *&in1, uint16_t *out) {
+ __asm volatile (
+ "LDR q0, [%[in0]]\n"
+ "LDR d1, [%[in0], #0x10]\n"
+ "ADD %x[in0], %x[in0], #0x18\n"
+ ASM_PREFETCH("[%[in0], #192]")
+
+ "LDR x21, [%[in1]]\n"
+ "LDR q2, [%[in1], #0x08]\n"
+ "INS v1.d[1], x21\n"
+ "ADD %x[in1], %x[in1], #0x18\n"
+ "STP q0, q1, [%[out]]\n"
+ "STR q2, [%x[out], #0x20]\n"
+ ASM_PREFETCH("[%[in1], #192]")
+ : [in0] "+r" (in0),
+ [in1] "+r" (in1),
+ [out] "+r" (out)
+ :
+ : "x21", "v0", "v1", "v2", "memory"
+ );
+}
+
+template <>
+inline void TransposeInterleaveCommon<12, uint16_t, uint16_t>::moveblock_1x4(const uint16_t *&in0, const uint16_t *&in1, const uint16_t *&in2, const uint16_t *&in3, uint16_t *out) {
+ __asm __volatile (
+ "LDR q0, [%x[in0]], #0x10\n"
+ "STR q0, [%x[out]]\n"
+ "LDR d1, [%x[in0]], #0x08\n"
+ ASM_PREFETCH("[%[in0], #192]")
+ "STR d1, [%x[out], #0x10]\n"
+
+ "LDR q0, [%x[in1]], #0x10\n"
+ "STR q0, [%x[out], #0x18]\n"
+ "LDR d1, [%x[in1]], #0x08\n"
+ ASM_PREFETCH("[%[in1], #192]")
+ "STR d1, [%x[out], #0x28]\n"
+
+ "LDR q0, [%x[in2]], #0x10\n"
+ "STR q0, [%x[out], #0x30]\n"
+ "LDR d1, [%x[in2]], #0x08\n"
+ ASM_PREFETCH("[%[in2], #192]")
+ "STR d1, [%x[out], #0x40]\n"
+
+ "LDR q0, [%x[in3]], #0x10\n"
+ "STR q0, [%x[out], #0x48]\n"
+ "LDR d1, [%x[in3]], #0x08\n"
+ ASM_PREFETCH("[%[in3], #192]")
+ "STR d1, [%x[out], #0x58]\n"
+ : [in0] "+r" (in0),
+ [in1] "+r" (in1),
+ [in2] "+r" (in2),
+ [in3] "+r" (in3),
+ [out] "+r" (out)
+ :
+ : "v0", "v1", "memory"
+ );
+}
+
+template <>
+template <>
+inline void TransformImpl<12, 1, true, 2, 2>::Transform(
+ uint16_t* out, const uint16_t* const in, const int stride,
+ const int x0, const int xmax, const int k0, const int kmax
+) {
+ TransposeInterleaveCommon<12, uint16_t, uint16_t>::Transform(out, in, stride, x0, xmax, k0, kmax);
+}
+
+#endif // __aarch64__
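The first specialisation above is a pure reinterpretation: every 32-bit element is two adjacent 16-bit elements, so transposing 6-way strips of 32-bit data equals transposing 12-way strips of 16-bit data once the x range and row stride are doubled (the k range indexes rows and is unchanged). For reference, a scalar sketch of the 12-way transpose-interleave output, assuming the common template zero-fills the ragged tail (transpose_interleave_12_ref is an illustrative name, not part of the patch):

#include <cstdint>

// For each strip of 12 source columns, emit rows k0..kmax-1 as
// consecutive 12-element groups.
static void transpose_interleave_12_ref(uint16_t *out, const uint16_t *in, int stride,
                                        int x0, int xmax, int k0, int kmax) {
    for (int x = x0; x < xmax; x += 12) {
        for (int k = k0; k < kmax; k++) {
            for (int i = 0; i < 12; i++) {
                const int xx = x + i;
                *out++ = (xx < xmax) ? in[k * stride + xx] : 0;
            }
        }
    }
}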
diff --git a/arm_compute/core/NEON/kernels/assembly/transforms/a64_transpose_interleave_12way_half_to_float.hpp b/arm_compute/core/NEON/kernels/assembly/transforms/a64_transpose_interleave_12way_half_to_float.hpp
new file mode 100644
index 0000000000..835e4d87aa
--- /dev/null
+++ b/arm_compute/core/NEON/kernels/assembly/transforms/a64_transpose_interleave_12way_half_to_float.hpp
@@ -0,0 +1,120 @@
+/*
+ * Copyright (c) 2017-2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#pragma once
+
+#if defined(__aarch64__) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
+
+#include "transpose_interleave_common.hpp"
+
+template <>
+inline void TransposeInterleaveCommon<12, __fp16, float>::moveblock_1x1(const __fp16 *&in0, float *out) {
+ __asm __volatile (
+ "LDR q0, [%[in0]], #16\n"
+ "FCVTL2 v1.4s, v0.8h\n"
+ "FCVTL v0.4s, v0.4h\n"
+ "STP q0, q1, [%[out]]\n"
+ ASM_PREFETCH("[%[in0], #192]")
+ "LDR d2, [%[in0]], #8\n"
+ "FCVTL v2.4s, v2.4h\n"
+ "STR q2, [%[out], #32]\n"
+ : [in0] "+r" (in0), [out] "+r" (out)
+ :
+ : "v0", "v1", "v2", "memory"
+ );
+}
+
+template <>
+inline void TransposeInterleaveCommon<12, __fp16, float>::moveblock_1x2(const __fp16 *&in0, const __fp16 *&in1, float *out) {
+ __asm __volatile (
+ "LDR q0, [%[in0]], #16\n"
+ "FCVTL2 v1.4s, v0.8h\n"
+ "FCVTL v0.4s, v0.4h\n"
+ "STP q0, q1, [%[out]]\n"
+ ASM_PREFETCH("[%[in0], #192]")
+ "LDR d2, [%[in0]], #8\n"
+ "FCVTL v2.4s, v2.4h\n"
+ "LDR q3, [%[in1]], #16\n"
+ "FCVTL2 v4.4s, v3.8h\n"
+ "FCVTL v3.4s, v3.4h\n"
+ "STP q2, q3, [%[out], #32]\n"
+ ASM_PREFETCH("[%[in1], #192]")
+ "LDR d5, [%[in1]], #16\n"
+ "FCVTL v5.4s, v5.4h\n"
+ "STP q4, q5, [%[out], #64]\n"
+ : [in0] "+r" (in0), [in1] "+r" (in1), [out] "+r" (out)
+ :
+ : "v0", "v1", "v2", "v3", "v4", "v5", "memory"
+ );
+}
+
+template <>
+inline void TransposeInterleaveCommon<12, __fp16, float>::moveblock_1x4(const __fp16 *&in0, const __fp16 *&in1, const __fp16 *&in2, const __fp16 *&in3, float *out) {
+ __asm __volatile (
+ "LDR q0, [%[in0]], #16\n"
+ "FCVTL2 v1.4s, v0.8h\n"
+ "FCVTL v0.4s, v0.4h\n"
+ "STP q0, q1, [%[out]]\n"
+ "LDR d2, [%[in0]], #8\n"
+ ASM_PREFETCH("[%[in0], #192]")
+ "FCVTL v2.4s, v2.4h\n"
+ "LDR q3, [%[in1]], #16\n"
+ "FCVTL2 v4.4s, v3.8h\n"
+ "FCVTL v3.4s, v3.4h\n"
+ "STP q2, q3, [%[out], #32]\n"
+ "LDR d5, [%[in1]], #8\n"
+ "FCVTL v5.4s, v5.4h\n"
+ ASM_PREFETCH("[%[in1], #192]")
+ "STP q4, q5, [%[out], #64]\n"
+ "LDR q6, [%[in2]], #16\n"
+ "FCVTL2 v7.4s, v6.8h\n"
+ "FCVTL v6.4s, v6.4h\n"
+ "STP q6, q7, [%[out], #96]\n"
+ "LDR d8, [%[in2]], #8\n"
+ "FCVTL v8.4s, v8.4h\n"
+ ASM_PREFETCH("[%[in2], #192]")
+ "LDR q9, [%[in3]], #16\n"
+ "FCVTL2 v10.4s, v9.8h\n"
+ "FCVTL v9.4s, v9.4h\n"
+ "STP q8, q9, [%[out], #128]\n"
+ "LDR d11, [%[in3]], #8\n"
+ "FCVTL v11.4s, v11.4h\n"
+ "STP q10, q11, [%[out], #160]\n"
+ ASM_PREFETCH("[%[in3], #192]")
+
+ : [in0] "+r" (in0), [in1] "+r" (in1), [in2] "+r" (in2), [in3] "+r" (in3), [out] "+r" (out)
+ :
+ : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "memory"
+ );
+}
+
+template <>
+template <>
+inline void TransformImpl<12, 1, true, 4, 2>::Transform(
+ float* out, const __fp16* const in, const int stride,
+ const int x0, const int xmax, const int k0, const int kmax
+) {
+ TransposeInterleaveCommon<12, __fp16, float>::Transform(out, in, stride, x0, xmax, k0, kmax);
+}
+
+#endif // __aarch64__ && __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
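Each moveblock_1x1 above consumes 12 halves per call (one 16-byte q load plus one 8-byte d load) and writes 12 floats (an STP at offset 0 plus an STR at offset 32), with the wider variants doing the same for two and four input streams. A scalar sketch of the single-stream case (illustrative, not part of the patch):

// 12 __fp16 in, 12 float out; in0 advances, out is advanced by the caller,
// matching the asm's post-indexed loads and offset stores.
static inline void moveblock_1x1_ref(const __fp16 *&in0, float *out) {
    for (int i = 0; i < 12; i++) {
        out[i] = static_cast<float>(in0[i]);
    }
    in0 += 12;
}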
diff --git a/arm_compute/core/NEON/kernels/assembly/transforms/a64_transpose_interleave_24way_16bit.hpp b/arm_compute/core/NEON/kernels/assembly/transforms/a64_transpose_interleave_24way_16bit.hpp
index e440e3288f..b6565baa23 100644
--- a/arm_compute/core/NEON/kernels/assembly/transforms/a64_transpose_interleave_24way_16bit.hpp
+++ b/arm_compute/core/NEON/kernels/assembly/transforms/a64_transpose_interleave_24way_16bit.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
diff --git a/arm_compute/core/NEON/kernels/assembly/transforms/list.hpp b/arm_compute/core/NEON/kernels/assembly/transforms/list.hpp
index 8a2213f7fe..8ad5b857fb 100644
--- a/arm_compute/core/NEON/kernels/assembly/transforms/list.hpp
+++ b/arm_compute/core/NEON/kernels/assembly/transforms/list.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -23,10 +23,11 @@
*/
#include "a32_interleave_6way_32bit.hpp"
#include "a32_transpose_interleave_8way_32bit.hpp"
+#include "a64_block16_interleave4_8bit.hpp"
#include "a64_interleave_8way_16bit.hpp"
#include "a64_interleave_8way_32bit.hpp"
-//#include "a64_interleave_8way_half_to_float.hpp"
-//#include "a64_transpose_interleave_12way_16bit.hpp"
-//#include "a64_transpose_interleave_12way_half_to_float.hpp"
+#include "a64_interleave_8way_half_to_float.hpp"
+#include "a64_transpose_interleave_12way_16bit.hpp"
+#include "a64_transpose_interleave_12way_half_to_float.hpp"
#include "a64_transpose_interleave_24way_16bit.hpp"
#include "transpose_interleave_common.hpp"
diff --git a/arm_compute/core/NEON/kernels/assembly/transforms/transpose_interleave_common.hpp b/arm_compute/core/NEON/kernels/assembly/transforms/transpose_interleave_common.hpp
index 882da9c831..231b3f181e 100644
--- a/arm_compute/core/NEON/kernels/assembly/transforms/transpose_interleave_common.hpp
+++ b/arm_compute/core/NEON/kernels/assembly/transforms/transpose_interleave_common.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*