path: root/src/core/NEON/kernels/arm_gemm/transforms/a64_interleave_8way_u8_to_u16.hpp
author		David Mansell <David.Mansell@arm.com>	2020-07-08 13:28:45 +0100
committer	Georgios Pinitas <georgios.pinitas@arm.com>	2020-07-23 11:55:45 +0000
commit		318c9f40770b2d1c06f8c0fe3f7929812503733e (patch)
tree		58926bc05cd825d885cf9af9553b1309068f36b6 /src/core/NEON/kernels/arm_gemm/transforms/a64_interleave_8way_u8_to_u16.hpp
parent		40aad9bbbae5308d7302e61e1372328c9b5daf99 (diff)
download	ComputeLibrary-318c9f40770b2d1c06f8c0fe3f7929812503733e.tar.gz
COMPMID-3578: Update FP32/int8 kernel selection.
Upgrade the current 'is_preferred()' mechanism to a new framework where kernels instead provide an estimated cycle count figure. Compatibility with the old mechanism is achieved via a wrapper which replaces a "true" result with an estimate of 0 and a "false" result with UINT64_MAX.

This mechanism is then used to select between 'interleaved' and 'hybrid' FP32 NEON kernels. It uses a simple model based on counting MACs performed and bytes of data transferred (for rearrange/merge operations) and dividing by fixed performance figures, which are provided for A53, A55, A73 and a 'default' profile (based on A76).

Separately, a new route for performing int8 GEMMs via the int16 kernel is provided. On A53 this performs significantly better (for uint8) or slightly better (for int8) than the existing int8 route. Optimized 8-to-16 bit transforms are also included.

Change-Id: I53b2e59eb9368793c78c2081e17d2445361bcc47
Signed-off-by: David Mansell <David.Mansell@arm.com>
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/c/VisualCompute/ComputeLibrary/+/250120
Tested-by: bsgcomp <bsgcomp@arm.com>
Reviewed-by: Gian Marco Iodice <gianmarco.iodice@arm.com>
Comments-Addressed: bsgcomp <bsgcomp@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/3609
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
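The selection model described in the commit message boils down to simple arithmetic: estimated cycles = MACs / MAC-rate + bytes moved / transfer-rate, with a shim that maps the old boolean 'is_preferred()' onto the same scale. A minimal sketch of the idea, using hypothetical names (PerformanceParameters, estimate_cycles, wrap_is_preferred) rather than the actual arm_gemm API:

    #include <cstdint>

    struct PerformanceParameters {
        float mac_throughput;      // MACs per cycle sustained by the kernel
        float transfer_throughput; // bytes per cycle for rearrange/merge traffic
    };

    // Estimated cycles = compute term + data-movement term.
    uint64_t estimate_cycles(uint64_t macs, uint64_t bytes_moved,
                             const PerformanceParameters &p) {
        float cycles = static_cast<float>(macs) / p.mac_throughput +
                       static_cast<float>(bytes_moved) / p.transfer_throughput;
        return static_cast<uint64_t>(cycles);
    }

    // Compatibility shim for kernels that still expose a boolean is_preferred():
    // "true" becomes the best possible estimate (0), "false" the worst (UINT64_MAX),
    // so legacy kernels can compete inside the cycle-estimate framework.
    uint64_t wrap_is_preferred(bool preferred) {
        return preferred ? 0 : UINT64_MAX;
    }

On this model the per-CPU figures mentioned above (A53, A55, A73, 'default') would simply be different sets of throughput parameters fed to the same estimate.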
Diffstat (limited to 'src/core/NEON/kernels/arm_gemm/transforms/a64_interleave_8way_u8_to_u16.hpp')
-rw-r--r--	src/core/NEON/kernels/arm_gemm/transforms/a64_interleave_8way_u8_to_u16.hpp	224
1 file changed, 224 insertions(+), 0 deletions(-)
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/a64_interleave_8way_u8_to_u16.hpp b/src/core/NEON/kernels/arm_gemm/transforms/a64_interleave_8way_u8_to_u16.hpp
new file mode 100644
index 0000000000..a3a269c9cd
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/transforms/a64_interleave_8way_u8_to_u16.hpp
@@ -0,0 +1,224 @@
+/*
+ * Copyright (c) 2017-2020 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#pragma once
+
+#if defined(__aarch64__) && defined(__ARM_FP16_ARGS)
+
+#include <arm_neon.h>
+#include <cstdint>
+
+#include "../asmlib.hpp"
+
+template<>
+template<>
+inline void TransformImpl<8, 1, false, 2, 1, false>::Transform(uint16_t *out, const uint8_t *in, int ldin, int y0, int ymax, int k0, int kmax) {
+ uint16_t *outptr = out;
+ const uint8_t *inptr = in;
+ bool first = true;
+
+ uint8_t zerobuff[32] = { 0 }; // 16 for asm loop plus up to 15 for overflow loop
+
+ for (int y=y0; y<ymax; y+=8) {
+ const uint8_t *inptr0 = inptr + y * ldin + k0;
+ const uint8_t *inptr1 = inptr0 + ldin;
+ const uint8_t *inptr2 = inptr1 + ldin;
+ const uint8_t *inptr3 = inptr2 + ldin;
+ const uint8_t *inptr4 = inptr3 + ldin;
+ const uint8_t *inptr5 = inptr4 + ldin;
+ const uint8_t *inptr6 = inptr5 + ldin;
+ const uint8_t *inptr7 = inptr6 + ldin;
+
+ prefetch_2x(inptr0);
+ prefetch_2x(inptr1);
+ prefetch_2x(inptr2);
+ prefetch_2x(inptr3);
+ prefetch_2x(inptr4);
+ prefetch_2x(inptr5);
+ prefetch_2x(inptr6);
+ prefetch_2x(inptr7);
+
+ int x=(kmax-k0);
+ for (;(x>15) || first;x-=16) {
+ /* Cope with ragged cases by copying from a buffer of zeroes instead */
+ /* 'first' forces this to always run at least once, needed if the total size is <=15. */
+ if ((y + 7) >= ymax) {
+ switch ((y + 7) - ymax) {
+ case 6:
+ inptr1 = zerobuff;
+ // fall through
+ case 5:
+ inptr2 = zerobuff;
+ // fall through
+ case 4:
+ inptr3 = zerobuff;
+ // fall through
+ case 3:
+ inptr4 = zerobuff;
+ // fall through
+ case 2:
+ inptr5 = zerobuff;
+ // fall through
+ case 1:
+ inptr6 = zerobuff;
+ // fall through
+ case 0:
+ inptr7 = zerobuff;
+ break;
+
+ default:
+ UNREACHABLE("Impossible.");
+ }
+ }
+
+ if (first) {
+ if (x<=15) {
+ break;
+ }
+
+ first = false;
+ }
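+
+ // The 'first' pass above exists so that, when the whole width (kmax-k0) fits in
+ // the scalar tail loop below, the zero-buffer substitution for out-of-range rows
+ // has still been applied before that tail loop reads from the row pointers.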
+
+ __asm __volatile (
+ // Load up 16 elements (1 source vector, 2 destination vectors) from each of 8 sources.
+ "LDR q0, [%[inptr0]], #16\n"
+ "LDR q2, [%[inptr1]], #16\n"
+ "USHLL2 v1.8h, v0.16b, #0\n"
+ "USHLL v0.8h, v0.8b, #0\n"
+ "LDR q4, [%[inptr2]], #16\n" // q4=C0C1C2C3
+ "USHLL2 v3.8h, v2.16b, #0\n"
+ "USHLL v2.8h, v2.8b, #0\n"
+ "USHLL2 v5.8h, v4.16b, #0\n"
+ "USHLL v4.8h, v4.8b, #0\n"
+ "ZIP1 v16.8h, v0.8h, v4.8h\n" // q16=A0C0A1C1
+ ASM_PREFETCH("[%[inptr0], #128]")
+ "LDR q6, [%[inptr3]], #16\n" // q6=D0D1D2D3
+ "USHLL2 v7.8h, v6.16b, #0\n"
+ "USHLL v6.8h, v6.8b, #0\n"
+ "ZIP1 v17.8h, v2.8h, v6.8h\n" // q17=B0D0B1D1
+ "LDR q8, [%[inptr4]], #16\n"
+ "LDR q10, [%[inptr5]], #16\n"
+ "USHLL2 v9.8h, v8.16b, #0\n"
+ "USHLL v8.8h, v8.8b, #0\n"
+ ASM_PREFETCH("[%[inptr1], #128]")
+ "LDR q12, [%[inptr6]], #16\n"
+ "USHLL2 v11.8h, v10.16b, #0\n"
+ "USHLL v10.8h, v10.8b, #0\n"
+ "USHLL2 v13.8h, v12.16b, #0\n"
+ "USHLL v12.8h, v12.8b, #0\n"
+ "ZIP1 v18.8h, v8.8h, v12.8h\n"
+ "LDR q14, [%[inptr7]], #16\n"
+ "USHLL2 v15.8h, v14.16b, #0\n"
+ "USHLL v14.8h, v14.8b, #0\n"
+ "ZIP1 v19.8h, v10.8h, v14.8h\n"
+
+ ASM_PREFETCH("[%[inptr2], #128]")
+ "ZIP1 v20.8h, v16.8h, v17.8h\n" // q20=A0B0C0D0A1B1C1D1
+ "ZIP1 v21.8h, v18.8h, v19.8h\n" // q21=E0F0G0H0E1F1G1H1
+ "ZIP2 v22.8h, v16.8h, v17.8h\n" // q22=A2B2C2D2A3B3C3D3
+ "ZIP2 v23.8h, v18.8h, v19.8h\n" // q23=E2F2G2H1E3F3G3H3
+ ASM_PREFETCH("[%[inptr3], #128]")
+
+ "ZIP2 v16.8h, v0.8h, v4.8h\n"
+ "ZIP2 v17.8h, v2.8h, v6.8h\n"
+ "TRN1 v24.2d, v20.2d, v21.2d\n"
+ "TRN2 v25.2d, v20.2d, v21.2d\n"
+
+ "ZIP2 v18.8h, v8.8h, v12.8h\n"
+ ASM_PREFETCH("[%[inptr4], #128]")
+ "ZIP2 v19.8h, v10.8h, v14.8h\n"
+ "STP q24, q25, [%[outptr]], #32\n" // Write back the first element of each source
+ "TRN1 v24.2d, v22.2d, v23.2d\n"
+ "TRN2 v25.2d, v22.2d, v23.2d\n"
+
+ "ZIP1 v20.8h, v16.8h, v17.8h\n"
+ "ZIP1 v21.8h, v18.8h, v19.8h\n"
+ ASM_PREFETCH("[%[inptr5], #128]")
+ "ZIP2 v22.8h, v16.8h, v17.8h\n"
+ "ZIP2 v23.8h, v18.8h, v19.8h\n"
+ "STP q24, q25, [%[outptr]], #32\n" // Write back the second element of each source
+
+ "ZIP1 v16.8h, v1.8h, v5.8h\n"
+ "ZIP1 v17.8h, v3.8h, v7.8h\n"
+ ASM_PREFETCH("[%[inptr6], #128]")
+ "TRN1 v24.2d, v20.2d, v21.2d\n"
+ "TRN2 v25.2d, v20.2d, v21.2d\n"
+
+ "ZIP1 v18.8h, v9.8h, v13.8h\n"
+ "ZIP1 v19.8h, v11.8h, v15.8h\n"
+ "STP q24, q25, [%[outptr]], #32\n" // Third element
+ "TRN1 v24.2d, v22.2d, v23.2d\n"
+ "TRN2 v25.2d, v22.2d, v23.2d\n"
+ ASM_PREFETCH("[%[inptr7], #128]")
+
+ "ZIP1 v20.8h, v16.8h, v17.8h\n"
+ "ZIP1 v21.8h, v18.8h, v19.8h\n"
+ "STP q24, q25, [%[outptr]], #32\n" // Fourth element
+ "ZIP2 v22.8h, v16.8h, v17.8h\n"
+ "ZIP2 v23.8h, v18.8h, v19.8h\n"
+
+ "ZIP2 v16.8h, v1.8h, v5.8h\n"
+ "ZIP2 v17.8h, v3.8h, v7.8h\n"
+ "TRN1 v24.2d, v20.2d, v21.2d\n"
+ "TRN2 v25.2d, v20.2d, v21.2d\n"
+
+ "ZIP2 v18.8h, v9.8h, v13.8h\n"
+ "ZIP2 v19.8h, v11.8h, v15.8h\n"
+ "STP q24, q25, [%[outptr]], #32\n" // Fifth element
+ "TRN1 v24.2d, v22.2d, v23.2d\n"
+ "TRN2 v25.2d, v22.2d, v23.2d\n"
+
+ "ZIP1 v20.8h, v16.8h, v17.8h\n"
+ "ZIP1 v21.8h, v18.8h, v19.8h\n"
+ "STP q24, q25, [%[outptr]], #32\n" // Sixth element
+ "TRN1 v24.2d, v20.2d, v21.2d\n"
+ "TRN2 v25.2d, v20.2d, v21.2d\n"
+
+ "ZIP2 v22.8h, v16.8h, v17.8h\n"
+ "ZIP2 v23.8h, v18.8h, v19.8h\n"
+ "STP q24, q25, [%[outptr]], #32\n" // Seventh element
+ "TRN1 v24.2d, v22.2d, v23.2d\n"
+ "TRN2 v25.2d, v22.2d, v23.2d\n"
+ "STP q24, q25, [%[outptr]], #32\n" // Eighth element
+ : [inptr0] "+r" (inptr0), [inptr1] "+r" (inptr1), [inptr2] "+r" (inptr2), [inptr3] "+r" (inptr3),
+ [inptr4] "+r" (inptr4), [inptr5] "+r" (inptr5), [inptr6] "+r" (inptr6), [inptr7] "+r" (inptr7), [outptr] "+r" (outptr)
+ :
+ : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12",
+ "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "memory"
+ );
+ }
+
+ for (;x>0;x--) {
+ *outptr++ = *inptr0++;
+ *outptr++ = *inptr1++;
+ *outptr++ = *inptr2++;
+ *outptr++ = *inptr3++;
+ *outptr++ = *inptr4++;
+ *outptr++ = *inptr5++;
+ *outptr++ = *inptr6++;
+ *outptr++ = *inptr7++;
+ }
+ }
+}
+
+#endif // __aarch64__ && __ARM_FP16_ARGS
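
For reference, the transform implemented by the assembly above is an 8-way row interleave combined with u8-to-u16 widening: for each column k, one value from each of the 8 rows in the block is written out consecutively, 16 columns per assembly iteration. A plain scalar sketch of the same operation (a hypothetical reference only, ignoring the prefetching and 16-column blocking, but with the same zero-padding of out-of-range rows):

    #include <cstdint>

    // Reference-only sketch; the optimized kernel above produces the same output layout.
    static void interleave_8way_u8_to_u16_ref(uint16_t *out, const uint8_t *in, int ldin,
                                              int y0, int ymax, int k0, int kmax) {
        for (int y = y0; y < ymax; y += 8) {       // process blocks of 8 rows
            for (int k = k0; k < kmax; k++) {      // for each column in the block
                for (int r = 0; r < 8; r++) {      // emit one widened value per row
                    int row = y + r;
                    // Rows beyond ymax are padded with zeroes, as the zerobuff does above.
                    *out++ = (row < ymax) ? static_cast<uint16_t>(in[row * ldin + k]) : 0;
                }
            }
        }
    }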