/*
 * Copyright (c) 2021, 2023 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#pragma once

#if defined(__aarch64__)

namespace {

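// Transpose-interleave kernel for 8-bit data: reads 'height' rows of 'width'
// bytes (row pitch 'in_stride') from 'in' and writes them to 'out' as
// 4-column blocks in which each group of 4 rows is interleaved byte-wise,
// zero-padding the row count up to a multiple of 4.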
void a64_transpose_interleave_4_1x4(uint8_t *out, const uint8_t *in, size_t width, size_t in_stride, size_t height)
{
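    // Zeroed scratch row, substituted for rows past the end of the input when
    // height is not a multiple of 4.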
    uint8_t *pad_row = reinterpret_cast<uint8_t *>(alloca(width * sizeof(uint8_t)));

    if (height % 4) {
        memset(pad_row, 0, width * sizeof(uint8_t));
    }

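    // Byte distance between consecutive 4-column output blocks:
    // 4 columns x roundup(height, 4) interleaved rows.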
    size_t out_stride = 4 * roundup<size_t>(height, 4) * sizeof(uint8_t);

    __asm__ __volatile__(
      "cmp %x[height], #0x10\n"
      "blt 8f\n"
      "1:"  // Main row loop: Head
      "mov x17, %x[in]\n"
      "add x16, x17, %x[in_stride]\n"
      "add x15, x16, %x[in_stride]\n"
      "add x14, x15, %x[in_stride]\n"
      "add x13, x14, %x[in_stride]\n"
      "add x12, x13, %x[in_stride]\n"
      "add x11, x12, %x[in_stride]\n"
      "add x10, x11, %x[in_stride]\n"
      "add x9, x10, %x[in_stride]\n"
      "add x28, x9, %x[in_stride]\n"
      "add x27, x28, %x[in_stride]\n"
      "add x26, x27, %x[in_stride]\n"
      "add x25, x26, %x[in_stride]\n"
      "mov x24, %x[width]\n"
      "add x23, x25, %x[in_stride]\n"
      "add x22, x23, %x[in_stride]\n"
      "add x20, x22, %x[in_stride]\n"
      "cmp x24, #0x10\n"
      "add %x[in], x20, %x[in_stride]\n"
      "mov x21, %x[out]\n"
      "sub %x[height], %x[height], #0x10\n"
      "blt 3f\n"
      "2:"  // Main row loop: Unroll column loop
      "ldr q21, [x17], #0x10\n"
      "ldr q20, [x16], #0x10\n"
      "sub x24, x24, #0x10\n"
      "cmp x24, #0x10\n"
      "ldr q17, [x15], #0x10\n"
      "ldr q16, [x14], #0x10\n"
      "zip1 v3.16b, v21.16b, v17.16b\n"
      "zip1 v2.16b, v20.16b, v16.16b\n"
      "ldr q19, [x13], #0x10\n"
      "ldr q18, [x12], #0x10\n"
      "zip2 v1.16b, v21.16b, v17.16b\n"
      "zip2 v0.16b, v20.16b, v16.16b\n"
      "ldr q17, [x11], #0x10\n"
      "ldr q16, [x10], #0x10\n"
      "zip1 v31.16b, v19.16b, v17.16b\n"
      "zip1 v30.16b, v18.16b, v16.16b\n"
      "ldr q21, [x9], #0x10\n"
      "ldr q20, [x28], #0x10\n"
      "zip2 v29.16b, v19.16b, v17.16b\n"
      "zip2 v28.16b, v18.16b, v16.16b\n"
      "ldr q17, [x27], #0x10\n"
      "ldr q16, [x26], #0x10\n"
      "zip1 v23.16b, v21.16b, v17.16b\n"
      "zip1 v22.16b, v20.16b, v16.16b\n"
      "ldr q19, [x25], #0x10\n"
      "ldr q18, [x23], #0x10\n"
      "zip2 v27.16b, v21.16b, v17.16b\n"
      "zip2 v26.16b, v20.16b, v16.16b\n"
      "ldr q17, [x22], #0x10\n"
      "ldr q16, [x20], #0x10\n"
      "zip1 v21.16b, v19.16b, v17.16b\n"
      "zip1 v20.16b, v18.16b, v16.16b\n"
      "zip2 v25.16b, v19.16b, v17.16b\n"
      "zip2 v24.16b, v18.16b, v16.16b\n"
      "zip1 v16.16b, v3.16b, v2.16b\n"
      "zip1 v18.16b, v31.16b, v30.16b\n"
      "str q16, [x21, #0x0]\n"
      "zip1 v17.16b, v23.16b, v22.16b\n"
      "zip1 v16.16b, v21.16b, v20.16b\n"
      "str q18, [x21, #0x10]\n"
      "str q17, [x21, #0x20]\n"
      "zip2 v19.16b, v3.16b, v2.16b\n"
      "zip2 v18.16b, v31.16b, v30.16b\n"
      "str q16, [x21, #0x30]\n"
      "add x21, x21, %x[out_stride]\n"
      "zip2 v17.16b, v23.16b, v22.16b\n"
      "zip2 v16.16b, v21.16b, v20.16b\n"
      "str q19, [x21, #0x0]\n"
      "zip1 v23.16b, v1.16b, v0.16b\n"
      "zip1 v22.16b, v29.16b, v28.16b\n"
      "str q18, [x21, #0x10]\n"
      "zip1 v21.16b, v27.16b, v26.16b\n"
      "zip1 v20.16b, v25.16b, v24.16b\n"
      "str q17, [x21, #0x20]\n"
      "zip2 v19.16b, v1.16b, v0.16b\n"
      "zip2 v18.16b, v29.16b, v28.16b\n"
      "str q16, [x21, #0x30]\n"
      "add x21, x21, %x[out_stride]\n"
      "zip2 v17.16b, v27.16b, v26.16b\n"
      "zip2 v16.16b, v25.16b, v24.16b\n"
      "str q23, [x21, #0x0]\n"
      "str q22, [x21, #0x10]\n"
      "str q21, [x21, #0x20]\n"
      "str q20, [x21, #0x30]\n"
      "add x21, x21, %x[out_stride]\n"
      "str q19, [x21, #0x0]\n"
      "str q18, [x21, #0x10]\n"
      "str q17, [x21, #0x20]\n"
      "str q16, [x21, #0x30]\n"
      "add x21, x21, %x[out_stride]\n"
      "bge 2b\n"
      "3:"  // Main row loop: Unroll column loop skip
      "cmp x24, #0x4\n"
      "blt 5f\n"
      "4:"  // Main row loop: Column loop
      "ldr s19, [x17], #0x4\n"
      "ldr s18, [x16], #0x4\n"
      "sub x24, x24, #0x4\n"
      "cmp x24, #0x4\n"
      "ldr s17, [x15], #0x4\n"
      "ldr s16, [x14], #0x4\n"
      "zip1 v17.16b, v19.16b, v17.16b\n"
      "zip1 v16.16b, v18.16b, v16.16b\n"
      "ldr s19, [x13], #0x4\n"
      "ldr s18, [x12], #0x4\n"
      "zip1 v22.16b, v17.16b, v16.16b\n"
      "ldr s17, [x11], #0x4\n"
      "ldr s16, [x10], #0x4\n"
      "zip1 v17.16b, v19.16b, v17.16b\n"
      "zip1 v16.16b, v18.16b, v16.16b\n"
      "ldr s19, [x9], #0x4\n"
      "ldr s18, [x28], #0x4\n"
      "zip1 v21.16b, v17.16b, v16.16b\n"
      "ldr s17, [x27], #0x4\n"
      "ldr s16, [x26], #0x4\n"
      "zip1 v17.16b, v19.16b, v17.16b\n"
      "zip1 v16.16b, v18.16b, v16.16b\n"
      "ldr s20, [x25], #0x4\n"
      "ldr s19, [x23], #0x4\n"
      "zip1 v18.16b, v17.16b, v16.16b\n"
      "ldr s17, [x22], #0x4\n"
      "ldr s16, [x20], #0x4\n"
      "zip1 v17.16b, v20.16b, v17.16b\n"
      "zip1 v16.16b, v19.16b, v16.16b\n"
      "zip1 v16.16b, v17.16b, v16.16b\n"
      "str q22, [x21, #0x0]\n"
      "str q21, [x21, #0x10]\n"
      "str q18, [x21, #0x20]\n"
      "str q16, [x21, #0x30]\n"
      "add x21, x21, %x[out_stride]\n"
      "bge 4b\n"
      "5:"  // Main row loop: Column loop skip
      "cmp x24, #0x1\n"
      "blt 7f\n"
      "6:"  // Main row loop: width 1 loop: loop
      "ldr b19, [x17], #0x1\n"
      "ldr b18, [x16], #0x1\n"
      "sub x24, x24, #0x1\n"
      "cmp x24, #0x1\n"
      "ldr b17, [x15], #0x1\n"
      "ldr b16, [x14], #0x1\n"
      "zip1 v17.16b, v19.16b, v17.16b\n"
      "zip1 v16.16b, v18.16b, v16.16b\n"
      "ldr b19, [x13], #0x1\n"
      "ldr b18, [x12], #0x1\n"
      "zip1 v22.16b, v17.16b, v16.16b\n"
      "ldr b17, [x11], #0x1\n"
      "ldr b16, [x10], #0x1\n"
      "zip1 v17.16b, v19.16b, v17.16b\n"
      "zip1 v16.16b, v18.16b, v16.16b\n"
      "ldr b19, [x9], #0x1\n"
      "ldr b18, [x28], #0x1\n"
      "zip1 v21.16b, v17.16b, v16.16b\n"
      "ldr b17, [x27], #0x1\n"
      "ldr b16, [x26], #0x1\n"
      "zip1 v17.16b, v19.16b, v17.16b\n"
      "zip1 v16.16b, v18.16b, v16.16b\n"
      "ldr b20, [x25], #0x1\n"
      "ldr b19, [x23], #0x1\n"
      "zip1 v18.16b, v17.16b, v16.16b\n"
      "ldr b17, [x22], #0x1\n"
      "ldr b16, [x20], #0x1\n"
      "zip1 v17.16b, v20.16b, v17.16b\n"
      "zip1 v16.16b, v19.16b, v16.16b\n"
      "str s22, [x21, #0x0]\n"
      "zip1 v16.16b, v17.16b, v16.16b\n"
      "str s21, [x21, #0x10]\n"
      "str s18, [x21, #0x20]\n"
      "str s16, [x21, #0x30]\n"
      "add x21, x21, #0x4\n"
      "bge 6b\n"
      "7:"  // Main row loop: width 1 loop: skip
      "cmp %x[height], #0x10\n"
      "add %x[out], %x[out], #0x40\n"
      "bge 1b\n"
      "cbz %x[height], 16f\n"
      "8:"  // Main loop skip
      "9:"  // Tail row loop: Head
      "mov x17, %x[in]\n"
      "add x16, x17, %x[in_stride]\n"
      "add x15, x16, %x[in_stride]\n"
      "mov x20, %x[width]\n"
      "add x14, x15, %x[in_stride]\n"
      "cmp %x[height], #0x3\n"
      "add %x[in], x14, %x[in_stride]\n"
      "csel x14, x14, %x[pad_row], GT\n"
      "csel x15, x15, %x[pad_row], GE\n"
      "cmp %x[height], #0x1\n"
      "csel x16, x16, %x[pad_row], GT\n"
      "cmp x20, #0x10\n"
      "mov x21, %x[out]\n"
      "sub %x[height], %x[height], #0x4\n"
      "blt 11f\n"
      "10:"  // Tail row loop: Unroll column loop
      "ldr q19, [x17], #0x10\n"
      "ldr q21, [x16], #0x10\n"
      "sub x20, x20, #0x10\n"
      "cmp x20, #0x10\n"
      "ldr q18, [x15], #0x10\n"
      "ldr q16, [x14], #0x10\n"
      "zip1 v20.16b, v19.16b, v18.16b\n"
      "zip1 v17.16b, v21.16b, v16.16b\n"
      "zip2 v19.16b, v19.16b, v18.16b\n"
      "zip2 v18.16b, v21.16b, v16.16b\n"
      "zip1 v16.16b, v20.16b, v17.16b\n"
      "str q16, [x21, #0x0]\n"
      "add x21, x21, %x[out_stride]\n"
      "zip2 v16.16b, v20.16b, v17.16b\n"
      "str q16, [x21, #0x0]\n"
      "add x21, x21, %x[out_stride]\n"
      "zip1 v17.16b, v19.16b, v18.16b\n"
      "zip2 v16.16b, v19.16b, v18.16b\n"
      "str q17, [x21, #0x0]\n"
      "add x21, x21, %x[out_stride]\n"
      "str q16, [x21, #0x0]\n"
      "add x21, x21, %x[out_stride]\n"
      "bge 10b\n"
      "11:"  // Tail row loop: Unroll column loop skip
      "cmp x20, #0x4\n"
      "blt 13f\n"
      "12:"  // Tail row loop: Column loop
      "ldr s19, [x17], #0x4\n"
      "ldr s18, [x16], #0x4\n"
      "sub x20, x20, #0x4\n"
      "cmp x20, #0x4\n"
      "ldr s17, [x15], #0x4\n"
      "ldr s16, [x14], #0x4\n"
      "zip1 v17.16b, v19.16b, v17.16b\n"
      "zip1 v16.16b, v18.16b, v16.16b\n"
      "zip1 v16.16b, v17.16b, v16.16b\n"
      "str q16, [x21, #0x0]\n"
      "add x21, x21, %x[out_stride]\n"
      "bge 12b\n"
      "13:"  // Tail row loop: Column loop skip
      "cmp x20, #0x1\n"
      "blt 15f\n"
      "14:"  // Tail row loop: width 1 loop: loop
      "ldr b19, [x17], #0x1\n"
      "ldr b18, [x16], #0x1\n"
      "sub x20, x20, #0x1\n"
      "cmp x20, #0x1\n"
      "ldr b17, [x15], #0x1\n"
      "ldr b16, [x14], #0x1\n"
      "zip1 v17.16b, v19.16b, v17.16b\n"
      "zip1 v16.16b, v18.16b, v16.16b\n"
      "zip1 v16.16b, v17.16b, v16.16b\n"
      "str s16, [x21, #0x0]\n"
      "add x21, x21, #0x4\n"
      "bge 14b\n"
      "15:"  // Tail row loop: width 1 loop: skip
      "cmp %x[height], #0x1\n"
      "add %x[out], %x[out], #0x10\n"
      "bge 9b\n"
      "16:"  // Done
      : [height] "+&r" (height), [in] "+&r" (in), [out] "+&r" (out)
      : [in_stride] "r" (in_stride), [out_stride] "r" (out_stride), [pad_row] "r" (pad_row), [width] "r" (width)
      : "cc", "memory", "v0", "v1", "v2", "v3", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
    );
}

} // anonymous namespace

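// Wrappers exposing the kernel through the library's generic Transform
// interface. The int8_t variant reuses the same byte-level kernel, as the
// transform is type-agnostic for 8-bit data.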
template<>
void Transform<4, 4, true, VLType::None>(
    uint8_t *out, const uint8_t *in, int stride, int x0, int xmax, int k0, int kmax)
{
    a64_transpose_interleave_4_1x4(
        reinterpret_cast<uint8_t *>(out),
        reinterpret_cast<const uint8_t *>(in + k0 * stride + x0),
        (xmax-x0) * sizeof(uint8_t) / 1,
        stride * sizeof(uint8_t),
        (kmax-k0)
    );
}

template<>
void Transform<4, 4, true, VLType::None>(
    int8_t *out, const int8_t *in, int stride, int x0, int xmax, int k0, int kmax)
{
    a64_transpose_interleave_4_1x4(
        reinterpret_cast<uint8_t *>(out),
        reinterpret_cast<const uint8_t *>(in + k0 * stride + x0),
        (xmax-x0) * sizeof(int8_t) / 1,
        stride * sizeof(int8_t),
        (kmax-k0)
    );
}

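// Illustration only: a scalar sketch (hypothetical helper, not part of the
// library) of the layout the assembly kernel above produces. Output is a
// sequence of 4-column blocks, each out_stride bytes apart; within a block,
// every 16-byte group holds a 4x4 tile stored column-major, i.e. the 4 rows
// of each column are contiguous. Assumes <cstddef> and <cstdint> are in
// scope, as in the surrounding file.
static inline void reference_transpose_interleave_4_1x4(
    uint8_t *out, const uint8_t *in, size_t width, size_t in_stride, size_t height)
{
    const size_t padded_height = (height + 3) / 4 * 4;  // roundup(height, 4)
    const size_t out_stride    = 4 * padded_height;     // bytes per 4-column block

    for (size_t col = 0; col < width; col++) {
        for (size_t row = 0; row < padded_height; row++) {
            const size_t dst = (col / 4) * out_stride   // which 4-column block
                             + (row / 4) * 16           // 4x4 tile within the block
                             + (col % 4) * 4            // column within the tile
                             + (row % 4);               // row within the column
            out[dst] = (row < height) ? in[row * in_stride + col] : 0;
        }
    }
}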

#endif  // defined(__aarch64__)