/*
 * Copyright (c) 2020-2022 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/Window.h"
#include "src/core/NEON/NEAsymm.h"
#include "src/core/NEON/NEMath.h"
#include "src/core/NEON/wrapper/wrapper.h"

#include <arm_neon.h>
#include <cmath>
#include <cstddef>
#include <cstdint>

namespace arm_compute
{
namespace cpu
{
namespace
{
#ifdef __aarch64__

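/** Translate byte strings through a 256-entry lookup table using NEON TBL.
 *
 * The whole table stays resident in q16-q31 (16 registers x 16 bytes).
 * Each input byte is looked up in four 64-byte quadrants: the raw byte
 * indexes {v16-v19} directly, and copies re-based by -0x40/-0x80/-0xc0
 * index {v20-v23}/{v24-v27}/{v28-v31}. TBL returns zero for out-of-range
 * lanes, so OR-ing the four partial results yields the full 8-bit lookup.
 * The main loop consumes 48 input bytes per iteration; tbz-driven partial
 * load/store paths handle the tail. Scalar equivalent:
 *
 *     for(size_t i = 0; i < num_strings; ++i)
 *         for(size_t j = 0; j < string_length; ++j)
 *             output[i][j] = table[input[i][j]];
 */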
void substitute_bytes_neon(
    const uint8_t        *table,
    size_t                num_strings,
    size_t                string_length,
    const uint8_t *const *input,
    uint8_t *const       *output)
{
    __asm__ __volatile__(
        "ldr q16, [%x[table], #0x0]\n"
        "ldr q17, [%x[table], #0x10]\n"
        "mov x22, #0x0\n"
        "ldr q18, [%x[table], #0x20]\n"
        "ldr q19, [%x[table], #0x30]\n"
        "ldr q20, [%x[table], #0x40]\n"
        "ldr q21, [%x[table], #0x50]\n"
        "ldr q22, [%x[table], #0x60]\n"
        "ldr q23, [%x[table], #0x70]\n"
        "ldr q24, [%x[table], #0x80]\n"
        "ldr q25, [%x[table], #0x90]\n"
        "ldr q26, [%x[table], #0xa0]\n"
        "ldr q27, [%x[table], #0xb0]\n"
        "ldr q28, [%x[table], #0xc0]\n"
        "ldr q29, [%x[table], #0xd0]\n"
        "ldr q30, [%x[table], #0xe0]\n"
        "ldr q31, [%x[table], #0xf0]\n"
        "1:" // string loop
        "ldr x21, [%x[input], x22, LSL #0x3]\n"
        "ldr x20, [%x[output], x22, LSL #0x3]\n"
        "movi v12.16b, #0x40\n"
        "movi v11.16b, #0x80\n"
        "movi v10.16b, #0xc0\n"
        "mov x19, %x[string_length]\n"
        "2:" // 4 rounds: width loop
        "cmp x19, #0x30\n"
        "bge 27f\n"
        "tbz x19, #5, 10f\n"
        "ld1 { v9.16b }, [x21], #0x10\n"
        "ld1 { v13.16b }, [x21], #0x10\n"
        "tbz x19, #3, 6f\n"
        "ldr d14, [x21], #0x8\n"
        "tbz x19, #2, 4f\n"
        "ld1 { v14.s }[2], [x21], #0x4\n"
        "tbz x19, #1, 3f\n"
        "ld1 { v14.h }[6], [x21], #0x2\n"
        "tbz x19, #0, 26f\n"
        "ld1 { v14.b }[14], [x21]\n"
        "b 26f\n"
        "3:" // 4 rounds: Partial load: partial_1_44
        "tbz x19, #0, 26f\n"
        "ld1 { v14.b }[12], [x21]\n"
        "b 26f\n"
        "4:" // 4 rounds: Partial load: partial_2_40
        "tbz x19, #1, 5f\n"
        "ld1 { v14.h }[4], [x21], #0x2\n"
        "tbz x19, #0, 26f\n"
        "ld1 { v14.b }[10], [x21]\n"
        "b 26f\n"
        "5:" // 4 rounds: Partial load: partial_1_40
        "tbz x19, #0, 26f\n"
        "ld1 { v14.b }[8], [x21]\n"
        "b 26f\n"
        "6:" // 4 rounds: Partial load: partial_4_32
        "tbz x19, #2, 8f\n"
        "ldr s14, [x21], #0x4\n"
        "tbz x19, #1, 7f\n"
        "ld1 { v14.h }[2], [x21], #0x2\n"
        "tbz x19, #0, 26f\n"
        "ld1 { v14.b }[6], [x21]\n"
        "b 26f\n"
        "7:" // 4 rounds: Partial load: partial_1_36
        "tbz x19, #0, 26f\n"
        "ld1 { v14.b }[4], [x21]\n"
        "b 26f\n"
        "8:" // 4 rounds: Partial load: partial_2_32
        "tbz x19, #1, 9f\n"
        "ldr h14, [x21], #0x2\n"
        "tbz x19, #0, 26f\n"
        "ld1 { v14.b }[2], [x21]\n"
        "b 26f\n"
        "9:" // 4 rounds: Partial load: partial_1_32
        "tbz x19, #0, 26f\n"
        "ldr b14, [x21, #0x0]\n"
        "b 26f\n"
        "10:" // 4 rounds: Partial load: partial_16_0
        "tbz x19, #4, 18f\n"
        "ld1 { v9.16b }, [x21], #0x10\n"
        "tbz x19, #3, 14f\n"
        "ldr d13, [x21], #0x8\n"
        "tbz x19, #2, 12f\n"
        "ld1 { v13.s }[2], [x21], #0x4\n"
        "tbz x19, #1, 11f\n"
        "ld1 { v13.h }[6], [x21], #0x2\n"
        "tbz x19, #0, 26f\n"
        "ld1 { v13.b }[14], [x21]\n"
        "b 26f\n"
        "11:" // 4 rounds: Partial load: partial_1_28
        "tbz x19, #0, 26f\n"
        "ld1 { v13.b }[12], [x21]\n"
        "b 26f\n"
        "12:" // 4 rounds: Partial load: partial_2_24
        "tbz x19, #1, 13f\n"
        "ld1 { v13.h }[4], [x21], #0x2\n"
        "tbz x19, #0, 26f\n"
        "ld1 { v13.b }[10], [x21]\n"
        "b 26f\n"
        "13:" // 4 rounds: Partial load: partial_1_24
        "tbz x19, #0, 26f\n"
        "ld1 { v13.b }[8], [x21]\n"
        "b 26f\n"
        "14:" // 4 rounds: Partial load: partial_4_16
        "tbz x19, #2, 16f\n"
        "ldr s13, [x21], #0x4\n"
        "tbz x19, #1, 15f\n"
        "ld1 { v13.h }[2], [x21], #0x2\n"
        "tbz x19, #0, 26f\n"
        "ld1 { v13.b }[6], [x21]\n"
        "b 26f\n"
        "15:" // 4 rounds: Partial load: partial_1_20
        "tbz x19, #0, 26f\n"
        "ld1 { v13.b }[4], [x21]\n"
        "b 26f\n"
        "16:" // 4 rounds: Partial load: partial_2_16
        "tbz x19, #1, 17f\n"
        "ldr h13, [x21], #0x2\n"
        "tbz x19, #0, 26f\n"
        "ld1 { v13.b }[2], [x21]\n"
        "b 26f\n"
        "17:" // 4 rounds: Partial load: partial_1_16
        "tbz x19, #0, 26f\n"
        "ldr b13, [x21, #0x0]\n"
        "b 26f\n"
        "18:" // 4 rounds: Partial load: partial_8_0
        "tbz x19, #3, 22f\n"
        "ldr d9, [x21], #0x8\n"
        "tbz x19, #2, 20f\n"
        "ld1 { v9.s }[2], [x21], #0x4\n"
        "tbz x19, #1, 19f\n"
        "ld1 { v9.h }[6], [x21], #0x2\n"
        "tbz x19, #0, 26f\n"
        "ld1 { v9.b }[14], [x21]\n"
        "b 26f\n"
        "19:" // 4 rounds: Partial load: partial_1_12
        "tbz x19, #0, 26f\n"
        "ld1 { v9.b }[12], [x21]\n"
        "b 26f\n"
        "20:" // 4 rounds: Partial load: partial_2_8
        "tbz x19, #1, 21f\n"
        "ld1 { v9.h }[4], [x21], #0x2\n"
        "tbz x19, #0, 26f\n"
        "ld1 { v9.b }[10], [x21]\n"
        "b 26f\n"
        "21:" // 4 rounds: Partial load: partial_1_8
        "tbz x19, #0, 26f\n"
        "ld1 { v9.b }[8], [x21]\n"
        "b 26f\n"
        "22:" // 4 rounds: Partial load: partial_4_0
        "tbz x19, #2, 24f\n"
        "ldr s9, [x21], #0x4\n"
        "tbz x19, #1, 23f\n"
        "ld1 { v9.h }[2], [x21], #0x2\n"
        "tbz x19, #0, 26f\n"
        "ld1 { v9.b }[6], [x21]\n"
        "b 26f\n"
        "23:" // 4 rounds: Partial load: partial_1_4
        "tbz x19, #0, 26f\n"
        "ld1 { v9.b }[4], [x21]\n"
        "b 26f\n"
        "24:" // 4 rounds: Partial load: partial_2_0
        "tbz x19, #1, 25f\n"
        "ldr h9, [x21], #0x2\n"
        "tbz x19, #0, 26f\n"
        "ld1 { v9.b }[2], [x21]\n"
        "b 26f\n"
        "25:" // 4 rounds: Partial load: partial_1_0
        "ldr b9, [x21, #0x0]\n"
        "26:" // 4 rounds: Partial load: Done
        "b 28f\n"
        "27:" // 4 rounds: Full load
        "ldr q9, [x21, #0x0]\n"
        "ldr q13, [x21, #0x10]\n"
        "ldr q14, [x21, #0x20]\n"
        "add x21, x21, #0x30\n"
        "28:" // 4 rounds: Load done
        "sub v8.16b, v9.16b, v12.16b\n"
        "sub v7.16b, v9.16b, v11.16b\n"
        "tbl v8.16b, { v20.16b, v21.16b, v22.16b, v23.16b }, v8.16b\n"
        "sub v6.16b, v9.16b, v10.16b\n"
        "sub v5.16b, v13.16b, v12.16b\n"
        "tbl v9.16b, { v16.16b, v17.16b, v18.16b, v19.16b }, v9.16b\n"
        "sub v4.16b, v13.16b, v11.16b\n"
        "sub v3.16b, v13.16b, v10.16b\n"
        "tbl v7.16b, { v24.16b, v25.16b, v26.16b, v27.16b }, v7.16b\n"
        "sub v2.16b, v14.16b, v12.16b\n"
        "sub v1.16b, v14.16b, v11.16b\n"
        "tbl v6.16b, { v28.16b, v29.16b, v30.16b, v31.16b }, v6.16b\n"
        "sub v0.16b, v14.16b, v10.16b\n"
        "tbl v13.16b, { v16.16b, v17.16b, v18.16b, v19.16b }, v13.16b\n"
        "tbl v5.16b, { v20.16b, v21.16b, v22.16b, v23.16b }, v5.16b\n"
        "tbl v4.16b, { v24.16b, v25.16b, v26.16b, v27.16b }, v4.16b\n"
        "tbl v3.16b, { v28.16b, v29.16b, v30.16b, v31.16b }, v3.16b\n"
        "orr v9.16b, v9.16b, v8.16b\n"
        "tbl v14.16b, { v16.16b, v17.16b, v18.16b, v19.16b }, v14.16b\n"
        "tbl v2.16b, { v20.16b, v21.16b, v22.16b, v23.16b }, v2.16b\n"
        "orr v7.16b, v7.16b, v6.16b\n"
        "tbl v1.16b, { v24.16b, v25.16b, v26.16b, v27.16b }, v1.16b\n"
        "tbl v0.16b, { v28.16b, v29.16b, v30.16b, v31.16b }, v0.16b\n"
        "orr v13.16b, v13.16b, v5.16b\n"
        "orr v4.16b, v4.16b, v3.16b\n"
        "orr v14.16b, v14.16b, v2.16b\n"
        "cmp x19, #0x30\n"
        "orr v1.16b, v1.16b, v0.16b\n"
        "orr v9.16b, v9.16b, v7.16b\n"
        "orr v13.16b, v13.16b, v4.16b\n"
        "orr v14.16b, v14.16b, v1.16b\n"
        "bge 53f\n"
        "tbz x19, #5, 36f\n"
        "st1 { v9.16b }, [x20], #0x10\n"
        "st1 { v13.16b }, [x20], #0x10\n"
        "tbz x19, #3, 32f\n"
        "str d14, [x20], #0x8\n"
        "tbz x19, #2, 30f\n"
        "st1 { v14.s }[2], [x20], #0x4\n"
        "tbz x19, #1, 29f\n"
        "st1 { v14.h }[6], [x20], #0x2\n"
        "tbz x19, #0, 52f\n"
        "st1 { v14.b }[14], [x20]\n"
        "b 52f\n"
        "29:" // 4 rounds: Partial writeback: partial_1_44
        "tbz x19, #0, 52f\n"
        "st1 { v14.b }[12], [x20]\n"
        "b 52f\n"
        "30:" // 4 rounds: Partial writeback: partial_2_40
        "tbz x19, #1, 31f\n"
        "st1 { v14.h }[4], [x20], #0x2\n"
        "tbz x19, #0, 52f\n"
        "st1 { v14.b }[10], [x20]\n"
        "b 52f\n"
        "31:" // 4 rounds: Partial writeback: partial_1_40
        "tbz x19, #0, 52f\n"
        "st1 { v14.b }[8], [x20]\n"
        "b 52f\n"
        "32:" // 4 rounds: Partial writeback: partial_4_32
        "tbz x19, #2, 34f\n"
        "str s14, [x20], #0x4\n"
        "tbz x19, #1, 33f\n"
        "st1 { v14.h }[2], [x20], #0x2\n"
        "tbz x19, #0, 52f\n"
        "st1 { v14.b }[6], [x20]\n"
        "b 52f\n"
        "33:" // 4 rounds: Partial writeback: partial_1_36
        "tbz x19, #0, 52f\n"
        "st1 { v14.b }[4], [x20]\n"
        "b 52f\n"
        "34:" // 4 rounds: Partial writeback: partial_2_32
        "tbz x19, #1, 35f\n"
        "str h14, [x20], #0x2\n"
        "tbz x19, #0, 52f\n"
        "st1 { v14.b }[2], [x20]\n"
        "b 52f\n"
        "35:" // 4 rounds: Partial writeback: partial_1_32
        "tbz x19, #0, 52f\n"
        "str b14, [x20, #0x0]\n"
        "b 52f\n"
        "36:" // 4 rounds: Partial writeback: partial_16_0
        "tbz x19, #4, 44f\n"
        "st1 { v9.16b }, [x20], #0x10\n"
        "tbz x19, #3, 40f\n"
        "str d13, [x20], #0x8\n"
        "tbz x19, #2, 38f\n"
        "st1 { v13.s }[2], [x20], #0x4\n"
        "tbz x19, #1, 37f\n"
        "st1 { v13.h }[6], [x20], #0x2\n"
        "tbz x19, #0, 52f\n"
        "st1 { v13.b }[14], [x20]\n"
        "b 52f\n"
        "37:" // 4 rounds: Partial writeback: partial_1_28
        "tbz x19, #0, 52f\n"
        "st1 { v13.b }[12], [x20]\n"
        "b 52f\n"
        "38:" // 4 rounds: Partial writeback: partial_2_24
        "tbz x19, #1, 39f\n"
        "st1 { v13.h }[4], [x20], #0x2\n"
        "tbz x19, #0, 52f\n"
        "st1 { v13.b }[10], [x20]\n"
        "b 52f\n"
        "39:" // 4 rounds: Partial writeback: partial_1_24
        "tbz x19, #0, 52f\n"
        "st1 { v13.b }[8], [x20]\n"
        "b 52f\n"
        "40:" // 4 rounds: Partial writeback: partial_4_16
        "tbz x19, #2, 42f\n"
        "str s13, [x20], #0x4\n"
        "tbz x19, #1, 41f\n"
        "st1 { v13.h }[2], [x20], #0x2\n"
        "tbz x19, #0, 52f\n"
        "st1 { v13.b }[6], [x20]\n"
        "b 52f\n"
        "41:" // 4 rounds: Partial writeback: partial_1_20
        "tbz x19, #0, 52f\n"
        "st1 { v13.b }[4], [x20]\n"
        "b 52f\n"
        "42:" // 4 rounds: Partial writeback: partial_2_16
        "tbz x19, #1, 43f\n"
        "str h13, [x20], #0x2\n"
        "tbz x19, #0, 52f\n"
        "st1 { v13.b }[2], [x20]\n"
        "b 52f\n"
        "43:" // 4 rounds: Partial writeback: partial_1_16
        "tbz x19, #0, 52f\n"
        "str b13, [x20, #0x0]\n"
        "b 52f\n"
        "44:" // 4 rounds: Partial writeback: partial_8_0
        "tbz x19, #3, 48f\n"
        "str d9, [x20], #0x8\n"
        "tbz x19, #2, 46f\n"
        "st1 { v9.s }[2], [x20], #0x4\n"
        "tbz x19, #1, 45f\n"
        "st1 { v9.h }[6], [x20], #0x2\n"
        "tbz x19, #0, 52f\n"
        "st1 { v9.b }[14], [x20]\n"
        "b 52f\n"
        "45:" // 4 rounds: Partial writeback: partial_1_12
        "tbz x19, #0, 52f\n"
        "st1 { v9.b }[12], [x20]\n"
        "b 52f\n"
        "46:" // 4 rounds: Partial writeback: partial_2_8
        "tbz x19, #1, 47f\n"
        "st1 { v9.h }[4], [x20], #0x2\n"
        "tbz x19, #0, 52f\n"
        "st1 { v9.b }[10], [x20]\n"
        "b 52f\n"
        "47:" // 4 rounds: Partial writeback: partial_1_8
        "tbz x19, #0, 52f\n"
        "st1 { v9.b }[8], [x20]\n"
        "b 52f\n"
        "48:" // 4 rounds: Partial writeback: partial_4_0
        "tbz x19, #2, 50f\n"
        "str s9, [x20], #0x4\n"
        "tbz x19, #1, 49f\n"
        "st1 { v9.h }[2], [x20], #0x2\n"
        "tbz x19, #0, 52f\n"
        "st1 { v9.b }[6], [x20]\n"
        "b 52f\n"
        "49:" // 4 rounds: Partial writeback: partial_1_4
        "tbz x19, #0, 52f\n"
        "st1 { v9.b }[4], [x20]\n"
        "b 52f\n"
        "50:" // 4 rounds: Partial writeback: partial_2_0
        "tbz x19, #1, 51f\n"
        "str h9, [x20], #0x2\n"
        "tbz x19, #0, 52f\n"
        "st1 { v9.b }[2], [x20]\n"
        "b 52f\n"
        "51:" // 4 rounds: Partial writeback: partial_1_0
        "str b9, [x20, #0x0]\n"
        "52:" // 4 rounds: Partial writeback: Done
        "b 54f\n"
        "53:" // 4 rounds: Full writeback
        "str q9, [x20, #0x0]\n"
        "str q13, [x20, #0x10]\n"
        "str q14, [x20, #0x20]\n"
        "add x20, x20, #0x30\n"
        "54:" // 4 rounds: Writeback done
        "subs x19, x19, #0x30\n"
        "bgt 2b\n"
        "add x22, x22, #0x1\n"
        "cmp x22, %x[num_strings]\n"
        "bne 1b\n"
        :
        : [input] "r"(input), [num_strings] "r"(num_strings), [output] "r"(output), [string_length] "r"(string_length), [table] "r"(table)
        : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x19", "x20", "x21", "x22");
}

#endif // __aarch64__
} // namespace

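/** QASYMM8 hard-swish via a precomputed 256-entry lookup table.
 *
 * act_info.lut() already maps every input byte to its hard-swish output
 * byte (dequantize, activate and requantize folded into one table), so each
 * window iteration hands a whole X row to substitute_bytes_neon.
 */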
void neon_qasymm8_hardswish_lut(const ITensor *src, ITensor *dst, const ActivationLayerInfo &act_info, const Window &window)
{
    ARM_COMPUTE_ERROR_ON(act_info.activation() != ActivationLayerInfo::ActivationFunction::HARD_SWISH);
#ifdef __aarch64__
    const int window_step_x  = src->info()->tensor_shape().x();
    Window win_collapsed = window.collapse_if_possible(window, Window::DimZ);
    win_collapsed.set(Window::DimX, Window::Dimension(0, 1, 1));
    Iterator input(src, win_collapsed);
    Iterator output(dst, win_collapsed);
    execute_window_loop(win_collapsed, [&](const Coordinates &)
    {
        const auto input_ptr  = reinterpret_cast<const uint8_t *>(input.ptr());
        auto       output_ptr = reinterpret_cast<uint8_t *>(output.ptr());
        substitute_bytes_neon(act_info.lut().data(), 1u, window_step_x, &input_ptr, &output_ptr);
    },
    input, output);
#else  // __aarch64__
    ARM_COMPUTE_UNUSED(src);
    ARM_COMPUTE_UNUSED(dst);
    ARM_COMPUTE_UNUSED(act_info);
    ARM_COMPUTE_UNUSED(window);
    ARM_COMPUTE_ERROR("LUT Only supported in aarch64.");
#endif // __aarch64__
}

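/** Generic QASYMM8 activation kernel.
 *
 * The ReLU family is evaluated directly in the quantized domain: a uint8
 * clamp followed by vmlaq_qasymm8, which re-quantizes each lane as
 * q * s + o. LOGISTIC, TANH and LEAKY_RELU dequantize to float32, apply the
 * function on float32x4 lanes and re-quantize against the output
 * quantization info. The vector loop covers 16 bytes per step and a scalar
 * loop mops up the remainder.
 */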
void neon_qasymm8_activation(const ITensor *src, ITensor *dst, const ActivationLayerInfo &act_info, const Window &window)
{
    constexpr int                                 window_step_x  = 16;
    const auto                                    window_start_x = static_cast<int>(window.x().start());
    const auto                                    window_end_x   = static_cast<int>(window.x().end());
    const ActivationLayerInfo::ActivationFunction act            = act_info.activation();

    Window win_collapsed = window.collapse_if_possible(window, Window::DimZ);
    win_collapsed.set(Window::DimX, Window::Dimension(0, 1, 1));

    Iterator input(src, win_collapsed);
    Iterator output(dst, win_collapsed);

    const UniformQuantizationInfo qi_in    = src->info()->quantization_info().uniform();
    const UniformQuantizationInfo qi_out   = dst->info()->quantization_info().uniform();
    const qasymm8_t               a        = quantize_qasymm8(act_info.a(), qi_in);
    const qasymm8_t               b        = quantize_qasymm8(act_info.b(), qi_in);
    const qasymm8x16_t            va       = vdupq_n_u8(a);
    const qasymm8x16_t            vb       = vdupq_n_u8(b);
    const qasymm8_t               const_0  = quantize_qasymm8(0.f, qi_in);
    const qasymm8x16_t            vconst_0 = vdupq_n_u8(const_0);
    const auto                    vconst_1 = vdupq_n_f32(1.f);
#ifndef __aarch64__
    const auto vconst_0_f32 = vdupq_n_f32(0.f);
#endif // __aarch64__
    const float32x4_t va_f32 = vdupq_n_f32(act_info.a());
    const float32x4_t vb_f32 = vdupq_n_f32(act_info.b());
    const float       a_f32  = act_info.a();
    const float       b_f32  = act_info.b();

    // Initialise scale/offset for re-quantization
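    // With the affine mapping r = scale * (q - offset), carrying a quantized
    // value from the input space to the output space reduces to
    // q_out = q_in * s + o, where s = scale_in / scale_out and
    // o = offset_out - offset_in * s.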
    float       s  = qi_in.scale / qi_out.scale;
    float       o  = -qi_in.offset * s + qi_out.offset;
    float32x4_t vs = vdupq_n_f32(s);
    float32x4_t vo = vdupq_n_f32(o);

    execute_window_loop(win_collapsed, [&](const Coordinates &)
    {
        const auto input_ptr  = reinterpret_cast<const qasymm8_t *>(input.ptr());
        const auto output_ptr = reinterpret_cast<qasymm8_t *>(output.ptr());

        wrapper::traits::neon_bitvector_t<qasymm8_t, wrapper::traits::BitWidth::W128> tmp;

        // Compute 16 (window_step_x) elements per iteration
        int x = window_start_x;
        for(; x <= (window_end_x - window_step_x); x += window_step_x)
        {
            const auto vin = wrapper::vloadq(input_ptr + x);
            if(act == ActivationLayerInfo::ActivationFunction::RELU)
            {
                // Perform activation
                tmp = vmaxq_u8(vconst_0, vin);
                // Re-quantize to new output space
                tmp = vmlaq_qasymm8(tmp, vs, vo);
            }
            else if(act == ActivationLayerInfo::ActivationFunction::BOUNDED_RELU)
            {
                // Perform activation
                tmp = vminq_u8(va, vmaxq_u8(vconst_0, vin));
                // Re-quantize to new output space
                tmp = vmlaq_qasymm8(tmp, vs, vo);
            }
            else if(act == ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU)
            {
                // Perform activation
                tmp = vminq_u8(va, vmaxq_u8(vb, vin));
                // Re-quantize to new output space
                tmp = vmlaq_qasymm8(tmp, vs, vo);
            }
            else if(act == ActivationLayerInfo::ActivationFunction::LOGISTIC)
            {
                // De-quantize
                const auto vin_deq = vdequantize(vin, qi_in);
                // Perform activation
                const float32x4x4_t tmp_dep =
                {
                    {
                        wrapper::vdiv(vconst_1, wrapper::vadd(vconst_1, wrapper::vexpq(wrapper::vneg(vin_deq.val[0])))),
                        wrapper::vdiv(vconst_1, wrapper::vadd(vconst_1, wrapper::vexpq(wrapper::vneg(vin_deq.val[1])))),
                        wrapper::vdiv(vconst_1, wrapper::vadd(vconst_1, wrapper::vexpq(wrapper::vneg(vin_deq.val[2])))),
                        wrapper::vdiv(vconst_1, wrapper::vadd(vconst_1, wrapper::vexpq(wrapper::vneg(vin_deq.val[3])))),
                    }
                };
                // Re-quantize to new output space
                tmp = vquantize(tmp_dep, qi_out);
            }
            else if(act == ActivationLayerInfo::ActivationFunction::TANH)
            {
                // De-quantize
                const auto vin_deq = vdequantize(vin, qi_in);
                // Perform activation
                const float32x4x4_t tmp_dep =
                {
                    {
                        wrapper::vmul(va_f32, wrapper::vtanh(wrapper::vmul(vin_deq.val[0], vb_f32))),
                        wrapper::vmul(va_f32, wrapper::vtanh(wrapper::vmul(vin_deq.val[1], vb_f32))),
                        wrapper::vmul(va_f32, wrapper::vtanh(wrapper::vmul(vin_deq.val[2], vb_f32))),
                        wrapper::vmul(va_f32, wrapper::vtanh(wrapper::vmul(vin_deq.val[3], vb_f32))),
                    }
                };
                // Re-quantize to new output space
                tmp = vquantize(tmp_dep, qi_out);
            }
            else if(act == ActivationLayerInfo::ActivationFunction::LEAKY_RELU)
            {
                const auto vin_deq = vdequantize(vin, qi_in);

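                // Per-lane mask of strictly positive inputs; vcgtz (compare
                // greater than zero) is only available on AArch64, so other
                // targets compare against an explicit zero vector.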
#ifdef __aarch64__
                const uint32x4x4_t pos_mask =
                {
                    {
                        wrapper::vcgtz(vin_deq.val[0]),
                        wrapper::vcgtz(vin_deq.val[1]),
                        wrapper::vcgtz(vin_deq.val[2]),
                        wrapper::vcgtz(vin_deq.val[3]),
                    }
                };
#else  // __aarch64__
                const uint32x4x4_t pos_mask =
                {
                    {
                        wrapper::vcgt(vin_deq.val[0], vconst_0_f32),
                        wrapper::vcgt(vin_deq.val[1], vconst_0_f32),
                        wrapper::vcgt(vin_deq.val[2], vconst_0_f32),
                        wrapper::vcgt(vin_deq.val[3], vconst_0_f32),
                    }
                };
#endif // __aarch64__

                const float32x4x4_t tmp_dep =
                {
                    {
                        wrapper::vbsl(pos_mask.val[0], vin_deq.val[0], wrapper::vmul(va_f32, vin_deq.val[0])),
                        wrapper::vbsl(pos_mask.val[1], vin_deq.val[1], wrapper::vmul(va_f32, vin_deq.val[1])),
                        wrapper::vbsl(pos_mask.val[2], vin_deq.val[2], wrapper::vmul(va_f32, vin_deq.val[2])),
                        wrapper::vbsl(pos_mask.val[3], vin_deq.val[3], wrapper::vmul(va_f32, vin_deq.val[3])),
                    }
                };

                tmp = vquantize(tmp_dep, qi_out);
            }
            else
            {
                ARM_COMPUTE_ERROR("Unsupported activation function");
            }
            wrapper::vstore(output_ptr + x, tmp);
        }

        // Compute left-over elements
        for(; x < window_end_x; ++x)
        {
            qasymm8_t in  = *(input_ptr + x);
            qasymm8_t tmp = 0;
            if(act == ActivationLayerInfo::ActivationFunction::RELU)
            {
                tmp = std::max(const_0, in);
                tmp = utility::clamp<int32_t, qasymm8_t>(tmp * s + o);
            }
            else if(act == ActivationLayerInfo::ActivationFunction::BOUNDED_RELU)
            {
                tmp = std::min(a, std::max(const_0, in));
                tmp = utility::clamp<int32_t, qasymm8_t>(tmp * s + o);
            }
            else if(act == ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU)
            {
                tmp = std::min(a, std::max(b, in));
                tmp = utility::clamp<int32_t, qasymm8_t>(tmp * s + o);
            }
            else if(act == ActivationLayerInfo::ActivationFunction::LOGISTIC)
            {
                float tmp_f = dequantize_qasymm8(in, qi_in);
                tmp_f       = 1.f / (1.f + std::exp(-tmp_f));
                tmp         = quantize_qasymm8(tmp_f, qi_out);
            }
            else if(act == ActivationLayerInfo::ActivationFunction::TANH)
            {
                float tmp_f = dequantize_qasymm8(in, qi_in);
                tmp_f       = a_f32 * std::tanh(b_f32 * tmp_f);
                tmp         = quantize_qasymm8(tmp_f, qi_out);
            }
            else if(act == ActivationLayerInfo::ActivationFunction::LEAKY_RELU)
            {
                float tmp_f = dequantize_qasymm8(in, qi_in);
                tmp_f       = tmp_f > 0 ? tmp_f : tmp_f * a_f32;
                tmp         = quantize_qasymm8(tmp_f, qi_out);
            }
            else
            {
                ARM_COMPUTE_ERROR("Unsupported activation function");
            }
            *(output_ptr + x) = tmp;
        }
    },
    input, output);
}
} // namespace cpu
} // namespace arm_compute