------ ArmNN for Android NNAPI supported operations ------

This release of ArmNN for Android supports use as a driver for the Android Neural Networks API. It implements the
android.hardware.neuralnetworks@1.0, android.hardware.neuralnetworks@1.1 and android.hardware.neuralnetworks@1.2
HAL interfaces.

For more information on the Android Neural Networks API, see https://developer.android.com/ndk/guides/neuralnetworks/index.html

For integration and usage documentation, please see README.md.
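
As an illustration only (it is not part of the driver itself), the sketch below shows how an NNAPI
client builds a model containing one of the operations listed as supported below, an ADD on FLOAT32
tensors, using the NDK C API from <android/NeuralNetworks.h>. Whether the operation actually runs on
the ArmNN driver or on the framework's CPU implementation is decided by the Android NN runtime at
compilation time. Error handling is omitted for brevity.

    #include <android/NeuralNetworks.h>

    int build_add_model(ANeuralNetworksModel** outModel) {
        ANeuralNetworksModel* model = NULL;
        ANeuralNetworksModel_create(&model);

        /* Two 1x4 FLOAT32 input tensors and one output tensor of the same shape. */
        uint32_t dims[2] = {1, 4};
        ANeuralNetworksOperandType tensorType = {
            .type = ANEURALNETWORKS_TENSOR_FLOAT32,
            .dimensionCount = 2,
            .dimensions = dims,
            .scale = 0.0f,
            .zeroPoint = 0,
        };
        /* Scalar INT32 operand selecting the fused activation (none). */
        ANeuralNetworksOperandType activationType = {
            .type = ANEURALNETWORKS_INT32,
            .dimensionCount = 0,
            .dimensions = NULL,
            .scale = 0.0f,
            .zeroPoint = 0,
        };

        ANeuralNetworksModel_addOperand(model, &tensorType);     /* operand 0: input A    */
        ANeuralNetworksModel_addOperand(model, &tensorType);     /* operand 1: input B    */
        ANeuralNetworksModel_addOperand(model, &activationType); /* operand 2: activation */
        ANeuralNetworksModel_addOperand(model, &tensorType);     /* operand 3: output     */

        int32_t noActivation = ANEURALNETWORKS_FUSED_NONE;
        ANeuralNetworksModel_setOperandValue(model, 2, &noActivation, sizeof(noActivation));

        /* ADD takes two tensors plus the fused activation scalar, and produces one tensor. */
        uint32_t addInputs[3]  = {0, 1, 2};
        uint32_t addOutputs[1] = {3};
        ANeuralNetworksModel_addOperation(model, ANEURALNETWORKS_ADD,
                                          3, addInputs, 1, addOutputs);

        uint32_t modelInputs[2]  = {0, 1};
        uint32_t modelOutputs[1] = {3};
        ANeuralNetworksModel_identifyInputsAndOutputs(model, 2, modelInputs, 1, modelOutputs);
        ANeuralNetworksModel_finish(model);

        *outModel = model;
        return ANEURALNETWORKS_NO_ERROR;
    }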

--- Support for Android Neural Networks HAL operations ---

The following AndroidNN HAL 1.0, 1.1 and 1.2 operations are currently supported:

AndroidNN operator           Tensor types supported
ABS                          (FLOAT32)
ADD                          (FLOAT32,QUANT8_ASYMM)
AVERAGE_POOL_2D              (FLOAT32,QUANT8_ASYMM)
BATCH_TO_SPACE_ND            (FLOAT32,QUANT8_ASYMM)
CONCATENATION                (FLOAT32,QUANT8_ASYMM)
CONV_2D                      (FLOAT32,QUANT8_ASYMM,QUANT8_SYMM_PER_CHANNEL(only for weights))
DEPTHWISE_CONV_2D            (FLOAT32,QUANT8_ASYMM,QUANT8_SYMM_PER_CHANNEL(only for weights))
DEQUANTIZE                   (FLOAT32,QUANT8_ASYMM)
DIV                          (FLOAT32,QUANT8_ASYMM)
EXPAND_DIMS                  (FLOAT32,QUANT8_ASYMM)
FLOOR                        (FLOAT32)
FULLY_CONNECTED              (FLOAT32,QUANT8_ASYMM)
GROUPED_CONV_2D              (FLOAT32,QUANT8_ASYMM,QUANT8_SYMM_PER_CHANNEL(only for weights))
INSTANCE_NORMALIZATION       (FLOAT32)
L2_NORMALIZATION             (FLOAT32)
L2_POOL_2D                   (FLOAT32,QUANT8_ASYMM)
LOCAL_RESPONSE_NORMALIZATION (FLOAT32)
LOGISTIC                     (FLOAT32,QUANT8_ASYMM)
LOG_SOFTMAX                  (FLOAT32)
LSTM                         (FLOAT32)
MAXIMUM                      (FLOAT32,QUANT8_ASYMM)
MAX_POOL_2D                  (FLOAT32,QUANT8_ASYMM)
MEAN                         (FLOAT32,QUANT8_ASYMM)
MINIMUM                      (FLOAT32,QUANT8_ASYMM)
MUL                          (FLOAT32,QUANT8_ASYMM)
PAD                          (FLOAT32,QUANT8_ASYMM)
PAD_V2                       (FLOAT32,QUANT8_ASYMM)
PRELU                        (FLOAT32,QUANT8_ASYMM)
QUANTIZE                     (FLOAT32,QUANT8_ASYMM)
QUANTIZED_16BIT_LSTM         (QUANT8_ASYMM)
RELU                         (FLOAT32,QUANT8_ASYMM)
RELU1                        (FLOAT32,QUANT8_ASYMM)
RELU6                        (FLOAT32,QUANT8_ASYMM)
RESHAPE                      (FLOAT32,QUANT8_ASYMM)
RESIZE_BILINEAR              (FLOAT32,QUANT8_ASYMM)
RESIZE_NEAREST_NEIGHBOR      (FLOAT32,QUANT8_ASYMM)
RSQRT                        (FLOAT32)
SOFTMAX                      (FLOAT32,QUANT8_ASYMM)
SPACE_TO_BATCH_ND            (FLOAT32,QUANT8_ASYMM)
SPACE_TO_DEPTH               (FLOAT32,QUANT8_ASYMM)
SQRT                         (FLOAT32)
SQUEEZE                      (FLOAT32,QUANT8_ASYMM)
STRIDED_SLICE                (FLOAT32,QUANT8_ASYMM)
SUB                          (FLOAT32,QUANT8_ASYMM)
TANH                         (FLOAT32,QUANT8_ASYMM)
TRANSPOSE                    (FLOAT32,QUANT8_ASYMM)
TRANSPOSE_CONV_2D            (FLOAT32,QUANT8_ASYMM,QUANT8_SYMM_PER_CHANNEL(only for weights))

Where an operation is not supported by the ArmNN Android NN Driver, the driver reports this to the
framework, and the framework then runs that operation on its own CPU implementation instead. The
sketch below shows how an application can query this per-operation support at runtime.
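
The following is an illustrative sketch only; it assumes Android 10 (NNAPI API level 29), which
introduced the device discovery API, and a model with no more than 64 operations. It asks the NN
runtime which operations of a finished model each registered device (for example, the ArmNN driver)
claims to support, and hence which operations will fall back to the CPU.

    #include <android/NeuralNetworks.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* 'model' must already have been built and finished with ANeuralNetworksModel_finish().
       'numOperations' is the number of operations added to the model. */
    void print_supported_ops(const ANeuralNetworksModel* model, uint32_t numOperations) {
        uint32_t deviceCount = 0;
        ANeuralNetworks_getDeviceCount(&deviceCount);

        for (uint32_t i = 0; i < deviceCount; ++i) {
            ANeuralNetworksDevice* device = NULL;
            ANeuralNetworks_getDevice(i, &device);

            const char* name = NULL;
            ANeuralNetworksDevice_getName(device, &name);

            /* One flag per operation, in the order the operations were added to the model.
               64 is an arbitrary upper bound chosen for this sketch. */
            bool supported[64] = { false };
            const ANeuralNetworksDevice* const devices[1] = { device };
            ANeuralNetworksModel_getSupportedOperationsForDevices(model, devices, 1, supported);

            for (uint32_t op = 0; op < numOperations; ++op) {
                printf("device %s: operation %u %s\n", name, op,
                       supported[op] ? "supported" : "will fall back to CPU");
            }
        }
    }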