author    Nikhil Raj <nikhil.raj@arm.com>    2021-05-10 16:27:26 +0100
committer Narumol Prangnawarat <narumol.prangnawarat@arm.com>    2021-05-11 20:02:25 +0100
commit    0257d1d60633e06fcb5c28d387d436734b4d6050 (patch)
tree      7337cf4c3f438dc0a2190b607956b01c5f3ac3a8
parent    fc628d8ab70b890846b9cf8319569e2dceafb26f (diff)
download  android-nn-driver-0257d1d60633e06fcb5c28d387d436734b4d6050.tar.gz
IVGCVSW-5903 Update android-nn-driver NnapiSupport.txt for 21.05
Signed-off-by: Nikhil Raj <nikhil.raj@arm.com>
Change-Id: I653a6ca0e88053c77adb9b7effb7a28ec1f12375
-rw-r--r--  NnapiSupport.txt  87
1 file changed, 45 insertions(+), 42 deletions(-)
diff --git a/NnapiSupport.txt b/NnapiSupport.txt
index 9fdb6171..10837a27 100644
--- a/NnapiSupport.txt
+++ b/NnapiSupport.txt
@@ -14,70 +14,73 @@ For integration and usage documentation, please see README.md.
The following AndroidNN HAL 1.0, 1.1, 1.2 and 1.3 operations are currently supported:
AndroidNN operator Tensor type supported
-ABS (FLOAT32)
-ADD (FLOAT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
-ARGMAX (FLOAT32, INT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
-ARGMIN (FLOAT32, INT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
-AVERAGE_POOL_2D (FLOAT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
-BATCH_TO_SPACE_ND (FLOAT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
+ABS (FLOAT32, FLOAT16, INT32)
+ADD (FLOAT32, FLOAT16, INT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
+ARGMAX (FLOAT32, FLOAT16, INT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
+ARGMIN (FLOAT32, FLOAT16, INT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
+AVERAGE_POOL_2D (FLOAT32, FLOAT16, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
+BATCH_TO_SPACE_ND (FLOAT32, FLOAT16, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
CAST (FLOAT32, FLOAT16, INT32, QUANT8_ASYMM)
CONCATENATION (FLOAT32, FLOAT16, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
CONV_2D (FLOAT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
DEPTH_TO_SPACE (FLOAT32, FLOAT16, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
DEPTHWISE_CONV_2D (FLOAT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
DEQUANTIZE (FLOAT32 (output only), QUANT8_ASYMM and QUANT8_ASYMM_SIGNED (input only))
-DIV (FLOAT32, QUANT8_ASYMM)
-ELU (FLOAT32, QUANT8_ASYMM)
-EQUAL (FLOAT32, INT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
+DIV (FLOAT32, FLOAT16, INT32)
+ELU (FLOAT32, FLOAT16, QUANT8_ASYMM)
+EQUAL (BOOL8, FLOAT32, FLOAT16, INT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
EXP (FLOAT32, FLOAT16)
-EXPAND_DIMS (FLOAT32, FLOAT16, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
+EXPAND_DIMS (FLOAT32, FLOAT16, INT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
FILL (FLOAT32, FLOAT16, INT32)
-FLOOR (FLOAT32)
+FLOOR (FLOAT32, FLOAT16)
FULLY_CONNECTED (FLOAT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
-GREATER (FLOAT32, INT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
-GREATER_EQUAL (FLOAT32, INT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
-GROUPED_CONV_2D (FLOAT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
-HARD_SWISH (FLOAT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
-INSTANCE_NORMALIZATION (FLOAT32)
+GREATER (BOOL8, FLOAT32, FLOAT16, INT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
+GREATER_EQUAL (BOOL8, FLOAT32, FLOAT16, INT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
+GROUPED_CONV_2D (FLOAT32, FLOAT16, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
+HARD_SWISH (FLOAT32, FLOAT16, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
+INSTANCE_NORMALIZATION (FLOAT32, FLOAT16)
L2_NORMALIZATION (FLOAT32)
-L2_POOL_2D (FLOAT32, QUANT8_ASYMM)
-LESS (FLOAT32, INT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
-LESS_EQUAL (FLOAT32, INT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
+L2_POOL_2D (FLOAT32, FLOAT16)
+LESS (BOOL8, FLOAT32, FLOAT16, INT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
+LESS_EQUAL (BOOL8, FLOAT32, FLOAT16, INT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
LOCAL_RESPONSE_NORMALIZATION (FLOAT32)
LOGICAL_AND (BOOL8)
LOGICAL_NOT (BOOL8)
LOGICAL_OR (BOOL8)
-LOGISTIC (FLOAT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
-LOG_SOFTMAX (FLOAT32)
+LOGISTIC (FLOAT32, FLOAT16, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
+LOG_SOFTMAX (FLOAT32, FLOAT16)
LSTM (FLOAT32)
-MAXIMUM (FLOAT32, INT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
-MAX_POOL_2D (FLOAT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
-MEAN (FLOAT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
-MINIMUM (FLOAT32, INT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
-MUL (FLOAT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
-NEG (FLOAT32)
-NOT_EQUAL (FLOAT32, INT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
-PAD (FLOAT32, FLOAT16, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
+MAXIMUM (FLOAT32, FLOAT16, INT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
+MAX_POOL_2D (FLOAT32, FLOAT16, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
+MEAN (FLOAT32, FLOAT16, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
+MINIMUM (FLOAT32, FLOAT16, INT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
+MUL (FLOAT32, FLOAT16, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
+NEG (FLOAT32, FLOAT16)
+NOT_EQUAL (BOOL8, FLOAT32, FLOAT16, INT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
+PAD (FLOAT32, FLOAT16, QUANT8_ASYMM)
PAD_V2 (FLOAT32, FLOAT16, QUANT8_ASYMM)
-PRELU (FLOAT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
+PRELU (FLOAT32, FLOAT16, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
QUANTIZE (FLOAT32 (input only), QUANT8_ASYMM and QUANT8_ASYMM_SIGNED (output only))
QUANTIZED_16BIT_LSTM (QUANT8_ASYMM)
QUANTIZED_LSTM (QUANT8_ASYMM)
-RELU (FLOAT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
-RELU1 (FLOAT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
-RELU6 (FLOAT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
+REDUCE_MAX (FLOAT32, FLOAT16, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
+REDUCE_MIN (FLOAT32, FLOAT16, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
+REDUCE_SUM (FLOAT32, FLOAT16)
+RELU (FLOAT32, FLOAT16, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
+RELU1 (FLOAT32, FLOAT16, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
+RELU6 (FLOAT32, FLOAT16, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
RESHAPE (FLOAT32, FLOAT16, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
-RESIZE_BILINEAR (FLOAT32, QUANT8_ASYMM)
-RESIZE_NEAREST_NEIGHBOR (FLOAT32, QUANT8_ASYMM)
-RSQRT (FLOAT32)
+RESIZE_BILINEAR (FLOAT32, FLOAT16, QUANT8_ASYMM)
+RESIZE_NEAREST_NEIGHBOR (FLOAT32, FLOAT16, QUANT8_ASYMM)
+RSQRT (FLOAT32, FLOAT16)
SOFTMAX (FLOAT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
-SPACE_TO_BATCH_ND (FLOAT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
+SPACE_TO_BATCH_ND (FLOAT32, FLOAT16, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
SPACE_TO_DEPTH (FLOAT32, FLOAT16, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
-SQRT (FLOAT32)
+SQRT (FLOAT32, FLOAT16)
SQUEEZE (FLOAT32, FLOAT16, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
STRIDED_SLICE (FLOAT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
-SUB (FLOAT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
-TANH (FLOAT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
+SUB (FLOAT32, FLOAT16, INT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
+TANH (FLOAT32, FLOAT16, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
TRANSPOSE (FLOAT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
TRANSPOSE_CONV_2D (FLOAT32, QUANT8_ASYMM, QUANT8_ASYMM_SIGNED)
@@ -86,5 +89,5 @@ appropriately and the framework implements those operations using a CPU implemen
NOTE: By convention, only those tensor types have been listed above, which are fully supported across all
ArmNN backends.
- - FLOAT16 input tensors are partially supported on most HAL 1.2 operators on the GpuAcc and
- CpuRef backends, however not on CpuAcc.
\ No newline at end of file
+ - FLOAT16 input tensors are partially supported on most HAL 1.2 and 1.3 operators on the GpuAcc and
+ CpuRef backends, however not on CpuAcc.
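
For readers unfamiliar with how the operations listed above are consumed, the following is a minimal,
illustrative sketch (not part of this repository) of building a one-operation NNAPI model in C that uses
ADD on FLOAT32 tensors, one of the combinations listed as supported. The tensor shape, the CHECK macro
and the function name build_add_model are assumptions made for illustration only; whether the operation
is ultimately executed by this driver depends on the device configuration and ArmNN backend support.

    /* Illustrative sketch only; not part of the android-nn-driver sources. */
    #include <android/NeuralNetworks.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Abort-on-error helper; purely illustrative. */
    #define CHECK(call)                                              \
        do {                                                         \
            if ((call) != ANEURALNETWORKS_NO_ERROR) {                \
                fprintf(stderr, "NNAPI call failed: %s\n", #call);   \
                return -1;                                           \
            }                                                        \
        } while (0)

    int build_add_model(void)
    {
        ANeuralNetworksModel* model = NULL;
        CHECK(ANeuralNetworksModel_create(&model));

        /* Two 1x2x2x1 FLOAT32 tensors in, one FLOAT32 tensor out. */
        uint32_t dims[4] = {1, 2, 2, 1};
        ANeuralNetworksOperandType tensorType = {
            .type = ANEURALNETWORKS_TENSOR_FLOAT32,
            .dimensionCount = 4,
            .dimensions = dims,
            .scale = 0.0f,
            .zeroPoint = 0,
        };
        /* Scalar INT32 operand holding the fused activation code. */
        ANeuralNetworksOperandType activationType = {
            .type = ANEURALNETWORKS_INT32,
            .dimensionCount = 0,
            .dimensions = NULL,
            .scale = 0.0f,
            .zeroPoint = 0,
        };

        CHECK(ANeuralNetworksModel_addOperand(model, &tensorType));     /* 0: input A    */
        CHECK(ANeuralNetworksModel_addOperand(model, &tensorType));     /* 1: input B    */
        CHECK(ANeuralNetworksModel_addOperand(model, &activationType)); /* 2: activation */
        CHECK(ANeuralNetworksModel_addOperand(model, &tensorType));     /* 3: output     */

        int32_t fuseNone = ANEURALNETWORKS_FUSED_NONE;
        CHECK(ANeuralNetworksModel_setOperandValue(model, 2, &fuseNone, sizeof(fuseNone)));

        uint32_t opInputs[3]  = {0, 1, 2};
        uint32_t opOutputs[1] = {3};
        CHECK(ANeuralNetworksModel_addOperation(model, ANEURALNETWORKS_ADD,
                                                3, opInputs, 1, opOutputs));

        uint32_t modelInputs[2]  = {0, 1};
        uint32_t modelOutputs[1] = {3};
        CHECK(ANeuralNetworksModel_identifyInputsAndOutputs(model, 2, modelInputs,
                                                            1, modelOutputs));
        CHECK(ANeuralNetworksModel_finish(model));

        /* A real client would now create a compilation and an execution;
           that part is omitted here. */
        ANeuralNetworksModel_free(model);
        return 0;
    }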