aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorAron Virginas-Tar <Aron.Virginas-Tar@arm.com>2019-11-18 16:21:23 +0000
committerAron Virginas-Tar <Aron.Virginas-Tar@arm.com>2019-11-18 16:21:23 +0000
commit9acf579752d7dbf43a26e933224854d2d003da30 (patch)
tree286a73e7722cdd4b69525812ed4d3f235a53ece2
parent65a1b1d600cbccf7269409cb7ca0947f0222cb8b (diff)
downloadandroid-nn-driver-9acf579752d7dbf43a26e933224854d2d003da30.tar.gz
IVGCVSW-4056 Update NNAPISupport.txt for 19.11
Signed-off-by: Aron Virginas-Tar <Aron.Virginas-Tar@arm.com> Change-Id: I3e8069d6cb4a2149396329fee4643d0fc5a12181
-rw-r--r--NnapiSupport.txt85
1 file changed, 48 insertions, 37 deletions
diff --git a/NnapiSupport.txt b/NnapiSupport.txt
index a3981578..3cd85cbe 100644
--- a/NnapiSupport.txt
+++ b/NnapiSupport.txt
@@ -14,52 +14,63 @@ The following AndroidNN HAL 1.0, 1.1 and 1.2 operations are currently supported:
AndroidNN operator Tensor type supported
ABS (FLOAT32)
-ADD (FLOAT32,QUANT8_ASYMM)
-AVERAGE_POOL_2D (FLOAT32,QUANT8_ASYMM)
-BATCH_TO_SPACE_ND (FLOAT32,QUANT8_ASYMM)
-CONCATENATION (FLOAT32,QUANT8_ASYMM)
-CONV_2D (FLOAT32,QUANT8_ASYMM,QUANT8_SYMM_PER_CHANNEL(only for weights))
-DEPTHWISE_CONV_2D (FLOAT32,QUANT8_ASYMM,QUANT8_SYMM_PER_CHANNEL(only for weights))
-DIV (FLOAT32,QUANT8_ASYMM)
-DEQUANTIZE (FLOAT32,QUANT8_ASYMM)
-EXPAND_DIMS (FLOAT32,QUANT8_ASYMM)
+ADD (FLOAT32, QUANT8_ASYMM)
+AVERAGE_POOL_2D (FLOAT32, QUANT8_ASYMM)
+BATCH_TO_SPACE_ND (FLOAT32, QUANT8_ASYMM)
+CONCATENATION (FLOAT32, QUANT8_ASYMM)
+CONV_2D (FLOAT32, QUANT8_ASYMM, QUANT8_SYMM_PER_CHANNEL(only for weights))
+DEPTH_TO_SPACE (FLOAT32, FLOAT16, QUANT8_ASYMM)
+DEPTHWISE_CONV_2D (FLOAT32, QUANT8_ASYMM, QUANT8_SYMM_PER_CHANNEL(only for weights))
+DEQUANTIZE (FLOAT32 (output only), QUANT8_ASYMM (input only))
+DIV (FLOAT32, QUANT8_ASYMM)
+EQUAL (FLOAT32, QUANT8_ASYMM)
+EXPAND_DIMS (FLOAT32, FLOAT16, QUANT8_ASYMM)
FLOOR (FLOAT32)
-FULLY_CONNECTED (FLOAT32,QUANT8_ASYMM)
-GROUPED_CONV_2D (FLOAT32,QUANT8_ASYMM,QUANT8_SYMM_PER_CHANNEL(only for weights))
+FULLY_CONNECTED (FLOAT32, QUANT8_ASYMM)
+GREATER (FLOAT32, QUANT8_ASYMM)
+GREATER_EQUAL (FLOAT32, QUANT8_ASYMM)
+GROUPED_CONV_2D (FLOAT32, QUANT8_ASYMM, QUANT8_SYMM_PER_CHANNEL(only for weights))
INSTANCE_NORMALIZATION (FLOAT32)
L2_NORMALIZATION (FLOAT32)
-L2_POOL_2D (FLOAT32,QUANT8_ASYMM)
+L2_POOL_2D (FLOAT32, QUANT8_ASYMM)
+LESS (FLOAT32, QUANT8_ASYMM)
+LESS_EQUAL (FLOAT32, QUANT8_ASYMM)
LOCAL_RESPONSE_NORMALIZATION (FLOAT32)
-LOGISTIC (FLOAT32,QUANT8_ASYMM)
+LOGISTIC (FLOAT32, QUANT8_ASYMM)
LOG_SOFTMAX (FLOAT32)
LSTM (FLOAT32)
-MAXIMUM (FLOAT32,QUANT8_ASYMM)
-MAX_POOL_2D (FLOAT32,QUANT8_ASYMM)
-MEAN (FLOAT32,QUANT8_ASYMM)
-MINIMUM (FLOAT32,QUANT8_ASYMM)
-MUL (FLOAT32,QUANT8_ASYMM)
-PAD (FLOAT32,QUANT8_ASYMM)
-PAD_V2 (FLOAT32,QUANT8_ASYMM)
-PRELU (FLOAT32,QUANT8_ASYMM)
-QUANTIZE (FLOAT32,QUANT8_ASYMM)
+MAXIMUM (FLOAT32, QUANT8_ASYMM)
+MAX_POOL_2D (FLOAT32, QUANT8_ASYMM)
+MEAN (FLOAT32, QUANT8_ASYMM)
+MINIMUM (FLOAT32, QUANT8_ASYMM)
+MUL (FLOAT32, QUANT8_ASYMM)
+NOT_EQUAL (FLOAT32, QUANT8_ASYMM)
+PAD (FLOAT32, QUANT8_ASYMM)
+PAD_V2 (FLOAT32, QUANT8_ASYMM)
+PRELU (FLOAT32, QUANT8_ASYMM)
+QUANTIZE (FLOAT32 (input only), QUANT8_ASYMM (output only))
QUANTIZED_16BIT_LSTM (QUANT8_ASYMM)
-RELU (FLOAT32,QUANT8_ASYMM)
-RELU1 (FLOAT32,QUANT8_ASYMM)
-RELU6 (FLOAT32,QUANT8_ASYMM)
-RESHAPE (FLOAT32,QUANT8_ASYMM)
-RESIZE_BILINEAR (FLOAT32,QUANT8_ASYMM)
-RESIZE_NEAREST_NEIGHBOR (FLOAT32,QUANT8_ASYMM)
+RELU (FLOAT32, QUANT8_ASYMM)
+RELU1 (FLOAT32, QUANT8_ASYMM)
+RELU6 (FLOAT32, QUANT8_ASYMM)
+RESHAPE (FLOAT32, QUANT8_ASYMM)
+RESIZE_BILINEAR (FLOAT32, QUANT8_ASYMM)
+RESIZE_NEAREST_NEIGHBOR (FLOAT32, QUANT8_ASYMM)
RSQRT (FLOAT32)
-SOFTMAX (FLOAT32,QUANT8_ASYMM)
-SPACE_TO_BATCH_ND (FLOAT32,QUANT8_ASYMM)
-SPACE_TO_DEPTH_ND (FLOAT32,QUANT8_ASYMM)
+SOFTMAX (FLOAT32, QUANT8_ASYMM)
+SPACE_TO_BATCH_ND (FLOAT32, QUANT8_ASYMM)
+SPACE_TO_DEPTH (FLOAT32, QUANT8_ASYMM)
SQRT (FLOAT32)
-SQUEEZE (FLOAT32,QUANT8_ASYMM)
-STRIDED_SLICE (FLOAT32,QUANT8_ASYMM)
-SUB (FLOAT32,QUANT8_ASYMM)
-TANH (FLOAT32,QUANT8_ASYMM)
-TRANSPOSE (FLOAT32,QUANT8_ASYMM)
-TRANSPOSE_CONV_2D (FLOAT32,QUANT8_ASYMM,QUANT8_SYMM_PER_CHANNEL(only for weights))
+SQUEEZE (FLOAT32, QUANT8_ASYMM)
+STRIDED_SLICE (FLOAT32, QUANT8_ASYMM)
+SUB (FLOAT32, QUANT8_ASYMM)
+TANH (FLOAT32, QUANT8_ASYMM)
+TRANSPOSE (FLOAT32, QUANT8_ASYMM)
+TRANSPOSE_CONV_2D (FLOAT32, QUANT8_ASYMM, QUANT8_SYMM_PER_CHANNEL(only for weights))
Where operations are not supported by the ArmNN Android NN Driver, the driver indicates this to the framework
appropriately and the framework implements those operations using a CPU implementation.
+
+NOTE: By convention, only those tensor types which are fully supported across all ArmNN backends
+have been listed above. FLOAT16 input tensors are partially supported by most HAL 1.2 operators on
+the GpuAcc and CpuRef backends, but not on CpuAcc.