diff options
author | nikraj01 <nikhil.raj@arm.com> | 2019-05-22 16:30:44 +0100 |
---|---|---|
committer | Nikhil Raj Arm <nikhil.raj@arm.com> | 2019-05-23 08:44:16 +0000 |
commit | c6494ccc24d5db72e1a6afc0f71c18b5016027e8 (patch) | |
tree | 5bc12b7706a871f3b7d17a06881cddb75be0d96c | |
parent | d6539c58519962acd8967e93ceb2da049631c832 (diff) | |
download | android-nn-driver-c6494ccc24d5db72e1a6afc0f71c18b5016027e8.tar.gz |
IVGCVSW-3093 Update NNAPISupport.txt for 19.05
Change-Id: I51b7a40214945ba89ff2fc4f44d86f47d2e9b13e
Signed-off-by: nikraj01 <nikhil.raj@arm.com>
-rw-r--r-- | NnapiSupport.txt | 24 |
1 file changed, 12 insertions(+), 12 deletions(-)
diff --git a/NnapiSupport.txt b/NnapiSupport.txt
index c8f77e5f..36412512 100644
--- a/NnapiSupport.txt
+++ b/NnapiSupport.txt
@@ -12,35 +12,35 @@ For integration and usage documentation, please see README.md.
 
 The following AndroidNN operations are currently supported.
 
 AndroidNN operator             Tensor type supported
-ADD                            (FLOAT32,QUANT8_ASYMM)
+ADD                            (FLOAT32,QUANT8_ASYMM,QUANT16_SYMM)
 AVERAGE_POOL_2D                (FLOAT32,QUANT8_ASYMM)
 BATCH_TO_SPACE_ND              (FLOAT32,QUANT8_ASYMM)
-CONCATENATION                  (FLOAT32)
+CONCATENATION                  (FLOAT32,QUANT8_ASYMM,QUANT16_SYMM)
 CONV_2D                        (FLOAT32,QUANT8_ASYMM)
 DEPTHWISE_CONV_2D*             (FLOAT32,QUANT8_ASYMM)
-DIV                            (FLOAT32,QUANT8_ASYMM)
+DIV                            (FLOAT32,QUANT8_ASYMM,QUANT16_SYMM)
 FLOOR                          (FLOAT32)
 FULLY_CONNECTED                (FLOAT32,QUANT8_ASYMM)
 L2_NORMALIZATION               (FLOAT32)
-L2_POOL_2D                     (FLOAT32)
+L2_POOL_2D                     (FLOAT32,QUANT8_ASYMM)
 LOCAL_RESPONSE_NORMALIZATION   (FLOAT32)
-LOGISTIC                       (FLOAT32,QUANT8_ASYMM)
+LOGISTIC                       (FLOAT32,QUANT8_ASYMM,QUANT16_SYMM)
 LSTM                           (FLOAT32)
 MAX_POOL_2D                    (FLOAT32,QUANT8_ASYMM)
 MEAN                           (FLOAT32,QUANT8_ASYMM)
-MUL                            (FLOAT32,QUANT8_ASYMM)
+MUL                            (FLOAT32,QUANT8_ASYMM,QUANT16_SYMM)
 PAD                            (FLOAT32,QUANT8_ASYMM)
-RELU                           (FLOAT32,QUANT8_ASYMM)
-RELU1                          (FLOAT32,QUANT8_ASYMM)
-RELU6                          (FLOAT32,QUANT8_ASYMM)
+RELU                           (FLOAT32,QUANT8_ASYMM,QUANT16_SYMM)
+RELU1                          (FLOAT32,QUANT8_ASYMM,QUANT16_SYMM)
+RELU6                          (FLOAT32,QUANT8_ASYMM,QUANT16_SYMM)
 RESHAPE                        (FLOAT32,QUANT8_ASYMM)
-RESIZE_BILINEAR                (FLOAT32)
+RESIZE_BILINEAR                (FLOAT32,QUANT8_ASYMM)
 SOFTMAX                        (FLOAT32,QUANT8_ASYMM)
 SPACE_TO_BATCH_ND              (FLOAT32,QUANT8_ASYMM)
 SQUEEZE                        (FLOAT32,QUANT8_ASYMM)
 STRIDED_SLICE                  (FLOAT32,QUANT8_ASYMM)
-SUB                            (FLOAT32,QUANT8_ASYMM)
-TANH                           (FLOAT32)
+SUB                            (FLOAT32,QUANT8_ASYMM,QUANT16_SYMM)
+TANH                           (FLOAT32,QUANT8_ASYMM,QUANT16_SYMM)
 TRANSPOSE                      (FLOAT32,QUANT8_ASYMM)
 
 * Depthwise convolution only supports a value of 1 for the depth multiplier.
   In addition, the QUANT8_ASYMM version only supports 3x3 kernels.