From c6494ccc24d5db72e1a6afc0f71c18b5016027e8 Mon Sep 17 00:00:00 2001
From: nikraj01
Date: Wed, 22 May 2019 16:30:44 +0100
Subject: IVGCVSW-3093 Update NNAPISupport.txt for 19.05

Change-Id: I51b7a40214945ba89ff2fc4f44d86f47d2e9b13e
Signed-off-by: nikraj01
---
 NnapiSupport.txt | 24 ++++++++++++------------
 1 file changed, 12 insertions(+), 12 deletions(-)

diff --git a/NnapiSupport.txt b/NnapiSupport.txt
index c8f77e5f..36412512 100644
--- a/NnapiSupport.txt
+++ b/NnapiSupport.txt
@@ -12,35 +12,35 @@ For integration and usage documentation, please see README.md.
 The following AndroidNN operations are currently supported.
 
 AndroidNN operator             Tensor type supported
-ADD                            (FLOAT32,QUANT8_ASYMM)
+ADD                            (FLOAT32,QUANT8_ASYMM,QUANT16_SYMM)
 AVERAGE_POOL_2D                (FLOAT32,QUANT8_ASYMM)
 BATCH_TO_SPACE_ND              (FLOAT32,QUANT8_ASYMM)
-CONCATENATION                  (FLOAT32)
+CONCATENATION                  (FLOAT32,QUANT8_ASYMM,QUANT16_SYMM)
 CONV_2D                        (FLOAT32,QUANT8_ASYMM)
 DEPTHWISE_CONV_2D*             (FLOAT32,QUANT8_ASYMM)
-DIV                            (FLOAT32,QUANT8_ASYMM)
+DIV                            (FLOAT32,QUANT8_ASYMM,QUANT16_SYMM)
 FLOOR                          (FLOAT32)
 FULLY_CONNECTED                (FLOAT32,QUANT8_ASYMM)
 L2_NORMALIZATION               (FLOAT32)
-L2_POOL_2D                     (FLOAT32)
+L2_POOL_2D                     (FLOAT32,QUANT8_ASYMM)
 LOCAL_RESPONSE_NORMALIZATION   (FLOAT32)
-LOGISTIC                       (FLOAT32,QUANT8_ASYMM)
+LOGISTIC                       (FLOAT32,QUANT8_ASYMM,QUANT16_SYMM)
 LSTM                           (FLOAT32)
 MAX_POOL_2D                    (FLOAT32,QUANT8_ASYMM)
 MEAN                           (FLOAT32,QUANT8_ASYMM)
-MUL                            (FLOAT32,QUANT8_ASYMM)
+MUL                            (FLOAT32,QUANT8_ASYMM,QUANT16_SYMM)
 PAD                            (FLOAT32,QUANT8_ASYMM)
-RELU                           (FLOAT32,QUANT8_ASYMM)
-RELU1                          (FLOAT32,QUANT8_ASYMM)
-RELU6                          (FLOAT32,QUANT8_ASYMM)
+RELU                           (FLOAT32,QUANT8_ASYMM,QUANT16_SYMM)
+RELU1                          (FLOAT32,QUANT8_ASYMM,QUANT16_SYMM)
+RELU6                          (FLOAT32,QUANT8_ASYMM,QUANT16_SYMM)
 RESHAPE                        (FLOAT32,QUANT8_ASYMM)
-RESIZE_BILINEAR                (FLOAT32)
+RESIZE_BILINEAR                (FLOAT32,QUANT8_ASYMM)
 SOFTMAX                        (FLOAT32,QUANT8_ASYMM)
 SPACE_TO_BATCH_ND              (FLOAT32,QUANT8_ASYMM)
 SQUEEZE                        (FLOAT32,QUANT8_ASYMM)
 STRIDED_SLICE                  (FLOAT32,QUANT8_ASYMM)
-SUB                            (FLOAT32,QUANT8_ASYMM)
-TANH                           (FLOAT32)
+SUB                            (FLOAT32,QUANT8_ASYMM,QUANT16_SYMM)
+TANH                           (FLOAT32,QUANT8_ASYMM,QUANT16_SYMM)
 TRANSPOSE                      (FLOAT32,QUANT8_ASYMM)
 
 * Depthwise convolution only supports a value of 1 for the depth multiplier. In addition, the QUANT8_ASYMM version only supports 3x3 kernels.
-- 
cgit v1.2.1