///
/// Copyright (c) 2021 Arm Limited.
///
/// SPDX-License-Identifier: MIT
///
/// Permission is hereby granted, free of charge, to any person obtaining a copy
/// of this software and associated documentation files (the "Software"), to
/// deal in the Software without restriction, including without limitation the
/// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
/// sell copies of the Software, and to permit persons to whom the Software is
/// furnished to do so, subject to the following conditions:
///
/// The above copyright notice and this permission notice shall be included in all
/// copies or substantial portions of the Software.
///
/// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
/// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
/// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
/// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
/// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
/// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
/// SOFTWARE.
///
namespace arm_compute
{
/**
@page operators_list Supported Operators
@tableofcontents
@section S9_1_operators_list Supported Operators
Compute Library supports the operators listed in the table below.
Compute Library supports a wide list of data-types; information can be found directly in the documentation of each kernel/function.
The main data-types that the Machine Learning functions support are the following:
Function
| Description
| Equivalent Android NNAPI Op
| Backends
| Data Layouts
| Data Types
|
ActivationLayer
| Function to simulate an activation layer with the specified activation function.
|
- ANEURALNETWORKS_ELU
- ANEURALNETWORKS_HARD_SWISH
- ANEURALNETWORKS_LOGISTIC
- ANEURALNETWORKS_RELU
- ANEURALNETWORKS_RELU1
- ANEURALNETWORKS_RELU6
- ANEURALNETWORKS_TANH
| NEActivationLayer
|
|
src | dst
|
---|
QASYMM8 | QASYMM8
| QASYMM8_SIGNED | QASYMM8_SIGNED
| QSYMM16 | QSYMM16
| F16 | F16
| F32 | F32
|
|
CLActivationLayer
|
|
src | dst
|
---|
QASYMM8 | QASYMM8
| QASYMM8_SIGNED | QASYMM8_SIGNED
| QSYMM16 | QSYMM16
| F16 | F16
| F32 | F32
|
|
ConcatenateLayer
| Function to concatenate tensors along a given axis.
|
- ANEURALNETWORKS_CONCATENATION
| NEConcatenateLayer
|
|
src | dst
|
---|
QASYMM8 | QASYMM8
| QASYMM8_SIGNED | QASYMM8_SIGNED
| F16 | F16
| F32 | F32
|
|
CLConcatenateLayer
|
|
src | dst
|
---|
QASYMM8 | QASYMM8
| QASYMM8_SIGNED | QASYMM8_SIGNED
| F16 | F16
| F32 | F32
|
|
ConvertFullyConnectedWeights
| Function to transpose the weights for the fully connected layer.
|
| NEConvertFullyConnectedWeights
|
|
|
CLConvertFullyConnectedWeights
|
|
|
Copy
| Function to copy a tensor.
|
| NECopy
|
|
|
CLCopy
|
|
|
DequantizationLayer
| Function to dequantize the values in a tensor
|
- ANEURALNETWORKS_DEQUANTIZE
| NEDequantizationLayer
|
|
src | dst
|
---|
QASYMM8 | F16
| QASYMM8 | F32
| QASYMM8_SIGNED | F16
| QASYMM8_SIGNED | F32
| QSYMM8_PER_CHANNEL | F16
| QSYMM8_PER_CHANNEL | F32
| QSYMM8 | F16
| QSYMM8 | F32
| QSYMM16 | F16
| QSYMM16 | F32
|
|
CLDequantizationLayer
|
|
src | dst
|
---|
QASYMM8 | F16
| QASYMM8 | F32
| QASYMM8_SIGNED | F16
| QASYMM8_SIGNED | F32
| QSYMM8_PER_CHANNEL | F16
| QSYMM8_PER_CHANNEL | F32
| QSYMM8 | F16
| QSYMM8 | F32
| QSYMM16 | F16
| QSYMM16 | F32
|
|
DirectConvolutionLayer
| Function to compute a direct convolution.
|
| NEDirectConvolutionLayer
|
|
src0 | src1 | src2 | dst
|
---|
F16 | F16 | F16 | F16
| F32 | F32 | F32 | F32
|
|
CLDirectConvolutionLayer
|
|
src0 | src1 | src2 | dst
|
---|
F16 | F16 | F16 | F16
| F32 | F32 | F32 | F32
| QASYMM8 | QASYMM8 | S32 | QASYMM8
| QASYMM8_SIGNED | QASYMM8_SIGNED | S32 | QASYMM8_SIGNED
|
|
FFT1D
| Fast Fourier Transform 1D
|
| NEFFT1D
|
|
|
CLFFT1D
|
|
|
FFT2D
| Fast Fourier Transform 2D
|
| NEFFT2D
|
|
|
CLFFT2D
|
|
|
FFTConvolutionLayer
| Fast Fourier Transform Convolution
|
| NEFFTConvolutionLayer
|
|
|
CLFFTConvolutionLayer
|
|
|
Fill
| Set the values of a tensor with a given value
|
| NEFill
|
|
|
CLFill
|
|
|
Floor
| Function to round each value down to the nearest integer.
|
| NEFloor
|
|
|
CLFloor
|
|
|
Permute
| Function to transpose an ND tensor.
|
- ANEURALNETWORKS_TRANSPOSE
| NEPermute
|
|
|
CLPermute
|
|
|
PixelWiseMultiplication
| Function to perform a multiplication.
|
| NEPixelWiseMultiplication
|
|
src0 | src1 | dst
|
---|
QASYMM8 | QASYMM8 | QASYMM8
| QASYMM8_SIGNED | QASYMM8_SIGNED | QASYMM8_SIGNED
| QSYMM16 | QSYMM16 | QSYMM16
| QSYMM16 | QSYMM16 | S32
| U8 | U8 | U8
| U8 | U8 | S16
| U8 | S16 | S16
| S16 | U8 | S16
| S16 | S16 | S16
| F16 | F16 | F16
| F32 | S32 | F32
|
|
CLPixelWiseMultiplication
|
|
src0 | src1 | dst
|
---|
QASYMM8 | QASYMM8 | QASYMM8
| QASYMM8_SIGNED | QASYMM8_SIGNED | QASYMM8_SIGNED
| QSYMM16 | QSYMM16 | QSYMM16
| QSYMM16 | QSYMM16 | S32
| U8 | U8 | U8
| U8 | U8 | S16
| U8 | S16 | S16
| S16 | U8 | S16
| S16 | S16 | S16
| F16 | F16 | F16
| F32 | S32 | F32
|
|
PoolingLayer
| Function to perform pooling with the specified pooling operation.
|
- ANEURALNETWORKS_AVERAGE_POOL_2D
- ANEURALNETWORKS_L2_POOL_2D
- ANEURALNETWORKS_MAX_POOL_2D
| NEPoolingLayer
|
|
src | dst
|
---|
QASYMM8 | QASYMM8
| QASYMM8_SIGNED | QASYMM8_SIGNED
| F16 | F16
| F32 | F32
|
|
CLPoolingLayer
|
|
src | dst
|
---|
QASYMM8 | QASYMM8
| QASYMM8_SIGNED | QASYMM8_SIGNED
| F16 | F16
| F32 | F32
|
|
PReluLayer
| Function to compute the activation layer with the PRELU activation function.
|
| NEPReluLayer
|
|
src | dst
|
---|
QASYMM8 | QASYMM8
| QASYMM8_SIGNED | QASYMM8_SIGNED
| F16 | F16
| F32 | F32
|
|
CLPReluLayer
|
|
src | dst
|
---|
QASYMM8 | QASYMM8
| QASYMM8_SIGNED | QASYMM8_SIGNED
| F16 | F16
| F32 | F32
|
|
QuantizationLayer
| Function to perform quantization layer
|
| NEQuantizationLayer
|
|
src | dst
|
---|
QASYMM8 | QASYMM8
| QASYMM8 | QASYMM8_SIGNED
| QASYMM8 | QASYMM16
| QASYMM8_SIGNED | QASYMM8
| QASYMM8_SIGNED | QASYMM8_SIGNED
| QASYMM8_SIGNED | QASYMM16
| F16 | QASYMM8
| F16 | QASYMM8_SIGNED
| F16 | QASYMM16
| F32 | QASYMM8
| F32 | QASYMM8_SIGNED
| F32 | QASYMM16
|
|
CLQuantizationLayer
|
|
src | dst
|
---|
QASYMM8 | QASYMM8
| QASYMM8 | QASYMM8_SIGNED
| QASYMM8 | QASYMM16
| QASYMM8_SIGNED | QASYMM8
| QASYMM8_SIGNED | QASYMM8_SIGNED
| QASYMM8_SIGNED | QASYMM16
| F16 | QASYMM8
| F16 | QASYMM8_SIGNED
| F16 | QASYMM16
| F32 | QASYMM8
| F32 | QASYMM8_SIGNED
| F32 | QASYMM16
|
|
ReshapeLayer
| Function to reshape a tensor.
|
- ANEURALNETWORKS_RESHAPE
- ANEURALNETWORKS_SQUEEZE
| NEReshapeLayer
|
|
|
CLReshapeLayer
|
|
|
Scale
| Function to resize a tensor using one of the following interpolation methods: - Bilinear - Nearest neighbor
|
- ANEURALNETWORKS_RESIZE_BILINEAR
- ANEURALNETWORKS_RESIZE_NEAREST_NEIGHBOR
| NEScale
|
|
src | dst
|
---|
QASYMM8 | QASYMM8
| QASYMM8_SIGNED | QASYMM8_SIGNED
| F16 | F16
| F32 | F32
| U8 | U8
| S16 | S16
|
|
CLScale
|
|
src | dst
|
---|
QASYMM8 | QASYMM8
| QASYMM8_SIGNED | QASYMM8_SIGNED
| F16 | F16
| F32 | F32
| U8 | U8
| S16 | S16
|
|
Slice
| Function to perform tensor slicing.
|
| NESlice
|
|
|
CLSlice
|
|
|
StridedSlice
| Function to extract a strided slice of a tensor.
|
- ANEURALNETWORKS_STRIDED_SLICE
| NEStridedSlice
|
|
|
CLStridedSlice
|
|
|
Transpose
| Function to transpose a 2D tensor.
|
- ANEURALNETWORKS_TRANSPOSE
| NETranspose
|
|
|
CLTranspose
|
|
|
*/
} // namespace arm_compute