From 62687420901c12be609426f3cf9dee300d25746a Mon Sep 17 00:00:00 2001 From: Teresa Charlin Date: Wed, 28 Apr 2021 10:58:49 +0100 Subject: Update operator list documentation. Part 2. All data type and data layout information for the operators are store in the function header files Signed-off-by: Teresa Charlin Change-Id: I30b564f7eda6bbd99bf3ad36ddb6639ac118eb8b Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/c/VisualCompute/ComputeLibrary/+/319829 Tested-by: bsgcomp Reviewed-by: Michele DiGiorgio Comments-Addressed: bsgcomp Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/5531 Tested-by: Arm Jenkins Reviewed-by: SiCong Li Comments-Addressed: Arm Jenkins --- docs/09_operators_list.dox | 2156 ++++++++++++++++++++++++++++++++++++++------ 1 file changed, 1894 insertions(+), 262 deletions(-) (limited to 'docs/09_operators_list.dox') diff --git a/docs/09_operators_list.dox b/docs/09_operators_list.dox index 82a127bbd3..244f292f82 100644 --- a/docs/09_operators_list.dox +++ b/docs/09_operators_list.dox @@ -107,14 +107,1531 @@ where N = batches, C = channels, H = height, W = width F16F16 F32F32 + + ArgMinMaxLayer + Function to calculate the index of the minimum or maximum values in a tensor based on an axis. + +
    +
  • ANEURALNETWORKS_ARGMAX +
  • ANEURALNETWORKS_ARGMIN +
+ NEArgMinMaxLayer + +
    +
  • All +
+ + +
srcdst +
QASYMM8U32, S32 +
QASYMM8_SIGNEDU32, S32 +
S32U32, S32 +
F16U32, S32 +
F32U32, S32 +
+ + CLArgMinMaxLayer + +
    +
  • All +
+ + +
srcdst +
QASYMM8U32, S32 +
QASYMM8_SIGNEDU32, S32 +
S32U32, S32 +
F16U32, S32 +
F32U32, S32 +
+ + BatchNormalizationLayer + Function to perform batch normalization. + +
    +
  • n/a +
+ NEBatchNormalizationLayer + +
    +
  • NHWC +
  • NCHW +
+ + +
srcdst +
F32F32 +
F16F16 +
+ + CLBatchNormalizationLayer + +
    +
  • NHWC +
  • NCHW +
+ + +
srcdst +
F32F32 +
F16F16 +
+ + BatchToSpaceLayer + Batch to space transformation. + +
    +
  • ANEURALNETWORKS_BATCH_TO_SPACE_ND +
+ NEBatchToSpaceLayer + +
    +
  • NHWC +
  • NCHW +
+ + +
src0src1dst +
Alls32All +
+ + CLBatchToSpaceLayer + +
    +
  • NHWC +
  • NCHW +
+ + +
src0src1dst +
Alls32All +
+ + BitwiseAnd + Function to perform bitwise AND between 2 tensors. + +
    +
  • ANEURALNETWORKS_LOGICAL_AND +
+ NEBitwiseAnd + +
    +
  • All +
+ + +
srcdst +
U8U8 +
+ + CLBitwiseAnd + +
    +
  • All +
+ + +
srcdst +
U8U8 +
+ + BitwiseNot + Function to perform bitwise NOT. + +
    +
  • ANEURALNETWORKS_LOGICAL_NOT +
+ NEBitwiseNot + +
    +
  • All +
+ + +
srcdst +
U8U8 +
+ + CLBitwiseNot + +
    +
  • All +
+ + +
srcdst +
U8U8 +
+ + BitwiseOr + Function to perform bitwise OR between 2 tensors. + +
    +
  • ANEURALNETWORKS_LOGICAL_OR +
+ NEBitwiseOr + +
    +
  • All +
+ + +
srcdst +
U8U8 +
+ + CLBitwiseOr + +
    +
  • All +
+ + +
srcdst +
U8U8 +
+ + BitwiseXor + Function to perform bitwise XOR between 2 tensors. + +
    +
  • n/a +
+ NEBitwiseXor + +
    +
  • All +
+ + +
srcdst +
U8U8 +
+ + CLBitwiseXor + +
    +
  • All +
+ + +
srcdst +
U8U8 +
+ + BoundingBoxTransform + Transform proposal bounding boxes to target bounding box using bounding box deltas. + +
    +
  • n/a +
+ NEBoundingBoxTransform + +
    +
  • NHWC +
  • NCHW +
+ + +
src0src1dst +
QASYMM16QASYMM8QASYMM16 +
F16F16F16 +
F32F32F32 +
+ + CLBoundingBoxTransform + +
    +
  • NHWC +
  • NCHW +
+ + +
src0src1dst +
QASYMM16QASYMM8QASYMM16 +
F16F16F16 +
F32F32F32 +
+ + Cast + Function to cast a tensor. + +
    +
  • ANEURALNETWORKS_CAST +
+ NECast + +
    +
  • All +
+ + +
srcdst +
QASYMM8_SIGNEDS16, S32, F32, F16 +
QASYMM8U16, S16, S32, F32, F16 +
U8U16, S16, S32, F32, F16 +
U16U8, U32 +
S16QASYMM8_SIGNED, U8, S32 +
F16QASYMM8_SIGNED, QASYMM8, F32, S32, U8 +
S32QASYMM8_SIGNED, QASYMM8, F16, F32, U8 +
F32QASYMM8_SIGNED, QASYMM8, BFLOAT16, F16, S32, U8 +
+ + CLCast + +
    +
  • All +
+ + +
srcdst +
U8S8, U16, S16, U32, S32, F16, F32 +
U16U8, S8, S16, U32, S32, F16, F32 +
S16U8, S8, U16, U32, S32, F16, F32 +
U32U8, S8, U16, S16, S32, F16, F32 +
S32U8, S8, U16, S16, U32, F16, F32 +
F16U8, S8, U16, S16, U32, F32 +
F32U8, S8, U16, S16, U32, F16 +
+ + ChannelShuffleLayer + Function to shuffle the channels of the input tensor. + +
    +
  • ANEURALNETWORKS_CHANNEL_SHUFFLE +
+ NEChannelShuffleLayer + +
    +
  • NCHW +
+ + +
srcdst +
AllAll +
+ + CLChannelShuffleLayer + +
    +
  • NCHW +
+ + +
srcdst +
AllAll +
ConcatenateLayer Function to concatenate tensors along a given axis.
    -
  • ANEURALNETWORKS_CONCATENATION +
  • ANEURALNETWORKS_CONCATENATION +
+ NEConcatenateLayer + +
    +
  • All +
+ + +
srcdst +
QASYMM8QASYMM8 +
QASYMM8_SIGNEDQASYMM8_SIGNED +
F16F16 +
F32F32 +
+ + CLConcatenateLayer + +
    +
  • All +
+ + +
srcdst +
QASYMM8QASYMM8 +
QASYMM8_SIGNEDQASYMM8_SIGNED +
F16F16 +
F32F32 +
+ + ConvertFullyConnectedWeights + Function to transpose the weights for the fully connected layer. + +
    +
  • n/a +
+ NEConvertFullyConnectedWeights + +
    +
  • NHWC +
  • NCHW +
+ + +
srcdst +
AllAll +
+ + CLConvertFullyConnectedWeights + +
    +
  • NHWC +
  • NCHW +
+ + +
srcdst +
AllAll +
+ + ConvolutionLayer + Function to compute a convolution layer. + +
    +
  • ANEURALNETWORKS_CONV_2D +
+ NEConvolutionLayer + +
    +
  • NHWC +
  • NCHW +
+ + +
src0src1src2dst +
F16F16F16F16 +
F32F32F32F32 +
QASYMM8QASYMM8S32QASYMM8 +
QASYMM8QSYMM8_PER_CHANNELS32QASYMM8 +
QASYMM8_SIGNEDQASYMM8_SIGNEDS32QASYMM8_SIGNED +
QASYMM8_SIGNEDQSYMM8_PER_CHANNELS32QASYMM8_SIGNED +
+ + CLConvolutionLayer + +
    +
  • NHWC +
  • NCHW +
+ + +
src0src1src2dst +
F16F16F16F16 +
F32F32F32F32 +
QASYMM8QASYMM8S32QASYMM8 +
QASYMM8QSYMM8_PER_CHANNELS32QASYMM8 +
QASYMM8_SIGNEDQASYMM8_SIGNEDS32QASYMM8_SIGNED +
QASYMM8_SIGNEDQSYMM8_PER_CHANNELS32QASYMM8_SIGNED +
+ + Copy + Function to copy a tensor. + +
    +
  • n/a +
+ NECopy + +
    +
  • All +
+ + +
srcdst +
AllAll +
+ + CLCopy + +
    +
  • All +
+ + +
srcdst +
AllAll +
+ + CropResize + Function to perform cropping and resizing. + +
    +
  • n/a +
+ NECropResize + +
    +
  • NHWC +
+ + +
src0src1src2dst +
AllF32F32F32 +
+ + CLCropResize + +
    +
  • NHWC +
+ + +
src0src1src2dst +
AllF32F32F32 +
+ + DeconvolutionLayer + Function to compute a deconvolution or transpose convolution. + +
    +
  • ANEURALNETWORKS_TRANSPOSE_CONV_2D +
+ NEDeconvolutionLayer + +
    +
  • NHWC +
  • NCHW +
+ + +
src0src1src2dst +
F16F16F16F16 +
F32F32F32F32 +
QASYMM8QASYMM8S32QASYMM8 +
QASYMM8QSYMM8_PER_CHANNELS32QASYMM8 +
QASYMM8_SIGNEDQASYMM8_SIGNEDS32QASYMM8_SIGNED +
QASYMM8_SIGNEDQSYMM8_PER_CHANNELS32QASYMM8_SIGNED +
+ + CLDeconvolutionLayer + +
    +
  • NHWC +
  • NCHW +
+ + +
src0src1src2dst +
F16F16F16F16 +
F32F32F32F32 +
QASYMM8QASYMM8S32QASYMM8 +
QASYMM8QSYMM8_PER_CHANNELS32QASYMM8 +
QASYMM8_SIGNEDQASYMM8_SIGNEDS32QASYMM8_SIGNED +
QASYMM8_SIGNEDQSYMM8_PER_CHANNELS32QASYMM8_SIGNED +
+ + DepthConvertLayer + Performs a down-scaling depth conversion. + +
    +
  • n/a +
+ NEDepthConvertLayer + +
    +
  • All +
+ + +
srcdst +
QASYMM8F16, F32 +
U8U16, S16, S32 +
U16U8, U32 +
S16U8, S32 +
BFLOAT16F32 +
F16QASYMM8, F32 +
F32QASYMM8, F16, BFLOAT16 +
+ + CLDepthConvertLayer + +
    +
  • All +
+ + +
srcdst +
U8S8, U16, S16, U32, S32, F16, F32 +
U16U8, S8, S16, U32, S32, F16, F32 +
S16U8, S8, U16, U32, S32, F16, F32 +
U32U8, S8, U16, S16, S32, F16, F32 +
S32U8, S8, U16, S16, U32, F16, F32 +
F16U8, S8, U16, S16, U32, F32 +
F32U8, S8, U16, S16, U32, F16 +
+ + DepthToSpaceLayer + Depth to Space transformation. + +
    +
  • ANEURALNETWORKS_DEPTH_TO_SPACE +
+ NEDepthToSpaceLayer + +
    +
  • NHWC +
  • NCHW +
+ + +
srcdst +
AllAll +
+ + CLDepthToSpaceLayer + +
    +
  • NHWC +
  • NCHW +
+ + +
srcdst +
AllAll +
+ + DepthwiseConvolutionLayer + Function to perform depthwise separable convolution. + +
    +
  • ANEURALNETWORKS_DEPTHWISE_CONV_2D +
+ NEDepthwiseConvolutionLayer + +
    +
  • NHWC +
  • NCHW +
+ + +
src0src1src2dst +
F16F16F16F16 +
F32F32F32F32 +
QASYMM8QASYMM8S32QASYMM8 +
QASYMM8QSYMM8_PER_CHANNELS32QASYMM8 +
QASYMM8_SIGNEDQASYMM8_SIGNEDS32QASYMM8_SIGNED +
QASYMM8_SIGNEDQSYMM8_PER_CHANNELS32QASYMM8_SIGNED +
+ + CLDepthwiseConvolutionLayer + +
    +
  • NHWC +
  • NCHW +
+ + +
src0src1src2dst +
F16F16F16F16 +
F32F32F32F32 +
QASYMM8QASYMM8S32QASYMM8 +
QASYMM8QSYMM8_PER_CHANNELS32QASYMM8 +
QASYMM8_SIGNEDQASYMM8_SIGNEDS32QASYMM8_SIGNED +
QASYMM8_SIGNEDQSYMM8_PER_CHANNELS32QASYMM8_SIGNED +
+ + DequantizationLayer + Function to dequantize the values in a tensor. + +
    +
  • ANEURALNETWORKS_DEQUANTIZE +
+ NEDequantizationLayer + +
    +
  • All +
+ + +
srcdst +
QASYMM8F16, F32 +
QASYMM8_SIGNEDF16, F32 +
QSYMM8_PER_CHANNELF16, F32 +
QSYMM8F16, F32 +
QSYMM16F16, F32 +
+ + CLDequantizationLayer + +
    +
  • All +
+ + +
srcdst +
QASYMM8F16, F32 +
QASYMM8_SIGNEDF16, F32 +
QSYMM8_PER_CHANNELF16, F32 +
QSYMM8F16, F32 +
QSYMM16F16, F32 +
+ + DirectConvolutionLayer + Function to compute direct convolution. + +
    +
  • ANEURALNETWORKS_CONV_2D +
+ NEDirectConvolutionLayer + +
    +
  • NHWC +
  • NCHW +
+ + +
src0src1src2dst +
F16F16F16F16 +
F32F32F32F32 +
+ + CLDirectConvolutionLayer + +
    +
  • NHWC +
  • NCHW +
+ + +
src0src1src2dst +
F16F16F16F16 +
F32F32F32F32 +
QASYMM8QASYMM8S32QASYMM8 +
QASYMM8_SIGNEDQASYMM8_SIGNEDS32QASYMM8_SIGNED +
+ + FFT1D + Fast Fourier Transform 1D. + +
    +
  • n/a +
+ NEFFT1D + +
    +
  • All +
+ + +
srcdst +
F32F32 +
+ + CLFFT1D + +
    +
  • All +
+ + +
srcdst +
F32F32 +
F16F16 +
+ + FFT2D + Fast Fourier Transform 2D. + +
    +
  • n/a +
+ NEFFT2D + +
    +
  • All +
+ + +
srcdst +
F32F32 +
+ + CLFFT2D + +
    +
  • All +
+ + +
srcdst +
F32F32 +
F16F16 +
+ + FFTConvolutionLayer + Fast Fourier Transform Convolution. + +
    +
  • ANEURALNETWORKS_CONV_2D +
+ NEFFTConvolutionLayer + +
    +
  • All +
+ + +
srcdst +
F32F32 +
+ + CLFFTConvolutionLayer + +
    +
  • All +
+ + +
srcdst +
F32F32 +
F16F16 +
+ + Fill + Set the values of a tensor with a given value. + +
    +
  • ANEURALNETWORKS_FILL +
+ NEFill + +
    +
  • All +
+ + +
srcdst +
AllAll +
+ + CLFill + +
    +
  • All +
+ + +
srcdst +
AllAll +
+ + FillBorder + Function to fill the borders of a tensor. + +
    +
  • n/a +
+ NEFillBorder + +
    +
  • All +
+ + +
srcdst +
AllAll +
+ + CLFillBorder + +
    +
  • All +
+ + +
srcdst +
AllAll +
+ + FlattenLayer + Reshape a tensor to be 1D + +
    +
  • ANEURALNETWORKS_RESHAPE +
+ NEFlattenLayer + +
    +
  • All +
+ + +
srcdst +
AllAll +
+ + CLFlattenLayer + +
    +
  • All +
+ + +
srcdst +
AllAll +
+ + Floor + Round the value down to the nearest integer. + +
    +
  • ANEURALNETWORKS_FLOOR +
+ NEFloor + +
    +
  • All +
+ + +
srcdst +
F32F32 +
F16F16 +
+ + CLFloor + +
    +
  • All +
+ + +
srcdst +
F32F32 +
F16F16 +
+ + FullyConnectedLayer + Function to perform a fully connected / dense layer. + +
    +
  • ANEURALNETWORKS_FULLY_CONNECTED +
+ NEFullyConnectedLayerReshapeWeightsManaged + +
    +
  • NHWC +
  • NCHW +
+ + +
src0src1src2dst +
F16F16F16F16 +
F32F32F32F32 +
QASYMM8QASYMM8S32QASYMM8 +
QASYMM8_SIGNEDQASYMM8_SIGNEDS32QASYMM8_SIGNED +
+ + CLFullyConnectedLayerReshapeWeightsManaged + +
    +
  • NHWC +
  • NCHW +
+ + +
src0src1src2dst +
F16F16F16F16 +
F32F32F32F32 +
QASYMM8QASYMM8S32QASYMM8 +
QASYMM8_SIGNEDQASYMM8_SIGNEDS32QASYMM8_SIGNED +
+ + FuseBatchNormalization + Function to fuse the batch normalization node to a preceding convolution node. + +
    +
  • n/a +
+ NEFuseBatchNormalization + +
    +
  • NHWC +
  • NCHW +
+ + +
srcdst +
F32F32 +
F16F16 +
+ + CLFuseBatchNormalization + +
    +
  • NHWC +
  • NCHW +
+ + +
srcdst +
F32F32 +
F16F16 +
+ + Gather + Performs the Gather operation along the chosen axis. + +
    +
  • ANEURALNETWORKS_GATHER +
+ NEGather + +
    +
  • All +
+ + +
srcdst +
AllAll +
+ + CLGather + +
    +
  • All +
+ + +
srcdst +
AllAll +
+ + GEMM + General Matrix Multiplication. + +
    +
  • n/a +
+ NEGEMM + +
    +
  • All +
+ + +
src0src1src2dst +
F32F32F32F32 +
F16F16F16F16 +
BFLOAT16BFLOAT16BFLOAT16BFLOAT16 +
+ + CLGEMMReshapeRHSMatrixKernelManaged + +
    +
  • All +
+ + +
src0src1src2dst +
F32F32F32F32 +
F16F16F16F16 +
+ + GEMMConvolutionLayer + General Matrix Multiplication. + +
    +
  • ANEURALNETWORKS_CONV_2D +
+ NEConvolutionLayerReshapeWeights + +
    +
  • NHWC +
  • NCHW +
+ + +
src0src1src2dst +
F16F16F16F16 +
F32F32F32F32 +
BFLOAT16BFLOAT16BFLOAT16BFLOAT16 +
QASYMM8QASYMM8S32QASYMM8 +
QASYMM8QSYMM8_PER_CHANNELS32QASYMM8 +
QASYMM8_SIGNEDQASYMM8_SIGNEDS32QASYMM8_SIGNED +
QASYMM8_SIGNEDQSYMM8_PER_CHANNELS32QASYMM8_SIGNED +
+ + CLConvolutionLayerReshapeWeights + +
    +
  • NHWC +
  • NCHW +
+ + +
src0src1src2dst +
F16F16F16F16 +
F32F32F32F32 +
QASYMM8QASYMM8S32QASYMM8 +
QASYMM8QSYMM8_PER_CHANNELS32QASYMM8 +
QASYMM8_SIGNEDQASYMM8_SIGNEDS32QASYMM8_SIGNED +
QASYMM8_SIGNEDQSYMM8_PER_CHANNELS32QASYMM8_SIGNED +
+ + GEMMLowpMatrixMultiplyCore + General Matrix Multiplication. + +
    +
  • n/a +
+ NEGEMMLowpMatrixMultiplyCore + +
    +
  • NHWC +
  • NCHW +
+ + +
src0src1src2dst +
QASYMM8QASYMM8S32QASYMM8 +
QASYMM8QSYMM8_PER_CHANNELS32QASYMM8 +
QASYMM8QSYMM8S32QASYMM8 +
QASYMM8QASYMM8S32S32 +
QASYMM8QSYMM8_PER_CHANNELS32S32 +
QASYMM8QSYMM8S32S32 +
QASYMM8_SIGNEDQASYMM8_SIGNEDS32QASYMM8_SIGNED +
QASYMM8_SIGNEDQSYMM8_PER_CHANNELS32QASYMM8_SIGNED +
QASYMM8_SIGNEDQSYMM8S32QASYMM8_SIGNED +
QASYMM8_SIGNEDQASYMM8_SIGNEDS32S32 +
QASYMM8_SIGNEDQSYMM8_PER_CHANNELS32S32 +
QASYMM8_SIGNEDQSYMM8S32S32 +
+ + CLGEMMLowpMatrixMultiplyCore + +
    +
  • NHWC +
  • NCHW +
+ + +
src0src1src2dst +
QASYMM8QASYMM8S32QASYMM8 +
QASYMM8QSYMM8_PER_CHANNELS32QASYMM8 +
QASYMM8QSYMM8S32QASYMM8 +
QASYMM8QASYMM8S32S32 +
QASYMM8QSYMM8_PER_CHANNELS32S32 +
QASYMM8QSYMM8S32S32 +
QASYMM8_SIGNEDQASYMM8_SIGNEDS32QASYMM8_SIGNED +
QASYMM8_SIGNEDQSYMM8_PER_CHANNELS32QASYMM8_SIGNED +
QASYMM8_SIGNEDQSYMM8S32QASYMM8_SIGNED +
QASYMM8_SIGNEDQASYMM8_SIGNEDS32S32 +
QASYMM8_SIGNEDQSYMM8_PER_CHANNELS32S32 +
QASYMM8_SIGNEDQSYMM8S32S32 +
+ + GenerateProposalsLayer + Function to generate proposals for an RPN (Region Proposal Network). + +
    +
  • ANEURALNETWORKS_GENERATE_PROPOSALS +
+ NEGenerateProposalsLayer + +
    +
  • All +
+ + +
src0src1src2dst +
F16F16F16F16 +
F32F32F32F32 +
QASYMM8QSYMM8QSYMM16QASYMM8 +
+ + CLGenerateProposalsLayer + +
    +
  • All +
+ + +
src0src1src2dst +
F16F16F16F16 +
F32F32F32F32 +
QASYMM8QSYMM8QSYMM16QASYMM8 +
+ + InstanceNormalizationLayer + Function to perform an instance normalization on a given axis. + +
    +
  • ANEURALNETWORKS_INSTANCE_NORMALIZATION +
+ NEInstanceNormalizationLayer + +
    +
  • NHWC +
  • NCHW +
+ + +
srcdst +
F16F16 +
F32F32 +
+ + CLInstanceNormalizationLayer + +
    +
  • NHWC +
  • NCHW +
+ + +
srcdst +
F16F16 +
F32F32 +
+ + L2NormalizeLayer + Function to perform an L2 normalization on a given axis. + +
    +
  • ANEURALNETWORKS_L2_NORMALIZATION +
+ NEL2NormalizeLayer + +
    +
  • NHWC +
  • NCHW +
+ + +
srcdst +
F16F16 +
F32F32 +
+ + CLL2NormalizeLayer + +
    +
  • NHWC +
  • NCHW +
+ + +
srcdst +
F16F16 +
F32F32 +
+ + LSTMLayer + Function to perform a single time step in a Long Short-Term Memory (LSTM) layer. + +
    +
  • ANEURALNETWORKS_LSTM +
+ NELSTMLayer + +
    +
  • All +
+ + +
src0 - src13dst0 - dst3 +
F16F16 +
F32F32 +
+ + CLLSTMLayer + +
    +
  • All +
+ + +
src0 - src13dst0 - dst3 +
F16F16 +
F32F32 +
+ + LSTMLayerQuantized + Function to perform quantized LSTM (Long Short-Term Memory) + +
    +
  • ANEURALNETWORKS_QUANTIZED_LSTM +
  • ANEURALNETWORKS_QUANTIZED_16BIT_LSTM +
+ NELSTMLayerQuantized + +
    +
  • All +
+ + +
src0 - src8src9 - src12src13src14dst0dst1 +
QASYMM8S32QSYMM16QASYMM8QSYMM16QASYMM8 +
+ + CLLSTMLayerQuantized + +
    +
  • All +
+ + +
src0 - src8src9 - src12src13src14dst0dst1 +
QASYMM8S32QSYMM16QASYMM8QSYMM16QASYMM8 +
+ + MaxUnpoolingLayer + Function to perform MaxUnpooling. + +
    +
  • n/a +
+ NEMaxUnpoolingLayer + +
    +
  • NHWC +
  • NCHW +
+ + +
srcdst +
QASYMM8QASYMM8 +
QASYMM8_SIGNEDQASYMM8_SIGNED +
F16F16 +
F32F32 +
+ + CLMaxUnpoolingLayer + +
    +
  • NHWC +
  • NCHW +
+ + +
srcdst +
QASYMM8QASYMM8 +
QASYMM8_SIGNEDQASYMM8_SIGNED +
F16F16 +
F32F32 +
+ + MeanStdDevNormalizationLayer + Function to execute mean and standard deviation normalization. + +
    +
  • n/a +
+ NEMeanStdDevNormalizationLayer + +
    +
  • NHWC +
  • NCHW +
+ + +
srcdst +
F32F32 +
F16F16 +
+ + CLMeanStdDevNormalizationLayer + +
    +
  • NHWC +
  • NCHW +
+ + +
srcdst +
F32F32 +
F16F16 +
+ + NormalizationLayer + Function to compute normalization layer. + +
    +
  • ANEURALNETWORKS_LOCAL_RESPONSE_NORMALIZATION +
+ NENormalizationLayer + +
    +
  • NHWC +
  • NCHW +
+ + +
srcdst +
F32F32 +
F16F16 +
+ + CLNormalizationLayer + +
    +
  • NHWC +
  • NCHW +
+ + +
srcdst +
F32F32 +
F16F16 +
+ + PadLayer + Function to pad a tensor. + +
    +
  • ANEURALNETWORKS_PAD +
  • ANEURALNETWORKS_PAD_V2 +
+ NEPadLayer + +
    +
  • NHWC +
  • NCHW +
+ + +
srcdst +
AllAll +
+ + CLPadLayer + +
    +
  • NHWC +
  • NCHW +
+ + +
srcdst +
AllAll +
+ + Permute + Function to transpose an ND tensor. + +
    +
  • ANEURALNETWORKS_TRANSPOSE +
+ NEPermute + +
    +
  • NHWC +
  • NCHW +
+ + +
srcdst +
AllAll +
+ + CLPermute + +
    +
  • NHWC +
  • NCHW +
+ + +
srcdst +
AllAll +
+ + PixelWiseMultiplication + Function to perform a multiplication. + +
    +
  • ANEURALNETWORKS_MUL +
+ NEPixelWiseMultiplication + +
    +
  • All +
+ + +
src0src1dst +
QASYMM8QASYMM8QASYMM8 +
QASYMM8_SIGNEDQASYMM8_SIGNEDQASYMM8_SIGNED +
QSYMM16QSYMM16QASYMM16 +
QSYMM16QSYMM16S32 +
U8U8U8 +
U8U8S16 +
U8S16S16 +
S16U8S16 +
S16S16S16 +
F16F16F16 +
F32S32F32 +
+ + CLPixelWiseMultiplication + +
    +
  • All +
+ + +
src0src1dst +
QASYMM8QASYMM8QASYMM8 +
QASYMM8_SIGNEDQASYMM8_SIGNEDQASYMM8_SIGNED +
QSYMM16QSYMM16QASYMM16 +
QSYMM16QSYMM16S32 +
U8U8U8 +
U8U8S16 +
U8S16S16 +
S16U8S16 +
S16S16S16 +
F16F16F16 +
F32S32F32 +
+ + PoolingLayer + Function to perform pooling with the specified pooling operation. + +
    +
  • ANEURALNETWORKS_AVERAGE_POOL_2D +
  • ANEURALNETWORKS_L2_POOL_2D +
  • ANEURALNETWORKS_MAX_POOL_2D
- NEConcatenateLayer + NEPoolingLayer + +
    +
  • NHWC +
  • NCHW +
+ + +
srcdst +
QASYMM8QASYMM8 +
QASYMM8_SIGNEDQASYMM8_SIGNED +
F16F16 +
F32F32 +
+ + CLPoolingLayer + +
    +
  • NHWC +
  • NCHW +
+ + +
srcdst +
QASYMM8QASYMM8 +
QASYMM8_SIGNEDQASYMM8_SIGNED +
F16F16 +
F32F32 +
+ + PReluLayer + Function to compute the activation layer with the PRELU activation function. + +
    +
  • ANEURALNETWORKS_PRELU +
+ NEPReluLayer
  • All @@ -128,7 +1645,7 @@ where N = batches, C = channels, H = height, W = width F32F32 - CLConcatenateLayer + CLPReluLayer
    • All @@ -142,13 +1659,224 @@ where N = batches, C = channels, H = height, W = width F32F32 - ConvertFullyConnectedWeights - Function to tranpose the wieghts for the fully connected layer. + PriorBoxLayer + Function to .
        -
      • None +
      • n/a
      - NEConvertFullyConnectedWeights + NEPriorBoxLayer + +
        +
      • NHWC +
      • NCHW +
      + + +
      src0src1dst +
      F32F32F32 +
      + + CLPriorBoxLayer + +
        +
      • NHWC +
      • NCHW +
      + + +
      src0src1dst +
      F32F32F32 +
      + + QLSTMLayer + Function to perform quantized LSTM (Long Short-Term Memory). + +
        +
      • ANEURALNETWORKS_QUANTIZED_LSTM +
      • ANEURALNETWORKS_QUANTIZED_16BIT_LSTM +
      + NEQLSTMLayer + +
        +
      • All +
      + + +
      src0src1 - src6src7 -src9src10src11dst0dst1 - dst2 +
      QASYMM8_SIGNEDQASYMM8S32QSYMM16QASYMM8_SIGNEDQSYMM16QASYMM8_SIGNED +
      + + CLQLSTMLayer + +
        +
      • All +
      + + +
      src0src1 - src6src7 -src9src10src11dst0dst1 - dst2 +
      QASYMM8_SIGNEDQASYMM8S32QSYMM16QASYMM8_SIGNEDQSYMM16QASYMM8_SIGNED +
      + + QuantizationLayer + Function to quantize the values in a tensor. + +
        +
      • ANEURALNETWORKS_QUANTIZE +
      + NEQuantizationLayer + +
        +
      • All +
      + + +
      srcdst +
      QASYMM8QASYMM8, QASYMM8_SIGNED, QASYMM16 +
      QASYMM8_SIGNEDQASYMM8, QASYMM8_SIGNED, QASYMM16 +
      F16QASYMM8, QASYMM8_SIGNED, QASYMM16 +
      F32QASYMM8, QASYMM8_SIGNED, QASYMM16 +
      + + CLQuantizationLayer + +
        +
      • All +
      + + +
      srcdst +
      QASYMM8QASYMM8, QASYMM8_SIGNED, QASYMM16 +
      QASYMM8_SIGNEDQASYMM8, QASYMM8_SIGNED, QASYMM16 +
      F16QASYMM8, QASYMM8_SIGNED, QASYMM16 +
      F32QASYMM8, QASYMM8_SIGNED, QASYMM16 +
      + + Range + Function to generate a sequence of numbers starting from START and extending by increments of 'STEP' up to but not including 'END'. + +
        +
      • n/a +
      + NERange + +
        +
      • All +
      + + +
      dst +
      U8 +
      S8 +
      U16 +
      S16 +
      U32 +
      S32 +
      F16 +
      F32 +
      + + CLRange + +
        +
      • All +
      + + +
      dst +
      U8 +
      S8 +
      QASYMM8 +
      U16 +
      S16 +
      U32 +
      S32 +
      F16 +
      F32 +
      + + ReduceMean + Function to perform a reduce mean operation. + +
        +
      • ANEURALNETWORKS_MEAN +
      + NEReduceMean + +
        +
      • All +
      + + +
      srcdst +
      QASYMM8QASYMM8 +
      QASYMM8_SIGNEDQASYMM8_SIGNED +
      F16F16 +
      F32F32 +
      + + CLReduceMean + +
        +
      • All +
      + + +
      srcdst +
      QASYMM8QASYMM8 +
      QASYMM8_SIGNEDQASYMM8_SIGNED +
      F16F16 +
      F32F32 +
      + + ReductionOperation + Function to perform a reduction with the following operations - ARG_IDX_MAX: Index of the max value - ARG_IDX_MIN: Index of the min value - MEAN_SUM: Mean of sum - PROD: Product - SUM_SQUARE: Sum of squares - SUM: Sum - MIN: Min - MAX: Max + +
        +
      • ANEURALNETWORKS_REDUCE_ALL +
      • ANEURALNETWORKS_REDUCE_ANY +
      • ANEURALNETWORKS_REDUCE_MAX +
      • ANEURALNETWORKS_REDUCE_MIN +
      • ANEURALNETWORKS_REDUCE_PROD +
      • ANEURALNETWORKS_REDUCE_SUM +
      + NEReductionOperation + +
        +
      • All +
      + + +
      srcdst +
      QASYMM8QASYMM8 +
      QASYMM8_SIGNEDQASYMM8_SIGNED +
      F16F16 +
      F32F32 +
      S32S32 +
      + + CLReductionOperation + +
        +
      • All +
      + + +
      srcdst +
      QASYMM8QASYMM8 +
      QASYMM8_SIGNEDQASYMM8_SIGNED +
      F16F16 +
      F32F32 +
      S32S32 +
      + + ReorgLayer + Performs a reorganization layer of input tensor to the output tensor. + +
        +
      • n/a +
      + NEReorgLayer
      • NHWC @@ -160,7 +1888,7 @@ where N = batches, C = channels, H = height, W = width AllAll - CLConvertFullyConnectedWeights + CLReorgLayer
        • NHWC @@ -172,13 +1900,14 @@ where N = batches, C = channels, H = height, W = width AllAll - Copy - Function to copy a tensor. + ReshapeLayer + Function to reshape a tensor.
            -
          • None +
          • ANEURALNETWORKS_RESHAPE +
          • ANEURALNETWORKS_SQUEEZE
          - NECopy + NEReshapeLayer
          • All @@ -189,7 +1918,7 @@ where N = batches, C = channels, H = height, W = width AllAll - CLCopy + CLReshapeLayer
            • All @@ -200,59 +1929,41 @@ where N = batches, C = channels, H = height, W = width AllAll - DequantizationLayer - Function to dequantize the values in a tensor + Reverse + Function to reverse tensor according to axis.
                -
              • ANEURALNETWORKS_DEQUANTIZE +
              • n/a
              - NEDequantizationLayer + NEReverse
              • All
              -
              srcdst -
              QASYMM8F16 -
              QASYMM8F32 -
              QASYMM8_SIGNEDF16 -
              QASYMM8_SIGNEDF32 -
              QSYMM8_PER_CHANNELF16 -
              QSYMM8_PER_CHANNELF32 -
              QSYMM8F16 -
              QSYMM8F32 -
              QSYMM16F16 -
              QSYMM16F32 +
              src0src1dst +
              AllU32All
              - CLDequantizationLayer + CLReverse
              • All
              -
              srcdst -
              QASYMM8F16 -
              QASYMM8F32 -
              QASYMM8_SIGNEDF16 -
              QASYMM8_SIGNEDF32 -
              QSYMM8_PER_CHANNELF16 -
              QSYMM8_PER_CHANNELF32 -
              QSYMM8F16 -
              QSYMM8F32 -
              QSYMM16F16 -
              QSYMM16F32 +
              src0src1dst +
              AllU32All
              - DirectConvolutionLayer - Function to + RNNLayer + Function to perform recurrent neural network layer.
                -
              • ANEURALNETWORKS_CONV_2D +
              • ANEURALNETWORKS_RNN
              - NEDirectConvolutionLayer + NERNNLayer
              • NHWC @@ -260,12 +1971,12 @@ where N = batches, C = channels, H = height, W = width
              -
              src0src1src2dst -
              F16F16F16F16 -
              F32F32F32F32 +
              src0src1src2src3dst0dst1 +
              F16F16F16F16F16F16 +
              F32F32F32F32F32F32
              - CLDirectConvolutionLayer + CLRNNLayer
              • NHWC @@ -273,135 +1984,152 @@ where N = batches, C = channels, H = height, W = width
              -
              src0src1src2dst -
              F16F16F16F16 -
              F32F32F32F32 -
              QASYMM8QASYMM8S32QASYMM8 -
              QASYMM8_SIGNEDQASYMM8_SIGNEDS32QASYMM8_SIGNED +
              src0src1src2src3dst0dst1 +
              F16F16F16F16F16F16 +
              F32F32F32F32F32F32
              - FFT1D - Fast Fourier Transform 1D + ROIAlignLayer + Function to perform ROI alignment.
                -
              • None +
              • ANEURALNETWORKS_ROI_ALIGN
              - NEFFT1D + NEROIAlignLayer
              • All
              -
              srcdst -
              F32F32 +
              src0src1dst +
              F16F16F16 +
              F32F32F32 +
              QASYMM8QASYMM16QASYMM8 +
              QASYMM8_SIGNEDQASYMM16QASYMM8_SIGNED
              - CLFFT1D + CLROIAlignLayer
              • All
              -
              srcdst -
              F32F32 -
              F16F16 +
              src0src1dst +
              F16F16F16 +
              F32F32F32 +
              QASYMM8QASYMM16QASYMM8 +
              QASYMM8_SIGNEDQASYMM16QASYMM8_SIGNED
              - FFT2D - Fast Fourier Transform 2D + ROIPoolingLayer + Function to perform ROI pooling.
                -
              • None +
              • ANEURALNETWORKS_ROI_POOLING
              - NEFFT2D + NEROIPoolingLayer
              • All
              -
              srcdst -
              F32F32 +
              src0src1dst +
              F32U16F32 +
              QASYMM8U16QASYMM8
              - CLFFT2D + CLROIPoolingLayer
              • All
              -
              srcdst -
              F32F32 -
              F16F16 +
              src0src1dst +
              F16U16F16 +
              F32U16F32 +
              QASYMM8U16QASYMM8
              - FFTConvolutionLayer - Fast Fourier Transform Convolution + Scale + Function to perform resize a tensor using to interpolate: - Bilinear - Nearest neighbor
                -
              • ANEURALNETWORKS_CONV_2D +
              • ANEURALNETWORKS_RESIZE_BILINEAR +
              • ANEURALNETWORKS_RESIZE_NEAREST_NEIGHBOR
              - NEFFTConvolutionLayer + NEScale
                -
              • All +
              • NHWC +
              • NCHW
              srcdst +
              QASYMM8QASYMM8 +
              QASYMM8_SIGNEDQASYMM8_SIGNED +
              F16F16
              F32F32 +
              U8U8 +
              S16S16
              - CLFFTConvolutionLayer + CLScale
                -
              • All +
              • NHWC +
              • NCHW
              srcdst -
              F32F32 +
              QASYMM8QASYMM8 +
              QASYMM8_SIGNEDQASYMM8_SIGNED
              F16F16 +
              F32F32 +
              U8U8 +
              S16S16
              - Fill - Set the values of a tensor with a given value + Select + Function to select values from 2 tensors depending on an input tensor of booleans.
                -
              • ANEURALNETWORKS_FILL +
              • ANEURALNETWORKS_SELECT
              - NEFill + NESelect
              • All
              -
              srcdst -
              AllAll +
              src0src1src2dst +
              U8AllAllAll
              - CLFill + CLSelect
              • All
              -
              srcdst -
              AllAll +
              src0src1src2dst +
              U8AllAllAll
              - Floor - Round the value to the lowest number + Slice + Function to perform tensor slicing.
                -
              • ANEURALNETWORKS_FLOOR +
              • ANEURALNETWORKS_SLICE
              - NEFloor + NESlice
              • All @@ -409,11 +2137,10 @@ where N = batches, C = channels, H = height, W = width
                srcdst -
                F32F32 -
                F16F16 +
                AllAll
                - CLFloor + CLSlice
                • All @@ -421,17 +2148,16 @@ where N = batches, C = channels, H = height, W = width
                  srcdst -
                  F32F32 -
                  F16F16 +
                  AllAll
                  - Permute - Function to transpose an ND tensor. + SpaceToBatchLayer + Function to divide a tensor spatially.
                    -
                  • ANEURALNETWORKS_TRANSPOSE +
                  • ANEURALNETWORKS_SPACE_TO_BATCH_ND
                  - NEPermute + NESpaceToBatchLayer
                  • NHWC @@ -439,11 +2165,11 @@ where N = batches, C = channels, H = height, W = width
                  -
                  srcdst -
                  AllAll +
                  src0src1src2dst +
                  AllS32S32All
                  - CLPermute + CLSpaceToBatchLayer
                  • NHWC @@ -451,67 +2177,17 @@ where N = batches, C = channels, H = height, W = width
                  -
                  srcdst -
                  AllAll -
                  - - PixelWiseMultiplication - Function to performe a multiplication. - -
                    -
                  • ANEURALNETWORKS_MUL -
                  - NEPixelWiseMultiplication - -
                    -
                  • All -
                  - - -
                  src0src1dst -
                  QASYMM8QASYMM8QASYMM8 -
                  QASYMM8_SIGNEDQASYMM8_SIGNEDQASYMM8_SIGNED -
                  QSYMM16QSYMM16QASYMM16 -
                  QSYMM16QSYMM16S32 -
                  U8U8U8 -
                  U8U8S16 -
                  U8S16S16 -
                  S16U8S16 -
                  S16S16S16 -
                  F16F16F16 -
                  F32S32F32 -
                  - - CLPixelWiseMultiplication - -
                    -
                  • All -
                  - - -
                  src0src1dst -
                  QASYMM8QASYMM8QASYMM8 -
                  QASYMM8_SIGNEDQASYMM8_SIGNEDQASYMM8_SIGNED -
                  QSYMM16QSYMM16QASYMM16 -
                  QSYMM16QSYMM16S32 -
                  U8U8U8 -
                  U8U8S16 -
                  U8S16S16 -
                  S16U8S16 -
                  S16S16S16 -
                  F16F16F16 -
                  F32S32F32 +
                  src0src1src2dst +
                  AllS32S32All
                  - PoolingLayer - Function to performe pooling with the specified pooling operation. + SpaceToDepthLayer + Function to rearrange blocks of spatial data into depth.
                    -
                  • ANEURALNETWORKS_AVERAGE_POOL_2D -
                  • ANEURALNETWORKS_L2_POOL_2D -
                  • ANEURALNETWORKS_MAX_POOL_2D +
                  • ANEURALNETWORKS_SPACE_TO_DEPTH
                  - NEPoolingLayer + NESpaceToDepthLayer
                  • NHWC @@ -520,13 +2196,10 @@ where N = batches, C = channels, H = height, W = width
                    srcdst -
                    QASYMM8QASYMM8 -
                    QASYMM8_SIGNEDQASYMM8_SIGNED -
                    F16F16 -
                    F32F32 +
                    AllAll
                    - CLPoolingLayer + CLSpaceToDepthLayer
                    • NHWC @@ -535,19 +2208,16 @@ where N = batches, C = channels, H = height, W = width
                      srcdst -
                      QASYMM8QASYMM8 -
                      QASYMM8_SIGNEDQASYMM8_SIGNED -
                      F16F16 -
                      F32F32 +
                      AllAll
                      - PReluLayer - Function to compute the activation layer with the PRELU activation function. + Split + Function to split a tensor along a given axis.
                        -
                      • ANEURALNETWORKS_PRELU +
                      • ANEURALNETWORKS_SPLIT
                      - NEPReluLayer + NESplit
                      • All @@ -555,13 +2225,10 @@ where N = batches, C = channels, H = height, W = width
                        srcdst -
                        QASYMM8QASYMM8 -
                        QASYMM8_SIGNEDQASYMM8_SIGNED -
                        F16F16 -
                        F32F32 +
                        AllAll
                        - CLPReluLayer + CLSplit
                        • All @@ -569,19 +2236,16 @@ where N = batches, C = channels, H = height, W = width
                          srcdst -
                          QASYMM8QASYMM8 -
                          QASYMM8_SIGNEDQASYMM8_SIGNED -
                          F16F16 -
                          F32F32 +
                          AllAll
                          - QuantizationLayer - Function to perform quantization layer + StackLayer + Function to stack tensors along an axis.
                            -
                          • ANEURALNETWORKS_QUANTIZE +
                          • n/a
                          - NEQuantizationLayer + NEStackLayer
                          • All @@ -589,21 +2253,10 @@ where N = batches, C = channels, H = height, W = width
                            srcdst -
                            QASYMM8QASYMM8 -
                            QASYMM8QASYMM8_SIGNED -
                            QASYMM8QASYMM16 -
                            QASYMM8_SIGNEDQASYMM8 -
                            QASYMM8_SIGNEDQASYMM8_SIGNED -
                            QASYMM8_SIGNEDQASYMM16 -
                            F16QASYMM8 -
                            F16QASYMM8_SIGNED -
                            F16QASYMM16 -
                            F32QASYMM8 -
                            F32QASYMM8_SIGNED -
                            F32QASYMM16 +
                            AllAll
                            - CLQuantizationLayer + CLStackLayer
                            • All @@ -611,28 +2264,16 @@ where N = batches, C = channels, H = height, W = width
                              srcdst -
                              QASYMM8QASYMM8 -
                              QASYMM8QASYMM8_SIGNED -
                              QASYMM8QASYMM16 -
                              QASYMM8_SIGNEDQASYMM8 -
                              QASYMM8_SIGNEDQASYMM8_SIGNED -
                              QASYMM8_SIGNEDQASYMM16 -
                              F16QASYMM8 -
                              F16QASYMM8_SIGNED -
                              F16QASYMM16 -
                              F32QASYMM8 -
                              F32QASYMM8_SIGNED -
                              F32QASYMM16 +
                              AllAll
                              - ReshapeLayer - Fucntion to reshape a tensor + StridedSlice + Function to extract a strided slice of a tensor.
                                -
                              • ANEURALNETWORKS_RESHAPE -
                              • ANEURALNETWORKS_SQUEEZE +
                              • ANEURALNETWORKS_STRIDED_SLICE
                              - NEReshapeLayer + NEStridedSlice
                              • All @@ -643,7 +2284,7 @@ where N = batches, C = channels, H = height, W = width AllAll - CLReshapeLayer + CLStridedSlice
                                • All @@ -654,54 +2295,41 @@ where N = batches, C = channels, H = height, W = width AllAll - Scale - Fucntion to perform resize a tensor using to interpolate: - Bilenear - Nearest neighbor + Tile + Function to construct a tensor by tiling a given tensor.
                                    -
                                  • ANEURALNETWORKS_RESIZE_BILINEAR -
                                  • ANEURALNETWORKS_RESIZE_NEAREST_NEIGHBOR +
                                  • ANEURALNETWORKS_TILE
                                  - NEScale + NETile
                                    -
                                  • NHWC -
                                  • NCHW +
                                  • All
                                  srcdst -
                                  QASYMM8QASYMM8 -
                                  QASYMM8_SIGNEDQASYMM8_SIGNED -
                                  F16F16 -
                                  F32F32 -
                                  U8U8 -
                                  S16S16 +
                                  AllAll
                                  - CLScale + CLTile
                                    -
                                  • NHWC -
                                  • NCHW +
                                  • All
                                  srcdst -
                                  QASYMM8QASYMM8 -
                                  QASYMM8_SIGNEDQASYMM8_SIGNED -
                                  F16F16 -
                                  F32F32 -
                                  U8U8 -
                                  S16S16 +
                                  AllAll
                                  - Slice - Function to perform tensor slicing. + Transpose + Function to transpose a 2D tensor.
                                    -
                                  • ANEURALNETWORKS_SLICE +
                                  • ANEURALNETWORKS_TRANSPOSE
                                  - NESlice + NETranspose
                                  • All @@ -712,7 +2340,7 @@ where N = batches, C = channels, H = height, W = width AllAll - CLSlice + CLTranspose
                                    • All @@ -723,13 +2351,13 @@ where N = batches, C = channels, H = height, W = width AllAll - StridedSlice - Function to extract a strided slice of a tensor. + Unstack + Function to unpack a rank-R tensor into rank-(R-1) tensors.
                                        -
                                      • ANEURALNETWORKS_STRIDED_SLICE +
                                      • n/a
                                      - NEStridedSlice + NEUnstack
                                      • All @@ -740,7 +2368,7 @@ where N = batches, C = channels, H = height, W = width AllAll - CLStridedSlice + CLUnstack
                                        • All @@ -751,32 +2379,36 @@ where N = batches, C = channels, H = height, W = width AllAll - Transpose - Function to transpose an 2D tensor. + WinogradConvolutionLayer + Function to do Winograd Convolution.
                                            -
                                          • ANEURALNETWORKS_TRANSPOSE +
                                          • ANEURALNETWORKS_CONV_2D
                                          - NETranspose + NEWinogradConvolutionLayer
                                            -
                                          • All +
                                          • NHWC +
                                          • NCHW
                                          -
                                          srcdst -
                                          AllAll +
                                          src0src1src2dst +
                                          F16F16F16F16 +
                                          F32F32F32F32
                                          - CLTranspose + CLWinogradConvolutionLayer
                                            -
                                          • All +
                                          • NHWC +
                                          • NCHW
                                          -
                                          srcdst -
                                          AllAll +
                                          src0src1src2dst +
                                          F16F16F16F16 +
                                          F32F32F32F32
                                          -- cgit v1.2.1
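All of the NE* and CL* functions listed in the table above are driven through the same Compute Library configure/run pattern. A minimal sketch of that pattern, assuming the standard runtime headers and using NEFloor with the F32 data type listed above (the tensor shape here is an arbitrary illustrative choice):

@code{.cpp}
// Minimal usage sketch of one operator from the list above (NEFloor, F32 -> F32).
// Assumes the Arm Compute Library headers and runtime are available; shapes are illustrative.
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/NEON/functions/NEFloor.h"
#include "arm_compute/runtime/Tensor.h"

using namespace arm_compute;

int main()
{
    // Source and destination tensors; F32 -> F32 is one of the type pairs listed for NEFloor.
    Tensor src, dst;
    src.allocator()->init(TensorInfo(TensorShape(8U, 8U), 1, DataType::F32));
    dst.allocator()->init(TensorInfo(TensorShape(8U, 8U), 1, DataType::F32));

    // Configure before allocating: only the TensorInfo metadata is needed at this point.
    NEFloor floor_fn;
    floor_fn.configure(&src, &dst);

    // Allocate backing memory, fill src with input data, then execute on the CPU backend.
    src.allocator()->allocate();
    dst.allocator()->allocate();
    // ... fill src here ...
    floor_fn.run();

    return 0;
}
@endcode

The CL* variants in the table follow the same configure/run flow, with CLTensor in place of Tensor, an OpenCL scheduler initialisation beforehand, and map/unmap calls around host-side access.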