From 667e82f0bcb770ac76461a6e7733b160f6359c84 Mon Sep 17 00:00:00 2001 From: Jakub Sujak Date: Tue, 7 Nov 2023 22:39:30 +0000 Subject: Update list of supported operators in documentation Resolves: COMPMID-6633 Change-Id: I1e78df468876ec3569fa46597734e7de328b06f4 Signed-off-by: Jakub Sujak Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/10663 Reviewed-by: Gunes Bayir Tested-by: Arm Jenkins Benchmark: Arm Jenkins Comments-Addressed: Arm Jenkins --- .../CL/functions/CLNormalizePlanarYUVLayer.h | 20 ++++- arm_compute/runtime/OperatorList.h | 48 ++++++++++- docs/user_guide/operator_list.dox | 92 ++++++++++++++++++++++ 3 files changed, 152 insertions(+), 8 deletions(-) diff --git a/arm_compute/runtime/CL/functions/CLNormalizePlanarYUVLayer.h b/arm_compute/runtime/CL/functions/CLNormalizePlanarYUVLayer.h index 3473af1004..cdccc16a51 100644 --- a/arm_compute/runtime/CL/functions/CLNormalizePlanarYUVLayer.h +++ b/arm_compute/runtime/CL/functions/CLNormalizePlanarYUVLayer.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2020 Arm Limited. + * Copyright (c) 2018-2020, 2023 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -21,8 +21,8 @@ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ -#ifndef ARM_COMPUTE_CLNORMALIZEPLANARYUVLAYER_H -#define ARM_COMPUTE_CLNORMALIZEPLANARYUVLAYER_H +#ifndef ACL_ARM_COMPUTE_RUNTIME_CL_FUNCTIONS_CLNORMALIZEPLANARYUVLAYER_H +#define ACL_ARM_COMPUTE_RUNTIME_CL_FUNCTIONS_CLNORMALIZEPLANARYUVLAYER_H #include "arm_compute/core/Types.h" #include "arm_compute/runtime/CL/ICLSimpleFunction.h" @@ -43,6 +43,18 @@ class CLNormalizePlanarYUVLayer : public ICLSimpleFunction { public: /** Set the input and output tensors. + * + * Valid data layouts: + * - NHWC + * - NCHW + * + * Valid data type configurations: + * |src |dst | + * |:--------------|:--------------| + * |F32 |F32 | + * |F16 |F16 | + * |QASYMM8 |QASYMM8 | + * |QASYMM8_SIGNED |QASYMM8_SIGNED | * * @param[in] input Source tensor. 3 lower dimensions represent a single input with dimensions [width, height, channels]. * Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32. @@ -82,4 +94,4 @@ public: validate(const ITensorInfo *input, const ITensorInfo *output, const ITensorInfo *mean, const ITensorInfo *std); }; } // namespace arm_compute -#endif /* ARM_COMPUTE_CLNORMALIZEPLANARYUVLAYER_H */ +#endif // ACL_ARM_COMPUTE_RUNTIME_CL_FUNCTIONS_CLNORMALIZEPLANARYUVLAYER_H diff --git a/arm_compute/runtime/OperatorList.h b/arm_compute/runtime/OperatorList.h index 92b5079e7e..8bcdf5d3bf 100644 --- a/arm_compute/runtime/OperatorList.h +++ b/arm_compute/runtime/OperatorList.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021-2022 Arm Limited. + * Copyright (c) 2021-2023 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -21,8 +21,8 @@ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ -#ifndef ARM_COMPUTE_OPERATOR_LIST_H -#define ARM_COMPUTE_OPERATOR_LIST_H +#ifndef ACL_ARM_COMPUTE_RUNTIME_OPERATORLIST_H +#define ACL_ARM_COMPUTE_RUNTIME_OPERATORLIST_H /** ActivationLayer * @@ -40,6 +40,16 @@ * */ +/** AddMulAdd + * + * Description: + * Performs a fused Add + Mul + Add [+ Relu-based-Activation] operation. + * + * Equivalent Android NNAPI Op: + * n/a + * + */ + /** ArgMinMaxLayer * * Description: @@ -647,6 +657,16 @@ * */ +/** MatMul + * + * Description: + * Computes a matrix multiplication in batches. 
+ * + * Equivalent Android NNAPI Op: + * ANEURALNETWORKS_BATCH_MATMUL + * + */ + /** MaxUnpoolingLayer * * Description: @@ -677,6 +697,16 @@ * */ +/** NormalizePlanarYUVLayer + * + * Description: + * Function to compute normalization planar YUV layer. + * + * Equivalent Android NNAPI Op: + * n/a + * + */ + /** PadLayer * * Description: @@ -814,6 +844,16 @@ * */ +/** ReorderLayer + * + * Description: + * Reorders a tensor to a different weights format. + * + * Equivalent Android NNAPI Op: + * n/a + * + */ + /** ReorgLayer * * Description: @@ -1009,4 +1049,4 @@ * */ -#endif /* ARM_COMPUTE_OPERATOR_LIST_H */ \ No newline at end of file +#endif // ACL_ARM_COMPUTE_RUNTIME_OPERATORLIST_H diff --git a/docs/user_guide/operator_list.dox b/docs/user_guide/operator_list.dox index 0c74214c73..25c856da10 100644 --- a/docs/user_guide/operator_list.dox +++ b/docs/user_guide/operator_list.dox @@ -108,6 +108,26 @@ where N = batches, C = channels, H = height, W = width, D = depth F16F16 F32F32 + + AddMulAdd + Performs a fused Add + Mul + Add [+ Relu-based-Activation] operation. + +
+      Equivalent Android NNAPI Op:
+       • n/a
+      NEAddMulAdd
+      Valid data layouts:
+       • Any
+      Valid data type configurations:
+      |input1         |input2         |bn_mul         |bn_add         |add_output     |final_output   |
+      |:--------------|:--------------|:--------------|:--------------|:--------------|:--------------|
+      |QASYMM8        |QASYMM8        |QASYMM8        |QASYMM8        |QASYMM8        |QASYMM8        |
+      |QASYMM8_SIGNED |QASYMM8_SIGNED |QASYMM8_SIGNED |QASYMM8_SIGNED |QASYMM8_SIGNED |QASYMM8_SIGNED |
+      |F16            |F16            |F16            |F16            |F16            |F16            |
+      |F32            |F32            |F32            |F32            |F32            |F32            |
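For context, a minimal sketch of driving the fused operator through the public NEAddMulAdd function is shown below. The tensor shapes are illustrative, and the exact configure() argument list (the convert policy and activation-info parameters in particular) is an assumption to verify against the headers of the release in use.

    #include "arm_compute/runtime/NEON/functions/NEAddMulAdd.h"
    #include "arm_compute/runtime/Tensor.h"

    using namespace arm_compute;

    int main()
    {
        // Two elementwise inputs plus per-channel batch-norm style multiplier/addend.
        Tensor input1, input2, bn_mul, bn_add, add_output, final_output;
        const TensorShape data_shape(16U, 8U, 4U); // channels x width x height (illustrative)
        const TensorShape chan_shape(16U);         // one value per channel

        input1.allocator()->init(TensorInfo(data_shape, 1, DataType::F32));
        input2.allocator()->init(TensorInfo(data_shape, 1, DataType::F32));
        bn_mul.allocator()->init(TensorInfo(chan_shape, 1, DataType::F32));
        bn_add.allocator()->init(TensorInfo(chan_shape, 1, DataType::F32));
        add_output.allocator()->init(TensorInfo(data_shape, 1, DataType::F32));
        final_output.allocator()->init(TensorInfo(data_shape, 1, DataType::F32));

        // Fused Add + Mul + Add, optionally followed by a ReLU-based activation.
        NEAddMulAdd add_mul_add;
        add_mul_add.configure(&input1, &input2, &bn_mul, &bn_add, &add_output, &final_output,
                              ConvertPolicy::SATURATE,
                              ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));

        input1.allocator()->allocate();
        input2.allocator()->allocate();
        bn_mul.allocator()->allocate();
        bn_add.allocator()->allocate();
        add_output.allocator()->allocate();
        final_output.allocator()->allocate();

        add_mul_add.run();
        return 0;
    }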
ArgMinMaxLayer Function to calculate the index of the minimum or maximum values in a tensor based on an axis. @@ -2054,6 +2074,40 @@ where N = batches, C = channels, H = height, W = width, D = depth src0 - src8src9 - src12src13src14dst0dst1 QASYMM8S32QSYMM16QASYMM8QSYMM16QASYMM8 + + MatMul + Computes a matrix multiplication in batches. + +
+      Equivalent Android NNAPI Op:
+       • ANEURALNETWORKS_BATCH_MATMUL
+      NEMatMul
+      Valid data layouts:
+       • Any
+      Valid data type configurations:
+      |lhs            |rhs            |dst            |
+      |:--------------|:--------------|:--------------|
+      |F32            |F32            |F32            |
+      |F16            |F16            |F16            |
+      |QASYMM8_SIGNED |QASYMM8_SIGNED |QASYMM8_SIGNED |
+      |QASYMM8        |QASYMM8        |QASYMM8        |
+      CLMatMul
+      Valid data layouts:
+       • All
+      Valid data type configurations:
+      |lhs            |rhs            |dst            |
+      |:--------------|:--------------|:--------------|
+      |F32            |F32            |F32            |
+      |F16            |F16            |F16            |
+      |QASYMM8_SIGNED |QASYMM8_SIGNED |QASYMM8_SIGNED |
+      |QASYMM8        |QASYMM8        |QASYMM8        |
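As a usage illustration for the CPU backend, the sketch below configures NEMatMul for a small batched F32 multiplication. Shapes follow the library's innermost-first TensorShape ordering; the MatMulInfo/CpuMatMulSettings parameters and their default-constructed use are assumptions to check against the installed NEMatMul.h.

    #include "arm_compute/runtime/NEON/functions/NEMatMul.h"
    #include "arm_compute/runtime/Tensor.h"

    using namespace arm_compute;

    int main()
    {
        // Per-batch multiplication of a 32x64 LHS by a 64x16 RHS, over 2 batches.
        // Shapes list the innermost dimension first: (K, M, batches), (N, K, batches), (N, M, batches).
        Tensor lhs, rhs, dst;
        lhs.allocator()->init(TensorInfo(TensorShape(64U, 32U, 2U), 1, DataType::F32));
        rhs.allocator()->init(TensorInfo(TensorShape(16U, 64U, 2U), 1, DataType::F32));
        dst.allocator()->init(TensorInfo(TensorShape(16U, 32U, 2U), 1, DataType::F32));

        NEMatMul matmul;
        // Default MatMulInfo means plain lhs x rhs with no operand transposition.
        matmul.configure(&lhs, &rhs, &dst, MatMulInfo(), CpuMatMulSettings(), ActivationLayerInfo());

        lhs.allocator()->allocate();
        rhs.allocator()->allocate();
        dst.allocator()->allocate();

        matmul.run();
        return 0;
    }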
MaxUnpoolingLayer Function to perform MaxUnpooling. @@ -2154,6 +2208,27 @@ where N = batches, C = channels, H = height, W = width, D = depth F32F32 F16F16 + + NormalizePlanarYUVLayer + Function to compute normalization planar YUV layer. + +
+      Equivalent Android NNAPI Op:
+       • n/a
+      CLNormalizePlanarYUVLayer
+      Valid data layouts:
+       • NHWC
+       • NCHW
+      Valid data type configurations:
+      |src            |dst            |
+      |:--------------|:--------------|
+      |F32            |F32            |
+      |F16            |F16            |
+      |QASYMM8        |QASYMM8        |
+      |QASYMM8_SIGNED |QASYMM8_SIGNED |
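The configure()/validate() interface documented in the CLNormalizePlanarYUVLayer.h change above can be exercised as in the sketch below. Tensor shapes, the NHWC layout, and the error-code check are illustrative only.

    #include "arm_compute/runtime/CL/CLScheduler.h"
    #include "arm_compute/runtime/CL/CLTensor.h"
    #include "arm_compute/runtime/CL/functions/CLNormalizePlanarYUVLayer.h"

    using namespace arm_compute;

    int main()
    {
        CLScheduler::get().default_init(); // Create the default OpenCL context and queue.

        // NHWC data tensor; mean and std hold one value per channel, as the header documents.
        CLTensor input, output, mean, std_dev;
        input.allocator()->init(TensorInfo(TensorShape(3U, 224U, 224U), 1, DataType::F32, DataLayout::NHWC));
        output.allocator()->init(TensorInfo(TensorShape(3U, 224U, 224U), 1, DataType::F32, DataLayout::NHWC));
        mean.allocator()->init(TensorInfo(TensorShape(3U), 1, DataType::F32));
        std_dev.allocator()->init(TensorInfo(TensorShape(3U), 1, DataType::F32));

        // Static shape/type check mirroring the validate() declared in the header.
        if(CLNormalizePlanarYUVLayer::validate(input.info(), output.info(), mean.info(), std_dev.info()).error_code() != ErrorCode::OK)
        {
            return 1;
        }

        CLNormalizePlanarYUVLayer norm;
        norm.configure(&input, &output, &mean, &std_dev);

        input.allocator()->allocate();
        output.allocator()->allocate();
        mean.allocator()->allocate();
        std_dev.allocator()->allocate();

        norm.run();
        CLScheduler::get().sync(); // Block until the queued OpenCL work completes.
        return 0;
    }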
PadLayer Function to pad a tensor. @@ -2581,6 +2656,23 @@ where N = batches, C = channels, H = height, W = width, D = depth F32F32 S32S32 + + ReorderLayer + Reorders a tensor to a different weights format. + +
+      Equivalent Android NNAPI Op:
+       • n/a
+      NEReorderLayer
+      Valid data layouts:
+       • NCHW
+      Valid data type configurations:
+      |src            |dst            |
+      |:--------------|:--------------|
+      |F32            |F32            |
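A rough sketch of NEReorderLayer usage follows. The configure() signature taking source and destination WeightFormat values, the specific OHWI/OHWIo4 formats, and the output shape handling are all assumptions to confirm against NEReorderLayer.h; the function is typically only built for aarch64 targets.

    #include "arm_compute/runtime/NEON/functions/NEReorderLayer.h"
    #include "arm_compute/runtime/Tensor.h"

    using namespace arm_compute;

    int main()
    {
        // Reorder a 2D F32 weights tensor from the generic OHWI ordering into a blocked variant.
        Tensor src, dst;
        src.allocator()->init(TensorInfo(TensorShape(64U, 128U), 1, DataType::F32));
        dst.allocator()->init(TensorInfo(TensorShape(64U, 128U), 1, DataType::F32));

        NEReorderLayer reorder;
        // Assumed signature: configure(src, dst, input_weight_format, output_weight_format).
        reorder.configure(&src, &dst, WeightFormat::OHWI, WeightFormat::OHWIo4);

        src.allocator()->allocate();
        dst.allocator()->allocate();

        reorder.run();
        return 0;
    }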
ReorgLayer Performs a reorganization layer of input tensor to the output tensor. -- cgit v1.2.1