about summary refs log tree commit diff
diff options
context:
space:
mode:
authorJakub Sujak <jakub.sujak@arm.com>2023-11-07 22:39:30 +0000
committerAnitha Raj <anitha.raj@arm.com>2023-11-10 14:48:40 +0000
commit3e7452ea8bb1634fc85f5ee49a9401ce310cf673 (patch)
tree3c66d8e0b7941d4944bdc39bb7b6798b3c96734c
parent0e1ccebfd52248fd8ead2614eaf45828d1fab340 (diff)
downloadComputeLibrary-3e7452ea8bb1634fc85f5ee49a9401ce310cf673.tar.gz
Update list of supported operators in documentation
Resolves: COMPMID-6633 Change-Id: I1e78df468876ec3569fa46597734e7de328b06f4 Signed-off-by: Jakub Sujak <jakub.sujak@arm.com> Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/10663 Reviewed-by: Gunes Bayir <gunes.bayir@arm.com> Tested-by: Arm Jenkins <bsgcomp@arm.com> Benchmark: Arm Jenkins <bsgcomp@arm.com> Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
-rw-r--r--arm_compute/runtime/CL/functions/CLNormalizePlanarYUVLayer.h20
-rw-r--r--arm_compute/runtime/OperatorList.h48
-rw-r--r--docs/user_guide/operator_list.dox92
3 files changed, 152 insertions, 8 deletions
diff --git a/arm_compute/runtime/CL/functions/CLNormalizePlanarYUVLayer.h b/arm_compute/runtime/CL/functions/CLNormalizePlanarYUVLayer.h
index 3473af1004..cdccc16a51 100644
--- a/arm_compute/runtime/CL/functions/CLNormalizePlanarYUVLayer.h
+++ b/arm_compute/runtime/CL/functions/CLNormalizePlanarYUVLayer.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2020 Arm Limited.
+ * Copyright (c) 2018-2020, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -21,8 +21,8 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef ARM_COMPUTE_CLNORMALIZEPLANARYUVLAYER_H
-#define ARM_COMPUTE_CLNORMALIZEPLANARYUVLAYER_H
+#ifndef ACL_ARM_COMPUTE_RUNTIME_CL_FUNCTIONS_CLNORMALIZEPLANARYUVLAYER_H
+#define ACL_ARM_COMPUTE_RUNTIME_CL_FUNCTIONS_CLNORMALIZEPLANARYUVLAYER_H
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CL/ICLSimpleFunction.h"
@@ -44,6 +44,18 @@ class CLNormalizePlanarYUVLayer : public ICLSimpleFunction
public:
/** Set the input and output tensors.
*
+ * Valid data layouts:
+ * - NHWC
+ * - NCHW
+ *
+ * Valid data type configurations:
+ * |src |dst |
+ * |:--------------|:--------------|
+ * |F32 |F32 |
+ * |F16 |F16 |
+ * |QASYMM8 |QASYMM8 |
+ * |QASYMM8_SIGNED |QASYMM8_SIGNED |
+ *
* @param[in] input Source tensor. 3 lower dimensions represent a single input with dimensions [width, height, channels].
* Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32.
 * @param[out] output Destination feature tensor. Data type supported: same as @p input
@@ -82,4 +94,4 @@ public:
validate(const ITensorInfo *input, const ITensorInfo *output, const ITensorInfo *mean, const ITensorInfo *std);
};
} // namespace arm_compute
-#endif /* ARM_COMPUTE_CLNORMALIZEPLANARYUVLAYER_H */
+#endif // ACL_ARM_COMPUTE_RUNTIME_CL_FUNCTIONS_CLNORMALIZEPLANARYUVLAYER_H
diff --git a/arm_compute/runtime/OperatorList.h b/arm_compute/runtime/OperatorList.h
index 92b5079e7e..8bcdf5d3bf 100644
--- a/arm_compute/runtime/OperatorList.h
+++ b/arm_compute/runtime/OperatorList.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2022 Arm Limited.
+ * Copyright (c) 2021-2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -21,8 +21,8 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef ARM_COMPUTE_OPERATOR_LIST_H
-#define ARM_COMPUTE_OPERATOR_LIST_H
+#ifndef ACL_ARM_COMPUTE_RUNTIME_OPERATORLIST_H
+#define ACL_ARM_COMPUTE_RUNTIME_OPERATORLIST_H
/** ActivationLayer
*
@@ -40,6 +40,16 @@
*
*/
+/** AddMulAdd
+ *
+ * Description:
+ * Performs a fused Add + Mul + Add [+ Relu-based-Activation] operation.
+ *
+ * Equivalent Android NNAPI Op:
+ * n/a
+ *
+ */
+
/** ArgMinMaxLayer
*
* Description:
@@ -647,6 +657,16 @@
*
*/
+/** MatMul
+ *
+ * Description:
+ * Computes a matrix multiplication in batches.
+ *
+ * Equivalent Android NNAPI Op:
+ * ANEURALNETWORKS_BATCH_MATMUL
+ *
+ */
+
/** MaxUnpoolingLayer
*
* Description:
@@ -677,6 +697,16 @@
*
*/
+/** NormalizePlanarYUVLayer
+ *
+ * Description:
+ * Function to compute normalization planar YUV layer.
+ *
+ * Equivalent Android NNAPI Op:
+ * n/a
+ *
+ */
+
/** PadLayer
*
* Description:
@@ -814,6 +844,16 @@
*
*/
+/** ReorderLayer
+ *
+ * Description:
+ * Reorders a tensor to a different weights format.
+ *
+ * Equivalent Android NNAPI Op:
+ * n/a
+ *
+ */
+
/** ReorgLayer
*
* Description:
@@ -1009,4 +1049,4 @@
*
*/
-#endif /* ARM_COMPUTE_OPERATOR_LIST_H */ \ No newline at end of file
+#endif // ACL_ARM_COMPUTE_RUNTIME_OPERATORLIST_H
diff --git a/docs/user_guide/operator_list.dox b/docs/user_guide/operator_list.dox
index 0c74214c73..25c856da10 100644
--- a/docs/user_guide/operator_list.dox
+++ b/docs/user_guide/operator_list.dox
@@ -109,6 +109,26 @@ where N = batches, C = channels, H = height, W = width, D = depth
<tr><td>F32<td>F32
</table>
<tr>
+ <td rowspan="1">AddMulAdd
+ <td rowspan="1" style="width:200px;"> Performs a fused Add + Mul + Add [+ Relu-based-Activation] operation.
+ <td rowspan="1">
+ <ul>
+ <li>n/a
+ </ul>
+ <td>NEAddMulAdd
+ <td>
+ <ul>
+ <li>Any
+ </ul>
+ <td>
+ <table>
+ <tr><th>input1<th>input2<th>bn_mul<th>bn_add<th>add_output<th>final_output
+ <tr><td>QASYMM8<td>QASYMM8<td>QASYMM8<td>QASYMM8<td>QASYMM8<td>QASYMM8
+ <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED<td>QASYMM8_SIGNED<td>QASYMM8_SIGNED<td>QASYMM8_SIGNED<td>QASYMM8_SIGNED
+ <tr><td>F16<td>F16<td>F16<td>F16<td>F16<td>F16
+ <tr><td>F32<td>F32<td>F32<td>F32<td>F32<td>F32
+ </table>
+<tr>
<td rowspan="2">ArgMinMaxLayer
<td rowspan="2" style="width:200px;"> Function to calculate the index of the minimum or maximum values in a tensor based on an axis.
<td rowspan="2">
@@ -2055,6 +2075,40 @@ where N = batches, C = channels, H = height, W = width, D = depth
<tr><td>QASYMM8<td>S32<td>QSYMM16<td>QASYMM8<td>QSYMM16<td>QASYMM8
</table>
<tr>
+ <td rowspan="2">MatMul
+ <td rowspan="2" style="width:200px;"> Computes a matrix multiplication in batches.
+ <td rowspan="2">
+ <ul>
+ <li>ANEURALNETWORKS_BATCH_MATMUL
+ </ul>
+ <td>NEMatMul
+ <td>
+ <ul>
+ <li>Any
+ </ul>
+ <td>
+ <table>
+ <tr><th>lhs<th>rhs<th>dst
+ <tr><td>F32<td>F32<td>F32
+ <tr><td>F16<td>F16<td>F16
+ <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED<td>QASYMM8_SIGNED
+ <tr><td>QASYMM8<td>QASYMM8<td>QASYMM8
+ </table>
+<tr>
+ <td>CLMatMul
+ <td>
+ <ul>
+ <li>All
+ </ul>
+ <td>
+ <table>
+ <tr><th>lhs<th>rhs<th>dst
+ <tr><td>F32<td>F32<td>F32
+ <tr><td>F16<td>F16<td>F16
+ <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED<td>QASYMM8_SIGNED
+ <tr><td>QASYMM8<td>QASYMM8<td>QASYMM8
+ </table>
+<tr>
<td rowspan="2">MaxUnpoolingLayer
<td rowspan="2" style="width:200px;"> Function to perform MaxUnpooling.
<td rowspan="2">
@@ -2155,6 +2209,27 @@ where N = batches, C = channels, H = height, W = width, D = depth
<tr><td>F16<td>F16
</table>
<tr>
+ <td rowspan="1">NormalizePlanarYUVLayer
+ <td rowspan="1" style="width:200px;"> Function to compute normalization planar YUV layer.
+ <td rowspan="1">
+ <ul>
+ <li>n/a
+ </ul>
+ <td>CLNormalizePlanarYUVLayer
+ <td>
+ <ul>
+ <li>NHWC
+ <li>NCHW
+ </ul>
+ <td>
+ <table>
+ <tr><th>src<th>dst
+ <tr><td>F32<td>F32
+ <tr><td>F16<td>F16
+ <tr><td>QASYMM8<td>QASYMM8
+ <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED
+ </table>
+<tr>
<td rowspan="2">PadLayer
<td rowspan="2" style="width:200px;"> Function to pad a tensor.
<td rowspan="2">
@@ -2582,6 +2657,23 @@ where N = batches, C = channels, H = height, W = width, D = depth
<tr><td>S32<td>S32
</table>
<tr>
+ <td rowspan="1">ReorderLayer
+ <td rowspan="1" style="width:200px;"> Reorders a tensor to a different weights format.
+ <td rowspan="1">
+ <ul>
+ <li>n/a
+ </ul>
+ <td>NEReorderLayer
+ <td>
+ <ul>
+ <li>NCHW
+ </ul>
+ <td>
+ <table>
+ <tr><th>src<th>dst
+ <tr><td>F32<td>F32
+ </table>
+<tr>
<td rowspan="2">ReorgLayer
<td rowspan="2" style="width:200px;"> Performs a reorganization layer of input tensor to the output tensor.
<td rowspan="2">