From 1b6377b603c5833e52d704dc17db14f446ebc670 Mon Sep 17 00:00:00 2001
From: SiCong Li
Date: Mon, 9 Jan 2023 15:34:20 +0000
Subject: Add broadcast batched matmul validation cases

Related to: COMPMID-5660

Signed-off-by: SiCong Li
Change-Id: I2314c8b21acc638402c77080d59db2f3fed58fe2
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/8911
Reviewed-by: Jakub Sujak
Reviewed-by: Mohmun02
Benchmark: Arm Jenkins
Tested-by: Arm Jenkins
Comments-Addressed: Arm Jenkins
---
 arm_compute/runtime/NEON/functions/NEGEMM.h | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

(limited to 'arm_compute/runtime/NEON/functions/NEGEMM.h')

diff --git a/arm_compute/runtime/NEON/functions/NEGEMM.h b/arm_compute/runtime/NEON/functions/NEGEMM.h
index 7ce2521148..db15923165 100644
--- a/arm_compute/runtime/NEON/functions/NEGEMM.h
+++ b/arm_compute/runtime/NEON/functions/NEGEMM.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2022 Arm Limited.
+ * Copyright (c) 2017-2023 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -66,6 +66,8 @@ public:
      * @note GEMM: General Matrix Multiply - [alpha * A * B + beta * C].
      * @note GEMM: The tensors a, b, c, d must have the same data type. You should not mix data types when calling this function.
      *
+     * @note Batched GEMM only supports broadcasting cases where RHS rank < LHS rank but not the other way around
+     *
      * @param[in]  a         First input tensor  (Matrix A or Vector A). Data type supported: BFLOAT16/F16/F32
      * @param[in]  b         Second input tensor (Matrix B). Data type supported: same as @p a
      * @param[in]  c         Third input tensor  (Matrix C). It can be a nullptr if just the multiplication between @p a and @p b is needed. Data type supported: same as @p a
--
cgit v1.2.1
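
The note added by this patch means a rank-3 (batched) LHS can be multiplied by a rank-2 RHS, with the RHS reused for every batch, but not the reverse. The following is a minimal sketch of how such a call might be set up with NEGEMM; it is not part of the patch, and the shapes (M, N, K, num_batches) and alpha/beta values are illustrative assumptions.

    // Sketch only: broadcast batched GEMM where RHS rank (2) < LHS rank (3).
    // Shapes and values below are assumptions for illustration.
    #include "arm_compute/core/Error.h"
    #include "arm_compute/core/Types.h"
    #include "arm_compute/runtime/NEON/functions/NEGEMM.h"
    #include "arm_compute/runtime/Tensor.h"

    using namespace arm_compute;

    int main()
    {
        const unsigned int M = 4, N = 8, K = 16, num_batches = 3;

        // TensorShape is (width, height, ...): A is MxK per batch, B is KxN, D is MxN per batch.
        Tensor a, b, d;
        a.allocator()->init(TensorInfo(TensorShape(K, M, num_batches), 1, DataType::F32)); // rank-3 LHS
        b.allocator()->init(TensorInfo(TensorShape(N, K), 1, DataType::F32));              // rank-2 RHS, broadcast over batches
        d.allocator()->init(TensorInfo(TensorShape(N, M, num_batches), 1, DataType::F32)); // rank-3 output

        // Check that this broadcasting configuration is supported before configuring.
        Status status = NEGEMM::validate(a.info(), b.info(), nullptr, d.info(), 1.f, 0.f);
        if(status.error_code() != ErrorCode::OK)
        {
            return 1;
        }

        NEGEMM gemm;
        gemm.configure(&a, &b, nullptr, &d, 1.f /* alpha */, 0.f /* beta */);

        a.allocator()->allocate();
        b.allocator()->allocate();
        d.allocator()->allocate();

        // Fill a and b with data here, then run the batched, broadcast matmul.
        gemm.run();
        return 0;
    }

Swapping the shapes (rank-2 LHS with rank-3 RHS) would be the unsupported direction described in the note, and validate() would be expected to reject it.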