Diffstat (limited to 'arm_compute/runtime/CL/functions/CLMatMul.h')
-rw-r--r--  arm_compute/runtime/CL/functions/CLMatMul.h | 22
1 file changed, 16 insertions(+), 6 deletions(-)
diff --git a/arm_compute/runtime/CL/functions/CLMatMul.h b/arm_compute/runtime/CL/functions/CLMatMul.h
index 2af9a4a9a6..a11c1ed6a2 100644
--- a/arm_compute/runtime/CL/functions/CLMatMul.h
+++ b/arm_compute/runtime/CL/functions/CLMatMul.h
@@ -24,6 +24,8 @@
#ifndef ACL_ARM_COMPUTE_RUNTIME_CL_FUNCTIONS_CLMATMUL
#define ACL_ARM_COMPUTE_RUNTIME_CL_FUNCTIONS_CLMATMUL
+#include "arm_compute/core/ActivationLayerInfo.h"
+#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/IFunction.h"
#include <memory>
@@ -83,21 +85,29 @@ public:
* @param[in] rhs Right-hand side tensor info containing the input weights as Matrix B. Data types supported: same as @p lhs.
* @param[out] dst Output tensor to store the result of the batched matrix multiplication. Data types supported: same as @p lhs.
* @param[in] matmul_info Contains MatMul operation information described in @ref MatMulInfo.
- * @param[in] settings Class containing flags for function level settings
+ * @param[in] settings Contains flags for function level settings
+ * @param[in] act_info (Optional) Contains activation function and lower and upper bound values for bounded activation functions.
*/
- void configure(const CLCompileContext &compile_context, ICLTensor *rhs, ICLTensor *lhs, ICLTensor *dst, const MatMulInfo &matmul_info, const GpuMatMulSettings &settings = GpuMatMulSettings{});
+ void configure(const CLCompileContext &compile_context, ICLTensor *rhs, ICLTensor *lhs, ICLTensor *dst, const MatMulInfo &matmul_info,
+ const GpuMatMulSettings &settings = GpuMatMulSettings{}, const ActivationLayerInfo &act_info = ActivationLayerInfo{});
/** Initialise the kernel's inputs and output
*
* Similar to @ref CLMatMul::configure()
*/
- void configure(ICLTensor *lhs, ICLTensor *rhs, ICLTensor *dst, const MatMulInfo &matmul_info, const GpuMatMulSettings &settings = GpuMatMulSettings{});
+ void configure(ICLTensor *lhs, ICLTensor *rhs, ICLTensor *dst, const MatMulInfo &matmul_info, const GpuMatMulSettings &settings = GpuMatMulSettings{},
+ const ActivationLayerInfo &act_info = ActivationLayerInfo{});
/** Static function to check if given info will lead to a valid configuration of @ref CLMatMul.
*
- * Similar to @ref CLMatMul::configure()
*
- * @return a status
+ * @note All tensors must have the same data type.
+ *
+ * @param[in] lhs Left-hand side (Matrix A) tensor info. Data types supported: F16/F32/QASYMM8_SIGNED/QASYMM8.
+ * @param[in] rhs Right-hand side (Matrix B) tensor info. Data types supported: same as @p lhs.
+ * @param[out] output Output tensor info to store the result of the batched matrix multiplication. Data types supported: same as @p lhs.
+ * @param[in] matmul_info Contains MatMul operation information described in @ref MatMulInfo.
+ * @param[in] act_info (Optional) Contains activation function and lower and upper bound values for bounded activation functions.
*/
- static Status validate(const ITensorInfo *lhs, const ITensorInfo *rhs, const ITensorInfo *output, const MatMulInfo &matmul_info);
+ static Status validate(const ITensorInfo *lhs, const ITensorInfo *rhs, const ITensorInfo *output, const MatMulInfo &matmul_info, const ActivationLayerInfo &act_info = ActivationLayerInfo{});
// Inherited methods overridden:
void run() override;
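
For context, the sketch below shows how the extended interface might be called once this patch is applied. It is a minimal, illustrative example rather than code from the repository: the tensor shapes, the ReLU choice, and the main() scaffolding are assumptions, while CLMatMul, CLTensor, CLScheduler, TensorInfo, MatMulInfo, GpuMatMulSettings and ActivationLayerInfo are existing Arm Compute Library types.

#include "arm_compute/core/Error.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "arm_compute/runtime/CL/CLTensor.h"
#include "arm_compute/runtime/CL/functions/CLMatMul.h"

using namespace arm_compute;

int main()
{
    // Set up the default CL context and queue for the runtime.
    CLScheduler::get().default_init();

    // Illustrative shapes only: lhs is MxK, rhs is KxN, dst is MxN (TensorShape is width-first).
    const unsigned int M = 64, N = 32, K = 128;
    CLTensor lhs, rhs, dst;
    lhs.allocator()->init(TensorInfo(TensorShape(K, M), 1, DataType::F32));
    rhs.allocator()->init(TensorInfo(TensorShape(N, K), 1, DataType::F32));
    dst.allocator()->init(TensorInfo(TensorShape(N, M), 1, DataType::F32));

    // New in this patch: an activation can be fused into the matmul.
    const ActivationLayerInfo act(ActivationLayerInfo::ActivationFunction::RELU);

    // Optional: validate the configuration first, including the fused activation.
    const Status st = CLMatMul::validate(lhs.info(), rhs.info(), dst.info(), MatMulInfo{}, act);
    ARM_COMPUTE_ERROR_THROW_ON(st);

    CLMatMul matmul;
    // settings and act_info are both defaulted; passing act here requests the fused ReLU.
    matmul.configure(&lhs, &rhs, &dst, MatMulInfo{}, GpuMatMulSettings{}, act);

    lhs.allocator()->allocate();
    rhs.allocator()->allocate();
    dst.allocator()->allocate();

    // ... fill lhs and rhs, then execute ...
    matmul.run();
    return 0;
}

Because settings and act_info both carry defaulted arguments, existing callers that pass only the tensors and matmul_info keep compiling unchanged, and a default-constructed ActivationLayerInfo means no activation is applied.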