From c7b183ab741650653289f8ce3bdeb4926521fdbd Mon Sep 17 00:00:00 2001
From: Georgios Pinitas
Date: Fri, 6 Mar 2020 18:12:09 +0000
Subject: COMPMID-3160: Add Bfloat16 support in NEGEMMConvolutionLayer

Signed-off-by: Georgios Pinitas
Change-Id: I0e449306c138a562ffc1455e76ec44b2fd059d85
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/2860
Tested-by: Arm Jenkins
Reviewed-by: Michele Di Giorgio
Comments-Addressed: Arm Jenkins
---
 arm_compute/runtime/NEON/functions/NEGEMM.h | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

(limited to 'arm_compute/runtime/NEON/functions/NEGEMM.h')

diff --git a/arm_compute/runtime/NEON/functions/NEGEMM.h b/arm_compute/runtime/NEON/functions/NEGEMM.h
index f08bd9fac5..c87e806d0c 100644
--- a/arm_compute/runtime/NEON/functions/NEGEMM.h
+++ b/arm_compute/runtime/NEON/functions/NEGEMM.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2019 ARM Limited.
+ * Copyright (c) 2017-2020 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -74,7 +74,7 @@ public:
      * @note GEMM: General Matrix Multiply - [alpha * A * B + beta * C].
      * @note GEMM: The tensors a, b, c, d must have the same data type. You should not mix data types when calling this function.
      *
-     * @param[in]  a         First input tensor  (Matrix A or Vector A). Data type supported: F16/F32
+     * @param[in]  a         First input tensor  (Matrix A or Vector A). Data type supported: BFLOAT16/F16/F32
      * @param[in]  b         Second input tensor (Matrix B). Data type supported: same as @p a
      * @param[in]  c         Third input tensor  (Matrix C). It can be a nullptr if just the multiplication between @p a and @p b is needed. Data type supported: same as @p a
      * @param[out] d         Output tensor. Data type supported: same as @p a
@@ -86,7 +86,7 @@ public:
     void configure(const ITensor *a, const ITensor *b, const ITensor *c, ITensor *d, float alpha, float beta, const GEMMInfo &gemm_info = GEMMInfo());
     /** Static function to check if given info will lead to a valid configuration of @ref NEGEMM.
      *
-     * @param[in]  a         First input tensor info  (Matrix or Vector A). Data types supported: F16/F32
+     * @param[in]  a         First input tensor info  (Matrix or Vector A). Data types supported: BFLOAT16/F16/F32
     * @param[in]  b         Second input tensor info (Matrix B). Data type supported: same as @p a.
     * @param[in]  c         Third input tensor info  (Matrix C). It can be a nullptr if just the multiplication between @p a and @p b is needed. Data type supported: same as @p a.
     * @param[out] output    Output tensor info. Data type supported: same as @p a
--
cgit v1.2.1
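
For context, below is a minimal usage sketch of the API touched by this patch. The NEGEMM::configure()/validate() signatures and the BFLOAT16 data type come from the header shown above; everything else (the tensor shapes, the main() wrapper, and the assumption that the build and target CPU provide BF16 support) is illustrative only, not part of the change.

#include "arm_compute/core/Error.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/NEON/functions/NEGEMM.h"
#include "arm_compute/runtime/Tensor.h"

using namespace arm_compute;

int main()
{
    // Illustrative shapes: A is 32x64 (MxK), B is 64x16 (KxN), D is 32x16 (MxN).
    // TensorShape is given as (width, height), i.e. (columns, rows).
    Tensor a{}, b{}, d{};
    a.allocator()->init(TensorInfo(TensorShape(64U, 32U), 1, DataType::BFLOAT16));
    b.allocator()->init(TensorInfo(TensorShape(16U, 64U), 1, DataType::BFLOAT16));
    d.allocator()->init(TensorInfo(TensorShape(16U, 32U), 1, DataType::BFLOAT16));

    // validate() mirrors configure() and reports whether this data-type/shape
    // combination is supported (e.g. BFLOAT16 requires a build and CPU with BF16 support).
    const Status status = NEGEMM::validate(a.info(), b.info(), nullptr, d.info(), 1.0f, 0.0f);
    if(status.error_code() != ErrorCode::OK)
    {
        return 1; // Configuration not supported on this build/target.
    }

    NEGEMM gemm{};
    // c == nullptr: only the multiplication alpha * A * B is computed.
    gemm.configure(&a, &b, nullptr, &d, 1.0f /* alpha */, 0.0f /* beta */);

    a.allocator()->allocate();
    b.allocator()->allocate();
    d.allocator()->allocate();

    // Fill a and b with data here, then execute the operator.
    gemm.run();
    return 0;
}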