From c7b183ab741650653289f8ce3bdeb4926521fdbd Mon Sep 17 00:00:00 2001
From: Georgios Pinitas
Date: Fri, 6 Mar 2020 18:12:09 +0000
Subject: COMPMID-3160: Add Bfloat16 support in NEGEMMConvolutionLayer

Signed-off-by: Georgios Pinitas
Change-Id: I0e449306c138a562ffc1455e76ec44b2fd059d85
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/2860
Tested-by: Arm Jenkins
Reviewed-by: Michele Di Giorgio
Comments-Addressed: Arm Jenkins
---
 src/core/NEON/kernels/arm_gemm/merges/a64_merge_fp16_24x8.hpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'src/core/NEON/kernels/arm_gemm/merges/a64_merge_fp16_24x8.hpp')

diff --git a/src/core/NEON/kernels/arm_gemm/merges/a64_merge_fp16_24x8.hpp b/src/core/NEON/kernels/arm_gemm/merges/a64_merge_fp16_24x8.hpp
index f82e7b4e47..5d8eae4866 100644
--- a/src/core/NEON/kernels/arm_gemm/merges/a64_merge_fp16_24x8.hpp
+++ b/src/core/NEON/kernels/arm_gemm/merges/a64_merge_fp16_24x8.hpp
@@ -29,7 +29,7 @@ template<>
 void MergeResults<24, 8, false>(__fp16 *out, const __fp16 *in, const int ldout, const int y0, const int ymax, const int x0, const int xmax, const __fp16 *bias, Activation act, bool append)
 {
     const __fp16 *inptr = in;
-    __fp16 nullbias[24] = { 0 };
+    __fp16 nullbias[24];
     __fp16 minval = - static_cast<__fp16>(std::numeric_limits<float>::infinity());
     __fp16 maxval = static_cast<__fp16>(std::numeric_limits<float>::infinity());
--
cgit v1.2.1
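
Editor's note (not part of the patch): dropping the "= { 0 }" initializer only makes sense if the surrounding merge code zeroes nullbias on demand before it is ever read, so the common case with a real bias pointer never pays for the zero-fill. The sketch below illustrates that pattern; it is a minimal, hypothetical prologue, assuming a guarded memset similar to what the arm_gemm merges use — the function name, the "!append && !bias" condition and the comments are illustrative, not taken from this diff.

#if defined(__aarch64__) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
#include <cstring>

// Hypothetical, simplified shape of the merge prologue after this change:
// the scratch bias buffer is left uninitialised at declaration and is only
// zeroed when no bias is supplied and no accumulation is requested.
void merge_prologue_sketch(const __fp16 *bias, bool append)
{
    __fp16 nullbias[24];  // no "= { 0 }": zeroing is conditional below

    if (!append && !bias)
    {
        // Zero only when nullbias will actually be read in place of a bias.
        std::memset(nullbias, 0, 24 * sizeof(__fp16));
    }

    // ... the merge loops would then read from (bias ? bias : nullbias) ...
    (void)nullbias;
}
#endif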