From df24618b53cffed1c574e11e9fd4ba7740f8c009 Mon Sep 17 00:00:00 2001
From: Pablo Tello
Date: Mon, 3 Jul 2017 16:25:09 +0100
Subject: COMPMID-421: Added FP16 support to NENormalizationLayer and
 NEPixelWiseMultiplication.

Change-Id: If174f8071502fc5cc94b27cd44a9b1d5e451a9e2
Reviewed-on: http://mpd-gerrit.cambridge.arm.com/79553
Tested-by: Kaizen
Reviewed-by: Georgios Pinitas
---
 arm_compute/runtime/NEON/functions/NENormalizationLayer.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arm_compute/runtime')

diff --git a/arm_compute/runtime/NEON/functions/NENormalizationLayer.h b/arm_compute/runtime/NEON/functions/NENormalizationLayer.h
index 3202867c43..4cfea226f3 100644
--- a/arm_compute/runtime/NEON/functions/NENormalizationLayer.h
+++ b/arm_compute/runtime/NEON/functions/NENormalizationLayer.h
@@ -52,7 +52,7 @@ public:
     /** Set the input and output tensors.
      *
      * @param[in]  input     Source tensor. 3 lower dims represent a single input with dimensions [width, height, IFM],
-     *                       and an optional 4th dimension for batch of inputs. Data type supported: QS8/F32
+     *                       and an optional 4th dimension for batch of inputs. Data type supported: QS8/F16/F32
      * @param[out] output    Destination with the same dimensions, data type and number of channels of @p input
      * @param[in]  norm_info Normalization layer information like the normalization type, normalization size and other parameters.
      */
--
cgit v1.2.1
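
Editor's note (not part of the patch): a minimal usage sketch of the documentation change above, configuring NENormalizationLayer with F16 tensors. It assumes the standard Arm Compute Library Tensor/TensorInfo runtime API; the tensor shape and normalization parameters are illustrative choices, not values taken from the commit.

    // Sketch: run NENormalizationLayer on half-precision (F16) input, the data
    // type this patch adds to the supported list for configure().
    #include "arm_compute/core/Types.h"
    #include "arm_compute/runtime/NEON/functions/NENormalizationLayer.h"
    #include "arm_compute/runtime/Tensor.h"

    using namespace arm_compute;

    int main()
    {
        Tensor input{};
        Tensor output{};

        // [width, height, IFM] as described in the @param[in] documentation;
        // shape values here are arbitrary examples.
        const TensorShape shape(16U, 16U, 8U);
        input.allocator()->init(TensorInfo(shape, 1, DataType::F16));
        output.allocator()->init(TensorInfo(shape, 1, DataType::F16));

        // Cross-map normalization over a window of 5 is just one possible setting.
        NENormalizationLayer norm{};
        norm.configure(&input, &output, NormalizationLayerInfo(NormType::CROSS_MAP, 5));

        input.allocator()->allocate();
        output.allocator()->allocate();

        // Fill 'input' with F16 data here, then execute the function.
        norm.run();
        return 0;
    }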