From b9626ab169a168a7c1ca57edd1996e1e80938bf1 Mon Sep 17 00:00:00 2001
From: Michalis Spyrou
Date: Mon, 13 May 2019 17:41:01 +0100
Subject: COMPMID-2243 ArgMinMaxLayer: support new datatypes

Change-Id: I846e833e0c94090cbbdcd6aee6061cea8295f4f9
Signed-off-by: Michalis Spyrou
Reviewed-on: https://review.mlplatform.org/c/1131
Reviewed-by: Giuseppe Rossini
Comments-Addressed: Arm Jenkins
Tested-by: Arm Jenkins
---
 arm_compute/runtime/NEON/functions/NEArgMinMaxLayer.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/arm_compute/runtime/NEON/functions/NEArgMinMaxLayer.h b/arm_compute/runtime/NEON/functions/NEArgMinMaxLayer.h
index 87d77a5e13..55b39e45ec 100644
--- a/arm_compute/runtime/NEON/functions/NEArgMinMaxLayer.h
+++ b/arm_compute/runtime/NEON/functions/NEArgMinMaxLayer.h
@@ -48,7 +48,7 @@ public:
     NEArgMinMaxLayer(std::shared_ptr<IMemoryManager> memory_manager = nullptr);
     /** Set the input and output tensors.
      *
-     * @param[in]  input  Input source tensor. Data types supported: F16/F32.
+     * @param[in]  input  Input source tensor. Data types supported: QASYMM8/S32/F16/F32.
      * @param[in]  axis   Axis to find max/min index.
      * @param[out] output Output source tensor. Data types supported: U32.
      * @param[in]  op     Operation to perform: min or max
@@ -56,7 +56,7 @@ public:
     void configure(ITensor *input, int axis, ITensor *output, const ReductionOperation &op);
     /** Static function to check if given info will lead to a valid configuration of @ref NEArgMinMaxLayer
      *
-     * @param[in]  input  Input source tensor info. Data types supported: F16/F32.
+     * @param[in]  input  Input source tensor info. Data types supported: QASYMM8/S32/F16/F32.
      * @param[in]  axis   Axis to find max/min index.
      * @param[in]  output Output source tensor info. Data types supported: U32.
      * @param[in]  op     Operation to perform: min or max
--
cgit v1.2.1
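
A minimal usage sketch (not part of the patch) of how the updated configure() might be called with one of the newly documented data types. The tensor shape, quantization parameters, and axis are illustrative placeholders, and the output tensor info is assumed to be auto-initialised by configure():

#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/NEON/functions/NEArgMinMaxLayer.h"
#include "arm_compute/runtime/Tensor.h"

using namespace arm_compute;

int main()
{
    // Quantized 2D input (32 x 16); shape and quantization parameters are placeholders.
    Tensor input;
    input.allocator()->init(TensorInfo(TensorShape(32U, 16U), 1, DataType::QASYMM8, QuantizationInfo(0.1f, 128)));

    // Output receives U32 indices; its info is assumed to be auto-initialised by configure().
    Tensor output;

    // Find the index of the maximum value along axis 0.
    NEArgMinMaxLayer argmax{};
    argmax.configure(&input, 0, &output, ReductionOperation::ARG_IDX_MAX);

    input.allocator()->allocate();
    output.allocator()->allocate();

    // ... fill input with quantized data ...
    argmax.run();

    return 0;
}

The static NEArgMinMaxLayer::validate() documented in the second hunk can be used with the corresponding ITensorInfo objects to check that a given configuration (for example an S32 input) is supported before any tensors are allocated.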