From 4b3fba1850fdf84ba3f9a0c98acf3de672330b34 Mon Sep 17 00:00:00 2001
From: Georgios Pinitas
Date: Tue, 4 Jun 2019 17:31:46 +0100
Subject: COMPMID-2372: Add support for QASYMM8 for Tanh

-Perform calculations in the floating point domain
-Extends checks for Logistic as scale should be 1/256 and offset 0

Change-Id: I90ef4a042f053976936f5d28f8e09b54eec196a2
Signed-off-by: Georgios Pinitas
Reviewed-on: https://review.mlplatform.org/c/1287
Tested-by: Arm Jenkins
Reviewed-by: Michalis Spyrou
Comments-Addressed: Arm Jenkins
---
 tests/validation/reference/ActivationLayer.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'tests/validation/reference/ActivationLayer.h')

diff --git a/tests/validation/reference/ActivationLayer.h b/tests/validation/reference/ActivationLayer.h
index cc43bc10a8..5beca7c76d 100644
--- a/tests/validation/reference/ActivationLayer.h
+++ b/tests/validation/reference/ActivationLayer.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2018 ARM Limited.
+ * Copyright (c) 2017-2019 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -87,7 +87,7 @@ inline T activate_float(T x, T a, T b, ActivationLayerInfo::ActivationFunction a
 }
 
 template <typename T>
-SimpleTensor<T> activation_layer(const SimpleTensor<T> &src, ActivationLayerInfo info);
+SimpleTensor<T> activation_layer(const SimpleTensor<T> &src, ActivationLayerInfo info, const QuantizationInfo &oq_info = QuantizationInfo());
 } // namespace reference
 } // namespace validation
 } // namespace test
--
cgit v1.2.1
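
For context, below is a minimal standalone sketch of the approach the commit message describes: compute the QASYMM8 Tanh reference in the floating point domain by dequantizing the input, applying tanh, and quantizing with the output quantization info. QuantInfo, dequantize, quantize and tanh_qasymm8 are hypothetical stand-ins for illustration only; they are not the library's QuantizationInfo or SimpleTensor-based reference implementation, and the example quantization parameters are assumptions.

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <iostream>
#include <vector>

// Hypothetical stand-in for the library's QuantizationInfo (scale, offset).
struct QuantInfo
{
    float scale{ 1.0f };
    int   offset{ 0 };
};

// Dequantize a QASYMM8 value to float: f = scale * (q - offset).
inline float dequantize(std::uint8_t q, const QuantInfo &qi)
{
    return qi.scale * (static_cast<int>(q) - qi.offset);
}

// Quantize a float back to QASYMM8 with rounding and saturation to [0, 255].
inline std::uint8_t quantize(float f, const QuantInfo &qi)
{
    const int q = qi.offset + static_cast<int>(std::lround(f / qi.scale));
    return static_cast<std::uint8_t>(std::min(255, std::max(0, q)));
}

// QASYMM8 tanh computed in the floating point domain:
// dequantize -> tanh -> quantize with the output quantization info.
std::vector<std::uint8_t> tanh_qasymm8(const std::vector<std::uint8_t> &src,
                                       const QuantInfo                 &iq,
                                       const QuantInfo                 &oq)
{
    std::vector<std::uint8_t> dst(src.size());
    std::transform(src.begin(), src.end(), dst.begin(), [&](std::uint8_t q)
    {
        return quantize(std::tanh(dequantize(q, iq)), oq);
    });
    return dst;
}

int main()
{
    // Example (assumed) quantization parameters: tanh output lies in [-1, 1],
    // so scale = 1/128 with offset = 128 covers that range symmetrically.
    const QuantInfo iq{ 1.0f / 32.0f, 128 };
    const QuantInfo oq{ 1.0f / 128.0f, 128 };

    for(std::uint8_t q : tanh_qasymm8({ 0, 64, 128, 192, 255 }, iq, oq))
    {
        std::cout << static_cast<int>(q) << " ";
    }
    std::cout << "\n";
    return 0;
}

The same dequantize/activate/quantize pattern would apply to Logistic, whose output range [0, 1] is what motivates the scale = 1/256, offset = 0 check mentioned in the commit message.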