From 8b2a7d3aa119e7f1d6a03690d05eb27c5d178b9f Mon Sep 17 00:00:00 2001
From: Giorgio Arena
Date: Tue, 11 Feb 2020 17:21:31 +0000
Subject: COMPMID-3101 Fuse activation with floating point elementwise
 operation layers in CL

Signed-off-by: Giorgio Arena
Change-Id: I1693f8664ba7c0dc8c076bbe7365cef1e667bd25
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/2718
Tested-by: Arm Jenkins
Reviewed-by: Gian Marco Iodice
Comments-Addressed: Arm Jenkins
---
 tests/validation/reference/ActivationLayer.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tests/validation/reference/ActivationLayer.h b/tests/validation/reference/ActivationLayer.h
index f41e87123e..4585a9db10 100644
--- a/tests/validation/reference/ActivationLayer.h
+++ b/tests/validation/reference/ActivationLayer.h
@@ -82,7 +82,7 @@ inline T activate_float(T x, T a, T b, ActivationLayerInfo::ActivationFunction a
             ret = x;
             break;
         case ActivationLayerInfo::ActivationFunction::HARD_SWISH:
-            ret = x * ((std::min(std::max((x + 3), 0.0f), 6.0f)) * 0.166666667f);
+            ret = x * ((std::min(std::max(static_cast<T>(x + 3), static_cast<T>(0.0f)), static_cast<T>(6.0f))) * 0.166666667f);
             break;
         default:
             ARM_COMPUTE_ERROR("Unsupported activation function");
-- 
cgit v1.2.1
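
For reference, a minimal standalone sketch of the hard-swish computation this hunk changes: hard_swish(x) = x * clamp(x + 3, 0, 6) / 6. Casting every operand to T keeps std::min/std::max template deduction working when T is not float (e.g. a half-precision type), which is what the patched line does. The hard_swish helper and the main() spot-checks below are illustrative only, not part of the library:

#include <algorithm>
#include <cassert>
#include <cmath>

// Sketch of the reference hard-swish after this patch:
// hard_swish(x) = x * clamp(x + 3, 0, 6) * (1/6).
// With mixed T/float arguments, std::min and std::max cannot
// deduce a single template argument, hence the static_cast<T>
// around each operand.
template <typename T>
T hard_swish(T x)
{
    return x * ((std::min(std::max(static_cast<T>(x + 3), static_cast<T>(0.0f)), static_cast<T>(6.0f))) * static_cast<T>(0.166666667f));
}

int main()
{
    // hard_swish(1) = 1 * 4/6
    assert(std::fabs(hard_swish(1.0f) - 4.0f / 6.0f) < 1e-6f);
    // Fully gated for x <= -3.
    assert(hard_swish(-4.0f) == 0.0f);
    // Approximately identity for large positive x.
    assert(std::fabs(hard_swish(10.0f) - 10.0f) < 1e-5f);
    return 0;
}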