From 36a75dafdbe6d6a3a6f50bd075fe01f5b7dace38 Mon Sep 17 00:00:00 2001 From: Renato Arantes Date: Fri, 26 Jan 2024 17:31:18 +0000 Subject: =?UTF-8?q?[ONCPUML-1451]=20Add=20matmul=20kernel=20to=20enable=20?= =?UTF-8?q?bf16=20to=20bf16=20operations=20via=20PyTorch=C2=AE=20autocast(?= =?UTF-8?q?)=20function?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The full range of tests must be added with [MLINFSW-482] epic due to the lack of reordering kernels implemented in Acl. Co-Authored-By: David Mansell Change-Id: I820d316295a1ec94fdc89c37e4144a268f914c36 Signed-off-by: Renato Arantes Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/11169 Tested-by: Arm Jenkins Reviewed-by: Gunes Bayir Comments-Addressed: Arm Jenkins Benchmark: Arm Jenkins --- tests/validation/reference/ActivationLayer.h | 23 +++++++++++++---------- 1 file changed, 13 insertions(+), 10 deletions(-) (limited to 'tests/validation/reference/ActivationLayer.h') diff --git a/tests/validation/reference/ActivationLayer.h b/tests/validation/reference/ActivationLayer.h index a813ba5037..7f896bd696 100644 --- a/tests/validation/reference/ActivationLayer.h +++ b/tests/validation/reference/ActivationLayer.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2020, 2022 Arm Limited. + * Copyright (c) 2017-2020,2022,2024 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -21,8 +21,8 @@ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ -#ifndef ARM_COMPUTE_TEST_ACTIVATION_LAYER_H -#define ARM_COMPUTE_TEST_ACTIVATION_LAYER_H +#ifndef ACL_TESTS_VALIDATION_REFERENCE_ACTIVATIONLAYER_H +#define ACL_TESTS_VALIDATION_REFERENCE_ACTIVATIONLAYER_H #include "tests/SimpleTensor.h" #include "tests/validation/Helpers.h" @@ -40,7 +40,7 @@ inline T activate_float(T x, T a, T b, ActivationLayerInfo::ActivationFunction a { T ret; - switch(activation) + switch (activation) { case ActivationLayerInfo::ActivationFunction::ABS: ret = std::abs(x); @@ -61,13 +61,13 @@ inline T activate_float(T x, T a, T b, ActivationLayerInfo::ActivationFunction a ret = std::min(a, std::max(b, x)); break; case ActivationLayerInfo::ActivationFunction::LEAKY_RELU: - ret = (x > 0) ? x : a * x; + ret = x > static_cast<T>(0) ? x : static_cast<T>(a * x); break; case ActivationLayerInfo::ActivationFunction::SOFT_RELU: ret = std::log(static_cast<T>(1) + std::exp(static_cast<T>(x))); break; case ActivationLayerInfo::ActivationFunction::ELU: - ret = (x > 0) ? x : a * (std::exp(x) - static_cast<T>(1)); + ret = x > static_cast<T>(0) ? 
x : static_cast<T>(a * (std::exp(x) - static_cast<T>(1))); break; case ActivationLayerInfo::ActivationFunction::SQRT: ret = std::sqrt(x); @@ -82,10 +82,11 @@ inline T activate_float(T x, T a, T b, ActivationLayerInfo::ActivationFunction a ret = x; break; case ActivationLayerInfo::ActivationFunction::HARD_SWISH: - ret = x * ((std::min(std::max(static_cast<T>(x + 3), static_cast<T>(0.0f)), static_cast<T>(6.0f))) * 0.166666667f); + ret = x * ((std::min(std::max(static_cast<T>(x + 3), static_cast<T>(0.0f)), static_cast<T>(6.0f))) * + 0.166666667f); break; case ActivationLayerInfo::ActivationFunction::SWISH: - ret = static_cast<T>(x) / (static_cast<T>(1) + std::exp(-a*x)); + ret = static_cast<T>(x) / (static_cast<T>(1) + std::exp(-a * x)); break; case ActivationLayerInfo::ActivationFunction::GELU: ret = x * 0.5f * (1 + erf(x / std::sqrt(2.0f))); @@ -99,9 +100,11 @@ inline T activate_float(T x, T a, T b, ActivationLayerInfo::ActivationFunction a } template <typename T> -SimpleTensor<T> activation_layer(const SimpleTensor<T> &src, ActivationLayerInfo info, const QuantizationInfo &oq_info = QuantizationInfo()); +SimpleTensor<T> activation_layer(const SimpleTensor<T> &src, + ActivationLayerInfo info, + const QuantizationInfo &oq_info = QuantizationInfo()); } // namespace reference } // namespace validation } // namespace test } // namespace arm_compute -#endif /* ARM_COMPUTE_TEST_ACTIVATION_LAYER_H */ +#endif // ACL_TESTS_VALIDATION_REFERENCE_ACTIVATIONLAYER_H -- cgit v1.2.1