path: root/tests/validation/reference/ActivationLayer.h
author: Renato Arantes <renato.arantes@arm.com> 2024-01-26 17:31:18 +0000
committer: Renato Barros Arantes <renato.arantes@arm.com> 2024-03-21 11:15:30 +0000
commit: 36a75dafdbe6d6a3a6f50bd075fe01f5b7dace38 (patch)
tree: 0701d615ef30444b9d0789db691b59b81fd9e86e /tests/validation/reference/ActivationLayer.h
parent: d2191150736dde66d79eb97e0c8ee506eef3c8fc (diff)
download: ComputeLibrary-36a75dafdbe6d6a3a6f50bd075fe01f5b7dace38.tar.gz
[ONCPUML-1451] Add matmul kernel to enable bf16 to bf16 operations via PyTorch® autocast() function
The full range of tests must be added under the [MLINFSW-482] epic, due to the lack of reordering kernels implemented in ACL.
Co-Authored-By: David Mansell <David.Mansell@arm.com>
Change-Id: I820d316295a1ec94fdc89c37e4144a268f914c36
Signed-off-by: Renato Arantes <renato.arantes@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/11169
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Gunes Bayir <gunes.bayir@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Benchmark: Arm Jenkins <bsgcomp@arm.com>
Diffstat (limited to 'tests/validation/reference/ActivationLayer.h')
-rw-r--r--  tests/validation/reference/ActivationLayer.h  23
1 file changed, 13 insertions, 10 deletions
diff --git a/tests/validation/reference/ActivationLayer.h b/tests/validation/reference/ActivationLayer.h
index a813ba5037..7f896bd696 100644
--- a/tests/validation/reference/ActivationLayer.h
+++ b/tests/validation/reference/ActivationLayer.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020, 2022 Arm Limited.
+ * Copyright (c) 2017-2020,2022,2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -21,8 +21,8 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef ARM_COMPUTE_TEST_ACTIVATION_LAYER_H
-#define ARM_COMPUTE_TEST_ACTIVATION_LAYER_H
+#ifndef ACL_TESTS_VALIDATION_REFERENCE_ACTIVATIONLAYER_H
+#define ACL_TESTS_VALIDATION_REFERENCE_ACTIVATIONLAYER_H
#include "tests/SimpleTensor.h"
#include "tests/validation/Helpers.h"
@@ -40,7 +40,7 @@ inline T activate_float(T x, T a, T b, ActivationLayerInfo::ActivationFunction a
{
T ret;
- switch(activation)
+ switch (activation)
{
case ActivationLayerInfo::ActivationFunction::ABS:
ret = std::abs(x);
@@ -61,13 +61,13 @@ inline T activate_float(T x, T a, T b, ActivationLayerInfo::ActivationFunction a
ret = std::min<T>(a, std::max<T>(b, x));
break;
case ActivationLayerInfo::ActivationFunction::LEAKY_RELU:
- ret = (x > 0) ? x : a * x;
+ ret = x > static_cast<T>(0) ? x : static_cast<T>(a * x);
break;
case ActivationLayerInfo::ActivationFunction::SOFT_RELU:
ret = std::log(static_cast<T>(1) + std::exp(static_cast<double>(x)));
break;
case ActivationLayerInfo::ActivationFunction::ELU:
- ret = (x > 0) ? x : a * (std::exp(x) - static_cast<T>(1));
+ ret = x > static_cast<T>(0) ? x : static_cast<T>(a * (std::exp(x) - static_cast<T>(1)));
break;
case ActivationLayerInfo::ActivationFunction::SQRT:
ret = std::sqrt(x);
@@ -82,10 +82,11 @@ inline T activate_float(T x, T a, T b, ActivationLayerInfo::ActivationFunction a
ret = x;
break;
case ActivationLayerInfo::ActivationFunction::HARD_SWISH:
- ret = x * ((std::min(std::max(static_cast<T>(x + 3), static_cast<T>(0.0f)), static_cast<T>(6.0f))) * 0.166666667f);
+ ret = x * ((std::min(std::max(static_cast<T>(x + 3), static_cast<T>(0.0f)), static_cast<T>(6.0f))) *
+ 0.166666667f);
break;
case ActivationLayerInfo::ActivationFunction::SWISH:
- ret = static_cast<T>(x) / (static_cast<T>(1) + std::exp(-a*x));
+ ret = static_cast<T>(x) / (static_cast<T>(1) + std::exp(-a * x));
break;
case ActivationLayerInfo::ActivationFunction::GELU:
ret = x * 0.5f * (1 + erf(x / std::sqrt(2.0f)));
@@ -99,9 +100,11 @@ inline T activate_float(T x, T a, T b, ActivationLayerInfo::ActivationFunction a
}
template <typename T>
-SimpleTensor<T> activation_layer(const SimpleTensor<T> &src, ActivationLayerInfo info, const QuantizationInfo &oq_info = QuantizationInfo());
+SimpleTensor<T> activation_layer(const SimpleTensor<T> &src,
+ ActivationLayerInfo info,
+ const QuantizationInfo &oq_info = QuantizationInfo());
} // namespace reference
} // namespace validation
} // namespace test
} // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_ACTIVATION_LAYER_H */
+#endif // ACL_TESTS_VALIDATION_REFERENCE_ACTIVATIONLAYER_H
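Note: the hunks above only reformat the scalar reference math; the values computed are unchanged. As a minimal standalone sketch (not ComputeLibrary code: plain float stands in for the templated type T, and the function names here are made up for illustration), the leaky ReLU, hard swish and swish branches touched above compute the following:

// Standalone illustration of the reference activation math; not part of the library.
#include <algorithm>
#include <cmath>
#include <cstdio>

// x > 0 ? x : a * x
static float leaky_relu(float x, float a)
{
    return x > 0.0f ? x : a * x;
}

// x * clamp(x + 3, 0, 6) / 6, with 1/6 written as 0.166666667f as in the header
static float hard_swish(float x)
{
    return x * (std::min(std::max(x + 3.0f, 0.0f), 6.0f) * 0.166666667f);
}

// x / (1 + exp(-a * x))
static float swish(float x, float a)
{
    return x / (1.0f + std::exp(-a * x));
}

int main()
{
    std::printf("leaky_relu(-2, 0.1) = %f\n", leaky_relu(-2.0f, 0.1f));
    std::printf("hard_swish(1.5)     = %f\n", hard_swish(1.5f));
    std::printf("swish(1.5, 1.0)     = %f\n", swish(1.5f, 1.0f));
    return 0;
}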