Diffstat (limited to 'tests/validation/NEON/ActivationLayer.cpp')
-rw-r--r--  tests/validation/NEON/ActivationLayer.cpp  | 132
1 file changed, 114 insertions(+), 18 deletions(-)
diff --git a/tests/validation/NEON/ActivationLayer.cpp b/tests/validation/NEON/ActivationLayer.cpp
index 577603d07d..73f5de68ac 100644
--- a/tests/validation/NEON/ActivationLayer.cpp
+++ b/tests/validation/NEON/ActivationLayer.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2021 Arm Limited.
+ * Copyright (c) 2017-2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -23,10 +23,13 @@
*/
#include "arm_compute/core/Types.h"
#include "arm_compute/core/utils/misc/Traits.h"
+#include "arm_compute/core/utils/StringUtils.h"
#include "arm_compute/runtime/NEON/functions/NEActivationLayer.h"
#include "arm_compute/runtime/RuntimeContext.h"
#include "arm_compute/runtime/Tensor.h"
#include "arm_compute/runtime/TensorAllocator.h"
+#include "src/common/cpuinfo/CpuIsaInfo.h"
+#include "src/cpu/kernels/CpuActivationKernel.h"
#include "tests/NEON/Accessor.h"
#include "tests/PaddingCalculator.h"
#include "tests/datasets/ActivationFunctionsDataset.h"
@@ -37,7 +40,8 @@
#include "tests/validation/Validation.h"
#include "tests/validation/fixtures/ActivationLayerFixture.h"
-#include "support/Requires.h"
+#include "arm_compute/Acl.hpp"
+#include "support/AclRequires.h"
namespace arm_compute
{
@@ -65,14 +69,16 @@ RelativeTolerance<float> relative_tolerance(DataType data_type, ActivationLayerI
case ActivationLayerInfo::ActivationFunction::SQRT:
case ActivationLayerInfo::ActivationFunction::TANH:
case ActivationLayerInfo::ActivationFunction::HARD_SWISH:
+ case ActivationLayerInfo::ActivationFunction::SWISH:
+ case ActivationLayerInfo::ActivationFunction::GELU:
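+        // SWISH and GELU reuse the relaxed FP16 tolerances of the other non-linear activations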
switch(data_type)
{
case DataType::F16:
-#if defined(__ARM_FEATURE_SVE)
+#if defined(ENABLE_SVE)
return RelativeTolerance<float>(0.25f);
-#else // !defined(__ARM_FEATURE_SVE)
+#else // !defined(ENABLE_SVE)
return RelativeTolerance<float>(0.1f);
-#endif // defined(__ARM_FEATURE_SVE)
+#endif // defined(ENABLE_SVE)
default:
return RelativeTolerance<float>(0.05f);
}
@@ -80,11 +86,11 @@ RelativeTolerance<float> relative_tolerance(DataType data_type, ActivationLayerI
switch(data_type)
{
case DataType::F16:
-#if defined(__ARM_FEATURE_SVE)
+#if defined(ENABLE_SVE)
return RelativeTolerance<float>(0.9f);
-#else // !defined(__ARM_FEATURE_SVE)
+#else // !defined(ENABLE_SVE)
return RelativeTolerance<float>(0.01f);
-#endif // defined(__ARM_FEATURE_SVE)
+#endif // defined(ENABLE_SVE)
default:
return RelativeTolerance<float>(0.00001f);
}
@@ -107,15 +113,16 @@ AbsoluteTolerance<float> absolute_tolerance(DataType data_type, ActivationLayerI
case ActivationLayerInfo::ActivationFunction::LOGISTIC:
case ActivationLayerInfo::ActivationFunction::SQRT:
case ActivationLayerInfo::ActivationFunction::TANH:
+ case ActivationLayerInfo::ActivationFunction::SWISH:
case ActivationLayerInfo::ActivationFunction::HARD_SWISH:
switch(data_type)
{
case DataType::F16:
-#if defined(__ARM_FEATURE_SVE)
+#if defined(ENABLE_SVE)
return AbsoluteTolerance<float>(0.25f);
-#else // !defined(__ARM_FEATURE_SVE)
+#else // !defined(ENABLE_SVE)
return AbsoluteTolerance<float>(0.01f);
-#endif // defined(__ARM_FEATURE_SVE)
+#endif // defined(ENABLE_SVE)
default:
return AbsoluteTolerance<float>(0.00001f);
}
@@ -123,11 +130,11 @@ AbsoluteTolerance<float> absolute_tolerance(DataType data_type, ActivationLayerI
switch(data_type)
{
case DataType::F16:
-#if defined(__ARM_FEATURE_SVE)
+#if defined(ENABLE_SVE)
return AbsoluteTolerance<float>(0.9f);
-#else // !defined(__ARM_FEATURE_SVE)
+#else // !defined(ENABLE_SVE)
return AbsoluteTolerance<float>(0.01f);
-#endif // defined(__ARM_FEATURE_SVE)
+#endif // defined(ENABLE_SVE)
default:
return AbsoluteTolerance<float>(0.00001f);
}
@@ -169,7 +176,8 @@ const auto CNNDataTypes = framework::dataset::make("DataType",
DataType::F32,
});
-const auto NeonActivationFunctionsDataset = concat(datasets::ActivationFunctions(), framework::dataset::make("ActivationFunction", ActivationLayerInfo::ActivationFunction::HARD_SWISH));
+const auto NeonActivationFunctionsDataset = concat(datasets::ActivationFunctions(),
+ framework::dataset::make("ActivationFunction", { ActivationLayerInfo::ActivationFunction::HARD_SWISH, ActivationLayerInfo::ActivationFunction::SWISH }));
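+// HARD_SWISH and SWISH are appended here because the generic ActivationFunctions dataset does not include them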
/** Input data sets. */
const auto ActivationDataset = combine(combine(framework::dataset::make("InPlace", { false, true }), NeonActivationFunctionsDataset), framework::dataset::make("AlphaBeta", { 0.5f, 1.f }));
@@ -215,6 +223,48 @@ void test_float_sqrt_boundary_value()
TEST_SUITE(NEON)
TEST_SUITE(ActivationLayer)
+/** Test case for the operator-level C++ API with @ref acl::Activation.
+ *
+ * Configure an activation operator through the public API, bind tensors to it and run it.
+ *
+ * Checks performed in order:
+ * - Every API call returns acl::StatusCode::Success
+ */
+TEST_CASE(ActivationAPI, framework::DatasetMode::ALL)
+{
+ acl::StatusCode err = acl::StatusCode::Success;
+
+ // Create context & Queue
+ acl::Context ctx(acl::Target::Cpu, &err);
+ ARM_COMPUTE_ASSERT(err == acl::StatusCode::Success);
+
+ acl::Queue queue(ctx, &err);
+ ARM_COMPUTE_ASSERT(err == acl::StatusCode::Success);
+
+ // Create activation operator
+ acl::TensorDescriptor src_info({ 2, 3 }, acl::DataType::Float32);
+ acl::TensorDescriptor dst_info({ 2, 3 }, acl::DataType::Float32);
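+    // Descriptor fields: activation type, alpha, beta and an in-place flag (alpha/beta are unused by plain ReLU)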
+ acl::ActivationDesc desc{ AclRelu, 6.f, 0.f, false };
+
+ acl::Activation act(ctx, src_info, dst_info, desc, &err);
+ ARM_COMPUTE_ASSERT(err == acl::StatusCode::Success);
+
+ // Create tensors and feed
+ acl::Tensor src(ctx, src_info, &err);
+ ARM_COMPUTE_ASSERT(err == acl::StatusCode::Success);
+ acl::Tensor dst(ctx, dst_info, &err);
+ ARM_COMPUTE_ASSERT(err == acl::StatusCode::Success);
+
+ acl::TensorPack pack(ctx);
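+    // Bind the tensors to the operator's source and destination slots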
+    err = pack.add(src, ACL_SRC);
+    ARM_COMPUTE_ASSERT(err == acl::StatusCode::Success);
+    err = pack.add(dst, ACL_DST);
+    ARM_COMPUTE_ASSERT(err == acl::StatusCode::Success);
+
+ // Execute operator
+ err = act.run(queue, pack);
+ ARM_COMPUTE_ASSERT(err == acl::StatusCode::Success);
+}
+
// *INDENT-OFF*
// clang-format off
DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
@@ -236,6 +286,49 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
bool is_valid = bool(NEActivationLayer::validate(&input_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false), act_info));
ARM_COMPUTE_EXPECT(is_valid == expected, framework::LogLevel::ERRORS);
}
+
+DATA_TEST_CASE(KernelSelection, framework::DatasetMode::ALL, concat(concat(
+ combine(framework::dataset::make("CpuExt", std::string("NEON")),
+ framework::dataset::make("DataType", { DataType::F32,
+ DataType::F16,
+ DataType::QASYMM8,
+ DataType::QASYMM8_SIGNED,
+ DataType::QSYMM16
+ })),
+ combine(framework::dataset::make("CpuExt", std::string("SVE")),
+ framework::dataset::make("DataType", { DataType::F32,
+ DataType::F16,
+ }))),
+ combine(framework::dataset::make("CpuExt", std::string("SVE2")),
+ framework::dataset::make("DataType", { DataType::QASYMM8,
+ DataType::QASYMM8_SIGNED,
+ DataType::QSYMM16
+ }))),
+ cpu_ext, data_type)
+{
+ using namespace cpu::kernels;
+
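+    // Force the ISA flags so kernel selection is exercised for the requested CPU extension rather than the host's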
+ cpuinfo::CpuIsaInfo cpu_isa{};
+ cpu_isa.neon = (cpu_ext == "NEON");
+ cpu_isa.sve = (cpu_ext == "SVE");
+ cpu_isa.sve2 = (cpu_ext == "SVE2");
+ cpu_isa.fp16 = (data_type == DataType::F16);
+
+    const auto *selected_impl = CpuActivationKernel::get_implementation(ActivationDataTypeISASelectorData{ data_type, CPUModel::GENERIC, cpu_isa, ActivationLayerInfo::ActivationFunction::BOUNDED_RELU }, cpu::KernelSelectionType::Preferred);
+
+ ARM_COMPUTE_ERROR_ON_NULLPTR(selected_impl);
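+    // Selected kernels are named <extension>_<implementation data type>_activation, e.g. "sve_fp16_activation"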
+ std::string expected = lower_string(cpu_ext) + "_" + cpu_impl_dt(data_type) + "_activation";
+    if(data_type == DataType::QASYMM8 || data_type == DataType::QASYMM8_SIGNED)
+ {
+#ifdef __aarch64__
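+    // AArch64 provides a LUT-based implementation for 8-bit quantized activations, preferred over the generic kernels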
+ expected = "neon_q8_activation_lut";
+#else // __aarch64__
+ expected = lower_string(cpu_ext) + "_" + cpu_impl_dt(data_type) + "_activation";
+#endif // __aarch64__
+ }
+ std::string actual = selected_impl->name;
+ ARM_COMPUTE_EXPECT_EQUAL(expected, actual, framework::LogLevel::ERRORS);
+}
// clang-format on
// *INDENT-ON*
@@ -316,9 +409,12 @@ FIXTURE_DATA_TEST_CASE(RunSmall, NEActivationLayerQuantizedFixture<int8_t>, fram
TEST_SUITE_END() // QASYMM8_SIGNED
/** Input data sets. */
-const auto Int16QuantizedActivationFunctionsDataset = framework::dataset::make("ActivationFunction", { ActivationLayerInfo::ActivationFunction::LOGISTIC,
- ActivationLayerInfo::ActivationFunction::TANH
- });
+const auto Int16QuantizedActivationFunctionsDataset = framework::dataset::make("ActivationFunction",
+{
+ ActivationLayerInfo::ActivationFunction::LOGISTIC,
+ ActivationLayerInfo::ActivationFunction::TANH,
+ ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU,
+});
const auto Int16QuantizedActivationDataset = combine(combine(framework::dataset::make("InPlace", { false }), Int16QuantizedActivationFunctionsDataset),
framework::dataset::make("AlphaBeta", { 0.5f, 1.f }));