author     morgolock <pablo.tello@arm.com>                 2020-03-04 14:57:46 +0000
committer  Michele Di Giorgio <michele.digiorgio@arm.com>  2020-03-10 18:01:33 +0000
commit     6b3865ad038d60a126fe1f90df815a480527a29f (patch)
tree       5f363b89bde5fc0ca98cf12d2ef82fafe1cb0bd4
parent     ce3a7b27f80960e88415bb6cabbb75de2239cea8 (diff)
COMPMID-3080: Implement Hard-Swish activation in CL
Change-Id: I5ed35a5e0fba09791e4371b1a74108a1fdfed900
Signed-off-by: morgolock <pablo.tello@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/2848
Reviewed-by: Sang-Hoon Park <sang-hoon.park@arm.com>
Reviewed-by: Giorgio Arena <giorgio.arena@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
-rw-r--r--  src/core/CL/cl_kernels/activation_float_helpers.h   6
-rw-r--r--  src/core/CL/cl_kernels/activation_quant_helpers.h   9
-rw-r--r--  src/core/CL/kernels/CLActivationLayerKernel.cpp     10
-rw-r--r--  src/core/Utils.cpp                                   2
-rw-r--r--  tests/validation/CL/ActivationLayer.cpp             19
-rw-r--r--  tests/validation/NEON/ActivationLayer.cpp            1
6 files changed, 33 insertions, 14 deletions
diff --git a/src/core/CL/cl_kernels/activation_float_helpers.h b/src/core/CL/cl_kernels/activation_float_helpers.h
index 0c82f83dbc..8590f25635 100644
--- a/src/core/CL/cl_kernels/activation_float_helpers.h
+++ b/src/core/CL/cl_kernels/activation_float_helpers.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019 ARM Limited.
+ * Copyright (c) 2019-2020 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -29,6 +29,10 @@
#else // GPU_ARCH == GPU_ARCH_BIFROST
#define MLA(a, b, c) ((b) * (c) + (a))
#endif // GPU_ARCH == GPU_ARCH_BIFROST
+
+// Hard-Swish
+#define hard_swish_op(DATA_TYPE, x, A_VAL, B_VAL) (x * ((min(max((x + (DATA_TYPE)3.0), (DATA_TYPE)0.0), (DATA_TYPE)6.0)) * (DATA_TYPE)0.166666667))
+
// Logistic Activation
#define logistic_op(DATA_TYPE, x, A_VAL, B_VAL) ((DATA_TYPE)1.0 / ((DATA_TYPE)1.0 + exp(-x)))
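
The hard_swish_op macro above is the standard hard-swish formulation hswish(x) = x * ReLU6(x + 3) / 6; the constant 0.166666667 is a single-precision approximation of 1/6. A minimal scalar sketch of the same computation in C++ (hard_swish_ref is an illustrative name, not part of the library):

    #include <algorithm>

    // Scalar reference for hard_swish_op: x * clamp(x + 3, 0, 6) * (1/6).
    float hard_swish_ref(float x)
    {
        return x * std::min(std::max(x + 3.0f, 0.0f), 6.0f) * (1.0f / 6.0f);
    }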
diff --git a/src/core/CL/cl_kernels/activation_quant_helpers.h b/src/core/CL/cl_kernels/activation_quant_helpers.h
index 0e4eb2b32e..7eaf082df3 100644
--- a/src/core/CL/cl_kernels/activation_quant_helpers.h
+++ b/src/core/CL/cl_kernels/activation_quant_helpers.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019 ARM Limited.
+ * Copyright (c) 2019-2020 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -48,6 +48,11 @@ inline TYPE lu_brelu_op(TYPE x)
{
return min(max(x, (TYPE)B_VAL), (TYPE)A_VAL);
}
+// Hard Swish Activation
+inline TYPE hard_swish_op(TYPE x)
+{
+ return (x * ((min(max((TYPE)(x + (TYPE)3.f), (TYPE)0.f), (TYPE)6.f)) * (TYPE)0.166666667f));
+}
#define ACTIVATION_OP2(op, x) op##_op(x)
#define ACTIVATION_OP(op, x) ACTIVATION_OP2(op, x)
@@ -81,4 +86,4 @@ inline TYPE lu_brelu_op(TYPE x)
({ \
data = ACTIVATION_OP(act, data); \
})
-#endif /* defined(S1_VAL) && defined(S2_VAL) */
\ No newline at end of file
+#endif /* defined(S1_VAL) && defined(S2_VAL) */
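
The new inline hard_swish_op slots into the existing token-pasting dispatch directly below it: ACTIVATION_OP(hard_swish, data) expands through ACTIVATION_OP2 to hard_swish_op(data). A self-contained C++ illustration of that pattern, with TYPE fixed to float for brevity (the header derives TYPE from the kernel build options; main() here is purely illustrative):

    #include <algorithm>
    #include <cstdio>

    using TYPE = float; // stand-in; the header derives TYPE from build options

    inline TYPE hard_swish_op(TYPE x)
    {
        return x * std::min(std::max(x + 3.0f, 0.0f), 6.0f) * 0.166666667f;
    }

    // Two-level expansion as in the header: the indirection lets `op` itself
    // be a macro before ##_op is pasted.
    #define ACTIVATION_OP2(op, x) op##_op(x)
    #define ACTIVATION_OP(op, x) ACTIVATION_OP2(op, x)

    int main()
    {
        std::printf("%f\n", ACTIVATION_OP(hard_swish, 1.5f)); // hard_swish_op(1.5f)
        return 0;
    }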
diff --git a/src/core/CL/kernels/CLActivationLayerKernel.cpp b/src/core/CL/kernels/CLActivationLayerKernel.cpp
index 270eb78dcb..350929500b 100644
--- a/src/core/CL/kernels/CLActivationLayerKernel.cpp
+++ b/src/core/CL/kernels/CLActivationLayerKernel.cpp
@@ -54,7 +54,8 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, c
ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU,
ActivationLayerInfo::ActivationFunction::BOUNDED_RELU,
ActivationLayerInfo::ActivationFunction::LOGISTIC,
- ActivationLayerInfo::ActivationFunction::TANH
+ ActivationLayerInfo::ActivationFunction::TANH,
+ ActivationLayerInfo::ActivationFunction::HARD_SWISH
};
const DataType data_type = input->data_type();
const QuantizationInfo &oq_info = (output != nullptr) ? output->quantization_info() : input->quantization_info();
@@ -139,9 +140,10 @@ void CLActivationLayerKernel::configure(ICLTensor *input, ICLTensor *output, Act
float a_const = act_info.a();
float b_const = act_info.b();
- const ActivationLayerInfo::ActivationFunction f_act = act_info.activation();
- const bool is_quantized = is_data_type_quantized(dt);
- const bool perform_activation_in_float = (f_act == ActivationLayerInfo::ActivationFunction::LOGISTIC) || (f_act == ActivationLayerInfo::ActivationFunction::TANH);
+ const ActivationLayerInfo::ActivationFunction f_act = act_info.activation();
+ const bool is_quantized = is_data_type_quantized(dt);
+ const bool perform_activation_in_float =
+ (f_act == ActivationLayerInfo::ActivationFunction::LOGISTIC) || (f_act == ActivationLayerInfo::ActivationFunction::TANH) || (f_act == ActivationLayerInfo::ActivationFunction::HARD_SWISH);
// Set build options
CLBuildOptions build_opts;
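
Adding HARD_SWISH to perform_activation_in_float means that, for quantized tensors, the kernel dequantizes, applies the activation in floating point, and requantizes, as it already does for LOGISTIC and TANH. A rough scalar sketch of that round-trip for QASYMM8 (the helper names are illustrative, not the library's internal API):

    #include <algorithm>
    #include <cmath>
    #include <cstdint>

    // Illustrative QASYMM8 helpers, assuming the usual affine scheme
    // real = (q - offset) * scale.
    static float dequantize(uint8_t q, float scale, int32_t offset)
    {
        return (static_cast<int32_t>(q) - offset) * scale;
    }

    static uint8_t quantize(float v, float scale, int32_t offset)
    {
        const int32_t q = static_cast<int32_t>(std::lround(v / scale)) + offset;
        return static_cast<uint8_t>(std::min(255, std::max(0, q)));
    }

    // Hard-swish on a quantized value via the float round-trip.
    static uint8_t hard_swish_qasymm8(uint8_t q, float scale, int32_t offset)
    {
        const float x = dequantize(q, scale, offset);
        const float y = x * std::min(std::max(x + 3.0f, 0.0f), 6.0f) / 6.0f;
        return quantize(y, scale, offset);
    }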
diff --git a/src/core/Utils.cpp b/src/core/Utils.cpp
index fb86d78cb7..f5db6e1502 100644
--- a/src/core/Utils.cpp
+++ b/src/core/Utils.cpp
@@ -178,6 +178,8 @@ const std::string &arm_compute::string_from_activation_func(ActivationLayerInfo:
{ ActivationLayerInfo::ActivationFunction::SQUARE, "SQUARE" },
{ ActivationLayerInfo::ActivationFunction::TANH, "TANH" },
{ ActivationLayerInfo::ActivationFunction::IDENTITY, "IDENTITY" },
+ { ActivationLayerInfo::ActivationFunction::HARD_SWISH, "HARD_SWISH" }
+
};
return act_map[act];
diff --git a/tests/validation/CL/ActivationLayer.cpp b/tests/validation/CL/ActivationLayer.cpp
index 8b12b0b28b..9ebbedc3c4 100644
--- a/tests/validation/CL/ActivationLayer.cpp
+++ b/tests/validation/CL/ActivationLayer.cpp
@@ -73,6 +73,8 @@ AbsoluteTolerance<float> tolerance(ActivationLayerInfo::ActivationFunction activ
return AbsoluteTolerance<float>(data_type == DataType::F16 ? 0.01f : 0.00001f);
case ActivationLayerInfo::ActivationFunction::TANH:
return AbsoluteTolerance<float>(data_type == DataType::F16 ? 0.001f : 0.00001f);
+ case ActivationLayerInfo::ActivationFunction::HARD_SWISH:
+ return AbsoluteTolerance<float>(data_type == DataType::F16 ? 0.01f : epsilon);
default:
return AbsoluteTolerance<float>(epsilon);
}
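
The looser 0.01 tolerance for F16 hard-swish reflects the error accumulated by the add, clamp, and two multiplies at half precision. An absolute-tolerance comparison reduces to the following check (within_abs_tolerance is a stand-in sketch, not the framework's validate() internals):

    #include <cmath>

    // Pass when the computed value is within `tol` of the reference.
    bool within_abs_tolerance(float target, float reference, float tol)
    {
        return std::fabs(target - reference) <= tol;
    }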
@@ -222,12 +224,17 @@ TEST_SUITE_END() // Float
template <typename T>
using CLActivationLayerQuantizedFixture = ActivationValidationQuantizedFixture<CLTensor, CLAccessor, CLActivationLayer, T>;
-const auto QuantizedActivationDataset = combine(combine(framework::dataset::make("InPlace", { false }), datasets::ActivationFunctionsQuantized()),
- framework::dataset::make("AlphaBeta", { 0.5f, 1.f }));
+const auto QuantizedActivationDataset8 = combine(combine(framework::dataset::make("InPlace", { false }),
+ concat(datasets::ActivationFunctionsQuantized(), framework::dataset::make("ActivationFunction", ActivationLayerInfo::ActivationFunction::HARD_SWISH))),
+ framework::dataset::make("AlphaBeta", { 0.5f, 1.f }));
+
+const auto QuantizedActivationDataset16 = combine(combine(framework::dataset::make("InPlace", { false }),
+ datasets::ActivationFunctionsQuantized()),
+ framework::dataset::make("AlphaBeta", { 0.5f, 1.f }));
TEST_SUITE(Quantized)
TEST_SUITE(QASYMM8)
-FIXTURE_DATA_TEST_CASE(RunSmall, CLActivationLayerQuantizedFixture<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallShapes(), QuantizedActivationDataset),
+FIXTURE_DATA_TEST_CASE(RunSmall, CLActivationLayerQuantizedFixture<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallShapes(), QuantizedActivationDataset8),
framework::dataset::make("DataType",
DataType::QASYMM8)),
framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.1f, 128.0f) })))
@@ -237,7 +244,7 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLActivationLayerQuantizedFixture<uint8_t>, fra
}
TEST_SUITE_END() // QASYMM8
TEST_SUITE(QASYMM8_SIGNED)
-FIXTURE_DATA_TEST_CASE(RunSmall, CLActivationLayerQuantizedFixture<int8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallShapes(), QuantizedActivationDataset),
+FIXTURE_DATA_TEST_CASE(RunSmall, CLActivationLayerQuantizedFixture<int8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallShapes(), QuantizedActivationDataset8),
framework::dataset::make("DataType",
DataType::QASYMM8_SIGNED)),
framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.1f, 10.0f) })))
@@ -247,7 +254,7 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLActivationLayerQuantizedFixture<int8_t>, fram
}
TEST_SUITE_END() // QASYMM8_SIGNED
TEST_SUITE(QSYMM16)
-FIXTURE_DATA_TEST_CASE(RunSmall, CLActivationLayerQuantizedFixture<int16_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallShapes(), QuantizedActivationDataset),
+FIXTURE_DATA_TEST_CASE(RunSmall, CLActivationLayerQuantizedFixture<int16_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallShapes(), QuantizedActivationDataset16),
framework::dataset::make("DataType",
DataType::QSYMM16)),
framework::dataset::make("QuantizationInfo", { QuantizationInfo(1.f / 32768.f, 0) })))
@@ -255,7 +262,7 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLActivationLayerQuantizedFixture<int16_t>, fra
// Validate output
validate(CLAccessor(_target), _reference, tolerance_qsymm16);
}
-FIXTURE_DATA_TEST_CASE(RunLarge, CLActivationLayerQuantizedFixture<int16_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeShapes(), QuantizedActivationDataset),
+FIXTURE_DATA_TEST_CASE(RunLarge, CLActivationLayerQuantizedFixture<int16_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeShapes(), QuantizedActivationDataset16),
framework::dataset::make("DataType",
DataType::QSYMM16)),
framework::dataset::make("QuantizationInfo", { QuantizationInfo(1.f / 32768.f, 0) })))
diff --git a/tests/validation/NEON/ActivationLayer.cpp b/tests/validation/NEON/ActivationLayer.cpp
index f9f7451ed7..de8003434f 100644
--- a/tests/validation/NEON/ActivationLayer.cpp
+++ b/tests/validation/NEON/ActivationLayer.cpp
@@ -242,7 +242,6 @@ const auto QuantizedActivationFunctionsDataset = framework::dataset::make("Activ
ActivationLayerInfo::ActivationFunction::LOGISTIC,
ActivationLayerInfo::ActivationFunction::TANH
});
-const auto NeonActivationFunctionsDataset = concat(datasets::ActivationFunctions(), framework::dataset::make("ActivationFunction", ActivationLayerInfo::ActivationFunction::HARD_SWISH));
const auto QuantizedActivationDataset = combine(combine(framework::dataset::make("InPlace", { false }),
concat(QuantizedActivationFunctionsDataset, framework::dataset::make("ActivationFunction", ActivationLayerInfo::ActivationFunction::HARD_SWISH))),