| author | Moritz Pflanzer <moritz.pflanzer@arm.com> | 2017-09-01 20:41:12 +0100 |
|---|---|---|
| committer | Anthony Barbier <anthony.barbier@arm.com> | 2018-11-02 16:35:24 +0000 |
| commit | a09de0c8b2ed0f1481502d3b023375609362d9e3 (patch) | |
| tree | e34b56d9ca69b025d7d9b943cc4df59cd458f6cb /tests/validation/CPP/ActivationLayer.cpp | |
| parent | 5280071b336d53aff94ca3a6c70ebbe6bf03f4c3 (diff) | |
| download | ComputeLibrary-a09de0c8b2ed0f1481502d3b023375609362d9e3.tar.gz | |
COMPMID-415: Rename and move tests
The Boost validation is now "standalone" in validation_old and builds as
arm_compute_validation_old. The new validation now builds as
arm_compute_validation.
Change-Id: Ib93ba848a25680ac60afb92b461d574a0757150d
Reviewed-on: http://mpd-gerrit.cambridge.arm.com/86187
Tested-by: Kaizen <jeremy.johnson+kaizengerrit@arm.com>
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
Diffstat (limited to 'tests/validation/CPP/ActivationLayer.cpp')
| -rw-r--r-- | tests/validation/CPP/ActivationLayer.cpp | 158 |
1 file changed, 158 insertions, 0 deletions
```diff
diff --git a/tests/validation/CPP/ActivationLayer.cpp b/tests/validation/CPP/ActivationLayer.cpp
new file mode 100644
index 0000000000..fa393be5e1
--- /dev/null
+++ b/tests/validation/CPP/ActivationLayer.cpp
@@ -0,0 +1,158 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "ActivationLayer.h"
+
+#include "tests/validation/FixedPoint.h"
+#include "tests/validation/Helpers.h"
+#include "tests/validation/half.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+namespace reference
+{
+template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type>
+SimpleTensor<T> activation_layer(const SimpleTensor<T> &src, ActivationLayerInfo info)
+{
+    // Create reference
+    SimpleTensor<T> dst{ src.shape(), src.data_type(), 1, src.fixed_point_position() };
+
+    // Compute reference
+    const T a(info.a());
+    const T b(info.b());
+
+    for(int i = 0; i < src.num_elements(); ++i)
+    {
+        T x = src[i];
+
+        switch(info.activation())
+        {
+            case ActivationLayerInfo::ActivationFunction::ABS:
+                dst[i] = std::abs(x);
+                break;
+            case ActivationLayerInfo::ActivationFunction::LINEAR:
+                dst[i] = a * x + b;
+                break;
+            case ActivationLayerInfo::ActivationFunction::LOGISTIC:
+                dst[i] = static_cast<T>(1) / (static_cast<T>(1) + std::exp(-x));
+                break;
+            case ActivationLayerInfo::ActivationFunction::RELU:
+                dst[i] = std::max<T>(static_cast<T>(0), x);
+                break;
+            case ActivationLayerInfo::ActivationFunction::BOUNDED_RELU:
+                dst[i] = std::min<T>(a, std::max(static_cast<T>(0), x));
+                break;
+            case ActivationLayerInfo::ActivationFunction::LEAKY_RELU:
+                dst[i] = (x > 0) ? x : a * x;
+                break;
+            case ActivationLayerInfo::ActivationFunction::SOFT_RELU:
+                dst[i] = std::log(static_cast<T>(1) + std::exp(x));
+                break;
+            case ActivationLayerInfo::ActivationFunction::SQRT:
+                dst[i] = std::sqrt(x);
+                break;
+            case ActivationLayerInfo::ActivationFunction::SQUARE:
+                dst[i] = x * x;
+                break;
+            case ActivationLayerInfo::ActivationFunction::TANH:
+                dst[i] = a * std::tanh(b * x);
+                break;
+            default:
+                ARM_COMPUTE_ERROR("Unsupported activation function");
+        }
+    }
+
+    return dst;
+}
+
+template <typename T, typename std::enable_if<std::is_integral<T>::value, int>::type>
+SimpleTensor<T> activation_layer(const SimpleTensor<T> &src, ActivationLayerInfo info)
+{
+    using namespace fixed_point_arithmetic;
+
+    // Create reference
+    SimpleTensor<T> dst{ src.shape(), src.data_type(), 1, src.fixed_point_position() };
+
+    // Compute reference
+    const int            fixed_point_position = src.fixed_point_position();
+    const fixed_point<T> a(info.a(), fixed_point_position);
+    const fixed_point<T> b(info.b(), fixed_point_position);
+    const fixed_point<T> const_0(0, fixed_point_position);
+    const fixed_point<T> const_1(1, fixed_point_position);
+
+    for(int i = 0; i < src.num_elements(); ++i)
+    {
+        fixed_point<T> x(src[i], fixed_point_position, true);
+
+        switch(info.activation())
+        {
+            case ActivationLayerInfo::ActivationFunction::ABS:
+                dst[i] = abs(x).raw();
+                break;
+            case ActivationLayerInfo::ActivationFunction::LINEAR:
+                dst[i] = add(b, mul(a, x)).raw();
+                break;
+            case ActivationLayerInfo::ActivationFunction::LOGISTIC:
+                dst[i] = (const_1 / (const_1 + exp(-x))).raw();
+                break;
+            case ActivationLayerInfo::ActivationFunction::RELU:
+                dst[i] = max(const_0, x).raw();
+                break;
+            case ActivationLayerInfo::ActivationFunction::BOUNDED_RELU:
+                dst[i] = min(a, max(const_0, x)).raw();
+                break;
+            case ActivationLayerInfo::ActivationFunction::LEAKY_RELU:
+                dst[i] = (x > const_0) ? x.raw() : mul(a, x).raw();
+                break;
+            case ActivationLayerInfo::ActivationFunction::SOFT_RELU:
+                dst[i] = log(const_1 + exp(x)).raw();
+                break;
+            case ActivationLayerInfo::ActivationFunction::SQRT:
+                dst[i] = (const_1 / inv_sqrt(x)).raw();
+                break;
+            case ActivationLayerInfo::ActivationFunction::SQUARE:
+                dst[i] = mul(x, x).raw();
+                break;
+            case ActivationLayerInfo::ActivationFunction::TANH:
+                dst[i] = mul(a, tanh(mul(b, x))).raw();
+                break;
+            default:
+                ARM_COMPUTE_ERROR("Unsupported activation function");
+        }
+    }
+
+    return dst;
+}
+
+template SimpleTensor<float> activation_layer(const SimpleTensor<float> &src, ActivationLayerInfo info);
+template SimpleTensor<half_float::half> activation_layer(const SimpleTensor<half_float::half> &src, ActivationLayerInfo info);
+template SimpleTensor<qint8_t> activation_layer(const SimpleTensor<qint8_t> &src, ActivationLayerInfo info);
+template SimpleTensor<qint16_t> activation_layer(const SimpleTensor<qint16_t> &src, ActivationLayerInfo info);
+} // namespace reference
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
```
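The floating-point overload above is a per-element switch over the textbook activation formulas. A minimal, self-contained sketch of the same math on a plain `std::vector` may make the structure clearer; the `Act` enum and `activate` helper below are hypothetical illustrations, not part of the library's API:

```cpp
#include <cmath>
#include <cstddef>
#include <cstdio>
#include <vector>

// Hypothetical stand-in for ActivationLayerInfo::ActivationFunction,
// covering three of the branches from the reference above.
enum class Act { Logistic, LeakyRelu, SoftRelu };

// Mirrors the per-element loop of the reference; "alpha" plays the
// role of info.a() in the LEAKY_RELU branch.
std::vector<float> activate(const std::vector<float> &src, Act act, float alpha = 0.1f)
{
    std::vector<float> dst(src.size());
    for(std::size_t i = 0; i < src.size(); ++i)
    {
        const float x = src[i];
        switch(act)
        {
            case Act::Logistic:  dst[i] = 1.f / (1.f + std::exp(-x));  break; // 1 / (1 + e^-x)
            case Act::LeakyRelu: dst[i] = (x > 0.f) ? x : alpha * x;   break; // a * x for x <= 0
            case Act::SoftRelu:  dst[i] = std::log(1.f + std::exp(x)); break; // log(1 + e^x)
        }
    }
    return dst;
}

int main()
{
    for(float y : activate({ -2.f, -0.5f, 0.f, 0.5f, 2.f }, Act::LeakyRelu))
    {
        std::printf("%f\n", y);
    }
}
```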
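The fixed-point overload delegates to the `fixed_point_arithmetic` helpers (`mul`, `exp`, `inv_sqrt`, ...), and the `.raw()` calls suggest each `fixed_point<T>` wraps a raw integer scaled by `2^fixed_point_position`. The library's exact rounding and saturation policy is not visible in this diff, so the following Q-format multiply is only a rough sketch under that assumption:

```cpp
#include <algorithm>
#include <cstdint>
#include <cstdio>

// Illustrative Qm.n multiply: the product of two raw values carries
// 2*n fractional bits, so shift n of them back out (with rounding to
// nearest) and saturate to the int8_t range. The library's actual
// rounding/saturation behaviour may differ from this sketch.
int8_t q_mul(int8_t a, int8_t b, int n)
{
    const int32_t product = int32_t(a) * int32_t(b);         // 2n fractional bits
    const int32_t rounded = (product + (1 << (n - 1))) >> n; // back to n fractional bits
    return static_cast<int8_t>(std::max<int32_t>(-128, std::min<int32_t>(127, rounded)));
}

int main()
{
    const int    n     = 5;  // Q2.5: value = raw / 32
    const int8_t half  = 16; // 0.5 in Q2.5
    const int8_t three = 96; // 3.0 in Q2.5
    std::printf("0.5 * 3.0 = %f\n", q_mul(half, three, n) / 32.0); // prints 1.500000
}
```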