diff options
author | Moritz Pflanzer <moritz.pflanzer@arm.com> | 2017-07-21 17:36:33 +0100 |
---|---|---|
committer | Anthony Barbier <anthony.barbier@arm.com> | 2018-09-17 14:16:42 +0100 |
commit | 572ade736ab344a62afa7da214cd9407fe53a281 (patch) | |
tree | adc0b31c0e236b65822dcbc9fb45ce401cc6ead4 /tests/validation/TensorOperations.h | |
parent | 8e6faf1e9f1af7a03441612c30644776e87fd235 (diff) | |
download | ComputeLibrary-572ade736ab344a62afa7da214cd9407fe53a281.tar.gz |
COMPMID-415: Move ActivationLayer to new validation
Change-Id: I38ce20d95640f9c1baf699a095c35e592ad4339f
Reviewed-on: http://mpd-gerrit.cambridge.arm.com/81115
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
Tested-by: Kaizen <jeremy.johnson+kaizengerrit@arm.com>
Diffstat (limited to 'tests/validation/TensorOperations.h')
-rw-r--r-- | tests/validation/TensorOperations.h | 103 |
1 file changed, 0 insertions, 103 deletions
diff --git a/tests/validation/TensorOperations.h b/tests/validation/TensorOperations.h index b472e3d5cf..db145c19ad 100644 --- a/tests/validation/TensorOperations.h +++ b/tests/validation/TensorOperations.h @@ -935,109 +935,6 @@ void threshold(const Tensor<T> &in, Tensor<T> &out, uint8_t threshold, uint8_t f } } -// Activation Layer for floating point type -template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type * = nullptr> -void activation_layer(const Tensor<T> &in, Tensor<T> &out, ActivationLayerInfo act_info) -{ - const T a = static_cast<T>(act_info.a()); - const T b = static_cast<T>(act_info.b()); - - for(int i = 0; i < in.num_elements(); ++i) - { - T x = in[i]; - switch(act_info.activation()) - { - case ActivationLayerInfo::ActivationFunction::ABS: - out[i] = std::abs(x); - break; - case ActivationLayerInfo::ActivationFunction::LINEAR: - out[i] = a * x + b; - break; - case ActivationLayerInfo::ActivationFunction::LOGISTIC: - out[i] = static_cast<T>(1) / (static_cast<T>(1) + std::exp(-x)); - break; - case ActivationLayerInfo::ActivationFunction::RELU: - out[i] = std::max(static_cast<T>(0), x); - break; - case ActivationLayerInfo::ActivationFunction::BOUNDED_RELU: - out[i] = std::min<T>(a, std::max(static_cast<T>(0), x)); - break; - case ActivationLayerInfo::ActivationFunction::LEAKY_RELU: - out[i] = (x > 0) ? 
x : a * x; - break; - case ActivationLayerInfo::ActivationFunction::SOFT_RELU: - out[i] = std::log(static_cast<T>(1) + std::exp(x)); - break; - case ActivationLayerInfo::ActivationFunction::SQRT: - out[i] = std::sqrt(x); - break; - case ActivationLayerInfo::ActivationFunction::SQUARE: - out[i] = x * x; - break; - case ActivationLayerInfo::ActivationFunction::TANH: - out[i] = a * std::tanh(b * x); - break; - default: - ARM_COMPUTE_ERROR("Activation function not recognised"); - break; - } - } -} - -// Activation Layer for fixed point type -template <typename T, typename std::enable_if<std::is_integral<T>::value, int>::type * = nullptr> -void activation_layer(const Tensor<T> &in, Tensor<T> &out, ActivationLayerInfo act_info) -{ - using namespace fixed_point_arithmetic; - int fixed_point_position = in.fixed_point_position(); - ActivationLayerInfo::ActivationFunction act_func = act_info.activation(); - const fixed_point<T> a(act_info.a(), fixed_point_position); - const fixed_point<T> b(act_info.b(), fixed_point_position); - const fixed_point<T> const_0(0, fixed_point_position); - const fixed_point<T> const_1(1, fixed_point_position); - - for(int i = 0; i < in.num_elements(); ++i) - { - fixed_point<T> x(in[i], fixed_point_position, true); - switch(act_func) - { - case ActivationLayerInfo::ActivationFunction::ABS: - out[i] = abs(x).raw(); - break; - case ActivationLayerInfo::ActivationFunction::LINEAR: - out[i] = add(b, mul(a, x)).raw(); - break; - case ActivationLayerInfo::ActivationFunction::LOGISTIC: - out[i] = (const_1 / (const_1 + exp(-x))).raw(); - break; - case ActivationLayerInfo::ActivationFunction::RELU: - out[i] = max(const_0, x).raw(); - break; - case ActivationLayerInfo::ActivationFunction::BOUNDED_RELU: - out[i] = min(a, max(const_0, x)).raw(); - break; - case ActivationLayerInfo::ActivationFunction::LEAKY_RELU: - out[i] = (x > const_0) ? 
x.raw() : mul(a, x).raw(); - break; - case ActivationLayerInfo::ActivationFunction::SOFT_RELU: - out[i] = log(const_1 + exp(x)).raw(); - break; - case ActivationLayerInfo::ActivationFunction::SQRT: - out[i] = (const_1 / inv_sqrt(x)).raw(); - break; - case ActivationLayerInfo::ActivationFunction::SQUARE: - out[i] = mul(x, x).raw(); - break; - case ActivationLayerInfo::ActivationFunction::TANH: - out[i] = mul(a, tanh(mul(b, x))).raw(); - break; - default: - ARM_COMPUTE_ERROR("Activation function not recognised"); - break; - } - } -} - // Batch Normalization Layer for fixed point type template <typename T, typename std::enable_if<std::is_integral<T>::value, int>::type * = nullptr> void batch_normalization_layer(const Tensor<T> &in, Tensor<T> &out, const Tensor<T> &mean, const Tensor<T> &var, const Tensor<T> &beta, const Tensor<T> &gamma, float epsilon, int fixed_point_position) |