From 572ade736ab344a62afa7da214cd9407fe53a281 Mon Sep 17 00:00:00 2001
From: Moritz Pflanzer
Date: Fri, 21 Jul 2017 17:36:33 +0100
Subject: COMPMID-415: Move ActivationLayer to new validation

Change-Id: I38ce20d95640f9c1baf699a095c35e592ad4339f
Reviewed-on: http://mpd-gerrit.cambridge.arm.com/81115
Reviewed-by: Anthony Barbier
Tested-by: Kaizen
---
 tests/validation/TensorOperations.h | 103 ------------------------------------
 1 file changed, 103 deletions(-)

(limited to 'tests/validation/TensorOperations.h')

diff --git a/tests/validation/TensorOperations.h b/tests/validation/TensorOperations.h
index b472e3d5cf..db145c19ad 100644
--- a/tests/validation/TensorOperations.h
+++ b/tests/validation/TensorOperations.h
@@ -935,109 +935,6 @@ void threshold(const Tensor<T> &in, Tensor<T> &out, uint8_t threshold, uint8_t f
     }
 }
 
-// Activation Layer for floating point type
-template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type * = nullptr>
-void activation_layer(const Tensor<T> &in, Tensor<T> &out, ActivationLayerInfo act_info)
-{
-    const T a = static_cast<T>(act_info.a());
-    const T b = static_cast<T>(act_info.b());
-
-    for(int i = 0; i < in.num_elements(); ++i)
-    {
-        T x = in[i];
-        switch(act_info.activation())
-        {
-            case ActivationLayerInfo::ActivationFunction::ABS:
-                out[i] = std::abs(x);
-                break;
-            case ActivationLayerInfo::ActivationFunction::LINEAR:
-                out[i] = a * x + b;
-                break;
-            case ActivationLayerInfo::ActivationFunction::LOGISTIC:
-                out[i] = static_cast<T>(1) / (static_cast<T>(1) + std::exp(-x));
-                break;
-            case ActivationLayerInfo::ActivationFunction::RELU:
-                out[i] = std::max(static_cast<T>(0), x);
-                break;
-            case ActivationLayerInfo::ActivationFunction::BOUNDED_RELU:
-                out[i] = std::min(a, std::max(static_cast<T>(0), x));
-                break;
-            case ActivationLayerInfo::ActivationFunction::LEAKY_RELU:
-                out[i] = (x > 0) ? x : a * x;
-                break;
-            case ActivationLayerInfo::ActivationFunction::SOFT_RELU:
-                out[i] = std::log(static_cast<T>(1) + std::exp(x));
-                break;
-            case ActivationLayerInfo::ActivationFunction::SQRT:
-                out[i] = std::sqrt(x);
-                break;
-            case ActivationLayerInfo::ActivationFunction::SQUARE:
-                out[i] = x * x;
-                break;
-            case ActivationLayerInfo::ActivationFunction::TANH:
-                out[i] = a * std::tanh(b * x);
-                break;
-            default:
-                ARM_COMPUTE_ERROR("Activation function not recognised");
-                break;
-        }
-    }
-}
-
-// Activation Layer for fixed point type
-template <typename T, typename std::enable_if<std::is_integral<T>::value, int>::type * = nullptr>
-void activation_layer(const Tensor<T> &in, Tensor<T> &out, ActivationLayerInfo act_info)
-{
-    using namespace fixed_point_arithmetic;
-    int fixed_point_position = in.fixed_point_position();
-    ActivationLayerInfo::ActivationFunction act_func = act_info.activation();
-    const fixed_point<T> a(act_info.a(), fixed_point_position);
-    const fixed_point<T> b(act_info.b(), fixed_point_position);
-    const fixed_point<T> const_0(0, fixed_point_position);
-    const fixed_point<T> const_1(1, fixed_point_position);
-
-    for(int i = 0; i < in.num_elements(); ++i)
-    {
-        fixed_point<T> x(in[i], fixed_point_position, true);
-        switch(act_func)
-        {
-            case ActivationLayerInfo::ActivationFunction::ABS:
-                out[i] = abs(x).raw();
-                break;
-            case ActivationLayerInfo::ActivationFunction::LINEAR:
-                out[i] = add(b, mul(a, x)).raw();
-                break;
-            case ActivationLayerInfo::ActivationFunction::LOGISTIC:
-                out[i] = (const_1 / (const_1 + exp(-x))).raw();
-                break;
-            case ActivationLayerInfo::ActivationFunction::RELU:
-                out[i] = max(const_0, x).raw();
-                break;
-            case ActivationLayerInfo::ActivationFunction::BOUNDED_RELU:
-                out[i] = min(a, max(const_0, x)).raw();
-                break;
-            case ActivationLayerInfo::ActivationFunction::LEAKY_RELU:
-                out[i] = (x > const_0) ? x.raw() : mul(a, x).raw();
-                break;
-            case ActivationLayerInfo::ActivationFunction::SOFT_RELU:
-                out[i] = log(const_1 + exp(x)).raw();
-                break;
-            case ActivationLayerInfo::ActivationFunction::SQRT:
-                out[i] = (const_1 / inv_sqrt(x)).raw();
-                break;
-            case ActivationLayerInfo::ActivationFunction::SQUARE:
-                out[i] = mul(x, x).raw();
-                break;
-            case ActivationLayerInfo::ActivationFunction::TANH:
-                out[i] = mul(a, tanh(mul(b, x))).raw();
-                break;
-            default:
-                ARM_COMPUTE_ERROR("Activation function not recognised");
-                break;
-        }
-    }
-}
-
 // Batch Normalization Layer for fixed point type
 template <typename T, typename std::enable_if<std::is_integral<T>::value, int>::type * = nullptr>
 void batch_normalization_layer(const Tensor<T> &in, Tensor<T> &out, const Tensor<T> &mean, const Tensor<T> &var, const Tensor<T> &beta, const Tensor<T> &gamma, float epsilon, int fixed_point_position)
--
cgit v1.2.1
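
For readers without the surrounding file, the floating-point reference removed above reduces to the standalone sketch below. A plain std::vector stands in for the test framework's Tensor<T>, and the Activation enum and function name are illustrative only, not the library's API.

// Standalone sketch of the removed floating-point activation reference.
// Names here are hypothetical; only the per-element math follows the patch above.
#include <algorithm>
#include <cmath>
#include <cstdio>
#include <vector>

enum class Activation { ABS, LINEAR, LOGISTIC, RELU, BOUNDED_RELU, LEAKY_RELU, SOFT_RELU, SQRT, SQUARE, TANH };

std::vector<float> activation_reference(const std::vector<float> &in, Activation act, float a = 1.0f, float b = 0.0f)
{
    std::vector<float> out(in.size());
    for(std::size_t i = 0; i < in.size(); ++i)
    {
        const float x = in[i];
        switch(act)
        {
            case Activation::ABS:          out[i] = std::abs(x); break;
            case Activation::LINEAR:       out[i] = a * x + b; break;
            case Activation::LOGISTIC:     out[i] = 1.0f / (1.0f + std::exp(-x)); break;
            case Activation::RELU:         out[i] = std::max(0.0f, x); break;
            case Activation::BOUNDED_RELU: out[i] = std::min(a, std::max(0.0f, x)); break;
            case Activation::LEAKY_RELU:   out[i] = (x > 0.0f) ? x : a * x; break;
            case Activation::SOFT_RELU:    out[i] = std::log(1.0f + std::exp(x)); break;
            case Activation::SQRT:         out[i] = std::sqrt(x); break;
            case Activation::SQUARE:       out[i] = x * x; break;
            case Activation::TANH:         out[i] = a * std::tanh(b * x); break;
        }
    }
    return out;
}

int main()
{
    // Example: leaky ReLU with slope a = 0.1 on a few sample values.
    const std::vector<float> in{-2.0f, -0.5f, 0.0f, 0.5f, 2.0f};
    for(float v : activation_reference(in, Activation::LEAKY_RELU, 0.1f))
    {
        std::printf("%f\n", v);
    }
    return 0;
}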