path: root/tests/validation/TensorOperations.h
Diffstat (limited to 'tests/validation/TensorOperations.h')
-rw-r--r--  tests/validation/TensorOperations.h  103
1 file changed, 0 insertions, 103 deletions
diff --git a/tests/validation/TensorOperations.h b/tests/validation/TensorOperations.h
index b472e3d5cf..db145c19ad 100644
--- a/tests/validation/TensorOperations.h
+++ b/tests/validation/TensorOperations.h
@@ -935,109 +935,6 @@ void threshold(const Tensor<T> &in, Tensor<T> &out, uint8_t threshold, uint8_t f
}
}
-// Activation Layer for floating point type
-template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type * = nullptr>
-void activation_layer(const Tensor<T> &in, Tensor<T> &out, ActivationLayerInfo act_info)
-{
- const T a = static_cast<T>(act_info.a());
- const T b = static_cast<T>(act_info.b());
-
- for(int i = 0; i < in.num_elements(); ++i)
- {
- T x = in[i];
- switch(act_info.activation())
- {
- case ActivationLayerInfo::ActivationFunction::ABS:
- out[i] = std::abs(x);
- break;
- case ActivationLayerInfo::ActivationFunction::LINEAR:
- out[i] = a * x + b;
- break;
- case ActivationLayerInfo::ActivationFunction::LOGISTIC:
- out[i] = static_cast<T>(1) / (static_cast<T>(1) + std::exp(-x));
- break;
- case ActivationLayerInfo::ActivationFunction::RELU:
- out[i] = std::max(static_cast<T>(0), x);
- break;
- case ActivationLayerInfo::ActivationFunction::BOUNDED_RELU:
- out[i] = std::min<T>(a, std::max(static_cast<T>(0), x));
- break;
- case ActivationLayerInfo::ActivationFunction::LEAKY_RELU:
- out[i] = (x > 0) ? x : a * x;
- break;
- case ActivationLayerInfo::ActivationFunction::SOFT_RELU:
- out[i] = std::log(static_cast<T>(1) + std::exp(x));
- break;
- case ActivationLayerInfo::ActivationFunction::SQRT:
- out[i] = std::sqrt(x);
- break;
- case ActivationLayerInfo::ActivationFunction::SQUARE:
- out[i] = x * x;
- break;
- case ActivationLayerInfo::ActivationFunction::TANH:
- out[i] = a * std::tanh(b * x);
- break;
- default:
- ARM_COMPUTE_ERROR("Activation function not recognised");
- break;
- }
- }
-}
-
-// Activation Layer for fixed point type
-template <typename T, typename std::enable_if<std::is_integral<T>::value, int>::type * = nullptr>
-void activation_layer(const Tensor<T> &in, Tensor<T> &out, ActivationLayerInfo act_info)
-{
- using namespace fixed_point_arithmetic;
- int fixed_point_position = in.fixed_point_position();
- ActivationLayerInfo::ActivationFunction act_func = act_info.activation();
- const fixed_point<T> a(act_info.a(), fixed_point_position);
- const fixed_point<T> b(act_info.b(), fixed_point_position);
- const fixed_point<T> const_0(0, fixed_point_position);
- const fixed_point<T> const_1(1, fixed_point_position);
-
- for(int i = 0; i < in.num_elements(); ++i)
- {
- fixed_point<T> x(in[i], fixed_point_position, true);
- switch(act_func)
- {
- case ActivationLayerInfo::ActivationFunction::ABS:
- out[i] = abs(x).raw();
- break;
- case ActivationLayerInfo::ActivationFunction::LINEAR:
- out[i] = add(b, mul(a, x)).raw();
- break;
- case ActivationLayerInfo::ActivationFunction::LOGISTIC:
- out[i] = (const_1 / (const_1 + exp(-x))).raw();
- break;
- case ActivationLayerInfo::ActivationFunction::RELU:
- out[i] = max(const_0, x).raw();
- break;
- case ActivationLayerInfo::ActivationFunction::BOUNDED_RELU:
- out[i] = min(a, max(const_0, x)).raw();
- break;
- case ActivationLayerInfo::ActivationFunction::LEAKY_RELU:
- out[i] = (x > const_0) ? x.raw() : mul(a, x).raw();
- break;
- case ActivationLayerInfo::ActivationFunction::SOFT_RELU:
- out[i] = log(const_1 + exp(x)).raw();
- break;
- case ActivationLayerInfo::ActivationFunction::SQRT:
- out[i] = (const_1 / inv_sqrt(x)).raw();
- break;
- case ActivationLayerInfo::ActivationFunction::SQUARE:
- out[i] = mul(x, x).raw();
- break;
- case ActivationLayerInfo::ActivationFunction::TANH:
- out[i] = mul(a, tanh(mul(b, x))).raw();
- break;
- default:
- ARM_COMPUTE_ERROR("Activation function not recognised");
- break;
- }
- }
-}
-
// Batch Normalization Layer for fixed point type
template <typename T, typename std::enable_if<std::is_integral<T>::value, int>::type * = nullptr>
void batch_normalization_layer(const Tensor<T> &in, Tensor<T> &out, const Tensor<T> &mean, const Tensor<T> &var, const Tensor<T> &beta, const Tensor<T> &gamma, float epsilon, int fixed_point_position)
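Note on the removed fixed-point SQRT case: it evaluates sqrt(x) as const_1 / inv_sqrt(x), i.e. it relies on the identity sqrt(x) = 1 / (1/sqrt(x)). A minimal standalone sketch of that identity in plain float (an illustration only, not part of the library's fixed_point arithmetic and not using its API) could look like:

#include <cassert>
#include <cmath>

// Sketch only: plain float stand-in for the removed fixed-point SQRT case,
// which computes sqrt(x) as 1 / inv_sqrt(x).
float sqrt_via_inv_sqrt(float x)
{
    const float inv = 1.0f / std::sqrt(x); // plays the role of inv_sqrt(x)
    return 1.0f / inv;
}

int main()
{
    assert(std::fabs(sqrt_via_inv_sqrt(4.0f) - 2.0f) < 1e-6f);
    return 0;
}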