about summary refs log tree commit diff
path: root/tests/validation/TensorOperations.h
diff options
context:
space:
mode:
author Moritz Pflanzer <moritz.pflanzer@arm.com> 2017-07-21 17:19:58 +0100
committer Anthony Barbier <anthony.barbier@arm.com> 2018-09-17 14:16:42 +0100
commit f6ad98a95cc4a638e133538ae682185032c16201 (patch)
tree 7940632c316c141bc0bb2557578b09f86ca7ca73 /tests/validation/TensorOperations.h
parent f5d76f28b51e93447273d1f7fa7512b3e0a54166 (diff)
download ComputeLibrary-f6ad98a95cc4a638e133538ae682185032c16201.tar.gz
COMPMID-415: Move SoftmaxLayer to new validation
Change-Id: I68bb359021256e67892e4fc00d436f9027a3bd07
Reviewed-on: http://mpd-gerrit.cambridge.arm.com/80942
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
Tested-by: Kaizen <jeremy.johnson+kaizengerrit@arm.com>
Diffstat (limited to 'tests/validation/TensorOperations.h')
-rw-r--r-- tests/validation/TensorOperations.h 79
1 file changed, 0 insertions, 79 deletions
diff --git a/tests/validation/TensorOperations.h b/tests/validation/TensorOperations.h
index 359dfe8d03..5018bfdb2b 100644
--- a/tests/validation/TensorOperations.h
+++ b/tests/validation/TensorOperations.h
@@ -1606,85 +1606,6 @@ void roi_pooling_layer(const Tensor<T> &in, Tensor<T> &out, const std::vector<RO
}
}
-// Softmax Layer
-template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type * = nullptr>
-void softmax_layer(const Tensor<T> &in, Tensor<T> &out)
-{
- const int cols = static_cast<int>(in.shape()[0]);
- const int upper_dims = in.shape().total_size() / cols;
- for(int r = 0; r < upper_dims; ++r)
- {
- // Find max
- T max = std::numeric_limits<T>::lowest();
- for(int c = 0; c < cols; ++c)
- {
- const T x = in[r * cols + c];
- if(x > max)
- {
- max = x;
- }
- }
-
- // Regularize
- T sum(0);
- for(int c = 0; c < cols; ++c)
- {
- const T res = exp(in[r * cols + c] - max);
- out[r * cols + c] = res;
- sum += res;
- }
-
- // Normalize
- const T norm_val = static_cast<T>(1) / sum;
- for(int c = 0; c < cols; ++c)
- {
- out[r * cols + c] *= norm_val;
- }
- }
-}
-template <typename T, typename std::enable_if<std::is_integral<T>::value, int>::type * = nullptr>
-void softmax_layer(const Tensor<T> &in, Tensor<T> &out)
-{
- using namespace fixed_point_arithmetic;
- using promoted_T = typename test::traits::promote<T>::type;
-
- const int fixed_point_position = in.fixed_point_position();
- const int cols = static_cast<int>(in.shape()[0]);
- const int upper_dims = in.shape().total_size() / cols;
-
- for(int r = 0; r < upper_dims; ++r)
- {
- // Find max
- fixed_point<T> max(std::numeric_limits<T>::lowest(), fixed_point_position, true);
- for(int c = 0; c < cols; ++c)
- {
- const fixed_point<T> x(in[r * cols + c], fixed_point_position, true);
- if(x > max)
- {
- max = x;
- }
- }
-
- // Regularize
- fixed_point<promoted_T> sum(0, fixed_point_position);
- for(int c = 0; c < cols; ++c)
- {
- const fixed_point<T> x(in[r * cols + c], fixed_point_position, true);
- fixed_point<T> res = exp(x - max);
- out[r * cols + c] = res.raw();
- sum = add(sum, static_cast<fixed_point<promoted_T>>(res));
- }
-
- // Normalize
- fixed_point<T> sat_sum(sum);
- for(int c = 0; c < cols; ++c)
- {
- const fixed_point<T> x(out[r * cols + c], fixed_point_position, true);
- out[r * cols + c] = div(x, sat_sum).raw();
- }
- }
-}
-
// Fixed point operations
template <typename T>
void fixed_point_operation(const Tensor<T> &in, Tensor<T> &out, FixedPointOp op)