From 69d3341e6903c1ea87c46e39d6d3e64b2a0d5b4e Mon Sep 17 00:00:00 2001
From: Moritz Pflanzer
Date: Wed, 9 Aug 2017 11:45:15 +0100
Subject: COMPMID-415: Move FullyConnectedLayer to new validation

Change-Id: I7f60d6fb484d3962b88874e1531cec734c11e416
Reviewed-on: http://mpd-gerrit.cambridge.arm.com/83556
Tested-by: Kaizen
Reviewed-by: Anthony Barbier
---
 tests/validation/TensorOperations.h | 68 -------------------------------------
 1 file changed, 68 deletions(-)

(limited to 'tests/validation/TensorOperations.h')

diff --git a/tests/validation/TensorOperations.h b/tests/validation/TensorOperations.h
index c4884be7ca..f5be139dcf 100644
--- a/tests/validation/TensorOperations.h
+++ b/tests/validation/TensorOperations.h
@@ -58,52 +58,6 @@ struct is_floating_point
 {
 };
 
-template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type * = nullptr>
-void vector_matrix_multiply(const T *in, const T *weights, const T *bias, T *out, int cols_weights, int rows_weights, uint8_t fixed_point_position)
-{
-    for(int x = 0; x < cols_weights; ++x)
-    {
-        T acc(0);
-        for(int y = 0; y < rows_weights; ++y)
-        {
-            acc += in[y] * weights[x + y * cols_weights];
-        }
-        out[x] = acc + bias[x];
-    }
-}
-
-// Vector matrix multiply for fixed point type
-template <typename T, typename std::enable_if<std::is_integral<T>::value, int>::type * = nullptr>
-void vector_matrix_multiply(const T *in, const T *weights, const T *bias, T *out, int cols_weights, int rows_weights, uint8_t fixed_point_position)
-{
-    using namespace fixed_point_arithmetic;
-    using promoted_type = typename fixed_point_arithmetic::traits::promote<T>::type;
-
-    for(int x = 0; x < cols_weights; ++x)
-    {
-        // Reset accumulator
-        fixed_point<promoted_type> acc(0, fixed_point_position);
-
-        for(int y = 0; y < rows_weights; ++y)
-        {
-            const fixed_point<promoted_type> i_value(in[y], fixed_point_position, true);
-            const fixed_point<promoted_type> w_value(weights[x + y * cols_weights], fixed_point_position, true);
-            const fixed_point<promoted_type> iw = i_value * w_value;
-            acc = iw + acc;
-        }
-
-        // Get the bias
-        const fixed_point<T> b(bias[x], fixed_point_position, true);
-
-        // Convert back and accumulate the bias
-        fixed_point<T> res(acc);
-        res = res + b;
-
-        // Store the result
-        out[x] = res.raw();
-    }
-}
-
 // Return a tensor element at a specified coordinate with different border modes
 template <typename T>
 T tensor_elem_at(const Tensor<T> &in, Coordinates coord, BorderMode border_mode, T constant_border_value)
@@ -1117,28 +1071,6 @@ void batch_normalization_layer(const Tensor<T> &in, Tensor<T> &out, const Tensor
 }
 }
 
-// Fully connected layer
-template <typename T>
-void fully_connected_layer(const Tensor<T> &in, const Tensor<T> &weights, const Tensor<T> &bias, Tensor<T> &out)
-{
-    ARM_COMPUTE_ERROR_ON(weights.shape().x() != out.shape().x());
-    ARM_COMPUTE_ERROR_ON(weights.shape().y() != in.shape().x() * in.shape().y() * in.shape().z());
-    const int cols_weights = weights.shape().x();
-    const int rows_weights = weights.shape().y();
-    const int num_batches = in.shape().total_size() / rows_weights;
-
-    for(int k = 0; k < num_batches; ++k)
-    {
-        vector_matrix_multiply(in.data() + k * rows_weights,
-                               weights.data(),
-                               bias.data(),
-                               out.data() + k * cols_weights,
-                               cols_weights,
-                               rows_weights,
-                               in.fixed_point_position());
-    }
-}
-
 // Pooling layer
 template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type * = nullptr>
 void pooling_layer(const Tensor<T> &in, Tensor<T> &out, PoolingLayerInfo pool_info)
--
cgit v1.2.1
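
Note (not part of the patch): the deleted floating-point reference computes, per batch, out[x] = sum_y in[y] * weights[y * cols_weights + x] + bias[x]. Below is a minimal standalone sketch of that same computation using plain std::vector buffers instead of the library's Tensor<T> class; the function name, argument layout, and main() values are illustrative only and do not come from the library.

```cpp
// Standalone sketch of the removed reference computation (hypothetical names,
// plain arrays instead of Tensor<T>); row-major weights of shape
// rows_weights x cols_weights, inputs flattened to num_batches * rows_weights.
#include <cassert>
#include <cstdio>
#include <vector>

void fully_connected_reference(const std::vector<float> &in,      // num_batches * rows_weights
                               const std::vector<float> &weights, // rows_weights * cols_weights
                               const std::vector<float> &bias,    // cols_weights
                               std::vector<float>       &out,     // num_batches * cols_weights
                               int cols_weights, int rows_weights)
{
    const int num_batches = static_cast<int>(in.size()) / rows_weights;
    assert(static_cast<int>(out.size()) == num_batches * cols_weights);

    for(int k = 0; k < num_batches; ++k)
    {
        for(int x = 0; x < cols_weights; ++x)
        {
            // Dot product of the k-th input row with the x-th weight column, plus bias.
            float acc = 0.f;
            for(int y = 0; y < rows_weights; ++y)
            {
                acc += in[k * rows_weights + y] * weights[y * cols_weights + x];
            }
            out[k * cols_weights + x] = acc + bias[x];
        }
    }
}

int main()
{
    // One batch, 3 inputs, 2 outputs.
    const std::vector<float> in{1.f, 2.f, 3.f};
    const std::vector<float> weights{1.f, 0.f,  // row y = 0
                                     0.f, 1.f,  // row y = 1
                                     1.f, 1.f}; // row y = 2
    const std::vector<float> bias{0.5f, -0.5f};
    std::vector<float>       out(2);

    fully_connected_reference(in, weights, bias, out, 2, 3);
    std::printf("%f %f\n", out[0], out[1]); // expected: 4.5 4.5
}
```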