From a09de0c8b2ed0f1481502d3b023375609362d9e3 Mon Sep 17 00:00:00 2001
From: Moritz Pflanzer
Date: Fri, 1 Sep 2017 20:41:12 +0100
Subject: COMPMID-415: Rename and move tests

The boost validation is now "standalone" in validation_old and builds as
arm_compute_validation_old. The new validation now builds as
arm_compute_validation.

Change-Id: Ib93ba848a25680ac60afb92b461d574a0757150d
Reviewed-on: http://mpd-gerrit.cambridge.arm.com/86187
Tested-by: Kaizen
Reviewed-by: Anthony Barbier
---
 tests/validation_new/CPP/FullyConnectedLayer.cpp | 133 -----------------------
 1 file changed, 133 deletions(-)
 delete mode 100644 tests/validation_new/CPP/FullyConnectedLayer.cpp

diff --git a/tests/validation_new/CPP/FullyConnectedLayer.cpp b/tests/validation_new/CPP/FullyConnectedLayer.cpp
deleted file mode 100644
index 7852dab27b..0000000000
--- a/tests/validation_new/CPP/FullyConnectedLayer.cpp
+++ /dev/null
@@ -1,133 +0,0 @@
-/*
- * Copyright (c) 2017 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "FullyConnectedLayer.h"
-
-#include "tests/validation_new/FixedPoint.h"
-#include "tests/validation_new/half.h"
-
-#include <numeric>
-
-namespace arm_compute
-{
-namespace test
-{
-namespace validation
-{
-namespace reference
-{
-namespace
-{
-// Vector matrix multiply for floating point
-template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type = 0>
-void vector_matrix_multiply(const T *src, const T *weights, const T *bias, T *dst, int cols_weights, int rows_weights, uint8_t fixed_point_position)
-{
-    ARM_COMPUTE_UNUSED(fixed_point_position);
-
-    for(int y = 0; y < rows_weights; ++y)
-    {
-        dst[y] = std::inner_product(src, src + cols_weights, weights, static_cast<T>(0)) + bias[y];
-        weights += cols_weights;
-    }
-}
-
-// Vector matrix multiply for fixed point type
-template <typename T, typename std::enable_if<std::is_integral<T>::value, int>::type = 0>
-void vector_matrix_multiply(const T *src, const T *weights, const T *bias, T *dst, int cols_weights, int rows_weights, uint8_t fixed_point_position)
-{
-    using namespace fixed_point_arithmetic;
-    using promoted_type = fixed_point_arithmetic::traits::promote_t<T>;
-
-    for(int y = 0; y < rows_weights; ++y)
-    {
-        // Reset accumulator
-        fixed_point<promoted_type> acc(0, fixed_point_position);
-
-        for(int x = 0; x < cols_weights; ++x)
-        {
-            const fixed_point<promoted_type> i_value(src[x], fixed_point_position, true);
-            const fixed_point<promoted_type> w_value(weights[x], fixed_point_position, true);
-            acc = acc + i_value * w_value;
-        }
-
-        // Get the bias
-        const fixed_point<T> b(bias[y], fixed_point_position, true);
-
-        // Convert back and accumulate the bias
-        fixed_point<T> res(acc);
-        res = res + b;
-
-        // Store the result
-        dst[y] = res.raw();
-
-        weights += cols_weights;
-    }
-}
-} // namespace
-
-template <typename T>
-SimpleTensor<T> fully_connected_layer(const SimpleTensor<T> &src, const SimpleTensor<T> &weights, const SimpleTensor<T> &bias, const TensorShape &dst_shape)
-{
-    // Create reference
-    SimpleTensor<T> dst{ TensorShape{ dst_shape }, src.data_type(), 1, src.fixed_point_position() };
-
-    // Sanity checks
-    const int          num_batch_dimensions = std::max(0, static_cast<int>(dst_shape.num_dimensions()) - 1);
-    const int          num_input_dimensions = src.shape().num_dimensions() - num_batch_dimensions;
-    const unsigned int linear_input_size    = src.shape().total_size_lower(num_input_dimensions);
-
-    ARM_COMPUTE_UNUSED(num_batch_dimensions);
-    ARM_COMPUTE_UNUSED(num_input_dimensions);
-    ARM_COMPUTE_UNUSED(linear_input_size);
-    ARM_COMPUTE_ERROR_ON(weights.shape().x() != linear_input_size);
-    ARM_COMPUTE_ERROR_ON(weights.shape().y() != bias.shape().x());
-    ARM_COMPUTE_ERROR_ON(weights.shape().y() != dst.shape().x());
-
-    // Compute reference
-    const int cols_weights = weights.shape().x();
-    const int rows_weights = weights.shape().y();
-    const int num_batches  = dst_shape.total_size_upper(1);
-
-    for(int k = 0; k < num_batches; ++k)
-    {
-        vector_matrix_multiply(src.data() + k * cols_weights,
-                               weights.data(),
-                               bias.data(),
-                               dst.data() + k * rows_weights,
-                               cols_weights,
-                               rows_weights,
-                               src.fixed_point_position());
-    }
-
-    return dst;
-}
-
-template SimpleTensor<float> fully_connected_layer(const SimpleTensor<float> &src, const SimpleTensor<float> &weights, const SimpleTensor<float> &bias, const TensorShape &dst_shape);
-template SimpleTensor<half_float::half> fully_connected_layer(const SimpleTensor<half_float::half> &src, const SimpleTensor<half_float::half> &weights, const SimpleTensor<half_float::half> &bias,
-                                                              const TensorShape &dst_shape);
-template SimpleTensor<qint8_t> fully_connected_layer(const SimpleTensor<qint8_t> &src, const SimpleTensor<qint8_t> &weights, const SimpleTensor<qint8_t> &bias, const TensorShape &dst_shape);
-template SimpleTensor<qint16_t> fully_connected_layer(const SimpleTensor<qint16_t> &src, const SimpleTensor<qint16_t> &weights, const SimpleTensor<qint16_t> &bias, const TensorShape &dst_shape);
-} // namespace reference
-} // namespace validation
-} // namespace test
-} // namespace arm_compute
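
Note: the reference being moved computes, for every batch, a plain vector-matrix multiply followed by a bias add. As a rough illustration of the floating-point path only, here is a minimal standalone sketch using plain std::vector buffers and a hypothetical free function name; it is not the library's SimpleTensor-based API shown in the diff above.

```cpp
#include <cassert>
#include <numeric>
#include <vector>

// For each batch k: dst_k = weights * src_k + bias.
// `weights` is row-major: `rows` output neurons, each with `cols` input weights.
std::vector<float> fully_connected_reference(const std::vector<float> &src,
                                             const std::vector<float> &weights,
                                             const std::vector<float> &bias,
                                             int cols, int rows, int num_batches)
{
    assert(static_cast<int>(src.size()) == cols * num_batches);
    assert(static_cast<int>(weights.size()) == cols * rows);
    assert(static_cast<int>(bias.size()) == rows);

    std::vector<float> dst(static_cast<size_t>(rows) * num_batches);

    for(int k = 0; k < num_batches; ++k)
    {
        const float *in  = src.data() + k * cols; // linearised input of batch k
        float       *out = dst.data() + k * rows; // output row of batch k

        for(int y = 0; y < rows; ++y)
        {
            // Dot product of the input with one weight row, plus the bias term.
            const float *w = weights.data() + y * cols;
            out[y] = std::inner_product(in, in + cols, w, 0.0f) + bias[y];
        }
    }

    return dst;
}
```

The qint8_t/qint16_t paths in the diff perform the same accumulation in fixed point, promoting to a wider accumulator before converting back and adding the bias.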