author      Vidhya Sudhan Loganathan <vidhyasudhan.loganathan@arm.com>  2018-07-02 09:13:49 +0100
committer   Anthony Barbier <anthony.barbier@arm.com>                   2018-11-02 16:54:10 +0000
commit      014333d73883c3872e458cedda5ccef586a7ccd4 (patch)
tree        0f28bbc1ab769993af91b40e4584061f6ed6d3fa /tests/validation/reference/FullyConnectedLayer.cpp
parent      de01468bbfff3a7d8bcbba3bfdf5698fb2e3b267 (diff)
download    ComputeLibrary-014333d73883c3872e458cedda5ccef586a7ccd4.tar.gz
COMPMID-970 : Remove QS8 / QS16 support
Removed Fixed point position arguments from test sources

Change-Id: I8343724723b71611fd501ed34de0866d3fb60e7e
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/136382
Tested-by: Jenkins <bsgcomp@arm.com>
Reviewed-by: Michele DiGiorgio <michele.digiorgio@arm.com>
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
Diffstat (limited to 'tests/validation/reference/FullyConnectedLayer.cpp')
-rw-r--r--  tests/validation/reference/FullyConnectedLayer.cpp  64
1 file changed, 9 insertions, 55 deletions
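
Before reading the hunks below, it may help to see how the two remaining vector_matrix_multiply overloads are selected once the fixed_point_position parameter is gone: purely by SFINAE on the element types, one overload for floating-point tensors and one for uint8 input with int32 bias. The following is a minimal, self-contained sketch of that dispatch pattern only, not the library code; it uses std::is_floating_point in place of the validation framework's own is_floating_point trait and reduces the tensor arguments to raw pointers.

// Minimal sketch (not the library code) of the overload selection used in
// this patch: std::enable_if picks the floating-point or the quantized path
// from the element types, with no fixed_point_position parameter left over.
#include <cstdint>
#include <iostream>
#include <type_traits>

template <typename T, typename TB,
          typename std::enable_if<std::is_floating_point<T>::value &&
                                  std::is_floating_point<TB>::value, int>::type = 0>
void vector_matrix_multiply_sketch(const T * /*src*/, const T * /*weights*/, const TB * /*bias*/)
{
    std::cout << "floating-point path\n";
}

template <typename T, typename TB,
          typename std::enable_if<std::is_same<T, uint8_t>::value &&
                                  std::is_same<TB, int32_t>::value, int>::type = 0>
void vector_matrix_multiply_sketch(const T * /*src*/, const T * /*weights*/, const TB * /*bias*/)
{
    std::cout << "quantized (uint8 input, int32 bias) path\n";
}

int main()
{
    float   f = 0.f;
    uint8_t q = 0;
    int32_t b = 0;
    vector_matrix_multiply_sketch(&f, &f, &f); // resolves to the floating-point overload
    vector_matrix_multiply_sketch(&q, &q, &b); // resolves to the quantized overload
}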
diff --git a/tests/validation/reference/FullyConnectedLayer.cpp b/tests/validation/reference/FullyConnectedLayer.cpp
index 5384715ace..3ef10eacea 100644
--- a/tests/validation/reference/FullyConnectedLayer.cpp
+++ b/tests/validation/reference/FullyConnectedLayer.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -44,10 +44,8 @@ namespace
// Vector matrix multiply for floating point
template < typename T, typename TB, typename std::enable_if < is_floating_point<T>::value &&is_floating_point<TB>::value, int >::type = 0 >
void vector_matrix_multiply(const SimpleTensor<T> &src, const SimpleTensor<T> &weights, const SimpleTensor<TB> &bias, SimpleTensor<T> &dst, int offset_src, int offset_dst, int cols_weights,
- int rows_weights, uint8_t fixed_point_position)
+ int rows_weights)
{
- ARM_COMPUTE_UNUSED(fixed_point_position);
-
const T *src_ptr = src.data() + offset_src;
const T *weights_ptr = weights.data();
const TB *bias_ptr = bias.data();
@@ -60,57 +58,16 @@ void vector_matrix_multiply(const SimpleTensor<T> &src, const SimpleTensor<T> &w
}
}
-// Vector matrix multiply for fixed point type
-template < typename T, typename TB, typename std::enable_if < std::is_integral<T>::value &&std::is_integral<TB>::value, int >::type = 0 >
-void vector_matrix_multiply(const SimpleTensor<T> &src, const SimpleTensor<T> &weights, const SimpleTensor<TB> &bias, SimpleTensor<T> &dst, int offset_src, int offset_dst, int cols_weights,
- int rows_weights, uint8_t fixed_point_position)
+// Vector matrix multiply for quantized type
+template < typename T, typename TB, typename std::enable_if < std::is_same<T, uint8_t>::value &&std::is_same<TB, int32_t>::value, int >::type = 0 >
+void vector_matrix_multiply(const SimpleTensor<T> &src, const SimpleTensor<T> &weights, const SimpleTensor<TB> &bias, SimpleTensor<T> &dst, int offset_src, int offset_dst,
+ int cols_weights, int rows_weights)
{
const T *src_ptr = src.data() + offset_src;
const T *weights_ptr = weights.data();
const TB *bias_ptr = bias.data();
T *dst_ptr = dst.data() + offset_dst;
- using namespace fixed_point_arithmetic;
- using promoted_type = fixed_point_arithmetic::traits::promote_t<T>;
-
- for(int y = 0; y < rows_weights; ++y)
- {
- // Reset accumulator
- fixed_point<promoted_type> acc(0, fixed_point_position);
-
- for(int x = 0; x < cols_weights; ++x)
- {
- const fixed_point<promoted_type> i_value(src_ptr[x], fixed_point_position, true);
- const fixed_point<promoted_type> w_value(weights_ptr[x], fixed_point_position, true);
- acc = acc + i_value * w_value;
- }
-
- // Get the bias
- const fixed_point<T> b(bias_ptr[y], fixed_point_position, true);
-
- // Convert back and accumulate the bias
- fixed_point<T> res(acc);
- res = res + b;
-
- // Store the result
- dst_ptr[y] = res.raw();
-
- weights_ptr += cols_weights;
- }
-}
-
-// Vector matrix multiply for quantized type
-template <>
-void vector_matrix_multiply(const SimpleTensor<uint8_t> &src, const SimpleTensor<uint8_t> &weights, const SimpleTensor<int32_t> &bias, SimpleTensor<uint8_t> &dst, int offset_src, int offset_dst,
- int cols_weights, int rows_weights, uint8_t fixed_point_position)
-{
- ARM_COMPUTE_UNUSED(fixed_point_position);
-
- const uint8_t *src_ptr = src.data() + offset_src;
- const uint8_t *weights_ptr = weights.data();
- const int32_t *bias_ptr = bias.data();
- uint8_t *dst_ptr = dst.data() + offset_dst;
-
const int input_offset = -src.quantization_info().offset;
const float input_scale = src.quantization_info().scale;
const int weights_offset = -weights.quantization_info().offset;
@@ -141,7 +98,7 @@ void vector_matrix_multiply(const SimpleTensor<uint8_t> &src, const SimpleTensor
acc = utility::clamp<int32_t>(acc, 0, 255);
// Store the result
- dst_ptr[y] = static_cast<uint8_t>(acc);
+ dst_ptr[y] = static_cast<T>(acc);
weights_ptr += cols_weights;
}
@@ -152,7 +109,7 @@ template <typename T, typename TB>
SimpleTensor<T> fully_connected_layer(const SimpleTensor<T> &src, const SimpleTensor<T> &weights, const SimpleTensor<TB> &bias, const TensorShape &dst_shape)
{
// Create reference
- SimpleTensor<T> dst{ TensorShape{ dst_shape }, src.data_type(), 1, src.fixed_point_position(), src.quantization_info() };
+ SimpleTensor<T> dst{ TensorShape{ dst_shape }, src.data_type(), 1, src.quantization_info() };
// Sanity checks
const int num_batch_dimensions = std::max(0, static_cast<int>(dst_shape.num_dimensions()) - 1);
@@ -183,8 +140,7 @@ SimpleTensor<T> fully_connected_layer(const SimpleTensor<T> &src, const SimpleTe
offset_in,
offset_out,
cols_weights,
- rows_weights,
- src.fixed_point_position());
+ rows_weights);
}
return dst;
@@ -192,8 +148,6 @@ SimpleTensor<T> fully_connected_layer(const SimpleTensor<T> &src, const SimpleTe
template SimpleTensor<float> fully_connected_layer(const SimpleTensor<float> &src, const SimpleTensor<float> &weights, const SimpleTensor<float> &bias, const TensorShape &dst_shape);
template SimpleTensor<half> fully_connected_layer(const SimpleTensor<half> &src, const SimpleTensor<half> &weights, const SimpleTensor<half> &bias, const TensorShape &dst_shape);
-template SimpleTensor<qint8_t> fully_connected_layer(const SimpleTensor<qint8_t> &src, const SimpleTensor<qint8_t> &weights, const SimpleTensor<qint8_t> &bias, const TensorShape &dst_shape);
-template SimpleTensor<qint16_t> fully_connected_layer(const SimpleTensor<qint16_t> &src, const SimpleTensor<qint16_t> &weights, const SimpleTensor<qint16_t> &bias, const TensorShape &dst_shape);
template SimpleTensor<uint8_t> fully_connected_layer(const SimpleTensor<uint8_t> &src, const SimpleTensor<uint8_t> &weights, const SimpleTensor<int32_t> &bias, const TensorShape &dst_shape);
} // namespace reference
} // namespace validation
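
For readers who want the quantized path in one piece rather than spread across hunks, the stand-alone sketch below reproduces the arithmetic the reference performs: subtract the zero-point offsets (the hunk negates them as input_offset and weights_offset), accumulate in int32, add the int32 bias, requantize with input_scale * weights_scale / output_scale, add the output offset, and clamp to [0, 255] before storing as uint8. The requantization step falls outside the visible hunk, so that part is an assumption based on the usual asymmetric-quantization scheme; names such as vector_matrix_multiply_q8 and QuantInfo are illustrative only.

// Stand-alone sketch of the quantized (uint8) vector-matrix multiply in the
// patched reference.  The rounding/requantization details outside the visible
// hunk are assumptions, not copied library code.
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <vector>

struct QuantInfo
{
    float scale;
    int   offset;
};

std::vector<uint8_t> vector_matrix_multiply_q8(const std::vector<uint8_t> &src,
                                               const std::vector<uint8_t> &weights,
                                               const std::vector<int32_t> &bias,
                                               int cols, int rows,
                                               QuantInfo in_q, QuantInfo w_q, QuantInfo out_q)
{
    std::vector<uint8_t> dst(rows);
    // Requantization factor from the input/weight scales to the output scale.
    const float multiplier = in_q.scale * w_q.scale / out_q.scale;

    for(int y = 0; y < rows; ++y)
    {
        int32_t acc = 0;
        for(int x = 0; x < cols; ++x)
        {
            // Remove the zero-point offsets before accumulating, as in the hunk
            // (input_offset = -offset, weights_offset = -offset).
            acc += (static_cast<int32_t>(src[x]) - in_q.offset) *
                   (static_cast<int32_t>(weights[y * cols + x]) - w_q.offset);
        }
        acc += bias[y];

        // Scale back to the output domain, add the output zero point and
        // saturate to the uint8 range, matching clamp<int32_t>(acc, 0, 255).
        acc = static_cast<int32_t>(std::lround(acc * multiplier)) + out_q.offset;
        acc = std::min(std::max(acc, 0), 255);
        dst[y] = static_cast<uint8_t>(acc);
    }
    return dst;
}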