author     Michele Di Giorgio <michele.digiorgio@arm.com>  2020-01-08 11:33:44 +0000
committer  Michele Di Giorgio <michele.digiorgio@arm.com>  2020-01-14 10:55:58 +0000
commit     9c700378f2227cb9d51455ed4a5086daaac5532a (patch)
tree       53eb4acf5a9226e941e332b93db8ef260fb2d42b /tests
parent     ab709a0ea0f7f5c8e02c315afffc300e09c783a8 (diff)
download   ComputeLibrary-9c700378f2227cb9d51455ed4a5086daaac5532a.tar.gz
COMPMID-2769: Add support for QASYMM8_SIGNED in NEFullyConnectedLayer
Change-Id: I4c35c522375ae5a5de78716e079ebb9ffad15956
Signed-off-by: Michele Di Giorgio <michele.digiorgio@arm.com>
Reviewed-on: https://review.mlplatform.org/c/2581
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Diffstat (limited to 'tests')
-rw-r--r--  tests/validation/NEON/FullyConnectedLayer.cpp          | 14
-rw-r--r--  tests/validation/fixtures/FullyConnectedLayerFixture.h | 11
-rw-r--r--  tests/validation/reference/FullyConnectedLayer.cpp     | 11
3 files changed, 29 insertions(+), 7 deletions(-)
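For context, a minimal sketch of how a caller might exercise the new data type is shown below. It is not part of the patch: the shapes, quantization parameters and values are assumptions for illustration only, following the usual NEFullyConnectedLayer configure/allocate/run pattern.

#include "arm_compute/runtime/NEON/functions/NEFullyConnectedLayer.h"
#include "arm_compute/runtime/Tensor.h"

using namespace arm_compute;

int main()
{
    // Hypothetical 128-input, 32-output fully connected layer, all QASYMM8_SIGNED
    // except the bias, which stays S32 (matching the TBias change in the fixture).
    Tensor src, weights, bias, dst;
    src.allocator()->init(TensorInfo(TensorShape(128U), 1, DataType::QASYMM8_SIGNED, QuantizationInfo(0.5f, 10)));
    weights.allocator()->init(TensorInfo(TensorShape(128U, 32U), 1, DataType::QASYMM8_SIGNED, QuantizationInfo(0.25f, -5)));
    bias.allocator()->init(TensorInfo(TensorShape(32U), 1, DataType::S32));
    dst.allocator()->init(TensorInfo(TensorShape(32U), 1, DataType::QASYMM8_SIGNED, QuantizationInfo(1.0f, 0)));

    NEFullyConnectedLayer fc;
    fc.configure(&src, &weights, &bias, &dst);

    src.allocator()->allocate();
    weights.allocator()->allocate();
    bias.allocator()->allocate();
    dst.allocator()->allocate();

    // ... fill src/weights/bias with quantized values, then run ...
    fc.run();
    return 0;
}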
diff --git a/tests/validation/NEON/FullyConnectedLayer.cpp b/tests/validation/NEON/FullyConnectedLayer.cpp
index a7b837fedf..fae116aa9f 100644
--- a/tests/validation/NEON/FullyConnectedLayer.cpp
+++ b/tests/validation/NEON/FullyConnectedLayer.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2019 ARM Limited.
+ * Copyright (c) 2017-2020 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -53,6 +53,7 @@ constexpr float tolerance_num_f16 = 0.07f;
/** Tolerance for quantized asymmetric operations */
constexpr AbsoluteTolerance<uint8_t> tolerance_qasymm8(1);
+constexpr AbsoluteTolerance<int8_t> tolerance_qasymm8_signed(1);
/** CNN data types */
const auto CNNDataTypes = framework::dataset::make("DataType",
@@ -235,6 +236,17 @@ FIXTURE_DATA_TEST_CASE(RunLarge, NEFullyConnectedLayerQuantizedFixture<uint8_t>,
validate(Accessor(_target), _reference, tolerance_qasymm8);
}
TEST_SUITE_END()
+TEST_SUITE(QASYMM8_SIGNED)
+FIXTURE_DATA_TEST_CASE(RunSmall, NEFullyConnectedLayerQuantizedFixture<int8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(
+ combine(datasets::SmallFullyConnectedLayerDataset(),
+ FullyConnectedParameters),
+ framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)),
+ QuantizationData))
+{
+ // Validate output
+ validate(Accessor(_target), _reference, tolerance_qasymm8_signed);
+}
+TEST_SUITE_END()
TEST_SUITE_END()
TEST_SUITE_END()
diff --git a/tests/validation/fixtures/FullyConnectedLayerFixture.h b/tests/validation/fixtures/FullyConnectedLayerFixture.h
index 0449d80de8..ff6ac17744 100644
--- a/tests/validation/fixtures/FullyConnectedLayerFixture.h
+++ b/tests/validation/fixtures/FullyConnectedLayerFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2019 ARM Limited.
+ * Copyright (c) 2017-2020 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -49,7 +49,7 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class FullyConnectedLayerValidationGenericFixture : public framework::Fixture
{
public:
- using TBias = typename std::conditional<std::is_same<typename std::decay<T>::type, uint8_t>::value, int32_t, T>::type;
+ using TBias = typename std::conditional < std::is_same<typename std::decay<T>::type, uint8_t>::value || std::is_same<typename std::decay<T>::type, int8_t>::value, int32_t, T >::type;
public:
template <typename...>
@@ -71,11 +71,16 @@ protected:
template <typename U>
void fill(U &&tensor, int i)
{
- if(is_data_type_quantized_asymmetric(_data_type))
+ if(_data_type == DataType::QASYMM8)
{
std::uniform_int_distribution<uint8_t> distribution(0, 30);
library->fill(tensor, distribution, i);
}
+ else if(_data_type == DataType::QASYMM8_SIGNED)
+ {
+ std::uniform_int_distribution<int8_t> distribution(-15, 15);
+ library->fill(tensor, distribution, i);
+ }
else if(_data_type == DataType::S32)
{
std::uniform_int_distribution<int32_t> distribution(-50, 50);
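One note on the fill above: std::uniform_int_distribution is only specified by the C++ standard for short, int, long, long long and their unsigned counterparts, so instantiating it with int8_t or uint8_t relies on implementation leniency (the existing QASYMM8 branch already does this). A standalone, portable sketch of the same fill ranges, drawing an int and narrowing; the helper name is made up:

#include <cstddef>
#include <cstdint>
#include <random>
#include <vector>

// Hypothetical helper mirroring the fixture's ranges: 0..30 for QASYMM8, -15..15 for QASYMM8_SIGNED.
template <typename T>
std::vector<T> make_quantized_fill(std::size_t n, int lo, int hi, unsigned seed)
{
    std::mt19937                       gen(seed);
    std::uniform_int_distribution<int> dist(lo, hi); // draw as int, then narrow
    std::vector<T>                     out(n);
    for(auto &v : out)
    {
        v = static_cast<T>(dist(gen));
    }
    return out;
}

// Usage: auto q8  = make_quantized_fill<uint8_t>(64, 0, 30, 0);
//        auto qs8 = make_quantized_fill<int8_t>(64, -15, 15, 0);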
diff --git a/tests/validation/reference/FullyConnectedLayer.cpp b/tests/validation/reference/FullyConnectedLayer.cpp
index 261c6453b9..9aecd6cf14 100644
--- a/tests/validation/reference/FullyConnectedLayer.cpp
+++ b/tests/validation/reference/FullyConnectedLayer.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2019 ARM Limited.
+ * Copyright (c) 2017-2020 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -58,7 +58,7 @@ void vector_matrix_multiply(const SimpleTensor<T> &src, const SimpleTensor<T> &w
}
// Vector matrix multiply for quantized type
-template < typename T, typename TB, typename std::enable_if < std::is_same<T, uint8_t>::value &&std::is_same<TB, int32_t>::value, int >::type = 0 >
+template < typename T, typename TB, typename std::enable_if < (std::is_same<T, uint8_t>::value || std::is_same<T, int8_t>::value) &&std::is_same<TB, int32_t>::value, int >::type = 0 >
void vector_matrix_multiply(const SimpleTensor<T> &src, const SimpleTensor<T> &weights, const SimpleTensor<TB> &bias, SimpleTensor<T> &dst, int offset_src, int offset_dst,
int cols_weights, int rows_weights)
{
@@ -83,6 +83,9 @@ void vector_matrix_multiply(const SimpleTensor<T> &src, const SimpleTensor<T> &w
const float multiplier = input_scale * weights_scale / output_scale;
arm_compute::quantization::calculate_quantized_multiplier(multiplier, &output_multiplier, &output_shift);
+ const int min = std::numeric_limits<T>::lowest();
+ const int max = std::numeric_limits<T>::max();
+
for(int y = 0; y < rows_weights; ++y)
{
// Reset accumulator
@@ -97,7 +100,7 @@ void vector_matrix_multiply(const SimpleTensor<T> &src, const SimpleTensor<T> &w
acc += bias_ptr[y];
// Quantize down
- acc = quantize_down_scale_by_fixedpoint(acc, output_multiplier, output_shift, output_offset, 0, 255);
+ acc = quantize_down_scale_by_fixedpoint(acc, output_multiplier, output_shift, output_offset, min, max);
// Store the result
dst_ptr[y] = static_cast<T>(acc);
@@ -160,6 +163,8 @@ template SimpleTensor<half> fully_connected_layer(const SimpleTensor<half> &src,
QuantizationInfo out_quant_info);
template SimpleTensor<uint8_t> fully_connected_layer(const SimpleTensor<uint8_t> &src, const SimpleTensor<uint8_t> &weights, const SimpleTensor<int32_t> &bias, const TensorShape &dst_shape,
QuantizationInfo out_quant_info);
+template SimpleTensor<int8_t> fully_connected_layer(const SimpleTensor<int8_t> &src, const SimpleTensor<int8_t> &weights, const SimpleTensor<int32_t> &bias, const TensorShape &dst_shape,
+ QuantizationInfo out_quant_info);
} // namespace reference
} // namespace validation
} // namespace test
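The reference-kernel change above replaces the hard-coded 0/255 clamp with the output type's own range, so QASYMM8_SIGNED results clamp to [-128, 127]. A rough sketch of that requantization step follows; it uses a plain float scale for clarity where the real code uses calculate_quantized_multiplier() and quantize_down_scale_by_fixedpoint(), and the function name is hypothetical.

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <limits>

template <typename T>
T requantize_sketch(int32_t acc, float effective_scale, int32_t output_offset)
{
    // effective_scale stands in for input_scale * weights_scale / output_scale.
    const int32_t min = std::numeric_limits<T>::lowest();
    const int32_t max = std::numeric_limits<T>::max();
    int32_t       q   = static_cast<int32_t>(std::lround(acc * effective_scale)) + output_offset;
    q                 = std::min(max, std::max(min, q)); // clamp to the output type's range
    return static_cast<T>(q);
}

// e.g. requantize_sketch<int8_t>(...) clamps to [-128, 127]; requantize_sketch<uint8_t>(...) to [0, 255].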