author    Georgios Pinitas <georgios.pinitas@arm.com>    2019-12-02 19:01:25 +0000
committer Georgios Pinitas <georgios.pinitas@arm.com>    2019-12-04 12:44:28 +0000
commit    6e1791b1bfabc81f08d3117939f6eb5264ed4edf (patch)
tree      b984d58856ef9baa168bcf878659caddf599f623 /tests
parent    5cb49dcf7ad74cc6e7e91790b7132ae4dd845515 (diff)
download  ComputeLibrary-6e1791b1bfabc81f08d3117939f6eb5264ed4edf.tar.gz
COMPMID-2764: Add support for QASYMM8_SIGNED in NEConvolutionLayer.
Change-Id: I8fbbd2e399f48968337a60147098d04f27c2d1c0
Signed-off-by: Georgios Pinitas <georgios.pinitas@arm.com>
Reviewed-on: https://review.mlplatform.org/c/2402
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Michele Di Giorgio <michele.digiorgio@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
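For context, a minimal sketch of the asymmetric signed 8-bit mapping this patch targets. The names below are local to the sketch (not the arm_compute API), and round-to-nearest is an assumed rounding policy.

    // Sketch only: plain C++ illustrating QASYMM8_SIGNED quantization, not the library implementation.
    #include <algorithm>
    #include <cmath>
    #include <cstdint>

    struct UniformQInfoSketch { float scale; int32_t offset; };  // assumed fields, mirroring UniformQuantizationInfo

    inline int8_t quantize_s8_sketch(float v, UniformQInfoSketch qi)
    {
        const int32_t q = static_cast<int32_t>(std::lround(v / qi.scale)) + qi.offset; // rounding policy assumed
        return static_cast<int8_t>(std::min<int32_t>(127, std::max<int32_t>(-128, q)));
    }

    inline float dequantize_s8_sketch(int8_t q, UniformQInfoSketch qi)
    {
        return static_cast<float>(static_cast<int32_t>(q) - qi.offset) * qi.scale;
    }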
Diffstat (limited to 'tests')
-rw-r--r--  tests/validation/Helpers.cpp                          35
-rw-r--r--  tests/validation/Helpers.h                             8
-rw-r--r--  tests/validation/NEON/ConvolutionLayer.cpp            13
-rw-r--r--  tests/validation/fixtures/ConvolutionLayerFixture.h   11
-rw-r--r--  tests/validation/reference/ActivationLayer.cpp        11
-rw-r--r--  tests/validation/reference/Convolution3d.h            10
-rw-r--r--  tests/validation/reference/ConvolutionLayer.cpp        6
7 files changed, 71 insertions, 23 deletions
diff --git a/tests/validation/Helpers.cpp b/tests/validation/Helpers.cpp
index fef4510405..afefee77be 100644
--- a/tests/validation/Helpers.cpp
+++ b/tests/validation/Helpers.cpp
@@ -122,53 +122,53 @@ SimpleTensor<float> convert_from_asymmetric(const SimpleTensor<uint8_t> &src)
}
template <>
-SimpleTensor<float> convert_from_asymmetric(const SimpleTensor<uint16_t> &src)
+SimpleTensor<float> convert_from_asymmetric(const SimpleTensor<int8_t> &src)
{
const UniformQuantizationInfo &quantization_info = src.quantization_info().uniform();
SimpleTensor<float> dst{ src.shape(), DataType::F32, 1, QuantizationInfo(), src.data_layout() };
for(int i = 0; i < src.num_elements(); ++i)
{
- dst[i] = dequantize_qasymm16(src[i], quantization_info);
+ dst[i] = dequantize_qasymm8_signed(src[i], quantization_info);
}
return dst;
}
template <>
-SimpleTensor<uint8_t> convert_to_asymmetric(const SimpleTensor<float> &src, const QuantizationInfo &quantization_info)
+SimpleTensor<float> convert_from_asymmetric(const SimpleTensor<uint16_t> &src)
{
- SimpleTensor<uint8_t> dst{ src.shape(), DataType::QASYMM8, 1, quantization_info };
- const UniformQuantizationInfo &qinfo = quantization_info.uniform();
+ const UniformQuantizationInfo &quantization_info = src.quantization_info().uniform();
+ SimpleTensor<float> dst{ src.shape(), DataType::F32, 1, QuantizationInfo(), src.data_layout() };
for(int i = 0; i < src.num_elements(); ++i)
{
- dst[i] = quantize_qasymm8(src[i], qinfo);
+ dst[i] = dequantize_qasymm16(src[i], quantization_info);
}
return dst;
}
template <>
-SimpleTensor<int8_t> convert_to_asymmetric(const SimpleTensor<float> &src, const QuantizationInfo &quantization_info)
+SimpleTensor<uint8_t> convert_to_asymmetric(const SimpleTensor<float> &src, const QuantizationInfo &quantization_info)
{
- SimpleTensor<int8_t> dst{ src.shape(), DataType::QASYMM8_SIGNED, 1, quantization_info };
+ SimpleTensor<uint8_t> dst{ src.shape(), DataType::QASYMM8, 1, quantization_info };
const UniformQuantizationInfo &qinfo = quantization_info.uniform();
for(int i = 0; i < src.num_elements(); ++i)
{
- dst[i] = quantize_qasymm8_signed(src[i], qinfo);
+ dst[i] = quantize_qasymm8(src[i], qinfo);
}
return dst;
}
template <>
-SimpleTensor<float> convert_from_asymmetric(const SimpleTensor<int8_t> &src)
+SimpleTensor<int8_t> convert_to_asymmetric(const SimpleTensor<float> &src, const QuantizationInfo &quantization_info)
{
- const UniformQuantizationInfo &quantization_info = src.quantization_info().uniform();
- SimpleTensor<float> dst{ src.shape(), DataType::F32, 1, QuantizationInfo(), src.data_layout() };
+ SimpleTensor<int8_t> dst{ src.shape(), DataType::QASYMM8_SIGNED, 1, quantization_info };
+ const UniformQuantizationInfo &qinfo = quantization_info.uniform();
for(int i = 0; i < src.num_elements(); ++i)
{
- dst[i] = dequantize_qasymm8_signed(src[i], quantization_info);
+ dst[i] = quantize_qasymm8_signed(src[i], qinfo);
}
return dst;
}
@@ -354,6 +354,15 @@ std::pair<int, int> get_quantized_bounds(const QuantizationInfo &quant_info, flo
return std::pair<int, int> { min_bound, max_bound };
}
+std::pair<int, int> get_quantized_qasymm8_signed_bounds(const QuantizationInfo &quant_info, float min, float max)
+{
+ ARM_COMPUTE_ERROR_ON_MSG(min > max, "min must be lower equal than max");
+
+ const int min_bound = quantize_qasymm8_signed(min, quant_info.uniform());
+ const int max_bound = quantize_qasymm8_signed(max, quant_info.uniform());
+ return std::pair<int, int> { min_bound, max_bound };
+}
+
std::pair<int, int> get_symm_quantized_per_channel_bounds(const QuantizationInfo &quant_info, float min, float max, size_t channel_id)
{
ARM_COMPUTE_ERROR_ON_MSG(min > max, "min must be lower equal than max");
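A hypothetical standalone use of the new get_quantized_qasymm8_signed_bounds helper (the QuantizationInfo values are reused from the test case added later in this patch; the arm_compute::test::validation namespace is assumed to be in scope):

    #include <random>
    #include <utility>
    #include "tests/validation/Helpers.h"

    const arm_compute::QuantizationInfo qinfo(2.f / 255.f, 10);                  // same qinfo as the new QASYMM8_SIGNED test
    const std::pair<int, int> bounds = get_quantized_qasymm8_signed_bounds(qinfo, -1.0f, 1.0f);
    std::uniform_int_distribution<int32_t> dist(bounds.first, bounds.second);    // fill values stay inside the signed range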
diff --git a/tests/validation/Helpers.h b/tests/validation/Helpers.h
index 3227a98b05..b481b52443 100644
--- a/tests/validation/Helpers.h
+++ b/tests/validation/Helpers.h
@@ -262,6 +262,14 @@ void zeros(SimpleTensor<T> &in, const Coordinates &anchor, const TensorShape &sh
*/
std::pair<int, int> get_quantized_bounds(const QuantizationInfo &quant_info, float min, float max);
+/** Helper function to compute asymmetric quantized signed min and max bounds
+ *
+ * @param[in] quant_info Quantization info to be used for conversion
+ * @param[in] min Floating point minimum value to be quantized
+ * @param[in] max Floating point maximum value to be quantized
+ */
+std::pair<int, int> get_quantized_qasymm8_signed_bounds(const QuantizationInfo &quant_info, float min, float max);
+
/** Helper function to compute symmetric quantized min and max bounds
*
* @param[in] quant_info Quantization info to be used for conversion
diff --git a/tests/validation/NEON/ConvolutionLayer.cpp b/tests/validation/NEON/ConvolutionLayer.cpp
index c2a0cb56a2..1d7805d024 100644
--- a/tests/validation/NEON/ConvolutionLayer.cpp
+++ b/tests/validation/NEON/ConvolutionLayer.cpp
@@ -462,6 +462,19 @@ FIXTURE_DATA_TEST_CASE(RunLarge, NEGEMMConvolutionLayerQuantizedFixture<uint8_t>
}
TEST_SUITE_END() // QASYMM8
+TEST_SUITE(QASYMM8_SIGNED)
+FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMConvolutionLayerQuantizedFixture<int8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(combine(datasets::SmallConvolutionLayerDataset(),
+ framework::dataset::make("ReshapeWeights", { true })),
+ framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)),
+ framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
+ framework::dataset::make("QuantizationInfo", { QuantizationInfo(2.f / 255.f, 10) })),
+ QuantizedActivationFunctionsDataset))
+{
+ // Validate output
+ validate(Accessor(_target), _reference, tolerance_qasymm8);
+}
+TEST_SUITE_END() // QASYMM8_SIGNED
+
TEST_SUITE(QSYMM8_PER_CHANNEL)
FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMConvolutionLayerQuantizedPerChannelFixture<uint8_t>, framework::DatasetMode::PRECOMMIT,
combine(combine(combine(combine(combine(combine(datasets::SmallConvolutionLayerReducedDataset(),
diff --git a/tests/validation/fixtures/ConvolutionLayerFixture.h b/tests/validation/fixtures/ConvolutionLayerFixture.h
index c5cddc28db..3c4b625ac6 100644
--- a/tests/validation/fixtures/ConvolutionLayerFixture.h
+++ b/tests/validation/fixtures/ConvolutionLayerFixture.h
@@ -52,7 +52,9 @@ template <typename TensorType, typename AccessorType, typename FunctionType, typ
class ConvolutionValidationGenericFixture : public framework::Fixture
{
public:
- using TBias = typename std::conditional<std::is_same<typename std::decay<T>::type, uint8_t>::value, int32_t, T>::type;
+ using TBias = typename std::conditional < std::is_same<typename std::decay<T>::type, uint8_t>::value
+ || std::is_same<typename std::decay<T>::type, int8_t>::value,
+ int32_t, T >::type;
public:
template <typename...>
@@ -84,6 +86,13 @@ protected:
library->fill(tensor, distribution, i);
break;
}
+ case DataType::QASYMM8_SIGNED:
+ {
+ std::pair<int, int> bounds = get_quantized_qasymm8_signed_bounds(tensor.quantization_info(), -1.0f, 1.0f);
+ std::uniform_int_distribution<int8_t> distribution(bounds.first, bounds.second);
+ library->fill(tensor, distribution, i);
+ break;
+ }
case DataType::QSYMM8_PER_CHANNEL:
{
int min_bound = 128;
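A quick sanity sketch of the widened TBias selection above; the alias is restated locally rather than pulled from the fixture, so the static_asserts compile on their own:

    #include <cstdint>
    #include <type_traits>

    template <typename T>
    using TBiasSketch = typename std::conditional<std::is_same<typename std::decay<T>::type, uint8_t>::value
                                                  || std::is_same<typename std::decay<T>::type, int8_t>::value,
                                                  int32_t, T>::type;

    static_assert(std::is_same<TBiasSketch<uint8_t>, int32_t>::value, "QASYMM8 biases accumulate in int32");
    static_assert(std::is_same<TBiasSketch<int8_t>, int32_t>::value,  "QASYMM8_SIGNED biases accumulate in int32");
    static_assert(std::is_same<TBiasSketch<float>, float>::value,     "floating-point biases keep their own type");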
diff --git a/tests/validation/reference/ActivationLayer.cpp b/tests/validation/reference/ActivationLayer.cpp
index 6cdba09c75..7a699c5f86 100644
--- a/tests/validation/reference/ActivationLayer.cpp
+++ b/tests/validation/reference/ActivationLayer.cpp
@@ -66,6 +66,17 @@ SimpleTensor<uint8_t> activation_layer<uint8_t>(const SimpleTensor<uint8_t> &src
}
template <>
+SimpleTensor<int8_t> activation_layer<int8_t>(const SimpleTensor<int8_t> &src, ActivationLayerInfo info, const QuantizationInfo &oq_info)
+{
+ const QuantizationInfo dst_qinfo = oq_info.empty() ? src.quantization_info() : oq_info;
+
+ SimpleTensor<float> src_tmp = convert_from_asymmetric(src);
+ SimpleTensor<float> dst_tmp = activation_layer<float>(src_tmp, info);
+ SimpleTensor<int8_t> dst = convert_to_asymmetric<int8_t>(dst_tmp, dst_qinfo);
+ return dst;
+}
+
+template <>
SimpleTensor<int16_t> activation_layer<int16_t>(const SimpleTensor<int16_t> &src, ActivationLayerInfo info, const QuantizationInfo &oq_info)
{
const QuantizationInfo dst_qinfo = oq_info.empty() ? src.quantization_info() : oq_info;
diff --git a/tests/validation/reference/Convolution3d.h b/tests/validation/reference/Convolution3d.h
index 6ac5df93b3..6168f10741 100644
--- a/tests/validation/reference/Convolution3d.h
+++ b/tests/validation/reference/Convolution3d.h
@@ -24,6 +24,7 @@
#ifndef ARM_COMPUTE_TEST_VALIDATION_CONVOLUTION_H
#define ARM_COMPUTE_TEST_VALIDATION_CONVOLUTION_H
+#include "arm_compute/core/utils/misc/Requires.h"
#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
#include "tests/validation/Helpers.h"
#include "tests/validation/reference/UtilsQuantizedAsymm.h"
@@ -94,10 +95,8 @@ inline void convolution3d(const SimpleTensor<T> &in, const SimpleTensor<TW> &wei
}
// 3D convolution for QASYMM8 type
-template < typename T, typename TW, typename TB, typename std::enable_if < std::is_same<T, uint8_t>::value &&(std::is_same<TW, uint8_t>::value
- || std::is_same<TW, int8_t>::value)
- &&std::is_same<TB, int32_t>::value,
- int >::type = 0 >
+template < typename T, typename TW, typename TB, REQUIRES_TA((std::is_same<T, uint8_t>::value || std::is_same<T, int8_t>::value) &&(std::is_same<TW, uint8_t>::value
+ || std::is_same<TW, int8_t>::value)) >
inline void convolution3d(const SimpleTensor<T> &in, const SimpleTensor<TW> &weights, const SimpleTensor<TB> &bias, SimpleTensor<T> &out,
int i_offset, int w_offset, int b_offset, int o_offset,
int xi, int yi, int width_in, int height_in, int depth_in, int width_weights, int height_weights, int dilation_x = 1, int dilation_y = 1, int filter_id = 0)
@@ -172,7 +171,8 @@ inline void convolution3d(const SimpleTensor<T> &in, const SimpleTensor<TW> &wei
acc += (*b_ptr);
// Quantize down
- acc = validation::quantize_down_scale_by_fixedpoint(acc, output_multiplier, output_shift, output_offset, 0, 255);
+ acc = validation::quantize_down_scale_by_fixedpoint(acc, output_multiplier, output_shift, output_offset,
+ std::numeric_limits<T>::lowest(), std::numeric_limits<T>::max());
// Store the result
*out_ptr = acc;
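The clamp range above now follows the tensor type T instead of the hard-coded [0, 255]; a standalone sketch of the resulting bounds:

    #include <cstdint>
    #include <limits>

    static_assert(std::numeric_limits<uint8_t>::lowest() == 0 && std::numeric_limits<uint8_t>::max() == 255,
                  "QASYMM8 output clamps to [0, 255]");
    static_assert(std::numeric_limits<int8_t>::lowest() == -128 && std::numeric_limits<int8_t>::max() == 127,
                  "QASYMM8_SIGNED output clamps to [-128, 127]");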
diff --git a/tests/validation/reference/ConvolutionLayer.cpp b/tests/validation/reference/ConvolutionLayer.cpp
index 4d2c1acb6f..c9ad8d38b9 100644
--- a/tests/validation/reference/ConvolutionLayer.cpp
+++ b/tests/validation/reference/ConvolutionLayer.cpp
@@ -41,10 +41,6 @@ namespace validation
{
namespace reference
{
-namespace
-{
-} // namespace
-
template <typename T, typename TW, typename TB>
SimpleTensor<T> convolution_layer_nchw(const SimpleTensor<T> &src, const SimpleTensor<TW> &weights, const SimpleTensor<TB> &bias, SimpleTensor<T> &dst, const PadStrideInfo &info,
const Size2D &dilation, unsigned int num_groups)
@@ -141,6 +137,8 @@ template SimpleTensor<uint8_t> convolution_layer(const SimpleTensor<uint8_t> &sr
const PadStrideInfo &info, const Size2D &dilation, unsigned int num_groups, QuantizationInfo out_quant_info);
template SimpleTensor<uint8_t> convolution_layer(const SimpleTensor<uint8_t> &src, const SimpleTensor<int8_t> &weights, const SimpleTensor<int32_t> &bias, const TensorShape &output_shape,
const PadStrideInfo &info, const Size2D &dilation, unsigned int num_groups, QuantizationInfo out_quant_info);
+template SimpleTensor<int8_t> convolution_layer(const SimpleTensor<int8_t> &src, const SimpleTensor<int8_t> &weights, const SimpleTensor<int32_t> &bias, const TensorShape &output_shape,
+ const PadStrideInfo &info, const Size2D &dilation, unsigned int num_groups, QuantizationInfo out_quant_info);
} // namespace reference
} // namespace validation
} // namespace test