author    Michel Iwaniec <michel.iwaniec@arm.com>    2017-12-07 17:26:40 +0000
committer Anthony Barbier <anthony.barbier@arm.com> 2018-11-02 16:42:17 +0000
commit    66cc12f27dcc2171e1c6f5b357e95e54689a48f0 (patch)
tree      addb2601f873ba230f3b7058e6faa325b862d5bf
parent    afa5d817b1d083837cd7ea30d32f845d82620c12 (diff)
IVGCVSW-839: Add QASYMM8 validation tests for CL ActivationLayer
Change-Id: Ia564ccc6c5901aad2037fa84f69ef913b4118b24
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/112421
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
Tested-by: BSG Visual Compute Jenkins server to access repositories on http://mpd-gerrit.cambridge.arm.com <bsgcomp@arm.com>
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
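For context on the data type under test: a QASYMM8 tensor stores each element as an unsigned 8-bit integer together with a float scale and an offset (zero point), carried in QuantizationInfo. A minimal sketch of that mapping, assuming the usual asymmetric scheme; dequantize_u8 and quantize_u8 are illustrative names, not library API:

#include <algorithm>
#include <cmath>
#include <cstdint>

// Real value represented by a quantized element: scale * (q - offset).
float dequantize_u8(uint8_t q, float scale, int offset)
{
    return scale * (static_cast<int>(q) - offset);
}

// Inverse mapping: round to nearest, then saturate to [0, 255].
uint8_t quantize_u8(float f, float scale, int offset)
{
    const int q = static_cast<int>(std::lround(f / scale)) + offset;
    return static_cast<uint8_t>(std::min(255, std::max(0, q)));
}

With QuantizationInfo(0.1f, 128.0f), as pinned by the tests below, q = 128 maps to 0.0f and the representable values span roughly [-12.8f, 12.7f] in steps of 0.1f.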
-rw-r--r--  tests/validation/CL/ActivationLayer.cpp            | 30
-rw-r--r--  tests/validation/fixtures/ActivationLayerFixture.h | 57
-rw-r--r--  tests/validation/reference/ActivationLayer.cpp     |  9
3 files changed, 81 insertions(+), 15 deletions(-)
diff --git a/tests/validation/CL/ActivationLayer.cpp b/tests/validation/CL/ActivationLayer.cpp
index d3f55d9959..5ceaecaaa9 100644
--- a/tests/validation/CL/ActivationLayer.cpp
+++ b/tests/validation/CL/ActivationLayer.cpp
@@ -237,7 +237,7 @@ TEST_SUITE_END()
template <typename T>
using CLActivationLayerFixedPointFixture = ActivationValidationFixedPointFixture<CLTensor, CLAccessor, CLActivationLayer, T>;
-TEST_SUITE(Quantized)
+TEST_SUITE(FixedPoint)
TEST_SUITE(QS8)
// We test for fixed point precision [3,5] because the [1,2] and [6,7] ranges cause
// overflow issues in most of the transcendental functions.
@@ -280,6 +280,34 @@ FIXTURE_DATA_TEST_CASE(RunLarge, CLActivationLayerFixedPointFixture<int16_t>, fr
TEST_SUITE_END()
TEST_SUITE_END()
+template <typename T>
+using CLActivationLayerQuantizedFixture = ActivationValidationQuantizedFixture<CLTensor, CLAccessor, CLActivationLayer, T>;
+
+/** Input data sets. */
+const auto QuantizedActivationDataset = combine(combine(framework::dataset::make("InPlace", { false, true }), framework::dataset::make("ActivationFunction", { ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU })),
+ framework::dataset::make("AlphaBeta", { 0.5f, 1.f }));
+
+TEST_SUITE(Quantized)
+TEST_SUITE(QASYMM8)
+FIXTURE_DATA_TEST_CASE(RunSmall, CLActivationLayerQuantizedFixture<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallShapes(), QuantizedActivationDataset),
+ framework::dataset::make("DataType",
+ DataType::QASYMM8)),
+ framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.1f, 128.0f) })))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference, tolerance(_function, _data_type));
+}
+FIXTURE_DATA_TEST_CASE(RunLarge, CLActivationLayerQuantizedFixture<uint8_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeShapes(), QuantizedActivationDataset),
+ framework::dataset::make("DataType",
+ DataType::QASYMM8)),
+ framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.1f, 128.0f) })))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference, tolerance(_function, _data_type));
+}
+TEST_SUITE_END()
+TEST_SUITE_END()
+
TEST_SUITE_END()
TEST_SUITE_END()
} // namespace validation
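The quantized dataset above exercises a single function, LU_BOUNDED_RELU, which the library defines as f(x) = min(a, max(b, x)). A minimal sketch of the float-domain behaviour the quantized path is validated against (lu_bounded_relu is an illustrative name, not library API):

#include <algorithm>

float lu_bounded_relu(float x, float a, float b)
{
    // Clamp x to the interval [b, a].
    return std::min(a, std::max(b, x));
}

Note that the fixture below constructs ActivationLayerInfo(function, alpha_beta, alpha_beta), so these quantized tests effectively run with a == b (0.5f or 1.f), which clamps every input to that single value.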
diff --git a/tests/validation/fixtures/ActivationLayerFixture.h b/tests/validation/fixtures/ActivationLayerFixture.h
index 75e625f715..e212c7bd9b 100644
--- a/tests/validation/fixtures/ActivationLayerFixture.h
+++ b/tests/validation/fixtures/ActivationLayerFixture.h
@@ -43,20 +43,21 @@ namespace test
namespace validation
{
template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
-class ActivationValidationFixedPointFixture : public framework::Fixture
+class ActivationValidationGenericFixture : public framework::Fixture
{
public:
template <typename...>
- void setup(TensorShape shape, bool in_place, ActivationLayerInfo::ActivationFunction function, float alpha_beta, DataType data_type, int fractional_bits)
+ void setup(TensorShape shape, bool in_place, ActivationLayerInfo::ActivationFunction function, float alpha_beta, DataType data_type, int fractional_bits, QuantizationInfo quantization_info)
{
- _fractional_bits = fractional_bits;
- _data_type = data_type;
- _function = function;
+ _fractional_bits = fractional_bits;
+ _quantization_info = quantization_info;
+ _data_type = data_type;
+ _function = function;
ActivationLayerInfo info(function, alpha_beta, alpha_beta);
- _target = compute_target(shape, in_place, info, data_type, fractional_bits);
- _reference = compute_reference(shape, info, data_type, fractional_bits);
+ _target = compute_target(shape, in_place, info, data_type, fractional_bits, quantization_info);
+ _reference = compute_reference(shape, info, data_type, fractional_bits, quantization_info);
}
protected:
@@ -71,6 +72,10 @@ protected:
std::uniform_real_distribution<> distribution(min_bound, max_bound);
library->fill(tensor, distribution, 0);
}
+ else if(is_data_type_quantized_asymmetric(tensor.data_type()))
+ {
+ library->fill_tensor_uniform(tensor, 0);
+ }
else
{
int min_bound = 0;
@@ -81,11 +86,11 @@ protected:
}
}
- TensorType compute_target(const TensorShape &shape, bool in_place, ActivationLayerInfo info, DataType data_type, int fixed_point_position = 0)
+ TensorType compute_target(const TensorShape &shape, bool in_place, ActivationLayerInfo info, DataType data_type, int fixed_point_position, QuantizationInfo quantization_info)
{
// Create tensors
- TensorType src = create_tensor<TensorType>(shape, data_type, 1, fixed_point_position);
- TensorType dst = create_tensor<TensorType>(shape, data_type, 1, fixed_point_position);
+ TensorType src = create_tensor<TensorType>(shape, data_type, 1, fixed_point_position, quantization_info);
+ TensorType dst = create_tensor<TensorType>(shape, data_type, 1, fixed_point_position, quantization_info);
// Create and configure function
FunctionType act_layer;
@@ -123,10 +128,10 @@ protected:
}
}
- SimpleTensor<T> compute_reference(const TensorShape &shape, ActivationLayerInfo info, DataType data_type, int fixed_point_position = 0)
+ SimpleTensor<T> compute_reference(const TensorShape &shape, ActivationLayerInfo info, DataType data_type, int fixed_point_position, QuantizationInfo quantization_info)
{
// Create reference
- SimpleTensor<T> src{ shape, data_type, 1, fixed_point_position };
+ SimpleTensor<T> src{ shape, data_type, 1, fixed_point_position, quantization_info };
// Fill reference
fill(src);
@@ -137,20 +142,44 @@ protected:
TensorType _target{};
SimpleTensor<T> _reference{};
int _fractional_bits{};
+ QuantizationInfo _quantization_info{};
DataType _data_type{};
ActivationLayerInfo::ActivationFunction _function{};
};
template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
-class ActivationValidationFixture : public ActivationValidationFixedPointFixture<TensorType, AccessorType, FunctionType, T>
+class ActivationValidationFixture : public ActivationValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
{
public:
template <typename...>
void setup(TensorShape shape, bool in_place, ActivationLayerInfo::ActivationFunction function, float alpha_beta, DataType data_type)
{
- ActivationValidationFixedPointFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, in_place, function, alpha_beta, data_type, 0);
+ ActivationValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, in_place, function, alpha_beta, data_type, 0, QuantizationInfo());
}
};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class ActivationValidationFixedPointFixture : public ActivationValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+ template <typename...>
+ void setup(TensorShape shape, bool in_place, ActivationLayerInfo::ActivationFunction function, float alpha_beta, DataType data_type, int fractional_bits)
+ {
+ ActivationValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, in_place, function, alpha_beta, data_type, fractional_bits, QuantizationInfo());
+ }
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class ActivationValidationQuantizedFixture : public ActivationValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+ template <typename...>
+ void setup(TensorShape shape, bool in_place, ActivationLayerInfo::ActivationFunction function, float alpha_beta, DataType data_type, QuantizationInfo quantization_info)
+ {
+ ActivationValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, in_place, function, alpha_beta, data_type, 0, quantization_info);
+ }
+};
+
} // namespace validation
} // namespace test
} // namespace arm_compute
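The refactor above funnels all three public fixtures through one generic setup(): ActivationValidationFixture pins fractional_bits = 0 and an empty QuantizationInfo, the fixed-point fixture pins only the QuantizationInfo, and the quantized fixture pins only fractional_bits. A self-contained sketch of the same forwarding pattern, with illustrative stand-in names:

struct QInfo
{
    float scale{};
    float offset{};
};

struct Generic
{
    void setup(int shape, int fractional_bits, QInfo quantization_info)
    {
        // Build target and reference from the full parameter set.
    }
};

struct Quantized : Generic
{
    // Call sites pass only what their test family varies; the base
    // receives neutral defaults for everything else.
    void setup(int shape, QInfo quantization_info)
    {
        Generic::setup(shape, /* fractional_bits */ 0, quantization_info);
    }
};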
diff --git a/tests/validation/reference/ActivationLayer.cpp b/tests/validation/reference/ActivationLayer.cpp
index 2243e6ff59..df7f6534bc 100644
--- a/tests/validation/reference/ActivationLayer.cpp
+++ b/tests/validation/reference/ActivationLayer.cpp
@@ -154,6 +154,15 @@ SimpleTensor<T> activation_layer(const SimpleTensor<T> &src, ActivationLayerInfo
return dst;
}
+template <>
+SimpleTensor<uint8_t> activation_layer<uint8_t>(const SimpleTensor<uint8_t> &src, ActivationLayerInfo info)
+{
+ SimpleTensor<float> src_tmp = convert_from_asymmetric(src);
+ SimpleTensor<float> dst_tmp = activation_layer<float>(src_tmp, info);
+ SimpleTensor<uint8_t> dst = convert_to_asymmetric(dst_tmp, src.quantization_info());
+ return dst;
+}
+
template SimpleTensor<float> activation_layer(const SimpleTensor<float> &src, ActivationLayerInfo info);
template SimpleTensor<half> activation_layer(const SimpleTensor<half> &src, ActivationLayerInfo info);
template SimpleTensor<qint8_t> activation_layer(const SimpleTensor<qint8_t> &src, ActivationLayerInfo info);
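The uint8_t specialization above never implements a quantized activation directly: it dequantizes, reuses the float reference, and requantizes with the source tensor's QuantizationInfo. A hedged sketch of that round trip over a flat buffer (activation_qasymm8 and its parameters are illustrative, not the library's helpers):

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <vector>

// Dequantize with the input's (scale, offset), apply the float activation,
// then requantize with the same (scale, offset), saturating to [0, 255].
std::vector<uint8_t> activation_qasymm8(const std::vector<uint8_t> &src,
                                        float scale, int offset,
                                        float (*act)(float))
{
    std::vector<uint8_t> dst(src.size());
    for(std::size_t i = 0; i < src.size(); ++i)
    {
        const float x = scale * (static_cast<int>(src[i]) - offset); // convert_from_asymmetric, per element
        const float y = act(x);                                      // float reference path
        const int   q = static_cast<int>(std::lround(y / scale)) + offset;
        dst[i] = static_cast<uint8_t>(std::min(255, std::max(0, q))); // convert_to_asymmetric, per element
    }
    return dst;
}

Reusing src.quantization_info() for dst means any output the activation pushes outside the input's representable range saturates at 0 or 255, mirroring the saturating cast in the sketch.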