diff options
author | Georgios Pinitas <georgios.pinitas@arm.com> | 2019-06-04 17:31:46 +0100 |
---|---|---|
committer | Georgios Pinitas <georgios.pinitas@arm.com> | 2019-06-05 14:14:48 +0000 |
commit | 4b3fba1850fdf84ba3f9a0c98acf3de672330b34 (patch) | |
tree | 1b65639ec7387c474903583ff0927918c8c7d837 /tests/validation | |
parent | c625acd2a60a4fe34633c5cecef85c230933f772 (diff) | |
download | ComputeLibrary-4b3fba1850fdf84ba3f9a0c98acf3de672330b34.tar.gz |
COMPMID-2372: Add support for QASYMM8 for Tanh
- Perform calculations in the floating-point domain
- Extend checks for Logistic, as scale should be 1/256 and offset 0
Change-Id: I90ef4a042f053976936f5d28f8e09b54eec196a2
Signed-off-by: Georgios Pinitas <georgios.pinitas@arm.com>
Reviewed-on: https://review.mlplatform.org/c/1287
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Michalis Spyrou <michalis.spyrou@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Diffstat (limited to 'tests/validation')
-rw-r--r-- | tests/validation/CL/ActivationLayer.cpp | 2 | ||||
-rw-r--r-- | tests/validation/NEON/ActivationLayer.cpp | 3 | ||||
-rw-r--r-- | tests/validation/fixtures/ActivationLayerFixture.h | 53 | ||||
-rw-r--r-- | tests/validation/reference/ActivationLayer.cpp | 16 | ||||
-rw-r--r-- | tests/validation/reference/ActivationLayer.h | 4 |
5 files changed, 51 insertions, 27 deletions
diff --git a/tests/validation/CL/ActivationLayer.cpp b/tests/validation/CL/ActivationLayer.cpp index e95db7ca60..a286458483 100644 --- a/tests/validation/CL/ActivationLayer.cpp +++ b/tests/validation/CL/ActivationLayer.cpp @@ -137,7 +137,7 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip( TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Window shrink TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QASYMM8), - TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QASYMM8), // Unsupported activation + TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QASYMM8), // Invalid quantization info TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Mismatching shapes }), framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F16), diff --git a/tests/validation/NEON/ActivationLayer.cpp b/tests/validation/NEON/ActivationLayer.cpp index 3a91c9c3be..1d8fffa903 100644 --- a/tests/validation/NEON/ActivationLayer.cpp +++ b/tests/validation/NEON/ActivationLayer.cpp @@ -223,7 +223,8 @@ using NEActivationLayerQuantizedFixture = ActivationValidationQuantizedFixture<T const auto QuantizedActivationFunctionsDataset = framework::dataset::make("ActivationFunction", { ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, ActivationLayerInfo::ActivationFunction::RELU, ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, - ActivationLayerInfo::ActivationFunction::LOGISTIC + ActivationLayerInfo::ActivationFunction::LOGISTIC, + ActivationLayerInfo::ActivationFunction::TANH }); const auto QuantizedActivationDataset = combine(combine(framework::dataset::make("InPlace", { false, true }), QuantizedActivationFunctionsDataset), diff --git a/tests/validation/fixtures/ActivationLayerFixture.h b/tests/validation/fixtures/ActivationLayerFixture.h index d29d67c8e6..464382a1ec 100644 --- a/tests/validation/fixtures/ActivationLayerFixture.h +++ 
b/tests/validation/fixtures/ActivationLayerFixture.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2018 ARM Limited. + * Copyright (c) 2017-2019 ARM Limited. * * SPDX-License-Identifier: MIT * @@ -49,14 +49,16 @@ public: template <typename...> void setup(TensorShape shape, bool in_place, ActivationLayerInfo::ActivationFunction function, float alpha_beta, DataType data_type, QuantizationInfo quantization_info) { - _quantization_info = quantization_info; - _data_type = data_type; - _function = function; - ActivationLayerInfo info(function, alpha_beta, alpha_beta); - _target = compute_target(shape, in_place, info, data_type, quantization_info); - _reference = compute_reference(shape, info, data_type, quantization_info); + _in_place = in_place; + _output_quantization_info = calculate_output_quantization_info(info, quantization_info); + _input_quantization_info = in_place ? _output_quantization_info : quantization_info; + _data_type = data_type; + _function = function; + + _target = compute_target(shape, info); + _reference = compute_reference(shape, info); } protected: @@ -85,16 +87,16 @@ protected: } } - TensorType compute_target(const TensorShape &shape, bool in_place, ActivationLayerInfo info, DataType data_type, QuantizationInfo quantization_info) + TensorType compute_target(const TensorShape &shape, ActivationLayerInfo info) { // Create tensors - TensorType src = create_tensor<TensorType>(shape, data_type, 1, quantization_info); - TensorType dst = create_tensor<TensorType>(shape, data_type, 1, quantization_info); + TensorType src = create_tensor<TensorType>(shape, _data_type, 1, _input_quantization_info); + TensorType dst = create_tensor<TensorType>(shape, _data_type, 1, _output_quantization_info); // Create and configure function FunctionType act_layer; - TensorType *dst_ptr = in_place ? &src : &dst; + TensorType *dst_ptr = _in_place ? 
&src : &dst; act_layer.configure(&src, dst_ptr, info); @@ -105,7 +107,7 @@ protected: src.allocator()->allocate(); ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS); - if(!in_place) + if(!_in_place) { dst.allocator()->allocate(); ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS); @@ -117,7 +119,7 @@ protected: // Compute function act_layer.run(); - if(in_place) + if(_in_place) { return src; } @@ -127,20 +129,37 @@ protected: } } - SimpleTensor<T> compute_reference(const TensorShape &shape, ActivationLayerInfo info, DataType data_type, QuantizationInfo quantization_info) + SimpleTensor<T> compute_reference(const TensorShape &shape, ActivationLayerInfo info) { // Create reference - SimpleTensor<T> src{ shape, data_type, 1, quantization_info }; + SimpleTensor<T> src{ shape, _data_type, 1, _input_quantization_info }; // Fill reference fill(src); - return reference::activation_layer<T>(src, info); + return reference::activation_layer<T>(src, info, _output_quantization_info); + } + +private: + QuantizationInfo calculate_output_quantization_info(const ActivationLayerInfo &act_info, const QuantizationInfo &default_qinfo) + { + switch(act_info.activation()) + { + case ActivationLayerInfo::ActivationFunction::TANH: + return QuantizationInfo(1.f / 128.f, 128); + case ActivationLayerInfo::ActivationFunction::LOGISTIC: + return QuantizationInfo(1.f / 256.f, 0); + default: + return default_qinfo; + } } +protected: TensorType _target{}; SimpleTensor<T> _reference{}; - QuantizationInfo _quantization_info{}; + bool _in_place{}; + QuantizationInfo _input_quantization_info{}; + QuantizationInfo _output_quantization_info{}; DataType _data_type{}; ActivationLayerInfo::ActivationFunction _function{}; }; diff --git a/tests/validation/reference/ActivationLayer.cpp b/tests/validation/reference/ActivationLayer.cpp index 9887e42386..f5e98aa7e8 100644 --- a/tests/validation/reference/ActivationLayer.cpp +++ 
b/tests/validation/reference/ActivationLayer.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2018 ARM Limited. + * Copyright (c) 2017-2019 ARM Limited. * * SPDX-License-Identifier: MIT * @@ -35,8 +35,10 @@ namespace validation namespace reference { template <typename T> -SimpleTensor<T> activation_layer(const SimpleTensor<T> &src, ActivationLayerInfo info) +SimpleTensor<T> activation_layer(const SimpleTensor<T> &src, ActivationLayerInfo info, const QuantizationInfo &oq_info) { + ARM_COMPUTE_UNUSED(oq_info); + // Create reference SimpleTensor<T> dst{ src.shape(), src.data_type(), 1 }; @@ -53,16 +55,18 @@ SimpleTensor<T> activation_layer(const SimpleTensor<T> &src, ActivationLayerInfo } template <> -SimpleTensor<uint8_t> activation_layer<uint8_t>(const SimpleTensor<uint8_t> &src, ActivationLayerInfo info) +SimpleTensor<uint8_t> activation_layer<uint8_t>(const SimpleTensor<uint8_t> &src, ActivationLayerInfo info, const QuantizationInfo &oq_info) { + const QuantizationInfo dst_qinfo = oq_info.empty() ? 
src.quantization_info() : oq_info; + SimpleTensor<float> src_tmp = convert_from_asymmetric(src); SimpleTensor<float> dst_tmp = activation_layer<float>(src_tmp, info); - SimpleTensor<uint8_t> dst = convert_to_asymmetric(dst_tmp, src.quantization_info()); + SimpleTensor<uint8_t> dst = convert_to_asymmetric(dst_tmp, dst_qinfo); return dst; } -template SimpleTensor<float> activation_layer(const SimpleTensor<float> &src, ActivationLayerInfo info); -template SimpleTensor<half> activation_layer(const SimpleTensor<half> &src, ActivationLayerInfo info); +template SimpleTensor<float> activation_layer(const SimpleTensor<float> &src, ActivationLayerInfo info, const QuantizationInfo &oq_info); +template SimpleTensor<half> activation_layer(const SimpleTensor<half> &src, ActivationLayerInfo info, const QuantizationInfo &oq_info); } // namespace reference } // namespace validation } // namespace test diff --git a/tests/validation/reference/ActivationLayer.h b/tests/validation/reference/ActivationLayer.h index cc43bc10a8..5beca7c76d 100644 --- a/tests/validation/reference/ActivationLayer.h +++ b/tests/validation/reference/ActivationLayer.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2018 ARM Limited. + * Copyright (c) 2017-2019 ARM Limited. * * SPDX-License-Identifier: MIT * @@ -87,7 +87,7 @@ inline T activate_float(T x, T a, T b, ActivationLayerInfo::ActivationFunction a } template <typename T> -SimpleTensor<T> activation_layer(const SimpleTensor<T> &src, ActivationLayerInfo info); +SimpleTensor<T> activation_layer(const SimpleTensor<T> &src, ActivationLayerInfo info, const QuantizationInfo &oq_info = QuantizationInfo()); } // namespace reference } // namespace validation } // namespace test |