From 4b3fba1850fdf84ba3f9a0c98acf3de672330b34 Mon Sep 17 00:00:00 2001
From: Georgios Pinitas
Date: Tue, 4 Jun 2019 17:31:46 +0100
Subject: COMPMID-2372: Add support for QASYMM8 for Tanh

-Perform calculations in the floating point domain
-Extends checks for Logistic as scale should be 1/256 and offset 0

Change-Id: I90ef4a042f053976936f5d28f8e09b54eec196a2
Signed-off-by: Georgios Pinitas
Reviewed-on: https://review.mlplatform.org/c/1287
Tested-by: Arm Jenkins
Reviewed-by: Michalis Spyrou
Comments-Addressed: Arm Jenkins
---
 tests/validation/fixtures/ActivationLayerFixture.h | 53 +++++++++++++++-------
 1 file changed, 36 insertions(+), 17 deletions(-)

diff --git a/tests/validation/fixtures/ActivationLayerFixture.h b/tests/validation/fixtures/ActivationLayerFixture.h
index d29d67c8e6..464382a1ec 100644
--- a/tests/validation/fixtures/ActivationLayerFixture.h
+++ b/tests/validation/fixtures/ActivationLayerFixture.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2018 ARM Limited.
+ * Copyright (c) 2017-2019 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -49,14 +49,16 @@ public:
     template <typename T>
     void setup(TensorShape shape, bool in_place, ActivationLayerInfo::ActivationFunction function, float alpha_beta, DataType data_type, QuantizationInfo quantization_info)
     {
-        _quantization_info = quantization_info;
-        _data_type         = data_type;
-        _function          = function;
-
         ActivationLayerInfo info(function, alpha_beta, alpha_beta);
 
-        _target    = compute_target(shape, in_place, info, data_type, quantization_info);
-        _reference = compute_reference(shape, info, data_type, quantization_info);
+        _in_place                 = in_place;
+        _output_quantization_info = calculate_output_quantization_info(info, quantization_info);
+        _input_quantization_info  = in_place ? _output_quantization_info : quantization_info;
+        _data_type = data_type;
+        _function  = function;
+
+        _target    = compute_target(shape, info);
+        _reference = compute_reference(shape, info);
     }
 
 protected:
@@ -85,16 +87,16 @@ protected:
         }
     }
 
-    TensorType compute_target(const TensorShape &shape, bool in_place, ActivationLayerInfo info, DataType data_type, QuantizationInfo quantization_info)
+    TensorType compute_target(const TensorShape &shape, ActivationLayerInfo info)
     {
         // Create tensors
-        TensorType src = create_tensor<TensorType>(shape, data_type, 1, quantization_info);
-        TensorType dst = create_tensor<TensorType>(shape, data_type, 1, quantization_info);
+        TensorType src = create_tensor<TensorType>(shape, _data_type, 1, _input_quantization_info);
+        TensorType dst = create_tensor<TensorType>(shape, _data_type, 1, _output_quantization_info);
 
         // Create and configure function
         FunctionType act_layer;
 
-        TensorType *dst_ptr = in_place ? &src : &dst;
+        TensorType *dst_ptr = _in_place ? &src : &dst;
 
         act_layer.configure(&src, dst_ptr, info);
 
@@ -105,7 +107,7 @@ protected:
         src.allocator()->allocate();
         ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
 
-        if(!in_place)
+        if(!_in_place)
         {
             dst.allocator()->allocate();
             ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
@@ -117,7 +119,7 @@ protected:
         // Compute function
         act_layer.run();
 
-        if(in_place)
+        if(_in_place)
         {
             return src;
         }
@@ -127,20 +129,37 @@ protected:
         }
     }
 
-    SimpleTensor<T> compute_reference(const TensorShape &shape, ActivationLayerInfo info, DataType data_type, QuantizationInfo quantization_info)
+    SimpleTensor<T> compute_reference(const TensorShape &shape, ActivationLayerInfo info)
    {
         // Create reference
-        SimpleTensor<T> src{ shape, data_type, 1, quantization_info };
+        SimpleTensor<T> src{ shape, _data_type, 1, _input_quantization_info };
 
         // Fill reference
         fill(src);
 
-        return reference::activation_layer<T>(src, info);
+        return reference::activation_layer<T>(src, info, _output_quantization_info);
+    }
+
+private:
+    QuantizationInfo calculate_output_quantization_info(const ActivationLayerInfo &act_info, const QuantizationInfo &default_qinfo)
+    {
+        switch(act_info.activation())
+        {
+            case ActivationLayerInfo::ActivationFunction::TANH:
+                return QuantizationInfo(1.f / 128.f, 128);
+            case ActivationLayerInfo::ActivationFunction::LOGISTIC:
+                return QuantizationInfo(1.f / 256.f, 0);
+            default:
+                return default_qinfo;
+        }
     }
 
+protected:
     TensorType       _target{};
     SimpleTensor<T>  _reference{};
-    QuantizationInfo _quantization_info{};
+    bool             _in_place{};
+    QuantizationInfo _input_quantization_info{};
+    QuantizationInfo _output_quantization_info{};
     DataType         _data_type{};
     ActivationLayerInfo::ActivationFunction _function{};
 };
-- 
cgit v1.2.1
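
A note on the fixed quantization parameters introduced in calculate_output_quantization_info(): tanh saturates to [-1, 1], so an output scale of 1/128 with zero point 128 lets the QASYMM8 range [0, 255] cover [-1, 127/128], while logistic saturates to [0, 1], so a scale of 1/256 with zero point 0 covers [0, 255/256]. The sketch below is a self-contained C++ program, not Compute Library code; its quantize()/dequantize() helpers are hypothetical stand-ins for the affine QASYMM8 mapping real = scale * (q - offset). It computes both activations in the floating-point domain and round-trips the results through these parameters, mirroring what the commit message describes.

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <cstdio>
#include <initializer_list>

// Hypothetical helpers for the affine QASYMM8 scheme: real = scale * (q - offset).
static std::uint8_t quantize(float real, float scale, int offset)
{
    const int q = static_cast<int>(std::lround(real / scale)) + offset;
    return static_cast<std::uint8_t>(std::max(0, std::min(255, q)));   // clamp to [0, 255]
}

static float dequantize(std::uint8_t q, float scale, int offset)
{
    return scale * (static_cast<int>(q) - offset);
}

int main()
{
    // TANH outputs lie in [-1, 1]: scale 1/128 with offset 128 spans [-1, 127/128].
    const float tanh_scale  = 1.f / 128.f;
    const int   tanh_offset = 128;

    // LOGISTIC outputs lie in [0, 1]: scale 1/256 with offset 0 spans [0, 255/256].
    const float log_scale  = 1.f / 256.f;
    const int   log_offset = 0;

    for(float x : { -2.f, -0.5f, 0.f, 0.5f, 2.f })
    {
        // Compute in the floating-point domain, then quantize the result.
        const float        t  = std::tanh(x);
        const float        s  = 1.f / (1.f + std::exp(-x));
        const std::uint8_t qt = quantize(t, tanh_scale, tanh_offset);
        const std::uint8_t qs = quantize(s, log_scale, log_offset);

        std::printf("x=%5.2f  tanh=%8.5f -> %3d -> %8.5f  logistic=%8.5f -> %3d -> %8.5f\n",
                    x, t, qt, dequantize(qt, tanh_scale, tanh_offset),
                    s, qs, dequantize(qs, log_scale, log_offset));
    }
    return 0;
}

Compiled with any C++11 compiler (e.g. g++ -std=c++11), the round-tripped values stay within one quantization step (1/128 or 1/256) of the floating-point results, which is why the fixture pins these output QuantizationInfo values rather than reusing the input's.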