about summary refs log tree commit diff
path: root/tests/validation/fixtures/ActivationLayerFixture.h
diff options
context:
space:
mode:
author    Georgios Pinitas <georgios.pinitas@arm.com>  2019-06-04 17:31:46 +0100
committer Georgios Pinitas <georgios.pinitas@arm.com>  2019-06-05 14:14:48 +0000
commit    4b3fba1850fdf84ba3f9a0c98acf3de672330b34 (patch)
tree      1b65639ec7387c474903583ff0927918c8c7d837 /tests/validation/fixtures/ActivationLayerFixture.h
parent    c625acd2a60a4fe34633c5cecef85c230933f772 (diff)
download  ComputeLibrary-4b3fba1850fdf84ba3f9a0c98acf3de672330b34.tar.gz
COMPMID-2372: Add support for QASYMM8 for Tanh
- Perform calculations in the floating-point domain.
- Extend checks for Logistic, as scale should be 1/256 and offset 0.

Change-Id: I90ef4a042f053976936f5d28f8e09b54eec196a2
Signed-off-by: Georgios Pinitas <georgios.pinitas@arm.com>
Reviewed-on: https://review.mlplatform.org/c/1287
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Michalis Spyrou <michalis.spyrou@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Diffstat (limited to 'tests/validation/fixtures/ActivationLayerFixture.h')
-rw-r--r--  tests/validation/fixtures/ActivationLayerFixture.h  53
1 file changed, 36 insertions, 17 deletions
diff --git a/tests/validation/fixtures/ActivationLayerFixture.h b/tests/validation/fixtures/ActivationLayerFixture.h
index d29d67c8e6..464382a1ec 100644
--- a/tests/validation/fixtures/ActivationLayerFixture.h
+++ b/tests/validation/fixtures/ActivationLayerFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018 ARM Limited.
+ * Copyright (c) 2017-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -49,14 +49,16 @@ public:
template <typename...>
void setup(TensorShape shape, bool in_place, ActivationLayerInfo::ActivationFunction function, float alpha_beta, DataType data_type, QuantizationInfo quantization_info)
{
- _quantization_info = quantization_info;
- _data_type = data_type;
- _function = function;
-
ActivationLayerInfo info(function, alpha_beta, alpha_beta);
- _target = compute_target(shape, in_place, info, data_type, quantization_info);
- _reference = compute_reference(shape, info, data_type, quantization_info);
+ _in_place = in_place;
+ _output_quantization_info = calculate_output_quantization_info(info, quantization_info);
+ _input_quantization_info = in_place ? _output_quantization_info : quantization_info;
+ _data_type = data_type;
+ _function = function;
+
+ _target = compute_target(shape, info);
+ _reference = compute_reference(shape, info);
}
protected:
@@ -85,16 +87,16 @@ protected:
}
}
- TensorType compute_target(const TensorShape &shape, bool in_place, ActivationLayerInfo info, DataType data_type, QuantizationInfo quantization_info)
+ TensorType compute_target(const TensorShape &shape, ActivationLayerInfo info)
{
// Create tensors
- TensorType src = create_tensor<TensorType>(shape, data_type, 1, quantization_info);
- TensorType dst = create_tensor<TensorType>(shape, data_type, 1, quantization_info);
+ TensorType src = create_tensor<TensorType>(shape, _data_type, 1, _input_quantization_info);
+ TensorType dst = create_tensor<TensorType>(shape, _data_type, 1, _output_quantization_info);
// Create and configure function
FunctionType act_layer;
- TensorType *dst_ptr = in_place ? &src : &dst;
+ TensorType *dst_ptr = _in_place ? &src : &dst;
act_layer.configure(&src, dst_ptr, info);
@@ -105,7 +107,7 @@ protected:
src.allocator()->allocate();
ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
- if(!in_place)
+ if(!_in_place)
{
dst.allocator()->allocate();
ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
@@ -117,7 +119,7 @@ protected:
// Compute function
act_layer.run();
- if(in_place)
+ if(_in_place)
{
return src;
}
@@ -127,20 +129,37 @@ protected:
}
}
- SimpleTensor<T> compute_reference(const TensorShape &shape, ActivationLayerInfo info, DataType data_type, QuantizationInfo quantization_info)
+ SimpleTensor<T> compute_reference(const TensorShape &shape, ActivationLayerInfo info)
{
// Create reference
- SimpleTensor<T> src{ shape, data_type, 1, quantization_info };
+ SimpleTensor<T> src{ shape, _data_type, 1, _input_quantization_info };
// Fill reference
fill(src);
- return reference::activation_layer<T>(src, info);
+ return reference::activation_layer<T>(src, info, _output_quantization_info);
+ }
+
+private:
+ QuantizationInfo calculate_output_quantization_info(const ActivationLayerInfo &act_info, const QuantizationInfo &default_qinfo)
+ {
+ switch(act_info.activation())
+ {
+ case ActivationLayerInfo::ActivationFunction::TANH:
+ return QuantizationInfo(1.f / 128.f, 128);
+ case ActivationLayerInfo::ActivationFunction::LOGISTIC:
+ return QuantizationInfo(1.f / 256.f, 0);
+ default:
+ return default_qinfo;
+ }
}
+protected:
TensorType _target{};
SimpleTensor<T> _reference{};
- QuantizationInfo _quantization_info{};
+ bool _in_place{};
+ QuantizationInfo _input_quantization_info{};
+ QuantizationInfo _output_quantization_info{};
DataType _data_type{};
ActivationLayerInfo::ActivationFunction _function{};
};