aboutsummaryrefslogtreecommitdiff
path: root/tests/validation/fixtures/FullyConnectedLayerFixture.h
diff options
context:
space:
mode:
authorVidhya Sudhan Loganathan <vidhyasudhan.loganathan@arm.com>2018-07-02 09:13:49 +0100
committerAnthony Barbier <anthony.barbier@arm.com>2018-11-02 16:54:10 +0000
commit014333d73883c3872e458cedda5ccef586a7ccd4 (patch)
tree0f28bbc1ab769993af91b40e4584061f6ed6d3fa /tests/validation/fixtures/FullyConnectedLayerFixture.h
parentde01468bbfff3a7d8bcbba3bfdf5698fb2e3b267 (diff)
downloadComputeLibrary-014333d73883c3872e458cedda5ccef586a7ccd4.tar.gz
COMPMID-970 : Remove QS8 / QS16 support
Removed Fixed point position arguments from test sources Change-Id: I8343724723b71611fd501ed34de0866d3fb60e7e Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/136382 Tested-by: Jenkins <bsgcomp@arm.com> Reviewed-by: Michele DiGiorgio <michele.digiorgio@arm.com> Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
Diffstat (limited to 'tests/validation/fixtures/FullyConnectedLayerFixture.h')
-rw-r--r--tests/validation/fixtures/FullyConnectedLayerFixture.h30
1 file changed, 14 insertions, 16 deletions
diff --git a/tests/validation/fixtures/FullyConnectedLayerFixture.h b/tests/validation/fixtures/FullyConnectedLayerFixture.h
index f23fc207a8..895e43b735 100644
--- a/tests/validation/fixtures/FullyConnectedLayerFixture.h
+++ b/tests/validation/fixtures/FullyConnectedLayerFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -54,14 +54,13 @@ public:
public:
template <typename...>
void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, bool transpose_weights, bool reshape_weights,
- DataType data_type, int fractional_bits, QuantizationInfo quantization_info)
+ DataType data_type, QuantizationInfo quantization_info)
{
ARM_COMPUTE_UNUSED(weights_shape);
ARM_COMPUTE_UNUSED(bias_shape);
_data_type = data_type;
_bias_data_type = is_data_type_quantized_asymmetric(data_type) ? DataType::S32 : data_type;
- _fractional_bits = fractional_bits;
_quantization_info = quantization_info;
_target = compute_target(input_shape, weights_shape, bias_shape, output_shape, transpose_weights, reshape_weights);
@@ -126,10 +125,10 @@ protected:
}
// Create tensors
- TensorType src = create_tensor<TensorType>(input_shape, _data_type, 1, _fractional_bits, _quantization_info);
- TensorType weights = create_tensor<TensorType>(reshaped_weights_shape, _data_type, 1, _fractional_bits, _quantization_info);
- TensorType bias = create_tensor<TensorType>(bias_shape, _bias_data_type, 1, _fractional_bits, _quantization_info);
- TensorType dst = create_tensor<TensorType>(output_shape, _data_type, 1, _fractional_bits, _quantization_info);
+ TensorType src = create_tensor<TensorType>(input_shape, _data_type, 1, _quantization_info);
+ TensorType weights = create_tensor<TensorType>(reshaped_weights_shape, _data_type, 1, _quantization_info);
+ TensorType bias = create_tensor<TensorType>(bias_shape, _bias_data_type, 1, _quantization_info);
+ TensorType dst = create_tensor<TensorType>(output_shape, _data_type, 1, _quantization_info);
// Create and configure function.
FunctionType fc;
@@ -158,7 +157,7 @@ protected:
if(!reshape_weights || !transpose_weights)
{
TensorShape tmp_shape(weights_shape);
- RawTensor tmp(tmp_shape, _data_type, 1, _fractional_bits);
+ RawTensor tmp(tmp_shape, _data_type, 1);
// Fill with original shape
fill(tmp, 1);
@@ -199,9 +198,9 @@ protected:
bool reshape_weights)
{
// Create reference
- SimpleTensor<T> src{ input_shape, _data_type, 1, _fractional_bits, _quantization_info };
- SimpleTensor<T> weights{ weights_shape, _data_type, 1, _fractional_bits, _quantization_info };
- SimpleTensor<TBias> bias{ bias_shape, _bias_data_type, 1, _fractional_bits, _quantization_info };
+ SimpleTensor<T> src{ input_shape, _data_type, 1, _quantization_info };
+ SimpleTensor<T> weights{ weights_shape, _data_type, 1, _quantization_info };
+ SimpleTensor<TBias> bias{ bias_shape, _bias_data_type, 1, _quantization_info };
// Fill reference
fill(src, 0);
@@ -215,7 +214,6 @@ protected:
SimpleTensor<T> _reference{};
DataType _data_type{};
DataType _bias_data_type{};
- int _fractional_bits{};
QuantizationInfo _quantization_info{};
};
@@ -228,7 +226,7 @@ public:
{
FullyConnectedLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T, run_interleave>::setup(input_shape, weights_shape, bias_shape, output_shape, transpose_weights,
reshape_weights, data_type,
- 0, QuantizationInfo());
+ QuantizationInfo());
}
};
@@ -237,11 +235,11 @@ class FullyConnectedLayerValidationFixedPointFixture : public FullyConnectedLaye
{
public:
template <typename...>
- void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, bool transpose_weights, bool reshape_weights, DataType data_type, int fractional_bits)
+ void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, bool transpose_weights, bool reshape_weights, DataType data_type)
{
FullyConnectedLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T, run_interleave>::setup(input_shape, weights_shape, bias_shape, output_shape, transpose_weights,
reshape_weights, data_type,
- fractional_bits, QuantizationInfo());
+ QuantizationInfo());
}
};
@@ -255,7 +253,7 @@ public:
{
FullyConnectedLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T, run_interleave>::setup(input_shape, weights_shape, bias_shape, output_shape, transpose_weights,
reshape_weights, data_type,
- 0, quantization_info);
+ quantization_info);
}
};
} // namespace validation