From 540d008180a39a84770e51b4bc891cd2ff85980e Mon Sep 17 00:00:00 2001
From: Georgios Pinitas
Date: Fri, 17 Nov 2017 10:55:00 +0000
Subject: COMPMID-556: Fixes bias in CLDirectConvolutionLayer to be int32.

Biases were incorrectly passed as uchar8 in asymmetric quantized
calculations while they should be in int32.

Change-Id: I461f5e4ef6eb44374c1094378a1bfe9ade5e247d
Reviewed-on: http://mpd-gerrit.cambridge.arm.com/96244
Tested-by: Kaizen
Reviewed-by: Gian Marco Iodice
Reviewed-by: Anthony Barbier
---
 .../fixtures/DirectConvolutionLayerFixture.h       | 22 +++++++++++++---------
 1 file changed, 13 insertions(+), 9 deletions(-)

(limited to 'tests/validation/fixtures')

diff --git a/tests/validation/fixtures/DirectConvolutionLayerFixture.h b/tests/validation/fixtures/DirectConvolutionLayerFixture.h
index e302657158..279a4897eb 100644
--- a/tests/validation/fixtures/DirectConvolutionLayerFixture.h
+++ b/tests/validation/fixtures/DirectConvolutionLayerFixture.h
@@ -43,6 +43,9 @@ namespace validation
 template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
 class DirectConvolutionValidationGenericFixture : public framework::Fixture
 {
+public:
+    using TBias = typename std::conditional<std::is_same<typename std::decay<T>::type, uint8_t>::value, int32_t, T>::type;
+
 public:
     template <typename...>
     void setup(TensorShape input_shape, int stride_x, int stride_y, int pad_x, int pad_y, unsigned int kernel_size, unsigned int num_kernels,
@@ -55,10 +58,11 @@ public:
         const TensorShape   weights_shape(kernel_size, kernel_size, input_shape.z(), num_kernels);
         const TensorShape   bias_shape(num_kernels);
         const PadStrideInfo info(stride_x, stride_y, pad_x, pad_y, DimensionRoundingType::FLOOR);
-        const TensorShape   output_shape = get_output_shape(input_shape, weights_shape, info);
+        const TensorShape   output_shape   = get_output_shape(input_shape, weights_shape, info);
+        const DataType      bias_data_type = is_data_type_quantized_asymmetric(data_type) ? DataType::S32 : data_type;
 
-        _target    = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, data_type, fractional_bits, quantization_info);
-        _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, data_type, fractional_bits, quantization_info);
+        _target    = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, data_type, bias_data_type, fractional_bits, quantization_info);
+        _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, data_type, bias_data_type, fractional_bits, quantization_info);
     }
 
 protected:
@@ -86,12 +90,12 @@ protected:
     }
 
     TensorType compute_target(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &bias_shape, const TensorShape &output_shape, const PadStrideInfo &info,
-                              DataType data_type, int fixed_point_position, QuantizationInfo quantization_info)
+                              DataType data_type, DataType bias_data_type, int fixed_point_position, QuantizationInfo quantization_info)
     {
         // Create tensors
         TensorType src     = create_tensor<TensorType>(input_shape, data_type, 1, fixed_point_position, quantization_info);
         TensorType weights = create_tensor<TensorType>(weights_shape, data_type, 1, fixed_point_position, quantization_info);
-        TensorType bias    = create_tensor<TensorType>(bias_shape, data_type, 1, fixed_point_position, quantization_info);
+        TensorType bias    = create_tensor<TensorType>(bias_shape, bias_data_type, 1, fixed_point_position, quantization_info);
         TensorType dst     = create_tensor<TensorType>(output_shape, data_type, 1, fixed_point_position, quantization_info);
 
         // Create and configure function
@@ -126,12 +130,12 @@ protected:
     }
 
     SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &bias_shape, const TensorShape &output_shape, const PadStrideInfo &info,
-                                      DataType data_type, int fixed_point_position, QuantizationInfo quantization_info)
+                                      DataType data_type, DataType bias_data_type, int fixed_point_position, QuantizationInfo quantization_info)
     {
         // Create reference
-        SimpleTensor<T> src{ input_shape, data_type, 1, fixed_point_position, quantization_info };
-        SimpleTensor<T> weights{ weights_shape, data_type, 1, fixed_point_position, quantization_info };
-        SimpleTensor<T> bias{ bias_shape, data_type, 1, fixed_point_position, quantization_info };
+        SimpleTensor<T>     src{ input_shape, data_type, 1, fixed_point_position, quantization_info };
+        SimpleTensor<T>     weights{ weights_shape, data_type, 1, fixed_point_position, quantization_info };
+        SimpleTensor<TBias> bias{ bias_shape, bias_data_type, 1, fixed_point_position, quantization_info };
 
         // Fill reference
         fill(src, 0);
-- 
cgit v1.2.1