From dbdea0d1c025b18d4d82c278c87454427918f5b4 Mon Sep 17 00:00:00 2001
From: Georgios Pinitas
Date: Wed, 16 Oct 2019 19:21:40 +0100
Subject: COMPMID-2308: NEConvolutionLayer: support QUANT8_SYMM_PER_CHANNEL filters

Change-Id: Ic1bf5f0d21ccd525f84213a360f7e199d7f50577
Signed-off-by: Georgios Pinitas
Reviewed-on: https://review.mlplatform.org/c/2177
Tested-by: Arm Jenkins
Reviewed-by: Michele Di Giorgio
Comments-Addressed: Arm Jenkins
---
 .../validation/fixtures/ConvolutionLayerFixture.h | 77 +++++++++++++++++-----
 1 file changed, 61 insertions(+), 16 deletions(-)

diff --git a/tests/validation/fixtures/ConvolutionLayerFixture.h b/tests/validation/fixtures/ConvolutionLayerFixture.h
index 52fa8da60b..c5cddc28db 100644
--- a/tests/validation/fixtures/ConvolutionLayerFixture.h
+++ b/tests/validation/fixtures/ConvolutionLayerFixture.h
@@ -48,7 +48,7 @@ namespace test
 {
 namespace validation
 {
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T, typename TW>
 class ConvolutionValidationGenericFixture : public framework::Fixture
 {
 public:
@@ -57,13 +57,15 @@ public:
 public:
     template <typename...>
     void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation, bool reshape_weights,
-               DataType data_type, DataLayout data_layout, QuantizationInfo quantization_info, ActivationLayerInfo act_info)
+               DataType data_type, DataType weights_data_type, DataLayout data_layout, QuantizationInfo quantization_info, QuantizationInfo weight_quantization_info, ActivationLayerInfo act_info)
     {
-        _data_type         = data_type;
-        _is_quantized      = is_data_type_quantized_asymmetric(data_type);
-        _bias_data_type    = _is_quantized ? DataType::S32 : data_type;
-        _quantization_info = quantization_info;
-        _data_layout       = data_layout;
+        _data_type                = data_type;
+        _weights_data_type        = weights_data_type;
+        _is_quantized             = is_data_type_quantized_asymmetric(data_type);
+        _bias_data_type           = _is_quantized ? DataType::S32 : data_type;
+        _quantization_info        = quantization_info;
+        _weight_quantization_info = weight_quantization_info;
+        _data_layout              = data_layout;
 
         _target    = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, reshape_weights, dilation, act_info);
         _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, dilation, act_info);
@@ -82,6 +84,26 @@ protected:
                 library->fill(tensor, distribution, i);
                 break;
             }
+            case DataType::QSYMM8_PER_CHANNEL:
+            {
+                int min_bound = 128;
+                int max_bound = -127;
+                for(size_t i = 0; i < _weight_quantization_info.scale().size(); i++)
+                {
+                    std::pair<int, int> bounds = get_symm_quantized_per_channel_bounds(tensor.quantization_info(), -1.0f, 1.0f, i);
+                    if(bounds.first < min_bound)
+                    {
+                        min_bound = bounds.first;
+                    }
+                    if(bounds.second > max_bound)
+                    {
+                        max_bound = bounds.second;
+                    }
+                }
+                std::uniform_int_distribution<int8_t> distribution(min_bound, max_bound);
+                library->fill(tensor, distribution, i);
+                break;
+            }
             case DataType::S32:
             {
                 std::uniform_int_distribution<int32_t> distribution(-100, 100);
@@ -122,7 +144,7 @@
 
         // Create tensors
         TensorType src     = create_tensor<TensorType>(input_shape, _data_type, 1, _quantization_info, _data_layout);
-        TensorType weights = create_tensor<TensorType>(reshaped_weights_shape, _data_type, 1, _quantization_info, _data_layout);
+        TensorType weights = create_tensor<TensorType>(reshaped_weights_shape, _weights_data_type, 1, _weight_quantization_info, _data_layout);
         TensorType bias    = create_tensor<TensorType>(bias_shape, _bias_data_type, 1, _quantization_info, _data_layout);
         TensorType dst     = create_tensor<TensorType>(output_shape, _data_type, 1, _quantization_info, _data_layout);
 
@@ -166,7 +188,7 @@
 
         // Create reference
         SimpleTensor<T>     src{ input_shape, _data_type, 1, _quantization_info };
-        SimpleTensor<T>     weights{ weights_shape, _data_type, 1, _quantization_info };
+        SimpleTensor<TW>    weights{ weights_shape, _weights_data_type, 1, _weight_quantization_info };
        SimpleTensor<TBias> bias{ bias_shape, _bias_data_type, 1, _quantization_info };
 
         // Fill reference
@@ -182,36 +204,59 @@ protected:
     TensorType       _target{};
     SimpleTensor<T>  _reference{};
     DataType         _data_type{};
+    DataType         _weights_data_type{};
     DataType         _bias_data_type{};
     DataLayout       _data_layout{};
     QuantizationInfo _quantization_info{};
+    QuantizationInfo _weight_quantization_info{};
     bool             _is_quantized = false;
 };
 
 template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
-class ConvolutionValidationFixture : public ConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
+class ConvolutionValidationFixture : public ConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T, T>
 {
 public:
     template <typename...>
     void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation, bool reshape_weights, DataType data_type,
               DataLayout data_layout, ActivationLayerInfo act_info)
     {
-        ConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, dilation, reshape_weights,
-                                                                                              data_type, data_layout,
-                                                                                              QuantizationInfo(), act_info);
+        ConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, dilation, reshape_weights,
+                                                                                                  data_type, data_type, data_layout,
+                                                                                                  QuantizationInfo(), QuantizationInfo(), act_info);
     }
 };
 
 template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
-class ConvolutionValidationQuantizedFixture : public ConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
+class ConvolutionValidationQuantizedFixture : public ConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T, T>
 {
 public:
     template <typename...>
     void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation,
               bool reshape_weights, DataType data_type, DataLayout data_layout, QuantizationInfo quantization_info, ActivationLayerInfo act_info)
     {
-        ConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, dilation, reshape_weights,
-                                                                                              data_type, data_layout, quantization_info, act_info);
+        ConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, dilation, reshape_weights,
+                                                                                                  data_type, data_type, data_layout, quantization_info, quantization_info, act_info);
+    }
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T, typename TW>
+class ConvolutionValidationQuantizedPerChannelFixture : public ConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T, TW>
+{
+public:
+    template <typename...>
+    void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation, bool reshape_weights, DataType data_type,
+               DataLayout data_layout, QuantizationInfo quantization_info, ActivationLayerInfo act_info, DataType weights_data_type)
+    {
+        std::vector<float> weights_scales{};
+        std::mt19937       gen(library->seed());
+        std::uniform_real_distribution<> dis(0.01f, 1);
+        for(size_t i = 0; i < output_shape[2]; ++i)
+        {
+            weights_scales.push_back(dis(gen));
+        }
+        ConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T, TW>::setup(input_shape, weights_shape, bias_shape, output_shape, info, dilation,
+                                                                                                  reshape_weights, data_type, weights_data_type, data_layout,
+                                                                                                  quantization_info, QuantizationInfo(weights_scales), act_info);
     }
 };
 } // namespace validation
-- 
cgit v1.2.1
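
Note (not part of the patch): the new QSYMM8_PER_CHANNEL fill path above derives a single integer range wide enough to cover the symmetric quantization bounds of every channel, and the new per-channel fixture draws one random scale per output channel (seeded from library->seed() so runs are reproducible) before delegating to the generic fixture via the extra TW template parameter. The standalone C++ sketch below reproduces that idea without any Compute Library types; quantize_symm, symm_bounds and the fixed [-1.0f, 1.0f] fill range are illustrative stand-ins rather than library APIs, assuming symmetric int8 quantization q = clamp(round(v / scale), -127, 127).

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <iostream>
#include <random>
#include <utility>
#include <vector>

// Quantize a float value with a per-channel scale, symmetric around zero (hypothetical helper).
static int8_t quantize_symm(float value, float scale)
{
    const int q = static_cast<int>(std::lround(value / scale));
    return static_cast<int8_t>(std::min(127, std::max(-127, q)));
}

// Quantized bounds of [min, max] for one channel; analogous in spirit to the
// get_symm_quantized_per_channel_bounds() helper used by the fixture.
static std::pair<int, int> symm_bounds(float min, float max, float scale)
{
    return { quantize_symm(min, scale), quantize_symm(max, scale) };
}

int main()
{
    // One random scale per output channel, as the per-channel fixture does.
    std::mt19937                     gen(0);
    std::uniform_real_distribution<> dis(0.01f, 1.0f);
    std::vector<float>               weights_scales(8);
    for(auto &s : weights_scales)
    {
        s = dis(gen);
    }

    // Track the widest bounds across channels so one integer distribution
    // can cover every channel, mirroring the fixture's fill() logic.
    int min_bound = 128;
    int max_bound = -127;
    for(const float scale : weights_scales)
    {
        const auto bounds = symm_bounds(-1.0f, 1.0f, scale);
        min_bound         = std::min(min_bound, bounds.first);
        max_bound         = std::max(max_bound, bounds.second);
    }

    std::uniform_int_distribution<int> distribution(min_bound, max_bound);
    std::cout << "fill range: [" << min_bound << ", " << max_bound << "]\n";
    std::cout << "sample value: " << distribution(gen) << "\n";
    return 0;
}

Drawing fill values from the union of the per-channel ranges keeps the fill logic independent of how channels are laid out in the tensor, which is the same trade-off the fixture makes when it builds one distribution from the per-channel bounds.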