From af6204c331eed7894ec4c5fd4e98ec22b6dac676 Mon Sep 17 00:00:00 2001
From: Anton Lokhmotov
Date: Wed, 8 Nov 2017 09:34:19 +0000
Subject: COMPMID-661: Add avgpool-uint8 support. Optimize avgpool-fp32 for Bifrost. (#13)

Change-Id: I32ba6afbac6694ffa053dd16f03a1b3d14627a19
Reviewed-on: http://mpd-gerrit.cambridge.arm.com/94857
Tested-by: Kaizen
Reviewed-by: Anthony Barbier
---
 tests/validation/fixtures/PoolingLayerFixture.h | 68 ++++++++++++++++++-------
 1 file changed, 51 insertions(+), 17 deletions(-)

diff --git a/tests/validation/fixtures/PoolingLayerFixture.h b/tests/validation/fixtures/PoolingLayerFixture.h
index 09b9e0ef1a..d6190e2977 100644
--- a/tests/validation/fixtures/PoolingLayerFixture.h
+++ b/tests/validation/fixtures/PoolingLayerFixture.h
@@ -43,28 +43,34 @@ namespace test
 namespace validation
 {
 template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
-class PoolingLayerValidationFixedPointFixture : public framework::Fixture
+class PoolingLayerValidationGenericFixture : public framework::Fixture
 {
 public:
     template <typename...>
-    void setup(TensorShape shape, PoolingType pool_type, int pool_size, PadStrideInfo pad_stride_info, bool exclude_padding, DataType data_type, int fractional_bits)
+    void setup(TensorShape shape, PoolingType pool_type, int pool_size, PadStrideInfo pad_stride_info, bool exclude_padding,
+               DataType data_type, int fractional_bits, QuantizationInfo quantization_info)
     {
-        _fractional_bits = fractional_bits;
+        _fractional_bits   = fractional_bits;
+        _quantization_info = quantization_info;
         PoolingLayerInfo info(pool_type, pool_size, pad_stride_info, exclude_padding);
 
-        _target    = compute_target(shape, info, data_type, fractional_bits);
-        _reference = compute_reference(shape, info, data_type, fractional_bits);
+        _target    = compute_target(shape, info, data_type, fractional_bits, quantization_info);
+        _reference = compute_reference(shape, info, data_type, fractional_bits, quantization_info);
     }
 
 protected:
     template <typename U>
     void fill(U &&tensor)
     {
-        if(_fractional_bits == 0)
+        if(!is_data_type_quantized(tensor.data_type()))
         {
             std::uniform_real_distribution<> distribution(-1.f, 1.f);
             library->fill(tensor, distribution, 0);
         }
+        else if(is_data_type_quantized_asymmetric(tensor.data_type()))
+        {
+            library->fill_tensor_uniform(tensor, 0);
+        }
         else
         {
             const int one_fixed = 1 << _fractional_bits;
@@ -73,10 +79,11 @@ protected:
         }
     }
 
-    TensorType compute_target(const TensorShape &shape, PoolingLayerInfo info, DataType data_type, int fixed_point_position = 0)
+    TensorType compute_target(const TensorShape &shape, PoolingLayerInfo info,
+                              DataType data_type, int fixed_point_position, QuantizationInfo quantization_info)
     {
         // Create tensors
-        TensorType src = create_tensor<TensorType>(shape, data_type, 1, fixed_point_position);
+        TensorType src = create_tensor<TensorType>(shape, data_type, 1, fixed_point_position, quantization_info);
         TensorType dst;
 
         // Create and configure function
@@ -102,10 +109,11 @@
         return dst;
     }
 
-    SimpleTensor<T> compute_reference(const TensorShape &shape, PoolingLayerInfo info, DataType data_type, int fixed_point_position = 0)
+    SimpleTensor<T> compute_reference(const TensorShape &shape, PoolingLayerInfo info,
+                                      DataType data_type, int fixed_point_position, QuantizationInfo quantization_info)
     {
         // Create reference
-        SimpleTensor<T> src{ shape, data_type, 1, fixed_point_position };
+        SimpleTensor<T> src{ shape, data_type, 1, fixed_point_position, quantization_info };
 
         // Fill reference
         fill(src);
@@ -113,30 +121,56 @@
         return reference::pooling_layer<T>(src, info);
     }
 
-    TensorType      _target{};
-    SimpleTensor<T> _reference{};
-    int             _fractional_bits{};
+    TensorType       _target{};
+    SimpleTensor<T>  _reference{};
+    int              _fractional_bits{};
+    QuantizationInfo _quantization_info{};
 };
 
 template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
-class PoolingLayerValidationFixture : public PoolingLayerValidationFixedPointFixture<TensorType, AccessorType, FunctionType, T>
+class PoolingLayerValidationFixture : public PoolingLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
 {
 public:
     template <typename...>
     void setup(TensorShape shape, PoolingType pool_type, int pool_size, PadStrideInfo pad_stride_info, bool exclude_padding, DataType data_type)
     {
-        PoolingLayerValidationFixedPointFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, pool_type, pool_size, pad_stride_info, exclude_padding, data_type, 0);
+        PoolingLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, pool_type, pool_size, pad_stride_info, exclude_padding,
+                                                                                               data_type, 0, QuantizationInfo());
+    }
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class PoolingLayerValidationFixedPointFixture : public PoolingLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+    template <typename...>
+    void setup(TensorShape shape, PoolingType pool_type, int pool_size, PadStrideInfo pad_stride_info, bool exclude_padding, DataType data_type, int fractional_bits)
+    {
+        PoolingLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, pool_type, pool_size, pad_stride_info, exclude_padding,
+                                                                                               data_type, fractional_bits, QuantizationInfo());
+    }
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class PoolingLayerValidationQuantizedFixture : public PoolingLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+    template <typename...>
+    void setup(TensorShape shape, PoolingType pool_type, int pool_size, PadStrideInfo pad_stride_info, bool exclude_padding, DataType data_type, QuantizationInfo quantization_info)
+    {
+        PoolingLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, pool_type, pool_size, pad_stride_info, exclude_padding,
+                                                                                               data_type, 0, quantization_info);
     }
 };
 
 template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
-class GlobalPoolingLayerValidationFixture : public PoolingLayerValidationFixedPointFixture<TensorType, AccessorType, FunctionType, T>
+class GlobalPoolingLayerValidationFixture : public PoolingLayerValidationFixture<TensorType, AccessorType, FunctionType, T>
 {
 public:
     template <typename...>
     void setup(TensorShape shape, PoolingType pool_type, DataType data_type)
     {
-        PoolingLayerValidationFixedPointFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, pool_type, shape.x(), PadStrideInfo(1, 1, 0, 0), true, data_type, 0);
+        PoolingLayerValidationFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, pool_type, shape.x(), PadStrideInfo(1, 1, 0, 0), true, data_type);
     }
 };
 } // namespace validation
-- 
cgit v1.2.1
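
Usage sketch: the new PoolingLayerValidationQuantizedFixture would typically be registered in a backend test suite along the lines below, assuming the usual Compute Library validation macros and CL backend types (CLTensor, CLAccessor, CLPoolingLayer). PoolingLayerDatasetQASYMM8 and tolerance_qasymm8 are illustrative placeholders, standing in for a dataset that supplies the pooling type/size/pad-stride/exclude-padding values and for an output tolerance; they are not introduced by this patch.

    // Hypothetical registration, e.g. in tests/validation/CL/PoolingLayer.cpp
    constexpr AbsoluteTolerance<uint8_t> tolerance_qasymm8(1); // illustrative tolerance for QASYMM8 outputs

    template <typename T>
    using CLPoolingLayerQuantizedFixture = PoolingLayerValidationQuantizedFixture<CLTensor, CLAccessor, CLPoolingLayer, T>;

    TEST_SUITE(Quantized)
    TEST_SUITE(QASYMM8)
    FIXTURE_DATA_TEST_CASE(RunSmall, CLPoolingLayerQuantizedFixture<uint8_t>, framework::DatasetMode::PRECOMMIT,
                           combine(combine(combine(datasets::SmallShapes(),
                                                   PoolingLayerDatasetQASYMM8), // pool type/size/pad-stride/exclude-padding (placeholder)
                                           framework::dataset::make("DataType", DataType::QASYMM8)),
                                   framework::dataset::make("QuantizationInfo", { QuantizationInfo(1.f / 255, 127) })))
    {
        // Compare the target run against the reference computed by the fixture
        validate(CLAccessor(_target), _reference, tolerance_qasymm8);
    }
    TEST_SUITE_END()
    TEST_SUITE_END()

Because the quantized fixture forwards fractional_bits = 0 together with the supplied QuantizationInfo to PoolingLayerValidationGenericFixture, the same registration pattern covers QASYMM8 average pooling without touching the float or fixed-point paths.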