From ed5a492ba791d8c8b3334749d4ae946b8f11d13d Mon Sep 17 00:00:00 2001
From: Michele Di Giorgio
Date: Thu, 13 Sep 2018 16:22:01 +0100
Subject: COMPMID-1586: Add support for NHWC CLDeconvolutionLayer

COMPMID-1651: Fix QASYMM8 CLDeconvolutionLayer

This patch also extends the range of values used for testing Convolution
and Deconvolution to cover quantized [-1.0f, 1.0f].

Change-Id: I8b280669db67bb3ec25bf5d411c8f5954f5b0dab
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/149869
Reviewed-by: Michalis Spyrou
Tested-by: bsgcomp
---
 .../validation/fixtures/ConvolutionLayerFixture.h  |  3 +-
 .../fixtures/DeconvolutionLayerFixture.h           | 73 ++++++++++++++--------
 .../fixtures/NormalizePlanarYUVLayerFixture.h      |  9 ++-
 tests/validation/fixtures/ReduceMeanFixture.h      |  7 +--
 4 files changed, 56 insertions(+), 36 deletions(-)

(limited to 'tests/validation/fixtures')

diff --git a/tests/validation/fixtures/ConvolutionLayerFixture.h b/tests/validation/fixtures/ConvolutionLayerFixture.h
index 3b420eac09..795b9de6cd 100644
--- a/tests/validation/fixtures/ConvolutionLayerFixture.h
+++ b/tests/validation/fixtures/ConvolutionLayerFixture.h
@@ -77,7 +77,8 @@ protected:
         {
             case DataType::QASYMM8:
             {
-                std::uniform_int_distribution<uint8_t> distribution(0, 3);
+                std::pair<int, int>                    bounds = get_quantized_bounds(tensor.quantization_info(), -1.0f, 1.0f);
+                std::uniform_int_distribution<uint8_t> distribution(bounds.first, bounds.second);
                 library->fill(tensor, distribution, i);
                 break;
             }
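[Note] get_quantized_bounds() is a validation helper whose body is not part of this
patch. As a rough standalone sketch of the mapping it is expected to perform --
assuming the usual QASYMM8 affine scheme q = round(v / scale) + offset, clamped to
[0, 255] -- the new fill bounds could be derived as follows (illustrative C++ only,
not the library implementation; QuantInfo is a stand-in for arm_compute::QuantizationInfo):

    #include <algorithm>
    #include <cmath>
    #include <cstdio>
    #include <utility>

    // Hypothetical stand-in for the (scale, zero-point offset) pair carried by a tensor.
    struct QuantInfo
    {
        float scale{ 1.0f };
        int   offset{ 0 };
    };

    // QASYMM8 quantization: q = round(v / scale) + offset, clamped to the uint8 range.
    int quantize_qasymm8(float value, const QuantInfo &qi)
    {
        const int q = static_cast<int>(std::lround(value / qi.scale)) + qi.offset;
        return std::min(255, std::max(0, q));
    }

    // What get_quantized_bounds(quant_info, min, max) is assumed to return:
    // the quantized endpoints of the requested float interval.
    std::pair<int, int> quantized_bounds(const QuantInfo &qi, float min, float max)
    {
        return { quantize_qasymm8(min, qi), quantize_qasymm8(max, qi) };
    }

    int main()
    {
        const QuantInfo qi{ 1.0f / 127.0f, 128 }; // example parameters covering roughly [-1, 1]
        const auto      bounds = quantized_bounds(qi, -1.0f, 1.0f);
        std::printf("fill range: [%d, %d]\n", bounds.first, bounds.second);
        return 0;
    }

Compared with the old fixed (0, 3) range, deriving the bounds from the tensor's own
QuantizationInfo lets the fill exercise most of the representable [-1.0f, 1.0f]
interval regardless of the chosen scale and offset.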
diff --git a/tests/validation/fixtures/DeconvolutionLayerFixture.h b/tests/validation/fixtures/DeconvolutionLayerFixture.h
index d3a7be74b0..85c7ed5604 100644
--- a/tests/validation/fixtures/DeconvolutionLayerFixture.h
+++ b/tests/validation/fixtures/DeconvolutionLayerFixture.h
@@ -23,6 +23,7 @@
  */
 #include "arm_compute/core/TensorShape.h"
 #include "arm_compute/core/Types.h"
+#include "arm_compute/core/utils/misc/ShapeCalculator.h"
 #include "tests/AssetsLibrary.h"
 #include "tests/Globals.h"
 #include "tests/IAccessor.h"
@@ -39,6 +40,8 @@ namespace test
 {
 namespace validation
 {
+using namespace arm_compute::misc::shape_calculator;
+
 template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
 class DeconvolutionLayerFixtureBase : public framework::Fixture
 {
@@ -48,12 +51,15 @@ public:
 public:
     template <typename...>
     void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info,
-               const std::pair<unsigned int, unsigned int> &inner_border, DataType data_type, QuantizationInfo quantization_info)
+               const std::pair<unsigned int, unsigned int> &inner_border, DataType data_type, DataLayout data_layout, QuantizationInfo quantization_info)
     {
-        _data_type         = data_type;
+        _data_type         = data_type;
+        _bias_data_type    = is_data_type_quantized_asymmetric(data_type) ? DataType::S32 : data_type;
+        _data_layout       = data_layout;
+        _quantization_info = quantization_info;
 
-        _target    = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, inner_border, data_type, quantization_info);
-        _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, inner_border, data_type, quantization_info);
+        _target    = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, inner_border);
+        _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, inner_border);
     }
 
 protected:
@@ -64,7 +70,8 @@
         {
             case DataType::QASYMM8:
             {
-                std::uniform_int_distribution<uint8_t> distribution(0, 3);
+                std::pair<int, int>                    bounds = get_quantized_bounds(tensor.quantization_info(), -1.0f, 1.0f);
+                std::uniform_int_distribution<uint8_t> distribution(bounds.first, bounds.second);
                 library->fill(tensor, distribution, i);
                 break;
             }
@@ -86,14 +93,21 @@
         }
     }
 
-    TensorType compute_target(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &bias_shape, const TensorShape &output_shape,
-                              const PadStrideInfo &info, const std::pair<unsigned int, unsigned int> &inner_border, DataType data_type, QuantizationInfo quantization_info)
+    TensorType compute_target(TensorShape input_shape, TensorShape weights_shape, const TensorShape bias_shape, TensorShape output_shape,
+                              const PadStrideInfo &info, const std::pair<unsigned int, unsigned int> &inner_border)
     {
+        if(_data_layout == DataLayout::NHWC)
+        {
+            permute(input_shape, PermutationVector(2U, 0U, 1U));
+            permute(weights_shape, PermutationVector(2U, 0U, 1U));
+            permute(output_shape, PermutationVector(2U, 0U, 1U));
+        }
+
         // Create tensors
-        TensorType src     = create_tensor<TensorType>(input_shape, data_type, 1, quantization_info);
-        TensorType weights = create_tensor<TensorType>(weights_shape, data_type, 1, quantization_info);
-        TensorType bias    = create_tensor<TensorType>(bias_shape, is_data_type_quantized_asymmetric(data_type) ? DataType::S32 : data_type, 1, quantization_info);
-        TensorType dst     = create_tensor<TensorType>(output_shape, data_type, 1, quantization_info);
+        TensorType src     = create_tensor<TensorType>(input_shape, _data_type, 1, _quantization_info, _data_layout);
+        TensorType weights = create_tensor<TensorType>(weights_shape, _data_type, 1, _quantization_info, _data_layout);
+        TensorType bias    = create_tensor<TensorType>(bias_shape, _bias_data_type, 1, _quantization_info, _data_layout);
+        TensorType dst     = create_tensor<TensorType>(output_shape, _data_type, 1, _quantization_info, _data_layout);
 
         // Create and configure function
         FunctionType conv;
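[Note] In the validation framework a shape is stored innermost dimension first, so an
NCHW shape reads (W, H, C, N) and its NHWC counterpart reads (C, W, H, N).
PermutationVector(2U, 0U, 1U) above expresses exactly that reordering, assuming
permute() applies out[i] = in[perm[i]]. A minimal sketch of the reindexing with plain
std::array instead of the library's TensorShape (names here are illustrative only):

    #include <array>
    #include <cstdio>

    // 4D shape stored innermost-first: NCHW -> { W, H, C, N }.
    using Shape4D = std::array<unsigned int, 4>;

    // Assumed permute semantics: out[i] = in[perm[i]]; trailing dimensions are copied through.
    Shape4D permute_shape(const Shape4D &in, const std::array<unsigned int, 3> &perm)
    {
        Shape4D out = in;
        for(unsigned int i = 0; i < perm.size(); ++i)
        {
            out[i] = in[perm[i]];
        }
        return out;
    }

    int main()
    {
        const Shape4D nchw{ 13, 11, 4, 3 };                       // W=13, H=11, C=4, N=3
        const Shape4D nhwc = permute_shape(nchw, { { 2, 0, 1 } }); // -> { 4, 13, 11, 3 } = (C, W, H, N)
        std::printf("%u %u %u %u\n", nhwc[0], nhwc[1], nhwc[2], nhwc[3]);
        return 0;
    }

The weights and output shapes get the same treatment; bias_shape is one-dimensional
(one value per kernel) and therefore layout independent, which is presumably why it is
not permuted in compute_target().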
@@ -127,12 +141,12 @@
     }
 
     SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &bias_shape, const TensorShape &output_shape,
-                                      const PadStrideInfo &info, const std::pair<unsigned int, unsigned int> inner_border, DataType data_type, QuantizationInfo quantization_info)
+                                      const PadStrideInfo &info, const std::pair<unsigned int, unsigned int> inner_border)
     {
         // Create reference
-        SimpleTensor<T>     src{ input_shape, data_type, 1, quantization_info };
-        SimpleTensor<T>     weights{ weights_shape, data_type, 1, quantization_info };
-        SimpleTensor<TBias> bias{ bias_shape, data_type, 1, quantization_info };
+        SimpleTensor<T>     src{ input_shape, _data_type, 1, _quantization_info };
+        SimpleTensor<T>     weights{ weights_shape, _data_type, 1, _quantization_info };
+        SimpleTensor<TBias> bias{ bias_shape, _bias_data_type, 1, _quantization_info };
 
         // Fill reference
         fill(src, 0);
@@ -142,9 +156,12 @@
         return reference::deconvolution_layer(src, weights, bias, output_shape, info, inner_border);
     }
 
-    TensorType      _target{};
-    SimpleTensor<T> _reference{};
-    DataType        _data_type{};
+    TensorType       _target{};
+    SimpleTensor<T>  _reference{};
+    DataType         _data_type{};
+    DataType         _bias_data_type{};
+    DataLayout       _data_layout{};
+    QuantizationInfo _quantization_info{};
 };
 
 template <typename TensorType, typename AccessorType, typename FunctionType, typename T, unsigned int kernel_size_x, unsigned int kernel_size_y>
@@ -153,16 +170,18 @@ class DeconvolutionValidationFixture : public DeconvolutionLayerFixtureBase
 public:
     template <typename...>
     void setup(TensorShape input_shape, unsigned int sx, unsigned int sy, unsigned int padx, unsigned int pady,
-               unsigned int inner_border_right, unsigned int inner_border_top, unsigned int num_kernels, DataType data_type)
+               unsigned int inner_border_right, unsigned int inner_border_top, unsigned int num_kernels, DataType data_type, DataLayout data_layout)
     {
         ARM_COMPUTE_ERROR_ON_MSG(kernel_size_x != kernel_size_y, "Only square kernels supported");
         const TensorShape   weights_shape(kernel_size_x, kernel_size_y, input_shape.z(), num_kernels);
         const TensorShape   bias_shape(num_kernels);
         const PadStrideInfo info(sx, sy, padx, pady, DimensionRoundingType::CEIL);
         const std::pair<unsigned int, unsigned int> inner_border(inner_border_right, inner_border_top);
-        auto        out_dim      = deconvolution_output_dimensions(input_shape.x(), input_shape.y(), kernel_size_x, kernel_size_y, padx, pady, sx, sy);
-        TensorShape output_shape = deconvolution_output_shape(out_dim, input_shape, weights_shape);
-        DeconvolutionLayerFixtureBase<TensorType, AccessorType, FunctionType, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, inner_border, data_type, QuantizationInfo());
+        auto        out_dim      = deconvolution_output_dimensions(input_shape.x(), input_shape.y(), kernel_size_x, kernel_size_y, padx, pady, sx, sy);
+        TensorInfo  input_info(input_shape, 1, data_type);
+        TensorInfo  weights_info(weights_shape, 1, data_type);
+        TensorShape output_shape = compute_deconvolution_output_shape(out_dim, input_info, weights_info);
+        DeconvolutionLayerFixtureBase<TensorType, AccessorType, FunctionType, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, inner_border, data_type, data_layout, QuantizationInfo());
     }
 };
 
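[Note] Both the float and the quantized fixture still size the output with
deconvolution_output_dimensions(); only the shape helper changes, from
deconvolution_output_shape() to compute_deconvolution_output_shape(), which works on
TensorInfo objects (and thus has access to the data layout). As a rough sketch, the
spatial output size of a deconvolution follows the usual transposed-convolution
formula out = stride * (in - 1) + kernel - 2 * pad; treating that as what the helper
computes is an assumption here, and the library version may additionally account for
the inner border and rounding:

    #include <cstdio>
    #include <utility>

    // Illustrative only: standard transposed-convolution output size per axis,
    // out = stride * (in - 1) + kernel - 2 * pad.
    std::pair<unsigned int, unsigned int> deconv_out_dims(unsigned int in_w, unsigned int in_h,
                                                          unsigned int k_w, unsigned int k_h,
                                                          unsigned int pad_x, unsigned int pad_y,
                                                          unsigned int stride_x, unsigned int stride_y)
    {
        const unsigned int out_w = stride_x * (in_w - 1) + k_w - 2 * pad_x;
        const unsigned int out_h = stride_y * (in_h - 1) + k_h - 2 * pad_y;
        return { out_w, out_h };
    }

    int main()
    {
        // e.g. a 4x4 input, 3x3 kernel, stride 2 and no padding gives a 9x9 output
        const auto out = deconv_out_dims(4, 4, 3, 3, 0, 0, 2, 2);
        std::printf("%u x %u\n", out.first, out.second);
        return 0;
    }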
@@ -172,16 +191,18 @@ class DeconvolutionValidationQuantizedFixture : public DeconvolutionLayerFixture
 public:
     template <typename...>
     void setup(TensorShape input_shape, unsigned int sx, unsigned int sy, unsigned int padx, unsigned int pady,
-               unsigned int inner_border_right, unsigned int inner_border_top, unsigned int num_kernels, DataType data_type, QuantizationInfo quantization_info)
+               unsigned int inner_border_right, unsigned int inner_border_top, unsigned int num_kernels, DataType data_type, DataLayout data_layout, QuantizationInfo quantization_info)
     {
         ARM_COMPUTE_ERROR_ON_MSG(kernel_size_x != kernel_size_y, "Only square kernels supported");
         const TensorShape   weights_shape(kernel_size_x, kernel_size_y, input_shape.z(), num_kernels);
         const TensorShape   bias_shape(num_kernels);
         const PadStrideInfo info(sx, sy, padx, pady, DimensionRoundingType::CEIL);
         const std::pair<unsigned int, unsigned int> inner_border(inner_border_right, inner_border_top);
-        auto        out_dim      = deconvolution_output_dimensions(input_shape.x(), input_shape.y(), kernel_size_x, kernel_size_y, padx, pady, sx, sy);
-        TensorShape output_shape = deconvolution_output_shape(out_dim, input_shape, weights_shape);
-        DeconvolutionLayerFixtureBase<TensorType, AccessorType, FunctionType, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, inner_border, data_type, quantization_info);
+        auto        out_dim      = deconvolution_output_dimensions(input_shape.x(), input_shape.y(), kernel_size_x, kernel_size_y, padx, pady, sx, sy);
+        TensorInfo  input_info(input_shape, 1, data_type, quantization_info);
+        TensorInfo  weights_info(weights_shape, 1, data_type, quantization_info);
+        TensorShape output_shape = compute_deconvolution_output_shape(out_dim, input_info, weights_info);
+        DeconvolutionLayerFixtureBase<TensorType, AccessorType, FunctionType, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, inner_border, data_type, data_layout, quantization_info);
     }
 };
 
diff --git a/tests/validation/fixtures/NormalizePlanarYUVLayerFixture.h b/tests/validation/fixtures/NormalizePlanarYUVLayerFixture.h
index 3bb935e49f..93e4e64830 100644
--- a/tests/validation/fixtures/NormalizePlanarYUVLayerFixture.h
+++ b/tests/validation/fixtures/NormalizePlanarYUVLayerFixture.h
@@ -68,11 +68,10 @@ protected:
         }
         else if(is_data_type_quantized_asymmetric(_data_type))
         {
-            const QuantizationInfo quant_info = src_tensor.quantization_info();
-            const int              min_bound  = quant_info.quantize(-1.f, RoundingPolicy::TO_NEAREST_UP);
-            const int              max_bound  = quant_info.quantize(1.f, RoundingPolicy::TO_NEAREST_UP);
-            std::uniform_int_distribution<> distribution(min_bound, max_bound);
-            std::uniform_int_distribution<> distribution_std(quant_info.quantize(0.1f, RoundingPolicy::TO_NEAREST_UP), max_bound);
+            const QuantizationInfo quant_info = src_tensor.quantization_info();
+            std::pair<int, int>    bounds     = get_quantized_bounds(quant_info, -1.f, 1.0f);
+            std::uniform_int_distribution<> distribution(bounds.first, bounds.second);
+            std::uniform_int_distribution<> distribution_std(quant_info.quantize(0.1f, RoundingPolicy::TO_NEAREST_UP), bounds.second);
             library->fill(src_tensor, distribution, 0);
             library->fill(mean_tensor, distribution, 1);
             library->fill(std_tensor, distribution_std, 2);
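[Note] The std tensor keeps its own distribution whose lower bound is the quantized
value of 0.1f rather than of -1.f, so the generated standard deviations stay strictly
positive, presumably to keep the reference (x - mean) / std computation well behaved.
A self-contained sketch of how the three fills are parameterised (the quantize lambda
below mimics QuantizationInfo::quantize() under the usual QASYMM8 scheme and is not
the library code):

    #include <algorithm>
    #include <cmath>
    #include <cstdint>
    #include <random>
    #include <vector>

    int main()
    {
        const float scale  = 1.0f / 127.0f; // example quantization parameters
        const int   offset = 128;

        // Assumed QASYMM8 mapping: q = round(v / scale) + offset, clamped to [0, 255].
        auto quantize = [&](float v) {
            return std::min(255, std::max(0, static_cast<int>(std::lround(v / scale)) + offset));
        };

        std::mt19937 gen(0);
        // src and mean cover the full tested range [-1, 1]...
        std::uniform_int_distribution<> distribution(quantize(-1.0f), quantize(1.0f));
        // ...while std is confined to [0.1, 1] so it never quantizes to (almost) zero.
        std::uniform_int_distribution<> distribution_std(quantize(0.1f), quantize(1.0f));

        std::vector<std::uint8_t> src(16), mean(4), stddev(4);
        for(auto &v : src)    { v = static_cast<std::uint8_t>(distribution(gen)); }
        for(auto &v : mean)   { v = static_cast<std::uint8_t>(distribution(gen)); }
        for(auto &v : stddev) { v = static_cast<std::uint8_t>(distribution_std(gen)); }
        return 0;
    }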
"tests/validation/reference/ReductionOperation.h" #include "tests/validation/reference/ReshapeLayer.h" @@ -63,10 +64,8 @@ protected: } else { - const QuantizationInfo quant_info = tensor.quantization_info(); - const int min_bound = quant_info.quantize(-1.f, RoundingPolicy::TO_NEAREST_UP); - const int max_bound = quant_info.quantize(1.f, RoundingPolicy::TO_NEAREST_UP); - std::uniform_int_distribution<> distribution(min_bound, max_bound); + std::pair bounds = get_quantized_bounds(tensor.quantization_info(), -1.0f, 1.0f); + std::uniform_int_distribution<> distribution(bounds.first, bounds.second); library->fill(tensor, distribution, 0); } -- cgit v1.2.1