diff options
author | Michele Di Giorgio <michele.digiorgio@arm.com> | 2020-03-06 13:56:54 +0000 |
---|---|---|
committer | Michele Di Giorgio <michele.digiorgio@arm.com> | 2020-03-06 15:51:08 +0000 |
commit | 398b8e4a0cf02b43f4469079e95b811cc1255e29 (patch) | |
tree | 8c5c4d84a6ec15b329809ce3561ada1d36b693dd | |
parent | b54ba2848515bf0aee0619c760518481f58c7525 (diff) | |
download | ComputeLibrary-398b8e4a0cf02b43f4469079e95b811cc1255e29.tar.gz |
COMPMID-3069: Fix min/max output stage bounds in CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel
Change-Id: I0985f1649c4936b7e16a77e9cd3ea48c4c77cbc9
Signed-off-by: Michele Di Giorgio <michele.digiorgio@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/2849
Reviewed-by: Giorgio Arena <giorgio.arena@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
-rw-r--r-- | src/core/CL/kernels/CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel.cpp | 16 | ||||
-rw-r--r-- | tests/validation/CL/FullyConnectedLayer.cpp | 50 | ||||
-rw-r--r-- | tests/validation/NEON/FullyConnectedLayer.cpp | 50 |
3 files changed, 3 insertions, 113 deletions
diff --git a/src/core/CL/kernels/CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel.cpp b/src/core/CL/kernels/CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel.cpp
index c4ed691f2e..71b4d76520 100644
--- a/src/core/CL/kernels/CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel.cpp
+++ b/src/core/CL/kernels/CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel.cpp
@@ -160,21 +160,11 @@ Status validate_arguments(const ITensorInfo *input0, const ITensorInfo *input1,
         }
     }
 
-    PixelValue min_val{};
-    PixelValue max_val{};
     if(output->total_size() != 0)
     {
         ARM_COMPUTE_RETURN_ERROR_ON(output_stage.output_data_type != output->data_type());
-        std::tie(min_val, max_val) = get_min_max(output->data_type());
-        ARM_COMPUTE_RETURN_ERROR_ON(output_stage.gemmlowp_max_bound > max_val.get<int32_t>());
-        ARM_COMPUTE_RETURN_ERROR_ON(output_stage.gemmlowp_min_bound < min_val.get<int32_t>() || output_stage.gemmlowp_min_bound > output_stage.gemmlowp_max_bound);
-    }
-    else
-    {
-        std::tie(min_val, max_val) = get_min_max(output_stage.output_data_type);
-        ARM_COMPUTE_RETURN_ERROR_ON(output_stage.gemmlowp_max_bound > max_val.get<int32_t>());
-        ARM_COMPUTE_RETURN_ERROR_ON(output_stage.gemmlowp_min_bound < min_val.get<int32_t>() || output_stage.gemmlowp_min_bound > output_stage.gemmlowp_max_bound);
     }
+    ARM_COMPUTE_RETURN_ERROR_ON(output_stage.gemmlowp_min_bound > output_stage.gemmlowp_max_bound);
 
     if(output_multipliers != nullptr && output_shifts != nullptr)
     {
@@ -425,8 +415,8 @@ void CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel::configure(const ICLTensor *i
         PixelValue min_val{};
         PixelValue max_val{};
         std::tie(min_val, max_val) = get_min_max(output->info()->data_type());
-        build_opts.add_option_if((min != min_val.get<int32_t>()) && (min != max), "-DMIN_BOUND=" + support::cpp11::to_string(min));
-        build_opts.add_option_if((max != max_val.get<int32_t>()) && (min != max), "-DMAX_BOUND=" + support::cpp11::to_string(max));
+        build_opts.add_option_if(min != min_val.get<int32_t>(), "-DMIN_BOUND=" + support::cpp11::to_string(min));
+        build_opts.add_option_if(max != max_val.get<int32_t>(), "-DMAX_BOUND=" + support::cpp11::to_string(max));
     }
 
     // Create kernel
diff --git a/tests/validation/CL/FullyConnectedLayer.cpp b/tests/validation/CL/FullyConnectedLayer.cpp
index 357d77d03a..50094f1916 100644
--- a/tests/validation/CL/FullyConnectedLayer.cpp
+++ b/tests/validation/CL/FullyConnectedLayer.cpp
@@ -89,56 +89,6 @@ const auto ActivationFunctionsQuantizedDataset = framework::dataset::make("Activ
 TEST_SUITE(CL)
 TEST_SUITE(FullyConnectedLayer)
 
-DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(datasets::SmallFullyConnectedLayerDataset(),
-                                                                           FullyConnectedParameters),
-                                                                   CNNDataTypes),
-               src_shape, weights_shape, bias_shape, dst_shape, transpose_weights, reshape_weights, data_type)
-{
-    const DataType         bias_data_type    = is_data_type_quantized_asymmetric(data_type) ? DataType::S32 : data_type;
-    const QuantizationInfo quantization_info = is_data_type_quantized_asymmetric(data_type) ? QuantizationInfo(2.f / 255.f, 127) : QuantizationInfo();
-
-    TensorShape ws(weights_shape);
-
-    // Transpose weights if not done in the function
-    if(!reshape_weights || !transpose_weights)
-    {
-        const size_t shape_x = ws.x();
-        ws.set(0, ws.y());
-        ws.set(1, shape_x);
-    }
-
-    // Create tensors
-    CLTensor src     = create_tensor<CLTensor>(src_shape, data_type, 1, quantization_info);
-    CLTensor weights = create_tensor<CLTensor>(ws, data_type, 1, quantization_info);
-    CLTensor bias    = create_tensor<CLTensor>(bias_shape, bias_data_type, 1, quantization_info);
-    CLTensor dst     = create_tensor<CLTensor>(dst_shape, data_type, 1, quantization_info);
-
-    ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
-    ARM_COMPUTE_EXPECT(weights.info()->is_resizable(), framework::LogLevel::ERRORS);
-    ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
-    ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
-
-    const QuantizationInfo src_quantization_info     = src.info()->quantization_info();
-    const QuantizationInfo weights_quantization_info = weights.info()->quantization_info();
-
-    // Create Fully Connected layer info
-    FullyConnectedLayerInfo fc_info;
-    fc_info.transpose_weights    = transpose_weights;
-    fc_info.are_weights_reshaped = !reshape_weights;
-
-    // Create and configure function.
-    CLFullyConnectedLayer fc;
-    fc.configure(&src, &weights, &bias, &dst, fc_info);
-
-    // Validate valid region
-    const ValidRegion dst_valid_region = shape_to_valid_region(dst_shape);
-    validate(dst.info()->valid_region(), dst_valid_region);
-
-    // Validate QuantizationInfo
-    ARM_COMPUTE_EXPECT(src.info()->quantization_info() == src_quantization_info, framework::LogLevel::ERRORS);
-    ARM_COMPUTE_EXPECT(weights.info()->quantization_info() == weights_quantization_info, framework::LogLevel::ERRORS);
-}
-
 // *INDENT-OFF*
 // clang-format off
 DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(zip(
diff --git a/tests/validation/NEON/FullyConnectedLayer.cpp b/tests/validation/NEON/FullyConnectedLayer.cpp
index f66b0ceb8a..cd2986a1e4 100644
--- a/tests/validation/NEON/FullyConnectedLayer.cpp
+++ b/tests/validation/NEON/FullyConnectedLayer.cpp
@@ -78,56 +78,6 @@ const auto ActivationFunctionsDataset = framework::dataset::make("ActivationInfo
 TEST_SUITE(NEON)
 TEST_SUITE(FullyConnectedLayer)
 
-DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(datasets::SmallFullyConnectedLayerDataset(),
-                                                                           FullyConnectedParameters),
-                                                                   CNNDataTypes),
-               src_shape, weights_shape, bias_shape, dst_shape, transpose_weights, reshape_weights, data_type)
-{
-    const DataType         bias_data_type    = is_data_type_quantized_asymmetric(data_type) ? DataType::S32 : data_type;
-    const QuantizationInfo quantization_info = is_data_type_quantized_asymmetric(data_type) ? QuantizationInfo(2.f / 255.f, 127) : QuantizationInfo();
-
-    TensorShape ws(weights_shape);
-
-    // Transpose weights if not done in the function
-    if(!reshape_weights || !transpose_weights)
-    {
-        const size_t shape_x = ws.x();
-        ws.set(0, ws.y());
-        ws.set(1, shape_x);
-    }
-
-    // Create tensors
-    Tensor src     = create_tensor<Tensor>(src_shape, data_type, 1, quantization_info);
-    Tensor weights = create_tensor<Tensor>(ws, data_type, 1, quantization_info);
-    Tensor bias    = create_tensor<Tensor>(bias_shape, bias_data_type, 1, quantization_info);
-    Tensor dst     = create_tensor<Tensor>(dst_shape, data_type, 1, quantization_info);
-
-    ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
-    ARM_COMPUTE_EXPECT(weights.info()->is_resizable(), framework::LogLevel::ERRORS);
-    ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
-    ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
-
-    // Create Fully Connected layer info
-    FullyConnectedLayerInfo fc_info;
-    fc_info.transpose_weights    = transpose_weights;
-    fc_info.are_weights_reshaped = !reshape_weights;
-
-    const QuantizationInfo src_quantization_info     = src.info()->quantization_info();
-    const QuantizationInfo weights_quantization_info = weights.info()->quantization_info();
-
-    // Create and configure function.
-    NEFullyConnectedLayer fc;
-    fc.configure(&src, &weights, &bias, &dst, fc_info);
-
-    // Validate valid region
-    const ValidRegion dst_valid_region = shape_to_valid_region(dst_shape);
-    validate(dst.info()->valid_region(), dst_valid_region);
-
-    // Validate QuantizationInfo
-    ARM_COMPUTE_EXPECT(src.info()->quantization_info() == src_quantization_info, framework::LogLevel::ERRORS);
-    ARM_COMPUTE_EXPECT(weights.info()->quantization_info() == weights_quantization_info, framework::LogLevel::ERRORS);
-}
-
 // *INDENT-OFF*
 // clang-format off
 DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(zip(