From 4cb0bd488f70a07b222e1ed7008d888642dfec6f Mon Sep 17 00:00:00 2001
From: Pablo Marquez Tello
Date: Thu, 27 Jul 2023 18:02:37 +0100
Subject: Improved testing for ArgMinMax

* ArgMinMax output was fixed to S32; this patch makes the changes required to allow other output types like U64/S64.
* Made changes to the ArgMinMax fixture and tests to allow specifying the output data type.
* Made changes to the reference reduction_operation to allow specifying the output type.
* Added test cases to output S64 for the CL backend.
* Added missing test cases in the Neon backend.
* Partially resolves MLCE-1089

Change-Id: I6f1cbc7093669d12c2a3aff6974cf19d83b2ecda
Signed-off-by: Pablo Marquez Tello
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/10003
Reviewed-by: Viet-Hoa Do
Tested-by: Arm Jenkins
Comments-Addressed: Arm Jenkins
Benchmark: Arm Jenkins
---
 tests/validation/reference/ReductionOperation.cpp | 45 +++++++++++++++--------
 1 file changed, 30 insertions(+), 15 deletions(-)

(limited to 'tests/validation/reference/ReductionOperation.cpp')

diff --git a/tests/validation/reference/ReductionOperation.cpp b/tests/validation/reference/ReductionOperation.cpp
index e2890afb9f..c189bc2d47 100644
--- a/tests/validation/reference/ReductionOperation.cpp
+++ b/tests/validation/reference/ReductionOperation.cpp
@@ -181,12 +181,12 @@ OT reduce_operation_arg_min_max(const T *ptr, int reduce_elements, ReductionOper
 } // namespace
 
 template <typename T, typename OT>
-SimpleTensor<OT> compute_reduction_operation(const SimpleTensor<T> &src, const TensorShape &dst_shape, unsigned int axis, ReductionOperation op, RoundingPolicy policy)
+SimpleTensor<OT> compute_reduction_operation(const SimpleTensor<T> &src, const TensorShape &dst_shape, unsigned int axis, ReductionOperation op,
+                                             DataType output_type, RoundingPolicy policy)
 {
     // Create reference
-    const bool     is_arg_min_max   = (op == ReductionOperation::ARG_IDX_MIN || op == ReductionOperation::ARG_IDX_MAX);
-    DataType       output_data_type = is_arg_min_max ? DataType::S32 : src.data_type();
-    SimpleTensor<OT> dst{ dst_shape, output_data_type, 1, src.quantization_info() };
+    const bool       is_arg_min_max = (op == ReductionOperation::ARG_IDX_MIN || op == ReductionOperation::ARG_IDX_MAX);
+    SimpleTensor<OT> dst{ dst_shape, output_type, 1, src.quantization_info() };
     const unsigned int src_width = src.shape().x();
     const unsigned int src_height = src.shape().y();
     const unsigned int src_depth = src.shape().z();
@@ -275,74 +275,89 @@ SimpleTensor<OT> compute_reduction_operation(const SimpleTensor<T> &src, const T
 }
 
 template <typename T, typename OT>
-SimpleTensor<OT> reduction_operation(const SimpleTensor<T> &src, const TensorShape &dst_shape, unsigned int axis, ReductionOperation op, QuantizationInfo quantization_info_output, RoundingPolicy policy)
+SimpleTensor<OT> reduction_operation(const SimpleTensor<T> &src, const TensorShape &dst_shape, unsigned int axis, ReductionOperation op,
+                                     DataType output_type, QuantizationInfo quantization_info_output, RoundingPolicy policy)
 {
     ARM_COMPUTE_UNUSED(quantization_info_output);
-    return compute_reduction_operation<T, OT>(src, dst_shape, axis, op, policy);
+    return compute_reduction_operation<T, OT>(src, dst_shape, axis, op, output_type, policy);
 }
 
 template <>
-SimpleTensor<uint8_t> reduction_operation(const SimpleTensor<uint8_t> &src, const TensorShape &dst_shape, unsigned int axis, ReductionOperation op, QuantizationInfo quantization_info_output, RoundingPolicy policy)
+SimpleTensor<uint8_t> reduction_operation(const SimpleTensor<uint8_t> &src, const TensorShape &dst_shape, unsigned int axis, ReductionOperation op,
+                                          DataType output_type, QuantizationInfo quantization_info_output, RoundingPolicy policy)
 {
     if(src.data_type() == DataType::QASYMM8)
     {
         // If the operation is MEAN_SUM, we can directly use the uint8 implementation without taking into account scale and offset
         if(op == ReductionOperation::MEAN_SUM && src.quantization_info() == quantization_info_output)
         {
-            return compute_reduction_operation<uint8_t, uint8_t>(src, dst_shape, axis, op, policy);
+            return compute_reduction_operation<uint8_t, uint8_t>(src, dst_shape, axis, op, output_type, policy);
         }
         else
         {
             SimpleTensor<float> src_f = convert_from_asymmetric(src);
-            SimpleTensor<float> dst_f = reference::reduction_operation<float, float>(src_f, dst_shape, axis, op);
+            SimpleTensor<float> dst_f = reference::reduction_operation<float, float>(src_f, dst_shape, axis, op, output_type);
             return convert_to_asymmetric<uint8_t>(dst_f, quantization_info_output);
         }
     }
     else
     {
-        return compute_reduction_operation<uint8_t, uint8_t>(src, dst_shape, axis, op, policy);
+        return compute_reduction_operation<uint8_t, uint8_t>(src, dst_shape, axis, op, output_type, policy);
     }
 }
 
 template <>
-SimpleTensor<int8_t> reduction_operation(const SimpleTensor<int8_t> &src, const TensorShape &dst_shape, unsigned int axis, ReductionOperation op, QuantizationInfo quantization_info_output, RoundingPolicy policy)
+SimpleTensor<int8_t> reduction_operation(const SimpleTensor<int8_t> &src, const TensorShape &dst_shape, unsigned int axis,
+                                         ReductionOperation op, DataType output_type, QuantizationInfo quantization_info_output, RoundingPolicy policy)
 {
     if(src.data_type() == DataType::QASYMM8_SIGNED)
     {
         // If the operation is MEAN_SUM, we can directly use the int8 implementation without taking into account scale and offset
         if(op == ReductionOperation::MEAN_SUM && src.quantization_info() == quantization_info_output)
         {
-            return compute_reduction_operation<int8_t, int8_t>(src, dst_shape, axis, op, policy);
+            return compute_reduction_operation<int8_t, int8_t>(src, dst_shape, axis, op, output_type, policy);
         }
         else
         {
             SimpleTensor<float> src_f = convert_from_asymmetric(src);
-            SimpleTensor<float> dst_f = reference::reduction_operation<float, float>(src_f, dst_shape, axis, op);
+            SimpleTensor<float> dst_f = reference::reduction_operation<float, float>(src_f, dst_shape, axis, op, output_type);
             return convert_to_asymmetric<int8_t>(dst_f, quantization_info_output);
         }
     }
     else
     {
-        return compute_reduction_operation<int8_t, int8_t>(src, dst_shape, axis, op, policy);
+        return compute_reduction_operation<int8_t, int8_t>(src, dst_shape, axis, op, output_type, policy);
    }
 }
 
 template SimpleTensor reduction_operation(const SimpleTensor &src, const TensorShape &dst_shape, unsigned int axis, ReductionOperation op,
-                                          QuantizationInfo quantization_info_output = QuantizationInfo(), RoundingPolicy policy = RoundingPolicy::TO_ZERO);
+                                          DataType output_type = DataType::S32, QuantizationInfo quantization_info_output = QuantizationInfo(),
+                                          RoundingPolicy policy = RoundingPolicy::TO_ZERO);
+
 template SimpleTensor reduction_operation(const SimpleTensor &src, const TensorShape &dst_shape, unsigned int axis, ReductionOperation op,
+                                          DataType output_type = DataType::S32,
                                           QuantizationInfo quantization_info_output = QuantizationInfo(), RoundingPolicy policy = RoundingPolicy::TO_ZERO);
 template SimpleTensor reduction_operation(const SimpleTensor &src, const TensorShape &dst_shape, unsigned int axis, ReductionOperation op,
+                                          DataType output_type = DataType::S32,
                                           QuantizationInfo quantization_info_output = QuantizationInfo(), RoundingPolicy policy = RoundingPolicy::TO_ZERO);
+
 template SimpleTensor reduction_operation(const SimpleTensor &src, const TensorShape &dst_shape, unsigned int axis, ReductionOperation op,
+                                          DataType output_type = DataType::S32,
                                           QuantizationInfo quantization_info_output = QuantizationInfo(), RoundingPolicy policy = RoundingPolicy::TO_ZERO);
 template SimpleTensor reduction_operation(const SimpleTensor &src, const TensorShape &dst_shape, unsigned int axis, ReductionOperation op,
+                                          DataType output_type = DataType::S32,
                                           QuantizationInfo quantization_info_output = QuantizationInfo(), RoundingPolicy policy = RoundingPolicy::TO_ZERO);
 template SimpleTensor reduction_operation(const SimpleTensor &src, const TensorShape &dst_shape, unsigned int axis, ReductionOperation op,
+                                          DataType output_type = DataType::S32,
                                           QuantizationInfo quantization_info_output = QuantizationInfo(), RoundingPolicy policy = RoundingPolicy::TO_ZERO);
 template SimpleTensor reduction_operation(const SimpleTensor &src, const TensorShape &dst_shape, unsigned int axis, ReductionOperation op,
+                                          DataType output_type = DataType::S32,
                                           QuantizationInfo quantization_info_output = QuantizationInfo(), RoundingPolicy policy = RoundingPolicy::TO_ZERO);
+template SimpleTensor reduction_operation(const SimpleTensor &src, const TensorShape &dst_shape, unsigned int axis, ReductionOperation op,
+                                          DataType output_type = DataType::S32, QuantizationInfo quantization_info_output = QuantizationInfo(),
+                                          RoundingPolicy policy = RoundingPolicy::TO_ZERO);
 } // namespace reference
 } // namespace validation
 } // namespace test
--
cgit v1.2.1
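As a usage illustration, not part of the patch itself: the sketch below calls the reference helper with an explicit output type, the way a test can now request S64 ArgMinMax indices instead of the previously fixed S32. It assumes the ComputeLibrary test-suite include paths shown, the templated signature SimpleTensor<OT> reduction_operation<T, OT>(...) visible in the diff, and that a <float, int64_t> instantiation is available after this change; treat all of those as assumptions to verify against the actual tree.

// Minimal sketch only: include paths and the <float, int64_t> instantiation are assumed.
#include "tests/SimpleTensor.h"
#include "tests/validation/reference/ReductionOperation.h"

#include <cstdint>
#include <numeric>

using namespace arm_compute;
using namespace arm_compute::test;
using namespace arm_compute::test::validation;

int main()
{
    // 1-D input of eight floats: 0, 1, ..., 7 along the X axis.
    SimpleTensor<float> src{ TensorShape(8U), DataType::F32 };
    std::iota(src.data(), src.data() + src.num_elements(), 0.f);

    // Reduce along axis 0 and request S64 indices via the new output_type parameter.
    SimpleTensor<int64_t> dst = reference::reduction_operation<float, int64_t>(
        src, TensorShape(1U), /* axis */ 0U, ReductionOperation::ARG_IDX_MAX, DataType::S64);

    // The maximum element sits at index 7.
    return dst[0] == 7 ? 0 : 1;
}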