From 6b612f5fa1fee9528f2f87491fe7edb3887d9817 Mon Sep 17 00:00:00 2001
From: Michele Di Giorgio
Date: Thu, 5 Sep 2019 12:30:22 +0100
Subject: COMPMID-2310: CLGenerateProposalsLayer: support for QASYMM8

Change-Id: I48b77e09857cd43f9498d28e8f4bf346e3d7110d
Signed-off-by: Michele Di Giorgio
Reviewed-on: https://review.mlplatform.org/c/1969
Reviewed-by: Pablo Marquez
Comments-Addressed: Arm Jenkins
Tested-by: Arm Jenkins
---
 .../CL/kernels/CLGenerateProposalsLayerKernel.cpp | 21 ++++++++++++++++++---
 1 file changed, 18 insertions(+), 3 deletions(-)

diff --git a/src/core/CL/kernels/CLGenerateProposalsLayerKernel.cpp b/src/core/CL/kernels/CLGenerateProposalsLayerKernel.cpp
index 79e364caf7..16d0e86d7d 100644
--- a/src/core/CL/kernels/CLGenerateProposalsLayerKernel.cpp
+++ b/src/core/CL/kernels/CLGenerateProposalsLayerKernel.cpp
@@ -44,7 +44,7 @@ Status validate_arguments(const ITensorInfo *anchors, const ITensorInfo *all_anc
     ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(anchors, all_anchors);
     ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(anchors);
     ARM_COMPUTE_RETURN_ERROR_ON(anchors->dimension(0) != info.values_per_roi());
-    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_NOT_IN(anchors, DataType::F16, DataType::F32);
+    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_NOT_IN(anchors, DataType::QSYMM16, DataType::F16, DataType::F32);
     ARM_COMPUTE_RETURN_ERROR_ON(anchors->num_dimensions() > 2);
     if(all_anchors->total_size() > 0)
     {
@@ -55,6 +55,11 @@ Status validate_arguments(const ITensorInfo *anchors, const ITensorInfo *all_anc
         ARM_COMPUTE_RETURN_ERROR_ON(all_anchors->num_dimensions() > 2);
         ARM_COMPUTE_RETURN_ERROR_ON(all_anchors->dimension(0) != info.values_per_roi());
         ARM_COMPUTE_RETURN_ERROR_ON(all_anchors->dimension(1) != feature_height * feature_width * num_anchors);
+
+        if(is_data_type_quantized(anchors->data_type()))
+        {
+            ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_QUANTIZATION_INFO(anchors, all_anchors);
+        }
     }
     return Status{};
 }
@@ -78,12 +83,14 @@ void CLComputeAllAnchorsKernel::configure(const ICLTensor *anchors, ICLTensor *a
 
     // Initialize the output if empty
     const TensorShape output_shape(info.values_per_roi(), width * height * num_anchors);
-    auto_init_if_empty(*all_anchors->info(), output_shape, 1, data_type);
+    auto_init_if_empty(*all_anchors->info(), TensorInfo(output_shape, 1, data_type, anchors->info()->quantization_info()));
 
     // Set instance variables
     _anchors     = anchors;
     _all_anchors = all_anchors;
 
+    const bool is_quantized = is_data_type_quantized(anchors->info()->data_type());
+
     // Set build options
     CLBuildOptions build_opts;
     build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(data_type));
@@ -93,8 +100,16 @@ void CLComputeAllAnchorsKernel::configure(const ICLTensor *anchors, ICLTensor *a
     build_opts.add_option("-DNUM_ANCHORS=" + support::cpp11::to_string(num_anchors));
     build_opts.add_option("-DNUM_ROI_FIELDS=" + support::cpp11::to_string(info.values_per_roi()));
 
+    if(is_quantized)
+    {
+        const UniformQuantizationInfo qinfo = anchors->info()->quantization_info().uniform();
+        build_opts.add_option("-DSCALE=" + float_to_string_with_full_precision(qinfo.scale));
+        build_opts.add_option("-DOFFSET=" + float_to_string_with_full_precision(qinfo.offset));
+    }
+
     // Create kernel
-    _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("generate_proposals_compute_all_anchors", build_opts.options()));
+    const std::string kernel_name = (is_quantized) ? "generate_proposals_compute_all_anchors_quantized" : "generate_proposals_compute_all_anchors";
+    _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name, build_opts.options()));
 
     // The tensor all_anchors can be interpreted as an array of structs (each structs has values_per_roi fields).
     // This means we don't need to pad on the X dimension, as we know in advance how many fields
-- 
cgit v1.2.1