diff options
author | Michele Di Giorgio <michele.digiorgio@arm.com> | 2019-09-05 12:30:22 +0100 |
---|---|---|
committer | Pablo Marquez <pablo.tello@arm.com> | 2019-09-27 16:20:14 +0000 |
commit | 6b612f5fa1fee9528f2f87491fe7edb3887d9817 (patch) | |
tree | 579ef443d61ed1319e5d8f44d8a7a8ce83c82aad /src/core/CL/kernels/CLGenerateProposalsLayerKernel.cpp | |
parent | 240b79de1c211ebb8d439b4a1c8c79777aa36f13 (diff) | |
download | ComputeLibrary-6b612f5fa1fee9528f2f87491fe7edb3887d9817.tar.gz |
COMPMID-2310: CLGenerateProposalsLayer: support for QASYMM8
Change-Id: I48b77e09857cd43f9498d28e8f4bf346e3d7110d
Signed-off-by: Michele Di Giorgio <michele.digiorgio@arm.com>
Reviewed-on: https://review.mlplatform.org/c/1969
Reviewed-by: Pablo Marquez <pablo.tello@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Diffstat (limited to 'src/core/CL/kernels/CLGenerateProposalsLayerKernel.cpp')
-rw-r--r-- | src/core/CL/kernels/CLGenerateProposalsLayerKernel.cpp | 21 |
1 file changed, 18 insertions, 3 deletions
diff --git a/src/core/CL/kernels/CLGenerateProposalsLayerKernel.cpp b/src/core/CL/kernels/CLGenerateProposalsLayerKernel.cpp index 79e364caf7..16d0e86d7d 100644 --- a/src/core/CL/kernels/CLGenerateProposalsLayerKernel.cpp +++ b/src/core/CL/kernels/CLGenerateProposalsLayerKernel.cpp @@ -44,7 +44,7 @@ Status validate_arguments(const ITensorInfo *anchors, const ITensorInfo *all_anc ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(anchors, all_anchors); ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(anchors); ARM_COMPUTE_RETURN_ERROR_ON(anchors->dimension(0) != info.values_per_roi()); - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_NOT_IN(anchors, DataType::F16, DataType::F32); + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_NOT_IN(anchors, DataType::QSYMM16, DataType::F16, DataType::F32); ARM_COMPUTE_RETURN_ERROR_ON(anchors->num_dimensions() > 2); if(all_anchors->total_size() > 0) { @@ -55,6 +55,11 @@ Status validate_arguments(const ITensorInfo *anchors, const ITensorInfo *all_anc ARM_COMPUTE_RETURN_ERROR_ON(all_anchors->num_dimensions() > 2); ARM_COMPUTE_RETURN_ERROR_ON(all_anchors->dimension(0) != info.values_per_roi()); ARM_COMPUTE_RETURN_ERROR_ON(all_anchors->dimension(1) != feature_height * feature_width * num_anchors); + + if(is_data_type_quantized(anchors->data_type())) + { + ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_QUANTIZATION_INFO(anchors, all_anchors); + } } return Status{}; } @@ -78,12 +83,14 @@ void CLComputeAllAnchorsKernel::configure(const ICLTensor *anchors, ICLTensor *a // Initialize the output if empty const TensorShape output_shape(info.values_per_roi(), width * height * num_anchors); - auto_init_if_empty(*all_anchors->info(), output_shape, 1, data_type); + auto_init_if_empty(*all_anchors->info(), TensorInfo(output_shape, 1, data_type, anchors->info()->quantization_info())); // Set instance variables _anchors = anchors; _all_anchors = all_anchors; + const bool is_quantized = is_data_type_quantized(anchors->info()->data_type()); + // Set build options CLBuildOptions build_opts; 
build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(data_type)); @@ -93,8 +100,16 @@ void CLComputeAllAnchorsKernel::configure(const ICLTensor *anchors, ICLTensor *a build_opts.add_option("-DNUM_ANCHORS=" + support::cpp11::to_string(num_anchors)); build_opts.add_option("-DNUM_ROI_FIELDS=" + support::cpp11::to_string(info.values_per_roi())); + if(is_quantized) + { + const UniformQuantizationInfo qinfo = anchors->info()->quantization_info().uniform(); + build_opts.add_option("-DSCALE=" + float_to_string_with_full_precision(qinfo.scale)); + build_opts.add_option("-DOFFSET=" + float_to_string_with_full_precision(qinfo.offset)); + } + // Create kernel - _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("generate_proposals_compute_all_anchors", build_opts.options())); + const std::string kernel_name = (is_quantized) ? "generate_proposals_compute_all_anchors_quantized" : "generate_proposals_compute_all_anchors"; + _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name, build_opts.options())); // The tensor all_anchors can be interpreted as an array of structs (each structs has values_per_roi fields). // This means we don't need to pad on the X dimension, as we know in advance how many fields |